Diffstat (limited to 'drivers/net')
-rw-r--r-- drivers/net/bonding/bond_debugfs.c | 5
-rw-r--r-- drivers/net/caif/caif_serial.c | 26
-rw-r--r-- drivers/net/caif/caif_virtio.c | 6
-rw-r--r-- drivers/net/can/Kconfig | 13
-rw-r--r-- drivers/net/can/Makefile | 1
-rw-r--r-- drivers/net/can/flexcan.c | 138
-rw-r--r-- drivers/net/can/janz-ican3.c | 1
-rw-r--r-- drivers/net/can/kvaser_pciefd.c | 1907
-rw-r--r-- drivers/net/can/m_can/Kconfig | 22
-rw-r--r-- drivers/net/can/m_can/Makefile | 2
-rw-r--r-- drivers/net/can/m_can/m_can.c | 1079
-rw-r--r-- drivers/net/can/m_can/m_can.h | 110
-rw-r--r-- drivers/net/can/m_can/m_can_platform.c | 201
-rw-r--r-- drivers/net/can/m_can/tcan4x5x.c | 527
-rw-r--r-- drivers/net/can/rcar/rcar_can.c | 23
-rw-r--r-- drivers/net/can/rcar/rcar_canfd.c | 2
-rw-r--r-- drivers/net/can/sja1000/Kconfig | 79
-rw-r--r-- drivers/net/can/sja1000/Makefile | 11
-rw-r--r-- drivers/net/can/sja1000/f81601.c | 211
-rw-r--r-- drivers/net/can/sja1000/peak_pci.c | 2
-rw-r--r-- drivers/net/can/spi/hi311x.c | 62
-rw-r--r-- drivers/net/can/spi/mcp251x.c | 121
-rw-r--r-- drivers/net/can/sun4i_can.c | 1
-rw-r--r-- drivers/net/can/ti_hecc.c | 268
-rw-r--r-- drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c | 3
-rw-r--r-- drivers/net/can/vcan.c | 19
-rw-r--r-- drivers/net/can/xilinx_can.c | 292
-rw-r--r-- drivers/net/dsa/b53/b53_common.c | 7
-rw-r--r-- drivers/net/dsa/b53/b53_srab.c | 8
-rw-r--r-- drivers/net/dsa/bcm_sf2.c | 7
-rw-r--r-- drivers/net/dsa/lan9303-core.c | 6
-rw-r--r-- drivers/net/dsa/lantiq_gswip.c | 16
-rw-r--r-- drivers/net/dsa/microchip/Kconfig | 18
-rw-r--r-- drivers/net/dsa/microchip/Makefile | 2
-rw-r--r-- drivers/net/dsa/microchip/ksz8795.c | 1310
-rw-r--r-- drivers/net/dsa/microchip/ksz8795_reg.h | 1004
-rw-r--r-- drivers/net/dsa/microchip/ksz8795_spi.c | 103
-rw-r--r-- drivers/net/dsa/microchip/ksz9477.c | 1
-rw-r--r-- drivers/net/dsa/microchip/ksz9477_spi.c | 1
-rw-r--r-- drivers/net/dsa/microchip/ksz_common.c | 21
-rw-r--r-- drivers/net/dsa/microchip/ksz_common.h | 173
-rw-r--r-- drivers/net/dsa/microchip/ksz_priv.h | 155
-rw-r--r-- drivers/net/dsa/mt7530.c | 6
-rw-r--r-- drivers/net/dsa/mv88e6xxx/chip.c | 386
-rw-r--r-- drivers/net/dsa/mv88e6xxx/chip.h | 25
-rw-r--r-- drivers/net/dsa/mv88e6xxx/global1.c | 95
-rw-r--r-- drivers/net/dsa/mv88e6xxx/global1.h | 5
-rw-r--r-- drivers/net/dsa/mv88e6xxx/global1_atu.c | 7
-rw-r--r-- drivers/net/dsa/mv88e6xxx/global1_vtu.c | 6
-rw-r--r-- drivers/net/dsa/mv88e6xxx/global2.c | 72
-rw-r--r-- drivers/net/dsa/mv88e6xxx/global2.h | 12
-rw-r--r-- drivers/net/dsa/mv88e6xxx/global2_avb.c | 29
-rw-r--r-- drivers/net/dsa/mv88e6xxx/global2_scratch.c | 3
-rw-r--r-- drivers/net/dsa/mv88e6xxx/port.c | 38
-rw-r--r-- drivers/net/dsa/mv88e6xxx/port.h | 2
-rw-r--r-- drivers/net/dsa/mv88e6xxx/ptp.c | 95
-rw-r--r-- drivers/net/dsa/mv88e6xxx/ptp.h | 6
-rw-r--r-- drivers/net/dsa/mv88e6xxx/smi.c | 4
-rw-r--r-- drivers/net/ethernet/3com/3c59x.c | 8
-rw-r--r-- drivers/net/ethernet/aeroflex/greth.c | 2
-rw-r--r-- drivers/net/ethernet/agere/et131x.c | 6
-rw-r--r-- drivers/net/ethernet/amd/au1000_eth.c | 1
-rw-r--r-- drivers/net/ethernet/amd/ni65.c | 6
-rw-r--r-- drivers/net/ethernet/amd/xgbe/xgbe-debugfs.c | 107
-rw-r--r-- drivers/net/ethernet/amd/xgbe/xgbe-desc.c | 2
-rw-r--r-- drivers/net/ethernet/amd/xgbe/xgbe-drv.c | 2
-rw-r--r-- drivers/net/ethernet/amd/xgbe/xgbe-platform.c | 33
-rw-r--r-- drivers/net/ethernet/apm/xgene-v2/main.c | 4
-rw-r--r-- drivers/net/ethernet/apm/xgene/xgene_enet_hw.c | 10
-rw-r--r-- drivers/net/ethernet/apm/xgene/xgene_enet_main.c | 8
-rw-r--r-- drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c | 10
-rw-r--r-- drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.c | 10
-rw-r--r-- drivers/net/ethernet/apple/bmac.c | 4
-rw-r--r-- drivers/net/ethernet/atheros/ag71xx.c | 4
-rw-r--r-- drivers/net/ethernet/atheros/alx/main.c | 10
-rw-r--r-- drivers/net/ethernet/atheros/atl1c/atl1c_main.c | 12
-rw-r--r-- drivers/net/ethernet/atheros/atl1e/atl1e_main.c | 3
-rw-r--r-- drivers/net/ethernet/atheros/atlx/atl1.c | 11
-rw-r--r-- drivers/net/ethernet/aurora/nb8800.c | 4
-rw-r--r-- drivers/net/ethernet/broadcom/bcm63xx_enet.c | 9
-rw-r--r-- drivers/net/ethernet/broadcom/bcmsysport.c | 7
-rw-r--r-- drivers/net/ethernet/broadcom/bgmac-platform.c | 4
-rw-r--r-- drivers/net/ethernet/broadcom/bgmac.c | 2
-rw-r--r-- drivers/net/ethernet/broadcom/bnx2.c | 6
-rw-r--r-- drivers/net/ethernet/broadcom/bnxt/bnxt.c | 551
-rw-r--r-- drivers/net/ethernet/broadcom/bnxt/bnxt.h | 96
-rw-r--r-- drivers/net/ethernet/broadcom/bnxt/bnxt_debugfs.c | 39
-rw-r--r-- drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c | 156
-rw-r--r-- drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h | 109
-rw-r--r-- drivers/net/ethernet/broadcom/cnic.c | 5
-rw-r--r-- drivers/net/ethernet/broadcom/genet/bcmgenet.c | 15
-rw-r--r-- drivers/net/ethernet/broadcom/tg3.c | 6
-rw-r--r-- drivers/net/ethernet/brocade/bna/bnad.c | 2
-rw-r--r-- drivers/net/ethernet/calxeda/xgmac.c | 2
-rw-r--r-- drivers/net/ethernet/cavium/liquidio/lio_main.c | 23
-rw-r--r-- drivers/net/ethernet/cavium/liquidio/lio_vf_main.c | 23
-rw-r--r-- drivers/net/ethernet/cavium/liquidio/octeon_mailbox.c | 4
-rw-r--r-- drivers/net/ethernet/cavium/thunder/nicvf_queues.c | 6
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb3/sge.c | 4
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c | 5
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c | 3
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb4/smt.c | 18
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb4/smt.h | 2
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c | 21
-rw-r--r-- drivers/net/ethernet/cirrus/cs89x0.c | 7
-rw-r--r-- drivers/net/ethernet/cortina/gemini.c | 9
-rw-r--r-- drivers/net/ethernet/davicom/dm9000.c | 2
-rw-r--r-- drivers/net/ethernet/emulex/benet/be.h | 2
-rw-r--r-- drivers/net/ethernet/emulex/benet/be_ethtool.c | 7
-rw-r--r-- drivers/net/ethernet/emulex/benet/be_main.c | 21
-rw-r--r-- drivers/net/ethernet/ezchip/nps_enet.c | 4
-rw-r--r-- drivers/net/ethernet/faraday/Kconfig | 1
-rw-r--r-- drivers/net/ethernet/faraday/ftgmac100.c | 39
-rw-r--r-- drivers/net/ethernet/freescale/dpaa/dpaa_eth.c | 8
-rw-r--r-- drivers/net/ethernet/freescale/dpaa/dpaa_eth.h | 3
-rw-r--r-- drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-debugfs.c | 54
-rw-r--r-- drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-debugfs.h | 3
-rw-r--r-- drivers/net/ethernet/freescale/enetc/Kconfig | 9
-rw-r--r-- drivers/net/ethernet/freescale/enetc/Makefile | 19
-rw-r--r-- drivers/net/ethernet/freescale/enetc/enetc.c | 2
-rw-r--r-- drivers/net/ethernet/freescale/enetc/enetc_mdio.c | 97
-rw-r--r-- drivers/net/ethernet/freescale/enetc/enetc_mdio.h | 12
-rw-r--r-- drivers/net/ethernet/freescale/enetc/enetc_pci_mdio.c | 101
-rw-r--r-- drivers/net/ethernet/freescale/enetc/enetc_pf.c | 5
-rw-r--r-- drivers/net/ethernet/freescale/fec_main.c | 78
-rw-r--r-- drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c | 2
-rw-r--r-- drivers/net/ethernet/freescale/gianfar.c | 3
-rw-r--r-- drivers/net/ethernet/hisilicon/hisi_femac.c | 1
-rw-r--r-- drivers/net/ethernet/hisilicon/hix5hd2_gmac.c | 2
-rw-r--r-- drivers/net/ethernet/hisilicon/hns/hns_enet.c | 6
-rw-r--r-- drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h | 2
-rw-r--r-- drivers/net/ethernet/hisilicon/hns3/hnae3.c | 9
-rw-r--r-- drivers/net/ethernet/hisilicon/hns3/hnae3.h | 36
-rw-r--r-- drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c | 21
-rw-r--r-- drivers/net/ethernet/hisilicon/hns3/hns3_enet.c | 339
-rw-r--r-- drivers/net/ethernet/hisilicon/hns3/hns3_enet.h | 33
-rw-r--r-- drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c | 132
-rw-r--r-- drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c | 43
-rw-r--r-- drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h | 32
-rw-r--r-- drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c | 11
-rw-r--r-- drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c | 143
-rw-r--r-- drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c | 109
-rw-r--r-- drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c | 781
-rw-r--r-- drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h | 62
-rw-r--r-- drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c | 47
-rw-r--r-- drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c | 2
-rw-r--r-- drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c | 20
-rw-r--r-- drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c | 16
-rw-r--r-- drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.h | 7
-rw-r--r-- drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c | 100
-rw-r--r-- drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h | 7
-rw-r--r-- drivers/net/ethernet/huawei/hinic/hinic_tx.c | 2
-rw-r--r-- drivers/net/ethernet/ibm/ehea/ehea_main.c | 12
-rw-r--r-- drivers/net/ethernet/ibm/emac/core.c | 2
-rw-r--r-- drivers/net/ethernet/ibm/ibmvnic.c | 2
-rw-r--r-- drivers/net/ethernet/intel/e1000/e1000_ethtool.c | 3
-rw-r--r-- drivers/net/ethernet/intel/e1000/e1000_main.c | 6
-rw-r--r-- drivers/net/ethernet/intel/e1000e/ethtool.c | 6
-rw-r--r-- drivers/net/ethernet/intel/e1000e/ich8lan.c | 10
-rw-r--r-- drivers/net/ethernet/intel/e1000e/ich8lan.h | 2
-rw-r--r-- drivers/net/ethernet/intel/e1000e/netdev.c | 10
-rw-r--r-- drivers/net/ethernet/intel/fm10k/fm10k.h | 10
-rw-r--r-- drivers/net/ethernet/intel/fm10k/fm10k_dcbnl.c | 6
-rw-r--r-- drivers/net/ethernet/intel/fm10k/fm10k_debugfs.c | 2
-rw-r--r-- drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c | 15
-rw-r--r-- drivers/net/ethernet/intel/fm10k/fm10k_iov.c | 5
-rw-r--r-- drivers/net/ethernet/intel/fm10k/fm10k_main.c | 20
-rw-r--r-- drivers/net/ethernet/intel/fm10k/fm10k_mbx.c | 11
-rw-r--r-- drivers/net/ethernet/intel/fm10k/fm10k_netdev.c | 23
-rw-r--r-- drivers/net/ethernet/intel/fm10k/fm10k_pci.c | 24
-rw-r--r-- drivers/net/ethernet/intel/fm10k/fm10k_pf.c | 15
-rw-r--r-- drivers/net/ethernet/intel/fm10k/fm10k_tlv.c | 9
-rw-r--r-- drivers/net/ethernet/intel/fm10k/fm10k_type.h | 2
-rw-r--r-- drivers/net/ethernet/intel/fm10k/fm10k_vf.c | 25
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e.h | 1
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_adminq.c | 4
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h | 33
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_common.c | 110
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_dcb.c | 18
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_dcb.h | 2
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_debugfs.c | 22
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_ethtool.c | 80
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_main.c | 155
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_nvm.c | 101
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_prototype.h | 8
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_register.h | 30
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_txrx.c | 6
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_txrx.h | 2
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_type.h | 3
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c | 61
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h | 1
-rw-r--r-- drivers/net/ethernet/intel/iavf/iavf_txrx.c | 6
-rw-r--r-- drivers/net/ethernet/intel/iavf/iavf_txrx.h | 2
-rw-r--r-- drivers/net/ethernet/intel/ice/ice.h | 20
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_adminq_cmd.h | 1
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_common.c | 83
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_common.h | 6
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_controlq.c | 112
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_dcb.c | 3
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_dcb_lib.c | 187
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_ethtool.c | 144
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_hw_autogen.h | 34
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h | 1
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_lib.c | 173
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_lib.h | 4
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_main.c | 498
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_switch.c | 56
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_txrx.c | 161
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_txrx.h | 13
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c | 278
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h | 25
-rw-r--r-- drivers/net/ethernet/intel/igb/igb_main.c | 11
-rw-r--r-- drivers/net/ethernet/intel/igbvf/netdev.c | 2
-rw-r--r-- drivers/net/ethernet/intel/igc/igc_base.c | 5
-rw-r--r-- drivers/net/ethernet/intel/igc/igc_defines.h | 2
-rw-r--r-- drivers/net/ethernet/intel/igc/igc_hw.h | 14
-rw-r--r-- drivers/net/ethernet/intel/igc/igc_main.c | 11
-rw-r--r-- drivers/net/ethernet/intel/ixgb/ixgb_main.c | 4
-rw-r--r-- drivers/net/ethernet/intel/ixgbe/ixgbe_debugfs.c | 22
-rw-r--r-- drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 13
-rw-r--r-- drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c | 9
-rw-r--r-- drivers/net/ethernet/jme.c | 15
-rw-r--r-- drivers/net/ethernet/lantiq_xrx200.c | 10
-rw-r--r-- drivers/net/ethernet/marvell/mv643xx_eth.c | 2
-rw-r--r-- drivers/net/ethernet/marvell/mvneta.c | 8
-rw-r--r-- drivers/net/ethernet/marvell/mvneta_bm.c | 4
-rw-r--r-- drivers/net/ethernet/marvell/mvpp2/mvpp2.h | 3
-rw-r--r-- drivers/net/ethernet/marvell/mvpp2/mvpp2_debugfs.c | 19
-rw-r--r-- drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c | 75
-rw-r--r-- drivers/net/ethernet/marvell/pxa168_eth.c | 3
-rw-r--r-- drivers/net/ethernet/marvell/skge.c | 45
-rw-r--r-- drivers/net/ethernet/marvell/sky2.c | 3
-rw-r--r-- drivers/net/ethernet/mediatek/Kconfig | 2
-rw-r--r-- drivers/net/ethernet/mediatek/mtk_eth_path.c | 4
-rw-r--r-- drivers/net/ethernet/mediatek/mtk_eth_soc.c | 494
-rw-r--r-- drivers/net/ethernet/mediatek/mtk_eth_soc.h | 55
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/crdump.c | 7
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/en_netdev.c | 43
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/en_tx.c | 4
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/main.c | 66
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/Makefile | 9
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/cmd.c | 51
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/debugfs.c | 102
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/diag/en_rep_tracepoint.h | 54
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/diag/en_tc_tracepoint.c | 58
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/diag/en_tc_tracepoint.h | 114
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en.h | 49
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en/fs.h | 16
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en/health.c | 205
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en/health.h | 53
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en/hv_vhca_stats.c | 162
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en/hv_vhca_stats.h | 25
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en/reporter.h | 15
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c | 404
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c | 256
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c | 33
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h | 24
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c | 36
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h | 66
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c | 29
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en/xsk/tx.c | 7
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_common.c | 12
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c | 2
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c | 28
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c | 11
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 107
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_rep.c | 307
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_rep.h | 17
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_rx.c | 88
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_stats.c | 9
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_stats.h | 5
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_tc.c | 1068
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_tc.h | 44
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_tx.c | 9
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/eq.c | 23
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/eswitch.c | 222
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/eswitch.h | 35
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c | 89
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c | 71
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.h | 16
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c | 446
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/lib/eq.h | 2
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/lib/hv.c | 64
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/lib/hv.h | 22
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/lib/hv_vhca.c | 371
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/lib/hv_vhca.h | 104
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/lib/vxlan.c | 9
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/main.c | 19
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h | 2
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/vport.c | 7
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/wq.c | 5
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/wq.h | 1
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/Kconfig | 6
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/Makefile | 2
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/core.c | 64
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/core.h | 12
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/pci.h | 1
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/reg.h | 12
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/spectrum.c | 100
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/spectrum.h | 20
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c | 17
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c | 22
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c | 4
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.c | 267
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/trap.h | 7
-rw-r--r-- drivers/net/ethernet/micrel/ks8842.c | 4
-rw-r--r-- drivers/net/ethernet/micrel/ks8851_mll.c | 7
-rw-r--r-- drivers/net/ethernet/microchip/lan743x_main.c | 5
-rw-r--r-- drivers/net/ethernet/microchip/lan743x_ptp.c | 3
-rw-r--r-- drivers/net/ethernet/mscc/ocelot.c | 401
-rw-r--r-- drivers/net/ethernet/mscc/ocelot.h | 49
-rw-r--r-- drivers/net/ethernet/mscc/ocelot_board.c | 145
-rw-r--r-- drivers/net/ethernet/mscc/ocelot_ptp.h | 41
-rw-r--r-- drivers/net/ethernet/mscc/ocelot_regs.c | 11
-rw-r--r-- drivers/net/ethernet/myricom/myri10ge/myri10ge.c | 19
-rw-r--r-- drivers/net/ethernet/netronome/nfp/flower/action.c | 160
-rw-r--r-- drivers/net/ethernet/netronome/nfp/flower/cmsg.h | 25
-rw-r--r-- drivers/net/ethernet/netronome/nfp/flower/main.c | 1
-rw-r--r-- drivers/net/ethernet/netronome/nfp/flower/main.h | 19
-rw-r--r-- drivers/net/ethernet/netronome/nfp/flower/offload.c | 197
-rw-r--r-- drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c | 200
-rw-r--r-- drivers/net/ethernet/netronome/nfp/nfp_main.c | 2
-rw-r--r-- drivers/net/ethernet/netronome/nfp/nfp_net_common.c | 6
-rw-r--r-- drivers/net/ethernet/netronome/nfp/nfp_net_debugfs.c | 17
-rw-r--r-- drivers/net/ethernet/nuvoton/w90p910_ether.c | 2
-rw-r--r-- drivers/net/ethernet/nvidia/forcedeth.c | 3
-rw-r--r-- drivers/net/ethernet/packetengines/yellowfin.c | 3
-rw-r--r-- drivers/net/ethernet/qlogic/Kconfig | 9
-rw-r--r-- drivers/net/ethernet/qlogic/Makefile | 1
-rw-r--r-- drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c | 4
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_hsi.h | 17
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_l2.c | 34
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_main.c | 74
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_mcp.c | 32
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_mcp.h | 20
-rw-r--r-- drivers/net/ethernet/qlogic/qede/qede_ethtool.c | 3
-rw-r--r-- drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c | 2
-rw-r--r-- drivers/net/ethernet/qlogic/qlge/Makefile | 8
-rw-r--r-- drivers/net/ethernet/qlogic/qlge/qlge.h | 2353
-rw-r--r-- drivers/net/ethernet/qlogic/qlge/qlge_dbg.c | 2024
-rw-r--r-- drivers/net/ethernet/qlogic/qlge/qlge_ethtool.c | 735
-rw-r--r-- drivers/net/ethernet/qlogic/qlge/qlge_main.c | 5027
-rw-r--r-- drivers/net/ethernet/qlogic/qlge/qlge_mpi.c | 1285
-rw-r--r-- drivers/net/ethernet/qualcomm/emac/emac-mac.c | 12
-rw-r--r-- drivers/net/ethernet/qualcomm/emac/emac.c | 12
-rw-r--r-- drivers/net/ethernet/qualcomm/qca_debug.c | 13
-rw-r--r-- drivers/net/ethernet/qualcomm/qca_spi.c | 3
-rw-r--r-- drivers/net/ethernet/qualcomm/qca_uart.c | 3
-rw-r--r-- drivers/net/ethernet/realtek/r8169_main.c | 621
-rw-r--r-- drivers/net/ethernet/samsung/sxgbe/sxgbe_platform.c | 4
-rw-r--r-- drivers/net/ethernet/sfc/ef10.c | 4
-rw-r--r-- drivers/net/ethernet/sfc/efx.c | 10
-rw-r--r-- drivers/net/ethernet/sfc/falcon/efx.c | 6
-rw-r--r-- drivers/net/ethernet/sfc/falcon/falcon_boards.c | 18
-rw-r--r-- drivers/net/ethernet/sfc/falcon/rx.c | 5
-rw-r--r-- drivers/net/ethernet/sfc/rx.c | 5
-rw-r--r-- drivers/net/ethernet/sfc/tx.c | 2
-rw-r--r-- drivers/net/ethernet/sgi/meth.c | 3
-rw-r--r-- drivers/net/ethernet/sis/sis900.c | 68
-rw-r--r-- drivers/net/ethernet/smsc/smc91x.c | 3
-rw-r--r-- drivers/net/ethernet/socionext/sni_ave.c | 8
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/common.h | 16
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/dwmac-anarion.c | 4
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c | 14
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/dwmac-meson.c | 4
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c | 4
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h | 132
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c | 787
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/dwxgmac2_descs.c | 114
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c | 41
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/hwif.c | 4
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/hwif.h | 47
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/mmc.h | 9
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/mmc_core.c | 192
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/stmmac.h | 23
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c | 105
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | 435
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c | 4
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c | 1
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c | 29
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c | 516
-rw-r--r-- drivers/net/ethernet/sun/cassini.c | 8
-rw-r--r-- drivers/net/ethernet/sun/niu.c | 2
-rw-r--r-- drivers/net/ethernet/sun/sunvnet_common.c | 7
-rw-r--r-- drivers/net/ethernet/synopsys/dwc-xlgmac-desc.c | 2
-rw-r--r-- drivers/net/ethernet/synopsys/dwc-xlgmac-net.c | 2
-rw-r--r-- drivers/net/ethernet/tehuti/tehuti.c | 2
-rw-r--r-- drivers/net/ethernet/ti/cpsw.c | 5
-rw-r--r-- drivers/net/ethernet/ti/netcp_core.c | 2
-rw-r--r-- drivers/net/ethernet/via/via-rhine.c | 4
-rw-r--r-- drivers/net/hamradio/baycom_epp.c | 3
-rw-r--r-- drivers/net/hamradio/hdlcdrv.c | 3
-rw-r--r-- drivers/net/hamradio/mkiss.c | 11
-rw-r--r-- drivers/net/hyperv/netvsc_drv.c | 4
-rw-r--r-- drivers/net/ieee802154/adf7242.c | 13
-rw-r--r-- drivers/net/ieee802154/at86rf230.c | 20
-rw-r--r-- drivers/net/ieee802154/ca8210.c | 9
-rw-r--r-- drivers/net/ipvlan/ipvlan_main.c | 1
-rw-r--r-- drivers/net/netdevsim/dev.c | 414
-rw-r--r-- drivers/net/netdevsim/netdevsim.h | 4
-rw-r--r-- drivers/net/phy/Kconfig | 26
-rw-r--r-- drivers/net/phy/Makefile | 2
-rw-r--r-- drivers/net/phy/adin.c | 720
-rw-r--r-- drivers/net/phy/at803x.c | 4
-rw-r--r-- drivers/net/phy/dp83822.c | 5
-rw-r--r-- drivers/net/phy/dp83848.c | 11
-rw-r--r-- drivers/net/phy/dp83tc811.c | 4
-rw-r--r-- drivers/net/phy/mdio-aspeed.c | 157
-rw-r--r-- drivers/net/phy/mdio-cavium.h | 2
-rw-r--r-- drivers/net/phy/mdio-xgene.c | 4
-rw-r--r-- drivers/net/phy/meson-gxl.c | 2
-rw-r--r-- drivers/net/phy/microchip.c | 1
-rw-r--r-- drivers/net/phy/microchip_t1.c | 1
-rw-r--r-- drivers/net/phy/mscc.c | 4
-rw-r--r-- drivers/net/phy/phy-core.c | 66
-rw-r--r-- drivers/net/phy/phy.c | 60
-rw-r--r-- drivers/net/phy/phy_device.c | 111
-rw-r--r-- drivers/net/phy/realtek.c | 188
-rw-r--r-- drivers/net/phy/sfp.c | 73
-rw-r--r-- drivers/net/phy/swphy.c | 8
-rw-r--r-- drivers/net/phy/vitesse.c | 6
-rw-r--r-- drivers/net/slip/slhc.c | 30
-rw-r--r-- drivers/net/thunderbolt.c | 2
-rw-r--r-- drivers/net/usb/asix_common.c | 9
-rw-r--r-- drivers/net/usb/ax88179_178a.c | 14
-rw-r--r-- drivers/net/usb/lan78xx.c | 23
-rw-r--r-- drivers/net/usb/lg-vl600.c | 4
-rw-r--r-- drivers/net/usb/r8152.c | 599
-rw-r--r-- drivers/net/usb/rtl8150.c | 6
-rw-r--r-- drivers/net/usb/smsc75xx.c | 20
-rw-r--r-- drivers/net/usb/sr9800.c | 9
-rw-r--r-- drivers/net/usb/usbnet.c | 6
-rw-r--r-- drivers/net/vmxnet3/vmxnet3_drv.c | 9
-rw-r--r-- drivers/net/wimax/i2400m/debugfs.c | 150
-rw-r--r-- drivers/net/wimax/i2400m/driver.c | 7
-rw-r--r-- drivers/net/wimax/i2400m/i2400m.h | 7
-rw-r--r-- drivers/net/wimax/i2400m/usb.c | 64
-rw-r--r-- drivers/net/wireless/ath/ath10k/mac.c | 3
-rw-r--r-- drivers/net/wireless/ath/ath9k/main.c | 3
-rw-r--r-- drivers/net/wireless/ath/wil6210/cfg80211.c | 2
-rw-r--r-- drivers/net/wireless/ath/wil6210/debugfs.c | 3
-rw-r--r-- drivers/net/wireless/ath/wil6210/txrx.c | 9
-rw-r--r-- drivers/net/wireless/ath/wil6210/txrx_edma.c | 2
-rw-r--r-- drivers/net/wireless/broadcom/b43legacy/phy.c | 21
-rw-r--r-- drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcdc.c | 11
-rw-r--r-- drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcdc.h | 6
-rw-r--r-- drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c | 61
-rw-r--r-- drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c | 1
-rw-r--r-- drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c | 30
-rw-r--r-- drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c | 1
-rw-r--r-- drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.h | 4
-rw-r--r-- drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.c | 9
-rw-r--r-- drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c | 16
-rw-r--r-- drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.h | 3
-rw-r--r-- drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c | 7
-rw-r--r-- drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c | 3
-rw-r--r-- drivers/net/wireless/broadcom/brcm80211/brcmfmac/proto.c | 10
-rw-r--r-- drivers/net/wireless/broadcom/brcm80211/brcmfmac/proto.h | 3
-rw-r--r-- drivers/net/wireless/broadcom/brcm80211/brcmsmac/main.c | 13
-rw-r--r-- drivers/net/wireless/intel/ipw2x00/ipw2100.c | 2
-rw-r--r-- drivers/net/wireless/intel/iwlegacy/common.c | 3
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/dvm/rs.c | 29
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/dvm/rs.h | 4
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c | 3
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/mvm/rs.c | 5
-rw-r--r-- drivers/net/wireless/mac80211_hwsim.c | 20
-rw-r--r-- drivers/net/wireless/marvell/libertas/if_spi.c | 14
-rw-r--r-- drivers/net/wireless/marvell/libertas/if_usb.c | 3
-rw-r--r-- drivers/net/wireless/marvell/libertas/main.c | 2
-rw-r--r-- drivers/net/wireless/marvell/libertas_tf/cmd.c | 2
-rw-r--r-- drivers/net/wireless/marvell/mwifiex/init.c | 2
-rw-r--r-- drivers/net/wireless/marvell/mwifiex/pcie.c | 8
-rw-r--r-- drivers/net/wireless/marvell/mwifiex/scan.c | 2
-rw-r--r-- drivers/net/wireless/marvell/mwifiex/tdls.c | 3
-rw-r--r-- drivers/net/wireless/mediatek/mt76/mt7603/soc.c | 4
-rw-r--r-- drivers/net/wireless/mediatek/mt7601u/init.c | 3
-rw-r--r-- drivers/net/wireless/mediatek/mt7601u/main.c | 4
-rw-r--r-- drivers/net/wireless/quantenna/qtnfmac/pcie/pcie.c | 4
-rw-r--r-- drivers/net/wireless/ralink/rt2x00/rt2800usb.c | 1
-rw-r--r-- drivers/net/wireless/ralink/rt2x00/rt2x00debug.c | 136
-rw-r--r-- drivers/net/wireless/ralink/rt2x00/rt2x00usb.c | 9
-rw-r--r-- drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8723b.c | 11
-rw-r--r-- drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c | 3
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.c | 8
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/pci.c | 6
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/regd.c | 18
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8188ee/dm.c | 7
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8188ee/sw.c | 4
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8188ee/trx.c | 257
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8188ee/trx.h | 1046
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8192ce/sw.c | 2
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8192ce/trx.c | 215
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8192ce/trx.h | 803
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8192cu/sw.c | 2
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8192se/sw.c | 2
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8723ae/sw.c | 4
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8723be/sw.c | 4
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8821ae/sw.c | 4
-rw-r--r-- drivers/net/wireless/realtek/rtw88/Makefile | 1
-rw-r--r-- drivers/net/wireless/realtek/rtw88/coex.c | 2507
-rw-r--r-- drivers/net/wireless/realtek/rtw88/coex.h | 369
-rw-r--r-- drivers/net/wireless/realtek/rtw88/debug.c | 112
-rw-r--r-- drivers/net/wireless/realtek/rtw88/fw.c | 135
-rw-r--r-- drivers/net/wireless/realtek/rtw88/fw.h | 73
-rw-r--r-- drivers/net/wireless/realtek/rtw88/mac80211.c | 19
-rw-r--r-- drivers/net/wireless/realtek/rtw88/main.c | 45
-rw-r--r-- drivers/net/wireless/realtek/rtw88/main.h | 233
-rw-r--r-- drivers/net/wireless/realtek/rtw88/pci.c | 74
-rw-r--r-- drivers/net/wireless/realtek/rtw88/phy.c | 15
-rw-r--r-- drivers/net/wireless/realtek/rtw88/phy.h | 9
-rw-r--r-- drivers/net/wireless/realtek/rtw88/ps.c | 9
-rw-r--r-- drivers/net/wireless/realtek/rtw88/reg.h | 62
-rw-r--r-- drivers/net/wireless/realtek/rtw88/rtw8822b.c | 462
-rw-r--r-- drivers/net/wireless/realtek/rtw88/rtw8822b_table.c | 1564
-rw-r--r-- drivers/net/wireless/realtek/rtw88/rtw8822c.c | 355
-rw-r--r-- drivers/net/wireless/realtek/rtw88/rtw8822c_table.c | 2635
-rw-r--r-- drivers/net/wireless/rsi/rsi_91x_mac80211.c | 3
-rw-r--r-- drivers/net/wireless/rsi/rsi_91x_sdio.c | 31
-rw-r--r-- drivers/net/wireless/ti/wlcore/main.c | 3
-rw-r--r-- drivers/net/wireless/wl3501_cs.c | 4
-rw-r--r-- drivers/net/xen-netback/netback.c | 13
-rw-r--r-- drivers/net/xen-netback/xenbus.c | 46
-rw-r--r-- drivers/net/xen-netfront.c | 8
523 files changed, 31203 insertions(+), 21340 deletions(-)
diff --git a/drivers/net/bonding/bond_debugfs.c b/drivers/net/bonding/bond_debugfs.c
index 1360f1ffe070..f3f86ef68ae0 100644
--- a/drivers/net/bonding/bond_debugfs.c
+++ b/drivers/net/bonding/bond_debugfs.c
@@ -55,11 +55,6 @@ void bond_debug_register(struct bonding *bond)
bond->debug_dir =
debugfs_create_dir(bond->dev->name, bonding_debug_root);
- if (!bond->debug_dir) {
- netdev_warn(bond->dev, "failed to register to debugfs\n");
- return;
- }
-
debugfs_create_file("rlb_hash_table", 0400, bond->debug_dir,
bond, &bond_debug_rlb_hash_fops);
}
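
This hunk, like the two caif hunks that follow, leans on a debugfs API guarantee: debugfs_create_dir() returns an ERR_PTR rather than NULL on failure, and the debugfs_create_*() helpers silently accept an ERR_PTR parent, so the driver behaves the same whether or not the directory was created. A minimal sketch of the resulting pattern (the foo_* names are hypothetical, not taken from any of these drivers):

	#include <linux/debugfs.h>

	struct foo_priv {
		struct dentry *dbg_dir;
		u32 counter;
	};

	static void foo_debugfs_init(struct foo_priv *priv, const char *name)
	{
		/* No error check needed: on failure this yields an ERR_PTR,
		 * which the helper below treats as a no-op parent.
		 */
		priv->dbg_dir = debugfs_create_dir(name, NULL);

		debugfs_create_u32("counter", 0400, priv->dbg_dir,
				   &priv->counter);
	}
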
diff --git a/drivers/net/caif/caif_serial.c b/drivers/net/caif/caif_serial.c
index ed3c437063dc..40b079162804 100644
--- a/drivers/net/caif/caif_serial.c
+++ b/drivers/net/caif/caif_serial.c
@@ -94,26 +94,20 @@ static inline void update_tty_status(struct ser_device *ser)
}
static inline void debugfs_init(struct ser_device *ser, struct tty_struct *tty)
{
- ser->debugfs_tty_dir =
- debugfs_create_dir(tty->name, debugfsdir);
- if (!IS_ERR(ser->debugfs_tty_dir)) {
- debugfs_create_blob("last_tx_msg", 0400,
- ser->debugfs_tty_dir,
- &ser->tx_blob);
+ ser->debugfs_tty_dir = debugfs_create_dir(tty->name, debugfsdir);
- debugfs_create_blob("last_rx_msg", 0400,
- ser->debugfs_tty_dir,
- &ser->rx_blob);
+ debugfs_create_blob("last_tx_msg", 0400, ser->debugfs_tty_dir,
+ &ser->tx_blob);
- debugfs_create_x32("ser_state", 0400,
- ser->debugfs_tty_dir,
- (u32 *)&ser->state);
+ debugfs_create_blob("last_rx_msg", 0400, ser->debugfs_tty_dir,
+ &ser->rx_blob);
- debugfs_create_x8("tty_status", 0400,
- ser->debugfs_tty_dir,
- &ser->tty_status);
+ debugfs_create_x32("ser_state", 0400, ser->debugfs_tty_dir,
+ (u32 *)&ser->state);
+
+ debugfs_create_x8("tty_status", 0400, ser->debugfs_tty_dir,
+ &ser->tty_status);
- }
ser->tx_blob.data = ser->tx_data;
ser->tx_blob.size = 0;
ser->rx_blob.data = ser->rx_data;
diff --git a/drivers/net/caif/caif_virtio.c b/drivers/net/caif/caif_virtio.c
index 27e93a438dd9..eb426822ad06 100644
--- a/drivers/net/caif/caif_virtio.c
+++ b/drivers/net/caif/caif_virtio.c
@@ -623,11 +623,7 @@ static void cfv_netdev_setup(struct net_device *netdev)
/* Create debugfs counters for the device */
static inline void debugfs_init(struct cfv_info *cfv)
{
- cfv->debugfs =
- debugfs_create_dir(netdev_name(cfv->ndev), NULL);
-
- if (IS_ERR(cfv->debugfs))
- return;
+ cfv->debugfs = debugfs_create_dir(netdev_name(cfv->ndev), NULL);
debugfs_create_u32("rx-napi-complete", 0400, cfv->debugfs,
&cfv->stats.rx_napi_complete);
diff --git a/drivers/net/can/Kconfig b/drivers/net/can/Kconfig
index ab585900a057..17c166cc8482 100644
--- a/drivers/net/can/Kconfig
+++ b/drivers/net/can/Kconfig
@@ -120,6 +120,19 @@ config CAN_JANZ_ICAN3
This driver can also be built as a module. If so, the module will be
called janz-ican3.ko.
+config CAN_KVASER_PCIEFD
+ depends on PCI
+ tristate "Kvaser PCIe FD cards"
+ help
+ This is a driver for the Kvaser PCI Express CAN FD family.
+
+ Supported devices:
+ Kvaser PCIEcan 4xHS
+ Kvaser PCIEcan 2xHS v2
+ Kvaser PCIEcan HS v2
+ Kvaser Mini PCI Express HS v2
+ Kvaser Mini PCI Express 2xHS v2
+
config CAN_SUN4I
tristate "Allwinner A10 CAN controller"
depends on MACH_SUN4I || MACH_SUN7I || COMPILE_TEST
diff --git a/drivers/net/can/Makefile b/drivers/net/can/Makefile
index 44922bf29b6a..22164300122d 100644
--- a/drivers/net/can/Makefile
+++ b/drivers/net/can/Makefile
@@ -25,6 +25,7 @@ obj-$(CONFIG_CAN_FLEXCAN) += flexcan.o
obj-$(CONFIG_CAN_GRCAN) += grcan.o
obj-$(CONFIG_CAN_IFI_CANFD) += ifi_canfd/
obj-$(CONFIG_CAN_JANZ_ICAN3) += janz-ican3.o
+obj-$(CONFIG_CAN_KVASER_PCIEFD) += kvaser_pciefd.o
obj-$(CONFIG_CAN_MSCAN) += mscan/
obj-$(CONFIG_CAN_M_CAN) += m_can/
obj-$(CONFIG_CAN_PEAK_PCIEFD) += peak_canfd/
diff --git a/drivers/net/can/flexcan.c b/drivers/net/can/flexcan.c
index fcec8bcb53d6..dc5695dffc2e 100644
--- a/drivers/net/can/flexcan.c
+++ b/drivers/net/can/flexcan.c
@@ -24,6 +24,7 @@
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
#include <linux/regulator/consumer.h>
#include <linux/regmap.h>
@@ -266,6 +267,7 @@ struct flexcan_stop_mode {
struct flexcan_priv {
struct can_priv can;
struct can_rx_offload offload;
+ struct device *dev;
struct flexcan_regs __iomem *regs;
struct flexcan_mb __iomem *tx_mb;
@@ -273,6 +275,8 @@ struct flexcan_priv {
u8 tx_mb_idx;
u8 mb_count;
u8 mb_size;
+ u8 clk_src; /* clock source of CAN Protocol Engine */
+
u32 reg_ctrl_default;
u32 reg_imask1_default;
u32 reg_imask2_default;
@@ -462,6 +466,27 @@ static inline void flexcan_error_irq_disable(const struct flexcan_priv *priv)
priv->write(reg_ctrl, &regs->ctrl);
}
+static int flexcan_clks_enable(const struct flexcan_priv *priv)
+{
+ int err;
+
+ err = clk_prepare_enable(priv->clk_ipg);
+ if (err)
+ return err;
+
+ err = clk_prepare_enable(priv->clk_per);
+ if (err)
+ clk_disable_unprepare(priv->clk_ipg);
+
+ return err;
+}
+
+static void flexcan_clks_disable(const struct flexcan_priv *priv)
+{
+ clk_disable_unprepare(priv->clk_per);
+ clk_disable_unprepare(priv->clk_ipg);
+}
+
static inline int flexcan_transceiver_enable(const struct flexcan_priv *priv)
{
if (!priv->reg_xceiver)
@@ -588,19 +613,13 @@ static int flexcan_get_berr_counter(const struct net_device *dev,
const struct flexcan_priv *priv = netdev_priv(dev);
int err;
- err = clk_prepare_enable(priv->clk_ipg);
- if (err)
+ err = pm_runtime_get_sync(priv->dev);
+ if (err < 0)
return err;
- err = clk_prepare_enable(priv->clk_per);
- if (err)
- goto out_disable_ipg;
-
err = __flexcan_get_berr_counter(dev, bec);
- clk_disable_unprepare(priv->clk_per);
- out_disable_ipg:
- clk_disable_unprepare(priv->clk_ipg);
+ pm_runtime_put(priv->dev);
return err;
}
@@ -1233,17 +1252,13 @@ static int flexcan_open(struct net_device *dev)
struct flexcan_priv *priv = netdev_priv(dev);
int err;
- err = clk_prepare_enable(priv->clk_ipg);
- if (err)
+ err = pm_runtime_get_sync(priv->dev);
+ if (err < 0)
return err;
- err = clk_prepare_enable(priv->clk_per);
- if (err)
- goto out_disable_ipg;
-
err = open_candev(dev);
if (err)
- goto out_disable_per;
+ goto out_runtime_put;
err = request_irq(dev->irq, flexcan_irq, IRQF_SHARED, dev->name, dev);
if (err)
@@ -1306,10 +1321,8 @@ static int flexcan_open(struct net_device *dev)
free_irq(dev->irq, dev);
out_close:
close_candev(dev);
- out_disable_per:
- clk_disable_unprepare(priv->clk_per);
- out_disable_ipg:
- clk_disable_unprepare(priv->clk_ipg);
+ out_runtime_put:
+ pm_runtime_put(priv->dev);
return err;
}
@@ -1324,10 +1337,9 @@ static int flexcan_close(struct net_device *dev)
can_rx_offload_del(&priv->offload);
free_irq(dev->irq, dev);
- clk_disable_unprepare(priv->clk_per);
- clk_disable_unprepare(priv->clk_ipg);
close_candev(dev);
+ pm_runtime_put(priv->dev);
can_led_event(dev, CAN_LED_EVENT_STOP);
@@ -1367,20 +1379,20 @@ static int register_flexcandev(struct net_device *dev)
struct flexcan_regs __iomem *regs = priv->regs;
u32 reg, err;
- err = clk_prepare_enable(priv->clk_ipg);
+ err = flexcan_clks_enable(priv);
if (err)
return err;
- err = clk_prepare_enable(priv->clk_per);
- if (err)
- goto out_disable_ipg;
-
/* select "bus clock", chip must be disabled */
err = flexcan_chip_disable(priv);
if (err)
- goto out_disable_per;
+ goto out_clks_disable;
+
reg = priv->read(&regs->ctrl);
- reg |= FLEXCAN_CTRL_CLK_SRC;
+ if (priv->clk_src)
+ reg |= FLEXCAN_CTRL_CLK_SRC;
+ else
+ reg &= ~FLEXCAN_CTRL_CLK_SRC;
priv->write(reg, &regs->ctrl);
err = flexcan_chip_enable(priv);
@@ -1406,15 +1418,21 @@ static int register_flexcandev(struct net_device *dev)
}
err = register_candev(dev);
+ if (err)
+ goto out_chip_disable;
- /* disable core and turn off clocks */
- out_chip_disable:
+ /* Disable core and let pm_runtime_put() disable the clocks.
+ * If CONFIG_PM is not enabled, the clocks will stay powered.
+ */
flexcan_chip_disable(priv);
- out_disable_per:
- clk_disable_unprepare(priv->clk_per);
- out_disable_ipg:
- clk_disable_unprepare(priv->clk_ipg);
+ pm_runtime_put(priv->dev);
+ return 0;
+
+ out_chip_disable:
+ flexcan_chip_disable(priv);
+ out_clks_disable:
+ flexcan_clks_disable(priv);
return err;
}
@@ -1473,6 +1491,11 @@ static int flexcan_setup_stop_mode(struct platform_device *pdev)
device_set_wakeup_capable(&pdev->dev, true);
+ if (of_property_read_bool(np, "wakeup-source"))
+ device_set_wakeup_enable(&pdev->dev, true);
+
+ return 0;
+
out_put_node:
of_node_put(gpr_np);
return ret;
@@ -1508,6 +1531,7 @@ static int flexcan_probe(struct platform_device *pdev)
struct clk *clk_ipg = NULL, *clk_per = NULL;
struct flexcan_regs __iomem *regs;
int err, irq;
+ u8 clk_src = 1;
u32 clock_freq = 0;
reg_xceiver = devm_regulator_get(&pdev->dev, "xceiver");
@@ -1516,9 +1540,12 @@ static int flexcan_probe(struct platform_device *pdev)
else if (IS_ERR(reg_xceiver))
reg_xceiver = NULL;
- if (pdev->dev.of_node)
+ if (pdev->dev.of_node) {
of_property_read_u32(pdev->dev.of_node,
"clock-frequency", &clock_freq);
+ of_property_read_u8(pdev->dev.of_node,
+ "fsl,clk-source", &clk_src);
+ }
if (!clock_freq) {
clk_ipg = devm_clk_get(&pdev->dev, "ipg");
@@ -1576,6 +1603,7 @@ static int flexcan_probe(struct platform_device *pdev)
priv->write = flexcan_write_le;
}
+ priv->dev = &pdev->dev;
priv->can.clock.freq = clock_freq;
priv->can.bittiming_const = &flexcan_bittiming_const;
priv->can.do_set_mode = flexcan_set_mode;
@@ -1586,9 +1614,14 @@ static int flexcan_probe(struct platform_device *pdev)
priv->regs = regs;
priv->clk_ipg = clk_ipg;
priv->clk_per = clk_per;
+ priv->clk_src = clk_src;
priv->devtype_data = devtype_data;
priv->reg_xceiver = reg_xceiver;
+ pm_runtime_get_noresume(&pdev->dev);
+ pm_runtime_set_active(&pdev->dev);
+ pm_runtime_enable(&pdev->dev);
+
err = register_flexcandev(dev);
if (err) {
dev_err(&pdev->dev, "registering netdev failed\n");
@@ -1615,6 +1648,7 @@ static int flexcan_remove(struct platform_device *pdev)
struct net_device *dev = platform_get_drvdata(pdev);
unregister_flexcandev(dev);
+ pm_runtime_disable(&pdev->dev);
free_candev(dev);
return 0;
@@ -1624,7 +1658,7 @@ static int __maybe_unused flexcan_suspend(struct device *device)
{
struct net_device *dev = dev_get_drvdata(device);
struct flexcan_priv *priv = netdev_priv(dev);
- int err;
+ int err = 0;
if (netif_running(dev)) {
/* if wakeup is enabled, enter stop mode
@@ -1639,20 +1673,22 @@ static int __maybe_unused flexcan_suspend(struct device *device)
err = flexcan_chip_disable(priv);
if (err)
return err;
+
+ err = pm_runtime_force_suspend(device);
}
netif_stop_queue(dev);
netif_device_detach(dev);
}
priv->can.state = CAN_STATE_SLEEPING;
- return 0;
+ return err;
}
static int __maybe_unused flexcan_resume(struct device *device)
{
struct net_device *dev = dev_get_drvdata(device);
struct flexcan_priv *priv = netdev_priv(dev);
- int err;
+ int err = 0;
priv->can.state = CAN_STATE_ERROR_ACTIVE;
if (netif_running(dev)) {
@@ -1661,14 +1697,35 @@ static int __maybe_unused flexcan_resume(struct device *device)
if (device_may_wakeup(device)) {
disable_irq_wake(dev->irq);
} else {
- err = flexcan_chip_enable(priv);
+ err = pm_runtime_force_resume(device);
if (err)
return err;
+
+ err = flexcan_chip_enable(priv);
}
}
+
+ return err;
+}
+
+static int __maybe_unused flexcan_runtime_suspend(struct device *device)
+{
+ struct net_device *dev = dev_get_drvdata(device);
+ struct flexcan_priv *priv = netdev_priv(dev);
+
+ flexcan_clks_disable(priv);
+
return 0;
}
+static int __maybe_unused flexcan_runtime_resume(struct device *device)
+{
+ struct net_device *dev = dev_get_drvdata(device);
+ struct flexcan_priv *priv = netdev_priv(dev);
+
+ return flexcan_clks_enable(priv);
+}
+
static int __maybe_unused flexcan_noirq_suspend(struct device *device)
{
struct net_device *dev = dev_get_drvdata(device);
@@ -1698,6 +1755,7 @@ static int __maybe_unused flexcan_noirq_resume(struct device *device)
static const struct dev_pm_ops flexcan_pm_ops = {
SET_SYSTEM_SLEEP_PM_OPS(flexcan_suspend, flexcan_resume)
+ SET_RUNTIME_PM_OPS(flexcan_runtime_suspend, flexcan_runtime_resume, NULL)
SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(flexcan_noirq_suspend, flexcan_noirq_resume)
};
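
Taken together, the flexcan hunks above move clock handling behind runtime PM: the clk_prepare_enable()/clk_disable_unprepare() pairs now live only in the runtime_suspend/runtime_resume callbacks, while open(), close(), get_berr_counter() and probe simply take or drop a PM reference. A stripped-down sketch of that pattern, assuming a single clock instead of flexcan's ipg/per pair and using hypothetical foo_* names:

	#include <linux/clk.h>
	#include <linux/pm_runtime.h>

	struct foo_priv {
		struct device *dev;
		struct clk *clk;
	};

	static int foo_runtime_suspend(struct device *dev)
	{
		struct foo_priv *priv = dev_get_drvdata(dev);

		clk_disable_unprepare(priv->clk);
		return 0;
	}

	static int foo_runtime_resume(struct device *dev)
	{
		struct foo_priv *priv = dev_get_drvdata(dev);

		return clk_prepare_enable(priv->clk);
	}

	static const struct dev_pm_ops foo_pm_ops = {
		SET_RUNTIME_PM_OPS(foo_runtime_suspend, foo_runtime_resume, NULL)
	};

	static int foo_open(struct foo_priv *priv)
	{
		/* Takes a PM reference; resumes (clocks on) if it was idle */
		int err = pm_runtime_get_sync(priv->dev);

		if (err < 0)
			return err;
		return 0;
	}

	static void foo_close(struct foo_priv *priv)
	{
		/* Drops the reference; clocks gate once the count hits zero */
		pm_runtime_put(priv->dev);
	}

The pm_runtime_get_noresume()/pm_runtime_set_active()/pm_runtime_enable() sequence in the probe hunk hands the PM core an already-powered device holding one reference; register_flexcandev() later drops that reference with pm_runtime_put(), letting the clocks gate until the interface is brought up.
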
diff --git a/drivers/net/can/janz-ican3.c b/drivers/net/can/janz-ican3.c
index 19d4f52a8f90..a761092e6ac9 100644
--- a/drivers/net/can/janz-ican3.c
+++ b/drivers/net/can/janz-ican3.c
@@ -1936,7 +1936,6 @@ static int ican3_probe(struct platform_device *pdev)
/* find our IRQ number */
mod->irq = platform_get_irq(pdev, 0);
if (mod->irq < 0) {
- dev_err(dev, "IRQ line not found\n");
ret = -ENODEV;
goto out_free_ndev;
}
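
The janz-ican3 change works because platform_get_irq() now prints its own error message when the IRQ lookup fails, which makes driver-local messages like the removed dev_err() redundant; callers only need to propagate the error. A hedged sketch of the simplified call site (the real driver still funnels the failure through its goto-based unwind path):

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;	/* platform_get_irq() has already logged why */
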
diff --git a/drivers/net/can/kvaser_pciefd.c b/drivers/net/can/kvaser_pciefd.c
new file mode 100644
index 000000000000..f9815fda8840
--- /dev/null
+++ b/drivers/net/can/kvaser_pciefd.c
@@ -0,0 +1,1907 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause
+/* Copyright (C) 2018 KVASER AB, Sweden. All rights reserved.
+ * Parts of this driver are based on the following:
+ * - Kvaser linux pciefd driver (version 5.25)
+ * - PEAK linux canfd driver
+ * - Altera Avalon EPCS flash controller driver
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/pci.h>
+#include <linux/can/dev.h>
+#include <linux/timer.h>
+#include <linux/netdevice.h>
+#include <linux/crc32.h>
+#include <linux/iopoll.h>
+
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_AUTHOR("Kvaser AB <support@kvaser.com>");
+MODULE_DESCRIPTION("CAN driver for Kvaser CAN/PCIe devices");
+
+#define KVASER_PCIEFD_DRV_NAME "kvaser_pciefd"
+
+#define KVASER_PCIEFD_WAIT_TIMEOUT msecs_to_jiffies(1000)
+#define KVASER_PCIEFD_BEC_POLL_FREQ (jiffies + msecs_to_jiffies(200))
+#define KVASER_PCIEFD_MAX_ERR_REP 256
+#define KVASER_PCIEFD_CAN_TX_MAX_COUNT 17
+#define KVASER_PCIEFD_MAX_CAN_CHANNELS 4
+#define KVASER_PCIEFD_DMA_COUNT 2
+
+#define KVASER_PCIEFD_DMA_SIZE (4 * 1024)
+#define KVASER_PCIEFD_64BIT_DMA_BIT BIT(0)
+
+#define KVASER_PCIEFD_VENDOR 0x1a07
+#define KVASER_PCIEFD_4HS_ID 0x0d
+#define KVASER_PCIEFD_2HS_ID 0x0e
+#define KVASER_PCIEFD_HS_ID 0x0f
+#define KVASER_PCIEFD_MINIPCIE_HS_ID 0x10
+#define KVASER_PCIEFD_MINIPCIE_2HS_ID 0x11
+
+/* PCIe IRQ registers */
+#define KVASER_PCIEFD_IRQ_REG 0x40
+#define KVASER_PCIEFD_IEN_REG 0x50
+/* DMA map */
+#define KVASER_PCIEFD_DMA_MAP_BASE 0x1000
+/* Kvaser KCAN CAN controller registers */
+#define KVASER_PCIEFD_KCAN0_BASE 0x10000
+#define KVASER_PCIEFD_KCAN_BASE_OFFSET 0x1000
+#define KVASER_PCIEFD_KCAN_FIFO_REG 0x100
+#define KVASER_PCIEFD_KCAN_FIFO_LAST_REG 0x180
+#define KVASER_PCIEFD_KCAN_CTRL_REG 0x2c0
+#define KVASER_PCIEFD_KCAN_CMD_REG 0x400
+#define KVASER_PCIEFD_KCAN_IEN_REG 0x408
+#define KVASER_PCIEFD_KCAN_IRQ_REG 0x410
+#define KVASER_PCIEFD_KCAN_TX_NPACKETS_REG 0x414
+#define KVASER_PCIEFD_KCAN_STAT_REG 0x418
+#define KVASER_PCIEFD_KCAN_MODE_REG 0x41c
+#define KVASER_PCIEFD_KCAN_BTRN_REG 0x420
+#define KVASER_PCIEFD_KCAN_BTRD_REG 0x428
+#define KVASER_PCIEFD_KCAN_PWM_REG 0x430
+/* Loopback control register */
+#define KVASER_PCIEFD_LOOP_REG 0x1f000
+/* System identification and information registers */
+#define KVASER_PCIEFD_SYSID_BASE 0x1f020
+#define KVASER_PCIEFD_SYSID_VERSION_REG (KVASER_PCIEFD_SYSID_BASE + 0x8)
+#define KVASER_PCIEFD_SYSID_CANFREQ_REG (KVASER_PCIEFD_SYSID_BASE + 0xc)
+#define KVASER_PCIEFD_SYSID_BUILD_REG (KVASER_PCIEFD_SYSID_BASE + 0x14)
+/* Shared receive buffer registers */
+#define KVASER_PCIEFD_SRB_BASE 0x1f200
+#define KVASER_PCIEFD_SRB_CMD_REG (KVASER_PCIEFD_SRB_BASE + 0x200)
+#define KVASER_PCIEFD_SRB_IEN_REG (KVASER_PCIEFD_SRB_BASE + 0x204)
+#define KVASER_PCIEFD_SRB_IRQ_REG (KVASER_PCIEFD_SRB_BASE + 0x20c)
+#define KVASER_PCIEFD_SRB_STAT_REG (KVASER_PCIEFD_SRB_BASE + 0x210)
+#define KVASER_PCIEFD_SRB_CTRL_REG (KVASER_PCIEFD_SRB_BASE + 0x218)
+/* EPCS flash controller registers */
+#define KVASER_PCIEFD_SPI_BASE 0x1fc00
+#define KVASER_PCIEFD_SPI_RX_REG KVASER_PCIEFD_SPI_BASE
+#define KVASER_PCIEFD_SPI_TX_REG (KVASER_PCIEFD_SPI_BASE + 0x4)
+#define KVASER_PCIEFD_SPI_STATUS_REG (KVASER_PCIEFD_SPI_BASE + 0x8)
+#define KVASER_PCIEFD_SPI_CTRL_REG (KVASER_PCIEFD_SPI_BASE + 0xc)
+#define KVASER_PCIEFD_SPI_SSEL_REG (KVASER_PCIEFD_SPI_BASE + 0x14)
+
+#define KVASER_PCIEFD_IRQ_ALL_MSK 0x1f
+#define KVASER_PCIEFD_IRQ_SRB BIT(4)
+
+#define KVASER_PCIEFD_SYSID_NRCHAN_SHIFT 24
+#define KVASER_PCIEFD_SYSID_MAJOR_VER_SHIFT 16
+#define KVASER_PCIEFD_SYSID_BUILD_VER_SHIFT 1
+
+/* Reset DMA buffer 0, 1 and FIFO offset */
+#define KVASER_PCIEFD_SRB_CMD_RDB0 BIT(4)
+#define KVASER_PCIEFD_SRB_CMD_RDB1 BIT(5)
+#define KVASER_PCIEFD_SRB_CMD_FOR BIT(0)
+
+/* DMA packet done, buffer 0 and 1 */
+#define KVASER_PCIEFD_SRB_IRQ_DPD0 BIT(8)
+#define KVASER_PCIEFD_SRB_IRQ_DPD1 BIT(9)
+/* DMA overflow, buffer 0 and 1 */
+#define KVASER_PCIEFD_SRB_IRQ_DOF0 BIT(10)
+#define KVASER_PCIEFD_SRB_IRQ_DOF1 BIT(11)
+/* DMA underflow, buffer 0 and 1 */
+#define KVASER_PCIEFD_SRB_IRQ_DUF0 BIT(12)
+#define KVASER_PCIEFD_SRB_IRQ_DUF1 BIT(13)
+
+/* DMA idle */
+#define KVASER_PCIEFD_SRB_STAT_DI BIT(15)
+/* DMA support */
+#define KVASER_PCIEFD_SRB_STAT_DMA BIT(24)
+
+/* DMA Enable */
+#define KVASER_PCIEFD_SRB_CTRL_DMA_ENABLE BIT(0)
+
+/* EPCS flash controller definitions */
+#define KVASER_PCIEFD_CFG_IMG_SZ (64 * 1024)
+#define KVASER_PCIEFD_CFG_IMG_OFFSET (31 * 65536L)
+#define KVASER_PCIEFD_CFG_MAX_PARAMS 256
+#define KVASER_PCIEFD_CFG_MAGIC 0xcafef00d
+#define KVASER_PCIEFD_CFG_PARAM_MAX_SZ 24
+#define KVASER_PCIEFD_CFG_SYS_VER 1
+#define KVASER_PCIEFD_CFG_PARAM_NR_CHAN 130
+#define KVASER_PCIEFD_SPI_TMT BIT(5)
+#define KVASER_PCIEFD_SPI_TRDY BIT(6)
+#define KVASER_PCIEFD_SPI_RRDY BIT(7)
+#define KVASER_PCIEFD_FLASH_ID_EPCS16 0x14
+/* Commands for controlling the onboard flash */
+#define KVASER_PCIEFD_FLASH_RES_CMD 0xab
+#define KVASER_PCIEFD_FLASH_READ_CMD 0x3
+#define KVASER_PCIEFD_FLASH_STATUS_CMD 0x5
+
+/* Kvaser KCAN definitions */
+#define KVASER_PCIEFD_KCAN_CTRL_EFLUSH (4 << 29)
+#define KVASER_PCIEFD_KCAN_CTRL_EFRAME (5 << 29)
+
+#define KVASER_PCIEFD_KCAN_CMD_SEQ_SHIFT 16
+/* Request status packet */
+#define KVASER_PCIEFD_KCAN_CMD_SRQ BIT(0)
+/* Abort, flush and reset */
+#define KVASER_PCIEFD_KCAN_CMD_AT BIT(1)
+
+/* Tx FIFO unaligned read */
+#define KVASER_PCIEFD_KCAN_IRQ_TAR BIT(0)
+/* Tx FIFO unaligned end */
+#define KVASER_PCIEFD_KCAN_IRQ_TAE BIT(1)
+/* Bus parameter protection error */
+#define KVASER_PCIEFD_KCAN_IRQ_BPP BIT(2)
+/* FDF bit when controller is in classic mode */
+#define KVASER_PCIEFD_KCAN_IRQ_FDIC BIT(3)
+/* Rx FIFO overflow */
+#define KVASER_PCIEFD_KCAN_IRQ_ROF BIT(5)
+/* Abort done */
+#define KVASER_PCIEFD_KCAN_IRQ_ABD BIT(13)
+/* Tx buffer flush done */
+#define KVASER_PCIEFD_KCAN_IRQ_TFD BIT(14)
+/* Tx FIFO overflow */
+#define KVASER_PCIEFD_KCAN_IRQ_TOF BIT(15)
+/* Tx FIFO empty */
+#define KVASER_PCIEFD_KCAN_IRQ_TE BIT(16)
+/* Transmitter unaligned */
+#define KVASER_PCIEFD_KCAN_IRQ_TAL BIT(17)
+
+#define KVASER_PCIEFD_KCAN_TX_NPACKETS_MAX_SHIFT 16
+
+#define KVASER_PCIEFD_KCAN_STAT_SEQNO_SHIFT 24
+/* Abort request */
+#define KVASER_PCIEFD_KCAN_STAT_AR BIT(7)
+/* Idle state. Controller in reset mode and no abort or flush pending */
+#define KVASER_PCIEFD_KCAN_STAT_IDLE BIT(10)
+/* Bus off */
+#define KVASER_PCIEFD_KCAN_STAT_BOFF BIT(11)
+/* Reset mode request */
+#define KVASER_PCIEFD_KCAN_STAT_RMR BIT(14)
+/* Controller in reset mode */
+#define KVASER_PCIEFD_KCAN_STAT_IRM BIT(15)
+/* Controller got one-shot capability */
+#define KVASER_PCIEFD_KCAN_STAT_CAP BIT(16)
+/* Controller got CAN FD capability */
+#define KVASER_PCIEFD_KCAN_STAT_FD BIT(19)
+#define KVASER_PCIEFD_KCAN_STAT_BUS_OFF_MSK (KVASER_PCIEFD_KCAN_STAT_AR | \
+ KVASER_PCIEFD_KCAN_STAT_BOFF | KVASER_PCIEFD_KCAN_STAT_RMR | \
+ KVASER_PCIEFD_KCAN_STAT_IRM)
+
+/* Reset mode */
+#define KVASER_PCIEFD_KCAN_MODE_RM BIT(8)
+/* Listen only mode */
+#define KVASER_PCIEFD_KCAN_MODE_LOM BIT(9)
+/* Error packet enable */
+#define KVASER_PCIEFD_KCAN_MODE_EPEN BIT(12)
+/* CAN FD non-ISO */
+#define KVASER_PCIEFD_KCAN_MODE_NIFDEN BIT(15)
+/* Acknowledgment packet type */
+#define KVASER_PCIEFD_KCAN_MODE_APT BIT(20)
+/* Active error flag enable. Clear to force error passive */
+#define KVASER_PCIEFD_KCAN_MODE_EEN BIT(23)
+/* Classic CAN mode */
+#define KVASER_PCIEFD_KCAN_MODE_CCM BIT(31)
+
+#define KVASER_PCIEFD_KCAN_BTRN_SJW_SHIFT 13
+#define KVASER_PCIEFD_KCAN_BTRN_TSEG1_SHIFT 17
+#define KVASER_PCIEFD_KCAN_BTRN_TSEG2_SHIFT 26
+
+#define KVASER_PCIEFD_KCAN_PWM_TOP_SHIFT 16
+
+/* Kvaser KCAN packet types */
+#define KVASER_PCIEFD_PACK_TYPE_DATA 0
+#define KVASER_PCIEFD_PACK_TYPE_ACK 1
+#define KVASER_PCIEFD_PACK_TYPE_TXRQ 2
+#define KVASER_PCIEFD_PACK_TYPE_ERROR 3
+#define KVASER_PCIEFD_PACK_TYPE_EFLUSH_ACK 4
+#define KVASER_PCIEFD_PACK_TYPE_EFRAME_ACK 5
+#define KVASER_PCIEFD_PACK_TYPE_ACK_DATA 6
+#define KVASER_PCIEFD_PACK_TYPE_STATUS 8
+#define KVASER_PCIEFD_PACK_TYPE_BUS_LOAD 9
+
+/* Kvaser KCAN packet common definitions */
+#define KVASER_PCIEFD_PACKET_SEQ_MSK 0xff
+#define KVASER_PCIEFD_PACKET_CHID_SHIFT 25
+#define KVASER_PCIEFD_PACKET_TYPE_SHIFT 28
+
+/* Kvaser KCAN TDATA and RDATA first word */
+#define KVASER_PCIEFD_RPACKET_IDE BIT(30)
+#define KVASER_PCIEFD_RPACKET_RTR BIT(29)
+/* Kvaser KCAN TDATA and RDATA second word */
+#define KVASER_PCIEFD_RPACKET_ESI BIT(13)
+#define KVASER_PCIEFD_RPACKET_BRS BIT(14)
+#define KVASER_PCIEFD_RPACKET_FDF BIT(15)
+#define KVASER_PCIEFD_RPACKET_DLC_SHIFT 8
+/* Kvaser KCAN TDATA second word */
+#define KVASER_PCIEFD_TPACKET_SMS BIT(16)
+#define KVASER_PCIEFD_TPACKET_AREQ BIT(31)
+
+/* Kvaser KCAN APACKET */
+#define KVASER_PCIEFD_APACKET_FLU BIT(8)
+#define KVASER_PCIEFD_APACKET_CT BIT(9)
+#define KVASER_PCIEFD_APACKET_ABL BIT(10)
+#define KVASER_PCIEFD_APACKET_NACK BIT(11)
+
+/* Kvaser KCAN SPACK first word */
+#define KVASER_PCIEFD_SPACK_RXERR_SHIFT 8
+#define KVASER_PCIEFD_SPACK_BOFF BIT(16)
+#define KVASER_PCIEFD_SPACK_IDET BIT(20)
+#define KVASER_PCIEFD_SPACK_IRM BIT(21)
+#define KVASER_PCIEFD_SPACK_RMCD BIT(22)
+/* Kvaser KCAN SPACK second word */
+#define KVASER_PCIEFD_SPACK_AUTO BIT(21)
+#define KVASER_PCIEFD_SPACK_EWLR BIT(23)
+#define KVASER_PCIEFD_SPACK_EPLR BIT(24)
+
+struct kvaser_pciefd;
+
+struct kvaser_pciefd_can {
+ struct can_priv can;
+ struct kvaser_pciefd *kv_pcie;
+ void __iomem *reg_base;
+ struct can_berr_counter bec;
+ u8 cmd_seq;
+ int err_rep_cnt;
+ int echo_idx;
+ spinlock_t lock; /* Locks sensitive registers (e.g. MODE) */
+ spinlock_t echo_lock; /* Locks the message echo buffer */
+ struct timer_list bec_poll_timer;
+ struct completion start_comp, flush_comp;
+};
+
+struct kvaser_pciefd {
+ struct pci_dev *pci;
+ void __iomem *reg_base;
+ struct kvaser_pciefd_can *can[KVASER_PCIEFD_MAX_CAN_CHANNELS];
+ void *dma_data[KVASER_PCIEFD_DMA_COUNT];
+ u8 nr_channels;
+ u32 freq;
+ u32 freq_to_ticks_div;
+};
+
+struct kvaser_pciefd_rx_packet {
+ u32 header[2];
+ u64 timestamp;
+};
+
+struct kvaser_pciefd_tx_packet {
+ u32 header[2];
+ u8 data[64];
+};
+
+static const struct can_bittiming_const kvaser_pciefd_bittiming_const = {
+ .name = KVASER_PCIEFD_DRV_NAME,
+ .tseg1_min = 1,
+ .tseg1_max = 255,
+ .tseg2_min = 1,
+ .tseg2_max = 32,
+ .sjw_max = 16,
+ .brp_min = 1,
+ .brp_max = 4096,
+ .brp_inc = 1,
+};
+
+struct kvaser_pciefd_cfg_param {
+ __le32 magic;
+ __le32 nr;
+ __le32 len;
+ u8 data[KVASER_PCIEFD_CFG_PARAM_MAX_SZ];
+};
+
+struct kvaser_pciefd_cfg_img {
+ __le32 version;
+ __le32 magic;
+ __le32 crc;
+ struct kvaser_pciefd_cfg_param params[KVASER_PCIEFD_CFG_MAX_PARAMS];
+};
+
+static struct pci_device_id kvaser_pciefd_id_table[] = {
+ { PCI_DEVICE(KVASER_PCIEFD_VENDOR, KVASER_PCIEFD_4HS_ID), },
+ { PCI_DEVICE(KVASER_PCIEFD_VENDOR, KVASER_PCIEFD_2HS_ID), },
+ { PCI_DEVICE(KVASER_PCIEFD_VENDOR, KVASER_PCIEFD_HS_ID), },
+ { PCI_DEVICE(KVASER_PCIEFD_VENDOR, KVASER_PCIEFD_MINIPCIE_HS_ID), },
+ { PCI_DEVICE(KVASER_PCIEFD_VENDOR, KVASER_PCIEFD_MINIPCIE_2HS_ID), },
+ { 0,},
+};
+MODULE_DEVICE_TABLE(pci, kvaser_pciefd_id_table);
+
+/* Onboard flash memory functions */
+static int kvaser_pciefd_spi_wait_loop(struct kvaser_pciefd *pcie, int msk)
+{
+ u32 res;
+ int ret;
+
+ ret = readl_poll_timeout(pcie->reg_base + KVASER_PCIEFD_SPI_STATUS_REG,
+ res, res & msk, 0, 10);
+
+ return ret;
+}
+
+static int kvaser_pciefd_spi_cmd(struct kvaser_pciefd *pcie, const u8 *tx,
+ u32 tx_len, u8 *rx, u32 rx_len)
+{
+ int c;
+
+ iowrite32(BIT(0), pcie->reg_base + KVASER_PCIEFD_SPI_SSEL_REG);
+ iowrite32(BIT(10), pcie->reg_base + KVASER_PCIEFD_SPI_CTRL_REG);
+ ioread32(pcie->reg_base + KVASER_PCIEFD_SPI_RX_REG);
+
+ c = tx_len;
+ while (c--) {
+ if (kvaser_pciefd_spi_wait_loop(pcie, KVASER_PCIEFD_SPI_TRDY))
+ return -EIO;
+
+ iowrite32(*tx++, pcie->reg_base + KVASER_PCIEFD_SPI_TX_REG);
+
+ if (kvaser_pciefd_spi_wait_loop(pcie, KVASER_PCIEFD_SPI_RRDY))
+ return -EIO;
+
+ ioread32(pcie->reg_base + KVASER_PCIEFD_SPI_RX_REG);
+ }
+
+ c = rx_len;
+ while (c-- > 0) {
+ if (kvaser_pciefd_spi_wait_loop(pcie, KVASER_PCIEFD_SPI_TRDY))
+ return -EIO;
+
+ iowrite32(0, pcie->reg_base + KVASER_PCIEFD_SPI_TX_REG);
+
+ if (kvaser_pciefd_spi_wait_loop(pcie, KVASER_PCIEFD_SPI_RRDY))
+ return -EIO;
+
+ *rx++ = ioread32(pcie->reg_base + KVASER_PCIEFD_SPI_RX_REG);
+ }
+
+ if (kvaser_pciefd_spi_wait_loop(pcie, KVASER_PCIEFD_SPI_TMT))
+ return -EIO;
+
+ iowrite32(0, pcie->reg_base + KVASER_PCIEFD_SPI_CTRL_REG);
+
+ if (c != -1) {
+ dev_err(&pcie->pci->dev, "Flash SPI transfer failed\n");
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static int kvaser_pciefd_cfg_read_and_verify(struct kvaser_pciefd *pcie,
+ struct kvaser_pciefd_cfg_img *img)
+{
+ int offset = KVASER_PCIEFD_CFG_IMG_OFFSET;
+ int res, crc;
+ u8 *crc_buff;
+
+ u8 cmd[] = {
+ KVASER_PCIEFD_FLASH_READ_CMD,
+ (u8)((offset >> 16) & 0xff),
+ (u8)((offset >> 8) & 0xff),
+ (u8)(offset & 0xff)
+ };
+
+ res = kvaser_pciefd_spi_cmd(pcie, cmd, ARRAY_SIZE(cmd), (u8 *)img,
+ KVASER_PCIEFD_CFG_IMG_SZ);
+ if (res)
+ return res;
+
+ crc_buff = (u8 *)img->params;
+
+ if (le32_to_cpu(img->version) != KVASER_PCIEFD_CFG_SYS_VER) {
+ dev_err(&pcie->pci->dev,
+ "Config flash corrupted, version number is wrong\n");
+ return -ENODEV;
+ }
+
+ if (le32_to_cpu(img->magic) != KVASER_PCIEFD_CFG_MAGIC) {
+ dev_err(&pcie->pci->dev,
+ "Config flash corrupted, magic number is wrong\n");
+ return -ENODEV;
+ }
+
+ crc = ~crc32_be(0xffffffff, crc_buff, sizeof(img->params));
+ if (le32_to_cpu(img->crc) != crc) {
+ dev_err(&pcie->pci->dev,
+ "Stored CRC does not match flash image contents\n");
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static void kvaser_pciefd_cfg_read_params(struct kvaser_pciefd *pcie,
+ struct kvaser_pciefd_cfg_img *img)
+{
+ struct kvaser_pciefd_cfg_param *param;
+
+ param = &img->params[KVASER_PCIEFD_CFG_PARAM_NR_CHAN];
+ memcpy(&pcie->nr_channels, param->data, le32_to_cpu(param->len));
+}
+
+static int kvaser_pciefd_read_cfg(struct kvaser_pciefd *pcie)
+{
+ int res;
+ struct kvaser_pciefd_cfg_img *img;
+
+ /* Read electronic signature */
+ u8 cmd[] = {KVASER_PCIEFD_FLASH_RES_CMD, 0, 0, 0};
+
+ res = kvaser_pciefd_spi_cmd(pcie, cmd, ARRAY_SIZE(cmd), cmd, 1);
+ if (res)
+ return -EIO;
+
+ img = kmalloc(KVASER_PCIEFD_CFG_IMG_SZ, GFP_KERNEL);
+ if (!img)
+ return -ENOMEM;
+
+ if (cmd[0] != KVASER_PCIEFD_FLASH_ID_EPCS16) {
+ dev_err(&pcie->pci->dev,
+ "Flash id is 0x%x instead of expected EPCS16 (0x%x)\n",
+ cmd[0], KVASER_PCIEFD_FLASH_ID_EPCS16);
+
+ res = -ENODEV;
+ goto image_free;
+ }
+
+ cmd[0] = KVASER_PCIEFD_FLASH_STATUS_CMD;
+ res = kvaser_pciefd_spi_cmd(pcie, cmd, 1, cmd, 1);
+ if (res) {
+ goto image_free;
+ } else if (cmd[0] & 1) {
+ res = -EIO;
+ /* No write is ever done, the WIP should never be set */
+ dev_err(&pcie->pci->dev, "Unexpected WIP bit set in flash\n");
+ goto image_free;
+ }
+
+ res = kvaser_pciefd_cfg_read_and_verify(pcie, img);
+ if (res) {
+ res = -EIO;
+ goto image_free;
+ }
+
+ kvaser_pciefd_cfg_read_params(pcie, img);
+
+image_free:
+ kfree(img);
+ return res;
+}
+
+static void kvaser_pciefd_request_status(struct kvaser_pciefd_can *can)
+{
+ u32 cmd;
+
+ cmd = KVASER_PCIEFD_KCAN_CMD_SRQ;
+ cmd |= ++can->cmd_seq << KVASER_PCIEFD_KCAN_CMD_SEQ_SHIFT;
+ iowrite32(cmd, can->reg_base + KVASER_PCIEFD_KCAN_CMD_REG);
+}
+
+static void kvaser_pciefd_enable_err_gen(struct kvaser_pciefd_can *can)
+{
+ u32 mode;
+ unsigned long irq;
+
+ spin_lock_irqsave(&can->lock, irq);
+ mode = ioread32(can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG);
+ if (!(mode & KVASER_PCIEFD_KCAN_MODE_EPEN)) {
+ mode |= KVASER_PCIEFD_KCAN_MODE_EPEN;
+ iowrite32(mode, can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG);
+ }
+ spin_unlock_irqrestore(&can->lock, irq);
+}
+
+static void kvaser_pciefd_disable_err_gen(struct kvaser_pciefd_can *can)
+{
+ u32 mode;
+ unsigned long irq;
+
+ spin_lock_irqsave(&can->lock, irq);
+ mode = ioread32(can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG);
+ mode &= ~KVASER_PCIEFD_KCAN_MODE_EPEN;
+ iowrite32(mode, can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG);
+ spin_unlock_irqrestore(&can->lock, irq);
+}
+
+static int kvaser_pciefd_set_tx_irq(struct kvaser_pciefd_can *can)
+{
+ u32 msk;
+
+ msk = KVASER_PCIEFD_KCAN_IRQ_TE | KVASER_PCIEFD_KCAN_IRQ_ROF |
+ KVASER_PCIEFD_KCAN_IRQ_TOF | KVASER_PCIEFD_KCAN_IRQ_ABD |
+ KVASER_PCIEFD_KCAN_IRQ_TAE | KVASER_PCIEFD_KCAN_IRQ_TAL |
+ KVASER_PCIEFD_KCAN_IRQ_FDIC | KVASER_PCIEFD_KCAN_IRQ_BPP |
+ KVASER_PCIEFD_KCAN_IRQ_TAR | KVASER_PCIEFD_KCAN_IRQ_TFD;
+
+ iowrite32(msk, can->reg_base + KVASER_PCIEFD_KCAN_IEN_REG);
+
+ return 0;
+}
+
+static void kvaser_pciefd_setup_controller(struct kvaser_pciefd_can *can)
+{
+ u32 mode;
+ unsigned long irq;
+
+ spin_lock_irqsave(&can->lock, irq);
+
+ mode = ioread32(can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG);
+ if (can->can.ctrlmode & CAN_CTRLMODE_FD) {
+ mode &= ~KVASER_PCIEFD_KCAN_MODE_CCM;
+ if (can->can.ctrlmode & CAN_CTRLMODE_FD_NON_ISO)
+ mode |= KVASER_PCIEFD_KCAN_MODE_NIFDEN;
+ else
+ mode &= ~KVASER_PCIEFD_KCAN_MODE_NIFDEN;
+ } else {
+ mode |= KVASER_PCIEFD_KCAN_MODE_CCM;
+ mode &= ~KVASER_PCIEFD_KCAN_MODE_NIFDEN;
+ }
+
+ if (can->can.ctrlmode & CAN_CTRLMODE_LISTENONLY)
+ mode |= KVASER_PCIEFD_KCAN_MODE_LOM;
+
+ mode |= KVASER_PCIEFD_KCAN_MODE_EEN;
+ mode |= KVASER_PCIEFD_KCAN_MODE_EPEN;
+ /* Use ACK packet type */
+ mode &= ~KVASER_PCIEFD_KCAN_MODE_APT;
+ mode &= ~KVASER_PCIEFD_KCAN_MODE_RM;
+ iowrite32(mode, can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG);
+
+ spin_unlock_irqrestore(&can->lock, irq);
+}
+
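+/* Start aborting and flushing the Tx FIFO. If the controller is already
+ * idle the abort command is issued directly, otherwise the controller is
+ * first put in reset mode. Completion is signalled by an end-of-flush
+ * packet, see kvaser_pciefd_handle_eflush_packet().
+ */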
+static void kvaser_pciefd_start_controller_flush(struct kvaser_pciefd_can *can)
+{
+ u32 status;
+ unsigned long irq;
+
+ spin_lock_irqsave(&can->lock, irq);
+ iowrite32(-1, can->reg_base + KVASER_PCIEFD_KCAN_IRQ_REG);
+ iowrite32(KVASER_PCIEFD_KCAN_IRQ_ABD | KVASER_PCIEFD_KCAN_IRQ_TFD,
+ can->reg_base + KVASER_PCIEFD_KCAN_IEN_REG);
+
+ status = ioread32(can->reg_base + KVASER_PCIEFD_KCAN_STAT_REG);
+ if (status & KVASER_PCIEFD_KCAN_STAT_IDLE) {
+ u32 cmd;
+
+ /* If controller is already idle, run abort, flush and reset */
+ cmd = KVASER_PCIEFD_KCAN_CMD_AT;
+ cmd |= ++can->cmd_seq << KVASER_PCIEFD_KCAN_CMD_SEQ_SHIFT;
+ iowrite32(cmd, can->reg_base + KVASER_PCIEFD_KCAN_CMD_REG);
+ } else if (!(status & KVASER_PCIEFD_KCAN_STAT_RMR)) {
+ u32 mode;
+
+ /* Put controller in reset mode */
+ mode = ioread32(can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG);
+ mode |= KVASER_PCIEFD_KCAN_MODE_RM;
+ iowrite32(mode, can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG);
+ }
+
+ spin_unlock_irqrestore(&can->lock, irq);
+}
+
+static int kvaser_pciefd_bus_on(struct kvaser_pciefd_can *can)
+{
+ u32 mode;
+ unsigned long irq;
+
+ del_timer(&can->bec_poll_timer);
+
+ if (!completion_done(&can->flush_comp))
+ kvaser_pciefd_start_controller_flush(can);
+
+ if (!wait_for_completion_timeout(&can->flush_comp,
+ KVASER_PCIEFD_WAIT_TIMEOUT)) {
+ netdev_err(can->can.dev, "Timeout during bus on flush\n");
+ return -ETIMEDOUT;
+ }
+
+ spin_lock_irqsave(&can->lock, irq);
+ iowrite32(0, can->reg_base + KVASER_PCIEFD_KCAN_IEN_REG);
+ iowrite32(-1, can->reg_base + KVASER_PCIEFD_KCAN_IRQ_REG);
+
+ iowrite32(KVASER_PCIEFD_KCAN_IRQ_ABD | KVASER_PCIEFD_KCAN_IRQ_TFD,
+ can->reg_base + KVASER_PCIEFD_KCAN_IEN_REG);
+
+ mode = ioread32(can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG);
+ mode &= ~KVASER_PCIEFD_KCAN_MODE_RM;
+ iowrite32(mode, can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG);
+ spin_unlock_irqrestore(&can->lock, irq);
+
+ if (!wait_for_completion_timeout(&can->start_comp,
+ KVASER_PCIEFD_WAIT_TIMEOUT)) {
+ netdev_err(can->can.dev, "Timeout during bus on reset\n");
+ return -ETIMEDOUT;
+ }
+ /* Reset interrupt handling */
+ iowrite32(0, can->reg_base + KVASER_PCIEFD_KCAN_IEN_REG);
+ iowrite32(-1, can->reg_base + KVASER_PCIEFD_KCAN_IRQ_REG);
+
+ kvaser_pciefd_set_tx_irq(can);
+ kvaser_pciefd_setup_controller(can);
+
+ can->can.state = CAN_STATE_ERROR_ACTIVE;
+ netif_wake_queue(can->can.dev);
+ can->bec.txerr = 0;
+ can->bec.rxerr = 0;
+ can->err_rep_cnt = 0;
+
+ return 0;
+}
+
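+/* Stop the PWM output by programming a trigger level equal to the top
+ * value, i.e. a zero duty cycle.
+ */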
+static void kvaser_pciefd_pwm_stop(struct kvaser_pciefd_can *can)
+{
+ u8 top;
+ u32 pwm_ctrl;
+ unsigned long irq;
+
+ spin_lock_irqsave(&can->lock, irq);
+ pwm_ctrl = ioread32(can->reg_base + KVASER_PCIEFD_KCAN_PWM_REG);
+ top = (pwm_ctrl >> KVASER_PCIEFD_KCAN_PWM_TOP_SHIFT) & 0xff;
+
+ /* Set duty cycle to zero */
+ pwm_ctrl |= top;
+ iowrite32(pwm_ctrl, can->reg_base + KVASER_PCIEFD_KCAN_PWM_REG);
+ spin_unlock_irqrestore(&can->lock, irq);
+}
+
+static void kvaser_pciefd_pwm_start(struct kvaser_pciefd_can *can)
+{
+ int top, trigger;
+ u32 pwm_ctrl;
+ unsigned long irq;
+
+ kvaser_pciefd_pwm_stop(can);
+ spin_lock_irqsave(&can->lock, irq);
+
+	/* Set PWM frequency to 500 kHz */
+ top = can->can.clock.freq / (2 * 500000) - 1;
+
+ pwm_ctrl = top & 0xff;
+ pwm_ctrl |= (top & 0xff) << KVASER_PCIEFD_KCAN_PWM_TOP_SHIFT;
+ iowrite32(pwm_ctrl, can->reg_base + KVASER_PCIEFD_KCAN_PWM_REG);
+
+	/* Set duty cycle to 95% */
+ trigger = (100 * top - 95 * (top + 1) + 50) / 100;
+ pwm_ctrl = trigger & 0xff;
+ pwm_ctrl |= (top & 0xff) << KVASER_PCIEFD_KCAN_PWM_TOP_SHIFT;
+ iowrite32(pwm_ctrl, can->reg_base + KVASER_PCIEFD_KCAN_PWM_REG);
+ spin_unlock_irqrestore(&can->lock, irq);
+}
+
+static int kvaser_pciefd_open(struct net_device *netdev)
+{
+ int err;
+ struct kvaser_pciefd_can *can = netdev_priv(netdev);
+
+ err = open_candev(netdev);
+ if (err)
+ return err;
+
+ err = kvaser_pciefd_bus_on(can);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+static int kvaser_pciefd_stop(struct net_device *netdev)
+{
+ struct kvaser_pciefd_can *can = netdev_priv(netdev);
+ int ret = 0;
+
+ /* Don't interrupt ongoing flush */
+ if (!completion_done(&can->flush_comp))
+ kvaser_pciefd_start_controller_flush(can);
+
+ if (!wait_for_completion_timeout(&can->flush_comp,
+ KVASER_PCIEFD_WAIT_TIMEOUT)) {
+ netdev_err(can->can.dev, "Timeout during stop\n");
+ ret = -ETIMEDOUT;
+ } else {
+ iowrite32(0, can->reg_base + KVASER_PCIEFD_KCAN_IEN_REG);
+ del_timer(&can->bec_poll_timer);
+ }
+ close_candev(netdev);
+
+ return ret;
+}
+
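+/* Build a Tx packet header from the skb. Returns the number of 32-bit
+ * words of frame data that must be written to the FIFO.
+ */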
+static int kvaser_pciefd_prepare_tx_packet(struct kvaser_pciefd_tx_packet *p,
+ struct kvaser_pciefd_can *can,
+ struct sk_buff *skb)
+{
+ struct canfd_frame *cf = (struct canfd_frame *)skb->data;
+ int packet_size;
+ int seq = can->echo_idx;
+
+ memset(p, 0, sizeof(*p));
+
+ if (can->can.ctrlmode & CAN_CTRLMODE_ONE_SHOT)
+ p->header[1] |= KVASER_PCIEFD_TPACKET_SMS;
+
+ if (cf->can_id & CAN_RTR_FLAG)
+ p->header[0] |= KVASER_PCIEFD_RPACKET_RTR;
+
+ if (cf->can_id & CAN_EFF_FLAG)
+ p->header[0] |= KVASER_PCIEFD_RPACKET_IDE;
+
+ p->header[0] |= cf->can_id & CAN_EFF_MASK;
+ p->header[1] |= can_len2dlc(cf->len) << KVASER_PCIEFD_RPACKET_DLC_SHIFT;
+ p->header[1] |= KVASER_PCIEFD_TPACKET_AREQ;
+
+ if (can_is_canfd_skb(skb)) {
+ p->header[1] |= KVASER_PCIEFD_RPACKET_FDF;
+ if (cf->flags & CANFD_BRS)
+ p->header[1] |= KVASER_PCIEFD_RPACKET_BRS;
+ if (cf->flags & CANFD_ESI)
+ p->header[1] |= KVASER_PCIEFD_RPACKET_ESI;
+ }
+
+ p->header[1] |= seq & KVASER_PCIEFD_PACKET_SEQ_MSK;
+
+ packet_size = cf->len;
+ memcpy(p->data, cf->data, packet_size);
+
+ return DIV_ROUND_UP(packet_size, 4);
+}
+
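+/* Transmit path: the two header words go to the FIFO register, the payload
+ * follows, and the last word is written to the FIFO_LAST register to
+ * complete the packet. The queue is stopped while the FIFO is full or the
+ * next echo slot is still occupied.
+ */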
+static netdev_tx_t kvaser_pciefd_start_xmit(struct sk_buff *skb,
+ struct net_device *netdev)
+{
+ struct kvaser_pciefd_can *can = netdev_priv(netdev);
+ unsigned long irq_flags;
+ struct kvaser_pciefd_tx_packet packet;
+ int nwords;
+ u8 count;
+
+ if (can_dropped_invalid_skb(netdev, skb))
+ return NETDEV_TX_OK;
+
+ nwords = kvaser_pciefd_prepare_tx_packet(&packet, can, skb);
+
+ spin_lock_irqsave(&can->echo_lock, irq_flags);
+
+ /* Prepare and save echo skb in internal slot */
+ can_put_echo_skb(skb, netdev, can->echo_idx);
+
+ /* Move echo index to the next slot */
+ can->echo_idx = (can->echo_idx + 1) % can->can.echo_skb_max;
+
+ /* Write header to fifo */
+ iowrite32(packet.header[0],
+ can->reg_base + KVASER_PCIEFD_KCAN_FIFO_REG);
+ iowrite32(packet.header[1],
+ can->reg_base + KVASER_PCIEFD_KCAN_FIFO_REG);
+
+ if (nwords) {
+ u32 data_last = ((u32 *)packet.data)[nwords - 1];
+
+ /* Write data to fifo, except last word */
+ iowrite32_rep(can->reg_base +
+ KVASER_PCIEFD_KCAN_FIFO_REG, packet.data,
+ nwords - 1);
+ /* Write last word to end of fifo */
+ __raw_writel(data_last, can->reg_base +
+ KVASER_PCIEFD_KCAN_FIFO_LAST_REG);
+ } else {
+ /* Complete write to fifo */
+ __raw_writel(0, can->reg_base +
+ KVASER_PCIEFD_KCAN_FIFO_LAST_REG);
+ }
+
+ count = ioread32(can->reg_base + KVASER_PCIEFD_KCAN_TX_NPACKETS_REG);
+ /* No room for a new message, stop the queue until at least one
+ * successful transmit
+ */
+ if (count >= KVASER_PCIEFD_CAN_TX_MAX_COUNT ||
+ can->can.echo_skb[can->echo_idx])
+ netif_stop_queue(netdev);
+
+ spin_unlock_irqrestore(&can->echo_lock, irq_flags);
+
+ return NETDEV_TX_OK;
+}
+
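+/* Bittiming can only be changed in reset mode. Temporarily put the
+ * controller in reset mode, write the nominal or data bittiming register
+ * and restore the previous mode.
+ */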
+static int kvaser_pciefd_set_bittiming(struct kvaser_pciefd_can *can, bool data)
+{
+ u32 mode, test, btrn;
+ unsigned long irq_flags;
+ int ret;
+ struct can_bittiming *bt;
+
+ if (data)
+ bt = &can->can.data_bittiming;
+ else
+ bt = &can->can.bittiming;
+
+ btrn = ((bt->phase_seg2 - 1) & 0x1f) <<
+ KVASER_PCIEFD_KCAN_BTRN_TSEG2_SHIFT |
+ (((bt->prop_seg + bt->phase_seg1) - 1) & 0x1ff) <<
+ KVASER_PCIEFD_KCAN_BTRN_TSEG1_SHIFT |
+ ((bt->sjw - 1) & 0xf) << KVASER_PCIEFD_KCAN_BTRN_SJW_SHIFT |
+ ((bt->brp - 1) & 0x1fff);
+
+ spin_lock_irqsave(&can->lock, irq_flags);
+ mode = ioread32(can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG);
+
+ /* Put the circuit in reset mode */
+ iowrite32(mode | KVASER_PCIEFD_KCAN_MODE_RM,
+ can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG);
+
+ /* Can only set bittiming if in reset mode */
+ ret = readl_poll_timeout(can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG,
+ test, test & KVASER_PCIEFD_KCAN_MODE_RM,
+ 0, 10);
+
+ if (ret) {
+ spin_unlock_irqrestore(&can->lock, irq_flags);
+ return -EBUSY;
+ }
+
+ if (data)
+ iowrite32(btrn, can->reg_base + KVASER_PCIEFD_KCAN_BTRD_REG);
+ else
+ iowrite32(btrn, can->reg_base + KVASER_PCIEFD_KCAN_BTRN_REG);
+
+ /* Restore previous reset mode status */
+ iowrite32(mode, can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG);
+
+ spin_unlock_irqrestore(&can->lock, irq_flags);
+ return 0;
+}
+
+static int kvaser_pciefd_set_nominal_bittiming(struct net_device *ndev)
+{
+ return kvaser_pciefd_set_bittiming(netdev_priv(ndev), false);
+}
+
+static int kvaser_pciefd_set_data_bittiming(struct net_device *ndev)
+{
+ return kvaser_pciefd_set_bittiming(netdev_priv(ndev), true);
+}
+
+static int kvaser_pciefd_set_mode(struct net_device *ndev, enum can_mode mode)
+{
+ struct kvaser_pciefd_can *can = netdev_priv(ndev);
+ int ret = 0;
+
+ switch (mode) {
+ case CAN_MODE_START:
+ if (!can->can.restart_ms)
+ ret = kvaser_pciefd_bus_on(can);
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return ret;
+}
+
+static int kvaser_pciefd_get_berr_counter(const struct net_device *ndev,
+ struct can_berr_counter *bec)
+{
+ struct kvaser_pciefd_can *can = netdev_priv(ndev);
+
+ bec->rxerr = can->bec.rxerr;
+ bec->txerr = can->bec.txerr;
+ return 0;
+}
+
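+/* Error counter poll timer. Re-enables error packet generation, which is
+ * turned off when error reporting is being rate limited, and requests a
+ * new status packet so the counters are kept up to date.
+ */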
+static void kvaser_pciefd_bec_poll_timer(struct timer_list *data)
+{
+ struct kvaser_pciefd_can *can = from_timer(can, data, bec_poll_timer);
+
+ kvaser_pciefd_enable_err_gen(can);
+ kvaser_pciefd_request_status(can);
+ can->err_rep_cnt = 0;
+}
+
+static const struct net_device_ops kvaser_pciefd_netdev_ops = {
+ .ndo_open = kvaser_pciefd_open,
+ .ndo_stop = kvaser_pciefd_stop,
+ .ndo_start_xmit = kvaser_pciefd_start_xmit,
+ .ndo_change_mtu = can_change_mtu,
+};
+
+static int kvaser_pciefd_setup_can_ctrls(struct kvaser_pciefd *pcie)
+{
+ int i;
+
+ for (i = 0; i < pcie->nr_channels; i++) {
+ struct net_device *netdev;
+ struct kvaser_pciefd_can *can;
+ u32 status, tx_npackets;
+
+ netdev = alloc_candev(sizeof(struct kvaser_pciefd_can),
+ KVASER_PCIEFD_CAN_TX_MAX_COUNT);
+ if (!netdev)
+ return -ENOMEM;
+
+ can = netdev_priv(netdev);
+ netdev->netdev_ops = &kvaser_pciefd_netdev_ops;
+ can->reg_base = pcie->reg_base + KVASER_PCIEFD_KCAN0_BASE +
+ i * KVASER_PCIEFD_KCAN_BASE_OFFSET;
+
+ can->kv_pcie = pcie;
+ can->cmd_seq = 0;
+ can->err_rep_cnt = 0;
+ can->bec.txerr = 0;
+ can->bec.rxerr = 0;
+
+ init_completion(&can->start_comp);
+ init_completion(&can->flush_comp);
+ timer_setup(&can->bec_poll_timer, kvaser_pciefd_bec_poll_timer,
+ 0);
+
+ tx_npackets = ioread32(can->reg_base +
+ KVASER_PCIEFD_KCAN_TX_NPACKETS_REG);
+ if (((tx_npackets >> KVASER_PCIEFD_KCAN_TX_NPACKETS_MAX_SHIFT) &
+ 0xff) < KVASER_PCIEFD_CAN_TX_MAX_COUNT) {
+ dev_err(&pcie->pci->dev,
+ "Max Tx count is smaller than expected\n");
+
+ free_candev(netdev);
+ return -ENODEV;
+ }
+
+ can->can.clock.freq = pcie->freq;
+ can->can.echo_skb_max = KVASER_PCIEFD_CAN_TX_MAX_COUNT;
+ can->echo_idx = 0;
+ spin_lock_init(&can->echo_lock);
+ spin_lock_init(&can->lock);
+ can->can.bittiming_const = &kvaser_pciefd_bittiming_const;
+ can->can.data_bittiming_const = &kvaser_pciefd_bittiming_const;
+
+ can->can.do_set_bittiming = kvaser_pciefd_set_nominal_bittiming;
+ can->can.do_set_data_bittiming =
+ kvaser_pciefd_set_data_bittiming;
+
+ can->can.do_set_mode = kvaser_pciefd_set_mode;
+ can->can.do_get_berr_counter = kvaser_pciefd_get_berr_counter;
+
+ can->can.ctrlmode_supported = CAN_CTRLMODE_LISTENONLY |
+ CAN_CTRLMODE_FD |
+ CAN_CTRLMODE_FD_NON_ISO;
+
+ status = ioread32(can->reg_base + KVASER_PCIEFD_KCAN_STAT_REG);
+ if (!(status & KVASER_PCIEFD_KCAN_STAT_FD)) {
+ dev_err(&pcie->pci->dev,
+				"CAN FD not supported as expected on channel %d\n", i);
+
+ free_candev(netdev);
+ return -ENODEV;
+ }
+
+ if (status & KVASER_PCIEFD_KCAN_STAT_CAP)
+ can->can.ctrlmode_supported |= CAN_CTRLMODE_ONE_SHOT;
+
+ netdev->flags |= IFF_ECHO;
+
+ SET_NETDEV_DEV(netdev, &pcie->pci->dev);
+
+ iowrite32(-1, can->reg_base + KVASER_PCIEFD_KCAN_IRQ_REG);
+ iowrite32(KVASER_PCIEFD_KCAN_IRQ_ABD |
+ KVASER_PCIEFD_KCAN_IRQ_TFD,
+ can->reg_base + KVASER_PCIEFD_KCAN_IEN_REG);
+
+ pcie->can[i] = can;
+ kvaser_pciefd_pwm_start(can);
+ }
+
+ return 0;
+}
+
+static int kvaser_pciefd_reg_candev(struct kvaser_pciefd *pcie)
+{
+ int i;
+
+ for (i = 0; i < pcie->nr_channels; i++) {
+ int err = register_candev(pcie->can[i]->can.dev);
+
+ if (err) {
+ int j;
+
+ /* Unregister all successfully registered devices. */
+ for (j = 0; j < i; j++)
+ unregister_candev(pcie->can[j]->can.dev);
+ return err;
+ }
+ }
+
+ return 0;
+}
+
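+/* Write one DMA buffer address to its map registers, as two 32-bit words
+ * with a flag marking a 64-bit address when applicable.
+ */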
+static void kvaser_pciefd_write_dma_map(struct kvaser_pciefd *pcie,
+ dma_addr_t addr, int offset)
+{
+ u32 word1, word2;
+
+#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
+ word1 = addr | KVASER_PCIEFD_64BIT_DMA_BIT;
+ word2 = addr >> 32;
+#else
+ word1 = addr;
+ word2 = 0;
+#endif
+ iowrite32(word1, pcie->reg_base + offset);
+ iowrite32(word2, pcie->reg_base + offset + 4);
+}
+
+static int kvaser_pciefd_setup_dma(struct kvaser_pciefd *pcie)
+{
+ int i;
+ u32 srb_status;
+ dma_addr_t dma_addr[KVASER_PCIEFD_DMA_COUNT];
+
+ /* Disable the DMA */
+ iowrite32(0, pcie->reg_base + KVASER_PCIEFD_SRB_CTRL_REG);
+ for (i = 0; i < KVASER_PCIEFD_DMA_COUNT; i++) {
+ unsigned int offset = KVASER_PCIEFD_DMA_MAP_BASE + 8 * i;
+
+ pcie->dma_data[i] =
+ dmam_alloc_coherent(&pcie->pci->dev,
+ KVASER_PCIEFD_DMA_SIZE,
+ &dma_addr[i],
+ GFP_KERNEL);
+
+ if (!pcie->dma_data[i] || !dma_addr[i]) {
+ dev_err(&pcie->pci->dev, "Rx dma_alloc(%u) failure\n",
+ KVASER_PCIEFD_DMA_SIZE);
+ return -ENOMEM;
+ }
+
+ kvaser_pciefd_write_dma_map(pcie, dma_addr[i], offset);
+ }
+
+ /* Reset Rx FIFO, and both DMA buffers */
+ iowrite32(KVASER_PCIEFD_SRB_CMD_FOR | KVASER_PCIEFD_SRB_CMD_RDB0 |
+ KVASER_PCIEFD_SRB_CMD_RDB1,
+ pcie->reg_base + KVASER_PCIEFD_SRB_CMD_REG);
+
+ srb_status = ioread32(pcie->reg_base + KVASER_PCIEFD_SRB_STAT_REG);
+ if (!(srb_status & KVASER_PCIEFD_SRB_STAT_DI)) {
+ dev_err(&pcie->pci->dev, "DMA not idle before enabling\n");
+ return -EIO;
+ }
+
+ /* Enable the DMA */
+ iowrite32(KVASER_PCIEFD_SRB_CTRL_DMA_ENABLE,
+ pcie->reg_base + KVASER_PCIEFD_SRB_CTRL_REG);
+
+ return 0;
+}
+
+static int kvaser_pciefd_setup_board(struct kvaser_pciefd *pcie)
+{
+ u32 sysid, srb_status, build;
+ u8 sysid_nr_chan;
+ int ret;
+
+ ret = kvaser_pciefd_read_cfg(pcie);
+ if (ret)
+ return ret;
+
+ sysid = ioread32(pcie->reg_base + KVASER_PCIEFD_SYSID_VERSION_REG);
+ sysid_nr_chan = (sysid >> KVASER_PCIEFD_SYSID_NRCHAN_SHIFT) & 0xff;
+ if (pcie->nr_channels != sysid_nr_chan) {
+ dev_err(&pcie->pci->dev,
+ "Number of channels does not match: %u vs %u\n",
+ pcie->nr_channels,
+ sysid_nr_chan);
+ return -ENODEV;
+ }
+
+ if (pcie->nr_channels > KVASER_PCIEFD_MAX_CAN_CHANNELS)
+ pcie->nr_channels = KVASER_PCIEFD_MAX_CAN_CHANNELS;
+
+ build = ioread32(pcie->reg_base + KVASER_PCIEFD_SYSID_BUILD_REG);
+ dev_dbg(&pcie->pci->dev, "Version %u.%u.%u\n",
+ (sysid >> KVASER_PCIEFD_SYSID_MAJOR_VER_SHIFT) & 0xff,
+ sysid & 0xff,
+ (build >> KVASER_PCIEFD_SYSID_BUILD_VER_SHIFT) & 0x7fff);
+
+ srb_status = ioread32(pcie->reg_base + KVASER_PCIEFD_SRB_STAT_REG);
+ if (!(srb_status & KVASER_PCIEFD_SRB_STAT_DMA)) {
+ dev_err(&pcie->pci->dev,
+ "Hardware without DMA is not supported\n");
+ return -ENODEV;
+ }
+
+ pcie->freq = ioread32(pcie->reg_base + KVASER_PCIEFD_SYSID_CANFREQ_REG);
+ pcie->freq_to_ticks_div = pcie->freq / 1000000;
+ if (pcie->freq_to_ticks_div == 0)
+ pcie->freq_to_ticks_div = 1;
+
+ /* Turn off all loopback functionality */
+ iowrite32(0, pcie->reg_base + KVASER_PCIEFD_LOOP_REG);
+ return ret;
+}
+
+static int kvaser_pciefd_handle_data_packet(struct kvaser_pciefd *pcie,
+ struct kvaser_pciefd_rx_packet *p,
+ __le32 *data)
+{
+ struct sk_buff *skb;
+ struct canfd_frame *cf;
+ struct can_priv *priv;
+ struct net_device_stats *stats;
+ struct skb_shared_hwtstamps *shhwtstamps;
+ u8 ch_id = (p->header[1] >> KVASER_PCIEFD_PACKET_CHID_SHIFT) & 0x7;
+
+ if (ch_id >= pcie->nr_channels)
+ return -EIO;
+
+ priv = &pcie->can[ch_id]->can;
+ stats = &priv->dev->stats;
+
+ if (p->header[1] & KVASER_PCIEFD_RPACKET_FDF) {
+ skb = alloc_canfd_skb(priv->dev, &cf);
+ if (!skb) {
+ stats->rx_dropped++;
+ return -ENOMEM;
+ }
+
+ if (p->header[1] & KVASER_PCIEFD_RPACKET_BRS)
+ cf->flags |= CANFD_BRS;
+
+ if (p->header[1] & KVASER_PCIEFD_RPACKET_ESI)
+ cf->flags |= CANFD_ESI;
+ } else {
+ skb = alloc_can_skb(priv->dev, (struct can_frame **)&cf);
+ if (!skb) {
+ stats->rx_dropped++;
+ return -ENOMEM;
+ }
+ }
+
+ cf->can_id = p->header[0] & CAN_EFF_MASK;
+ if (p->header[0] & KVASER_PCIEFD_RPACKET_IDE)
+ cf->can_id |= CAN_EFF_FLAG;
+
+ cf->len = can_dlc2len(p->header[1] >> KVASER_PCIEFD_RPACKET_DLC_SHIFT);
+
+ if (p->header[0] & KVASER_PCIEFD_RPACKET_RTR)
+ cf->can_id |= CAN_RTR_FLAG;
+ else
+ memcpy(cf->data, data, cf->len);
+
+ shhwtstamps = skb_hwtstamps(skb);
+
+ shhwtstamps->hwtstamp =
+ ns_to_ktime(div_u64(p->timestamp * 1000,
+ pcie->freq_to_ticks_div));
+
+ stats->rx_bytes += cf->len;
+ stats->rx_packets++;
+
+ return netif_rx(skb);
+}
+
+static void kvaser_pciefd_change_state(struct kvaser_pciefd_can *can,
+ struct can_frame *cf,
+ enum can_state new_state,
+ enum can_state tx_state,
+ enum can_state rx_state)
+{
+ can_change_state(can->can.dev, cf, tx_state, rx_state);
+
+ if (new_state == CAN_STATE_BUS_OFF) {
+ struct net_device *ndev = can->can.dev;
+ unsigned long irq_flags;
+
+ spin_lock_irqsave(&can->lock, irq_flags);
+ netif_stop_queue(can->can.dev);
+ spin_unlock_irqrestore(&can->lock, irq_flags);
+
+		/* Prevent the CAN controller from automatically recovering from bus off */
+ if (!can->can.restart_ms) {
+ kvaser_pciefd_start_controller_flush(can);
+ can_bus_off(ndev);
+ }
+ }
+}
+
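+/* Derive the CAN state from the status packet flags and the error
+ * counters. The direction (tx or rx) with the higher counter gets the new
+ * state.
+ */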
+static void kvaser_pciefd_packet_to_state(struct kvaser_pciefd_rx_packet *p,
+ struct can_berr_counter *bec,
+ enum can_state *new_state,
+ enum can_state *tx_state,
+ enum can_state *rx_state)
+{
+ if (p->header[0] & KVASER_PCIEFD_SPACK_BOFF ||
+ p->header[0] & KVASER_PCIEFD_SPACK_IRM)
+ *new_state = CAN_STATE_BUS_OFF;
+ else if (bec->txerr >= 255 || bec->rxerr >= 255)
+ *new_state = CAN_STATE_BUS_OFF;
+ else if (p->header[1] & KVASER_PCIEFD_SPACK_EPLR)
+ *new_state = CAN_STATE_ERROR_PASSIVE;
+ else if (bec->txerr >= 128 || bec->rxerr >= 128)
+ *new_state = CAN_STATE_ERROR_PASSIVE;
+ else if (p->header[1] & KVASER_PCIEFD_SPACK_EWLR)
+ *new_state = CAN_STATE_ERROR_WARNING;
+ else if (bec->txerr >= 96 || bec->rxerr >= 96)
+ *new_state = CAN_STATE_ERROR_WARNING;
+ else
+ *new_state = CAN_STATE_ERROR_ACTIVE;
+
+ *tx_state = bec->txerr >= bec->rxerr ? *new_state : 0;
+ *rx_state = bec->txerr <= bec->rxerr ? *new_state : 0;
+}
+
+static int kvaser_pciefd_rx_error_frame(struct kvaser_pciefd_can *can,
+ struct kvaser_pciefd_rx_packet *p)
+{
+ struct can_berr_counter bec;
+ enum can_state old_state, new_state, tx_state, rx_state;
+ struct net_device *ndev = can->can.dev;
+ struct sk_buff *skb;
+ struct can_frame *cf = NULL;
+ struct skb_shared_hwtstamps *shhwtstamps;
+ struct net_device_stats *stats = &ndev->stats;
+
+ old_state = can->can.state;
+
+ bec.txerr = p->header[0] & 0xff;
+ bec.rxerr = (p->header[0] >> KVASER_PCIEFD_SPACK_RXERR_SHIFT) & 0xff;
+
+ kvaser_pciefd_packet_to_state(p, &bec, &new_state, &tx_state,
+ &rx_state);
+
+ skb = alloc_can_err_skb(ndev, &cf);
+
+ if (new_state != old_state) {
+ kvaser_pciefd_change_state(can, cf, new_state, tx_state,
+ rx_state);
+
+ if (old_state == CAN_STATE_BUS_OFF &&
+ new_state == CAN_STATE_ERROR_ACTIVE &&
+ can->can.restart_ms) {
+ can->can.can_stats.restarts++;
+ if (skb)
+ cf->can_id |= CAN_ERR_RESTARTED;
+ }
+ }
+
+ can->err_rep_cnt++;
+ can->can.can_stats.bus_error++;
+ stats->rx_errors++;
+
+ can->bec.txerr = bec.txerr;
+ can->bec.rxerr = bec.rxerr;
+
+ if (!skb) {
+ stats->rx_dropped++;
+ return -ENOMEM;
+ }
+
+ shhwtstamps = skb_hwtstamps(skb);
+ shhwtstamps->hwtstamp =
+ ns_to_ktime(div_u64(p->timestamp * 1000,
+ can->kv_pcie->freq_to_ticks_div));
+ cf->can_id |= CAN_ERR_BUSERROR;
+
+ cf->data[6] = bec.txerr;
+ cf->data[7] = bec.rxerr;
+
+ stats->rx_packets++;
+ stats->rx_bytes += cf->can_dlc;
+
+ netif_rx(skb);
+ return 0;
+}
+
+static int kvaser_pciefd_handle_error_packet(struct kvaser_pciefd *pcie,
+ struct kvaser_pciefd_rx_packet *p)
+{
+ struct kvaser_pciefd_can *can;
+ u8 ch_id = (p->header[1] >> KVASER_PCIEFD_PACKET_CHID_SHIFT) & 0x7;
+
+ if (ch_id >= pcie->nr_channels)
+ return -EIO;
+
+ can = pcie->can[ch_id];
+
+ kvaser_pciefd_rx_error_frame(can, p);
+ if (can->err_rep_cnt >= KVASER_PCIEFD_MAX_ERR_REP)
+		/* Do not report more errors until bec_poll_timer expires */
+ kvaser_pciefd_disable_err_gen(can);
+ /* Start polling the error counters */
+ mod_timer(&can->bec_poll_timer, KVASER_PCIEFD_BEC_POLL_FREQ);
+ return 0;
+}
+
+static int kvaser_pciefd_handle_status_resp(struct kvaser_pciefd_can *can,
+ struct kvaser_pciefd_rx_packet *p)
+{
+ struct can_berr_counter bec;
+ enum can_state old_state, new_state, tx_state, rx_state;
+
+ old_state = can->can.state;
+
+ bec.txerr = p->header[0] & 0xff;
+ bec.rxerr = (p->header[0] >> KVASER_PCIEFD_SPACK_RXERR_SHIFT) & 0xff;
+
+ kvaser_pciefd_packet_to_state(p, &bec, &new_state, &tx_state,
+ &rx_state);
+
+ if (new_state != old_state) {
+ struct net_device *ndev = can->can.dev;
+ struct sk_buff *skb;
+ struct can_frame *cf;
+ struct skb_shared_hwtstamps *shhwtstamps;
+
+ skb = alloc_can_err_skb(ndev, &cf);
+ if (!skb) {
+ struct net_device_stats *stats = &ndev->stats;
+
+ stats->rx_dropped++;
+ return -ENOMEM;
+ }
+
+ kvaser_pciefd_change_state(can, cf, new_state, tx_state,
+ rx_state);
+
+ if (old_state == CAN_STATE_BUS_OFF &&
+ new_state == CAN_STATE_ERROR_ACTIVE &&
+ can->can.restart_ms) {
+ can->can.can_stats.restarts++;
+ cf->can_id |= CAN_ERR_RESTARTED;
+ }
+
+ shhwtstamps = skb_hwtstamps(skb);
+ shhwtstamps->hwtstamp =
+ ns_to_ktime(div_u64(p->timestamp * 1000,
+ can->kv_pcie->freq_to_ticks_div));
+
+ cf->data[6] = bec.txerr;
+ cf->data[7] = bec.rxerr;
+
+ netif_rx(skb);
+ }
+ can->bec.txerr = bec.txerr;
+ can->bec.rxerr = bec.rxerr;
+ /* Check if we need to poll the error counters */
+ if (bec.txerr || bec.rxerr)
+ mod_timer(&can->bec_poll_timer, KVASER_PCIEFD_BEC_POLL_FREQ);
+
+ return 0;
+}
+
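+/* Status packets are sent both spontaneously, e.g. after a reset, and as
+ * responses to status requests. The SPACK_AUTO flag and the command
+ * sequence number are used to tell the cases apart.
+ */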
+static int kvaser_pciefd_handle_status_packet(struct kvaser_pciefd *pcie,
+ struct kvaser_pciefd_rx_packet *p)
+{
+ struct kvaser_pciefd_can *can;
+ u8 cmdseq;
+ u32 status;
+ u8 ch_id = (p->header[1] >> KVASER_PCIEFD_PACKET_CHID_SHIFT) & 0x7;
+
+ if (ch_id >= pcie->nr_channels)
+ return -EIO;
+
+ can = pcie->can[ch_id];
+
+ status = ioread32(can->reg_base + KVASER_PCIEFD_KCAN_STAT_REG);
+ cmdseq = (status >> KVASER_PCIEFD_KCAN_STAT_SEQNO_SHIFT) & 0xff;
+
+ /* Reset done, start abort and flush */
+ if (p->header[0] & KVASER_PCIEFD_SPACK_IRM &&
+ p->header[0] & KVASER_PCIEFD_SPACK_RMCD &&
+ p->header[1] & KVASER_PCIEFD_SPACK_AUTO &&
+ cmdseq == (p->header[1] & KVASER_PCIEFD_PACKET_SEQ_MSK) &&
+ status & KVASER_PCIEFD_KCAN_STAT_IDLE) {
+ u32 cmd;
+
+ iowrite32(KVASER_PCIEFD_KCAN_IRQ_ABD,
+ can->reg_base + KVASER_PCIEFD_KCAN_IRQ_REG);
+ cmd = KVASER_PCIEFD_KCAN_CMD_AT;
+ cmd |= ++can->cmd_seq << KVASER_PCIEFD_KCAN_CMD_SEQ_SHIFT;
+ iowrite32(cmd, can->reg_base + KVASER_PCIEFD_KCAN_CMD_REG);
+
+ iowrite32(KVASER_PCIEFD_KCAN_IRQ_TFD,
+ can->reg_base + KVASER_PCIEFD_KCAN_IEN_REG);
+ } else if (p->header[0] & KVASER_PCIEFD_SPACK_IDET &&
+ p->header[0] & KVASER_PCIEFD_SPACK_IRM &&
+ cmdseq == (p->header[1] & KVASER_PCIEFD_PACKET_SEQ_MSK) &&
+ status & KVASER_PCIEFD_KCAN_STAT_IDLE) {
+		/* Reset detected, send end of flush if no packets are in the FIFO */
+ u8 count = ioread32(can->reg_base +
+ KVASER_PCIEFD_KCAN_TX_NPACKETS_REG) & 0xff;
+
+ if (!count)
+ iowrite32(KVASER_PCIEFD_KCAN_CTRL_EFLUSH,
+ can->reg_base + KVASER_PCIEFD_KCAN_CTRL_REG);
+ } else if (!(p->header[1] & KVASER_PCIEFD_SPACK_AUTO) &&
+ cmdseq == (p->header[1] & KVASER_PCIEFD_PACKET_SEQ_MSK)) {
+ /* Response to status request received */
+ kvaser_pciefd_handle_status_resp(can, p);
+ if (can->can.state != CAN_STATE_BUS_OFF &&
+ can->can.state != CAN_STATE_ERROR_ACTIVE) {
+ mod_timer(&can->bec_poll_timer,
+ KVASER_PCIEFD_BEC_POLL_FREQ);
+ }
+ } else if (p->header[0] & KVASER_PCIEFD_SPACK_RMCD &&
+ !(status & KVASER_PCIEFD_KCAN_STAT_BUS_OFF_MSK)) {
+ /* Reset to bus on detected */
+ if (!completion_done(&can->start_comp))
+ complete(&can->start_comp);
+ }
+
+ return 0;
+}
+
+static int kvaser_pciefd_handle_eack_packet(struct kvaser_pciefd *pcie,
+ struct kvaser_pciefd_rx_packet *p)
+{
+ struct kvaser_pciefd_can *can;
+ u8 ch_id = (p->header[1] >> KVASER_PCIEFD_PACKET_CHID_SHIFT) & 0x7;
+
+ if (ch_id >= pcie->nr_channels)
+ return -EIO;
+
+ can = pcie->can[ch_id];
+
+ /* If this is the last flushed packet, send end of flush */
+ if (p->header[0] & KVASER_PCIEFD_APACKET_FLU) {
+ u8 count = ioread32(can->reg_base +
+ KVASER_PCIEFD_KCAN_TX_NPACKETS_REG) & 0xff;
+
+ if (count == 0)
+ iowrite32(KVASER_PCIEFD_KCAN_CTRL_EFLUSH,
+ can->reg_base + KVASER_PCIEFD_KCAN_CTRL_REG);
+ } else {
+ int echo_idx = p->header[0] & KVASER_PCIEFD_PACKET_SEQ_MSK;
+ int dlc = can_get_echo_skb(can->can.dev, echo_idx);
+ struct net_device_stats *stats = &can->can.dev->stats;
+
+ stats->tx_bytes += dlc;
+ stats->tx_packets++;
+
+ if (netif_queue_stopped(can->can.dev))
+ netif_wake_queue(can->can.dev);
+ }
+
+ return 0;
+}
+
+static void kvaser_pciefd_handle_nack_packet(struct kvaser_pciefd_can *can,
+ struct kvaser_pciefd_rx_packet *p)
+{
+ struct sk_buff *skb;
+ struct net_device_stats *stats = &can->can.dev->stats;
+ struct can_frame *cf;
+
+ skb = alloc_can_err_skb(can->can.dev, &cf);
+
+ stats->tx_errors++;
+ if (p->header[0] & KVASER_PCIEFD_APACKET_ABL) {
+ if (skb)
+ cf->can_id |= CAN_ERR_LOSTARB;
+ can->can.can_stats.arbitration_lost++;
+ } else if (skb) {
+ cf->can_id |= CAN_ERR_ACK;
+ }
+
+ if (skb) {
+ cf->can_id |= CAN_ERR_BUSERROR;
+ stats->rx_bytes += cf->can_dlc;
+ stats->rx_packets++;
+ netif_rx(skb);
+ } else {
+ stats->rx_dropped++;
+ netdev_warn(can->can.dev, "No memory left for err_skb\n");
+ }
+}
+
+static int kvaser_pciefd_handle_ack_packet(struct kvaser_pciefd *pcie,
+ struct kvaser_pciefd_rx_packet *p)
+{
+ struct kvaser_pciefd_can *can;
+ bool one_shot_fail = false;
+ u8 ch_id = (p->header[1] >> KVASER_PCIEFD_PACKET_CHID_SHIFT) & 0x7;
+
+ if (ch_id >= pcie->nr_channels)
+ return -EIO;
+
+ can = pcie->can[ch_id];
+ /* Ignore control packet ACK */
+ if (p->header[0] & KVASER_PCIEFD_APACKET_CT)
+ return 0;
+
+ if (p->header[0] & KVASER_PCIEFD_APACKET_NACK) {
+ kvaser_pciefd_handle_nack_packet(can, p);
+ one_shot_fail = true;
+ }
+
+ if (p->header[0] & KVASER_PCIEFD_APACKET_FLU) {
+ netdev_dbg(can->can.dev, "Packet was flushed\n");
+ } else {
+ int echo_idx = p->header[0] & KVASER_PCIEFD_PACKET_SEQ_MSK;
+ int dlc = can_get_echo_skb(can->can.dev, echo_idx);
+ u8 count = ioread32(can->reg_base +
+ KVASER_PCIEFD_KCAN_TX_NPACKETS_REG) & 0xff;
+
+ if (count < KVASER_PCIEFD_CAN_TX_MAX_COUNT &&
+ netif_queue_stopped(can->can.dev))
+ netif_wake_queue(can->can.dev);
+
+ if (!one_shot_fail) {
+ struct net_device_stats *stats = &can->can.dev->stats;
+
+ stats->tx_bytes += dlc;
+ stats->tx_packets++;
+ }
+ }
+
+ return 0;
+}
+
+static int kvaser_pciefd_handle_eflush_packet(struct kvaser_pciefd *pcie,
+ struct kvaser_pciefd_rx_packet *p)
+{
+ struct kvaser_pciefd_can *can;
+ u8 ch_id = (p->header[1] >> KVASER_PCIEFD_PACKET_CHID_SHIFT) & 0x7;
+
+ if (ch_id >= pcie->nr_channels)
+ return -EIO;
+
+ can = pcie->can[ch_id];
+
+ if (!completion_done(&can->flush_comp))
+ complete(&can->flush_comp);
+
+ return 0;
+}
+
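+/* Parse one packet from the DMA buffer: a size word, two header words, a
+ * 64-bit timestamp and, for data packets, the payload. A zero size word
+ * marks the end of the buffer. Returns -EIO if the consumed length does
+ * not match the size word.
+ */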
+static int kvaser_pciefd_read_packet(struct kvaser_pciefd *pcie, int *start_pos,
+ int dma_buf)
+{
+ __le32 *buffer = pcie->dma_data[dma_buf];
+ __le64 timestamp;
+ struct kvaser_pciefd_rx_packet packet;
+ struct kvaser_pciefd_rx_packet *p = &packet;
+ u8 type;
+ int pos = *start_pos;
+ int size;
+ int ret = 0;
+
+ size = le32_to_cpu(buffer[pos++]);
+ if (!size) {
+ *start_pos = 0;
+ return 0;
+ }
+
+ p->header[0] = le32_to_cpu(buffer[pos++]);
+ p->header[1] = le32_to_cpu(buffer[pos++]);
+
+ /* Read 64-bit timestamp */
+ memcpy(&timestamp, &buffer[pos], sizeof(__le64));
+ pos += 2;
+ p->timestamp = le64_to_cpu(timestamp);
+
+ type = (p->header[1] >> KVASER_PCIEFD_PACKET_TYPE_SHIFT) & 0xf;
+ switch (type) {
+ case KVASER_PCIEFD_PACK_TYPE_DATA:
+ ret = kvaser_pciefd_handle_data_packet(pcie, p, &buffer[pos]);
+ if (!(p->header[0] & KVASER_PCIEFD_RPACKET_RTR)) {
+ u8 data_len;
+
+ data_len = can_dlc2len(p->header[1] >>
+ KVASER_PCIEFD_RPACKET_DLC_SHIFT);
+ pos += DIV_ROUND_UP(data_len, 4);
+ }
+ break;
+
+ case KVASER_PCIEFD_PACK_TYPE_ACK:
+ ret = kvaser_pciefd_handle_ack_packet(pcie, p);
+ break;
+
+ case KVASER_PCIEFD_PACK_TYPE_STATUS:
+ ret = kvaser_pciefd_handle_status_packet(pcie, p);
+ break;
+
+ case KVASER_PCIEFD_PACK_TYPE_ERROR:
+ ret = kvaser_pciefd_handle_error_packet(pcie, p);
+ break;
+
+ case KVASER_PCIEFD_PACK_TYPE_EFRAME_ACK:
+ ret = kvaser_pciefd_handle_eack_packet(pcie, p);
+ break;
+
+ case KVASER_PCIEFD_PACK_TYPE_EFLUSH_ACK:
+ ret = kvaser_pciefd_handle_eflush_packet(pcie, p);
+ break;
+
+ case KVASER_PCIEFD_PACK_TYPE_ACK_DATA:
+ case KVASER_PCIEFD_PACK_TYPE_BUS_LOAD:
+ case KVASER_PCIEFD_PACK_TYPE_TXRQ:
+ dev_info(&pcie->pci->dev,
+ "Received unexpected packet type 0x%08X\n", type);
+ break;
+
+ default:
+ dev_err(&pcie->pci->dev, "Unknown packet type 0x%08X\n", type);
+ ret = -EIO;
+ break;
+ }
+
+ if (ret)
+ return ret;
+
+	/* Position does not point to the end of the packet,
+	 * corrupted packet size?
+	 */
+ if ((*start_pos + size) != pos)
+ return -EIO;
+
+ /* Point to the next packet header, if any */
+ *start_pos = pos;
+
+ return ret;
+}
+
+static int kvaser_pciefd_read_buffer(struct kvaser_pciefd *pcie, int dma_buf)
+{
+ int pos = 0;
+ int res = 0;
+
+ do {
+ res = kvaser_pciefd_read_packet(pcie, &pos, dma_buf);
+ } while (!res && pos > 0 && pos < KVASER_PCIEFD_DMA_SIZE);
+
+ return res;
+}
+
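+/* Rx uses two DMA buffers. A DPD interrupt indicates that a buffer is
+ * full; it is drained and then re-armed with the corresponding RDB
+ * command.
+ */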
+static int kvaser_pciefd_receive_irq(struct kvaser_pciefd *pcie)
+{
+ u32 irq;
+
+ irq = ioread32(pcie->reg_base + KVASER_PCIEFD_SRB_IRQ_REG);
+ if (irq & KVASER_PCIEFD_SRB_IRQ_DPD0) {
+ kvaser_pciefd_read_buffer(pcie, 0);
+ /* Reset DMA buffer 0 */
+ iowrite32(KVASER_PCIEFD_SRB_CMD_RDB0,
+ pcie->reg_base + KVASER_PCIEFD_SRB_CMD_REG);
+ }
+
+ if (irq & KVASER_PCIEFD_SRB_IRQ_DPD1) {
+ kvaser_pciefd_read_buffer(pcie, 1);
+ /* Reset DMA buffer 1 */
+ iowrite32(KVASER_PCIEFD_SRB_CMD_RDB1,
+ pcie->reg_base + KVASER_PCIEFD_SRB_CMD_REG);
+ }
+
+ if (irq & KVASER_PCIEFD_SRB_IRQ_DOF0 ||
+ irq & KVASER_PCIEFD_SRB_IRQ_DOF1 ||
+ irq & KVASER_PCIEFD_SRB_IRQ_DUF0 ||
+ irq & KVASER_PCIEFD_SRB_IRQ_DUF1)
+ dev_err(&pcie->pci->dev, "DMA IRQ error 0x%08X\n", irq);
+
+ iowrite32(irq, pcie->reg_base + KVASER_PCIEFD_SRB_IRQ_REG);
+ return 0;
+}
+
+static int kvaser_pciefd_transmit_irq(struct kvaser_pciefd_can *can)
+{
+ u32 irq = ioread32(can->reg_base + KVASER_PCIEFD_KCAN_IRQ_REG);
+
+ if (irq & KVASER_PCIEFD_KCAN_IRQ_TOF)
+ netdev_err(can->can.dev, "Tx FIFO overflow\n");
+
+ if (irq & KVASER_PCIEFD_KCAN_IRQ_TFD) {
+ u8 count = ioread32(can->reg_base +
+ KVASER_PCIEFD_KCAN_TX_NPACKETS_REG) & 0xff;
+
+ if (count == 0)
+ iowrite32(KVASER_PCIEFD_KCAN_CTRL_EFLUSH,
+ can->reg_base + KVASER_PCIEFD_KCAN_CTRL_REG);
+ }
+
+ if (irq & KVASER_PCIEFD_KCAN_IRQ_BPP)
+ netdev_err(can->can.dev,
+			   "Failed to change bittiming while not in reset mode\n");
+
+ if (irq & KVASER_PCIEFD_KCAN_IRQ_FDIC)
+		netdev_err(can->can.dev, "CAN FD frame received in classic CAN mode\n");
+
+ if (irq & KVASER_PCIEFD_KCAN_IRQ_ROF)
+ netdev_err(can->can.dev, "Rx FIFO overflow\n");
+
+ iowrite32(irq, can->reg_base + KVASER_PCIEFD_KCAN_IRQ_REG);
+ return 0;
+}
+
+static irqreturn_t kvaser_pciefd_irq_handler(int irq, void *dev)
+{
+ struct kvaser_pciefd *pcie = (struct kvaser_pciefd *)dev;
+ u32 board_irq;
+ int i;
+
+ board_irq = ioread32(pcie->reg_base + KVASER_PCIEFD_IRQ_REG);
+
+ if (!(board_irq & KVASER_PCIEFD_IRQ_ALL_MSK))
+ return IRQ_NONE;
+
+ if (board_irq & KVASER_PCIEFD_IRQ_SRB)
+ kvaser_pciefd_receive_irq(pcie);
+
+ for (i = 0; i < pcie->nr_channels; i++) {
+ if (!pcie->can[i]) {
+ dev_err(&pcie->pci->dev,
+ "IRQ mask points to unallocated controller\n");
+ break;
+ }
+
+		/* Check if the IRQ bit for channel i is set */
+ if (board_irq & (1 << i))
+ kvaser_pciefd_transmit_irq(pcie->can[i]);
+ }
+
+ iowrite32(board_irq, pcie->reg_base + KVASER_PCIEFD_IRQ_REG);
+ return IRQ_HANDLED;
+}
+
+static void kvaser_pciefd_teardown_can_ctrls(struct kvaser_pciefd *pcie)
+{
+ int i;
+ struct kvaser_pciefd_can *can;
+
+ for (i = 0; i < pcie->nr_channels; i++) {
+ can = pcie->can[i];
+ if (can) {
+ iowrite32(0,
+ can->reg_base + KVASER_PCIEFD_KCAN_IEN_REG);
+ kvaser_pciefd_pwm_stop(can);
+ free_candev(can->can.dev);
+ }
+ }
+}
+
+static int kvaser_pciefd_probe(struct pci_dev *pdev,
+ const struct pci_device_id *id)
+{
+ int err;
+ struct kvaser_pciefd *pcie;
+
+ pcie = devm_kzalloc(&pdev->dev, sizeof(*pcie), GFP_KERNEL);
+ if (!pcie)
+ return -ENOMEM;
+
+ pci_set_drvdata(pdev, pcie);
+ pcie->pci = pdev;
+
+ err = pci_enable_device(pdev);
+ if (err)
+ return err;
+
+ err = pci_request_regions(pdev, KVASER_PCIEFD_DRV_NAME);
+ if (err)
+ goto err_disable_pci;
+
+ pcie->reg_base = pci_iomap(pdev, 0, 0);
+ if (!pcie->reg_base) {
+ err = -ENOMEM;
+ goto err_release_regions;
+ }
+
+ err = kvaser_pciefd_setup_board(pcie);
+ if (err)
+ goto err_pci_iounmap;
+
+ err = kvaser_pciefd_setup_dma(pcie);
+ if (err)
+ goto err_pci_iounmap;
+
+ pci_set_master(pdev);
+
+ err = kvaser_pciefd_setup_can_ctrls(pcie);
+ if (err)
+ goto err_teardown_can_ctrls;
+
+ iowrite32(KVASER_PCIEFD_SRB_IRQ_DPD0 | KVASER_PCIEFD_SRB_IRQ_DPD1,
+ pcie->reg_base + KVASER_PCIEFD_SRB_IRQ_REG);
+
+ iowrite32(KVASER_PCIEFD_SRB_IRQ_DPD0 | KVASER_PCIEFD_SRB_IRQ_DPD1 |
+ KVASER_PCIEFD_SRB_IRQ_DOF0 | KVASER_PCIEFD_SRB_IRQ_DOF1 |
+ KVASER_PCIEFD_SRB_IRQ_DUF0 | KVASER_PCIEFD_SRB_IRQ_DUF1,
+ pcie->reg_base + KVASER_PCIEFD_SRB_IEN_REG);
+
+	/* Reset IRQ handling, expected to be disabled before this point */
+ iowrite32(KVASER_PCIEFD_IRQ_ALL_MSK,
+ pcie->reg_base + KVASER_PCIEFD_IRQ_REG);
+ iowrite32(KVASER_PCIEFD_IRQ_ALL_MSK,
+ pcie->reg_base + KVASER_PCIEFD_IEN_REG);
+
+ /* Ready the DMA buffers */
+ iowrite32(KVASER_PCIEFD_SRB_CMD_RDB0,
+ pcie->reg_base + KVASER_PCIEFD_SRB_CMD_REG);
+ iowrite32(KVASER_PCIEFD_SRB_CMD_RDB1,
+ pcie->reg_base + KVASER_PCIEFD_SRB_CMD_REG);
+
+ err = request_irq(pcie->pci->irq, kvaser_pciefd_irq_handler,
+ IRQF_SHARED, KVASER_PCIEFD_DRV_NAME, pcie);
+ if (err)
+ goto err_teardown_can_ctrls;
+
+ err = kvaser_pciefd_reg_candev(pcie);
+ if (err)
+ goto err_free_irq;
+
+ return 0;
+
+err_free_irq:
+ free_irq(pcie->pci->irq, pcie);
+
+err_teardown_can_ctrls:
+ kvaser_pciefd_teardown_can_ctrls(pcie);
+ iowrite32(0, pcie->reg_base + KVASER_PCIEFD_SRB_CTRL_REG);
+ pci_clear_master(pdev);
+
+err_pci_iounmap:
+ pci_iounmap(pdev, pcie->reg_base);
+
+err_release_regions:
+ pci_release_regions(pdev);
+
+err_disable_pci:
+ pci_disable_device(pdev);
+
+ return err;
+}
+
+static void kvaser_pciefd_remove_all_ctrls(struct kvaser_pciefd *pcie)
+{
+ struct kvaser_pciefd_can *can;
+ int i;
+
+ for (i = 0; i < pcie->nr_channels; i++) {
+ can = pcie->can[i];
+ if (can) {
+ iowrite32(0,
+ can->reg_base + KVASER_PCIEFD_KCAN_IEN_REG);
+ unregister_candev(can->can.dev);
+ del_timer(&can->bec_poll_timer);
+ kvaser_pciefd_pwm_stop(can);
+ free_candev(can->can.dev);
+ }
+ }
+}
+
+static void kvaser_pciefd_remove(struct pci_dev *pdev)
+{
+ struct kvaser_pciefd *pcie = pci_get_drvdata(pdev);
+
+ kvaser_pciefd_remove_all_ctrls(pcie);
+
+ /* Turn off IRQ generation */
+ iowrite32(0, pcie->reg_base + KVASER_PCIEFD_SRB_CTRL_REG);
+ iowrite32(KVASER_PCIEFD_IRQ_ALL_MSK,
+ pcie->reg_base + KVASER_PCIEFD_IRQ_REG);
+ iowrite32(0, pcie->reg_base + KVASER_PCIEFD_IEN_REG);
+
+ free_irq(pcie->pci->irq, pcie);
+
+ pci_clear_master(pdev);
+ pci_iounmap(pdev, pcie->reg_base);
+ pci_release_regions(pdev);
+ pci_disable_device(pdev);
+}
+
+static struct pci_driver kvaser_pciefd = {
+ .name = KVASER_PCIEFD_DRV_NAME,
+ .id_table = kvaser_pciefd_id_table,
+ .probe = kvaser_pciefd_probe,
+ .remove = kvaser_pciefd_remove,
+};
+
+module_pci_driver(kvaser_pciefd);
diff --git a/drivers/net/can/m_can/Kconfig b/drivers/net/can/m_can/Kconfig
index ec4b2e117f66..1ff0b7fe81d6 100644
--- a/drivers/net/can/m_can/Kconfig
+++ b/drivers/net/can/m_can/Kconfig
@@ -1,6 +1,24 @@
# SPDX-License-Identifier: GPL-2.0-only
config CAN_M_CAN
+ tristate "Bosch M_CAN support"
+ ---help---
+	  Say Y here if you want support for the Bosch M_CAN controller framework.
+ This is common support for devices that embed the Bosch M_CAN IP.
+
+config CAN_M_CAN_PLATFORM
+ tristate "Bosch M_CAN support for io-mapped devices"
depends on HAS_IOMEM
- tristate "Bosch M_CAN devices"
+ depends on CAN_M_CAN
+ ---help---
+	  Say Y here if you want support for an IO-mapped Bosch M_CAN
+	  controller. This is for devices that embed the Bosch M_CAN
+	  controller IP with its registers IO-mapped for direct processor
+	  access.
+
+config CAN_M_CAN_TCAN4X5X
+ depends on CAN_M_CAN
+ depends on REGMAP_SPI
+ tristate "TCAN4X5X M_CAN device"
---help---
- Say Y here if you want to support for Bosch M_CAN controller.
+	  Say Y here if you want support for the Texas Instruments TCAN4x5x
+	  M_CAN controller. This device is a peripheral that communicates
+	  over the SPI bus.
diff --git a/drivers/net/can/m_can/Makefile b/drivers/net/can/m_can/Makefile
index 599ae69cb4a1..52a4a6fbe527 100644
--- a/drivers/net/can/m_can/Makefile
+++ b/drivers/net/can/m_can/Makefile
@@ -4,3 +4,5 @@
#
obj-$(CONFIG_CAN_M_CAN) += m_can.o
+obj-$(CONFIG_CAN_M_CAN_PLATFORM) += m_can_platform.o
+obj-$(CONFIG_CAN_M_CAN_TCAN4X5X) += tcan4x5x.o
diff --git a/drivers/net/can/m_can/m_can.c b/drivers/net/can/m_can/m_can.c
index deb274a19ba0..562c8317e3aa 100644
--- a/drivers/net/can/m_can/m_can.c
+++ b/drivers/net/can/m_can/m_can.c
@@ -1,20 +1,14 @@
-/*
- * CAN bus driver for Bosch M_CAN controller
- *
- * Copyright (C) 2014 Freescale Semiconductor, Inc.
- * Dong Aisheng <b29396@freescale.com>
- *
- * Bosch M_CAN user manual can be obtained from:
+// SPDX-License-Identifier: GPL-2.0
+// CAN bus driver for Bosch M_CAN controller
+// Copyright (C) 2014 Freescale Semiconductor, Inc.
+// Dong Aisheng <b29396@freescale.com>
+// Copyright (C) 2018-19 Texas Instruments Incorporated - http://www.ti.com/
+
+/* Bosch M_CAN user manual can be obtained from:
* http://www.bosch-semiconductors.de/media/pdf_1/ipmodules_1/m_can/
* mcan_users_manual_v302.pdf
- *
- * This file is licensed under the terms of the GNU General Public
- * License version 2. This program is licensed "as is" without any
- * warranty of any kind, whether express or implied.
*/
-#include <linux/clk.h>
-#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kernel.h>
@@ -28,11 +22,7 @@
#include <linux/can/dev.h>
#include <linux/pinctrl/consumer.h>
-/* napi related */
-#define M_CAN_NAPI_WEIGHT 64
-
-/* message ram configuration data length */
-#define MRAM_CFG_LEN 8
+#include "m_can.h"
/* registers definition */
enum m_can_reg {
@@ -86,28 +76,11 @@ enum m_can_reg {
M_CAN_TXEFA = 0xf8,
};
-/* m_can lec values */
-enum m_can_lec_type {
- LEC_NO_ERROR = 0,
- LEC_STUFF_ERROR,
- LEC_FORM_ERROR,
- LEC_ACK_ERROR,
- LEC_BIT1_ERROR,
- LEC_BIT0_ERROR,
- LEC_CRC_ERROR,
- LEC_UNUSED,
-};
+/* napi related */
+#define M_CAN_NAPI_WEIGHT 64
-enum m_can_mram_cfg {
- MRAM_SIDF = 0,
- MRAM_XIDF,
- MRAM_RXF0,
- MRAM_RXF1,
- MRAM_RXB,
- MRAM_TXE,
- MRAM_TXB,
- MRAM_CFG_NUM,
-};
+/* message ram configuration data length */
+#define MRAM_CFG_LEN 8
/* Core Release Register (CREL) */
#define CREL_REL_SHIFT 28
@@ -347,90 +320,85 @@ enum m_can_mram_cfg {
#define TX_EVENT_MM_SHIFT TX_BUF_MM_SHIFT
#define TX_EVENT_MM_MASK (0xff << TX_EVENT_MM_SHIFT)
-/* address offset and element number for each FIFO/Buffer in the Message RAM */
-struct mram_cfg {
- u16 off;
- u8 num;
-};
-
-/* m_can private data structure */
-struct m_can_priv {
- struct can_priv can; /* must be the first member */
- struct napi_struct napi;
- struct net_device *dev;
- struct device *device;
- struct clk *hclk;
- struct clk *cclk;
- void __iomem *base;
- u32 irqstatus;
- int version;
-
- /* message ram configuration */
- void __iomem *mram_base;
- struct mram_cfg mcfg[MRAM_CFG_NUM];
-};
+static inline u32 m_can_read(struct m_can_classdev *cdev, enum m_can_reg reg)
+{
+ return cdev->ops->read_reg(cdev, reg);
+}
-static inline u32 m_can_read(const struct m_can_priv *priv, enum m_can_reg reg)
+static inline void m_can_write(struct m_can_classdev *cdev, enum m_can_reg reg,
+ u32 val)
{
- return readl(priv->base + reg);
+ cdev->ops->write_reg(cdev, reg, val);
}
-static inline void m_can_write(const struct m_can_priv *priv,
- enum m_can_reg reg, u32 val)
+static u32 m_can_fifo_read(struct m_can_classdev *cdev,
+ u32 fgi, unsigned int offset)
{
- writel(val, priv->base + reg);
+ u32 addr_offset = cdev->mcfg[MRAM_RXF0].off + fgi * RXF0_ELEMENT_SIZE +
+ offset;
+
+ return cdev->ops->read_fifo(cdev, addr_offset);
}
-static inline u32 m_can_fifo_read(const struct m_can_priv *priv,
- u32 fgi, unsigned int offset)
+static void m_can_fifo_write(struct m_can_classdev *cdev,
+ u32 fpi, unsigned int offset, u32 val)
{
- return readl(priv->mram_base + priv->mcfg[MRAM_RXF0].off +
- fgi * RXF0_ELEMENT_SIZE + offset);
+ u32 addr_offset = cdev->mcfg[MRAM_TXB].off + fpi * TXB_ELEMENT_SIZE +
+ offset;
+
+ cdev->ops->write_fifo(cdev, addr_offset, val);
}
-static inline void m_can_fifo_write(const struct m_can_priv *priv,
- u32 fpi, unsigned int offset, u32 val)
+static inline void m_can_fifo_write_no_off(struct m_can_classdev *cdev,
+ u32 fpi, u32 val)
{
- writel(val, priv->mram_base + priv->mcfg[MRAM_TXB].off +
- fpi * TXB_ELEMENT_SIZE + offset);
+ cdev->ops->write_fifo(cdev, fpi, val);
}
-static inline u32 m_can_txe_fifo_read(const struct m_can_priv *priv,
- u32 fgi,
- u32 offset) {
- return readl(priv->mram_base + priv->mcfg[MRAM_TXE].off +
- fgi * TXE_ELEMENT_SIZE + offset);
+static u32 m_can_txe_fifo_read(struct m_can_classdev *cdev, u32 fgi, u32 offset)
+{
+ u32 addr_offset = cdev->mcfg[MRAM_TXE].off + fgi * TXE_ELEMENT_SIZE +
+ offset;
+
+ return cdev->ops->read_fifo(cdev, addr_offset);
}
-static inline bool m_can_tx_fifo_full(const struct m_can_priv *priv)
+static inline bool m_can_tx_fifo_full(struct m_can_classdev *cdev)
{
- return !!(m_can_read(priv, M_CAN_TXFQS) & TXFQS_TFQF);
+ return !!(m_can_read(cdev, M_CAN_TXFQS) & TXFQS_TFQF);
}
-static inline void m_can_config_endisable(const struct m_can_priv *priv,
- bool enable)
+void m_can_config_endisable(struct m_can_classdev *cdev, bool enable)
{
- u32 cccr = m_can_read(priv, M_CAN_CCCR);
+ u32 cccr = m_can_read(cdev, M_CAN_CCCR);
u32 timeout = 10;
u32 val = 0;
+ /* Clear the Clock stop request if it was set */
+ if (cccr & CCCR_CSR)
+ cccr &= ~CCCR_CSR;
+
if (enable) {
/* enable m_can configuration */
- m_can_write(priv, M_CAN_CCCR, cccr | CCCR_INIT);
+ m_can_write(cdev, M_CAN_CCCR, cccr | CCCR_INIT);
udelay(5);
/* CCCR.CCE can only be set/reset while CCCR.INIT = '1' */
- m_can_write(priv, M_CAN_CCCR, cccr | CCCR_INIT | CCCR_CCE);
+ m_can_write(cdev, M_CAN_CCCR, cccr | CCCR_INIT | CCCR_CCE);
} else {
- m_can_write(priv, M_CAN_CCCR, cccr & ~(CCCR_INIT | CCCR_CCE));
+ m_can_write(cdev, M_CAN_CCCR, cccr & ~(CCCR_INIT | CCCR_CCE));
}
/* there's a delay for module initialization */
if (enable)
val = CCCR_INIT | CCCR_CCE;
- while ((m_can_read(priv, M_CAN_CCCR) & (CCCR_INIT | CCCR_CCE)) != val) {
+ while ((m_can_read(cdev, M_CAN_CCCR) & (CCCR_INIT | CCCR_CCE)) != val) {
if (timeout == 0) {
- netdev_warn(priv->dev, "Failed to init module\n");
+ netdev_warn(cdev->net, "Failed to init module\n");
return;
}
timeout--;
@@ -438,21 +406,38 @@ static inline void m_can_config_endisable(const struct m_can_priv *priv,
}
}
-static inline void m_can_enable_all_interrupts(const struct m_can_priv *priv)
+static inline void m_can_enable_all_interrupts(struct m_can_classdev *cdev)
{
/* Only interrupt line 0 is used in this driver */
- m_can_write(priv, M_CAN_ILE, ILE_EINT0);
+ m_can_write(cdev, M_CAN_ILE, ILE_EINT0);
}
-static inline void m_can_disable_all_interrupts(const struct m_can_priv *priv)
+static inline void m_can_disable_all_interrupts(struct m_can_classdev *cdev)
{
- m_can_write(priv, M_CAN_ILE, 0x0);
+ m_can_write(cdev, M_CAN_ILE, 0x0);
+}
+
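+/* Discard a pending Tx skb: count it as a Tx error and release its echo
+ * slot.
+ */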
+static void m_can_clean(struct net_device *net)
+{
+ struct m_can_classdev *cdev = netdev_priv(net);
+
+ if (cdev->tx_skb) {
+ int putidx = 0;
+
+ net->stats.tx_errors++;
+ if (cdev->version > 30)
+ putidx = ((m_can_read(cdev, M_CAN_TXFQS) &
+ TXFQS_TFQPI_MASK) >> TXFQS_TFQPI_SHIFT);
+
+ can_free_echo_skb(cdev->net, putidx);
+ cdev->tx_skb = NULL;
+ }
}
static void m_can_read_fifo(struct net_device *dev, u32 rxfs)
{
struct net_device_stats *stats = &dev->stats;
- struct m_can_priv *priv = netdev_priv(dev);
+ struct m_can_classdev *cdev = netdev_priv(dev);
struct canfd_frame *cf;
struct sk_buff *skb;
u32 id, fgi, dlc;
@@ -460,7 +445,7 @@ static void m_can_read_fifo(struct net_device *dev, u32 rxfs)
/* calculate the fifo get index for where to read data */
fgi = (rxfs & RXFS_FGI_MASK) >> RXFS_FGI_SHIFT;
- dlc = m_can_fifo_read(priv, fgi, M_CAN_FIFO_DLC);
+ dlc = m_can_fifo_read(cdev, fgi, M_CAN_FIFO_DLC);
if (dlc & RX_BUF_FDF)
skb = alloc_canfd_skb(dev, &cf);
else
@@ -475,7 +460,7 @@ static void m_can_read_fifo(struct net_device *dev, u32 rxfs)
else
cf->len = get_can_dlc((dlc >> 16) & 0x0F);
- id = m_can_fifo_read(priv, fgi, M_CAN_FIFO_ID);
+ id = m_can_fifo_read(cdev, fgi, M_CAN_FIFO_ID);
if (id & RX_BUF_XTD)
cf->can_id = (id & CAN_EFF_MASK) | CAN_EFF_FLAG;
else
@@ -494,12 +479,12 @@ static void m_can_read_fifo(struct net_device *dev, u32 rxfs)
for (i = 0; i < cf->len; i += 4)
*(u32 *)(cf->data + i) =
- m_can_fifo_read(priv, fgi,
+ m_can_fifo_read(cdev, fgi,
M_CAN_FIFO_DATA(i / 4));
}
/* acknowledge rx fifo 0 */
- m_can_write(priv, M_CAN_RXF0A, fgi);
+ m_can_write(cdev, M_CAN_RXF0A, fgi);
stats->rx_packets++;
stats->rx_bytes += cf->len;
@@ -509,11 +494,11 @@ static void m_can_read_fifo(struct net_device *dev, u32 rxfs)
static int m_can_do_rx_poll(struct net_device *dev, int quota)
{
- struct m_can_priv *priv = netdev_priv(dev);
+ struct m_can_classdev *cdev = netdev_priv(dev);
u32 pkts = 0;
u32 rxfs;
- rxfs = m_can_read(priv, M_CAN_RXF0S);
+ rxfs = m_can_read(cdev, M_CAN_RXF0S);
if (!(rxfs & RXFS_FFL_MASK)) {
netdev_dbg(dev, "no messages in fifo0\n");
return 0;
@@ -527,7 +512,7 @@ static int m_can_do_rx_poll(struct net_device *dev, int quota)
quota--;
pkts++;
- rxfs = m_can_read(priv, M_CAN_RXF0S);
+ rxfs = m_can_read(cdev, M_CAN_RXF0S);
}
if (pkts)
@@ -562,12 +547,12 @@ static int m_can_handle_lost_msg(struct net_device *dev)
static int m_can_handle_lec_err(struct net_device *dev,
enum m_can_lec_type lec_type)
{
- struct m_can_priv *priv = netdev_priv(dev);
+ struct m_can_classdev *cdev = netdev_priv(dev);
struct net_device_stats *stats = &dev->stats;
struct can_frame *cf;
struct sk_buff *skb;
- priv->can.can_stats.bus_error++;
+ cdev->can.can_stats.bus_error++;
stats->rx_errors++;
/* propagate the error condition to the CAN stack */
@@ -619,47 +604,51 @@ static int m_can_handle_lec_err(struct net_device *dev,
static int __m_can_get_berr_counter(const struct net_device *dev,
struct can_berr_counter *bec)
{
- struct m_can_priv *priv = netdev_priv(dev);
+ struct m_can_classdev *cdev = netdev_priv(dev);
unsigned int ecr;
- ecr = m_can_read(priv, M_CAN_ECR);
+ ecr = m_can_read(cdev, M_CAN_ECR);
bec->rxerr = (ecr & ECR_REC_MASK) >> ECR_REC_SHIFT;
bec->txerr = (ecr & ECR_TEC_MASK) >> ECR_TEC_SHIFT;
return 0;
}
-static int m_can_clk_start(struct m_can_priv *priv)
+static int m_can_clk_start(struct m_can_classdev *cdev)
{
int err;
- err = pm_runtime_get_sync(priv->device);
+ if (cdev->pm_clock_support == 0)
+ return 0;
+
+ err = pm_runtime_get_sync(cdev->dev);
if (err < 0) {
- pm_runtime_put_noidle(priv->device);
+ pm_runtime_put_noidle(cdev->dev);
return err;
}
return 0;
}
-static void m_can_clk_stop(struct m_can_priv *priv)
+static void m_can_clk_stop(struct m_can_classdev *cdev)
{
- pm_runtime_put_sync(priv->device);
+ if (cdev->pm_clock_support)
+ pm_runtime_put_sync(cdev->dev);
}
static int m_can_get_berr_counter(const struct net_device *dev,
struct can_berr_counter *bec)
{
- struct m_can_priv *priv = netdev_priv(dev);
+ struct m_can_classdev *cdev = netdev_priv(dev);
int err;
- err = m_can_clk_start(priv);
+ err = m_can_clk_start(cdev);
if (err)
return err;
__m_can_get_berr_counter(dev, bec);
- m_can_clk_stop(priv);
+ m_can_clk_stop(cdev);
return 0;
}
@@ -667,7 +656,7 @@ static int m_can_get_berr_counter(const struct net_device *dev,
static int m_can_handle_state_change(struct net_device *dev,
enum can_state new_state)
{
- struct m_can_priv *priv = netdev_priv(dev);
+ struct m_can_classdev *cdev = netdev_priv(dev);
struct net_device_stats *stats = &dev->stats;
struct can_frame *cf;
struct sk_buff *skb;
@@ -677,19 +666,19 @@ static int m_can_handle_state_change(struct net_device *dev,
switch (new_state) {
case CAN_STATE_ERROR_ACTIVE:
/* error warning state */
- priv->can.can_stats.error_warning++;
- priv->can.state = CAN_STATE_ERROR_WARNING;
+ cdev->can.can_stats.error_warning++;
+ cdev->can.state = CAN_STATE_ERROR_WARNING;
break;
case CAN_STATE_ERROR_PASSIVE:
/* error passive state */
- priv->can.can_stats.error_passive++;
- priv->can.state = CAN_STATE_ERROR_PASSIVE;
+ cdev->can.can_stats.error_passive++;
+ cdev->can.state = CAN_STATE_ERROR_PASSIVE;
break;
case CAN_STATE_BUS_OFF:
/* bus-off state */
- priv->can.state = CAN_STATE_BUS_OFF;
- m_can_disable_all_interrupts(priv);
- priv->can.can_stats.bus_off++;
+ cdev->can.state = CAN_STATE_BUS_OFF;
+ m_can_disable_all_interrupts(cdev);
+ cdev->can.can_stats.bus_off++;
can_bus_off(dev);
break;
default:
@@ -716,7 +705,7 @@ static int m_can_handle_state_change(struct net_device *dev,
case CAN_STATE_ERROR_PASSIVE:
/* error passive state */
cf->can_id |= CAN_ERR_CRTL;
- ecr = m_can_read(priv, M_CAN_ECR);
+ ecr = m_can_read(cdev, M_CAN_ECR);
if (ecr & ECR_RP)
cf->data[1] |= CAN_ERR_CRTL_RX_PASSIVE;
if (bec.txerr > 127)
@@ -741,25 +730,22 @@ static int m_can_handle_state_change(struct net_device *dev,
static int m_can_handle_state_errors(struct net_device *dev, u32 psr)
{
- struct m_can_priv *priv = netdev_priv(dev);
+ struct m_can_classdev *cdev = netdev_priv(dev);
int work_done = 0;
- if ((psr & PSR_EW) &&
- (priv->can.state != CAN_STATE_ERROR_WARNING)) {
+ if (psr & PSR_EW && cdev->can.state != CAN_STATE_ERROR_WARNING) {
netdev_dbg(dev, "entered error warning state\n");
work_done += m_can_handle_state_change(dev,
CAN_STATE_ERROR_WARNING);
}
- if ((psr & PSR_EP) &&
- (priv->can.state != CAN_STATE_ERROR_PASSIVE)) {
+ if (psr & PSR_EP && cdev->can.state != CAN_STATE_ERROR_PASSIVE) {
netdev_dbg(dev, "entered error passive state\n");
work_done += m_can_handle_state_change(dev,
CAN_STATE_ERROR_PASSIVE);
}
- if ((psr & PSR_BO) &&
- (priv->can.state != CAN_STATE_BUS_OFF)) {
+ if (psr & PSR_BO && cdev->can.state != CAN_STATE_BUS_OFF) {
netdev_dbg(dev, "entered error bus off state\n");
work_done += m_can_handle_state_change(dev,
CAN_STATE_BUS_OFF);
@@ -794,14 +780,14 @@ static inline bool is_lec_err(u32 psr)
static int m_can_handle_bus_errors(struct net_device *dev, u32 irqstatus,
u32 psr)
{
- struct m_can_priv *priv = netdev_priv(dev);
+ struct m_can_classdev *cdev = netdev_priv(dev);
int work_done = 0;
if (irqstatus & IR_RF0L)
work_done += m_can_handle_lost_msg(dev);
/* handle lec errors on the bus */
- if ((priv->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING) &&
+ if ((cdev->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING) &&
is_lec_err(psr))
work_done += m_can_handle_lec_err(dev, psr & LEC_UNUSED);
@@ -811,14 +797,13 @@ static int m_can_handle_bus_errors(struct net_device *dev, u32 irqstatus,
return work_done;
}
-static int m_can_poll(struct napi_struct *napi, int quota)
+static int m_can_rx_handler(struct net_device *dev, int quota)
{
- struct net_device *dev = napi->dev;
- struct m_can_priv *priv = netdev_priv(dev);
+ struct m_can_classdev *cdev = netdev_priv(dev);
int work_done = 0;
u32 irqstatus, psr;
- irqstatus = priv->irqstatus | m_can_read(priv, M_CAN_IR);
+ irqstatus = cdev->irqstatus | m_can_read(cdev, M_CAN_IR);
if (!irqstatus)
goto end;
@@ -832,18 +817,19 @@ static int m_can_poll(struct napi_struct *napi, int quota)
* whether MCAN_ECR.RP = ’1’ and MCAN_ECR.REC = 127.
* In this case, reset MCAN_IR.MRAF. No further action is required.
*/
- if ((priv->version <= 31) && (irqstatus & IR_MRAF) &&
- (m_can_read(priv, M_CAN_ECR) & ECR_RP)) {
+ if (cdev->version <= 31 && irqstatus & IR_MRAF &&
+ m_can_read(cdev, M_CAN_ECR) & ECR_RP) {
struct can_berr_counter bec;
__m_can_get_berr_counter(dev, &bec);
if (bec.rxerr == 127) {
- m_can_write(priv, M_CAN_IR, IR_MRAF);
+ m_can_write(cdev, M_CAN_IR, IR_MRAF);
irqstatus &= ~IR_MRAF;
}
}
- psr = m_can_read(priv, M_CAN_PSR);
+ psr = m_can_read(cdev, M_CAN_PSR);
+
if (irqstatus & IR_ERR_STATE)
work_done += m_can_handle_state_errors(dev, psr);
@@ -852,13 +838,33 @@ static int m_can_poll(struct napi_struct *napi, int quota)
if (irqstatus & IR_RF0N)
work_done += m_can_do_rx_poll(dev, (quota - work_done));
+end:
+ return work_done;
+}
+
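+/* For peripheral devices, e.g. SPI-attached controllers, the Rx work runs
+ * directly in interrupt context instead of being scheduled through NAPI.
+ */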
+static int m_can_rx_peripheral(struct net_device *dev)
+{
+ struct m_can_classdev *cdev = netdev_priv(dev);
+
+ m_can_rx_handler(dev, 1);
+
+ m_can_enable_all_interrupts(cdev);
+
+ return 0;
+}
+
+static int m_can_poll(struct napi_struct *napi, int quota)
+{
+ struct net_device *dev = napi->dev;
+ struct m_can_classdev *cdev = netdev_priv(dev);
+ int work_done;
+
+ work_done = m_can_rx_handler(dev, quota);
if (work_done < quota) {
napi_complete_done(napi, work_done);
- m_can_enable_all_interrupts(priv);
+ m_can_enable_all_interrupts(cdev);
}
-end:
return work_done;
}
@@ -870,11 +876,11 @@ static void m_can_echo_tx_event(struct net_device *dev)
int i = 0;
unsigned int msg_mark;
- struct m_can_priv *priv = netdev_priv(dev);
+ struct m_can_classdev *cdev = netdev_priv(dev);
struct net_device_stats *stats = &dev->stats;
/* read tx event fifo status */
- m_can_txefs = m_can_read(priv, M_CAN_TXEFS);
+ m_can_txefs = m_can_read(cdev, M_CAN_TXEFS);
/* Get Tx Event fifo element count */
txe_count = (m_can_txefs & TXEFS_EFFL_MASK)
@@ -883,15 +889,15 @@ static void m_can_echo_tx_event(struct net_device *dev)
/* Get and process all sent elements */
for (i = 0; i < txe_count; i++) {
/* retrieve get index */
- fgi = (m_can_read(priv, M_CAN_TXEFS) & TXEFS_EFGI_MASK)
+ fgi = (m_can_read(cdev, M_CAN_TXEFS) & TXEFS_EFGI_MASK)
>> TXEFS_EFGI_SHIFT;
/* get message marker */
- msg_mark = (m_can_txe_fifo_read(priv, fgi, 4) &
+ msg_mark = (m_can_txe_fifo_read(cdev, fgi, 4) &
TX_EVENT_MM_MASK) >> TX_EVENT_MM_SHIFT;
/* ack txe element */
- m_can_write(priv, M_CAN_TXEFA, (TXEFA_EFAI_MASK &
+ m_can_write(cdev, M_CAN_TXEFA, (TXEFA_EFAI_MASK &
(fgi << TXEFA_EFAI_SHIFT)));
/* update stats */
@@ -903,17 +909,20 @@ static void m_can_echo_tx_event(struct net_device *dev)
static irqreturn_t m_can_isr(int irq, void *dev_id)
{
struct net_device *dev = (struct net_device *)dev_id;
- struct m_can_priv *priv = netdev_priv(dev);
+ struct m_can_classdev *cdev = netdev_priv(dev);
struct net_device_stats *stats = &dev->stats;
u32 ir;
- ir = m_can_read(priv, M_CAN_IR);
+ ir = m_can_read(cdev, M_CAN_IR);
if (!ir)
return IRQ_NONE;
/* ACK all irqs */
if (ir & IR_ALL_INT)
- m_can_write(priv, M_CAN_IR, ir);
+ m_can_write(cdev, M_CAN_IR, ir);
+
+ if (cdev->ops->clear_interrupts)
+ cdev->ops->clear_interrupts(cdev);
/* schedule NAPI in case of
* - rx IRQ
@@ -921,12 +930,15 @@ static irqreturn_t m_can_isr(int irq, void *dev_id)
* - bus error IRQ and bus error reporting
*/
if ((ir & IR_RF0N) || (ir & IR_ERR_ALL_30X)) {
- priv->irqstatus = ir;
- m_can_disable_all_interrupts(priv);
- napi_schedule(&priv->napi);
+ cdev->irqstatus = ir;
+ m_can_disable_all_interrupts(cdev);
+ if (!cdev->is_peripheral)
+ napi_schedule(&cdev->napi);
+ else
+ m_can_rx_peripheral(dev);
}
- if (priv->version == 30) {
+ if (cdev->version == 30) {
if (ir & IR_TC) {
/* Transmission Complete Interrupt */
stats->tx_bytes += can_get_echo_skb(dev, 0);
@@ -940,7 +952,7 @@ static irqreturn_t m_can_isr(int irq, void *dev_id)
m_can_echo_tx_event(dev);
can_led_event(dev, CAN_LED_EVENT_TX);
if (netif_queue_stopped(dev) &&
- !m_can_tx_fifo_full(priv))
+ !m_can_tx_fifo_full(cdev))
netif_wake_queue(dev);
}
}
@@ -998,9 +1010,9 @@ static const struct can_bittiming_const m_can_data_bittiming_const_31X = {
static int m_can_set_bittiming(struct net_device *dev)
{
- struct m_can_priv *priv = netdev_priv(dev);
- const struct can_bittiming *bt = &priv->can.bittiming;
- const struct can_bittiming *dbt = &priv->can.data_bittiming;
+ struct m_can_classdev *cdev = netdev_priv(dev);
+ const struct can_bittiming *bt = &cdev->can.bittiming;
+ const struct can_bittiming *dbt = &cdev->can.data_bittiming;
u16 brp, sjw, tseg1, tseg2;
u32 reg_btp;
@@ -1010,9 +1022,9 @@ static int m_can_set_bittiming(struct net_device *dev)
tseg2 = bt->phase_seg2 - 1;
reg_btp = (brp << NBTP_NBRP_SHIFT) | (sjw << NBTP_NSJW_SHIFT) |
(tseg1 << NBTP_NTSEG1_SHIFT) | (tseg2 << NBTP_NTSEG2_SHIFT);
- m_can_write(priv, M_CAN_NBTP, reg_btp);
+ m_can_write(cdev, M_CAN_NBTP, reg_btp);
- if (priv->can.ctrlmode & CAN_CTRLMODE_FD) {
+ if (cdev->can.ctrlmode & CAN_CTRLMODE_FD) {
reg_btp = 0;
brp = dbt->brp - 1;
sjw = dbt->sjw - 1;
@@ -1034,7 +1046,7 @@ static int m_can_set_bittiming(struct net_device *dev)
/* Equation based on Bosch's M_CAN User Manual's
* Transmitter Delay Compensation Section
*/
- tdco = (priv->can.clock.freq / 1000) *
+ tdco = (cdev->can.clock.freq / 1000) *
ssp / dbt->bitrate;
/* Max valid TDCO value is 127 */
@@ -1045,7 +1057,7 @@ static int m_can_set_bittiming(struct net_device *dev)
}
reg_btp |= DBTP_TDC;
- m_can_write(priv, M_CAN_TDCR,
+ m_can_write(cdev, M_CAN_TDCR,
tdco << TDCR_TDCO_SHIFT);
}
@@ -1054,7 +1066,7 @@ static int m_can_set_bittiming(struct net_device *dev)
(tseg1 << DBTP_DTSEG1_SHIFT) |
(tseg2 << DBTP_DTSEG2_SHIFT);
- m_can_write(priv, M_CAN_DBTP, reg_btp);
+ m_can_write(cdev, M_CAN_DBTP, reg_btp);
}
return 0;
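
The transmitter delay compensation above is easy to sanity-check by hand. A worked example as a C comment, assuming ssp carries the data-phase secondary sample point in tenths of a percent (the usual can_bittiming convention); the numbers are illustrative, not taken from the patch:

/* Illustration: with a 40 MHz CAN clock, a 4 Mbit/s data bitrate and a
 * secondary sample point of 80.0% (ssp = 800, in tenths of a percent),
 * the equation above gives
 *
 *	tdco = (40000000 / 1000) * 800 / 4000000 = 8
 *
 * well below the maximum of 127, so no clamping is needed.
 */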
@@ -1071,63 +1083,63 @@ static int m_can_set_bittiming(struct net_device *dev)
*/
static void m_can_chip_config(struct net_device *dev)
{
- struct m_can_priv *priv = netdev_priv(dev);
+ struct m_can_classdev *cdev = netdev_priv(dev);
u32 cccr, test;
- m_can_config_endisable(priv, true);
+ m_can_config_endisable(cdev, true);
/* RX Buffer/FIFO Element Size 64 bytes data field */
- m_can_write(priv, M_CAN_RXESC, M_CAN_RXESC_64BYTES);
+ m_can_write(cdev, M_CAN_RXESC, M_CAN_RXESC_64BYTES);
/* Accept Non-matching Frames Into FIFO 0 */
- m_can_write(priv, M_CAN_GFC, 0x0);
+ m_can_write(cdev, M_CAN_GFC, 0x0);
- if (priv->version == 30) {
+ if (cdev->version == 30) {
/* only support one Tx Buffer currently */
- m_can_write(priv, M_CAN_TXBC, (1 << TXBC_NDTB_SHIFT) |
- priv->mcfg[MRAM_TXB].off);
+ m_can_write(cdev, M_CAN_TXBC, (1 << TXBC_NDTB_SHIFT) |
+ cdev->mcfg[MRAM_TXB].off);
} else {
/* TX FIFO is used for newer IP Core versions */
- m_can_write(priv, M_CAN_TXBC,
- (priv->mcfg[MRAM_TXB].num << TXBC_TFQS_SHIFT) |
- (priv->mcfg[MRAM_TXB].off));
+ m_can_write(cdev, M_CAN_TXBC,
+ (cdev->mcfg[MRAM_TXB].num << TXBC_TFQS_SHIFT) |
+ (cdev->mcfg[MRAM_TXB].off));
}
/* support 64 bytes payload */
- m_can_write(priv, M_CAN_TXESC, TXESC_TBDS_64BYTES);
+ m_can_write(cdev, M_CAN_TXESC, TXESC_TBDS_64BYTES);
/* TX Event FIFO */
- if (priv->version == 30) {
- m_can_write(priv, M_CAN_TXEFC, (1 << TXEFC_EFS_SHIFT) |
- priv->mcfg[MRAM_TXE].off);
+ if (cdev->version == 30) {
+ m_can_write(cdev, M_CAN_TXEFC, (1 << TXEFC_EFS_SHIFT) |
+ cdev->mcfg[MRAM_TXE].off);
} else {
/* Full TX Event FIFO is used */
- m_can_write(priv, M_CAN_TXEFC,
- ((priv->mcfg[MRAM_TXE].num << TXEFC_EFS_SHIFT)
+ m_can_write(cdev, M_CAN_TXEFC,
+ ((cdev->mcfg[MRAM_TXE].num << TXEFC_EFS_SHIFT)
& TXEFC_EFS_MASK) |
- priv->mcfg[MRAM_TXE].off);
+ cdev->mcfg[MRAM_TXE].off);
}
/* rx fifo configuration, blocking mode, fifo size 1 */
- m_can_write(priv, M_CAN_RXF0C,
- (priv->mcfg[MRAM_RXF0].num << RXFC_FS_SHIFT) |
- priv->mcfg[MRAM_RXF0].off);
+ m_can_write(cdev, M_CAN_RXF0C,
+ (cdev->mcfg[MRAM_RXF0].num << RXFC_FS_SHIFT) |
+ cdev->mcfg[MRAM_RXF0].off);
- m_can_write(priv, M_CAN_RXF1C,
- (priv->mcfg[MRAM_RXF1].num << RXFC_FS_SHIFT) |
- priv->mcfg[MRAM_RXF1].off);
+ m_can_write(cdev, M_CAN_RXF1C,
+ (cdev->mcfg[MRAM_RXF1].num << RXFC_FS_SHIFT) |
+ cdev->mcfg[MRAM_RXF1].off);
- cccr = m_can_read(priv, M_CAN_CCCR);
- test = m_can_read(priv, M_CAN_TEST);
+ cccr = m_can_read(cdev, M_CAN_CCCR);
+ test = m_can_read(cdev, M_CAN_TEST);
test &= ~TEST_LBCK;
- if (priv->version == 30) {
+ if (cdev->version == 30) {
/* Version 3.0.x */
cccr &= ~(CCCR_TEST | CCCR_MON |
(CCCR_CMR_MASK << CCCR_CMR_SHIFT) |
(CCCR_CME_MASK << CCCR_CME_SHIFT));
- if (priv->can.ctrlmode & CAN_CTRLMODE_FD)
+ if (cdev->can.ctrlmode & CAN_CTRLMODE_FD)
cccr |= CCCR_CME_CANFD_BRS << CCCR_CME_SHIFT;
} else {
@@ -1136,64 +1148,68 @@ static void m_can_chip_config(struct net_device *dev)
CCCR_NISO);
/* Only 3.2.x has NISO Bit implemented */
- if (priv->can.ctrlmode & CAN_CTRLMODE_FD_NON_ISO)
+ if (cdev->can.ctrlmode & CAN_CTRLMODE_FD_NON_ISO)
cccr |= CCCR_NISO;
- if (priv->can.ctrlmode & CAN_CTRLMODE_FD)
+ if (cdev->can.ctrlmode & CAN_CTRLMODE_FD)
cccr |= (CCCR_BRSE | CCCR_FDOE);
}
/* Loopback Mode */
- if (priv->can.ctrlmode & CAN_CTRLMODE_LOOPBACK) {
+ if (cdev->can.ctrlmode & CAN_CTRLMODE_LOOPBACK) {
cccr |= CCCR_TEST | CCCR_MON;
test |= TEST_LBCK;
}
/* Enable Monitoring (all versions) */
- if (priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY)
+ if (cdev->can.ctrlmode & CAN_CTRLMODE_LISTENONLY)
cccr |= CCCR_MON;
/* Write config */
- m_can_write(priv, M_CAN_CCCR, cccr);
- m_can_write(priv, M_CAN_TEST, test);
+ m_can_write(cdev, M_CAN_CCCR, cccr);
+ m_can_write(cdev, M_CAN_TEST, test);
/* Enable interrupts */
- m_can_write(priv, M_CAN_IR, IR_ALL_INT);
- if (!(priv->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING))
- if (priv->version == 30)
- m_can_write(priv, M_CAN_IE, IR_ALL_INT &
+ m_can_write(cdev, M_CAN_IR, IR_ALL_INT);
+ if (!(cdev->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING))
+ if (cdev->version == 30)
+ m_can_write(cdev, M_CAN_IE, IR_ALL_INT &
~(IR_ERR_LEC_30X));
else
- m_can_write(priv, M_CAN_IE, IR_ALL_INT &
+ m_can_write(cdev, M_CAN_IE, IR_ALL_INT &
~(IR_ERR_LEC_31X));
else
- m_can_write(priv, M_CAN_IE, IR_ALL_INT);
+ m_can_write(cdev, M_CAN_IE, IR_ALL_INT);
/* route all interrupts to INT0 */
- m_can_write(priv, M_CAN_ILS, ILS_ALL_INT0);
+ m_can_write(cdev, M_CAN_ILS, ILS_ALL_INT0);
/* set bittiming params */
m_can_set_bittiming(dev);
- m_can_config_endisable(priv, false);
+ m_can_config_endisable(cdev, false);
+
+ if (cdev->ops->init)
+ cdev->ops->init(cdev);
}
static void m_can_start(struct net_device *dev)
{
- struct m_can_priv *priv = netdev_priv(dev);
+ struct m_can_classdev *cdev = netdev_priv(dev);
/* basic m_can configuration */
m_can_chip_config(dev);
- priv->can.state = CAN_STATE_ERROR_ACTIVE;
+ cdev->can.state = CAN_STATE_ERROR_ACTIVE;
- m_can_enable_all_interrupts(priv);
+ m_can_enable_all_interrupts(cdev);
}
static int m_can_set_mode(struct net_device *dev, enum can_mode mode)
{
switch (mode) {
case CAN_MODE_START:
+ m_can_clean(dev);
m_can_start(dev);
netif_wake_queue(dev);
break;
@@ -1209,20 +1225,17 @@ static int m_can_set_mode(struct net_device *dev, enum can_mode mode)
* else it returns the release and step coded as:
* return value = 10 * <release> + 1 * <step>
*/
-static int m_can_check_core_release(void __iomem *m_can_base)
+static int m_can_check_core_release(struct m_can_classdev *cdev)
{
u32 crel_reg;
u8 rel;
u8 step;
int res;
- struct m_can_priv temp_priv = {
- .base = m_can_base
- };
/* Read Core Release Version and split into version number
* Example: Version 3.2.1 => rel = 3; step = 2; substep = 1;
*/
- crel_reg = m_can_read(&temp_priv, M_CAN_CREL);
+ crel_reg = m_can_read(cdev, M_CAN_CREL);
rel = (u8)((crel_reg & CREL_REL_MASK) >> CREL_REL_SHIFT);
step = (u8)((crel_reg & CREL_STEP_MASK) >> CREL_STEP_SHIFT);
@@ -1240,152 +1253,142 @@ static int m_can_check_core_release(void __iomem *m_can_base)
/* Selectable Non ISO support only in version 3.2.x
* This function checks if the bit is writable.
*/
-static bool m_can_niso_supported(const struct m_can_priv *priv)
+static bool m_can_niso_supported(struct m_can_classdev *cdev)
{
- u32 cccr_reg, cccr_poll;
- int niso_timeout;
+ u32 cccr_reg, cccr_poll = 0;
+ int niso_timeout = -ETIMEDOUT;
+ int i;
- m_can_config_endisable(priv, true);
- cccr_reg = m_can_read(priv, M_CAN_CCCR);
+ m_can_config_endisable(cdev, true);
+ cccr_reg = m_can_read(cdev, M_CAN_CCCR);
cccr_reg |= CCCR_NISO;
- m_can_write(priv, M_CAN_CCCR, cccr_reg);
+ m_can_write(cdev, M_CAN_CCCR, cccr_reg);
- niso_timeout = readl_poll_timeout((priv->base + M_CAN_CCCR), cccr_poll,
- (cccr_poll == cccr_reg), 0, 10);
+ for (i = 0; i <= 10; i++) {
+ cccr_poll = m_can_read(cdev, M_CAN_CCCR);
+ if (cccr_poll == cccr_reg) {
+ niso_timeout = 0;
+ break;
+ }
+
+ usleep_range(1, 5);
+ }
/* Clear NISO */
cccr_reg &= ~(CCCR_NISO);
- m_can_write(priv, M_CAN_CCCR, cccr_reg);
+ m_can_write(cdev, M_CAN_CCCR, cccr_reg);
- m_can_config_endisable(priv, false);
+ m_can_config_endisable(cdev, false);
/* return false if time out (-ETIMEDOUT), else return true */
return !niso_timeout;
}
-static int m_can_dev_setup(struct platform_device *pdev, struct net_device *dev,
- void __iomem *addr)
+static int m_can_dev_setup(struct m_can_classdev *m_can_dev)
{
- struct m_can_priv *priv;
+ struct net_device *dev = m_can_dev->net;
int m_can_version;
- m_can_version = m_can_check_core_release(addr);
+ m_can_version = m_can_check_core_release(m_can_dev);
/* return if unsupported version */
if (!m_can_version) {
- dev_err(&pdev->dev, "Unsupported version number: %2d",
+ dev_err(m_can_dev->dev, "Unsupported version number: %2d",
m_can_version);
return -EINVAL;
}
- priv = netdev_priv(dev);
- netif_napi_add(dev, &priv->napi, m_can_poll, M_CAN_NAPI_WEIGHT);
+ if (!m_can_dev->is_peripheral)
+ netif_napi_add(dev, &m_can_dev->napi,
+ m_can_poll, M_CAN_NAPI_WEIGHT);
/* Shared properties of all M_CAN versions */
- priv->version = m_can_version;
- priv->dev = dev;
- priv->base = addr;
- priv->can.do_set_mode = m_can_set_mode;
- priv->can.do_get_berr_counter = m_can_get_berr_counter;
+ m_can_dev->version = m_can_version;
+ m_can_dev->can.do_set_mode = m_can_set_mode;
+ m_can_dev->can.do_get_berr_counter = m_can_get_berr_counter;
/* Set M_CAN supported operations */
- priv->can.ctrlmode_supported = CAN_CTRLMODE_LOOPBACK |
+ m_can_dev->can.ctrlmode_supported = CAN_CTRLMODE_LOOPBACK |
CAN_CTRLMODE_LISTENONLY |
CAN_CTRLMODE_BERR_REPORTING |
CAN_CTRLMODE_FD;
/* Set properties depending on M_CAN version */
- switch (priv->version) {
+ switch (m_can_dev->version) {
case 30:
/* CAN_CTRLMODE_FD_NON_ISO is fixed with M_CAN IP v3.0.x */
can_set_static_ctrlmode(dev, CAN_CTRLMODE_FD_NON_ISO);
- priv->can.bittiming_const = &m_can_bittiming_const_30X;
- priv->can.data_bittiming_const =
- &m_can_data_bittiming_const_30X;
+ m_can_dev->can.bittiming_const = m_can_dev->bit_timing ?
+ m_can_dev->bit_timing : &m_can_bittiming_const_30X;
+
+ m_can_dev->can.data_bittiming_const = m_can_dev->data_timing ?
+ m_can_dev->data_timing :
+ &m_can_data_bittiming_const_30X;
break;
case 31:
/* CAN_CTRLMODE_FD_NON_ISO is fixed with M_CAN IP v3.1.x */
can_set_static_ctrlmode(dev, CAN_CTRLMODE_FD_NON_ISO);
- priv->can.bittiming_const = &m_can_bittiming_const_31X;
- priv->can.data_bittiming_const =
- &m_can_data_bittiming_const_31X;
+ m_can_dev->can.bittiming_const = m_can_dev->bit_timing ?
+ m_can_dev->bit_timing : &m_can_bittiming_const_31X;
+
+ m_can_dev->can.data_bittiming_const = m_can_dev->data_timing ?
+ m_can_dev->data_timing :
+ &m_can_data_bittiming_const_31X;
break;
case 32:
- priv->can.bittiming_const = &m_can_bittiming_const_31X;
- priv->can.data_bittiming_const =
- &m_can_data_bittiming_const_31X;
- priv->can.ctrlmode_supported |= (m_can_niso_supported(priv)
+ m_can_dev->can.bittiming_const = m_can_dev->bit_timing ?
+ m_can_dev->bit_timing : &m_can_bittiming_const_31X;
+
+ m_can_dev->can.data_bittiming_const = m_can_dev->data_timing ?
+ m_can_dev->data_timing :
+ &m_can_data_bittiming_const_31X;
+
+ m_can_dev->can.ctrlmode_supported |=
+ (m_can_niso_supported(m_can_dev)
? CAN_CTRLMODE_FD_NON_ISO
: 0);
break;
default:
- dev_err(&pdev->dev, "Unsupported version number: %2d",
- priv->version);
+ dev_err(m_can_dev->dev, "Unsupported version number: %2d",
+ m_can_dev->version);
return -EINVAL;
}
- return 0;
-}
-
-static int m_can_open(struct net_device *dev)
-{
- struct m_can_priv *priv = netdev_priv(dev);
- int err;
-
- err = m_can_clk_start(priv);
- if (err)
- return err;
-
- /* open the can device */
- err = open_candev(dev);
- if (err) {
- netdev_err(dev, "failed to open can device\n");
- goto exit_disable_clks;
- }
-
- /* register interrupt handler */
- err = request_irq(dev->irq, m_can_isr, IRQF_SHARED, dev->name,
- dev);
- if (err < 0) {
- netdev_err(dev, "failed to request interrupt\n");
- goto exit_irq_fail;
- }
-
- /* start the m_can controller */
- m_can_start(dev);
-
- can_led_event(dev, CAN_LED_EVENT_OPEN);
- napi_enable(&priv->napi);
- netif_start_queue(dev);
+ if (m_can_dev->ops->init)
+ m_can_dev->ops->init(m_can_dev);
return 0;
-
-exit_irq_fail:
- close_candev(dev);
-exit_disable_clks:
- m_can_clk_stop(priv);
- return err;
}
static void m_can_stop(struct net_device *dev)
{
- struct m_can_priv *priv = netdev_priv(dev);
+ struct m_can_classdev *cdev = netdev_priv(dev);
/* disable all interrupts */
- m_can_disable_all_interrupts(priv);
+ m_can_disable_all_interrupts(cdev);
/* set the state as STOPPED */
- priv->can.state = CAN_STATE_STOPPED;
+ cdev->can.state = CAN_STATE_STOPPED;
}
static int m_can_close(struct net_device *dev)
{
- struct m_can_priv *priv = netdev_priv(dev);
+ struct m_can_classdev *cdev = netdev_priv(dev);
netif_stop_queue(dev);
- napi_disable(&priv->napi);
+
+ if (!cdev->is_peripheral)
+ napi_disable(&cdev->napi);
+
m_can_stop(dev);
- m_can_clk_stop(priv);
+ m_can_clk_stop(cdev);
free_irq(dev->irq, dev);
+
+ if (cdev->is_peripheral) {
+ cdev->tx_skb = NULL;
+ destroy_workqueue(cdev->tx_wq);
+ cdev->tx_wq = NULL;
+ }
+
close_candev(dev);
can_led_event(dev, CAN_LED_EVENT_STOP);
@@ -1394,30 +1397,27 @@ static int m_can_close(struct net_device *dev)
static int m_can_next_echo_skb_occupied(struct net_device *dev, int putidx)
{
- struct m_can_priv *priv = netdev_priv(dev);
+ struct m_can_classdev *cdev = netdev_priv(dev);
/* get wrap-around for loopback skb index */
- unsigned int wrap = priv->can.echo_skb_max;
+ unsigned int wrap = cdev->can.echo_skb_max;
int next_idx;
/* calculate next index */
next_idx = (++putidx >= wrap ? 0 : putidx);
/* check if occupied */
- return !!priv->can.echo_skb[next_idx];
+ return !!cdev->can.echo_skb[next_idx];
}
-static netdev_tx_t m_can_start_xmit(struct sk_buff *skb,
- struct net_device *dev)
+static netdev_tx_t m_can_tx_handler(struct m_can_classdev *cdev)
{
- struct m_can_priv *priv = netdev_priv(dev);
- struct canfd_frame *cf = (struct canfd_frame *)skb->data;
+ struct canfd_frame *cf = (struct canfd_frame *)cdev->tx_skb->data;
+ struct net_device *dev = cdev->net;
+ struct sk_buff *skb = cdev->tx_skb;
u32 id, cccr, fdflags;
int i;
int putidx;
- if (can_dropped_invalid_skb(dev, skb))
- return NETDEV_TX_OK;
-
/* Generate ID field for TX buffer Element */
/* Common to all supported M_CAN versions */
if (cf->can_id & CAN_EFF_FLAG) {
@@ -1430,23 +1430,23 @@ static netdev_tx_t m_can_start_xmit(struct sk_buff *skb,
if (cf->can_id & CAN_RTR_FLAG)
id |= TX_BUF_RTR;
- if (priv->version == 30) {
+ if (cdev->version == 30) {
netif_stop_queue(dev);
/* message ram configuration */
- m_can_fifo_write(priv, 0, M_CAN_FIFO_ID, id);
- m_can_fifo_write(priv, 0, M_CAN_FIFO_DLC,
+ m_can_fifo_write(cdev, 0, M_CAN_FIFO_ID, id);
+ m_can_fifo_write(cdev, 0, M_CAN_FIFO_DLC,
can_len2dlc(cf->len) << 16);
for (i = 0; i < cf->len; i += 4)
- m_can_fifo_write(priv, 0,
+ m_can_fifo_write(cdev, 0,
M_CAN_FIFO_DATA(i / 4),
*(u32 *)(cf->data + i));
can_put_echo_skb(skb, dev, 0);
- if (priv->can.ctrlmode & CAN_CTRLMODE_FD) {
- cccr = m_can_read(priv, M_CAN_CCCR);
+ if (cdev->can.ctrlmode & CAN_CTRLMODE_FD) {
+ cccr = m_can_read(cdev, M_CAN_CCCR);
cccr &= ~(CCCR_CMR_MASK << CCCR_CMR_SHIFT);
if (can_is_canfd_skb(skb)) {
if (cf->flags & CANFD_BRS)
@@ -1458,28 +1458,35 @@ static netdev_tx_t m_can_start_xmit(struct sk_buff *skb,
} else {
cccr |= CCCR_CMR_CAN << CCCR_CMR_SHIFT;
}
- m_can_write(priv, M_CAN_CCCR, cccr);
+ m_can_write(cdev, M_CAN_CCCR, cccr);
}
- m_can_write(priv, M_CAN_TXBTIE, 0x1);
- m_can_write(priv, M_CAN_TXBAR, 0x1);
+ m_can_write(cdev, M_CAN_TXBTIE, 0x1);
+ m_can_write(cdev, M_CAN_TXBAR, 0x1);
/* End of xmit function for version 3.0.x */
} else {
/* Transmit routine for version >= v3.1.x */
/* Check if FIFO full */
- if (m_can_tx_fifo_full(priv)) {
+ if (m_can_tx_fifo_full(cdev)) {
/* This shouldn't happen */
netif_stop_queue(dev);
netdev_warn(dev,
"TX queue active although FIFO is full.");
- return NETDEV_TX_BUSY;
+
+ if (cdev->is_peripheral) {
+ kfree_skb(skb);
+ dev->stats.tx_dropped++;
+ return NETDEV_TX_OK;
+ } else {
+ return NETDEV_TX_BUSY;
+ }
}
/* get put index for frame */
- putidx = ((m_can_read(priv, M_CAN_TXFQS) & TXFQS_TFQPI_MASK)
+ putidx = ((m_can_read(cdev, M_CAN_TXFQS) & TXFQS_TFQPI_MASK)
>> TXFQS_TFQPI_SHIFT);
/* Write ID Field to FIFO Element */
- m_can_fifo_write(priv, putidx, M_CAN_FIFO_ID, id);
+ m_can_fifo_write(cdev, putidx, M_CAN_FIFO_ID, id);
/* get CAN FD configuration of frame */
fdflags = 0;
@@ -1494,14 +1501,14 @@ static netdev_tx_t m_can_start_xmit(struct sk_buff *skb,
* it is used in TX interrupt for
* sending the correct echo frame
*/
- m_can_fifo_write(priv, putidx, M_CAN_FIFO_DLC,
+ m_can_fifo_write(cdev, putidx, M_CAN_FIFO_DLC,
((putidx << TX_BUF_MM_SHIFT) &
TX_BUF_MM_MASK) |
(can_len2dlc(cf->len) << 16) |
fdflags | TX_BUF_EFC);
for (i = 0; i < cf->len; i += 4)
- m_can_fifo_write(priv, putidx, M_CAN_FIFO_DATA(i / 4),
+ m_can_fifo_write(cdev, putidx, M_CAN_FIFO_DATA(i / 4),
*(u32 *)(cf->data + i));
/* Push loopback echo.
@@ -1510,17 +1517,123 @@ static netdev_tx_t m_can_start_xmit(struct sk_buff *skb,
can_put_echo_skb(skb, dev, putidx);
/* Enable TX FIFO element to start transfer */
- m_can_write(priv, M_CAN_TXBAR, (1 << putidx));
+ m_can_write(cdev, M_CAN_TXBAR, (1 << putidx));
/* stop network queue if fifo full */
- if (m_can_tx_fifo_full(priv) ||
- m_can_next_echo_skb_occupied(dev, putidx))
- netif_stop_queue(dev);
+ if (m_can_tx_fifo_full(cdev) ||
+ m_can_next_echo_skb_occupied(dev, putidx))
+ netif_stop_queue(dev);
}
return NETDEV_TX_OK;
}
+static void m_can_tx_work_queue(struct work_struct *ws)
+{
+ struct m_can_classdev *cdev = container_of(ws, struct m_can_classdev,
+ tx_work);
+
+ m_can_tx_handler(cdev);
+ cdev->tx_skb = NULL;
+}
+
+static netdev_tx_t m_can_start_xmit(struct sk_buff *skb,
+ struct net_device *dev)
+{
+ struct m_can_classdev *cdev = netdev_priv(dev);
+
+ if (can_dropped_invalid_skb(dev, skb))
+ return NETDEV_TX_OK;
+
+ if (cdev->is_peripheral) {
+ if (cdev->tx_skb) {
+ netdev_err(dev, "hard_xmit called while tx busy\n");
+ return NETDEV_TX_BUSY;
+ }
+
+ if (cdev->can.state == CAN_STATE_BUS_OFF) {
+ m_can_clean(dev);
+ } else {
+ /* Need to stop the queue to avoid numerous requests
+ * from being sent. Suggested improvement is to create
+ * a queueing mechanism that will queue the skbs and
+ * process them in order.
+ */
+ cdev->tx_skb = skb;
+ netif_stop_queue(cdev->net);
+ queue_work(cdev->tx_wq, &cdev->tx_work);
+ }
+ } else {
+ cdev->tx_skb = skb;
+ return m_can_tx_handler(cdev);
+ }
+
+ return NETDEV_TX_OK;
+}
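
For peripheral (e.g. SPI-attached) devices, the path above serializes frames through the single tx_skb slot instead of writing registers from ndo_start_xmit() context. A sketch of the intended lifecycle, written as a C comment; this is a reading of the patch rather than text from it:

/*
 * Peripheral TX flow -- one frame in flight at a time:
 *
 *   ndo_start_xmit()
 *     -> cdev->tx_skb = skb, netif_stop_queue()
 *     -> queue_work(cdev->tx_wq, &cdev->tx_work)
 *   m_can_tx_work_queue()
 *     -> m_can_tx_handler() pushes the frame, queues the echo skb
 *     -> cdev->tx_skb = NULL
 *   TX-complete interrupt (IR_TC or a TX event FIFO entry)
 *     -> can_get_echo_skb(), netif_wake_queue()
 */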
+
+static int m_can_open(struct net_device *dev)
+{
+ struct m_can_classdev *cdev = netdev_priv(dev);
+ int err;
+
+ err = m_can_clk_start(cdev);
+ if (err)
+ return err;
+
+ /* open the can device */
+ err = open_candev(dev);
+ if (err) {
+ netdev_err(dev, "failed to open can device\n");
+ goto exit_disable_clks;
+ }
+
+ /* register interrupt handler */
+ if (cdev->is_peripheral) {
+ cdev->tx_skb = NULL;
+ cdev->tx_wq = alloc_workqueue("mcan_wq",
+ WQ_FREEZABLE | WQ_MEM_RECLAIM, 0);
+ if (!cdev->tx_wq) {
+ err = -ENOMEM;
+ goto out_wq_fail;
+ }
+
+ INIT_WORK(&cdev->tx_work, m_can_tx_work_queue);
+
+ err = request_threaded_irq(dev->irq, NULL, m_can_isr,
+ IRQF_ONESHOT | IRQF_TRIGGER_FALLING,
+ dev->name, dev);
+ } else {
+ err = request_irq(dev->irq, m_can_isr, IRQF_SHARED, dev->name,
+ dev);
+ }
+
+ if (err < 0) {
+ netdev_err(dev, "failed to request interrupt\n");
+ goto exit_irq_fail;
+ }
+
+ /* start the m_can controller */
+ m_can_start(dev);
+
+ can_led_event(dev, CAN_LED_EVENT_OPEN);
+
+ if (!cdev->is_peripheral)
+ napi_enable(&cdev->napi);
+
+ netif_start_queue(dev);
+
+ return 0;
+
+exit_irq_fail:
+ if (cdev->is_peripheral)
+ destroy_workqueue(cdev->tx_wq);
+out_wq_fail:
+ close_candev(dev);
+exit_disable_clks:
+ m_can_clk_stop(cdev);
+ return err;
+}
+
static const struct net_device_ops m_can_netdev_ops = {
.ndo_open = m_can_open,
.ndo_stop = m_can_close,
@@ -1536,114 +1649,91 @@ static int register_m_can_dev(struct net_device *dev)
return register_candev(dev);
}
-static void m_can_init_ram(struct m_can_priv *priv)
-{
- int end, i, start;
-
- /* initialize the entire Message RAM in use to avoid possible
- * ECC/parity checksum errors when reading an uninitialized buffer
- */
- start = priv->mcfg[MRAM_SIDF].off;
- end = priv->mcfg[MRAM_TXB].off +
- priv->mcfg[MRAM_TXB].num * TXB_ELEMENT_SIZE;
- for (i = start; i < end; i += 4)
- writel(0x0, priv->mram_base + i);
-}
-
-static void m_can_of_parse_mram(struct m_can_priv *priv,
+static void m_can_of_parse_mram(struct m_can_classdev *cdev,
const u32 *mram_config_vals)
{
- priv->mcfg[MRAM_SIDF].off = mram_config_vals[0];
- priv->mcfg[MRAM_SIDF].num = mram_config_vals[1];
- priv->mcfg[MRAM_XIDF].off = priv->mcfg[MRAM_SIDF].off +
- priv->mcfg[MRAM_SIDF].num * SIDF_ELEMENT_SIZE;
- priv->mcfg[MRAM_XIDF].num = mram_config_vals[2];
- priv->mcfg[MRAM_RXF0].off = priv->mcfg[MRAM_XIDF].off +
- priv->mcfg[MRAM_XIDF].num * XIDF_ELEMENT_SIZE;
- priv->mcfg[MRAM_RXF0].num = mram_config_vals[3] &
+ cdev->mcfg[MRAM_SIDF].off = mram_config_vals[0];
+ cdev->mcfg[MRAM_SIDF].num = mram_config_vals[1];
+ cdev->mcfg[MRAM_XIDF].off = cdev->mcfg[MRAM_SIDF].off +
+ cdev->mcfg[MRAM_SIDF].num * SIDF_ELEMENT_SIZE;
+ cdev->mcfg[MRAM_XIDF].num = mram_config_vals[2];
+ cdev->mcfg[MRAM_RXF0].off = cdev->mcfg[MRAM_XIDF].off +
+ cdev->mcfg[MRAM_XIDF].num * XIDF_ELEMENT_SIZE;
+ cdev->mcfg[MRAM_RXF0].num = mram_config_vals[3] &
(RXFC_FS_MASK >> RXFC_FS_SHIFT);
- priv->mcfg[MRAM_RXF1].off = priv->mcfg[MRAM_RXF0].off +
- priv->mcfg[MRAM_RXF0].num * RXF0_ELEMENT_SIZE;
- priv->mcfg[MRAM_RXF1].num = mram_config_vals[4] &
+ cdev->mcfg[MRAM_RXF1].off = cdev->mcfg[MRAM_RXF0].off +
+ cdev->mcfg[MRAM_RXF0].num * RXF0_ELEMENT_SIZE;
+ cdev->mcfg[MRAM_RXF1].num = mram_config_vals[4] &
(RXFC_FS_MASK >> RXFC_FS_SHIFT);
- priv->mcfg[MRAM_RXB].off = priv->mcfg[MRAM_RXF1].off +
- priv->mcfg[MRAM_RXF1].num * RXF1_ELEMENT_SIZE;
- priv->mcfg[MRAM_RXB].num = mram_config_vals[5];
- priv->mcfg[MRAM_TXE].off = priv->mcfg[MRAM_RXB].off +
- priv->mcfg[MRAM_RXB].num * RXB_ELEMENT_SIZE;
- priv->mcfg[MRAM_TXE].num = mram_config_vals[6];
- priv->mcfg[MRAM_TXB].off = priv->mcfg[MRAM_TXE].off +
- priv->mcfg[MRAM_TXE].num * TXE_ELEMENT_SIZE;
- priv->mcfg[MRAM_TXB].num = mram_config_vals[7] &
+ cdev->mcfg[MRAM_RXB].off = cdev->mcfg[MRAM_RXF1].off +
+ cdev->mcfg[MRAM_RXF1].num * RXF1_ELEMENT_SIZE;
+ cdev->mcfg[MRAM_RXB].num = mram_config_vals[5];
+ cdev->mcfg[MRAM_TXE].off = cdev->mcfg[MRAM_RXB].off +
+ cdev->mcfg[MRAM_RXB].num * RXB_ELEMENT_SIZE;
+ cdev->mcfg[MRAM_TXE].num = mram_config_vals[6];
+ cdev->mcfg[MRAM_TXB].off = cdev->mcfg[MRAM_TXE].off +
+ cdev->mcfg[MRAM_TXE].num * TXE_ELEMENT_SIZE;
+ cdev->mcfg[MRAM_TXB].num = mram_config_vals[7] &
(TXBC_NDTB_MASK >> TXBC_NDTB_SHIFT);
- dev_dbg(priv->device,
- "mram_base %p sidf 0x%x %d xidf 0x%x %d rxf0 0x%x %d rxf1 0x%x %d rxb 0x%x %d txe 0x%x %d txb 0x%x %d\n",
- priv->mram_base,
- priv->mcfg[MRAM_SIDF].off, priv->mcfg[MRAM_SIDF].num,
- priv->mcfg[MRAM_XIDF].off, priv->mcfg[MRAM_XIDF].num,
- priv->mcfg[MRAM_RXF0].off, priv->mcfg[MRAM_RXF0].num,
- priv->mcfg[MRAM_RXF1].off, priv->mcfg[MRAM_RXF1].num,
- priv->mcfg[MRAM_RXB].off, priv->mcfg[MRAM_RXB].num,
- priv->mcfg[MRAM_TXE].off, priv->mcfg[MRAM_TXE].num,
- priv->mcfg[MRAM_TXB].off, priv->mcfg[MRAM_TXB].num);
-
- m_can_init_ram(priv);
+ dev_dbg(cdev->dev,
+ "sidf 0x%x %d xidf 0x%x %d rxf0 0x%x %d rxf1 0x%x %d rxb 0x%x %d txe 0x%x %d txb 0x%x %d\n",
+ cdev->mcfg[MRAM_SIDF].off, cdev->mcfg[MRAM_SIDF].num,
+ cdev->mcfg[MRAM_XIDF].off, cdev->mcfg[MRAM_XIDF].num,
+ cdev->mcfg[MRAM_RXF0].off, cdev->mcfg[MRAM_RXF0].num,
+ cdev->mcfg[MRAM_RXF1].off, cdev->mcfg[MRAM_RXF1].num,
+ cdev->mcfg[MRAM_RXB].off, cdev->mcfg[MRAM_RXB].num,
+ cdev->mcfg[MRAM_TXE].off, cdev->mcfg[MRAM_TXE].num,
+ cdev->mcfg[MRAM_TXB].off, cdev->mcfg[MRAM_TXB].num);
}
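
To make the packing concrete, here is a hypothetical configuration worked through as a C comment. The element sizes are the ones the driver defines elsewhere (not visible in these hunks): 4 bytes per standard filter, 8 per extended filter or TX event, 72 per RX/TX element with a 64-byte payload.

/* Hypothetical property:
 *
 *	bosch,mram-cfg = <0x0 0 0 32 0 0 0 1>;
 *
 * packs the regions back to back:
 *
 *	SIDF, XIDF       off 0x000, 0 elements each
 *	RXF0             off 0x000, 32 elements (32 * 72 = 2304 bytes)
 *	RXF1, RXB, TXE   off 0x900, 0 elements each
 *	TXB              off 0x900, 1 element
 *
 * so m_can_init_ram() below zeroes 0x000..0x948 of the Message RAM.
 */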
-static int m_can_plat_probe(struct platform_device *pdev)
+void m_can_init_ram(struct m_can_classdev *cdev)
{
- struct net_device *dev;
- struct m_can_priv *priv;
- struct resource *res;
- void __iomem *addr;
- void __iomem *mram_addr;
- struct clk *hclk, *cclk;
- int irq, ret;
- struct device_node *np;
- u32 mram_config_vals[MRAM_CFG_LEN];
- u32 tx_fifo_size;
-
- np = pdev->dev.of_node;
+ int end, i, start;
- hclk = devm_clk_get(&pdev->dev, "hclk");
- cclk = devm_clk_get(&pdev->dev, "cclk");
+ /* initialize the entire Message RAM in use to avoid possible
+ * ECC/parity checksum errors when reading an uninitialized buffer
+ */
+ start = cdev->mcfg[MRAM_SIDF].off;
+ end = cdev->mcfg[MRAM_TXB].off +
+ cdev->mcfg[MRAM_TXB].num * TXB_ELEMENT_SIZE;
- if (IS_ERR(hclk) || IS_ERR(cclk)) {
- dev_err(&pdev->dev, "no clock found\n");
- ret = -ENODEV;
- goto failed_ret;
- }
+ for (i = start; i < end; i += 4)
+ m_can_fifo_write_no_off(cdev, i, 0x0);
+}
+EXPORT_SYMBOL_GPL(m_can_init_ram);
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "m_can");
- addr = devm_ioremap_resource(&pdev->dev, res);
- irq = platform_get_irq_byname(pdev, "int0");
+int m_can_class_get_clocks(struct m_can_classdev *m_can_dev)
+{
+ int ret = 0;
- if (IS_ERR(addr) || irq < 0) {
- ret = -EINVAL;
- goto failed_ret;
- }
+ m_can_dev->hclk = devm_clk_get(m_can_dev->dev, "hclk");
+ m_can_dev->cclk = devm_clk_get(m_can_dev->dev, "cclk");
- /* message ram could be shared */
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "message_ram");
- if (!res) {
+ if (IS_ERR(m_can_dev->cclk)) {
+ dev_err(m_can_dev->dev, "no clock found\n");
ret = -ENODEV;
- goto failed_ret;
}
- mram_addr = devm_ioremap(&pdev->dev, res->start, resource_size(res));
- if (!mram_addr) {
- ret = -ENOMEM;
- goto failed_ret;
- }
+ return ret;
+}
+EXPORT_SYMBOL_GPL(m_can_class_get_clocks);
- /* get message ram configuration */
- ret = of_property_read_u32_array(np, "bosch,mram-cfg",
- mram_config_vals,
- sizeof(mram_config_vals) / 4);
+struct m_can_classdev *m_can_class_allocate_dev(struct device *dev)
+{
+ struct m_can_classdev *class_dev = NULL;
+ u32 mram_config_vals[MRAM_CFG_LEN];
+ struct net_device *net_dev;
+ u32 tx_fifo_size;
+ int ret;
+
+ ret = fwnode_property_read_u32_array(dev_fwnode(dev),
+ "bosch,mram-cfg",
+ mram_config_vals,
+ sizeof(mram_config_vals) / 4);
if (ret) {
- dev_err(&pdev->dev, "Could not get Message RAM configuration.");
- goto failed_ret;
+ dev_err(dev, "Could not get Message RAM configuration.");
+ goto out;
}
/* Get TX FIFO size
@@ -1652,101 +1742,110 @@ static int m_can_plat_probe(struct platform_device *pdev)
tx_fifo_size = mram_config_vals[7];
/* allocate the m_can device */
- dev = alloc_candev(sizeof(*priv), tx_fifo_size);
- if (!dev) {
- ret = -ENOMEM;
- goto failed_ret;
+ net_dev = alloc_candev(sizeof(*class_dev), tx_fifo_size);
+ if (!net_dev) {
+ dev_err(dev, "Failed to allocate CAN device");
+ goto out;
}
- priv = netdev_priv(dev);
- dev->irq = irq;
- priv->device = &pdev->dev;
- priv->hclk = hclk;
- priv->cclk = cclk;
- priv->can.clock.freq = clk_get_rate(cclk);
- priv->mram_base = mram_addr;
+ class_dev = netdev_priv(net_dev);
+ if (!class_dev) {
+ dev_err(dev, "Failed to init netdev cdevate");
+ goto out;
+ }
- platform_set_drvdata(pdev, dev);
- SET_NETDEV_DEV(dev, &pdev->dev);
+ class_dev->net = net_dev;
+ class_dev->dev = dev;
+ SET_NETDEV_DEV(net_dev, dev);
- /* Enable clocks. Necessary to read Core Release in order to determine
- * M_CAN version
- */
- pm_runtime_enable(&pdev->dev);
- ret = m_can_clk_start(priv);
- if (ret)
- goto pm_runtime_fail;
+ m_can_of_parse_mram(class_dev, mram_config_vals);
+out:
+ return class_dev;
+}
+EXPORT_SYMBOL_GPL(m_can_class_allocate_dev);
+
+int m_can_class_register(struct m_can_classdev *m_can_dev)
+{
+ int ret;
- ret = m_can_dev_setup(pdev, dev, addr);
+ if (m_can_dev->pm_clock_support) {
+ pm_runtime_enable(m_can_dev->dev);
+ ret = m_can_clk_start(m_can_dev);
+ if (ret)
+ goto pm_runtime_fail;
+ }
+
+ ret = m_can_dev_setup(m_can_dev);
if (ret)
goto clk_disable;
- ret = register_m_can_dev(dev);
+ ret = register_m_can_dev(m_can_dev->net);
if (ret) {
- dev_err(&pdev->dev, "registering %s failed (err=%d)\n",
- KBUILD_MODNAME, ret);
+ dev_err(m_can_dev->dev, "registering %s failed (err=%d)\n",
+ m_can_dev->net->name, ret);
goto clk_disable;
}
- m_can_of_parse_mram(priv, mram_config_vals);
-
- devm_can_led_init(dev);
+ devm_can_led_init(m_can_dev->net);
- of_can_transceiver(dev);
+ of_can_transceiver(m_can_dev->net);
- dev_info(&pdev->dev, "%s device registered (irq=%d, version=%d)\n",
- KBUILD_MODNAME, dev->irq, priv->version);
+ dev_info(m_can_dev->dev, "%s device registered (irq=%d, version=%d)\n",
+ KBUILD_MODNAME, m_can_dev->net->irq, m_can_dev->version);
/* Probe finished
* Stop clocks. They will be reactivated once the M_CAN device is opened
*/
clk_disable:
- m_can_clk_stop(priv);
+ m_can_clk_stop(m_can_dev);
pm_runtime_fail:
if (ret) {
- pm_runtime_disable(&pdev->dev);
- free_candev(dev);
+ if (m_can_dev->pm_clock_support)
+ pm_runtime_disable(m_can_dev->dev);
+ free_candev(m_can_dev->net);
}
-failed_ret:
+
return ret;
}
+EXPORT_SYMBOL_GPL(m_can_class_register);
-static __maybe_unused int m_can_suspend(struct device *dev)
+int m_can_class_suspend(struct device *dev)
{
struct net_device *ndev = dev_get_drvdata(dev);
- struct m_can_priv *priv = netdev_priv(ndev);
+ struct m_can_classdev *cdev = netdev_priv(ndev);
if (netif_running(ndev)) {
netif_stop_queue(ndev);
netif_device_detach(ndev);
m_can_stop(ndev);
- m_can_clk_stop(priv);
+ m_can_clk_stop(cdev);
}
pinctrl_pm_select_sleep_state(dev);
- priv->can.state = CAN_STATE_SLEEPING;
+ cdev->can.state = CAN_STATE_SLEEPING;
return 0;
}
+EXPORT_SYMBOL_GPL(m_can_class_suspend);
-static __maybe_unused int m_can_resume(struct device *dev)
+int m_can_class_resume(struct device *dev)
{
struct net_device *ndev = dev_get_drvdata(dev);
- struct m_can_priv *priv = netdev_priv(ndev);
+ struct m_can_classdev *cdev = netdev_priv(ndev);
pinctrl_pm_select_default_state(dev);
- priv->can.state = CAN_STATE_ERROR_ACTIVE;
+ cdev->can.state = CAN_STATE_ERROR_ACTIVE;
if (netif_running(ndev)) {
int ret;
- ret = m_can_clk_start(priv);
+ ret = m_can_clk_start(cdev);
if (ret)
return ret;
- m_can_init_ram(priv);
+ m_can_init_ram(cdev);
m_can_start(ndev);
netif_device_attach(ndev);
netif_start_queue(ndev);
@@ -1754,79 +1853,19 @@ static __maybe_unused int m_can_resume(struct device *dev)
return 0;
}
+EXPORT_SYMBOL_GPL(m_can_class_resume);
-static void unregister_m_can_dev(struct net_device *dev)
+void m_can_class_unregister(struct m_can_classdev *m_can_dev)
{
- unregister_candev(dev);
-}
+ unregister_candev(m_can_dev->net);
-static int m_can_plat_remove(struct platform_device *pdev)
-{
- struct net_device *dev = platform_get_drvdata(pdev);
+ m_can_clk_stop(m_can_dev);
- unregister_m_can_dev(dev);
-
- pm_runtime_disable(&pdev->dev);
-
- platform_set_drvdata(pdev, NULL);
-
- free_candev(dev);
-
- return 0;
-}
-
-static int __maybe_unused m_can_runtime_suspend(struct device *dev)
-{
- struct net_device *ndev = dev_get_drvdata(dev);
- struct m_can_priv *priv = netdev_priv(ndev);
-
- clk_disable_unprepare(priv->cclk);
- clk_disable_unprepare(priv->hclk);
-
- return 0;
-}
-
-static int __maybe_unused m_can_runtime_resume(struct device *dev)
-{
- struct net_device *ndev = dev_get_drvdata(dev);
- struct m_can_priv *priv = netdev_priv(ndev);
- int err;
-
- err = clk_prepare_enable(priv->hclk);
- if (err)
- return err;
-
- err = clk_prepare_enable(priv->cclk);
- if (err)
- clk_disable_unprepare(priv->hclk);
-
- return err;
+ free_candev(m_can_dev->net);
}
-
-static const struct dev_pm_ops m_can_pmops = {
- SET_RUNTIME_PM_OPS(m_can_runtime_suspend,
- m_can_runtime_resume, NULL)
- SET_SYSTEM_SLEEP_PM_OPS(m_can_suspend, m_can_resume)
-};
-
-static const struct of_device_id m_can_of_table[] = {
- { .compatible = "bosch,m_can", .data = NULL },
- { /* sentinel */ },
-};
-MODULE_DEVICE_TABLE(of, m_can_of_table);
-
-static struct platform_driver m_can_plat_driver = {
- .driver = {
- .name = KBUILD_MODNAME,
- .of_match_table = m_can_of_table,
- .pm = &m_can_pmops,
- },
- .probe = m_can_plat_probe,
- .remove = m_can_plat_remove,
-};
-
-module_platform_driver(m_can_plat_driver);
+EXPORT_SYMBOL_GPL(m_can_class_unregister);
MODULE_AUTHOR("Dong Aisheng <b29396@freescale.com>");
+MODULE_AUTHOR("Dan Murphy <dmurphy@ti.com>");
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("CAN bus driver for Bosch M_CAN controller");
diff --git a/drivers/net/can/m_can/m_can.h b/drivers/net/can/m_can/m_can.h
new file mode 100644
index 000000000000..49f42b50627a
--- /dev/null
+++ b/drivers/net/can/m_can/m_can.h
@@ -0,0 +1,110 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* CAN bus driver for Bosch M_CAN controller
+ * Copyright (C) 2018 Texas Instruments Incorporated - http://www.ti.com/
+ */
+
+#ifndef _CAN_M_CAN_H_
+#define _CAN_M_CAN_H_
+
+#include <linux/can/core.h>
+#include <linux/can/led.h>
+#include <linux/completion.h>
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/freezer.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/iopoll.h>
+#include <linux/can/dev.h>
+#include <linux/pinctrl/consumer.h>
+
+/* m_can lec values */
+enum m_can_lec_type {
+ LEC_NO_ERROR = 0,
+ LEC_STUFF_ERROR,
+ LEC_FORM_ERROR,
+ LEC_ACK_ERROR,
+ LEC_BIT1_ERROR,
+ LEC_BIT0_ERROR,
+ LEC_CRC_ERROR,
+ LEC_UNUSED,
+};
+
+enum m_can_mram_cfg {
+ MRAM_SIDF = 0,
+ MRAM_XIDF,
+ MRAM_RXF0,
+ MRAM_RXF1,
+ MRAM_RXB,
+ MRAM_TXE,
+ MRAM_TXB,
+ MRAM_CFG_NUM,
+};
+
+/* address offset and element number for each FIFO/Buffer in the Message RAM */
+struct mram_cfg {
+ u16 off;
+ u8 num;
+};
+
+struct m_can_classdev;
+struct m_can_ops {
+ /* Device specific call backs */
+ int (*clear_interrupts)(struct m_can_classdev *cdev);
+ u32 (*read_reg)(struct m_can_classdev *cdev, int reg);
+ int (*write_reg)(struct m_can_classdev *cdev, int reg, int val);
+ u32 (*read_fifo)(struct m_can_classdev *cdev, int addr_offset);
+ int (*write_fifo)(struct m_can_classdev *cdev, int addr_offset,
+ int val);
+ int (*init)(struct m_can_classdev *cdev);
+};
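
This ops table is what lets one core serve both memory-mapped and SPI-attached controllers. The core's register accessors in m_can.c fall outside the hunks shown above, but they presumably reduce to thin dispatchers along these lines (a sketch, not the verbatim implementation):

static inline u32 m_can_read(struct m_can_classdev *cdev, int reg)
{
	return cdev->ops->read_reg(cdev, reg);
}

static inline void m_can_write(struct m_can_classdev *cdev, int reg, int val)
{
	cdev->ops->write_reg(cdev, reg, val);
}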
+
+struct m_can_classdev {
+ struct can_priv can;
+ struct napi_struct napi;
+ struct net_device *net;
+ struct device *dev;
+ struct clk *hclk;
+ struct clk *cclk;
+
+ struct workqueue_struct *tx_wq;
+ struct work_struct tx_work;
+ struct sk_buff *tx_skb;
+
+ struct can_bittiming_const *bit_timing;
+ struct can_bittiming_const *data_timing;
+
+ struct m_can_ops *ops;
+
+ void *device_data;
+
+ int version;
+ int freq;
+ u32 irqstatus;
+
+ int pm_clock_support;
+ int is_peripheral;
+
+ struct mram_cfg mcfg[MRAM_CFG_NUM];
+};
+
+struct m_can_classdev *m_can_class_allocate_dev(struct device *dev);
+int m_can_class_register(struct m_can_classdev *cdev);
+void m_can_class_unregister(struct m_can_classdev *cdev);
+int m_can_class_get_clocks(struct m_can_classdev *cdev);
+void m_can_init_ram(struct m_can_classdev *cdev);
+void m_can_config_endisable(struct m_can_classdev *cdev, bool enable);
+
+int m_can_class_suspend(struct device *dev);
+int m_can_class_resume(struct device *dev);
+#endif /* _CAN_M_CAN_H_ */
diff --git a/drivers/net/can/m_can/m_can_platform.c b/drivers/net/can/m_can/m_can_platform.c
new file mode 100644
index 000000000000..6ac4c35f247a
--- /dev/null
+++ b/drivers/net/can/m_can/m_can_platform.c
@@ -0,0 +1,201 @@
+// SPDX-License-Identifier: GPL-2.0
+// IOMapped CAN bus driver for Bosch M_CAN controller
+// Copyright (C) 2014 Freescale Semiconductor, Inc.
+// Dong Aisheng <b29396@freescale.com>
+//
+// Copyright (C) 2018-19 Texas Instruments Incorporated - http://www.ti.com/
+
+#include <linux/platform_device.h>
+
+#include "m_can.h"
+
+struct m_can_plat_priv {
+ void __iomem *base;
+ void __iomem *mram_base;
+};
+
+static u32 iomap_read_reg(struct m_can_classdev *cdev, int reg)
+{
+ struct m_can_plat_priv *priv = cdev->device_data;
+
+ return readl(priv->base + reg);
+}
+
+static u32 iomap_read_fifo(struct m_can_classdev *cdev, int offset)
+{
+ struct m_can_plat_priv *priv = cdev->device_data;
+
+ return readl(priv->mram_base + offset);
+}
+
+static int iomap_write_reg(struct m_can_classdev *cdev, int reg, int val)
+{
+ struct m_can_plat_priv *priv = cdev->device_data;
+
+ writel(val, priv->base + reg);
+
+ return 0;
+}
+
+static int iomap_write_fifo(struct m_can_classdev *cdev, int offset, int val)
+{
+ struct m_can_plat_priv *priv = cdev->device_data;
+
+ writel(val, priv->mram_base + offset);
+
+ return 0;
+}
+
+static struct m_can_ops m_can_plat_ops = {
+ .read_reg = iomap_read_reg,
+ .write_reg = iomap_write_reg,
+ .write_fifo = iomap_write_fifo,
+ .read_fifo = iomap_read_fifo,
+};
+
+static int m_can_plat_probe(struct platform_device *pdev)
+{
+ struct m_can_classdev *mcan_class;
+ struct m_can_plat_priv *priv;
+ struct resource *res;
+ void __iomem *addr;
+ void __iomem *mram_addr;
+ int irq, ret = 0;
+
+ mcan_class = m_can_class_allocate_dev(&pdev->dev);
+ if (!mcan_class)
+ return -ENOMEM;
+
+ priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ mcan_class->device_data = priv;
+
+ m_can_class_get_clocks(mcan_class);
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "m_can");
+ addr = devm_ioremap_resource(&pdev->dev, res);
+ irq = platform_get_irq_byname(pdev, "int0");
+ if (IS_ERR(addr) || irq < 0) {
+ ret = -EINVAL;
+ goto failed_ret;
+ }
+
+ /* message ram could be shared */
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "message_ram");
+ if (!res) {
+ ret = -ENODEV;
+ goto failed_ret;
+ }
+
+ mram_addr = devm_ioremap(&pdev->dev, res->start, resource_size(res));
+ if (!mram_addr) {
+ ret = -ENOMEM;
+ goto failed_ret;
+ }
+
+ priv->base = addr;
+ priv->mram_base = mram_addr;
+
+ mcan_class->net->irq = irq;
+ mcan_class->pm_clock_support = 1;
+ mcan_class->can.clock.freq = clk_get_rate(mcan_class->cclk);
+ mcan_class->dev = &pdev->dev;
+
+ mcan_class->ops = &m_can_plat_ops;
+
+ mcan_class->is_peripheral = false;
+
+ platform_set_drvdata(pdev, mcan_class->net);
+
+ m_can_init_ram(mcan_class);
+
+ ret = m_can_class_register(mcan_class);
+
+failed_ret:
+ return ret;
+}
+
+static __maybe_unused int m_can_suspend(struct device *dev)
+{
+ return m_can_class_suspend(dev);
+}
+
+static __maybe_unused int m_can_resume(struct device *dev)
+{
+ return m_can_class_resume(dev);
+}
+
+static int m_can_plat_remove(struct platform_device *pdev)
+{
+ struct net_device *dev = platform_get_drvdata(pdev);
+ struct m_can_classdev *mcan_class = netdev_priv(dev);
+
+ m_can_class_unregister(mcan_class);
+
+ platform_set_drvdata(pdev, NULL);
+
+ return 0;
+}
+
+static int __maybe_unused m_can_runtime_suspend(struct device *dev)
+{
+ struct net_device *ndev = dev_get_drvdata(dev);
+ struct m_can_classdev *mcan_class = netdev_priv(ndev);
+
+ m_can_class_suspend(dev);
+
+ clk_disable_unprepare(mcan_class->cclk);
+ clk_disable_unprepare(mcan_class->hclk);
+
+ return 0;
+}
+
+static int __maybe_unused m_can_runtime_resume(struct device *dev)
+{
+ struct net_device *ndev = dev_get_drvdata(dev);
+ struct m_can_classdev *mcan_class = netdev_priv(ndev);
+ int err;
+
+ err = clk_prepare_enable(mcan_class->hclk);
+ if (err)
+ return err;
+
+ err = clk_prepare_enable(mcan_class->cclk);
+ if (err)
+ clk_disable_unprepare(mcan_class->hclk);
+
+ m_can_class_resume(dev);
+
+ return err;
+}
+
+static const struct dev_pm_ops m_can_pmops = {
+ SET_RUNTIME_PM_OPS(m_can_runtime_suspend,
+ m_can_runtime_resume, NULL)
+ SET_SYSTEM_SLEEP_PM_OPS(m_can_suspend, m_can_resume)
+};
+
+static const struct of_device_id m_can_of_table[] = {
+ { .compatible = "bosch,m_can", .data = NULL },
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, m_can_of_table);
+
+static struct platform_driver m_can_plat_driver = {
+ .driver = {
+ .name = KBUILD_MODNAME,
+ .of_match_table = m_can_of_table,
+ .pm = &m_can_pmops,
+ },
+ .probe = m_can_plat_probe,
+ .remove = m_can_plat_remove,
+};
+
+module_platform_driver(m_can_plat_driver);
+
+MODULE_AUTHOR("Dong Aisheng <b29396@freescale.com>");
+MODULE_AUTHOR("Dan Murphy <dmurphy@ti.com>");
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("M_CAN driver for IO Mapped Bosch controllers");
diff --git a/drivers/net/can/m_can/tcan4x5x.c b/drivers/net/can/m_can/tcan4x5x.c
new file mode 100644
index 000000000000..a697996d81b4
--- /dev/null
+++ b/drivers/net/can/m_can/tcan4x5x.c
@@ -0,0 +1,527 @@
+// SPDX-License-Identifier: GPL-2.0
+// SPI to CAN driver for the Texas Instruments TCAN4x5x
+// Copyright (C) 2018-19 Texas Instruments Incorporated - http://www.ti.com/
+
+#include <linux/regmap.h>
+#include <linux/spi/spi.h>
+
+#include <linux/regulator/consumer.h>
+#include <linux/gpio/consumer.h>
+
+#include "m_can.h"
+
+#define DEVICE_NAME "tcan4x5x"
+#define TCAN4X5X_EXT_CLK_DEF 40000000
+
+#define TCAN4X5X_DEV_ID0 0x00
+#define TCAN4X5X_DEV_ID1 0x04
+#define TCAN4X5X_REV 0x08
+#define TCAN4X5X_STATUS 0x0C
+#define TCAN4X5X_ERROR_STATUS 0x10
+#define TCAN4X5X_CONTROL 0x14
+
+#define TCAN4X5X_CONFIG 0x800
+#define TCAN4X5X_TS_PRESCALE 0x804
+#define TCAN4X5X_TEST_REG 0x808
+#define TCAN4X5X_INT_FLAGS 0x820
+#define TCAN4X5X_MCAN_INT_REG 0x824
+#define TCAN4X5X_INT_EN 0x830
+
+/* Interrupt bits */
+#define TCAN4X5X_CANBUSTERMOPEN_INT_EN BIT(30)
+#define TCAN4X5X_CANHCANL_INT_EN BIT(29)
+#define TCAN4X5X_CANHBAT_INT_EN BIT(28)
+#define TCAN4X5X_CANLGND_INT_EN BIT(27)
+#define TCAN4X5X_CANBUSOPEN_INT_EN BIT(26)
+#define TCAN4X5X_CANBUSGND_INT_EN BIT(25)
+#define TCAN4X5X_CANBUSBAT_INT_EN BIT(24)
+#define TCAN4X5X_UVSUP_INT_EN BIT(22)
+#define TCAN4X5X_UVIO_INT_EN BIT(21)
+#define TCAN4X5X_TSD_INT_EN BIT(19)
+#define TCAN4X5X_ECCERR_INT_EN BIT(16)
+#define TCAN4X5X_CANINT_INT_EN BIT(15)
+#define TCAN4X5X_LWU_INT_EN BIT(14)
+#define TCAN4X5X_CANSLNT_INT_EN BIT(10)
+#define TCAN4X5X_CANDOM_INT_EN BIT(8)
+#define TCAN4X5X_CANBUS_ERR_INT_EN BIT(5)
+#define TCAN4X5X_BUS_FAULT BIT(4)
+#define TCAN4X5X_MCAN_INT BIT(1)
+#define TCAN4X5X_ENABLE_TCAN_INT \
+ (TCAN4X5X_MCAN_INT | TCAN4X5X_BUS_FAULT | \
+ TCAN4X5X_CANBUS_ERR_INT_EN | TCAN4X5X_CANINT_INT_EN)
+
+/* MCAN Interrupt bits */
+#define TCAN4X5X_MCAN_IR_ARA BIT(29)
+#define TCAN4X5X_MCAN_IR_PED BIT(28)
+#define TCAN4X5X_MCAN_IR_PEA BIT(27)
+#define TCAN4X5X_MCAN_IR_WD BIT(26)
+#define TCAN4X5X_MCAN_IR_BO BIT(25)
+#define TCAN4X5X_MCAN_IR_EW BIT(24)
+#define TCAN4X5X_MCAN_IR_EP BIT(23)
+#define TCAN4X5X_MCAN_IR_ELO BIT(22)
+#define TCAN4X5X_MCAN_IR_BEU BIT(21)
+#define TCAN4X5X_MCAN_IR_BEC BIT(20)
+#define TCAN4X5X_MCAN_IR_DRX BIT(19)
+#define TCAN4X5X_MCAN_IR_TOO BIT(18)
+#define TCAN4X5X_MCAN_IR_MRAF BIT(17)
+#define TCAN4X5X_MCAN_IR_TSW BIT(16)
+#define TCAN4X5X_MCAN_IR_TEFL BIT(15)
+#define TCAN4X5X_MCAN_IR_TEFF BIT(14)
+#define TCAN4X5X_MCAN_IR_TEFW BIT(13)
+#define TCAN4X5X_MCAN_IR_TEFN BIT(12)
+#define TCAN4X5X_MCAN_IR_TFE BIT(11)
+#define TCAN4X5X_MCAN_IR_TCF BIT(10)
+#define TCAN4X5X_MCAN_IR_TC BIT(9)
+#define TCAN4X5X_MCAN_IR_HPM BIT(8)
+#define TCAN4X5X_MCAN_IR_RF1L BIT(7)
+#define TCAN4X5X_MCAN_IR_RF1F BIT(6)
+#define TCAN4X5X_MCAN_IR_RF1W BIT(5)
+#define TCAN4X5X_MCAN_IR_RF1N BIT(4)
+#define TCAN4X5X_MCAN_IR_RF0L BIT(3)
+#define TCAN4X5X_MCAN_IR_RF0F BIT(2)
+#define TCAN4X5X_MCAN_IR_RF0W BIT(1)
+#define TCAN4X5X_MCAN_IR_RF0N BIT(0)
+#define TCAN4X5X_ENABLE_MCAN_INT \
+ (TCAN4X5X_MCAN_IR_TC | TCAN4X5X_MCAN_IR_RF0N | \
+ TCAN4X5X_MCAN_IR_RF1N | TCAN4X5X_MCAN_IR_RF0F | \
+ TCAN4X5X_MCAN_IR_RF1F)
+
+#define TCAN4X5X_MRAM_START 0x8000
+#define TCAN4X5X_MCAN_OFFSET 0x1000
+#define TCAN4X5X_MAX_REGISTER 0x8fff
+
+#define TCAN4X5X_CLEAR_ALL_INT 0xffffffff
+#define TCAN4X5X_SET_ALL_INT 0xffffffff
+
+#define TCAN4X5X_WRITE_CMD (0x61 << 24)
+#define TCAN4X5X_READ_CMD (0x41 << 24)
+
+#define TCAN4X5X_MODE_SEL_MASK (BIT(7) | BIT(6))
+#define TCAN4X5X_MODE_SLEEP 0x00
+#define TCAN4X5X_MODE_STANDBY BIT(6)
+#define TCAN4X5X_MODE_NORMAL BIT(7)
+
+#define TCAN4X5X_SW_RESET BIT(2)
+
+#define TCAN4X5X_MCAN_CONFIGURED BIT(5)
+#define TCAN4X5X_WATCHDOG_EN BIT(3)
+#define TCAN4X5X_WD_60_MS_TIMER 0
+#define TCAN4X5X_WD_600_MS_TIMER BIT(28)
+#define TCAN4X5X_WD_3_S_TIMER BIT(29)
+#define TCAN4X5X_WD_6_S_TIMER (BIT(28) | BIT(29))
+
+struct tcan4x5x_priv {
+ struct regmap *regmap;
+ struct spi_device *spi;
+
+ struct m_can_classdev *mcan_dev;
+
+ struct gpio_desc *reset_gpio;
+ struct gpio_desc *interrupt_gpio;
+ struct gpio_desc *device_wake_gpio;
+ struct gpio_desc *device_state_gpio;
+ struct regulator *power;
+
+ /* Base offsets of the M_CAN register block and Message RAM */
+ int mram_start;
+ int reg_offset;
+};
+
+static struct can_bittiming_const tcan4x5x_bittiming_const = {
+ .name = DEVICE_NAME,
+ .tseg1_min = 2,
+ .tseg1_max = 31,
+ .tseg2_min = 2,
+ .tseg2_max = 16,
+ .sjw_max = 16,
+ .brp_min = 1,
+ .brp_max = 32,
+ .brp_inc = 1,
+};
+
+static struct can_bittiming_const tcan4x5x_data_bittiming_const = {
+ .name = DEVICE_NAME,
+ .tseg1_min = 1,
+ .tseg1_max = 32,
+ .tseg2_min = 1,
+ .tseg2_max = 16,
+ .sjw_max = 16,
+ .brp_min = 1,
+ .brp_max = 32,
+ .brp_inc = 1,
+};
+
+static void tcan4x5x_check_wake(struct tcan4x5x_priv *priv)
+{
+ int wake_state = 0;
+
+ if (priv->device_state_gpio)
+ wake_state = gpiod_get_value(priv->device_state_gpio);
+
+ if (priv->device_wake_gpio && wake_state) {
+ gpiod_set_value(priv->device_wake_gpio, 0);
+ usleep_range(5, 50);
+ gpiod_set_value(priv->device_wake_gpio, 1);
+ }
+}
+
+static int regmap_spi_gather_write(void *context, const void *reg,
+ size_t reg_len, const void *val,
+ size_t val_len)
+{
+ struct device *dev = context;
+ struct spi_device *spi = to_spi_device(dev);
+ struct spi_message m;
+ u32 addr;
+ struct spi_transfer t[2] = {
+ { .tx_buf = &addr, .len = reg_len, .cs_change = 0,},
+ { .tx_buf = val, .len = val_len, },
+ };
+
+ addr = TCAN4X5X_WRITE_CMD | (*((u16 *)reg) << 8) | val_len >> 2;
+
+ spi_message_init(&m);
+ spi_message_add_tail(&t[0], &m);
+ spi_message_add_tail(&t[1], &m);
+
+ return spi_sync(spi, &m);
+}
+
+static int tcan4x5x_regmap_write(void *context, const void *data, size_t count)
+{
+ u16 *reg = (u16 *)(data);
+ const u32 *val = data + 4;
+
+ return regmap_spi_gather_write(context, reg, 4, val, count - 4);
+}
+
+static int regmap_spi_async_write(void *context,
+ const void *reg, size_t reg_len,
+ const void *val, size_t val_len,
+ struct regmap_async *a)
+{
+ return -ENOTSUPP;
+}
+
+static struct regmap_async *regmap_spi_async_alloc(void)
+{
+ return NULL;
+}
+
+static int tcan4x5x_regmap_read(void *context,
+ const void *reg, size_t reg_size,
+ void *val, size_t val_size)
+{
+ struct device *dev = context;
+ struct spi_device *spi = to_spi_device(dev);
+
+ u32 addr = TCAN4X5X_READ_CMD | (*((u16 *)reg) << 8) | val_size >> 2;
+
+ return spi_write_then_read(spi, &addr, reg_size, (u32 *)val, val_size);
+}
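
Both helpers above encode the same 32-bit SPI header: an opcode byte, a 16-bit register address, and a transfer length in 32-bit words. A worked example as a C comment; the CCCR offset of 0x18 assumes the standard M_CAN register map and is purely illustrative:

/* Writing one 32-bit word to CCCR (device address 0x1000 + 0x18):
 *
 *	0x61 << 24 | 0x1018 << 8 | (4 >> 2)  ==  0x61101801
 *
 * opcode 0x61 (write), address 0x1018, length 1 word. Reads use
 * opcode 0x41 with the same layout.
 */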
+
+static struct regmap_bus tcan4x5x_bus = {
+ .write = tcan4x5x_regmap_write,
+ .gather_write = regmap_spi_gather_write,
+ .async_write = regmap_spi_async_write,
+ .async_alloc = regmap_spi_async_alloc,
+ .read = tcan4x5x_regmap_read,
+ .read_flag_mask = 0x00,
+ .reg_format_endian_default = REGMAP_ENDIAN_NATIVE,
+ .val_format_endian_default = REGMAP_ENDIAN_NATIVE,
+};
+
+static u32 tcan4x5x_read_reg(struct m_can_classdev *cdev, int reg)
+{
+ struct tcan4x5x_priv *priv = cdev->device_data;
+ u32 val;
+
+ tcan4x5x_check_wake(priv);
+
+ regmap_read(priv->regmap, priv->reg_offset + reg, &val);
+
+ return val;
+}
+
+static u32 tcan4x5x_read_fifo(struct m_can_classdev *cdev, int addr_offset)
+{
+ struct tcan4x5x_priv *priv = cdev->device_data;
+ u32 val;
+
+ tcan4x5x_check_wake(priv);
+
+ regmap_read(priv->regmap, priv->mram_start + addr_offset, &val);
+
+ return val;
+}
+
+static int tcan4x5x_write_reg(struct m_can_classdev *cdev, int reg, int val)
+{
+ struct tcan4x5x_priv *priv = cdev->device_data;
+
+ tcan4x5x_check_wake(priv);
+
+ return regmap_write(priv->regmap, priv->reg_offset + reg, val);
+}
+
+static int tcan4x5x_write_fifo(struct m_can_classdev *cdev,
+ int addr_offset, int val)
+{
+ struct tcan4x5x_priv *priv = cdev->device_data;
+
+ tcan4x5x_check_wake(priv);
+
+ return regmap_write(priv->regmap, priv->mram_start + addr_offset, val);
+}
+
+static int tcan4x5x_power_enable(struct regulator *reg, int enable)
+{
+ if (IS_ERR_OR_NULL(reg))
+ return 0;
+
+ if (enable)
+ return regulator_enable(reg);
+ else
+ return regulator_disable(reg);
+}
+
+static int tcan4x5x_write_tcan_reg(struct m_can_classdev *cdev,
+ int reg, int val)
+{
+ struct tcan4x5x_priv *priv = cdev->device_data;
+
+ tcan4x5x_check_wake(priv);
+
+ return regmap_write(priv->regmap, reg, val);
+}
+
+static int tcan4x5x_clear_interrupts(struct m_can_classdev *cdev)
+{
+ struct tcan4x5x_priv *tcan4x5x = cdev->device_data;
+ int ret;
+
+ tcan4x5x_check_wake(tcan4x5x);
+
+ ret = tcan4x5x_write_tcan_reg(cdev, TCAN4X5X_STATUS,
+ TCAN4X5X_CLEAR_ALL_INT);
+ if (ret)
+ return ret;
+
+ ret = tcan4x5x_write_tcan_reg(cdev, TCAN4X5X_MCAN_INT_REG,
+ TCAN4X5X_ENABLE_MCAN_INT);
+ if (ret)
+ return ret;
+
+ ret = tcan4x5x_write_tcan_reg(cdev, TCAN4X5X_INT_FLAGS,
+ TCAN4X5X_CLEAR_ALL_INT);
+ if (ret)
+ return ret;
+
+ ret = tcan4x5x_write_tcan_reg(cdev, TCAN4X5X_ERROR_STATUS,
+ TCAN4X5X_CLEAR_ALL_INT);
+ if (ret)
+ return ret;
+
+ return ret;
+}
+
+static int tcan4x5x_init(struct m_can_classdev *cdev)
+{
+ struct tcan4x5x_priv *tcan4x5x = cdev->device_data;
+ int ret;
+
+ tcan4x5x_check_wake(tcan4x5x);
+
+ ret = tcan4x5x_clear_interrupts(cdev);
+ if (ret)
+ return ret;
+
+ ret = tcan4x5x_write_tcan_reg(cdev, TCAN4X5X_INT_EN,
+ TCAN4X5X_ENABLE_TCAN_INT);
+ if (ret)
+ return ret;
+
+ ret = regmap_update_bits(tcan4x5x->regmap, TCAN4X5X_CONFIG,
+ TCAN4X5X_MODE_SEL_MASK, TCAN4X5X_MODE_NORMAL);
+ if (ret)
+ return ret;
+
+ /* Zero out the MCAN buffers */
+ m_can_init_ram(cdev);
+
+ return ret;
+}
+
+static int tcan4x5x_parse_config(struct m_can_classdev *cdev)
+{
+ struct tcan4x5x_priv *tcan4x5x = cdev->device_data;
+
+ tcan4x5x->interrupt_gpio = devm_gpiod_get(cdev->dev, "data-ready",
+ GPIOD_IN);
+ if (IS_ERR(tcan4x5x->interrupt_gpio)) {
+ dev_err(cdev->dev, "data-ready gpio not defined\n");
+ return -EINVAL;
+ }
+
+ tcan4x5x->device_wake_gpio = devm_gpiod_get(cdev->dev, "device-wake",
+ GPIOD_OUT_HIGH);
+ if (IS_ERR(tcan4x5x->device_wake_gpio)) {
+ dev_err(cdev->dev, "device-wake gpio not defined\n");
+ return -EINVAL;
+ }
+
+ tcan4x5x->reset_gpio = devm_gpiod_get_optional(cdev->dev, "reset",
+ GPIOD_OUT_LOW);
+ if (IS_ERR(tcan4x5x->reset_gpio))
+ tcan4x5x->reset_gpio = NULL;
+
+ tcan4x5x->device_state_gpio = devm_gpiod_get_optional(cdev->dev,
+ "device-state",
+ GPIOD_IN);
+ if (IS_ERR(tcan4x5x->device_state_gpio))
+ tcan4x5x->device_state_gpio = NULL;
+
+ cdev->net->irq = gpiod_to_irq(tcan4x5x->interrupt_gpio);
+
+ tcan4x5x->power = devm_regulator_get_optional(cdev->dev,
+ "vsup");
+ if (PTR_ERR(tcan4x5x->power) == -EPROBE_DEFER)
+ return -EPROBE_DEFER;
+
+ return 0;
+}
+
+static const struct regmap_config tcan4x5x_regmap = {
+ .reg_bits = 32,
+ .val_bits = 32,
+ .cache_type = REGCACHE_NONE,
+ .max_register = TCAN4X5X_MAX_REGISTER,
+};
+
+static struct m_can_ops tcan4x5x_ops = {
+ .init = tcan4x5x_init,
+ .read_reg = tcan4x5x_read_reg,
+ .write_reg = tcan4x5x_write_reg,
+ .write_fifo = tcan4x5x_write_fifo,
+ .read_fifo = tcan4x5x_read_fifo,
+ .clear_interrupts = tcan4x5x_clear_interrupts,
+};
+
+static int tcan4x5x_can_probe(struct spi_device *spi)
+{
+ struct tcan4x5x_priv *priv;
+ struct m_can_classdev *mcan_class;
+ int freq, ret;
+
+ mcan_class = m_can_class_allocate_dev(&spi->dev);
+ if (!mcan_class)
+ return -ENOMEM;
+
+ priv = devm_kzalloc(&spi->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ mcan_class->device_data = priv;
+
+ m_can_class_get_clocks(mcan_class);
+ if (IS_ERR(mcan_class->cclk)) {
+ dev_err(&spi->dev, "no CAN clock source defined\n");
+ freq = TCAN4X5X_EXT_CLK_DEF;
+ } else {
+ freq = clk_get_rate(mcan_class->cclk);
+ }
+
+ /* Sanity check */
+ if (freq < 20000000 || freq > TCAN4X5X_EXT_CLK_DEF)
+ return -ERANGE;
+
+ priv->reg_offset = TCAN4X5X_MCAN_OFFSET;
+ priv->mram_start = TCAN4X5X_MRAM_START;
+ priv->spi = spi;
+ priv->mcan_dev = mcan_class;
+
+ mcan_class->pm_clock_support = 0;
+ mcan_class->can.clock.freq = freq;
+ mcan_class->dev = &spi->dev;
+ mcan_class->ops = &tcan4x5x_ops;
+ mcan_class->is_peripheral = true;
+ mcan_class->bit_timing = &tcan4x5x_bittiming_const;
+ mcan_class->data_timing = &tcan4x5x_data_bittiming_const;
+
+ spi_set_drvdata(spi, priv);
+
+ ret = tcan4x5x_parse_config(mcan_class);
+ if (ret)
+ goto out_clk;
+
+ /* Configure the SPI bus */
+ spi->bits_per_word = 32;
+ ret = spi_setup(spi);
+ if (ret)
+ goto out_clk;
+
+ priv->regmap = devm_regmap_init(&spi->dev, &tcan4x5x_bus,
+ &spi->dev, &tcan4x5x_regmap);
+ if (IS_ERR(priv->regmap)) {
+ ret = PTR_ERR(priv->regmap);
+ goto out_clk;
+ }
+
+ tcan4x5x_power_enable(priv->power, 1);
+
+ ret = m_can_class_register(mcan_class);
+ if (ret)
+ goto out_power;
+
+ netdev_info(mcan_class->net, "TCAN4X5X successfully initialized.\n");
+ return 0;
+
+out_power:
+ tcan4x5x_power_enable(priv->power, 0);
+out_clk:
+ if (!IS_ERR(mcan_class->cclk)) {
+ clk_disable_unprepare(mcan_class->cclk);
+ clk_disable_unprepare(mcan_class->hclk);
+ }
+
+ dev_err(&spi->dev, "Probe failed, err=%d\n", ret);
+ return ret;
+}
+
+static int tcan4x5x_can_remove(struct spi_device *spi)
+{
+ struct tcan4x5x_priv *priv = spi_get_drvdata(spi);
+
+ tcan4x5x_power_enable(priv->power, 0);
+
+ m_can_class_unregister(priv->mcan_dev);
+
+ return 0;
+}
+
+static const struct of_device_id tcan4x5x_of_match[] = {
+ { .compatible = "ti,tcan4x5x", },
+ { }
+};
+MODULE_DEVICE_TABLE(of, tcan4x5x_of_match);
+
+static const struct spi_device_id tcan4x5x_id_table[] = {
+ {
+ .name = "tcan4x5x",
+ .driver_data = 0,
+ },
+ { }
+};
+MODULE_DEVICE_TABLE(spi, tcan4x5x_id_table);
+
+static struct spi_driver tcan4x5x_can_driver = {
+ .driver = {
+ .name = DEVICE_NAME,
+ .of_match_table = tcan4x5x_of_match,
+ .pm = NULL,
+ },
+ .id_table = tcan4x5x_id_table,
+ .probe = tcan4x5x_can_probe,
+ .remove = tcan4x5x_can_remove,
+};
+module_spi_driver(tcan4x5x_can_driver);
+
+MODULE_AUTHOR("Dan Murphy <dmurphy@ti.com>");
+MODULE_DESCRIPTION("Texas Instruments TCAN4x5x CAN driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/can/rcar/rcar_can.c b/drivers/net/can/rcar/rcar_can.c
index 13e66297b65f..bf5adea9c0a3 100644
--- a/drivers/net/can/rcar/rcar_can.c
+++ b/drivers/net/can/rcar/rcar_can.c
@@ -15,11 +15,17 @@
#include <linux/can/led.h>
#include <linux/can/dev.h>
#include <linux/clk.h>
-#include <linux/can/platform/rcar_can.h>
#include <linux/of.h>
#define RCAR_CAN_DRV_NAME "rcar_can"
+/* Clock Select Register settings */
+enum CLKR {
+ CLKR_CLKP1 = 0, /* Peripheral clock (clkp1) */
+ CLKR_CLKP2 = 1, /* Peripheral clock (clkp2) */
+ CLKR_CLKEXT = 3, /* External input clock */
+};
+
#define RCAR_SUPPORTED_CLOCKS (BIT(CLKR_CLKP1) | BIT(CLKR_CLKP2) | \
BIT(CLKR_CLKEXT))
@@ -736,7 +742,6 @@ static const char * const clock_names[] = {
static int rcar_can_probe(struct platform_device *pdev)
{
- struct rcar_can_platform_data *pdata;
struct rcar_can_priv *priv;
struct net_device *ndev;
struct resource *mem;
@@ -745,21 +750,11 @@ static int rcar_can_probe(struct platform_device *pdev)
int err = -ENODEV;
int irq;
- if (pdev->dev.of_node) {
- of_property_read_u32(pdev->dev.of_node,
- "renesas,can-clock-select", &clock_select);
- } else {
- pdata = dev_get_platdata(&pdev->dev);
- if (!pdata) {
- dev_err(&pdev->dev, "No platform data provided!\n");
- goto fail;
- }
- clock_select = pdata->clock_select;
- }
+ of_property_read_u32(pdev->dev.of_node, "renesas,can-clock-select",
+ &clock_select);
irq = platform_get_irq(pdev, 0);
if (irq < 0) {
- dev_err(&pdev->dev, "No IRQ resource\n");
err = irq;
goto fail;
}
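The dev_err() removed here (and in the rcar_canfd and sun4i_can hunks below) is redundant rather than lost: platform_get_irq() already logs an error itself when it fails, so callers only need to propagate the negative return value. The resulting idiom:

    irq = platform_get_irq(pdev, 0);
    if (irq < 0)
            return irq;     /* platform_get_irq() has already logged why */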
diff --git a/drivers/net/can/rcar/rcar_canfd.c b/drivers/net/can/rcar/rcar_canfd.c
index de34a4b82d4a..edaa1ca972c1 100644
--- a/drivers/net/can/rcar/rcar_canfd.c
+++ b/drivers/net/can/rcar/rcar_canfd.c
@@ -1652,14 +1652,12 @@ static int rcar_canfd_probe(struct platform_device *pdev)
ch_irq = platform_get_irq(pdev, 0);
if (ch_irq < 0) {
- dev_err(&pdev->dev, "no Channel IRQ resource\n");
err = ch_irq;
goto fail_dev;
}
g_irq = platform_get_irq(pdev, 1);
if (g_irq < 0) {
- dev_err(&pdev->dev, "no Global IRQ resource\n");
err = g_irq;
goto fail_dev;
}
diff --git a/drivers/net/can/sja1000/Kconfig b/drivers/net/can/sja1000/Kconfig
index 6b72da2f18a6..32d242dc0d9f 100644
--- a/drivers/net/can/sja1000/Kconfig
+++ b/drivers/net/can/sja1000/Kconfig
@@ -1,26 +1,18 @@
# SPDX-License-Identifier: GPL-2.0-only
+
menuconfig CAN_SJA1000
tristate "Philips/NXP SJA1000 devices"
depends on HAS_IOMEM
if CAN_SJA1000
-config CAN_SJA1000_ISA
- tristate "ISA Bus based legacy SJA1000 driver"
- ---help---
- This driver adds legacy support for SJA1000 chips connected to
- the ISA bus using I/O port, memory mapped or indirect access.
-
-config CAN_SJA1000_PLATFORM
- tristate "Generic Platform Bus based SJA1000 driver"
+config CAN_EMS_PCI
+ tristate "EMS CPC-PCI, CPC-PCIe and CPC-104P Card"
+ depends on PCI
---help---
- This driver adds support for the SJA1000 chips connected to
- the "platform bus" (Linux abstraction for directly to the
- processor attached devices). Which can be found on various
- boards from Phytec (http://www.phytec.de) like the PCM027,
- PCM038. It also provides the OpenFirmware "platform bus" found
- on embedded systems with OpenFirmware bindings, e.g. if you
- have a PowerPC based system you may want to enable this option.
+ This driver is for the one, two or four channel CPC-PCI,
+ CPC-PCIe and CPC-104P cards from EMS Dr. Thomas Wuensche
+ (http://www.ems-wuensche.de).
config CAN_EMS_PCMCIA
tristate "EMS CPC-CARD Card"
@@ -29,23 +21,22 @@ config CAN_EMS_PCMCIA
This driver is for the one or two channel CPC-CARD cards from
EMS Dr. Thomas Wuensche (http://www.ems-wuensche.de).
-config CAN_EMS_PCI
- tristate "EMS CPC-PCI, CPC-PCIe and CPC-104P Card"
+config CAN_F81601
+ tristate "Fintek F81601 PCIE to 2 CAN Controller"
depends on PCI
- ---help---
- This driver is for the one, two or four channel CPC-PCI,
- CPC-PCIe and CPC-104P cards from EMS Dr. Thomas Wuensche
- (http://www.ems-wuensche.de).
+ help
+ This driver adds support for the Fintek F81601 PCIE to 2 CAN
+ Controller. It has an internal 24MHz clock source, but the
+ manufacturer may change it. Use modinfo to get usage details
+ for the module parameters. Visit http://www.fintek.com.tw for
+ more information.
-config CAN_PEAK_PCMCIA
- tristate "PEAK PCAN-PC Card"
- depends on PCMCIA
- depends on HAS_IOPORT_MAP
+config CAN_KVASER_PCI
+ tristate "Kvaser PCIcanx and Kvaser PCIcan PCI Cards"
+ depends on PCI
---help---
- This driver is for the PCAN-PC Card PCMCIA adapter (1 or 2 channels)
- from PEAK-System (http://www.peak-system.com). To compile this
- driver as a module, choose M here: the module will be called
- peak_pcmcia.
+ This driver is for the PCIcanx and PCIcan cards (1, 2 or
+ 4 channel) from Kvaser (http://www.kvaser.com).
config CAN_PEAK_PCI
tristate "PEAK PCAN-PCI/PCIe/miniPCI Cards"
@@ -66,12 +57,15 @@ config CAN_PEAK_PCIEC
Technik. This will also automatically select I2C and I2C_ALGO
configuration options.
-config CAN_KVASER_PCI
- tristate "Kvaser PCIcanx and Kvaser PCIcan PCI Cards"
- depends on PCI
+config CAN_PEAK_PCMCIA
+ tristate "PEAK PCAN-PC Card"
+ depends on PCMCIA
+ depends on HAS_IOPORT_MAP
---help---
- This driver is for the PCIcanx and PCIcan cards (1, 2 or
- 4 channel) from Kvaser (http://www.kvaser.com).
+ This driver is for the PCAN-PC Card PCMCIA adapter (1 or 2 channels)
+ from PEAK-System (http://www.peak-system.com). To compile this
+ driver as a module, choose M here: the module will be called
+ peak_pcmcia.
config CAN_PLX_PCI
tristate "PLX90xx PCI-bridge based Cards"
@@ -91,6 +85,23 @@ config CAN_PLX_PCI
- Connect Tech Inc. CANpro/104-Plus Opto (CRG001) card (http://www.connecttech.com)
- ASEM CAN raw - 2 isolated CAN channels (www.asem.it)
+config CAN_SJA1000_ISA
+ tristate "ISA Bus based legacy SJA1000 driver"
+ ---help---
+ This driver adds legacy support for SJA1000 chips connected to
+ the ISA bus using I/O port, memory mapped or indirect access.
+
+config CAN_SJA1000_PLATFORM
+ tristate "Generic Platform Bus based SJA1000 driver"
+ ---help---
+ This driver adds support for the SJA1000 chips connected to
+ the "platform bus" (the Linux abstraction for devices attached
+ directly to the processor), which can be found on various
+ boards from Phytec (http://www.phytec.de) like the PCM027 and
+ PCM038. It also provides the OpenFirmware "platform bus" found
+ on embedded systems with OpenFirmware bindings, e.g. if you
+ have a PowerPC based system you may want to enable this option.
+
config CAN_TSCAN1
tristate "TS-CAN1 PC104 boards"
depends on ISA
diff --git a/drivers/net/can/sja1000/Makefile b/drivers/net/can/sja1000/Makefile
index 9253aaf9e739..500ce1dddaec 100644
--- a/drivers/net/can/sja1000/Makefile
+++ b/drivers/net/can/sja1000/Makefile
@@ -3,13 +3,14 @@
# Makefile for the SJA1000 CAN controller drivers.
#
-obj-$(CONFIG_CAN_SJA1000) += sja1000.o
-obj-$(CONFIG_CAN_SJA1000_ISA) += sja1000_isa.o
-obj-$(CONFIG_CAN_SJA1000_PLATFORM) += sja1000_platform.o
-obj-$(CONFIG_CAN_EMS_PCMCIA) += ems_pcmcia.o
obj-$(CONFIG_CAN_EMS_PCI) += ems_pci.o
+obj-$(CONFIG_CAN_EMS_PCMCIA) += ems_pcmcia.o
+obj-$(CONFIG_CAN_F81601) += f81601.o
obj-$(CONFIG_CAN_KVASER_PCI) += kvaser_pci.o
-obj-$(CONFIG_CAN_PEAK_PCMCIA) += peak_pcmcia.o
obj-$(CONFIG_CAN_PEAK_PCI) += peak_pci.o
+obj-$(CONFIG_CAN_PEAK_PCMCIA) += peak_pcmcia.o
obj-$(CONFIG_CAN_PLX_PCI) += plx_pci.o
+obj-$(CONFIG_CAN_SJA1000) += sja1000.o
+obj-$(CONFIG_CAN_SJA1000_ISA) += sja1000_isa.o
+obj-$(CONFIG_CAN_SJA1000_PLATFORM) += sja1000_platform.o
obj-$(CONFIG_CAN_TSCAN1) += tscan1.o
diff --git a/drivers/net/can/sja1000/f81601.c b/drivers/net/can/sja1000/f81601.c
new file mode 100644
index 000000000000..8f25e95814ef
--- /dev/null
+++ b/drivers/net/can/sja1000/f81601.c
@@ -0,0 +1,211 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Fintek F81601 PCIE to 2 CAN controller driver
+ *
+ * Copyright (C) 2019 Peter Hong <peter_hong@fintek.com.tw>
+ * Copyright (C) 2019 Linux Foundation
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/netdevice.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/pci.h>
+#include <linux/can/dev.h>
+#include <linux/io.h>
+
+#include "sja1000.h"
+
+#define F81601_PCI_MAX_CHAN 2
+
+#define F81601_DECODE_REG 0x209
+#define F81601_IO_MODE BIT(7)
+#define F81601_MEM_MODE BIT(6)
+#define F81601_CFG_MODE BIT(5)
+#define F81601_CAN2_INTERNAL_CLK BIT(3)
+#define F81601_CAN1_INTERNAL_CLK BIT(2)
+#define F81601_CAN2_EN BIT(1)
+#define F81601_CAN1_EN BIT(0)
+
+#define F81601_TRAP_REG 0x20a
+#define F81601_CAN2_HAS_EN BIT(4)
+
+struct f81601_pci_card {
+ void __iomem *addr;
+ spinlock_t lock; /* use this spin lock only for write access */
+ struct pci_dev *dev;
+ struct net_device *net_dev[F81601_PCI_MAX_CHAN];
+};
+
+static const struct pci_device_id f81601_pci_tbl[] = {
+ { PCI_DEVICE(0x1c29, 0x1703) },
+ { /* sentinel */ },
+};
+
+MODULE_DEVICE_TABLE(pci, f81601_pci_tbl);
+
+static bool internal_clk = true;
+module_param(internal_clk, bool, 0444);
+MODULE_PARM_DESC(internal_clk, "Use internal clock, default true (24MHz)");
+
+static unsigned int external_clk;
+module_param(external_clk, uint, 0444);
+MODULE_PARM_DESC(external_clk, "External clock when internal_clk disabled");
+
+static u8 f81601_pci_read_reg(const struct sja1000_priv *priv, int port)
+{
+ return readb(priv->reg_base + port);
+}
+
+static void f81601_pci_write_reg(const struct sja1000_priv *priv, int port,
+ u8 val)
+{
+ struct f81601_pci_card *card = priv->priv;
+ unsigned long flags;
+
+ spin_lock_irqsave(&card->lock, flags);
+ writeb(val, priv->reg_base + port);
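+ /* dummy read-back, presumably to flush the posted write before unlock */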
+ readb(priv->reg_base);
+ spin_unlock_irqrestore(&card->lock, flags);
+}
+
+static void f81601_pci_remove(struct pci_dev *pdev)
+{
+ struct f81601_pci_card *card = pci_get_drvdata(pdev);
+ struct net_device *dev;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(card->net_dev); i++) {
+ dev = card->net_dev[i];
+ if (!dev)
+ continue;
+
+ dev_info(&pdev->dev, "%s: Removing %s\n", __func__, dev->name);
+
+ unregister_sja1000dev(dev);
+ free_sja1000dev(dev);
+ }
+}
+
+/* Probe the F81601 based device for SJA1000 chips and register each
+ * available CAN channel with the SocketCAN subsystem.
+ */
+static int f81601_pci_probe(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ struct sja1000_priv *priv;
+ struct net_device *dev;
+ struct f81601_pci_card *card;
+ int err, i, count;
+ u8 tmp;
+
+ if (pcim_enable_device(pdev) < 0) {
+ dev_err(&pdev->dev, "Failed to enable PCI device\n");
+ return -ENODEV;
+ }
+
+ dev_info(&pdev->dev, "Detected card at slot #%i\n",
+ PCI_SLOT(pdev->devfn));
+
+ card = devm_kzalloc(&pdev->dev, sizeof(*card), GFP_KERNEL);
+ if (!card)
+ return -ENOMEM;
+
+ card->dev = pdev;
+ spin_lock_init(&card->lock);
+
+ pci_set_drvdata(pdev, card);
+
+ tmp = F81601_IO_MODE | F81601_MEM_MODE | F81601_CFG_MODE |
+ F81601_CAN2_EN | F81601_CAN1_EN;
+
+ if (internal_clk) {
+ tmp |= F81601_CAN2_INTERNAL_CLK | F81601_CAN1_INTERNAL_CLK;
+
+ dev_info(&pdev->dev,
+ "F81601 running with internal clock: 24Mhz\n");
+ } else {
+ dev_info(&pdev->dev,
+ "F81601 running with external clock: %dMhz\n",
+ external_clk / 1000000);
+ }
+
+ pci_write_config_byte(pdev, F81601_DECODE_REG, tmp);
+
+ card->addr = pcim_iomap(pdev, 0, pci_resource_len(pdev, 0));
+
+ if (!card->addr) {
+ err = -ENOMEM;
+ dev_err(&pdev->dev, "%s: Failed to remap BAR\n", __func__);
+ goto failure_cleanup;
+ }
+
+ /* read the CAN2_HW_EN strap pin to detect how many CAN buses we have */
+ count = ARRAY_SIZE(card->net_dev);
+ pci_read_config_byte(pdev, F81601_TRAP_REG, &tmp);
+ if (!(tmp & F81601_CAN2_HAS_EN))
+ count = 1;
+
+ for (i = 0; i < count; i++) {
+ dev = alloc_sja1000dev(0);
+ if (!dev) {
+ err = -ENOMEM;
+ goto failure_cleanup;
+ }
+
+ priv = netdev_priv(dev);
+ priv->priv = card;
+ priv->irq_flags = IRQF_SHARED;
+ priv->reg_base = card->addr + 0x80 * i;
+ priv->read_reg = f81601_pci_read_reg;
+ priv->write_reg = f81601_pci_write_reg;
+
+ if (internal_clk)
+ priv->can.clock.freq = 24000000 / 2;
+ else
+ priv->can.clock.freq = external_clk / 2;
+
+ priv->ocr = OCR_TX0_PUSHPULL | OCR_TX1_PUSHPULL;
+ priv->cdr = CDR_CBP;
+
+ SET_NETDEV_DEV(dev, &pdev->dev);
+ dev->dev_id = i;
+ dev->irq = pdev->irq;
+
+ /* Register SJA1000 device */
+ err = register_sja1000dev(dev);
+ if (err) {
+ dev_err(&pdev->dev,
+ "%s: Registering device failed: %x\n", __func__,
+ err);
+ free_sja1000dev(dev);
+ goto failure_cleanup;
+ }
+
+ card->net_dev[i] = dev;
+ dev_info(&pdev->dev, "Channel #%d, %s at 0x%p, irq %d\n", i,
+ dev->name, priv->reg_base, dev->irq);
+ }
+
+ return 0;
+
+ failure_cleanup:
+ dev_err(&pdev->dev, "%s: failed: %d. Cleaning Up.\n", __func__, err);
+ f81601_pci_remove(pdev);
+
+ return err;
+}
+
+static struct pci_driver f81601_pci_driver = {
+ .name = "f81601",
+ .id_table = f81601_pci_tbl,
+ .probe = f81601_pci_probe,
+ .remove = f81601_pci_remove,
+};
+
+MODULE_DESCRIPTION("Fintek F81601 PCIE to 2 CANBUS adaptor driver");
+MODULE_AUTHOR("Peter Hong <peter_hong@fintek.com.tw>");
+MODULE_LICENSE("GPL v2");
+
+module_pci_driver(f81601_pci_driver);
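Note the clock arithmetic in probe: can.clock.freq is registered as half the oscillator frequency (24000000 / 2 for the internal clock) because, as in the other SJA1000 drivers, the controller derives its CAN time base from fosc / 2. Spelled out (a reading of the code above, not extra driver logic):

    u32 fosc = 24000000;            /* internal oscillator */
    u32 can_clk = fosc / 2;         /* 12 MHz -> priv->can.clock.freq */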
diff --git a/drivers/net/can/sja1000/peak_pci.c b/drivers/net/can/sja1000/peak_pci.c
index 68366d57916c..8c0244f51059 100644
--- a/drivers/net/can/sja1000/peak_pci.c
+++ b/drivers/net/can/sja1000/peak_pci.c
@@ -417,7 +417,7 @@ static void peak_pciec_write_reg(const struct sja1000_priv *priv,
peak_pci_write_reg(priv, port, val);
}
-static struct i2c_algo_bit_data peak_pciec_i2c_bit_ops = {
+static const struct i2c_algo_bit_data peak_pciec_i2c_bit_ops = {
.setsda = pita_setsda,
.setscl = pita_setscl,
.getsda = pita_getsda,
diff --git a/drivers/net/can/spi/hi311x.c b/drivers/net/can/spi/hi311x.c
index 03a711c3221b..73d48c3b8ded 100644
--- a/drivers/net/can/spi/hi311x.c
+++ b/drivers/net/can/spi/hi311x.c
@@ -21,7 +21,6 @@
#include <linux/completion.h>
#include <linux/delay.h>
#include <linux/device.h>
-#include <linux/dma-mapping.h>
#include <linux/freezer.h>
#include <linux/interrupt.h>
#include <linux/io.h>
@@ -126,10 +125,6 @@
#define DEVICE_NAME "hi3110"
-static int hi3110_enable_dma = 1; /* Enable SPI DMA. Default: 1 (On) */
-module_param(hi3110_enable_dma, int, 0444);
-MODULE_PARM_DESC(hi3110_enable_dma, "Enable SPI DMA. Default: 1 (On)");
-
static const struct can_bittiming_const hi3110_bittiming_const = {
.name = DEVICE_NAME,
.tseg1_min = 2,
@@ -156,8 +151,6 @@ struct hi3110_priv {
u8 *spi_tx_buf;
u8 *spi_rx_buf;
- dma_addr_t spi_tx_dma;
- dma_addr_t spi_rx_dma;
struct sk_buff *tx_skb;
int tx_len;
@@ -184,8 +177,7 @@ static void hi3110_clean(struct net_device *net)
if (priv->tx_skb || priv->tx_len)
net->stats.tx_errors++;
- if (priv->tx_skb)
- dev_kfree_skb(priv->tx_skb);
+ dev_kfree_skb(priv->tx_skb);
if (priv->tx_len)
can_free_echo_skb(priv->net, 0);
priv->tx_skb = NULL;
@@ -217,13 +209,6 @@ static int hi3110_spi_trans(struct spi_device *spi, int len)
int ret;
spi_message_init(&m);
-
- if (hi3110_enable_dma) {
- t.tx_dma = priv->spi_tx_dma;
- t.rx_dma = priv->spi_rx_dma;
- m.is_dma_mapped = 1;
- }
-
spi_message_add_tail(&t, &m);
ret = spi_sync(spi, &m);
@@ -915,43 +900,18 @@ static int hi3110_can_probe(struct spi_device *spi)
priv->spi = spi;
mutex_init(&priv->hi3110_lock);
- /* If requested, allocate DMA buffers */
- if (hi3110_enable_dma) {
- spi->dev.coherent_dma_mask = ~0;
-
- /* Minimum coherent DMA allocation is PAGE_SIZE, so allocate
- * that much and share it between Tx and Rx DMA buffers.
- */
- priv->spi_tx_buf = dmam_alloc_coherent(&spi->dev,
- PAGE_SIZE,
- &priv->spi_tx_dma,
- GFP_DMA);
-
- if (priv->spi_tx_buf) {
- priv->spi_rx_buf = (priv->spi_tx_buf + (PAGE_SIZE / 2));
- priv->spi_rx_dma = (dma_addr_t)(priv->spi_tx_dma +
- (PAGE_SIZE / 2));
- } else {
- /* Fall back to non-DMA */
- hi3110_enable_dma = 0;
- }
+ priv->spi_tx_buf = devm_kzalloc(&spi->dev, HI3110_RX_BUF_LEN,
+ GFP_KERNEL);
+ if (!priv->spi_tx_buf) {
+ ret = -ENOMEM;
+ goto error_probe;
}
+ priv->spi_rx_buf = devm_kzalloc(&spi->dev, HI3110_RX_BUF_LEN,
+ GFP_KERNEL);
- /* Allocate non-DMA buffers */
- if (!hi3110_enable_dma) {
- priv->spi_tx_buf = devm_kzalloc(&spi->dev, HI3110_RX_BUF_LEN,
- GFP_KERNEL);
- if (!priv->spi_tx_buf) {
- ret = -ENOMEM;
- goto error_probe;
- }
- priv->spi_rx_buf = devm_kzalloc(&spi->dev, HI3110_RX_BUF_LEN,
- GFP_KERNEL);
-
- if (!priv->spi_rx_buf) {
- ret = -ENOMEM;
- goto error_probe;
- }
+ if (!priv->spi_rx_buf) {
+ ret = -ENOMEM;
+ goto error_probe;
}
SET_NETDEV_DEV(net, &spi->dev);
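The deleted branches pre-mapped coherent DMA buffers and set is_dma_mapped on the message; since the SPI core maps plain kmalloc-style buffers itself whenever the controller wants DMA, devm_kzalloc() buffers are all the driver needs. What remains of the transfer setup, condensed (the spi_transfer initialisation sits outside this hunk and is paraphrased here):

    struct spi_transfer t = {
            .tx_buf = priv->spi_tx_buf,     /* devm_kzalloc'd, DMA-safe */
            .rx_buf = priv->spi_rx_buf,
            .len = len,
    };
    struct spi_message m;

    spi_message_init(&m);
    spi_message_add_tail(&t, &m);
    ret = spi_sync(spi, &m);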
diff --git a/drivers/net/can/spi/mcp251x.c b/drivers/net/can/spi/mcp251x.c
index 12358f06d194..58992fd61cb9 100644
--- a/drivers/net/can/spi/mcp251x.c
+++ b/drivers/net/can/spi/mcp251x.c
@@ -1,6 +1,5 @@
// SPDX-License-Identifier: GPL-2.0-only
-/*
- * CAN bus driver for Microchip 251x/25625 CAN Controller with SPI Interface
+/* CAN bus driver for Microchip 251x/25625 CAN Controller with SPI Interface
*
* MCP2510 support and bug fixes by Christian Pellegrin
* <chripell@evolware.org>
@@ -48,7 +47,6 @@
#include <linux/completion.h>
#include <linux/delay.h>
#include <linux/device.h>
-#include <linux/dma-mapping.h>
#include <linux/freezer.h>
#include <linux/interrupt.h>
#include <linux/io.h>
@@ -75,7 +73,6 @@
#define RTS_TXB2 0x04
#define INSTRUCTION_RTS(n) (0x80 | ((n) & 0x07))
-
/* MPC251x registers */
#define CANSTAT 0x0e
#define CANCTRL 0x0f
@@ -191,8 +188,7 @@
#define SET_BYTE(val, byte) \
(((val) & 0xff) << ((byte) * 8))
-/*
- * Buffer size required for the largest SPI transfer (i.e., reading a
+/* Buffer size required for the largest SPI transfer (i.e., reading a
* frame)
*/
#define CAN_FRAME_MAX_DATA_LEN 8
@@ -205,10 +201,6 @@
#define DEVICE_NAME "mcp251x"
-static int mcp251x_enable_dma; /* Enable SPI DMA. Default: 0 (Off) */
-module_param(mcp251x_enable_dma, int, 0444);
-MODULE_PARM_DESC(mcp251x_enable_dma, "Enable SPI DMA. Default: 0 (Off)");
-
static const struct can_bittiming_const mcp251x_bittiming_const = {
.name = DEVICE_NAME,
.tseg1_min = 3,
@@ -237,8 +229,6 @@ struct mcp251x_priv {
u8 *spi_tx_buf;
u8 *spi_rx_buf;
- dma_addr_t spi_tx_dma;
- dma_addr_t spi_rx_dma;
struct sk_buff *tx_skb;
int tx_len;
@@ -274,16 +264,14 @@ static void mcp251x_clean(struct net_device *net)
if (priv->tx_skb || priv->tx_len)
net->stats.tx_errors++;
- if (priv->tx_skb)
- dev_kfree_skb(priv->tx_skb);
+ dev_kfree_skb(priv->tx_skb);
if (priv->tx_len)
can_free_echo_skb(priv->net, 0);
priv->tx_skb = NULL;
priv->tx_len = 0;
}
-/*
- * Note about handling of error return of mcp251x_spi_trans: accessing
+/* Note about handling of error return of mcp251x_spi_trans: accessing
* registers via SPI is not really different conceptually than using
* normal I/O assembler instructions, although it's much more
* complicated from a practical POV. So it's not advisable to always
@@ -308,13 +296,6 @@ static int mcp251x_spi_trans(struct spi_device *spi, int len)
int ret;
spi_message_init(&m);
-
- if (mcp251x_enable_dma) {
- t.tx_dma = priv->spi_tx_dma;
- t.rx_dma = priv->spi_rx_dma;
- m.is_dma_mapped = 1;
- }
-
spi_message_add_tail(&t, &m);
ret = spi_sync(spi, &m);
@@ -323,7 +304,7 @@ static int mcp251x_spi_trans(struct spi_device *spi, int len)
return ret;
}
-static u8 mcp251x_read_reg(struct spi_device *spi, uint8_t reg)
+static u8 mcp251x_read_reg(struct spi_device *spi, u8 reg)
{
struct mcp251x_priv *priv = spi_get_drvdata(spi);
u8 val = 0;
@@ -337,8 +318,7 @@ static u8 mcp251x_read_reg(struct spi_device *spi, uint8_t reg)
return val;
}
-static void mcp251x_read_2regs(struct spi_device *spi, uint8_t reg,
- uint8_t *v1, uint8_t *v2)
+static void mcp251x_read_2regs(struct spi_device *spi, u8 reg, u8 *v1, u8 *v2)
{
struct mcp251x_priv *priv = spi_get_drvdata(spi);
@@ -351,7 +331,7 @@ static void mcp251x_read_2regs(struct spi_device *spi, uint8_t reg,
*v2 = priv->spi_rx_buf[3];
}
-static void mcp251x_write_reg(struct spi_device *spi, u8 reg, uint8_t val)
+static void mcp251x_write_reg(struct spi_device *spi, u8 reg, u8 val)
{
struct mcp251x_priv *priv = spi_get_drvdata(spi);
@@ -363,7 +343,7 @@ static void mcp251x_write_reg(struct spi_device *spi, u8 reg, uint8_t val)
}
static void mcp251x_write_bits(struct spi_device *spi, u8 reg,
- u8 mask, uint8_t val)
+ u8 mask, u8 val)
{
struct mcp251x_priv *priv = spi_get_drvdata(spi);
@@ -565,8 +545,7 @@ static int mcp251x_set_normal_mode(struct spi_device *spi)
while (mcp251x_read_reg(spi, CANSTAT) & CANCTRL_REQOP_MASK) {
schedule();
if (time_after(jiffies, timeout)) {
- dev_err(&spi->dev, "MCP251x didn't"
- " enter in normal mode\n");
+ dev_err(&spi->dev, "MCP251x didn't enter in normal mode\n");
return -EBUSY;
}
}
@@ -612,7 +591,7 @@ static int mcp251x_setup(struct net_device *net, struct spi_device *spi)
static int mcp251x_hw_reset(struct spi_device *spi)
{
struct mcp251x_priv *priv = spi_get_drvdata(spi);
- u8 reg;
+ unsigned long timeout;
int ret;
/* Wait for oscillator startup timer after power up */
@@ -626,10 +605,19 @@ static int mcp251x_hw_reset(struct spi_device *spi)
/* Wait for oscillator startup timer after reset */
mdelay(MCP251X_OST_DELAY_MS);
- reg = mcp251x_read_reg(spi, CANSTAT);
- if ((reg & CANCTRL_REQOP_MASK) != CANCTRL_REQOP_CONF)
- return -ENODEV;
-
+ /* Wait for reset to finish */
+ timeout = jiffies + HZ;
+ while ((mcp251x_read_reg(spi, CANSTAT) & CANCTRL_REQOP_MASK) !=
+ CANCTRL_REQOP_CONF) {
+ usleep_range(MCP251X_OST_DELAY_MS * 1000,
+ MCP251X_OST_DELAY_MS * 1000 * 2);
+
+ if (time_after(jiffies, timeout)) {
+ dev_err(&spi->dev,
+ "MCP251x didn't enter in conf mode after reset\n");
+ return -EBUSY;
+ }
+ }
return 0;
}
@@ -799,7 +787,8 @@ static irqreturn_t mcp251x_can_ist(int irq, void *dev_id)
* (The MCP2515/25625 does this automatically.)
*/
if (mcp251x_is_2510(spi))
- mcp251x_write_bits(spi, CANINTF, CANINTF_RX0IF, 0x00);
+ mcp251x_write_bits(spi, CANINTF,
+ CANINTF_RX0IF, 0x00);
}
/* receive buffer 1 */
@@ -900,7 +889,6 @@ static irqreturn_t mcp251x_can_ist(int irq, void *dev_id)
}
netif_wake_queue(net);
}
-
}
mutex_unlock(&priv->mcp_lock);
return IRQ_HANDLED;
@@ -910,7 +898,7 @@ static int mcp251x_open(struct net_device *net)
{
struct mcp251x_priv *priv = netdev_priv(net);
struct spi_device *spi = priv->spi;
- unsigned long flags = IRQF_ONESHOT | IRQF_TRIGGER_FALLING;
+ unsigned long flags = 0;
int ret;
ret = open_candev(net);
@@ -926,8 +914,12 @@ static int mcp251x_open(struct net_device *net)
priv->tx_skb = NULL;
priv->tx_len = 0;
+ if (!spi->dev.of_node)
+ flags = IRQF_TRIGGER_FALLING;
+
ret = request_threaded_irq(spi->irq, NULL, mcp251x_can_ist,
- flags | IRQF_ONESHOT, DEVICE_NAME, priv);
+ flags | IRQF_ONESHOT, dev_name(&spi->dev),
+ priv);
if (ret) {
dev_err(&spi->dev, "failed to acquire irq %d\n", spi->irq);
goto out_close;
@@ -1090,43 +1082,18 @@ static int mcp251x_can_probe(struct spi_device *spi)
priv->spi = spi;
mutex_init(&priv->mcp_lock);
- /* If requested, allocate DMA buffers */
- if (mcp251x_enable_dma) {
- spi->dev.coherent_dma_mask = ~0;
-
- /*
- * Minimum coherent DMA allocation is PAGE_SIZE, so allocate
- * that much and share it between Tx and Rx DMA buffers.
- */
- priv->spi_tx_buf = dmam_alloc_coherent(&spi->dev,
- PAGE_SIZE,
- &priv->spi_tx_dma,
- GFP_DMA);
-
- if (priv->spi_tx_buf) {
- priv->spi_rx_buf = (priv->spi_tx_buf + (PAGE_SIZE / 2));
- priv->spi_rx_dma = (dma_addr_t)(priv->spi_tx_dma +
- (PAGE_SIZE / 2));
- } else {
- /* Fall back to non-DMA */
- mcp251x_enable_dma = 0;
- }
+ priv->spi_tx_buf = devm_kzalloc(&spi->dev, SPI_TRANSFER_BUF_LEN,
+ GFP_KERNEL);
+ if (!priv->spi_tx_buf) {
+ ret = -ENOMEM;
+ goto error_probe;
}
- /* Allocate non-DMA buffers */
- if (!mcp251x_enable_dma) {
- priv->spi_tx_buf = devm_kzalloc(&spi->dev, SPI_TRANSFER_BUF_LEN,
- GFP_KERNEL);
- if (!priv->spi_tx_buf) {
- ret = -ENOMEM;
- goto error_probe;
- }
- priv->spi_rx_buf = devm_kzalloc(&spi->dev, SPI_TRANSFER_BUF_LEN,
- GFP_KERNEL);
- if (!priv->spi_rx_buf) {
- ret = -ENOMEM;
- goto error_probe;
- }
+ priv->spi_rx_buf = devm_kzalloc(&spi->dev, SPI_TRANSFER_BUF_LEN,
+ GFP_KERNEL);
+ if (!priv->spi_rx_buf) {
+ ret = -ENOMEM;
+ goto error_probe;
}
SET_NETDEV_DEV(net, &spi->dev);
@@ -1135,7 +1102,8 @@ static int mcp251x_can_probe(struct spi_device *spi)
ret = mcp251x_hw_probe(spi);
if (ret) {
if (ret == -ENODEV)
- dev_err(&spi->dev, "Cannot initialize MCP%x. Wrong wiring?\n", priv->model);
+ dev_err(&spi->dev, "Cannot initialize MCP%x. Wrong wiring?\n",
+ priv->model);
goto error_probe;
}
@@ -1189,8 +1157,7 @@ static int __maybe_unused mcp251x_can_suspend(struct device *dev)
priv->force_quit = 1;
disable_irq(spi->irq);
- /*
- * Note: at this point neither IST nor workqueues are running.
+ /* Note: at this point neither IST nor workqueues are running.
* open/stop cannot be called anyway so locking is not needed
*/
if (netif_running(net)) {
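One detail from the open() change above deserves a note: with NULL as the primary handler, request_threaded_irq() runs mcp251x_can_ist() in a dedicated kernel thread, and IRQF_ONESHOT is then mandatory so the line stays masked until the thread finishes; the falling-edge trigger is only forced when there is no DT node, because DT setups take the trigger type from the interrupts property. The call, reduced to its shape (DT case, flags == 0):

    ret = request_threaded_irq(spi->irq,
                               NULL,                    /* no hard-IRQ handler */
                               mcp251x_can_ist,         /* threaded handler */
                               IRQF_ONESHOT,            /* masked until thread is done */
                               dev_name(&spi->dev), priv);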
diff --git a/drivers/net/can/sun4i_can.c b/drivers/net/can/sun4i_can.c
index 093fc9a529f0..f4cd88196404 100644
--- a/drivers/net/can/sun4i_can.c
+++ b/drivers/net/can/sun4i_can.c
@@ -787,7 +787,6 @@ static int sun4ican_probe(struct platform_device *pdev)
irq = platform_get_irq(pdev, 0);
if (irq < 0) {
- dev_err(&pdev->dev, "could not get a valid irq\n");
err = -ENODEV;
goto exit;
}
diff --git a/drivers/net/can/ti_hecc.c b/drivers/net/can/ti_hecc.c
index db6ea936dc3f..f8b19eef5d26 100644
--- a/drivers/net/can/ti_hecc.c
+++ b/drivers/net/can/ti_hecc.c
@@ -5,6 +5,7 @@
* specs for the same is available at <http://www.ti.com>
*
* Copyright (C) 2009 Texas Instruments Incorporated - http://www.ti.com/
+ * Copyright (C) 2019 Jeroen Hofstee <jhofstee@victronenergy.com>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
@@ -34,6 +35,7 @@
#include <linux/can/dev.h>
#include <linux/can/error.h>
#include <linux/can/led.h>
+#include <linux/can/rx-offload.h>
#define DRV_NAME "ti_hecc"
#define HECC_MODULE_VERSION "0.7"
@@ -44,8 +46,7 @@ MODULE_VERSION(HECC_MODULE_VERSION);
#define HECC_MAX_MAILBOXES 32 /* hardware mailboxes - do not change */
#define MAX_TX_PRIO 0x3F /* hardware value - do not change */
-/*
- * Important Note: TX mailbox configuration
+/* Important Note: TX mailbox configuration
* TX mailboxes should be restricted to the number of SKB buffers to avoid
* maintaining SKB buffers separately. TX mailboxes should be a power of 2
* for the mailbox logic to work. Top mailbox numbers are reserved for RX
@@ -63,29 +64,15 @@ MODULE_VERSION(HECC_MODULE_VERSION);
#define HECC_TX_PRIO_MASK (MAX_TX_PRIO << HECC_MB_TX_SHIFT)
#define HECC_TX_MB_MASK (HECC_MAX_TX_MBOX - 1)
#define HECC_TX_MASK ((HECC_MAX_TX_MBOX - 1) | HECC_TX_PRIO_MASK)
-#define HECC_TX_MBOX_MASK (~(BIT(HECC_MAX_TX_MBOX) - 1))
-#define HECC_DEF_NAPI_WEIGHT HECC_MAX_RX_MBOX
-/*
- * Important Note: RX mailbox configuration
- * RX mailboxes are further logically split into two - main and buffer
- * mailboxes. The goal is to get all packets into main mailboxes as
- * driven by mailbox number and receive priority (higher to lower) and
- * buffer mailboxes are used to receive pkts while main mailboxes are being
- * processed. This ensures in-order packet reception.
+/* RX mailbox configuration
*
- * Here are the recommended values for buffer mailbox. Note that RX mailboxes
- * start after TX mailboxes:
- *
- * HECC_MAX_RX_MBOX HECC_RX_BUFFER_MBOX No of buffer mailboxes
- * 28 12 8
- * 16 20 4
+ * The remaining mailboxes are used for reception; received frames
+ * are delivered in timestamp order, to avoid a hardware race when
+ * CANME is changed while CAN-bus traffic is being received.
*/
-
#define HECC_MAX_RX_MBOX (HECC_MAX_MAILBOXES - HECC_MAX_TX_MBOX)
-#define HECC_RX_BUFFER_MBOX 12 /* as per table above */
#define HECC_RX_FIRST_MBOX (HECC_MAX_MAILBOXES - 1)
-#define HECC_RX_HIGH_MBOX_MASK (~(BIT(HECC_RX_BUFFER_MBOX) - 1))
/* TI HECC module registers */
#define HECC_CANME 0x0 /* Mailbox enable */
@@ -117,6 +104,9 @@ MODULE_VERSION(HECC_MODULE_VERSION);
#define HECC_CANTIOCE 0x68 /* SCC only:Enhanced TX I/O control */
#define HECC_CANRIOCE 0x6C /* SCC only:Enhanced RX I/O control */
+/* TI HECC RAM registers */
+#define HECC_CANMOTS 0x80 /* Message object time stamp */
+
/* Mailbox registers */
#define HECC_CANMID 0x0
#define HECC_CANMCF 0x4
@@ -193,7 +183,7 @@ static const struct can_bittiming_const ti_hecc_bittiming_const = {
struct ti_hecc_priv {
struct can_priv can; /* MUST be first member/field */
- struct napi_struct napi;
+ struct can_rx_offload offload;
struct net_device *ndev;
struct clk *clk;
void __iomem *base;
@@ -203,7 +193,6 @@ struct ti_hecc_priv {
spinlock_t mbx_lock; /* CANME register needs protection */
u32 tx_head;
u32 tx_tail;
- u32 rx_next;
struct regulator *reg_xceiver;
};
@@ -227,8 +216,13 @@ static inline void hecc_write_lam(struct ti_hecc_priv *priv, u32 mbxno, u32 val)
__raw_writel(val, priv->hecc_ram + mbxno * 4);
}
+static inline u32 hecc_read_stamp(struct ti_hecc_priv *priv, u32 mbxno)
+{
+ return __raw_readl(priv->hecc_ram + HECC_CANMOTS + mbxno * 4);
+}
+
static inline void hecc_write_mbx(struct ti_hecc_priv *priv, u32 mbxno,
- u32 reg, u32 val)
+ u32 reg, u32 val)
{
__raw_writel(val, priv->mbx + mbxno * 0x10 + reg);
}
@@ -249,13 +243,13 @@ static inline u32 hecc_read(struct ti_hecc_priv *priv, int reg)
}
static inline void hecc_set_bit(struct ti_hecc_priv *priv, int reg,
- u32 bit_mask)
+ u32 bit_mask)
{
hecc_write(priv, reg, hecc_read(priv, reg) | bit_mask);
}
static inline void hecc_clear_bit(struct ti_hecc_priv *priv, int reg,
- u32 bit_mask)
+ u32 bit_mask)
{
hecc_write(priv, reg, hecc_read(priv, reg) & ~bit_mask);
}
@@ -277,8 +271,8 @@ static int ti_hecc_set_btc(struct ti_hecc_priv *priv)
if (bit_timing->brp > 4)
can_btc |= HECC_CANBTC_SAM;
else
- netdev_warn(priv->ndev, "WARN: Triple"
- "sampling not set due to h/w limitations");
+ netdev_warn(priv->ndev,
+ "WARN: Triple sampling not set due to h/w limitations");
}
can_btc |= ((bit_timing->sjw - 1) & 0x3) << 8;
can_btc |= ((bit_timing->brp - 1) & 0xFF) << 16;
@@ -314,8 +308,7 @@ static void ti_hecc_reset(struct net_device *ndev)
/* Set change control request and wait till enabled */
hecc_set_bit(priv, HECC_CANMC, HECC_CANMC_CCR);
- /*
- * INFO: It has been observed that at times CCE bit may not be
+ /* INFO: It has been observed that at times CCE bit may not be
* set and hw seems to be ok even if this bit is not set so
* timing out with a timing of 1ms to respect the specs
*/
@@ -325,8 +318,7 @@ static void ti_hecc_reset(struct net_device *ndev)
udelay(10);
}
- /*
- * Note: On HECC, BTC can be programmed only in initialization mode, so
+ /* Note: On HECC, BTC can be programmed only in initialization mode, so
* it is expected that the can bittiming parameters are set via ip
* utility before the device is opened
*/
@@ -335,13 +327,11 @@ static void ti_hecc_reset(struct net_device *ndev)
/* Clear CCR (and CANMC register) and wait for CCE = 0 enable */
hecc_write(priv, HECC_CANMC, 0);
- /*
- * INFO: CAN net stack handles bus off and hence disabling auto-bus-on
+ /* INFO: CAN net stack handles bus off and hence disabling auto-bus-on
* hecc_set_bit(priv, HECC_CANMC, HECC_CANMC_ABO);
*/
- /*
- * INFO: It has been observed that at times CCE bit may not be
+ /* INFO: It has been observed that at times CCE bit may not be
* set and hw seems to be ok even if this bit is not set so
*/
cnt = HECC_CCE_WAIT_COUNT;
@@ -374,8 +364,8 @@ static void ti_hecc_start(struct net_device *ndev)
/* put HECC in initialization mode and set btc */
ti_hecc_reset(ndev);
- priv->tx_head = priv->tx_tail = HECC_TX_MASK;
- priv->rx_next = HECC_RX_FIRST_MBOX;
+ priv->tx_head = HECC_TX_MASK;
+ priv->tx_tail = HECC_TX_MASK;
/* Enable local and global acceptance mask registers */
hecc_write(priv, HECC_CANGAM, HECC_SET_REG);
@@ -401,7 +391,7 @@ static void ti_hecc_start(struct net_device *ndev)
} else {
hecc_write(priv, HECC_CANMIL, 0);
hecc_write(priv, HECC_CANGIM,
- HECC_CANGIM_DEF_MASK | HECC_CANGIM_I0EN);
+ HECC_CANGIM_DEF_MASK | HECC_CANGIM_I0EN);
}
priv->can.state = CAN_STATE_ERROR_ACTIVE;
}
@@ -435,7 +425,7 @@ static int ti_hecc_do_set_mode(struct net_device *ndev, enum can_mode mode)
}
static int ti_hecc_get_berr_counter(const struct net_device *ndev,
- struct can_berr_counter *bec)
+ struct can_berr_counter *bec)
{
struct ti_hecc_priv *priv = netdev_priv(ndev);
@@ -445,8 +435,7 @@ static int ti_hecc_get_berr_counter(const struct net_device *ndev,
return 0;
}
-/*
- * ti_hecc_xmit: HECC Transmit
+/* ti_hecc_xmit: HECC Transmit
*
* The transmit mailboxes start from 0 to HECC_MAX_TX_MBOX. In HECC the
* priority of the mailbox for transmission is dependent upon priority setting
@@ -484,8 +473,8 @@ static netdev_tx_t ti_hecc_xmit(struct sk_buff *skb, struct net_device *ndev)
spin_unlock_irqrestore(&priv->mbx_lock, flags);
netif_stop_queue(ndev);
netdev_err(priv->ndev,
- "BUG: TX mbx not ready tx_head=%08X, tx_tail=%08X\n",
- priv->tx_head, priv->tx_tail);
+ "BUG: TX mbx not ready tx_head=%08X, tx_tail=%08X\n",
+ priv->tx_head, priv->tx_tail);
return NETDEV_TX_BUSY;
}
spin_unlock_irqrestore(&priv->mbx_lock, flags);
@@ -502,10 +491,10 @@ static netdev_tx_t ti_hecc_xmit(struct sk_buff *skb, struct net_device *ndev)
data = (cf->can_id & CAN_SFF_MASK) << 18;
hecc_write_mbx(priv, mbxno, HECC_CANMID, data);
hecc_write_mbx(priv, mbxno, HECC_CANMDL,
- be32_to_cpu(*(__be32 *)(cf->data)));
+ be32_to_cpu(*(__be32 *)(cf->data)));
if (cf->can_dlc > 4)
hecc_write_mbx(priv, mbxno, HECC_CANMDH,
- be32_to_cpu(*(__be32 *)(cf->data + 4)));
+ be32_to_cpu(*(__be32 *)(cf->data + 4)));
else
*(u32 *)(cf->data + 4) = 0;
can_put_echo_skb(skb, ndev, mbxno);
@@ -513,7 +502,7 @@ static netdev_tx_t ti_hecc_xmit(struct sk_buff *skb, struct net_device *ndev)
spin_lock_irqsave(&priv->mbx_lock, flags);
--priv->tx_head;
if ((hecc_read(priv, HECC_CANME) & BIT(get_tx_head_mb(priv))) ||
- (priv->tx_head & HECC_TX_MASK) == HECC_TX_MASK) {
+ (priv->tx_head & HECC_TX_MASK) == HECC_TX_MASK) {
netif_stop_queue(ndev);
}
hecc_set_bit(priv, HECC_CANME, mbx_mask);
@@ -526,139 +515,57 @@ static netdev_tx_t ti_hecc_xmit(struct sk_buff *skb, struct net_device *ndev)
return NETDEV_TX_OK;
}
-static int ti_hecc_rx_pkt(struct ti_hecc_priv *priv, int mbxno)
+static inline
+struct ti_hecc_priv *rx_offload_to_priv(struct can_rx_offload *offload)
{
- struct net_device_stats *stats = &priv->ndev->stats;
- struct can_frame *cf;
- struct sk_buff *skb;
- u32 data, mbx_mask;
- unsigned long flags;
+ return container_of(offload, struct ti_hecc_priv, offload);
+}
- skb = alloc_can_skb(priv->ndev, &cf);
- if (!skb) {
- if (printk_ratelimit())
- netdev_err(priv->ndev,
- "ti_hecc_rx_pkt: alloc_can_skb() failed\n");
- return -ENOMEM;
- }
+static unsigned int ti_hecc_mailbox_read(struct can_rx_offload *offload,
+ struct can_frame *cf,
+ u32 *timestamp, unsigned int mbxno)
+{
+ struct ti_hecc_priv *priv = rx_offload_to_priv(offload);
+ u32 data;
- mbx_mask = BIT(mbxno);
data = hecc_read_mbx(priv, mbxno, HECC_CANMID);
if (data & HECC_CANMID_IDE)
cf->can_id = (data & CAN_EFF_MASK) | CAN_EFF_FLAG;
else
cf->can_id = (data >> 18) & CAN_SFF_MASK;
+
data = hecc_read_mbx(priv, mbxno, HECC_CANMCF);
if (data & HECC_CANMCF_RTR)
cf->can_id |= CAN_RTR_FLAG;
cf->can_dlc = get_can_dlc(data & 0xF);
+
data = hecc_read_mbx(priv, mbxno, HECC_CANMDL);
*(__be32 *)(cf->data) = cpu_to_be32(data);
if (cf->can_dlc > 4) {
data = hecc_read_mbx(priv, mbxno, HECC_CANMDH);
*(__be32 *)(cf->data + 4) = cpu_to_be32(data);
}
- spin_lock_irqsave(&priv->mbx_lock, flags);
- hecc_clear_bit(priv, HECC_CANME, mbx_mask);
- hecc_write(priv, HECC_CANRMP, mbx_mask);
- /* enable mailbox only if it is part of rx buffer mailboxes */
- if (priv->rx_next < HECC_RX_BUFFER_MBOX)
- hecc_set_bit(priv, HECC_CANME, mbx_mask);
- spin_unlock_irqrestore(&priv->mbx_lock, flags);
- stats->rx_bytes += cf->can_dlc;
- can_led_event(priv->ndev, CAN_LED_EVENT_RX);
- netif_receive_skb(skb);
- stats->rx_packets++;
+ *timestamp = hecc_read_stamp(priv, mbxno);
- return 0;
-}
-
-/*
- * ti_hecc_rx_poll - HECC receive pkts
- *
- * The receive mailboxes start from highest numbered mailbox till last xmit
- * mailbox. On CAN frame reception the hardware places the data into highest
- * numbered mailbox that matches the CAN ID filter. Since all receive mailboxes
- * have same filtering (ALL CAN frames) packets will arrive in the highest
- * available RX mailbox and we need to ensure in-order packet reception.
- *
- * To ensure the packets are received in the right order we logically divide
- * the RX mailboxes into main and buffer mailboxes. Packets are received as per
- * mailbox priotity (higher to lower) in the main bank and once it is full we
- * disable further reception into main mailboxes. While the main mailboxes are
- * processed in NAPI, further packets are received in buffer mailboxes.
- *
- * We maintain a RX next mailbox counter to process packets and once all main
- * mailboxe packets are passed to the upper stack we enable all of them but
- * continue to process packets received in buffer mailboxes. With each packet
- * received from buffer mailbox we enable it immediately so as to handle the
- * overflow from higher mailboxes.
- */
-static int ti_hecc_rx_poll(struct napi_struct *napi, int quota)
-{
- struct net_device *ndev = napi->dev;
- struct ti_hecc_priv *priv = netdev_priv(ndev);
- u32 num_pkts = 0;
- u32 mbx_mask;
- unsigned long pending_pkts, flags;
-
- if (!netif_running(ndev))
- return 0;
-
- while ((pending_pkts = hecc_read(priv, HECC_CANRMP)) &&
- num_pkts < quota) {
- mbx_mask = BIT(priv->rx_next); /* next rx mailbox to process */
- if (mbx_mask & pending_pkts) {
- if (ti_hecc_rx_pkt(priv, priv->rx_next) < 0)
- return num_pkts;
- ++num_pkts;
- } else if (priv->rx_next > HECC_RX_BUFFER_MBOX) {
- break; /* pkt not received yet */
- }
- --priv->rx_next;
- if (priv->rx_next == HECC_RX_BUFFER_MBOX) {
- /* enable high bank mailboxes */
- spin_lock_irqsave(&priv->mbx_lock, flags);
- mbx_mask = hecc_read(priv, HECC_CANME);
- mbx_mask |= HECC_RX_HIGH_MBOX_MASK;
- hecc_write(priv, HECC_CANME, mbx_mask);
- spin_unlock_irqrestore(&priv->mbx_lock, flags);
- } else if (priv->rx_next == HECC_MAX_TX_MBOX - 1) {
- priv->rx_next = HECC_RX_FIRST_MBOX;
- break;
- }
- }
-
- /* Enable packet interrupt if all pkts are handled */
- if (hecc_read(priv, HECC_CANRMP) == 0) {
- napi_complete(napi);
- /* Re-enable RX mailbox interrupts */
- mbx_mask = hecc_read(priv, HECC_CANMIM);
- mbx_mask |= HECC_TX_MBOX_MASK;
- hecc_write(priv, HECC_CANMIM, mbx_mask);
- } else {
- /* repoll is done only if whole budget is used */
- num_pkts = quota;
- }
-
- return num_pkts;
+ return 1;
}
static int ti_hecc_error(struct net_device *ndev, int int_status,
- int err_status)
+ int err_status)
{
struct ti_hecc_priv *priv = netdev_priv(ndev);
- struct net_device_stats *stats = &ndev->stats;
struct can_frame *cf;
struct sk_buff *skb;
+ u32 timestamp;
/* propagate the error condition to the can stack */
skb = alloc_can_err_skb(ndev, &cf);
if (!skb) {
if (printk_ratelimit())
netdev_err(priv->ndev,
- "ti_hecc_error: alloc_can_err_skb() failed\n");
+ "%s: alloc_can_err_skb() failed\n",
+ __func__);
return -ENOMEM;
}
@@ -692,8 +599,7 @@ static int ti_hecc_error(struct net_device *ndev, int int_status,
hecc_clear_bit(priv, HECC_CANMC, HECC_CANMC_CCR);
}
- /*
- * Need to check busoff condition in error status register too to
+ /* Need to check busoff condition in error status register too to
* ensure warning interrupts don't hog the system
*/
if ((int_status & HECC_CANGIF_BOIF) || (err_status & HECC_CANES_BO)) {
@@ -732,9 +638,8 @@ static int ti_hecc_error(struct net_device *ndev, int int_status,
}
}
- stats->rx_packets++;
- stats->rx_bytes += cf->can_dlc;
- netif_rx(skb);
+ timestamp = hecc_read(priv, HECC_CANLNT);
+ can_rx_offload_queue_sorted(&priv->offload, skb, timestamp);
return 0;
}
@@ -744,19 +649,20 @@ static irqreturn_t ti_hecc_interrupt(int irq, void *dev_id)
struct net_device *ndev = (struct net_device *)dev_id;
struct ti_hecc_priv *priv = netdev_priv(ndev);
struct net_device_stats *stats = &ndev->stats;
- u32 mbxno, mbx_mask, int_status, err_status;
- unsigned long ack, flags;
+ u32 mbxno, mbx_mask, int_status, err_status, stamp;
+ unsigned long flags, rx_pending;
int_status = hecc_read(priv,
- (priv->use_hecc1int) ? HECC_CANGIF1 : HECC_CANGIF0);
+ priv->use_hecc1int ?
+ HECC_CANGIF1 : HECC_CANGIF0);
if (!int_status)
return IRQ_NONE;
err_status = hecc_read(priv, HECC_CANES);
if (err_status & (HECC_BUS_ERROR | HECC_CANES_BO |
- HECC_CANES_EP | HECC_CANES_EW))
- ti_hecc_error(ndev, int_status, err_status);
+ HECC_CANES_EP | HECC_CANES_EW))
+ ti_hecc_error(ndev, int_status, err_status);
if (int_status & HECC_CANGIF_GMIF) {
while (priv->tx_tail - priv->tx_head > 0) {
@@ -769,27 +675,27 @@ static irqreturn_t ti_hecc_interrupt(int irq, void *dev_id)
spin_lock_irqsave(&priv->mbx_lock, flags);
hecc_clear_bit(priv, HECC_CANME, mbx_mask);
spin_unlock_irqrestore(&priv->mbx_lock, flags);
- stats->tx_bytes += hecc_read_mbx(priv, mbxno,
- HECC_CANMCF) & 0xF;
+ stamp = hecc_read_stamp(priv, mbxno);
+ stats->tx_bytes +=
+ can_rx_offload_get_echo_skb(&priv->offload,
+ mbxno, stamp);
stats->tx_packets++;
can_led_event(ndev, CAN_LED_EVENT_TX);
- can_get_echo_skb(ndev, mbxno);
--priv->tx_tail;
}
/* restart queue if wrap-up or if queue stalled on last pkt */
- if (((priv->tx_head == priv->tx_tail) &&
- ((priv->tx_head & HECC_TX_MASK) != HECC_TX_MASK)) ||
- (((priv->tx_tail & HECC_TX_MASK) == HECC_TX_MASK) &&
- ((priv->tx_head & HECC_TX_MASK) == HECC_TX_MASK)))
+ if ((priv->tx_head == priv->tx_tail &&
+ ((priv->tx_head & HECC_TX_MASK) != HECC_TX_MASK)) ||
+ (((priv->tx_tail & HECC_TX_MASK) == HECC_TX_MASK) &&
+ ((priv->tx_head & HECC_TX_MASK) == HECC_TX_MASK)))
netif_wake_queue(ndev);
- /* Disable RX mailbox interrupts and let NAPI reenable them */
- if (hecc_read(priv, HECC_CANRMP)) {
- ack = hecc_read(priv, HECC_CANMIM);
- ack &= BIT(HECC_MAX_TX_MBOX) - 1;
- hecc_write(priv, HECC_CANMIM, ack);
- napi_schedule(&priv->napi);
+ /* offload RX mailboxes and let NAPI deliver them */
+ while ((rx_pending = hecc_read(priv, HECC_CANRMP))) {
+ can_rx_offload_irq_offload_timestamp(&priv->offload,
+ rx_pending);
+ hecc_write(priv, HECC_CANRMP, rx_pending);
}
}
@@ -811,7 +717,7 @@ static int ti_hecc_open(struct net_device *ndev)
int err;
err = request_irq(ndev->irq, ti_hecc_interrupt, IRQF_SHARED,
- ndev->name, ndev);
+ ndev->name, ndev);
if (err) {
netdev_err(ndev, "error requesting interrupt\n");
return err;
@@ -831,7 +737,7 @@ static int ti_hecc_open(struct net_device *ndev)
can_led_event(ndev, CAN_LED_EVENT_OPEN);
ti_hecc_start(ndev);
- napi_enable(&priv->napi);
+ can_rx_offload_enable(&priv->offload);
netif_start_queue(ndev);
return 0;
@@ -842,7 +748,7 @@ static int ti_hecc_close(struct net_device *ndev)
struct ti_hecc_priv *priv = netdev_priv(ndev);
netif_stop_queue(ndev);
- napi_disable(&priv->napi);
+ can_rx_offload_disable(&priv->offload);
ti_hecc_stop(ndev);
free_irq(ndev->irq, ndev);
close_candev(ndev);
@@ -962,8 +868,6 @@ static int ti_hecc_probe(struct platform_device *pdev)
goto probe_exit_candev;
}
priv->can.clock.freq = clk_get_rate(priv->clk);
- netif_napi_add(ndev, &priv->napi, ti_hecc_rx_poll,
- HECC_DEF_NAPI_WEIGHT);
err = clk_prepare_enable(priv->clk);
if (err) {
@@ -971,19 +875,30 @@ static int ti_hecc_probe(struct platform_device *pdev)
goto probe_exit_clk;
}
+ priv->offload.mailbox_read = ti_hecc_mailbox_read;
+ priv->offload.mb_first = HECC_RX_FIRST_MBOX;
+ priv->offload.mb_last = HECC_MAX_TX_MBOX;
+ err = can_rx_offload_add_timestamp(ndev, &priv->offload);
+ if (err) {
+ dev_err(&pdev->dev, "can_rx_offload_add_timestamp() failed\n");
+ goto probe_exit_clk;
+ }
+
err = register_candev(ndev);
if (err) {
dev_err(&pdev->dev, "register_candev() failed\n");
- goto probe_exit_clk;
+ goto probe_exit_offload;
}
devm_can_led_init(ndev);
dev_info(&pdev->dev, "device registered (reg_base=%p, irq=%u)\n",
- priv->base, (u32) ndev->irq);
+ priv->base, (u32)ndev->irq);
return 0;
+probe_exit_offload:
+ can_rx_offload_del(&priv->offload);
probe_exit_clk:
clk_put(priv->clk);
probe_exit_candev:
@@ -1000,6 +915,7 @@ static int ti_hecc_remove(struct platform_device *pdev)
unregister_candev(ndev);
clk_disable_unprepare(priv->clk);
clk_put(priv->clk);
+ can_rx_offload_del(&priv->offload);
free_candev(ndev);
return 0;
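Two consequences of the rx-offload conversion are worth making explicit. Reception is now pull-based: the IRQ handler feeds the CANRMP pending mask to can_rx_offload_irq_offload_timestamp(), which walks the mailboxes from mb_first toward mb_last, invokes ti_hecc_mailbox_read() for each set bit and queues the frames to NAPI sorted by the returned timestamp. TX completions join the same sorted stream through can_rx_offload_get_echo_skb(), so echoed and received frames reach the stack in one consistent time order. Condensed view of both paths (identifiers from the hunks above, control flow abridged):

    /* RX: hand everything pending to the offload core */
    while ((rx_pending = hecc_read(priv, HECC_CANRMP))) {
            can_rx_offload_irq_offload_timestamp(&priv->offload, rx_pending);
            hecc_write(priv, HECC_CANRMP, rx_pending);      /* ack mailboxes */
    }

    /* TX-complete: queue the echo skb with the mailbox timestamp */
    stamp = hecc_read_stamp(priv, mbxno);
    stats->tx_bytes += can_rx_offload_get_echo_skb(&priv->offload, mbxno, stamp);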
diff --git a/drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c b/drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c
index c89c7d4900d7..0f1d3e807d63 100644
--- a/drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c
+++ b/drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c
@@ -643,8 +643,7 @@ static int kvaser_usb_init_one(struct kvaser_usb *dev,
return err;
}
- netdev = alloc_candev(sizeof(*priv) +
- dev->max_tx_urbs * sizeof(*priv->tx_contexts),
+ netdev = alloc_candev(struct_size(priv, tx_contexts, dev->max_tx_urbs),
dev->max_tx_urbs);
if (!netdev) {
dev_err(&dev->intf->dev, "Cannot alloc candev\n");
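struct_size() from <linux/overflow.h> computes the same size as the removed open-coded expression, but saturates instead of wrapping if the multiply-add overflows, which matters when the element count (max_tx_urbs) comes from the device. The equivalence, spelled out with the names from the hunk:

    /* removed form */
    size_t old_sz = sizeof(*priv) + dev->max_tx_urbs * sizeof(*priv->tx_contexts);

    /* added form, overflow-checked */
    size_t new_sz = struct_size(priv, tx_contexts, dev->max_tx_urbs);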
diff --git a/drivers/net/can/vcan.c b/drivers/net/can/vcan.c
index d200a5b0651c..daf27133887b 100644
--- a/drivers/net/can/vcan.c
+++ b/drivers/net/can/vcan.c
@@ -1,5 +1,4 @@
-/*
- * vcan.c - Virtual CAN interface
+/* vcan.c - Virtual CAN interface
*
* Copyright (c) 2002-2017 Volkswagen Group Electronic Research
* All rights reserved.
@@ -39,6 +38,8 @@
*
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/module.h>
#include <linux/init.h>
#include <linux/netdevice.h>
@@ -57,9 +58,7 @@ MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Urs Thuermann <urs.thuermann@volkswagen.de>");
MODULE_ALIAS_RTNL_LINK(DRV_NAME);
-
-/*
- * CAN test feature:
+/* CAN test feature:
* Enable the echo on driver level for testing the CAN core echo modes.
* See Documentation/networking/can.rst for details.
*/
@@ -68,7 +67,6 @@ static bool echo; /* echo testing. Default: 0 (Off) */
module_param(echo, bool, 0444);
MODULE_PARM_DESC(echo, "Echo sent frames (for testing). Default: 0 (Off)");
-
static void vcan_rx(struct sk_buff *skb, struct net_device *dev)
{
struct canfd_frame *cfd = (struct canfd_frame *)skb->data;
@@ -101,10 +99,8 @@ static netdev_tx_t vcan_tx(struct sk_buff *skb, struct net_device *dev)
if (!echo) {
/* no echo handling available inside this driver */
-
if (loop) {
- /*
- * only count the packets here, because the
+ /* only count the packets here, because the
* CAN core already did the echo for us
*/
stats->rx_packets++;
@@ -117,7 +113,6 @@ static netdev_tx_t vcan_tx(struct sk_buff *skb, struct net_device *dev)
/* perform standard echo handling for CAN network interfaces */
if (loop) {
-
skb = can_create_echo_skb(skb);
if (!skb)
return NETDEV_TX_OK;
@@ -173,10 +168,10 @@ static struct rtnl_link_ops vcan_link_ops __read_mostly = {
static __init int vcan_init_module(void)
{
- pr_info("vcan: Virtual CAN interface driver\n");
+ pr_info("Virtual CAN interface driver\n");
if (echo)
- printk(KERN_INFO "vcan: enabled echo on driver level.\n");
+ pr_info("enabled echo on driver level.\n");
return rtnl_link_register(&vcan_link_ops);
}
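The pr_fmt() define added at the top of the file is what lets the messages above drop their hand-written "vcan: " prefixes: every pr_* macro pastes pr_fmt() around its format string at compile time. Roughly how the first message now expands (KBUILD_MODNAME is "vcan" here):

    pr_info("Virtual CAN interface driver\n");
    /* becomes */
    printk(KERN_INFO "vcan" ": " "Virtual CAN interface driver\n");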
diff --git a/drivers/net/can/xilinx_can.c b/drivers/net/can/xilinx_can.c
index 63203ff452b5..911b34316c9d 100644
--- a/drivers/net/can/xilinx_can.c
+++ b/drivers/net/can/xilinx_can.c
@@ -50,6 +50,10 @@ enum xcan_reg {
XCAN_AFR_OFFSET = 0x60, /* Acceptance Filter */
/* only on CAN FD cores */
+ XCAN_F_BRPR_OFFSET = 0x088, /* Data Phase Baud Rate
+ * Prescaler
+ */
+ XCAN_F_BTR_OFFSET = 0x08C, /* Data Phase Bit Timing */
XCAN_TRR_OFFSET = 0x0090, /* TX Buffer Ready Request */
XCAN_AFR_EXT_OFFSET = 0x00E0, /* Acceptance Filter */
XCAN_FSR_OFFSET = 0x00E8, /* RX FIFO Status */
@@ -62,6 +66,7 @@ enum xcan_reg {
#define XCAN_FRAME_DLC_OFFSET(frame_base) ((frame_base) + 0x04)
#define XCAN_FRAME_DW1_OFFSET(frame_base) ((frame_base) + 0x08)
#define XCAN_FRAME_DW2_OFFSET(frame_base) ((frame_base) + 0x0C)
+#define XCANFD_FRAME_DW_OFFSET(frame_base) ((frame_base) + 0x08)
#define XCAN_CANFD_FRAME_SIZE 0x48
#define XCAN_TXMSG_FRAME_OFFSET(n) (XCAN_TXMSG_BASE_OFFSET + \
@@ -118,8 +123,12 @@ enum xcan_reg {
#define XCAN_IDR_RTR_MASK 0x00000001 /* Remote TX request */
#define XCAN_DLCR_DLC_MASK 0xF0000000 /* Data length code */
#define XCAN_FSR_FL_MASK 0x00003F00 /* RX Fill Level */
+#define XCAN_2_FSR_FL_MASK 0x00007F00 /* RX Fill Level */
#define XCAN_FSR_IRI_MASK 0x00000080 /* RX Increment Read Index */
#define XCAN_FSR_RI_MASK 0x0000001F /* RX Read Index */
+#define XCAN_2_FSR_RI_MASK 0x0000003F /* RX Read Index */
+#define XCAN_DLCR_EDL_MASK 0x08000000 /* EDL Mask in DLC */
+#define XCAN_DLCR_BRS_MASK 0x04000000 /* BRS Mask in DLC */
/* CAN register bit shift - XCAN_<REG>_<BIT>_SHIFT */
#define XCAN_BTR_SJW_SHIFT 7 /* Synchronous jump width */
@@ -133,6 +142,7 @@ enum xcan_reg {
/* CAN frame length constants */
#define XCAN_FRAME_MAX_DATA_LEN 8
+#define XCANFD_DW_BYTES 4
#define XCAN_TIMEOUT (1 * HZ)
/* TX-FIFO-empty interrupt available */
@@ -149,7 +159,15 @@ enum xcan_reg {
#define XCAN_FLAG_RX_FIFO_MULTI 0x0010
#define XCAN_FLAG_CANFD_2 0x0020
+enum xcan_ip_type {
+ XAXI_CAN = 0,
+ XZYNQ_CANPS,
+ XAXI_CANFD,
+ XAXI_CANFD_2_0,
+};
+
struct xcan_devtype_data {
+ enum xcan_ip_type cantype;
unsigned int flags;
const struct can_bittiming_const *bittiming_const;
const char *bus_clk_name;
@@ -183,7 +201,7 @@ struct xcan_priv {
struct napi_struct napi;
u32 (*read_reg)(const struct xcan_priv *priv, enum xcan_reg reg);
void (*write_reg)(const struct xcan_priv *priv, enum xcan_reg reg,
- u32 val);
+ u32 val);
struct device *dev;
void __iomem *reg_base;
unsigned long irq_flags;
@@ -205,6 +223,7 @@ static const struct can_bittiming_const xcan_bittiming_const = {
.brp_inc = 1,
};
+/* AXI CANFD Arbitration Bittiming constants as per AXI CANFD 1.0 spec */
static const struct can_bittiming_const xcan_bittiming_const_canfd = {
.name = DRIVER_NAME,
.tseg1_min = 1,
@@ -217,6 +236,20 @@ static const struct can_bittiming_const xcan_bittiming_const_canfd = {
.brp_inc = 1,
};
+/* AXI CANFD Data Bittiming constants as per AXI CANFD 1.0 specs */
+static struct can_bittiming_const xcan_data_bittiming_const_canfd = {
+ .name = DRIVER_NAME,
+ .tseg1_min = 1,
+ .tseg1_max = 16,
+ .tseg2_min = 1,
+ .tseg2_max = 8,
+ .sjw_max = 8,
+ .brp_min = 1,
+ .brp_max = 256,
+ .brp_inc = 1,
+};
+
+/* AXI CANFD 2.0 Arbitration Bittiming constants as per AXI CANFD 2.0 spec */
static const struct can_bittiming_const xcan_bittiming_const_canfd2 = {
.name = DRIVER_NAME,
.tseg1_min = 1,
@@ -229,6 +262,19 @@ static const struct can_bittiming_const xcan_bittiming_const_canfd2 = {
.brp_inc = 1,
};
+/* AXI CANFD 2.0 Data Bittiming constants as per AXI CANFD 2.0 spec */
+static struct can_bittiming_const xcan_data_bittiming_const_canfd2 = {
+ .name = DRIVER_NAME,
+ .tseg1_min = 1,
+ .tseg1_max = 32,
+ .tseg2_min = 1,
+ .tseg2_max = 16,
+ .sjw_max = 16,
+ .brp_min = 1,
+ .brp_max = 256,
+ .brp_inc = 1,
+};
+
/**
* xcan_write_reg_le - Write a value to the device register little endian
* @priv: Driver private data structure
@@ -238,7 +284,7 @@ static const struct can_bittiming_const xcan_bittiming_const_canfd2 = {
* Write data to the particular CAN register
*/
static void xcan_write_reg_le(const struct xcan_priv *priv, enum xcan_reg reg,
- u32 val)
+ u32 val)
{
iowrite32(val, priv->reg_base + reg);
}
@@ -265,7 +311,7 @@ static u32 xcan_read_reg_le(const struct xcan_priv *priv, enum xcan_reg reg)
* Write data to the particular CAN register
*/
static void xcan_write_reg_be(const struct xcan_priv *priv, enum xcan_reg reg,
- u32 val)
+ u32 val)
{
iowrite32be(val, priv->reg_base + reg);
}
@@ -343,6 +389,7 @@ static int xcan_set_bittiming(struct net_device *ndev)
{
struct xcan_priv *priv = netdev_priv(ndev);
struct can_bittiming *bt = &priv->can.bittiming;
+ struct can_bittiming *dbt = &priv->can.data_bittiming;
u32 btr0, btr1;
u32 is_config_mode;
@@ -372,9 +419,27 @@ static int xcan_set_bittiming(struct net_device *ndev)
priv->write_reg(priv, XCAN_BRPR_OFFSET, btr0);
priv->write_reg(priv, XCAN_BTR_OFFSET, btr1);
+ if (priv->devtype.cantype == XAXI_CANFD ||
+ priv->devtype.cantype == XAXI_CANFD_2_0) {
+ /* Setting Baud Rate prescaler value in F_BRPR Register */
+ btr0 = dbt->brp - 1;
+
+ /* Setting Time Segment 1 in BTR Register */
+ btr1 = dbt->prop_seg + dbt->phase_seg1 - 1;
+
+ /* Setting Time Segment 2 in BTR Register */
+ btr1 |= (dbt->phase_seg2 - 1) << priv->devtype.btr_ts2_shift;
+
+ /* Setting Synchronous jump width in BTR Register */
+ btr1 |= (dbt->sjw - 1) << priv->devtype.btr_sjw_shift;
+
+ priv->write_reg(priv, XCAN_F_BRPR_OFFSET, btr0);
+ priv->write_reg(priv, XCAN_F_BTR_OFFSET, btr1);
+ }
+
netdev_dbg(ndev, "BRPR=0x%08x, BTR=0x%08x\n",
- priv->read_reg(priv, XCAN_BRPR_OFFSET),
- priv->read_reg(priv, XCAN_BTR_OFFSET));
+ priv->read_reg(priv, XCAN_BRPR_OFFSET),
+ priv->read_reg(priv, XCAN_BTR_OFFSET));
return 0;
}
@@ -392,9 +457,8 @@ static int xcan_set_bittiming(struct net_device *ndev)
static int xcan_chip_start(struct net_device *ndev)
{
struct xcan_priv *priv = netdev_priv(ndev);
- u32 reg_msr, reg_sr_mask;
+ u32 reg_msr;
int err;
- unsigned long timeout;
u32 ier;
/* Check if it is in reset mode */
@@ -420,10 +484,8 @@ static int xcan_chip_start(struct net_device *ndev)
/* Check whether it is loopback mode or normal mode */
if (priv->can.ctrlmode & CAN_CTRLMODE_LOOPBACK) {
reg_msr = XCAN_MSR_LBACK_MASK;
- reg_sr_mask = XCAN_SR_LBACK_MASK;
} else {
reg_msr = 0x0;
- reg_sr_mask = XCAN_SR_NORMAL_MASK;
}
/* enable the first extended filter, if any, as cores with extended
@@ -435,16 +497,8 @@ static int xcan_chip_start(struct net_device *ndev)
priv->write_reg(priv, XCAN_MSR_OFFSET, reg_msr);
priv->write_reg(priv, XCAN_SRR_OFFSET, XCAN_SRR_CEN_MASK);
- timeout = jiffies + XCAN_TIMEOUT;
- while (!(priv->read_reg(priv, XCAN_SR_OFFSET) & reg_sr_mask)) {
- if (time_after(jiffies, timeout)) {
- netdev_warn(ndev,
- "timed out for correct mode\n");
- return -ETIMEDOUT;
- }
- }
netdev_dbg(ndev, "status:#x%08x\n",
- priv->read_reg(priv, XCAN_SR_OFFSET));
+ priv->read_reg(priv, XCAN_SR_OFFSET));
priv->can.state = CAN_STATE_ERROR_ACTIVE;
return 0;
@@ -483,6 +537,7 @@ static int xcan_do_set_mode(struct net_device *ndev, enum can_mode mode)
/**
* xcan_write_frame - Write a frame to HW
+ * @priv: Driver private data structure
* @skb: sk_buff pointer that contains data to be Txed
* @frame_offset: Register offset to write the frame to
*/
@@ -490,7 +545,8 @@ static void xcan_write_frame(struct xcan_priv *priv, struct sk_buff *skb,
int frame_offset)
{
u32 id, dlc, data[2] = {0, 0};
- struct can_frame *cf = (struct can_frame *)skb->data;
+ struct canfd_frame *cf = (struct canfd_frame *)skb->data;
+ u32 ramoff, dwindex = 0, i;
/* Watch carefully on the bit sequence */
if (cf->can_id & CAN_EFF_FLAG) {
@@ -498,7 +554,7 @@ static void xcan_write_frame(struct xcan_priv *priv, struct sk_buff *skb,
id = ((cf->can_id & CAN_EFF_MASK) << XCAN_IDR_ID2_SHIFT) &
XCAN_IDR_ID2_MASK;
id |= (((cf->can_id & CAN_EFF_MASK) >>
- (CAN_EFF_ID_BITS-CAN_SFF_ID_BITS)) <<
+ (CAN_EFF_ID_BITS - CAN_SFF_ID_BITS)) <<
XCAN_IDR_ID1_SHIFT) & XCAN_IDR_ID1_MASK;
/* The substitute remote TX request bit should be "1"
@@ -519,31 +575,51 @@ static void xcan_write_frame(struct xcan_priv *priv, struct sk_buff *skb,
id |= XCAN_IDR_SRR_MASK;
}
- dlc = cf->can_dlc << XCAN_DLCR_DLC_SHIFT;
-
- if (cf->can_dlc > 0)
- data[0] = be32_to_cpup((__be32 *)(cf->data + 0));
- if (cf->can_dlc > 4)
- data[1] = be32_to_cpup((__be32 *)(cf->data + 4));
+ dlc = can_len2dlc(cf->len) << XCAN_DLCR_DLC_SHIFT;
+ if (can_is_canfd_skb(skb)) {
+ if (cf->flags & CANFD_BRS)
+ dlc |= XCAN_DLCR_BRS_MASK;
+ dlc |= XCAN_DLCR_EDL_MASK;
+ }
priv->write_reg(priv, XCAN_FRAME_ID_OFFSET(frame_offset), id);
/* If the CAN frame is RTR frame this write triggers transmission
* (not on CAN FD)
*/
priv->write_reg(priv, XCAN_FRAME_DLC_OFFSET(frame_offset), dlc);
- if (!(cf->can_id & CAN_RTR_FLAG)) {
- priv->write_reg(priv, XCAN_FRAME_DW1_OFFSET(frame_offset),
- data[0]);
- /* If the CAN frame is Standard/Extended frame this
- * write triggers transmission (not on CAN FD)
- */
- priv->write_reg(priv, XCAN_FRAME_DW2_OFFSET(frame_offset),
- data[1]);
+ if (priv->devtype.cantype == XAXI_CANFD ||
+ priv->devtype.cantype == XAXI_CANFD_2_0) {
+ for (i = 0; i < cf->len; i += 4) {
+ ramoff = XCANFD_FRAME_DW_OFFSET(frame_offset) +
+ (dwindex * XCANFD_DW_BYTES);
+ priv->write_reg(priv, ramoff,
+ be32_to_cpup((__be32 *)(cf->data + i)));
+ dwindex++;
+ }
+ } else {
+ if (cf->len > 0)
+ data[0] = be32_to_cpup((__be32 *)(cf->data + 0));
+ if (cf->len > 4)
+ data[1] = be32_to_cpup((__be32 *)(cf->data + 4));
+
+ if (!(cf->can_id & CAN_RTR_FLAG)) {
+ priv->write_reg(priv,
+ XCAN_FRAME_DW1_OFFSET(frame_offset),
+ data[0]);
+ /* If the CAN frame is Standard/Extended frame this
+ * write triggers transmission (not on CAN FD)
+ */
+ priv->write_reg(priv,
+ XCAN_FRAME_DW2_OFFSET(frame_offset),
+ data[1]);
+ }
}
}
/**
* xcan_start_xmit_fifo - Starts the transmission (FIFO mode)
+ * @skb: sk_buff pointer that contains data to be Txed
+ * @ndev: Pointer to net_device structure
*
* Return: 0 on success, -ENOSPC if FIFO is full.
*/
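xcan_write_frame() above relies on the fixed CAN FD DLC encoding: DLC 0-8 means 0-8 data bytes as in classic CAN, while DLC 9-15 selects the longer CAN FD payloads. can_len2dlc() and can_dlc2len() implement that table from the CAN FD spec:

    /* DLC:  9   10  11  12  13  14  15  */
    /* len:  12  16  20  24  32  48  64  */
    u8 dlc = can_len2dlc(12);       /* -> 9  */
    u8 len = can_dlc2len(9);        /* -> 12 */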
@@ -580,6 +656,8 @@ static int xcan_start_xmit_fifo(struct sk_buff *skb, struct net_device *ndev)
/**
* xcan_start_xmit_mailbox - Starts the transmission (mailbox mode)
+ * @skb: sk_buff pointer that contains data to be Txed
+ * @ndev: Pointer to net_device structure
*
* Return: 0 on success, -ENOSPC if there is no space
*/
@@ -712,6 +790,88 @@ static int xcan_rx(struct net_device *ndev, int frame_base)
}
/**
+ * xcanfd_rx - Called from CAN ISR to complete the received frame processing
+ * @ndev: Pointer to net_device structure
+ * @frame_base: Register offset to the frame to be read
+ *
+ * This function is invoked from the CAN ISR (poll) to process the Rx frames. It
+ * does minimal processing and invokes "netif_receive_skb" to complete further
+ * processing.
+ * Return: 1 on success and 0 on failure.
+ */
+static int xcanfd_rx(struct net_device *ndev, int frame_base)
+{
+ struct xcan_priv *priv = netdev_priv(ndev);
+ struct net_device_stats *stats = &ndev->stats;
+ struct canfd_frame *cf;
+ struct sk_buff *skb;
+ u32 id_xcan, dlc, data[2] = {0, 0}, dwindex = 0, i, dw_offset;
+
+ id_xcan = priv->read_reg(priv, XCAN_FRAME_ID_OFFSET(frame_base));
+ dlc = priv->read_reg(priv, XCAN_FRAME_DLC_OFFSET(frame_base));
+ if (dlc & XCAN_DLCR_EDL_MASK)
+ skb = alloc_canfd_skb(ndev, &cf);
+ else
+ skb = alloc_can_skb(ndev, (struct can_frame **)&cf);
+
+ if (unlikely(!skb)) {
+ stats->rx_dropped++;
+ return 0;
+ }
+
+ /* Change Xilinx CANFD data length format to socketCAN data
+ * format
+ */
+ if (dlc & XCAN_DLCR_EDL_MASK)
+ cf->len = can_dlc2len((dlc & XCAN_DLCR_DLC_MASK) >>
+ XCAN_DLCR_DLC_SHIFT);
+ else
+ cf->len = get_can_dlc((dlc & XCAN_DLCR_DLC_MASK) >>
+ XCAN_DLCR_DLC_SHIFT);
+
+ /* Change Xilinx CAN ID format to socketCAN ID format */
+ if (id_xcan & XCAN_IDR_IDE_MASK) {
+ /* The received frame is an Extended format frame */
+ cf->can_id = (id_xcan & XCAN_IDR_ID1_MASK) >> 3;
+ cf->can_id |= (id_xcan & XCAN_IDR_ID2_MASK) >>
+ XCAN_IDR_ID2_SHIFT;
+ cf->can_id |= CAN_EFF_FLAG;
+ if (id_xcan & XCAN_IDR_RTR_MASK)
+ cf->can_id |= CAN_RTR_FLAG;
+ } else {
+ /* The received frame is a standard format frame */
+ cf->can_id = (id_xcan & XCAN_IDR_ID1_MASK) >>
+ XCAN_IDR_ID1_SHIFT;
+ if (!(dlc & XCAN_DLCR_EDL_MASK) && (id_xcan &
+ XCAN_IDR_SRR_MASK))
+ cf->can_id |= CAN_RTR_FLAG;
+ }
+
+	/* Check whether the received frame is CAN FD or not */
+ if (dlc & XCAN_DLCR_EDL_MASK) {
+ for (i = 0; i < cf->len; i += 4) {
+ dw_offset = XCANFD_FRAME_DW_OFFSET(frame_base) +
+ (dwindex * XCANFD_DW_BYTES);
+ data[0] = priv->read_reg(priv, dw_offset);
+ *(__be32 *)(cf->data + i) = cpu_to_be32(data[0]);
+ dwindex++;
+ }
+ } else {
+ for (i = 0; i < cf->len; i += 4) {
+ dw_offset = XCANFD_FRAME_DW_OFFSET(frame_base);
+ data[0] = priv->read_reg(priv, dw_offset + i);
+ *(__be32 *)(cf->data + i) = cpu_to_be32(data[0]);
+ }
+ }
+ stats->rx_bytes += cf->len;
+ stats->rx_packets++;
+ netif_receive_skb(skb);
+
+ return 1;
+}
+
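The two length decodes above are the only real difference between the FD and Classical RX paths; a minimal host-side sketch of both mappings (the FD table mirrors the kernel's can_dlc2len()):

    #include <stdio.h>
    #include <stdint.h>

    /* Same table as the kernel's can_dlc2len() */
    static const uint8_t dlc2len[] = {0, 1, 2, 3, 4, 5, 6, 7, 8,
                                      12, 16, 20, 24, 32, 48, 64};

    int main(void)
    {
        uint8_t raw = 13;   /* DLC field extracted from the DLC register */

        /* EDL set: CAN FD frame, use the full table */
        printf("FD: dlc %u -> %u bytes\n", raw, dlc2len[raw & 0xf]);
        /* EDL clear: Classical CAN, clamp like get_can_dlc() */
        printf("CC: dlc %u -> %u bytes\n", raw, raw > 8 ? 8 : raw);
        return 0;
    }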
+/**
* xcan_current_error_state - Get current error state from HW
* @ndev: Pointer to net_device structure
*
@@ -924,7 +1084,7 @@ static void xcan_err_interrupt(struct net_device *ndev, u32 isr)
cf->data[3] = CAN_ERR_PROT_LOC_CRC_SEQ;
}
}
- priv->can.can_stats.bus_error++;
+ priv->can.can_stats.bus_error++;
}
if (skb) {
@@ -934,7 +1094,7 @@ static void xcan_err_interrupt(struct net_device *ndev, u32 isr)
}
netdev_dbg(ndev, "%s: error status register:0x%x\n",
- __func__, priv->read_reg(priv, XCAN_ESR_OFFSET));
+ __func__, priv->read_reg(priv, XCAN_ESR_OFFSET));
}
/**
@@ -960,6 +1120,7 @@ static void xcan_state_interrupt(struct net_device *ndev, u32 isr)
/**
* xcan_rx_fifo_get_next_frame - Get register offset of next RX frame
+ * @priv: Driver private data structure
*
* Return: Register offset of the next frame in RX FIFO.
*/
@@ -968,7 +1129,7 @@ static int xcan_rx_fifo_get_next_frame(struct xcan_priv *priv)
int offset;
if (priv->devtype.flags & XCAN_FLAG_RX_FIFO_MULTI) {
- u32 fsr;
+ u32 fsr, mask;
/* clear RXOK before the is-empty check so that any newly
* received frame will reassert it without a race
@@ -978,13 +1139,20 @@ static int xcan_rx_fifo_get_next_frame(struct xcan_priv *priv)
fsr = priv->read_reg(priv, XCAN_FSR_OFFSET);
/* check if RX FIFO is empty */
- if (!(fsr & XCAN_FSR_FL_MASK))
+ if (priv->devtype.flags & XCAN_FLAG_CANFD_2)
+ mask = XCAN_2_FSR_FL_MASK;
+ else
+ mask = XCAN_FSR_FL_MASK;
+
+ if (!(fsr & mask))
return -ENOENT;
if (priv->devtype.flags & XCAN_FLAG_CANFD_2)
- offset = XCAN_RXMSG_2_FRAME_OFFSET(fsr & XCAN_FSR_RI_MASK);
+ offset =
+ XCAN_RXMSG_2_FRAME_OFFSET(fsr & XCAN_2_FSR_RI_MASK);
else
- offset = XCAN_RXMSG_FRAME_OFFSET(fsr & XCAN_FSR_RI_MASK);
+ offset =
+ XCAN_RXMSG_FRAME_OFFSET(fsr & XCAN_FSR_RI_MASK);
} else {
/* check if RX FIFO is empty */
@@ -1019,7 +1187,10 @@ static int xcan_rx_poll(struct napi_struct *napi, int quota)
while ((frame_offset = xcan_rx_fifo_get_next_frame(priv)) >= 0 &&
(work_done < quota)) {
- work_done += xcan_rx(ndev, frame_offset);
+ if (xcan_rx_int_mask(priv) & XCAN_IXR_RXOK_MASK)
+ work_done += xcanfd_rx(ndev, frame_offset);
+ else
+ work_done += xcan_rx(ndev, frame_offset);
if (priv->devtype.flags & XCAN_FLAG_RX_FIFO_MULTI)
/* increment read index */
@@ -1094,8 +1265,10 @@ static void xcan_tx_interrupt(struct net_device *ndev, u32 isr)
* via TXFEMP handling as we read TXFEMP *after* TXOK
* clear to satisfy (1).
*/
- while ((isr & XCAN_IXR_TXOK_MASK) && !WARN_ON(++retries == 100)) {
- priv->write_reg(priv, XCAN_ICR_OFFSET, XCAN_IXR_TXOK_MASK);
+ while ((isr & XCAN_IXR_TXOK_MASK) &&
+ !WARN_ON(++retries == 100)) {
+ priv->write_reg(priv, XCAN_ICR_OFFSET,
+ XCAN_IXR_TXOK_MASK);
isr = priv->read_reg(priv, XCAN_ISR_OFFSET);
}
@@ -1208,12 +1381,12 @@ static int xcan_open(struct net_device *ndev)
ret = pm_runtime_get_sync(priv->dev);
if (ret < 0) {
netdev_err(ndev, "%s: pm_runtime_get failed(%d)\n",
- __func__, ret);
+ __func__, ret);
return ret;
}
ret = request_irq(ndev->irq, xcan_interrupt, priv->irq_flags,
- ndev->name, ndev);
+ ndev->name, ndev);
if (ret < 0) {
netdev_err(ndev, "irq allocation for CAN failed\n");
goto err;
@@ -1284,7 +1457,7 @@ static int xcan_close(struct net_device *ndev)
* Return: 0 on success and failure value on error
*/
static int xcan_get_berr_counter(const struct net_device *ndev,
- struct can_berr_counter *bec)
+ struct can_berr_counter *bec)
{
struct xcan_priv *priv = netdev_priv(ndev);
int ret;
@@ -1292,7 +1465,7 @@ static int xcan_get_berr_counter(const struct net_device *ndev,
ret = pm_runtime_get_sync(priv->dev);
if (ret < 0) {
netdev_err(ndev, "%s: pm_runtime_get failed(%d)\n",
- __func__, ret);
+ __func__, ret);
return ret;
}
@@ -1305,7 +1478,6 @@ static int xcan_get_berr_counter(const struct net_device *ndev,
return 0;
}
-
static const struct net_device_ops xcan_netdev_ops = {
.ndo_open = xcan_open,
.ndo_stop = xcan_close,
@@ -1417,6 +1589,8 @@ static const struct dev_pm_ops xcan_dev_pm_ops = {
};
static const struct xcan_devtype_data xcan_zynq_data = {
+ .cantype = XZYNQ_CANPS,
+ .flags = XCAN_FLAG_TXFEMP,
.bittiming_const = &xcan_bittiming_const,
.btr_ts2_shift = XCAN_BTR_TS2_SHIFT,
.btr_sjw_shift = XCAN_BTR_SJW_SHIFT,
@@ -1424,6 +1598,8 @@ static const struct xcan_devtype_data xcan_zynq_data = {
};
static const struct xcan_devtype_data xcan_axi_data = {
+ .cantype = XAXI_CAN,
+ .flags = XCAN_FLAG_TXFEMP,
.bittiming_const = &xcan_bittiming_const,
.btr_ts2_shift = XCAN_BTR_TS2_SHIFT,
.btr_sjw_shift = XCAN_BTR_SJW_SHIFT,
@@ -1431,6 +1607,7 @@ static const struct xcan_devtype_data xcan_axi_data = {
};
static const struct xcan_devtype_data xcan_canfd_data = {
+ .cantype = XAXI_CANFD,
.flags = XCAN_FLAG_EXT_FILTERS |
XCAN_FLAG_RXMNF |
XCAN_FLAG_TX_MAILBOXES |
@@ -1442,6 +1619,7 @@ static const struct xcan_devtype_data xcan_canfd_data = {
};
static const struct xcan_devtype_data xcan_canfd2_data = {
+ .cantype = XAXI_CANFD_2_0,
.flags = XCAN_FLAG_EXT_FILTERS |
XCAN_FLAG_RXMNF |
XCAN_FLAG_TX_MAILBOXES |
@@ -1554,6 +1732,19 @@ static int xcan_probe(struct platform_device *pdev)
priv->can.do_get_berr_counter = xcan_get_berr_counter;
priv->can.ctrlmode_supported = CAN_CTRLMODE_LOOPBACK |
CAN_CTRLMODE_BERR_REPORTING;
+
+ if (devtype->cantype == XAXI_CANFD)
+ priv->can.data_bittiming_const =
+ &xcan_data_bittiming_const_canfd;
+
+ if (devtype->cantype == XAXI_CANFD_2_0)
+ priv->can.data_bittiming_const =
+ &xcan_data_bittiming_const_canfd2;
+
+ if (devtype->cantype == XAXI_CANFD ||
+ devtype->cantype == XAXI_CANFD_2_0)
+ priv->can.ctrlmode_supported |= CAN_CTRLMODE_FD;
+
priv->reg_base = addr;
priv->tx_max = tx_max;
priv->devtype = *devtype;
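With CAN_CTRLMODE_FD advertised as above, the interface accepts separate arbitration and data bit rates from userspace; a usage sketch with iproute2 (the rates below are examples only):

    ip link set can0 up type can bitrate 500000 dbitrate 2000000 fd on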
@@ -1570,7 +1761,8 @@ static int xcan_probe(struct platform_device *pdev)
/* Getting the CAN can_clk info */
priv->can_clk = devm_clk_get(&pdev->dev, "can_clk");
if (IS_ERR(priv->can_clk)) {
- dev_err(&pdev->dev, "Device clock not found.\n");
+ if (PTR_ERR(priv->can_clk) != -EPROBE_DEFER)
+ dev_err(&pdev->dev, "Device clock not found.\n");
ret = PTR_ERR(priv->can_clk);
goto err_free;
}
@@ -1589,7 +1781,7 @@ static int xcan_probe(struct platform_device *pdev)
ret = pm_runtime_get_sync(&pdev->dev);
if (ret < 0) {
netdev_err(ndev, "%s: pm_runtime_get failed(%d)\n",
- __func__, ret);
+ __func__, ret);
goto err_pmdisable;
}
diff --git a/drivers/net/dsa/b53/b53_common.c b/drivers/net/dsa/b53/b53_common.c
index 907af62846ba..7d328a5f0161 100644
--- a/drivers/net/dsa/b53/b53_common.c
+++ b/drivers/net/dsa/b53/b53_common.c
@@ -510,10 +510,15 @@ EXPORT_SYMBOL(b53_imp_vlan_setup);
int b53_enable_port(struct dsa_switch *ds, int port, struct phy_device *phy)
{
struct b53_device *dev = ds->priv;
- unsigned int cpu_port = ds->ports[port].cpu_dp->index;
+ unsigned int cpu_port;
int ret = 0;
u16 pvlan;
+ if (!dsa_is_user_port(ds, port))
+ return 0;
+
+ cpu_port = ds->ports[port].cpu_dp->index;
+
if (dev->ops->irq_enable)
ret = dev->ops->irq_enable(dev, port);
if (ret)
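For context, dsa_is_user_port() used by the new guard just checks the port type the DSA core assigned at setup time; roughly (a sketch of the helper, not a verbatim copy of include/net/dsa.h):

    static inline bool dsa_is_user_port(struct dsa_switch *ds, int p)
    {
        return ds->ports[p].type == DSA_PORT_TYPE_USER;
    }

CPU and DSA link ports therefore skip the per-port setup and return early.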
diff --git a/drivers/net/dsa/b53/b53_srab.c b/drivers/net/dsa/b53/b53_srab.c
index d9c56a779c08..0a1be5259be0 100644
--- a/drivers/net/dsa/b53/b53_srab.c
+++ b/drivers/net/dsa/b53/b53_srab.c
@@ -536,7 +536,6 @@ static void b53_srab_mux_init(struct platform_device *pdev)
struct b53_device *dev = platform_get_drvdata(pdev);
struct b53_srab_priv *priv = dev->priv;
struct b53_srab_port_priv *p;
- struct resource *r;
unsigned int port;
u32 reg, off = 0;
int ret;
@@ -544,8 +543,7 @@ static void b53_srab_mux_init(struct platform_device *pdev)
if (dev->pdata && dev->pdata->chip_id != BCM58XX_DEVICE_ID)
return;
- r = platform_get_resource(pdev, IORESOURCE_MEM, 1);
- priv->mux_config = devm_ioremap_resource(&pdev->dev, r);
+ priv->mux_config = devm_platform_ioremap_resource(pdev, 1);
if (IS_ERR(priv->mux_config))
return;
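For reference, devm_platform_ioremap_resource() folds the two-call pattern it replaces here into a single helper; it is essentially:

    void __iomem *devm_platform_ioremap_resource(struct platform_device *pdev,
                                                 unsigned int index)
    {
        struct resource *res;

        res = platform_get_resource(pdev, IORESOURCE_MEM, index);
        return devm_ioremap_resource(&pdev->dev, res);
    }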
@@ -593,7 +591,6 @@ static int b53_srab_probe(struct platform_device *pdev)
const struct of_device_id *of_id = NULL;
struct b53_srab_priv *priv;
struct b53_device *dev;
- struct resource *r;
if (dn)
of_id = of_match_node(b53_srab_of_match, dn);
@@ -610,8 +607,7 @@ static int b53_srab_probe(struct platform_device *pdev)
if (!priv)
return -ENOMEM;
- r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- priv->regs = devm_ioremap_resource(&pdev->dev, r);
+ priv->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(priv->regs))
return -ENOMEM;
diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c
index 28c963a21dac..26509fa37a50 100644
--- a/drivers/net/dsa/bcm_sf2.c
+++ b/drivers/net/dsa/bcm_sf2.c
@@ -157,6 +157,9 @@ static int bcm_sf2_port_setup(struct dsa_switch *ds, int port,
unsigned int i;
u32 reg;
+ if (!dsa_is_user_port(ds, port))
+ return 0;
+
/* Clear the memory power down */
reg = core_readl(priv, CORE_MEM_PSM_VDD_CTRL);
reg &= ~P_TXQ_PSM_VDD(port);
@@ -1047,7 +1050,6 @@ static int bcm_sf2_sw_probe(struct platform_device *pdev)
struct b53_device *dev;
struct dsa_switch *ds;
void __iomem **base;
- struct resource *r;
unsigned int i;
u32 reg, rev;
int ret;
@@ -1113,8 +1115,7 @@ static int bcm_sf2_sw_probe(struct platform_device *pdev)
base = &priv->core;
for (i = 0; i < BCM_SF2_REGS_NUM; i++) {
- r = platform_get_resource(pdev, IORESOURCE_MEM, i);
- *base = devm_ioremap_resource(&pdev->dev, r);
+ *base = devm_platform_ioremap_resource(pdev, i);
if (IS_ERR(*base)) {
pr_err("unable to find register: %s\n", reg_names[i]);
return PTR_ERR(*base);
diff --git a/drivers/net/dsa/lan9303-core.c b/drivers/net/dsa/lan9303-core.c
index 7a2063e7737a..bbec86b9418e 100644
--- a/drivers/net/dsa/lan9303-core.c
+++ b/drivers/net/dsa/lan9303-core.c
@@ -1079,6 +1079,9 @@ static int lan9303_port_enable(struct dsa_switch *ds, int port,
{
struct lan9303 *chip = ds->priv;
+ if (!dsa_is_user_port(ds, port))
+ return 0;
+
return lan9303_enable_processing_port(chip, port);
}
@@ -1086,6 +1089,9 @@ static void lan9303_port_disable(struct dsa_switch *ds, int port)
{
struct lan9303 *chip = ds->priv;
+ if (!dsa_is_user_port(ds, port))
+ return;
+
lan9303_disable_processing_port(chip, port);
lan9303_phy_write(ds, chip->phy_addr_base + port, MII_BMCR, BMCR_PDOWN);
}
diff --git a/drivers/net/dsa/lantiq_gswip.c b/drivers/net/dsa/lantiq_gswip.c
index 4e64835deac2..a69c9b9878b7 100644
--- a/drivers/net/dsa/lantiq_gswip.c
+++ b/drivers/net/dsa/lantiq_gswip.c
@@ -642,6 +642,9 @@ static int gswip_port_enable(struct dsa_switch *ds, int port,
struct gswip_priv *priv = ds->priv;
int err;
+ if (!dsa_is_user_port(ds, port))
+ return 0;
+
if (!dsa_is_cpu_port(ds, port)) {
err = gswip_add_single_port_br(priv, port, true);
if (err)
@@ -678,6 +681,9 @@ static void gswip_port_disable(struct dsa_switch *ds, int port)
{
struct gswip_priv *priv = ds->priv;
+ if (!dsa_is_user_port(ds, port))
+ return;
+
if (!dsa_is_cpu_port(ds, port)) {
gswip_mdio_mask(priv, GSWIP_MDIO_PHY_LINK_DOWN,
GSWIP_MDIO_PHY_LINK_MASK,
@@ -1822,7 +1828,6 @@ remove_gphy:
static int gswip_probe(struct platform_device *pdev)
{
struct gswip_priv *priv;
- struct resource *gswip_res, *mdio_res, *mii_res;
struct device_node *mdio_np, *gphy_fw_np;
struct device *dev = &pdev->dev;
int err;
@@ -1833,18 +1838,15 @@ static int gswip_probe(struct platform_device *pdev)
if (!priv)
return -ENOMEM;
- gswip_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- priv->gswip = devm_ioremap_resource(dev, gswip_res);
+ priv->gswip = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(priv->gswip))
return PTR_ERR(priv->gswip);
- mdio_res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
- priv->mdio = devm_ioremap_resource(dev, mdio_res);
+ priv->mdio = devm_platform_ioremap_resource(pdev, 1);
if (IS_ERR(priv->mdio))
return PTR_ERR(priv->mdio);
- mii_res = platform_get_resource(pdev, IORESOURCE_MEM, 2);
- priv->mii = devm_ioremap_resource(dev, mii_res);
+ priv->mii = devm_platform_ioremap_resource(pdev, 2);
if (IS_ERR(priv->mii))
return PTR_ERR(priv->mii);
diff --git a/drivers/net/dsa/microchip/Kconfig b/drivers/net/dsa/microchip/Kconfig
index fe0a13b79c4b..e1c23d1e91e6 100644
--- a/drivers/net/dsa/microchip/Kconfig
+++ b/drivers/net/dsa/microchip/Kconfig
@@ -5,7 +5,6 @@ config NET_DSA_MICROCHIP_KSZ_COMMON
menuconfig NET_DSA_MICROCHIP_KSZ9477
tristate "Microchip KSZ9477 series switch support"
depends on NET_DSA
- select NET_DSA_TAG_KSZ9477
select NET_DSA_MICROCHIP_KSZ_COMMON
help
This driver adds support for Microchip KSZ9477 switch chips.
@@ -16,3 +15,20 @@ config NET_DSA_MICROCHIP_KSZ9477_SPI
select REGMAP_SPI
help
Select to enable support for registering switches configured through SPI.
+
+menuconfig NET_DSA_MICROCHIP_KSZ8795
+ tristate "Microchip KSZ8795 series switch support"
+ depends on NET_DSA
+ select NET_DSA_MICROCHIP_KSZ_COMMON
+ help
+ This driver adds support for Microchip KSZ8795 switch chips.
+
+config NET_DSA_MICROCHIP_KSZ8795_SPI
+ tristate "KSZ8795 series SPI connected switch driver"
+ depends on NET_DSA_MICROCHIP_KSZ8795 && SPI
+ select REGMAP_SPI
+ help
+ This driver accesses KSZ8795 chip through SPI.
+
+	  This driver is required to use the KSZ8795 switch driver, as the
+	  chip can only be accessed through SPI.
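As a usage sketch, building the new driver as modules needs both new symbols plus the DSA core (NET_DSA_MICROCHIP_KSZ_COMMON is selected automatically):

    CONFIG_NET_DSA=m
    CONFIG_NET_DSA_MICROCHIP_KSZ8795=m
    CONFIG_NET_DSA_MICROCHIP_KSZ8795_SPI=m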
diff --git a/drivers/net/dsa/microchip/Makefile b/drivers/net/dsa/microchip/Makefile
index 68451b02f775..e3d799b95d7d 100644
--- a/drivers/net/dsa/microchip/Makefile
+++ b/drivers/net/dsa/microchip/Makefile
@@ -2,3 +2,5 @@
obj-$(CONFIG_NET_DSA_MICROCHIP_KSZ_COMMON) += ksz_common.o
obj-$(CONFIG_NET_DSA_MICROCHIP_KSZ9477) += ksz9477.o
obj-$(CONFIG_NET_DSA_MICROCHIP_KSZ9477_SPI) += ksz9477_spi.o
+obj-$(CONFIG_NET_DSA_MICROCHIP_KSZ8795) += ksz8795.o
+obj-$(CONFIG_NET_DSA_MICROCHIP_KSZ8795_SPI) += ksz8795_spi.o
diff --git a/drivers/net/dsa/microchip/ksz8795.c b/drivers/net/dsa/microchip/ksz8795.c
new file mode 100644
index 000000000000..a23d3ffdf0c4
--- /dev/null
+++ b/drivers/net/dsa/microchip/ksz8795.c
@@ -0,0 +1,1310 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Microchip KSZ8795 switch driver
+ *
+ * Copyright (C) 2017 Microchip Technology Inc.
+ * Tristram Ha <Tristram.Ha@microchip.com>
+ */
+
+#include <linux/delay.h>
+#include <linux/export.h>
+#include <linux/gpio.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_data/microchip-ksz.h>
+#include <linux/phy.h>
+#include <linux/etherdevice.h>
+#include <linux/if_bridge.h>
+#include <net/dsa.h>
+#include <net/switchdev.h>
+
+#include "ksz_common.h"
+#include "ksz8795_reg.h"
+
+static const struct {
+ char string[ETH_GSTRING_LEN];
+} mib_names[TOTAL_SWITCH_COUNTER_NUM] = {
+ { "rx_hi" },
+ { "rx_undersize" },
+ { "rx_fragments" },
+ { "rx_oversize" },
+ { "rx_jabbers" },
+ { "rx_symbol_err" },
+ { "rx_crc_err" },
+ { "rx_align_err" },
+ { "rx_mac_ctrl" },
+ { "rx_pause" },
+ { "rx_bcast" },
+ { "rx_mcast" },
+ { "rx_ucast" },
+ { "rx_64_or_less" },
+ { "rx_65_127" },
+ { "rx_128_255" },
+ { "rx_256_511" },
+ { "rx_512_1023" },
+ { "rx_1024_1522" },
+ { "rx_1523_2000" },
+ { "rx_2001" },
+ { "tx_hi" },
+ { "tx_late_col" },
+ { "tx_pause" },
+ { "tx_bcast" },
+ { "tx_mcast" },
+ { "tx_ucast" },
+ { "tx_deferred" },
+ { "tx_total_col" },
+ { "tx_exc_col" },
+ { "tx_single_col" },
+ { "tx_mult_col" },
+ { "rx_total" },
+ { "tx_total" },
+ { "rx_discards" },
+ { "tx_discards" },
+};
+
+static void ksz_cfg(struct ksz_device *dev, u32 addr, u8 bits, bool set)
+{
+ regmap_update_bits(dev->regmap[0], addr, bits, set ? bits : 0);
+}
+
+static void ksz_port_cfg(struct ksz_device *dev, int port, int offset, u8 bits,
+ bool set)
+{
+ regmap_update_bits(dev->regmap[0], PORT_CTRL_ADDR(port, offset),
+ bits, set ? bits : 0);
+}
+
+static int ksz8795_reset_switch(struct ksz_device *dev)
+{
+ /* reset switch */
+ ksz_write8(dev, REG_POWER_MANAGEMENT_1,
+ SW_SOFTWARE_POWER_DOWN << SW_POWER_MANAGEMENT_MODE_S);
+ ksz_write8(dev, REG_POWER_MANAGEMENT_1, 0);
+
+ return 0;
+}
+
+static void ksz8795_set_prio_queue(struct ksz_device *dev, int port, int queue)
+{
+ u8 hi, lo;
+
+ /* Number of queues can only be 1, 2, or 4. */
+ switch (queue) {
+ case 4:
+ case 3:
+ queue = PORT_QUEUE_SPLIT_4;
+ break;
+ case 2:
+ queue = PORT_QUEUE_SPLIT_2;
+ break;
+ default:
+ queue = PORT_QUEUE_SPLIT_1;
+ }
+ ksz_pread8(dev, port, REG_PORT_CTRL_0, &lo);
+ ksz_pread8(dev, port, P_DROP_TAG_CTRL, &hi);
+ lo &= ~PORT_QUEUE_SPLIT_L;
+ if (queue & PORT_QUEUE_SPLIT_2)
+ lo |= PORT_QUEUE_SPLIT_L;
+ hi &= ~PORT_QUEUE_SPLIT_H;
+ if (queue & PORT_QUEUE_SPLIT_4)
+ hi |= PORT_QUEUE_SPLIT_H;
+ ksz_pwrite8(dev, port, REG_PORT_CTRL_0, lo);
+ ksz_pwrite8(dev, port, P_DROP_TAG_CTRL, hi);
+
+ /* Default is port based for egress rate limit. */
+ if (queue != PORT_QUEUE_SPLIT_1)
+ ksz_cfg(dev, REG_SW_CTRL_19, SW_OUT_RATE_LIMIT_QUEUE_BASED,
+ true);
+}
+
+static void ksz8795_r_mib_cnt(struct ksz_device *dev, int port, u16 addr,
+ u64 *cnt)
+{
+ u16 ctrl_addr;
+ u32 data;
+ u8 check;
+ int loop;
+
+ ctrl_addr = addr + SWITCH_COUNTER_NUM * port;
+ ctrl_addr |= IND_ACC_TABLE(TABLE_MIB | TABLE_READ);
+
+ mutex_lock(&dev->alu_mutex);
+ ksz_write16(dev, REG_IND_CTRL_0, ctrl_addr);
+
+	/* The valid bit is almost guaranteed to be read on the first try
+	 * because of the slow SPI speed.
+	 */
+ for (loop = 2; loop > 0; loop--) {
+ ksz_read8(dev, REG_IND_MIB_CHECK, &check);
+
+ if (check & MIB_COUNTER_VALID) {
+ ksz_read32(dev, REG_IND_DATA_LO, &data);
+ if (check & MIB_COUNTER_OVERFLOW)
+ *cnt += MIB_COUNTER_VALUE + 1;
+ *cnt += data & MIB_COUNTER_VALUE;
+ break;
+ }
+ }
+ mutex_unlock(&dev->alu_mutex);
+}
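The overflow handling above extends the hardware counter (assumed 30 bits wide here) into the 64-bit software counter; a host-side sketch of the accumulation step, with the mask value an assumption of this sketch:

    #include <stdio.h>
    #include <stdint.h>

    #define MIB_COUNTER_VALUE 0x3FFFFFFFu  /* assumed 30-bit counter mask */

    int main(void)
    {
        uint64_t cnt = 0;
        uint32_t data = 0x12345;  /* value read from REG_IND_DATA_LO */
        int overflow = 1;         /* MIB_COUNTER_OVERFLOW bit was set */

        if (overflow)
            cnt += (uint64_t)MIB_COUNTER_VALUE + 1;  /* one wrap = 2^30 */
        cnt += data & MIB_COUNTER_VALUE;
        printf("accumulated counter = %llu\n", (unsigned long long)cnt);
        return 0;
    }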
+
+static void ksz8795_r_mib_pkt(struct ksz_device *dev, int port, u16 addr,
+ u64 *dropped, u64 *cnt)
+{
+ u16 ctrl_addr;
+ u32 data;
+ u8 check;
+ int loop;
+
+ addr -= SWITCH_COUNTER_NUM;
+ ctrl_addr = (KS_MIB_TOTAL_RX_1 - KS_MIB_TOTAL_RX_0) * port;
+ ctrl_addr += addr + KS_MIB_TOTAL_RX_0;
+ ctrl_addr |= IND_ACC_TABLE(TABLE_MIB | TABLE_READ);
+
+ mutex_lock(&dev->alu_mutex);
+ ksz_write16(dev, REG_IND_CTRL_0, ctrl_addr);
+
+	/* The valid bit is almost guaranteed to be read on the first try
+	 * because of the slow SPI speed.
+	 */
+ for (loop = 2; loop > 0; loop--) {
+ ksz_read8(dev, REG_IND_MIB_CHECK, &check);
+
+ if (check & MIB_COUNTER_VALID) {
+ ksz_read32(dev, REG_IND_DATA_LO, &data);
+ if (addr < 2) {
+ u64 total;
+
+ total = check & MIB_TOTAL_BYTES_H;
+ total <<= 32;
+ *cnt += total;
+ *cnt += data;
+ if (check & MIB_COUNTER_OVERFLOW) {
+ total = MIB_TOTAL_BYTES_H + 1;
+ total <<= 32;
+ *cnt += total;
+ }
+ } else {
+ if (check & MIB_COUNTER_OVERFLOW)
+ *cnt += MIB_PACKET_DROPPED + 1;
+ *cnt += data & MIB_PACKET_DROPPED;
+ }
+ break;
+ }
+ }
+ mutex_unlock(&dev->alu_mutex);
+}
+
+static void ksz8795_freeze_mib(struct ksz_device *dev, int port, bool freeze)
+{
+ /* enable the port for flush/freeze function */
+ if (freeze)
+ ksz_cfg(dev, REG_SW_CTRL_6, BIT(port), true);
+ ksz_cfg(dev, REG_SW_CTRL_6, SW_MIB_COUNTER_FREEZE, freeze);
+
+ /* disable the port after freeze is done */
+ if (!freeze)
+ ksz_cfg(dev, REG_SW_CTRL_6, BIT(port), false);
+}
+
+static void ksz8795_port_init_cnt(struct ksz_device *dev, int port)
+{
+ struct ksz_port_mib *mib = &dev->ports[port].mib;
+
+ /* flush all enabled port MIB counters */
+ ksz_cfg(dev, REG_SW_CTRL_6, BIT(port), true);
+ ksz_cfg(dev, REG_SW_CTRL_6, SW_MIB_COUNTER_FLUSH, true);
+ ksz_cfg(dev, REG_SW_CTRL_6, BIT(port), false);
+
+ mib->cnt_ptr = 0;
+
+ /* Some ports may not have MIB counters before SWITCH_COUNTER_NUM. */
+ while (mib->cnt_ptr < dev->reg_mib_cnt) {
+ dev->dev_ops->r_mib_cnt(dev, port, mib->cnt_ptr,
+ &mib->counters[mib->cnt_ptr]);
+ ++mib->cnt_ptr;
+ }
+
+ /* Some ports may not have MIB counters after SWITCH_COUNTER_NUM. */
+ while (mib->cnt_ptr < dev->mib_cnt) {
+ dev->dev_ops->r_mib_pkt(dev, port, mib->cnt_ptr,
+ NULL, &mib->counters[mib->cnt_ptr]);
+ ++mib->cnt_ptr;
+ }
+ mib->cnt_ptr = 0;
+ memset(mib->counters, 0, dev->mib_cnt * sizeof(u64));
+}
+
+static void ksz8795_r_table(struct ksz_device *dev, int table, u16 addr,
+ u64 *data)
+{
+ u16 ctrl_addr;
+
+ ctrl_addr = IND_ACC_TABLE(table | TABLE_READ) | addr;
+
+ mutex_lock(&dev->alu_mutex);
+ ksz_write16(dev, REG_IND_CTRL_0, ctrl_addr);
+ ksz_read64(dev, REG_IND_DATA_HI, data);
+ mutex_unlock(&dev->alu_mutex);
+}
+
+static void ksz8795_w_table(struct ksz_device *dev, int table, u16 addr,
+ u64 data)
+{
+ u16 ctrl_addr;
+
+ ctrl_addr = IND_ACC_TABLE(table) | addr;
+
+ mutex_lock(&dev->alu_mutex);
+ ksz_write64(dev, REG_IND_DATA_HI, data);
+ ksz_write16(dev, REG_IND_CTRL_0, ctrl_addr);
+ mutex_unlock(&dev->alu_mutex);
+}
+
+static int ksz8795_valid_dyn_entry(struct ksz_device *dev, u8 *data)
+{
+ int timeout = 100;
+
+ do {
+ ksz_read8(dev, REG_IND_DATA_CHECK, data);
+ timeout--;
+ } while ((*data & DYNAMIC_MAC_TABLE_NOT_READY) && timeout);
+
+ /* Entry is not ready for accessing. */
+ if (*data & DYNAMIC_MAC_TABLE_NOT_READY) {
+ return -EAGAIN;
+ /* Entry is ready for accessing. */
+ } else {
+ ksz_read8(dev, REG_IND_DATA_8, data);
+
+ /* There is no valid entry in the table. */
+ if (*data & DYNAMIC_MAC_TABLE_MAC_EMPTY)
+ return -ENXIO;
+ }
+ return 0;
+}
+
+static int ksz8795_r_dyn_mac_table(struct ksz_device *dev, u16 addr,
+ u8 *mac_addr, u8 *fid, u8 *src_port,
+ u8 *timestamp, u16 *entries)
+{
+ u32 data_hi, data_lo;
+ u16 ctrl_addr;
+ u8 data;
+ int rc;
+
+ ctrl_addr = IND_ACC_TABLE(TABLE_DYNAMIC_MAC | TABLE_READ) | addr;
+
+ mutex_lock(&dev->alu_mutex);
+ ksz_write16(dev, REG_IND_CTRL_0, ctrl_addr);
+
+ rc = ksz8795_valid_dyn_entry(dev, &data);
+ if (rc == -EAGAIN) {
+ if (addr == 0)
+ *entries = 0;
+ } else if (rc == -ENXIO) {
+ *entries = 0;
+ /* At least one valid entry in the table. */
+ } else {
+ u64 buf = 0;
+ int cnt;
+
+ ksz_read64(dev, REG_IND_DATA_HI, &buf);
+ data_hi = (u32)(buf >> 32);
+ data_lo = (u32)buf;
+
+		/* Work out how many valid entries are in the table. */
+ cnt = data & DYNAMIC_MAC_TABLE_ENTRIES_H;
+ cnt <<= DYNAMIC_MAC_ENTRIES_H_S;
+ cnt |= (data_hi & DYNAMIC_MAC_TABLE_ENTRIES) >>
+ DYNAMIC_MAC_ENTRIES_S;
+ *entries = cnt + 1;
+
+ *fid = (data_hi & DYNAMIC_MAC_TABLE_FID) >>
+ DYNAMIC_MAC_FID_S;
+ *src_port = (data_hi & DYNAMIC_MAC_TABLE_SRC_PORT) >>
+ DYNAMIC_MAC_SRC_PORT_S;
+ *timestamp = (data_hi & DYNAMIC_MAC_TABLE_TIMESTAMP) >>
+ DYNAMIC_MAC_TIMESTAMP_S;
+
+ mac_addr[5] = (u8)data_lo;
+ mac_addr[4] = (u8)(data_lo >> 8);
+ mac_addr[3] = (u8)(data_lo >> 16);
+ mac_addr[2] = (u8)(data_lo >> 24);
+
+ mac_addr[1] = (u8)data_hi;
+ mac_addr[0] = (u8)(data_hi >> 8);
+ rc = 0;
+ }
+ mutex_unlock(&dev->alu_mutex);
+
+ return rc;
+}
+
+static int ksz8795_r_sta_mac_table(struct ksz_device *dev, u16 addr,
+ struct alu_struct *alu)
+{
+ u32 data_hi, data_lo;
+ u64 data;
+
+ ksz8795_r_table(dev, TABLE_STATIC_MAC, addr, &data);
+ data_hi = data >> 32;
+ data_lo = (u32)data;
+ if (data_hi & (STATIC_MAC_TABLE_VALID | STATIC_MAC_TABLE_OVERRIDE)) {
+ alu->mac[5] = (u8)data_lo;
+ alu->mac[4] = (u8)(data_lo >> 8);
+ alu->mac[3] = (u8)(data_lo >> 16);
+ alu->mac[2] = (u8)(data_lo >> 24);
+ alu->mac[1] = (u8)data_hi;
+ alu->mac[0] = (u8)(data_hi >> 8);
+ alu->port_forward = (data_hi & STATIC_MAC_TABLE_FWD_PORTS) >>
+ STATIC_MAC_FWD_PORTS_S;
+ alu->is_override =
+ (data_hi & STATIC_MAC_TABLE_OVERRIDE) ? 1 : 0;
+ data_hi >>= 1;
+ alu->is_use_fid = (data_hi & STATIC_MAC_TABLE_USE_FID) ? 1 : 0;
+ alu->fid = (data_hi & STATIC_MAC_TABLE_FID) >>
+ STATIC_MAC_FID_S;
+ return 0;
+ }
+ return -ENXIO;
+}
+
+static void ksz8795_w_sta_mac_table(struct ksz_device *dev, u16 addr,
+ struct alu_struct *alu)
+{
+ u32 data_hi, data_lo;
+ u64 data;
+
+ data_lo = ((u32)alu->mac[2] << 24) |
+ ((u32)alu->mac[3] << 16) |
+ ((u32)alu->mac[4] << 8) | alu->mac[5];
+ data_hi = ((u32)alu->mac[0] << 8) | alu->mac[1];
+ data_hi |= (u32)alu->port_forward << STATIC_MAC_FWD_PORTS_S;
+
+ if (alu->is_override)
+ data_hi |= STATIC_MAC_TABLE_OVERRIDE;
+ if (alu->is_use_fid) {
+ data_hi |= STATIC_MAC_TABLE_USE_FID;
+ data_hi |= (u32)alu->fid << STATIC_MAC_FID_S;
+ }
+ if (alu->is_static)
+ data_hi |= STATIC_MAC_TABLE_VALID;
+ else
+ data_hi &= ~STATIC_MAC_TABLE_OVERRIDE;
+
+ data = (u64)data_hi << 32 | data_lo;
+ ksz8795_w_table(dev, TABLE_STATIC_MAC, addr, data);
+}
+
+static void ksz8795_from_vlan(u16 vlan, u8 *fid, u8 *member, u8 *valid)
+{
+ *fid = vlan & VLAN_TABLE_FID;
+ *member = (vlan & VLAN_TABLE_MEMBERSHIP) >> VLAN_TABLE_MEMBERSHIP_S;
+ *valid = !!(vlan & VLAN_TABLE_VALID);
+}
+
+static void ksz8795_to_vlan(u8 fid, u8 member, u8 valid, u16 *vlan)
+{
+ *vlan = fid;
+ *vlan |= (u16)member << VLAN_TABLE_MEMBERSHIP_S;
+ if (valid)
+ *vlan |= VLAN_TABLE_VALID;
+}
+
+static void ksz8795_r_vlan_entries(struct ksz_device *dev, u16 addr)
+{
+ u64 data;
+ int i;
+
+ ksz8795_r_table(dev, TABLE_VLAN, addr, &data);
+ addr *= 4;
+ for (i = 0; i < 4; i++) {
+ dev->vlan_cache[addr + i].table[0] = (u16)data;
+ data >>= VLAN_TABLE_S;
+ }
+}
+
+static void ksz8795_r_vlan_table(struct ksz_device *dev, u16 vid, u16 *vlan)
+{
+ int index;
+ u16 *data;
+ u16 addr;
+ u64 buf;
+
+ data = (u16 *)&buf;
+ addr = vid / 4;
+ index = vid & 3;
+ ksz8795_r_table(dev, TABLE_VLAN, addr, &buf);
+ *vlan = data[index];
+}
+
+static void ksz8795_w_vlan_table(struct ksz_device *dev, u16 vid, u16 vlan)
+{
+ int index;
+ u16 *data;
+ u16 addr;
+ u64 buf;
+
+ data = (u16 *)&buf;
+ addr = vid / 4;
+ index = vid & 3;
+ ksz8795_r_table(dev, TABLE_VLAN, addr, &buf);
+ data[index] = vlan;
+ dev->vlan_cache[vid].table[0] = vlan;
+ ksz8795_w_table(dev, TABLE_VLAN, addr, buf);
+}
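Both helpers above rely on four 16-bit VLAN entries being packed into each 64-bit indirect-table word; the driver overlays a u16 array on the buffer, while the endian-independent equivalent of the vid -> (address, slot) mapping looks like this sketch (assuming VLAN_TABLE_S == 16):

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        uint16_t vid = 103;
        uint16_t addr = vid / 4;   /* which 64-bit VLAN table word */
        int slot = vid & 3;        /* which 16-bit entry inside that word */
        uint64_t buf = 0;          /* would come from ksz8795_r_table() */
        uint16_t entry = 0x0abc;   /* packed fid/membership/valid value */

        buf &= ~((uint64_t)0xffff << (16 * slot));
        buf |= (uint64_t)entry << (16 * slot);
        printf("vid %u -> addr %u slot %d word 0x%016llx\n",
               vid, addr, slot, (unsigned long long)buf);
        return 0;
    }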
+
+static void ksz8795_r_phy(struct ksz_device *dev, u16 phy, u16 reg, u16 *val)
+{
+ u8 restart, speed, ctrl, link;
+ int processed = true;
+ u16 data = 0;
+ u8 p = phy;
+
+ switch (reg) {
+ case PHY_REG_CTRL:
+ ksz_pread8(dev, p, P_NEG_RESTART_CTRL, &restart);
+ ksz_pread8(dev, p, P_SPEED_STATUS, &speed);
+ ksz_pread8(dev, p, P_FORCE_CTRL, &ctrl);
+ if (restart & PORT_PHY_LOOPBACK)
+ data |= PHY_LOOPBACK;
+ if (ctrl & PORT_FORCE_100_MBIT)
+ data |= PHY_SPEED_100MBIT;
+ if (!(ctrl & PORT_AUTO_NEG_DISABLE))
+ data |= PHY_AUTO_NEG_ENABLE;
+ if (restart & PORT_POWER_DOWN)
+ data |= PHY_POWER_DOWN;
+ if (restart & PORT_AUTO_NEG_RESTART)
+ data |= PHY_AUTO_NEG_RESTART;
+ if (ctrl & PORT_FORCE_FULL_DUPLEX)
+ data |= PHY_FULL_DUPLEX;
+ if (speed & PORT_HP_MDIX)
+ data |= PHY_HP_MDIX;
+ if (restart & PORT_FORCE_MDIX)
+ data |= PHY_FORCE_MDIX;
+ if (restart & PORT_AUTO_MDIX_DISABLE)
+ data |= PHY_AUTO_MDIX_DISABLE;
+ if (restart & PORT_TX_DISABLE)
+ data |= PHY_TRANSMIT_DISABLE;
+ if (restart & PORT_LED_OFF)
+ data |= PHY_LED_DISABLE;
+ break;
+ case PHY_REG_STATUS:
+ ksz_pread8(dev, p, P_LINK_STATUS, &link);
+ data = PHY_100BTX_FD_CAPABLE |
+ PHY_100BTX_CAPABLE |
+ PHY_10BT_FD_CAPABLE |
+ PHY_10BT_CAPABLE |
+ PHY_AUTO_NEG_CAPABLE;
+ if (link & PORT_AUTO_NEG_COMPLETE)
+ data |= PHY_AUTO_NEG_ACKNOWLEDGE;
+ if (link & PORT_STAT_LINK_GOOD)
+ data |= PHY_LINK_STATUS;
+ break;
+ case PHY_REG_ID_1:
+ data = KSZ8795_ID_HI;
+ break;
+ case PHY_REG_ID_2:
+ data = KSZ8795_ID_LO;
+ break;
+ case PHY_REG_AUTO_NEGOTIATION:
+ ksz_pread8(dev, p, P_LOCAL_CTRL, &ctrl);
+ data = PHY_AUTO_NEG_802_3;
+ if (ctrl & PORT_AUTO_NEG_SYM_PAUSE)
+ data |= PHY_AUTO_NEG_SYM_PAUSE;
+ if (ctrl & PORT_AUTO_NEG_100BTX_FD)
+ data |= PHY_AUTO_NEG_100BTX_FD;
+ if (ctrl & PORT_AUTO_NEG_100BTX)
+ data |= PHY_AUTO_NEG_100BTX;
+ if (ctrl & PORT_AUTO_NEG_10BT_FD)
+ data |= PHY_AUTO_NEG_10BT_FD;
+ if (ctrl & PORT_AUTO_NEG_10BT)
+ data |= PHY_AUTO_NEG_10BT;
+ break;
+ case PHY_REG_REMOTE_CAPABILITY:
+ ksz_pread8(dev, p, P_REMOTE_STATUS, &link);
+ data = PHY_AUTO_NEG_802_3;
+ if (link & PORT_REMOTE_SYM_PAUSE)
+ data |= PHY_AUTO_NEG_SYM_PAUSE;
+ if (link & PORT_REMOTE_100BTX_FD)
+ data |= PHY_AUTO_NEG_100BTX_FD;
+ if (link & PORT_REMOTE_100BTX)
+ data |= PHY_AUTO_NEG_100BTX;
+ if (link & PORT_REMOTE_10BT_FD)
+ data |= PHY_AUTO_NEG_10BT_FD;
+ if (link & PORT_REMOTE_10BT)
+ data |= PHY_AUTO_NEG_10BT;
+ if (data & ~PHY_AUTO_NEG_802_3)
+ data |= PHY_REMOTE_ACKNOWLEDGE_NOT;
+ break;
+ default:
+ processed = false;
+ break;
+ }
+ if (processed)
+ *val = data;
+}
+
+static void ksz8795_w_phy(struct ksz_device *dev, u16 phy, u16 reg, u16 val)
+{
+ u8 p = phy;
+ u8 restart, speed, ctrl, data;
+
+ switch (reg) {
+ case PHY_REG_CTRL:
+
+		/* The PHY reset function is not supported. */
+ if (val & PHY_RESET)
+ break;
+ ksz_pread8(dev, p, P_SPEED_STATUS, &speed);
+ data = speed;
+ if (val & PHY_HP_MDIX)
+ data |= PORT_HP_MDIX;
+ else
+ data &= ~PORT_HP_MDIX;
+ if (data != speed)
+ ksz_pwrite8(dev, p, P_SPEED_STATUS, data);
+ ksz_pread8(dev, p, P_FORCE_CTRL, &ctrl);
+ data = ctrl;
+ if (!(val & PHY_AUTO_NEG_ENABLE))
+ data |= PORT_AUTO_NEG_DISABLE;
+ else
+ data &= ~PORT_AUTO_NEG_DISABLE;
+
+ /* Fiber port does not support auto-negotiation. */
+ if (dev->ports[p].fiber)
+ data |= PORT_AUTO_NEG_DISABLE;
+ if (val & PHY_SPEED_100MBIT)
+ data |= PORT_FORCE_100_MBIT;
+ else
+ data &= ~PORT_FORCE_100_MBIT;
+ if (val & PHY_FULL_DUPLEX)
+ data |= PORT_FORCE_FULL_DUPLEX;
+ else
+ data &= ~PORT_FORCE_FULL_DUPLEX;
+ if (data != ctrl)
+ ksz_pwrite8(dev, p, P_FORCE_CTRL, data);
+ ksz_pread8(dev, p, P_NEG_RESTART_CTRL, &restart);
+ data = restart;
+ if (val & PHY_LED_DISABLE)
+ data |= PORT_LED_OFF;
+ else
+ data &= ~PORT_LED_OFF;
+ if (val & PHY_TRANSMIT_DISABLE)
+ data |= PORT_TX_DISABLE;
+ else
+ data &= ~PORT_TX_DISABLE;
+ if (val & PHY_AUTO_NEG_RESTART)
+ data |= PORT_AUTO_NEG_RESTART;
+ else
+ data &= ~(PORT_AUTO_NEG_RESTART);
+ if (val & PHY_POWER_DOWN)
+ data |= PORT_POWER_DOWN;
+ else
+ data &= ~PORT_POWER_DOWN;
+ if (val & PHY_AUTO_MDIX_DISABLE)
+ data |= PORT_AUTO_MDIX_DISABLE;
+ else
+ data &= ~PORT_AUTO_MDIX_DISABLE;
+ if (val & PHY_FORCE_MDIX)
+ data |= PORT_FORCE_MDIX;
+ else
+ data &= ~PORT_FORCE_MDIX;
+ if (val & PHY_LOOPBACK)
+ data |= PORT_PHY_LOOPBACK;
+ else
+ data &= ~PORT_PHY_LOOPBACK;
+ if (data != restart)
+ ksz_pwrite8(dev, p, P_NEG_RESTART_CTRL, data);
+ break;
+ case PHY_REG_AUTO_NEGOTIATION:
+ ksz_pread8(dev, p, P_LOCAL_CTRL, &ctrl);
+ data = ctrl;
+ data &= ~(PORT_AUTO_NEG_SYM_PAUSE |
+ PORT_AUTO_NEG_100BTX_FD |
+ PORT_AUTO_NEG_100BTX |
+ PORT_AUTO_NEG_10BT_FD |
+ PORT_AUTO_NEG_10BT);
+ if (val & PHY_AUTO_NEG_SYM_PAUSE)
+ data |= PORT_AUTO_NEG_SYM_PAUSE;
+ if (val & PHY_AUTO_NEG_100BTX_FD)
+ data |= PORT_AUTO_NEG_100BTX_FD;
+ if (val & PHY_AUTO_NEG_100BTX)
+ data |= PORT_AUTO_NEG_100BTX;
+ if (val & PHY_AUTO_NEG_10BT_FD)
+ data |= PORT_AUTO_NEG_10BT_FD;
+ if (val & PHY_AUTO_NEG_10BT)
+ data |= PORT_AUTO_NEG_10BT;
+ if (data != ctrl)
+ ksz_pwrite8(dev, p, P_LOCAL_CTRL, data);
+ break;
+ default:
+ break;
+ }
+}
+
+static enum dsa_tag_protocol ksz8795_get_tag_protocol(struct dsa_switch *ds,
+ int port)
+{
+ return DSA_TAG_PROTO_KSZ8795;
+}
+
+static void ksz8795_get_strings(struct dsa_switch *ds, int port,
+ u32 stringset, uint8_t *buf)
+{
+ int i;
+
+ for (i = 0; i < TOTAL_SWITCH_COUNTER_NUM; i++) {
+ memcpy(buf + i * ETH_GSTRING_LEN, mib_names[i].string,
+ ETH_GSTRING_LEN);
+ }
+}
+
+static void ksz8795_cfg_port_member(struct ksz_device *dev, int port,
+ u8 member)
+{
+ u8 data;
+
+ ksz_pread8(dev, port, P_MIRROR_CTRL, &data);
+ data &= ~PORT_VLAN_MEMBERSHIP;
+ data |= (member & dev->port_mask);
+ ksz_pwrite8(dev, port, P_MIRROR_CTRL, data);
+ dev->ports[port].member = member;
+}
+
+static void ksz8795_port_stp_state_set(struct dsa_switch *ds, int port,
+ u8 state)
+{
+ struct ksz_device *dev = ds->priv;
+ int forward = dev->member;
+ struct ksz_port *p;
+ int member = -1;
+ u8 data;
+
+ p = &dev->ports[port];
+
+ ksz_pread8(dev, port, P_STP_CTRL, &data);
+ data &= ~(PORT_TX_ENABLE | PORT_RX_ENABLE | PORT_LEARN_DISABLE);
+
+ switch (state) {
+ case BR_STATE_DISABLED:
+ data |= PORT_LEARN_DISABLE;
+ if (port < SWITCH_PORT_NUM)
+ member = 0;
+ break;
+ case BR_STATE_LISTENING:
+ data |= (PORT_RX_ENABLE | PORT_LEARN_DISABLE);
+ if (port < SWITCH_PORT_NUM &&
+ p->stp_state == BR_STATE_DISABLED)
+ member = dev->host_mask | p->vid_member;
+ break;
+ case BR_STATE_LEARNING:
+ data |= PORT_RX_ENABLE;
+ break;
+ case BR_STATE_FORWARDING:
+ data |= (PORT_TX_ENABLE | PORT_RX_ENABLE);
+
+ /* This function is also used internally. */
+ if (port == dev->cpu_port)
+ break;
+
+ /* Port is a member of a bridge. */
+ if (dev->br_member & BIT(port)) {
+ dev->member |= BIT(port);
+ member = dev->member;
+ } else {
+ member = dev->host_mask | p->vid_member;
+ }
+ break;
+ case BR_STATE_BLOCKING:
+ data |= PORT_LEARN_DISABLE;
+ if (port < SWITCH_PORT_NUM &&
+ p->stp_state == BR_STATE_DISABLED)
+ member = dev->host_mask | p->vid_member;
+ break;
+ default:
+ dev_err(ds->dev, "invalid STP state: %d\n", state);
+ return;
+ }
+
+ ksz_pwrite8(dev, port, P_STP_CTRL, data);
+ p->stp_state = state;
+ if (data & PORT_RX_ENABLE)
+ dev->rx_ports |= BIT(port);
+ else
+ dev->rx_ports &= ~BIT(port);
+ if (data & PORT_TX_ENABLE)
+ dev->tx_ports |= BIT(port);
+ else
+ dev->tx_ports &= ~BIT(port);
+
+ /* Port membership may share register with STP state. */
+ if (member >= 0 && member != p->member)
+ ksz8795_cfg_port_member(dev, port, (u8)member);
+
+ /* Check if forwarding needs to be updated. */
+ if (state != BR_STATE_FORWARDING) {
+ if (dev->br_member & BIT(port))
+ dev->member &= ~BIT(port);
+ }
+
+	/* When the topology has changed, ksz_update_port_member() should
+	 * be called to modify the port forwarding behavior.
+	 */
+ if (forward != dev->member)
+ ksz_update_port_member(dev, port);
+}
+
+static void ksz8795_flush_dyn_mac_table(struct ksz_device *dev, int port)
+{
+ u8 learn[TOTAL_PORT_NUM];
+ int first, index, cnt;
+ struct ksz_port *p;
+
+ if ((uint)port < TOTAL_PORT_NUM) {
+ first = port;
+ cnt = port + 1;
+ } else {
+ /* Flush all ports. */
+ first = 0;
+ cnt = dev->mib_port_cnt;
+ }
+ for (index = first; index < cnt; index++) {
+ p = &dev->ports[index];
+ if (!p->on)
+ continue;
+ ksz_pread8(dev, index, P_STP_CTRL, &learn[index]);
+ if (!(learn[index] & PORT_LEARN_DISABLE))
+ ksz_pwrite8(dev, index, P_STP_CTRL,
+ learn[index] | PORT_LEARN_DISABLE);
+ }
+ ksz_cfg(dev, S_FLUSH_TABLE_CTRL, SW_FLUSH_DYN_MAC_TABLE, true);
+ for (index = first; index < cnt; index++) {
+ p = &dev->ports[index];
+ if (!p->on)
+ continue;
+ if (!(learn[index] & PORT_LEARN_DISABLE))
+ ksz_pwrite8(dev, index, P_STP_CTRL, learn[index]);
+ }
+}
+
+static int ksz8795_port_vlan_filtering(struct dsa_switch *ds, int port,
+ bool flag)
+{
+ struct ksz_device *dev = ds->priv;
+
+ ksz_cfg(dev, S_MIRROR_CTRL, SW_VLAN_ENABLE, flag);
+
+ return 0;
+}
+
+static void ksz8795_port_vlan_add(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_vlan *vlan)
+{
+ bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
+ struct ksz_device *dev = ds->priv;
+ u16 data, vid, new_pvid = 0;
+ u8 fid, member, valid;
+
+ ksz_port_cfg(dev, port, P_TAG_CTRL, PORT_REMOVE_TAG, untagged);
+
+ for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) {
+ ksz8795_r_vlan_table(dev, vid, &data);
+ ksz8795_from_vlan(data, &fid, &member, &valid);
+
+		/* First time this VLAN entry is set up. */
+ if (!valid) {
+ /* Need to find a way to map VID to FID. */
+ fid = 1;
+ valid = 1;
+ }
+ member |= BIT(port);
+
+ ksz8795_to_vlan(fid, member, valid, &data);
+ ksz8795_w_vlan_table(dev, vid, data);
+
+ /* change PVID */
+ if (vlan->flags & BRIDGE_VLAN_INFO_PVID)
+ new_pvid = vid;
+ }
+
+ if (new_pvid) {
+ ksz_pread16(dev, port, REG_PORT_CTRL_VID, &vid);
+ vid &= 0xfff;
+ vid |= new_pvid;
+ ksz_pwrite16(dev, port, REG_PORT_CTRL_VID, vid);
+ }
+}
+
+static int ksz8795_port_vlan_del(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_vlan *vlan)
+{
+ bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
+ struct ksz_device *dev = ds->priv;
+ u16 data, vid, pvid, new_pvid = 0;
+ u8 fid, member, valid;
+
+ ksz_pread16(dev, port, REG_PORT_CTRL_VID, &pvid);
+ pvid = pvid & 0xFFF;
+
+ ksz_port_cfg(dev, port, P_TAG_CTRL, PORT_REMOVE_TAG, untagged);
+
+ for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) {
+ ksz8795_r_vlan_table(dev, vid, &data);
+ ksz8795_from_vlan(data, &fid, &member, &valid);
+
+ member &= ~BIT(port);
+
+		/* Invalidate the entry if there are no more members. */
+ if (!member) {
+ fid = 0;
+ valid = 0;
+ }
+
+ if (pvid == vid)
+ new_pvid = 1;
+
+ ksz8795_to_vlan(fid, member, valid, &data);
+ ksz8795_w_vlan_table(dev, vid, data);
+ }
+
+ if (new_pvid != pvid)
+ ksz_pwrite16(dev, port, REG_PORT_CTRL_VID, pvid);
+
+ return 0;
+}
+
+static int ksz8795_port_mirror_add(struct dsa_switch *ds, int port,
+ struct dsa_mall_mirror_tc_entry *mirror,
+ bool ingress)
+{
+ struct ksz_device *dev = ds->priv;
+
+ if (ingress) {
+ ksz_port_cfg(dev, port, P_MIRROR_CTRL, PORT_MIRROR_RX, true);
+ dev->mirror_rx |= BIT(port);
+ } else {
+ ksz_port_cfg(dev, port, P_MIRROR_CTRL, PORT_MIRROR_TX, true);
+ dev->mirror_tx |= BIT(port);
+ }
+
+ ksz_port_cfg(dev, port, P_MIRROR_CTRL, PORT_MIRROR_SNIFFER, false);
+
+ /* configure mirror port */
+ if (dev->mirror_rx || dev->mirror_tx)
+ ksz_port_cfg(dev, mirror->to_local_port, P_MIRROR_CTRL,
+ PORT_MIRROR_SNIFFER, true);
+
+ return 0;
+}
+
+static void ksz8795_port_mirror_del(struct dsa_switch *ds, int port,
+ struct dsa_mall_mirror_tc_entry *mirror)
+{
+ struct ksz_device *dev = ds->priv;
+ u8 data;
+
+ if (mirror->ingress) {
+ ksz_port_cfg(dev, port, P_MIRROR_CTRL, PORT_MIRROR_RX, false);
+ dev->mirror_rx &= ~BIT(port);
+ } else {
+ ksz_port_cfg(dev, port, P_MIRROR_CTRL, PORT_MIRROR_TX, false);
+ dev->mirror_tx &= ~BIT(port);
+ }
+
+ ksz_pread8(dev, port, P_MIRROR_CTRL, &data);
+
+ if (!dev->mirror_rx && !dev->mirror_tx)
+ ksz_port_cfg(dev, mirror->to_local_port, P_MIRROR_CTRL,
+ PORT_MIRROR_SNIFFER, false);
+}
+
+static void ksz8795_port_setup(struct ksz_device *dev, int port, bool cpu_port)
+{
+ struct ksz_port *p = &dev->ports[port];
+ u8 data8, member;
+
+ /* enable broadcast storm limit */
+ ksz_port_cfg(dev, port, P_BCAST_STORM_CTRL, PORT_BROADCAST_STORM, true);
+
+ ksz8795_set_prio_queue(dev, port, 4);
+
+ /* disable DiffServ priority */
+ ksz_port_cfg(dev, port, P_PRIO_CTRL, PORT_DIFFSERV_ENABLE, false);
+
+ /* replace priority */
+ ksz_port_cfg(dev, port, P_802_1P_CTRL, PORT_802_1P_REMAPPING, false);
+
+ /* enable 802.1p priority */
+ ksz_port_cfg(dev, port, P_PRIO_CTRL, PORT_802_1P_ENABLE, true);
+
+ if (cpu_port) {
+ /* Configure MII interface for proper network communication. */
+ ksz_read8(dev, REG_PORT_5_CTRL_6, &data8);
+ data8 &= ~PORT_INTERFACE_TYPE;
+ data8 &= ~PORT_GMII_1GPS_MODE;
+ switch (dev->interface) {
+ case PHY_INTERFACE_MODE_MII:
+ p->phydev.speed = SPEED_100;
+ break;
+ case PHY_INTERFACE_MODE_RMII:
+ data8 |= PORT_INTERFACE_RMII;
+ p->phydev.speed = SPEED_100;
+ break;
+ case PHY_INTERFACE_MODE_GMII:
+ data8 |= PORT_GMII_1GPS_MODE;
+ data8 |= PORT_INTERFACE_GMII;
+ p->phydev.speed = SPEED_1000;
+ break;
+ default:
+ data8 &= ~PORT_RGMII_ID_IN_ENABLE;
+ data8 &= ~PORT_RGMII_ID_OUT_ENABLE;
+ if (dev->interface == PHY_INTERFACE_MODE_RGMII_ID ||
+ dev->interface == PHY_INTERFACE_MODE_RGMII_RXID)
+ data8 |= PORT_RGMII_ID_IN_ENABLE;
+ if (dev->interface == PHY_INTERFACE_MODE_RGMII_ID ||
+ dev->interface == PHY_INTERFACE_MODE_RGMII_TXID)
+ data8 |= PORT_RGMII_ID_OUT_ENABLE;
+ data8 |= PORT_GMII_1GPS_MODE;
+ data8 |= PORT_INTERFACE_RGMII;
+ p->phydev.speed = SPEED_1000;
+ break;
+ }
+ ksz_write8(dev, REG_PORT_5_CTRL_6, data8);
+ p->phydev.duplex = 1;
+
+ member = dev->port_mask;
+ dev->on_ports = dev->host_mask;
+ dev->live_ports = dev->host_mask;
+ } else {
+ member = dev->host_mask | p->vid_member;
+ dev->on_ports |= BIT(port);
+
+		/* Link was detected before the port was enabled. */
+ if (p->phydev.link)
+ dev->live_ports |= BIT(port);
+ }
+ ksz8795_cfg_port_member(dev, port, member);
+}
+
+static void ksz8795_config_cpu_port(struct dsa_switch *ds)
+{
+ struct ksz_device *dev = ds->priv;
+ struct ksz_port *p;
+ u8 remote;
+ int i;
+
+ ds->num_ports = dev->port_cnt + 1;
+
+	/* The switch would mark a maximum-size frame carrying the extra
+	 * tail-tag byte as oversized, so disable the legal packet size check.
+	 */
+ ksz_cfg(dev, REG_SW_CTRL_2, SW_LEGAL_PACKET_DISABLE, true);
+ ksz_cfg(dev, S_TAIL_TAG_CTRL, SW_TAIL_TAG_ENABLE, true);
+
+ p = &dev->ports[dev->cpu_port];
+ p->vid_member = dev->port_mask;
+ p->on = 1;
+
+ ksz8795_port_setup(dev, dev->cpu_port, true);
+ dev->member = dev->host_mask;
+
+ for (i = 0; i < SWITCH_PORT_NUM; i++) {
+ p = &dev->ports[i];
+
+		/* Initialize to non-zero so that ksz8795_cfg_port_member()
+		 * will be called.
+		 */
+ p->vid_member = BIT(i);
+ p->member = dev->port_mask;
+ ksz8795_port_stp_state_set(ds, i, BR_STATE_DISABLED);
+
+ /* Last port may be disabled. */
+ if (i == dev->port_cnt)
+ break;
+ p->on = 1;
+ p->phy = 1;
+ }
+ for (i = 0; i < dev->phy_port_cnt; i++) {
+ p = &dev->ports[i];
+ if (!p->on)
+ continue;
+ ksz_pread8(dev, i, P_REMOTE_STATUS, &remote);
+ if (remote & PORT_FIBER_MODE)
+ p->fiber = 1;
+ if (p->fiber)
+ ksz_port_cfg(dev, i, P_STP_CTRL, PORT_FORCE_FLOW_CTRL,
+ true);
+ else
+ ksz_port_cfg(dev, i, P_STP_CTRL, PORT_FORCE_FLOW_CTRL,
+ false);
+ }
+}
+
+static int ksz8795_setup(struct dsa_switch *ds)
+{
+ struct ksz_device *dev = ds->priv;
+ struct alu_struct alu;
+ int i, ret = 0;
+
+	dev->vlan_cache = devm_kcalloc(dev->dev, dev->num_vlans,
+				       sizeof(struct vlan_table), GFP_KERNEL);
+ if (!dev->vlan_cache)
+ return -ENOMEM;
+
+ ret = ksz8795_reset_switch(dev);
+ if (ret) {
+ dev_err(ds->dev, "failed to reset switch\n");
+ return ret;
+ }
+
+ ksz_cfg(dev, S_REPLACE_VID_CTRL, SW_FLOW_CTRL, true);
+
+	/* Enable automatic fast aging when a link change is detected. */
+ ksz_cfg(dev, S_LINK_AGING_CTRL, SW_LINK_AUTO_AGING, true);
+
+	/* Enable the aggressive backoff algorithm in half-duplex mode. */
+ regmap_update_bits(dev->regmap[0], REG_SW_CTRL_1,
+ SW_AGGR_BACKOFF, SW_AGGR_BACKOFF);
+
+	/* Make sure the unicast VLAN boundary is set as default and
+	 * enable the no-excessive-collision drop function.
+	 */
+ regmap_update_bits(dev->regmap[0], REG_SW_CTRL_2,
+ UNICAST_VLAN_BOUNDARY | NO_EXC_COLLISION_DROP,
+ UNICAST_VLAN_BOUNDARY | NO_EXC_COLLISION_DROP);
+
+ ksz8795_config_cpu_port(ds);
+
+ ksz_cfg(dev, REG_SW_CTRL_2, MULTICAST_STORM_DISABLE, true);
+
+ ksz_cfg(dev, S_REPLACE_VID_CTRL, SW_REPLACE_VID, false);
+
+ ksz_cfg(dev, S_MIRROR_CTRL, SW_MIRROR_RX_TX, false);
+
+	/* Set the broadcast storm protection rate to 10%. */
+ regmap_update_bits(dev->regmap[1], S_REPLACE_VID_CTRL,
+ BROADCAST_STORM_RATE,
+ (BROADCAST_STORM_VALUE *
+ BROADCAST_STORM_PROT_RATE) / 100);
+
+ for (i = 0; i < VLAN_TABLE_ENTRIES; i++)
+ ksz8795_r_vlan_entries(dev, i);
+
+ /* Setup STP address for STP operation. */
+ memset(&alu, 0, sizeof(alu));
+ ether_addr_copy(alu.mac, eth_stp_addr);
+ alu.is_static = true;
+ alu.is_override = true;
+ alu.port_forward = dev->host_mask;
+
+ ksz8795_w_sta_mac_table(dev, 0, &alu);
+
+ ksz_init_mib_timer(dev);
+
+ return 0;
+}
+
+static const struct dsa_switch_ops ksz8795_switch_ops = {
+ .get_tag_protocol = ksz8795_get_tag_protocol,
+ .setup = ksz8795_setup,
+ .phy_read = ksz_phy_read16,
+ .phy_write = ksz_phy_write16,
+ .adjust_link = ksz_adjust_link,
+ .port_enable = ksz_enable_port,
+ .port_disable = ksz_disable_port,
+ .get_strings = ksz8795_get_strings,
+ .get_ethtool_stats = ksz_get_ethtool_stats,
+ .get_sset_count = ksz_sset_count,
+ .port_bridge_join = ksz_port_bridge_join,
+ .port_bridge_leave = ksz_port_bridge_leave,
+ .port_stp_state_set = ksz8795_port_stp_state_set,
+ .port_fast_age = ksz_port_fast_age,
+ .port_vlan_filtering = ksz8795_port_vlan_filtering,
+ .port_vlan_prepare = ksz_port_vlan_prepare,
+ .port_vlan_add = ksz8795_port_vlan_add,
+ .port_vlan_del = ksz8795_port_vlan_del,
+ .port_fdb_dump = ksz_port_fdb_dump,
+ .port_mdb_prepare = ksz_port_mdb_prepare,
+ .port_mdb_add = ksz_port_mdb_add,
+ .port_mdb_del = ksz_port_mdb_del,
+ .port_mirror_add = ksz8795_port_mirror_add,
+ .port_mirror_del = ksz8795_port_mirror_del,
+};
+
+static u32 ksz8795_get_port_addr(int port, int offset)
+{
+ return PORT_CTRL_ADDR(port, offset);
+}
+
+static int ksz8795_switch_detect(struct ksz_device *dev)
+{
+ u8 id1, id2;
+ u16 id16;
+ int ret;
+
+ /* read chip id */
+ ret = ksz_read16(dev, REG_CHIP_ID0, &id16);
+ if (ret)
+ return ret;
+
+ id1 = id16 >> 8;
+ id2 = id16 & SW_CHIP_ID_M;
+ if (id1 != FAMILY_ID ||
+ (id2 != CHIP_ID_94 && id2 != CHIP_ID_95))
+ return -ENODEV;
+
+ dev->mib_port_cnt = TOTAL_PORT_NUM;
+ dev->phy_port_cnt = SWITCH_PORT_NUM;
+ dev->port_cnt = SWITCH_PORT_NUM;
+
+ if (id2 == CHIP_ID_95) {
+ u8 val;
+
+ id2 = 0x95;
+ ksz_read8(dev, REG_PORT_1_STATUS_0, &val);
+ if (val & PORT_FIBER_MODE)
+ id2 = 0x65;
+ } else if (id2 == CHIP_ID_94) {
+ dev->port_cnt--;
+ dev->last_port = dev->port_cnt;
+ id2 = 0x94;
+ }
+ id16 &= ~0xff;
+ id16 |= id2;
+ dev->chip_id = id16;
+
+ dev->cpu_port = dev->mib_port_cnt - 1;
+ dev->host_mask = BIT(dev->cpu_port);
+
+ return 0;
+}
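A worked example of the detection logic, using only the constants from ksz8795_reg.h shown below: a KSZ8795 returns 0x8790 from the 16-bit read at REG_CHIP_ID0, which this sketch turns into the canonical chip_id 0x8795:

    #include <stdio.h>
    #include <stdint.h>

    #define FAMILY_ID    0x87
    #define SW_CHIP_ID_M 0xF0
    #define CHIP_ID_94   0x60
    #define CHIP_ID_95   0x90

    int main(void)
    {
        uint16_t id16 = 0x8790;   /* 16-bit read at REG_CHIP_ID0 */
        uint8_t id1 = id16 >> 8;  /* family byte */
        uint8_t id2 = id16 & SW_CHIP_ID_M;

        if (id1 != FAMILY_ID || (id2 != CHIP_ID_94 && id2 != CHIP_ID_95))
            return 1;  /* -ENODEV in the driver */

        /* 0x90 -> 0x95 (or 0x65 if port 1 is strapped for fiber) */
        id2 = (id2 == CHIP_ID_95) ? 0x95 : 0x94;
        printf("chip_id = 0x%04x\n", (uint16_t)((id16 & ~0xff) | id2));
        return 0;
    }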
+
+struct ksz_chip_data {
+ u16 chip_id;
+ const char *dev_name;
+ int num_vlans;
+ int num_alus;
+ int num_statics;
+ int cpu_ports;
+ int port_cnt;
+};
+
+static const struct ksz_chip_data ksz8795_switch_chips[] = {
+ {
+ .chip_id = 0x8795,
+ .dev_name = "KSZ8795",
+ .num_vlans = 4096,
+ .num_alus = 0,
+ .num_statics = 8,
+ .cpu_ports = 0x10, /* can be configured as cpu port */
+ .port_cnt = 4, /* total physical port count */
+ },
+ {
+ .chip_id = 0x8794,
+ .dev_name = "KSZ8794",
+ .num_vlans = 4096,
+ .num_alus = 0,
+ .num_statics = 8,
+ .cpu_ports = 0x10, /* can be configured as cpu port */
+ .port_cnt = 3, /* total physical port count */
+ },
+ {
+ .chip_id = 0x8765,
+ .dev_name = "KSZ8765",
+ .num_vlans = 4096,
+ .num_alus = 0,
+ .num_statics = 8,
+ .cpu_ports = 0x10, /* can be configured as cpu port */
+ .port_cnt = 4, /* total physical port count */
+ },
+};
+
+static int ksz8795_switch_init(struct ksz_device *dev)
+{
+ int i;
+
+ mutex_init(&dev->stats_mutex);
+ mutex_init(&dev->alu_mutex);
+ mutex_init(&dev->vlan_mutex);
+
+ dev->ds->ops = &ksz8795_switch_ops;
+
+ for (i = 0; i < ARRAY_SIZE(ksz8795_switch_chips); i++) {
+ const struct ksz_chip_data *chip = &ksz8795_switch_chips[i];
+
+ if (dev->chip_id == chip->chip_id) {
+ dev->name = chip->dev_name;
+ dev->num_vlans = chip->num_vlans;
+ dev->num_alus = chip->num_alus;
+ dev->num_statics = chip->num_statics;
+ dev->port_cnt = chip->port_cnt;
+ dev->cpu_ports = chip->cpu_ports;
+
+ break;
+ }
+ }
+
+ /* no switch found */
+ if (!dev->cpu_ports)
+ return -ENODEV;
+
+ dev->port_mask = BIT(dev->port_cnt) - 1;
+ dev->port_mask |= dev->host_mask;
+
+ dev->reg_mib_cnt = SWITCH_COUNTER_NUM;
+ dev->mib_cnt = TOTAL_SWITCH_COUNTER_NUM;
+
+ i = dev->mib_port_cnt;
+ dev->ports = devm_kzalloc(dev->dev, sizeof(struct ksz_port) * i,
+ GFP_KERNEL);
+ if (!dev->ports)
+ return -ENOMEM;
+ for (i = 0; i < dev->mib_port_cnt; i++) {
+ mutex_init(&dev->ports[i].mib.cnt_mutex);
+ dev->ports[i].mib.counters =
+ devm_kzalloc(dev->dev,
+ sizeof(u64) *
+ (TOTAL_SWITCH_COUNTER_NUM + 1),
+ GFP_KERNEL);
+ if (!dev->ports[i].mib.counters)
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static void ksz8795_switch_exit(struct ksz_device *dev)
+{
+ ksz8795_reset_switch(dev);
+}
+
+static const struct ksz_dev_ops ksz8795_dev_ops = {
+ .get_port_addr = ksz8795_get_port_addr,
+ .cfg_port_member = ksz8795_cfg_port_member,
+ .flush_dyn_mac_table = ksz8795_flush_dyn_mac_table,
+ .port_setup = ksz8795_port_setup,
+ .r_phy = ksz8795_r_phy,
+ .w_phy = ksz8795_w_phy,
+ .r_dyn_mac_table = ksz8795_r_dyn_mac_table,
+ .r_sta_mac_table = ksz8795_r_sta_mac_table,
+ .w_sta_mac_table = ksz8795_w_sta_mac_table,
+ .r_mib_cnt = ksz8795_r_mib_cnt,
+ .r_mib_pkt = ksz8795_r_mib_pkt,
+ .freeze_mib = ksz8795_freeze_mib,
+ .port_init_cnt = ksz8795_port_init_cnt,
+ .shutdown = ksz8795_reset_switch,
+ .detect = ksz8795_switch_detect,
+ .init = ksz8795_switch_init,
+ .exit = ksz8795_switch_exit,
+};
+
+int ksz8795_switch_register(struct ksz_device *dev)
+{
+ return ksz_switch_register(dev, &ksz8795_dev_ops);
+}
+EXPORT_SYMBOL(ksz8795_switch_register);
+
+MODULE_AUTHOR("Tristram Ha <Tristram.Ha@microchip.com>");
+MODULE_DESCRIPTION("Microchip KSZ8795 Series Switch DSA Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/dsa/microchip/ksz8795_reg.h b/drivers/net/dsa/microchip/ksz8795_reg.h
new file mode 100644
index 000000000000..3a50462df8fa
--- /dev/null
+++ b/drivers/net/dsa/microchip/ksz8795_reg.h
@@ -0,0 +1,1004 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Microchip KSZ8795 register definitions
+ *
+ * Copyright (c) 2017 Microchip Technology Inc.
+ * Tristram Ha <Tristram.Ha@microchip.com>
+ */
+
+#ifndef __KSZ8795_REG_H
+#define __KSZ8795_REG_H
+
+#define KS_PORT_M 0x1F
+
+#define KS_PRIO_M 0x3
+#define KS_PRIO_S 2
+
+#define REG_CHIP_ID0 0x00
+
+#define FAMILY_ID 0x87
+
+#define REG_CHIP_ID1 0x01
+
+#define SW_CHIP_ID_M 0xF0
+#define SW_CHIP_ID_S 4
+#define SW_REVISION_M 0x0E
+#define SW_REVISION_S 1
+#define SW_START 0x01
+
+#define CHIP_ID_94 0x60
+#define CHIP_ID_95 0x90
+
+#define REG_SW_CTRL_0 0x02
+
+#define SW_NEW_BACKOFF BIT(7)
+#define SW_GLOBAL_RESET BIT(6)
+#define SW_FLUSH_DYN_MAC_TABLE BIT(5)
+#define SW_FLUSH_STA_MAC_TABLE BIT(4)
+#define SW_LINK_AUTO_AGING BIT(0)
+
+#define REG_SW_CTRL_1 0x03
+
+#define SW_HUGE_PACKET BIT(6)
+#define SW_TX_FLOW_CTRL_DISABLE BIT(5)
+#define SW_RX_FLOW_CTRL_DISABLE BIT(4)
+#define SW_CHECK_LENGTH BIT(3)
+#define SW_AGING_ENABLE BIT(2)
+#define SW_FAST_AGING BIT(1)
+#define SW_AGGR_BACKOFF BIT(0)
+
+#define REG_SW_CTRL_2 0x04
+
+#define UNICAST_VLAN_BOUNDARY BIT(7)
+#define MULTICAST_STORM_DISABLE BIT(6)
+#define SW_BACK_PRESSURE BIT(5)
+#define FAIR_FLOW_CTRL BIT(4)
+#define NO_EXC_COLLISION_DROP BIT(3)
+#define SW_LEGAL_PACKET_DISABLE BIT(1)
+
+#define REG_SW_CTRL_3 0x05
+ #define WEIGHTED_FAIR_QUEUE_ENABLE BIT(3)
+
+#define SW_VLAN_ENABLE BIT(7)
+#define SW_IGMP_SNOOP BIT(6)
+#define SW_MIRROR_RX_TX BIT(0)
+
+#define REG_SW_CTRL_4 0x06
+
+#define SW_HALF_DUPLEX_FLOW_CTRL BIT(7)
+#define SW_HALF_DUPLEX BIT(6)
+#define SW_FLOW_CTRL BIT(5)
+#define SW_10_MBIT BIT(4)
+#define SW_REPLACE_VID BIT(3)
+#define BROADCAST_STORM_RATE_HI 0x07
+
+#define REG_SW_CTRL_5 0x07
+
+#define BROADCAST_STORM_RATE_LO 0xFF
+#define BROADCAST_STORM_RATE 0x07FF
+
+#define REG_SW_CTRL_6 0x08
+
+#define SW_MIB_COUNTER_FLUSH BIT(7)
+#define SW_MIB_COUNTER_FREEZE BIT(6)
+#define SW_MIB_COUNTER_CTRL_ENABLE KS_PORT_M
+
+#define REG_SW_CTRL_9 0x0B
+
+#define SPI_CLK_125_MHZ 0x80
+#define SPI_CLK_62_5_MHZ 0x40
+#define SPI_CLK_31_25_MHZ 0x00
+
+#define SW_LED_MODE_M 0x3
+#define SW_LED_MODE_S 4
+#define SW_LED_LINK_ACT_SPEED 0
+#define SW_LED_LINK_ACT 1
+#define SW_LED_LINK_ACT_DUPLEX 2
+#define SW_LED_LINK_DUPLEX 3
+
+#define REG_SW_CTRL_10 0x0C
+
+#define SW_TAIL_TAG_ENABLE BIT(1)
+#define SW_PASS_PAUSE BIT(0)
+
+#define REG_SW_CTRL_11 0x0D
+
+#define REG_POWER_MANAGEMENT_1 0x0E
+
+#define SW_PLL_POWER_DOWN BIT(5)
+#define SW_POWER_MANAGEMENT_MODE_M 0x3
+#define SW_POWER_MANAGEMENT_MODE_S 3
+#define SW_POWER_NORMAL 0
+#define SW_ENERGY_DETECTION 1
+#define SW_SOFTWARE_POWER_DOWN 2
+
+#define REG_POWER_MANAGEMENT_2 0x0F
+
+#define REG_PORT_1_CTRL_0 0x10
+#define REG_PORT_2_CTRL_0 0x20
+#define REG_PORT_3_CTRL_0 0x30
+#define REG_PORT_4_CTRL_0 0x40
+#define REG_PORT_5_CTRL_0 0x50
+
+#define PORT_BROADCAST_STORM BIT(7)
+#define PORT_DIFFSERV_ENABLE BIT(6)
+#define PORT_802_1P_ENABLE BIT(5)
+#define PORT_BASED_PRIO_S 3
+#define PORT_BASED_PRIO_M KS_PRIO_M
+#define PORT_BASED_PRIO_0 0
+#define PORT_BASED_PRIO_1 1
+#define PORT_BASED_PRIO_2 2
+#define PORT_BASED_PRIO_3 3
+#define PORT_INSERT_TAG BIT(2)
+#define PORT_REMOVE_TAG BIT(1)
+#define PORT_QUEUE_SPLIT_L BIT(0)
+
+#define REG_PORT_1_CTRL_1 0x11
+#define REG_PORT_2_CTRL_1 0x21
+#define REG_PORT_3_CTRL_1 0x31
+#define REG_PORT_4_CTRL_1 0x41
+#define REG_PORT_5_CTRL_1 0x51
+
+#define PORT_MIRROR_SNIFFER BIT(7)
+#define PORT_MIRROR_RX BIT(6)
+#define PORT_MIRROR_TX BIT(5)
+#define PORT_VLAN_MEMBERSHIP KS_PORT_M
+
+#define REG_PORT_1_CTRL_2 0x12
+#define REG_PORT_2_CTRL_2 0x22
+#define REG_PORT_3_CTRL_2 0x32
+#define REG_PORT_4_CTRL_2 0x42
+#define REG_PORT_5_CTRL_2 0x52
+
+#define PORT_802_1P_REMAPPING BIT(7)
+#define PORT_INGRESS_FILTER BIT(6)
+#define PORT_DISCARD_NON_VID BIT(5)
+#define PORT_FORCE_FLOW_CTRL BIT(4)
+#define PORT_BACK_PRESSURE BIT(3)
+#define PORT_TX_ENABLE BIT(2)
+#define PORT_RX_ENABLE BIT(1)
+#define PORT_LEARN_DISABLE BIT(0)
+
+#define REG_PORT_1_CTRL_3 0x13
+#define REG_PORT_2_CTRL_3 0x23
+#define REG_PORT_3_CTRL_3 0x33
+#define REG_PORT_4_CTRL_3 0x43
+#define REG_PORT_5_CTRL_3 0x53
+#define REG_PORT_1_CTRL_4 0x14
+#define REG_PORT_2_CTRL_4 0x24
+#define REG_PORT_3_CTRL_4 0x34
+#define REG_PORT_4_CTRL_4 0x44
+#define REG_PORT_5_CTRL_4 0x54
+
+#define PORT_DEFAULT_VID 0x0001
+
+#define REG_PORT_1_CTRL_5 0x15
+#define REG_PORT_2_CTRL_5 0x25
+#define REG_PORT_3_CTRL_5 0x35
+#define REG_PORT_4_CTRL_5 0x45
+#define REG_PORT_5_CTRL_5 0x55
+
+#define PORT_ACL_ENABLE BIT(2)
+#define PORT_AUTHEN_MODE 0x3
+#define PORT_AUTHEN_PASS 0
+#define PORT_AUTHEN_BLOCK 1
+#define PORT_AUTHEN_TRAP 2
+
+#define REG_PORT_5_CTRL_6 0x56
+
+#define PORT_MII_INTERNAL_CLOCK BIT(7)
+#define PORT_GMII_1GPS_MODE BIT(6)
+#define PORT_RGMII_ID_IN_ENABLE BIT(4)
+#define PORT_RGMII_ID_OUT_ENABLE BIT(3)
+#define PORT_GMII_MAC_MODE BIT(2)
+#define PORT_INTERFACE_TYPE 0x3
+#define PORT_INTERFACE_MII 0
+#define PORT_INTERFACE_RMII 1
+#define PORT_INTERFACE_GMII 2
+#define PORT_INTERFACE_RGMII 3
+
+#define REG_PORT_1_CTRL_7 0x17
+#define REG_PORT_2_CTRL_7 0x27
+#define REG_PORT_3_CTRL_7 0x37
+#define REG_PORT_4_CTRL_7 0x47
+
+#define PORT_AUTO_NEG_ASYM_PAUSE BIT(5)
+#define PORT_AUTO_NEG_SYM_PAUSE BIT(4)
+#define PORT_AUTO_NEG_100BTX_FD BIT(3)
+#define PORT_AUTO_NEG_100BTX BIT(2)
+#define PORT_AUTO_NEG_10BT_FD BIT(1)
+#define PORT_AUTO_NEG_10BT BIT(0)
+
+#define REG_PORT_1_STATUS_0 0x18
+#define REG_PORT_2_STATUS_0 0x28
+#define REG_PORT_3_STATUS_0 0x38
+#define REG_PORT_4_STATUS_0 0x48
+
+/* For KSZ8765. */
+#define PORT_FIBER_MODE BIT(7)
+
+#define PORT_REMOTE_ASYM_PAUSE BIT(5)
+#define PORT_REMOTE_SYM_PAUSE BIT(4)
+#define PORT_REMOTE_100BTX_FD BIT(3)
+#define PORT_REMOTE_100BTX BIT(2)
+#define PORT_REMOTE_10BT_FD BIT(1)
+#define PORT_REMOTE_10BT BIT(0)
+
+#define REG_PORT_1_STATUS_1 0x19
+#define REG_PORT_2_STATUS_1 0x29
+#define REG_PORT_3_STATUS_1 0x39
+#define REG_PORT_4_STATUS_1 0x49
+
+#define PORT_HP_MDIX BIT(7)
+#define PORT_REVERSED_POLARITY BIT(5)
+#define PORT_TX_FLOW_CTRL BIT(4)
+#define PORT_RX_FLOW_CTRL BIT(3)
+#define PORT_STAT_SPEED_100MBIT BIT(2)
+#define PORT_STAT_FULL_DUPLEX BIT(1)
+
+#define PORT_REMOTE_FAULT BIT(0)
+
+#define REG_PORT_1_LINK_MD_CTRL 0x1A
+#define REG_PORT_2_LINK_MD_CTRL 0x2A
+#define REG_PORT_3_LINK_MD_CTRL 0x3A
+#define REG_PORT_4_LINK_MD_CTRL 0x4A
+
+#define PORT_CABLE_10M_SHORT BIT(7)
+#define PORT_CABLE_DIAG_RESULT_M 0x3
+#define PORT_CABLE_DIAG_RESULT_S 5
+#define PORT_CABLE_STAT_NORMAL 0
+#define PORT_CABLE_STAT_OPEN 1
+#define PORT_CABLE_STAT_SHORT 2
+#define PORT_CABLE_STAT_FAILED 3
+#define PORT_START_CABLE_DIAG BIT(4)
+#define PORT_FORCE_LINK BIT(3)
+#define PORT_POWER_SAVING BIT(2)
+#define PORT_PHY_REMOTE_LOOPBACK BIT(1)
+#define PORT_CABLE_FAULT_COUNTER_H 0x01
+
+#define REG_PORT_1_LINK_MD_RESULT 0x1B
+#define REG_PORT_2_LINK_MD_RESULT 0x2B
+#define REG_PORT_3_LINK_MD_RESULT 0x3B
+#define REG_PORT_4_LINK_MD_RESULT 0x4B
+
+#define PORT_CABLE_FAULT_COUNTER_L 0xFF
+#define PORT_CABLE_FAULT_COUNTER 0x1FF
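
LinkMD cable diagnostics are driven through these two register pairs: software sets PORT_START_CABLE_DIAG, waits for the bit to self-clear, then combines the 9-bit fault-distance counter from both registers. A minimal sketch using the ksz_pread8()/ksz_pwrite8() per-port accessors from ksz_common.h and the generic REG_PORT_LINK_MD_CTRL/_RESULT offsets defined further down; the poll budget and sleep interval are illustrative assumptions, and locking/error handling are omitted:

static int ksz8795_cable_diag(struct ksz_device *dev, int port, u16 *fault)
{
	u8 ctrl, result;
	int i;

	/* Start the diagnostic; the bit self-clears on completion. */
	ksz_pread8(dev, port, REG_PORT_LINK_MD_CTRL, &ctrl);
	ksz_pwrite8(dev, port, REG_PORT_LINK_MD_CTRL,
		    ctrl | PORT_START_CABLE_DIAG);

	for (i = 0; i < 10; i++) {
		ksz_pread8(dev, port, REG_PORT_LINK_MD_CTRL, &ctrl);
		if (!(ctrl & PORT_START_CABLE_DIAG))
			break;
		usleep_range(1000, 2000);
	}
	if (ctrl & PORT_START_CABLE_DIAG)
		return -ETIMEDOUT;

	/* 9-bit counter: bit 8 lives in CTRL, bits 7:0 in RESULT. */
	ksz_pread8(dev, port, REG_PORT_LINK_MD_RESULT, &result);
	*fault = ((ctrl & PORT_CABLE_FAULT_COUNTER_H) << 8) |
		 (result & PORT_CABLE_FAULT_COUNTER_L);

	return 0;
}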
+
+#define REG_PORT_1_CTRL_9 0x1C
+#define REG_PORT_2_CTRL_9 0x2C
+#define REG_PORT_3_CTRL_9 0x3C
+#define REG_PORT_4_CTRL_9 0x4C
+
+#define PORT_AUTO_NEG_DISABLE BIT(7)
+#define PORT_FORCE_100_MBIT BIT(6)
+#define PORT_FORCE_FULL_DUPLEX BIT(5)
+
+#define REG_PORT_1_CTRL_10 0x1D
+#define REG_PORT_2_CTRL_10 0x2D
+#define REG_PORT_3_CTRL_10 0x3D
+#define REG_PORT_4_CTRL_10 0x4D
+
+#define PORT_LED_OFF BIT(7)
+#define PORT_TX_DISABLE BIT(6)
+#define PORT_AUTO_NEG_RESTART BIT(5)
+#define PORT_POWER_DOWN BIT(3)
+#define PORT_AUTO_MDIX_DISABLE BIT(2)
+#define PORT_FORCE_MDIX BIT(1)
+#define PORT_MAC_LOOPBACK BIT(0)
+
+#define REG_PORT_1_STATUS_2 0x1E
+#define REG_PORT_2_STATUS_2 0x2E
+#define REG_PORT_3_STATUS_2 0x3E
+#define REG_PORT_4_STATUS_2 0x4E
+
+#define PORT_MDIX_STATUS BIT(7)
+#define PORT_AUTO_NEG_COMPLETE BIT(6)
+#define PORT_STAT_LINK_GOOD BIT(5)
+
+#define REG_PORT_1_STATUS_3 0x1F
+#define REG_PORT_2_STATUS_3 0x2F
+#define REG_PORT_3_STATUS_3 0x3F
+#define REG_PORT_4_STATUS_3 0x4F
+
+#define PORT_PHY_LOOPBACK BIT(7)
+#define PORT_PHY_ISOLATE BIT(5)
+#define PORT_PHY_SOFT_RESET BIT(4)
+#define PORT_PHY_FORCE_LINK BIT(3)
+#define PORT_PHY_MODE_M 0x7
+#define PHY_MODE_IN_AUTO_NEG 1
+#define PHY_MODE_10BT_HALF 2
+#define PHY_MODE_100BT_HALF 3
+#define PHY_MODE_10BT_FULL 5
+#define PHY_MODE_100BT_FULL 6
+#define PHY_MODE_ISOLATE 7
+
+#define REG_PORT_CTRL_0 0x00
+#define REG_PORT_CTRL_1 0x01
+#define REG_PORT_CTRL_2 0x02
+#define REG_PORT_CTRL_VID 0x03
+
+#define REG_PORT_CTRL_5 0x05
+
+#define REG_PORT_CTRL_7 0x07
+#define REG_PORT_STATUS_0 0x08
+#define REG_PORT_STATUS_1 0x09
+#define REG_PORT_LINK_MD_CTRL 0x0A
+#define REG_PORT_LINK_MD_RESULT 0x0B
+#define REG_PORT_CTRL_9 0x0C
+#define REG_PORT_CTRL_10 0x0D
+#define REG_PORT_STATUS_2 0x0E
+#define REG_PORT_STATUS_3 0x0F
+
+#define REG_PORT_CTRL_12 0xA0
+#define REG_PORT_CTRL_13 0xA1
+#define REG_PORT_RATE_CTRL_3 0xA2
+#define REG_PORT_RATE_CTRL_2 0xA3
+#define REG_PORT_RATE_CTRL_1 0xA4
+#define REG_PORT_RATE_CTRL_0 0xA5
+#define REG_PORT_RATE_LIMIT 0xA6
+#define REG_PORT_IN_RATE_0 0xA7
+#define REG_PORT_IN_RATE_1 0xA8
+#define REG_PORT_IN_RATE_2 0xA9
+#define REG_PORT_IN_RATE_3 0xAA
+#define REG_PORT_OUT_RATE_0 0xAB
+#define REG_PORT_OUT_RATE_1 0xAC
+#define REG_PORT_OUT_RATE_2 0xAD
+#define REG_PORT_OUT_RATE_3 0xAE
+
+#define PORT_CTRL_ADDR(port, addr) \
+ ((addr) + REG_PORT_1_CTRL_0 + (port) * \
+ (REG_PORT_2_CTRL_0 - REG_PORT_1_CTRL_0))
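
A worked expansion of the helper (the port argument is zero-based; REG_PORT_1_CTRL_0, the port-1 bank base defined earlier in this header, is presumably 0x10 given the port-1 registers at 0x11..0x1F, and REG_PORT_CTRL_2 is the generic 0x02 offset defined just below):

/*
 * PORT_CTRL_ADDR(0, REG_PORT_CTRL_2) = 0x02 + 0x10 + 0 * 0x10 = 0x12
 * PORT_CTRL_ADDR(2, REG_PORT_CTRL_2) = 0x02 + 0x10 + 2 * 0x10 = 0x32
 *
 * i.e. REG_PORT_1_CTRL_2 and REG_PORT_3_CTRL_2 above: each port owns
 * one 0x10-wide register bank.
 */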
+
+#define REG_SW_MAC_ADDR_0 0x68
+#define REG_SW_MAC_ADDR_1 0x69
+#define REG_SW_MAC_ADDR_2 0x6A
+#define REG_SW_MAC_ADDR_3 0x6B
+#define REG_SW_MAC_ADDR_4 0x6C
+#define REG_SW_MAC_ADDR_5 0x6D
+
+#define REG_IND_CTRL_0 0x6E
+
+#define TABLE_EXT_SELECT_S 5
+#define TABLE_EEE_V 1
+#define TABLE_ACL_V 2
+#define TABLE_PME_V 4
+#define TABLE_LINK_MD_V 5
+#define TABLE_EEE (TABLE_EEE_V << TABLE_EXT_SELECT_S)
+#define TABLE_ACL (TABLE_ACL_V << TABLE_EXT_SELECT_S)
+#define TABLE_PME (TABLE_PME_V << TABLE_EXT_SELECT_S)
+#define TABLE_LINK_MD (TABLE_LINK_MD_V << TABLE_EXT_SELECT_S)
+#define TABLE_READ BIT(4)
+#define TABLE_SELECT_S 2
+#define TABLE_STATIC_MAC_V 0
+#define TABLE_VLAN_V 1
+#define TABLE_DYNAMIC_MAC_V 2
+#define TABLE_MIB_V 3
+#define TABLE_STATIC_MAC (TABLE_STATIC_MAC_V << TABLE_SELECT_S)
+#define TABLE_VLAN (TABLE_VLAN_V << TABLE_SELECT_S)
+#define TABLE_DYNAMIC_MAC (TABLE_DYNAMIC_MAC_V << TABLE_SELECT_S)
+#define TABLE_MIB (TABLE_MIB_V << TABLE_SELECT_S)
+
+#define REG_IND_CTRL_1 0x6F
+
+#define TABLE_ENTRY_MASK 0x03FF
+#define TABLE_EXT_ENTRY_MASK 0x0FFF
+
+#define REG_IND_DATA_8 0x70
+#define REG_IND_DATA_7 0x71
+#define REG_IND_DATA_6 0x72
+#define REG_IND_DATA_5 0x73
+#define REG_IND_DATA_4 0x74
+#define REG_IND_DATA_3 0x75
+#define REG_IND_DATA_2 0x76
+#define REG_IND_DATA_1 0x77
+#define REG_IND_DATA_0 0x78
+
+#define REG_IND_DATA_PME_EEE_ACL 0xA0
+
+#define REG_IND_DATA_CHECK REG_IND_DATA_6
+#define REG_IND_MIB_CHECK REG_IND_DATA_4
+#define REG_IND_DATA_HI REG_IND_DATA_7
+#define REG_IND_DATA_LO REG_IND_DATA_3
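
Switch tables are reached indirectly: a 16-bit command carrying the table select, the read/write direction and the entry index is written to REG_IND_CTRL_0, and the hardware latches the entry into the REG_IND_DATA_* window. A minimal read sketch, assuming the ksz_write16()/ksz_read64() accessors from ksz_common.h and omitting the ALU mutex the real driver holds around the sequence:

static void ksz8795_read_table(struct ksz_device *dev, u16 table, u16 addr,
			       u64 *data)
{
	/* (table | TABLE_READ) << 8 is what the IND_ACC_TABLE() helper
	 * defined later in this header computes.
	 */
	u16 ctrl = ((table | TABLE_READ) << 8) | (addr & TABLE_ENTRY_MASK);

	ksz_write16(dev, REG_IND_CTRL_0, ctrl);
	ksz_read64(dev, REG_IND_DATA_HI, data); /* DATA_7 .. DATA_0 */
}

For example, ksz8795_read_table(dev, TABLE_DYNAMIC_MAC, 0, &entry) fetches the first dynamic MAC table entry.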
+
+#define REG_INT_STATUS 0x7C
+#define REG_INT_ENABLE 0x7D
+
+#define INT_PME BIT(4)
+
+#define REG_ACL_INT_STATUS 0x7E
+#define REG_ACL_INT_ENABLE 0x7F
+
+#define INT_PORT_5 BIT(4)
+#define INT_PORT_4 BIT(3)
+#define INT_PORT_3 BIT(2)
+#define INT_PORT_2 BIT(1)
+#define INT_PORT_1 BIT(0)
+
+#define INT_PORT_ALL \
+ (INT_PORT_5 | INT_PORT_4 | INT_PORT_3 | INT_PORT_2 | INT_PORT_1)
+
+#define REG_SW_CTRL_12 0x80
+#define REG_SW_CTRL_13 0x81
+
+#define SWITCH_802_1P_MASK 3
+#define SWITCH_802_1P_BASE 3
+#define SWITCH_802_1P_SHIFT 2
+
+#define SW_802_1P_MAP_M KS_PRIO_M
+#define SW_802_1P_MAP_S KS_PRIO_S
+
+#define REG_SWITCH_CTRL_14 0x82
+
+#define SW_PRIO_MAPPING_M KS_PRIO_M
+#define SW_PRIO_MAPPING_S 6
+#define SW_PRIO_MAP_3_HI 0
+#define SW_PRIO_MAP_2_HI 2
+#define SW_PRIO_MAP_0_LO 3
+
+#define REG_SW_CTRL_15 0x83
+#define REG_SW_CTRL_16 0x84
+#define REG_SW_CTRL_17 0x85
+#define REG_SW_CTRL_18 0x86
+
+#define SW_SELF_ADDR_FILTER_ENABLE BIT(6)
+
+#define REG_SW_UNK_UCAST_CTRL 0x83
+#define REG_SW_UNK_MCAST_CTRL 0x84
+#define REG_SW_UNK_VID_CTRL 0x85
+#define REG_SW_UNK_IP_MCAST_CTRL 0x86
+
+#define SW_UNK_FWD_ENABLE BIT(5)
+#define SW_UNK_FWD_MAP KS_PORT_M
+
+#define REG_SW_CTRL_19 0x87
+
+#define SW_IN_RATE_LIMIT_PERIOD_M 0x3
+#define SW_IN_RATE_LIMIT_PERIOD_S 4
+#define SW_IN_RATE_LIMIT_16_MS 0
+#define SW_IN_RATE_LIMIT_64_MS 1
+#define SW_IN_RATE_LIMIT_256_MS 2
+#define SW_OUT_RATE_LIMIT_QUEUE_BASED BIT(3)
+#define SW_INS_TAG_ENABLE BIT(2)
+
+#define REG_TOS_PRIO_CTRL_0 0x90
+#define REG_TOS_PRIO_CTRL_1 0x91
+#define REG_TOS_PRIO_CTRL_2 0x92
+#define REG_TOS_PRIO_CTRL_3 0x93
+#define REG_TOS_PRIO_CTRL_4 0x94
+#define REG_TOS_PRIO_CTRL_5 0x95
+#define REG_TOS_PRIO_CTRL_6 0x96
+#define REG_TOS_PRIO_CTRL_7 0x97
+#define REG_TOS_PRIO_CTRL_8 0x98
+#define REG_TOS_PRIO_CTRL_9 0x99
+#define REG_TOS_PRIO_CTRL_10 0x9A
+#define REG_TOS_PRIO_CTRL_11 0x9B
+#define REG_TOS_PRIO_CTRL_12 0x9C
+#define REG_TOS_PRIO_CTRL_13 0x9D
+#define REG_TOS_PRIO_CTRL_14 0x9E
+#define REG_TOS_PRIO_CTRL_15 0x9F
+
+#define TOS_PRIO_M KS_PRIO_M
+#define TOS_PRIO_S KS_PRIO_S
+
+#define REG_SW_CTRL_20 0xA3
+
+#define SW_GMII_DRIVE_STRENGTH_S 4
+#define SW_DRIVE_STRENGTH_M 0x7
+#define SW_DRIVE_STRENGTH_2MA 0
+#define SW_DRIVE_STRENGTH_4MA 1
+#define SW_DRIVE_STRENGTH_8MA 2
+#define SW_DRIVE_STRENGTH_12MA 3
+#define SW_DRIVE_STRENGTH_16MA 4
+#define SW_DRIVE_STRENGTH_20MA 5
+#define SW_DRIVE_STRENGTH_24MA 6
+#define SW_DRIVE_STRENGTH_28MA 7
+#define SW_MII_DRIVE_STRENGTH_S 0
+
+#define REG_SW_CTRL_21 0xA4
+
+#define SW_IPV6_MLD_OPTION BIT(3)
+#define SW_IPV6_MLD_SNOOP BIT(2)
+
+#define REG_PORT_1_CTRL_12 0xB0
+#define REG_PORT_2_CTRL_12 0xC0
+#define REG_PORT_3_CTRL_12 0xD0
+#define REG_PORT_4_CTRL_12 0xE0
+#define REG_PORT_5_CTRL_12 0xF0
+
+#define PORT_PASS_ALL BIT(6)
+#define PORT_INS_TAG_FOR_PORT_5_S 3
+#define PORT_INS_TAG_FOR_PORT_5 BIT(3)
+#define PORT_INS_TAG_FOR_PORT_4 BIT(2)
+#define PORT_INS_TAG_FOR_PORT_3 BIT(1)
+#define PORT_INS_TAG_FOR_PORT_2 BIT(0)
+
+#define REG_PORT_1_CTRL_13 0xB1
+#define REG_PORT_2_CTRL_13 0xC1
+#define REG_PORT_3_CTRL_13 0xD1
+#define REG_PORT_4_CTRL_13 0xE1
+#define REG_PORT_5_CTRL_13 0xF1
+
+#define PORT_QUEUE_SPLIT_H BIT(1)
+#define PORT_QUEUE_SPLIT_1 0
+#define PORT_QUEUE_SPLIT_2 1
+#define PORT_QUEUE_SPLIT_4 2
+#define PORT_DROP_TAG BIT(0)
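
Note that the queue-split field spans two registers: PORT_QUEUE_SPLIT_L (bit 0 of port control 0, near the top of this header) carries the low bit and PORT_QUEUE_SPLIT_H here carries the high bit, together encoding PORT_QUEUE_SPLIT_1/_2/_4. A decoding sketch, assuming ctrl0 and ctrl13 already hold the two register values:

	int split = 0;

	if (ctrl13 & PORT_QUEUE_SPLIT_H)
		split |= 2;
	if (ctrl0 & PORT_QUEUE_SPLIT_L)
		split |= 1;
	/* split is PORT_QUEUE_SPLIT_1, _2 or _4: 1, 2 or 4 TX queues */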
+
+#define REG_PORT_1_CTRL_14 0xB2
+#define REG_PORT_2_CTRL_14 0xC2
+#define REG_PORT_3_CTRL_14 0xD2
+#define REG_PORT_4_CTRL_14 0xE2
+#define REG_PORT_5_CTRL_14 0xF2
+#define REG_PORT_1_CTRL_15 0xB3
+#define REG_PORT_2_CTRL_15 0xC3
+#define REG_PORT_3_CTRL_15 0xD3
+#define REG_PORT_4_CTRL_15 0xE3
+#define REG_PORT_5_CTRL_15 0xF3
+#define REG_PORT_1_CTRL_16 0xB4
+#define REG_PORT_2_CTRL_16 0xC4
+#define REG_PORT_3_CTRL_16 0xD4
+#define REG_PORT_4_CTRL_16 0xE4
+#define REG_PORT_5_CTRL_16 0xF4
+#define REG_PORT_1_CTRL_17 0xB5
+#define REG_PORT_2_CTRL_17 0xC5
+#define REG_PORT_3_CTRL_17 0xD5
+#define REG_PORT_4_CTRL_17 0xE5
+#define REG_PORT_5_CTRL_17 0xF5
+
+#define REG_PORT_1_RATE_CTRL_3 0xB2
+#define REG_PORT_1_RATE_CTRL_2 0xB3
+#define REG_PORT_1_RATE_CTRL_1 0xB4
+#define REG_PORT_1_RATE_CTRL_0 0xB5
+#define REG_PORT_2_RATE_CTRL_3 0xC2
+#define REG_PORT_2_RATE_CTRL_2 0xC3
+#define REG_PORT_2_RATE_CTRL_1 0xC4
+#define REG_PORT_2_RATE_CTRL_0 0xC5
+#define REG_PORT_3_RATE_CTRL_3 0xD2
+#define REG_PORT_3_RATE_CTRL_2 0xD3
+#define REG_PORT_3_RATE_CTRL_1 0xD4
+#define REG_PORT_3_RATE_CTRL_0 0xD5
+#define REG_PORT_4_RATE_CTRL_3 0xE2
+#define REG_PORT_4_RATE_CTRL_2 0xE3
+#define REG_PORT_4_RATE_CTRL_1 0xE4
+#define REG_PORT_4_RATE_CTRL_0 0xE5
+#define REG_PORT_5_RATE_CTRL_3 0xF2
+#define REG_PORT_5_RATE_CTRL_2 0xF3
+#define REG_PORT_5_RATE_CTRL_1 0xF4
+#define REG_PORT_5_RATE_CTRL_0 0xF5
+
+#define RATE_CTRL_ENABLE BIT(7)
+#define RATE_RATIO_M (BIT(7) - 1)
+
+#define PORT_OUT_RATE_ENABLE BIT(7)
+
+#define REG_PORT_1_RATE_LIMIT 0xB6
+#define REG_PORT_2_RATE_LIMIT 0xC6
+#define REG_PORT_3_RATE_LIMIT 0xD6
+#define REG_PORT_4_RATE_LIMIT 0xE6
+#define REG_PORT_5_RATE_LIMIT 0xF6
+
+#define PORT_IN_PORT_BASED_S 6
+#define PORT_RATE_PACKET_BASED_S 5
+#define PORT_IN_FLOW_CTRL_S 4
+#define PORT_IN_LIMIT_MODE_M 0x3
+#define PORT_IN_LIMIT_MODE_S 2
+#define PORT_COUNT_IFG_S 1
+#define PORT_COUNT_PREAMBLE_S 0
+#define PORT_IN_PORT_BASED BIT(PORT_IN_PORT_BASED_S)
+#define PORT_RATE_PACKET_BASED BIT(PORT_RATE_PACKET_BASED_S)
+#define PORT_IN_FLOW_CTRL BIT(PORT_IN_FLOW_CTRL_S)
+#define PORT_IN_ALL 0
+#define PORT_IN_UNICAST 1
+#define PORT_IN_MULTICAST 2
+#define PORT_IN_BROADCAST 3
+#define PORT_COUNT_IFG BIT(PORT_COUNT_IFG_S)
+#define PORT_COUNT_PREAMBLE BIT(PORT_COUNT_PREAMBLE_S)
+
+#define REG_PORT_1_IN_RATE_0 0xB7
+#define REG_PORT_2_IN_RATE_0 0xC7
+#define REG_PORT_3_IN_RATE_0 0xD7
+#define REG_PORT_4_IN_RATE_0 0xE7
+#define REG_PORT_5_IN_RATE_0 0xF7
+#define REG_PORT_1_IN_RATE_1 0xB8
+#define REG_PORT_2_IN_RATE_1 0xC8
+#define REG_PORT_3_IN_RATE_1 0xD8
+#define REG_PORT_4_IN_RATE_1 0xE8
+#define REG_PORT_5_IN_RATE_1 0xF8
+#define REG_PORT_1_IN_RATE_2 0xB9
+#define REG_PORT_2_IN_RATE_2 0xC9
+#define REG_PORT_3_IN_RATE_2 0xD9
+#define REG_PORT_4_IN_RATE_2 0xE9
+#define REG_PORT_5_IN_RATE_2 0xF9
+#define REG_PORT_1_IN_RATE_3 0xBA
+#define REG_PORT_2_IN_RATE_3 0xCA
+#define REG_PORT_3_IN_RATE_3 0xDA
+#define REG_PORT_4_IN_RATE_3 0xEA
+#define REG_PORT_5_IN_RATE_3 0xFA
+
+#define PORT_IN_RATE_ENABLE BIT(7)
+#define PORT_RATE_LIMIT_M (BIT(7) - 1)
+
+#define REG_PORT_1_OUT_RATE_0 0xBB
+#define REG_PORT_2_OUT_RATE_0 0xCB
+#define REG_PORT_3_OUT_RATE_0 0xDB
+#define REG_PORT_4_OUT_RATE_0 0xEB
+#define REG_PORT_5_OUT_RATE_0 0xFB
+#define REG_PORT_1_OUT_RATE_1 0xBC
+#define REG_PORT_2_OUT_RATE_1 0xCC
+#define REG_PORT_3_OUT_RATE_1 0xDC
+#define REG_PORT_4_OUT_RATE_1 0xEC
+#define REG_PORT_5_OUT_RATE_1 0xFC
+#define REG_PORT_1_OUT_RATE_2 0xBD
+#define REG_PORT_2_OUT_RATE_2 0xCD
+#define REG_PORT_3_OUT_RATE_2 0xDD
+#define REG_PORT_4_OUT_RATE_2 0xED
+#define REG_PORT_5_OUT_RATE_2 0xFD
+#define REG_PORT_1_OUT_RATE_3 0xBE
+#define REG_PORT_2_OUT_RATE_3 0xCE
+#define REG_PORT_3_OUT_RATE_3 0xDE
+#define REG_PORT_4_OUT_RATE_3 0xEE
+#define REG_PORT_5_OUT_RATE_3 0xFE
+
+/* PME */
+
+#define SW_PME_OUTPUT_ENABLE BIT(1)
+#define SW_PME_ACTIVE_HIGH BIT(0)
+
+#define PORT_MAGIC_PACKET_DETECT BIT(2)
+#define PORT_LINK_UP_DETECT BIT(1)
+#define PORT_ENERGY_DETECT BIT(0)
+
+/* ACL */
+
+#define ACL_FIRST_RULE_M 0xF
+
+#define ACL_MODE_M 0x3
+#define ACL_MODE_S 4
+#define ACL_MODE_DISABLE 0
+#define ACL_MODE_LAYER_2 1
+#define ACL_MODE_LAYER_3 2
+#define ACL_MODE_LAYER_4 3
+#define ACL_ENABLE_M 0x3
+#define ACL_ENABLE_S 2
+#define ACL_ENABLE_2_COUNT 0
+#define ACL_ENABLE_2_TYPE 1
+#define ACL_ENABLE_2_MAC 2
+#define ACL_ENABLE_2_BOTH 3
+#define ACL_ENABLE_3_IP 1
+#define ACL_ENABLE_3_SRC_DST_COMP 2
+#define ACL_ENABLE_4_PROTOCOL 0
+#define ACL_ENABLE_4_TCP_PORT_COMP 1
+#define ACL_ENABLE_4_UDP_PORT_COMP 2
+#define ACL_ENABLE_4_TCP_SEQN_COMP 3
+#define ACL_SRC BIT(1)
+#define ACL_EQUAL BIT(0)
+
+#define ACL_MAX_PORT 0xFFFF
+
+#define ACL_MIN_PORT 0xFFFF
+#define ACL_IP_ADDR 0xFFFFFFFF
+#define ACL_TCP_SEQNUM 0xFFFFFFFF
+
+#define ACL_RESERVED 0xF8
+#define ACL_PORT_MODE_M 0x3
+#define ACL_PORT_MODE_S 1
+#define ACL_PORT_MODE_DISABLE 0
+#define ACL_PORT_MODE_EITHER 1
+#define ACL_PORT_MODE_IN_RANGE 2
+#define ACL_PORT_MODE_OUT_OF_RANGE 3
+
+#define ACL_TCP_FLAG_ENABLE BIT(0)
+
+#define ACL_TCP_FLAG_M 0xFF
+
+#define ACL_TCP_FLAG 0xFF
+#define ACL_ETH_TYPE 0xFFFF
+#define ACL_IP_M 0xFFFFFFFF
+
+#define ACL_PRIO_MODE_M 0x3
+#define ACL_PRIO_MODE_S 6
+#define ACL_PRIO_MODE_DISABLE 0
+#define ACL_PRIO_MODE_HIGHER 1
+#define ACL_PRIO_MODE_LOWER 2
+#define ACL_PRIO_MODE_REPLACE 3
+#define ACL_PRIO_M 0x7
+#define ACL_PRIO_S 3
+#define ACL_VLAN_PRIO_REPLACE BIT(2)
+#define ACL_VLAN_PRIO_M 0x7
+#define ACL_VLAN_PRIO_HI_M 0x3
+
+#define ACL_VLAN_PRIO_LO_M 0x8
+#define ACL_VLAN_PRIO_S 7
+#define ACL_MAP_MODE_M 0x3
+#define ACL_MAP_MODE_S 5
+#define ACL_MAP_MODE_DISABLE 0
+#define ACL_MAP_MODE_OR 1
+#define ACL_MAP_MODE_AND 2
+#define ACL_MAP_MODE_REPLACE 3
+#define ACL_MAP_PORT_M 0x1F
+
+#define ACL_CNT_M (BIT(11) - 1)
+#define ACL_CNT_S 5
+#define ACL_MSEC_UNIT BIT(4)
+#define ACL_INTR_MODE BIT(3)
+
+#define REG_PORT_ACL_BYTE_EN_MSB 0x10
+
+#define ACL_BYTE_EN_MSB_M 0x3F
+
+#define REG_PORT_ACL_BYTE_EN_LSB 0x11
+
+#define ACL_ACTION_START 0xA
+#define ACL_ACTION_LEN 2
+#define ACL_INTR_CNT_START 0xB
+#define ACL_RULESET_START 0xC
+#define ACL_RULESET_LEN 2
+#define ACL_TABLE_LEN 14
+
+#define ACL_ACTION_ENABLE 0x000C
+#define ACL_MATCH_ENABLE 0x1FF0
+#define ACL_RULESET_ENABLE 0x2003
+#define ACL_BYTE_ENABLE ((ACL_BYTE_EN_MSB_M << 8) | 0xFF)
+#define ACL_MODE_ENABLE (0x10 << 8)
+
+#define REG_PORT_ACL_CTRL_0 0x12
+
+#define PORT_ACL_WRITE_DONE BIT(6)
+#define PORT_ACL_READ_DONE BIT(5)
+#define PORT_ACL_WRITE BIT(4)
+#define PORT_ACL_INDEX_M 0xF
+
+#define REG_PORT_ACL_CTRL_1 0x13
+
+#define PORT_ACL_FORCE_DLR_MISS BIT(0)
+
+#ifndef PHY_REG_CTRL
+#define PHY_REG_CTRL 0
+
+#define PHY_RESET BIT(15)
+#define PHY_LOOPBACK BIT(14)
+#define PHY_SPEED_100MBIT BIT(13)
+#define PHY_AUTO_NEG_ENABLE BIT(12)
+#define PHY_POWER_DOWN BIT(11)
+#define PHY_MII_DISABLE BIT(10)
+#define PHY_AUTO_NEG_RESTART BIT(9)
+#define PHY_FULL_DUPLEX BIT(8)
+#define PHY_COLLISION_TEST_NOT BIT(7)
+#define PHY_HP_MDIX BIT(5)
+#define PHY_FORCE_MDIX BIT(4)
+#define PHY_AUTO_MDIX_DISABLE BIT(3)
+#define PHY_REMOTE_FAULT_DISABLE BIT(2)
+#define PHY_TRANSMIT_DISABLE BIT(1)
+#define PHY_LED_DISABLE BIT(0)
+
+#define PHY_REG_STATUS 1
+
+#define PHY_100BT4_CAPABLE BIT(15)
+#define PHY_100BTX_FD_CAPABLE BIT(14)
+#define PHY_100BTX_CAPABLE BIT(13)
+#define PHY_10BT_FD_CAPABLE BIT(12)
+#define PHY_10BT_CAPABLE BIT(11)
+#define PHY_MII_SUPPRESS_CAPABLE_NOT BIT(6)
+#define PHY_AUTO_NEG_ACKNOWLEDGE BIT(5)
+#define PHY_REMOTE_FAULT BIT(4)
+#define PHY_AUTO_NEG_CAPABLE BIT(3)
+#define PHY_LINK_STATUS BIT(2)
+#define PHY_JABBER_DETECT_NOT BIT(1)
+#define PHY_EXTENDED_CAPABILITY BIT(0)
+
+#define PHY_REG_ID_1 2
+#define PHY_REG_ID_2 3
+
+#define PHY_REG_AUTO_NEGOTIATION 4
+
+#define PHY_AUTO_NEG_NEXT_PAGE_NOT BIT(15)
+#define PHY_AUTO_NEG_REMOTE_FAULT_NOT BIT(13)
+#define PHY_AUTO_NEG_SYM_PAUSE BIT(10)
+#define PHY_AUTO_NEG_100BT4 BIT(9)
+#define PHY_AUTO_NEG_100BTX_FD BIT(8)
+#define PHY_AUTO_NEG_100BTX BIT(7)
+#define PHY_AUTO_NEG_10BT_FD BIT(6)
+#define PHY_AUTO_NEG_10BT BIT(5)
+#define PHY_AUTO_NEG_SELECTOR 0x001F
+#define PHY_AUTO_NEG_802_3 0x0001
+
+#define PHY_REG_REMOTE_CAPABILITY 5
+
+#define PHY_REMOTE_NEXT_PAGE_NOT BIT(15)
+#define PHY_REMOTE_ACKNOWLEDGE_NOT BIT(14)
+#define PHY_REMOTE_REMOTE_FAULT_NOT BIT(13)
+#define PHY_REMOTE_SYM_PAUSE BIT(10)
+#define PHY_REMOTE_100BTX_FD BIT(8)
+#define PHY_REMOTE_100BTX BIT(7)
+#define PHY_REMOTE_10BT_FD BIT(6)
+#define PHY_REMOTE_10BT BIT(5)
+#endif
+
+#define KSZ8795_ID_HI 0x0022
+#define KSZ8795_ID_LO 0x1550
+
+#define KSZ8795_SW_ID 0x8795
+
+#define PHY_REG_LINK_MD 0x1D
+
+#define PHY_START_CABLE_DIAG BIT(15)
+#define PHY_CABLE_DIAG_RESULT 0x6000
+#define PHY_CABLE_STAT_NORMAL 0x0000
+#define PHY_CABLE_STAT_OPEN 0x2000
+#define PHY_CABLE_STAT_SHORT 0x4000
+#define PHY_CABLE_STAT_FAILED 0x6000
+#define PHY_CABLE_10M_SHORT BIT(12)
+#define PHY_CABLE_FAULT_COUNTER 0x01FF
+
+#define PHY_REG_PHY_CTRL 0x1F
+
+#define PHY_MODE_M 0x7
+#define PHY_MODE_S 8
+#define PHY_STAT_REVERSED_POLARITY BIT(5)
+#define PHY_STAT_MDIX BIT(4)
+#define PHY_FORCE_LINK BIT(3)
+#define PHY_POWER_SAVING_ENABLE BIT(2)
+#define PHY_REMOTE_LOOPBACK BIT(1)
+
+/* Chip resource */
+
+#define PRIO_QUEUES 4
+
+#define KS_PRIO_IN_REG 4
+
+#define TOTAL_PORT_NUM 5
+
+/* The host port can only be the last one. */
+#define SWITCH_PORT_NUM (TOTAL_PORT_NUM - 1)
+
+#define KSZ8795_COUNTER_NUM 0x20
+#define TOTAL_KSZ8795_COUNTER_NUM (KSZ8795_COUNTER_NUM + 4)
+
+#define SWITCH_COUNTER_NUM KSZ8795_COUNTER_NUM
+#define TOTAL_SWITCH_COUNTER_NUM TOTAL_KSZ8795_COUNTER_NUM
+
+/* Common names used by other drivers */
+
+#define P_BCAST_STORM_CTRL REG_PORT_CTRL_0
+#define P_PRIO_CTRL REG_PORT_CTRL_0
+#define P_TAG_CTRL REG_PORT_CTRL_0
+#define P_MIRROR_CTRL REG_PORT_CTRL_1
+#define P_802_1P_CTRL REG_PORT_CTRL_2
+#define P_STP_CTRL REG_PORT_CTRL_2
+#define P_LOCAL_CTRL REG_PORT_CTRL_7
+#define P_REMOTE_STATUS REG_PORT_STATUS_0
+#define P_FORCE_CTRL REG_PORT_CTRL_9
+#define P_NEG_RESTART_CTRL REG_PORT_CTRL_10
+#define P_SPEED_STATUS REG_PORT_STATUS_1
+#define P_LINK_STATUS REG_PORT_STATUS_2
+#define P_PASS_ALL_CTRL REG_PORT_CTRL_12
+#define P_INS_SRC_PVID_CTRL REG_PORT_CTRL_12
+#define P_DROP_TAG_CTRL REG_PORT_CTRL_13
+#define P_RATE_LIMIT_CTRL REG_PORT_RATE_LIMIT
+
+#define S_UNKNOWN_DA_CTRL REG_SWITCH_CTRL_12
+#define S_FORWARD_INVALID_VID_CTRL REG_FORWARD_INVALID_VID
+
+#define S_FLUSH_TABLE_CTRL REG_SW_CTRL_0
+#define S_LINK_AGING_CTRL REG_SW_CTRL_0
+#define S_HUGE_PACKET_CTRL REG_SW_CTRL_1
+#define S_MIRROR_CTRL REG_SW_CTRL_3
+#define S_REPLACE_VID_CTRL REG_SW_CTRL_4
+#define S_PASS_PAUSE_CTRL REG_SW_CTRL_10
+#define S_TAIL_TAG_CTRL REG_SW_CTRL_10
+#define S_802_1P_PRIO_CTRL REG_SW_CTRL_12
+#define S_TOS_PRIO_CTRL REG_TOS_PRIO_CTRL_0
+#define S_IPV6_MLD_CTRL REG_SW_CTRL_21
+
+#define IND_ACC_TABLE(table) ((table) << 8)
+
+/* The driver sets switch broadcast storm protection at a 10% rate. */
+#define BROADCAST_STORM_PROT_RATE 10
+
+/* 148,800 frames/s * 67 ms / 1000 = 9969 frames per interval */
+#define BROADCAST_STORM_VALUE 9969
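
Combined with the 10% rate above, the value the driver programs works out as follows (a sketch of the arithmetic):

/*
 * frames per 67 ms interval: 148800 * 67 / 1000 = 9969 (truncated)
 * 10% protection limit: 9969 * BROADCAST_STORM_PROT_RATE / 100
 *                       = 996 frames per interval
 */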
+
+/*
+ * STATIC_MAC_TABLE_ADDR 00-0000FFFF-FFFFFFFF
+ * STATIC_MAC_TABLE_FWD_PORTS 00-001F0000-00000000
+ * STATIC_MAC_TABLE_VALID 00-00200000-00000000
+ * STATIC_MAC_TABLE_OVERRIDE 00-00400000-00000000
+ * STATIC_MAC_TABLE_USE_FID 00-00800000-00000000
+ * STATIC_MAC_TABLE_FID 00-7F000000-00000000
+ */
+
+#define STATIC_MAC_TABLE_ADDR 0x0000FFFF
+#define STATIC_MAC_TABLE_FWD_PORTS 0x001F0000
+#define STATIC_MAC_TABLE_VALID 0x00200000
+#define STATIC_MAC_TABLE_OVERRIDE 0x00400000
+#define STATIC_MAC_TABLE_USE_FID 0x00800000
+#define STATIC_MAC_TABLE_FID 0x7F000000
+
+#define STATIC_MAC_FWD_PORTS_S 16
+#define STATIC_MAC_FID_S 24
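
Per the layout comment above, these masks apply to the upper 32-bit word of the 64-bit entry; the lower word holds MAC bytes 2-5. A packing sketch for the upper word (variable names are illustrative):

	u32 data_hi = ((u32)mac[0] << 8) | mac[1];

	data_hi |= (u32)fwd_ports << STATIC_MAC_FWD_PORTS_S;
	data_hi |= STATIC_MAC_TABLE_VALID;
	if (use_fid) {
		data_hi |= STATIC_MAC_TABLE_USE_FID;
		data_hi |= (u32)fid << STATIC_MAC_FID_S;
	}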
+
+/*
+ * VLAN_TABLE_FID 00-007F007F-007F007F
+ * VLAN_TABLE_MEMBERSHIP 00-0F800F80-0F800F80
+ * VLAN_TABLE_VALID 00-10001000-10001000
+ */
+
+#define VLAN_TABLE_FID 0x007F
+#define VLAN_TABLE_MEMBERSHIP 0x0F80
+#define VLAN_TABLE_VALID 0x1000
+
+#define VLAN_TABLE_MEMBERSHIP_S 7
+#define VLAN_TABLE_S 16
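
Four VLANs are packed into each 64-bit table entry, VLAN_TABLE_S bits apart, which is why VLAN_TABLE_ENTRIES at the end of this header is 4096 / 4. A sketch of extracting one VID's fields from a fetched entry:

static void ksz8795_vlan_slot(u16 vid, u64 entry, u8 *fid, u8 *member,
			      bool *valid)
{
	/* The entry itself sits at indirect address vid / 4. */
	u16 slot = entry >> ((vid & 3) * VLAN_TABLE_S);

	*fid = slot & VLAN_TABLE_FID;
	*member = (slot & VLAN_TABLE_MEMBERSHIP) >> VLAN_TABLE_MEMBERSHIP_S;
	*valid = !!(slot & VLAN_TABLE_VALID);
}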
+
+/*
+ * DYNAMIC_MAC_TABLE_ADDR 00-0000FFFF-FFFFFFFF
+ * DYNAMIC_MAC_TABLE_FID 00-007F0000-00000000
+ * DYNAMIC_MAC_TABLE_NOT_READY 00-00800000-00000000
+ * DYNAMIC_MAC_TABLE_SRC_PORT 00-07000000-00000000
+ * DYNAMIC_MAC_TABLE_TIMESTAMP 00-18000000-00000000
+ * DYNAMIC_MAC_TABLE_ENTRIES 7F-E0000000-00000000
+ * DYNAMIC_MAC_TABLE_MAC_EMPTY 80-00000000-00000000
+ */
+
+#define DYNAMIC_MAC_TABLE_ADDR 0x0000FFFF
+#define DYNAMIC_MAC_TABLE_FID 0x007F0000
+#define DYNAMIC_MAC_TABLE_SRC_PORT 0x07000000
+#define DYNAMIC_MAC_TABLE_TIMESTAMP 0x18000000
+#define DYNAMIC_MAC_TABLE_ENTRIES 0xE0000000
+
+#define DYNAMIC_MAC_TABLE_NOT_READY 0x80
+
+#define DYNAMIC_MAC_TABLE_ENTRIES_H 0x7F
+#define DYNAMIC_MAC_TABLE_MAC_EMPTY 0x80
+
+#define DYNAMIC_MAC_FID_S 16
+#define DYNAMIC_MAC_SRC_PORT_S 24
+#define DYNAMIC_MAC_TIMESTAMP_S 27
+#define DYNAMIC_MAC_ENTRIES_S 29
+#define DYNAMIC_MAC_ENTRIES_H_S 3
+
+/*
+ * MIB_COUNTER_VALUE 00-00000000-3FFFFFFF
+ * MIB_TOTAL_BYTES 00-0000000F-FFFFFFFF
+ * MIB_PACKET_DROPPED 00-00000000-0000FFFF
+ * MIB_COUNTER_VALID 00-00000020-00000000
+ * MIB_COUNTER_OVERFLOW 00-00000040-00000000
+ */
+
+#define MIB_COUNTER_OVERFLOW BIT(6)
+#define MIB_COUNTER_VALID BIT(5)
+
+#define MIB_COUNTER_VALUE 0x3FFFFFFF
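
Reading a MIB counter follows the same indirect scheme: select the MIB table with the counter index, check the valid/overflow flags in the REG_IND_MIB_CHECK byte, then mask the 30-bit count out of the low data bytes. A sketch, assuming the ksz_write16()/ksz_read8()/ksz_read32() accessors from ksz_common.h:

static int ksz8795_read_mib(struct ksz_device *dev, u16 addr, u64 *cnt)
{
	u32 data;
	u8 check;

	ksz_write16(dev, REG_IND_CTRL_0,
		    IND_ACC_TABLE(TABLE_MIB | TABLE_READ) | addr);
	ksz_read8(dev, REG_IND_MIB_CHECK, &check);
	if (!(check & MIB_COUNTER_VALID))
		return -EAGAIN; /* caller may retry */

	ksz_read32(dev, REG_IND_DATA_LO, &data);
	*cnt = data & MIB_COUNTER_VALUE;
	if (check & MIB_COUNTER_OVERFLOW)
		*cnt += MIB_COUNTER_VALUE + 1;

	return 0;
}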
+
+#define KS_MIB_TOTAL_RX_0 0x100
+#define KS_MIB_TOTAL_TX_0 0x101
+#define KS_MIB_PACKET_DROPPED_RX_0 0x102
+#define KS_MIB_PACKET_DROPPED_TX_0 0x103
+#define KS_MIB_TOTAL_RX_1 0x104
+#define KS_MIB_TOTAL_TX_1 0x105
+#define KS_MIB_PACKET_DROPPED_TX_1 0x106
+#define KS_MIB_PACKET_DROPPED_RX_1 0x107
+#define KS_MIB_TOTAL_RX_2 0x108
+#define KS_MIB_TOTAL_TX_2 0x109
+#define KS_MIB_PACKET_DROPPED_TX_2 0x10A
+#define KS_MIB_PACKET_DROPPED_RX_2 0x10B
+#define KS_MIB_TOTAL_RX_3 0x10C
+#define KS_MIB_TOTAL_TX_3 0x10D
+#define KS_MIB_PACKET_DROPPED_TX_3 0x10E
+#define KS_MIB_PACKET_DROPPED_RX_3 0x10F
+#define KS_MIB_TOTAL_RX_4 0x110
+#define KS_MIB_TOTAL_TX_4 0x111
+#define KS_MIB_PACKET_DROPPED_TX_4 0x112
+#define KS_MIB_PACKET_DROPPED_RX_4 0x113
+
+#define MIB_PACKET_DROPPED 0x0000FFFF
+
+#define MIB_TOTAL_BYTES_H 0x0000000F
+
+#define TAIL_TAG_OVERRIDE BIT(6)
+#define TAIL_TAG_LOOKUP BIT(7)
+
+#define VLAN_TABLE_ENTRIES (4096 / 4)
+#define FID_ENTRIES 128
+
+#endif
diff --git a/drivers/net/dsa/microchip/ksz8795_spi.c b/drivers/net/dsa/microchip/ksz8795_spi.c
new file mode 100644
index 000000000000..d0f8153e86b7
--- /dev/null
+++ b/drivers/net/dsa/microchip/ksz8795_spi.c
@@ -0,0 +1,103 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Microchip KSZ8795 series register access through SPI
+ *
+ * Copyright (C) 2017 Microchip Technology Inc.
+ * Tristram Ha <Tristram.Ha@microchip.com>
+ */
+
+#include <asm/unaligned.h>
+
+#include <linux/delay.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/regmap.h>
+#include <linux/spi/spi.h>
+
+#include "ksz_common.h"
+
+#define SPI_ADDR_SHIFT 12
+#define SPI_ADDR_ALIGN 3
+#define SPI_TURNAROUND_SHIFT 1
+
+KSZ_REGMAP_TABLE(ksz8795, 16, SPI_ADDR_SHIFT,
+ SPI_TURNAROUND_SHIFT, SPI_ADDR_ALIGN);
+
+static int ksz8795_spi_probe(struct spi_device *spi)
+{
+ struct ksz_device *dev;
+ int i, ret;
+
+ dev = ksz_switch_alloc(&spi->dev, spi);
+ if (!dev)
+ return -ENOMEM;
+
+ for (i = 0; i < ARRAY_SIZE(ksz8795_regmap_config); i++) {
+ dev->regmap[i] = devm_regmap_init_spi(spi, &ksz8795_regmap_config[i]);
+ if (IS_ERR(dev->regmap[i])) {
+ ret = PTR_ERR(dev->regmap[i]);
+ dev_err(&spi->dev,
+ "Failed to initialize regmap%i: %d\n",
+ ksz8795_regmap_config[i].val_bits, ret);
+ return ret;
+ }
+ }
+
+ if (spi->dev.platform_data)
+ dev->pdata = spi->dev.platform_data;
+
+ ret = ksz8795_switch_register(dev);
+
+ /* Main DSA driver may not be started yet. */
+ if (ret)
+ return ret;
+
+ spi_set_drvdata(spi, dev);
+
+ return 0;
+}
+
+static int ksz8795_spi_remove(struct spi_device *spi)
+{
+ struct ksz_device *dev = spi_get_drvdata(spi);
+
+ if (dev)
+ ksz_switch_remove(dev);
+
+ return 0;
+}
+
+static void ksz8795_spi_shutdown(struct spi_device *spi)
+{
+ struct ksz_device *dev = spi_get_drvdata(spi);
+
+ if (dev && dev->dev_ops->shutdown)
+ dev->dev_ops->shutdown(dev);
+}
+
+static const struct of_device_id ksz8795_dt_ids[] = {
+ { .compatible = "microchip,ksz8765" },
+ { .compatible = "microchip,ksz8794" },
+ { .compatible = "microchip,ksz8795" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, ksz8795_dt_ids);
+
+static struct spi_driver ksz8795_spi_driver = {
+ .driver = {
+ .name = "ksz8795-switch",
+ .owner = THIS_MODULE,
+ .of_match_table = of_match_ptr(ksz8795_dt_ids),
+ },
+ .probe = ksz8795_spi_probe,
+ .remove = ksz8795_spi_remove,
+ .shutdown = ksz8795_spi_shutdown,
+};
+
+module_spi_driver(ksz8795_spi_driver);
+
+MODULE_AUTHOR("Tristram Ha <Tristram.Ha@microchip.com>");
+MODULE_DESCRIPTION("Microchip KSZ8795 Series Switch SPI Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/dsa/microchip/ksz9477.c b/drivers/net/dsa/microchip/ksz9477.c
index a8c97f7a79b7..187be42de5f1 100644
--- a/drivers/net/dsa/microchip/ksz9477.c
+++ b/drivers/net/dsa/microchip/ksz9477.c
@@ -14,7 +14,6 @@
#include <net/dsa.h>
#include <net/switchdev.h>
-#include "ksz_priv.h"
#include "ksz9477_reg.h"
#include "ksz_common.h"
diff --git a/drivers/net/dsa/microchip/ksz9477_spi.c b/drivers/net/dsa/microchip/ksz9477_spi.c
index 5a9e27b337a8..a226b389e12d 100644
--- a/drivers/net/dsa/microchip/ksz9477_spi.c
+++ b/drivers/net/dsa/microchip/ksz9477_spi.c
@@ -13,7 +13,6 @@
#include <linux/regmap.h>
#include <linux/spi/spi.h>
-#include "ksz_priv.h"
#include "ksz_common.h"
#define SPI_ADDR_SHIFT 24
diff --git a/drivers/net/dsa/microchip/ksz_common.c b/drivers/net/dsa/microchip/ksz_common.c
index a3d2d67894bd..b0b870f0c252 100644
--- a/drivers/net/dsa/microchip/ksz_common.c
+++ b/drivers/net/dsa/microchip/ksz_common.c
@@ -18,17 +18,7 @@
#include <net/dsa.h>
#include <net/switchdev.h>
-#include "ksz_priv.h"
-
-void ksz_port_cleanup(struct ksz_device *dev, int port)
-{
- /* Common code for port cleanup. */
- mutex_lock(&dev->dev_mutex);
- dev->on_ports &= ~(1 << port);
- dev->live_ports &= ~(1 << port);
- mutex_unlock(&dev->dev_mutex);
-}
-EXPORT_SYMBOL_GPL(ksz_port_cleanup);
+#include "ksz_common.h"
void ksz_update_port_member(struct ksz_device *dev, int port)
{
@@ -371,9 +361,13 @@ int ksz_enable_port(struct dsa_switch *ds, int port, struct phy_device *phy)
{
struct ksz_device *dev = ds->priv;
+ if (!dsa_is_user_port(ds, port))
+ return 0;
+
/* setup slave port */
dev->dev_ops->port_setup(dev, port, false);
- dev->dev_ops->phy_setup(dev, port, phy);
+ if (dev->dev_ops->phy_setup)
+ dev->dev_ops->phy_setup(dev, port, phy);
/* port_stp_state_set() will be called after to enable the port so
* there is no need to do anything.
@@ -387,6 +381,9 @@ void ksz_disable_port(struct dsa_switch *ds, int port)
{
struct ksz_device *dev = ds->priv;
+ if (!dsa_is_user_port(ds, port))
+ return;
+
dev->on_ports &= ~(1 << port);
dev->live_ports &= ~(1 << port);
diff --git a/drivers/net/dsa/microchip/ksz_common.h b/drivers/net/dsa/microchip/ksz_common.h
index ee7096d8af07..c44a8d23d973 100644
--- a/drivers/net/dsa/microchip/ksz_common.h
+++ b/drivers/net/dsa/microchip/ksz_common.h
@@ -7,9 +7,152 @@
#ifndef __KSZ_COMMON_H
#define __KSZ_COMMON_H
+#include <linux/etherdevice.h>
+#include <linux/kernel.h>
+#include <linux/mutex.h>
+#include <linux/phy.h>
#include <linux/regmap.h>
+#include <net/dsa.h>
+
+struct vlan_table {
+ u32 table[3];
+};
+
+struct ksz_port_mib {
+ struct mutex cnt_mutex; /* structure access */
+ u8 cnt_ptr;
+ u64 *counters;
+};
+
+struct ksz_port {
+ u16 member;
+ u16 vid_member;
+ int stp_state;
+ struct phy_device phydev;
+
+ u32 on:1; /* port is not disabled by hardware */
+ u32 phy:1; /* port has a PHY */
+ u32 fiber:1; /* port is fiber */
+ u32 sgmii:1; /* port is SGMII */
+ u32 force:1;
+ u32 read:1; /* read MIB counters in background */
+ u32 freeze:1; /* MIB counter freeze is enabled */
+
+ struct ksz_port_mib mib;
+};
+
+struct ksz_device {
+ struct dsa_switch *ds;
+ struct ksz_platform_data *pdata;
+ const char *name;
+
+ struct mutex dev_mutex; /* device access */
+ struct mutex stats_mutex; /* statistics access */
+ struct mutex alu_mutex; /* ALU access */
+ struct mutex vlan_mutex; /* vlan access */
+ const struct ksz_dev_ops *dev_ops;
+
+ struct device *dev;
+ struct regmap *regmap[3];
+
+ void *priv;
+
+ struct gpio_desc *reset_gpio; /* Optional reset GPIO */
+
+ /* chip specific data */
+ u32 chip_id;
+ int num_vlans;
+ int num_alus;
+ int num_statics;
+ int cpu_port; /* port connected to CPU */
+ int cpu_ports; /* bitmap of ports that can be the CPU port */
+ int phy_port_cnt;
+ int port_cnt;
+ int reg_mib_cnt;
+ int mib_cnt;
+ int mib_port_cnt;
+ int last_port; /* ports after this one are unused */
+ phy_interface_t interface;
+ u32 regs_size;
+ bool phy_errata_9477;
+ bool synclko_125;
+
+ struct vlan_table *vlan_cache;
+
+ struct ksz_port *ports;
+ struct timer_list mib_read_timer;
+ struct work_struct mib_read;
+ unsigned long mib_read_interval;
+ u16 br_member;
+ u16 member;
+ u16 live_ports;
+ u16 on_ports; /* ports enabled by DSA */
+ u16 rx_ports;
+ u16 tx_ports;
+ u16 mirror_rx;
+ u16 mirror_tx;
+ u32 features; /* chip specific features */
+ u32 overrides; /* chip functions set by user */
+ u16 host_mask;
+ u16 port_mask;
+};
+
+struct alu_struct {
+ /* entry 1 */
+ u8 is_static:1;
+ u8 is_src_filter:1;
+ u8 is_dst_filter:1;
+ u8 prio_age:3;
+ u32 _reserv_0_1:23;
+ u8 mstp:3;
+ /* entry 2 */
+ u8 is_override:1;
+ u8 is_use_fid:1;
+ u32 _reserv_1_1:23;
+ u8 port_forward:7;
+ /* entry 3 & 4 */
+ u32 _reserv_2_1:9;
+ u8 fid:7;
+ u8 mac[ETH_ALEN];
+};
+
+struct ksz_dev_ops {
+ u32 (*get_port_addr)(int port, int offset);
+ void (*cfg_port_member)(struct ksz_device *dev, int port, u8 member);
+ void (*flush_dyn_mac_table)(struct ksz_device *dev, int port);
+ void (*phy_setup)(struct ksz_device *dev, int port,
+ struct phy_device *phy);
+ void (*port_cleanup)(struct ksz_device *dev, int port);
+ void (*port_setup)(struct ksz_device *dev, int port, bool cpu_port);
+ void (*r_phy)(struct ksz_device *dev, u16 phy, u16 reg, u16 *val);
+ void (*w_phy)(struct ksz_device *dev, u16 phy, u16 reg, u16 val);
+ int (*r_dyn_mac_table)(struct ksz_device *dev, u16 addr, u8 *mac_addr,
+ u8 *fid, u8 *src_port, u8 *timestamp,
+ u16 *entries);
+ int (*r_sta_mac_table)(struct ksz_device *dev, u16 addr,
+ struct alu_struct *alu);
+ void (*w_sta_mac_table)(struct ksz_device *dev, u16 addr,
+ struct alu_struct *alu);
+ void (*r_mib_cnt)(struct ksz_device *dev, int port, u16 addr,
+ u64 *cnt);
+ void (*r_mib_pkt)(struct ksz_device *dev, int port, u16 addr,
+ u64 *dropped, u64 *cnt);
+ void (*freeze_mib)(struct ksz_device *dev, int port, bool freeze);
+ void (*port_init_cnt)(struct ksz_device *dev, int port);
+ int (*shutdown)(struct ksz_device *dev);
+ int (*detect)(struct ksz_device *dev);
+ int (*init)(struct ksz_device *dev);
+ void (*exit)(struct ksz_device *dev);
+};
+
+struct ksz_device *ksz_switch_alloc(struct device *base, void *priv);
+int ksz_switch_register(struct ksz_device *dev,
+ const struct ksz_dev_ops *ops);
+void ksz_switch_remove(struct ksz_device *dev);
+
+int ksz8795_switch_register(struct ksz_device *dev);
+int ksz9477_switch_register(struct ksz_device *dev);
-void ksz_port_cleanup(struct ksz_device *dev, int port);
void ksz_update_port_member(struct ksz_device *dev, int port);
void ksz_init_mib_timer(struct ksz_device *dev);
@@ -68,6 +211,22 @@ static inline int ksz_read32(struct ksz_device *dev, u32 reg, u32 *val)
return ret;
}
+static inline int ksz_read64(struct ksz_device *dev, u32 reg, u64 *val)
+{
+ u32 value[2];
+ int ret;
+
+ ret = regmap_bulk_read(dev->regmap[2], reg, value, 2);
+ /* regmap returns native-endian 32-bit words; the high word comes first. */
+ if (!ret)
+ *val = (u64)value[0] << 32 | value[1];
+
+ return ret;
+}
+
static inline int ksz_write8(struct ksz_device *dev, u32 reg, u8 value)
{
return regmap_write(dev->regmap[0], reg, value);
@@ -83,6 +242,18 @@ static inline int ksz_write32(struct ksz_device *dev, u32 reg, u32 value)
return regmap_write(dev->regmap[2], reg, value);
}
+static inline int ksz_write64(struct ksz_device *dev, u32 reg, u64 value)
+{
+ u32 val[2];
+
+ /* Ick! ToDo: Add 64bit R/W to regmap on 32bit systems */
+ value = swab64(value);
+ val[0] = swab32(value & 0xffffffffULL);
+ val[1] = swab32(value >> 32ULL);
+
+ return regmap_bulk_write(dev->regmap[2], reg, val, 2);
+}
+
static inline void ksz_pread8(struct ksz_device *dev, int port, int offset,
u8 *data)
{
diff --git a/drivers/net/dsa/microchip/ksz_priv.h b/drivers/net/dsa/microchip/ksz_priv.h
deleted file mode 100644
index beacf0e40f42..000000000000
--- a/drivers/net/dsa/microchip/ksz_priv.h
+++ /dev/null
@@ -1,155 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0
- *
- * Microchip KSZ series switch common definitions
- *
- * Copyright (C) 2017-2019 Microchip Technology Inc.
- */
-
-#ifndef __KSZ_PRIV_H
-#define __KSZ_PRIV_H
-
-#include <linux/kernel.h>
-#include <linux/mutex.h>
-#include <linux/phy.h>
-#include <linux/etherdevice.h>
-#include <net/dsa.h>
-
-struct vlan_table {
- u32 table[3];
-};
-
-struct ksz_port_mib {
- struct mutex cnt_mutex; /* structure access */
- u8 cnt_ptr;
- u64 *counters;
-};
-
-struct ksz_port {
- u16 member;
- u16 vid_member;
- int stp_state;
- struct phy_device phydev;
-
- u32 on:1; /* port is not disabled by hardware */
- u32 phy:1; /* port has a PHY */
- u32 fiber:1; /* port is fiber */
- u32 sgmii:1; /* port is SGMII */
- u32 force:1;
- u32 read:1; /* read MIB counters in background */
- u32 freeze:1; /* MIB counter freeze is enabled */
-
- struct ksz_port_mib mib;
-};
-
-struct ksz_device {
- struct dsa_switch *ds;
- struct ksz_platform_data *pdata;
- const char *name;
-
- struct mutex dev_mutex; /* device access */
- struct mutex stats_mutex; /* status access */
- struct mutex alu_mutex; /* ALU access */
- struct mutex vlan_mutex; /* vlan access */
- const struct ksz_dev_ops *dev_ops;
-
- struct device *dev;
- struct regmap *regmap[3];
-
- void *priv;
-
- struct gpio_desc *reset_gpio; /* Optional reset GPIO */
-
- /* chip specific data */
- u32 chip_id;
- int num_vlans;
- int num_alus;
- int num_statics;
- int cpu_port; /* port connected to CPU */
- int cpu_ports; /* port bitmap can be cpu port */
- int phy_port_cnt;
- int port_cnt;
- int reg_mib_cnt;
- int mib_cnt;
- int mib_port_cnt;
- int last_port; /* ports after that not used */
- phy_interface_t interface;
- u32 regs_size;
- bool phy_errata_9477;
- bool synclko_125;
-
- struct vlan_table *vlan_cache;
-
- struct ksz_port *ports;
- struct timer_list mib_read_timer;
- struct work_struct mib_read;
- unsigned long mib_read_interval;
- u16 br_member;
- u16 member;
- u16 live_ports;
- u16 on_ports; /* ports enabled by DSA */
- u16 rx_ports;
- u16 tx_ports;
- u16 mirror_rx;
- u16 mirror_tx;
- u32 features; /* chip specific features */
- u32 overrides; /* chip functions set by user */
- u16 host_mask;
- u16 port_mask;
-};
-
-struct alu_struct {
- /* entry 1 */
- u8 is_static:1;
- u8 is_src_filter:1;
- u8 is_dst_filter:1;
- u8 prio_age:3;
- u32 _reserv_0_1:23;
- u8 mstp:3;
- /* entry 2 */
- u8 is_override:1;
- u8 is_use_fid:1;
- u32 _reserv_1_1:23;
- u8 port_forward:7;
- /* entry 3 & 4*/
- u32 _reserv_2_1:9;
- u8 fid:7;
- u8 mac[ETH_ALEN];
-};
-
-struct ksz_dev_ops {
- u32 (*get_port_addr)(int port, int offset);
- void (*cfg_port_member)(struct ksz_device *dev, int port, u8 member);
- void (*flush_dyn_mac_table)(struct ksz_device *dev, int port);
- void (*phy_setup)(struct ksz_device *dev, int port,
- struct phy_device *phy);
- void (*port_cleanup)(struct ksz_device *dev, int port);
- void (*port_setup)(struct ksz_device *dev, int port, bool cpu_port);
- void (*r_phy)(struct ksz_device *dev, u16 phy, u16 reg, u16 *val);
- void (*w_phy)(struct ksz_device *dev, u16 phy, u16 reg, u16 val);
- int (*r_dyn_mac_table)(struct ksz_device *dev, u16 addr, u8 *mac_addr,
- u8 *fid, u8 *src_port, u8 *timestamp,
- u16 *entries);
- int (*r_sta_mac_table)(struct ksz_device *dev, u16 addr,
- struct alu_struct *alu);
- void (*w_sta_mac_table)(struct ksz_device *dev, u16 addr,
- struct alu_struct *alu);
- void (*r_mib_cnt)(struct ksz_device *dev, int port, u16 addr,
- u64 *cnt);
- void (*r_mib_pkt)(struct ksz_device *dev, int port, u16 addr,
- u64 *dropped, u64 *cnt);
- void (*freeze_mib)(struct ksz_device *dev, int port, bool freeze);
- void (*port_init_cnt)(struct ksz_device *dev, int port);
- int (*shutdown)(struct ksz_device *dev);
- int (*detect)(struct ksz_device *dev);
- int (*init)(struct ksz_device *dev);
- void (*exit)(struct ksz_device *dev);
-};
-
-struct ksz_device *ksz_switch_alloc(struct device *base, void *priv);
-int ksz_switch_register(struct ksz_device *dev,
- const struct ksz_dev_ops *ops);
-void ksz_switch_remove(struct ksz_device *dev);
-
-int ksz9477_switch_register(struct ksz_device *dev);
-
-#endif
diff --git a/drivers/net/dsa/mt7530.c b/drivers/net/dsa/mt7530.c
index 3181e95586d6..c48e29486b10 100644
--- a/drivers/net/dsa/mt7530.c
+++ b/drivers/net/dsa/mt7530.c
@@ -726,6 +726,9 @@ mt7530_port_enable(struct dsa_switch *ds, int port,
{
struct mt7530_priv *priv = ds->priv;
+ if (!dsa_is_user_port(ds, port))
+ return 0;
+
mutex_lock(&priv->reg_mutex);
/* Setup the MAC for the user port */
@@ -751,6 +754,9 @@ mt7530_port_disable(struct dsa_switch *ds, int port)
{
struct mt7530_priv *priv = ds->priv;
+ if (!dsa_is_user_port(ds, port))
+ return;
+
mutex_lock(&priv->reg_mutex);
/* Clear up all port matrix which could be restored in the next
diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c
index d0a97eb73a37..d0bf98c10b2b 100644
--- a/drivers/net/dsa/mv88e6xxx/chip.c
+++ b/drivers/net/dsa/mv88e6xxx/chip.c
@@ -10,6 +10,7 @@
* Vivien Didelot <vivien.didelot@savoirfairelinux.com>
*/
+#include <linux/bitfield.h>
#include <linux/delay.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
@@ -80,6 +81,36 @@ int mv88e6xxx_write(struct mv88e6xxx_chip *chip, int addr, int reg, u16 val)
return 0;
}
+int mv88e6xxx_wait_mask(struct mv88e6xxx_chip *chip, int addr, int reg,
+ u16 mask, u16 val)
+{
+ u16 data;
+ int err;
+ int i;
+
+ /* There's no bus specific operation to wait for a mask */
+ for (i = 0; i < 16; i++) {
+ err = mv88e6xxx_read(chip, addr, reg, &data);
+ if (err)
+ return err;
+
+ if ((data & mask) == val)
+ return 0;
+
+ usleep_range(1000, 2000);
+ }
+
+ dev_err(chip->dev, "Timeout while waiting for switch\n");
+ return -ETIMEDOUT;
+}
+
+int mv88e6xxx_wait_bit(struct mv88e6xxx_chip *chip, int addr, int reg,
+ int bit, int val)
+{
+ return mv88e6xxx_wait_mask(chip, addr, reg, BIT(bit),
+ val ? BIT(bit) : 0x0000);
+}
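
Callers that previously polled with the removed mv88e6xxx_wait(chip, addr, reg, BIT(n)) now state the expected polarity explicitly; an illustrative sketch (the bit positions are examples only):

	/* Wait for a busy flag in bit 15 to clear. */
	err = mv88e6xxx_wait_bit(chip, addr, reg, 15, 0);

	/* Wait for a ready flag in bit 11 to become set. */
	err = mv88e6xxx_wait_bit(chip, addr, reg, 11, 1);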
+
struct mii_bus *mv88e6xxx_default_mdio_bus(struct mv88e6xxx_chip *chip)
{
struct mv88e6xxx_mdio_bus *mdio_bus;
@@ -363,45 +394,6 @@ static void mv88e6xxx_irq_poll_free(struct mv88e6xxx_chip *chip)
mv88e6xxx_reg_unlock(chip);
}
-int mv88e6xxx_wait(struct mv88e6xxx_chip *chip, int addr, int reg, u16 mask)
-{
- int i;
-
- for (i = 0; i < 16; i++) {
- u16 val;
- int err;
-
- err = mv88e6xxx_read(chip, addr, reg, &val);
- if (err)
- return err;
-
- if (!(val & mask))
- return 0;
-
- usleep_range(1000, 2000);
- }
-
- dev_err(chip->dev, "Timeout while waiting for switch\n");
- return -ETIMEDOUT;
-}
-
-/* Indirect write to single pointer-data register with an Update bit */
-int mv88e6xxx_update(struct mv88e6xxx_chip *chip, int addr, int reg, u16 update)
-{
- u16 val;
- int err;
-
- /* Wait until the previous operation is completed */
- err = mv88e6xxx_wait(chip, addr, reg, BIT(15));
- if (err)
- return err;
-
- /* Set the Update bit to trigger a write operation */
- val = BIT(15) | update;
-
- return mv88e6xxx_write(chip, addr, reg, val);
-}
-
int mv88e6xxx_port_setup_mac(struct mv88e6xxx_chip *chip, int port, int link,
int speed, int duplex, int pause,
phy_interface_t mode)
@@ -425,7 +417,9 @@ int mv88e6xxx_port_setup_mac(struct mv88e6xxx_chip *chip, int port, int link,
*/
if (state.link == link &&
state.speed == speed &&
- state.duplex == duplex)
+ state.duplex == duplex &&
+ (state.interface == mode ||
+ state.interface == PHY_INTERFACE_MODE_NA))
return 0;
/* Port's MAC control must not be changed unless the link is down */
@@ -1336,9 +1330,7 @@ static int mv88e6xxx_vtu_loadpurge(struct mv88e6xxx_chip *chip,
static int mv88e6xxx_atu_new(struct mv88e6xxx_chip *chip, u16 *fid)
{
DECLARE_BITMAP(fid_bitmap, MV88E6XXX_N_FID);
- struct mv88e6xxx_vtu_entry vlan = {
- .vid = chip->info->max_vid,
- };
+ struct mv88e6xxx_vtu_entry vlan;
int i, err;
bitmap_zero(fid_bitmap, MV88E6XXX_N_FID);
@@ -1353,6 +1345,9 @@ static int mv88e6xxx_atu_new(struct mv88e6xxx_chip *chip, u16 *fid)
}
/* Set every FID bit used by the VLAN entries */
+ vlan.vid = chip->info->max_vid;
+ vlan.valid = false;
+
do {
err = mv88e6xxx_vtu_getnext(chip, &vlan);
if (err)
@@ -1375,51 +1370,11 @@ static int mv88e6xxx_atu_new(struct mv88e6xxx_chip *chip, u16 *fid)
return mv88e6xxx_g1_atu_flush(chip, *fid, true);
}
-static int mv88e6xxx_vtu_get(struct mv88e6xxx_chip *chip, u16 vid,
- struct mv88e6xxx_vtu_entry *entry, bool new)
-{
- int err;
-
- if (!vid)
- return -EOPNOTSUPP;
-
- entry->vid = vid - 1;
- entry->valid = false;
-
- err = mv88e6xxx_vtu_getnext(chip, entry);
- if (err)
- return err;
-
- if (entry->vid == vid && entry->valid)
- return 0;
-
- if (new) {
- int i;
-
- /* Initialize a fresh VLAN entry */
- memset(entry, 0, sizeof(*entry));
- entry->valid = true;
- entry->vid = vid;
-
- /* Exclude all ports */
- for (i = 0; i < mv88e6xxx_num_ports(chip); ++i)
- entry->member[i] =
- MV88E6XXX_G1_VTU_DATA_MEMBER_TAG_NON_MEMBER;
-
- return mv88e6xxx_atu_new(chip, &entry->fid);
- }
-
- /* switchdev expects -EOPNOTSUPP to honor software VLANs */
- return -EOPNOTSUPP;
-}
-
static int mv88e6xxx_port_check_hw_vlan(struct dsa_switch *ds, int port,
u16 vid_begin, u16 vid_end)
{
struct mv88e6xxx_chip *chip = ds->priv;
- struct mv88e6xxx_vtu_entry vlan = {
- .vid = vid_begin - 1,
- };
+ struct mv88e6xxx_vtu_entry vlan;
int i, err;
/* DSA and CPU ports have to be members of multiple vlans */
@@ -1429,12 +1384,13 @@ static int mv88e6xxx_port_check_hw_vlan(struct dsa_switch *ds, int port,
if (!vid_begin)
return -EOPNOTSUPP;
- mv88e6xxx_reg_lock(chip);
+ vlan.vid = vid_begin - 1;
+ vlan.valid = false;
do {
err = mv88e6xxx_vtu_getnext(chip, &vlan);
if (err)
- goto unlock;
+ return err;
if (!vlan.valid)
break;
@@ -1463,15 +1419,11 @@ static int mv88e6xxx_port_check_hw_vlan(struct dsa_switch *ds, int port,
dev_err(ds->dev, "p%d: hw VLAN %d already used by port %d in %s\n",
port, vlan.vid, i,
netdev_name(dsa_to_port(ds, i)->bridge_dev));
- err = -EOPNOTSUPP;
- goto unlock;
+ return -EOPNOTSUPP;
}
} while (vlan.vid < vid_end);
-unlock:
- mv88e6xxx_reg_unlock(chip);
-
- return err;
+ return 0;
}
static int mv88e6xxx_port_vlan_filtering(struct dsa_switch *ds, int port,
@@ -1505,38 +1457,51 @@ mv88e6xxx_port_vlan_prepare(struct dsa_switch *ds, int port,
/* If the requested port doesn't belong to the same bridge as the VLAN
* members, do not support it (yet) and fallback to software VLAN.
*/
+ mv88e6xxx_reg_lock(chip);
err = mv88e6xxx_port_check_hw_vlan(ds, port, vlan->vid_begin,
vlan->vid_end);
- if (err)
- return err;
+ mv88e6xxx_reg_unlock(chip);
/* We don't need any dynamic resource from the kernel (yet),
* so skip the prepare phase.
*/
- return 0;
+ return err;
}
static int mv88e6xxx_port_db_load_purge(struct mv88e6xxx_chip *chip, int port,
const unsigned char *addr, u16 vid,
u8 state)
{
- struct mv88e6xxx_vtu_entry vlan;
struct mv88e6xxx_atu_entry entry;
+ struct mv88e6xxx_vtu_entry vlan;
+ u16 fid;
int err;
/* Null VLAN ID corresponds to the port private database */
- if (vid == 0)
- err = mv88e6xxx_port_get_fid(chip, port, &vlan.fid);
- else
- err = mv88e6xxx_vtu_get(chip, vid, &vlan, false);
- if (err)
- return err;
+ if (vid == 0) {
+ err = mv88e6xxx_port_get_fid(chip, port, &fid);
+ if (err)
+ return err;
+ } else {
+ vlan.vid = vid - 1;
+ vlan.valid = false;
+
+ err = mv88e6xxx_vtu_getnext(chip, &vlan);
+ if (err)
+ return err;
+
+ /* switchdev expects -EOPNOTSUPP to honor software VLANs */
+ if (vlan.vid != vid || !vlan.valid)
+ return -EOPNOTSUPP;
+
+ fid = vlan.fid;
+ }
entry.state = MV88E6XXX_G1_ATU_DATA_STATE_UNUSED;
ether_addr_copy(entry.mac, addr);
eth_addr_dec(entry.mac);
- err = mv88e6xxx_g1_atu_getnext(chip, vlan.fid, &entry);
+ err = mv88e6xxx_g1_atu_getnext(chip, fid, &entry);
if (err)
return err;
@@ -1557,7 +1522,7 @@ static int mv88e6xxx_port_db_load_purge(struct mv88e6xxx_chip *chip, int port,
entry.state = state;
}
- return mv88e6xxx_g1_atu_loadpurge(chip, vlan.fid, &entry);
+ return mv88e6xxx_g1_atu_loadpurge(chip, fid, &entry);
}
static int mv88e6xxx_port_add_broadcast(struct mv88e6xxx_chip *chip, int port,
@@ -1583,23 +1548,58 @@ static int mv88e6xxx_broadcast_setup(struct mv88e6xxx_chip *chip, u16 vid)
return 0;
}
-static int _mv88e6xxx_port_vlan_add(struct mv88e6xxx_chip *chip, int port,
+static int mv88e6xxx_port_vlan_join(struct mv88e6xxx_chip *chip, int port,
u16 vid, u8 member)
{
+ const u8 non_member = MV88E6XXX_G1_VTU_DATA_MEMBER_TAG_NON_MEMBER;
struct mv88e6xxx_vtu_entry vlan;
- int err;
+ int i, err;
- err = mv88e6xxx_vtu_get(chip, vid, &vlan, true);
- if (err)
- return err;
+ if (!vid)
+ return -EOPNOTSUPP;
- vlan.member[port] = member;
+ vlan.vid = vid - 1;
+ vlan.valid = false;
- err = mv88e6xxx_vtu_loadpurge(chip, &vlan);
+ err = mv88e6xxx_vtu_getnext(chip, &vlan);
if (err)
return err;
- return mv88e6xxx_broadcast_setup(chip, vid);
+ if (vlan.vid != vid || !vlan.valid) {
+ memset(&vlan, 0, sizeof(vlan));
+
+ err = mv88e6xxx_atu_new(chip, &vlan.fid);
+ if (err)
+ return err;
+
+ for (i = 0; i < mv88e6xxx_num_ports(chip); ++i)
+ if (i == port)
+ vlan.member[i] = member;
+ else
+ vlan.member[i] = non_member;
+
+ vlan.vid = vid;
+ vlan.valid = true;
+
+ err = mv88e6xxx_vtu_loadpurge(chip, &vlan);
+ if (err)
+ return err;
+
+ err = mv88e6xxx_broadcast_setup(chip, vlan.vid);
+ if (err)
+ return err;
+ } else if (vlan.member[port] != member) {
+ vlan.member[port] = member;
+
+ err = mv88e6xxx_vtu_loadpurge(chip, &vlan);
+ if (err)
+ return err;
+ } else {
+ dev_info(chip->dev, "p%d: already a member of VLAN %d\n",
+ port, vid);
+ }
+
+ return 0;
}
static void mv88e6xxx_port_vlan_add(struct dsa_switch *ds, int port,
@@ -1624,7 +1624,7 @@ static void mv88e6xxx_port_vlan_add(struct dsa_switch *ds, int port,
mv88e6xxx_reg_lock(chip);
for (vid = vlan->vid_begin; vid <= vlan->vid_end; ++vid)
- if (_mv88e6xxx_port_vlan_add(chip, port, vid, member))
+ if (mv88e6xxx_port_vlan_join(chip, port, vid, member))
dev_err(ds->dev, "p%d: failed to add VLAN %d%c\n", port,
vid, untagged ? 'u' : 't');
@@ -1635,18 +1635,27 @@ static void mv88e6xxx_port_vlan_add(struct dsa_switch *ds, int port,
mv88e6xxx_reg_unlock(chip);
}
-static int _mv88e6xxx_port_vlan_del(struct mv88e6xxx_chip *chip,
- int port, u16 vid)
+static int mv88e6xxx_port_vlan_leave(struct mv88e6xxx_chip *chip,
+ int port, u16 vid)
{
struct mv88e6xxx_vtu_entry vlan;
int i, err;
- err = mv88e6xxx_vtu_get(chip, vid, &vlan, false);
+ if (!vid)
+ return -EOPNOTSUPP;
+
+ vlan.vid = vid - 1;
+ vlan.valid = false;
+
+ err = mv88e6xxx_vtu_getnext(chip, &vlan);
if (err)
return err;
- /* Tell switchdev if this VLAN is handled in software */
- if (vlan.member[port] == MV88E6XXX_G1_VTU_DATA_MEMBER_TAG_NON_MEMBER)
+ /* If the VLAN doesn't exist in hardware or the port isn't a member,
+ * tell switchdev that this VLAN is likely handled in software.
+ */
+ if (vlan.vid != vid || !vlan.valid ||
+ vlan.member[port] == MV88E6XXX_G1_VTU_DATA_MEMBER_TAG_NON_MEMBER)
return -EOPNOTSUPP;
vlan.member[port] = MV88E6XXX_G1_VTU_DATA_MEMBER_TAG_NON_MEMBER;
@@ -1685,7 +1694,7 @@ static int mv88e6xxx_port_vlan_del(struct dsa_switch *ds, int port,
goto unlock;
for (vid = vlan->vid_begin; vid <= vlan->vid_end; ++vid) {
- err = _mv88e6xxx_port_vlan_del(chip, port, vid);
+ err = mv88e6xxx_port_vlan_leave(chip, port, vid);
if (err)
goto unlock;
@@ -1768,9 +1777,7 @@ static int mv88e6xxx_port_db_dump_fid(struct mv88e6xxx_chip *chip,
static int mv88e6xxx_port_db_dump(struct mv88e6xxx_chip *chip, int port,
dsa_fdb_dump_cb_t *cb, void *data)
{
- struct mv88e6xxx_vtu_entry vlan = {
- .vid = chip->info->max_vid,
- };
+ struct mv88e6xxx_vtu_entry vlan;
u16 fid;
int err;
@@ -1784,6 +1791,9 @@ static int mv88e6xxx_port_db_dump(struct mv88e6xxx_chip *chip, int port,
return err;
/* Dump VLANs' Filtering Information Databases */
+ vlan.vid = chip->info->max_vid;
+ vlan.valid = false;
+
do {
err = mv88e6xxx_vtu_getnext(chip, &vlan);
if (err)
@@ -2047,10 +2057,26 @@ static int mv88e6xxx_setup_egress_floods(struct mv88e6xxx_chip *chip, int port)
static int mv88e6xxx_serdes_power(struct mv88e6xxx_chip *chip, int port,
bool on)
{
- if (chip->info->ops->serdes_power)
- return chip->info->ops->serdes_power(chip, port, on);
+ int err;
- return 0;
+ if (!chip->info->ops->serdes_power)
+ return 0;
+
+ if (on) {
+ err = chip->info->ops->serdes_power(chip, port, true);
+ if (err)
+ return err;
+
+ if (chip->info->ops->serdes_irq_setup)
+ err = chip->info->ops->serdes_irq_setup(chip, port);
+ } else {
+ if (chip->info->ops->serdes_irq_free)
+ chip->info->ops->serdes_irq_free(chip, port);
+
+ err = chip->info->ops->serdes_power(chip, port, false);
+ }
+
+ return err;
}
static int mv88e6xxx_setup_upstream_port(struct mv88e6xxx_chip *chip, int port)
@@ -2141,16 +2167,6 @@ static int mv88e6xxx_setup_port(struct mv88e6xxx_chip *chip, int port)
if (err)
return err;
- /* Enable the SERDES interface for DSA and CPU ports. Normal
- * ports SERDES are enabled when the port is enabled, thus
- * saving a bit of power.
- */
- if ((dsa_is_cpu_port(ds, port) || dsa_is_dsa_port(ds, port))) {
- err = mv88e6xxx_serdes_power(chip, port, true);
- if (err)
- return err;
- }
-
/* Port Control 2: don't force a good FCS, set the maximum frame size to
* 10240 bytes, disable 802.1q tags checking, don't discard tagged or
* untagged frames on this port, do a destination address lookup on all
@@ -2227,9 +2243,11 @@ static int mv88e6xxx_setup_port(struct mv88e6xxx_chip *chip, int port)
return err;
}
- err = mv88e6xxx_setup_message_port(chip, port);
- if (err)
- return err;
+ if (chip->info->ops->port_setup_message_port) {
+ err = chip->info->ops->port_setup_message_port(chip, port);
+ if (err)
+ return err;
+ }
/* Port based VLAN map: give each port the same default address
* database, and allow bidirectional communication between the
@@ -2256,12 +2274,7 @@ static int mv88e6xxx_port_enable(struct dsa_switch *ds, int port,
int err;
mv88e6xxx_reg_lock(chip);
-
err = mv88e6xxx_serdes_power(chip, port, true);
-
- if (!err && chip->info->ops->serdes_irq_setup)
- err = chip->info->ops->serdes_irq_setup(chip, port);
-
mv88e6xxx_reg_unlock(chip);
return err;
@@ -2272,16 +2285,8 @@ static void mv88e6xxx_port_disable(struct dsa_switch *ds, int port)
struct mv88e6xxx_chip *chip = ds->priv;
mv88e6xxx_reg_lock(chip);
-
- if (mv88e6xxx_port_set_state(chip, port, BR_STATE_DISABLED))
- dev_err(chip->dev, "failed to disable port\n");
-
- if (chip->info->ops->serdes_irq_free)
- chip->info->ops->serdes_irq_free(chip, port);
-
if (mv88e6xxx_serdes_power(chip, port, false))
dev_err(chip->dev, "failed to power off SERDES\n");
-
mv88e6xxx_reg_unlock(chip);
}
@@ -2336,8 +2341,10 @@ static int mv88e6390_hidden_write(struct mv88e6xxx_chip *chip, int port,
static int mv88e6390_hidden_wait(struct mv88e6xxx_chip *chip)
{
- return mv88e6xxx_wait(chip, PORT_RESERVED_1A_CTRL_PORT,
- PORT_RESERVED_1A, PORT_RESERVED_1A_BUSY);
+ int bit = __bf_shf(PORT_RESERVED_1A_BUSY);
+
+ return mv88e6xxx_wait_bit(chip, PORT_RESERVED_1A_CTRL_PORT,
+ PORT_RESERVED_1A, bit, 0);
}
@@ -2444,17 +2451,14 @@ static int mv88e6xxx_setup(struct dsa_switch *ds)
/* Setup Switch Port Registers */
for (i = 0; i < mv88e6xxx_num_ports(chip); i++) {
- if (dsa_is_unused_port(ds, i)) {
- err = mv88e6xxx_port_set_state(chip, i,
- BR_STATE_DISABLED);
- if (err)
- goto unlock;
-
- err = mv88e6xxx_serdes_power(chip, i, false);
- if (err)
- goto unlock;
-
+ if (dsa_is_unused_port(ds, i))
continue;
+
+ /* Prevent the use of an invalid port. */
+ if (mv88e6xxx_is_invalid_port(chip, i)) {
+ dev_err(chip->dev, "port %d is invalid\n", i);
+ err = -EINVAL;
+ goto unlock;
}
err = mv88e6xxx_setup_port(chip, i);
@@ -2773,6 +2777,7 @@ static const struct mv88e6xxx_ops mv88e6085_ops = {
.port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
.port_link_state = mv88e6352_port_link_state,
.port_get_cmode = mv88e6185_port_get_cmode,
+ .port_setup_message_port = mv88e6xxx_setup_message_port,
.stats_snapshot = mv88e6xxx_g1_stats_snapshot,
.stats_set_histogram = mv88e6095_g1_stats_set_histogram,
.stats_get_sset_count = mv88e6095_stats_get_sset_count,
@@ -2807,6 +2812,7 @@ static const struct mv88e6xxx_ops mv88e6095_ops = {
.port_set_upstream_port = mv88e6095_port_set_upstream_port,
.port_link_state = mv88e6185_port_link_state,
.port_get_cmode = mv88e6185_port_get_cmode,
+ .port_setup_message_port = mv88e6xxx_setup_message_port,
.stats_snapshot = mv88e6xxx_g1_stats_snapshot,
.stats_set_histogram = mv88e6095_g1_stats_set_histogram,
.stats_get_sset_count = mv88e6095_stats_get_sset_count,
@@ -2843,6 +2849,7 @@ static const struct mv88e6xxx_ops mv88e6097_ops = {
.port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
.port_link_state = mv88e6352_port_link_state,
.port_get_cmode = mv88e6185_port_get_cmode,
+ .port_setup_message_port = mv88e6xxx_setup_message_port,
.stats_snapshot = mv88e6xxx_g1_stats_snapshot,
.stats_set_histogram = mv88e6095_g1_stats_set_histogram,
.stats_get_sset_count = mv88e6095_stats_get_sset_count,
@@ -2877,6 +2884,7 @@ static const struct mv88e6xxx_ops mv88e6123_ops = {
.port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
.port_link_state = mv88e6352_port_link_state,
.port_get_cmode = mv88e6185_port_get_cmode,
+ .port_setup_message_port = mv88e6xxx_setup_message_port,
.stats_snapshot = mv88e6320_g1_stats_snapshot,
.stats_set_histogram = mv88e6095_g1_stats_set_histogram,
.stats_get_sset_count = mv88e6095_stats_get_sset_count,
@@ -2914,6 +2922,7 @@ static const struct mv88e6xxx_ops mv88e6131_ops = {
.port_set_pause = mv88e6185_port_set_pause,
.port_link_state = mv88e6352_port_link_state,
.port_get_cmode = mv88e6185_port_get_cmode,
+ .port_setup_message_port = mv88e6xxx_setup_message_port,
.stats_snapshot = mv88e6xxx_g1_stats_snapshot,
.stats_set_histogram = mv88e6095_g1_stats_set_histogram,
.stats_get_sset_count = mv88e6095_stats_get_sset_count,
@@ -2958,6 +2967,7 @@ static const struct mv88e6xxx_ops mv88e6141_ops = {
.port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
.port_link_state = mv88e6352_port_link_state,
.port_get_cmode = mv88e6352_port_get_cmode,
+ .port_setup_message_port = mv88e6xxx_setup_message_port,
.stats_snapshot = mv88e6390_g1_stats_snapshot,
.stats_set_histogram = mv88e6095_g1_stats_set_histogram,
.stats_get_sset_count = mv88e6320_stats_get_sset_count,
@@ -2998,6 +3008,7 @@ static const struct mv88e6xxx_ops mv88e6161_ops = {
.port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
.port_link_state = mv88e6352_port_link_state,
.port_get_cmode = mv88e6185_port_get_cmode,
+ .port_setup_message_port = mv88e6xxx_setup_message_port,
.stats_snapshot = mv88e6xxx_g1_stats_snapshot,
.stats_set_histogram = mv88e6095_g1_stats_set_histogram,
.stats_get_sset_count = mv88e6095_stats_get_sset_count,
@@ -3031,6 +3042,7 @@ static const struct mv88e6xxx_ops mv88e6165_ops = {
.port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
.port_link_state = mv88e6352_port_link_state,
.port_get_cmode = mv88e6185_port_get_cmode,
+ .port_setup_message_port = mv88e6xxx_setup_message_port,
.stats_snapshot = mv88e6xxx_g1_stats_snapshot,
.stats_set_histogram = mv88e6095_g1_stats_set_histogram,
.stats_get_sset_count = mv88e6095_stats_get_sset_count,
@@ -3072,6 +3084,7 @@ static const struct mv88e6xxx_ops mv88e6171_ops = {
.port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
.port_link_state = mv88e6352_port_link_state,
.port_get_cmode = mv88e6352_port_get_cmode,
+ .port_setup_message_port = mv88e6xxx_setup_message_port,
.stats_snapshot = mv88e6320_g1_stats_snapshot,
.stats_set_histogram = mv88e6095_g1_stats_set_histogram,
.stats_get_sset_count = mv88e6095_stats_get_sset_count,
@@ -3113,6 +3126,7 @@ static const struct mv88e6xxx_ops mv88e6172_ops = {
.port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
.port_link_state = mv88e6352_port_link_state,
.port_get_cmode = mv88e6352_port_get_cmode,
+ .port_setup_message_port = mv88e6xxx_setup_message_port,
.stats_snapshot = mv88e6320_g1_stats_snapshot,
.stats_set_histogram = mv88e6095_g1_stats_set_histogram,
.stats_get_sset_count = mv88e6095_stats_get_sset_count,
@@ -3155,6 +3169,7 @@ static const struct mv88e6xxx_ops mv88e6175_ops = {
.port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
.port_link_state = mv88e6352_port_link_state,
.port_get_cmode = mv88e6352_port_get_cmode,
+ .port_setup_message_port = mv88e6xxx_setup_message_port,
.stats_snapshot = mv88e6320_g1_stats_snapshot,
.stats_set_histogram = mv88e6095_g1_stats_set_histogram,
.stats_get_sset_count = mv88e6095_stats_get_sset_count,
@@ -3196,6 +3211,7 @@ static const struct mv88e6xxx_ops mv88e6176_ops = {
.port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
.port_link_state = mv88e6352_port_link_state,
.port_get_cmode = mv88e6352_port_get_cmode,
+ .port_setup_message_port = mv88e6xxx_setup_message_port,
.stats_snapshot = mv88e6320_g1_stats_snapshot,
.stats_set_histogram = mv88e6095_g1_stats_set_histogram,
.stats_get_sset_count = mv88e6095_stats_get_sset_count,
@@ -3234,6 +3250,7 @@ static const struct mv88e6xxx_ops mv88e6185_ops = {
.port_set_pause = mv88e6185_port_set_pause,
.port_link_state = mv88e6185_port_link_state,
.port_get_cmode = mv88e6185_port_get_cmode,
+ .port_setup_message_port = mv88e6xxx_setup_message_port,
.stats_snapshot = mv88e6xxx_g1_stats_snapshot,
.stats_set_histogram = mv88e6095_g1_stats_set_histogram,
.stats_get_sset_count = mv88e6095_stats_get_sset_count,
@@ -3276,6 +3293,7 @@ static const struct mv88e6xxx_ops mv88e6190_ops = {
.port_link_state = mv88e6352_port_link_state,
.port_get_cmode = mv88e6352_port_get_cmode,
.port_set_cmode = mv88e6390_port_set_cmode,
+ .port_setup_message_port = mv88e6xxx_setup_message_port,
.stats_snapshot = mv88e6390_g1_stats_snapshot,
.stats_set_histogram = mv88e6390_g1_stats_set_histogram,
.stats_get_sset_count = mv88e6320_stats_get_sset_count,
@@ -3321,6 +3339,7 @@ static const struct mv88e6xxx_ops mv88e6190x_ops = {
.port_link_state = mv88e6352_port_link_state,
.port_get_cmode = mv88e6352_port_get_cmode,
.port_set_cmode = mv88e6390x_port_set_cmode,
+ .port_setup_message_port = mv88e6xxx_setup_message_port,
.stats_snapshot = mv88e6390_g1_stats_snapshot,
.stats_set_histogram = mv88e6390_g1_stats_set_histogram,
.stats_get_sset_count = mv88e6320_stats_get_sset_count,
@@ -3366,6 +3385,7 @@ static const struct mv88e6xxx_ops mv88e6191_ops = {
.port_link_state = mv88e6352_port_link_state,
.port_get_cmode = mv88e6352_port_get_cmode,
.port_set_cmode = mv88e6390_port_set_cmode,
+ .port_setup_message_port = mv88e6xxx_setup_message_port,
.stats_snapshot = mv88e6390_g1_stats_snapshot,
.stats_set_histogram = mv88e6390_g1_stats_set_histogram,
.stats_get_sset_count = mv88e6320_stats_get_sset_count,
@@ -3413,6 +3433,7 @@ static const struct mv88e6xxx_ops mv88e6240_ops = {
.port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
.port_link_state = mv88e6352_port_link_state,
.port_get_cmode = mv88e6352_port_get_cmode,
+ .port_setup_message_port = mv88e6xxx_setup_message_port,
.stats_snapshot = mv88e6320_g1_stats_snapshot,
.stats_set_histogram = mv88e6095_g1_stats_set_histogram,
.stats_get_sset_count = mv88e6095_stats_get_sset_count,
@@ -3471,6 +3492,8 @@ static const struct mv88e6xxx_ops mv88e6250_ops = {
.reset = mv88e6250_g1_reset,
.vtu_getnext = mv88e6250_g1_vtu_getnext,
.vtu_loadpurge = mv88e6250_g1_vtu_loadpurge,
+ .avb_ops = &mv88e6352_avb_ops,
+ .ptp_ops = &mv88e6250_ptp_ops,
.phylink_validate = mv88e6065_phylink_validate,
};
@@ -3498,6 +3521,7 @@ static const struct mv88e6xxx_ops mv88e6290_ops = {
.port_link_state = mv88e6352_port_link_state,
.port_get_cmode = mv88e6352_port_get_cmode,
.port_set_cmode = mv88e6390_port_set_cmode,
+ .port_setup_message_port = mv88e6xxx_setup_message_port,
.stats_snapshot = mv88e6390_g1_stats_snapshot,
.stats_set_histogram = mv88e6390_g1_stats_set_histogram,
.stats_get_sset_count = mv88e6320_stats_get_sset_count,
@@ -3545,6 +3569,7 @@ static const struct mv88e6xxx_ops mv88e6320_ops = {
.port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
.port_link_state = mv88e6352_port_link_state,
.port_get_cmode = mv88e6352_port_get_cmode,
+ .port_setup_message_port = mv88e6xxx_setup_message_port,
.stats_snapshot = mv88e6320_g1_stats_snapshot,
.stats_set_histogram = mv88e6095_g1_stats_set_histogram,
.stats_get_sset_count = mv88e6320_stats_get_sset_count,
@@ -3588,6 +3613,7 @@ static const struct mv88e6xxx_ops mv88e6321_ops = {
.port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
.port_link_state = mv88e6352_port_link_state,
.port_get_cmode = mv88e6352_port_get_cmode,
+ .port_setup_message_port = mv88e6xxx_setup_message_port,
.stats_snapshot = mv88e6320_g1_stats_snapshot,
.stats_set_histogram = mv88e6095_g1_stats_set_histogram,
.stats_get_sset_count = mv88e6320_stats_get_sset_count,
@@ -3631,6 +3657,7 @@ static const struct mv88e6xxx_ops mv88e6341_ops = {
.port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
.port_link_state = mv88e6352_port_link_state,
.port_get_cmode = mv88e6352_port_get_cmode,
+ .port_setup_message_port = mv88e6xxx_setup_message_port,
.stats_snapshot = mv88e6390_g1_stats_snapshot,
.stats_set_histogram = mv88e6095_g1_stats_set_histogram,
.stats_get_sset_count = mv88e6320_stats_get_sset_count,
@@ -3674,6 +3701,7 @@ static const struct mv88e6xxx_ops mv88e6350_ops = {
.port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
.port_link_state = mv88e6352_port_link_state,
.port_get_cmode = mv88e6352_port_get_cmode,
+ .port_setup_message_port = mv88e6xxx_setup_message_port,
.stats_snapshot = mv88e6320_g1_stats_snapshot,
.stats_set_histogram = mv88e6095_g1_stats_set_histogram,
.stats_get_sset_count = mv88e6095_stats_get_sset_count,
@@ -3713,6 +3741,7 @@ static const struct mv88e6xxx_ops mv88e6351_ops = {
.port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
.port_link_state = mv88e6352_port_link_state,
.port_get_cmode = mv88e6352_port_get_cmode,
+ .port_setup_message_port = mv88e6xxx_setup_message_port,
.stats_snapshot = mv88e6320_g1_stats_snapshot,
.stats_set_histogram = mv88e6095_g1_stats_set_histogram,
.stats_get_sset_count = mv88e6095_stats_get_sset_count,
@@ -3756,6 +3785,7 @@ static const struct mv88e6xxx_ops mv88e6352_ops = {
.port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
.port_link_state = mv88e6352_port_link_state,
.port_get_cmode = mv88e6352_port_get_cmode,
+ .port_setup_message_port = mv88e6xxx_setup_message_port,
.stats_snapshot = mv88e6320_g1_stats_snapshot,
.stats_set_histogram = mv88e6095_g1_stats_set_histogram,
.stats_get_sset_count = mv88e6095_stats_get_sset_count,
@@ -3808,6 +3838,7 @@ static const struct mv88e6xxx_ops mv88e6390_ops = {
.port_link_state = mv88e6352_port_link_state,
.port_get_cmode = mv88e6352_port_get_cmode,
.port_set_cmode = mv88e6390_port_set_cmode,
+ .port_setup_message_port = mv88e6xxx_setup_message_port,
.stats_snapshot = mv88e6390_g1_stats_snapshot,
.stats_set_histogram = mv88e6390_g1_stats_set_histogram,
.stats_get_sset_count = mv88e6320_stats_get_sset_count,
@@ -3857,6 +3888,7 @@ static const struct mv88e6xxx_ops mv88e6390x_ops = {
.port_link_state = mv88e6352_port_link_state,
.port_get_cmode = mv88e6352_port_get_cmode,
.port_set_cmode = mv88e6390x_port_set_cmode,
+ .port_setup_message_port = mv88e6xxx_setup_message_port,
.stats_snapshot = mv88e6390_g1_stats_snapshot,
.stats_set_histogram = mv88e6390_g1_stats_set_histogram,
.stats_get_sset_count = mv88e6320_stats_get_sset_count,
@@ -4235,6 +4267,33 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.ops = &mv88e6191_ops,
},
+ [MV88E6220] = {
+ .prod_num = MV88E6XXX_PORT_SWITCH_ID_PROD_6220,
+ .family = MV88E6XXX_FAMILY_6250,
+ .name = "Marvell 88E6220",
+ .num_databases = 64,
+
+ /* Ports 2-4 are not routed to pins
+ * => usable ports 0, 1, 5, 6
+ */
+ .num_ports = 7,
+ .num_internal_phys = 2,
+ .invalid_port_mask = BIT(2) | BIT(3) | BIT(4),
+ .max_vid = 4095,
+ .port_base_addr = 0x08,
+ .phy_base_addr = 0x00,
+ .global1_addr = 0x0f,
+ .global2_addr = 0x07,
+ .age_time_coeff = 15000,
+ .g1_irqs = 9,
+ .g2_irqs = 10,
+ .atu_move_port_mask = 0xf,
+ .dual_chip = true,
+ .tag_protocol = DSA_TAG_PROTO_DSA,
+ .ptp_support = true,
+ .ops = &mv88e6250_ops,
+ },
+
[MV88E6240] = {
.prod_num = MV88E6XXX_PORT_SWITCH_ID_PROD_6240,
.family = MV88E6XXX_FAMILY_6352,
@@ -4277,6 +4336,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.atu_move_port_mask = 0xf,
.dual_chip = true,
.tag_protocol = DSA_TAG_PROTO_DSA,
+ .ptp_support = true,
.ops = &mv88e6250_ops,
},
diff --git a/drivers/net/dsa/mv88e6xxx/chip.h b/drivers/net/dsa/mv88e6xxx/chip.h
index 4646e46d47f2..a406be2f5652 100644
--- a/drivers/net/dsa/mv88e6xxx/chip.h
+++ b/drivers/net/dsa/mv88e6xxx/chip.h
@@ -57,6 +57,7 @@ enum mv88e6xxx_model {
MV88E6190,
MV88E6190X,
MV88E6191,
+ MV88E6220,
MV88E6240,
MV88E6250,
MV88E6290,
@@ -77,7 +78,7 @@ enum mv88e6xxx_family {
MV88E6XXX_FAMILY_6097, /* 6046 6085 6096 6097 */
MV88E6XXX_FAMILY_6165, /* 6123 6161 6165 */
MV88E6XXX_FAMILY_6185, /* 6108 6121 6122 6131 6152 6155 6182 6185 */
- MV88E6XXX_FAMILY_6250, /* 6250 */
+ MV88E6XXX_FAMILY_6250, /* 6220 6250 */
MV88E6XXX_FAMILY_6320, /* 6320 6321 */
MV88E6XXX_FAMILY_6341, /* 6141 6341 */
MV88E6XXX_FAMILY_6351, /* 6171 6175 6350 6351 */
@@ -105,6 +106,11 @@ struct mv88e6xxx_info {
unsigned int g2_irqs;
bool pvt;
+ /* Mark certain ports as invalid. This is required, for example, for
+ * the MV88E6220 (which is in general a MV88E6250 with 7 ports) where
+ * ports 2-4 are not routed to pins.
+ */
+ unsigned int invalid_port_mask;
/* Multi-chip Addressing Mode.
* Some chips respond to only 2 registers of its own SMI device address
* when it is non-zero, and use indirect access to internal registers.
@@ -389,6 +395,7 @@ struct mv88e6xxx_ops {
u8 out);
int (*port_disable_learn_limit)(struct mv88e6xxx_chip *chip, int port);
int (*port_disable_pri_override)(struct mv88e6xxx_chip *chip, int port);
+ int (*port_setup_message_port)(struct mv88e6xxx_chip *chip, int port);
/* CMODE control what PHY mode the MAC will use, eg. SGMII, RGMII, etc.
* Some chips allow this to be configured on specific ports.
@@ -532,6 +539,10 @@ struct mv88e6xxx_ptp_ops {
int arr1_sts_reg;
int dep_sts_reg;
u32 rx_filters;
+ u32 cc_shift;
+ u32 cc_mult;
+ u32 cc_mult_num;
+ u32 cc_mult_dem;
};
#define STATS_TYPE_PORT BIT(0)
@@ -570,11 +581,17 @@ static inline unsigned int mv88e6xxx_num_gpio(struct mv88e6xxx_chip *chip)
return chip->info->num_gpio;
}
+static inline bool mv88e6xxx_is_invalid_port(struct mv88e6xxx_chip *chip, int port)
+{
+ return (chip->info->invalid_port_mask & BIT(port)) != 0;
+}
+
int mv88e6xxx_read(struct mv88e6xxx_chip *chip, int addr, int reg, u16 *val);
int mv88e6xxx_write(struct mv88e6xxx_chip *chip, int addr, int reg, u16 val);
-int mv88e6xxx_update(struct mv88e6xxx_chip *chip, int addr, int reg,
- u16 update);
-int mv88e6xxx_wait(struct mv88e6xxx_chip *chip, int addr, int reg, u16 mask);
+int mv88e6xxx_wait_mask(struct mv88e6xxx_chip *chip, int addr, int reg,
+ u16 mask, u16 val);
+int mv88e6xxx_wait_bit(struct mv88e6xxx_chip *chip, int addr, int reg,
+ int bit, int val);
int mv88e6xxx_port_setup_mac(struct mv88e6xxx_chip *chip, int port, int link,
int speed, int duplex, int pause,
phy_interface_t mode);
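
The invalid_port_mask / mv88e6xxx_is_invalid_port() pair introduced above lets per-port code skip ports that are not bonded out on packages like the 88E6220. A minimal usage sketch (the loop and the mv88e6xxx_setup_port() call are illustrative assumptions, not part of this patch):

/* Sketch: iterate only over ports that are actually routed to pins. */
static int example_setup_ports(struct mv88e6xxx_chip *chip)
{
	int port, err;

	for (port = 0; port < mv88e6xxx_num_ports(chip); port++) {
		if (mv88e6xxx_is_invalid_port(chip, port))
			continue;	/* e.g. ports 2-4 on the 88E6220 */

		err = mv88e6xxx_setup_port(chip, port);	/* hypothetical */
		if (err)
			return err;
	}

	return 0;
}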
diff --git a/drivers/net/dsa/mv88e6xxx/global1.c b/drivers/net/dsa/mv88e6xxx/global1.c
index 1323ef30a5e9..25ec4c0ac589 100644
--- a/drivers/net/dsa/mv88e6xxx/global1.c
+++ b/drivers/net/dsa/mv88e6xxx/global1.c
@@ -27,100 +27,52 @@ int mv88e6xxx_g1_write(struct mv88e6xxx_chip *chip, int reg, u16 val)
return mv88e6xxx_write(chip, addr, reg, val);
}
-int mv88e6xxx_g1_wait(struct mv88e6xxx_chip *chip, int reg, u16 mask)
+int mv88e6xxx_g1_wait_bit(struct mv88e6xxx_chip *chip, int reg,
+ int bit, int val)
{
- return mv88e6xxx_wait(chip, chip->info->global1_addr, reg, mask);
+ return mv88e6xxx_wait_bit(chip, chip->info->global1_addr, reg,
+ bit, val);
+}
+
+int mv88e6xxx_g1_wait_mask(struct mv88e6xxx_chip *chip, int reg,
+ u16 mask, u16 val)
+{
+ return mv88e6xxx_wait_mask(chip, chip->info->global1_addr, reg,
+ mask, val);
}
/* Offset 0x00: Switch Global Status Register */
static int mv88e6185_g1_wait_ppu_disabled(struct mv88e6xxx_chip *chip)
{
- u16 state;
- int i, err;
-
- for (i = 0; i < 16; i++) {
- err = mv88e6xxx_g1_read(chip, MV88E6XXX_G1_STS, &state);
- if (err)
- return err;
-
- /* Check the value of the PPUState bits 15:14 */
- state &= MV88E6185_G1_STS_PPU_STATE_MASK;
- if (state != MV88E6185_G1_STS_PPU_STATE_POLLING)
- return 0;
-
- usleep_range(1000, 2000);
- }
-
- return -ETIMEDOUT;
+ return mv88e6xxx_g1_wait_mask(chip, MV88E6XXX_G1_STS,
+ MV88E6185_G1_STS_PPU_STATE_MASK,
+ MV88E6185_G1_STS_PPU_STATE_DISABLED);
}
static int mv88e6185_g1_wait_ppu_polling(struct mv88e6xxx_chip *chip)
{
- u16 state;
- int i, err;
-
- for (i = 0; i < 16; ++i) {
- err = mv88e6xxx_g1_read(chip, MV88E6XXX_G1_STS, &state);
- if (err)
- return err;
-
- /* Check the value of the PPUState bits 15:14 */
- state &= MV88E6185_G1_STS_PPU_STATE_MASK;
- if (state == MV88E6185_G1_STS_PPU_STATE_POLLING)
- return 0;
-
- usleep_range(1000, 2000);
- }
-
- return -ETIMEDOUT;
+ return mv88e6xxx_g1_wait_mask(chip, MV88E6XXX_G1_STS,
+ MV88E6185_G1_STS_PPU_STATE_MASK,
+ MV88E6185_G1_STS_PPU_STATE_POLLING);
}
static int mv88e6352_g1_wait_ppu_polling(struct mv88e6xxx_chip *chip)
{
- u16 state;
- int i, err;
+ int bit = __bf_shf(MV88E6352_G1_STS_PPU_STATE);
- for (i = 0; i < 16; ++i) {
- err = mv88e6xxx_g1_read(chip, MV88E6XXX_G1_STS, &state);
- if (err)
- return err;
-
- /* Check the value of the PPUState (or InitState) bit 15 */
- if (state & MV88E6352_G1_STS_PPU_STATE)
- return 0;
-
- usleep_range(1000, 2000);
- }
-
- return -ETIMEDOUT;
+ return mv88e6xxx_g1_wait_bit(chip, MV88E6XXX_G1_STS, bit, 1);
}
static int mv88e6xxx_g1_wait_init_ready(struct mv88e6xxx_chip *chip)
{
- const unsigned long timeout = jiffies + 1 * HZ;
- u16 val;
- int err;
+ int bit = __bf_shf(MV88E6XXX_G1_STS_INIT_READY);
/* Wait up to 1 second for the switch to be ready. The InitReady bit 11
* is set to a one when all units inside the device (ATU, VTU, etc.)
* have finished their initialization and are ready to accept frames.
*/
- while (time_before(jiffies, timeout)) {
- err = mv88e6xxx_g1_read(chip, MV88E6XXX_G1_STS, &val);
- if (err)
- return err;
-
- if (val & MV88E6XXX_G1_STS_INIT_READY)
- break;
-
- usleep_range(1000, 2000);
- }
-
- if (time_after(jiffies, timeout))
- return -ETIMEDOUT;
-
- return 0;
+ return mv88e6xxx_g1_wait_bit(chip, MV88E6XXX_G1_STS, bit, 1);
}
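
The mv88e6xxx_wait_bit()/mv88e6xxx_wait_mask() helpers these wrappers call are declared in chip.h but defined outside this excerpt; a plausible implementation, consistent with the 16-iteration usleep_range() loops deleted above, would look like the following (iteration count and sleep range are assumptions):

/* Sketch only: poll a register until (val & mask) == expected value,
 * mirroring the open-coded loops removed in this hunk.
 */
int mv88e6xxx_wait_mask(struct mv88e6xxx_chip *chip, int addr, int reg,
			u16 mask, u16 val)
{
	u16 data;
	int err;
	int i;

	for (i = 0; i < 16; i++) {
		err = mv88e6xxx_read(chip, addr, reg, &data);
		if (err)
			return err;

		if ((data & mask) == val)
			return 0;

		usleep_range(1000, 2000);
	}

	return -ETIMEDOUT;
}

int mv88e6xxx_wait_bit(struct mv88e6xxx_chip *chip, int addr, int reg,
		       int bit, int val)
{
	return mv88e6xxx_wait_mask(chip, addr, reg, BIT(bit),
				   val ? BIT(bit) : 0);
}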
/* Offset 0x01: Switch MAC Address Register Bytes 0 & 1
@@ -476,8 +428,9 @@ int mv88e6xxx_g1_set_device_number(struct mv88e6xxx_chip *chip, int index)
static int mv88e6xxx_g1_stats_wait(struct mv88e6xxx_chip *chip)
{
- return mv88e6xxx_g1_wait(chip, MV88E6XXX_G1_STATS_OP,
- MV88E6XXX_G1_STATS_OP_BUSY);
+ int bit = __bf_shf(MV88E6XXX_G1_STATS_OP_BUSY);
+
+ return mv88e6xxx_g1_wait_bit(chip, MV88E6XXX_G1_STATS_OP, bit, 0);
}
int mv88e6095_g1_stats_set_histogram(struct mv88e6xxx_chip *chip)
diff --git a/drivers/net/dsa/mv88e6xxx/global1.h b/drivers/net/dsa/mv88e6xxx/global1.h
index d444266f7d78..78b9ae22d18c 100644
--- a/drivers/net/dsa/mv88e6xxx/global1.h
+++ b/drivers/net/dsa/mv88e6xxx/global1.h
@@ -249,7 +249,10 @@
int mv88e6xxx_g1_read(struct mv88e6xxx_chip *chip, int reg, u16 *val);
int mv88e6xxx_g1_write(struct mv88e6xxx_chip *chip, int reg, u16 val);
-int mv88e6xxx_g1_wait(struct mv88e6xxx_chip *chip, int reg, u16 mask);
+int mv88e6xxx_g1_wait_bit(struct mv88e6xxx_chip *chip, int reg,
+ int bit, int val);
+int mv88e6xxx_g1_wait_mask(struct mv88e6xxx_chip *chip, int reg,
+ u16 mask, u16 val);
int mv88e6xxx_g1_set_switch_mac(struct mv88e6xxx_chip *chip, u8 *addr);
diff --git a/drivers/net/dsa/mv88e6xxx/global1_atu.c b/drivers/net/dsa/mv88e6xxx/global1_atu.c
index 1cf388e9bd94..18b86515b6bc 100644
--- a/drivers/net/dsa/mv88e6xxx/global1_atu.c
+++ b/drivers/net/dsa/mv88e6xxx/global1_atu.c
@@ -5,6 +5,8 @@
* Copyright (c) 2008 Marvell Semiconductor
* Copyright (c) 2017 Savoir-faire Linux, Inc.
*/
+
+#include <linux/bitfield.h>
#include <linux/interrupt.h>
#include <linux/irqdomain.h>
@@ -75,8 +77,9 @@ int mv88e6xxx_g1_atu_set_age_time(struct mv88e6xxx_chip *chip,
static int mv88e6xxx_g1_atu_op_wait(struct mv88e6xxx_chip *chip)
{
- return mv88e6xxx_g1_wait(chip, MV88E6XXX_G1_ATU_OP,
- MV88E6XXX_G1_ATU_OP_BUSY);
+ int bit = __bf_shf(MV88E6XXX_G1_ATU_OP_BUSY);
+
+ return mv88e6xxx_g1_wait_bit(chip, MV88E6XXX_G1_ATU_OP, bit, 0);
}
static int mv88e6xxx_g1_atu_op(struct mv88e6xxx_chip *chip, u16 fid, u16 op)
diff --git a/drivers/net/dsa/mv88e6xxx/global1_vtu.c b/drivers/net/dsa/mv88e6xxx/global1_vtu.c
index 6cac997360e8..33056a609e96 100644
--- a/drivers/net/dsa/mv88e6xxx/global1_vtu.c
+++ b/drivers/net/dsa/mv88e6xxx/global1_vtu.c
@@ -7,6 +7,7 @@
* Copyright (c) 2017 Savoir-faire Linux, Inc.
*/
+#include <linux/bitfield.h>
#include <linux/interrupt.h>
#include <linux/irqdomain.h>
@@ -67,8 +68,9 @@ static int mv88e6xxx_g1_vtu_sid_write(struct mv88e6xxx_chip *chip,
static int mv88e6xxx_g1_vtu_op_wait(struct mv88e6xxx_chip *chip)
{
- return mv88e6xxx_g1_wait(chip, MV88E6XXX_G1_VTU_OP,
- MV88E6XXX_G1_VTU_OP_BUSY);
+ int bit = __bf_shf(MV88E6XXX_G1_VTU_OP_BUSY);
+
+ return mv88e6xxx_g1_wait_bit(chip, MV88E6XXX_G1_VTU_OP, bit, 0);
}
static int mv88e6xxx_g1_vtu_op(struct mv88e6xxx_chip *chip, u16 op)
diff --git a/drivers/net/dsa/mv88e6xxx/global2.c b/drivers/net/dsa/mv88e6xxx/global2.c
index 2305b94b3051..bdbb72fc20ed 100644
--- a/drivers/net/dsa/mv88e6xxx/global2.c
+++ b/drivers/net/dsa/mv88e6xxx/global2.c
@@ -26,14 +26,11 @@ int mv88e6xxx_g2_write(struct mv88e6xxx_chip *chip, int reg, u16 val)
return mv88e6xxx_write(chip, chip->info->global2_addr, reg, val);
}
-int mv88e6xxx_g2_update(struct mv88e6xxx_chip *chip, int reg, u16 update)
+int mv88e6xxx_g2_wait_bit(struct mv88e6xxx_chip *chip, int reg,
+ int bit, int val)
{
- return mv88e6xxx_update(chip, chip->info->global2_addr, reg, update);
-}
-
-int mv88e6xxx_g2_wait(struct mv88e6xxx_chip *chip, int reg, u16 mask)
-{
- return mv88e6xxx_wait(chip, chip->info->global2_addr, reg, mask);
+ return mv88e6xxx_wait_bit(chip, chip->info->global2_addr, reg,
+ bit, val);
}
/* Offset 0x00: Interrupt Source Register */
@@ -123,7 +120,8 @@ int mv88e6xxx_g2_device_mapping_write(struct mv88e6xxx_chip *chip, int target,
* but bit 4 is reserved on older chips, so it is safe to use.
*/
- return mv88e6xxx_g2_update(chip, MV88E6XXX_G2_DEVICE_MAPPING, val);
+ return mv88e6xxx_g2_write(chip, MV88E6XXX_G2_DEVICE_MAPPING,
+ MV88E6XXX_G2_DEVICE_MAPPING_UPDATE | val);
}
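
This hunk sets the pattern repeated for the trunk, switch-MAC, priority-override, scratch and watchdog registers below: mv88e6xxx_g2_update() used to OR in the register's self-clearing Update bit internally, and after its removal each caller sets that bit explicitly. Schematically (REG and REG_UPDATE are placeholders, not real definitions):

/* before: helper supplied the self-clearing Update bit */
err = mv88e6xxx_g2_update(chip, REG, val);

/* after: caller makes the Update bit explicit */
err = mv88e6xxx_g2_write(chip, REG, REG_UPDATE | val);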
/* Offset 0x07: Trunk Mask Table register */
@@ -136,7 +134,8 @@ static int mv88e6xxx_g2_trunk_mask_write(struct mv88e6xxx_chip *chip, int num,
if (hash)
val |= MV88E6XXX_G2_TRUNK_MASK_HASH;
- return mv88e6xxx_g2_update(chip, MV88E6XXX_G2_TRUNK_MASK, val);
+ return mv88e6xxx_g2_write(chip, MV88E6XXX_G2_TRUNK_MASK,
+ MV88E6XXX_G2_TRUNK_MASK_UPDATE | val);
}
/* Offset 0x08: Trunk Mapping Table register */
@@ -147,7 +146,8 @@ static int mv88e6xxx_g2_trunk_mapping_write(struct mv88e6xxx_chip *chip, int id,
const u16 port_mask = BIT(mv88e6xxx_num_ports(chip)) - 1;
u16 val = (id << 11) | (map & port_mask);
- return mv88e6xxx_g2_update(chip, MV88E6XXX_G2_TRUNK_MAPPING, val);
+ return mv88e6xxx_g2_write(chip, MV88E6XXX_G2_TRUNK_MAPPING,
+ MV88E6XXX_G2_TRUNK_MAPPING_UPDATE | val);
}
int mv88e6xxx_g2_trunk_clear(struct mv88e6xxx_chip *chip)
@@ -178,8 +178,9 @@ int mv88e6xxx_g2_trunk_clear(struct mv88e6xxx_chip *chip)
static int mv88e6xxx_g2_irl_wait(struct mv88e6xxx_chip *chip)
{
- return mv88e6xxx_g2_wait(chip, MV88E6XXX_G2_IRL_CMD,
- MV88E6XXX_G2_IRL_CMD_BUSY);
+ int bit = __bf_shf(MV88E6XXX_G2_IRL_CMD_BUSY);
+
+ return mv88e6xxx_g2_wait_bit(chip, MV88E6XXX_G2_IRL_CMD, bit, 0);
}
static int mv88e6xxx_g2_irl_op(struct mv88e6xxx_chip *chip, u16 op, int port,
@@ -214,8 +215,9 @@ int mv88e6390_g2_irl_init_all(struct mv88e6xxx_chip *chip, int port)
static int mv88e6xxx_g2_pvt_op_wait(struct mv88e6xxx_chip *chip)
{
- return mv88e6xxx_g2_wait(chip, MV88E6XXX_G2_PVT_ADDR,
- MV88E6XXX_G2_PVT_ADDR_BUSY);
+ int bit = __bf_shf(MV88E6XXX_G2_PVT_ADDR_BUSY);
+
+ return mv88e6xxx_g2_wait_bit(chip, MV88E6XXX_G2_PVT_ADDR, bit, 0);
}
static int mv88e6xxx_g2_pvt_op(struct mv88e6xxx_chip *chip, int src_dev,
@@ -261,7 +263,8 @@ static int mv88e6xxx_g2_switch_mac_write(struct mv88e6xxx_chip *chip,
{
u16 val = (pointer << 8) | data;
- return mv88e6xxx_g2_update(chip, MV88E6XXX_G2_SWITCH_MAC, val);
+ return mv88e6xxx_g2_write(chip, MV88E6XXX_G2_SWITCH_MAC,
+ MV88E6XXX_G2_SWITCH_MAC_UPDATE | val);
}
int mv88e6xxx_g2_set_switch_mac(struct mv88e6xxx_chip *chip, u8 *addr)
@@ -284,7 +287,8 @@ static int mv88e6xxx_g2_pot_write(struct mv88e6xxx_chip *chip, int pointer,
{
u16 val = (pointer << 8) | (data & 0x7);
- return mv88e6xxx_g2_update(chip, MV88E6XXX_G2_PRIO_OVERRIDE, val);
+ return mv88e6xxx_g2_write(chip, MV88E6XXX_G2_PRIO_OVERRIDE,
+ MV88E6XXX_G2_PRIO_OVERRIDE_UPDATE | val);
}
int mv88e6xxx_g2_pot_clear(struct mv88e6xxx_chip *chip)
@@ -308,9 +312,16 @@ int mv88e6xxx_g2_pot_clear(struct mv88e6xxx_chip *chip)
static int mv88e6xxx_g2_eeprom_wait(struct mv88e6xxx_chip *chip)
{
- return mv88e6xxx_g2_wait(chip, MV88E6XXX_G2_EEPROM_CMD,
- MV88E6XXX_G2_EEPROM_CMD_BUSY |
- MV88E6XXX_G2_EEPROM_CMD_RUNNING);
+ int bit = __bf_shf(MV88E6XXX_G2_EEPROM_CMD_BUSY);
+ int err;
+
+ err = mv88e6xxx_g2_wait_bit(chip, MV88E6XXX_G2_EEPROM_CMD, bit, 0);
+ if (err)
+ return err;
+
+ bit = __bf_shf(MV88E6XXX_G2_EEPROM_CMD_RUNNING);
+
+ return mv88e6xxx_g2_wait_bit(chip, MV88E6XXX_G2_EEPROM_CMD, bit, 0);
}
static int mv88e6xxx_g2_eeprom_cmd(struct mv88e6xxx_chip *chip, u16 cmd)
@@ -572,8 +583,9 @@ int mv88e6xxx_g2_set_eeprom16(struct mv88e6xxx_chip *chip,
static int mv88e6xxx_g2_smi_phy_wait(struct mv88e6xxx_chip *chip)
{
- return mv88e6xxx_g2_wait(chip, MV88E6XXX_G2_SMI_PHY_CMD,
- MV88E6XXX_G2_SMI_PHY_CMD_BUSY);
+ int bit = __bf_shf(MV88E6XXX_G2_SMI_PHY_CMD_BUSY);
+
+ return mv88e6xxx_g2_wait_bit(chip, MV88E6XXX_G2_SMI_PHY_CMD, bit, 0);
}
static int mv88e6xxx_g2_smi_phy_cmd(struct mv88e6xxx_chip *chip, u16 cmd)
@@ -840,12 +852,13 @@ const struct mv88e6xxx_irq_ops mv88e6250_watchdog_ops = {
static int mv88e6390_watchdog_setup(struct mv88e6xxx_chip *chip)
{
- return mv88e6xxx_g2_update(chip, MV88E6390_G2_WDOG_CTL,
- MV88E6390_G2_WDOG_CTL_PTR_INT_ENABLE |
- MV88E6390_G2_WDOG_CTL_CUT_THROUGH |
- MV88E6390_G2_WDOG_CTL_QUEUE_CONTROLLER |
- MV88E6390_G2_WDOG_CTL_EGRESS |
- MV88E6390_G2_WDOG_CTL_FORCE_IRQ);
+ return mv88e6xxx_g2_write(chip, MV88E6390_G2_WDOG_CTL,
+ MV88E6390_G2_WDOG_CTL_UPDATE |
+ MV88E6390_G2_WDOG_CTL_PTR_INT_ENABLE |
+ MV88E6390_G2_WDOG_CTL_CUT_THROUGH |
+ MV88E6390_G2_WDOG_CTL_QUEUE_CONTROLLER |
+ MV88E6390_G2_WDOG_CTL_EGRESS |
+ MV88E6390_G2_WDOG_CTL_FORCE_IRQ);
}
static int mv88e6390_watchdog_action(struct mv88e6xxx_chip *chip, int irq)
@@ -878,8 +891,9 @@ static int mv88e6390_watchdog_action(struct mv88e6xxx_chip *chip, int irq)
static void mv88e6390_watchdog_free(struct mv88e6xxx_chip *chip)
{
- mv88e6xxx_g2_update(chip, MV88E6390_G2_WDOG_CTL,
- MV88E6390_G2_WDOG_CTL_PTR_INT_ENABLE);
+ mv88e6xxx_g2_write(chip, MV88E6390_G2_WDOG_CTL,
+ MV88E6390_G2_WDOG_CTL_UPDATE |
+ MV88E6390_G2_WDOG_CTL_PTR_INT_ENABLE);
}
const struct mv88e6xxx_irq_ops mv88e6390_watchdog_ops = {
diff --git a/drivers/net/dsa/mv88e6xxx/global2.h b/drivers/net/dsa/mv88e6xxx/global2.h
index a664fc25f132..42da4bca73e8 100644
--- a/drivers/net/dsa/mv88e6xxx/global2.h
+++ b/drivers/net/dsa/mv88e6xxx/global2.h
@@ -295,8 +295,8 @@ static inline int mv88e6xxx_g2_require(struct mv88e6xxx_chip *chip)
int mv88e6xxx_g2_read(struct mv88e6xxx_chip *chip, int reg, u16 *val);
int mv88e6xxx_g2_write(struct mv88e6xxx_chip *chip, int reg, u16 val);
-int mv88e6xxx_g2_update(struct mv88e6xxx_chip *chip, int reg, u16 update);
-int mv88e6xxx_g2_wait(struct mv88e6xxx_chip *chip, int reg, u16 mask);
+int mv88e6xxx_g2_wait_bit(struct mv88e6xxx_chip *chip, int reg,
+ int bit, int val);
int mv88e6352_g2_irl_init_all(struct mv88e6xxx_chip *chip, int port);
int mv88e6390_g2_irl_init_all(struct mv88e6xxx_chip *chip, int port);
@@ -376,12 +376,8 @@ static inline int mv88e6xxx_g2_write(struct mv88e6xxx_chip *chip, int reg, u16 v
return -EOPNOTSUPP;
}
-static inline int mv88e6xxx_g2_update(struct mv88e6xxx_chip *chip, int reg, u16 update)
-{
- return -EOPNOTSUPP;
-}
-
-static inline int mv88e6xxx_g2_wait(struct mv88e6xxx_chip *chip, int reg, u16 mask)
+static inline int mv88e6xxx_g2_wait_bit(struct mv88e6xxx_chip *chip,
+ int reg, int bit, int val)
{
return -EOPNOTSUPP;
}
diff --git a/drivers/net/dsa/mv88e6xxx/global2_avb.c b/drivers/net/dsa/mv88e6xxx/global2_avb.c
index 116b8cf5a6e3..657783e043ff 100644
--- a/drivers/net/dsa/mv88e6xxx/global2_avb.c
+++ b/drivers/net/dsa/mv88e6xxx/global2_avb.c
@@ -11,6 +11,8 @@
* Brandon Streiff <brandon.streiff@ni.com>
*/
+#include <linux/bitfield.h>
+
#include "global2.h"
/* Offset 0x16: AVB Command Register
@@ -27,17 +29,33 @@
/* mv88e6xxx_g2_avb_read -- Read one or multiple 16-bit words.
* The hardware supports snapshotting up to four contiguous registers.
*/
+static int mv88e6xxx_g2_avb_wait(struct mv88e6xxx_chip *chip)
+{
+ int bit = __bf_shf(MV88E6352_G2_AVB_CMD_BUSY);
+
+ return mv88e6xxx_g2_wait_bit(chip, MV88E6352_G2_AVB_CMD, bit, 0);
+}
+
static int mv88e6xxx_g2_avb_read(struct mv88e6xxx_chip *chip, u16 readop,
u16 *data, int len)
{
int err;
int i;
+ err = mv88e6xxx_g2_avb_wait(chip);
+ if (err)
+ return err;
+
/* Hardware can only snapshot four words. */
if (len > 4)
return -E2BIG;
- err = mv88e6xxx_g2_update(chip, MV88E6352_G2_AVB_CMD, readop);
+ err = mv88e6xxx_g2_write(chip, MV88E6352_G2_AVB_CMD,
+ MV88E6352_G2_AVB_CMD_BUSY | readop);
+ if (err)
+ return err;
+
+ err = mv88e6xxx_g2_avb_wait(chip);
if (err)
return err;
@@ -57,11 +75,18 @@ static int mv88e6xxx_g2_avb_write(struct mv88e6xxx_chip *chip, u16 writeop,
{
int err;
+ err = mv88e6xxx_g2_avb_wait(chip);
+ if (err)
+ return err;
+
err = mv88e6xxx_g2_write(chip, MV88E6352_G2_AVB_DATA, data);
if (err)
return err;
- return mv88e6xxx_g2_update(chip, MV88E6352_G2_AVB_CMD, writeop);
+ err = mv88e6xxx_g2_write(chip, MV88E6352_G2_AVB_CMD,
+ MV88E6352_G2_AVB_CMD_BUSY | writeop);
+ if (err)
+ return err;
+
+ return mv88e6xxx_g2_avb_wait(chip);
}
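
Both AVB accessors now follow the same wait/kick/wait handshake: wait for BUSY to clear, issue the command with BUSY set, and wait again so the operation has completed before the data registers are read or reused. Condensed into one sketch (example_g2_avb_op() is an illustrative name, not in the patch):

/* Sketch: generic AVB op following the handshake above;
 * 'op' is a fully formed command word.
 */
static int example_g2_avb_op(struct mv88e6xxx_chip *chip, u16 op)
{
	int err;

	err = mv88e6xxx_g2_avb_wait(chip);	/* wait until idle */
	if (err)
		return err;

	err = mv88e6xxx_g2_write(chip, MV88E6352_G2_AVB_CMD,
				 MV88E6352_G2_AVB_CMD_BUSY | op);
	if (err)
		return err;

	return mv88e6xxx_g2_avb_wait(chip);	/* wait for completion */
}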
static int mv88e6352_g2_avb_port_ptp_read(struct mv88e6xxx_chip *chip,
diff --git a/drivers/net/dsa/mv88e6xxx/global2_scratch.c b/drivers/net/dsa/mv88e6xxx/global2_scratch.c
index baddecadd8be..33b7b9570d29 100644
--- a/drivers/net/dsa/mv88e6xxx/global2_scratch.c
+++ b/drivers/net/dsa/mv88e6xxx/global2_scratch.c
@@ -37,7 +37,8 @@ static int mv88e6xxx_g2_scratch_write(struct mv88e6xxx_chip *chip, int reg,
{
u16 value = (reg << 8) | data;
- return mv88e6xxx_g2_update(chip, MV88E6XXX_G2_SCRATCH_MISC_MISC, value);
+ return mv88e6xxx_g2_write(chip, MV88E6XXX_G2_SCRATCH_MISC_MISC,
+ MV88E6XXX_G2_SCRATCH_MISC_UPDATE | value);
}
/**
diff --git a/drivers/net/dsa/mv88e6xxx/port.c b/drivers/net/dsa/mv88e6xxx/port.c
index 04309ef0a1cc..c95cdb73e5a2 100644
--- a/drivers/net/dsa/mv88e6xxx/port.c
+++ b/drivers/net/dsa/mv88e6xxx/port.c
@@ -590,6 +590,7 @@ int mv88e6250_port_link_state(struct mv88e6xxx_chip *chip, int port,
state->link = !!(reg & MV88E6250_PORT_STS_LINK);
state->an_enabled = 1;
state->an_complete = state->link;
+ state->interface = PHY_INTERFACE_MODE_NA;
return 0;
}
@@ -600,6 +601,43 @@ int mv88e6352_port_link_state(struct mv88e6xxx_chip *chip, int port,
int err;
u16 reg;
+ switch (chip->ports[port].cmode) {
+ case MV88E6XXX_PORT_STS_CMODE_RGMII:
+ err = mv88e6xxx_port_read(chip, port, MV88E6XXX_PORT_MAC_CTL,
+ &reg);
+ if (err)
+ return err;
+
+ if ((reg & MV88E6XXX_PORT_MAC_CTL_RGMII_DELAY_RXCLK) &&
+ (reg & MV88E6XXX_PORT_MAC_CTL_RGMII_DELAY_TXCLK))
+ state->interface = PHY_INTERFACE_MODE_RGMII_ID;
+ else if (reg & MV88E6XXX_PORT_MAC_CTL_RGMII_DELAY_RXCLK)
+ state->interface = PHY_INTERFACE_MODE_RGMII_RXID;
+ else if (reg & MV88E6XXX_PORT_MAC_CTL_RGMII_DELAY_TXCLK)
+ state->interface = PHY_INTERFACE_MODE_RGMII_TXID;
+ else
+ state->interface = PHY_INTERFACE_MODE_RGMII;
+ break;
+ case MV88E6XXX_PORT_STS_CMODE_1000BASE_X:
+ state->interface = PHY_INTERFACE_MODE_1000BASEX;
+ break;
+ case MV88E6XXX_PORT_STS_CMODE_SGMII:
+ state->interface = PHY_INTERFACE_MODE_SGMII;
+ break;
+ case MV88E6XXX_PORT_STS_CMODE_2500BASEX:
+ state->interface = PHY_INTERFACE_MODE_2500BASEX;
+ break;
+ case MV88E6XXX_PORT_STS_CMODE_XAUI:
+ state->interface = PHY_INTERFACE_MODE_XAUI;
+ break;
+ case MV88E6XXX_PORT_STS_CMODE_RXAUI:
+ state->interface = PHY_INTERFACE_MODE_RXAUI;
+ break;
+ default:
+ /* we do not support other cmode values here */
+ state->interface = PHY_INTERFACE_MODE_NA;
+ }
+
err = mv88e6xxx_port_read(chip, port, MV88E6XXX_PORT_STS, &reg);
if (err)
return err;
diff --git a/drivers/net/dsa/mv88e6xxx/port.h b/drivers/net/dsa/mv88e6xxx/port.h
index 8d5a6cd6fb19..1abf5ea033e2 100644
--- a/drivers/net/dsa/mv88e6xxx/port.h
+++ b/drivers/net/dsa/mv88e6xxx/port.h
@@ -42,6 +42,7 @@
#define MV88E6XXX_PORT_STS_TX_PAUSED 0x0020
#define MV88E6XXX_PORT_STS_FLOW_CTL 0x0010
#define MV88E6XXX_PORT_STS_CMODE_MASK 0x000f
+#define MV88E6XXX_PORT_STS_CMODE_RGMII 0x0007
#define MV88E6XXX_PORT_STS_CMODE_100BASE_X 0x0008
#define MV88E6XXX_PORT_STS_CMODE_1000BASE_X 0x0009
#define MV88E6XXX_PORT_STS_CMODE_SGMII 0x000a
@@ -117,6 +118,7 @@
#define MV88E6XXX_PORT_SWITCH_ID_PROD_6190 0x1900
#define MV88E6XXX_PORT_SWITCH_ID_PROD_6191 0x1910
#define MV88E6XXX_PORT_SWITCH_ID_PROD_6185 0x1a70
+#define MV88E6XXX_PORT_SWITCH_ID_PROD_6220 0x2200
#define MV88E6XXX_PORT_SWITCH_ID_PROD_6240 0x2400
#define MV88E6XXX_PORT_SWITCH_ID_PROD_6250 0x2500
#define MV88E6XXX_PORT_SWITCH_ID_PROD_6290 0x2900
diff --git a/drivers/net/dsa/mv88e6xxx/ptp.c b/drivers/net/dsa/mv88e6xxx/ptp.c
index 768d256f7c9f..073cbd0bb91b 100644
--- a/drivers/net/dsa/mv88e6xxx/ptp.c
+++ b/drivers/net/dsa/mv88e6xxx/ptp.c
@@ -15,11 +15,31 @@
#include "hwtstamp.h"
#include "ptp.h"
-/* Raw timestamps are in units of 8-ns clock periods. */
-#define CC_SHIFT 28
-#define CC_MULT (8 << CC_SHIFT)
-#define CC_MULT_NUM (1 << 9)
-#define CC_MULT_DEM 15625ULL
+#define MV88E6XXX_MAX_ADJ_PPB 1000000
+
+/* Family MV88E6250:
+ * Raw timestamps are in units of 10-ns clock periods.
+ *
+ * clkadj = scaled_ppm * 10*2^28 / (10^6 * 2^16)
+ * simplifies to
+ * clkadj = scaled_ppm * 2^7 / 5^5
+ */
+#define MV88E6250_CC_SHIFT 28
+#define MV88E6250_CC_MULT (10 << MV88E6250_CC_SHIFT)
+#define MV88E6250_CC_MULT_NUM (1 << 7)
+#define MV88E6250_CC_MULT_DEM 3125ULL
+
+/* Other families:
+ * Raw timestamps are in units of 8-ns clock periods.
+ *
+ * clkadj = scaled_ppm * 8*2^28 / (10^6 * 2^16)
+ * simplifies to
+ * clkadj = scaled_ppm * 2^9 / 5^6
+ */
+#define MV88E6XXX_CC_SHIFT 28
+#define MV88E6XXX_CC_MULT (8 << MV88E6XXX_CC_SHIFT)
+#define MV88E6XXX_CC_MULT_NUM (1 << 9)
+#define MV88E6XXX_CC_MULT_DEM 15625ULL
#define TAI_EVENT_WORK_INTERVAL msecs_to_jiffies(100)
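
As a numeric check of the new 6250 constants (an illustration, not part of the patch): for scaled_ppm = 1 << 16, i.e. 1 ppm, the simplified formula gives 65536 * 128 / 3125 = 2684, and 1 ppm of MV88E6250_CC_MULT = 10 << 28 = 2684354560 is likewise ~2684.35, so the 2^7 / 5^5 reduction is consistent. In code:

/* Worked example: adjfine delta for the 6250 family
 * (callers handle the sign, as adjfine below does).
 */
static u32 example_6250_clkadj(long scaled_ppm)
{
	u64 adj = (u64)MV88E6250_CC_MULT_NUM * scaled_ppm;

	/* 1 ppm: 128 * 65536 / 3125 = 2684 counter units */
	return div_u64(adj, MV88E6250_CC_MULT_DEM);
}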
@@ -179,6 +199,7 @@ out:
static int mv88e6xxx_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
{
struct mv88e6xxx_chip *chip = ptp_to_chip(ptp);
+ const struct mv88e6xxx_ptp_ops *ptp_ops = chip->info->ops->ptp_ops;
int neg_adj = 0;
u32 diff, mult;
u64 adj;
@@ -187,10 +208,11 @@ static int mv88e6xxx_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
neg_adj = 1;
scaled_ppm = -scaled_ppm;
}
- mult = CC_MULT;
- adj = CC_MULT_NUM;
+
+ mult = ptp_ops->cc_mult;
+ adj = ptp_ops->cc_mult_num;
adj *= scaled_ppm;
- diff = div_u64(adj, CC_MULT_DEM);
+ diff = div_u64(adj, ptp_ops->cc_mult_dem);
mv88e6xxx_reg_lock(chip);
@@ -310,7 +332,27 @@ static int mv88e6352_ptp_verify(struct ptp_clock_info *ptp, unsigned int pin,
return 0;
}
-const struct mv88e6xxx_ptp_ops mv88e6352_ptp_ops = {
+const struct mv88e6xxx_ptp_ops mv88e6165_ptp_ops = {
+ .clock_read = mv88e6165_ptp_clock_read,
+ .global_enable = mv88e6165_global_enable,
+ .global_disable = mv88e6165_global_disable,
+ .arr0_sts_reg = MV88E6165_PORT_PTP_ARR0_STS,
+ .arr1_sts_reg = MV88E6165_PORT_PTP_ARR1_STS,
+ .dep_sts_reg = MV88E6165_PORT_PTP_DEP_STS,
+ .rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
+ (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
+ (1 << HWTSTAMP_FILTER_PTP_V2_L2_SYNC) |
+ (1 << HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ) |
+ (1 << HWTSTAMP_FILTER_PTP_V2_EVENT) |
+ (1 << HWTSTAMP_FILTER_PTP_V2_SYNC) |
+ (1 << HWTSTAMP_FILTER_PTP_V2_DELAY_REQ),
+ .cc_shift = MV88E6XXX_CC_SHIFT,
+ .cc_mult = MV88E6XXX_CC_MULT,
+ .cc_mult_num = MV88E6XXX_CC_MULT_NUM,
+ .cc_mult_dem = MV88E6XXX_CC_MULT_DEM,
+};
+
+const struct mv88e6xxx_ptp_ops mv88e6250_ptp_ops = {
.clock_read = mv88e6352_ptp_clock_read,
.ptp_enable = mv88e6352_ptp_enable,
.ptp_verify = mv88e6352_ptp_verify,
@@ -331,22 +373,37 @@ const struct mv88e6xxx_ptp_ops mv88e6352_ptp_ops = {
(1 << HWTSTAMP_FILTER_PTP_V2_EVENT) |
(1 << HWTSTAMP_FILTER_PTP_V2_SYNC) |
(1 << HWTSTAMP_FILTER_PTP_V2_DELAY_REQ),
+ .cc_shift = MV88E6250_CC_SHIFT,
+ .cc_mult = MV88E6250_CC_MULT,
+ .cc_mult_num = MV88E6250_CC_MULT_NUM,
+ .cc_mult_dem = MV88E6250_CC_MULT_DEM,
};
-const struct mv88e6xxx_ptp_ops mv88e6165_ptp_ops = {
- .clock_read = mv88e6165_ptp_clock_read,
- .global_enable = mv88e6165_global_enable,
- .global_disable = mv88e6165_global_disable,
- .arr0_sts_reg = MV88E6165_PORT_PTP_ARR0_STS,
- .arr1_sts_reg = MV88E6165_PORT_PTP_ARR1_STS,
- .dep_sts_reg = MV88E6165_PORT_PTP_DEP_STS,
+const struct mv88e6xxx_ptp_ops mv88e6352_ptp_ops = {
+ .clock_read = mv88e6352_ptp_clock_read,
+ .ptp_enable = mv88e6352_ptp_enable,
+ .ptp_verify = mv88e6352_ptp_verify,
+ .event_work = mv88e6352_tai_event_work,
+ .port_enable = mv88e6352_hwtstamp_port_enable,
+ .port_disable = mv88e6352_hwtstamp_port_disable,
+ .n_ext_ts = 1,
+ .arr0_sts_reg = MV88E6XXX_PORT_PTP_ARR0_STS,
+ .arr1_sts_reg = MV88E6XXX_PORT_PTP_ARR1_STS,
+ .dep_sts_reg = MV88E6XXX_PORT_PTP_DEP_STS,
.rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
+ (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT) |
+ (1 << HWTSTAMP_FILTER_PTP_V2_L4_SYNC) |
+ (1 << HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ) |
(1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
(1 << HWTSTAMP_FILTER_PTP_V2_L2_SYNC) |
(1 << HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ) |
(1 << HWTSTAMP_FILTER_PTP_V2_EVENT) |
(1 << HWTSTAMP_FILTER_PTP_V2_SYNC) |
(1 << HWTSTAMP_FILTER_PTP_V2_DELAY_REQ),
+ .cc_shift = MV88E6XXX_CC_SHIFT,
+ .cc_mult = MV88E6XXX_CC_MULT,
+ .cc_mult_num = MV88E6XXX_CC_MULT_NUM,
+ .cc_mult_dem = MV88E6XXX_CC_MULT_DEM,
};
static u64 mv88e6xxx_ptp_clock_read(const struct cyclecounter *cc)
@@ -384,8 +441,8 @@ int mv88e6xxx_ptp_setup(struct mv88e6xxx_chip *chip)
memset(&chip->tstamp_cc, 0, sizeof(chip->tstamp_cc));
chip->tstamp_cc.read = mv88e6xxx_ptp_clock_read;
chip->tstamp_cc.mask = CYCLECOUNTER_MASK(32);
- chip->tstamp_cc.mult = CC_MULT;
- chip->tstamp_cc.shift = CC_SHIFT;
+ chip->tstamp_cc.mult = ptp_ops->cc_mult;
+ chip->tstamp_cc.shift = ptp_ops->cc_shift;
timecounter_init(&chip->tstamp_tc, &chip->tstamp_cc,
ktime_to_ns(ktime_get_real()));
@@ -397,7 +454,6 @@ int mv88e6xxx_ptp_setup(struct mv88e6xxx_chip *chip)
chip->ptp_clock_info.owner = THIS_MODULE;
snprintf(chip->ptp_clock_info.name, sizeof(chip->ptp_clock_info.name),
"%s", dev_name(chip->dev));
- chip->ptp_clock_info.max_adj = 1000000;
chip->ptp_clock_info.n_ext_ts = ptp_ops->n_ext_ts;
chip->ptp_clock_info.n_per_out = 0;
@@ -413,6 +469,7 @@ int mv88e6xxx_ptp_setup(struct mv88e6xxx_chip *chip)
}
chip->ptp_clock_info.pin_config = chip->pin_config;
+ chip->ptp_clock_info.max_adj = MV88E6XXX_MAX_ADJ_PPB;
chip->ptp_clock_info.adjfine = mv88e6xxx_ptp_adjfine;
chip->ptp_clock_info.adjtime = mv88e6xxx_ptp_adjtime;
chip->ptp_clock_info.gettime64 = mv88e6xxx_ptp_gettime;
diff --git a/drivers/net/dsa/mv88e6xxx/ptp.h b/drivers/net/dsa/mv88e6xxx/ptp.h
index 0a1f8de8f062..269d5d16a466 100644
--- a/drivers/net/dsa/mv88e6xxx/ptp.h
+++ b/drivers/net/dsa/mv88e6xxx/ptp.h
@@ -148,8 +148,9 @@ void mv88e6xxx_ptp_free(struct mv88e6xxx_chip *chip);
#define ptp_to_chip(ptp) container_of(ptp, struct mv88e6xxx_chip, \
ptp_clock_info)
-extern const struct mv88e6xxx_ptp_ops mv88e6352_ptp_ops;
extern const struct mv88e6xxx_ptp_ops mv88e6165_ptp_ops;
+extern const struct mv88e6xxx_ptp_ops mv88e6250_ptp_ops;
+extern const struct mv88e6xxx_ptp_ops mv88e6352_ptp_ops;
#else /* !CONFIG_NET_DSA_MV88E6XXX_PTP */
@@ -167,8 +168,9 @@ static inline void mv88e6xxx_ptp_free(struct mv88e6xxx_chip *chip)
{
}
-static const struct mv88e6xxx_ptp_ops mv88e6352_ptp_ops = {};
static const struct mv88e6xxx_ptp_ops mv88e6165_ptp_ops = {};
+static const struct mv88e6xxx_ptp_ops mv88e6250_ptp_ops = {};
+static const struct mv88e6xxx_ptp_ops mv88e6352_ptp_ops = {};
#endif /* CONFIG_NET_DSA_MV88E6XXX_PTP */
diff --git a/drivers/net/dsa/mv88e6xxx/smi.c b/drivers/net/dsa/mv88e6xxx/smi.c
index 5fc78a063843..282fe08db050 100644
--- a/drivers/net/dsa/mv88e6xxx/smi.c
+++ b/drivers/net/dsa/mv88e6xxx/smi.c
@@ -64,8 +64,10 @@ static int mv88e6xxx_smi_direct_wait(struct mv88e6xxx_chip *chip,
if (err)
return err;
- if (!!(data >> bit) == !!val)
+ if (!!(data & BIT(bit)) == !!val)
return 0;
+
+ usleep_range(1000, 2000);
}
return -ETIMEDOUT;
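
The small-looking change above is a real fix: data >> bit keeps every higher-order bit, so the old test reported the polled bit as set whenever any bit at or above that position was set, while data & BIT(bit) isolates exactly the bit in question. The added usleep_range() also turns a tight busy-wait into a paced poll. A concrete value shows the difference:

/* Polling bit 5 while only bit 6 is set (data = 0x0040):
 *   old: !!(0x0040 >> 5)     == !!0x2 == 1  -> falsely "set"
 *   new: !!(0x0040 & BIT(5)) == !!0x0 == 0  -> correctly "clear"
 */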
diff --git a/drivers/net/ethernet/3com/3c59x.c b/drivers/net/ethernet/3com/3c59x.c
index 147051404194..8785c2ff3825 100644
--- a/drivers/net/ethernet/3com/3c59x.c
+++ b/drivers/net/ethernet/3com/3c59x.c
@@ -847,8 +847,7 @@ static void poll_vortex(struct net_device *dev)
static int vortex_suspend(struct device *dev)
{
- struct pci_dev *pdev = to_pci_dev(dev);
- struct net_device *ndev = pci_get_drvdata(pdev);
+ struct net_device *ndev = dev_get_drvdata(dev);
if (!ndev || !netif_running(ndev))
return 0;
@@ -861,8 +860,7 @@ static int vortex_suspend(struct device *dev)
static int vortex_resume(struct device *dev)
{
- struct pci_dev *pdev = to_pci_dev(dev);
- struct net_device *ndev = pci_get_drvdata(pdev);
+ struct net_device *ndev = dev_get_drvdata(dev);
int err;
if (!ndev || !netif_running(ndev))
@@ -2175,7 +2173,7 @@ boomerang_start_xmit(struct sk_buff *skb, struct net_device *dev)
dma_addr = skb_frag_dma_map(vp->gendev, frag,
0,
- frag->size,
+ skb_frag_size(frag),
DMA_TO_DEVICE);
if (dma_mapping_error(vp->gendev, dma_addr)) {
for(i = i-1; i >= 0; i--)
diff --git a/drivers/net/ethernet/aeroflex/greth.c b/drivers/net/ethernet/aeroflex/greth.c
index 010a2f48aea5..2a9f8643629c 100644
--- a/drivers/net/ethernet/aeroflex/greth.c
+++ b/drivers/net/ethernet/aeroflex/greth.c
@@ -110,7 +110,7 @@ static void greth_print_tx_packet(struct sk_buff *skb)
print_hex_dump(KERN_DEBUG, "TX: ", DUMP_PREFIX_OFFSET, 16, 1,
skb_frag_address(&skb_shinfo(skb)->frags[i]),
- skb_shinfo(skb)->frags[i].size, true);
+ skb_frag_size(&skb_shinfo(skb)->frags[i]), true);
}
}
diff --git a/drivers/net/ethernet/agere/et131x.c b/drivers/net/ethernet/agere/et131x.c
index edbb4b3604c7..174344c450af 100644
--- a/drivers/net/ethernet/agere/et131x.c
+++ b/drivers/net/ethernet/agere/et131x.c
@@ -2426,7 +2426,7 @@ static int nic_send_packet(struct et131x_adapter *adapter, struct tcb *tcb)
u32 thiscopy, remainder;
struct sk_buff *skb = tcb->skb;
u32 nr_frags = skb_shinfo(skb)->nr_frags + 1;
- struct skb_frag_struct *frags = &skb_shinfo(skb)->frags[0];
+ skb_frag_t *frags = &skb_shinfo(skb)->frags[0];
struct phy_device *phydev = adapter->netdev->phydev;
dma_addr_t dma_addr;
struct tx_ring *tx_ring = &adapter->tx_ring;
@@ -2488,11 +2488,11 @@ static int nic_send_packet(struct et131x_adapter *adapter, struct tcb *tcb)
frag++;
}
} else {
- desc[frag].len_vlan = frags[i - 1].size;
+ desc[frag].len_vlan = skb_frag_size(&frags[i - 1]);
dma_addr = skb_frag_dma_map(&adapter->pdev->dev,
&frags[i - 1],
0,
- frags[i - 1].size,
+ desc[frag].len_vlan,
DMA_TO_DEVICE);
desc[frag].addr_lo = lower_32_bits(dma_addr);
desc[frag].addr_hi = upper_32_bits(dma_addr);
diff --git a/drivers/net/ethernet/amd/au1000_eth.c b/drivers/net/ethernet/amd/au1000_eth.c
index 650d1bae5f56..1793950f0582 100644
--- a/drivers/net/ethernet/amd/au1000_eth.c
+++ b/drivers/net/ethernet/amd/au1000_eth.c
@@ -1100,7 +1100,6 @@ static int au1000_probe(struct platform_device *pdev)
irq = platform_get_irq(pdev, 0);
if (irq < 0) {
- dev_err(&pdev->dev, "failed to retrieve IRQ\n");
err = -ENODEV;
goto out;
}
diff --git a/drivers/net/ethernet/amd/ni65.c b/drivers/net/ethernet/amd/ni65.c
index 87ff5d6d1b22..c6c2a54c1121 100644
--- a/drivers/net/ethernet/amd/ni65.c
+++ b/drivers/net/ethernet/amd/ni65.c
@@ -697,16 +697,14 @@ static void ni65_free_buffer(struct priv *p)
for(i=0;i<TMDNUM;i++) {
kfree(p->tmdbounce[i]);
#ifdef XMT_VIA_SKB
- if(p->tmd_skb[i])
- dev_kfree_skb(p->tmd_skb[i]);
+ dev_kfree_skb(p->tmd_skb[i]);
#endif
}
for(i=0;i<RMDNUM;i++)
{
#ifdef RCV_VIA_SKB
- if(p->recv_skb[i])
- dev_kfree_skb(p->recv_skb[i]);
+ dev_kfree_skb(p->recv_skb[i]);
#else
kfree(p->recvbounce[i]);
#endif
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-debugfs.c b/drivers/net/ethernet/amd/xgbe/xgbe-debugfs.c
index b91143947ed2..b0a6c96b6ef4 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-debugfs.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-debugfs.c
@@ -438,7 +438,6 @@ static const struct file_operations xi2c_reg_value_fops = {
void xgbe_debugfs_init(struct xgbe_prv_data *pdata)
{
- struct dentry *pfile;
char *buf;
/* Set defaults */
@@ -451,88 +450,48 @@ void xgbe_debugfs_init(struct xgbe_prv_data *pdata)
return;
pdata->xgbe_debugfs = debugfs_create_dir(buf, NULL);
- if (!pdata->xgbe_debugfs) {
- netdev_err(pdata->netdev, "debugfs_create_dir failed\n");
- kfree(buf);
- return;
- }
- pfile = debugfs_create_file("xgmac_register", 0600,
- pdata->xgbe_debugfs, pdata,
- &xgmac_reg_addr_fops);
- if (!pfile)
- netdev_err(pdata->netdev, "debugfs_create_file failed\n");
+ debugfs_create_file("xgmac_register", 0600, pdata->xgbe_debugfs, pdata,
+ &xgmac_reg_addr_fops);
- pfile = debugfs_create_file("xgmac_register_value", 0600,
- pdata->xgbe_debugfs, pdata,
- &xgmac_reg_value_fops);
- if (!pfile)
- netdev_err(pdata->netdev, "debugfs_create_file failed\n");
+ debugfs_create_file("xgmac_register_value", 0600, pdata->xgbe_debugfs,
+ pdata, &xgmac_reg_value_fops);
- pfile = debugfs_create_file("xpcs_mmd", 0600,
- pdata->xgbe_debugfs, pdata,
- &xpcs_mmd_fops);
- if (!pfile)
- netdev_err(pdata->netdev, "debugfs_create_file failed\n");
+ debugfs_create_file("xpcs_mmd", 0600, pdata->xgbe_debugfs, pdata,
+ &xpcs_mmd_fops);
- pfile = debugfs_create_file("xpcs_register", 0600,
- pdata->xgbe_debugfs, pdata,
- &xpcs_reg_addr_fops);
- if (!pfile)
- netdev_err(pdata->netdev, "debugfs_create_file failed\n");
+ debugfs_create_file("xpcs_register", 0600, pdata->xgbe_debugfs, pdata,
+ &xpcs_reg_addr_fops);
- pfile = debugfs_create_file("xpcs_register_value", 0600,
- pdata->xgbe_debugfs, pdata,
- &xpcs_reg_value_fops);
- if (!pfile)
- netdev_err(pdata->netdev, "debugfs_create_file failed\n");
+ debugfs_create_file("xpcs_register_value", 0600, pdata->xgbe_debugfs,
+ pdata, &xpcs_reg_value_fops);
if (pdata->xprop_regs) {
- pfile = debugfs_create_file("xprop_register", 0600,
- pdata->xgbe_debugfs, pdata,
- &xprop_reg_addr_fops);
- if (!pfile)
- netdev_err(pdata->netdev,
- "debugfs_create_file failed\n");
-
- pfile = debugfs_create_file("xprop_register_value", 0600,
- pdata->xgbe_debugfs, pdata,
- &xprop_reg_value_fops);
- if (!pfile)
- netdev_err(pdata->netdev,
- "debugfs_create_file failed\n");
+ debugfs_create_file("xprop_register", 0600, pdata->xgbe_debugfs,
+ pdata, &xprop_reg_addr_fops);
+
+ debugfs_create_file("xprop_register_value", 0600,
+ pdata->xgbe_debugfs, pdata,
+ &xprop_reg_value_fops);
}
if (pdata->xi2c_regs) {
- pfile = debugfs_create_file("xi2c_register", 0600,
- pdata->xgbe_debugfs, pdata,
- &xi2c_reg_addr_fops);
- if (!pfile)
- netdev_err(pdata->netdev,
- "debugfs_create_file failed\n");
-
- pfile = debugfs_create_file("xi2c_register_value", 0600,
- pdata->xgbe_debugfs, pdata,
- &xi2c_reg_value_fops);
- if (!pfile)
- netdev_err(pdata->netdev,
- "debugfs_create_file failed\n");
+ debugfs_create_file("xi2c_register", 0600, pdata->xgbe_debugfs,
+ pdata, &xi2c_reg_addr_fops);
+
+ debugfs_create_file("xi2c_register_value", 0600,
+ pdata->xgbe_debugfs, pdata,
+ &xi2c_reg_value_fops);
}
if (pdata->vdata->an_cdr_workaround) {
- pfile = debugfs_create_bool("an_cdr_workaround", 0600,
- pdata->xgbe_debugfs,
- &pdata->debugfs_an_cdr_workaround);
- if (!pfile)
- netdev_err(pdata->netdev,
- "debugfs_create_bool failed\n");
-
- pfile = debugfs_create_bool("an_cdr_track_early", 0600,
- pdata->xgbe_debugfs,
- &pdata->debugfs_an_cdr_track_early);
- if (!pfile)
- netdev_err(pdata->netdev,
- "debugfs_create_bool failed\n");
+ debugfs_create_bool("an_cdr_workaround", 0600,
+ pdata->xgbe_debugfs,
+ &pdata->debugfs_an_cdr_workaround);
+
+ debugfs_create_bool("an_cdr_track_early", 0600,
+ pdata->xgbe_debugfs,
+ &pdata->debugfs_an_cdr_track_early);
}
kfree(buf);
@@ -546,7 +505,6 @@ void xgbe_debugfs_exit(struct xgbe_prv_data *pdata)
void xgbe_debugfs_rename(struct xgbe_prv_data *pdata)
{
- struct dentry *pfile;
char *buf;
if (!pdata->xgbe_debugfs)
@@ -559,11 +517,8 @@ void xgbe_debugfs_rename(struct xgbe_prv_data *pdata)
if (!strcmp(pdata->xgbe_debugfs->d_name.name, buf))
goto out;
- pfile = debugfs_rename(pdata->xgbe_debugfs->d_parent,
- pdata->xgbe_debugfs,
- pdata->xgbe_debugfs->d_parent, buf);
- if (!pfile)
- netdev_err(pdata->netdev, "debugfs_rename failed\n");
+ debugfs_rename(pdata->xgbe_debugfs->d_parent, pdata->xgbe_debugfs,
+ pdata->xgbe_debugfs->d_parent, buf);
out:
kfree(buf);
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-desc.c b/drivers/net/ethernet/amd/xgbe/xgbe-desc.c
index 533094233659..230726d7b74f 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-desc.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-desc.c
@@ -526,7 +526,7 @@ static int xgbe_map_tx_skb(struct xgbe_channel *channel, struct sk_buff *skb)
struct xgbe_ring *ring = channel->tx_ring;
struct xgbe_ring_data *rdata;
struct xgbe_packet_data *packet;
- struct skb_frag_struct *frag;
+ skb_frag_t *frag;
dma_addr_t skb_dma;
unsigned int start_index, cur_index;
unsigned int offset, tso, vlan, datalen, len;
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
index 3dd0cecddba8..98f8f2033154 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
@@ -1833,7 +1833,7 @@ static void xgbe_packet_info(struct xgbe_prv_data *pdata,
struct xgbe_ring *ring, struct sk_buff *skb,
struct xgbe_packet_data *packet)
{
- struct skb_frag_struct *frag;
+ skb_frag_t *frag;
unsigned int context_desc;
unsigned int len;
unsigned int i;
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-platform.c b/drivers/net/ethernet/amd/xgbe/xgbe-platform.c
index d0f3dfb88202..4ebd2410185a 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-platform.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-platform.c
@@ -301,7 +301,6 @@ static int xgbe_platform_probe(struct platform_device *pdev)
struct xgbe_prv_data *pdata;
struct device *dev = &pdev->dev;
struct platform_device *phy_pdev;
- struct resource *res;
const char *phy_mode;
unsigned int phy_memnum, phy_irqnum;
unsigned int dma_irqnum, dma_irqend;
@@ -353,8 +352,7 @@ static int xgbe_platform_probe(struct platform_device *pdev)
}
/* Obtain the mmio areas for the device */
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- pdata->xgmac_regs = devm_ioremap_resource(dev, res);
+ pdata->xgmac_regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(pdata->xgmac_regs)) {
dev_err(dev, "xgmac ioremap failed\n");
ret = PTR_ERR(pdata->xgmac_regs);
@@ -363,8 +361,7 @@ static int xgbe_platform_probe(struct platform_device *pdev)
if (netif_msg_probe(pdata))
dev_dbg(dev, "xgmac_regs = %p\n", pdata->xgmac_regs);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
- pdata->xpcs_regs = devm_ioremap_resource(dev, res);
+ pdata->xpcs_regs = devm_platform_ioremap_resource(pdev, 1);
if (IS_ERR(pdata->xpcs_regs)) {
dev_err(dev, "xpcs ioremap failed\n");
ret = PTR_ERR(pdata->xpcs_regs);
@@ -373,8 +370,8 @@ static int xgbe_platform_probe(struct platform_device *pdev)
if (netif_msg_probe(pdata))
dev_dbg(dev, "xpcs_regs = %p\n", pdata->xpcs_regs);
- res = platform_get_resource(phy_pdev, IORESOURCE_MEM, phy_memnum++);
- pdata->rxtx_regs = devm_ioremap_resource(dev, res);
+ pdata->rxtx_regs = devm_platform_ioremap_resource(phy_pdev,
+ phy_memnum++);
if (IS_ERR(pdata->rxtx_regs)) {
dev_err(dev, "rxtx ioremap failed\n");
ret = PTR_ERR(pdata->rxtx_regs);
@@ -383,8 +380,8 @@ static int xgbe_platform_probe(struct platform_device *pdev)
if (netif_msg_probe(pdata))
dev_dbg(dev, "rxtx_regs = %p\n", pdata->rxtx_regs);
- res = platform_get_resource(phy_pdev, IORESOURCE_MEM, phy_memnum++);
- pdata->sir0_regs = devm_ioremap_resource(dev, res);
+ pdata->sir0_regs = devm_platform_ioremap_resource(phy_pdev,
+ phy_memnum++);
if (IS_ERR(pdata->sir0_regs)) {
dev_err(dev, "sir0 ioremap failed\n");
ret = PTR_ERR(pdata->sir0_regs);
@@ -393,8 +390,8 @@ static int xgbe_platform_probe(struct platform_device *pdev)
if (netif_msg_probe(pdata))
dev_dbg(dev, "sir0_regs = %p\n", pdata->sir0_regs);
- res = platform_get_resource(phy_pdev, IORESOURCE_MEM, phy_memnum++);
- pdata->sir1_regs = devm_ioremap_resource(dev, res);
+ pdata->sir1_regs = devm_platform_ioremap_resource(phy_pdev,
+ phy_memnum++);
if (IS_ERR(pdata->sir1_regs)) {
dev_err(dev, "sir1 ioremap failed\n");
ret = PTR_ERR(pdata->sir1_regs);
@@ -467,10 +464,8 @@ static int xgbe_platform_probe(struct platform_device *pdev)
/* Get the device interrupt */
ret = platform_get_irq(pdev, 0);
- if (ret < 0) {
- dev_err(dev, "platform_get_irq 0 failed\n");
+ if (ret < 0)
goto err_io;
- }
pdata->dev_irq = ret;
/* Get the per channel DMA interrupts */
@@ -479,12 +474,8 @@ static int xgbe_platform_probe(struct platform_device *pdev)
for (i = 0; (i < max) && (dma_irqnum < dma_irqend); i++) {
ret = platform_get_irq(pdata->platdev, dma_irqnum++);
- if (ret < 0) {
- netdev_err(pdata->netdev,
- "platform_get_irq %u failed\n",
- dma_irqnum - 1);
+ if (ret < 0)
goto err_io;
- }
pdata->channel_irq[i] = ret;
}
@@ -496,10 +487,8 @@ static int xgbe_platform_probe(struct platform_device *pdev)
/* Get the auto-negotiation interrupt */
ret = platform_get_irq(phy_pdev, phy_irqnum++);
- if (ret < 0) {
- dev_err(dev, "platform_get_irq phy 0 failed\n");
+ if (ret < 0)
goto err_io;
- }
pdata->an_irq = ret;
/* Configure the netdev resource */
diff --git a/drivers/net/ethernet/apm/xgene-v2/main.c b/drivers/net/ethernet/apm/xgene-v2/main.c
index 79048cc46703..02b4f3af02b5 100644
--- a/drivers/net/ethernet/apm/xgene-v2/main.c
+++ b/drivers/net/ethernet/apm/xgene-v2/main.c
@@ -54,10 +54,8 @@ static int xge_get_resources(struct xge_pdata *pdata)
}
ret = platform_get_irq(pdev, 0);
- if (ret < 0) {
- dev_err(dev, "Unable to get irq\n");
+ if (ret < 0)
return ret;
- }
pdata->resources.irq = ret;
return 0;
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c
index 61a465097cb8..5f657879134e 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c
@@ -712,11 +712,11 @@ static int xgene_enet_reset(struct xgene_enet_pdata *pdata)
udelay(5);
} else {
#ifdef CONFIG_ACPI
- if (acpi_has_method(ACPI_HANDLE(&pdata->pdev->dev), "_RST")) {
- acpi_evaluate_object(ACPI_HANDLE(&pdata->pdev->dev),
- "_RST", NULL, NULL);
- } else if (acpi_has_method(ACPI_HANDLE(&pdata->pdev->dev),
- "_INI")) {
+ acpi_status status;
+
+ status = acpi_evaluate_object(ACPI_HANDLE(&pdata->pdev->dev),
+ "_RST", NULL, NULL);
+ if (ACPI_FAILURE(status)) {
acpi_evaluate_object(ACPI_HANDLE(&pdata->pdev->dev),
"_INI", NULL, NULL);
}
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
index 10b1c053e70a..d8612131c55e 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
@@ -340,7 +340,8 @@ static int xgene_enet_work_msg(struct sk_buff *skb, u64 *hopinfo)
nr_frags = skb_shinfo(skb)->nr_frags;
for (i = 0; i < 2 && i < nr_frags; i++)
- len += skb_shinfo(skb)->frags[i].size;
+ len += skb_frag_size(&skb_shinfo(skb)->frags[i]);
/* HW requires header must reside in 3 buffer */
if (unlikely(hdr_len > len)) {
@@ -1616,7 +1617,6 @@ static int xgene_get_rx_delay(struct xgene_enet_pdata *pdata)
static int xgene_enet_get_irqs(struct xgene_enet_pdata *pdata)
{
struct platform_device *pdev = pdata->pdev;
- struct device *dev = &pdev->dev;
int i, ret, max_irqs;
if (phy_interface_mode_is_rgmii(pdata->phy_mode))
@@ -1636,9 +1636,7 @@ static int xgene_enet_get_irqs(struct xgene_enet_pdata *pdata)
pdata->cq_cnt = max_irqs / 2;
break;
}
- dev_err(dev, "Unable to get ENET IRQ\n");
- ret = ret ? : -ENXIO;
- return ret;
+ return ret ? : -ENXIO;
}
pdata->irqs[i] = ret;
}
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c b/drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c
index 6453fc2ebb1f..f482ced2cadd 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c
@@ -460,12 +460,14 @@ static int xgene_enet_reset(struct xgene_enet_pdata *p)
}
} else {
#ifdef CONFIG_ACPI
- if (acpi_has_method(ACPI_HANDLE(&p->pdev->dev), "_RST"))
- acpi_evaluate_object(ACPI_HANDLE(&p->pdev->dev),
- "_RST", NULL, NULL);
- else if (acpi_has_method(ACPI_HANDLE(&p->pdev->dev), "_INI"))
+ acpi_status status;
+
+ status = acpi_evaluate_object(ACPI_HANDLE(&p->pdev->dev),
+ "_RST", NULL, NULL);
+ if (ACPI_FAILURE(status)) {
acpi_evaluate_object(ACPI_HANDLE(&p->pdev->dev),
"_INI", NULL, NULL);
+ }
#endif
}
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.c b/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.c
index 133eb91c542e..304b5d43f236 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.c
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.c
@@ -393,11 +393,11 @@ static int xgene_enet_reset(struct xgene_enet_pdata *pdata)
udelay(5);
} else {
#ifdef CONFIG_ACPI
- if (acpi_has_method(ACPI_HANDLE(&pdata->pdev->dev), "_RST")) {
- acpi_evaluate_object(ACPI_HANDLE(&pdata->pdev->dev),
- "_RST", NULL, NULL);
- } else if (acpi_has_method(ACPI_HANDLE(&pdata->pdev->dev),
- "_INI")) {
+ acpi_status status;
+
+ status = acpi_evaluate_object(ACPI_HANDLE(&pdata->pdev->dev),
+ "_RST", NULL, NULL);
+ if (ACPI_FAILURE(status)) {
acpi_evaluate_object(ACPI_HANDLE(&pdata->pdev->dev),
"_INI", NULL, NULL);
}
diff --git a/drivers/net/ethernet/apple/bmac.c b/drivers/net/ethernet/apple/bmac.c
index c40daad515d5..a58185b1d8bf 100644
--- a/drivers/net/ethernet/apple/bmac.c
+++ b/drivers/net/ethernet/apple/bmac.c
@@ -815,8 +815,8 @@ static int reverse6[64] = {
static unsigned int
crc416(unsigned int curval, unsigned short nxtval)
{
- register unsigned int counter, cur = curval, next = nxtval;
- register int high_crc_set, low_data_set;
+ unsigned int counter, cur = curval, next = nxtval;
+ int high_crc_set, low_data_set;
/* Swap bytes */
next = ((next & 0x00FF) << 8) | (next >> 8);
diff --git a/drivers/net/ethernet/atheros/ag71xx.c b/drivers/net/ethernet/atheros/ag71xx.c
index 6703960c7cf5..7548247455d7 100644
--- a/drivers/net/ethernet/atheros/ag71xx.c
+++ b/drivers/net/ethernet/atheros/ag71xx.c
@@ -1148,7 +1148,7 @@ static int ag71xx_rings_init(struct ag71xx *ag)
return -ENOMEM;
}
- rx->buf = &tx->buf[BIT(tx->order)];
+ rx->buf = &tx->buf[tx_size];
rx->descs_cpu = ((void *)tx->descs_cpu) + tx_size * AG71XX_DESC_SIZE;
rx->descs_dma = tx->descs_dma + tx_size * AG71XX_DESC_SIZE;
@@ -1686,7 +1686,7 @@ static int ag71xx_probe(struct platform_device *pdev)
}
ag->mac_base = devm_ioremap_nocache(&pdev->dev, res->start,
- res->end - res->start + 1);
+ resource_size(res));
if (!ag->mac_base) {
err = -ENOMEM;
goto err_free;
diff --git a/drivers/net/ethernet/atheros/alx/main.c b/drivers/net/ethernet/atheros/alx/main.c
index e3538ba7d0e7..d4bbcdfd691a 100644
--- a/drivers/net/ethernet/atheros/alx/main.c
+++ b/drivers/net/ethernet/atheros/alx/main.c
@@ -1465,9 +1465,7 @@ static int alx_map_tx_skb(struct alx_tx_queue *txq, struct sk_buff *skb)
tpd->len = cpu_to_le16(maplen);
for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) {
- struct skb_frag_struct *frag;
-
- frag = &skb_shinfo(skb)->frags[f];
+ skb_frag_t *frag = &skb_shinfo(skb)->frags[f];
if (++txq->write_idx == txq->count)
txq->write_idx = 0;
@@ -1879,8 +1877,7 @@ static void alx_remove(struct pci_dev *pdev)
#ifdef CONFIG_PM_SLEEP
static int alx_suspend(struct device *dev)
{
- struct pci_dev *pdev = to_pci_dev(dev);
- struct alx_priv *alx = pci_get_drvdata(pdev);
+ struct alx_priv *alx = dev_get_drvdata(dev);
if (!netif_running(alx->dev))
return 0;
@@ -1891,8 +1888,7 @@ static int alx_suspend(struct device *dev)
static int alx_resume(struct device *dev)
{
- struct pci_dev *pdev = to_pci_dev(dev);
- struct alx_priv *alx = pci_get_drvdata(pdev);
+ struct alx_priv *alx = dev_get_drvdata(dev);
struct alx_hw *hw = &alx->hw;
int err;
diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
index be7f9cebb675..2b239ecea05f 100644
--- a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
+++ b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
@@ -2150,9 +2150,7 @@ static int atl1c_tx_map(struct atl1c_adapter *adapter,
}
for (f = 0; f < nr_frags; f++) {
- struct skb_frag_struct *frag;
-
- frag = &skb_shinfo(skb)->frags[f];
+ skb_frag_t *frag = &skb_shinfo(skb)->frags[f];
use_tpd = atl1c_get_tpd(adapter, type);
memcpy(use_tpd, tpd, sizeof(struct atl1c_tpd_desc));
@@ -2422,8 +2420,7 @@ static int atl1c_close(struct net_device *netdev)
static int atl1c_suspend(struct device *dev)
{
- struct pci_dev *pdev = to_pci_dev(dev);
- struct net_device *netdev = pci_get_drvdata(pdev);
+ struct net_device *netdev = dev_get_drvdata(dev);
struct atl1c_adapter *adapter = netdev_priv(netdev);
struct atl1c_hw *hw = &adapter->hw;
u32 wufc = adapter->wol;
@@ -2437,7 +2434,7 @@ static int atl1c_suspend(struct device *dev)
if (wufc)
if (atl1c_phy_to_ps_link(hw) != 0)
- dev_dbg(&pdev->dev, "phy power saving failed");
+ dev_dbg(dev, "phy power saving failed");
atl1c_power_saving(hw, wufc);
@@ -2447,8 +2444,7 @@ static int atl1c_suspend(struct device *dev)
#ifdef CONFIG_PM_SLEEP
static int atl1c_resume(struct device *dev)
{
- struct pci_dev *pdev = to_pci_dev(dev);
- struct net_device *netdev = pci_get_drvdata(pdev);
+ struct net_device *netdev = dev_get_drvdata(dev);
struct atl1c_adapter *adapter = netdev_priv(netdev);
AT_WRITE_REG(&adapter->hw, REG_WOL_CTRL, 0);
diff --git a/drivers/net/ethernet/atheros/atl1e/atl1e_main.c b/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
index 7f14e010bfeb..4f7b65825c15 100644
--- a/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
+++ b/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
@@ -1770,11 +1770,10 @@ static int atl1e_tx_map(struct atl1e_adapter *adapter,
}
for (f = 0; f < nr_frags; f++) {
- const struct skb_frag_struct *frag;
+ const skb_frag_t *frag = &skb_shinfo(skb)->frags[f];
u16 i;
u16 seg_num;
- frag = &skb_shinfo(skb)->frags[f];
buf_len = skb_frag_size(frag);
seg_num = (buf_len + MAX_TX_BUF_LEN - 1) / MAX_TX_BUF_LEN;
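
As in the alx and atl1c hunks above, struct skb_frag_struct becomes skb_frag_t — part of the rework that turned skb fragments into bio_vecs — and the declaration is folded into its initializer. Callers stay layout-agnostic by going through the accessor helpers; a sketch:

    #include <linux/skbuff.h>

    /* Sum the paged fragment sizes of an skb via the accessor API. */
    static unsigned int example_frag_bytes(const struct sk_buff *skb)
    {
            unsigned int f, total = 0;

            for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) {
                    const skb_frag_t *frag = &skb_shinfo(skb)->frags[f];

                    total += skb_frag_size(frag);
            }
            return total;
    }

The bnxt hunk further down uses skb_frag_off_add() for the same reason: it adjusts the fragment offset without naming the underlying field.
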
diff --git a/drivers/net/ethernet/atheros/atlx/atl1.c b/drivers/net/ethernet/atheros/atlx/atl1.c
index b5c6dc914720..b498fd6a47d0 100644
--- a/drivers/net/ethernet/atheros/atlx/atl1.c
+++ b/drivers/net/ethernet/atheros/atlx/atl1.c
@@ -2256,10 +2256,9 @@ static void atl1_tx_map(struct atl1_adapter *adapter, struct sk_buff *skb,
}
for (f = 0; f < nr_frags; f++) {
- const struct skb_frag_struct *frag;
+ const skb_frag_t *frag = &skb_shinfo(skb)->frags[f];
u16 i, nseg;
- frag = &skb_shinfo(skb)->frags[f];
buf_len = skb_frag_size(frag);
nseg = (buf_len + ATL1_MAX_TX_BUF_LEN - 1) /
@@ -2754,8 +2753,7 @@ static int atl1_close(struct net_device *netdev)
#ifdef CONFIG_PM_SLEEP
static int atl1_suspend(struct device *dev)
{
- struct pci_dev *pdev = to_pci_dev(dev);
- struct net_device *netdev = pci_get_drvdata(pdev);
+ struct net_device *netdev = dev_get_drvdata(dev);
struct atl1_adapter *adapter = netdev_priv(netdev);
struct atl1_hw *hw = &adapter->hw;
u32 ctrl = 0;
@@ -2780,7 +2778,7 @@ static int atl1_suspend(struct device *dev)
val = atl1_get_speed_and_duplex(hw, &speed, &duplex);
if (val) {
if (netif_msg_ifdown(adapter))
- dev_printk(KERN_DEBUG, &pdev->dev,
+ dev_printk(KERN_DEBUG, dev,
"error getting speed/duplex\n");
goto disable_wol;
}
@@ -2837,8 +2835,7 @@ static int atl1_suspend(struct device *dev)
static int atl1_resume(struct device *dev)
{
- struct pci_dev *pdev = to_pci_dev(dev);
- struct net_device *netdev = pci_get_drvdata(pdev);
+ struct net_device *netdev = dev_get_drvdata(dev);
struct atl1_adapter *adapter = netdev_priv(netdev);
iowrite32(0, adapter->hw.hw_addr + REG_WOL_CTRL);
diff --git a/drivers/net/ethernet/aurora/nb8800.c b/drivers/net/ethernet/aurora/nb8800.c
index 3b3370a94a9c..37752d9514e7 100644
--- a/drivers/net/ethernet/aurora/nb8800.c
+++ b/drivers/net/ethernet/aurora/nb8800.c
@@ -1351,10 +1351,8 @@ static int nb8800_probe(struct platform_device *pdev)
ops = match->data;
irq = platform_get_irq(pdev, 0);
- if (irq <= 0) {
- dev_err(&pdev->dev, "No IRQ\n");
+ if (irq <= 0)
return -EINVAL;
- }
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
base = devm_ioremap_resource(&pdev->dev, res);
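
The deleted dev_err() above is redundant: the driver core now prints its own error message inside platform_get_irq(), so callers are expected to just propagate the code (nb8800 maps the irq == 0 case to -EINVAL by hand). A sketch of the resulting idiom, with illustrative names:

    #include <linux/interrupt.h>
    #include <linux/platform_device.h>

    static int example_init_irq(struct platform_device *pdev,
                                irq_handler_t handler, void *data)
    {
            int irq = platform_get_irq(pdev, 0);

            /* No dev_err() here: platform_get_irq() already logged it. */
            if (irq < 0)
                    return irq;

            return devm_request_irq(&pdev->dev, irq, handler, 0,
                                    dev_name(&pdev->dev), data);
    }

The bgmac-platform hunk below makes the same cleanup.
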
diff --git a/drivers/net/ethernet/broadcom/bcm63xx_enet.c b/drivers/net/ethernet/broadcom/bcm63xx_enet.c
index 291e4afd4a1a..620cd3fc1fbc 100644
--- a/drivers/net/ethernet/broadcom/bcm63xx_enet.c
+++ b/drivers/net/ethernet/broadcom/bcm63xx_enet.c
@@ -1693,7 +1693,7 @@ static int bcm_enet_probe(struct platform_device *pdev)
struct bcm_enet_priv *priv;
struct net_device *dev;
struct bcm63xx_enet_platform_data *pd;
- struct resource *res_mem, *res_irq, *res_irq_rx, *res_irq_tx;
+ struct resource *res_irq, *res_irq_rx, *res_irq_tx;
struct mii_bus *bus;
int i, ret;
@@ -1719,8 +1719,7 @@ static int bcm_enet_probe(struct platform_device *pdev)
if (ret)
goto out;
- res_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- priv->base = devm_ioremap_resource(&pdev->dev, res_mem);
+ priv->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(priv->base)) {
ret = PTR_ERR(priv->base);
goto out;
@@ -2762,15 +2761,13 @@ struct platform_driver bcm63xx_enetsw_driver = {
/* reserve & remap memory space shared between all macs */
static int bcm_enet_shared_probe(struct platform_device *pdev)
{
- struct resource *res;
void __iomem *p[3];
unsigned int i;
memset(bcm_enet_shared_base, 0, sizeof(bcm_enet_shared_base));
for (i = 0; i < 3; i++) {
- res = platform_get_resource(pdev, IORESOURCE_MEM, i);
- p[i] = devm_ioremap_resource(&pdev->dev, res);
+ p[i] = devm_platform_ioremap_resource(pdev, i);
if (IS_ERR(p[i]))
return PTR_ERR(p[i]);
}
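
Both ioremap hunks above collapse the two-step lookup-then-map into devm_platform_ioremap_resource(). A sketch of the equivalence:

    #include <linux/platform_device.h>

    /* Shorthand for:
     *   res  = platform_get_resource(pdev, IORESOURCE_MEM, idx);
     *   base = devm_ioremap_resource(&pdev->dev, res);
     * Returns an ERR_PTR on failure, never NULL. */
    static void __iomem *example_map(struct platform_device *pdev,
                                     unsigned int idx)
    {
            return devm_platform_ioremap_resource(pdev, idx);
    }
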
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c
index 9483553ce444..7df887e4024c 100644
--- a/drivers/net/ethernet/broadcom/bcmsysport.c
+++ b/drivers/net/ethernet/broadcom/bcmsysport.c
@@ -708,8 +708,7 @@ static int bcm_sysport_alloc_rx_bufs(struct bcm_sysport_priv *priv)
for (i = 0; i < priv->num_rx_bds; i++) {
cb = &priv->rx_cbs[i];
skb = bcm_sysport_rx_refill(priv, cb);
- if (skb)
- dev_kfree_skb(skb);
+ dev_kfree_skb(skb);
if (!cb->skb)
return -ENOMEM;
}
@@ -2420,12 +2419,10 @@ static int bcm_sysport_probe(struct platform_device *pdev)
struct device_node *dn;
struct net_device *dev;
const void *macaddr;
- struct resource *r;
u32 txq, rxq;
int ret;
dn = pdev->dev.of_node;
- r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
of_id = of_match_node(bcm_sysport_of_match, dn);
if (!of_id || !of_id->data)
return -EINVAL;
@@ -2473,7 +2470,7 @@ static int bcm_sysport_probe(struct platform_device *pdev)
goto err_free_netdev;
}
- priv->base = devm_ioremap_resource(&pdev->dev, r);
+ priv->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(priv->base)) {
ret = PTR_ERR(priv->base);
goto err_free_netdev;
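
Two cleanups in bcmsysport: the `if (skb)` guard is dropped because dev_kfree_skb() (consume_skb() underneath) already returns early on NULL, and the MEM resource lookup moves into devm_platform_ioremap_resource() as in bcm63xx_enet. Sketch of the first point:

    #include <linux/skbuff.h>

    static void example_drop(struct sk_buff *skb)
    {
            /* Safe for skb == NULL; a NULL check in the caller is
             * redundant. */
            dev_kfree_skb(skb);
    }
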
diff --git a/drivers/net/ethernet/broadcom/bgmac-platform.c b/drivers/net/ethernet/broadcom/bgmac-platform.c
index 6dc0dd91ad11..c46c1b1416f7 100644
--- a/drivers/net/ethernet/broadcom/bgmac-platform.c
+++ b/drivers/net/ethernet/broadcom/bgmac-platform.c
@@ -199,10 +199,8 @@ static int bgmac_probe(struct platform_device *pdev)
dev_warn(&pdev->dev, "MAC address not present in device tree\n");
bgmac->irq = platform_get_irq(pdev, 0);
- if (bgmac->irq < 0) {
- dev_err(&pdev->dev, "Unable to obtain IRQ\n");
+ if (bgmac->irq < 0)
return bgmac->irq;
- }
regs = platform_get_resource_byname(pdev, IORESOURCE_MEM, "amac_base");
if (!regs) {
diff --git a/drivers/net/ethernet/broadcom/bgmac.c b/drivers/net/ethernet/broadcom/bgmac.c
index 4632dd5dbad1..148734b166f0 100644
--- a/drivers/net/ethernet/broadcom/bgmac.c
+++ b/drivers/net/ethernet/broadcom/bgmac.c
@@ -172,7 +172,7 @@ static netdev_tx_t bgmac_dma_tx_add(struct bgmac *bgmac,
flags = 0;
for (i = 0; i < nr_frags; i++) {
- struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
+ skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
int len = skb_frag_size(frag);
index = (index + 1) % BGMAC_TX_RING_SLOTS;
diff --git a/drivers/net/ethernet/broadcom/bnx2.c b/drivers/net/ethernet/broadcom/bnx2.c
index dfdd14eadd57..fbc196b480b6 100644
--- a/drivers/net/ethernet/broadcom/bnx2.c
+++ b/drivers/net/ethernet/broadcom/bnx2.c
@@ -8673,8 +8673,7 @@ bnx2_remove_one(struct pci_dev *pdev)
static int
bnx2_suspend(struct device *device)
{
- struct pci_dev *pdev = to_pci_dev(device);
- struct net_device *dev = pci_get_drvdata(pdev);
+ struct net_device *dev = dev_get_drvdata(device);
struct bnx2 *bp = netdev_priv(dev);
if (netif_running(dev)) {
@@ -8693,8 +8692,7 @@ bnx2_suspend(struct device *device)
static int
bnx2_resume(struct device *device)
{
- struct pci_dev *pdev = to_pci_dev(device);
- struct net_device *dev = pci_get_drvdata(pdev);
+ struct net_device *dev = dev_get_drvdata(device);
struct bnx2 *bp = netdev_priv(dev);
if (!netif_running(dev))
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index 8dce4069472b..b9ad43d3dc51 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -116,6 +116,9 @@ enum board_idx {
BCM57508,
BCM57504,
BCM57502,
+ BCM57508_NPAR,
+ BCM57504_NPAR,
+ BCM57502_NPAR,
BCM58802,
BCM58804,
BCM58808,
@@ -161,6 +164,9 @@ static const struct {
[BCM57508] = { "Broadcom BCM57508 NetXtreme-E 10Gb/25Gb/50Gb/100Gb/200Gb Ethernet" },
[BCM57504] = { "Broadcom BCM57504 NetXtreme-E 10Gb/25Gb/50Gb/100Gb/200Gb Ethernet" },
[BCM57502] = { "Broadcom BCM57502 NetXtreme-E 10Gb/25Gb/50Gb Ethernet" },
+ [BCM57508_NPAR] = { "Broadcom BCM57508 NetXtreme-E Ethernet Partition" },
+ [BCM57504_NPAR] = { "Broadcom BCM57504 NetXtreme-E Ethernet Partition" },
+ [BCM57502_NPAR] = { "Broadcom BCM57502 NetXtreme-E Ethernet Partition" },
[BCM58802] = { "Broadcom BCM58802 NetXtreme-S 10Gb/25Gb/40Gb/50Gb Ethernet" },
[BCM58804] = { "Broadcom BCM58804 NetXtreme-S 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" },
[BCM58808] = { "Broadcom BCM58808 NetXtreme-S 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" },
@@ -209,6 +215,12 @@ static const struct pci_device_id bnxt_pci_tbl[] = {
{ PCI_VDEVICE(BROADCOM, 0x1750), .driver_data = BCM57508 },
{ PCI_VDEVICE(BROADCOM, 0x1751), .driver_data = BCM57504 },
{ PCI_VDEVICE(BROADCOM, 0x1752), .driver_data = BCM57502 },
+ { PCI_VDEVICE(BROADCOM, 0x1800), .driver_data = BCM57508_NPAR },
+ { PCI_VDEVICE(BROADCOM, 0x1801), .driver_data = BCM57504_NPAR },
+ { PCI_VDEVICE(BROADCOM, 0x1802), .driver_data = BCM57502_NPAR },
+ { PCI_VDEVICE(BROADCOM, 0x1803), .driver_data = BCM57508_NPAR },
+ { PCI_VDEVICE(BROADCOM, 0x1804), .driver_data = BCM57504_NPAR },
+ { PCI_VDEVICE(BROADCOM, 0x1805), .driver_data = BCM57502_NPAR },
{ PCI_VDEVICE(BROADCOM, 0xd802), .driver_data = BCM58802 },
{ PCI_VDEVICE(BROADCOM, 0xd804), .driver_data = BCM58804 },
#ifdef CONFIG_BNXT_SRIOV
@@ -828,16 +840,41 @@ static inline int bnxt_alloc_rx_page(struct bnxt *bp,
return 0;
}
-static void bnxt_reuse_rx_agg_bufs(struct bnxt_cp_ring_info *cpr, u16 cp_cons,
- u32 agg_bufs)
+static struct rx_agg_cmp *bnxt_get_agg(struct bnxt *bp,
+ struct bnxt_cp_ring_info *cpr,
+ u16 cp_cons, u16 curr)
+{
+ struct rx_agg_cmp *agg;
+
+ cp_cons = RING_CMP(ADV_RAW_CMP(cp_cons, curr));
+ agg = (struct rx_agg_cmp *)
+ &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
+ return agg;
+}
+
+static struct rx_agg_cmp *bnxt_get_tpa_agg_p5(struct bnxt *bp,
+ struct bnxt_rx_ring_info *rxr,
+ u16 agg_id, u16 curr)
+{
+ struct bnxt_tpa_info *tpa_info = &rxr->rx_tpa[agg_id];
+
+ return &tpa_info->agg_arr[curr];
+}
+
+static void bnxt_reuse_rx_agg_bufs(struct bnxt_cp_ring_info *cpr, u16 idx,
+ u16 start, u32 agg_bufs, bool tpa)
{
struct bnxt_napi *bnapi = cpr->bnapi;
struct bnxt *bp = bnapi->bp;
struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
u16 prod = rxr->rx_agg_prod;
u16 sw_prod = rxr->rx_sw_agg_prod;
+ bool p5_tpa = false;
u32 i;
+ if ((bp->flags & BNXT_FLAG_CHIP_P5) && tpa)
+ p5_tpa = true;
+
for (i = 0; i < agg_bufs; i++) {
u16 cons;
struct rx_agg_cmp *agg;
@@ -845,8 +882,10 @@ static void bnxt_reuse_rx_agg_bufs(struct bnxt_cp_ring_info *cpr, u16 cp_cons,
struct rx_bd *prod_bd;
struct page *page;
- agg = (struct rx_agg_cmp *)
- &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
+ if (p5_tpa)
+ agg = bnxt_get_tpa_agg_p5(bp, rxr, idx, start + i);
+ else
+ agg = bnxt_get_agg(bp, cpr, idx, start + i);
cons = agg->rx_agg_cmp_opaque;
__clear_bit(cons, rxr->rx_agg_bmap);
@@ -874,7 +913,6 @@ static void bnxt_reuse_rx_agg_bufs(struct bnxt_cp_ring_info *cpr, u16 cp_cons,
prod = NEXT_RX_AGG(prod);
sw_prod = NEXT_RX_AGG(sw_prod);
- cp_cons = NEXT_CMP(cp_cons);
}
rxr->rx_agg_prod = prod;
rxr->rx_sw_agg_prod = sw_prod;
@@ -888,7 +926,7 @@ static struct sk_buff *bnxt_rx_page_skb(struct bnxt *bp,
{
unsigned int payload = offset_and_len >> 16;
unsigned int len = offset_and_len & 0xffff;
- struct skb_frag_struct *frag;
+ skb_frag_t *frag;
struct page *page = data;
u16 prod = rxr->rx_prod;
struct sk_buff *skb;
@@ -919,7 +957,7 @@ static struct sk_buff *bnxt_rx_page_skb(struct bnxt *bp,
frag = &skb_shinfo(skb)->frags[0];
skb_frag_size_sub(frag, payload);
- frag->page_offset += payload;
+ skb_frag_off_add(frag, payload);
skb->data_len -= payload;
skb->tail += payload;
@@ -957,15 +995,19 @@ static struct sk_buff *bnxt_rx_skb(struct bnxt *bp,
static struct sk_buff *bnxt_rx_pages(struct bnxt *bp,
struct bnxt_cp_ring_info *cpr,
- struct sk_buff *skb, u16 cp_cons,
- u32 agg_bufs)
+ struct sk_buff *skb, u16 idx,
+ u32 agg_bufs, bool tpa)
{
struct bnxt_napi *bnapi = cpr->bnapi;
struct pci_dev *pdev = bp->pdev;
struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
u16 prod = rxr->rx_agg_prod;
+ bool p5_tpa = false;
u32 i;
+ if ((bp->flags & BNXT_FLAG_CHIP_P5) && tpa)
+ p5_tpa = true;
+
for (i = 0; i < agg_bufs; i++) {
u16 cons, frag_len;
struct rx_agg_cmp *agg;
@@ -973,8 +1015,10 @@ static struct sk_buff *bnxt_rx_pages(struct bnxt *bp,
struct page *page;
dma_addr_t mapping;
- agg = (struct rx_agg_cmp *)
- &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
+ if (p5_tpa)
+ agg = bnxt_get_tpa_agg_p5(bp, rxr, idx, i);
+ else
+ agg = bnxt_get_agg(bp, cpr, idx, i);
cons = agg->rx_agg_cmp_opaque;
frag_len = (le32_to_cpu(agg->rx_agg_cmp_len_flags_type) &
RX_AGG_CMP_LEN) >> RX_AGG_CMP_LEN_SHIFT;
@@ -1008,7 +1052,7 @@ static struct sk_buff *bnxt_rx_pages(struct bnxt *bp,
* allocated already.
*/
rxr->rx_agg_prod = prod;
- bnxt_reuse_rx_agg_bufs(cpr, cp_cons, agg_bufs - i);
+ bnxt_reuse_rx_agg_bufs(cpr, idx, i, agg_bufs - i, tpa);
return NULL;
}
@@ -1021,7 +1065,6 @@ static struct sk_buff *bnxt_rx_pages(struct bnxt *bp,
skb->truesize += PAGE_SIZE;
prod = NEXT_RX_AGG(prod);
- cp_cons = NEXT_CMP(cp_cons);
}
rxr->rx_agg_prod = prod;
return skb;
@@ -1081,9 +1124,10 @@ static int bnxt_discard_rx(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
} else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) {
struct rx_tpa_end_cmp *tpa_end = cmp;
- agg_bufs = (le32_to_cpu(tpa_end->rx_tpa_end_cmp_misc_v1) &
- RX_TPA_END_CMP_AGG_BUFS) >>
- RX_TPA_END_CMP_AGG_BUFS_SHIFT;
+ if (bp->flags & BNXT_FLAG_CHIP_P5)
+ return 0;
+
+ agg_bufs = TPA_END_AGG_BUFS(tpa_end);
}
if (agg_bufs) {
@@ -1120,26 +1164,60 @@ static void bnxt_sched_reset(struct bnxt *bp, struct bnxt_rx_ring_info *rxr)
rxr->rx_next_cons = 0xffff;
}
+static u16 bnxt_alloc_agg_idx(struct bnxt_rx_ring_info *rxr, u16 agg_id)
+{
+ struct bnxt_tpa_idx_map *map = rxr->rx_tpa_idx_map;
+ u16 idx = agg_id & MAX_TPA_P5_MASK;
+
+ if (test_bit(idx, map->agg_idx_bmap))
+ idx = find_first_zero_bit(map->agg_idx_bmap,
+ BNXT_AGG_IDX_BMAP_SIZE);
+ __set_bit(idx, map->agg_idx_bmap);
+ map->agg_id_tbl[agg_id] = idx;
+ return idx;
+}
+
+static void bnxt_free_agg_idx(struct bnxt_rx_ring_info *rxr, u16 idx)
+{
+ struct bnxt_tpa_idx_map *map = rxr->rx_tpa_idx_map;
+
+ __clear_bit(idx, map->agg_idx_bmap);
+}
+
+static u16 bnxt_lookup_agg_idx(struct bnxt_rx_ring_info *rxr, u16 agg_id)
+{
+ struct bnxt_tpa_idx_map *map = rxr->rx_tpa_idx_map;
+
+ return map->agg_id_tbl[agg_id];
+}
+
static void bnxt_tpa_start(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
struct rx_tpa_start_cmp *tpa_start,
struct rx_tpa_start_cmp_ext *tpa_start1)
{
- u8 agg_id = TPA_START_AGG_ID(tpa_start);
- u16 cons, prod;
- struct bnxt_tpa_info *tpa_info;
struct bnxt_sw_rx_bd *cons_rx_buf, *prod_rx_buf;
+ struct bnxt_tpa_info *tpa_info;
+ u16 cons, prod, agg_id;
struct rx_bd *prod_bd;
dma_addr_t mapping;
+ if (bp->flags & BNXT_FLAG_CHIP_P5) {
+ agg_id = TPA_START_AGG_ID_P5(tpa_start);
+ agg_id = bnxt_alloc_agg_idx(rxr, agg_id);
+ } else {
+ agg_id = TPA_START_AGG_ID(tpa_start);
+ }
cons = tpa_start->rx_tpa_start_cmp_opaque;
prod = rxr->rx_prod;
cons_rx_buf = &rxr->rx_buf_ring[cons];
prod_rx_buf = &rxr->rx_buf_ring[prod];
tpa_info = &rxr->rx_tpa[agg_id];
- if (unlikely(cons != rxr->rx_next_cons)) {
- netdev_warn(bp->dev, "TPA cons %x != expected cons %x\n",
- cons, rxr->rx_next_cons);
+ if (unlikely(cons != rxr->rx_next_cons ||
+ TPA_START_ERROR(tpa_start))) {
+ netdev_warn(bp->dev, "TPA cons %x, expected cons %x, error code %x\n",
+ cons, rxr->rx_next_cons,
+ TPA_START_ERROR_CODE(tpa_start1));
bnxt_sched_reset(bp, rxr);
return;
}
@@ -1184,6 +1262,7 @@ static void bnxt_tpa_start(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
tpa_info->flags2 = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_flags2);
tpa_info->metadata = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_metadata);
tpa_info->hdr_info = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_hdr_info);
+ tpa_info->agg_count = 0;
rxr->rx_prod = NEXT_RX(prod);
cons = NEXT_RX(cons);
@@ -1195,13 +1274,37 @@ static void bnxt_tpa_start(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
cons_rx_buf->data = NULL;
}
-static void bnxt_abort_tpa(struct bnxt_cp_ring_info *cpr, u16 cp_cons,
- u32 agg_bufs)
+static void bnxt_abort_tpa(struct bnxt_cp_ring_info *cpr, u16 idx, u32 agg_bufs)
{
if (agg_bufs)
- bnxt_reuse_rx_agg_bufs(cpr, cp_cons, agg_bufs);
+ bnxt_reuse_rx_agg_bufs(cpr, idx, 0, agg_bufs, true);
}
+#ifdef CONFIG_INET
+static void bnxt_gro_tunnel(struct sk_buff *skb, __be16 ip_proto)
+{
+ struct udphdr *uh = NULL;
+
+ if (ip_proto == htons(ETH_P_IP)) {
+ struct iphdr *iph = (struct iphdr *)skb->data;
+
+ if (iph->protocol == IPPROTO_UDP)
+ uh = (struct udphdr *)(iph + 1);
+ } else {
+ struct ipv6hdr *iph = (struct ipv6hdr *)skb->data;
+
+ if (iph->nexthdr == IPPROTO_UDP)
+ uh = (struct udphdr *)(iph + 1);
+ }
+ if (uh) {
+ if (uh->check)
+ skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL_CSUM;
+ else
+ skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL;
+ }
+}
+#endif
+
static struct sk_buff *bnxt_gro_func_5731x(struct bnxt_tpa_info *tpa_info,
int payload_off, int tcp_ts,
struct sk_buff *skb)
@@ -1259,28 +1362,39 @@ static struct sk_buff *bnxt_gro_func_5731x(struct bnxt_tpa_info *tpa_info,
}
if (inner_mac_off) { /* tunnel */
- struct udphdr *uh = NULL;
__be16 proto = *((__be16 *)(skb->data + outer_ip_off -
ETH_HLEN - 2));
- if (proto == htons(ETH_P_IP)) {
- struct iphdr *iph = (struct iphdr *)skb->data;
+ bnxt_gro_tunnel(skb, proto);
+ }
+#endif
+ return skb;
+}
- if (iph->protocol == IPPROTO_UDP)
- uh = (struct udphdr *)(iph + 1);
- } else {
- struct ipv6hdr *iph = (struct ipv6hdr *)skb->data;
+static struct sk_buff *bnxt_gro_func_5750x(struct bnxt_tpa_info *tpa_info,
+ int payload_off, int tcp_ts,
+ struct sk_buff *skb)
+{
+#ifdef CONFIG_INET
+ u16 outer_ip_off, inner_ip_off, inner_mac_off;
+ u32 hdr_info = tpa_info->hdr_info;
+ int iphdr_len, nw_off;
- if (iph->nexthdr == IPPROTO_UDP)
- uh = (struct udphdr *)(iph + 1);
- }
- if (uh) {
- if (uh->check)
- skb_shinfo(skb)->gso_type |=
- SKB_GSO_UDP_TUNNEL_CSUM;
- else
- skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL;
- }
+ inner_ip_off = BNXT_TPA_INNER_L3_OFF(hdr_info);
+ inner_mac_off = BNXT_TPA_INNER_L2_OFF(hdr_info);
+ outer_ip_off = BNXT_TPA_OUTER_L3_OFF(hdr_info);
+
+ nw_off = inner_ip_off - ETH_HLEN;
+ skb_set_network_header(skb, nw_off);
+ iphdr_len = (tpa_info->flags2 & RX_TPA_START_CMP_FLAGS2_IP_TYPE) ?
+ sizeof(struct ipv6hdr) : sizeof(struct iphdr);
+ skb_set_transport_header(skb, nw_off + iphdr_len);
+
+ if (inner_mac_off) { /* tunnel */
+ __be16 proto = *((__be16 *)(skb->data + outer_ip_off -
+ ETH_HLEN - 2));
+
+ bnxt_gro_tunnel(skb, proto);
}
#endif
return skb;
@@ -1327,28 +1441,8 @@ static struct sk_buff *bnxt_gro_func_5730x(struct bnxt_tpa_info *tpa_info,
return NULL;
}
- if (nw_off) { /* tunnel */
- struct udphdr *uh = NULL;
-
- if (skb->protocol == htons(ETH_P_IP)) {
- struct iphdr *iph = (struct iphdr *)skb->data;
-
- if (iph->protocol == IPPROTO_UDP)
- uh = (struct udphdr *)(iph + 1);
- } else {
- struct ipv6hdr *iph = (struct ipv6hdr *)skb->data;
-
- if (iph->nexthdr == IPPROTO_UDP)
- uh = (struct udphdr *)(iph + 1);
- }
- if (uh) {
- if (uh->check)
- skb_shinfo(skb)->gso_type |=
- SKB_GSO_UDP_TUNNEL_CSUM;
- else
- skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL;
- }
- }
+ if (nw_off) /* tunnel */
+ bnxt_gro_tunnel(skb, skb->protocol);
#endif
return skb;
}
@@ -1371,9 +1465,10 @@ static inline struct sk_buff *bnxt_gro_skb(struct bnxt *bp,
skb_shinfo(skb)->gso_size =
le32_to_cpu(tpa_end1->rx_tpa_end_cmp_seg_len);
skb_shinfo(skb)->gso_type = tpa_info->gso_type;
- payload_off = (le32_to_cpu(tpa_end->rx_tpa_end_cmp_misc_v1) &
- RX_TPA_END_CMP_PAYLOAD_OFFSET) >>
- RX_TPA_END_CMP_PAYLOAD_OFFSET_SHIFT;
+ if (bp->flags & BNXT_FLAG_CHIP_P5)
+ payload_off = TPA_END_PAYLOAD_OFF_P5(tpa_end1);
+ else
+ payload_off = TPA_END_PAYLOAD_OFF(tpa_end);
skb = bp->gro_func(tpa_info, payload_off, TPA_END_GRO_TS(tpa_end), skb);
if (likely(skb))
tcp_gro_complete(skb);
@@ -1401,14 +1496,14 @@ static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp,
{
struct bnxt_napi *bnapi = cpr->bnapi;
struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
- u8 agg_id = TPA_END_AGG_ID(tpa_end);
u8 *data_ptr, agg_bufs;
- u16 cp_cons = RING_CMP(*raw_cons);
unsigned int len;
struct bnxt_tpa_info *tpa_info;
dma_addr_t mapping;
struct sk_buff *skb;
+ u16 idx = 0, agg_id;
void *data;
+ bool gro;
if (unlikely(bnapi->in_reset)) {
int rc = bnxt_discard_rx(bp, cpr, raw_cons, tpa_end);
@@ -1418,26 +1513,43 @@ static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp,
return NULL;
}
- tpa_info = &rxr->rx_tpa[agg_id];
+ if (bp->flags & BNXT_FLAG_CHIP_P5) {
+ agg_id = TPA_END_AGG_ID_P5(tpa_end);
+ agg_id = bnxt_lookup_agg_idx(rxr, agg_id);
+ agg_bufs = TPA_END_AGG_BUFS_P5(tpa_end1);
+ tpa_info = &rxr->rx_tpa[agg_id];
+ if (unlikely(agg_bufs != tpa_info->agg_count)) {
+ netdev_warn(bp->dev, "TPA end agg_buf %d != expected agg_bufs %d\n",
+ agg_bufs, tpa_info->agg_count);
+ agg_bufs = tpa_info->agg_count;
+ }
+ tpa_info->agg_count = 0;
+ *event |= BNXT_AGG_EVENT;
+ bnxt_free_agg_idx(rxr, agg_id);
+ idx = agg_id;
+ gro = !!(bp->flags & BNXT_FLAG_GRO);
+ } else {
+ agg_id = TPA_END_AGG_ID(tpa_end);
+ agg_bufs = TPA_END_AGG_BUFS(tpa_end);
+ tpa_info = &rxr->rx_tpa[agg_id];
+ idx = RING_CMP(*raw_cons);
+ if (agg_bufs) {
+ if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, raw_cons))
+ return ERR_PTR(-EBUSY);
+
+ *event |= BNXT_AGG_EVENT;
+ idx = NEXT_CMP(idx);
+ }
+ gro = !!TPA_END_GRO(tpa_end);
+ }
data = tpa_info->data;
data_ptr = tpa_info->data_ptr;
prefetch(data_ptr);
len = tpa_info->len;
mapping = tpa_info->mapping;
- agg_bufs = (le32_to_cpu(tpa_end->rx_tpa_end_cmp_misc_v1) &
- RX_TPA_END_CMP_AGG_BUFS) >> RX_TPA_END_CMP_AGG_BUFS_SHIFT;
-
- if (agg_bufs) {
- if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, raw_cons))
- return ERR_PTR(-EBUSY);
-
- *event |= BNXT_AGG_EVENT;
- cp_cons = NEXT_CMP(cp_cons);
- }
-
if (unlikely(agg_bufs > MAX_SKB_FRAGS || TPA_END_ERRORS(tpa_end1))) {
- bnxt_abort_tpa(cpr, cp_cons, agg_bufs);
+ bnxt_abort_tpa(cpr, idx, agg_bufs);
if (agg_bufs > MAX_SKB_FRAGS)
netdev_warn(bp->dev, "TPA frags %d exceeded MAX_SKB_FRAGS %d\n",
agg_bufs, (int)MAX_SKB_FRAGS);
@@ -1447,7 +1559,7 @@ static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp,
if (len <= bp->rx_copy_thresh) {
skb = bnxt_copy_skb(bnapi, data_ptr, len, mapping);
if (!skb) {
- bnxt_abort_tpa(cpr, cp_cons, agg_bufs);
+ bnxt_abort_tpa(cpr, idx, agg_bufs);
return NULL;
}
} else {
@@ -1456,7 +1568,7 @@ static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp,
new_data = __bnxt_alloc_rx_data(bp, &new_mapping, GFP_ATOMIC);
if (!new_data) {
- bnxt_abort_tpa(cpr, cp_cons, agg_bufs);
+ bnxt_abort_tpa(cpr, idx, agg_bufs);
return NULL;
}
@@ -1471,7 +1583,7 @@ static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp,
if (!skb) {
kfree(data);
- bnxt_abort_tpa(cpr, cp_cons, agg_bufs);
+ bnxt_abort_tpa(cpr, idx, agg_bufs);
return NULL;
}
skb_reserve(skb, bp->rx_offset);
@@ -1479,7 +1591,7 @@ static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp,
}
if (agg_bufs) {
- skb = bnxt_rx_pages(bp, cpr, skb, cp_cons, agg_bufs);
+ skb = bnxt_rx_pages(bp, cpr, skb, idx, agg_bufs, true);
if (!skb) {
/* Page reuse already handled by bnxt_rx_pages(). */
return NULL;
@@ -1508,12 +1620,24 @@ static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp,
(tpa_info->flags2 & RX_CMP_FLAGS2_T_L4_CS_CALC) >> 3;
}
- if (TPA_END_GRO(tpa_end))
+ if (gro)
skb = bnxt_gro_skb(bp, tpa_info, tpa_end, tpa_end1, skb);
return skb;
}
+static void bnxt_tpa_agg(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
+ struct rx_agg_cmp *rx_agg)
+{
+ u16 agg_id = TPA_AGG_AGG_ID(rx_agg);
+ struct bnxt_tpa_info *tpa_info;
+
+ agg_id = bnxt_lookup_agg_idx(rxr, agg_id);
+ tpa_info = &rxr->rx_tpa[agg_id];
+ BUG_ON(tpa_info->agg_count >= MAX_SKB_FRAGS);
+ tpa_info->agg_arr[tpa_info->agg_count++] = *rx_agg;
+}
+
static void bnxt_deliver_skb(struct bnxt *bp, struct bnxt_napi *bnapi,
struct sk_buff *skb)
{
@@ -1555,6 +1679,13 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
rxcmp = (struct rx_cmp *)
&cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
+ cmp_type = RX_CMP_TYPE(rxcmp);
+
+ if (cmp_type == CMP_TYPE_RX_TPA_AGG_CMP) {
+ bnxt_tpa_agg(bp, rxr, (struct rx_agg_cmp *)rxcmp);
+ goto next_rx_no_prod_no_len;
+ }
+
tmp_raw_cons = NEXT_RAW_CMP(tmp_raw_cons);
cp_cons = RING_CMP(tmp_raw_cons);
rxcmp1 = (struct rx_cmp_ext *)
@@ -1563,8 +1694,6 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
if (!RX_CMP_VALID(rxcmp1, tmp_raw_cons))
return -EBUSY;
- cmp_type = RX_CMP_TYPE(rxcmp);
-
prod = rxr->rx_prod;
if (cmp_type == CMP_TYPE_RX_L2_TPA_START_CMP) {
@@ -1623,7 +1752,8 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
bnxt_reuse_rx_data(rxr, cons, data);
if (agg_bufs)
- bnxt_reuse_rx_agg_bufs(cpr, cp_cons, agg_bufs);
+ bnxt_reuse_rx_agg_bufs(cpr, cp_cons, 0, agg_bufs,
+ false);
rc = -EIO;
if (rx_err & RX_CMPL_ERRORS_BUFFER_ERROR_MASK) {
@@ -1646,7 +1776,8 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
bnxt_reuse_rx_data(rxr, cons, data);
if (!skb) {
if (agg_bufs)
- bnxt_reuse_rx_agg_bufs(cpr, cp_cons, agg_bufs);
+ bnxt_reuse_rx_agg_bufs(cpr, cp_cons, 0,
+ agg_bufs, false);
rc = -ENOMEM;
goto next_rx;
}
@@ -1666,7 +1797,7 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
}
if (agg_bufs) {
- skb = bnxt_rx_pages(bp, cpr, skb, cp_cons, agg_bufs);
+ skb = bnxt_rx_pages(bp, cpr, skb, cp_cons, agg_bufs, false);
if (!skb) {
rc = -ENOMEM;
goto next_rx;
@@ -2325,10 +2456,11 @@ static void bnxt_free_rx_skbs(struct bnxt *bp)
max_agg_idx = bp->rx_agg_nr_pages * RX_DESC_CNT;
for (i = 0; i < bp->rx_nr_rings; i++) {
struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
+ struct bnxt_tpa_idx_map *map;
int j;
if (rxr->rx_tpa) {
- for (j = 0; j < MAX_TPA; j++) {
+ for (j = 0; j < bp->max_tpa; j++) {
struct bnxt_tpa_info *tpa_info =
&rxr->rx_tpa[j];
u8 *data = tpa_info->data;
@@ -2395,6 +2527,9 @@ static void bnxt_free_rx_skbs(struct bnxt *bp)
__free_page(rxr->rx_page);
rxr->rx_page = NULL;
}
+ map = rxr->rx_tpa_idx_map;
+ if (map)
+ memset(map->agg_idx_bmap, 0, sizeof(map->agg_idx_bmap));
}
}
@@ -2483,6 +2618,61 @@ static int bnxt_alloc_ring(struct bnxt *bp, struct bnxt_ring_mem_info *rmem)
return 0;
}
+static void bnxt_free_tpa_info(struct bnxt *bp)
+{
+ int i;
+
+ for (i = 0; i < bp->rx_nr_rings; i++) {
+ struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
+
+ kfree(rxr->rx_tpa_idx_map);
+ rxr->rx_tpa_idx_map = NULL;
+ if (rxr->rx_tpa) {
+ kfree(rxr->rx_tpa[0].agg_arr);
+ rxr->rx_tpa[0].agg_arr = NULL;
+ }
+ kfree(rxr->rx_tpa);
+ rxr->rx_tpa = NULL;
+ }
+}
+
+static int bnxt_alloc_tpa_info(struct bnxt *bp)
+{
+ int i, j, total_aggs = 0;
+
+ bp->max_tpa = MAX_TPA;
+ if (bp->flags & BNXT_FLAG_CHIP_P5) {
+ if (!bp->max_tpa_v2)
+ return 0;
+ bp->max_tpa = max_t(u16, bp->max_tpa_v2, MAX_TPA_P5);
+ total_aggs = bp->max_tpa * MAX_SKB_FRAGS;
+ }
+
+ for (i = 0; i < bp->rx_nr_rings; i++) {
+ struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
+ struct rx_agg_cmp *agg;
+
+ rxr->rx_tpa = kcalloc(bp->max_tpa, sizeof(struct bnxt_tpa_info),
+ GFP_KERNEL);
+ if (!rxr->rx_tpa)
+ return -ENOMEM;
+
+ if (!(bp->flags & BNXT_FLAG_CHIP_P5))
+ continue;
+ agg = kcalloc(total_aggs, sizeof(*agg), GFP_KERNEL);
+ rxr->rx_tpa[0].agg_arr = agg;
+ if (!agg)
+ return -ENOMEM;
+ for (j = 1; j < bp->max_tpa; j++)
+ rxr->rx_tpa[j].agg_arr = agg + j * MAX_SKB_FRAGS;
+ rxr->rx_tpa_idx_map = kzalloc(sizeof(*rxr->rx_tpa_idx_map),
+ GFP_KERNEL);
+ if (!rxr->rx_tpa_idx_map)
+ return -ENOMEM;
+ }
+ return 0;
+}
+
static void bnxt_free_rx_rings(struct bnxt *bp)
{
int i;
@@ -2490,6 +2680,7 @@ static void bnxt_free_rx_rings(struct bnxt *bp)
if (!bp->rx_ring)
return;
+ bnxt_free_tpa_info(bp);
for (i = 0; i < bp->rx_nr_rings; i++) {
struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
struct bnxt_ring_struct *ring;
@@ -2503,9 +2694,6 @@ static void bnxt_free_rx_rings(struct bnxt *bp)
page_pool_destroy(rxr->page_pool);
rxr->page_pool = NULL;
- kfree(rxr->rx_tpa);
- rxr->rx_tpa = NULL;
-
kfree(rxr->rx_agg_bmap);
rxr->rx_agg_bmap = NULL;
@@ -2539,7 +2727,7 @@ static int bnxt_alloc_rx_page_pool(struct bnxt *bp,
static int bnxt_alloc_rx_rings(struct bnxt *bp)
{
- int i, rc, agg_rings = 0, tpa_rings = 0;
+ int i, rc = 0, agg_rings = 0;
if (!bp->rx_ring)
return -ENOMEM;
@@ -2547,9 +2735,6 @@ static int bnxt_alloc_rx_rings(struct bnxt *bp)
if (bp->flags & BNXT_FLAG_AGG_RINGS)
agg_rings = 1;
- if (bp->flags & BNXT_FLAG_TPA)
- tpa_rings = 1;
-
for (i = 0; i < bp->rx_nr_rings; i++) {
struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
struct bnxt_ring_struct *ring;
@@ -2591,17 +2776,11 @@ static int bnxt_alloc_rx_rings(struct bnxt *bp)
rxr->rx_agg_bmap = kzalloc(mem_size, GFP_KERNEL);
if (!rxr->rx_agg_bmap)
return -ENOMEM;
-
- if (tpa_rings) {
- rxr->rx_tpa = kcalloc(MAX_TPA,
- sizeof(struct bnxt_tpa_info),
- GFP_KERNEL);
- if (!rxr->rx_tpa)
- return -ENOMEM;
- }
}
}
- return 0;
+ if (bp->flags & BNXT_FLAG_TPA)
+ rc = bnxt_alloc_tpa_info(bp);
+ return rc;
}
static void bnxt_free_tx_rings(struct bnxt *bp)
@@ -2953,7 +3132,7 @@ static int bnxt_init_one_rx_ring(struct bnxt *bp, int ring_nr)
u8 *data;
dma_addr_t mapping;
- for (i = 0; i < MAX_TPA; i++) {
+ for (i = 0; i < bp->max_tpa; i++) {
data = __bnxt_alloc_rx_data(bp, &mapping,
GFP_KERNEL);
if (!data)
@@ -3468,7 +3647,7 @@ static void bnxt_free_ring_stats(struct bnxt *bp)
if (!bp->bnapi)
return;
- size = sizeof(struct ctx_hw_stats);
+ size = bp->hw_ring_stats_size;
for (i = 0; i < bp->cp_nr_rings; i++) {
struct bnxt_napi *bnapi = bp->bnapi[i];
@@ -3487,7 +3666,7 @@ static int bnxt_alloc_stats(struct bnxt *bp)
u32 size, i;
struct pci_dev *pdev = bp->pdev;
- size = sizeof(struct ctx_hw_stats);
+ size = bp->hw_ring_stats_size;
for (i = 0; i < bp->cp_nr_rings; i++) {
struct bnxt_napi *bnapi = bp->bnapi[i];
@@ -4414,6 +4593,7 @@ static int bnxt_hwrm_clear_vnic_filter(struct bnxt *bp)
static int bnxt_hwrm_vnic_set_tpa(struct bnxt *bp, u16 vnic_id, u32 tpa_flags)
{
struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
+ u16 max_aggs = VNIC_TPA_CFG_REQ_MAX_AGGS_MAX;
struct hwrm_vnic_tpa_cfg_input req = {0};
if (vnic->fw_vnic_id == INVALID_HW_RING_ID)
@@ -4453,9 +4633,14 @@ static int bnxt_hwrm_vnic_set_tpa(struct bnxt *bp, u16 vnic_id, u32 tpa_flags)
nsegs = (MAX_SKB_FRAGS - n) / n;
}
- segs = ilog2(nsegs);
+ if (bp->flags & BNXT_FLAG_CHIP_P5) {
+ segs = MAX_TPA_SEGS_P5;
+ max_aggs = bp->max_tpa;
+ } else {
+ segs = ilog2(nsegs);
+ }
req.max_agg_segs = cpu_to_le16(segs);
- req.max_aggs = cpu_to_le16(VNIC_TPA_CFG_REQ_MAX_AGGS_MAX);
+ req.max_aggs = cpu_to_le16(max_aggs);
req.min_agg_len = cpu_to_le32(512);
}
@@ -4800,6 +4985,7 @@ static int bnxt_hwrm_vnic_qcaps(struct bnxt *bp)
struct hwrm_vnic_qcaps_input req = {0};
int rc;
+ bp->hw_ring_stats_size = sizeof(struct ctx_hw_stats);
if (bp->hwrm_spec_code < 0x10600)
return 0;
@@ -4815,6 +5001,10 @@ static int bnxt_hwrm_vnic_qcaps(struct bnxt *bp)
if (flags &
VNIC_QCAPS_RESP_FLAGS_ROCE_MIRRORING_CAPABLE_VNIC_CAP)
bp->flags |= BNXT_FLAG_ROCE_MIRROR_CAP;
+ bp->max_tpa_v2 = le16_to_cpu(resp->max_aggs_supported);
+ if (bp->max_tpa_v2)
+ bp->hw_ring_stats_size =
+ sizeof(struct ctx_hw_stats_ext);
}
mutex_unlock(&bp->hwrm_cmd_lock);
return rc;
@@ -6016,6 +6206,7 @@ static int bnxt_hwrm_stat_ctx_alloc(struct bnxt *bp)
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_STAT_CTX_ALLOC, -1, -1);
+ req.stats_dma_length = cpu_to_le16(bp->hw_ring_stats_size);
req.update_period_ms = cpu_to_le32(bp->stats_coal_ticks / 1000);
mutex_lock(&bp->hwrm_cmd_lock);
@@ -9306,7 +9497,8 @@ static int bnxt_set_features(struct net_device *dev, netdev_features_t features)
if (changes & BNXT_FLAG_TPA) {
update_tpa = true;
if ((bp->flags & BNXT_FLAG_TPA) == 0 ||
- (flags & BNXT_FLAG_TPA) == 0)
+ (flags & BNXT_FLAG_TPA) == 0 ||
+ (bp->flags & BNXT_FLAG_CHIP_P5))
re_init = true;
}
@@ -9316,9 +9508,8 @@ static int bnxt_set_features(struct net_device *dev, netdev_features_t features)
if (flags != bp->flags) {
u32 old_flags = bp->flags;
- bp->flags = flags;
-
if (!test_bit(BNXT_STATE_OPEN, &bp->state)) {
+ bp->flags = flags;
if (update_tpa)
bnxt_set_ring_params(bp);
return rc;
@@ -9326,12 +9517,14 @@ static int bnxt_set_features(struct net_device *dev, netdev_features_t features)
if (re_init) {
bnxt_close_nic(bp, false, false);
+ bp->flags = flags;
if (update_tpa)
bnxt_set_ring_params(bp);
return bnxt_open_nic(bp, false, false);
}
if (update_tpa) {
+ bp->flags = flags;
rc = bnxt_set_tpa(bp,
(flags & BNXT_FLAG_TPA) ?
true : false);
@@ -9728,6 +9921,68 @@ static void bnxt_init_dflt_coal(struct bnxt *bp)
bp->stats_coal_ticks = BNXT_DEF_STATS_COAL_TICKS;
}
+static int bnxt_fw_init_one_p1(struct bnxt *bp)
+{
+ int rc;
+
+ bp->fw_cap = 0;
+ rc = bnxt_hwrm_ver_get(bp);
+ if (rc)
+ return rc;
+
+ if (bp->fw_cap & BNXT_FW_CAP_KONG_MB_CHNL) {
+ rc = bnxt_alloc_kong_hwrm_resources(bp);
+ if (rc)
+ bp->fw_cap &= ~BNXT_FW_CAP_KONG_MB_CHNL;
+ }
+
+ if ((bp->fw_cap & BNXT_FW_CAP_SHORT_CMD) ||
+ bp->hwrm_max_ext_req_len > BNXT_HWRM_MAX_REQ_LEN) {
+ rc = bnxt_alloc_hwrm_short_cmd_req(bp);
+ if (rc)
+ return rc;
+ }
+ rc = bnxt_hwrm_func_reset(bp);
+ if (rc)
+ return -ENODEV;
+
+ bnxt_hwrm_fw_set_time(bp);
+ return 0;
+}
+
+static int bnxt_fw_init_one_p2(struct bnxt *bp)
+{
+ int rc;
+
+ /* Get the MAX capabilities for this function */
+ rc = bnxt_hwrm_func_qcaps(bp);
+ if (rc) {
+ netdev_err(bp->dev, "hwrm query capability failure rc: %x\n",
+ rc);
+ return -ENODEV;
+ }
+
+ rc = bnxt_hwrm_cfa_adv_flow_mgnt_qcaps(bp);
+ if (rc)
+ netdev_warn(bp->dev, "hwrm query adv flow mgnt failure rc: %d\n",
+ rc);
+
+ rc = bnxt_hwrm_func_drv_rgtr(bp);
+ if (rc)
+ return -ENODEV;
+
+ rc = bnxt_hwrm_func_rgtr_async_events(bp, NULL, 0);
+ if (rc)
+ return -ENODEV;
+
+ bnxt_hwrm_func_qcfg(bp);
+ bnxt_hwrm_vnic_qcaps(bp);
+ bnxt_hwrm_port_led_qcaps(bp);
+ bnxt_ethtool_init(bp);
+ bnxt_dcb_init(bp);
+ return 0;
+}
+
static int bnxt_init_board(struct pci_dev *pdev, struct net_device *dev)
{
int rc;
@@ -10683,32 +10938,18 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
goto init_err_pci_clean;
mutex_init(&bp->hwrm_cmd_lock);
- rc = bnxt_hwrm_ver_get(bp);
+
+ rc = bnxt_fw_init_one_p1(bp);
if (rc)
goto init_err_pci_clean;
- if (bp->fw_cap & BNXT_FW_CAP_KONG_MB_CHNL) {
- rc = bnxt_alloc_kong_hwrm_resources(bp);
- if (rc)
- bp->fw_cap &= ~BNXT_FW_CAP_KONG_MB_CHNL;
- }
-
- if ((bp->fw_cap & BNXT_FW_CAP_SHORT_CMD) ||
- bp->hwrm_max_ext_req_len > BNXT_HWRM_MAX_REQ_LEN) {
- rc = bnxt_alloc_hwrm_short_cmd_req(bp);
- if (rc)
- goto init_err_pci_clean;
- }
-
if (BNXT_CHIP_P5(bp))
bp->flags |= BNXT_FLAG_CHIP_P5;
- rc = bnxt_hwrm_func_reset(bp);
+ rc = bnxt_fw_init_one_p2(bp);
if (rc)
goto init_err_pci_clean;
- bnxt_hwrm_fw_set_time(bp);
-
dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG |
NETIF_F_TSO | NETIF_F_TSO6 |
NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_GRE |
@@ -10746,41 +10987,14 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
bp->gro_func = bnxt_gro_func_5730x;
if (BNXT_CHIP_P4(bp))
bp->gro_func = bnxt_gro_func_5731x;
+ else if (BNXT_CHIP_P5(bp))
+ bp->gro_func = bnxt_gro_func_5750x;
}
if (!BNXT_CHIP_P4_PLUS(bp))
bp->flags |= BNXT_FLAG_DOUBLE_DB;
- rc = bnxt_hwrm_func_drv_rgtr(bp);
- if (rc)
- goto init_err_pci_clean;
-
- rc = bnxt_hwrm_func_rgtr_async_events(bp, NULL, 0);
- if (rc)
- goto init_err_pci_clean;
-
bp->ulp_probe = bnxt_ulp_probe;
- rc = bnxt_hwrm_queue_qportcfg(bp);
- if (rc) {
- netdev_err(bp->dev, "hwrm query qportcfg failure rc: %x\n",
- rc);
- rc = -1;
- goto init_err_pci_clean;
- }
- /* Get the MAX capabilities for this function */
- rc = bnxt_hwrm_func_qcaps(bp);
- if (rc) {
- netdev_err(bp->dev, "hwrm query capability failure rc: %x\n",
- rc);
- rc = -1;
- goto init_err_pci_clean;
- }
-
- rc = bnxt_hwrm_cfa_adv_flow_mgnt_qcaps(bp);
- if (rc)
- netdev_warn(bp->dev, "hwrm query adv flow mgnt failure rc: %d\n",
- rc);
-
rc = bnxt_init_mac_addr(bp);
if (rc) {
dev_err(&pdev->dev, "Unable to initialize mac address.\n");
@@ -10794,11 +11008,6 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
if (rc)
goto init_err_pci_clean;
}
- bnxt_hwrm_func_qcfg(bp);
- bnxt_hwrm_vnic_qcaps(bp);
- bnxt_hwrm_port_led_qcaps(bp);
- bnxt_ethtool_init(bp);
- bnxt_dcb_init(bp);
/* MTU range: 60 - FW defined max */
dev->min_mtu = ETH_ZLEN;
@@ -10934,8 +11143,7 @@ shutdown_exit:
#ifdef CONFIG_PM_SLEEP
static int bnxt_suspend(struct device *device)
{
- struct pci_dev *pdev = to_pci_dev(device);
- struct net_device *dev = pci_get_drvdata(pdev);
+ struct net_device *dev = dev_get_drvdata(device);
struct bnxt *bp = netdev_priv(dev);
int rc = 0;
@@ -10951,8 +11159,7 @@ static int bnxt_suspend(struct device *device)
static int bnxt_resume(struct device *device)
{
- struct pci_dev *pdev = to_pci_dev(device);
- struct net_device *dev = pci_get_drvdata(pdev);
+ struct net_device *dev = dev_get_drvdata(device);
struct bnxt *bp = netdev_priv(dev);
int rc = 0;
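
The core of the bnxt change: the 57500 (P5) chips report 16-bit TPA aggregation IDs and deliver separate aggregation completions (CMP_TYPE_RX_TPA_AGG_CMP) that the driver now buffers per TPA session in agg_arr instead of reading them off the completion ring at TPA end. The sparse hardware IDs are mapped to a dense table index with a bitmap, as in bnxt_alloc_agg_idx() above. A standalone sketch of that allocator, with illustrative sizes:

    #include <linux/bitmap.h>
    #include <linux/types.h>

    #define EX_MAX_IDS      256                /* dense index space */
    #define EX_MAX_IDS_MASK (EX_MAX_IDS - 1)

    struct ex_idx_map {
            u16 id_tbl[1024];                  /* hw id -> dense index */
            DECLARE_BITMAP(bmap, EX_MAX_IDS);
    };

    static u16 ex_alloc_idx(struct ex_idx_map *map, u16 hw_id)
    {
            u16 idx = hw_id & EX_MAX_IDS_MASK; /* preferred slot */

            if (test_bit(idx, map->bmap))      /* taken: fall back */
                    idx = find_first_zero_bit(map->bmap, EX_MAX_IDS);
            __set_bit(idx, map->bmap);
            map->id_tbl[hw_id] = idx;          /* remember the mapping */
            return idx;
    }

Freeing is a __clear_bit(), and lookups read id_tbl[] directly, mirroring bnxt_free_agg_idx() and bnxt_lookup_agg_idx().
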
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
index 16694b704d15..1b1610d5b573 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
@@ -113,6 +113,7 @@ struct tx_cmp {
#define CMP_TYPE_RX_AGG_CMP 18
#define CMP_TYPE_RX_L2_TPA_START_CMP 19
#define CMP_TYPE_RX_L2_TPA_END_CMP 21
+ #define CMP_TYPE_RX_TPA_AGG_CMP 22
#define CMP_TYPE_STATUS_CMP 32
#define CMP_TYPE_REMOTE_DRIVER_REQ 34
#define CMP_TYPE_REMOTE_DRIVER_RESP 36
@@ -263,14 +264,21 @@ struct rx_agg_cmp {
u32 rx_agg_cmp_opaque;
__le32 rx_agg_cmp_v;
#define RX_AGG_CMP_V (1 << 0)
+ #define RX_AGG_CMP_AGG_ID (0xffff << 16)
+ #define RX_AGG_CMP_AGG_ID_SHIFT 16
__le32 rx_agg_cmp_unused;
};
+#define TPA_AGG_AGG_ID(rx_agg) \
+ ((le32_to_cpu((rx_agg)->rx_agg_cmp_v) & \
+ RX_AGG_CMP_AGG_ID) >> RX_AGG_CMP_AGG_ID_SHIFT)
+
struct rx_tpa_start_cmp {
__le32 rx_tpa_start_cmp_len_flags_type;
#define RX_TPA_START_CMP_TYPE (0x3f << 0)
#define RX_TPA_START_CMP_FLAGS (0x3ff << 6)
#define RX_TPA_START_CMP_FLAGS_SHIFT 6
+ #define RX_TPA_START_CMP_FLAGS_ERROR (0x1 << 6)
#define RX_TPA_START_CMP_FLAGS_PLACEMENT (0x7 << 7)
#define RX_TPA_START_CMP_FLAGS_PLACEMENT_SHIFT 7
#define RX_TPA_START_CMP_FLAGS_PLACEMENT_JUMBO (0x1 << 7)
@@ -278,6 +286,7 @@ struct rx_tpa_start_cmp {
#define RX_TPA_START_CMP_FLAGS_PLACEMENT_GRO_JUMBO (0x5 << 7)
#define RX_TPA_START_CMP_FLAGS_PLACEMENT_GRO_HDS (0x6 << 7)
#define RX_TPA_START_CMP_FLAGS_RSS_VALID (0x1 << 10)
+ #define RX_TPA_START_CMP_FLAGS_TIMESTAMP (0x1 << 11)
#define RX_TPA_START_CMP_FLAGS_ITYPES (0xf << 12)
#define RX_TPA_START_CMP_FLAGS_ITYPES_SHIFT 12
#define RX_TPA_START_CMP_FLAGS_ITYPE_TCP (0x2 << 12)
@@ -291,6 +300,8 @@ struct rx_tpa_start_cmp {
#define RX_TPA_START_CMP_RSS_HASH_TYPE_SHIFT 9
#define RX_TPA_START_CMP_AGG_ID (0x7f << 25)
#define RX_TPA_START_CMP_AGG_ID_SHIFT 25
+ #define RX_TPA_START_CMP_AGG_ID_P5 (0xffff << 16)
+ #define RX_TPA_START_CMP_AGG_ID_SHIFT_P5 16
__le32 rx_tpa_start_cmp_rss_hash;
};
@@ -308,6 +319,14 @@ struct rx_tpa_start_cmp {
((le32_to_cpu((rx_tpa_start)->rx_tpa_start_cmp_misc_v1) & \
RX_TPA_START_CMP_AGG_ID) >> RX_TPA_START_CMP_AGG_ID_SHIFT)
+#define TPA_START_AGG_ID_P5(rx_tpa_start) \
+ ((le32_to_cpu((rx_tpa_start)->rx_tpa_start_cmp_misc_v1) & \
+ RX_TPA_START_CMP_AGG_ID_P5) >> RX_TPA_START_CMP_AGG_ID_SHIFT_P5)
+
+#define TPA_START_ERROR(rx_tpa_start) \
+ ((rx_tpa_start)->rx_tpa_start_cmp_len_flags_type & \
+ cpu_to_le32(RX_TPA_START_CMP_FLAGS_ERROR))
+
struct rx_tpa_start_cmp_ext {
__le32 rx_tpa_start_cmp_flags2;
#define RX_TPA_START_CMP_FLAGS2_IP_CS_CALC (0x1 << 0)
@@ -315,10 +334,20 @@ struct rx_tpa_start_cmp_ext {
#define RX_TPA_START_CMP_FLAGS2_T_IP_CS_CALC (0x1 << 2)
#define RX_TPA_START_CMP_FLAGS2_T_L4_CS_CALC (0x1 << 3)
#define RX_TPA_START_CMP_FLAGS2_IP_TYPE (0x1 << 8)
+ #define RX_TPA_START_CMP_FLAGS2_CSUM_CMPL_VALID (0x1 << 9)
+ #define RX_TPA_START_CMP_FLAGS2_EXT_META_FORMAT (0x3 << 10)
+ #define RX_TPA_START_CMP_FLAGS2_EXT_META_FORMAT_SHIFT 10
+ #define RX_TPA_START_CMP_FLAGS2_CSUM_CMPL (0xffff << 16)
+ #define RX_TPA_START_CMP_FLAGS2_CSUM_CMPL_SHIFT 16
__le32 rx_tpa_start_cmp_metadata;
__le32 rx_tpa_start_cmp_cfa_code_v2;
#define RX_TPA_START_CMP_V2 (0x1 << 0)
+ #define RX_TPA_START_CMP_ERRORS_BUFFER_ERROR_MASK (0x7 << 1)
+ #define RX_TPA_START_CMP_ERRORS_BUFFER_ERROR_SHIFT 1
+ #define RX_TPA_START_CMP_ERRORS_BUFFER_ERROR_NO_BUFFER (0x0 << 1)
+ #define RX_TPA_START_CMP_ERRORS_BUFFER_ERROR_BAD_FORMAT (0x3 << 1)
+ #define RX_TPA_START_CMP_ERRORS_BUFFER_ERROR_FLUSH (0x5 << 1)
#define RX_TPA_START_CMP_CFA_CODE (0xffff << 16)
#define RX_TPA_START_CMPL_CFA_CODE_SHIFT 16
__le32 rx_tpa_start_cmp_hdr_info;
@@ -332,6 +361,11 @@ struct rx_tpa_start_cmp_ext {
(!!((rx_tpa_start)->rx_tpa_start_cmp_flags2 & \
cpu_to_le32(RX_TPA_START_CMP_FLAGS2_IP_TYPE)))
+#define TPA_START_ERROR_CODE(rx_tpa_start) \
+ ((le32_to_cpu((rx_tpa_start)->rx_tpa_start_cmp_cfa_code_v2) & \
+ RX_TPA_START_CMP_ERRORS_BUFFER_ERROR_MASK) >> \
+ RX_TPA_START_CMP_ERRORS_BUFFER_ERROR_SHIFT)
+
struct rx_tpa_end_cmp {
__le32 rx_tpa_end_cmp_len_flags_type;
#define RX_TPA_END_CMP_TYPE (0x3f << 0)
@@ -361,6 +395,8 @@ struct rx_tpa_end_cmp {
#define RX_TPA_END_CMP_PAYLOAD_OFFSET_SHIFT 16
#define RX_TPA_END_CMP_AGG_ID (0x7f << 25)
#define RX_TPA_END_CMP_AGG_ID_SHIFT 25
+ #define RX_TPA_END_CMP_AGG_ID_P5 (0xffff << 16)
+ #define RX_TPA_END_CMP_AGG_ID_SHIFT_P5 16
__le32 rx_tpa_end_cmp_tsdelta;
#define RX_TPA_END_GRO_TS (0x1 << 31)
@@ -370,6 +406,18 @@ struct rx_tpa_end_cmp {
((le32_to_cpu((rx_tpa_end)->rx_tpa_end_cmp_misc_v1) & \
RX_TPA_END_CMP_AGG_ID) >> RX_TPA_END_CMP_AGG_ID_SHIFT)
+#define TPA_END_AGG_ID_P5(rx_tpa_end) \
+ ((le32_to_cpu((rx_tpa_end)->rx_tpa_end_cmp_misc_v1) & \
+ RX_TPA_END_CMP_AGG_ID_P5) >> RX_TPA_END_CMP_AGG_ID_SHIFT_P5)
+
+#define TPA_END_PAYLOAD_OFF(rx_tpa_end) \
+ ((le32_to_cpu((rx_tpa_end)->rx_tpa_end_cmp_misc_v1) & \
+ RX_TPA_END_CMP_PAYLOAD_OFFSET) >> RX_TPA_END_CMP_PAYLOAD_OFFSET_SHIFT)
+
+#define TPA_END_AGG_BUFS(rx_tpa_end) \
+ ((le32_to_cpu((rx_tpa_end)->rx_tpa_end_cmp_misc_v1) & \
+ RX_TPA_END_CMP_AGG_BUFS) >> RX_TPA_END_CMP_AGG_BUFS_SHIFT)
+
#define TPA_END_TPA_SEGS(rx_tpa_end) \
((le32_to_cpu((rx_tpa_end)->rx_tpa_end_cmp_misc_v1) & \
RX_TPA_END_CMP_TPA_SEGS) >> RX_TPA_END_CMP_TPA_SEGS_SHIFT)
@@ -389,6 +437,10 @@ struct rx_tpa_end_cmp {
struct rx_tpa_end_cmp_ext {
__le32 rx_tpa_end_cmp_dup_acks;
#define RX_TPA_END_CMP_TPA_DUP_ACKS (0xf << 0)
+ #define RX_TPA_END_CMP_PAYLOAD_OFFSET_P5 (0xff << 16)
+ #define RX_TPA_END_CMP_PAYLOAD_OFFSET_SHIFT_P5 16
+ #define RX_TPA_END_CMP_AGG_BUFS_P5 (0xff << 24)
+ #define RX_TPA_END_CMP_AGG_BUFS_SHIFT_P5 24
__le32 rx_tpa_end_cmp_seg_len;
#define RX_TPA_END_CMP_TPA_SEG_LEN (0xffff << 0)
@@ -396,7 +448,13 @@ struct rx_tpa_end_cmp_ext {
__le32 rx_tpa_end_cmp_errors_v2;
#define RX_TPA_END_CMP_V2 (0x1 << 0)
#define RX_TPA_END_CMP_ERRORS (0x3 << 1)
+ #define RX_TPA_END_CMP_ERRORS_P5 (0x7 << 1)
#define RX_TPA_END_CMPL_ERRORS_SHIFT 1
+ #define RX_TPA_END_CMP_ERRORS_BUFFER_ERROR_NO_BUFFER (0x0 << 1)
+ #define RX_TPA_END_CMP_ERRORS_BUFFER_ERROR_NOT_ON_CHIP (0x2 << 1)
+ #define RX_TPA_END_CMP_ERRORS_BUFFER_ERROR_BAD_FORMAT (0x3 << 1)
+ #define RX_TPA_END_CMP_ERRORS_BUFFER_ERROR_RSV_ERROR (0x4 << 1)
+ #define RX_TPA_END_CMP_ERRORS_BUFFER_ERROR_FLUSH (0x5 << 1)
u32 rx_tpa_end_cmp_start_opaque;
};
@@ -405,6 +463,15 @@ struct rx_tpa_end_cmp_ext {
((rx_tpa_end_ext)->rx_tpa_end_cmp_errors_v2 & \
cpu_to_le32(RX_TPA_END_CMP_ERRORS))
+#define TPA_END_PAYLOAD_OFF_P5(rx_tpa_end_ext) \
+ ((le32_to_cpu((rx_tpa_end_ext)->rx_tpa_end_cmp_dup_acks) & \
+ RX_TPA_END_CMP_PAYLOAD_OFFSET_P5) >> \
+ RX_TPA_END_CMP_PAYLOAD_OFFSET_SHIFT_P5)
+
+#define TPA_END_AGG_BUFS_P5(rx_tpa_end_ext) \
+ ((le32_to_cpu((rx_tpa_end_ext)->rx_tpa_end_cmp_dup_acks) & \
+ RX_TPA_END_CMP_AGG_BUFS_P5) >> RX_TPA_END_CMP_AGG_BUFS_SHIFT_P5)
+
struct nqe_cn {
__le16 type;
#define NQ_CN_TYPE_MASK 0x3fUL
@@ -487,6 +554,9 @@ struct nqe_cn {
#define BNXT_DEFAULT_TX_RING_SIZE 511
#define MAX_TPA 64
+#define MAX_TPA_P5 256
+#define MAX_TPA_P5_MASK (MAX_TPA_P5 - 1)
+#define MAX_TPA_SEGS_P5 0x3f
#if (BNXT_PAGE_SHIFT == 16)
#define MAX_RX_PAGES 1
@@ -768,6 +838,15 @@ struct bnxt_tpa_info {
((hdr_info) & 0x1ff)
u16 cfa_code; /* cfa_code in TPA start compl */
+ u8 agg_count;
+ struct rx_agg_cmp *agg_arr;
+};
+
+#define BNXT_AGG_IDX_BMAP_SIZE (MAX_TPA_P5 / BITS_PER_LONG)
+
+struct bnxt_tpa_idx_map {
+ u16 agg_id_tbl[1024];
+ unsigned long agg_idx_bmap[BNXT_AGG_IDX_BMAP_SIZE];
};
struct bnxt_rx_ring_info {
@@ -797,6 +876,7 @@ struct bnxt_rx_ring_info {
dma_addr_t rx_agg_desc_mapping[MAX_RX_AGG_PAGES];
struct bnxt_tpa_info *rx_tpa;
+ struct bnxt_tpa_idx_map *rx_tpa_idx_map;
struct bnxt_ring_struct rx_ring_struct;
struct bnxt_ring_struct rx_agg_ring_struct;
@@ -1282,7 +1362,9 @@ struct bnxt {
#define CHIP_NUM_5745X 0xd730
-#define CHIP_NUM_57500 0x1750
+#define CHIP_NUM_57508 0x1750
+#define CHIP_NUM_57504 0x1751
+#define CHIP_NUM_57502 0x1752
#define CHIP_NUM_58802 0xd802
#define CHIP_NUM_58804 0xd804
@@ -1379,12 +1461,14 @@ struct bnxt {
#define BNXT_CHIP_TYPE_NITRO_A0(bp) ((bp)->flags & BNXT_FLAG_CHIP_NITRO_A0)
#define BNXT_RX_PAGE_MODE(bp) ((bp)->flags & BNXT_FLAG_RX_PAGE_MODE)
#define BNXT_SUPPORTS_TPA(bp) (!BNXT_CHIP_TYPE_NITRO_A0(bp) && \
- !(bp->flags & BNXT_FLAG_CHIP_P5) && \
- !is_kdump_kernel())
+ (!((bp)->flags & BNXT_FLAG_CHIP_P5) || \
+ (bp)->max_tpa_v2) && !is_kdump_kernel())
/* Chip class phase 5 */
#define BNXT_CHIP_P5(bp) \
- ((bp)->chip_num == CHIP_NUM_57500)
+ ((bp)->chip_num == CHIP_NUM_57508 || \
+ (bp)->chip_num == CHIP_NUM_57504 || \
+ (bp)->chip_num == CHIP_NUM_57502)
/* Chip class phase 4.x */
#define BNXT_CHIP_P4(bp) \
@@ -1414,6 +1498,8 @@ struct bnxt {
u16, void *, u8 *, dma_addr_t,
unsigned int);
+ u16 max_tpa_v2;
+ u16 max_tpa;
u32 rx_buf_size;
u32 rx_buf_use_size; /* useable size */
u16 rx_offset;
@@ -1525,6 +1611,7 @@ struct bnxt {
int hw_port_stats_size;
u16 fw_rx_stats_ext_size;
u16 fw_tx_stats_ext_size;
+ u16 hw_ring_stats_size;
u8 pri2cos[8];
u8 pri2cos_valid;
@@ -1637,7 +1724,6 @@ struct bnxt {
u8 switch_id[8];
struct bnxt_tc_info *tc_info;
struct dentry *debugfs_pdev;
- struct dentry *debugfs_dim;
struct device *hwmon_dev;
};
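
The new *_P5 masks in bnxt.h follow the header's usual accessor shape: mask a little-endian word, then shift down. Written with <linux/bitfield.h>, the same extraction looks like this (hypothetical names; the driver keeps open-coded shifts to match its existing macros):

    #include <linux/bitfield.h>
    #include <linux/kernel.h>

    #define EX_AGG_BUFS_P5  GENMASK(31, 24)    /* 0xff << 24 */

    static u8 ex_agg_bufs(__le32 dup_acks)
    {
            /* Same as (le32_to_cpu(x) & (0xff << 24)) >> 24 */
            return FIELD_GET(EX_AGG_BUFS_P5, le32_to_cpu(dup_acks));
    }
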
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_debugfs.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_debugfs.c
index 61393f351a77..156c2404854f 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_debugfs.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_debugfs.c
@@ -61,45 +61,30 @@ static const struct file_operations debugfs_dim_fops = {
.read = debugfs_dim_read,
};
-static struct dentry *debugfs_dim_ring_init(struct dim *dim, int ring_idx,
- struct dentry *dd)
+static void debugfs_dim_ring_init(struct dim *dim, int ring_idx,
+ struct dentry *dd)
{
static char qname[16];
snprintf(qname, 10, "%d", ring_idx);
- return debugfs_create_file(qname, 0600, dd,
- dim, &debugfs_dim_fops);
+ debugfs_create_file(qname, 0600, dd, dim, &debugfs_dim_fops);
}
void bnxt_debug_dev_init(struct bnxt *bp)
{
const char *pname = pci_name(bp->pdev);
- struct dentry *pdevf;
+ struct dentry *dir;
int i;
bp->debugfs_pdev = debugfs_create_dir(pname, bnxt_debug_mnt);
- if (bp->debugfs_pdev) {
- pdevf = debugfs_create_dir("dim", bp->debugfs_pdev);
- if (!pdevf) {
- pr_err("failed to create debugfs entry %s/dim\n",
- pname);
- return;
- }
- bp->debugfs_dim = pdevf;
- /* create files for each rx ring */
- for (i = 0; i < bp->cp_nr_rings; i++) {
- struct bnxt_cp_ring_info *cpr = &bp->bnapi[i]->cp_ring;
+ dir = debugfs_create_dir("dim", bp->debugfs_pdev);
- if (cpr && bp->bnapi[i]->rx_ring) {
- pdevf = debugfs_dim_ring_init(&cpr->dim, i,
- bp->debugfs_dim);
- if (!pdevf)
- pr_err("failed to create debugfs entry %s/dim/%d\n",
- pname, i);
- }
- }
- } else {
- pr_err("failed to create debugfs entry %s\n", pname);
+ /* create files for each rx ring */
+ for (i = 0; i < bp->cp_nr_rings; i++) {
+ struct bnxt_cp_ring_info *cpr = &bp->bnapi[i]->cp_ring;
+
+ if (cpr && bp->bnapi[i]->rx_ring)
+ debugfs_dim_ring_init(&cpr->dim, i, dir);
}
}
@@ -114,8 +99,6 @@ void bnxt_debug_dev_exit(struct bnxt *bp)
void bnxt_debug_init(void)
{
bnxt_debug_mnt = debugfs_create_dir("bnxt_en", NULL);
- if (!bnxt_debug_mnt)
- pr_err("failed to init bnxt_en debugfs\n");
}
void bnxt_debug_exit(void)
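
All of the deleted error handling above reflects the debugfs API contract: debugfs_create_dir() and debugfs_create_file() tolerate ERR_PTR or NULL parents, and a failed debugfs node must never change driver behavior, so return values are deliberately ignored. A sketch:

    #include <linux/debugfs.h>

    static void example_debugfs_init(void *priv,
                                     const struct file_operations *fops)
    {
            struct dentry *dir = debugfs_create_dir("example", NULL);

            /* No error checks: a failure here only loses the debug
             * node, and debugfs_create_file() copes with a bad dir. */
            debugfs_create_file("stats", 0400, dir, priv, fops);
    }
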
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
index 8445a0cce849..b624174c8594 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
@@ -137,7 +137,44 @@ reset_coalesce:
return rc;
}
-#define BNXT_NUM_STATS 22
+static const char * const bnxt_ring_stats_str[] = {
+ "rx_ucast_packets",
+ "rx_mcast_packets",
+ "rx_bcast_packets",
+ "rx_discards",
+ "rx_drops",
+ "rx_ucast_bytes",
+ "rx_mcast_bytes",
+ "rx_bcast_bytes",
+ "tx_ucast_packets",
+ "tx_mcast_packets",
+ "tx_bcast_packets",
+ "tx_discards",
+ "tx_drops",
+ "tx_ucast_bytes",
+ "tx_mcast_bytes",
+ "tx_bcast_bytes",
+};
+
+static const char * const bnxt_ring_tpa_stats_str[] = {
+ "tpa_packets",
+ "tpa_bytes",
+ "tpa_events",
+ "tpa_aborts",
+};
+
+static const char * const bnxt_ring_tpa2_stats_str[] = {
+ "rx_tpa_eligible_pkt",
+ "rx_tpa_eligible_bytes",
+ "rx_tpa_pkt",
+ "rx_tpa_bytes",
+ "rx_tpa_errors",
+};
+
+static const char * const bnxt_ring_sw_stats_str[] = {
+ "rx_l4_csum_errors",
+ "missed_irqs",
+};
#define BNXT_RX_STATS_ENTRY(counter) \
{ BNXT_RX_STATS_OFFSET(counter), __stringify(counter) }
@@ -207,6 +244,20 @@ reset_coalesce:
BNXT_TX_STATS_EXT_COS_ENTRY(6), \
BNXT_TX_STATS_EXT_COS_ENTRY(7) \
+#define BNXT_RX_STATS_EXT_DISCARD_COS_ENTRY(n) \
+ BNXT_RX_STATS_EXT_ENTRY(rx_discard_bytes_cos##n), \
+ BNXT_RX_STATS_EXT_ENTRY(rx_discard_packets_cos##n)
+
+#define BNXT_RX_STATS_EXT_DISCARD_COS_ENTRIES \
+ BNXT_RX_STATS_EXT_DISCARD_COS_ENTRY(0), \
+ BNXT_RX_STATS_EXT_DISCARD_COS_ENTRY(1), \
+ BNXT_RX_STATS_EXT_DISCARD_COS_ENTRY(2), \
+ BNXT_RX_STATS_EXT_DISCARD_COS_ENTRY(3), \
+ BNXT_RX_STATS_EXT_DISCARD_COS_ENTRY(4), \
+ BNXT_RX_STATS_EXT_DISCARD_COS_ENTRY(5), \
+ BNXT_RX_STATS_EXT_DISCARD_COS_ENTRY(6), \
+ BNXT_RX_STATS_EXT_DISCARD_COS_ENTRY(7)
+
#define BNXT_RX_STATS_PRI_ENTRY(counter, n) \
{ BNXT_RX_STATS_EXT_OFFSET(counter##_cos0), \
__stringify(counter##_pri##n) }
@@ -352,6 +403,7 @@ static const struct {
BNXT_RX_STATS_EXT_ENTRY(rx_buffer_passed_threshold),
BNXT_RX_STATS_EXT_ENTRY(rx_pcs_symbol_err),
BNXT_RX_STATS_EXT_ENTRY(rx_corrected_bits),
+ BNXT_RX_STATS_EXT_DISCARD_COS_ENTRIES,
};
static const struct {
@@ -417,9 +469,29 @@ static const struct {
ARRAY_SIZE(bnxt_tx_pkts_pri_arr))
#define BNXT_NUM_PCIE_STATS ARRAY_SIZE(bnxt_pcie_stats_arr)
+static int bnxt_get_num_tpa_ring_stats(struct bnxt *bp)
+{
+ if (BNXT_SUPPORTS_TPA(bp)) {
+ if (bp->max_tpa_v2)
+ return ARRAY_SIZE(bnxt_ring_tpa2_stats_str);
+ return ARRAY_SIZE(bnxt_ring_tpa_stats_str);
+ }
+ return 0;
+}
+
+static int bnxt_get_num_ring_stats(struct bnxt *bp)
+{
+ int num_stats;
+
+ num_stats = ARRAY_SIZE(bnxt_ring_stats_str) +
+ ARRAY_SIZE(bnxt_ring_sw_stats_str) +
+ bnxt_get_num_tpa_ring_stats(bp);
+ return num_stats * bp->cp_nr_rings;
+}
+
static int bnxt_get_num_stats(struct bnxt *bp)
{
- int num_stats = BNXT_NUM_STATS * bp->cp_nr_rings;
+ int num_stats = bnxt_get_num_ring_stats(bp);
num_stats += BNXT_NUM_SW_FUNC_STATS;
@@ -460,10 +532,11 @@ static void bnxt_get_ethtool_stats(struct net_device *dev,
{
u32 i, j = 0;
struct bnxt *bp = netdev_priv(dev);
- u32 stat_fields = sizeof(struct ctx_hw_stats) / 8;
+ u32 stat_fields = ARRAY_SIZE(bnxt_ring_stats_str) +
+ bnxt_get_num_tpa_ring_stats(bp);
if (!bp->bnapi) {
- j += BNXT_NUM_STATS * bp->cp_nr_rings + BNXT_NUM_SW_FUNC_STATS;
+ j += bnxt_get_num_ring_stats(bp) + BNXT_NUM_SW_FUNC_STATS;
goto skip_ring_stats;
}
@@ -551,56 +624,39 @@ skip_ring_stats:
static void bnxt_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
{
struct bnxt *bp = netdev_priv(dev);
- u32 i;
+ static const char * const *str;
+ u32 i, j, num_str;
switch (stringset) {
- /* The number of strings must match BNXT_NUM_STATS defined above. */
case ETH_SS_STATS:
for (i = 0; i < bp->cp_nr_rings; i++) {
- sprintf(buf, "[%d]: rx_ucast_packets", i);
- buf += ETH_GSTRING_LEN;
- sprintf(buf, "[%d]: rx_mcast_packets", i);
- buf += ETH_GSTRING_LEN;
- sprintf(buf, "[%d]: rx_bcast_packets", i);
- buf += ETH_GSTRING_LEN;
- sprintf(buf, "[%d]: rx_discards", i);
- buf += ETH_GSTRING_LEN;
- sprintf(buf, "[%d]: rx_drops", i);
- buf += ETH_GSTRING_LEN;
- sprintf(buf, "[%d]: rx_ucast_bytes", i);
- buf += ETH_GSTRING_LEN;
- sprintf(buf, "[%d]: rx_mcast_bytes", i);
- buf += ETH_GSTRING_LEN;
- sprintf(buf, "[%d]: rx_bcast_bytes", i);
- buf += ETH_GSTRING_LEN;
- sprintf(buf, "[%d]: tx_ucast_packets", i);
- buf += ETH_GSTRING_LEN;
- sprintf(buf, "[%d]: tx_mcast_packets", i);
- buf += ETH_GSTRING_LEN;
- sprintf(buf, "[%d]: tx_bcast_packets", i);
- buf += ETH_GSTRING_LEN;
- sprintf(buf, "[%d]: tx_discards", i);
- buf += ETH_GSTRING_LEN;
- sprintf(buf, "[%d]: tx_drops", i);
- buf += ETH_GSTRING_LEN;
- sprintf(buf, "[%d]: tx_ucast_bytes", i);
- buf += ETH_GSTRING_LEN;
- sprintf(buf, "[%d]: tx_mcast_bytes", i);
- buf += ETH_GSTRING_LEN;
- sprintf(buf, "[%d]: tx_bcast_bytes", i);
- buf += ETH_GSTRING_LEN;
- sprintf(buf, "[%d]: tpa_packets", i);
- buf += ETH_GSTRING_LEN;
- sprintf(buf, "[%d]: tpa_bytes", i);
- buf += ETH_GSTRING_LEN;
- sprintf(buf, "[%d]: tpa_events", i);
- buf += ETH_GSTRING_LEN;
- sprintf(buf, "[%d]: tpa_aborts", i);
- buf += ETH_GSTRING_LEN;
- sprintf(buf, "[%d]: rx_l4_csum_errors", i);
- buf += ETH_GSTRING_LEN;
- sprintf(buf, "[%d]: missed_irqs", i);
- buf += ETH_GSTRING_LEN;
+ num_str = ARRAY_SIZE(bnxt_ring_stats_str);
+ for (j = 0; j < num_str; j++) {
+ sprintf(buf, "[%d]: %s", i,
+ bnxt_ring_stats_str[j]);
+ buf += ETH_GSTRING_LEN;
+ }
+ if (!BNXT_SUPPORTS_TPA(bp))
+ goto skip_tpa_stats;
+
+ if (bp->max_tpa_v2) {
+ num_str = ARRAY_SIZE(bnxt_ring_tpa2_stats_str);
+ str = bnxt_ring_tpa2_stats_str;
+ } else {
+ num_str = ARRAY_SIZE(bnxt_ring_tpa_stats_str);
+ str = bnxt_ring_tpa_stats_str;
+ }
+ for (j = 0; j < num_str; j++) {
+ sprintf(buf, "[%d]: %s", i, str[j]);
+ buf += ETH_GSTRING_LEN;
+ }
+skip_tpa_stats:
+ num_str = ARRAY_SIZE(bnxt_ring_sw_stats_str);
+ for (j = 0; j < num_str; j++) {
+ sprintf(buf, "[%d]: %s", i,
+ bnxt_ring_sw_stats_str[j]);
+ buf += ETH_GSTRING_LEN;
+ }
}
for (i = 0; i < BNXT_NUM_SW_FUNC_STATS; i++) {
strcpy(buf, bnxt_sw_func_stats[i].string);
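
The get_strings() rewrite above replaces a wall of per-counter sprintf() calls with loops over the const string tables, so the string set, the count in bnxt_get_num_ring_stats(), and the values emitted by bnxt_get_ethtool_stats() all derive from the same arrays. A minimal sketch of the table-driven pattern:

    #include <linux/ethtool.h>
    #include <linux/kernel.h>

    static const char * const ex_ring_stats[] = {
            "rx_ucast_packets",
            "tx_ucast_packets",
    };

    /* Emit "[ring]: name" entries, ETH_GSTRING_LEN bytes apart, as
     * ethtool expects for ETH_SS_STATS. */
    static u8 *ex_fill_strings(u8 *buf, int ring)
    {
            int j;

            for (j = 0; j < ARRAY_SIZE(ex_ring_stats); j++) {
                    sprintf(buf, "[%d]: %s", ring, ex_ring_stats[j]);
                    buf += ETH_GSTRING_LEN;
            }
            return buf;
    }
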
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h
index 12bbb2a207d0..2cdef753a1bc 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h
@@ -1,7 +1,8 @@
/* Broadcom NetXtreme-C/E network driver.
*
* Copyright (c) 2014-2016 Broadcom Corporation
- * Copyright (c) 2016-2019 Broadcom Limited
+ * Copyright (c) 2014-2018 Broadcom Limited
+ * Copyright (c) 2018-2019 Broadcom Inc.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -39,15 +40,15 @@ struct hwrm_resp_hdr {
#define TLV_TYPE_ROCE_SP_COMMAND 0x3UL
#define TLV_TYPE_QUERY_ROCE_CC_GEN1 0x4UL
#define TLV_TYPE_MODIFY_ROCE_CC_GEN1 0x5UL
-#define TLV_TYPE_ENGINE_CKV_DEVICE_SERIAL_NUMBER 0x8001UL
-#define TLV_TYPE_ENGINE_CKV_NONCE 0x8002UL
+#define TLV_TYPE_ENGINE_CKV_ALIAS_ECC_PUBLIC_KEY 0x8001UL
#define TLV_TYPE_ENGINE_CKV_IV 0x8003UL
#define TLV_TYPE_ENGINE_CKV_AUTH_TAG 0x8004UL
#define TLV_TYPE_ENGINE_CKV_CIPHERTEXT 0x8005UL
#define TLV_TYPE_ENGINE_CKV_ALGORITHMS 0x8006UL
-#define TLV_TYPE_ENGINE_CKV_ECC_PUBLIC_KEY 0x8007UL
+#define TLV_TYPE_ENGINE_CKV_HOST_ECC_PUBLIC_KEY 0x8007UL
#define TLV_TYPE_ENGINE_CKV_ECDSA_SIGNATURE 0x8008UL
-#define TLV_TYPE_LAST TLV_TYPE_ENGINE_CKV_ECDSA_SIGNATURE
+#define TLV_TYPE_ENGINE_CKV_SRT_ECC_PUBLIC_KEY 0x8009UL
+#define TLV_TYPE_LAST TLV_TYPE_ENGINE_CKV_SRT_ECC_PUBLIC_KEY
/* tlv (size:64b/8B) */
@@ -267,7 +268,6 @@ struct cmd_nums {
#define HWRM_CFA_EEM_OP 0x123UL
#define HWRM_CFA_ADV_FLOW_MGNT_QCAPS 0x124UL
#define HWRM_CFA_TFLIB 0x125UL
- #define HWRM_ENGINE_CKV_HELLO 0x12dUL
#define HWRM_ENGINE_CKV_STATUS 0x12eUL
#define HWRM_ENGINE_CKV_CKEK_ADD 0x12fUL
#define HWRM_ENGINE_CKV_CKEK_DELETE 0x130UL
@@ -313,6 +313,7 @@ struct cmd_nums {
#define HWRM_FUNC_BACKING_STORE_QCFG 0x194UL
#define HWRM_FUNC_VF_BW_CFG 0x195UL
#define HWRM_FUNC_VF_BW_QCFG 0x196UL
+ #define HWRM_FUNC_HOST_PF_IDS_QUERY 0x197UL
#define HWRM_SELFTEST_QLIST 0x200UL
#define HWRM_SELFTEST_EXEC 0x201UL
#define HWRM_SELFTEST_IRQ 0x202UL
@@ -410,8 +411,8 @@ struct hwrm_err_output {
#define HWRM_VERSION_MAJOR 1
#define HWRM_VERSION_MINOR 10
#define HWRM_VERSION_UPDATE 0
-#define HWRM_VERSION_RSVD 69
-#define HWRM_VERSION_STR "1.10.0.69"
+#define HWRM_VERSION_RSVD 89
+#define HWRM_VERSION_STR "1.10.0.89"
/* hwrm_ver_get_input (size:192b/24B) */
struct hwrm_ver_get_input {
@@ -624,6 +625,8 @@ struct hwrm_async_event_cmpl {
#define ASYNC_EVENT_CMPL_EVENT_ID_TCP_FLAG_ACTION_CHANGE 0x3aUL
#define ASYNC_EVENT_CMPL_EVENT_ID_EEM_FLOW_ACTIVE 0x3bUL
#define ASYNC_EVENT_CMPL_EVENT_ID_EEM_CFG_CHANGE 0x3cUL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_TFLIB_DEFAULT_VNIC_CHANGE 0x3dUL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_TFLIB_LINK_STATUS_CHANGE 0x3eUL
#define ASYNC_EVENT_CMPL_EVENT_ID_FW_TRACE_MSG 0xfeUL
#define ASYNC_EVENT_CMPL_EVENT_ID_HWRM_ERROR 0xffUL
#define ASYNC_EVENT_CMPL_EVENT_ID_LAST ASYNC_EVENT_CMPL_EVENT_ID_HWRM_ERROR
@@ -1122,6 +1125,7 @@ struct hwrm_func_qcfg_output {
#define FUNC_QCFG_RESP_FLAGS_MULTI_HOST 0x20UL
#define FUNC_QCFG_RESP_FLAGS_TRUSTED_VF 0x40UL
#define FUNC_QCFG_RESP_FLAGS_SECURE_MODE_ENABLED 0x80UL
+ #define FUNC_QCFG_RESP_FLAGS_PREBOOT_LEGACY_L2_RINGS 0x100UL
u8 mac_address[6];
__le16 pci_id;
__le16 alloc_rsscos_ctx;
@@ -1241,6 +1245,7 @@ struct hwrm_func_cfg_input {
#define FUNC_CFG_REQ_FLAGS_DYNAMIC_TX_RING_ALLOC 0x400000UL
#define FUNC_CFG_REQ_FLAGS_NQ_ASSETS_TEST 0x800000UL
#define FUNC_CFG_REQ_FLAGS_TRUSTED_VF_DISABLE 0x1000000UL
+ #define FUNC_CFG_REQ_FLAGS_PREBOOT_LEGACY_L2_RINGS 0x2000000UL
__le32 enables;
#define FUNC_CFG_REQ_ENABLES_MTU 0x1UL
#define FUNC_CFG_REQ_ENABLES_MRU 0x2UL
@@ -2916,7 +2921,7 @@ struct tx_port_stats_ext {
__le64 pfc_pri7_tx_transitions;
};
-/* rx_port_stats_ext (size:2624b/328B) */
+/* rx_port_stats_ext (size:3648b/456B) */
struct rx_port_stats_ext {
__le64 link_down_events;
__le64 continuous_pause_events;
@@ -2959,6 +2964,22 @@ struct rx_port_stats_ext {
__le64 rx_buffer_passed_threshold;
__le64 rx_pcs_symbol_err;
__le64 rx_corrected_bits;
+ __le64 rx_discard_bytes_cos0;
+ __le64 rx_discard_bytes_cos1;
+ __le64 rx_discard_bytes_cos2;
+ __le64 rx_discard_bytes_cos3;
+ __le64 rx_discard_bytes_cos4;
+ __le64 rx_discard_bytes_cos5;
+ __le64 rx_discard_bytes_cos6;
+ __le64 rx_discard_bytes_cos7;
+ __le64 rx_discard_packets_cos0;
+ __le64 rx_discard_packets_cos1;
+ __le64 rx_discard_packets_cos2;
+ __le64 rx_discard_packets_cos3;
+ __le64 rx_discard_packets_cos4;
+ __le64 rx_discard_packets_cos5;
+ __le64 rx_discard_packets_cos6;
+ __le64 rx_discard_packets_cos7;
};
/* hwrm_port_qstats_ext_input (size:320b/40B) */
@@ -6115,6 +6136,21 @@ struct hwrm_cfa_flow_alloc_output {
u8 valid;
};
+/* hwrm_cfa_flow_alloc_cmd_err (size:64b/8B) */
+struct hwrm_cfa_flow_alloc_cmd_err {
+ u8 code;
+ #define CFA_FLOW_ALLOC_CMD_ERR_CODE_UNKNOWN 0x0UL
+ #define CFA_FLOW_ALLOC_CMD_ERR_CODE_L2_CONTEXT_TCAM 0x1UL
+ #define CFA_FLOW_ALLOC_CMD_ERR_CODE_ACTION_RECORD 0x2UL
+ #define CFA_FLOW_ALLOC_CMD_ERR_CODE_FLOW_COUNTER 0x3UL
+ #define CFA_FLOW_ALLOC_CMD_ERR_CODE_WILD_CARD_TCAM 0x4UL
+ #define CFA_FLOW_ALLOC_CMD_ERR_CODE_HASH_COLLISION 0x5UL
+ #define CFA_FLOW_ALLOC_CMD_ERR_CODE_KEY_EXISTS 0x6UL
+ #define CFA_FLOW_ALLOC_CMD_ERR_CODE_FLOW_CTXT_DB 0x7UL
+ #define CFA_FLOW_ALLOC_CMD_ERR_CODE_LAST CFA_FLOW_ALLOC_CMD_ERR_CODE_FLOW_CTXT_DB
+ u8 unused_0[7];
+};
+
/* hwrm_cfa_flow_free_input (size:256b/32B) */
struct hwrm_cfa_flow_free_input {
__le16 req_type;
@@ -6305,7 +6341,7 @@ struct hwrm_cfa_eem_qcaps_input {
__le32 unused_0;
};
-/* hwrm_cfa_eem_qcaps_output (size:256b/32B) */
+/* hwrm_cfa_eem_qcaps_output (size:320b/40B) */
struct hwrm_cfa_eem_qcaps_output {
__le16 error_code;
__le16 req_type;
@@ -6322,15 +6358,17 @@ struct hwrm_cfa_eem_qcaps_output {
#define CFA_EEM_QCAPS_RESP_SUPPORTED_KEY1_TABLE 0x2UL
#define CFA_EEM_QCAPS_RESP_SUPPORTED_EXTERNAL_RECORD_TABLE 0x4UL
#define CFA_EEM_QCAPS_RESP_SUPPORTED_EXTERNAL_FLOW_COUNTERS_TABLE 0x8UL
+ #define CFA_EEM_QCAPS_RESP_SUPPORTED_FID_TABLE 0x10UL
__le32 max_entries_supported;
__le16 key_entry_size;
__le16 record_entry_size;
__le16 efc_entry_size;
- u8 unused_1;
+ __le16 fid_entry_size;
+ u8 unused_1[7];
u8 valid;
};
-/* hwrm_cfa_eem_cfg_input (size:320b/40B) */
+/* hwrm_cfa_eem_cfg_input (size:384b/48B) */
struct hwrm_cfa_eem_cfg_input {
__le16 req_type;
__le16 cmpl_ring;
@@ -6350,6 +6388,9 @@ struct hwrm_cfa_eem_cfg_input {
__le16 key1_ctx_id;
__le16 record_ctx_id;
__le16 efc_ctx_id;
+ __le16 fid_ctx_id;
+ __le16 unused_2;
+ __le32 unused_3;
};
/* hwrm_cfa_eem_cfg_output (size:128b/16B) */
@@ -6375,7 +6416,7 @@ struct hwrm_cfa_eem_qcfg_input {
__le32 unused_0;
};
-/* hwrm_cfa_eem_qcfg_output (size:192b/24B) */
+/* hwrm_cfa_eem_qcfg_output (size:256b/32B) */
struct hwrm_cfa_eem_qcfg_output {
__le16 error_code;
__le16 req_type;
@@ -6386,7 +6427,12 @@ struct hwrm_cfa_eem_qcfg_output {
#define CFA_EEM_QCFG_RESP_FLAGS_PATH_RX 0x2UL
#define CFA_EEM_QCFG_RESP_FLAGS_PREFERRED_OFFLOAD 0x4UL
__le32 num_entries;
- u8 unused_0[7];
+ __le16 key0_ctx_id;
+ __le16 key1_ctx_id;
+ __le16 record_ctx_id;
+ __le16 efc_ctx_id;
+ __le16 fid_ctx_id;
+ u8 unused_2[5];
u8 valid;
};
@@ -6567,6 +6613,31 @@ struct ctx_hw_stats {
__le64 tpa_aborts;
};
+/* ctx_hw_stats_ext (size:1344b/168B) */
+struct ctx_hw_stats_ext {
+ __le64 rx_ucast_pkts;
+ __le64 rx_mcast_pkts;
+ __le64 rx_bcast_pkts;
+ __le64 rx_discard_pkts;
+ __le64 rx_drop_pkts;
+ __le64 rx_ucast_bytes;
+ __le64 rx_mcast_bytes;
+ __le64 rx_bcast_bytes;
+ __le64 tx_ucast_pkts;
+ __le64 tx_mcast_pkts;
+ __le64 tx_bcast_pkts;
+ __le64 tx_discard_pkts;
+ __le64 tx_drop_pkts;
+ __le64 tx_ucast_bytes;
+ __le64 tx_mcast_bytes;
+ __le64 tx_bcast_bytes;
+ __le64 rx_tpa_eligible_pkt;
+ __le64 rx_tpa_eligible_bytes;
+ __le64 rx_tpa_pkt;
+ __le64 rx_tpa_bytes;
+ __le64 rx_tpa_errors;
+};
+
/* hwrm_stat_ctx_alloc_input (size:256b/32B) */
struct hwrm_stat_ctx_alloc_input {
__le16 req_type;
@@ -6578,7 +6649,8 @@ struct hwrm_stat_ctx_alloc_input {
__le32 update_period_ms;
u8 stat_ctx_flags;
#define STAT_CTX_ALLOC_REQ_STAT_CTX_FLAGS_ROCE 0x1UL
- u8 unused_0[3];
+ u8 unused_0;
+ __le16 stats_dma_length;
};
/* hwrm_stat_ctx_alloc_output (size:128b/16B) */
@@ -7204,7 +7276,9 @@ struct coredump_segment_record {
u8 version_hi;
u8 version_low;
u8 seg_flags;
- u8 unused_0[7];
+ u8 compress_flags;
+ #define SFLAG_COMPRESSED_ZLIB 0x1UL
+ u8 unused_0[6];
};
/* hwrm_dbg_coredump_list_input (size:256b/32B) */
@@ -7729,6 +7803,9 @@ struct hwrm_nvm_set_variable_input {
#define NVM_SET_VARIABLE_REQ_FLAGS_ENCRYPT_MODE_AES256 (0x2UL << 1)
#define NVM_SET_VARIABLE_REQ_FLAGS_ENCRYPT_MODE_HMAC_SHA1_AUTH (0x3UL << 1)
#define NVM_SET_VARIABLE_REQ_FLAGS_ENCRYPT_MODE_LAST NVM_SET_VARIABLE_REQ_FLAGS_ENCRYPT_MODE_HMAC_SHA1_AUTH
+ #define NVM_SET_VARIABLE_REQ_FLAGS_FLAGS_UNUSED_0_MASK 0x70UL
+ #define NVM_SET_VARIABLE_REQ_FLAGS_FLAGS_UNUSED_0_SFT 4
+ #define NVM_SET_VARIABLE_REQ_FLAGS_FACTORY_DEFAULT 0x80UL
u8 unused_0;
};
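All counters in these HSI structs are declared __le64 because the layout is shared with (and written by) the firmware in little-endian order; the driver converts on access. A hedged sketch of consuming one field portably, using the ctx_hw_stats_ext definition above:

	#include <linux/types.h>
	#include <asm/byteorder.h>

	/* Convert a firmware-written LE64 counter to host byte order. */
	static u64 example_read_tpa_errors(const struct ctx_hw_stats_ext *stats)
	{
		return le64_to_cpu(stats->rx_tpa_errors);
	}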
diff --git a/drivers/net/ethernet/broadcom/cnic.c b/drivers/net/ethernet/broadcom/cnic.c
index 57dc3cbff36e..155599dcee76 100644
--- a/drivers/net/ethernet/broadcom/cnic.c
+++ b/drivers/net/ethernet/broadcom/cnic.c
@@ -4096,12 +4096,16 @@ static int cnic_cm_alloc_mem(struct cnic_dev *dev)
{
struct cnic_local *cp = dev->cnic_priv;
u32 port_id;
+ int i;
cp->csk_tbl = kvcalloc(MAX_CM_SK_TBL_SZ, sizeof(struct cnic_sock),
GFP_KERNEL);
if (!cp->csk_tbl)
return -ENOMEM;
+ for (i = 0; i < MAX_CM_SK_TBL_SZ; i++)
+ atomic_set(&cp->csk_tbl[i].ref_count, 0);
+
port_id = prandom_u32();
port_id %= CNIC_LOCAL_PORT_RANGE;
if (cnic_init_id_tbl(&cp->csk_port_tbl, CNIC_LOCAL_PORT_RANGE,
@@ -5480,6 +5484,7 @@ static struct cnic_dev *cnic_alloc_dev(struct net_device *dev,
cdev->unregister_device = cnic_unregister_device;
cdev->iscsi_nl_msg_recv = cnic_iscsi_nl_msg_recv;
cdev->get_fc_npiv_tbl = cnic_get_fc_npiv_tbl;
+ atomic_set(&cdev->ref_count, 0);
cp = cdev->cnic_priv;
cp->dev = cdev;
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
index d3a0b614dbfa..1586316eb6f1 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
@@ -2515,19 +2515,14 @@ static int bcmgenet_dma_teardown(struct bcmgenet_priv *priv)
static void bcmgenet_fini_dma(struct bcmgenet_priv *priv)
{
struct netdev_queue *txq;
- struct sk_buff *skb;
- struct enet_cb *cb;
int i;
bcmgenet_fini_rx_napi(priv);
bcmgenet_fini_tx_napi(priv);
- for (i = 0; i < priv->num_tx_bds; i++) {
- cb = priv->tx_cbs + i;
- skb = bcmgenet_free_tx_cb(&priv->pdev->dev, cb);
- if (skb)
- dev_kfree_skb(skb);
- }
+ for (i = 0; i < priv->num_tx_bds; i++)
+ dev_kfree_skb(bcmgenet_free_tx_cb(&priv->pdev->dev,
+ priv->tx_cbs + i));
for (i = 0; i < priv->hw_params->tx_queues; i++) {
txq = netdev_get_tx_queue(priv->dev, priv->tx_rings[i].queue);
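The rewrite above leans on dev_kfree_skb() accepting NULL (it is a no-op for a NULL skb, like kfree_skb()), which is what lets the explicit if (skb) test go away. Equivalent hedged sketch:

	#include <linux/skbuff.h>

	/* dev_kfree_skb(NULL) is a no-op, so no NULL check is needed. */
	static void free_optional_skb(struct sk_buff *skb)
	{
		dev_kfree_skb(skb);
	}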
@@ -3437,7 +3432,6 @@ static int bcmgenet_probe(struct platform_device *pdev)
struct bcmgenet_priv *priv;
struct net_device *dev;
const void *macaddr;
- struct resource *r;
unsigned int i;
int err = -EIO;
const char *phy_mode_str;
@@ -3477,8 +3471,7 @@ static int bcmgenet_probe(struct platform_device *pdev)
macaddr = pd->mac_address;
}
- r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- priv->base = devm_ioremap_resource(&pdev->dev, r);
+ priv->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(priv->base)) {
err = PTR_ERR(priv->base);
goto err;
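devm_platform_ioremap_resource(pdev, index) is a one-call wrapper for the platform_get_resource() + devm_ioremap_resource() pair being removed here and in several later probes in this series (cs89x0, nps_enet, fec). Sketch of the resulting probe shape:

	#include <linux/platform_device.h>
	#include <linux/io.h>

	static int example_probe(struct platform_device *pdev)
	{
		void __iomem *base;

		/* Look up MEM resource 0 and ioremap it, device-managed. */
		base = devm_platform_ioremap_resource(pdev, 0);
		if (IS_ERR(base))
			return PTR_ERR(base);

		/* ... program registers through base ... */
		return 0;
	}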
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index 4c404d2213f9..77f3511b97de 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -18041,8 +18041,7 @@ static void tg3_remove_one(struct pci_dev *pdev)
#ifdef CONFIG_PM_SLEEP
static int tg3_suspend(struct device *device)
{
- struct pci_dev *pdev = to_pci_dev(device);
- struct net_device *dev = pci_get_drvdata(pdev);
+ struct net_device *dev = dev_get_drvdata(device);
struct tg3 *tp = netdev_priv(dev);
int err = 0;
@@ -18098,8 +18097,7 @@ unlock:
static int tg3_resume(struct device *device)
{
- struct pci_dev *pdev = to_pci_dev(device);
- struct net_device *dev = pci_get_drvdata(pdev);
+ struct net_device *dev = dev_get_drvdata(device);
struct tg3 *tp = netdev_priv(dev);
int err = 0;
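Because dev_pm_ops callbacks already receive the struct device, the drvdata can be read directly; to_pci_dev() was only detouring through the pci_dev to reach the same pointer. Hedged sketch (assumes probe stored the net_device with pci_set_drvdata(), which sets the same per-device pointer as dev_set_drvdata()):

	#include <linux/device.h>
	#include <linux/netdevice.h>

	static int example_suspend(struct device *device)
	{
		/* pci_set_drvdata() and dev_get_drvdata() share this pointer. */
		struct net_device *ndev = dev_get_drvdata(device);

		netif_device_detach(ndev);
		return 0;
	}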
diff --git a/drivers/net/ethernet/brocade/bna/bnad.c b/drivers/net/ethernet/brocade/bna/bnad.c
index 7767ae6fa1fd..e338272931d1 100644
--- a/drivers/net/ethernet/brocade/bna/bnad.c
+++ b/drivers/net/ethernet/brocade/bna/bnad.c
@@ -3032,7 +3032,7 @@ bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev)
head_unmap->nvecs++;
for (i = 0, vect_id = 0; i < vectors - 1; i++) {
- const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
+ const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
u32 size = skb_frag_size(frag);
if (unlikely(size == 0)) {
diff --git a/drivers/net/ethernet/calxeda/xgmac.c b/drivers/net/ethernet/calxeda/xgmac.c
index 99f49d059414..f96a42af1014 100644
--- a/drivers/net/ethernet/calxeda/xgmac.c
+++ b/drivers/net/ethernet/calxeda/xgmac.c
@@ -1104,7 +1104,7 @@ static netdev_tx_t xgmac_xmit(struct sk_buff *skb, struct net_device *dev)
for (i = 0; i < nfrags; i++) {
skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
- len = frag->size;
+ len = skb_frag_size(frag);
paddr = skb_frag_dma_map(priv->device, frag, 0, len,
DMA_TO_DEVICE);
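This and the liquidio/nicvf/hns changes below are one API migration: skb_frag_t stops being poked directly (frag->size, frag->page_offset, frag->page.p) in favor of the skb_frag_size()/skb_frag_off()/skb_frag_address()/skb_frag_dma_map() accessors. A sketch of the accessor-only TX mapping loop the drivers converge on:

	#include <linux/skbuff.h>
	#include <linux/dma-mapping.h>

	static int map_tx_frags(struct device *dev, struct sk_buff *skb)
	{
		int i;

		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
			unsigned int len = skb_frag_size(frag);
			dma_addr_t addr;

			/* Maps the frag's page at its offset; no direct
			 * member access.
			 */
			addr = skb_frag_dma_map(dev, frag, 0, len,
						DMA_TO_DEVICE);
			if (dma_mapping_error(dev, addr))
				return -ENOMEM;
			/* ... post addr/len to the hardware ring ... */
		}
		return 0;
	}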
diff --git a/drivers/net/ethernet/cavium/liquidio/lio_main.c b/drivers/net/ethernet/cavium/liquidio/lio_main.c
index eab805579f96..7f3b2e3b0868 100644
--- a/drivers/net/ethernet/cavium/liquidio/lio_main.c
+++ b/drivers/net/ethernet/cavium/liquidio/lio_main.c
@@ -1492,11 +1492,11 @@ static void free_netsgbuf(void *buf)
i = 1;
while (frags--) {
- struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i - 1];
+ skb_frag_t *frag = &skb_shinfo(skb)->frags[i - 1];
pci_unmap_page((lio->oct_dev)->pci_dev,
g->sg[(i >> 2)].ptr[(i & 3)],
- frag->size, DMA_TO_DEVICE);
+ skb_frag_size(frag), DMA_TO_DEVICE);
i++;
}
@@ -1535,11 +1535,11 @@ static void free_netsgbuf_with_resp(void *buf)
i = 1;
while (frags--) {
- struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i - 1];
+ skb_frag_t *frag = &skb_shinfo(skb)->frags[i - 1];
pci_unmap_page((lio->oct_dev)->pci_dev,
g->sg[(i >> 2)].ptr[(i & 3)],
- frag->size, DMA_TO_DEVICE);
+ skb_frag_size(frag), DMA_TO_DEVICE);
i++;
}
@@ -2424,7 +2424,7 @@ static netdev_tx_t liquidio_xmit(struct sk_buff *skb, struct net_device *netdev)
} else {
int i, frags;
- struct skb_frag_struct *frag;
+ skb_frag_t *frag;
struct octnic_gather *g;
spin_lock(&lio->glist_lock[q_idx]);
@@ -2462,11 +2462,9 @@ static netdev_tx_t liquidio_xmit(struct sk_buff *skb, struct net_device *netdev)
frag = &skb_shinfo(skb)->frags[i - 1];
g->sg[(i >> 2)].ptr[(i & 3)] =
- dma_map_page(&oct->pci_dev->dev,
- frag->page.p,
- frag->page_offset,
- frag->size,
- DMA_TO_DEVICE);
+ skb_frag_dma_map(&oct->pci_dev->dev,
+ frag, 0, skb_frag_size(frag),
+ DMA_TO_DEVICE);
if (dma_mapping_error(&oct->pci_dev->dev,
g->sg[i >> 2].ptr[i & 3])) {
@@ -2478,7 +2476,7 @@ static netdev_tx_t liquidio_xmit(struct sk_buff *skb, struct net_device *netdev)
frag = &skb_shinfo(skb)->frags[j - 1];
dma_unmap_page(&oct->pci_dev->dev,
g->sg[j >> 2].ptr[j & 3],
- frag->size,
+ skb_frag_size(frag),
DMA_TO_DEVICE);
}
dev_err(&oct->pci_dev->dev, "%s DMA mapping error 3\n",
@@ -2486,7 +2484,8 @@ static netdev_tx_t liquidio_xmit(struct sk_buff *skb, struct net_device *netdev)
return NETDEV_TX_BUSY;
}
- add_sg_size(&g->sg[(i >> 2)], frag->size, (i & 3));
+ add_sg_size(&g->sg[(i >> 2)], skb_frag_size(frag),
+ (i & 3));
i++;
}
diff --git a/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c b/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c
index db0b90555acb..370d76822ee0 100644
--- a/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c
+++ b/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c
@@ -837,11 +837,11 @@ static void free_netsgbuf(void *buf)
i = 1;
while (frags--) {
- struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i - 1];
+ skb_frag_t *frag = &skb_shinfo(skb)->frags[i - 1];
pci_unmap_page((lio->oct_dev)->pci_dev,
g->sg[(i >> 2)].ptr[(i & 3)],
- frag->size, DMA_TO_DEVICE);
+ skb_frag_size(frag), DMA_TO_DEVICE);
i++;
}
@@ -881,11 +881,11 @@ static void free_netsgbuf_with_resp(void *buf)
i = 1;
while (frags--) {
- struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i - 1];
+ skb_frag_t *frag = &skb_shinfo(skb)->frags[i - 1];
pci_unmap_page((lio->oct_dev)->pci_dev,
g->sg[(i >> 2)].ptr[(i & 3)],
- frag->size, DMA_TO_DEVICE);
+ skb_frag_size(frag), DMA_TO_DEVICE);
i++;
}
@@ -1497,7 +1497,7 @@ static netdev_tx_t liquidio_xmit(struct sk_buff *skb, struct net_device *netdev)
ndata.reqtype = REQTYPE_NORESP_NET;
} else {
- struct skb_frag_struct *frag;
+ skb_frag_t *frag;
struct octnic_gather *g;
int i, frags;
@@ -1535,11 +1535,9 @@ static netdev_tx_t liquidio_xmit(struct sk_buff *skb, struct net_device *netdev)
frag = &skb_shinfo(skb)->frags[i - 1];
g->sg[(i >> 2)].ptr[(i & 3)] =
- dma_map_page(&oct->pci_dev->dev,
- frag->page.p,
- frag->page_offset,
- frag->size,
- DMA_TO_DEVICE);
+ skb_frag_dma_map(&oct->pci_dev->dev,
+ frag, 0, skb_frag_size(frag),
+ DMA_TO_DEVICE);
if (dma_mapping_error(&oct->pci_dev->dev,
g->sg[i >> 2].ptr[i & 3])) {
dma_unmap_single(&oct->pci_dev->dev,
@@ -1550,7 +1548,7 @@ static netdev_tx_t liquidio_xmit(struct sk_buff *skb, struct net_device *netdev)
frag = &skb_shinfo(skb)->frags[j - 1];
dma_unmap_page(&oct->pci_dev->dev,
g->sg[j >> 2].ptr[j & 3],
- frag->size,
+ skb_frag_size(frag),
DMA_TO_DEVICE);
}
dev_err(&oct->pci_dev->dev, "%s DMA mapping error 3\n",
@@ -1558,7 +1556,8 @@ static netdev_tx_t liquidio_xmit(struct sk_buff *skb, struct net_device *netdev)
return NETDEV_TX_BUSY;
}
- add_sg_size(&g->sg[(i >> 2)], frag->size, (i & 3));
+ add_sg_size(&g->sg[(i >> 2)], skb_frag_size(frag),
+ (i & 3));
i++;
}
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_mailbox.c b/drivers/net/ethernet/cavium/liquidio/octeon_mailbox.c
index 021d99cd1665..614d07be7181 100644
--- a/drivers/net/ethernet/cavium/liquidio/octeon_mailbox.c
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_mailbox.c
@@ -260,9 +260,7 @@ static int octeon_mbox_process_cmd(struct octeon_mbox *mbox,
dev_info(&oct->pci_dev->dev,
"got a request for FLR from VF that owns DPI ring %u\n",
mbox->q_no);
- pcie_capability_set_word(
- oct->sriov_info.dpiring_to_vfpcidev_lut[mbox->q_no],
- PCI_EXP_DEVCTL, PCI_EXP_DEVCTL_BCR_FLR);
+ pcie_flr(oct->sriov_info.dpiring_to_vfpcidev_lut[mbox->q_no]);
break;
case OCTEON_PF_CHANGED_VF_MACADDR:
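pcie_flr() waits for pending transactions, sets PCI_EXP_DEVCTL_BCR_FLR, and then delays for the spec-mandated recovery time, so the open-coded capability write above could be dropped without losing the reset semantics. Hedged sketch:

	#include <linux/pci.h>

	/* Reset one VF; the caller must ensure the function is quiesced. */
	static void example_reset_vf(struct pci_dev *vf)
	{
		pcie_flr(vf);
	}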
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
index 192bc92da881..4ab57d33a87e 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
@@ -1588,15 +1588,13 @@ int nicvf_sq_append_skb(struct nicvf *nic, struct snd_queue *sq,
goto doorbell;
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
- const struct skb_frag_struct *frag;
-
- frag = &skb_shinfo(skb)->frags[i];
+ const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
qentry = nicvf_get_nxt_sqentry(sq, qentry);
size = skb_frag_size(frag);
dma_addr = dma_map_page_attrs(&nic->pdev->dev,
skb_frag_page(frag),
- frag->page_offset, size,
+ skb_frag_off(frag), size,
DMA_TO_DEVICE,
DMA_ATTR_SKIP_CPU_SYNC);
if (dma_mapping_error(&nic->pdev->dev, dma_addr)) {
diff --git a/drivers/net/ethernet/chelsio/cxgb3/sge.c b/drivers/net/ethernet/chelsio/cxgb3/sge.c
index 89db739b7819..6dabbf1502c7 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb3/sge.c
@@ -2132,7 +2132,7 @@ static void lro_add_page(struct adapter *adap, struct sge_qset *qs,
struct port_info *pi = netdev_priv(qs->netdev);
struct sk_buff *skb = NULL;
struct cpl_rx_pkt *cpl;
- struct skb_frag_struct *rx_frag;
+ skb_frag_t *rx_frag;
int nr_frags;
int offset = 0;
@@ -2182,7 +2182,7 @@ static void lro_add_page(struct adapter *adap, struct sge_qset *qs,
rx_frag += nr_frags;
__skb_frag_set_page(rx_frag, sd->pg_chunk.page);
- rx_frag->page_offset = sd->pg_chunk.offset + offset;
+ skb_frag_off_set(rx_frag, sd->pg_chunk.offset + offset);
skb_frag_size_set(rx_frag, len);
skb->len += len;
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
index d692251ee252..ae6a47dd7dc9 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
@@ -3531,7 +3531,6 @@ int t4_setup_debugfs(struct adapter *adap)
{
int i;
u32 size = 0;
- struct dentry *de;
static struct t4_debugfs_entry t4_debugfs_files[] = {
{ "cim_la", &cim_la_fops, 0400, 0 },
@@ -3642,8 +3641,8 @@ int t4_setup_debugfs(struct adapter *adap)
}
}
- de = debugfs_create_file_size("flash", 0400, adap->debugfs_root, adap,
- &flash_debugfs_fops, adap->params.sf_size);
+ debugfs_create_file_size("flash", 0400, adap->debugfs_root, adap,
+ &flash_debugfs_fops, adap->params.sf_size);
debugfs_create_bool("use_backdoor", 0600,
adap->debugfs_root, &adap->use_bd);
debugfs_create_bool("trace_rss", 0600,
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index 4311ad9c84b2..71854a19cebe 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -6269,10 +6269,7 @@ static int __init cxgb4_init_module(void)
{
int ret;
- /* Debugfs support is optional, just warn if this fails */
cxgb4_debugfs_root = debugfs_create_dir(KBUILD_MODNAME, NULL);
- if (!cxgb4_debugfs_root)
- pr_warn("could not create debugfs entry, continuing\n");
ret = pci_register_driver(&cxgb4_driver);
if (ret < 0)
diff --git a/drivers/net/ethernet/chelsio/cxgb4/smt.c b/drivers/net/ethernet/chelsio/cxgb4/smt.c
index eaf1fb74689c..01c65d13fc0e 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/smt.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/smt.c
@@ -57,7 +57,7 @@ struct smt_data *t4_init_smt(void)
s->smtab[i].state = SMT_STATE_UNUSED;
memset(&s->smtab[i].src_mac, 0, ETH_ALEN);
spin_lock_init(&s->smtab[i].lock);
- atomic_set(&s->smtab[i].refcnt, 0);
+ s->smtab[i].refcnt = 0;
}
return s;
}
@@ -68,7 +68,7 @@ static struct smt_entry *find_or_alloc_smte(struct smt_data *s, u8 *smac)
struct smt_entry *e, *end;
for (e = &s->smtab[0], end = &s->smtab[s->smt_size]; e != end; ++e) {
- if (atomic_read(&e->refcnt) == 0) {
+ if (e->refcnt == 0) {
if (!first_free)
first_free = e;
} else {
@@ -97,11 +97,9 @@ found_reuse:
static void t4_smte_free(struct smt_entry *e)
{
- spin_lock_bh(&e->lock);
- if (atomic_read(&e->refcnt) == 0) { /* hasn't been recycled */
+ if (e->refcnt == 0) { /* hasn't been recycled */
e->state = SMT_STATE_UNUSED;
}
- spin_unlock_bh(&e->lock);
}
/**
@@ -111,8 +109,10 @@ static void t4_smte_free(struct smt_entry *e)
*/
void cxgb4_smt_release(struct smt_entry *e)
{
- if (atomic_dec_and_test(&e->refcnt))
+ spin_lock_bh(&e->lock);
+ if ((--e->refcnt) == 0)
t4_smte_free(e);
+ spin_unlock_bh(&e->lock);
}
EXPORT_SYMBOL(cxgb4_smt_release);
@@ -215,14 +215,14 @@ static struct smt_entry *t4_smt_alloc_switching(struct adapter *adap, u16 pfvf,
e = find_or_alloc_smte(s, smac);
if (e) {
spin_lock(&e->lock);
- if (!atomic_read(&e->refcnt)) {
- atomic_set(&e->refcnt, 1);
+ if (!e->refcnt) {
+ e->refcnt = 1;
e->state = SMT_STATE_SWITCHING;
e->pfvf = pfvf;
memcpy(e->src_mac, smac, ETH_ALEN);
write_smt_entry(adap, e);
} else {
- atomic_inc(&e->refcnt);
+ ++e->refcnt;
}
spin_unlock(&e->lock);
}
diff --git a/drivers/net/ethernet/chelsio/cxgb4/smt.h b/drivers/net/ethernet/chelsio/cxgb4/smt.h
index d6c2cc271398..1268d6e93a47 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/smt.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/smt.h
@@ -59,7 +59,7 @@ struct smt_entry {
u16 idx;
u16 pfvf;
u8 src_mac[ETH_ALEN];
- atomic_t refcnt;
+ int refcnt;
spinlock_t lock; /* protect smt entry add,removal */
};
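The smt.c/smt.h change trades an atomic_t for a plain int that is only valid under e->lock; every increment and decrement now takes the spinlock, keeping the count change and the state update in one critical section. Sketch of the resulting get/put pair under that locking rule (struct and names illustrative):

	#include <linux/spinlock.h>

	struct example_entry {
		int refcnt;		/* protected by @lock */
		spinlock_t lock;
	};

	static void entry_get(struct example_entry *e)
	{
		spin_lock_bh(&e->lock);
		e->refcnt++;
		spin_unlock_bh(&e->lock);
	}

	static bool entry_put(struct example_entry *e)
	{
		bool last;

		spin_lock_bh(&e->lock);
		last = (--e->refcnt == 0);	/* e.g. mark entry unused here */
		spin_unlock_bh(&e->lock);
		return last;
	}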
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
index 6d4cf3d0b2f0..f6fc0875d5b0 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
@@ -2478,11 +2478,10 @@ static int setup_debugfs(struct adapter *adapter)
* Debugfs support is best effort.
*/
for (i = 0; i < ARRAY_SIZE(debugfs_files); i++)
- (void)debugfs_create_file(debugfs_files[i].name,
- debugfs_files[i].mode,
- adapter->debugfs_root,
- (void *)adapter,
- debugfs_files[i].fops);
+ debugfs_create_file(debugfs_files[i].name,
+ debugfs_files[i].mode,
+ adapter->debugfs_root, (void *)adapter,
+ debugfs_files[i].fops);
return 0;
}
@@ -3257,11 +3256,7 @@ static int cxgb4vf_pci_probe(struct pci_dev *pdev,
adapter->debugfs_root =
debugfs_create_dir(pci_name(pdev),
cxgb4vf_debugfs_root);
- if (IS_ERR_OR_NULL(adapter->debugfs_root))
- dev_warn(&pdev->dev, "could not create debugfs"
- " directory");
- else
- setup_debugfs(adapter);
+ setup_debugfs(adapter);
}
/*
@@ -3486,13 +3481,11 @@ static int __init cxgb4vf_module_init(void)
return -EINVAL;
}
- /* Debugfs support is optional, just warn if this fails */
+ /* Debugfs support is optional; debugfs itself warns if this fails */
cxgb4vf_debugfs_root = debugfs_create_dir(KBUILD_MODNAME, NULL);
- if (IS_ERR_OR_NULL(cxgb4vf_debugfs_root))
- pr_warn("could not create debugfs entry, continuing\n");
ret = pci_register_driver(&cxgb4vf_driver);
- if (ret < 0 && !IS_ERR_OR_NULL(cxgb4vf_debugfs_root))
+ if (ret < 0)
debugfs_remove(cxgb4vf_debugfs_root);
return ret;
}
diff --git a/drivers/net/ethernet/cirrus/cs89x0.c b/drivers/net/ethernet/cirrus/cs89x0.c
index b3e7fafee3df..c9aebcde403a 100644
--- a/drivers/net/ethernet/cirrus/cs89x0.c
+++ b/drivers/net/ethernet/cirrus/cs89x0.c
@@ -1844,16 +1844,12 @@ cleanup_module(void)
static int __init cs89x0_platform_probe(struct platform_device *pdev)
{
struct net_device *dev = alloc_etherdev(sizeof(struct net_local));
- struct net_local *lp;
- struct resource *mem_res;
void __iomem *virt_addr;
int err;
if (!dev)
return -ENOMEM;
- lp = netdev_priv(dev);
-
dev->irq = platform_get_irq(pdev, 0);
if (dev->irq <= 0) {
dev_warn(&dev->dev, "interrupt resource missing\n");
@@ -1861,8 +1857,7 @@ static int __init cs89x0_platform_probe(struct platform_device *pdev)
goto free;
}
- mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- virt_addr = devm_ioremap_resource(&pdev->dev, mem_res);
+ virt_addr = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(virt_addr)) {
err = PTR_ERR(virt_addr);
goto free;
diff --git a/drivers/net/ethernet/cortina/gemini.c b/drivers/net/ethernet/cortina/gemini.c
index 9003eb6716cd..e736ce2c58ca 100644
--- a/drivers/net/ethernet/cortina/gemini.c
+++ b/drivers/net/ethernet/cortina/gemini.c
@@ -1182,9 +1182,8 @@ static int gmac_map_tx_bufs(struct net_device *netdev, struct sk_buff *skb,
buflen = skb_headlen(skb);
} else {
skb_frag = skb_si->frags + frag;
- buffer = page_address(skb_frag_page(skb_frag)) +
- skb_frag->page_offset;
- buflen = skb_frag->size;
+ buffer = skb_frag_address(skb_frag);
+ buflen = skb_frag_size(skb_frag);
}
if (frag == last_frag) {
@@ -2423,10 +2422,8 @@ static int gemini_ethernet_port_probe(struct platform_device *pdev)
/* Interrupt */
irq = platform_get_irq(pdev, 0);
- if (irq <= 0) {
- dev_err(dev, "no IRQ\n");
+ if (irq <= 0)
return irq ? irq : -ENODEV;
- }
port->irq = irq;
/* Clock the port */
diff --git a/drivers/net/ethernet/davicom/dm9000.c b/drivers/net/ethernet/davicom/dm9000.c
index 386bdc1378d1..cce90b5925d9 100644
--- a/drivers/net/ethernet/davicom/dm9000.c
+++ b/drivers/net/ethernet/davicom/dm9000.c
@@ -1500,8 +1500,6 @@ dm9000_probe(struct platform_device *pdev)
ndev->irq = platform_get_irq(pdev, 0);
if (ndev->irq < 0) {
- dev_err(db->dev, "interrupt resource unavailable: %d\n",
- ndev->irq);
ret = ndev->irq;
goto out;
}
diff --git a/drivers/net/ethernet/emulex/benet/be.h b/drivers/net/ethernet/emulex/benet/be.h
index f287b5da5546..cf3e6f2892ff 100644
--- a/drivers/net/ethernet/emulex/benet/be.h
+++ b/drivers/net/ethernet/emulex/benet/be.h
@@ -192,7 +192,6 @@ struct be_eq_obj {
} ____cacheline_aligned_in_smp;
struct be_aic_obj { /* Adaptive interrupt coalescing (AIC) info */
- bool enable;
u32 min_eqd; /* in usecs */
u32 max_eqd; /* in usecs */
u32 prev_eqd; /* in usecs */
@@ -589,6 +588,7 @@ struct be_adapter {
struct be_drv_stats drv_stats;
struct be_aic_obj aic_obj[MAX_EVT_QS];
+ bool aic_enabled;
u8 vlan_prio_bmap; /* Available Priority BitMap */
u16 recommended_prio_bits;/* Recommended Priority bits in vlan tag */
struct be_dma_mem rx_filter; /* Cmd DMA mem for rx-filter */
diff --git a/drivers/net/ethernet/emulex/benet/be_ethtool.c b/drivers/net/ethernet/emulex/benet/be_ethtool.c
index 492f8769ac12..5bb5abf99588 100644
--- a/drivers/net/ethernet/emulex/benet/be_ethtool.c
+++ b/drivers/net/ethernet/emulex/benet/be_ethtool.c
@@ -329,8 +329,8 @@ static int be_get_coalesce(struct net_device *netdev,
et->tx_coalesce_usecs_high = aic->max_eqd;
et->tx_coalesce_usecs_low = aic->min_eqd;
- et->use_adaptive_rx_coalesce = aic->enable;
- et->use_adaptive_tx_coalesce = aic->enable;
+ et->use_adaptive_rx_coalesce = adapter->aic_enabled;
+ et->use_adaptive_tx_coalesce = adapter->aic_enabled;
return 0;
}
@@ -346,8 +346,9 @@ static int be_set_coalesce(struct net_device *netdev,
struct be_eq_obj *eqo;
int i;
+ adapter->aic_enabled = et->use_adaptive_rx_coalesce;
+
for_all_evt_queues(adapter, eqo, i) {
- aic->enable = et->use_adaptive_rx_coalesce;
aic->max_eqd = min(et->rx_coalesce_usecs_high, BE_MAX_EQD);
aic->min_eqd = min(et->rx_coalesce_usecs_low, aic->max_eqd);
aic->et_eqd = min(et->rx_coalesce_usecs, aic->max_eqd);
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index 4d8e40ac66d2..39eb7d525043 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -1014,7 +1014,7 @@ static u32 be_xmit_enqueue(struct be_adapter *adapter, struct be_tx_obj *txo,
}
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
- const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
+ const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
len = skb_frag_size(frag);
busaddr = skb_frag_dma_map(dev, frag, 0, len, DMA_TO_DEVICE);
@@ -2147,7 +2147,7 @@ static int be_get_new_eqd(struct be_eq_obj *eqo)
int i;
aic = &adapter->aic_obj[eqo->idx];
- if (!aic->enable) {
+ if (!adapter->aic_enabled) {
if (aic->jiffies)
aic->jiffies = 0;
eqd = aic->et_eqd;
@@ -2204,7 +2204,7 @@ static u32 be_get_eq_delay_mult_enc(struct be_eq_obj *eqo)
int eqd;
u32 mult_enc;
- if (!aic->enable)
+ if (!adapter->aic_enabled)
return 0;
if (jiffies_to_msecs(now - aic->jiffies) < 1)
@@ -2346,8 +2346,8 @@ static void skb_fill_rx_data(struct be_rx_obj *rxo, struct sk_buff *skb,
memcpy(skb->data, start, hdr_len);
skb_shinfo(skb)->nr_frags = 1;
skb_frag_set_page(skb, 0, page_info->page);
- skb_shinfo(skb)->frags[0].page_offset =
- page_info->page_offset + hdr_len;
+ skb_frag_off_set(&skb_shinfo(skb)->frags[0],
+ page_info->page_offset + hdr_len);
skb_frag_size_set(&skb_shinfo(skb)->frags[0],
curr_frag_len - hdr_len);
skb->data_len = curr_frag_len - hdr_len;
@@ -2372,8 +2372,8 @@ static void skb_fill_rx_data(struct be_rx_obj *rxo, struct sk_buff *skb,
/* Fresh page */
j++;
skb_frag_set_page(skb, j, page_info->page);
- skb_shinfo(skb)->frags[j].page_offset =
- page_info->page_offset;
+ skb_frag_off_set(&skb_shinfo(skb)->frags[j],
+ page_info->page_offset);
skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
skb_shinfo(skb)->nr_frags++;
} else {
@@ -2454,8 +2454,8 @@ static void be_rx_compl_process_gro(struct be_rx_obj *rxo,
/* First frag or Fresh page */
j++;
skb_frag_set_page(skb, j, page_info->page);
- skb_shinfo(skb)->frags[j].page_offset =
- page_info->page_offset;
+ skb_frag_off_set(&skb_shinfo(skb)->frags[j],
+ page_info->page_offset);
skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
} else {
put_page(page_info->page);
@@ -2959,6 +2959,8 @@ static int be_evt_queues_create(struct be_adapter *adapter)
max(adapter->cfg_num_rx_irqs,
adapter->cfg_num_tx_irqs));
+ adapter->aic_enabled = true;
+
for_all_evt_queues(adapter, eqo, i) {
int numa_node = dev_to_node(&adapter->pdev->dev);
@@ -2966,7 +2968,6 @@ static int be_evt_queues_create(struct be_adapter *adapter)
eqo->adapter = adapter;
eqo->idx = i;
aic->max_eqd = BE_MAX_EQD;
- aic->enable = true;
eq = &eqo->q;
rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
diff --git a/drivers/net/ethernet/ezchip/nps_enet.c b/drivers/net/ethernet/ezchip/nps_enet.c
index 027225e1ade2..815fb62c4b02 100644
--- a/drivers/net/ethernet/ezchip/nps_enet.c
+++ b/drivers/net/ethernet/ezchip/nps_enet.c
@@ -576,7 +576,6 @@ static s32 nps_enet_probe(struct platform_device *pdev)
struct nps_enet_priv *priv;
s32 err = 0;
const char *mac_addr;
- struct resource *res_regs;
if (!dev->of_node)
return -ENODEV;
@@ -595,8 +594,7 @@ static s32 nps_enet_probe(struct platform_device *pdev)
/* FIXME :: no multicast support yet */
ndev->flags &= ~IFF_MULTICAST;
- res_regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- priv->regs_base = devm_ioremap_resource(dev, res_regs);
+ priv->regs_base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(priv->regs_base)) {
err = PTR_ERR(priv->regs_base);
goto out_netdev;
diff --git a/drivers/net/ethernet/faraday/Kconfig b/drivers/net/ethernet/faraday/Kconfig
index a9b105803fb7..73e4f2648e49 100644
--- a/drivers/net/ethernet/faraday/Kconfig
+++ b/drivers/net/ethernet/faraday/Kconfig
@@ -32,6 +32,7 @@ config FTGMAC100
depends on ARM || NDS32 || COMPILE_TEST
depends on !64BIT || BROKEN
select PHYLIB
+ select MDIO_ASPEED if MACH_ASPEED_G6
---help---
This driver supports the FTGMAC100 Gigabit Ethernet controller
from Faraday. It is used on Faraday A369, Andes AG102 and some
diff --git a/drivers/net/ethernet/faraday/ftgmac100.c b/drivers/net/ethernet/faraday/ftgmac100.c
index 030fed65393e..9b7af94a40bb 100644
--- a/drivers/net/ethernet/faraday/ftgmac100.c
+++ b/drivers/net/ethernet/faraday/ftgmac100.c
@@ -17,6 +17,7 @@
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/of.h>
+#include <linux/of_mdio.h>
#include <linux/phy.h>
#include <linux/platform_device.h>
#include <linux/property.h>
@@ -774,7 +775,7 @@ static netdev_tx_t ftgmac100_hard_start_xmit(struct sk_buff *skb,
for (i = 0; i < nfrags; i++) {
skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
- len = frag->size;
+ len = skb_frag_size(frag);
/* Map it */
map = skb_frag_dma_map(priv->dev, frag, 0, len,
@@ -1619,8 +1620,13 @@ static int ftgmac100_setup_mdio(struct net_device *netdev)
if (!priv->mii_bus)
return -EIO;
- if (priv->is_aspeed) {
- /* This driver supports the old MDIO interface */
+ if (of_device_is_compatible(np, "aspeed,ast2400-mac") ||
+ of_device_is_compatible(np, "aspeed,ast2500-mac")) {
+ /* The AST2600 has a separate MDIO controller; for the
+ * AST2400 and AST2500 this driver only supports the old
+ * MDIO interface
+ */
reg = ioread32(priv->base + FTGMAC100_OFFSET_REVR);
reg &= ~FTGMAC100_REVR_NEW_MDIO_INTERFACE;
iowrite32(reg, priv->base + FTGMAC100_OFFSET_REVR);
@@ -1797,7 +1803,8 @@ static int ftgmac100_probe(struct platform_device *pdev)
np = pdev->dev.of_node;
if (np && (of_device_is_compatible(np, "aspeed,ast2400-mac") ||
- of_device_is_compatible(np, "aspeed,ast2500-mac"))) {
+ of_device_is_compatible(np, "aspeed,ast2500-mac") ||
+ of_device_is_compatible(np, "aspeed,ast2600-mac"))) {
priv->rxdes0_edorr_mask = BIT(30);
priv->txdes0_edotr_mask = BIT(30);
priv->is_aspeed = true;
@@ -1817,7 +1824,29 @@ static int ftgmac100_probe(struct platform_device *pdev)
priv->ndev = ncsi_register_dev(netdev, ftgmac100_ncsi_handler);
if (!priv->ndev)
goto err_ncsi_dev;
- } else {
+ } else if (np && of_get_property(np, "phy-handle", NULL)) {
+ struct phy_device *phy;
+
+ phy = of_phy_get_and_connect(priv->netdev, np,
+ &ftgmac100_adjust_link);
+ if (!phy) {
+ dev_err(&pdev->dev, "Failed to connect to phy\n");
+ goto err_setup_mdio;
+ }
+
+ /* Indicate that we support PAUSE frames (see comment in
+ * Documentation/networking/phy.txt)
+ */
+ phy_support_asym_pause(phy);
+
+ /* Display what we found */
+ phy_attached_info(phy);
+ } else if (np && !of_get_child_by_name(np, "mdio")) {
+ /* Support legacy ASPEED devicetree descriptions that describe a
+ * MAC with an embedded MDIO controller but have no "mdio"
+ * child node. Automatically scan the MDIO bus for available
+ * PHYs.
+ */
priv->use_ncsi = false;
err = ftgmac100_setup_mdio(netdev);
if (err)
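of_phy_get_and_connect() resolves the node's "phy-handle" property and connects the PHY with the given adjust_link callback, returning NULL on failure; phy_support_asym_pause() then advertises Pause/Asym_Pause support. Condensed sketch of the new branch:

	#include <linux/of_mdio.h>
	#include <linux/phy.h>

	static int connect_dt_phy(struct net_device *ndev,
				  struct device_node *np,
				  void (*adjust_link)(struct net_device *))
	{
		struct phy_device *phy;

		phy = of_phy_get_and_connect(ndev, np, adjust_link);
		if (!phy)
			return -ENODEV;

		phy_support_asym_pause(phy);	/* we can do PAUSE frames */
		phy_attached_info(phy);		/* log what was found */
		return 0;
	}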
diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
index f38c3fa7d705..b4b82b9c5cd6 100644
--- a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
+++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
@@ -485,7 +485,7 @@ static struct dpaa_bp *dpaa_bpid2pool(int bpid)
static bool dpaa_bpid2pool_use(int bpid)
{
if (dpaa_bpid2pool(bpid)) {
- atomic_inc(&dpaa_bp_array[bpid]->refs);
+ refcount_inc(&dpaa_bp_array[bpid]->refs);
return true;
}
@@ -496,7 +496,7 @@ static bool dpaa_bpid2pool_use(int bpid)
static void dpaa_bpid2pool_map(int bpid, struct dpaa_bp *dpaa_bp)
{
dpaa_bp_array[bpid] = dpaa_bp;
- atomic_set(&dpaa_bp->refs, 1);
+ refcount_set(&dpaa_bp->refs, 1);
}
static int dpaa_bp_alloc_pool(struct dpaa_bp *dpaa_bp)
@@ -584,7 +584,7 @@ static void dpaa_bp_free(struct dpaa_bp *dpaa_bp)
if (!bp)
return;
- if (!atomic_dec_and_test(&bp->refs))
+ if (!refcount_dec_and_test(&bp->refs))
return;
if (bp->free_buf_cb)
@@ -1958,7 +1958,7 @@ static int skb_to_sg_fd(struct dpaa_priv *priv,
/* populate the rest of SGT entries */
for (i = 0; i < nr_frags; i++) {
frag = &skb_shinfo(skb)->frags[i];
- frag_len = frag->size;
+ frag_len = skb_frag_size(frag);
WARN_ON(!skb_frag_page(frag));
addr = skb_frag_dma_map(dev, frag, 0,
frag_len, dma_dir);
diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.h b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.h
index af320f83c742..f7e59e8db075 100644
--- a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.h
+++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.h
@@ -32,6 +32,7 @@
#define __DPAA_H
#include <linux/netdevice.h>
+#include <linux/refcount.h>
#include <soc/fsl/qman.h>
#include <soc/fsl/bman.h>
@@ -99,7 +100,7 @@ struct dpaa_bp {
int (*seed_cb)(struct dpaa_bp *);
/* bpool can be emptied before freeing by this cb */
void (*free_buf_cb)(const struct dpaa_bp *, struct bm_buffer *);
- atomic_t refs;
+ refcount_t refs;
};
struct dpaa_rx_errors {
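refcount_t is a drop-in for this use of atomic_t: refcount_set()/refcount_inc()/refcount_dec_and_test() mirror the old calls but saturate and WARN on overflow or increment-from-zero instead of silently wrapping. Minimal sketch of the converted lifecycle:

	#include <linux/refcount.h>

	struct example_pool {
		refcount_t refs;
	};

	static void pool_publish(struct example_pool *p)
	{
		refcount_set(&p->refs, 1);		/* initial reference */
	}

	static void pool_get(struct example_pool *p)
	{
		refcount_inc(&p->refs);			/* WARNs if it was 0 */
	}

	static bool pool_put(struct example_pool *p)
	{
		return refcount_dec_and_test(&p->refs);	/* true: free it */
	}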
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-debugfs.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-debugfs.c
index a027f4a9d0cc..a9afe46b837f 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-debugfs.c
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-debugfs.c
@@ -164,70 +164,30 @@ static const struct file_operations dpaa2_dbg_ch_ops = {
void dpaa2_dbg_add(struct dpaa2_eth_priv *priv)
{
- if (!dpaa2_dbg_root)
- return;
+ struct dentry *dir;
/* Create a directory for the interface */
- priv->dbg.dir = debugfs_create_dir(priv->net_dev->name,
- dpaa2_dbg_root);
- if (!priv->dbg.dir) {
- netdev_err(priv->net_dev, "debugfs_create_dir() failed\n");
- return;
- }
+ dir = debugfs_create_dir(priv->net_dev->name, dpaa2_dbg_root);
+ priv->dbg.dir = dir;
/* per-cpu stats file */
- priv->dbg.cpu_stats = debugfs_create_file("cpu_stats", 0444,
- priv->dbg.dir, priv,
- &dpaa2_dbg_cpu_ops);
- if (!priv->dbg.cpu_stats) {
- netdev_err(priv->net_dev, "debugfs_create_file() failed\n");
- goto err_cpu_stats;
- }
+ debugfs_create_file("cpu_stats", 0444, dir, priv, &dpaa2_dbg_cpu_ops);
/* per-fq stats file */
- priv->dbg.fq_stats = debugfs_create_file("fq_stats", 0444,
- priv->dbg.dir, priv,
- &dpaa2_dbg_fq_ops);
- if (!priv->dbg.fq_stats) {
- netdev_err(priv->net_dev, "debugfs_create_file() failed\n");
- goto err_fq_stats;
- }
+ debugfs_create_file("fq_stats", 0444, dir, priv, &dpaa2_dbg_fq_ops);
/* per-fq stats file */
- priv->dbg.ch_stats = debugfs_create_file("ch_stats", 0444,
- priv->dbg.dir, priv,
- &dpaa2_dbg_ch_ops);
- if (!priv->dbg.fq_stats) {
- netdev_err(priv->net_dev, "debugfs_create_file() failed\n");
- goto err_ch_stats;
- }
-
- return;
-
-err_ch_stats:
- debugfs_remove(priv->dbg.fq_stats);
-err_fq_stats:
- debugfs_remove(priv->dbg.cpu_stats);
-err_cpu_stats:
- debugfs_remove(priv->dbg.dir);
+ debugfs_create_file("ch_stats", 0444, dir, priv, &dpaa2_dbg_ch_ops);
}
void dpaa2_dbg_remove(struct dpaa2_eth_priv *priv)
{
- debugfs_remove(priv->dbg.fq_stats);
- debugfs_remove(priv->dbg.ch_stats);
- debugfs_remove(priv->dbg.cpu_stats);
- debugfs_remove(priv->dbg.dir);
+ debugfs_remove_recursive(priv->dbg.dir);
}
void dpaa2_eth_dbg_init(void)
{
dpaa2_dbg_root = debugfs_create_dir(DPAA2_ETH_DBG_ROOT, NULL);
- if (!dpaa2_dbg_root) {
- pr_err("DPAA2-ETH: debugfs create failed\n");
- return;
- }
-
pr_debug("DPAA2-ETH: debugfs created\n");
}
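Same cleanup theme as the cxgb4/cxgb4vf hunks above: the debugfs creation helpers handle and report failures internally, so the per-file dentries and the error-unwind ladder go away, and one debugfs_remove_recursive() on the directory tears the whole subtree down. Hedged sketch of the resulting shape:

	#include <linux/debugfs.h>

	struct example_priv {
		struct dentry *dbg_dir;
		bool enabled;
	};

	static void example_dbg_add(struct example_priv *priv, const char *name)
	{
		/* No return-value checks: debugfs warns on failure itself. */
		priv->dbg_dir = debugfs_create_dir(name, NULL);
		debugfs_create_bool("enabled", 0600, priv->dbg_dir,
				    &priv->enabled);
	}

	static void example_dbg_remove(struct example_priv *priv)
	{
		debugfs_remove_recursive(priv->dbg_dir);	/* whole subtree */
	}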
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-debugfs.h b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-debugfs.h
index 4f63de997a26..15598b28f03b 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-debugfs.h
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-debugfs.h
@@ -11,9 +11,6 @@ struct dpaa2_eth_priv;
struct dpaa2_debugfs {
struct dentry *dir;
- struct dentry *fq_stats;
- struct dentry *ch_stats;
- struct dentry *cpu_stats;
};
#ifdef CONFIG_DEBUG_FS
diff --git a/drivers/net/ethernet/freescale/enetc/Kconfig b/drivers/net/ethernet/freescale/enetc/Kconfig
index 04a59db03f2b..c219587bd334 100644
--- a/drivers/net/ethernet/freescale/enetc/Kconfig
+++ b/drivers/net/ethernet/freescale/enetc/Kconfig
@@ -20,6 +20,15 @@ config FSL_ENETC_VF
If compiled as module (M), the module name is fsl-enetc-vf.
+config FSL_ENETC_MDIO
+ tristate "ENETC MDIO driver"
+ depends on PCI && (ARCH_LAYERSCAPE || COMPILE_TEST)
+ help
+ This driver supports NXP ENETC Central MDIO controller as a PCIe
+ physical function (PF) device.
+
+ If compiled as module (M), the module name is fsl-enetc-mdio.
+
config FSL_ENETC_PTP_CLOCK
tristate "ENETC PTP clock driver"
depends on PTP_1588_CLOCK_QORIQ && (FSL_ENETC || FSL_ENETC_VF)
diff --git a/drivers/net/ethernet/freescale/enetc/Makefile b/drivers/net/ethernet/freescale/enetc/Makefile
index 7139e414dccf..d200c27c3bf6 100644
--- a/drivers/net/ethernet/freescale/enetc/Makefile
+++ b/drivers/net/ethernet/freescale/enetc/Makefile
@@ -1,19 +1,16 @@
# SPDX-License-Identifier: GPL-2.0
+
+common-objs := enetc.o enetc_cbdr.o enetc_ethtool.o
+
obj-$(CONFIG_FSL_ENETC) += fsl-enetc.o
-fsl-enetc-$(CONFIG_FSL_ENETC) += enetc.o enetc_cbdr.o enetc_ethtool.o \
- enetc_mdio.o
+fsl-enetc-y := enetc_pf.o enetc_mdio.o $(common-objs)
fsl-enetc-$(CONFIG_PCI_IOV) += enetc_msg.o
-fsl-enetc-objs := enetc_pf.o $(fsl-enetc-y)
obj-$(CONFIG_FSL_ENETC_VF) += fsl-enetc-vf.o
+fsl-enetc-vf-y := enetc_vf.o $(common-objs)
-ifeq ($(CONFIG_FSL_ENETC)$(CONFIG_FSL_ENETC_VF), yy)
-fsl-enetc-vf-objs := enetc_vf.o
-else
-fsl-enetc-vf-$(CONFIG_FSL_ENETC_VF) += enetc.o enetc_cbdr.o \
- enetc_ethtool.o
-fsl-enetc-vf-objs := enetc_vf.o $(fsl-enetc-vf-y)
-endif
+obj-$(CONFIG_FSL_ENETC_MDIO) += fsl-enetc-mdio.o
+fsl-enetc-mdio-y := enetc_pci_mdio.o enetc_mdio.o
obj-$(CONFIG_FSL_ENETC_PTP_CLOCK) += fsl-enetc-ptp.o
-fsl-enetc-ptp-$(CONFIG_FSL_ENETC_PTP_CLOCK) += enetc_ptp.o
+fsl-enetc-ptp-y := enetc_ptp.o
diff --git a/drivers/net/ethernet/freescale/enetc/enetc.c b/drivers/net/ethernet/freescale/enetc/enetc.c
index 223709443ea4..b6ff89307409 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc.c
@@ -110,7 +110,7 @@ static int enetc_map_tx_buffs(struct enetc_bdr *tx_ring, struct sk_buff *skb,
int active_offloads)
{
struct enetc_tx_swbd *tx_swbd;
- struct skb_frag_struct *frag;
+ skb_frag_t *frag;
int len = skb_headlen(skb);
union enetc_tx_bd temp_bd;
union enetc_tx_bd *txbd;
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_mdio.c b/drivers/net/ethernet/freescale/enetc/enetc_mdio.c
index 77b9cd10ba2b..149883c8f0b8 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_mdio.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc_mdio.c
@@ -6,18 +6,20 @@
#include <linux/iopoll.h>
#include <linux/of.h>
-#include "enetc_pf.h"
+#include "enetc_mdio.h"
-struct enetc_mdio_regs {
- u32 mdio_cfg; /* MDIO configuration and status */
- u32 mdio_ctl; /* MDIO control */
- u32 mdio_data; /* MDIO data */
- u32 mdio_addr; /* MDIO address */
-};
+#define ENETC_MDIO_REG_OFFSET 0x1c00
+#define ENETC_MDIO_CFG 0x0 /* MDIO configuration and status */
+#define ENETC_MDIO_CTL 0x4 /* MDIO control */
+#define ENETC_MDIO_DATA 0x8 /* MDIO data */
+#define ENETC_MDIO_ADDR 0xc /* MDIO address */
-#define bus_to_enetc_regs(bus) (struct enetc_mdio_regs __iomem *)((bus)->priv)
+#define enetc_mdio_rd(hw, off) \
+ enetc_port_rd(hw, ENETC_##off + ENETC_MDIO_REG_OFFSET)
+#define enetc_mdio_wr(hw, off, val) \
+ enetc_port_wr(hw, ENETC_##off + ENETC_MDIO_REG_OFFSET, val)
+#define enetc_mdio_rd_reg(off) enetc_mdio_rd(hw, off)
-#define ENETC_MDIO_REG_OFFSET 0x1c00
#define ENETC_MDC_DIV 258
#define MDIO_CFG_CLKDIV(x) ((((x) >> 1) & 0xff) << 8)
@@ -33,18 +35,18 @@ struct enetc_mdio_regs {
#define MDIO_DATA(x) ((x) & 0xffff)
#define TIMEOUT 1000
-static int enetc_mdio_wait_complete(struct enetc_mdio_regs __iomem *regs)
+static int enetc_mdio_wait_complete(struct enetc_hw *hw)
{
u32 val;
- return readx_poll_timeout(enetc_rd_reg, &regs->mdio_cfg, val,
+ return readx_poll_timeout(enetc_mdio_rd_reg, MDIO_CFG, val,
!(val & MDIO_CFG_BSY), 10, 10 * TIMEOUT);
}
-static int enetc_mdio_write(struct mii_bus *bus, int phy_id, int regnum,
- u16 value)
+int enetc_mdio_write(struct mii_bus *bus, int phy_id, int regnum, u16 value)
{
- struct enetc_mdio_regs __iomem *regs = bus_to_enetc_regs(bus);
+ struct enetc_mdio_priv *mdio_priv = bus->priv;
+ struct enetc_hw *hw = mdio_priv->hw;
u32 mdio_ctl, mdio_cfg;
u16 dev_addr;
int ret;
@@ -59,38 +61,39 @@ static int enetc_mdio_write(struct mii_bus *bus, int phy_id, int regnum,
mdio_cfg &= ~MDIO_CFG_ENC45;
}
- enetc_wr_reg(&regs->mdio_cfg, mdio_cfg);
+ enetc_mdio_wr(hw, MDIO_CFG, mdio_cfg);
- ret = enetc_mdio_wait_complete(regs);
+ ret = enetc_mdio_wait_complete(hw);
if (ret)
return ret;
/* set port and dev addr */
mdio_ctl = MDIO_CTL_PORT_ADDR(phy_id) | MDIO_CTL_DEV_ADDR(dev_addr);
- enetc_wr_reg(&regs->mdio_ctl, mdio_ctl);
+ enetc_mdio_wr(hw, MDIO_CTL, mdio_ctl);
/* set the register address */
if (regnum & MII_ADDR_C45) {
- enetc_wr_reg(&regs->mdio_addr, regnum & 0xffff);
+ enetc_mdio_wr(hw, MDIO_ADDR, regnum & 0xffff);
- ret = enetc_mdio_wait_complete(regs);
+ ret = enetc_mdio_wait_complete(hw);
if (ret)
return ret;
}
/* write the value */
- enetc_wr_reg(&regs->mdio_data, MDIO_DATA(value));
+ enetc_mdio_wr(hw, MDIO_DATA, MDIO_DATA(value));
- ret = enetc_mdio_wait_complete(regs);
+ ret = enetc_mdio_wait_complete(hw);
if (ret)
return ret;
return 0;
}
-static int enetc_mdio_read(struct mii_bus *bus, int phy_id, int regnum)
+int enetc_mdio_read(struct mii_bus *bus, int phy_id, int regnum)
{
- struct enetc_mdio_regs __iomem *regs = bus_to_enetc_regs(bus);
+ struct enetc_mdio_priv *mdio_priv = bus->priv;
+ struct enetc_hw *hw = mdio_priv->hw;
u32 mdio_ctl, mdio_cfg;
u16 dev_addr, value;
int ret;
@@ -104,41 +107,41 @@ static int enetc_mdio_read(struct mii_bus *bus, int phy_id, int regnum)
mdio_cfg &= ~MDIO_CFG_ENC45;
}
- enetc_wr_reg(&regs->mdio_cfg, mdio_cfg);
+ enetc_mdio_wr(hw, MDIO_CFG, mdio_cfg);
- ret = enetc_mdio_wait_complete(regs);
+ ret = enetc_mdio_wait_complete(hw);
if (ret)
return ret;
/* set port and device addr */
mdio_ctl = MDIO_CTL_PORT_ADDR(phy_id) | MDIO_CTL_DEV_ADDR(dev_addr);
- enetc_wr_reg(&regs->mdio_ctl, mdio_ctl);
+ enetc_mdio_wr(hw, MDIO_CTL, mdio_ctl);
/* set the register address */
if (regnum & MII_ADDR_C45) {
- enetc_wr_reg(&regs->mdio_addr, regnum & 0xffff);
+ enetc_mdio_wr(hw, MDIO_ADDR, regnum & 0xffff);
- ret = enetc_mdio_wait_complete(regs);
+ ret = enetc_mdio_wait_complete(hw);
if (ret)
return ret;
}
/* initiate the read */
- enetc_wr_reg(&regs->mdio_ctl, mdio_ctl | MDIO_CTL_READ);
+ enetc_mdio_wr(hw, MDIO_CTL, mdio_ctl | MDIO_CTL_READ);
- ret = enetc_mdio_wait_complete(regs);
+ ret = enetc_mdio_wait_complete(hw);
if (ret)
return ret;
/* return all Fs if nothing was there */
- if (enetc_rd_reg(&regs->mdio_cfg) & MDIO_CFG_RD_ER) {
+ if (enetc_mdio_rd(hw, MDIO_CFG) & MDIO_CFG_RD_ER) {
dev_dbg(&bus->dev,
"Error while reading PHY%d reg at %d.%hhu\n",
phy_id, dev_addr, regnum);
return 0xffff;
}
- value = enetc_rd_reg(&regs->mdio_data) & 0xffff;
+ value = enetc_mdio_rd(hw, MDIO_DATA) & 0xffff;
return value;
}
@@ -146,12 +149,12 @@ static int enetc_mdio_read(struct mii_bus *bus, int phy_id, int regnum)
int enetc_mdio_probe(struct enetc_pf *pf)
{
struct device *dev = &pf->si->pdev->dev;
- struct enetc_mdio_regs __iomem *regs;
+ struct enetc_mdio_priv *mdio_priv;
struct device_node *np;
struct mii_bus *bus;
- int ret;
+ int err;
- bus = mdiobus_alloc_size(sizeof(regs));
+ bus = devm_mdiobus_alloc_size(dev, sizeof(*mdio_priv));
if (!bus)
return -ENOMEM;
@@ -159,41 +162,31 @@ int enetc_mdio_probe(struct enetc_pf *pf)
bus->read = enetc_mdio_read;
bus->write = enetc_mdio_write;
bus->parent = dev;
+ mdio_priv = bus->priv;
+ mdio_priv->hw = &pf->si->hw;
snprintf(bus->id, MII_BUS_ID_SIZE, "%s", dev_name(dev));
- /* store the enetc mdio base address for this bus */
- regs = pf->si->hw.port + ENETC_MDIO_REG_OFFSET;
- bus->priv = regs;
-
np = of_get_child_by_name(dev->of_node, "mdio");
if (!np) {
dev_err(dev, "MDIO node missing\n");
- ret = -EINVAL;
- goto err_registration;
+ return -EINVAL;
}
- ret = of_mdiobus_register(bus, np);
- if (ret) {
+ err = of_mdiobus_register(bus, np);
+ if (err) {
of_node_put(np);
dev_err(dev, "cannot register MDIO bus\n");
- goto err_registration;
+ return err;
}
of_node_put(np);
pf->mdio = bus;
return 0;
-
-err_registration:
- mdiobus_free(bus);
-
- return ret;
}
void enetc_mdio_remove(struct enetc_pf *pf)
{
- if (pf->mdio) {
+ if (pf->mdio)
mdiobus_unregister(pf->mdio);
- mdiobus_free(pf->mdio);
- }
}
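The rewritten wait helper keeps the same readx_poll_timeout() busy-wait, just reading through the new hw-based accessor instead of a raw register pointer. For reference, a self-contained sketch of the same polling idiom with the plain readl variant (bit position and timeouts illustrative):

	#include <linux/iopoll.h>
	#include <linux/bits.h>
	#include <linux/io.h>

	#define EX_MDIO_CFG_BSY	BIT(0)	/* illustrative busy bit */

	/* Poll a config register every 10us until BSY clears, up to 10ms. */
	static int example_mdio_wait(void __iomem *cfg_reg)
	{
		u32 val;

		return readl_poll_timeout(cfg_reg, val,
					  !(val & EX_MDIO_CFG_BSY),
					  10, 10000);
	}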
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_mdio.h b/drivers/net/ethernet/freescale/enetc/enetc_mdio.h
new file mode 100644
index 000000000000..60c9a3889824
--- /dev/null
+++ b/drivers/net/ethernet/freescale/enetc/enetc_mdio.h
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */
+/* Copyright 2019 NXP */
+
+#include <linux/phy.h>
+#include "enetc_pf.h"
+
+struct enetc_mdio_priv {
+ struct enetc_hw *hw;
+};
+
+int enetc_mdio_write(struct mii_bus *bus, int phy_id, int regnum, u16 value);
+int enetc_mdio_read(struct mii_bus *bus, int phy_id, int regnum);
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_pci_mdio.c b/drivers/net/ethernet/freescale/enetc/enetc_pci_mdio.c
new file mode 100644
index 000000000000..fbd41ce01f06
--- /dev/null
+++ b/drivers/net/ethernet/freescale/enetc/enetc_pci_mdio.c
@@ -0,0 +1,101 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
+/* Copyright 2019 NXP */
+#include <linux/of_mdio.h>
+#include "enetc_mdio.h"
+
+#define ENETC_MDIO_DEV_ID 0xee01
+#define ENETC_MDIO_DEV_NAME "FSL PCIe IE Central MDIO"
+#define ENETC_MDIO_BUS_NAME ENETC_MDIO_DEV_NAME " Bus"
+#define ENETC_MDIO_DRV_NAME ENETC_MDIO_DEV_NAME " driver"
+
+static int enetc_pci_mdio_probe(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ struct enetc_mdio_priv *mdio_priv;
+ struct device *dev = &pdev->dev;
+ struct enetc_hw *hw;
+ struct mii_bus *bus;
+ int err;
+
+ hw = devm_kzalloc(dev, sizeof(*hw), GFP_KERNEL);
+ if (!hw)
+ return -ENOMEM;
+
+ bus = devm_mdiobus_alloc_size(dev, sizeof(*mdio_priv));
+ if (!bus)
+ return -ENOMEM;
+
+ bus->name = ENETC_MDIO_BUS_NAME;
+ bus->read = enetc_mdio_read;
+ bus->write = enetc_mdio_write;
+ bus->parent = dev;
+ mdio_priv = bus->priv;
+ mdio_priv->hw = hw;
+ snprintf(bus->id, MII_BUS_ID_SIZE, "%s", dev_name(dev));
+
+ pcie_flr(pdev);
+ err = pci_enable_device_mem(pdev);
+ if (err) {
+ dev_err(dev, "device enable failed\n");
+ return err;
+ }
+
+ err = pci_request_region(pdev, 0, KBUILD_MODNAME);
+ if (err) {
+ dev_err(dev, "pci_request_region failed\n");
+ goto err_pci_mem_reg;
+ }
+
+ hw->port = pci_iomap(pdev, 0, 0);
+ if (!hw->port) {
+ err = -ENXIO;
+ dev_err(dev, "iomap failed\n");
+ goto err_ioremap;
+ }
+
+ err = of_mdiobus_register(bus, dev->of_node);
+ if (err)
+ goto err_mdiobus_reg;
+
+ pci_set_drvdata(pdev, bus);
+
+ return 0;
+
+err_mdiobus_reg:
+ iounmap(mdio_priv->hw->port);
+err_ioremap:
+ pci_release_mem_regions(pdev);
+err_pci_mem_reg:
+ pci_disable_device(pdev);
+
+ return err;
+}
+
+static void enetc_pci_mdio_remove(struct pci_dev *pdev)
+{
+ struct mii_bus *bus = pci_get_drvdata(pdev);
+ struct enetc_mdio_priv *mdio_priv;
+
+ mdiobus_unregister(bus);
+ mdio_priv = bus->priv;
+ iounmap(mdio_priv->hw->port);
+ pci_release_mem_regions(pdev);
+ pci_disable_device(pdev);
+}
+
+static const struct pci_device_id enetc_pci_mdio_id_table[] = {
+ { PCI_DEVICE(PCI_VENDOR_ID_FREESCALE, ENETC_MDIO_DEV_ID) },
+ { 0, } /* End of table. */
+};
+MODULE_DEVICE_TABLE(pci, enetc_pci_mdio_id_table);
+
+static struct pci_driver enetc_pci_mdio_driver = {
+ .name = KBUILD_MODNAME,
+ .id_table = enetc_pci_mdio_id_table,
+ .probe = enetc_pci_mdio_probe,
+ .remove = enetc_pci_mdio_remove,
+};
+module_pci_driver(enetc_pci_mdio_driver);
+
+MODULE_DESCRIPTION(ENETC_MDIO_DRV_NAME);
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_pf.c b/drivers/net/ethernet/freescale/enetc/enetc_pf.c
index 258b3cb38a6f..7d6513ff8507 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_pf.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc_pf.c
@@ -750,6 +750,7 @@ static int enetc_of_get_phy(struct enetc_ndev_priv *priv)
{
struct enetc_pf *pf = enetc_si_priv(priv->si);
struct device_node *np = priv->dev->of_node;
+ struct device_node *mdio_np;
int err;
if (!np) {
@@ -773,7 +774,9 @@ static int enetc_of_get_phy(struct enetc_ndev_priv *priv)
priv->phy_node = of_node_get(np);
}
- if (!of_phy_is_fixed_link(np)) {
+ mdio_np = of_get_child_by_name(np, "mdio");
+ if (mdio_np) {
+ of_node_put(mdio_np);
err = enetc_mdio_probe(pf);
if (err) {
of_node_put(priv->phy_node);
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index e5610a4da539..d4d4c72adf49 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -208,8 +208,11 @@ MODULE_PARM_DESC(macaddr, "FEC Ethernet MAC address");
/* FEC MII MMFR bits definition */
#define FEC_MMFR_ST (1 << 30)
+#define FEC_MMFR_ST_C45 (0)
#define FEC_MMFR_OP_READ (2 << 28)
+#define FEC_MMFR_OP_READ_C45 (3 << 28)
#define FEC_MMFR_OP_WRITE (1 << 28)
+#define FEC_MMFR_OP_ADDR_WRITE (0)
#define FEC_MMFR_PA(v) ((v & 0x1f) << 23)
#define FEC_MMFR_RA(v) ((v & 0x1f) << 18)
#define FEC_MMFR_TA (2 << 16)
@@ -365,7 +368,7 @@ fec_enet_txq_submit_frag_skb(struct fec_enet_priv_tx_q *txq,
status = fec16_to_cpu(bdp->cbd_sc);
status &= ~BD_ENET_TX_STATS;
status |= (BD_ENET_TX_TC | BD_ENET_TX_READY);
- frag_len = skb_shinfo(skb)->frags[frag].size;
+ frag_len = skb_frag_size(&skb_shinfo(skb)->frags[frag]);
/* Handle the last BD specially */
if (frag == nr_frags - 1) {
@@ -387,7 +390,7 @@ fec_enet_txq_submit_frag_skb(struct fec_enet_priv_tx_q *txq,
ebdp->cbd_esc = cpu_to_fec32(estatus);
}
- bufaddr = page_address(this_frag->page.p) + this_frag->page_offset;
+ bufaddr = skb_frag_address(this_frag);
index = fec_enet_get_bd_index(bdp, &txq->bd);
if (((unsigned long) bufaddr) & fep->tx_align ||
@@ -1767,7 +1770,8 @@ static int fec_enet_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
struct fec_enet_private *fep = bus->priv;
struct device *dev = &fep->pdev->dev;
unsigned long time_left;
- int ret = 0;
+ int ret = 0, frame_start, frame_addr, frame_op;
+ bool is_c45 = !!(regnum & MII_ADDR_C45);
ret = pm_runtime_get_sync(dev);
if (ret < 0)
@@ -1775,9 +1779,37 @@ static int fec_enet_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
reinit_completion(&fep->mdio_done);
+ if (is_c45) {
+ frame_start = FEC_MMFR_ST_C45;
+
+ /* write address */
+ frame_addr = (regnum >> 16);
+ writel(frame_start | FEC_MMFR_OP_ADDR_WRITE |
+ FEC_MMFR_PA(mii_id) | FEC_MMFR_RA(frame_addr) |
+ FEC_MMFR_TA | (regnum & 0xFFFF),
+ fep->hwp + FEC_MII_DATA);
+
+ /* wait for end of transfer */
+ time_left = wait_for_completion_timeout(&fep->mdio_done,
+ usecs_to_jiffies(FEC_MII_TIMEOUT));
+ if (time_left == 0) {
+ netdev_err(fep->netdev, "MDIO address write timeout\n");
+ ret = -ETIMEDOUT;
+ goto out;
+ }
+
+ frame_op = FEC_MMFR_OP_READ_C45;
+
+ } else {
+ /* C22 read */
+ frame_op = FEC_MMFR_OP_READ;
+ frame_start = FEC_MMFR_ST;
+ frame_addr = regnum;
+ }
+
/* start a read op */
- writel(FEC_MMFR_ST | FEC_MMFR_OP_READ |
- FEC_MMFR_PA(mii_id) | FEC_MMFR_RA(regnum) |
+ writel(frame_start | frame_op |
+ FEC_MMFR_PA(mii_id) | FEC_MMFR_RA(frame_addr) |
FEC_MMFR_TA, fep->hwp + FEC_MII_DATA);
/* wait for end of transfer */
@@ -1804,7 +1836,8 @@ static int fec_enet_mdio_write(struct mii_bus *bus, int mii_id, int regnum,
struct fec_enet_private *fep = bus->priv;
struct device *dev = &fep->pdev->dev;
unsigned long time_left;
- int ret;
+ int ret, frame_start, frame_addr;
+ bool is_c45 = !!(regnum & MII_ADDR_C45);
ret = pm_runtime_get_sync(dev);
if (ret < 0)
@@ -1814,9 +1847,33 @@ static int fec_enet_mdio_write(struct mii_bus *bus, int mii_id, int regnum,
reinit_completion(&fep->mdio_done);
+ if (is_c45) {
+ frame_start = FEC_MMFR_ST_C45;
+
+ /* write address */
+ frame_addr = (regnum >> 16);
+ writel(frame_start | FEC_MMFR_OP_ADDR_WRITE |
+ FEC_MMFR_PA(mii_id) | FEC_MMFR_RA(frame_addr) |
+ FEC_MMFR_TA | (regnum & 0xFFFF),
+ fep->hwp + FEC_MII_DATA);
+
+ /* wait for end of transfer */
+ time_left = wait_for_completion_timeout(&fep->mdio_done,
+ usecs_to_jiffies(FEC_MII_TIMEOUT));
+ if (time_left == 0) {
+ netdev_err(fep->netdev, "MDIO address write timeout\n");
+ ret = -ETIMEDOUT;
+ goto out;
+ }
+ } else {
+ /* C22 write */
+ frame_start = FEC_MMFR_ST;
+ frame_addr = regnum;
+ }
+
/* start a write op */
- writel(FEC_MMFR_ST | FEC_MMFR_OP_WRITE |
- FEC_MMFR_PA(mii_id) | FEC_MMFR_RA(regnum) |
+ writel(frame_start | FEC_MMFR_OP_WRITE |
+ FEC_MMFR_PA(mii_id) | FEC_MMFR_RA(frame_addr) |
FEC_MMFR_TA | FEC_MMFR_DATA(value),
fep->hwp + FEC_MII_DATA);
@@ -1828,6 +1885,7 @@ static int fec_enet_mdio_write(struct mii_bus *bus, int mii_id, int regnum,
ret = -ETIMEDOUT;
}
+out:
pm_runtime_mark_last_busy(dev);
pm_runtime_put_autosuspend(dev);
@@ -3338,7 +3396,6 @@ fec_probe(struct platform_device *pdev)
struct fec_platform_data *pdata;
struct net_device *ndev;
int i, irq, ret = 0;
- struct resource *r;
const struct of_device_id *of_id;
static int dev_id;
struct device_node *np = pdev->dev.of_node, *phy_node;
@@ -3378,8 +3435,7 @@ fec_probe(struct platform_device *pdev)
/* Select default pin state */
pinctrl_pm_select_default_state(&pdev->dev);
- r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- fep->hwp = devm_ioremap_resource(&pdev->dev, r);
+ fep->hwp = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(fep->hwp)) {
ret = PTR_ERR(fep->hwp);
goto failed_ioremap;
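
devm_platform_ioremap_resource() is a thin convenience wrapper; its rough shape (paraphrased, not a verbatim copy of the driver core) is exactly what the two deleted lines used to spell out:

    /* Rough shape of the helper (paraphrased): it folds the resource
     * lookup and the devm mapping into one call, which is why the
     * local 'struct resource *r' goes away above.
     */
    void __iomem *devm_platform_ioremap_resource(struct platform_device *pdev,
                                                 unsigned int index)
    {
        struct resource *res;

        res = platform_get_resource(pdev, IORESOURCE_MEM, index);
        return devm_ioremap_resource(&pdev->dev, res);
    }
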
diff --git a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
index 5fad73b2e123..3981c06f082f 100644
--- a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
+++ b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
@@ -501,7 +501,7 @@ fs_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
nr_frags = skb_shinfo(skb)->nr_frags;
frag = skb_shinfo(skb)->frags;
for (i = 0; i < nr_frags; i++, frag++) {
- if (!IS_ALIGNED(frag->page_offset, 4)) {
+ if (!IS_ALIGNED(skb_frag_off(frag), 4)) {
is_aligned = 0;
break;
}
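
skb_frag_off() — like the skb_frag_size()/skb_frag_address() conversions in the hunks above — replaces direct field access with an accessor so the frag layout can change underneath without touching drivers. A standalone sketch of the idea, using a stand-in struct rather than the real skb_frag_t:

    /* Standalone sketch of the accessor pattern; 'struct frag' is a
     * stand-in, not the real skb_frag_t. IS_ALIGNED() is the usual
     * power-of-two alignment test.
     */
    #include <stdio.h>

    #define IS_ALIGNED(x, a) (((x) & ((a) - 1)) == 0)

    struct frag { unsigned int off, size; };     /* stand-in layout */

    static unsigned int frag_off(const struct frag *f)
    {
        return f->off;                  /* callers never touch ->off */
    }

    int main(void)
    {
        struct frag frags[] = { { 0, 512 }, { 6, 256 } };

        for (unsigned int i = 0; i < 2; i++)
            printf("frag %u: %s\n", i,
                   IS_ALIGNED(frag_off(&frags[i]), 4) ?
                   "aligned" : "unaligned");
        return 0;                       /* aligned, then unaligned */
    }
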
diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c
index 7ea19e173339..412c0340fed9 100644
--- a/drivers/net/ethernet/freescale/gianfar.c
+++ b/drivers/net/ethernet/freescale/gianfar.c
@@ -2005,8 +2005,7 @@ static void free_skb_rx_queue(struct gfar_priv_rx_q *rx_queue)
struct rxbd8 *rxbdp = rx_queue->rx_bd_base;
- if (rx_queue->skb)
- dev_kfree_skb(rx_queue->skb);
+ dev_kfree_skb(rx_queue->skb);
for (i = 0; i < rx_queue->rx_ring_size; i++) {
struct gfar_rx_buff *rxb = &rx_queue->rx_buff[i];
diff --git a/drivers/net/ethernet/hisilicon/hisi_femac.c b/drivers/net/ethernet/hisilicon/hisi_femac.c
index 689f18e3100f..90ab7ade44c4 100644
--- a/drivers/net/ethernet/hisilicon/hisi_femac.c
+++ b/drivers/net/ethernet/hisilicon/hisi_femac.c
@@ -877,7 +877,6 @@ static int hisi_femac_drv_probe(struct platform_device *pdev)
ndev->irq = platform_get_irq(pdev, 0);
if (ndev->irq <= 0) {
- dev_err(dev, "No irq resource\n");
ret = -ENODEV;
goto out_disconnect_phy;
}
diff --git a/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c b/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c
index 349970557c52..95a6b0926170 100644
--- a/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c
+++ b/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c
@@ -719,7 +719,7 @@ static int hix5hd2_fill_sg_desc(struct hix5hd2_priv *priv,
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
- int len = frag->size;
+ int len = skb_frag_size(frag);
addr = skb_frag_dma_map(priv->dev, frag, 0, len, DMA_TO_DEVICE);
ret = dma_mapping_error(priv->dev, addr);
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_enet.c b/drivers/net/ethernet/hisilicon/hns/hns_enet.c
index 2235dd55fab2..a48396dd4ebb 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_enet.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_enet.c
@@ -245,7 +245,7 @@ static int hns_nic_maybe_stop_tso(
int frag_num;
struct sk_buff *skb = *out_skb;
struct sk_buff *new_skb = NULL;
- struct skb_frag_struct *frag;
+ skb_frag_t *frag;
size = skb_headlen(skb);
buf_num = (size + BD_MAX_SEND_SIZE - 1) / BD_MAX_SEND_SIZE;
@@ -309,7 +309,7 @@ netdev_tx_t hns_nic_net_xmit_hw(struct net_device *ndev,
struct hnae_ring *ring = ring_data->ring;
struct device *dev = ring_to_dev(ring);
struct netdev_queue *dev_queue;
- struct skb_frag_struct *frag;
+ skb_frag_t *frag;
int buf_num;
int seg_num;
dma_addr_t dma;
@@ -1182,6 +1182,8 @@ int hns_nic_init_phy(struct net_device *ndev, struct hnae_handle *h)
if (unlikely(ret))
return -ENODEV;
+ phy_attached_info(phy_dev);
+
return 0;
}
diff --git a/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h b/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h
index 75329ab775a6..f8a87f8ca983 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h
@@ -47,6 +47,8 @@ enum HCLGE_MBX_OPCODE {
HCLGE_MBX_GET_MEDIA_TYPE, /* (VF -> PF) get media type */
HCLGE_MBX_GET_VF_FLR_STATUS = 200, /* (M7 -> PF) get vf reset status */
+ HCLGE_MBX_PUSH_LINK_STATUS, /* (M7 -> PF) get port link status */
+ HCLGE_MBX_NCSI_ERROR, /* (M7 -> PF) receive an NCSI error */
};
/* below are per-VF mac-vlan subcodes */
diff --git a/drivers/net/ethernet/hisilicon/hns3/hnae3.c b/drivers/net/ethernet/hisilicon/hns3/hnae3.c
index 908d4f45c06a..528f6243cdc6 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hnae3.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hnae3.c
@@ -46,7 +46,7 @@ void hnae3_set_client_init_flag(struct hnae3_client *client,
EXPORT_SYMBOL(hnae3_set_client_init_flag);
static int hnae3_get_client_init_flag(struct hnae3_client *client,
- struct hnae3_ae_dev *ae_dev)
+ struct hnae3_ae_dev *ae_dev)
{
int inited = 0;
@@ -104,7 +104,6 @@ int hnae3_register_client(struct hnae3_client *client)
{
struct hnae3_client *client_tmp;
struct hnae3_ae_dev *ae_dev;
- int ret = 0;
if (!client)
return -ENODEV;
@@ -123,7 +122,7 @@ int hnae3_register_client(struct hnae3_client *client)
/* if the client could not be initialized on the current port, for
* any error reason, move on to the next available port
*/
- ret = hnae3_init_client_instance(client, ae_dev);
+ int ret = hnae3_init_client_instance(client, ae_dev);
if (ret)
dev_err(&ae_dev->pdev->dev,
"match and instantiation failed for port, ret = %d\n",
@@ -164,7 +163,7 @@ void hnae3_register_ae_algo(struct hnae3_ae_algo *ae_algo)
const struct pci_device_id *id;
struct hnae3_ae_dev *ae_dev;
struct hnae3_client *client;
- int ret = 0;
+ int ret;
if (!ae_algo)
return;
@@ -258,7 +257,7 @@ int hnae3_register_ae_dev(struct hnae3_ae_dev *ae_dev)
const struct pci_device_id *id;
struct hnae3_ae_algo *ae_algo;
struct hnae3_client *client;
- int ret = 0;
+ int ret;
if (!ae_dev)
return -ENODEV;
diff --git a/drivers/net/ethernet/hisilicon/hns3/hnae3.h b/drivers/net/ethernet/hisilicon/hns3/hnae3.h
index 48c7b70fc2c4..3e21533a2bf5 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hnae3.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hnae3.h
@@ -58,10 +58,10 @@
BIT(HNAE3_DEV_SUPPORT_ROCE_B))
#define hnae3_dev_roce_supported(hdev) \
- hnae3_get_bit(hdev->ae_dev->flag, HNAE3_DEV_SUPPORT_ROCE_B)
+ hnae3_get_bit((hdev)->ae_dev->flag, HNAE3_DEV_SUPPORT_ROCE_B)
#define hnae3_dev_dcb_supported(hdev) \
- hnae3_get_bit(hdev->ae_dev->flag, HNAE3_DEV_SUPPORT_DCB_B)
+ hnae3_get_bit((hdev)->ae_dev->flag, HNAE3_DEV_SUPPORT_DCB_B)
#define hnae3_dev_fd_supported(hdev) \
hnae3_get_bit((hdev)->ae_dev->flag, HNAE3_DEV_SUPPORT_FD_B)
@@ -85,13 +85,18 @@ struct hnae3_queue {
void __iomem *io_base;
struct hnae3_ae_algo *ae_algo;
struct hnae3_handle *handle;
- int tqp_index; /* index in a handle */
- u32 buf_size; /* size for hnae_desc->addr, preset by AE */
- u16 tx_desc_num;/* total number of tx desc */
- u16 rx_desc_num;/* total number of rx desc */
+ int tqp_index; /* index in a handle */
+ u32 buf_size; /* size for hnae_desc->addr, preset by AE */
+ u16 tx_desc_num; /* total number of tx desc */
+ u16 rx_desc_num; /* total number of rx desc */
};
-/*hnae3 loop mode*/
+struct hns3_mac_stats {
+ u64 tx_pause_cnt;
+ u64 rx_pause_cnt;
+};
+
+/* hnae3 loop mode */
enum hnae3_loop {
HNAE3_LOOP_APP,
HNAE3_LOOP_SERIAL_SERDES,
@@ -179,6 +184,15 @@ struct hnae3_vector_info {
#define HNAE3_RING_GL_RX 0
#define HNAE3_RING_GL_TX 1
+#define HNAE3_FW_VERSION_BYTE3_SHIFT 24
+#define HNAE3_FW_VERSION_BYTE3_MASK GENMASK(31, 24)
+#define HNAE3_FW_VERSION_BYTE2_SHIFT 16
+#define HNAE3_FW_VERSION_BYTE2_MASK GENMASK(23, 16)
+#define HNAE3_FW_VERSION_BYTE1_SHIFT 8
+#define HNAE3_FW_VERSION_BYTE1_MASK GENMASK(15, 8)
+#define HNAE3_FW_VERSION_BYTE0_SHIFT 0
+#define HNAE3_FW_VERSION_BYTE0_MASK GENMASK(7, 0)
+
struct hnae3_ring_chain_node {
struct hnae3_ring_chain_node *next;
u32 tqp_index;
@@ -289,6 +303,8 @@ struct hnae3_ae_dev {
* Remove multicast address from mac table
* update_stats()
* Update Old network device statistics
+ * get_mac_stats()
+ * get mac pause statistics including tx_cnt and rx_cnt
* get_ethtool_stats()
* Get ethtool network device statistics
* get_strings()
@@ -417,8 +433,8 @@ struct hnae3_ae_ops {
void (*update_stats)(struct hnae3_handle *handle,
struct net_device_stats *net_stats);
void (*get_stats)(struct hnae3_handle *handle, u64 *data);
- void (*get_mac_pause_stats)(struct hnae3_handle *handle, u64 *tx_cnt,
- u64 *rx_cnt);
+ void (*get_mac_stats)(struct hnae3_handle *handle,
+ struct hns3_mac_stats *mac_stats);
void (*get_strings)(struct hnae3_handle *handle,
u32 stringset, u8 *data);
int (*get_sset_count)(struct hnae3_handle *handle, int stringset);
@@ -605,7 +621,7 @@ struct hnae3_handle {
struct pci_dev *pdev;
void *priv;
struct hnae3_ae_algo *ae_algo; /* the class who provides this handle */
- u64 flags; /* Indicate the capabilities for this handle*/
+ u64 flags; /* Indicate the capabilities for this handle */
union {
struct net_device *netdev; /* first member */
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c b/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c
index a4b937286f55..7070d25ddb5b 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c
@@ -8,6 +8,7 @@
#include "hns3_enet.h"
#define HNS3_DBG_READ_LEN 256
+#define HNS3_DBG_WRITE_LEN 1024
static struct dentry *hns3_dbgfs_root;
@@ -322,6 +323,9 @@ static ssize_t hns3_dbg_cmd_write(struct file *filp, const char __user *buffer,
test_bit(HNS3_NIC_STATE_RESETTING, &priv->state))
return 0;
+ if (count > HNS3_DBG_WRITE_LEN)
+ return -ENOSPC;
+
cmd_buf = kzalloc(count + 1, GFP_KERNEL);
if (!cmd_buf)
return count;
@@ -372,20 +376,11 @@ static const struct file_operations hns3_dbg_cmd_fops = {
void hns3_dbg_init(struct hnae3_handle *handle)
{
const char *name = pci_name(handle->pdev);
- struct dentry *pfile;
handle->hnae3_dbgfs = debugfs_create_dir(name, hns3_dbgfs_root);
- if (!handle->hnae3_dbgfs)
- return;
- pfile = debugfs_create_file("cmd", 0600, handle->hnae3_dbgfs, handle,
- &hns3_dbg_cmd_fops);
- if (!pfile) {
- debugfs_remove_recursive(handle->hnae3_dbgfs);
- handle->hnae3_dbgfs = NULL;
- dev_warn(&handle->pdev->dev, "create file for %s fail\n",
- name);
- }
+ debugfs_create_file("cmd", 0600, handle->hnae3_dbgfs, handle,
+ &hns3_dbg_cmd_fops);
}
void hns3_dbg_uninit(struct hnae3_handle *handle)
@@ -397,10 +392,6 @@ void hns3_dbg_uninit(struct hnae3_handle *handle)
void hns3_dbg_register_debugfs(const char *debugfs_dir_name)
{
hns3_dbgfs_root = debugfs_create_dir(debugfs_dir_name, NULL);
- if (!hns3_dbgfs_root) {
- pr_warn("Register debugfs for %s fail\n", debugfs_dir_name);
- return;
- }
}
void hns3_dbg_unregister_debugfs(void)
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
index 310afa708831..a11d514f7a25 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
@@ -28,6 +28,12 @@
#define hns3_set_field(origin, shift, val) ((origin) |= ((val) << (shift)))
#define hns3_tx_bd_count(S) DIV_ROUND_UP(S, HNS3_MAX_BD_SIZE)
+#define hns3_rl_err(fmt, ...) \
+ do { \
+ if (net_ratelimit()) \
+ netdev_err(fmt, ##__VA_ARGS__); \
+ } while (0)
+
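
The hns3_rl_err() helper just wraps the guard-then-print idiom so callers stay one line. A standalone sketch of the same idiom, with a toy per-second limiter standing in for the kernel's net_ratelimit():

    /* Standalone sketch of a guard-then-print macro; toy_ratelimit()
     * stands in for net_ratelimit() and allows ~10 messages/second.
     * Uses GNU-style ##__VA_ARGS__, as the driver macro does.
     */
    #include <stdio.h>
    #include <time.h>

    static int toy_ratelimit(void)
    {
        static time_t last;
        static int burst;
        time_t now = time(NULL);

        if (now != last) {
            last = now;
            burst = 0;
        }
        return burst++ < 10;
    }

    #define rl_err(fmt, ...) \
        do { \
            if (toy_ratelimit()) \
                fprintf(stderr, fmt, ##__VA_ARGS__); \
        } while (0)

    int main(void)
    {
        for (int i = 0; i < 100; i++)
            rl_err("error %d\n", i);  /* only the first ~10 get through */
        return 0;
    }
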
static void hns3_clear_all_ring(struct hnae3_handle *h, bool force);
static void hns3_remove_hw_addr(struct net_device *netdev);
@@ -45,6 +51,9 @@ MODULE_PARM_DESC(debug, " Network interface message level setting");
#define DEFAULT_MSG_LEVEL (NETIF_MSG_PROBE | NETIF_MSG_LINK | \
NETIF_MSG_IFDOWN | NETIF_MSG_IFUP)
+#define HNS3_INNER_VLAN_TAG 1
+#define HNS3_OUTER_VLAN_TAG 2
+
/* hns3_pci_tbl - PCI Device ID Table
*
* Last entry must be all 0s
@@ -220,9 +229,9 @@ static void hns3_vector_gl_rl_init(struct hns3_enet_tqp_vector *tqp_vector,
/* initialize the configuration for interrupt coalescing.
* 1. GL (Interrupt Gap Limiter)
* 2. RL (Interrupt Rate Limiter)
+ *
+ * Default: enable interrupt coalescing self-adaptive and GL
*/
-
- /* Default: enable interrupt coalescing self-adaptive and GL */
tqp_vector->tx_group.coal.gl_adapt_enable = 1;
tqp_vector->rx_group.coal.gl_adapt_enable = 1;
@@ -459,6 +468,9 @@ static int hns3_nic_net_open(struct net_device *netdev)
h->ae_algo->ops->set_timer_task(priv->ae_handle, true);
hns3_config_xps(priv);
+
+ netif_dbg(h, drv, netdev, "net open\n");
+
return 0;
}
@@ -519,6 +531,8 @@ static int hns3_nic_net_stop(struct net_device *netdev)
if (test_and_set_bit(HNS3_NIC_STATE_DOWN, &priv->state))
return 0;
+ netif_dbg(h, drv, netdev, "net stop\n");
+
if (h->ae_algo->ops->set_timer_task)
h->ae_algo->ops->set_timer_task(priv->ae_handle, false);
@@ -956,16 +970,16 @@ static void hns3_set_txbd_baseinfo(u16 *bdtp_fe_sc_vld_ra_ri, int frag_end)
hns3_set_field(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_VLD_B, 1U);
}
-static int hns3_fill_desc_vtags(struct sk_buff *skb,
- struct hns3_enet_ring *tx_ring,
- u32 *inner_vlan_flag,
- u32 *out_vlan_flag,
- u16 *inner_vtag,
- u16 *out_vtag)
+static int hns3_handle_vtags(struct hns3_enet_ring *tx_ring,
+ struct sk_buff *skb)
{
-#define HNS3_TX_VLAN_PRIO_SHIFT 13
-
struct hnae3_handle *handle = tx_ring->tqp->handle;
+ struct vlan_ethhdr *vhdr;
+ int rc;
+
+ if (!(skb->protocol == htons(ETH_P_8021Q) ||
+ skb_vlan_tag_present(skb)))
+ return 0;
/* Due to a HW limitation, if port-based VLAN insertion is enabled, only
* one VLAN header is allowed in the skb, otherwise it will cause a RAS error.
@@ -976,8 +990,7 @@ static int hns3_fill_desc_vtags(struct sk_buff *skb,
return -EINVAL;
if (skb->protocol == htons(ETH_P_8021Q) &&
- !(tx_ring->tqp->handle->kinfo.netdev->features &
- NETIF_F_HW_VLAN_CTAG_TX)) {
+ !(handle->kinfo.netdev->features & NETIF_F_HW_VLAN_CTAG_TX)) {
/* When HW VLAN acceleration is turned off, and the stack
* sets the protocol to 802.1q, the driver just needs to
* set the protocol to the encapsulated ethertype.
@@ -987,45 +1000,107 @@ static int hns3_fill_desc_vtags(struct sk_buff *skb,
}
if (skb_vlan_tag_present(skb)) {
- u16 vlan_tag;
-
- vlan_tag = skb_vlan_tag_get(skb);
- vlan_tag |= (skb->priority & 0x7) << HNS3_TX_VLAN_PRIO_SHIFT;
-
/* Based on hw strategy, use out_vtag in two layer tag case,
* and use inner_vtag in one tag case.
*/
- if (skb->protocol == htons(ETH_P_8021Q)) {
- if (handle->port_base_vlan_state ==
- HNAE3_PORT_BASE_VLAN_DISABLE){
- hns3_set_field(*out_vlan_flag,
- HNS3_TXD_OVLAN_B, 1);
- *out_vtag = vlan_tag;
- } else {
- hns3_set_field(*inner_vlan_flag,
- HNS3_TXD_VLAN_B, 1);
- *inner_vtag = vlan_tag;
- }
- } else {
- hns3_set_field(*inner_vlan_flag, HNS3_TXD_VLAN_B, 1);
- *inner_vtag = vlan_tag;
- }
- } else if (skb->protocol == htons(ETH_P_8021Q)) {
- struct vlan_ethhdr *vhdr;
- int rc;
+ if (skb->protocol == htons(ETH_P_8021Q) &&
+ handle->port_base_vlan_state ==
+ HNAE3_PORT_BASE_VLAN_DISABLE)
+ rc = HNS3_OUTER_VLAN_TAG;
+ else
+ rc = HNS3_INNER_VLAN_TAG;
- rc = skb_cow_head(skb, 0);
- if (unlikely(rc < 0))
- return rc;
- vhdr = (struct vlan_ethhdr *)skb->data;
- vhdr->h_vlan_TCI |= cpu_to_be16((skb->priority & 0x7)
- << HNS3_TX_VLAN_PRIO_SHIFT);
+ skb->protocol = vlan_get_protocol(skb);
+ return rc;
}
+ rc = skb_cow_head(skb, 0);
+ if (unlikely(rc < 0))
+ return rc;
+
+ vhdr = (struct vlan_ethhdr *)skb->data;
+ vhdr->h_vlan_TCI |= cpu_to_be16((skb->priority << VLAN_PRIO_SHIFT)
+ & VLAN_PRIO_MASK);
+
skb->protocol = vlan_get_protocol(skb);
return 0;
}
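
The priority bits land in the top three bits of the 16-bit TCI (VLAN_PRIO_SHIFT/VLAN_PRIO_MASK from if_vlan.h), above the 12-bit VLAN ID. A standalone sketch of that arithmetic with invented values:

    /* Standalone sketch of the TCI layout: 3 priority bits above a
     * 12-bit VLAN ID (plus CFI/DEI in bit 12, unused here). The shift
     * and masks match if_vlan.h; vid/prio are invented.
     */
    #include <stdio.h>

    #define VLAN_PRIO_SHIFT 13
    #define VLAN_PRIO_MASK  0xe000
    #define VLAN_VID_MASK   0x0fff

    int main(void)
    {
        unsigned int vid = 100;   /* from skb_vlan_tag_get() */
        unsigned int prio = 5;    /* from skb->priority */
        unsigned int tci = (vid & VLAN_VID_MASK) |
                           ((prio << VLAN_PRIO_SHIFT) & VLAN_PRIO_MASK);

        printf("TCI = 0x%04x\n", tci);   /* 0xa064 */
        return 0;
    }
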
+static int hns3_fill_skb_desc(struct hns3_enet_ring *ring,
+ struct sk_buff *skb, struct hns3_desc *desc)
+{
+ u32 ol_type_vlan_len_msec = 0;
+ u32 type_cs_vlan_tso = 0;
+ u32 paylen = skb->len;
+ u16 inner_vtag = 0;
+ u16 out_vtag = 0;
+ u16 mss = 0;
+ int ret;
+
+ ret = hns3_handle_vtags(ring, skb);
+ if (unlikely(ret < 0)) {
+ u64_stats_update_begin(&ring->syncp);
+ ring->stats.tx_vlan_err++;
+ u64_stats_update_end(&ring->syncp);
+ return ret;
+ } else if (ret == HNS3_INNER_VLAN_TAG) {
+ inner_vtag = skb_vlan_tag_get(skb);
+ inner_vtag |= (skb->priority << VLAN_PRIO_SHIFT) &
+ VLAN_PRIO_MASK;
+ hns3_set_field(type_cs_vlan_tso, HNS3_TXD_VLAN_B, 1);
+ } else if (ret == HNS3_OUTER_VLAN_TAG) {
+ out_vtag = skb_vlan_tag_get(skb);
+ out_vtag |= (skb->priority << VLAN_PRIO_SHIFT) &
+ VLAN_PRIO_MASK;
+ hns3_set_field(ol_type_vlan_len_msec, HNS3_TXD_OVLAN_B,
+ 1);
+ }
+
+ if (skb->ip_summed == CHECKSUM_PARTIAL) {
+ u8 ol4_proto, il4_proto;
+
+ skb_reset_mac_len(skb);
+
+ ret = hns3_get_l4_protocol(skb, &ol4_proto, &il4_proto);
+ if (unlikely(ret)) {
+ u64_stats_update_begin(&ring->syncp);
+ ring->stats.tx_l4_proto_err++;
+ u64_stats_update_end(&ring->syncp);
+ return ret;
+ }
+
+ ret = hns3_set_l2l3l4(skb, ol4_proto, il4_proto,
+ &type_cs_vlan_tso,
+ &ol_type_vlan_len_msec);
+ if (unlikely(ret)) {
+ u64_stats_update_begin(&ring->syncp);
+ ring->stats.tx_l2l3l4_err++;
+ u64_stats_update_end(&ring->syncp);
+ return ret;
+ }
+
+ ret = hns3_set_tso(skb, &paylen, &mss,
+ &type_cs_vlan_tso);
+ if (unlikely(ret)) {
+ u64_stats_update_begin(&ring->syncp);
+ ring->stats.tx_tso_err++;
+ u64_stats_update_end(&ring->syncp);
+ return ret;
+ }
+ }
+
+ /* Set txbd */
+ desc->tx.ol_type_vlan_len_msec =
+ cpu_to_le32(ol_type_vlan_len_msec);
+ desc->tx.type_cs_vlan_tso_len = cpu_to_le32(type_cs_vlan_tso);
+ desc->tx.paylen = cpu_to_le32(paylen);
+ desc->tx.mss = cpu_to_le16(mss);
+ desc->tx.vlan_tag = cpu_to_le16(inner_vtag);
+ desc->tx.outer_vlan_tag = cpu_to_le16(out_vtag);
+
+ return 0;
+}
+
static int hns3_fill_desc(struct hns3_enet_ring *ring, void *priv,
unsigned int size, int frag_end,
enum hns_desc_type type)
@@ -1033,65 +1108,29 @@ static int hns3_fill_desc(struct hns3_enet_ring *ring, void *priv,
struct hns3_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_use];
struct hns3_desc *desc = &ring->desc[ring->next_to_use];
struct device *dev = ring_to_dev(ring);
- struct skb_frag_struct *frag;
+ skb_frag_t *frag;
unsigned int frag_buf_num;
int k, sizeoflast;
dma_addr_t dma;
if (type == DESC_TYPE_SKB) {
struct sk_buff *skb = (struct sk_buff *)priv;
- u32 ol_type_vlan_len_msec = 0;
- u32 type_cs_vlan_tso = 0;
- u32 paylen = skb->len;
- u16 inner_vtag = 0;
- u16 out_vtag = 0;
- u16 mss = 0;
int ret;
- ret = hns3_fill_desc_vtags(skb, ring, &type_cs_vlan_tso,
- &ol_type_vlan_len_msec,
- &inner_vtag, &out_vtag);
+ ret = hns3_fill_skb_desc(ring, skb, desc);
if (unlikely(ret))
return ret;
- if (skb->ip_summed == CHECKSUM_PARTIAL) {
- u8 ol4_proto, il4_proto;
-
- skb_reset_mac_len(skb);
-
- ret = hns3_get_l4_protocol(skb, &ol4_proto, &il4_proto);
- if (unlikely(ret))
- return ret;
-
- ret = hns3_set_l2l3l4(skb, ol4_proto, il4_proto,
- &type_cs_vlan_tso,
- &ol_type_vlan_len_msec);
- if (unlikely(ret))
- return ret;
-
- ret = hns3_set_tso(skb, &paylen, &mss,
- &type_cs_vlan_tso);
- if (unlikely(ret))
- return ret;
- }
-
- /* Set txbd */
- desc->tx.ol_type_vlan_len_msec =
- cpu_to_le32(ol_type_vlan_len_msec);
- desc->tx.type_cs_vlan_tso_len = cpu_to_le32(type_cs_vlan_tso);
- desc->tx.paylen = cpu_to_le32(paylen);
- desc->tx.mss = cpu_to_le16(mss);
- desc->tx.vlan_tag = cpu_to_le16(inner_vtag);
- desc->tx.outer_vlan_tag = cpu_to_le16(out_vtag);
-
dma = dma_map_single(dev, skb->data, size, DMA_TO_DEVICE);
} else {
- frag = (struct skb_frag_struct *)priv;
+ frag = (skb_frag_t *)priv;
dma = skb_frag_dma_map(dev, frag, 0, size, DMA_TO_DEVICE);
}
if (unlikely(dma_mapping_error(dev, dma))) {
+ u64_stats_update_begin(&ring->syncp);
ring->stats.sw_err_cnt++;
+ u64_stats_update_end(&ring->syncp);
return -ENOMEM;
}
@@ -1147,28 +1186,20 @@ static int hns3_fill_desc(struct hns3_enet_ring *ring, void *priv,
return 0;
}
-static int hns3_nic_bd_num(struct sk_buff *skb)
+static unsigned int hns3_nic_bd_num(struct sk_buff *skb)
{
- int size = skb_headlen(skb);
- int i, bd_num;
+ unsigned int bd_num;
+ int i;
/* if the total len is within the max bd limit */
if (likely(skb->len <= HNS3_MAX_BD_SIZE))
return skb_shinfo(skb)->nr_frags + 1;
- bd_num = hns3_tx_bd_count(size);
+ bd_num = hns3_tx_bd_count(skb_headlen(skb));
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
- struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
- int frag_bd_num;
-
- size = skb_frag_size(frag);
- frag_bd_num = hns3_tx_bd_count(size);
-
- if (unlikely(frag_bd_num > HNS3_MAX_BD_PER_FRAG))
- return -ENOMEM;
-
- bd_num += frag_bd_num;
+ skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+ bd_num += hns3_tx_bd_count(skb_frag_size(frag));
}
return bd_num;
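
hns3_tx_bd_count() is DIV_ROUND_UP(size, HNS3_MAX_BD_SIZE), so the loop above is a plain ceiling-division sum over headlen and frags. A standalone sketch with invented lengths:

    /* Standalone sketch of the BD count: ceil(len / HNS3_MAX_BD_SIZE)
     * per buffer, summed over headlen and frags. 65535 matches the
     * driver's HNS3_MAX_BD_SIZE; the lengths are invented.
     */
    #include <stdio.h>

    #define MAX_BD_SIZE 65535u
    #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

    static unsigned int tx_bd_count(unsigned int size)
    {
        return DIV_ROUND_UP(size, MAX_BD_SIZE);
    }

    int main(void)
    {
        unsigned int frags[] = { 70000, 32768 };    /* frag lengths */
        unsigned int bd_num = tx_bd_count(1500);    /* headlen */

        for (unsigned int i = 0; i < 2; i++)
            bd_num += tx_bd_count(frags[i]);
        printf("bd_num = %u\n", bd_num);            /* 1 + 2 + 1 = 4 */
        return 0;
    }
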
@@ -1189,7 +1220,7 @@ static unsigned int hns3_gso_hdr_len(struct sk_buff *skb)
*/
static bool hns3_skb_need_linearized(struct sk_buff *skb)
{
- int bd_limit = HNS3_MAX_BD_PER_FRAG - 1;
+ int bd_limit = HNS3_MAX_BD_NUM_NORMAL - 1;
unsigned int tot_len = 0;
int i;
@@ -1219,21 +1250,16 @@ static int hns3_nic_maybe_stop_tx(struct hns3_enet_ring *ring,
struct sk_buff **out_skb)
{
struct sk_buff *skb = *out_skb;
- int bd_num;
+ unsigned int bd_num;
bd_num = hns3_nic_bd_num(skb);
- if (bd_num < 0)
- return bd_num;
-
- if (unlikely(bd_num > HNS3_MAX_BD_PER_FRAG)) {
+ if (unlikely(bd_num > HNS3_MAX_BD_NUM_NORMAL)) {
struct sk_buff *new_skb;
- if (skb_is_gso(skb) && !hns3_skb_need_linearized(skb))
+ if (skb_is_gso(skb) && bd_num <= HNS3_MAX_BD_NUM_TSO &&
+ !hns3_skb_need_linearized(skb))
goto out;
- bd_num = hns3_tx_bd_count(skb->len);
- if (unlikely(ring_space(ring) < bd_num))
- return -EBUSY;
/* manual split the send packet */
new_skb = skb_copy(skb, GFP_ATOMIC);
if (!new_skb)
@@ -1241,6 +1267,11 @@ static int hns3_nic_maybe_stop_tx(struct hns3_enet_ring *ring,
dev_kfree_skb_any(skb);
*out_skb = new_skb;
+ bd_num = hns3_nic_bd_num(new_skb);
+ if ((skb_is_gso(new_skb) && bd_num > HNS3_MAX_BD_NUM_TSO) ||
+ (!skb_is_gso(new_skb) && bd_num > HNS3_MAX_BD_NUM_NORMAL))
+ return -ENOMEM;
+
u64_stats_update_begin(&ring->syncp);
ring->stats.tx_copy++;
u64_stats_update_end(&ring->syncp);
@@ -1290,7 +1321,7 @@ netdev_tx_t hns3_nic_net_xmit(struct sk_buff *skb, struct net_device *netdev)
&tx_ring_data(priv, skb->queue_mapping);
struct hns3_enet_ring *ring = ring_data->ring;
struct netdev_queue *dev_queue;
- struct skb_frag_struct *frag;
+ skb_frag_t *frag;
int next_to_use_head;
int buf_num;
int seg_num;
@@ -1314,9 +1345,7 @@ netdev_tx_t hns3_nic_net_xmit(struct sk_buff *skb, struct net_device *netdev)
u64_stats_update_end(&ring->syncp);
}
- if (net_ratelimit())
- netdev_err(netdev, "xmit error: %d!\n", buf_num);
-
+ hns3_rl_err(netdev, "xmit error: %d!\n", buf_num);
goto out_err_tx_ok;
}
@@ -1482,7 +1511,15 @@ static void hns3_nic_get_stats64(struct net_device *netdev,
tx_bytes += ring->stats.tx_bytes;
tx_pkts += ring->stats.tx_pkts;
tx_drop += ring->stats.sw_err_cnt;
+ tx_drop += ring->stats.tx_vlan_err;
+ tx_drop += ring->stats.tx_l4_proto_err;
+ tx_drop += ring->stats.tx_l2l3l4_err;
+ tx_drop += ring->stats.tx_tso_err;
tx_errors += ring->stats.sw_err_cnt;
+ tx_errors += ring->stats.tx_vlan_err;
+ tx_errors += ring->stats.tx_l4_proto_err;
+ tx_errors += ring->stats.tx_l2l3l4_err;
+ tx_errors += ring->stats.tx_tso_err;
} while (u64_stats_fetch_retry_irq(&ring->syncp, start));
/* fetch the rx stats */
@@ -1550,6 +1587,8 @@ static int hns3_setup_tc(struct net_device *netdev, void *type_data)
h = hns3_get_handle(netdev);
kinfo = &h->kinfo;
+ netif_dbg(h, drv, netdev, "setup tc: num_tc=%u\n", tc);
+
return (kinfo->dcb_ops && kinfo->dcb_ops->setup_tc) ?
kinfo->dcb_ops->setup_tc(h, tc, prio_tc) : -EOPNOTSUPP;
}
@@ -1593,6 +1632,10 @@ static int hns3_ndo_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan,
struct hnae3_handle *h = hns3_get_handle(netdev);
int ret = -EIO;
+ netif_dbg(h, drv, netdev,
+ "set vf vlan: vf=%d, vlan=%u, qos=%u, vlan_proto=%u\n",
+ vf, vlan, qos, vlan_proto);
+
if (h->ae_algo->ops->set_vf_vlan_filter)
ret = h->ae_algo->ops->set_vf_vlan_filter(h, vf, vlan,
qos, vlan_proto);
@@ -1611,6 +1654,9 @@ static int hns3_nic_change_mtu(struct net_device *netdev, int new_mtu)
if (!h->ae_algo->ops->set_mtu)
return -EOPNOTSUPP;
+ netif_dbg(h, drv, netdev,
+ "change mtu from %u to %d\n", netdev->mtu, new_mtu);
+
ret = h->ae_algo->ops->set_mtu(h, new_mtu);
if (ret)
netdev_err(netdev, "failed to change MTU in hardware %d\n",
@@ -1680,15 +1726,12 @@ static bool hns3_get_tx_timeo_queue_info(struct net_device *ndev)
/* When the MAC receives many pause frames continuously, it's unable to send
* packets, which may cause a tx timeout
*/
- if (h->ae_algo->ops->update_stats &&
- h->ae_algo->ops->get_mac_pause_stats) {
- u64 tx_pause_cnt, rx_pause_cnt;
+ if (h->ae_algo->ops->get_mac_stats) {
+ struct hns3_mac_stats mac_stats;
- h->ae_algo->ops->update_stats(h, &ndev->stats);
- h->ae_algo->ops->get_mac_pause_stats(h, &tx_pause_cnt,
- &rx_pause_cnt);
+ h->ae_algo->ops->get_mac_stats(h, &mac_stats);
netdev_info(ndev, "tx_pause_cnt: %llu, rx_pause_cnt: %llu\n",
- tx_pause_cnt, rx_pause_cnt);
+ mac_stats.tx_pause_cnt, mac_stats.rx_pause_cnt);
}
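
Passing a struct hns3_mac_stats instead of two u64 out-parameters means future counters only grow the struct, not every implementation's signature. A standalone sketch of the refactored shape (names mirror the patch; the counter values are invented):

    /* Standalone sketch of the struct-based stats op. */
    #include <stdio.h>

    struct hns3_mac_stats {
        unsigned long long tx_pause_cnt;
        unsigned long long rx_pause_cnt;
    };

    static void get_mac_stats(struct hns3_mac_stats *s)
    {
        s->tx_pause_cnt = 3;    /* would be read from hardware */
        s->rx_pause_cnt = 12;
    }

    int main(void)
    {
        struct hns3_mac_stats mac_stats;

        get_mac_stats(&mac_stats);
        printf("tx_pause_cnt: %llu, rx_pause_cnt: %llu\n",
               mac_stats.tx_pause_cnt, mac_stats.rx_pause_cnt);
        return 0;
    }
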
hw_head = readl_relaxed(tx_ring->tqp->io_base +
@@ -1963,7 +2006,7 @@ static pci_ers_result_t hns3_slot_reset(struct pci_dev *pdev)
ops = ae_dev->ops;
/* request the reset */
- if (ops->reset_event) {
+ if (ops->reset_event && ops->get_reset_level) {
if (ae_dev->hw_err_reset_req) {
reset_type = ops->get_reset_level(ae_dev,
&ae_dev->hw_err_reset_req);
@@ -2067,7 +2110,7 @@ static void hns3_set_default_feature(struct net_device *netdev)
static int hns3_alloc_buffer(struct hns3_enet_ring *ring,
struct hns3_desc_cb *cb)
{
- unsigned int order = hnae3_page_order(ring);
+ unsigned int order = hns3_page_order(ring);
struct page *p;
p = dev_alloc_pages(order);
@@ -2078,7 +2121,7 @@ static int hns3_alloc_buffer(struct hns3_enet_ring *ring,
cb->page_offset = 0;
cb->reuse_flag = 0;
cb->buf = page_address(p);
- cb->length = hnae3_page_size(ring);
+ cb->length = hns3_page_size(ring);
cb->type = DESC_TYPE_PAGE;
return 0;
@@ -2357,8 +2400,9 @@ static void hns3_nic_alloc_rx_buffers(struct hns3_enet_ring *ring,
ring->stats.sw_err_cnt++;
u64_stats_update_end(&ring->syncp);
- netdev_err(ring->tqp->handle->kinfo.netdev,
- "hnae reserve buffer map failed.\n");
+ hns3_rl_err(ring->tqp_vector->napi.dev,
+ "alloc rx buffer failed: %d\n",
+ ret);
break;
}
hns3_replace_buffer(ring, ring->next_to_use, &res_cbs);
@@ -2381,7 +2425,7 @@ static void hns3_nic_reuse_page(struct sk_buff *skb, int i,
{
struct hns3_desc *desc = &ring->desc[ring->next_to_clean];
int size = le16_to_cpu(desc->rx.size);
- u32 truesize = hnae3_buf_size(ring);
+ u32 truesize = hns3_buf_size(ring);
skb_add_rx_frag(skb, i, desc_cb->priv, desc_cb->page_offset + pull_len,
size - pull_len, truesize);
@@ -2396,7 +2440,7 @@ static void hns3_nic_reuse_page(struct sk_buff *skb, int i,
/* Move offset up to the next cache line */
desc_cb->page_offset += truesize;
- if (desc_cb->page_offset + truesize <= hnae3_page_size(ring)) {
+ if (desc_cb->page_offset + truesize <= hns3_page_size(ring)) {
desc_cb->reuse_flag = 1;
/* Bump ref count on page before it is given */
get_page(desc_cb->priv);
@@ -2443,9 +2487,9 @@ static int hns3_gro_complete(struct sk_buff *skb, u32 l234info)
th->check = ~tcp_v6_check(skb->len - depth, &iph->saddr,
&iph->daddr, 0);
} else {
- netdev_err(skb->dev,
- "Error: FW GRO supports only IPv4/IPv6, not 0x%04x, depth: %d\n",
- be16_to_cpu(type), depth);
+ hns3_rl_err(skb->dev,
+ "Error: FW GRO supports only IPv4/IPv6, not 0x%04x, depth: %d\n",
+ be16_to_cpu(type), depth);
return -EFAULT;
}
@@ -2587,7 +2631,7 @@ static int hns3_alloc_skb(struct hns3_enet_ring *ring, unsigned int length,
ring->skb = napi_alloc_skb(&ring->tqp_vector->napi, HNS3_RX_HEAD_SIZE);
skb = ring->skb;
if (unlikely(!skb)) {
- netdev_err(netdev, "alloc rx skb fail\n");
+ hns3_rl_err(netdev, "alloc rx skb fail\n");
u64_stats_update_begin(&ring->syncp);
ring->stats.sw_err_cnt++;
@@ -2662,8 +2706,8 @@ static int hns3_add_frag(struct hns3_enet_ring *ring, struct hns3_desc *desc,
new_skb = napi_alloc_skb(&ring->tqp_vector->napi,
HNS3_RX_HEAD_SIZE);
if (unlikely(!new_skb)) {
- netdev_err(ring->tqp->handle->kinfo.netdev,
- "alloc rx skb frag fail\n");
+ hns3_rl_err(ring->tqp_vector->napi.dev,
+ "alloc rx fraglist skb fail\n");
return -ENXIO;
}
ring->frag_num = 0;
@@ -2678,7 +2722,7 @@ static int hns3_add_frag(struct hns3_enet_ring *ring, struct hns3_desc *desc,
}
if (ring->tail_skb) {
- head_skb->truesize += hnae3_buf_size(ring);
+ head_skb->truesize += hns3_buf_size(ring);
head_skb->data_len += le16_to_cpu(desc->rx.size);
head_skb->len += le16_to_cpu(desc->rx.size);
skb = ring->tail_skb;
@@ -2895,24 +2939,22 @@ int hns3_clean_rx_ring(struct hns3_enet_ring *ring, int budget,
void (*rx_fn)(struct hns3_enet_ring *, struct sk_buff *))
{
#define RCB_NOF_ALLOC_RX_BUFF_ONCE 16
- int recv_pkts, recv_bds, clean_count, err;
int unused_count = hns3_desc_unused(ring);
struct sk_buff *skb = ring->skb;
- int num;
+ int recv_pkts = 0;
+ int recv_bds = 0;
+ int err, num;
num = readl_relaxed(ring->tqp->io_base + HNS3_RING_RX_RING_FBDNUM_REG);
rmb(); /* Make sure num taken effect before the other data is touched */
- recv_pkts = 0, recv_bds = 0, clean_count = 0;
num -= unused_count;
unused_count -= ring->pending_buf;
while (recv_pkts < budget && recv_bds < num) {
/* Reuse or realloc buffers */
- if (clean_count + unused_count >= RCB_NOF_ALLOC_RX_BUFF_ONCE) {
- hns3_nic_alloc_rx_buffers(ring,
- clean_count + unused_count);
- clean_count = 0;
+ if (unused_count >= RCB_NOF_ALLOC_RX_BUFF_ONCE) {
+ hns3_nic_alloc_rx_buffers(ring, unused_count);
unused_count = hns3_desc_unused(ring) -
ring->pending_buf;
}
@@ -2926,7 +2968,7 @@ int hns3_clean_rx_ring(struct hns3_enet_ring *ring, int budget,
goto out;
} else if (unlikely(err)) { /* Do jump the err */
recv_bds += ring->pending_buf;
- clean_count += ring->pending_buf;
+ unused_count += ring->pending_buf;
ring->skb = NULL;
ring->pending_buf = 0;
continue;
@@ -2934,7 +2976,7 @@ int hns3_clean_rx_ring(struct hns3_enet_ring *ring, int budget,
rx_fn(ring, skb);
recv_bds += ring->pending_buf;
- clean_count += ring->pending_buf;
+ unused_count += ring->pending_buf;
ring->skb = NULL;
ring->pending_buf = 0;
@@ -2943,8 +2985,8 @@ int hns3_clean_rx_ring(struct hns3_enet_ring *ring, int budget,
out:
/* Make sure all data has been written before submit */
- if (clean_count + unused_count > 0)
- hns3_nic_alloc_rx_buffers(ring, clean_count + unused_count);
+ if (unused_count > 0)
+ hns3_nic_alloc_rx_buffers(ring, unused_count);
return recv_pkts;
}
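
After this change there is a single counter of buffers owed back to the ring; refill triggers once it reaches the batch size, and a final refill drains the remainder. A standalone sketch that loosely models that bookkeeping (the batch size mirrors the patch, the per-packet numbers are invented):

    /* Standalone sketch (loose model) of the single-counter refill. */
    #include <stdio.h>

    #define ALLOC_RX_BUFF_ONCE 16   /* batch size, as in the patch */

    int main(void)
    {
        int unused_count = 5;       /* stale BDs at entry (invented) */

        for (int pkt = 0; pkt < 8; pkt++) {
            int pending_buf = 3;    /* BDs this packet used (invented) */

            unused_count += pending_buf;
            if (unused_count >= ALLOC_RX_BUFF_ONCE) {
                printf("refill %d buffers\n", unused_count);
                unused_count = 0;   /* debt repaid by the alloc path */
            }
        }
        if (unused_count > 0)
            printf("final refill %d buffers\n", unused_count);
        return 0;
    }
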
@@ -3574,7 +3616,7 @@ out:
return ret;
}
-static void hns3_fini_ring(struct hns3_enet_ring *ring)
+void hns3_fini_ring(struct hns3_enet_ring *ring)
{
hns3_free_desc(ring);
devm_kfree(ring_to_dev(ring), ring->desc_cb);
@@ -4165,8 +4207,8 @@ int hns3_nic_reset_all_ring(struct hnae3_handle *h)
static void hns3_store_coal(struct hns3_nic_priv *priv)
{
/* ethtool only supports setting and querying one coal
- * configuation for now, so save the vector 0' coal
- * configuation here in order to restore it.
+ * configuration for now, so save the vector 0's coal
+ * configuration here in order to restore it.
*/
memcpy(&priv->tx_coal, &priv->tqp_vector[0].tx_group.coal,
sizeof(struct hns3_enet_coalesce));
@@ -4378,6 +4420,9 @@ int hns3_set_channels(struct net_device *netdev,
u16 org_tqp_num;
int ret;
+ if (hns3_nic_resetting(netdev))
+ return -EBUSY;
+
if (ch->rx_count || ch->tx_count)
return -EINVAL;
@@ -4392,6 +4437,10 @@ int hns3_set_channels(struct net_device *netdev,
if (kinfo->rss_size == new_tqp_num)
return 0;
+ netif_dbg(h, drv, netdev,
+ "set channels: tqp_num=%u, rxfh=%d\n",
+ new_tqp_num, rxfh_configured);
+
ret = hns3_reset_notify(h, HNAE3_DOWN_CLIENT);
if (ret)
return ret;
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
index 848b866761df..e37e64e70d8d 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
@@ -75,7 +75,7 @@ enum hns3_nic_state {
#define HNS3_TX_TIMEOUT (5 * HZ)
#define HNS3_RING_NAME_LEN 16
#define HNS3_BUFFER_SIZE_2048 2048
-#define HNS3_RING_MAX_PENDING 32768
+#define HNS3_RING_MAX_PENDING 32760
#define HNS3_RING_MIN_PENDING 24
#define HNS3_RING_BD_MULTIPLE 8
/* max frame size of mac */
@@ -195,7 +195,8 @@ enum hns3_nic_state {
#define HNS3_VECTOR_INITED 1
#define HNS3_MAX_BD_SIZE 65535
-#define HNS3_MAX_BD_PER_FRAG 8
+#define HNS3_MAX_BD_NUM_NORMAL 8
+#define HNS3_MAX_BD_NUM_TSO 63
#define HNS3_MAX_BD_PER_PKT MAX_SKB_FRAGS
#define HNS3_VECTOR_GL0_OFFSET 0x100
@@ -301,7 +302,7 @@ struct hns3_desc_cb {
dma_addr_t dma; /* dma address of this desc */
void *buf; /* cpu addr for a desc */
- /* priv data for the desc, e.g. skb when use with ip stack*/
+ /* priv data for the desc, e.g. skb when use with ip stack */
void *priv;
u32 page_offset;
u32 length; /* length of the buffer */
@@ -324,11 +325,11 @@ enum hns3_pkt_l3type {
HNS3_L3_TYPE_MAC_PAUSE,
HNS3_L3_TYPE_PFC_PAUSE, /* 0x9 */
- /* reserved for 0xA~0xB*/
+ /* reserved for 0xA~0xB */
HNS3_L3_TYPE_CNM = 0xc,
- /* reserved for 0xD~0xE*/
+ /* reserved for 0xD~0xE */
HNS3_L3_TYPE_PARSE_FAIL = 0xf /* must be last */
};
@@ -353,7 +354,7 @@ enum hns3_pkt_ol3type {
HNS3_OL3_TYPE_IPV4_OPT = 4,
HNS3_OL3_TYPE_IPV6_EXT,
- /* reserved for 0x6~0xE*/
+ /* reserved for 0x6~0xE */
HNS3_OL3_TYPE_PARSE_FAIL = 0xf /* must be last */
};
@@ -377,6 +378,10 @@ struct ring_stats {
u64 restart_queue;
u64 tx_busy;
u64 tx_copy;
+ u64 tx_vlan_err;
+ u64 tx_l4_proto_err;
+ u64 tx_l2l3l4_err;
+ u64 tx_tso_err;
};
struct {
u64 rx_pkts;
@@ -608,9 +613,18 @@ static inline bool hns3_nic_resetting(struct net_device *netdev)
#define tx_ring_data(priv, idx) ((priv)->ring_data[idx])
-#define hnae3_buf_size(_ring) ((_ring)->buf_size)
-#define hnae3_page_order(_ring) (get_order(hnae3_buf_size(_ring)))
-#define hnae3_page_size(_ring) (PAGE_SIZE << (u32)hnae3_page_order(_ring))
+#define hns3_buf_size(_ring) ((_ring)->buf_size)
+
+static inline unsigned int hns3_page_order(struct hns3_enet_ring *ring)
+{
+#if (PAGE_SIZE < 8192)
+ if (ring->buf_size > (PAGE_SIZE / 2))
+ return 1;
+#endif
+ return 0;
+}
+
+#define hns3_page_size(_ring) (PAGE_SIZE << hns3_page_order(_ring))
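
On systems with pages smaller than 8 KiB, a buffer larger than half a page is promoted to an order-1 allocation so two buffers never straddle one page. A standalone sketch of the same computation for a 4 KiB page system:

    /* Standalone sketch of hns3_page_order()/hns3_page_size(). */
    #include <stdio.h>

    #define PAGE_SIZE 4096u

    static unsigned int page_order(unsigned int buf_size)
    {
    #if (PAGE_SIZE < 8192)
        if (buf_size > PAGE_SIZE / 2)
            return 1;               /* two pages, so buffers don't share */
    #endif
        return 0;
    }

    int main(void)
    {
        unsigned int sizes[] = { 2048, 4096 };

        for (unsigned int i = 0; i < 2; i++)
            printf("buf %u -> alloc %u bytes\n", sizes[i],
                   PAGE_SIZE << page_order(sizes[i]));
        return 0;                   /* 2048 -> 4096, 4096 -> 8192 */
    }
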
/* iterator for handling rings in ring group */
#define hns3_for_each_ring(pos, head) \
@@ -633,6 +647,7 @@ void hns3_clean_tx_ring(struct hns3_enet_ring *ring);
int hns3_init_all_ring(struct hns3_nic_priv *priv);
int hns3_uninit_all_ring(struct hns3_nic_priv *priv);
int hns3_nic_reset_all_ring(struct hnae3_handle *h);
+void hns3_fini_ring(struct hns3_enet_ring *ring);
netdev_tx_t hns3_nic_net_xmit(struct sk_buff *skb, struct net_device *netdev);
bool hns3_is_phys_func(struct pci_dev *pdev);
int hns3_clean_rx_ring(
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
index 5bff98a9b0dc..0332d6fb4c0d 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
@@ -30,6 +30,10 @@ static const struct hns3_stats hns3_txq_stats[] = {
HNS3_TQP_STAT("wake", restart_queue),
HNS3_TQP_STAT("busy", tx_busy),
HNS3_TQP_STAT("copy", tx_copy),
+ HNS3_TQP_STAT("vlan_err", tx_vlan_err),
+ HNS3_TQP_STAT("l4_proto_err", tx_l4_proto_err),
+ HNS3_TQP_STAT("l2l3l4_err", tx_l2l3l4_err),
+ HNS3_TQP_STAT("tso_err", tx_tso_err),
};
#define HNS3_TXQ_STATS_COUNT ARRAY_SIZE(hns3_txq_stats)
@@ -311,6 +315,8 @@ static void hns3_self_test(struct net_device *ndev,
if (eth_test->flags != ETH_TEST_FL_OFFLINE)
return;
+ netif_dbg(h, drv, ndev, "self test start\n");
+
st_param[HNAE3_LOOP_APP][0] = HNAE3_LOOP_APP;
st_param[HNAE3_LOOP_APP][1] =
h->flags & HNAE3_SUPPORT_APP_LOOPBACK;
@@ -374,6 +380,8 @@ static void hns3_self_test(struct net_device *ndev,
if (if_running)
ndev->netdev_ops->ndo_open(ndev);
+
+ netif_dbg(h, drv, ndev, "self test end\n");
}
static int hns3_get_sset_count(struct net_device *netdev, int stringset)
@@ -527,6 +535,7 @@ static void hns3_get_drvinfo(struct net_device *netdev,
{
struct hns3_nic_priv *priv = netdev_priv(netdev);
struct hnae3_handle *h = priv->ae_handle;
+ u32 fw_version;
if (!h->ae_algo->ops->get_fw_version) {
netdev_err(netdev, "could not get fw version!\n");
@@ -545,8 +554,18 @@ static void hns3_get_drvinfo(struct net_device *netdev,
sizeof(drvinfo->bus_info));
drvinfo->bus_info[ETHTOOL_BUSINFO_LEN - 1] = '\0';
- snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version), "0x%08x",
- priv->ae_handle->ae_algo->ops->get_fw_version(h));
+ fw_version = priv->ae_handle->ae_algo->ops->get_fw_version(h);
+
+ snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
+ "%lu.%lu.%lu.%lu",
+ hnae3_get_field(fw_version, HNAE3_FW_VERSION_BYTE3_MASK,
+ HNAE3_FW_VERSION_BYTE3_SHIFT),
+ hnae3_get_field(fw_version, HNAE3_FW_VERSION_BYTE2_MASK,
+ HNAE3_FW_VERSION_BYTE2_SHIFT),
+ hnae3_get_field(fw_version, HNAE3_FW_VERSION_BYTE1_MASK,
+ HNAE3_FW_VERSION_BYTE1_SHIFT),
+ hnae3_get_field(fw_version, HNAE3_FW_VERSION_BYTE0_MASK,
+ HNAE3_FW_VERSION_BYTE0_SHIFT));
}
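
The four HNAE3_FW_VERSION_BYTE* mask/shift pairs simply slice the 32-bit version word into a dotted quad. A standalone sketch with an invented register value:

    /* Standalone sketch of the dotted-quad formatting; masks and shifts
     * mirror the HNAE3_FW_VERSION_BYTE* definitions.
     */
    #include <stdio.h>

    #define GET_FIELD(v, mask, shift) (((v) & (mask)) >> (shift))

    int main(void)
    {
        unsigned int version = 0x01080b02;   /* invented raw value */

        printf("%u.%u.%u.%u\n",
               GET_FIELD(version, 0xff000000u, 24),
               GET_FIELD(version, 0x00ff0000u, 16),
               GET_FIELD(version, 0x0000ff00u, 8),
               GET_FIELD(version, 0x000000ffu, 0));   /* 1.8.11.2 */
        return 0;
    }
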
static u32 hns3_get_link(struct net_device *netdev)
@@ -593,6 +612,10 @@ static int hns3_set_pauseparam(struct net_device *netdev,
{
struct hnae3_handle *h = hns3_get_handle(netdev);
+ netif_dbg(h, drv, netdev,
+ "set pauseparam: autoneg=%u, rx:%u, tx:%u\n",
+ param->autoneg, param->rx_pause, param->tx_pause);
+
if (h->ae_algo->ops->set_pauseparam)
return h->ae_algo->ops->set_pauseparam(h, param->autoneg,
param->rx_pause,
@@ -612,7 +635,7 @@ static void hns3_get_ksettings(struct hnae3_handle *h,
&cmd->base.speed,
&cmd->base.duplex);
- /* 2.get link mode*/
+ /* 2.get link mode */
if (ops->get_link_mode)
ops->get_link_mode(h,
cmd->link_modes.supported,
@@ -681,7 +704,7 @@ static int hns3_get_link_ksettings(struct net_device *netdev,
return 0;
}
-static int hns3_check_ksettings_param(struct net_device *netdev,
+static int hns3_check_ksettings_param(const struct net_device *netdev,
const struct ethtool_link_ksettings *cmd)
{
struct hnae3_handle *handle = hns3_get_handle(netdev);
@@ -726,12 +749,17 @@ static int hns3_set_link_ksettings(struct net_device *netdev,
{
struct hnae3_handle *handle = hns3_get_handle(netdev);
const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
- int ret = 0;
+ int ret;
/* Chip doesn't support this mode. */
if (cmd->base.speed == SPEED_1000 && cmd->base.duplex == DUPLEX_HALF)
return -EINVAL;
+ netif_dbg(handle, drv, netdev,
+ "set link(%s): autoneg=%u, speed=%u, duplex=%u\n",
+ netdev->phydev ? "phy" : "mac",
+ cmd->base.autoneg, cmd->base.speed, cmd->base.duplex);
+
/* Only support ksettings_set for netdev with phy attached for now */
if (netdev->phydev)
return phy_ethtool_ksettings_set(netdev->phydev, cmd);
@@ -843,8 +871,8 @@ static int hns3_get_rxnfc(struct net_device *netdev,
}
}
-static int hns3_change_all_ring_bd_num(struct hns3_nic_priv *priv,
- u32 tx_desc_num, u32 rx_desc_num)
+static void hns3_change_all_ring_bd_num(struct hns3_nic_priv *priv,
+ u32 tx_desc_num, u32 rx_desc_num)
{
struct hnae3_handle *h = priv->ae_handle;
int i;
@@ -857,21 +885,29 @@ static int hns3_change_all_ring_bd_num(struct hns3_nic_priv *priv,
priv->ring_data[i + h->kinfo.num_tqps].ring->desc_num =
rx_desc_num;
}
-
- return hns3_init_all_ring(priv);
}
-static int hns3_set_ringparam(struct net_device *ndev,
- struct ethtool_ringparam *param)
+static struct hns3_enet_ring *hns3_backup_ringparam(struct hns3_nic_priv *priv)
{
- struct hns3_nic_priv *priv = netdev_priv(ndev);
- struct hnae3_handle *h = priv->ae_handle;
- bool if_running = netif_running(ndev);
- u32 old_tx_desc_num, new_tx_desc_num;
- u32 old_rx_desc_num, new_rx_desc_num;
- int queue_num = h->kinfo.num_tqps;
- int ret;
+ struct hnae3_handle *handle = priv->ae_handle;
+ struct hns3_enet_ring *tmp_rings;
+ int i;
+
+ tmp_rings = kcalloc(handle->kinfo.num_tqps * 2,
+ sizeof(struct hns3_enet_ring), GFP_KERNEL);
+ if (!tmp_rings)
+ return NULL;
+
+ for (i = 0; i < handle->kinfo.num_tqps * 2; i++)
+ memcpy(&tmp_rings[i], priv->ring_data[i].ring,
+ sizeof(struct hns3_enet_ring));
+
+ return tmp_rings;
+}
+static int hns3_check_ringparam(struct net_device *ndev,
+ struct ethtool_ringparam *param)
+{
if (hns3_nic_resetting(ndev))
return -EBUSY;
@@ -887,6 +923,25 @@ static int hns3_set_ringparam(struct net_device *ndev,
return -EINVAL;
}
+ return 0;
+}
+
+static int hns3_set_ringparam(struct net_device *ndev,
+ struct ethtool_ringparam *param)
+{
+ struct hns3_nic_priv *priv = netdev_priv(ndev);
+ struct hnae3_handle *h = priv->ae_handle;
+ struct hns3_enet_ring *tmp_rings;
+ bool if_running = netif_running(ndev);
+ u32 old_tx_desc_num, new_tx_desc_num;
+ u32 old_rx_desc_num, new_rx_desc_num;
+ u16 queue_num = h->kinfo.num_tqps;
+ int ret, i;
+
+ ret = hns3_check_ringparam(ndev, param);
+ if (ret)
+ return ret;
+
/* Hardware requires that its descriptors be a multiple of eight */
new_tx_desc_num = ALIGN(param->tx_pending, HNS3_RING_BD_MULTIPLE);
new_rx_desc_num = ALIGN(param->rx_pending, HNS3_RING_BD_MULTIPLE);
@@ -896,6 +951,13 @@ static int hns3_set_ringparam(struct net_device *ndev,
old_rx_desc_num == new_rx_desc_num)
return 0;
+ tmp_rings = hns3_backup_ringparam(priv);
+ if (!tmp_rings) {
+ netdev_err(ndev,
+ "failed to back up ring parameters: out of memory\n");
+ return -ENOMEM;
+ }
+
netdev_info(ndev,
"Changing Tx/Rx ring depth from %d/%d to %d/%d\n",
old_tx_desc_num, old_rx_desc_num,
@@ -904,22 +966,24 @@ static int hns3_set_ringparam(struct net_device *ndev,
if (if_running)
ndev->netdev_ops->ndo_stop(ndev);
- ret = hns3_uninit_all_ring(priv);
- if (ret)
- return ret;
-
- ret = hns3_change_all_ring_bd_num(priv, new_tx_desc_num,
- new_rx_desc_num);
+ hns3_change_all_ring_bd_num(priv, new_tx_desc_num, new_rx_desc_num);
+ ret = hns3_init_all_ring(priv);
if (ret) {
- ret = hns3_change_all_ring_bd_num(priv, old_tx_desc_num,
- old_rx_desc_num);
- if (ret) {
- netdev_err(ndev,
- "Revert to old bd num fail, ret=%d.\n", ret);
- return ret;
- }
+ netdev_err(ndev, "Change bd num fail, revert to old value(%d)\n",
+ ret);
+
+ hns3_change_all_ring_bd_num(priv, old_tx_desc_num,
+ old_rx_desc_num);
+ for (i = 0; i < h->kinfo.num_tqps * 2; i++)
+ memcpy(priv->ring_data[i].ring, &tmp_rings[i],
+ sizeof(struct hns3_enet_ring));
+ } else {
+ for (i = 0; i < h->kinfo.num_tqps * 2; i++)
+ hns3_fini_ring(&tmp_rings[i]);
}
+ kfree(tmp_rings);
+
if (if_running)
ret = ndev->netdev_ops->ndo_open(ndev);
@@ -973,6 +1037,9 @@ static int hns3_nway_reset(struct net_device *netdev)
return -EINVAL;
}
+ netif_dbg(handle, drv, netdev,
+ "nway reset (using %s)\n", phy ? "phy" : "mac");
+
if (phy)
return genphy_restart_aneg(phy);
@@ -1297,6 +1364,9 @@ static int hns3_set_fecparam(struct net_device *netdev,
if (!ops->set_fec)
return -EOPNOTSUPP;
fec_mode = eth_to_loc_fec(fec->fec);
+
+ netif_dbg(handle, drv, netdev, "set fecparam: mode=%u\n", fec_mode);
+
return ops->set_fec(handle, fec_mode);
}
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c
index 22f6acd45d9a..ecf58cfd253d 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c
@@ -103,14 +103,17 @@ static void hclge_cmd_config_regs(struct hclge_cmq_ring *ring)
dma_addr_t dma = ring->desc_dma_addr;
struct hclge_dev *hdev = ring->dev;
struct hclge_hw *hw = &hdev->hw;
+ u32 reg_val;
if (ring->ring_type == HCLGE_TYPE_CSQ) {
hclge_write_dev(hw, HCLGE_NIC_CSQ_BASEADDR_L_REG,
lower_32_bits(dma));
hclge_write_dev(hw, HCLGE_NIC_CSQ_BASEADDR_H_REG,
upper_32_bits(dma));
- hclge_write_dev(hw, HCLGE_NIC_CSQ_DEPTH_REG,
- ring->desc_num >> HCLGE_NIC_CMQ_DESC_NUM_S);
+ reg_val = hclge_read_dev(hw, HCLGE_NIC_CSQ_DEPTH_REG);
+ reg_val &= HCLGE_NIC_SW_RST_RDY;
+ reg_val |= ring->desc_num >> HCLGE_NIC_CMQ_DESC_NUM_S;
+ hclge_write_dev(hw, HCLGE_NIC_CSQ_DEPTH_REG, reg_val);
hclge_write_dev(hw, HCLGE_NIC_CSQ_HEAD_REG, 0);
hclge_write_dev(hw, HCLGE_NIC_CSQ_TAIL_REG, 0);
} else {
@@ -383,6 +386,23 @@ err_csq:
return ret;
}
+static int hclge_firmware_compat_config(struct hclge_dev *hdev)
+{
+ struct hclge_firmware_compat_cmd *req;
+ struct hclge_desc desc;
+ u32 compat = 0;
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_M7_COMPAT_CFG, false);
+
+ req = (struct hclge_firmware_compat_cmd *)desc.data;
+
+ hnae3_set_bit(compat, HCLGE_LINK_EVENT_REPORT_EN_B, 1);
+ hnae3_set_bit(compat, HCLGE_NCSI_ERROR_REPORT_EN_B, 1);
+ req->compat = cpu_to_le32(compat);
+
+ return hclge_cmd_send(&hdev->hw, &desc, 1);
+}
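
hnae3_set_bit() on a local u32 is an ordinary OR of a single bit; the assembled word is then sent to firmware little-endian via cpu_to_le32(). A standalone sketch of building the compat word (bit positions mirror the patch):

    /* Standalone sketch of assembling the compat word. */
    #include <stdio.h>

    #define LINK_EVENT_REPORT_EN_B 0
    #define NCSI_ERROR_REPORT_EN_B 1

    int main(void)
    {
        unsigned int compat = 0;

        compat |= 1u << LINK_EVENT_REPORT_EN_B;   /* hnae3_set_bit(.., 1) */
        compat |= 1u << NCSI_ERROR_REPORT_EN_B;
        printf("compat = 0x%08x\n", compat);      /* 0x00000003 */
        return 0;
    }
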
+
int hclge_cmd_init(struct hclge_dev *hdev)
{
u32 version;
@@ -419,7 +439,24 @@ int hclge_cmd_init(struct hclge_dev *hdev)
}
hdev->fw_version = version;
- dev_info(&hdev->pdev->dev, "The firmware version is %08x\n", version);
+ dev_info(&hdev->pdev->dev, "The firmware version is %lu.%lu.%lu.%lu\n",
+ hnae3_get_field(version, HNAE3_FW_VERSION_BYTE3_MASK,
+ HNAE3_FW_VERSION_BYTE3_SHIFT),
+ hnae3_get_field(version, HNAE3_FW_VERSION_BYTE2_MASK,
+ HNAE3_FW_VERSION_BYTE2_SHIFT),
+ hnae3_get_field(version, HNAE3_FW_VERSION_BYTE1_MASK,
+ HNAE3_FW_VERSION_BYTE1_SHIFT),
+ hnae3_get_field(version, HNAE3_FW_VERSION_BYTE0_MASK,
+ HNAE3_FW_VERSION_BYTE0_SHIFT));
+
+ /* ask the firmware to enable some features; the driver can work
+ * without them.
+ */
+ ret = hclge_firmware_compat_config(hdev);
+ if (ret)
+ dev_warn(&hdev->pdev->dev,
+ "Firmware compatible features not enabled (%d).\n",
+ ret);
return 0;
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h
index 96840d8f3e24..29979be9e33a 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h
@@ -86,6 +86,8 @@ enum hclge_opcode_type {
HCLGE_OPC_QUERY_PF_RSRC = 0x0023,
HCLGE_OPC_QUERY_VF_RSRC = 0x0024,
HCLGE_OPC_GET_CFG_PARAM = 0x0025,
+ HCLGE_OPC_PF_RST_DONE = 0x0026,
+ HCLGE_OPC_QUERY_VF_RST_RDY = 0x0027,
HCLGE_OPC_STATS_64_BIT = 0x0030,
HCLGE_OPC_STATS_32_BIT = 0x0031,
@@ -257,6 +259,7 @@ enum hclge_opcode_type {
/* M7 stats command */
HCLGE_OPC_M7_STATS_BD = 0x7012,
HCLGE_OPC_M7_STATS_INFO = 0x7013,
+ HCLGE_OPC_M7_COMPAT_CFG = 0x701A,
/* SFP command */
HCLGE_OPC_GET_SFP_INFO = 0x7104,
@@ -586,6 +589,12 @@ struct hclge_config_mac_mode_cmd {
u8 rsv[20];
};
+struct hclge_pf_rst_sync_cmd {
+#define HCLGE_PF_RST_ALL_VF_RDY_B 0
+ u8 all_vf_ready;
+ u8 rsv[23];
+};
+
#define HCLGE_CFG_SPEED_S 0
#define HCLGE_CFG_SPEED_M GENMASK(5, 0)
@@ -827,7 +836,7 @@ struct hclge_mac_ethertype_idx_rd_cmd {
u8 flags;
u8 resp_code;
__le16 vlan_tag;
- u8 mac_add[6];
+ u8 mac_addr[6];
__le16 index;
__le16 ethter_type;
__le16 egress_port;
@@ -877,6 +886,13 @@ struct hclge_reset_cmd {
u8 rsv[22];
};
+#define HCLGE_PF_RESET_DONE_BIT BIT(0)
+
+struct hclge_pf_rst_done_cmd {
+ u8 pf_rst_done;
+ u8 rsv[23];
+};
+
#define HCLGE_CMD_SERDES_SERIAL_INNER_LOOP_B BIT(0)
#define HCLGE_CMD_SERDES_PARALLEL_INNER_LOOP_B BIT(2)
#define HCLGE_CMD_SERDES_DONE_B BIT(0)
@@ -906,8 +922,11 @@ struct hclge_serdes_lb_cmd {
#define HCLGE_NIC_CRQ_DEPTH_REG 0x27020
#define HCLGE_NIC_CRQ_TAIL_REG 0x27024
#define HCLGE_NIC_CRQ_HEAD_REG 0x27028
-#define HCLGE_NIC_CMQ_EN_B 16
-#define HCLGE_NIC_CMQ_ENABLE BIT(HCLGE_NIC_CMQ_EN_B)
+
+/* this bit indicates that the driver is ready for hardware reset */
+#define HCLGE_NIC_SW_RST_RDY_B 16
+#define HCLGE_NIC_SW_RST_RDY BIT(HCLGE_NIC_SW_RST_RDY_B)
+
#define HCLGE_NIC_CMQ_DESC_NUM 1024
#define HCLGE_NIC_CMQ_DESC_NUM_S 3
@@ -1009,6 +1028,13 @@ struct hclge_query_ppu_pf_other_int_dfx_cmd {
u8 rsv[4];
};
+#define HCLGE_LINK_EVENT_REPORT_EN_B 0
+#define HCLGE_NCSI_ERROR_REPORT_EN_B 1
+struct hclge_firmware_compat_cmd {
+ __le32 compat;
+ u8 rsv[20];
+};
+
int hclge_cmd_init(struct hclge_dev *hdev);
static inline void hclge_write_reg(void __iomem *base, u32 reg, u32 value)
{
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c
index bac4ce13f6ae..814e0f076e32 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c
@@ -201,6 +201,7 @@ static int hclge_client_setup_tc(struct hclge_dev *hdev)
static int hclge_ieee_setets(struct hnae3_handle *h, struct ieee_ets *ets)
{
struct hclge_vport *vport = hclge_get_vport(h);
+ struct net_device *netdev = h->kinfo.netdev;
struct hclge_dev *hdev = vport->back;
bool map_changed = false;
u8 num_tc = 0;
@@ -215,6 +216,8 @@ static int hclge_ieee_setets(struct hnae3_handle *h, struct ieee_ets *ets)
return ret;
if (map_changed) {
+ netif_dbg(h, drv, netdev, "set ets\n");
+
ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
if (ret)
return ret;
@@ -300,6 +303,7 @@ static int hclge_ieee_getpfc(struct hnae3_handle *h, struct ieee_pfc *pfc)
static int hclge_ieee_setpfc(struct hnae3_handle *h, struct ieee_pfc *pfc)
{
struct hclge_vport *vport = hclge_get_vport(h);
+ struct net_device *netdev = h->kinfo.netdev;
struct hclge_dev *hdev = vport->back;
u8 i, j, pfc_map, *prio_tc;
@@ -325,6 +329,10 @@ static int hclge_ieee_setpfc(struct hnae3_handle *h, struct ieee_pfc *pfc)
hdev->tm_info.hw_pfc_map = pfc_map;
hdev->tm_info.pfc_en = pfc->pfc_en;
+ netif_dbg(h, drv, netdev,
+ "set pfc: pfc_en=%u, pfc_map=%u, num_tc=%u\n",
+ pfc->pfc_en, pfc_map, hdev->tm_info.num_tc);
+
hclge_tm_pfc_info_update(hdev);
return hclge_pause_setup_hw(hdev, false);
@@ -345,8 +353,11 @@ static u8 hclge_getdcbx(struct hnae3_handle *h)
static u8 hclge_setdcbx(struct hnae3_handle *h, u8 mode)
{
struct hclge_vport *vport = hclge_get_vport(h);
+ struct net_device *netdev = h->kinfo.netdev;
struct hclge_dev *hdev = vport->back;
+ netif_dbg(h, drv, netdev, "set dcbx: mode=%u\n", mode);
+
/* No support for LLD_MANAGED modes or CEE */
if ((mode & DCB_CAP_DCBX_LLD_MANAGED) ||
(mode & DCB_CAP_DCBX_VER_CEE) ||
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c
index ab625c757a95..025184a0c839 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c
@@ -14,16 +14,8 @@ static int hclge_dbg_get_dfx_bd_num(struct hclge_dev *hdev, int offset)
struct hclge_desc desc[4];
int ret;
- hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_DFX_BD_NUM, true);
- desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
- hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_DFX_BD_NUM, true);
- desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
- hclge_cmd_setup_basic_desc(&desc[2], HCLGE_OPC_DFX_BD_NUM, true);
- desc[2].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
- hclge_cmd_setup_basic_desc(&desc[3], HCLGE_OPC_DFX_BD_NUM, true);
-
- ret = hclge_cmd_send(&hdev->hw, desc, 4);
- if (ret != HCLGE_CMD_EXEC_SUCCESS) {
+ ret = hclge_query_bd_num_cmd_send(hdev, desc);
+ if (ret) {
dev_err(&hdev->pdev->dev,
"get dfx bdnum fail, status is %d.\n", ret);
return ret;
@@ -325,6 +317,12 @@ static void hclge_dbg_dump_tc(struct hclge_dev *hdev)
struct hclge_desc desc;
int i, ret;
+ if (!hnae3_dev_dcb_supported(hdev)) {
+ dev_info(&hdev->pdev->dev,
+ "Only DCB-supported dev supports tc\n");
+ return;
+ }
+
hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_ETS_TC_WEIGHT, true);
ret = hclge_cmd_send(&hdev->hw, &desc, 1);
@@ -409,6 +407,12 @@ static void hclge_dbg_dump_tm_pg(struct hclge_dev *hdev)
dev_info(&hdev->pdev->dev, "QS_SCH qs_id: %u\n", desc.data[0]);
+ if (!hnae3_dev_dcb_supported(hdev)) {
+ dev_info(&hdev->pdev->dev,
+ "Only DCB-supported dev supports tm mapping\n");
+ return;
+ }
+
cmd = HCLGE_OPC_TM_BP_TO_QSET_MAPPING;
hclge_cmd_setup_basic_desc(&desc, cmd, true);
ret = hclge_cmd_send(&hdev->hw, &desc, 1);
@@ -590,6 +594,12 @@ static void hclge_dbg_dump_tm_map(struct hclge_dev *hdev,
dev_info(&hdev->pdev->dev, "%04d | %04d | %02d | %02d\n",
queue_id, qset_id, pri_id, tc_id);
+ if (!hnae3_dev_dcb_supported(hdev)) {
+ dev_info(&hdev->pdev->dev,
+ "Only DCB-supported dev supports tm mapping\n");
+ return;
+ }
+
cmd = HCLGE_OPC_TM_BP_TO_QSET_MAPPING;
bp_to_qs_map_cmd = (struct hclge_bp_to_qs_map_cmd *)desc.data;
for (group_id = 0; group_id < 32; group_id++) {
@@ -715,6 +725,34 @@ static void hclge_dbg_dump_qos_buf_cfg(struct hclge_dev *hdev)
dev_info(&hdev->pdev->dev, "rx_share_buf: 0x%x\n",
rx_buf_cmd->shared_buf);
+ cmd = HCLGE_OPC_RX_COM_WL_ALLOC;
+ hclge_cmd_setup_basic_desc(desc, cmd, true);
+ ret = hclge_cmd_send(&hdev->hw, desc, 1);
+ if (ret)
+ goto err_qos_cmd_send;
+
+ rx_com_wl = (struct hclge_rx_com_wl *)desc[0].data;
+ dev_info(&hdev->pdev->dev, "\n");
+ dev_info(&hdev->pdev->dev, "rx_com_wl: high: 0x%x, low: 0x%x\n",
+ rx_com_wl->com_wl.high, rx_com_wl->com_wl.low);
+
+ cmd = HCLGE_OPC_RX_GBL_PKT_CNT;
+ hclge_cmd_setup_basic_desc(desc, cmd, true);
+ ret = hclge_cmd_send(&hdev->hw, desc, 1);
+ if (ret)
+ goto err_qos_cmd_send;
+
+ rx_packet_cnt = (struct hclge_rx_com_wl *)desc[0].data;
+ dev_info(&hdev->pdev->dev,
+ "rx_global_packet_cnt: high: 0x%x, low: 0x%x\n",
+ rx_packet_cnt->com_wl.high, rx_packet_cnt->com_wl.low);
+ dev_info(&hdev->pdev->dev, "\n");
+
+ if (!hnae3_dev_dcb_supported(hdev)) {
+ dev_info(&hdev->pdev->dev,
+ "Only DCB-supported dev supports rx priv wl\n");
+ return;
+ }
cmd = HCLGE_OPC_RX_PRIV_WL_ALLOC;
hclge_cmd_setup_basic_desc(&desc[0], cmd, true);
desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
@@ -723,7 +761,6 @@ static void hclge_dbg_dump_qos_buf_cfg(struct hclge_dev *hdev)
if (ret)
goto err_qos_cmd_send;
- dev_info(&hdev->pdev->dev, "\n");
rx_priv_wl = (struct hclge_rx_priv_wl_buf *)desc[0].data;
for (i = 0; i < HCLGE_TC_NUM_ONE_DESC; i++)
dev_info(&hdev->pdev->dev,
@@ -758,29 +795,6 @@ static void hclge_dbg_dump_qos_buf_cfg(struct hclge_dev *hdev)
"rx_com_thrd_tc_%d: high: 0x%x, low: 0x%x\n", i + 4,
rx_com_thrd->com_thrd[i].high,
rx_com_thrd->com_thrd[i].low);
-
- cmd = HCLGE_OPC_RX_COM_WL_ALLOC;
- hclge_cmd_setup_basic_desc(desc, cmd, true);
- ret = hclge_cmd_send(&hdev->hw, desc, 1);
- if (ret)
- goto err_qos_cmd_send;
-
- rx_com_wl = (struct hclge_rx_com_wl *)desc[0].data;
- dev_info(&hdev->pdev->dev, "\n");
- dev_info(&hdev->pdev->dev, "rx_com_wl: high: 0x%x, low: 0x%x\n",
- rx_com_wl->com_wl.high, rx_com_wl->com_wl.low);
-
- cmd = HCLGE_OPC_RX_GBL_PKT_CNT;
- hclge_cmd_setup_basic_desc(desc, cmd, true);
- ret = hclge_cmd_send(&hdev->hw, desc, 1);
- if (ret)
- goto err_qos_cmd_send;
-
- rx_packet_cnt = (struct hclge_rx_com_wl *)desc[0].data;
- dev_info(&hdev->pdev->dev,
- "rx_global_packet_cnt: high: 0x%x, low: 0x%x\n",
- rx_packet_cnt->com_wl.high, rx_packet_cnt->com_wl.low);
-
return;
err_qos_cmd_send:
@@ -825,9 +839,9 @@ static void hclge_dbg_dump_mng_table(struct hclge_dev *hdev)
memset(printf_buf, 0, HCLGE_DBG_BUF_LEN);
snprintf(printf_buf, HCLGE_DBG_BUF_LEN,
"%02u |%02x:%02x:%02x:%02x:%02x:%02x|",
- req0->index, req0->mac_add[0], req0->mac_add[1],
- req0->mac_add[2], req0->mac_add[3], req0->mac_add[4],
- req0->mac_add[5]);
+ req0->index, req0->mac_addr[0], req0->mac_addr[1],
+ req0->mac_addr[2], req0->mac_addr[3],
+ req0->mac_addr[4], req0->mac_addr[5]);
snprintf(printf_buf + strlen(printf_buf),
HCLGE_DBG_BUF_LEN - strlen(printf_buf),
@@ -883,14 +897,17 @@ static void hclge_dbg_fd_tcam_read(struct hclge_dev *hdev, u8 stage,
dev_info(&hdev->pdev->dev, " read result tcam key %s(%u):\n",
sel_x ? "x" : "y", loc);
+ /* tcam_data0 ~ tcam_data1 */
req = (u32 *)req1->tcam_data;
for (i = 0; i < 2; i++)
dev_info(&hdev->pdev->dev, "%08x\n", *req++);
+ /* tcam_data2 ~ tcam_data7 */
req = (u32 *)req2->tcam_data;
for (i = 0; i < 6; i++)
dev_info(&hdev->pdev->dev, "%08x\n", *req++);
+ /* tcam_data8 ~ tcam_data12 */
req = (u32 *)req3->tcam_data;
for (i = 0; i < 5; i++)
dev_info(&hdev->pdev->dev, "%08x\n", *req++);
@@ -981,6 +998,33 @@ void hclge_dbg_get_m7_stats_info(struct hclge_dev *hdev)
kfree(desc_src);
}
+#define HCLGE_CMD_NCL_CONFIG_BD_NUM 5
+
+static void hclge_ncl_config_data_print(struct hclge_dev *hdev,
+ struct hclge_desc *desc, int *offset,
+ int *length)
+{
+#define HCLGE_CMD_DATA_NUM 6
+
+ int i;
+ int j;
+
+ for (i = 0; i < HCLGE_CMD_NCL_CONFIG_BD_NUM; i++) {
+ for (j = 0; j < HCLGE_CMD_DATA_NUM; j++) {
+ if (i == 0 && j == 0)
+ continue;
+
+ dev_info(&hdev->pdev->dev, "0x%04x | 0x%08x\n",
+ *offset,
+ le32_to_cpu(desc[i].data[j]));
+ *offset += sizeof(u32);
+ *length -= sizeof(u32);
+ if (*length <= 0)
+ return;
+ }
+ }
+}
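
The factored-out walk visits bd_num descriptors of six 32-bit words each, skips word (0,0) — which echoes the requested offset rather than carrying data — and stops once length is consumed. A standalone sketch of the same traversal over a zeroed stand-in payload:

    /* Standalone sketch of the descriptor walk: 5 BDs x 6 dwords,
     * word (0,0) skipped, stop when 'length' bytes are printed.
     * The payload here is zeroed; real data comes from firmware.
     */
    #include <stdio.h>

    #define BD_NUM   5
    #define DATA_NUM 6

    int main(void)
    {
        unsigned int desc[BD_NUM][DATA_NUM] = { { 0 } };
        int offset = 0x100, length = 40;

        for (int i = 0; i < BD_NUM; i++) {
            for (int j = 0; j < DATA_NUM; j++) {
                if (i == 0 && j == 0)
                    continue;               /* offset echo, not data */
                printf("0x%04x | 0x%08x\n", offset, desc[i][j]);
                offset += 4;                /* sizeof(u32) */
                length -= 4;
                if (length <= 0)
                    return 0;               /* 10 dwords for 40 bytes */
            }
        }
        return 0;
    }
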
+
/* hclge_dbg_dump_ncl_config: print specified range of NCL_CONFIG file
* @hdev: pointer to struct hclge_dev
* @cmd_buf: string that contains offset and length
@@ -990,17 +1034,13 @@ static void hclge_dbg_dump_ncl_config(struct hclge_dev *hdev,
{
#define HCLGE_MAX_NCL_CONFIG_OFFSET 4096
#define HCLGE_MAX_NCL_CONFIG_LENGTH (20 + 24 * 4)
-#define HCLGE_CMD_DATA_NUM 6
- struct hclge_desc desc[5];
- u32 byte_offset;
- int bd_num = 5;
+ struct hclge_desc desc[HCLGE_CMD_NCL_CONFIG_BD_NUM];
+ int bd_num = HCLGE_CMD_NCL_CONFIG_BD_NUM;
int offset;
int length;
int data0;
int ret;
- int i;
- int j;
ret = sscanf(cmd_buf, "%x %x", &offset, &length);
if (ret != 2 || offset >= HCLGE_MAX_NCL_CONFIG_OFFSET ||
@@ -1026,22 +1066,7 @@ static void hclge_dbg_dump_ncl_config(struct hclge_dev *hdev,
if (ret)
return;
- byte_offset = offset;
- for (i = 0; i < bd_num; i++) {
- for (j = 0; j < HCLGE_CMD_DATA_NUM; j++) {
- if (i == 0 && j == 0)
- continue;
-
- dev_info(&hdev->pdev->dev, "0x%04x | 0x%08x\n",
- byte_offset,
- le32_to_cpu(desc[i].data[j]));
- byte_offset += sizeof(u32);
- length -= sizeof(u32);
- if (length <= 0)
- return;
- }
- }
- offset += HCLGE_MAX_NCL_CONFIG_LENGTH;
+ hclge_ncl_config_data_print(hdev, desc, &offset, &length);
}
}
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c
index 0a7243825e7b..2425b3f53ad8 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c
@@ -637,8 +637,8 @@ static void hclge_log_error(struct device *dev, char *reg,
{
while (err->msg) {
if (err->int_msk & err_sts) {
- dev_warn(dev, "%s %s found [error status=0x%x]\n",
- reg, err->msg, err_sts);
+ dev_err(dev, "%s %s found [error status=0x%x]\n",
+ reg, err->msg, err_sts);
if (err->reset_level &&
err->reset_level != HNAE3_NONE_RESET)
set_bit(err->reset_level, reset_requests);
@@ -652,16 +652,11 @@ static void hclge_log_error(struct device *dev, char *reg,
* @desc: descriptor for describing the command
* @cmd: command opcode
* @flag: flag for extended command structure
- * @w_num: offset for setting the read interrupt type.
- * @int_type: select which type of the interrupt for which the error
- * info will be read(RAS-CE/RAS-NFE/RAS-FE etc).
*
* This function queries the error info from hw register(s) using command
*/
static int hclge_cmd_query_error(struct hclge_dev *hdev,
- struct hclge_desc *desc, u32 cmd,
- u16 flag, u8 w_num,
- enum hclge_err_int_type int_type)
+ struct hclge_desc *desc, u32 cmd, u16 flag)
{
struct device *dev = &hdev->pdev->dev;
int desc_num = 1;
@@ -673,8 +668,6 @@ static int hclge_cmd_query_error(struct hclge_dev *hdev,
hclge_cmd_setup_basic_desc(&desc[1], cmd, true);
desc_num = 2;
}
- if (w_num)
- desc[0].data[w_num] = cpu_to_le32(int_type);
ret = hclge_cmd_send(&hdev->hw, &desc[0], desc_num);
if (ret)
@@ -872,8 +865,7 @@ static int hclge_config_tm_hw_err_int(struct hclge_dev *hdev, bool en)
}
/* configure TM QCN hw errors */
- ret = hclge_cmd_query_error(hdev, &desc, HCLGE_TM_QCN_MEM_INT_CFG,
- 0, 0, 0);
+ ret = hclge_cmd_query_error(hdev, &desc, HCLGE_TM_QCN_MEM_INT_CFG, 0);
if (ret) {
dev_err(dev, "fail(%d) to read TM QCN CFG status\n", ret);
return ret;
@@ -1171,8 +1163,8 @@ static int hclge_handle_mpf_ras_error(struct hclge_dev *hdev,
status = le32_to_cpu(*(desc_data + 3)) & BIT(0);
if (status) {
- dev_warn(dev, "SSU_ECC_MULTI_BIT_INT_1 ssu_mem32_ecc_mbit_err found [error status=0x%x]\n",
- status);
+ dev_err(dev, "SSU_ECC_MULTI_BIT_INT_1 ssu_mem32_ecc_mbit_err found [error status=0x%x]\n",
+ status);
set_bit(HNAE3_GLOBAL_RESET, &ae_dev->hw_err_reset_req);
}
@@ -1208,8 +1200,8 @@ static int hclge_handle_mpf_ras_error(struct hclge_dev *hdev,
desc_data = (__le32 *)&desc[5];
status = le32_to_cpu(*(desc_data + 1));
if (status) {
- dev_warn(dev, "PPU_MPF_ABNORMAL_INT_ST1 %s found\n",
- "rpu_rx_pkt_ecc_mbit_err");
+ dev_err(dev,
+ "PPU_MPF_ABNORMAL_INT_ST1 rpu_rx_pkt_ecc_mbit_err found\n");
set_bit(HNAE3_GLOBAL_RESET, &ae_dev->hw_err_reset_req);
}
@@ -1387,17 +1379,17 @@ static int hclge_log_rocee_axi_error(struct hclge_dev *hdev)
return ret;
}
- dev_info(dev, "AXI1: %08X %08X %08X %08X %08X %08X\n",
- le32_to_cpu(desc[0].data[0]), le32_to_cpu(desc[0].data[1]),
- le32_to_cpu(desc[0].data[2]), le32_to_cpu(desc[0].data[3]),
- le32_to_cpu(desc[0].data[4]), le32_to_cpu(desc[0].data[5]));
- dev_info(dev, "AXI2: %08X %08X %08X %08X %08X %08X\n",
- le32_to_cpu(desc[1].data[0]), le32_to_cpu(desc[1].data[1]),
- le32_to_cpu(desc[1].data[2]), le32_to_cpu(desc[1].data[3]),
- le32_to_cpu(desc[1].data[4]), le32_to_cpu(desc[1].data[5]));
- dev_info(dev, "AXI3: %08X %08X %08X %08X\n",
- le32_to_cpu(desc[2].data[0]), le32_to_cpu(desc[2].data[1]),
- le32_to_cpu(desc[2].data[2]), le32_to_cpu(desc[2].data[3]));
+ dev_err(dev, "AXI1: %08X %08X %08X %08X %08X %08X\n",
+ le32_to_cpu(desc[0].data[0]), le32_to_cpu(desc[0].data[1]),
+ le32_to_cpu(desc[0].data[2]), le32_to_cpu(desc[0].data[3]),
+ le32_to_cpu(desc[0].data[4]), le32_to_cpu(desc[0].data[5]));
+ dev_err(dev, "AXI2: %08X %08X %08X %08X %08X %08X\n",
+ le32_to_cpu(desc[1].data[0]), le32_to_cpu(desc[1].data[1]),
+ le32_to_cpu(desc[1].data[2]), le32_to_cpu(desc[1].data[3]),
+ le32_to_cpu(desc[1].data[4]), le32_to_cpu(desc[1].data[5]));
+ dev_err(dev, "AXI3: %08X %08X %08X %08X\n",
+ le32_to_cpu(desc[2].data[0]), le32_to_cpu(desc[2].data[1]),
+ le32_to_cpu(desc[2].data[2]), le32_to_cpu(desc[2].data[3]));
return 0;
}
@@ -1410,18 +1402,18 @@ static int hclge_log_rocee_ecc_error(struct hclge_dev *hdev)
ret = hclge_cmd_query_error(hdev, &desc[0],
HCLGE_QUERY_ROCEE_ECC_RAS_INFO_CMD,
- HCLGE_CMD_FLAG_NEXT, 0, 0);
+ HCLGE_CMD_FLAG_NEXT);
if (ret) {
dev_err(dev, "failed(%d) to query ROCEE ECC error sts\n", ret);
return ret;
}
- dev_info(dev, "ECC1: %08X %08X %08X %08X %08X %08X\n",
- le32_to_cpu(desc[0].data[0]), le32_to_cpu(desc[0].data[1]),
- le32_to_cpu(desc[0].data[2]), le32_to_cpu(desc[0].data[3]),
- le32_to_cpu(desc[0].data[4]), le32_to_cpu(desc[0].data[5]));
- dev_info(dev, "ECC2: %08X %08X %08X\n", le32_to_cpu(desc[1].data[0]),
- le32_to_cpu(desc[1].data[1]), le32_to_cpu(desc[1].data[2]));
+ dev_err(dev, "ECC1: %08X %08X %08X %08X %08X %08X\n",
+ le32_to_cpu(desc[0].data[0]), le32_to_cpu(desc[0].data[1]),
+ le32_to_cpu(desc[0].data[2]), le32_to_cpu(desc[0].data[3]),
+ le32_to_cpu(desc[0].data[4]), le32_to_cpu(desc[0].data[5]));
+ dev_err(dev, "ECC2: %08X %08X %08X\n", le32_to_cpu(desc[1].data[0]),
+ le32_to_cpu(desc[1].data[1]), le32_to_cpu(desc[1].data[2]));
return 0;
}
@@ -1434,7 +1426,7 @@ static int hclge_log_rocee_ovf_error(struct hclge_dev *hdev)
/* read overflow error status */
ret = hclge_cmd_query_error(hdev, &desc[0], HCLGE_ROCEE_PF_RAS_INT_CMD,
- 0, 0, 0);
+ 0);
if (ret) {
dev_err(dev, "failed(%d) to query ROCEE OVF error sts\n", ret);
return ret;
@@ -1450,9 +1442,9 @@ static int hclge_log_rocee_ovf_error(struct hclge_dev *hdev)
le32_to_cpu(desc[0].data[0]);
while (err->msg) {
if (err->int_msk == err_sts) {
- dev_warn(dev, "%s [error status=0x%x] found\n",
- err->msg,
- le32_to_cpu(desc[0].data[0]));
+ dev_err(dev, "%s [error status=0x%x] found\n",
+ err->msg,
+ le32_to_cpu(desc[0].data[0]));
break;
}
err++;
@@ -1460,13 +1452,13 @@ static int hclge_log_rocee_ovf_error(struct hclge_dev *hdev)
}
if (le32_to_cpu(desc[0].data[1]) & HCLGE_ROCEE_OVF_ERR_INT_MASK) {
- dev_warn(dev, "ROCEE TSP OVF [error status=0x%x] found\n",
- le32_to_cpu(desc[0].data[1]));
+ dev_err(dev, "ROCEE TSP OVF [error status=0x%x] found\n",
+ le32_to_cpu(desc[0].data[1]));
}
if (le32_to_cpu(desc[0].data[2]) & HCLGE_ROCEE_OVF_ERR_INT_MASK) {
- dev_warn(dev, "ROCEE SCC OVF [error status=0x%x] found\n",
- le32_to_cpu(desc[0].data[2]));
+ dev_err(dev, "ROCEE SCC OVF [error status=0x%x] found\n",
+ le32_to_cpu(desc[0].data[2]));
}
return 0;
@@ -1483,8 +1475,7 @@ hclge_log_and_clear_rocee_ras_error(struct hclge_dev *hdev)
/* read RAS error interrupt status */
ret = hclge_cmd_query_error(hdev, &desc[0],
- HCLGE_QUERY_CLEAR_ROCEE_RAS_INT,
- 0, 0, 0);
+ HCLGE_QUERY_CLEAR_ROCEE_RAS_INT, 0);
if (ret) {
dev_err(dev, "failed(%d) to query ROCEE RAS INT SRC\n", ret);
/* reset everything for now */
@@ -1495,10 +1486,10 @@ hclge_log_and_clear_rocee_ras_error(struct hclge_dev *hdev)
if (status & HCLGE_ROCEE_AXI_ERR_INT_MASK) {
if (status & HCLGE_ROCEE_RERR_INT_MASK)
- dev_warn(dev, "ROCEE RAS AXI rresp error\n");
+ dev_err(dev, "ROCEE RAS AXI rresp error\n");
if (status & HCLGE_ROCEE_BERR_INT_MASK)
- dev_warn(dev, "ROCEE RAS AXI bresp error\n");
+ dev_err(dev, "ROCEE RAS AXI bresp error\n");
reset_type = HNAE3_FUNC_RESET;
@@ -1508,7 +1499,7 @@ hclge_log_and_clear_rocee_ras_error(struct hclge_dev *hdev)
}
if (status & HCLGE_ROCEE_ECC_INT_MASK) {
- dev_warn(dev, "ROCEE RAS 2bit ECC error\n");
+ dev_err(dev, "ROCEE RAS 2bit ECC error\n");
reset_type = HNAE3_GLOBAL_RESET;
ret = hclge_log_rocee_ecc_error(hdev);
@@ -1566,8 +1557,8 @@ int hclge_config_rocee_ras_interrupt(struct hclge_dev *hdev, bool en)
static void hclge_handle_rocee_ras_error(struct hnae3_ae_dev *ae_dev)
{
- enum hnae3_reset_type reset_type = HNAE3_NONE_RESET;
struct hclge_dev *hdev = ae_dev->priv;
+ enum hnae3_reset_type reset_type;
if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
hdev->pdev->revision < 0x21)
@@ -1649,16 +1640,16 @@ pci_ers_result_t hclge_handle_hw_ras_error(struct hnae3_ae_dev *ae_dev)
/* Handling Non-fatal HNS RAS errors */
if (status & HCLGE_RAS_REG_NFE_MASK) {
- dev_warn(dev,
- "HNS Non-Fatal RAS error(status=0x%x) identified\n",
- status);
+ dev_err(dev,
+ "HNS Non-Fatal RAS error(status=0x%x) identified\n",
+ status);
hclge_handle_all_ras_errors(hdev);
}
/* Handling Non-fatal Rocee RAS errors */
if (hdev->pdev->revision >= 0x21 &&
status & HCLGE_RAS_REG_ROCEE_ERR_MASK) {
- dev_warn(dev, "ROCEE Non-Fatal RAS error identified\n");
+ dev_err(dev, "ROCEE Non-Fatal RAS error identified\n");
hclge_handle_rocee_ras_error(ae_dev);
}
@@ -1737,8 +1728,8 @@ static void hclge_handle_over_8bd_err(struct hclge_dev *hdev,
return;
}
- dev_warn(dev, "PPU_PF_ABNORMAL_INT_ST over_8bd_no_fe found, vf_id(%d), queue_id(%d)\n",
- vf_id, q_id);
+ dev_err(dev, "PPU_PF_ABNORMAL_INT_ST over_8bd_no_fe found, vf_id(%u), queue_id(%u)\n",
+ vf_id, q_id);
if (vf_id) {
if (vf_id >= hdev->num_alloc_vport) {
@@ -1755,8 +1746,8 @@ static void hclge_handle_over_8bd_err(struct hclge_dev *hdev,
ret = hclge_inform_reset_assert_to_vf(&hdev->vport[vf_id]);
if (ret)
- dev_warn(dev, "inform reset to vf(%d) failed %d!\n",
- hdev->vport->vport_id, ret);
+ dev_err(dev, "inform reset to vf(%u) failed %d!\n",
+ hdev->vport->vport_id, ret);
} else {
set_bit(HNAE3_FUNC_RESET, reset_requests);
}
@@ -1802,8 +1793,8 @@ static int hclge_handle_mpf_msix_error(struct hclge_dev *hdev,
status = le32_to_cpu(*(desc_data + 2)) &
HCLGE_PPU_MPF_INT_ST2_MSIX_MASK;
if (status)
- dev_warn(dev, "PPU_MPF_ABNORMAL_INT_ST2 rx_q_search_miss found [dfx status=0x%x\n]",
- status);
+ dev_err(dev, "PPU_MPF_ABNORMAL_INT_ST2 rx_q_search_miss found [dfx status=0x%x\n]",
+ status);
/* clear all main PF MSIx errors */
ret = hclge_clear_hw_msix_error(hdev, desc, true, mpf_bd_num);
@@ -1997,7 +1988,7 @@ void hclge_handle_all_hns_hw_errors(struct hnae3_ae_dev *ae_dev)
/* Handle Non-fatal HNS RAS errors */
if (status & HCLGE_RAS_REG_NFE_MASK) {
- dev_warn(dev, "HNS hw error(RAS) identified during init\n");
+ dev_err(dev, "HNS hw error(RAS) identified during init\n");
hclge_handle_all_ras_errors(hdev);
}
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
index 3fde5471e1c0..dde17be33767 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
@@ -35,6 +35,23 @@
#define BUF_RESERVE_PERCENT 90
#define HCLGE_RESET_MAX_FAIL_CNT 5
+#define HCLGE_RESET_SYNC_TIME 100
+#define HCLGE_PF_RESET_SYNC_TIME 20
+#define HCLGE_PF_RESET_SYNC_CNT 1500
+
+/* Get DFX BD number offset */
+#define HCLGE_DFX_BIOS_BD_OFFSET 1
+#define HCLGE_DFX_SSU_0_BD_OFFSET 2
+#define HCLGE_DFX_SSU_1_BD_OFFSET 3
+#define HCLGE_DFX_IGU_BD_OFFSET 4
+#define HCLGE_DFX_RPU_0_BD_OFFSET 5
+#define HCLGE_DFX_RPU_1_BD_OFFSET 6
+#define HCLGE_DFX_NCSI_BD_OFFSET 7
+#define HCLGE_DFX_RTC_BD_OFFSET 8
+#define HCLGE_DFX_PPP_BD_OFFSET 9
+#define HCLGE_DFX_RCB_BD_OFFSET 10
+#define HCLGE_DFX_TQP_BD_OFFSET 11
+#define HCLGE_DFX_SSU_2_BD_OFFSET 12
static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps);
static int hclge_init_vlan_config(struct hclge_dev *hdev);
@@ -317,6 +334,80 @@ static const u8 hclge_hash_key[] = {
0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA
};
+static const u32 hclge_dfx_bd_offset_list[] = {
+ HCLGE_DFX_BIOS_BD_OFFSET,
+ HCLGE_DFX_SSU_0_BD_OFFSET,
+ HCLGE_DFX_SSU_1_BD_OFFSET,
+ HCLGE_DFX_IGU_BD_OFFSET,
+ HCLGE_DFX_RPU_0_BD_OFFSET,
+ HCLGE_DFX_RPU_1_BD_OFFSET,
+ HCLGE_DFX_NCSI_BD_OFFSET,
+ HCLGE_DFX_RTC_BD_OFFSET,
+ HCLGE_DFX_PPP_BD_OFFSET,
+ HCLGE_DFX_RCB_BD_OFFSET,
+ HCLGE_DFX_TQP_BD_OFFSET,
+ HCLGE_DFX_SSU_2_BD_OFFSET
+};
+
+static const enum hclge_opcode_type hclge_dfx_reg_opcode_list[] = {
+ HCLGE_OPC_DFX_BIOS_COMMON_REG,
+ HCLGE_OPC_DFX_SSU_REG_0,
+ HCLGE_OPC_DFX_SSU_REG_1,
+ HCLGE_OPC_DFX_IGU_EGU_REG,
+ HCLGE_OPC_DFX_RPU_REG_0,
+ HCLGE_OPC_DFX_RPU_REG_1,
+ HCLGE_OPC_DFX_NCSI_REG,
+ HCLGE_OPC_DFX_RTC_REG,
+ HCLGE_OPC_DFX_PPP_REG,
+ HCLGE_OPC_DFX_RCB_REG,
+ HCLGE_OPC_DFX_TQP_REG,
+ HCLGE_OPC_DFX_SSU_REG_2
+};
+
+static const struct key_info meta_data_key_info[] = {
+ { PACKET_TYPE_ID, 6},
+ { IP_FRAGEMENT, 1},
+ { ROCE_TYPE, 1},
+ { NEXT_KEY, 5},
+ { VLAN_NUMBER, 2},
+ { SRC_VPORT, 12},
+ { DST_VPORT, 12},
+ { TUNNEL_PACKET, 1},
+};
+
+static const struct key_info tuple_key_info[] = {
+ { OUTER_DST_MAC, 48},
+ { OUTER_SRC_MAC, 48},
+ { OUTER_VLAN_TAG_FST, 16},
+ { OUTER_VLAN_TAG_SEC, 16},
+ { OUTER_ETH_TYPE, 16},
+ { OUTER_L2_RSV, 16},
+ { OUTER_IP_TOS, 8},
+ { OUTER_IP_PROTO, 8},
+ { OUTER_SRC_IP, 32},
+ { OUTER_DST_IP, 32},
+ { OUTER_L3_RSV, 16},
+ { OUTER_SRC_PORT, 16},
+ { OUTER_DST_PORT, 16},
+ { OUTER_L4_RSV, 32},
+ { OUTER_TUN_VNI, 24},
+ { OUTER_TUN_FLOW_ID, 8},
+ { INNER_DST_MAC, 48},
+ { INNER_SRC_MAC, 48},
+ { INNER_VLAN_TAG_FST, 16},
+ { INNER_VLAN_TAG_SEC, 16},
+ { INNER_ETH_TYPE, 16},
+ { INNER_L2_RSV, 16},
+ { INNER_IP_TOS, 8},
+ { INNER_IP_PROTO, 8},
+ { INNER_SRC_IP, 32},
+ { INNER_DST_IP, 32},
+ { INNER_L3_RSV, 16},
+ { INNER_SRC_PORT, 16},
+ { INNER_DST_PORT, 16},
+ { INNER_L4_RSV, 32},
+};
+
static int hclge_mac_update_stats_defective(struct hclge_dev *hdev)
{
#define HCLGE_MAC_CMD_NUM 21
@@ -364,9 +455,13 @@ static int hclge_mac_update_stats_complete(struct hclge_dev *hdev, u32 desc_num)
u16 i, k, n;
int ret;
- desc = kcalloc(desc_num, sizeof(struct hclge_desc), GFP_KERNEL);
+ /* This may be called inside atomic sections,
+ * so GFP_ATOMIC is more suitable here
+ */
+ desc = kcalloc(desc_num, sizeof(struct hclge_desc), GFP_ATOMIC);
if (!desc)
return -ENOMEM;
+
hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC_ALL, true);
ret = hclge_cmd_send(&hdev->hw, desc, desc_num);
if (ret) {
@@ -702,14 +797,16 @@ static void hclge_get_stats(struct hnae3_handle *handle, u64 *data)
p = hclge_tqps_get_stats(handle, p);
}
-static void hclge_get_mac_pause_stat(struct hnae3_handle *handle, u64 *tx_cnt,
- u64 *rx_cnt)
+static void hclge_get_mac_stat(struct hnae3_handle *handle,
+ struct hns3_mac_stats *mac_stats)
{
struct hclge_vport *vport = hclge_get_vport(handle);
struct hclge_dev *hdev = vport->back;
- *tx_cnt = hdev->hw_stats.mac_stats.mac_tx_mac_pause_num;
- *rx_cnt = hdev->hw_stats.mac_stats.mac_rx_mac_pause_num;
+ hclge_update_stats(handle, NULL);
+
+ mac_stats->tx_pause_cnt = hdev->hw_stats.mac_stats.mac_tx_mac_pause_num;
+ mac_stats->rx_pause_cnt = hdev->hw_stats.mac_stats.mac_rx_mac_pause_num;
}
static int hclge_parse_func_status(struct hclge_dev *hdev,
@@ -1075,6 +1172,7 @@ static void hclge_parse_link_mode(struct hclge_dev *hdev, u8 speed_ability)
else if (media_type == HNAE3_MEDIA_TYPE_BACKPLANE)
hclge_parse_backplane_link_mode(hdev, speed_ability);
}
+
static void hclge_parse_cfg(struct hclge_cfg *cfg, struct hclge_desc *desc)
{
struct hclge_cfg_param_cmd *req;
@@ -1270,6 +1368,12 @@ static int hclge_configure(struct hclge_dev *hdev)
hclge_init_kdump_kernel_config(hdev);
+ /* Set the initial affinity based on the PCI function number */
+ i = cpumask_weight(cpumask_of_node(dev_to_node(&hdev->pdev->dev)));
+ i = i ? PCI_FUNC(hdev->pdev->devfn) % i : 0;
+ cpumask_set_cpu(cpumask_local_spread(i, dev_to_node(&hdev->pdev->dev)),
+ &hdev->affinity_mask);
+
return ret;
}
@@ -2499,22 +2603,29 @@ static void hclge_mbx_task_schedule(struct hclge_dev *hdev)
{
if (!test_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state) &&
!test_and_set_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state))
- schedule_work(&hdev->mbx_service_task);
+ queue_work_on(cpumask_first(&hdev->affinity_mask), system_wq,
+ &hdev->mbx_service_task);
}
static void hclge_reset_task_schedule(struct hclge_dev *hdev)
{
if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
!test_and_set_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state))
- schedule_work(&hdev->rst_service_task);
+ queue_work_on(cpumask_first(&hdev->affinity_mask), system_wq,
+ &hdev->rst_service_task);
}
-static void hclge_task_schedule(struct hclge_dev *hdev)
+void hclge_task_schedule(struct hclge_dev *hdev, unsigned long delay_time)
{
if (!test_bit(HCLGE_STATE_DOWN, &hdev->state) &&
!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
- !test_and_set_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state))
- (void)schedule_work(&hdev->service_task);
+ !test_and_set_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state)) {
+ hdev->hw_stats.stats_timer++;
+ hdev->fd_arfs_expire_timer++;
+ mod_delayed_work_on(cpumask_first(&hdev->affinity_mask),
+ system_wq, &hdev->service_task,
+ delay_time);
+ }
}
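
With this change the periodic service work re-arms itself through the workqueue API instead of a separate timer: mod_delayed_work_on() both queues the work on a chosen CPU (here the first CPU in the device's affinity mask) and sets the delay in one call. A kernel-style sketch of the pattern, illustrative module code rather than the driver itself (CPU 0 stands in for cpumask_first(&hdev->affinity_mask)):

	#include <linux/workqueue.h>

	static struct delayed_work demo_task;

	static void demo_task_fn(struct work_struct *work)
	{
		/* ... periodic service work ... */

		/* re-arm roughly once per second, the same shape as
		 * hclge_service_task() rescheduling itself
		 */
		mod_delayed_work_on(0, system_wq, &demo_task,
				    round_jiffies_relative(HZ));
	}

	/* one-time setup, e.g. in probe():
	 *	INIT_DELAYED_WORK(&demo_task, demo_task_fn);
	 *	mod_delayed_work_on(0, system_wq, &demo_task, 0);
	 */
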
static int hclge_get_mac_link_status(struct hclge_dev *hdev)
@@ -2729,25 +2840,6 @@ static int hclge_get_status(struct hnae3_handle *handle)
return hdev->hw.mac.link;
}
-static void hclge_service_timer(struct timer_list *t)
-{
- struct hclge_dev *hdev = from_timer(hdev, t, service_timer);
-
- mod_timer(&hdev->service_timer, jiffies + HZ);
- hdev->hw_stats.stats_timer++;
- hdev->fd_arfs_expire_timer++;
- hclge_task_schedule(hdev);
-}
-
-static void hclge_service_complete(struct hclge_dev *hdev)
-{
- WARN_ON(!test_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state));
-
- /* Flush memory before next watchdog */
- smp_mb__before_atomic();
- clear_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state);
-}
-
static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval)
{
u32 rst_src_reg, cmdq_src_reg, msix_src_reg;
@@ -2763,9 +2855,9 @@ static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval)
* defer the processing of the mailbox events. Since, we would have not
* cleared RX CMDQ event this time we would receive again another
* interrupt from H/W just for the mailbox.
+ *
+ * check for vector0 reset event sources
*/
-
- /* check for vector0 reset event sources */
if (BIT(HCLGE_VECTOR0_IMPRESET_INT_B) & rst_src_reg) {
dev_info(&hdev->pdev->dev, "IMP reset interrupt\n");
set_bit(HNAE3_IMP_RESET, &hdev->reset_pending);
@@ -2882,10 +2974,15 @@ static irqreturn_t hclge_misc_irq_handle(int irq, void *data)
break;
}
- /* clear the source of interrupt if it is not cause by reset */
+ hclge_clear_event_cause(hdev, event_cause, clearval);
+
+ /* Enable interrupt if it is not caused by reset. And when
+ * clearval equals 0, it means the interrupt status may be
+ * cleared by hardware before the driver reads the status register.
+ * For this case, the vector0 interrupt also should be enabled.
+ */
if (!clearval ||
event_cause == HCLGE_VECTOR0_EVENT_MBX) {
- hclge_clear_event_cause(hdev, event_cause, clearval);
hclge_enable_vector(&hdev->misc_vector, true);
}
@@ -2918,6 +3015,36 @@ static void hclge_get_misc_vector(struct hclge_dev *hdev)
hdev->num_msi_used += 1;
}
+static void hclge_irq_affinity_notify(struct irq_affinity_notify *notify,
+ const cpumask_t *mask)
+{
+ struct hclge_dev *hdev = container_of(notify, struct hclge_dev,
+ affinity_notify);
+
+ cpumask_copy(&hdev->affinity_mask, mask);
+}
+
+static void hclge_irq_affinity_release(struct kref *ref)
+{
+}
+
+static void hclge_misc_affinity_setup(struct hclge_dev *hdev)
+{
+ irq_set_affinity_hint(hdev->misc_vector.vector_irq,
+ &hdev->affinity_mask);
+
+ hdev->affinity_notify.notify = hclge_irq_affinity_notify;
+ hdev->affinity_notify.release = hclge_irq_affinity_release;
+ irq_set_affinity_notifier(hdev->misc_vector.vector_irq,
+ &hdev->affinity_notify);
+}
+
+static void hclge_misc_affinity_teardown(struct hclge_dev *hdev)
+{
+ irq_set_affinity_notifier(hdev->misc_vector.vector_irq, NULL);
+ irq_set_affinity_hint(hdev->misc_vector.vector_irq, NULL);
+}
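
For context on the two callbacks above: irq_set_affinity_hint() publishes the preferred CPU mask for the misc vector, and the irq_affinity_notify hooks keep hdev->affinity_mask in sync when userspace rewrites /proc/irq/*/smp_affinity, so later queue_work_on() calls follow the IRQ. The release callback stays empty because the notifier is embedded in hclge_dev rather than allocated. A condensed sketch of the same wiring with illustrative names:

	#include <linux/interrupt.h>

	struct demo_dev {
		cpumask_t affinity_mask;
		struct irq_affinity_notify affinity_notify;
	};

	static void demo_affinity_notify(struct irq_affinity_notify *notify,
					 const cpumask_t *mask)
	{
		struct demo_dev *dd = container_of(notify, struct demo_dev,
						   affinity_notify);

		/* remember where the IRQ runs now; future work follows it */
		cpumask_copy(&dd->affinity_mask, mask);
	}

	static void demo_affinity_release(struct kref *ref)
	{
		/* nothing to free: the notifier is embedded in demo_dev */
	}

	static void demo_affinity_setup(struct demo_dev *dd, int irq)
	{
		irq_set_affinity_hint(irq, &dd->affinity_mask);
		dd->affinity_notify.notify = demo_affinity_notify;
		dd->affinity_notify.release = demo_affinity_release;
		irq_set_affinity_notifier(irq, &dd->affinity_notify);
	}
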
+
static int hclge_misc_irq_init(struct hclge_dev *hdev)
{
int ret;
@@ -3105,6 +3232,39 @@ static int hclge_set_all_vf_rst(struct hclge_dev *hdev, bool reset)
return 0;
}
+static int hclge_func_reset_sync_vf(struct hclge_dev *hdev)
+{
+ struct hclge_pf_rst_sync_cmd *req;
+ struct hclge_desc desc;
+ int cnt = 0;
+ int ret;
+
+ req = (struct hclge_pf_rst_sync_cmd *)desc.data;
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_VF_RST_RDY, true);
+
+ do {
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ /* to be compatible with old firmware, wait
+ * 100 ms for VF to stop IO
+ */
+ if (ret == -EOPNOTSUPP) {
+ msleep(HCLGE_RESET_SYNC_TIME);
+ return 0;
+ } else if (ret) {
+ dev_err(&hdev->pdev->dev, "sync with VF fail %d!\n",
+ ret);
+ return ret;
+ } else if (req->all_vf_ready) {
+ return 0;
+ }
+ msleep(HCLGE_PF_RESET_SYNC_TIME);
+ hclge_cmd_reuse_desc(&desc, true);
+ } while (cnt++ < HCLGE_PF_RESET_SYNC_CNT);
+
+ dev_err(&hdev->pdev->dev, "sync with VF timeout!\n");
+ return -ETIME;
+}
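
hclge_func_reset_sync_vf() polls the firmware up to 1500 times at 20 ms intervals (about 30 s worst case) and falls back to the old fixed 100 ms wait when the query opcode is unsupported. A self-contained userspace model of that retry shape, with a purely hypothetical stub standing in for the HCLGE_OPC_QUERY_VF_RST_RDY command:

	#include <errno.h>
	#include <unistd.h>

	#define PF_RESET_SYNC_TIME_MS 20	/* HCLGE_PF_RESET_SYNC_TIME */
	#define PF_RESET_SYNC_CNT     1500	/* HCLGE_PF_RESET_SYNC_CNT */

	/* hypothetical stub: the VFs report ready on the third poll */
	static int query_all_vf_ready(int *ready)
	{
		static int polls;

		*ready = (++polls >= 3);
		return 0;
	}

	static int reset_sync_vf(void)
	{
		int cnt = 0, ready, ret;

		do {
			ret = query_all_vf_ready(&ready);
			if (ret == -EOPNOTSUPP) {	/* old firmware */
				usleep(100 * 1000);	/* fixed 100 ms wait */
				return 0;
			} else if (ret) {
				return ret;
			} else if (ready) {
				return 0;
			}
			usleep(PF_RESET_SYNC_TIME_MS * 1000);
		} while (cnt++ < PF_RESET_SYNC_CNT);

		return -ETIME;
	}

	int main(void)
	{
		return reset_sync_vf() ? 1 : 0;
	}
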
+
int hclge_func_reset_cmd(struct hclge_dev *hdev, int func_id)
{
struct hclge_desc desc;
@@ -3229,7 +3389,13 @@ static void hclge_clear_reset_cause(struct hclge_dev *hdev)
if (!clearval)
return;
- hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG, clearval);
+ /* For revision 0x20, the reset interrupt source
+ * can only be cleared after the hardware reset is done
+ */
+ if (hdev->pdev->revision == 0x20)
+ hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG,
+ clearval);
+
hclge_enable_vector(&hdev->misc_vector, true);
}
@@ -3250,19 +3416,33 @@ static int hclge_reset_prepare_down(struct hclge_dev *hdev)
return ret;
}
-static int hclge_reset_prepare_wait(struct hclge_dev *hdev)
+static void hclge_reset_handshake(struct hclge_dev *hdev, bool enable)
{
-#define HCLGE_RESET_SYNC_TIME 100
+ u32 reg_val;
+
+ reg_val = hclge_read_dev(&hdev->hw, HCLGE_NIC_CSQ_DEPTH_REG);
+ if (enable)
+ reg_val |= HCLGE_NIC_SW_RST_RDY;
+ else
+ reg_val &= ~HCLGE_NIC_SW_RST_RDY;
+
+ hclge_write_dev(&hdev->hw, HCLGE_NIC_CSQ_DEPTH_REG, reg_val);
+}
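
The handshake helper is a plain read-modify-write on one bit of the CSQ depth register; judging by the VF-side defines later in this patch (HCLGEVF_NIC_SW_RST_RDY_B is bit 16), the PF-side HCLGE_NIC_SW_RST_RDY is presumably the same bit, replacing the old unconditional HCLGE_NIC_CMQ_ENABLE write. The bit manipulation, modeled on a register image:

	#include <assert.h>
	#include <stdint.h>

	#define NIC_SW_RST_RDY (1u << 16)	/* assumed bit 16, as on the VF side */

	/* model of hclge_reset_handshake(): set or clear the ready bit
	 * without disturbing the descriptor-count bits in the register
	 */
	static uint32_t set_handshake(uint32_t reg, int enable)
	{
		return enable ? (reg | NIC_SW_RST_RDY) : (reg & ~NIC_SW_RST_RDY);
	}

	int main(void)
	{
		uint32_t reg = 0x80;	/* hypothetical depth field contents */

		assert(set_handshake(reg, 1) == (0x80u | NIC_SW_RST_RDY));
		assert(set_handshake(set_handshake(reg, 1), 0) == 0x80u);
		return 0;
	}
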
+static int hclge_reset_prepare_wait(struct hclge_dev *hdev)
+{
u32 reg_val;
int ret = 0;
switch (hdev->reset_type) {
case HNAE3_FUNC_RESET:
- /* There is no mechanism for PF to know if VF has stopped IO
- * for now, just wait 100 ms for VF to stop IO
+ /* confirm that all running VFs are ready
+ * before requesting the PF reset
*/
- msleep(HCLGE_RESET_SYNC_TIME);
+ ret = hclge_func_reset_sync_vf(hdev);
+ if (ret)
+ return ret;
+
ret = hclge_func_reset_cmd(hdev, 0);
if (ret) {
dev_err(&hdev->pdev->dev,
@@ -3279,10 +3459,13 @@ static int hclge_reset_prepare_wait(struct hclge_dev *hdev)
hdev->rst_stats.pf_rst_cnt++;
break;
case HNAE3_FLR_RESET:
- /* There is no mechanism for PF to know if VF has stopped IO
- * for now, just wait 100 ms for VF to stop IO
+ /* confirm that all running VFs are ready
+ * before requesting the PF reset
*/
- msleep(HCLGE_RESET_SYNC_TIME);
+ ret = hclge_func_reset_sync_vf(hdev);
+ if (ret)
+ return ret;
+
set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
set_bit(HNAE3_FLR_DOWN, &hdev->flr_state);
hdev->rst_stats.flr_rst_cnt++;
@@ -3298,14 +3481,13 @@ static int hclge_reset_prepare_wait(struct hclge_dev *hdev)
/* inform hardware that preparatory work is done */
msleep(HCLGE_RESET_SYNC_TIME);
- hclge_write_dev(&hdev->hw, HCLGE_NIC_CSQ_DEPTH_REG,
- HCLGE_NIC_CMQ_ENABLE);
+ hclge_reset_handshake(hdev, true);
dev_info(&hdev->pdev->dev, "prepare wait ok\n");
return ret;
}
-static bool hclge_reset_err_handle(struct hclge_dev *hdev, bool is_timeout)
+static bool hclge_reset_err_handle(struct hclge_dev *hdev)
{
#define MAX_RESET_FAIL_CNT 5
@@ -3322,27 +3504,34 @@ static bool hclge_reset_err_handle(struct hclge_dev *hdev, bool is_timeout)
return false;
} else if (hdev->reset_fail_cnt < MAX_RESET_FAIL_CNT) {
hdev->reset_fail_cnt++;
- if (is_timeout) {
- set_bit(hdev->reset_type, &hdev->reset_pending);
- dev_info(&hdev->pdev->dev,
- "re-schedule to wait for hw reset done\n");
- return true;
- }
-
- dev_info(&hdev->pdev->dev, "Upgrade reset level\n");
- hclge_clear_reset_cause(hdev);
- set_bit(HNAE3_GLOBAL_RESET, &hdev->default_reset_request);
- mod_timer(&hdev->reset_timer,
- jiffies + HCLGE_RESET_INTERVAL);
-
- return false;
+ set_bit(hdev->reset_type, &hdev->reset_pending);
+ dev_info(&hdev->pdev->dev,
+ "re-schedule reset task(%d)\n",
+ hdev->reset_fail_cnt);
+ return true;
}
hclge_clear_reset_cause(hdev);
+
+ /* recover the handshake status when reset fails */
+ hclge_reset_handshake(hdev, true);
+
dev_err(&hdev->pdev->dev, "Reset fail!\n");
return false;
}
+static int hclge_set_rst_done(struct hclge_dev *hdev)
+{
+ struct hclge_pf_rst_done_cmd *req;
+ struct hclge_desc desc;
+
+ req = (struct hclge_pf_rst_done_cmd *)desc.data;
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_PF_RST_DONE, false);
+ req->pf_rst_done |= HCLGE_PF_RESET_DONE_BIT;
+
+ return hclge_cmd_send(&hdev->hw, &desc, 1);
+}
+
static int hclge_reset_prepare_up(struct hclge_dev *hdev)
{
int ret = 0;
@@ -3353,10 +3542,18 @@ static int hclge_reset_prepare_up(struct hclge_dev *hdev)
case HNAE3_FLR_RESET:
ret = hclge_set_all_vf_rst(hdev, false);
break;
+ case HNAE3_GLOBAL_RESET:
+ /* fall through */
+ case HNAE3_IMP_RESET:
+ ret = hclge_set_rst_done(hdev);
+ break;
default:
break;
}
+ /* clear the handshake status after re-initialization is done */
+ hclge_reset_handshake(hdev, false);
+
return ret;
}
@@ -3382,7 +3579,6 @@ static int hclge_reset_stack(struct hclge_dev *hdev)
static void hclge_reset(struct hclge_dev *hdev)
{
struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
- bool is_timeout = false;
int ret;
/* Initialize ae_dev reset status as well, in case enet layer wants to
@@ -3410,10 +3606,8 @@ static void hclge_reset(struct hclge_dev *hdev)
if (ret)
goto err_reset;
- if (hclge_reset_wait(hdev)) {
- is_timeout = true;
+ if (hclge_reset_wait(hdev))
goto err_reset;
- }
hdev->rst_stats.hw_reset_done_cnt++;
@@ -3458,14 +3652,22 @@ static void hclge_reset(struct hclge_dev *hdev)
hdev->reset_fail_cnt = 0;
hdev->rst_stats.reset_done_cnt++;
ae_dev->reset_type = HNAE3_NONE_RESET;
- del_timer(&hdev->reset_timer);
+
+ /* if default_reset_request has a higher-level reset request,
+ * it should be handled as soon as possible, since some errors
+ * need this kind of reset to be fixed.
+ */
+ hdev->reset_level = hclge_get_reset_level(ae_dev,
+ &hdev->default_reset_request);
+ if (hdev->reset_level != HNAE3_NONE_RESET)
+ set_bit(hdev->reset_level, &hdev->reset_request);
return;
err_reset_lock:
rtnl_unlock();
err_reset:
- if (hclge_reset_err_handle(hdev, is_timeout))
+ if (hclge_reset_err_handle(hdev))
hclge_reset_task_schedule(hdev);
}
@@ -3493,9 +3695,10 @@ static void hclge_reset_event(struct pci_dev *pdev, struct hnae3_handle *handle)
handle = &hdev->vport[0].nic;
if (time_before(jiffies, (hdev->last_reset_time +
- HCLGE_RESET_INTERVAL)))
+ HCLGE_RESET_INTERVAL))) {
+ mod_timer(&hdev->reset_timer, jiffies + HCLGE_RESET_INTERVAL);
return;
- else if (hdev->default_reset_request)
+ } else if (hdev->default_reset_request)
hdev->reset_level =
hclge_get_reset_level(ae_dev,
&hdev->default_reset_request);
@@ -3525,6 +3728,12 @@ static void hclge_reset_timer(struct timer_list *t)
{
struct hclge_dev *hdev = from_timer(hdev, t, reset_timer);
+ /* if default_reset_request has no value, it means that this reset
+ * request has already been handled, so just return here
+ */
+ if (!hdev->default_reset_request)
+ return;
+
dev_info(&hdev->pdev->dev,
"triggering reset in reset timer\n");
hclge_reset_event(hdev->pdev, NULL);
@@ -3606,7 +3815,9 @@ static void hclge_update_vport_alive(struct hclge_dev *hdev)
static void hclge_service_task(struct work_struct *work)
{
struct hclge_dev *hdev =
- container_of(work, struct hclge_dev, service_task);
+ container_of(work, struct hclge_dev, service_task.work);
+
+ clear_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state);
if (hdev->hw_stats.stats_timer >= HCLGE_STATS_TIMER_INTERVAL) {
hclge_update_stats_for_all(hdev);
@@ -3621,7 +3832,8 @@ static void hclge_service_task(struct work_struct *work)
hclge_rfs_filter_expire(hdev);
hdev->fd_arfs_expire_timer = 0;
}
- hclge_service_complete(hdev);
+
+ hclge_task_schedule(hdev, round_jiffies_relative(HZ));
}
struct hclge_vport *hclge_get_vport(struct hnae3_handle *handle)
@@ -4197,8 +4409,8 @@ int hclge_bind_ring_with_vector(struct hclge_vport *vport,
struct hclge_dev *hdev = vport->back;
struct hnae3_ring_chain_node *node;
struct hclge_desc desc;
- struct hclge_ctrl_vector_chain_cmd *req
- = (struct hclge_ctrl_vector_chain_cmd *)desc.data;
+ struct hclge_ctrl_vector_chain_cmd *req =
+ (struct hclge_ctrl_vector_chain_cmd *)desc.data;
enum hclge_cmd_status status;
enum hclge_opcode_type op;
u16 tqp_type_and_id;
@@ -5808,7 +6020,7 @@ static int hclge_add_fd_entry_by_arfs(struct hnae3_handle *handle, u16 queue_id,
return -ENOSPC;
}
- rule = kzalloc(sizeof(*rule), GFP_KERNEL);
+ rule = kzalloc(sizeof(*rule), GFP_ATOMIC);
if (!rule) {
spin_unlock_bh(&hdev->fd_rule_lock);
@@ -6160,10 +6372,13 @@ static void hclge_set_timer_task(struct hnae3_handle *handle, bool enable)
struct hclge_dev *hdev = vport->back;
if (enable) {
- mod_timer(&hdev->service_timer, jiffies + HZ);
+ hclge_task_schedule(hdev, round_jiffies_relative(HZ));
} else {
- del_timer_sync(&hdev->service_timer);
- cancel_work_sync(&hdev->service_task);
+ /* Set the DOWN flag here to prevent the service task from
+ * being scheduled again
+ */
+ set_bit(HCLGE_STATE_DOWN, &hdev->state);
+ cancel_delayed_work_sync(&hdev->service_task);
clear_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state);
}
}
@@ -6202,12 +6417,15 @@ static void hclge_ae_stop(struct hnae3_handle *handle)
if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) &&
hdev->reset_type != HNAE3_FUNC_RESET) {
hclge_mac_stop_phy(hdev);
+ hclge_update_link_status(hdev);
return;
}
for (i = 0; i < handle->kinfo.num_tqps; i++)
hclge_reset_tqp(handle, i);
+ hclge_config_mac_tnl_int(hdev, false);
+
/* Mac disable */
hclge_cfg_mac_mode(hdev, false);
@@ -6249,7 +6467,6 @@ static int hclge_get_mac_vlan_cmd_status(struct hclge_vport *vport,
enum hclge_mac_vlan_tbl_opcode op)
{
struct hclge_dev *hdev = vport->back;
- int return_status = -EIO;
if (cmdq_resp) {
dev_err(&hdev->pdev->dev,
@@ -6260,52 +6477,53 @@ static int hclge_get_mac_vlan_cmd_status(struct hclge_vport *vport,
if (op == HCLGE_MAC_VLAN_ADD) {
if ((!resp_code) || (resp_code == 1)) {
- return_status = 0;
+ return 0;
} else if (resp_code == HCLGE_ADD_UC_OVERFLOW) {
- return_status = -ENOSPC;
dev_err(&hdev->pdev->dev,
"add mac addr failed for uc_overflow.\n");
+ return -ENOSPC;
} else if (resp_code == HCLGE_ADD_MC_OVERFLOW) {
- return_status = -ENOSPC;
dev_err(&hdev->pdev->dev,
"add mac addr failed for mc_overflow.\n");
- } else {
- dev_err(&hdev->pdev->dev,
- "add mac addr failed for undefined, code=%d.\n",
- resp_code);
+ return -ENOSPC;
}
+
+ dev_err(&hdev->pdev->dev,
+ "add mac addr failed for undefined, code=%u.\n",
+ resp_code);
+ return -EIO;
} else if (op == HCLGE_MAC_VLAN_REMOVE) {
if (!resp_code) {
- return_status = 0;
+ return 0;
} else if (resp_code == 1) {
- return_status = -ENOENT;
dev_dbg(&hdev->pdev->dev,
"remove mac addr failed for miss.\n");
- } else {
- dev_err(&hdev->pdev->dev,
- "remove mac addr failed for undefined, code=%d.\n",
- resp_code);
+ return -ENOENT;
}
+
+ dev_err(&hdev->pdev->dev,
+ "remove mac addr failed for undefined, code=%u.\n",
+ resp_code);
+ return -EIO;
} else if (op == HCLGE_MAC_VLAN_LKUP) {
if (!resp_code) {
- return_status = 0;
+ return 0;
} else if (resp_code == 1) {
- return_status = -ENOENT;
dev_dbg(&hdev->pdev->dev,
"lookup mac addr failed for miss.\n");
- } else {
- dev_err(&hdev->pdev->dev,
- "lookup mac addr failed for undefined, code=%d.\n",
- resp_code);
+ return -ENOENT;
}
- } else {
- return_status = -EINVAL;
+
dev_err(&hdev->pdev->dev,
- "unknown opcode for get_mac_vlan_cmd_status,opcode=%d.\n",
- op);
+ "lookup mac addr failed for undefined, code=%u.\n",
+ resp_code);
+ return -EIO;
}
- return return_status;
+ dev_err(&hdev->pdev->dev,
+ "unknown opcode for get_mac_vlan_cmd_status, opcode=%d.\n", op);
+
+ return -EINVAL;
}
static int hclge_update_desc_vfid(struct hclge_desc *desc, int vfid, bool clr)
@@ -7829,7 +8047,7 @@ int hclge_set_vlan_filter(struct hnae3_handle *handle, __be16 proto,
return -EBUSY;
}
- /* When port base vlan enabled, we use port base vlan as the vlan
+ /* when port base vlan enabled, we use port base vlan as the vlan
* filter entry. In this case, we don't update vlan filter table
* when user add new vlan or remove exist vlan, just update the vport
* vlan list. The vlan id in vlan list will be written in vlan filter
@@ -7848,7 +8066,7 @@ int hclge_set_vlan_filter(struct hnae3_handle *handle, __be16 proto,
hclge_add_vport_vlan_table(vport, vlan_id,
writen_to_tbl);
} else if (is_kill) {
- /* When remove hw vlan filter failed, record the vlan id,
+ /* when removing the hw vlan filter failed, record the vlan id,
* and try to remove it from hw later, to be consistent
* with stack
*/
@@ -8122,28 +8340,15 @@ static int hclge_cfg_pauseparam(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
{
int ret;
- if (rx_en && tx_en)
- hdev->fc_mode_last_time = HCLGE_FC_FULL;
- else if (rx_en && !tx_en)
- hdev->fc_mode_last_time = HCLGE_FC_RX_PAUSE;
- else if (!rx_en && tx_en)
- hdev->fc_mode_last_time = HCLGE_FC_TX_PAUSE;
- else
- hdev->fc_mode_last_time = HCLGE_FC_NONE;
-
if (hdev->tm_info.fc_mode == HCLGE_FC_PFC)
return 0;
ret = hclge_mac_pause_en_cfg(hdev, tx_en, rx_en);
- if (ret) {
- dev_err(&hdev->pdev->dev, "configure pauseparam error, ret = %d.\n",
- ret);
- return ret;
- }
-
- hdev->tm_info.fc_mode = hdev->fc_mode_last_time;
+ if (ret)
+ dev_err(&hdev->pdev->dev,
+ "configure pauseparam error, ret = %d.\n", ret);
- return 0;
+ return ret;
}
int hclge_cfg_flowctrl(struct hclge_dev *hdev)
@@ -8208,6 +8413,21 @@ static void hclge_get_pauseparam(struct hnae3_handle *handle, u32 *auto_neg,
}
}
+static void hclge_record_user_pauseparam(struct hclge_dev *hdev,
+ u32 rx_en, u32 tx_en)
+{
+ if (rx_en && tx_en)
+ hdev->fc_mode_last_time = HCLGE_FC_FULL;
+ else if (rx_en && !tx_en)
+ hdev->fc_mode_last_time = HCLGE_FC_RX_PAUSE;
+ else if (!rx_en && tx_en)
+ hdev->fc_mode_last_time = HCLGE_FC_TX_PAUSE;
+ else
+ hdev->fc_mode_last_time = HCLGE_FC_NONE;
+
+ hdev->tm_info.fc_mode = hdev->fc_mode_last_time;
+}
+
static int hclge_set_pauseparam(struct hnae3_handle *handle, u32 auto_neg,
u32 rx_en, u32 tx_en)
{
@@ -8233,6 +8453,8 @@ static int hclge_set_pauseparam(struct hnae3_handle *handle, u32 auto_neg,
hclge_set_flowctrl_adv(hdev, rx_en, tx_en);
+ hclge_record_user_pauseparam(hdev, rx_en, tx_en);
+
if (!auto_neg)
return hclge_cfg_pauseparam(hdev, rx_en, tx_en);
@@ -8481,7 +8703,7 @@ static int hclge_init_client_instance(struct hnae3_client *client,
}
}
- return ret;
+ return 0;
clear_nic:
hdev->nic_client = NULL;
@@ -8602,12 +8824,10 @@ static void hclge_state_uninit(struct hclge_dev *hdev)
set_bit(HCLGE_STATE_DOWN, &hdev->state);
set_bit(HCLGE_STATE_REMOVING, &hdev->state);
- if (hdev->service_timer.function)
- del_timer_sync(&hdev->service_timer);
if (hdev->reset_timer.function)
del_timer_sync(&hdev->reset_timer);
- if (hdev->service_task.func)
- cancel_work_sync(&hdev->service_task);
+ if (hdev->service_task.work.func)
+ cancel_delayed_work_sync(&hdev->service_task);
if (hdev->rst_service_task.func)
cancel_work_sync(&hdev->rst_service_task);
if (hdev->mbx_service_task.func)
@@ -8812,12 +9032,16 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
hclge_dcb_ops_set(hdev);
- timer_setup(&hdev->service_timer, hclge_service_timer, 0);
timer_setup(&hdev->reset_timer, hclge_reset_timer, 0);
- INIT_WORK(&hdev->service_task, hclge_service_task);
+ INIT_DELAYED_WORK(&hdev->service_task, hclge_service_task);
INIT_WORK(&hdev->rst_service_task, hclge_reset_service_task);
INIT_WORK(&hdev->mbx_service_task, hclge_mailbox_service_task);
+ /* Set up affinity after service timer setup because add_timer_on
+ * is called in affinity notify.
+ */
+ hclge_misc_affinity_setup(hdev);
+
hclge_clear_all_event_cause(hdev);
hclge_clear_resetting_state(hdev);
@@ -8842,7 +9066,9 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
hclge_state_init(hdev);
hdev->last_reset_time = jiffies;
- pr_info("%s driver initialization finished.\n", HCLGE_DRIVER_NAME);
+ dev_info(&hdev->pdev->dev, "%s driver initialization finished.\n",
+ HCLGE_DRIVER_NAME);
+
return 0;
err_mdiobus_unreg:
@@ -8979,6 +9205,7 @@ static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
struct hclge_dev *hdev = ae_dev->priv;
struct hclge_mac *mac = &hdev->hw.mac;
+ hclge_misc_affinity_teardown(hdev);
hclge_state_uninit(hdev);
if (mac->phydev)
@@ -9238,106 +9465,314 @@ static int hclge_get_64_bit_regs(struct hclge_dev *hdev, u32 regs_num,
}
#define MAX_SEPARATE_NUM 4
-#define SEPARATOR_VALUE 0xFFFFFFFF
+#define SEPARATOR_VALUE 0xFDFCFBFA
#define REG_NUM_PER_LINE 4
#define REG_LEN_PER_LINE (REG_NUM_PER_LINE * sizeof(u32))
+#define REG_SEPARATOR_LINE 1
+#define REG_NUM_REMAIN_MASK 3
+#define BD_LIST_MAX_NUM 30
-static int hclge_get_regs_len(struct hnae3_handle *handle)
+int hclge_query_bd_num_cmd_send(struct hclge_dev *hdev, struct hclge_desc *desc)
{
- int cmdq_lines, common_lines, ring_lines, tqp_intr_lines;
- struct hnae3_knic_private_info *kinfo = &handle->kinfo;
- struct hclge_vport *vport = hclge_get_vport(handle);
- struct hclge_dev *hdev = vport->back;
- u32 regs_num_32_bit, regs_num_64_bit;
+ /* prepare 4 commands to query DFX BD number */
+ hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_DFX_BD_NUM, true);
+ desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
+ hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_DFX_BD_NUM, true);
+ desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
+ hclge_cmd_setup_basic_desc(&desc[2], HCLGE_OPC_DFX_BD_NUM, true);
+ desc[2].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
+ hclge_cmd_setup_basic_desc(&desc[3], HCLGE_OPC_DFX_BD_NUM, true);
+
+ return hclge_cmd_send(&hdev->hw, desc, 4);
+}
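
In the query above, every descriptor except the last carries HCLGE_CMD_FLAG_NEXT so the firmware treats the four buffers as one chained request. The unrolled setup could equally be written as a loop; a sketch using the same driver helpers, intended to behave identically:

	/* loop form of the 4-descriptor chain in hclge_query_bd_num_cmd_send() */
	static int demo_query_bd_num(struct hclge_dev *hdev,
				     struct hclge_desc *desc)
	{
		int i;

		for (i = 0; i < 4; i++) {
			hclge_cmd_setup_basic_desc(&desc[i],
						   HCLGE_OPC_DFX_BD_NUM, true);
			if (i < 3)	/* chain all but the last descriptor */
				desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
		}

		return hclge_cmd_send(&hdev->hw, desc, 4);
	}
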
+
+static int hclge_get_dfx_reg_bd_num(struct hclge_dev *hdev,
+ int *bd_num_list,
+ u32 type_num)
+{
+#define HCLGE_DFX_REG_BD_NUM 4
+
+ u32 entries_per_desc, desc_index, index, offset, i;
+ struct hclge_desc desc[HCLGE_DFX_REG_BD_NUM];
int ret;
- ret = hclge_get_regs_num(hdev, &regs_num_32_bit, &regs_num_64_bit);
+ ret = hclge_query_bd_num_cmd_send(hdev, desc);
if (ret) {
dev_err(&hdev->pdev->dev,
- "Get register number failed, ret = %d.\n", ret);
- return -EOPNOTSUPP;
+ "Get dfx bd num fail, status is %d.\n", ret);
+ return ret;
}
- cmdq_lines = sizeof(cmdq_reg_addr_list) / REG_LEN_PER_LINE + 1;
- common_lines = sizeof(common_reg_addr_list) / REG_LEN_PER_LINE + 1;
- ring_lines = sizeof(ring_reg_addr_list) / REG_LEN_PER_LINE + 1;
- tqp_intr_lines = sizeof(tqp_intr_reg_addr_list) / REG_LEN_PER_LINE + 1;
+ entries_per_desc = ARRAY_SIZE(desc[0].data);
+ for (i = 0; i < type_num; i++) {
+ offset = hclge_dfx_bd_offset_list[i];
+ index = offset % entries_per_desc;
+ desc_index = offset / entries_per_desc;
+ bd_num_list[i] = le32_to_cpu(desc[desc_index].data[index]);
+ }
- return (cmdq_lines + common_lines + ring_lines * kinfo->num_tqps +
- tqp_intr_lines * (hdev->num_msi_used - 1)) * REG_LEN_PER_LINE +
- regs_num_32_bit * sizeof(u32) + regs_num_64_bit * sizeof(u64);
+ return ret;
}
-static void hclge_get_regs(struct hnae3_handle *handle, u32 *version,
- void *data)
+static int hclge_dfx_reg_cmd_send(struct hclge_dev *hdev,
+ struct hclge_desc *desc_src, int bd_num,
+ enum hclge_opcode_type cmd)
{
- struct hnae3_knic_private_info *kinfo = &handle->kinfo;
- struct hclge_vport *vport = hclge_get_vport(handle);
- struct hclge_dev *hdev = vport->back;
- u32 regs_num_32_bit, regs_num_64_bit;
- int i, j, reg_um, separator_num;
+ struct hclge_desc *desc = desc_src;
+ int i, ret;
+
+ hclge_cmd_setup_basic_desc(desc, cmd, true);
+ for (i = 0; i < bd_num - 1; i++) {
+ desc->flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
+ desc++;
+ hclge_cmd_setup_basic_desc(desc, cmd, true);
+ }
+
+ desc = desc_src;
+ ret = hclge_cmd_send(&hdev->hw, desc, bd_num);
+ if (ret)
+ dev_err(&hdev->pdev->dev,
+ "Query dfx reg cmd(0x%x) send fail, status is %d.\n",
+ cmd, ret);
+
+ return ret;
+}
+
+static int hclge_dfx_reg_fetch_data(struct hclge_desc *desc_src, int bd_num,
+ void *data)
+{
+ int entries_per_desc, reg_num, separator_num, desc_index, index, i;
+ struct hclge_desc *desc = desc_src;
u32 *reg = data;
+
+ entries_per_desc = ARRAY_SIZE(desc->data);
+ reg_num = entries_per_desc * bd_num;
+ separator_num = REG_NUM_PER_LINE - (reg_num & REG_NUM_REMAIN_MASK);
+ for (i = 0; i < reg_num; i++) {
+ index = i % entries_per_desc;
+ desc_index = i / entries_per_desc;
+ *reg++ = le32_to_cpu(desc[desc_index].data[index]);
+ }
+ for (i = 0; i < separator_num; i++)
+ *reg++ = SEPARATOR_VALUE;
+
+ return reg_num + separator_num;
+}
+
+static int hclge_get_dfx_reg_len(struct hclge_dev *hdev, int *len)
+{
+ u32 dfx_reg_type_num = ARRAY_SIZE(hclge_dfx_bd_offset_list);
+ int data_len_per_desc, data_len, bd_num, i;
+ int bd_num_list[BD_LIST_MAX_NUM];
int ret;
- *version = hdev->fw_version;
+ ret = hclge_get_dfx_reg_bd_num(hdev, bd_num_list, dfx_reg_type_num);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "Get dfx reg bd num fail, status is %d.\n", ret);
+ return ret;
+ }
- ret = hclge_get_regs_num(hdev, &regs_num_32_bit, &regs_num_64_bit);
+ data_len_per_desc = FIELD_SIZEOF(struct hclge_desc, data);
+ *len = 0;
+ for (i = 0; i < dfx_reg_type_num; i++) {
+ bd_num = bd_num_list[i];
+ data_len = data_len_per_desc * bd_num;
+ *len += (data_len / REG_LEN_PER_LINE + 1) * REG_LEN_PER_LINE;
+ }
+
+ return ret;
+}
+
+static int hclge_get_dfx_reg(struct hclge_dev *hdev, void *data)
+{
+ u32 dfx_reg_type_num = ARRAY_SIZE(hclge_dfx_bd_offset_list);
+ int bd_num, bd_num_max, buf_len, i;
+ int bd_num_list[BD_LIST_MAX_NUM];
+ struct hclge_desc *desc_src;
+ u32 *reg = data;
+ int ret;
+
+ ret = hclge_get_dfx_reg_bd_num(hdev, bd_num_list, dfx_reg_type_num);
if (ret) {
dev_err(&hdev->pdev->dev,
- "Get register number failed, ret = %d.\n", ret);
- return;
+ "Get dfx reg bd num fail, status is %d.\n", ret);
+ return ret;
+ }
+
+ bd_num_max = bd_num_list[0];
+ for (i = 1; i < dfx_reg_type_num; i++)
+ bd_num_max = max_t(int, bd_num_max, bd_num_list[i]);
+
+ buf_len = sizeof(*desc_src) * bd_num_max;
+ desc_src = kzalloc(buf_len, GFP_KERNEL);
+ if (!desc_src) {
+ dev_err(&hdev->pdev->dev, "%s kzalloc failed\n", __func__);
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < dfx_reg_type_num; i++) {
+ bd_num = bd_num_list[i];
+ ret = hclge_dfx_reg_cmd_send(hdev, desc_src, bd_num,
+ hclge_dfx_reg_opcode_list[i]);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "Get dfx reg fail, status is %d.\n", ret);
+ break;
+ }
+
+ reg += hclge_dfx_reg_fetch_data(desc_src, bd_num, reg);
}
+ kfree(desc_src);
+ return ret;
+}
+
+static int hclge_fetch_pf_reg(struct hclge_dev *hdev, void *data,
+ struct hnae3_knic_private_info *kinfo)
+{
+#define HCLGE_RING_REG_OFFSET 0x200
+#define HCLGE_RING_INT_REG_OFFSET 0x4
+
+ int i, j, reg_num, separator_num;
+ int data_num_sum;
+ u32 *reg = data;
+
/* fetching per-PF register values from PF PCIe register space */
- reg_um = sizeof(cmdq_reg_addr_list) / sizeof(u32);
- separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE;
- for (i = 0; i < reg_um; i++)
+ reg_num = ARRAY_SIZE(cmdq_reg_addr_list);
+ separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
+ for (i = 0; i < reg_num; i++)
*reg++ = hclge_read_dev(&hdev->hw, cmdq_reg_addr_list[i]);
for (i = 0; i < separator_num; i++)
*reg++ = SEPARATOR_VALUE;
+ data_num_sum = reg_num + separator_num;
- reg_um = sizeof(common_reg_addr_list) / sizeof(u32);
- separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE;
- for (i = 0; i < reg_um; i++)
+ reg_num = ARRAY_SIZE(common_reg_addr_list);
+ separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
+ for (i = 0; i < reg_num; i++)
*reg++ = hclge_read_dev(&hdev->hw, common_reg_addr_list[i]);
for (i = 0; i < separator_num; i++)
*reg++ = SEPARATOR_VALUE;
+ data_num_sum += reg_num + separator_num;
- reg_um = sizeof(ring_reg_addr_list) / sizeof(u32);
- separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE;
+ reg_num = ARRAY_SIZE(ring_reg_addr_list);
+ separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
for (j = 0; j < kinfo->num_tqps; j++) {
- for (i = 0; i < reg_um; i++)
+ for (i = 0; i < reg_num; i++)
*reg++ = hclge_read_dev(&hdev->hw,
ring_reg_addr_list[i] +
- 0x200 * j);
+ HCLGE_RING_REG_OFFSET * j);
for (i = 0; i < separator_num; i++)
*reg++ = SEPARATOR_VALUE;
}
+ data_num_sum += (reg_num + separator_num) * kinfo->num_tqps;
- reg_um = sizeof(tqp_intr_reg_addr_list) / sizeof(u32);
- separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE;
+ reg_num = ARRAY_SIZE(tqp_intr_reg_addr_list);
+ separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
for (j = 0; j < hdev->num_msi_used - 1; j++) {
- for (i = 0; i < reg_um; i++)
+ for (i = 0; i < reg_num; i++)
*reg++ = hclge_read_dev(&hdev->hw,
tqp_intr_reg_addr_list[i] +
- 4 * j);
+ HCLGE_RING_INT_REG_OFFSET * j);
for (i = 0; i < separator_num; i++)
*reg++ = SEPARATOR_VALUE;
}
+ data_num_sum += (reg_num + separator_num) * (hdev->num_msi_used - 1);
+
+ return data_num_sum;
+}
+
+static int hclge_get_regs_len(struct hnae3_handle *handle)
+{
+ int cmdq_lines, common_lines, ring_lines, tqp_intr_lines;
+ struct hnae3_knic_private_info *kinfo = &handle->kinfo;
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hclge_dev *hdev = vport->back;
+ int regs_num_32_bit, regs_num_64_bit, dfx_regs_len;
+ int regs_lines_32_bit, regs_lines_64_bit;
+ int ret;
+
+ ret = hclge_get_regs_num(hdev, &regs_num_32_bit, &regs_num_64_bit);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "Get register number failed, ret = %d.\n", ret);
+ return ret;
+ }
+
+ ret = hclge_get_dfx_reg_len(hdev, &dfx_regs_len);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "Get dfx reg len failed, ret = %d.\n", ret);
+ return ret;
+ }
+
+ cmdq_lines = sizeof(cmdq_reg_addr_list) / REG_LEN_PER_LINE +
+ REG_SEPARATOR_LINE;
+ common_lines = sizeof(common_reg_addr_list) / REG_LEN_PER_LINE +
+ REG_SEPARATOR_LINE;
+ ring_lines = sizeof(ring_reg_addr_list) / REG_LEN_PER_LINE +
+ REG_SEPARATOR_LINE;
+ tqp_intr_lines = sizeof(tqp_intr_reg_addr_list) / REG_LEN_PER_LINE +
+ REG_SEPARATOR_LINE;
+ regs_lines_32_bit = regs_num_32_bit * sizeof(u32) / REG_LEN_PER_LINE +
+ REG_SEPARATOR_LINE;
+ regs_lines_64_bit = regs_num_64_bit * sizeof(u64) / REG_LEN_PER_LINE +
+ REG_SEPARATOR_LINE;
+
+ return (cmdq_lines + common_lines + ring_lines * kinfo->num_tqps +
+ tqp_intr_lines * (hdev->num_msi_used - 1) + regs_lines_32_bit +
+ regs_lines_64_bit) * REG_LEN_PER_LINE + dfx_regs_len;
+}
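
The length computed here has to agree with what the fetch side emits: the fetch pads each block with 1 to 4 SEPARATOR_VALUE words (REG_NUM_PER_LINE minus reg_num & 3), while the length side reserves reg_num / 4 + REG_SEPARATOR_LINE lines. Both come out to the same whole number of 4-word lines; a quick standalone check of that invariant:

	#include <assert.h>

	#define REG_NUM_PER_LINE    4
	#define REG_NUM_REMAIN_MASK 3
	#define REG_SEPARATOR_LINE  1

	int main(void)
	{
		for (int reg_num = 1; reg_num <= 1024; reg_num++) {
			/* words the fetch side writes: data plus separators */
			int sep = REG_NUM_PER_LINE -
				  (reg_num & REG_NUM_REMAIN_MASK);
			/* lines the length side reserves */
			int lines = reg_num / REG_NUM_PER_LINE +
				    REG_SEPARATOR_LINE;

			assert(reg_num + sep == lines * REG_NUM_PER_LINE);
		}
		return 0;
	}
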
+
+static void hclge_get_regs(struct hnae3_handle *handle, u32 *version,
+ void *data)
+{
+ struct hnae3_knic_private_info *kinfo = &handle->kinfo;
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hclge_dev *hdev = vport->back;
+ u32 regs_num_32_bit, regs_num_64_bit;
+ int i, reg_num, separator_num, ret;
+ u32 *reg = data;
+
+ *version = hdev->fw_version;
+
+ ret = hclge_get_regs_num(hdev, &regs_num_32_bit, &regs_num_64_bit);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "Get register number failed, ret = %d.\n", ret);
+ return;
+ }
+
+ reg += hclge_fetch_pf_reg(hdev, reg, kinfo);
- /* fetching PF common registers values from firmware */
ret = hclge_get_32_bit_regs(hdev, regs_num_32_bit, reg);
if (ret) {
dev_err(&hdev->pdev->dev,
"Get 32 bit register failed, ret = %d.\n", ret);
return;
}
+ reg_num = regs_num_32_bit;
+ reg += reg_num;
+ separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
+ for (i = 0; i < separator_num; i++)
+ *reg++ = SEPARATOR_VALUE;
- reg += regs_num_32_bit;
ret = hclge_get_64_bit_regs(hdev, regs_num_64_bit, reg);
- if (ret)
+ if (ret) {
dev_err(&hdev->pdev->dev,
"Get 64 bit register failed, ret = %d.\n", ret);
+ return;
+ }
+ reg_num = regs_num_64_bit * 2;
+ reg += reg_num;
+ separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
+ for (i = 0; i < separator_num; i++)
+ *reg++ = SEPARATOR_VALUE;
+
+ ret = hclge_get_dfx_reg(hdev, reg);
+ if (ret)
+ dev_err(&hdev->pdev->dev,
+ "Get dfx register failed, ret = %d.\n", ret);
}
static int hclge_set_led_status(struct hclge_dev *hdev, u8 locate_led_status)
@@ -9452,7 +9887,7 @@ static const struct hnae3_ae_ops hclge_ops = {
.set_mtu = hclge_set_mtu,
.reset_queue = hclge_reset_tqp,
.get_stats = hclge_get_stats,
- .get_mac_pause_stats = hclge_get_mac_pause_stat,
+ .get_mac_stats = hclge_get_mac_stat,
.update_stats = hclge_update_stats,
.get_strings = hclge_get_strings,
.get_sset_count = hclge_get_sset_count,
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
index 6a12285f4c76..7ff03b9605e4 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
@@ -302,6 +302,13 @@ enum hclge_fc_mode {
HCLGE_FC_DEFAULT
};
+enum hclge_link_fail_code {
+ HCLGE_LF_NORMAL,
+ HCLGE_LF_REF_CLOCK_LOST,
+ HCLGE_LF_XSFP_TX_DISABLE,
+ HCLGE_LF_XSFP_ABSENT,
+};
+
#define HCLGE_PG_NUM 4
#define HCLGE_SCH_MODE_SP 0
#define HCLGE_SCH_MODE_DWRR 1
@@ -532,50 +539,6 @@ struct key_info {
u8 key_length; /* use bit as unit */
};
-static const struct key_info meta_data_key_info[] = {
- { PACKET_TYPE_ID, 6},
- { IP_FRAGEMENT, 1},
- { ROCE_TYPE, 1},
- { NEXT_KEY, 5},
- { VLAN_NUMBER, 2},
- { SRC_VPORT, 12},
- { DST_VPORT, 12},
- { TUNNEL_PACKET, 1},
-};
-
-static const struct key_info tuple_key_info[] = {
- { OUTER_DST_MAC, 48},
- { OUTER_SRC_MAC, 48},
- { OUTER_VLAN_TAG_FST, 16},
- { OUTER_VLAN_TAG_SEC, 16},
- { OUTER_ETH_TYPE, 16},
- { OUTER_L2_RSV, 16},
- { OUTER_IP_TOS, 8},
- { OUTER_IP_PROTO, 8},
- { OUTER_SRC_IP, 32},
- { OUTER_DST_IP, 32},
- { OUTER_L3_RSV, 16},
- { OUTER_SRC_PORT, 16},
- { OUTER_DST_PORT, 16},
- { OUTER_L4_RSV, 32},
- { OUTER_TUN_VNI, 24},
- { OUTER_TUN_FLOW_ID, 8},
- { INNER_DST_MAC, 48},
- { INNER_SRC_MAC, 48},
- { INNER_VLAN_TAG_FST, 16},
- { INNER_VLAN_TAG_SEC, 16},
- { INNER_ETH_TYPE, 16},
- { INNER_L2_RSV, 16},
- { INNER_IP_TOS, 8},
- { INNER_IP_PROTO, 8},
- { INNER_SRC_IP, 32},
- { INNER_DST_IP, 32},
- { INNER_L3_RSV, 16},
- { INNER_SRC_PORT, 16},
- { INNER_DST_PORT, 16},
- { INNER_L4_RSV, 32},
-};
-
#define MAX_KEY_LENGTH 400
#define MAX_KEY_DWORDS DIV_ROUND_UP(MAX_KEY_LENGTH / 8, 4)
#define MAX_KEY_BYTES (MAX_KEY_DWORDS * 4)
@@ -806,9 +769,8 @@ struct hclge_dev {
u16 adminq_work_limit; /* Num of admin receive queue desc to process */
unsigned long service_timer_period;
unsigned long service_timer_previous;
- struct timer_list service_timer;
struct timer_list reset_timer;
- struct work_struct service_task;
+ struct delayed_work service_task;
struct work_struct rst_service_task;
struct work_struct mbx_service_task;
@@ -864,6 +826,10 @@ struct hclge_dev {
DECLARE_KFIFO(mac_tnl_log, struct hclge_mac_tnl_stats,
HCLGE_MAC_TNL_LOG_SIZE);
+
+ /* affinity mask and notify for misc interrupt */
+ cpumask_t affinity_mask;
+ struct irq_affinity_notify affinity_notify;
};
/* VPort level vlan tag configuration for TX direction */
@@ -990,7 +956,6 @@ int hclge_buffer_alloc(struct hclge_dev *hdev);
int hclge_rss_init_hw(struct hclge_dev *hdev);
void hclge_rss_indir_init_cfg(struct hclge_dev *hdev);
-int hclge_inform_reset_assert_to_vf(struct hclge_vport *vport);
void hclge_mbx_handler(struct hclge_dev *hdev);
int hclge_reset_tqp(struct hnae3_handle *handle, u16 queue_id);
void hclge_reset_vf_queue(struct hclge_vport *vport, u16 queue_id);
@@ -1018,4 +983,7 @@ int hclge_update_port_base_vlan_cfg(struct hclge_vport *vport, u16 state,
int hclge_push_vf_port_base_vlan_info(struct hclge_vport *vport, u8 vfid,
u16 state, u16 vlan_tag, u16 qos,
u16 vlan_proto);
+void hclge_task_schedule(struct hclge_dev *hdev, unsigned long delay_time);
+int hclge_query_bd_num_cmd_send(struct hclge_dev *hdev,
+ struct hclge_desc *desc);
#endif
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
index 690b9990215c..f5da28a60d00 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
@@ -479,7 +479,7 @@ static void hclge_mbx_reset_vf_queue(struct hclge_vport *vport,
hclge_reset_vf_queue(vport, queue_id);
- /* send response msg to VF after queue reset complete*/
+ /* send response msg to VF after queue reset complete */
hclge_gen_resp_to_vf(vport, mbx_req, 0, NULL, 0);
}
@@ -545,6 +545,36 @@ static int hclge_get_rss_key(struct hclge_vport *vport,
HCLGE_RSS_MBX_RESP_LEN);
}
+static void hclge_link_fail_parse(struct hclge_dev *hdev, u8 link_fail_code)
+{
+ switch (link_fail_code) {
+ case HCLGE_LF_REF_CLOCK_LOST:
+ dev_warn(&hdev->pdev->dev, "Reference clock lost!\n");
+ break;
+ case HCLGE_LF_XSFP_TX_DISABLE:
+ dev_warn(&hdev->pdev->dev, "SFP tx is disabled!\n");
+ break;
+ case HCLGE_LF_XSFP_ABSENT:
+ dev_warn(&hdev->pdev->dev, "SFP is absent!\n");
+ break;
+ default:
+ break;
+ }
+}
+
+static void hclge_handle_link_change_event(struct hclge_dev *hdev,
+ struct hclge_mbx_vf_to_pf_cmd *req)
+{
+#define LINK_STATUS_OFFSET 1
+#define LINK_FAIL_CODE_OFFSET 2
+
+ clear_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state);
+ hclge_task_schedule(hdev, 0);
+
+ if (!req->msg[LINK_STATUS_OFFSET])
+ hclge_link_fail_parse(hdev, req->msg[LINK_FAIL_CODE_OFFSET]);
+}
+
static bool hclge_cmd_crq_empty(struct hclge_hw *hw)
{
u32 tail = hclge_read_dev(hw, HCLGE_NIC_CRQ_TAIL_REG);
@@ -552,6 +582,15 @@ static bool hclge_cmd_crq_empty(struct hclge_hw *hw)
return tail == hw->cmq.crq.next_to_use;
}
+static void hclge_handle_ncsi_error(struct hclge_dev *hdev)
+{
+ struct hnae3_ae_dev *ae_dev = hdev->ae_dev;
+
+ ae_dev->ops->set_default_reset_request(ae_dev, HNAE3_GLOBAL_RESET);
+ dev_warn(&hdev->pdev->dev, "requesting reset due to NCSI error\n");
+ ae_dev->ops->reset_event(hdev->pdev, NULL);
+}
+
void hclge_mbx_handler(struct hclge_dev *hdev)
{
struct hclge_cmq_ring *crq = &hdev->hw.cmq.crq;
@@ -707,6 +746,12 @@ void hclge_mbx_handler(struct hclge_dev *hdev)
"PF fail(%d) to media type for VF\n",
ret);
break;
+ case HCLGE_MBX_PUSH_LINK_STATUS:
+ hclge_handle_link_change_event(hdev, req);
+ break;
+ case HCLGE_MBX_NCSI_ERROR:
+ hclge_handle_ncsi_error(hdev);
+ break;
default:
dev_err(&hdev->pdev->dev,
"un-supported mailbox message, code = %d\n",
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c
index abb1b438564e..dc4dfd4602ab 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c
@@ -231,6 +231,8 @@ int hclge_mac_connect_phy(struct hnae3_handle *handle)
linkmode_clear_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT,
phydev->advertising);
+ phy_attached_info(phydev);
+
return 0;
}
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
index 3f41fa2bc414..e829101d576c 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
@@ -404,8 +404,8 @@ static int hclge_tm_port_shaper_cfg(struct hclge_dev *hdev)
{
struct hclge_port_shapping_cmd *shap_cfg_cmd;
struct hclge_desc desc;
- u32 shapping_para = 0;
u8 ir_u, ir_b, ir_s;
+ u32 shapping_para;
int ret;
ret = hclge_shaper_para_calc(hdev->hw.mac.speed,
@@ -650,12 +650,8 @@ static void hclge_pfc_info_init(struct hclge_dev *hdev)
}
}
-static int hclge_tm_schd_info_init(struct hclge_dev *hdev)
+static void hclge_tm_schd_info_init(struct hclge_dev *hdev)
{
- if ((hdev->tx_sch_mode != HCLGE_FLAG_TC_BASE_SCH_MODE) &&
- (hdev->tm_info.num_pg != 1))
- return -EINVAL;
-
hclge_tm_pg_info_init(hdev);
hclge_tm_tc_info_init(hdev);
@@ -663,8 +659,6 @@ static int hclge_tm_schd_info_init(struct hclge_dev *hdev)
hclge_tm_vport_info_update(hdev);
hclge_pfc_info_init(hdev);
-
- return 0;
}
static int hclge_tm_pg_to_pri_map(struct hclge_dev *hdev)
@@ -1428,15 +1422,15 @@ int hclge_tm_init_hw(struct hclge_dev *hdev, bool init)
int hclge_tm_schd_init(struct hclge_dev *hdev)
{
- int ret;
-
/* fc_mode is HCLGE_FC_FULL on reset */
hdev->tm_info.fc_mode = HCLGE_FC_FULL;
hdev->fc_mode_last_time = hdev->tm_info.fc_mode;
- ret = hclge_tm_schd_info_init(hdev);
- if (ret)
- return ret;
+ if (hdev->tx_sch_mode != HCLGE_FLAG_TC_BASE_SCH_MODE &&
+ hdev->tm_info.num_pg != 1)
+ return -EINVAL;
+
+ hclge_tm_schd_info_init(hdev);
return hclge_tm_init_hw(hdev, true);
}
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c
index 652b796044e3..4c2c9458648f 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c
@@ -43,7 +43,7 @@ static int hclgevf_cmd_csq_clean(struct hclgevf_hw *hw)
{
struct hclgevf_dev *hdev = container_of(hw, struct hclgevf_dev, hw);
struct hclgevf_cmq_ring *csq = &hw->cmq.csq;
- int clean = 0;
+ int clean;
u32 head;
head = hclgevf_read_dev(hw, HCLGEVF_NIC_CSQ_HEAD_REG);
@@ -97,7 +97,9 @@ static void hclgevf_cmd_config_regs(struct hclgevf_cmq_ring *ring)
reg_val = (u32)((ring->desc_dma_addr >> 31) >> 1);
hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_BASEADDR_H_REG, reg_val);
- reg_val = (ring->desc_num >> HCLGEVF_NIC_CMQ_DESC_NUM_S);
+ reg_val = hclgevf_read_dev(hw, HCLGEVF_NIC_CSQ_DEPTH_REG);
+ reg_val &= HCLGEVF_NIC_SW_RST_RDY;
+ reg_val |= (ring->desc_num >> HCLGEVF_NIC_CMQ_DESC_NUM_S);
hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_DEPTH_REG, reg_val);
hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_HEAD_REG, 0);
@@ -405,7 +407,15 @@ int hclgevf_cmd_init(struct hclgevf_dev *hdev)
}
hdev->fw_version = version;
- dev_info(&hdev->pdev->dev, "The firmware version is %08x\n", version);
+ dev_info(&hdev->pdev->dev, "The firmware version is %lu.%lu.%lu.%lu\n",
+ hnae3_get_field(version, HNAE3_FW_VERSION_BYTE3_MASK,
+ HNAE3_FW_VERSION_BYTE3_SHIFT),
+ hnae3_get_field(version, HNAE3_FW_VERSION_BYTE2_MASK,
+ HNAE3_FW_VERSION_BYTE2_SHIFT),
+ hnae3_get_field(version, HNAE3_FW_VERSION_BYTE1_MASK,
+ HNAE3_FW_VERSION_BYTE1_SHIFT),
+ hnae3_get_field(version, HNAE3_FW_VERSION_BYTE0_MASK,
+ HNAE3_FW_VERSION_BYTE0_SHIFT));
return 0;
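The hunk above replaces the raw hex dump of the firmware version with four dotted fields extracted via hnae3_get_field(), which is a plain mask-and-shift. A self-contained sketch of the same decoding, assuming one byte per field as the HNAE3_FW_VERSION_BYTEn names suggest (the real masks and shifts live in hnae3.h):

    #include <stdint.h>
    #include <stdio.h>

    /* One-byte-per-field decode; mask/shift values are assumptions. */
    #define FW_FIELD(ver, byte) (((ver) >> ((byte) * 8)) & 0xffu)

    int main(void)
    {
        uint32_t version = 0x01020304;

        printf("The firmware version is %u.%u.%u.%u\n",
               FW_FIELD(version, 3), FW_FIELD(version, 2),
               FW_FIELD(version, 1), FW_FIELD(version, 0));
        return 0; /* prints 1.2.3.4 */
    }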
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.h b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.h
index 127a434a56f3..f830eef02e5c 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.h
@@ -244,8 +244,11 @@ struct hclgevf_cfg_tx_queue_pointer_cmd {
#define HCLGEVF_NIC_CRQ_DEPTH_REG 0x27020
#define HCLGEVF_NIC_CRQ_TAIL_REG 0x27024
#define HCLGEVF_NIC_CRQ_HEAD_REG 0x27028
-#define HCLGEVF_NIC_CMQ_EN_B 16
-#define HCLGEVF_NIC_CMQ_ENABLE BIT(HCLGEVF_NIC_CMQ_EN_B)
+
+/* this bit indicates that the driver is ready for hardware reset */
+#define HCLGEVF_NIC_SW_RST_RDY_B 16
+#define HCLGEVF_NIC_SW_RST_RDY BIT(HCLGEVF_NIC_SW_RST_RDY_B)
+
#define HCLGEVF_NIC_CMQ_DESC_NUM 1024
#define HCLGEVF_NIC_CMQ_DESC_NUM_S 3
#define HCLGEVF_NIC_CMDQ_INT_SRC_REG 0x27100
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
index a13a0e101c3b..594cae8c7410 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
@@ -1269,7 +1269,7 @@ static int hclgevf_set_vlan_filter(struct hnae3_handle *handle,
HCLGE_MBX_VLAN_FILTER, msg_data,
HCLGEVF_VLAN_MBX_MSG_LEN, false, NULL, 0);
- /* When remove hw vlan filter failed, record the vlan id,
+ /* when removing the hw vlan filter fails, record the vlan id,
* and try to remove it from hw later, to stay consistent
* with the stack.
*/
@@ -1396,19 +1396,22 @@ static int hclgevf_reset_wait(struct hclgevf_dev *hdev)
u32 val;
int ret;
- /* wait to check the hardware reset completion status */
- val = hclgevf_read_dev(&hdev->hw, HCLGEVF_RST_ING);
- dev_info(&hdev->pdev->dev, "checking vf resetting status: %x\n", val);
-
if (hdev->reset_type == HNAE3_FLR_RESET)
return hclgevf_flr_poll_timeout(hdev,
HCLGEVF_RESET_WAIT_US,
HCLGEVF_RESET_WAIT_CNT);
-
- ret = readl_poll_timeout(hdev->hw.io_base + HCLGEVF_RST_ING, val,
- !(val & HCLGEVF_RST_ING_BITS),
- HCLGEVF_RESET_WAIT_US,
- HCLGEVF_RESET_WAIT_TIMEOUT_US);
+ else if (hdev->reset_type == HNAE3_VF_RESET)
+ ret = readl_poll_timeout(hdev->hw.io_base +
+ HCLGEVF_VF_RST_ING, val,
+ !(val & HCLGEVF_VF_RST_ING_BIT),
+ HCLGEVF_RESET_WAIT_US,
+ HCLGEVF_RESET_WAIT_TIMEOUT_US);
+ else
+ ret = readl_poll_timeout(hdev->hw.io_base +
+ HCLGEVF_RST_ING, val,
+ !(val & HCLGEVF_RST_ING_BITS),
+ HCLGEVF_RESET_WAIT_US,
+ HCLGEVF_RESET_WAIT_TIMEOUT_US);
/* hardware completion status should be available by this time */
if (ret) {
@@ -1426,6 +1429,20 @@ static int hclgevf_reset_wait(struct hclgevf_dev *hdev)
return 0;
}
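hclgevf_reset_wait() now selects a different status register per reset type but keeps the same bounded-poll shape via readl_poll_timeout(). A simplified userspace sketch of that shape, with an iteration budget standing in for the real delay/timeout pair:

    #include <stdint.h>

    /* Bounded poll: succeed once the masked bits deassert, else give up.
     * readl_poll_timeout() adds sleeping and a wall-clock budget on top.
     */
    int poll_until_clear(volatile uint32_t *reg, uint32_t mask,
                         unsigned int max_tries)
    {
        while (max_tries--) {
            if (!(*reg & mask))
                return 0;       /* reset bits deasserted */
        }
        return -1;              /* timed out, akin to -ETIMEDOUT */
    }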
+static void hclgevf_reset_handshake(struct hclgevf_dev *hdev, bool enable)
+{
+ u32 reg_val;
+
+ reg_val = hclgevf_read_dev(&hdev->hw, HCLGEVF_NIC_CSQ_DEPTH_REG);
+ if (enable)
+ reg_val |= HCLGEVF_NIC_SW_RST_RDY;
+ else
+ reg_val &= ~HCLGEVF_NIC_SW_RST_RDY;
+
+ hclgevf_write_dev(&hdev->hw, HCLGEVF_NIC_CSQ_DEPTH_REG,
+ reg_val);
+}
+
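hclgevf_reset_handshake() above is a read-modify-write on a single ready bit, replacing the old blind write of HCLGEVF_NIC_CMQ_ENABLE that overwrote the rest of the register. The idiom in miniature, with a plain variable standing in for the CSQ depth register:

    #include <stdint.h>
    #include <stdbool.h>

    #define SW_RST_RDY (1u << 16)

    void set_reset_ready(uint32_t *reg, bool enable)
    {
        uint32_t val = *reg;            /* read */

        if (enable)
            val |= SW_RST_RDY;          /* modify: signal ready to IMP */
        else
            val &= ~SW_RST_RDY;         /* modify: clear the handshake */

        *reg = val;                     /* write back; other bits intact */
    }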
static int hclgevf_reset_stack(struct hclgevf_dev *hdev)
{
int ret;
@@ -1448,7 +1465,14 @@ static int hclgevf_reset_stack(struct hclgevf_dev *hdev)
if (ret)
return ret;
- return hclgevf_notify_client(hdev, HNAE3_RESTORE_CLIENT);
+ ret = hclgevf_notify_client(hdev, HNAE3_RESTORE_CLIENT);
+ if (ret)
+ return ret;
+
+ /* clear handshake status with IMP */
+ hclgevf_reset_handshake(hdev, false);
+
+ return 0;
}
static int hclgevf_reset_prepare_wait(struct hclgevf_dev *hdev)
@@ -1474,8 +1498,7 @@ static int hclgevf_reset_prepare_wait(struct hclgevf_dev *hdev)
set_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state);
/* inform hardware that preparatory work is done */
msleep(HCLGEVF_RESET_SYNC_TIME);
- hclgevf_write_dev(&hdev->hw, HCLGEVF_NIC_CSQ_DEPTH_REG,
- HCLGEVF_NIC_CMQ_ENABLE);
+ hclgevf_reset_handshake(hdev, true);
dev_info(&hdev->pdev->dev, "prepare reset(%d) wait done, ret:%d\n",
hdev->reset_type, ret);
@@ -1484,6 +1507,8 @@ static int hclgevf_reset_prepare_wait(struct hclgevf_dev *hdev)
static void hclgevf_reset_err_handle(struct hclgevf_dev *hdev)
{
+ /* recover the handshake status with IMP when the reset fails */
+ hclgevf_reset_handshake(hdev, true);
hdev->rst_stats.rst_fail_cnt++;
dev_err(&hdev->pdev->dev, "failed to reset VF(%d)\n",
hdev->rst_stats.rst_fail_cnt);
@@ -1494,9 +1519,6 @@ static void hclgevf_reset_err_handle(struct hclgevf_dev *hdev)
if (hclgevf_is_reset_pending(hdev)) {
set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state);
hclgevf_reset_task_schedule(hdev);
- } else {
- hclgevf_write_dev(&hdev->hw, HCLGEVF_NIC_CSQ_DEPTH_REG,
- HCLGEVF_NIC_CMQ_ENABLE);
}
}
@@ -1539,7 +1561,7 @@ static int hclgevf_reset(struct hclgevf_dev *hdev)
rtnl_lock();
- /* now, re-initialize the nic client and ae device*/
+ /* now, re-initialize the nic client and ae device */
ret = hclgevf_reset_stack(hdev);
if (ret) {
dev_err(&hdev->pdev->dev, "failed to reset VF stack\n");
@@ -1762,9 +1784,8 @@ static void hclgevf_reset_service_task(struct work_struct *work)
* 1b and 2. cases but we will not get any intimation about 1a
* from PF as cmdq would be in unreliable state i.e. mailbox
* communication between PF and VF would be broken.
- */
-
- /* if we are never geting into pending state it means either:
+ *
+ * if we are never getting into the pending state it means either:
* 1. PF is not receiving our request which could be due to IMP
* reset
* 2. PF is screwed
@@ -1867,29 +1888,45 @@ static void hclgevf_clear_event_cause(struct hclgevf_dev *hdev, u32 regclr)
static enum hclgevf_evt_cause hclgevf_check_evt_cause(struct hclgevf_dev *hdev,
u32 *clearval)
{
- u32 cmdq_src_reg, rst_ing_reg;
+ u32 val, cmdq_stat_reg, rst_ing_reg;
/* fetch the events from their corresponding regs */
- cmdq_src_reg = hclgevf_read_dev(&hdev->hw,
- HCLGEVF_VECTOR0_CMDQ_SRC_REG);
+ cmdq_stat_reg = hclgevf_read_dev(&hdev->hw,
+ HCLGEVF_VECTOR0_CMDQ_STAT_REG);
- if (BIT(HCLGEVF_VECTOR0_RST_INT_B) & cmdq_src_reg) {
+ if (BIT(HCLGEVF_VECTOR0_RST_INT_B) & cmdq_stat_reg) {
rst_ing_reg = hclgevf_read_dev(&hdev->hw, HCLGEVF_RST_ING);
dev_info(&hdev->pdev->dev,
"receive reset interrupt 0x%x!\n", rst_ing_reg);
set_bit(HNAE3_VF_RESET, &hdev->reset_pending);
set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state);
set_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state);
- cmdq_src_reg &= ~BIT(HCLGEVF_VECTOR0_RST_INT_B);
- *clearval = cmdq_src_reg;
+ *clearval = ~(1U << HCLGEVF_VECTOR0_RST_INT_B);
hdev->rst_stats.vf_rst_cnt++;
+ /* set up the VF hardware reset status; the PF will clear
+ * this status once it has finished initializing.
+ */
+ val = hclgevf_read_dev(&hdev->hw, HCLGEVF_VF_RST_ING);
+ hclgevf_write_dev(&hdev->hw, HCLGEVF_VF_RST_ING,
+ val | HCLGEVF_VF_RST_ING_BIT);
return HCLGEVF_VECTOR0_EVENT_RST;
}
/* check for vector0 mailbox(=CMDQ RX) event source */
- if (BIT(HCLGEVF_VECTOR0_RX_CMDQ_INT_B) & cmdq_src_reg) {
- cmdq_src_reg &= ~BIT(HCLGEVF_VECTOR0_RX_CMDQ_INT_B);
- *clearval = cmdq_src_reg;
+ if (BIT(HCLGEVF_VECTOR0_RX_CMDQ_INT_B) & cmdq_stat_reg) {
+ /* for revision 0x21, the interrupt is cleared by writing 0
+ * to the corresponding bit of the clear register; writing 1
+ * keeps the old value.
+ * for revision 0x20, the clear register is a plain read/write
+ * register, so we just write 0 to the bit being handled and
+ * keep the other bits as read from cmdq_stat_reg.
+ */
+ if (hdev->pdev->revision >= 0x21)
+ *clearval = ~(1U << HCLGEVF_VECTOR0_RX_CMDQ_INT_B);
+ else
+ *clearval = cmdq_stat_reg &
+ ~BIT(HCLGEVF_VECTOR0_RX_CMDQ_INT_B);
+
return HCLGEVF_VECTOR0_EVENT_MBX;
}
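The comment above documents two clearing conventions: on revision 0x21 a bit is cleared by writing 0 (writing 1 keeps it), while on 0x20 the clear register is a plain read/write register. A compact sketch of computing the clear value under both conventions (the bit position matches the HCLGEVF_VECTOR0_RX_CMDQ_INT_B define shown later in this diff; everything else is illustrative):

    #include <stdint.h>

    #define RX_CMDQ_INT_B 1

    uint32_t mbx_clearval(uint8_t revision, uint32_t cmdq_stat)
    {
        if (revision >= 0x21)
            /* write-0-to-clear: keep (1) every bit except the target */
            return ~(1u << RX_CMDQ_INT_B);

        /* plain RW register: write status back with the target dropped */
        return cmdq_stat & ~(1u << RX_CMDQ_INT_B);
    }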
@@ -2265,7 +2302,7 @@ static void hclgevf_uninit_msi(struct hclgevf_dev *hdev)
static int hclgevf_misc_irq_init(struct hclgevf_dev *hdev)
{
- int ret = 0;
+ int ret;
hclgevf_get_misc_vector(hdev);
@@ -2695,7 +2732,8 @@ static int hclgevf_init_hdev(struct hclgevf_dev *hdev)
}
hdev->last_reset_time = jiffies;
- pr_info("finished initializing %s driver\n", HCLGEVF_DRIVER_NAME);
+ dev_info(&hdev->pdev->dev, "finished initializing %s driver\n",
+ HCLGEVF_DRIVER_NAME);
return 0;
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h
index 5a9e30998a8f..bdde3afc286b 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h
@@ -87,6 +87,8 @@
/* Vector0 interrupt CMDQ event source register(RW) */
#define HCLGEVF_VECTOR0_CMDQ_SRC_REG 0x27100
+/* Vector0 interrupt CMDQ event status register(RO) */
+#define HCLGEVF_VECTOR0_CMDQ_STAT_REG 0x27104
/* CMDQ register bits for RX event(=MBX event) */
#define HCLGEVF_VECTOR0_RX_CMDQ_INT_B 1
/* RST register bits for RESET event */
@@ -103,6 +105,9 @@
(HCLGEVF_FUN_RST_ING_BIT | HCLGEVF_GLOBAL_RST_ING_BIT | \
HCLGEVF_CORE_RST_ING_BIT | HCLGEVF_IMP_RST_ING_BIT)
+#define HCLGEVF_VF_RST_ING 0x07008
+#define HCLGEVF_VF_RST_ING_BIT BIT(16)
+
#define HCLGEVF_RSS_IND_TBL_SIZE 512
#define HCLGEVF_RSS_SET_BITMAP_MSK 0xffff
#define HCLGEVF_RSS_KEY_SIZE 40
@@ -120,7 +125,7 @@
#define HCLGEVF_S_IP_BIT BIT(3)
#define HCLGEVF_V_TAG_BIT BIT(4)
-#define HCLGEVF_STATS_TIMER_INTERVAL (36)
+#define HCLGEVF_STATS_TIMER_INTERVAL 36U
enum hclgevf_evt_cause {
HCLGEVF_VECTOR0_EVENT_RST,
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_tx.c b/drivers/net/ethernet/huawei/hinic/hinic_tx.c
index 9c78251f9c39..0e13d1c7e474 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_tx.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_tx.c
@@ -136,7 +136,7 @@ static int tx_map_skb(struct hinic_dev *nic_dev, struct sk_buff *skb,
struct hinic_hwdev *hwdev = nic_dev->hwdev;
struct hinic_hwif *hwif = hwdev->hwif;
struct pci_dev *pdev = hwif->pdev;
- struct skb_frag_struct *frag;
+ skb_frag_t *frag;
dma_addr_t dma_addr;
int i, j;
diff --git a/drivers/net/ethernet/ibm/ehea/ehea_main.c b/drivers/net/ethernet/ibm/ehea/ehea_main.c
index cca71ba7a74a..13e30eba5349 100644
--- a/drivers/net/ethernet/ibm/ehea/ehea_main.c
+++ b/drivers/net/ethernet/ibm/ehea/ehea_main.c
@@ -1577,20 +1577,16 @@ static int ehea_clean_portres(struct ehea_port *port, struct ehea_port_res *pr)
ehea_destroy_eq(pr->eq);
for (i = 0; i < pr->rq1_skba.len; i++)
- if (pr->rq1_skba.arr[i])
- dev_kfree_skb(pr->rq1_skba.arr[i]);
+ dev_kfree_skb(pr->rq1_skba.arr[i]);
for (i = 0; i < pr->rq2_skba.len; i++)
- if (pr->rq2_skba.arr[i])
- dev_kfree_skb(pr->rq2_skba.arr[i]);
+ dev_kfree_skb(pr->rq2_skba.arr[i]);
for (i = 0; i < pr->rq3_skba.len; i++)
- if (pr->rq3_skba.arr[i])
- dev_kfree_skb(pr->rq3_skba.arr[i]);
+ dev_kfree_skb(pr->rq3_skba.arr[i]);
for (i = 0; i < pr->sq_skba.len; i++)
- if (pr->sq_skba.arr[i])
- dev_kfree_skb(pr->sq_skba.arr[i]);
+ dev_kfree_skb(pr->sq_skba.arr[i]);
vfree(pr->rq1_skba.arr);
vfree(pr->rq2_skba.arr);
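The ehea cleanup above can drop the per-element guards because dev_kfree_skb() is a no-op on a NULL pointer, the same contract libc gives free(). The equivalent userspace shape:

    #include <stdlib.h>

    /* free(NULL) is defined to do nothing, so no per-element check. */
    void free_array(void **arr, size_t len)
    {
        for (size_t i = 0; i < len; i++)
            free(arr[i]);
    }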
diff --git a/drivers/net/ethernet/ibm/emac/core.c b/drivers/net/ethernet/ibm/emac/core.c
index 395dde444483..9e43c9ace9c2 100644
--- a/drivers/net/ethernet/ibm/emac/core.c
+++ b/drivers/net/ethernet/ibm/emac/core.c
@@ -1549,7 +1549,7 @@ emac_start_xmit_sg(struct sk_buff *skb, struct net_device *ndev)
ctrl);
/* skb fragments */
for (i = 0; i < nr_frags; ++i) {
- struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
+ skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
len = skb_frag_size(frag);
if (unlikely(dev->tx_cnt + mal_tx_chunks(len) >= NUM_TX_BUFF))
diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
index cebd20f3128d..07efa2b40003 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.c
+++ b/drivers/net/ethernet/ibm/ibmvnic.c
@@ -1485,7 +1485,7 @@ static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
memcpy(dst + cur,
page_address(skb_frag_page(frag)) +
- frag->page_offset, skb_frag_size(frag));
+ skb_frag_off(frag), skb_frag_size(frag));
cur += skb_frag_size(frag);
}
} else {
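The ibmvnic change above (and the skb_frag_t conversions in hinic, emac, e1000 and fm10k elsewhere in this diff) replaces direct frag->page_offset access with the skb_frag_off() accessor, so the fragment layout can change without touching callers. The accessor pattern in miniature (frag_t here is an illustrative stand-in, not the real skb_frag_t):

    #include <stddef.h>
    #include <stdint.h>

    typedef struct {
        uint32_t off;   /* previously poked directly as frag->page_offset */
        uint32_t size;
    } frag_t;

    static inline uint32_t frag_off(const frag_t *f)  { return f->off; }
    static inline uint32_t frag_size(const frag_t *f) { return f->size; }

    size_t frag_end(const frag_t *f)
    {
        return (size_t)frag_off(f) + frag_size(f); /* no raw field access */
    }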
diff --git a/drivers/net/ethernet/intel/e1000/e1000_ethtool.c b/drivers/net/ethernet/intel/e1000/e1000_ethtool.c
index a41008523c98..71d3d8854d8f 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_ethtool.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_ethtool.c
@@ -937,8 +937,7 @@ static void e1000_free_desc_rings(struct e1000_adapter *adapter)
txdr->buffer_info[i].dma,
txdr->buffer_info[i].length,
DMA_TO_DEVICE);
- if (txdr->buffer_info[i].skb)
- dev_kfree_skb(txdr->buffer_info[i].skb);
+ dev_kfree_skb(txdr->buffer_info[i].skb);
}
}
diff --git a/drivers/net/ethernet/intel/e1000/e1000_main.c b/drivers/net/ethernet/intel/e1000/e1000_main.c
index f703fa58458e..86493fea56e4 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_main.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_main.c
@@ -2889,9 +2889,8 @@ static int e1000_tx_map(struct e1000_adapter *adapter,
}
for (f = 0; f < nr_frags; f++) {
- const struct skb_frag_struct *frag;
+ const skb_frag_t *frag = &skb_shinfo(skb)->frags[f];
- frag = &skb_shinfo(skb)->frags[f];
len = skb_frag_size(frag);
offset = 0;
@@ -4176,8 +4175,7 @@ static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter,
/* an error means any chain goes out the window
* too
*/
- if (rx_ring->rx_skb_top)
- dev_kfree_skb(rx_ring->rx_skb_top);
+ dev_kfree_skb(rx_ring->rx_skb_top);
rx_ring->rx_skb_top = NULL;
goto next_desc;
}
diff --git a/drivers/net/ethernet/intel/e1000e/ethtool.c b/drivers/net/ethernet/intel/e1000e/ethtool.c
index 08342698386d..de8c5818a305 100644
--- a/drivers/net/ethernet/intel/e1000e/ethtool.c
+++ b/drivers/net/ethernet/intel/e1000e/ethtool.c
@@ -1126,8 +1126,7 @@ static void e1000_free_desc_rings(struct e1000_adapter *adapter)
buffer_info->dma,
buffer_info->length,
DMA_TO_DEVICE);
- if (buffer_info->skb)
- dev_kfree_skb(buffer_info->skb);
+ dev_kfree_skb(buffer_info->skb);
}
}
@@ -1139,8 +1138,7 @@ static void e1000_free_desc_rings(struct e1000_adapter *adapter)
dma_unmap_single(&pdev->dev,
buffer_info->dma,
2048, DMA_FROM_DEVICE);
- if (buffer_info->skb)
- dev_kfree_skb(buffer_info->skb);
+ dev_kfree_skb(buffer_info->skb);
}
}
diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.c b/drivers/net/ethernet/intel/e1000e/ich8lan.c
index 395b05701480..a1fab77b2096 100644
--- a/drivers/net/ethernet/intel/e1000e/ich8lan.c
+++ b/drivers/net/ethernet/intel/e1000e/ich8lan.c
@@ -1429,6 +1429,16 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)
else
phy_reg |= 0xFA;
e1e_wphy_locked(hw, I217_PLL_CLOCK_GATE_REG, phy_reg);
+
+ if (speed == SPEED_1000) {
+ hw->phy.ops.read_reg_locked(hw, HV_PM_CTRL,
+ &phy_reg);
+
+ phy_reg |= HV_PM_CTRL_K1_CLK_REQ;
+
+ hw->phy.ops.write_reg_locked(hw, HV_PM_CTRL,
+ phy_reg);
+ }
}
hw->phy.ops.release(hw);
diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.h b/drivers/net/ethernet/intel/e1000e/ich8lan.h
index eb09c755fa17..1502895eb45d 100644
--- a/drivers/net/ethernet/intel/e1000e/ich8lan.h
+++ b/drivers/net/ethernet/intel/e1000e/ich8lan.h
@@ -210,7 +210,7 @@
/* PHY Power Management Control */
#define HV_PM_CTRL PHY_REG(770, 17)
-#define HV_PM_CTRL_PLL_STOP_IN_K1_GIGA 0x100
+#define HV_PM_CTRL_K1_CLK_REQ 0x200
#define HV_PM_CTRL_K1_ENABLE 0x4000
#define I217_PLL_CLOCK_GATE_REG PHY_REG(772, 28)
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
index e4baa13b3cda..8a3f035c3a5f 100644
--- a/drivers/net/ethernet/intel/e1000e/netdev.c
+++ b/drivers/net/ethernet/intel/e1000e/netdev.c
@@ -5579,9 +5579,8 @@ static int e1000_tx_map(struct e1000_ring *tx_ring, struct sk_buff *skb,
}
for (f = 0; f < nr_frags; f++) {
- const struct skb_frag_struct *frag;
+ const skb_frag_t *frag = &skb_shinfo(skb)->frags[f];
- frag = &skb_shinfo(skb)->frags[f];
len = skb_frag_size(frag);
offset = 0;
@@ -6297,7 +6296,7 @@ fl_out:
static int e1000e_pm_freeze(struct device *dev)
{
- struct net_device *netdev = pci_get_drvdata(to_pci_dev(dev));
+ struct net_device *netdev = dev_get_drvdata(dev);
struct e1000_adapter *adapter = netdev_priv(netdev);
netif_device_detach(netdev);
@@ -6630,7 +6629,7 @@ static int __e1000_resume(struct pci_dev *pdev)
#ifdef CONFIG_PM_SLEEP
static int e1000e_pm_thaw(struct device *dev)
{
- struct net_device *netdev = pci_get_drvdata(to_pci_dev(dev));
+ struct net_device *netdev = dev_get_drvdata(dev);
struct e1000_adapter *adapter = netdev_priv(netdev);
e1000e_set_interrupt_capability(adapter);
@@ -6679,8 +6678,7 @@ static int e1000e_pm_resume(struct device *dev)
static int e1000e_pm_runtime_idle(struct device *dev)
{
- struct pci_dev *pdev = to_pci_dev(dev);
- struct net_device *netdev = pci_get_drvdata(pdev);
+ struct net_device *netdev = dev_get_drvdata(dev);
struct e1000_adapter *adapter = netdev_priv(netdev);
u16 eee_lp;
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k.h b/drivers/net/ethernet/intel/fm10k/fm10k.h
index 7d42582ed48d..b14441944b4b 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k.h
+++ b/drivers/net/ethernet/intel/fm10k/fm10k.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright(c) 2013 - 2018 Intel Corporation. */
+/* Copyright(c) 2013 - 2019 Intel Corporation. */
#ifndef _FM10K_H_
#define _FM10K_H_
@@ -177,14 +177,10 @@ static inline struct netdev_queue *txring_txq(const struct fm10k_ring *ring)
#define MIN_Q_VECTORS 1
enum fm10k_non_q_vectors {
FM10K_MBX_VECTOR,
-#define NON_Q_VECTORS_VF NON_Q_VECTORS_PF
- NON_Q_VECTORS_PF
+ NON_Q_VECTORS
};
-#define NON_Q_VECTORS(hw) (((hw)->mac.type == fm10k_mac_pf) ? \
- NON_Q_VECTORS_PF : \
- NON_Q_VECTORS_VF)
-#define MIN_MSIX_COUNT(hw) (MIN_Q_VECTORS + NON_Q_VECTORS(hw))
+#define MIN_MSIX_COUNT(hw) (MIN_Q_VECTORS + NON_Q_VECTORS)
struct fm10k_q_vector {
struct fm10k_intfc *interface;
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_dcbnl.c b/drivers/net/ethernet/intel/fm10k/fm10k_dcbnl.c
index 20768ac7f17e..c45315472245 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_dcbnl.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_dcbnl.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/* Copyright(c) 2013 - 2018 Intel Corporation. */
+/* Copyright(c) 2013 - 2019 Intel Corporation. */
#include "fm10k.h"
@@ -36,7 +36,7 @@ static int fm10k_dcbnl_ieee_getets(struct net_device *dev, struct ieee_ets *ets)
static int fm10k_dcbnl_ieee_setets(struct net_device *dev, struct ieee_ets *ets)
{
u8 num_tc = 0;
- int i, err;
+ int i;
/* verify type and determine num_tcs needed */
for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
@@ -57,7 +57,7 @@ static int fm10k_dcbnl_ieee_setets(struct net_device *dev, struct ieee_ets *ets)
/* update TC hardware mapping if necessary */
if (num_tc != netdev_get_num_tc(dev)) {
- err = fm10k_setup_tc(dev, num_tc);
+ int err = fm10k_setup_tc(dev, num_tc);
if (err)
return err;
}
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_debugfs.c b/drivers/net/ethernet/intel/fm10k/fm10k_debugfs.c
index dca104121c05..1d27b2fb23af 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_debugfs.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_debugfs.c
@@ -160,8 +160,6 @@ void fm10k_dbg_q_vector_init(struct fm10k_q_vector *q_vector)
snprintf(name, sizeof(name), "q_vector.%03d", q_vector->v_idx);
q_vector->dbg_q_vector = debugfs_create_dir(name, interface->dbg_intfc);
- if (!q_vector->dbg_q_vector)
- return;
/* Generate a file for each rx ring in the q_vector */
for (i = 0; i < q_vector->tx.count; i++) {
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c b/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c
index 4895dd83dd08..c681d2d28107 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/* Copyright(c) 2013 - 2018 Intel Corporation. */
+/* Copyright(c) 2013 - 2019 Intel Corporation. */
#include <linux/vmalloc.h>
@@ -222,7 +222,6 @@ static void __fm10k_add_ethtool_stats(u64 **data, void *pointer,
const unsigned int size)
{
unsigned int i;
- char *p;
if (!pointer) {
/* memory is not zero allocated so we have to clear it */
@@ -232,7 +231,7 @@ static void __fm10k_add_ethtool_stats(u64 **data, void *pointer,
}
for (i = 0; i < size; i++) {
- p = (char *)pointer + stats[i].stat_offset;
+ char *p = (char *)pointer + stats[i].stat_offset;
switch (stats[i].sizeof_stat) {
case sizeof(u64):
@@ -651,7 +650,6 @@ static int fm10k_set_coalesce(struct net_device *dev,
struct ethtool_coalesce *ec)
{
struct fm10k_intfc *interface = netdev_priv(dev);
- struct fm10k_q_vector *qv;
u16 tx_itr, rx_itr;
int i;
@@ -677,7 +675,8 @@ static int fm10k_set_coalesce(struct net_device *dev,
/* update q_vectors */
for (i = 0; i < interface->num_q_vectors; i++) {
- qv = interface->q_vector[i];
+ struct fm10k_q_vector *qv = interface->q_vector[i];
+
qv->tx.itr = tx_itr;
qv->rx.itr = rx_itr;
}
@@ -1115,13 +1114,12 @@ static void fm10k_get_channels(struct net_device *dev,
struct ethtool_channels *ch)
{
struct fm10k_intfc *interface = netdev_priv(dev);
- struct fm10k_hw *hw = &interface->hw;
/* report maximum channels */
ch->max_combined = fm10k_max_channels(dev);
/* report info for other vector */
- ch->max_other = NON_Q_VECTORS(hw);
+ ch->max_other = NON_Q_VECTORS;
ch->other_count = ch->max_other;
/* record RSS queues */
@@ -1133,14 +1131,13 @@ static int fm10k_set_channels(struct net_device *dev,
{
struct fm10k_intfc *interface = netdev_priv(dev);
unsigned int count = ch->combined_count;
- struct fm10k_hw *hw = &interface->hw;
/* verify they are not requesting separate vectors */
if (!count || ch->rx_count || ch->tx_count)
return -EINVAL;
/* verify other_count has not changed */
- if (ch->other_count != NON_Q_VECTORS(hw))
+ if (ch->other_count != NON_Q_VECTORS)
return -EINVAL;
/* verify the number of channels does not exceed hardware limits */
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_iov.c b/drivers/net/ethernet/intel/fm10k/fm10k_iov.c
index 8de77155f2e7..afe1fafd2447 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_iov.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_iov.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/* Copyright(c) 2013 - 2018 Intel Corporation. */
+/* Copyright(c) 2013 - 2019 Intel Corporation. */
#include "fm10k.h"
#include "fm10k_vf.h"
@@ -426,7 +426,7 @@ static s32 fm10k_iov_alloc_data(struct pci_dev *pdev, int num_vfs)
struct fm10k_iov_data *iov_data = interface->iov_data;
struct fm10k_hw *hw = &interface->hw;
size_t size;
- int i, err;
+ int i;
/* return error if iov_data is already populated */
if (iov_data)
@@ -452,6 +452,7 @@ static s32 fm10k_iov_alloc_data(struct pci_dev *pdev, int num_vfs)
/* loop through vf_info structures initializing each entry */
for (i = 0; i < num_vfs; i++) {
struct fm10k_vf_info *vf_info = &iov_data->vf_info[i];
+ int err;
/* Record VF VSI value */
vf_info->vsi = i + 1;
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_main.c b/drivers/net/ethernet/intel/fm10k/fm10k_main.c
index 90270b4a1682..e0a2be534b20 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_main.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_main.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/* Copyright(c) 2013 - 2018 Intel Corporation. */
+/* Copyright(c) 2013 - 2019 Intel Corporation. */
#include <linux/types.h>
#include <linux/module.h>
@@ -17,7 +17,7 @@ const char fm10k_driver_version[] = DRV_VERSION;
char fm10k_driver_name[] = "fm10k";
static const char fm10k_driver_string[] = DRV_SUMMARY;
static const char fm10k_copyright[] =
- "Copyright(c) 2013 - 2018 Intel Corporation.";
+ "Copyright(c) 2013 - 2019 Intel Corporation.";
MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
MODULE_DESCRIPTION(DRV_SUMMARY);
@@ -315,7 +315,7 @@ static struct sk_buff *fm10k_fetch_rx_buffer(struct fm10k_ring *rx_ring,
/* prefetch first cache line of first page */
prefetch(page_addr);
#if L1_CACHE_BYTES < 128
- prefetch(page_addr + L1_CACHE_BYTES);
+ prefetch((void *)((u8 *)page_addr + L1_CACHE_BYTES));
#endif
/* allocate a skb to store the frags */
@@ -946,7 +946,7 @@ static void fm10k_tx_map(struct fm10k_ring *tx_ring,
struct sk_buff *skb = first->skb;
struct fm10k_tx_buffer *tx_buffer;
struct fm10k_tx_desc *tx_desc;
- struct skb_frag_struct *frag;
+ skb_frag_t *frag;
unsigned char *data;
dma_addr_t dma;
unsigned int data_len, size;
@@ -1074,7 +1074,8 @@ netdev_tx_t fm10k_xmit_frame_ring(struct sk_buff *skb,
* otherwise try next time
*/
for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
- count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size);
+ count += TXD_USE_COUNT(skb_frag_size(
+ &skb_shinfo(skb)->frags[f]));
if (fm10k_maybe_stop_tx(tx_ring, count + 3)) {
tx_ring->tx_stats.tx_busy++;
@@ -1823,7 +1824,7 @@ static int fm10k_init_msix_capability(struct fm10k_intfc *interface)
v_budget = min_t(u16, v_budget, num_online_cpus());
/* account for vectors not related to queues */
- v_budget += NON_Q_VECTORS(hw);
+ v_budget += NON_Q_VECTORS;
/* At the same time, hardware can only support a maximum of
* hw.mac->max_msix_vectors vectors. With features
@@ -1855,7 +1856,7 @@ static int fm10k_init_msix_capability(struct fm10k_intfc *interface)
}
/* record the number of queues available for q_vectors */
- interface->num_q_vectors = v_budget - NON_Q_VECTORS(hw);
+ interface->num_q_vectors = v_budget - NON_Q_VECTORS;
return 0;
}
@@ -1869,7 +1870,7 @@ static int fm10k_init_msix_capability(struct fm10k_intfc *interface)
static bool fm10k_cache_ring_qos(struct fm10k_intfc *interface)
{
struct net_device *dev = interface->netdev;
- int pc, offset, rss_i, i, q_idx;
+ int pc, offset, rss_i, i;
u16 pc_stride = interface->ring_feature[RING_F_QOS].mask + 1;
u8 num_pcs = netdev_get_num_tc(dev);
@@ -1879,7 +1880,8 @@ static bool fm10k_cache_ring_qos(struct fm10k_intfc *interface)
rss_i = interface->ring_feature[RING_F_RSS].indices;
for (pc = 0, offset = 0; pc < num_pcs; pc++, offset += rss_i) {
- q_idx = pc;
+ int q_idx = pc;
+
for (i = 0; i < rss_i; i++) {
interface->tx_ring[offset + i]->reg_idx = q_idx;
interface->tx_ring[offset + i]->qos_pc = pc;
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_mbx.c b/drivers/net/ethernet/intel/fm10k/fm10k_mbx.c
index 21021fe4f1c3..75e51f91036c 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_mbx.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_mbx.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/* Copyright(c) 2013 - 2018 Intel Corporation. */
+/* Copyright(c) 2013 - 2019 Intel Corporation. */
#include "fm10k_common.h"
@@ -297,13 +297,14 @@ static u16 fm10k_mbx_validate_msg_size(struct fm10k_mbx_info *mbx, u16 len)
{
struct fm10k_mbx_fifo *fifo = &mbx->rx;
u16 total_len = 0, msg_len;
- u32 *msg;
/* length should include previous amounts pushed */
len += mbx->pushed;
/* offset in message is based off of current message size */
do {
+ u32 *msg;
+
msg = fifo->buffer + fm10k_fifo_tail_offset(fifo, total_len);
msg_len = FM10K_TLV_DWORD_LEN(*msg);
total_len += msg_len;
@@ -1920,7 +1921,6 @@ static void fm10k_sm_mbx_transmit(struct fm10k_hw *hw,
/* reduce length by 1 to convert to a mask */
u16 mbmem_len = mbx->mbmem_len - 1;
u16 tail_len, len = 0;
- u32 *msg;
/* push head behind tail */
if (mbx->tail < head)
@@ -1930,6 +1930,8 @@ static void fm10k_sm_mbx_transmit(struct fm10k_hw *hw,
/* determine msg aligned offset for end of buffer */
do {
+ u32 *msg;
+
msg = fifo->buffer + fm10k_fifo_head_offset(fifo, len);
tail_len = len;
len += FM10K_TLV_DWORD_LEN(*msg);
@@ -2132,7 +2134,8 @@ fifo_err:
* DWORDs, not bytes. Any invalid values will cause the mailbox to return
* error.
**/
-s32 fm10k_sm_mbx_init(struct fm10k_hw *hw, struct fm10k_mbx_info *mbx,
+s32 fm10k_sm_mbx_init(struct fm10k_hw __always_unused *hw,
+ struct fm10k_mbx_info *mbx,
const struct fm10k_msg_data *msg_data)
{
mbx->mbx_reg = FM10K_GMBX;
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c b/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c
index 538a8467f434..09f7a246e134 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/* Copyright(c) 2013 - 2018 Intel Corporation. */
+/* Copyright(c) 2013 - 2019 Intel Corporation. */
#include "fm10k.h"
#include <linux/vmalloc.h>
@@ -54,7 +54,7 @@ err:
**/
static int fm10k_setup_all_tx_resources(struct fm10k_intfc *interface)
{
- int i, err = 0;
+ int i, err;
for (i = 0; i < interface->num_tx_queues; i++) {
err = fm10k_setup_tx_resources(interface->tx_ring[i]);
@@ -121,7 +121,7 @@ err:
**/
static int fm10k_setup_all_rx_resources(struct fm10k_intfc *interface)
{
- int i, err = 0;
+ int i, err;
for (i = 0; i < interface->num_rx_queues; i++) {
err = fm10k_setup_rx_resources(interface->rx_ring[i]);
@@ -169,7 +169,6 @@ void fm10k_unmap_and_free_tx_resource(struct fm10k_ring *ring,
**/
static void fm10k_clean_tx_ring(struct fm10k_ring *tx_ring)
{
- struct fm10k_tx_buffer *tx_buffer;
unsigned long size;
u16 i;
@@ -179,7 +178,8 @@ static void fm10k_clean_tx_ring(struct fm10k_ring *tx_ring)
/* Free all the Tx ring sk_buffs */
for (i = 0; i < tx_ring->count; i++) {
- tx_buffer = &tx_ring->tx_buffer[i];
+ struct fm10k_tx_buffer *tx_buffer = &tx_ring->tx_buffer[i];
+
fm10k_unmap_and_free_tx_resource(tx_ring, tx_buffer);
}
@@ -253,8 +253,7 @@ static void fm10k_clean_rx_ring(struct fm10k_ring *rx_ring)
if (!rx_ring->rx_buffer)
return;
- if (rx_ring->skb)
- dev_kfree_skb(rx_ring->skb);
+ dev_kfree_skb(rx_ring->skb);
rx_ring->skb = NULL;
/* Free all the Rx ring sk_buffs */
@@ -871,7 +870,7 @@ static int fm10k_uc_vlan_unsync(struct net_device *netdev,
u16 glort = interface->glort;
u16 vid = interface->vid;
bool set = !!(vid / VLAN_N_VID);
- int err = -EHOSTDOWN;
+ int err;
/* drop any leading bits on the VLAN ID */
vid &= VLAN_N_VID - 1;
@@ -891,7 +890,7 @@ static int fm10k_mc_vlan_unsync(struct net_device *netdev,
u16 glort = interface->glort;
u16 vid = interface->vid;
bool set = !!(vid / VLAN_N_VID);
- int err = -EHOSTDOWN;
+ int err;
/* drop any leading bits on the VLAN ID */
vid &= VLAN_N_VID - 1;
@@ -1444,11 +1443,11 @@ static int __fm10k_setup_tc(struct net_device *dev, enum tc_setup_type type,
static void fm10k_assign_l2_accel(struct fm10k_intfc *interface,
struct fm10k_l2_accel *l2_accel)
{
- struct fm10k_ring *ring;
int i;
for (i = 0; i < interface->num_rx_queues; i++) {
- ring = interface->rx_ring[i];
+ struct fm10k_ring *ring = interface->rx_ring[i];
+
rcu_assign_pointer(ring->l2_accel, l2_accel);
}
@@ -1463,7 +1462,7 @@ static void *fm10k_dfwd_add_station(struct net_device *dev,
struct fm10k_l2_accel *old_l2_accel = NULL;
struct fm10k_dglort_cfg dglort = { 0 };
struct fm10k_hw *hw = &interface->hw;
- int size = 0, i;
+ int size, i;
u16 vid, glort;
/* The hardware supported by fm10k only filters on the destination MAC
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_pci.c b/drivers/net/ethernet/intel/fm10k/fm10k_pci.c
index e49fb51d3613..bb236fa44048 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_pci.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_pci.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/* Copyright(c) 2013 - 2018 Intel Corporation. */
+/* Copyright(c) 2013 - 2019 Intel Corporation. */
#include <linux/module.h>
#include <linux/interrupt.h>
@@ -344,7 +344,6 @@ static void fm10k_detach_subtask(struct fm10k_intfc *interface)
struct net_device *netdev = interface->netdev;
u32 __iomem *hw_addr;
u32 value;
- int err;
/* do nothing if netdev is still present or hw_addr is set */
if (netif_device_present(netdev) || interface->hw.hw_addr)
@@ -362,6 +361,8 @@ static void fm10k_detach_subtask(struct fm10k_intfc *interface)
hw_addr = READ_ONCE(interface->uc_addr);
value = readl(hw_addr);
if (~value) {
+ int err;
+
/* Make sure the reset was initiated because we detached,
* otherwise we might race with a different reset flow.
*/
@@ -697,8 +698,6 @@ static void fm10k_watchdog_subtask(struct fm10k_intfc *interface)
*/
static void fm10k_check_hang_subtask(struct fm10k_intfc *interface)
{
- int i;
-
/* If we're down or resetting, just bail */
if (test_bit(__FM10K_DOWN, interface->state) ||
test_bit(__FM10K_RESETTING, interface->state))
@@ -710,6 +709,8 @@ static void fm10k_check_hang_subtask(struct fm10k_intfc *interface)
interface->next_tx_hang_check = jiffies + (2 * HZ);
if (netif_carrier_ok(interface->netdev)) {
+ int i;
+
/* Force detection of hung controller */
for (i = 0; i < interface->num_tx_queues; i++)
set_check_for_tx_hang(interface->tx_ring[i]);
@@ -897,7 +898,7 @@ static void fm10k_configure_tx_ring(struct fm10k_intfc *interface,
/* Map interrupt */
if (ring->q_vector) {
- txint = ring->q_vector->v_idx + NON_Q_VECTORS(hw);
+ txint = ring->q_vector->v_idx + NON_Q_VECTORS;
txint |= FM10K_INT_MAP_TIMER0;
}
@@ -1036,7 +1037,7 @@ static void fm10k_configure_rx_ring(struct fm10k_intfc *interface,
/* Map interrupt */
if (ring->q_vector) {
- rxint = ring->q_vector->v_idx + NON_Q_VECTORS(hw);
+ rxint = ring->q_vector->v_idx + NON_Q_VECTORS;
rxint |= FM10K_INT_MAP_TIMER1;
}
@@ -1719,10 +1720,9 @@ int fm10k_mbx_request_irq(struct fm10k_intfc *interface)
void fm10k_qv_free_irq(struct fm10k_intfc *interface)
{
int vector = interface->num_q_vectors;
- struct fm10k_hw *hw = &interface->hw;
struct msix_entry *entry;
- entry = &interface->msix_entries[NON_Q_VECTORS(hw) + vector];
+ entry = &interface->msix_entries[NON_Q_VECTORS + vector];
while (vector) {
struct fm10k_q_vector *q_vector;
@@ -1759,7 +1759,7 @@ int fm10k_qv_request_irq(struct fm10k_intfc *interface)
unsigned int ri = 0, ti = 0;
int vector, err;
- entry = &interface->msix_entries[NON_Q_VECTORS(hw)];
+ entry = &interface->msix_entries[NON_Q_VECTORS];
for (vector = 0; vector < interface->num_q_vectors; vector++) {
struct fm10k_q_vector *q_vector = interface->q_vector[vector];
@@ -2339,7 +2339,7 @@ static int fm10k_handle_resume(struct fm10k_intfc *interface)
/* Restart the MAC/VLAN request queue in-case of outstanding events */
fm10k_macvlan_schedule(interface);
- return err;
+ return 0;
}
/**
@@ -2352,7 +2352,7 @@ static int fm10k_handle_resume(struct fm10k_intfc *interface)
**/
static int __maybe_unused fm10k_resume(struct device *dev)
{
- struct fm10k_intfc *interface = pci_get_drvdata(to_pci_dev(dev));
+ struct fm10k_intfc *interface = dev_get_drvdata(dev);
struct net_device *netdev = interface->netdev;
struct fm10k_hw *hw = &interface->hw;
int err;
@@ -2379,7 +2379,7 @@ static int __maybe_unused fm10k_resume(struct device *dev)
**/
static int __maybe_unused fm10k_suspend(struct device *dev)
{
- struct fm10k_intfc *interface = pci_get_drvdata(to_pci_dev(dev));
+ struct fm10k_intfc *interface = dev_get_drvdata(dev);
struct net_device *netdev = interface->netdev;
netif_device_detach(netdev);
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_pf.c b/drivers/net/ethernet/intel/fm10k/fm10k_pf.c
index cb4d02629b86..be07bfdb0bb4 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_pf.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_pf.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/* Copyright(c) 2013 - 2018 Intel Corporation. */
+/* Copyright(c) 2013 - 2019 Intel Corporation. */
#include "fm10k_pf.h"
#include "fm10k_vf.h"
@@ -1152,7 +1152,7 @@ static void fm10k_iov_update_stats_pf(struct fm10k_hw *hw,
* assumption is that in this case it is acceptable to just directly
* hand off the message from the VF to the underlying shared code.
**/
-s32 fm10k_iov_msg_msix_pf(struct fm10k_hw *hw, u32 **results,
+s32 fm10k_iov_msg_msix_pf(struct fm10k_hw *hw, u32 __always_unused **results,
struct fm10k_mbx_info *mbx)
{
struct fm10k_vf_info *vf_info = (struct fm10k_vf_info *)mbx;
@@ -1352,7 +1352,6 @@ s32 fm10k_iov_msg_lport_state_pf(struct fm10k_hw *hw, u32 **results,
struct fm10k_mbx_info *mbx)
{
struct fm10k_vf_info *vf_info = (struct fm10k_vf_info *)mbx;
- u32 *result;
s32 err = 0;
u32 msg[2];
u8 mode = 0;
@@ -1362,7 +1361,7 @@ s32 fm10k_iov_msg_lport_state_pf(struct fm10k_hw *hw, u32 **results,
return FM10K_ERR_PARAM;
if (!!results[FM10K_LPORT_STATE_MSG_XCAST_MODE]) {
- result = results[FM10K_LPORT_STATE_MSG_XCAST_MODE];
+ u32 *result = results[FM10K_LPORT_STATE_MSG_XCAST_MODE];
/* XCAST mode update requested */
err = fm10k_tlv_attr_get_u8(result, &mode);
@@ -1566,7 +1565,7 @@ static s32 fm10k_get_fault_pf(struct fm10k_hw *hw, int type,
/* read remaining fields */
fault->address = fm10k_read_reg(hw, type + FM10K_FAULT_ADDR_HI);
fault->address <<= 32;
- fault->address = fm10k_read_reg(hw, type + FM10K_FAULT_ADDR_LO);
+ fault->address |= fm10k_read_reg(hw, type + FM10K_FAULT_ADDR_LO);
fault->specinfo = fm10k_read_reg(hw, type + FM10K_FAULT_SPECINFO);
/* clear valid bit to allow for next error */
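The one-character fm10k_get_fault_pf() fix above matters: with '=' the low 32-bit read overwrote the shifted high half instead of OR-ing into it, truncating every fault address to 32 bits. A minimal reproduction of the correct pattern:

    #include <stdint.h>

    /* Assemble a 64-bit value from two 32-bit register reads. */
    uint64_t read_addr64(uint32_t hi, uint32_t lo)
    {
        uint64_t addr = hi;

        addr <<= 32;
        addr |= lo;     /* the bug was 'addr = lo;', discarding hi */
        return addr;
    }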
@@ -1642,7 +1641,7 @@ const struct fm10k_tlv_attr fm10k_lport_map_msg_attr[] = {
* switch API.
**/
s32 fm10k_msg_lport_map_pf(struct fm10k_hw *hw, u32 **results,
- struct fm10k_mbx_info *mbx)
+ struct fm10k_mbx_info __always_unused *mbx)
{
u16 glort, mask;
u32 dglort_map;
@@ -1685,7 +1684,7 @@ const struct fm10k_tlv_attr fm10k_update_pvid_msg_attr[] = {
* This handler configures the default VLAN for the PF
**/
static s32 fm10k_msg_update_pvid_pf(struct fm10k_hw *hw, u32 **results,
- struct fm10k_mbx_info *mbx)
+ struct fm10k_mbx_info __always_unused *mbx)
{
u16 glort, pvid;
u32 pvid_update;
@@ -1746,7 +1745,7 @@ const struct fm10k_tlv_attr fm10k_err_msg_attr[] = {
* messages that the PF has sent.
**/
s32 fm10k_msg_err_pf(struct fm10k_hw *hw, u32 **results,
- struct fm10k_mbx_info *mbx)
+ struct fm10k_mbx_info __always_unused *mbx)
{
struct fm10k_swapi_error err_msg;
s32 err;
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_tlv.c b/drivers/net/ethernet/intel/fm10k/fm10k_tlv.c
index 2a7a40bf2b1c..21eff0895a7a 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_tlv.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_tlv.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/* Copyright(c) 2013 - 2018 Intel Corporation. */
+/* Copyright(c) 2013 - 2019 Intel Corporation. */
#include "fm10k_tlv.h"
@@ -472,7 +472,7 @@ static s32 fm10k_tlv_attr_parse(u32 *attr, u32 **results,
const struct fm10k_tlv_attr *tlv_attr)
{
u32 i, attr_id, offset = 0;
- s32 err = 0;
+ s32 err;
u16 len;
/* verify pointers are not NULL */
@@ -587,8 +587,9 @@ s32 fm10k_tlv_msg_parse(struct fm10k_hw *hw, u32 *msg,
* a minimum it just indicates that the message requested was
* unimplemented.
**/
-s32 fm10k_tlv_msg_error(struct fm10k_hw *hw, u32 **results,
- struct fm10k_mbx_info *mbx)
+s32 fm10k_tlv_msg_error(struct fm10k_hw __always_unused *hw,
+ u32 __always_unused **results,
+ struct fm10k_mbx_info __always_unused *mbx)
{
return FM10K_NOT_IMPLEMENTED;
}
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_type.h b/drivers/net/ethernet/intel/fm10k/fm10k_type.h
index 9fb9fca375e3..15ac1c7885bc 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_type.h
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_type.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright(c) 2013 - 2018 Intel Corporation. */
+/* Copyright(c) 2013 - 2019 Intel Corporation. */
#ifndef _FM10K_TYPE_H_
#define _FM10K_TYPE_H_
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_vf.c b/drivers/net/ethernet/intel/fm10k/fm10k_vf.c
index a8519c1f0406..dc8ccd378ec9 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_vf.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_vf.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/* Copyright(c) 2013 - 2018 Intel Corporation. */
+/* Copyright(c) 2013 - 2019 Intel Corporation. */
#include "fm10k_vf.h"
@@ -198,7 +198,7 @@ static s32 fm10k_update_vlan_vf(struct fm10k_hw *hw, u32 vid, u8 vsi, bool set)
* This function should determine the MAC address for the VF
**/
s32 fm10k_msg_mac_vlan_vf(struct fm10k_hw *hw, u32 **results,
- struct fm10k_mbx_info *mbx)
+ struct fm10k_mbx_info __always_unused *mbx)
{
u8 perm_addr[ETH_ALEN];
u16 vid;
@@ -267,8 +267,10 @@ static s32 fm10k_read_mac_addr_vf(struct fm10k_hw *hw)
* This function is used to add or remove unicast MAC addresses for
* the VF.
**/
-static s32 fm10k_update_uc_addr_vf(struct fm10k_hw *hw, u16 glort,
- const u8 *mac, u16 vid, bool add, u8 flags)
+static s32 fm10k_update_uc_addr_vf(struct fm10k_hw *hw,
+ u16 __always_unused glort,
+ const u8 *mac, u16 vid, bool add,
+ u8 __always_unused flags)
{
struct fm10k_mbx_info *mbx = &hw->mbx;
u32 msg[7];
@@ -309,7 +311,8 @@ static s32 fm10k_update_uc_addr_vf(struct fm10k_hw *hw, u16 glort,
* This function is used to add or remove multicast MAC addresses for
* the VF.
**/
-static s32 fm10k_update_mc_addr_vf(struct fm10k_hw *hw, u16 glort,
+static s32 fm10k_update_mc_addr_vf(struct fm10k_hw *hw,
+ u16 __always_unused glort,
const u8 *mac, u16 vid, bool add)
{
struct fm10k_mbx_info *mbx = &hw->mbx;
@@ -373,7 +376,7 @@ const struct fm10k_tlv_attr fm10k_lport_state_msg_attr[] = {
* are ready to bring up the interface.
**/
s32 fm10k_msg_lport_state_vf(struct fm10k_hw *hw, u32 **results,
- struct fm10k_mbx_info *mbx)
+ struct fm10k_mbx_info __always_unused *mbx)
{
hw->mac.dglort_map = !results[FM10K_LPORT_STATE_MSG_READY] ?
FM10K_DGLORTMAP_NONE : FM10K_DGLORTMAP_ZERO;
@@ -392,8 +395,9 @@ s32 fm10k_msg_lport_state_vf(struct fm10k_hw *hw, u32 **results,
* enabled we can add filters, if it is disabled all filters for this
* logical port are flushed.
**/
-static s32 fm10k_update_lport_state_vf(struct fm10k_hw *hw, u16 glort,
- u16 count, bool enable)
+static s32 fm10k_update_lport_state_vf(struct fm10k_hw *hw,
+ u16 __always_unused glort,
+ u16 __always_unused count, bool enable)
{
struct fm10k_mbx_info *mbx = &hw->mbx;
u32 msg[2];
@@ -420,7 +424,8 @@ static s32 fm10k_update_lport_state_vf(struct fm10k_hw *hw, u16 glort,
* so that it can enable either multicast, multicast promiscuous, or
* promiscuous mode of operation.
**/
-static s32 fm10k_update_xcast_mode_vf(struct fm10k_hw *hw, u16 glort, u8 mode)
+static s32 fm10k_update_xcast_mode_vf(struct fm10k_hw *hw,
+ u16 __always_unused glort, u8 mode)
{
struct fm10k_mbx_info *mbx = &hw->mbx;
u32 msg[3];
@@ -475,7 +480,7 @@ static void fm10k_rebind_hw_stats_vf(struct fm10k_hw *hw,
* that information to then populate a DGLORTMAP/DEC entry and the queues
* to which it has been assigned.
**/
-static s32 fm10k_configure_dglort_map_vf(struct fm10k_hw *hw,
+static s32 fm10k_configure_dglort_map_vf(struct fm10k_hw __always_unused *hw,
struct fm10k_dglort_cfg *dglort)
{
/* verify the dglort pointer */
diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h
index 84bd06901014..3e535d3263b3 100644
--- a/drivers/net/ethernet/intel/i40e/i40e.h
+++ b/drivers/net/ethernet/intel/i40e/i40e.h
@@ -1021,6 +1021,7 @@ i40e_find_vsi_by_type(struct i40e_pf *pf, u16 type)
return NULL;
}
void i40e_update_stats(struct i40e_vsi *vsi);
+void i40e_update_veb_stats(struct i40e_veb *veb);
void i40e_update_eth_stats(struct i40e_vsi *vsi);
struct rtnl_link_stats64 *i40e_get_vsi_stats_struct(struct i40e_vsi *vsi);
int i40e_fetch_switch_configuration(struct i40e_pf *pf,
diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq.c b/drivers/net/ethernet/intel/i40e/i40e_adminq.c
index 814acbe79ffd..72c04881d290 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_adminq.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_adminq.c
@@ -610,8 +610,10 @@ i40e_status i40e_init_adminq(struct i40e_hw *hw)
if (hw->aq.api_maj_ver > 1 ||
(hw->aq.api_maj_ver == 1 &&
- hw->aq.api_min_ver >= 8))
+ hw->aq.api_min_ver >= 8)) {
hw->flags |= I40E_HW_FLAG_FW_LLDP_PERSISTENT;
+ hw->flags |= I40E_HW_FLAG_DROP_MODE;
+ }
if (hw->aq.api_maj_ver > I40E_FW_API_VERSION_MAJOR) {
ret_code = I40E_ERR_FIRMWARE_API_VERSION;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
index 6536023fa074..21cccec328e3 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
@@ -11,8 +11,8 @@
*/
#define I40E_FW_API_VERSION_MAJOR 0x0001
-#define I40E_FW_API_VERSION_MINOR_X722 0x0008
-#define I40E_FW_API_VERSION_MINOR_X710 0x0008
+#define I40E_FW_API_VERSION_MINOR_X722 0x0009
+#define I40E_FW_API_VERSION_MINOR_X710 0x0009
#define I40E_FW_MINOR_VERSION(_h) ((_h)->mac.type == I40E_MAC_XL710 ? \
I40E_FW_API_VERSION_MINOR_X710 : \
@@ -2051,20 +2051,21 @@ I40E_CHECK_CMD_LENGTH(i40e_aq_set_phy_config);
struct i40e_aq_set_mac_config {
__le16 max_frame_size;
u8 params;
-#define I40E_AQ_SET_MAC_CONFIG_CRC_EN 0x04
-#define I40E_AQ_SET_MAC_CONFIG_PACING_MASK 0x78
-#define I40E_AQ_SET_MAC_CONFIG_PACING_SHIFT 3
-#define I40E_AQ_SET_MAC_CONFIG_PACING_NONE 0x0
-#define I40E_AQ_SET_MAC_CONFIG_PACING_1B_13TX 0xF
-#define I40E_AQ_SET_MAC_CONFIG_PACING_1DW_9TX 0x9
-#define I40E_AQ_SET_MAC_CONFIG_PACING_1DW_4TX 0x8
-#define I40E_AQ_SET_MAC_CONFIG_PACING_3DW_7TX 0x7
-#define I40E_AQ_SET_MAC_CONFIG_PACING_2DW_3TX 0x6
-#define I40E_AQ_SET_MAC_CONFIG_PACING_1DW_1TX 0x5
-#define I40E_AQ_SET_MAC_CONFIG_PACING_3DW_2TX 0x4
-#define I40E_AQ_SET_MAC_CONFIG_PACING_7DW_3TX 0x3
-#define I40E_AQ_SET_MAC_CONFIG_PACING_4DW_1TX 0x2
-#define I40E_AQ_SET_MAC_CONFIG_PACING_9DW_1TX 0x1
+#define I40E_AQ_SET_MAC_CONFIG_CRC_EN 0x04
+#define I40E_AQ_SET_MAC_CONFIG_PACING_MASK 0x78
+#define I40E_AQ_SET_MAC_CONFIG_PACING_SHIFT 3
+#define I40E_AQ_SET_MAC_CONFIG_PACING_NONE 0x0
+#define I40E_AQ_SET_MAC_CONFIG_PACING_1B_13TX 0xF
+#define I40E_AQ_SET_MAC_CONFIG_PACING_1DW_9TX 0x9
+#define I40E_AQ_SET_MAC_CONFIG_PACING_1DW_4TX 0x8
+#define I40E_AQ_SET_MAC_CONFIG_PACING_3DW_7TX 0x7
+#define I40E_AQ_SET_MAC_CONFIG_PACING_2DW_3TX 0x6
+#define I40E_AQ_SET_MAC_CONFIG_PACING_1DW_1TX 0x5
+#define I40E_AQ_SET_MAC_CONFIG_PACING_3DW_2TX 0x4
+#define I40E_AQ_SET_MAC_CONFIG_PACING_7DW_3TX 0x3
+#define I40E_AQ_SET_MAC_CONFIG_PACING_4DW_1TX 0x2
+#define I40E_AQ_SET_MAC_CONFIG_PACING_9DW_1TX 0x1
+#define I40E_AQ_SET_MAC_CONFIG_DROP_BLOCKING_PACKET_EN 0x80
u8 tx_timer_priority; /* bitmap */
__le16 tx_timer_value;
__le16 fc_refresh_threshold;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_common.c b/drivers/net/ethernet/intel/i40e/i40e_common.c
index 906cf68d3453..46e649c09f72 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_common.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_common.c
@@ -13,7 +13,7 @@
* This function sets the mac type of the adapter based on the
* vendor ID and device ID stored in the hw structure.
**/
-static i40e_status i40e_set_mac_type(struct i40e_hw *hw)
+i40e_status i40e_set_mac_type(struct i40e_hw *hw)
{
i40e_status status = 0;
@@ -1577,19 +1577,22 @@ i40e_status i40e_aq_get_phy_capabilities(struct i40e_hw *hw,
status = i40e_asq_send_command(hw, &desc, abilities,
abilities_size, cmd_details);
- if (status)
- break;
-
- if (hw->aq.asq_last_status == I40E_AQ_RC_EIO) {
+ switch (hw->aq.asq_last_status) {
+ case I40E_AQ_RC_EIO:
status = I40E_ERR_UNKNOWN_PHY;
break;
- } else if (hw->aq.asq_last_status == I40E_AQ_RC_EAGAIN) {
+ case I40E_AQ_RC_EAGAIN:
usleep_range(1000, 2000);
total_delay++;
status = I40E_ERR_TIMEOUT;
+ break;
+ /* also covers I40E_AQ_RC_OK */
+ default:
+ break;
}
- } while ((hw->aq.asq_last_status != I40E_AQ_RC_OK) &&
- (total_delay < max_delay));
+
+ } while ((hw->aq.asq_last_status == I40E_AQ_RC_EAGAIN) &&
+ (total_delay < max_delay));
if (status)
return status;
@@ -1643,25 +1646,15 @@ enum i40e_status_code i40e_aq_set_phy_config(struct i40e_hw *hw,
return status;
}
-/**
- * i40e_set_fc
- * @hw: pointer to the hw struct
- * @aq_failures: buffer to return AdminQ failure information
- * @atomic_restart: whether to enable atomic link restart
- *
- * Set the requested flow control mode using set_phy_config.
- **/
-enum i40e_status_code i40e_set_fc(struct i40e_hw *hw, u8 *aq_failures,
- bool atomic_restart)
+static noinline_for_stack enum i40e_status_code
+i40e_set_fc_status(struct i40e_hw *hw,
+ struct i40e_aq_get_phy_abilities_resp *abilities,
+ bool atomic_restart)
{
- enum i40e_fc_mode fc_mode = hw->fc.requested_mode;
- struct i40e_aq_get_phy_abilities_resp abilities;
struct i40e_aq_set_phy_config config;
- enum i40e_status_code status;
+ enum i40e_fc_mode fc_mode = hw->fc.requested_mode;
u8 pause_mask = 0x0;
- *aq_failures = 0x0;
-
switch (fc_mode) {
case I40E_FC_FULL:
pause_mask |= I40E_AQ_PHY_FLAG_PAUSE_TX;
@@ -1677,6 +1670,48 @@ enum i40e_status_code i40e_set_fc(struct i40e_hw *hw, u8 *aq_failures,
break;
}
+ memset(&config, 0, sizeof(struct i40e_aq_set_phy_config));
+ /* clear the old pause settings */
+ config.abilities = abilities->abilities & ~(I40E_AQ_PHY_FLAG_PAUSE_TX) &
+ ~(I40E_AQ_PHY_FLAG_PAUSE_RX);
+ /* set the new abilities */
+ config.abilities |= pause_mask;
+ /* If the abilities have changed, then set the new config */
+ if (config.abilities == abilities->abilities)
+ return 0;
+
+ /* Auto restart link so settings take effect */
+ if (atomic_restart)
+ config.abilities |= I40E_AQ_PHY_ENABLE_ATOMIC_LINK;
+ /* Copy over all the old settings */
+ config.phy_type = abilities->phy_type;
+ config.phy_type_ext = abilities->phy_type_ext;
+ config.link_speed = abilities->link_speed;
+ config.eee_capability = abilities->eee_capability;
+ config.eeer = abilities->eeer_val;
+ config.low_power_ctrl = abilities->d3_lpan;
+ config.fec_config = abilities->fec_cfg_curr_mod_ext_info &
+ I40E_AQ_PHY_FEC_CONFIG_MASK;
+
+ return i40e_aq_set_phy_config(hw, &config, NULL);
+}
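Splitting the body of i40e_set_fc() into a noinline_for_stack helper parks the large phy-config locals on the helper's frame, so the outer function (and anything that calls it) keeps a small stack; the same attribute is applied to i40e_update_link_info later in this file. A standalone sketch of the idea, using plain GCC noinline in place of the kernel's noinline_for_stack and an illustrative 512-byte struct:

    #include <string.h>

    struct big_config {
        char buf[512];  /* stands in for i40e_aq_set_phy_config et al. */
    };

    static __attribute__((noinline)) int apply_heavy_config(int mode)
    {
        struct big_config cfg;          /* 512 bytes live only here */

        memset(&cfg, 0, sizeof(cfg));
        cfg.buf[0] = (char)mode;
        return cfg.buf[0];
    }

    int set_mode(int mode)
    {
        return apply_heavy_config(mode); /* caller's frame stays tiny */
    }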
+
+/**
+ * i40e_set_fc
+ * @hw: pointer to the hw struct
+ * @aq_failures: buffer to return AdminQ failure information
+ * @atomic_restart: whether to enable atomic link restart
+ *
+ * Set the requested flow control mode using set_phy_config.
+ **/
+enum i40e_status_code i40e_set_fc(struct i40e_hw *hw, u8 *aq_failures,
+ bool atomic_restart)
+{
+ struct i40e_aq_get_phy_abilities_resp abilities;
+ enum i40e_status_code status;
+
+ *aq_failures = 0x0;
+
/* Get the current phy config */
status = i40e_aq_get_phy_capabilities(hw, false, false, &abilities,
NULL);
@@ -1685,31 +1720,10 @@ enum i40e_status_code i40e_set_fc(struct i40e_hw *hw, u8 *aq_failures,
return status;
}
- memset(&config, 0, sizeof(struct i40e_aq_set_phy_config));
- /* clear the old pause settings */
- config.abilities = abilities.abilities & ~(I40E_AQ_PHY_FLAG_PAUSE_TX) &
- ~(I40E_AQ_PHY_FLAG_PAUSE_RX);
- /* set the new abilities */
- config.abilities |= pause_mask;
- /* If the abilities have changed, then set the new config */
- if (config.abilities != abilities.abilities) {
- /* Auto restart link so settings take effect */
- if (atomic_restart)
- config.abilities |= I40E_AQ_PHY_ENABLE_ATOMIC_LINK;
- /* Copy over all the old settings */
- config.phy_type = abilities.phy_type;
- config.phy_type_ext = abilities.phy_type_ext;
- config.link_speed = abilities.link_speed;
- config.eee_capability = abilities.eee_capability;
- config.eeer = abilities.eeer_val;
- config.low_power_ctrl = abilities.d3_lpan;
- config.fec_config = abilities.fec_cfg_curr_mod_ext_info &
- I40E_AQ_PHY_FEC_CONFIG_MASK;
- status = i40e_aq_set_phy_config(hw, &config, NULL);
+ status = i40e_set_fc_status(hw, &abilities, atomic_restart);
+ if (status)
+ *aq_failures |= I40E_SET_FC_AQ_FAIL_SET;
- if (status)
- *aq_failures |= I40E_SET_FC_AQ_FAIL_SET;
- }
/* Update the link info */
status = i40e_update_link_info(hw);
if (status) {
@@ -2537,7 +2551,7 @@ i40e_status i40e_get_link_status(struct i40e_hw *hw, bool *link_up)
* i40e_update_link_info - update status of the HW network link
* @hw: pointer to the hw struct
**/
-i40e_status i40e_update_link_info(struct i40e_hw *hw)
+noinline_for_stack i40e_status i40e_update_link_info(struct i40e_hw *hw)
{
struct i40e_aq_get_phy_abilities_resp abilities;
i40e_status status = 0;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_dcb.c b/drivers/net/ethernet/intel/i40e/i40e_dcb.c
index 292eeb3def10..200a1cb3b536 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_dcb.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_dcb.c
@@ -877,7 +877,23 @@ i40e_status i40e_init_dcb(struct i40e_hw *hw, bool enable_mib_change)
return I40E_NOT_SUPPORTED;
/* Read LLDP NVM area */
- ret = i40e_read_lldp_cfg(hw, &lldp_cfg);
+ if (hw->flags & I40E_HW_FLAG_FW_LLDP_PERSISTENT) {
+ u8 offset = 0;
+
+ if (hw->mac.type == I40E_MAC_XL710)
+ offset = I40E_LLDP_CURRENT_STATUS_XL710_OFFSET;
+ else if (hw->mac.type == I40E_MAC_X722)
+ offset = I40E_LLDP_CURRENT_STATUS_X722_OFFSET;
+ else
+ return I40E_NOT_SUPPORTED;
+
+ ret = i40e_read_nvm_module_data(hw,
+ I40E_SR_EMP_SR_SETTINGS_PTR,
+ offset, 1,
+ &lldp_cfg.adminstatus);
+ } else {
+ ret = i40e_read_lldp_cfg(hw, &lldp_cfg);
+ }
if (ret)
return I40E_ERR_NOT_READY;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_dcb.h b/drivers/net/ethernet/intel/i40e/i40e_dcb.h
index ddb48ae7cce4..2a80c5daa376 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_dcb.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_dcb.h
@@ -30,6 +30,8 @@
#define I40E_CEE_SUBTYPE_APP_PRI 4
#define I40E_CEE_MAX_FEAT_TYPE 3
+#define I40E_LLDP_CURRENT_STATUS_XL710_OFFSET 0x2B
+#define I40E_LLDP_CURRENT_STATUS_X722_OFFSET 0x31
/* Defines for LLDP TLV header */
#define I40E_LLDP_TLV_LEN_SHIFT 0
#define I40E_LLDP_TLV_LEN_MASK (0x01FF << I40E_LLDP_TLV_LEN_SHIFT)
diff --git a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
index 55d20acfcf70..41232898d8ae 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
@@ -1732,29 +1732,15 @@ static const struct file_operations i40e_dbg_netdev_ops_fops = {
**/
void i40e_dbg_pf_init(struct i40e_pf *pf)
{
- struct dentry *pfile;
const char *name = pci_name(pf->pdev);
- const struct device *dev = &pf->pdev->dev;
pf->i40e_dbg_pf = debugfs_create_dir(name, i40e_dbg_root);
- if (!pf->i40e_dbg_pf)
- return;
-
- pfile = debugfs_create_file("command", 0600, pf->i40e_dbg_pf, pf,
- &i40e_dbg_command_fops);
- if (!pfile)
- goto create_failed;
- pfile = debugfs_create_file("netdev_ops", 0600, pf->i40e_dbg_pf, pf,
- &i40e_dbg_netdev_ops_fops);
- if (!pfile)
- goto create_failed;
+ debugfs_create_file("command", 0600, pf->i40e_dbg_pf, pf,
+ &i40e_dbg_command_fops);
- return;
-
-create_failed:
- dev_info(dev, "debugfs dir/file for %s failed\n", name);
- debugfs_remove_recursive(pf->i40e_dbg_pf);
+ debugfs_create_file("netdev_ops", 0600, pf->i40e_dbg_pf, pf,
+ &i40e_dbg_netdev_ops_fops);
}
/**
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
index 527eb52c5401..41e1240acaea 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
@@ -711,6 +711,35 @@ static void i40e_phy_type_to_ethtool(struct i40e_pf *pf,
}
/**
+ * i40e_get_settings_link_up_fec - Get the FEC mode encoding from mask
+ * @req_fec_info: bitmask of the requested FEC modes
+ * @ks: ethtool ksettings to fill in
+ **/
+static void i40e_get_settings_link_up_fec(u8 req_fec_info,
+ struct ethtool_link_ksettings *ks)
+{
+ ethtool_link_ksettings_add_link_mode(ks, supported, FEC_NONE);
+ ethtool_link_ksettings_add_link_mode(ks, supported, FEC_RS);
+ ethtool_link_ksettings_add_link_mode(ks, supported, FEC_BASER);
+
+ if (I40E_AQ_SET_FEC_REQUEST_RS & req_fec_info) {
+ ethtool_link_ksettings_add_link_mode(ks, advertising, FEC_RS);
+ } else if (I40E_AQ_SET_FEC_REQUEST_KR & req_fec_info) {
+ ethtool_link_ksettings_add_link_mode(ks, advertising,
+ FEC_BASER);
+ } else {
+ ethtool_link_ksettings_add_link_mode(ks, advertising,
+ FEC_NONE);
+ if (I40E_AQ_SET_FEC_AUTO & req_fec_info) {
+ ethtool_link_ksettings_add_link_mode(ks, advertising,
+ FEC_RS);
+ ethtool_link_ksettings_add_link_mode(ks, advertising,
+ FEC_BASER);
+ }
+ }
+}
+
+/**
* i40e_get_settings_link_up - Get the Link settings for when link is up
* @hw: hw structure
* @ks: ethtool ksettings to fill in
@@ -769,13 +798,7 @@ static void i40e_get_settings_link_up(struct i40e_hw *hw,
25000baseSR_Full);
ethtool_link_ksettings_add_link_mode(ks, advertising,
25000baseSR_Full);
- ethtool_link_ksettings_add_link_mode(ks, supported, FEC_NONE);
- ethtool_link_ksettings_add_link_mode(ks, supported, FEC_RS);
- ethtool_link_ksettings_add_link_mode(ks, supported, FEC_BASER);
- ethtool_link_ksettings_add_link_mode(ks, advertising, FEC_NONE);
- ethtool_link_ksettings_add_link_mode(ks, advertising, FEC_RS);
- ethtool_link_ksettings_add_link_mode(ks, advertising,
- FEC_BASER);
+ i40e_get_settings_link_up_fec(hw_link_info->req_fec_info, ks);
ethtool_link_ksettings_add_link_mode(ks, supported,
10000baseSR_Full);
ethtool_link_ksettings_add_link_mode(ks, advertising,
@@ -892,9 +915,6 @@ static void i40e_get_settings_link_up(struct i40e_hw *hw,
40000baseKR4_Full);
ethtool_link_ksettings_add_link_mode(ks, supported,
25000baseKR_Full);
- ethtool_link_ksettings_add_link_mode(ks, supported, FEC_NONE);
- ethtool_link_ksettings_add_link_mode(ks, supported, FEC_RS);
- ethtool_link_ksettings_add_link_mode(ks, supported, FEC_BASER);
ethtool_link_ksettings_add_link_mode(ks, supported,
20000baseKR2_Full);
ethtool_link_ksettings_add_link_mode(ks, supported,
@@ -908,10 +928,7 @@ static void i40e_get_settings_link_up(struct i40e_hw *hw,
40000baseKR4_Full);
ethtool_link_ksettings_add_link_mode(ks, advertising,
25000baseKR_Full);
- ethtool_link_ksettings_add_link_mode(ks, advertising, FEC_NONE);
- ethtool_link_ksettings_add_link_mode(ks, advertising, FEC_RS);
- ethtool_link_ksettings_add_link_mode(ks, advertising,
- FEC_BASER);
+ i40e_get_settings_link_up_fec(hw_link_info->req_fec_info, ks);
ethtool_link_ksettings_add_link_mode(ks, advertising,
20000baseKR2_Full);
ethtool_link_ksettings_add_link_mode(ks, advertising,
@@ -929,13 +946,8 @@ static void i40e_get_settings_link_up(struct i40e_hw *hw,
25000baseCR_Full);
ethtool_link_ksettings_add_link_mode(ks, advertising,
25000baseCR_Full);
- ethtool_link_ksettings_add_link_mode(ks, supported, FEC_NONE);
- ethtool_link_ksettings_add_link_mode(ks, supported, FEC_RS);
- ethtool_link_ksettings_add_link_mode(ks, supported, FEC_BASER);
- ethtool_link_ksettings_add_link_mode(ks, advertising, FEC_NONE);
- ethtool_link_ksettings_add_link_mode(ks, advertising, FEC_RS);
- ethtool_link_ksettings_add_link_mode(ks, advertising,
- FEC_BASER);
+ i40e_get_settings_link_up_fec(hw_link_info->req_fec_info, ks);
+
break;
case I40E_PHY_TYPE_25GBASE_AOC:
case I40E_PHY_TYPE_25GBASE_ACC:
@@ -945,13 +957,8 @@ static void i40e_get_settings_link_up(struct i40e_hw *hw,
25000baseCR_Full);
ethtool_link_ksettings_add_link_mode(ks, advertising,
25000baseCR_Full);
- ethtool_link_ksettings_add_link_mode(ks, supported, FEC_NONE);
- ethtool_link_ksettings_add_link_mode(ks, supported, FEC_RS);
- ethtool_link_ksettings_add_link_mode(ks, supported, FEC_BASER);
- ethtool_link_ksettings_add_link_mode(ks, advertising, FEC_NONE);
- ethtool_link_ksettings_add_link_mode(ks, advertising, FEC_RS);
- ethtool_link_ksettings_add_link_mode(ks, advertising,
- FEC_BASER);
+ i40e_get_settings_link_up_fec(hw_link_info->req_fec_info, ks);
+
ethtool_link_ksettings_add_link_mode(ks, supported,
10000baseCR_Full);
ethtool_link_ksettings_add_link_mode(ks, advertising,
@@ -2250,7 +2257,7 @@ static void i40e_get_ethtool_stats(struct net_device *netdev,
struct i40e_netdev_priv *np = netdev_priv(netdev);
struct i40e_vsi *vsi = np->vsi;
struct i40e_pf *pf = vsi->back;
- struct i40e_veb *veb = pf->veb[pf->lan_veb];
+ struct i40e_veb *veb = NULL;
unsigned int i;
bool veb_stats;
u64 *p = data;
@@ -2273,8 +2280,14 @@ static void i40e_get_ethtool_stats(struct net_device *netdev,
goto check_data_pointer;
veb_stats = ((pf->lan_veb != I40E_NO_VEB) &&
+ (pf->lan_veb < I40E_MAX_VEB) &&
(pf->flags & I40E_FLAG_VEB_STATS_ENABLED));
+ if (veb_stats) {
+ veb = pf->veb[pf->lan_veb];
+ i40e_update_veb_stats(veb);
+ }
+
/* If veb stats aren't enabled, pass NULL instead of the veb so that
* we initialize stats to zero and update the data pointer
* intelligently
@@ -2329,7 +2342,7 @@ static void i40e_get_stat_strings(struct net_device *netdev, u8 *data)
}
if (vsi != pf->vsi[pf->lan_vsi] || pf->hw.partition_id != 1)
- return;
+ goto check_data_pointer;
i40e_add_stat_strings(&data, i40e_gstrings_veb_stats);
@@ -2341,6 +2354,7 @@ static void i40e_get_stat_strings(struct net_device *netdev, u8 *data)
for (i = 0; i < I40E_MAX_USER_PRIORITY; i++)
i40e_add_stat_strings(&data, i40e_gstrings_pfc_stats, i);
+check_data_pointer:
WARN_ONCE(data - p != i40e_get_stats_count(netdev) * ETH_GSTRING_LEN,
"stat strings count mismatch!");
}
@@ -5123,6 +5137,12 @@ static int i40e_get_module_info(struct net_device *netdev,
/* Module is not SFF-8472 compliant */
modinfo->type = ETH_MODULE_SFF_8079;
modinfo->eeprom_len = ETH_MODULE_SFF_8079_LEN;
+ } else if (!(sff8472_swap & I40E_MODULE_SFF_DDM_IMPLEMENTED)) {
+ /* Module is SFF-8472 compliant but doesn't implement
+ * Digital Diagnostic Monitoring (DDM).
+ */
+ modinfo->type = ETH_MODULE_SFF_8079;
+ modinfo->eeprom_len = ETH_MODULE_SFF_8079_LEN;
} else {
modinfo->type = ETH_MODULE_SFF_8472;
modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN;
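For context on the new I40E_MODULE_SFF_DDM_IMPLEMENTED branch: in SFF-8472 the diagnostic monitoring type byte advertises whether the module implements Digital Diagnostic Monitoring, and without DDM only the smaller SFF-8079 map is meaningful. A condensed, illustrative sketch of the decision (the real function also checks address mode; constants mirror the patch):

/* Illustrative only: choose which EEPROM map to report to ethtool. */
static void pick_module_map(u8 sff8472_comp, u8 sff8472_swap,
			    struct ethtool_modinfo *modinfo)
{
	if (sff8472_comp == 0x00 ||
	    !(sff8472_swap & I40E_MODULE_SFF_DDM_IMPLEMENTED)) {
		/* Not SFF-8472 compliant, or no DDM page available */
		modinfo->type = ETH_MODULE_SFF_8079;
		modinfo->eeprom_len = ETH_MODULE_SFF_8079_LEN;
	} else {
		modinfo->type = ETH_MODULE_SFF_8472;
		modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN;
	}
}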
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index 9ebbe3da61bb..fdf43d87e983 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -534,6 +534,10 @@ void i40e_pf_reset_stats(struct i40e_pf *pf)
sizeof(pf->veb[i]->stats));
memset(&pf->veb[i]->stats_offsets, 0,
sizeof(pf->veb[i]->stats_offsets));
+ memset(&pf->veb[i]->tc_stats, 0,
+ sizeof(pf->veb[i]->tc_stats));
+ memset(&pf->veb[i]->tc_stats_offsets, 0,
+ sizeof(pf->veb[i]->tc_stats_offsets));
pf->veb[i]->stat_offsets_loaded = false;
}
}
@@ -677,7 +681,7 @@ void i40e_update_eth_stats(struct i40e_vsi *vsi)
* i40e_update_veb_stats - Update Switch component statistics
* @veb: the VEB being updated
**/
-static void i40e_update_veb_stats(struct i40e_veb *veb)
+void i40e_update_veb_stats(struct i40e_veb *veb)
{
struct i40e_pf *pf = veb->pf;
struct i40e_hw *hw = &pf->hw;
@@ -2530,6 +2534,10 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
vsi_name,
i40e_stat_str(hw, aq_ret),
i40e_aq_str(hw, hw->aq.asq_last_status));
+ } else {
+ dev_info(&pf->pdev->dev, "%s is %s allmulti mode.\n",
+ vsi->netdev->name,
+ cur_multipromisc ? "entering" : "leaving");
}
}
@@ -3360,7 +3368,7 @@ static int i40e_vsi_configure_tx(struct i40e_vsi *vsi)
for (i = 0; (i < vsi->num_queue_pairs) && !err; i++)
err = i40e_configure_tx_ring(vsi->tx_rings[i]);
- if (!i40e_enabled_xdp_vsi(vsi))
+ if (err || !i40e_enabled_xdp_vsi(vsi))
return err;
for (i = 0; (i < vsi->num_queue_pairs) && !err; i++)
@@ -6412,50 +6420,6 @@ static int i40e_resume_port_tx(struct i40e_pf *pf)
}
/**
- * i40e_update_dcb_config
- * @hw: pointer to the HW struct
- * @enable_mib_change: enable MIB change event
- *
- * Update DCB configuration from the firmware
- **/
-static enum i40e_status_code
-i40e_update_dcb_config(struct i40e_hw *hw, bool enable_mib_change)
-{
- struct i40e_lldp_variables lldp_cfg;
- i40e_status ret;
-
- if (!hw->func_caps.dcb)
- return I40E_NOT_SUPPORTED;
-
- /* Read LLDP NVM area */
- ret = i40e_read_lldp_cfg(hw, &lldp_cfg);
- if (ret)
- return I40E_ERR_NOT_READY;
-
- /* Get DCBX status */
- ret = i40e_get_dcbx_status(hw, &hw->dcbx_status);
- if (ret)
- return ret;
-
- /* Check the DCBX Status */
- if (hw->dcbx_status == I40E_DCBX_STATUS_DONE ||
- hw->dcbx_status == I40E_DCBX_STATUS_IN_PROGRESS) {
- /* Get current DCBX configuration */
- ret = i40e_get_dcb_config(hw);
- if (ret)
- return ret;
- } else if (hw->dcbx_status == I40E_DCBX_STATUS_DISABLED) {
- return I40E_ERR_NOT_READY;
- }
-
- /* Configure the LLDP MIB change event */
- if (enable_mib_change)
- ret = i40e_aq_cfg_lldp_mib_change_event(hw, true, NULL);
-
- return ret;
-}
-
-/**
* i40e_init_pf_dcb - Initialize DCB configuration
* @pf: PF being configured
*
@@ -6477,7 +6441,7 @@ static int i40e_init_pf_dcb(struct i40e_pf *pf)
goto out;
}
- err = i40e_update_dcb_config(hw, true);
+ err = i40e_init_dcb(hw, true);
if (!err) {
/* Device/Function is not DCBX capable */
if ((!hw->func_caps.dcb) ||
@@ -8486,6 +8450,11 @@ void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags, bool lock_acquired)
dev_dbg(&pf->pdev->dev, "PFR requested\n");
i40e_handle_reset_warning(pf, lock_acquired);
+ dev_info(&pf->pdev->dev,
+ pf->flags & I40E_FLAG_DISABLE_FW_LLDP ?
+ "FW LLDP is disabled\n" :
+ "FW LLDP is enabled\n");
+
} else if (reset_flags & BIT_ULL(__I40E_REINIT_REQUESTED)) {
int v;
@@ -14569,9 +14538,20 @@ void i40e_set_fec_in_flags(u8 fec_cfg, u32 *flags)
**/
static bool i40e_check_recovery_mode(struct i40e_pf *pf)
{
- u32 val = rd32(&pf->hw, I40E_GL_FWSTS);
-
- if (val & I40E_GL_FWSTS_FWS1B_MASK) {
+ u32 val = rd32(&pf->hw, I40E_GL_FWSTS) & I40E_GL_FWSTS_FWS1B_MASK;
+ bool is_recovery_mode = false;
+
+ if (pf->hw.mac.type == I40E_MAC_XL710)
+ is_recovery_mode =
+ val == I40E_XL710_GL_FWSTS_FWS1B_REC_MOD_CORER_MASK ||
+ val == I40E_XL710_GL_FWSTS_FWS1B_REC_MOD_GLOBR_MASK ||
+ val == I40E_XL710_GL_FWSTS_FWS1B_REC_MOD_TRANSITION_MASK ||
+ val == I40E_XL710_GL_FWSTS_FWS1B_REC_MOD_NVM_MASK;
+ if (pf->hw.mac.type == I40E_MAC_X722)
+ is_recovery_mode =
+ val == I40E_X722_GL_FWSTS_FWS1B_REC_MOD_CORER_MASK ||
+ val == I40E_X722_GL_FWSTS_FWS1B_REC_MOD_GLOBR_MASK;
+ if (is_recovery_mode) {
dev_notice(&pf->pdev->dev, "Firmware recovery mode detected. Limiting functionality.\n");
dev_notice(&pf->pdev->dev, "Refer to the Intel(R) Ethernet Adapters and Devices User Guide for details on firmware recovery mode.\n");
set_bit(__I40E_RECOVERY_MODE, pf->state);
@@ -14585,6 +14565,51 @@ static bool i40e_check_recovery_mode(struct i40e_pf *pf)
}
/**
+ * i40e_pf_loop_reset - perform reset in a loop.
+ * @pf: board private structure
+ *
+ * This function is useful when a NIC is about to enter recovery mode:
+ * when a NIC's internal data structures are corrupted, its firmware
+ * will enter recovery mode. Right after a POR it takes about 7 minutes
+ * for the firmware to enter recovery mode; until then the NIC is in an
+ * intermediate state. The only way for the driver to detect this
+ * intermediate state is to issue a series of PF resets and check the
+ * return value. If a PF reset returns success the firmware may already
+ * be in recovery mode, so the caller must check for recovery mode when
+ * this function succeeds. There is a small chance that the firmware
+ * hangs in the intermediate state forever. Since waiting the full
+ * 7 minutes is excessive, this function retries for 10 seconds and
+ * then gives up by returning an error.
+ *
+ * Return 0 on success, negative on failure.
+ **/
+static i40e_status i40e_pf_loop_reset(struct i40e_pf *pf)
+{
+ const unsigned short MAX_CNT = 1000;
+ const unsigned short MSECS = 10;
+ struct i40e_hw *hw = &pf->hw;
+ i40e_status ret;
+ int cnt;
+
+ for (cnt = 0; cnt < MAX_CNT; ++cnt) {
+ ret = i40e_pf_reset(hw);
+ if (!ret)
+ break;
+ msleep(MSECS);
+ }
+
+ if (cnt == MAX_CNT) {
+ dev_info(&pf->pdev->dev, "PF reset failed: %d\n", ret);
+ return ret;
+ }
+
+ pf->pfr_count++;
+ return ret;
+}
+
+/**
* i40e_init_recovery_mode - initialize subsystems needed in recovery mode
* @pf: board private structure
* @hw: ptr to the hardware info
@@ -14812,14 +14837,22 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
/* Reset here to make sure all is clean and to define PF 'n' */
i40e_clear_hw(hw);
- if (!i40e_check_recovery_mode(pf)) {
- err = i40e_pf_reset(hw);
- if (err) {
- dev_info(&pdev->dev, "Initial pf_reset failed: %d\n", err);
- goto err_pf_reset;
- }
- pf->pfr_count++;
+
+ err = i40e_set_mac_type(hw);
+ if (err) {
+ dev_warn(&pdev->dev, "unidentified MAC or BLANK NVM: %d\n",
+ err);
+ goto err_pf_reset;
+ }
+
+ err = i40e_pf_loop_reset(pf);
+ if (err) {
+ dev_info(&pdev->dev, "Initial pf_reset failed: %d\n", err);
+ goto err_pf_reset;
}
+
+ i40e_check_recovery_mode(pf);
+
hw->aq.num_arq_entries = I40E_AQ_LEN;
hw->aq.num_asq_entries = I40E_AQ_LEN;
hw->aq.arq_buf_size = I40E_MAX_AQ_BUF_SIZE;
@@ -15605,8 +15638,7 @@ static void i40e_shutdown(struct pci_dev *pdev)
**/
static int __maybe_unused i40e_suspend(struct device *dev)
{
- struct pci_dev *pdev = to_pci_dev(dev);
- struct i40e_pf *pf = pci_get_drvdata(pdev);
+ struct i40e_pf *pf = dev_get_drvdata(dev);
struct i40e_hw *hw = &pf->hw;
/* If we're already suspended, then there is nothing to do */
@@ -15656,8 +15688,7 @@ static int __maybe_unused i40e_suspend(struct device *dev)
**/
static int __maybe_unused i40e_resume(struct device *dev)
{
- struct pci_dev *pdev = to_pci_dev(dev);
- struct i40e_pf *pf = pci_get_drvdata(pdev);
+ struct i40e_pf *pf = dev_get_drvdata(dev);
int err;
/* If we're not suspended, then there is nothing to do */
@@ -15674,7 +15705,7 @@ static int __maybe_unused i40e_resume(struct device *dev)
*/
err = i40e_restore_interrupt_scheme(pf);
if (err) {
- dev_err(&pdev->dev, "Cannot restore interrupt scheme: %d\n",
+ dev_err(dev, "Cannot restore interrupt scheme: %d\n",
err);
}
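The probe-path change above replaces the single reset with i40e_pf_loop_reset(). Stripped of driver bookkeeping, the retry shape is a bounded poll, sketched here with the patch's constants (1000 attempts x 10 ms, roughly 10 s; the wrapper name and error value are illustrative):

/* Sketch of the bounded retry in i40e_pf_loop_reset(). */
static int example_loop_reset(struct i40e_hw *hw)
{
	int cnt;

	for (cnt = 0; cnt < 1000; cnt++) {
		if (!i40e_pf_reset(hw))
			return 0;	/* reset went through */
		msleep(10);		/* firmware may still be settling */
	}
	return -EIO;			/* gave up after ~10 seconds */
}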
diff --git a/drivers/net/ethernet/intel/i40e/i40e_nvm.c b/drivers/net/ethernet/intel/i40e/i40e_nvm.c
index c508b75c3c09..e4d8d20baf3b 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_nvm.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_nvm.c
@@ -322,6 +322,77 @@ i40e_status i40e_read_nvm_word(struct i40e_hw *hw, u16 offset,
}
/**
+ * i40e_read_nvm_module_data - Reads NVM Buffer to specified memory location
+ * @hw: pointer to the HW structure
+ * @module_ptr: Pointer to module in words with respect to NVM beginning
+ * @offset: offset in words from module start
+ * @words_data_size: Words to read from NVM
+ * @data_ptr: Pointer to memory location where resulting buffer will be stored
+ **/
+i40e_status i40e_read_nvm_module_data(struct i40e_hw *hw,
+ u8 module_ptr, u16 offset,
+ u16 words_data_size,
+ u16 *data_ptr)
+{
+ i40e_status status;
+ u16 ptr_value = 0;
+ u32 flat_offset;
+
+ if (module_ptr != 0) {
+ status = i40e_read_nvm_word(hw, module_ptr, &ptr_value);
+ if (status) {
+ i40e_debug(hw, I40E_DEBUG_ALL,
+ "Reading nvm word failed.Error code: %d.\n",
+ status);
+ return I40E_ERR_NVM;
+ }
+ }
+#define I40E_NVM_INVALID_PTR_VAL 0x7FFF
+#define I40E_NVM_INVALID_VAL 0xFFFF
+
+ /* Pointer not initialized */
+ if (ptr_value == I40E_NVM_INVALID_PTR_VAL ||
+ ptr_value == I40E_NVM_INVALID_VAL)
+ return I40E_ERR_BAD_PTR;
+
+ /* Check whether the module is in SR mapped area or outside */
+ if (ptr_value & I40E_PTR_TYPE) {
+ /* Pointer points outside of the Shared RAM mapped area */
+ ptr_value &= ~I40E_PTR_TYPE;
+
+ /* ptr_value is in 4KB units; convert it to words */
+ ptr_value /= 2;
+ flat_offset = ((u32)ptr_value * 0x1000) + (u32)offset;
+ status = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
+ if (!status) {
+ status = i40e_aq_read_nvm(hw, 0, 2 * flat_offset,
+ 2 * words_data_size,
+ data_ptr, true, NULL);
+ i40e_release_nvm(hw);
+ if (status) {
+ i40e_debug(hw, I40E_DEBUG_ALL,
+ "Reading nvm aq failed.Error code: %d.\n",
+ status);
+ return I40E_ERR_NVM;
+ }
+ } else {
+ return I40E_ERR_NVM;
+ }
+ } else {
+ /* Read from the Shadow RAM */
+ status = i40e_read_nvm_buffer(hw, ptr_value + offset,
+ &words_data_size, data_ptr);
+ if (status) {
+ i40e_debug(hw, I40E_DEBUG_ALL,
+ "Reading nvm buffer failed.Error code: %d.\n",
+ status);
+ }
+ }
+
+ return status;
+}
+
+/**
* i40e_read_nvm_buffer_srctl - Reads Shadow RAM buffer via SRCTL register
* @hw: pointer to the HW structure
* @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF).
@@ -430,6 +501,36 @@ static i40e_status __i40e_read_nvm_buffer(struct i40e_hw *hw,
}
/**
+ * i40e_read_nvm_buffer - Reads Shadow RAM buffer and acquire lock if necessary
+ * @hw: pointer to the HW structure
+ * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF).
+ * @words: (in) number of words to read; (out) number of words actually read
+ * @data: words read from the Shadow RAM
+ *
+ * Reads 16 bit words (data buffer) from the SR using the i40e_read_nvm_srrd()
+ * method. The buffer read is preceded by the NVM ownership take
+ * and followed by the release.
+ **/
+i40e_status i40e_read_nvm_buffer(struct i40e_hw *hw, u16 offset,
+ u16 *words, u16 *data)
+{
+ i40e_status ret_code = 0;
+
+ if (hw->flags & I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE) {
+ ret_code = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
+ if (!ret_code) {
+ ret_code = i40e_read_nvm_buffer_aq(hw, offset, words,
+ data);
+ i40e_release_nvm(hw);
+ }
+ } else {
+ ret_code = i40e_read_nvm_buffer_srctl(hw, offset, words, data);
+ }
+
+ return ret_code;
+}
+
+/**
* i40e_write_nvm_aq - Writes Shadow RAM.
* @hw: pointer to the HW structure.
* @module_pointer: module pointer location in words from the NVM beginning
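A worked example of the flat-offset arithmetic in i40e_read_nvm_module_data() above, assuming I40E_PTR_TYPE is the top bit of the pointer word:

/* Pointer word 0x8004: the high bit says the module lives outside
 * the Shadow RAM mapped area, expressed in 4KB units.
 *
 *   ptr_value &= ~I40E_PTR_TYPE;          -> 0x0004 (4 x 4KB = 16KB)
 *   ptr_value /= 2;                       -> 0x0002
 *   flat_offset = 0x0002 * 0x1000 + off;  -> 0x2000 + off words (16KB)
 *
 * (The /2 then *0x1000 pair converts 4KB units into 16-bit words.)
 * The AQ read then doubles both the offset and the length
 * (2 * flat_offset, 2 * words_data_size) because the admin queue
 * addresses bytes while this API works in words.
 */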
diff --git a/drivers/net/ethernet/intel/i40e/i40e_prototype.h b/drivers/net/ethernet/intel/i40e/i40e_prototype.h
index eac88bcc6c06..5250441bf75b 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_prototype.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_prototype.h
@@ -315,6 +315,12 @@ i40e_status i40e_acquire_nvm(struct i40e_hw *hw,
void i40e_release_nvm(struct i40e_hw *hw);
i40e_status i40e_read_nvm_word(struct i40e_hw *hw, u16 offset,
u16 *data);
+i40e_status i40e_read_nvm_module_data(struct i40e_hw *hw,
+ u8 module_ptr, u16 offset,
+ u16 words_data_size,
+ u16 *data_ptr);
+i40e_status i40e_read_nvm_buffer(struct i40e_hw *hw, u16 offset,
+ u16 *words, u16 *data);
i40e_status i40e_update_nvm_checksum(struct i40e_hw *hw);
i40e_status i40e_validate_nvm_checksum(struct i40e_hw *hw,
u16 *checksum);
@@ -326,6 +332,8 @@ void i40e_nvmupd_check_wait_event(struct i40e_hw *hw, u16 opcode,
void i40e_nvmupd_clear_wait_state(struct i40e_hw *hw);
void i40e_set_pci_config_data(struct i40e_hw *hw, u16 link_status);
+i40e_status i40e_set_mac_type(struct i40e_hw *hw);
+
extern struct i40e_rx_ptype_decoded i40e_ptype_lookup[];
static inline struct i40e_rx_ptype_decoded decode_rx_desc_ptype(u8 ptype)
diff --git a/drivers/net/ethernet/intel/i40e/i40e_register.h b/drivers/net/ethernet/intel/i40e/i40e_register.h
index 52e3680c57f8..d35d690ca10f 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_register.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_register.h
@@ -58,7 +58,7 @@
#define I40E_PF_ARQLEN_ARQCRIT_SHIFT 30
#define I40E_PF_ARQLEN_ARQCRIT_MASK I40E_MASK(0x1, I40E_PF_ARQLEN_ARQCRIT_SHIFT)
#define I40E_PF_ARQLEN_ARQENABLE_SHIFT 31
-#define I40E_PF_ARQLEN_ARQENABLE_MASK I40E_MASK(0x1, I40E_PF_ARQLEN_ARQENABLE_SHIFT)
+#define I40E_PF_ARQLEN_ARQENABLE_MASK I40E_MASK(0x1u, I40E_PF_ARQLEN_ARQENABLE_SHIFT)
#define I40E_PF_ARQT 0x00080480 /* Reset: EMPR */
#define I40E_PF_ARQT_ARQT_SHIFT 0
#define I40E_PF_ARQT_ARQT_MASK I40E_MASK(0x3FF, I40E_PF_ARQT_ARQT_SHIFT)
@@ -81,7 +81,7 @@
#define I40E_PF_ATQLEN_ATQCRIT_SHIFT 30
#define I40E_PF_ATQLEN_ATQCRIT_MASK I40E_MASK(0x1, I40E_PF_ATQLEN_ATQCRIT_SHIFT)
#define I40E_PF_ATQLEN_ATQENABLE_SHIFT 31
-#define I40E_PF_ATQLEN_ATQENABLE_MASK I40E_MASK(0x1, I40E_PF_ATQLEN_ATQENABLE_SHIFT)
+#define I40E_PF_ATQLEN_ATQENABLE_MASK I40E_MASK(0x1u, I40E_PF_ATQLEN_ATQENABLE_SHIFT)
#define I40E_PF_ATQT 0x00080400 /* Reset: EMPR */
#define I40E_PF_ATQT_ATQT_SHIFT 0
#define I40E_PF_ATQT_ATQT_MASK I40E_MASK(0x3FF, I40E_PF_ATQT_ATQT_SHIFT)
@@ -108,7 +108,7 @@
#define I40E_VF_ARQLEN_ARQCRIT_SHIFT 30
#define I40E_VF_ARQLEN_ARQCRIT_MASK I40E_MASK(0x1, I40E_VF_ARQLEN_ARQCRIT_SHIFT)
#define I40E_VF_ARQLEN_ARQENABLE_SHIFT 31
-#define I40E_VF_ARQLEN_ARQENABLE_MASK I40E_MASK(0x1, I40E_VF_ARQLEN_ARQENABLE_SHIFT)
+#define I40E_VF_ARQLEN_ARQENABLE_MASK I40E_MASK(0x1u, I40E_VF_ARQLEN_ARQENABLE_SHIFT)
#define I40E_VF_ARQT(_VF) (0x00082C00 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: EMPR */
#define I40E_VF_ARQT_MAX_INDEX 127
#define I40E_VF_ARQT_ARQT_SHIFT 0
@@ -136,7 +136,7 @@
#define I40E_VF_ATQLEN_ATQCRIT_SHIFT 30
#define I40E_VF_ATQLEN_ATQCRIT_MASK I40E_MASK(0x1, I40E_VF_ATQLEN_ATQCRIT_SHIFT)
#define I40E_VF_ATQLEN_ATQENABLE_SHIFT 31
-#define I40E_VF_ATQLEN_ATQENABLE_MASK I40E_MASK(0x1, I40E_VF_ATQLEN_ATQENABLE_SHIFT)
+#define I40E_VF_ATQLEN_ATQENABLE_MASK I40E_MASK(0x1u, I40E_VF_ATQLEN_ATQENABLE_SHIFT)
#define I40E_VF_ATQT(_VF) (0x00082800 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: EMPR */
#define I40E_VF_ATQT_MAX_INDEX 127
#define I40E_VF_ATQT_ATQT_SHIFT 0
@@ -259,7 +259,7 @@
#define I40E_PRTDCB_RETSTCC_UPINTC_MODE_SHIFT 30
#define I40E_PRTDCB_RETSTCC_UPINTC_MODE_MASK I40E_MASK(0x1, I40E_PRTDCB_RETSTCC_UPINTC_MODE_SHIFT)
#define I40E_PRTDCB_RETSTCC_ETSTC_SHIFT 31
-#define I40E_PRTDCB_RETSTCC_ETSTC_MASK I40E_MASK(0x1, I40E_PRTDCB_RETSTCC_ETSTC_SHIFT)
+#define I40E_PRTDCB_RETSTCC_ETSTC_MASK I40E_MASK(0x1u, I40E_PRTDCB_RETSTCC_ETSTC_SHIFT)
#define I40E_PRTDCB_RPPMC 0x001223A0 /* Reset: CORER */
#define I40E_PRTDCB_RPPMC_LANRPPM_SHIFT 0
#define I40E_PRTDCB_RPPMC_LANRPPM_MASK I40E_MASK(0xFF, I40E_PRTDCB_RPPMC_LANRPPM_SHIFT)
@@ -363,6 +363,12 @@
#define I40E_GL_FWSTS_FWRI_MASK I40E_MASK(0x1, I40E_GL_FWSTS_FWRI_SHIFT)
#define I40E_GL_FWSTS_FWS1B_SHIFT 16
#define I40E_GL_FWSTS_FWS1B_MASK I40E_MASK(0xFF, I40E_GL_FWSTS_FWS1B_SHIFT)
+#define I40E_XL710_GL_FWSTS_FWS1B_REC_MOD_CORER_MASK I40E_MASK(0x30, I40E_GL_FWSTS_FWS1B_SHIFT)
+#define I40E_XL710_GL_FWSTS_FWS1B_REC_MOD_GLOBR_MASK I40E_MASK(0x31, I40E_GL_FWSTS_FWS1B_SHIFT)
+#define I40E_XL710_GL_FWSTS_FWS1B_REC_MOD_TRANSITION_MASK I40E_MASK(0x32, I40E_GL_FWSTS_FWS1B_SHIFT)
+#define I40E_XL710_GL_FWSTS_FWS1B_REC_MOD_NVM_MASK I40E_MASK(0x33, I40E_GL_FWSTS_FWS1B_SHIFT)
+#define I40E_X722_GL_FWSTS_FWS1B_REC_MOD_CORER_MASK I40E_MASK(0xB, I40E_GL_FWSTS_FWS1B_SHIFT)
+#define I40E_X722_GL_FWSTS_FWS1B_REC_MOD_GLOBR_MASK I40E_MASK(0xC, I40E_GL_FWSTS_FWS1B_SHIFT)
#define I40E_GLGEN_CLKSTAT 0x000B8184 /* Reset: POR */
#define I40E_GLGEN_CLKSTAT_CLKMODE_SHIFT 0
#define I40E_GLGEN_CLKSTAT_CLKMODE_MASK I40E_MASK(0x1, I40E_GLGEN_CLKSTAT_CLKMODE_SHIFT)
@@ -503,7 +509,7 @@
#define I40E_GLGEN_MSCA_MDICMD_SHIFT 30
#define I40E_GLGEN_MSCA_MDICMD_MASK I40E_MASK(0x1, I40E_GLGEN_MSCA_MDICMD_SHIFT)
#define I40E_GLGEN_MSCA_MDIINPROGEN_SHIFT 31
-#define I40E_GLGEN_MSCA_MDIINPROGEN_MASK I40E_MASK(0x1, I40E_GLGEN_MSCA_MDIINPROGEN_SHIFT)
+#define I40E_GLGEN_MSCA_MDIINPROGEN_MASK I40E_MASK(0x1u, I40E_GLGEN_MSCA_MDIINPROGEN_SHIFT)
#define I40E_GLGEN_MSRWD(_i) (0x0008819C + ((_i) * 4)) /* _i=0...3 */ /* Reset: POR */
#define I40E_GLGEN_MSRWD_MAX_INDEX 3
#define I40E_GLGEN_MSRWD_MDIWRDATA_SHIFT 0
@@ -1242,14 +1248,14 @@
#define I40E_GLLAN_TXPRE_QDIS_SET_QDIS_SHIFT 30
#define I40E_GLLAN_TXPRE_QDIS_SET_QDIS_MASK I40E_MASK(0x1, I40E_GLLAN_TXPRE_QDIS_SET_QDIS_SHIFT)
#define I40E_GLLAN_TXPRE_QDIS_CLEAR_QDIS_SHIFT 31
-#define I40E_GLLAN_TXPRE_QDIS_CLEAR_QDIS_MASK I40E_MASK(0x1, I40E_GLLAN_TXPRE_QDIS_CLEAR_QDIS_SHIFT)
+#define I40E_GLLAN_TXPRE_QDIS_CLEAR_QDIS_MASK I40E_MASK(0x1u, I40E_GLLAN_TXPRE_QDIS_CLEAR_QDIS_SHIFT)
#define I40E_PFLAN_QALLOC 0x001C0400 /* Reset: CORER */
#define I40E_PFLAN_QALLOC_FIRSTQ_SHIFT 0
#define I40E_PFLAN_QALLOC_FIRSTQ_MASK I40E_MASK(0x7FF, I40E_PFLAN_QALLOC_FIRSTQ_SHIFT)
#define I40E_PFLAN_QALLOC_LASTQ_SHIFT 16
#define I40E_PFLAN_QALLOC_LASTQ_MASK I40E_MASK(0x7FF, I40E_PFLAN_QALLOC_LASTQ_SHIFT)
#define I40E_PFLAN_QALLOC_VALID_SHIFT 31
-#define I40E_PFLAN_QALLOC_VALID_MASK I40E_MASK(0x1, I40E_PFLAN_QALLOC_VALID_SHIFT)
+#define I40E_PFLAN_QALLOC_VALID_MASK I40E_MASK(0x1u, I40E_PFLAN_QALLOC_VALID_SHIFT)
#define I40E_QRX_ENA(_Q) (0x00120000 + ((_Q) * 4)) /* _i=0...1535 */ /* Reset: PFR */
#define I40E_QRX_ENA_MAX_INDEX 1535
#define I40E_QRX_ENA_QENA_REQ_SHIFT 0
@@ -1658,7 +1664,7 @@
#define I40E_GLNVM_SRCTL_START_SHIFT 30
#define I40E_GLNVM_SRCTL_START_MASK I40E_MASK(0x1, I40E_GLNVM_SRCTL_START_SHIFT)
#define I40E_GLNVM_SRCTL_DONE_SHIFT 31
-#define I40E_GLNVM_SRCTL_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_SRCTL_DONE_SHIFT)
+#define I40E_GLNVM_SRCTL_DONE_MASK I40E_MASK(0x1u, I40E_GLNVM_SRCTL_DONE_SHIFT)
#define I40E_GLNVM_SRDATA 0x000B6114 /* Reset: POR */
#define I40E_GLNVM_SRDATA_WRDATA_SHIFT 0
#define I40E_GLNVM_SRDATA_WRDATA_MASK I40E_MASK(0xFFFF, I40E_GLNVM_SRDATA_WRDATA_SHIFT)
@@ -3025,7 +3031,7 @@
#define I40E_PF_VT_PFALLOC_LASTVF_SHIFT 8
#define I40E_PF_VT_PFALLOC_LASTVF_MASK I40E_MASK(0xFF, I40E_PF_VT_PFALLOC_LASTVF_SHIFT)
#define I40E_PF_VT_PFALLOC_VALID_SHIFT 31
-#define I40E_PF_VT_PFALLOC_VALID_MASK I40E_MASK(0x1, I40E_PF_VT_PFALLOC_VALID_SHIFT)
+#define I40E_PF_VT_PFALLOC_VALID_MASK I40E_MASK(0x1u, I40E_PF_VT_PFALLOC_VALID_SHIFT)
#define I40E_VP_MDET_RX(_VF) (0x0012A000 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: CORER */
#define I40E_VP_MDET_RX_MAX_INDEX 127
#define I40E_VP_MDET_RX_VALID_SHIFT 0
@@ -3161,7 +3167,7 @@
#define I40E_VF_ARQLEN1_ARQCRIT_SHIFT 30
#define I40E_VF_ARQLEN1_ARQCRIT_MASK I40E_MASK(0x1, I40E_VF_ARQLEN1_ARQCRIT_SHIFT)
#define I40E_VF_ARQLEN1_ARQENABLE_SHIFT 31
-#define I40E_VF_ARQLEN1_ARQENABLE_MASK I40E_MASK(0x1, I40E_VF_ARQLEN1_ARQENABLE_SHIFT)
+#define I40E_VF_ARQLEN1_ARQENABLE_MASK I40E_MASK(0x1u, I40E_VF_ARQLEN1_ARQENABLE_SHIFT)
#define I40E_VF_ARQT1 0x00007000 /* Reset: EMPR */
#define I40E_VF_ARQT1_ARQT_SHIFT 0
#define I40E_VF_ARQT1_ARQT_MASK I40E_MASK(0x3FF, I40E_VF_ARQT1_ARQT_SHIFT)
@@ -3184,7 +3190,7 @@
#define I40E_VF_ATQLEN1_ATQCRIT_SHIFT 30
#define I40E_VF_ATQLEN1_ATQCRIT_MASK I40E_MASK(0x1, I40E_VF_ATQLEN1_ATQCRIT_SHIFT)
#define I40E_VF_ATQLEN1_ATQENABLE_SHIFT 31
-#define I40E_VF_ATQLEN1_ATQENABLE_MASK I40E_MASK(0x1, I40E_VF_ATQLEN1_ATQENABLE_SHIFT)
+#define I40E_VF_ATQLEN1_ATQENABLE_MASK I40E_MASK(0x1u, I40E_VF_ATQLEN1_ATQENABLE_SHIFT)
#define I40E_VF_ATQT1 0x00008400 /* Reset: EMPR */
#define I40E_VF_ATQT1_ATQT_SHIFT 0
#define I40E_VF_ATQT1_ATQT_MASK I40E_MASK(0x3FF, I40E_VF_ATQT1_ATQT_SHIFT)
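All of the 0x1 -> 0x1u conversions in this header target bit 31. A two-line demonstration of why, assuming I40E_MASK expands to a plain left shift:

/* 0x1 is a signed int, so shifting it into bit 31 overflows the
 * signed type -- undefined behavior in C and a UBSAN splat in
 * practice. The unsigned literal makes the shift well defined.
 */
u32 bad  = 0x1  << 31;	/* undefined: shift into the sign bit */
u32 good = 0x1u << 31;	/* well defined: 0x80000000 */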
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
index 2a2fe3ec7926..e3f29dc8b290 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
@@ -3262,7 +3262,7 @@ int __i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
**/
bool __i40e_chk_linearize(struct sk_buff *skb)
{
- const struct skb_frag_struct *frag, *stale;
+ const skb_frag_t *frag, *stale;
int nr_frags, sum;
/* no need to check if number of frags is less than 7 */
@@ -3306,7 +3306,7 @@ bool __i40e_chk_linearize(struct sk_buff *skb)
* descriptor associated with the fragment.
*/
if (stale_size > I40E_MAX_DATA_PER_TXD) {
- int align_pad = -(stale->page_offset) &
+ int align_pad = -(skb_frag_off(stale)) &
(I40E_MAX_READ_REQ_SIZE - 1);
sum -= align_pad;
@@ -3349,7 +3349,7 @@ static inline int i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
{
unsigned int data_len = skb->data_len;
unsigned int size = skb_headlen(skb);
- struct skb_frag_struct *frag;
+ skb_frag_t *frag;
struct i40e_tx_buffer *tx_bi;
struct i40e_tx_desc *tx_desc;
u16 i = tx_ring->next_to_use;
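The skb_frag_struct -> skb_frag_t and page_offset -> skb_frag_off() conversions above track the networking core's move to bio_vec-backed fragments. A minimal sketch of the accessor style, mirroring the align_pad computation in the patch (the helper name is illustrative):

/* Read a fragment's offset only through the accessor, never through
 * the struct layout, so the driver survives layout changes.
 */
static unsigned int frag_align_pad(const skb_frag_t *frag,
				   unsigned int boundary)
{
	return -(skb_frag_off(frag)) & (boundary - 1);
}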
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.h b/drivers/net/ethernet/intel/i40e/i40e_txrx.h
index 100e92d2982f..36d37f31a287 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.h
@@ -521,7 +521,7 @@ static inline u32 i40e_get_head(struct i40e_ring *tx_ring)
**/
static inline int i40e_xmit_descriptor_count(struct sk_buff *skb)
{
- const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0];
+ const skb_frag_t *frag = &skb_shinfo(skb)->frags[0];
unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
int count = 0, size = skb_headlen(skb);
diff --git a/drivers/net/ethernet/intel/i40e/i40e_type.h b/drivers/net/ethernet/intel/i40e/i40e_type.h
index 8f43aa47c263..b43ec94a0f29 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_type.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_type.h
@@ -443,6 +443,7 @@ struct i40e_nvm_access {
#define I40E_MODULE_SFF_8472_COMP 0x5E
#define I40E_MODULE_SFF_8472_SWAP 0x5C
#define I40E_MODULE_SFF_ADDR_MODE 0x04
+#define I40E_MODULE_SFF_DDM_IMPLEMENTED 0x40
#define I40E_MODULE_TYPE_QSFP_PLUS 0x0D
#define I40E_MODULE_TYPE_QSFP28 0x11
#define I40E_MODULE_QSFP_MAX_LEN 640
@@ -623,6 +624,7 @@ struct i40e_hw {
#define I40E_HW_FLAG_NVM_READ_REQUIRES_LOCK BIT_ULL(3)
#define I40E_HW_FLAG_FW_LLDP_STOPPABLE BIT_ULL(4)
#define I40E_HW_FLAG_FW_LLDP_PERSISTENT BIT_ULL(5)
+#define I40E_HW_FLAG_DROP_MODE BIT_ULL(7)
u64 flags;
/* Used in set switch config AQ command */
@@ -1316,6 +1318,7 @@ struct i40e_hw_port_stats {
#define I40E_SR_VPD_PTR 0x2F
#define I40E_SR_PCIE_ALT_AUTO_LOAD_PTR 0x3E
#define I40E_SR_SW_CHECKSUM_WORD 0x3F
+#define I40E_SR_EMP_SR_SETTINGS_PTR 0x48
/* Auxiliary field, mask and shift definition for Shadow RAM and NVM Flash */
#define I40E_SR_VPD_MODULE_MAX_SIZE 1024
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
index 02b09a8ad54c..f8aa4deceb5e 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
@@ -55,7 +55,12 @@ static void i40e_vc_notify_vf_link_state(struct i40e_vf *vf)
pfe.event = VIRTCHNL_EVENT_LINK_CHANGE;
pfe.severity = PF_EVENT_SEVERITY_INFO;
- if (vf->link_forced) {
+
+ /* Always report link is down if the VF queues aren't enabled */
+ if (!vf->queues_enabled) {
+ pfe.event_data.link_event.link_status = false;
+ pfe.event_data.link_event.link_speed = 0;
+ } else if (vf->link_forced) {
pfe.event_data.link_event.link_status = vf->link_up;
pfe.event_data.link_event.link_speed =
(vf->link_up ? VIRTCHNL_LINK_SPEED_40GB : 0);
@@ -65,6 +70,7 @@ static void i40e_vc_notify_vf_link_state(struct i40e_vf *vf)
pfe.event_data.link_event.link_speed =
i40e_virtchnl_link_speed(ls->link_speed);
}
+
i40e_aq_send_msg_to_vf(hw, abs_vf_id, VIRTCHNL_OP_EVENT,
0, (u8 *)&pfe, sizeof(pfe), NULL);
}
@@ -2037,30 +2043,33 @@ static int i40e_vc_config_promiscuous_mode_msg(struct i40e_vf *vf, u8 *msg)
alluni = true;
aq_ret = i40e_config_vf_promiscuous_mode(vf, info->vsi_id, allmulti,
alluni);
- if (!aq_ret) {
- if (allmulti) {
+ if (aq_ret)
+ goto err_out;
+
+ if (allmulti) {
+ if (!test_and_set_bit(I40E_VF_STATE_MC_PROMISC,
+ &vf->vf_states))
dev_info(&pf->pdev->dev,
"VF %d successfully set multicast promiscuous mode\n",
vf->vf_id);
- set_bit(I40E_VF_STATE_MC_PROMISC, &vf->vf_states);
- } else {
- dev_info(&pf->pdev->dev,
- "VF %d successfully unset multicast promiscuous mode\n",
- vf->vf_id);
- clear_bit(I40E_VF_STATE_MC_PROMISC, &vf->vf_states);
- }
- if (alluni) {
+ } else if (test_and_clear_bit(I40E_VF_STATE_MC_PROMISC,
+ &vf->vf_states))
+ dev_info(&pf->pdev->dev,
+ "VF %d successfully unset multicast promiscuous mode\n",
+ vf->vf_id);
+
+ if (alluni) {
+ if (!test_and_set_bit(I40E_VF_STATE_UC_PROMISC,
+ &vf->vf_states))
dev_info(&pf->pdev->dev,
"VF %d successfully set unicast promiscuous mode\n",
vf->vf_id);
- set_bit(I40E_VF_STATE_UC_PROMISC, &vf->vf_states);
- } else {
- dev_info(&pf->pdev->dev,
- "VF %d successfully unset unicast promiscuous mode\n",
- vf->vf_id);
- clear_bit(I40E_VF_STATE_UC_PROMISC, &vf->vf_states);
- }
- }
+ } else if (test_and_clear_bit(I40E_VF_STATE_UC_PROMISC,
+ &vf->vf_states))
+ dev_info(&pf->pdev->dev,
+ "VF %d successfully unset unicast promiscuous mode\n",
+ vf->vf_id);
+
err_out:
/* send the response to the VF */
return i40e_vc_send_resp_to_vf(vf,
@@ -2153,7 +2162,7 @@ static int i40e_vc_config_queues_msg(struct i40e_vf *vf, u8 *msg)
* VF does not know about these additional VSIs and all
* it cares is about its own queues. PF configures these queues
* to its appropriate VSIs based on TC mapping
- **/
+ */
if (vf->adq_enabled) {
if (idx >= ARRAY_SIZE(vf->ch)) {
aq_ret = I40E_ERR_NO_AVAILABLE_VSI;
@@ -2364,6 +2373,8 @@ static int i40e_vc_enable_queues_msg(struct i40e_vf *vf, u8 *msg)
}
}
+ vf->queues_enabled = true;
+
error_param:
/* send the response to the VF */
return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_ENABLE_QUEUES,
@@ -2385,6 +2396,9 @@ static int i40e_vc_disable_queues_msg(struct i40e_vf *vf, u8 *msg)
struct i40e_pf *pf = vf->pf;
i40e_status aq_ret = 0;
+ /* Immediately mark queues as disabled */
+ vf->queues_enabled = false;
+
if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
aq_ret = I40E_ERR_PARAM;
goto error_param;
@@ -3953,10 +3967,15 @@ int i40e_ndo_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac)
/* When the VF is resetting wait until it is done.
* It can take up to 200 milliseconds,
* but wait for up to 300 milliseconds to be safe.
+ * If the VF is indeed in reset, the VSI pointer must be
+ * refreshed to point at the newly loaded VSI under pf->vsi[id].
*/
for (i = 0; i < 15; i++) {
- if (test_bit(I40E_VF_STATE_INIT, &vf->vf_states))
+ if (test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) {
+ if (i > 0)
+ vsi = pf->vsi[vf->lan_vsi_idx];
break;
+ }
msleep(20);
}
if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) {
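The promiscuous-mode rework earlier in this file also switches to test_and_set_bit()/test_and_clear_bit(), which return the bit's previous state, so the dev_info() fires only on an actual transition rather than on every repeated VF request. The pattern in isolation (STATE_BIT and flags are illustrative):

/* Log once per state change, not once per repeated request. */
if (enable) {
	if (!test_and_set_bit(STATE_BIT, &flags))
		pr_info("promiscuous mode enabled\n");
} else if (test_and_clear_bit(STATE_BIT, &flags)) {
	pr_info("promiscuous mode disabled\n");
}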
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
index f65cc0c16550..7164b9bb294f 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
@@ -99,6 +99,7 @@ struct i40e_vf {
unsigned int tx_rate; /* Tx bandwidth limit in Mbps */
bool link_forced;
bool link_up; /* only valid if VF link is forced */
+ bool queues_enabled; /* true if the VF queues are enabled */
bool spoofchk;
u16 num_mac;
u16 num_vlan;
diff --git a/drivers/net/ethernet/intel/iavf/iavf_txrx.c b/drivers/net/ethernet/intel/iavf/iavf_txrx.c
index 0cca1b589b56..7a30d5d5ef53 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_txrx.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_txrx.c
@@ -2161,7 +2161,7 @@ static void iavf_create_tx_ctx(struct iavf_ring *tx_ring,
**/
bool __iavf_chk_linearize(struct sk_buff *skb)
{
- const struct skb_frag_struct *frag, *stale;
+ const skb_frag_t *frag, *stale;
int nr_frags, sum;
/* no need to check if number of frags is less than 7 */
@@ -2205,7 +2205,7 @@ bool __iavf_chk_linearize(struct sk_buff *skb)
* descriptor associated with the fragment.
*/
if (stale_size > IAVF_MAX_DATA_PER_TXD) {
- int align_pad = -(stale->page_offset) &
+ int align_pad = -(skb_frag_off(stale)) &
(IAVF_MAX_READ_REQ_SIZE - 1);
sum -= align_pad;
@@ -2269,7 +2269,7 @@ static inline void iavf_tx_map(struct iavf_ring *tx_ring, struct sk_buff *skb,
{
unsigned int data_len = skb->data_len;
unsigned int size = skb_headlen(skb);
- struct skb_frag_struct *frag;
+ skb_frag_t *frag;
struct iavf_tx_buffer *tx_bi;
struct iavf_tx_desc *tx_desc;
u16 i = tx_ring->next_to_use;
diff --git a/drivers/net/ethernet/intel/iavf/iavf_txrx.h b/drivers/net/ethernet/intel/iavf/iavf_txrx.h
index 71e7d090f8db..dd3348f9da9d 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_txrx.h
+++ b/drivers/net/ethernet/intel/iavf/iavf_txrx.h
@@ -462,7 +462,7 @@ bool __iavf_chk_linearize(struct sk_buff *skb);
**/
static inline int iavf_xmit_descriptor_count(struct sk_buff *skb)
{
- const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0];
+ const skb_frag_t *frag = &skb_shinfo(skb)->frags[0];
unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
int count = 0, size = skb_headlen(skb);
diff --git a/drivers/net/ethernet/intel/ice/ice.h b/drivers/net/ethernet/intel/ice/ice.h
index 9ee6b55553c0..97d0f61cf52b 100644
--- a/drivers/net/ethernet/intel/ice/ice.h
+++ b/drivers/net/ethernet/intel/ice/ice.h
@@ -69,7 +69,8 @@ extern const char ice_drv_ver[];
#define ICE_INT_NAME_STR_LEN (IFNAMSIZ + 16)
#define ICE_ETHTOOL_FWVER_LEN 32
#define ICE_AQ_LEN 64
-#define ICE_MBXQ_LEN 64
+#define ICE_MBXSQ_LEN 64
+#define ICE_MBXRQ_LEN 512
#define ICE_MIN_MSIX 2
#define ICE_NO_VSI 0xffff
#define ICE_MAX_TXQS 2048
@@ -86,16 +87,6 @@ extern const char ice_drv_ver[];
#define ICE_RES_MISC_VEC_ID (ICE_RES_VALID_BIT - 1)
#define ICE_INVAL_Q_INDEX 0xffff
#define ICE_INVAL_VFID 256
-#define ICE_MAX_VF_COUNT 256
-#define ICE_MAX_QS_PER_VF 256
-#define ICE_MIN_QS_PER_VF 1
-#define ICE_DFLT_QS_PER_VF 4
-#define ICE_NONQ_VECS_VF 1
-#define ICE_MAX_SCATTER_QS_PER_VF 16
-#define ICE_MAX_BASE_QS_PER_VF 16
-#define ICE_MAX_INTR_PER_VF 65
-#define ICE_MIN_INTR_PER_VF (ICE_MIN_QS_PER_VF + 1)
-#define ICE_DFLT_INTR_PER_VF (ICE_DFLT_QS_PER_VF + 1)
#define ICE_MAX_RESET_WAIT 20
@@ -220,6 +211,7 @@ enum ice_state {
__ICE_CFG_BUSY,
__ICE_SERVICE_SCHED,
__ICE_SERVICE_DIS,
+ __ICE_OICR_INTR_DIS, /* Global OICR interrupt disabled */
__ICE_STATE_NBITS /* must be last */
};
@@ -329,7 +321,6 @@ struct ice_q_vector {
} ____cacheline_internodealigned_in_smp;
enum ice_pf_flags {
- ICE_FLAG_MSIX_ENA,
ICE_FLAG_FLTR_SYNC,
ICE_FLAG_RSS_ENA,
ICE_FLAG_SRIOV_ENA,
@@ -337,7 +328,8 @@ enum ice_pf_flags {
ICE_FLAG_DCB_CAPABLE,
ICE_FLAG_DCB_ENA,
ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA,
- ICE_FLAG_ENABLE_FW_LLDP,
+ ICE_FLAG_NO_MEDIA,
+ ICE_FLAG_FW_LLDP_AGENT,
ICE_FLAG_ETHTOOL_CTXT, /* set when ethtool holds RTNL lock */
ICE_PF_FLAGS_NBITS /* must be last */
};
@@ -455,6 +447,8 @@ ice_find_vsi_by_type(struct ice_pf *pf, enum ice_vsi_type type)
int ice_vsi_setup_tx_rings(struct ice_vsi *vsi);
int ice_vsi_setup_rx_rings(struct ice_vsi *vsi);
void ice_set_ethtool_ops(struct net_device *netdev);
+void ice_update_vsi_stats(struct ice_vsi *vsi);
+void ice_update_pf_stats(struct ice_pf *pf);
int ice_up(struct ice_vsi *vsi);
int ice_down(struct ice_vsi *vsi);
int ice_vsi_cfg(struct ice_vsi *vsi);
diff --git a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
index 765e3c2ed045..bf9aa533a7c6 100644
--- a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
+++ b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
@@ -1610,6 +1610,7 @@ enum ice_aq_err {
ICE_AQ_RC_EBUSY = 12, /* Device or resource busy */
ICE_AQ_RC_EEXIST = 13, /* Object already exists */
ICE_AQ_RC_ENOSPC = 16, /* No space left or allocation failure */
+ ICE_AQ_RC_ENOSYS = 17, /* Function not implemented */
};
/* Admin Queue command opcodes */
diff --git a/drivers/net/ethernet/intel/ice/ice_common.c b/drivers/net/ethernet/intel/ice/ice_common.c
index 2e0731c1e1a3..4b43e6de847b 100644
--- a/drivers/net/ethernet/intel/ice/ice_common.c
+++ b/drivers/net/ethernet/intel/ice/ice_common.c
@@ -740,7 +740,7 @@ enum ice_status ice_init_hw(struct ice_hw *hw)
ice_get_itr_intrl_gran(hw);
- status = ice_init_all_ctrlq(hw);
+ status = ice_create_all_ctrlq(hw);
if (status)
goto err_unroll_cqinit;
@@ -855,7 +855,7 @@ err_unroll_sched:
err_unroll_alloc:
devm_kfree(ice_hw_to_dev(hw), hw->port_info);
err_unroll_cqinit:
- ice_shutdown_all_ctrlq(hw);
+ ice_destroy_all_ctrlq(hw);
return status;
}
@@ -881,7 +881,7 @@ void ice_deinit_hw(struct ice_hw *hw)
/* Attempt to disable FW logging before shutting down control queues */
ice_cfg_fw_log(hw, false);
- ice_shutdown_all_ctrlq(hw);
+ ice_destroy_all_ctrlq(hw);
/* Clear VSI contexts if not already cleared */
ice_clear_all_vsi_ctx(hw);
@@ -1078,6 +1078,7 @@ static const struct ice_ctx_ele ice_rlan_ctx_info[] = {
ICE_CTX_STORE(ice_rlan_ctx, tphdata_ena, 1, 195),
ICE_CTX_STORE(ice_rlan_ctx, tphhead_ena, 1, 196),
ICE_CTX_STORE(ice_rlan_ctx, lrxqthresh, 3, 198),
+ ICE_CTX_STORE(ice_rlan_ctx, prefena, 1, 201),
{ 0 }
};
@@ -1088,7 +1089,8 @@ static const struct ice_ctx_ele ice_rlan_ctx_info[] = {
* @rxq_index: the index of the Rx queue
*
* Converts rxq context from sparse to dense structure and then writes
- * it to HW register space
+ * it to HW register space and enables the hardware to prefetch descriptors
+ * instead of only fetching them on demand
*/
enum ice_status
ice_write_rxq_ctx(struct ice_hw *hw, struct ice_rlan_ctx *rlan_ctx,
@@ -1096,6 +1098,11 @@ ice_write_rxq_ctx(struct ice_hw *hw, struct ice_rlan_ctx *rlan_ctx,
{
u8 ctx_buf[ICE_RXQ_CTX_SZ] = { 0 };
+ if (!rlan_ctx)
+ return ICE_ERR_BAD_PTR;
+
+ rlan_ctx->prefena = 1;
+
ice_set_ctx((u8 *)rlan_ctx, ctx_buf, ice_rlan_ctx_info);
return ice_copy_rxq_ctx_to_hw(hw, ctx_buf, rxq_index);
}
@@ -2024,7 +2031,7 @@ enum ice_status ice_update_link_info(struct ice_port_info *pi)
if (!pcaps)
return ICE_ERR_NO_MEMORY;
- status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_SW_CFG,
+ status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP,
pcaps, NULL);
if (!status)
memcpy(li->module_type, &pcaps->module_type,
@@ -2174,27 +2181,24 @@ ice_cfg_phy_fec(struct ice_aqc_set_phy_cfg_data *cfg, enum ice_fec_mode fec)
{
switch (fec) {
case ICE_FEC_BASER:
- /* Clear auto FEC and RS bits, and AND BASE-R ability
+ /* Clear RS bits, and AND BASE-R ability
* bits and OR request bits.
*/
- cfg->caps &= ~ICE_AQC_PHY_EN_AUTO_FEC;
cfg->link_fec_opt &= ICE_AQC_PHY_FEC_10G_KR_40G_KR4_EN |
ICE_AQC_PHY_FEC_25G_KR_CLAUSE74_EN;
cfg->link_fec_opt |= ICE_AQC_PHY_FEC_10G_KR_40G_KR4_REQ |
ICE_AQC_PHY_FEC_25G_KR_REQ;
break;
case ICE_FEC_RS:
- /* Clear auto FEC and BASE-R bits, and AND RS ability
+ /* Clear BASE-R bits, and AND RS ability
* bits and OR request bits.
*/
- cfg->caps &= ~ICE_AQC_PHY_EN_AUTO_FEC;
cfg->link_fec_opt &= ICE_AQC_PHY_FEC_25G_RS_CLAUSE91_EN;
cfg->link_fec_opt |= ICE_AQC_PHY_FEC_25G_RS_528_REQ |
ICE_AQC_PHY_FEC_25G_RS_544_REQ;
break;
case ICE_FEC_NONE:
- /* Clear auto FEC and all FEC option bits. */
- cfg->caps &= ~ICE_AQC_PHY_EN_AUTO_FEC;
+ /* Clear all FEC option bits. */
cfg->link_fec_opt &= ~ICE_AQC_PHY_FEC_MASK;
break;
case ICE_FEC_AUTO:
@@ -3240,40 +3244,44 @@ void ice_replay_post(struct ice_hw *hw)
/**
* ice_stat_update40 - read 40 bit stat from the chip and update stat values
* @hw: ptr to the hardware info
- * @hireg: high 32 bit HW register to read from
- * @loreg: low 32 bit HW register to read from
+ * @reg: offset of 64 bit HW register to read from
* @prev_stat_loaded: bool to specify if previous stats are loaded
* @prev_stat: ptr to previous loaded stat value
* @cur_stat: ptr to current stat value
*/
void
-ice_stat_update40(struct ice_hw *hw, u32 hireg, u32 loreg,
- bool prev_stat_loaded, u64 *prev_stat, u64 *cur_stat)
+ice_stat_update40(struct ice_hw *hw, u32 reg, bool prev_stat_loaded,
+ u64 *prev_stat, u64 *cur_stat)
{
- u64 new_data;
-
- new_data = rd32(hw, loreg);
- new_data |= ((u64)(rd32(hw, hireg) & 0xFFFF)) << 32;
+ u64 new_data = rd64(hw, reg) & (BIT_ULL(40) - 1);
/* device stats are not reset at PFR, they likely will not be zeroed
- * when the driver starts. So save the first values read and use them as
- * offsets to be subtracted from the raw values in order to report stats
- * that count from zero.
+ * when the driver starts. Thus, save the value from the first read
+ * without adding to the statistic value so that we report stats which
+ * count up from zero.
*/
- if (!prev_stat_loaded)
+ if (!prev_stat_loaded) {
*prev_stat = new_data;
+ return;
+ }
+
+ /* Calculate the difference between the new and old values, and then
+ * add it to the software stat value.
+ */
if (new_data >= *prev_stat)
- *cur_stat = new_data - *prev_stat;
+ *cur_stat += new_data - *prev_stat;
else
/* to manage the potential roll-over */
- *cur_stat = (new_data + BIT_ULL(40)) - *prev_stat;
- *cur_stat &= 0xFFFFFFFFFFULL;
+ *cur_stat += (new_data + BIT_ULL(40)) - *prev_stat;
+
+ /* Update the previously stored value to prepare for next read */
+ *prev_stat = new_data;
}
/**
* ice_stat_update32 - read 32 bit stat from the chip and update stat values
* @hw: ptr to the hardware info
- * @reg: HW register to read from
+ * @reg: offset of HW register to read from
* @prev_stat_loaded: bool to specify if previous stats are loaded
* @prev_stat: ptr to previous loaded stat value
* @cur_stat: ptr to current stat value
@@ -3287,17 +3295,26 @@ ice_stat_update32(struct ice_hw *hw, u32 reg, bool prev_stat_loaded,
new_data = rd32(hw, reg);
/* device stats are not reset at PFR, they likely will not be zeroed
- * when the driver starts. So save the first values read and use them as
- * offsets to be subtracted from the raw values in order to report stats
- * that count from zero.
+ * when the driver starts. Thus, save the value from the first read
+ * without adding to the statistic value so that we report stats which
+ * count up from zero.
*/
- if (!prev_stat_loaded)
+ if (!prev_stat_loaded) {
*prev_stat = new_data;
+ return;
+ }
+
+ /* Calculate the difference between the new and old values, and then
+ * add it to the software stat value.
+ */
if (new_data >= *prev_stat)
- *cur_stat = new_data - *prev_stat;
+ *cur_stat += new_data - *prev_stat;
else
/* to manage the potential roll-over */
- *cur_stat = (new_data + BIT_ULL(32)) - *prev_stat;
+ *cur_stat += (new_data + BIT_ULL(32)) - *prev_stat;
+
+ /* Update the previously stored value to prepare for next read */
+ *prev_stat = new_data;
}
/**
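A worked example of the rollover branch in ice_stat_update40() above, for a 40-bit counter:

/* prev_stat = 0xFFFFFFFFF0, new read = 0x10 (counter wrapped).
 *
 *   new_data < *prev_stat, so:
 *   *cur_stat += (0x10 + BIT_ULL(40)) - 0xFFFFFFFFF0ULL = 0x20
 *
 * i.e. 0x10 counts up to the wrap point plus 0x10 after it. Saving
 * *prev_stat = new_data afterwards is what turns *cur_stat into a
 * running accumulator rather than a since-boot snapshot.
 */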
diff --git a/drivers/net/ethernet/intel/ice/ice_common.h b/drivers/net/ethernet/intel/ice/ice_common.h
index d1f8353fe6bb..e376d1eadba4 100644
--- a/drivers/net/ethernet/intel/ice/ice_common.h
+++ b/drivers/net/ethernet/intel/ice/ice_common.h
@@ -17,8 +17,10 @@ enum ice_status ice_init_hw(struct ice_hw *hw);
void ice_deinit_hw(struct ice_hw *hw);
enum ice_status ice_check_reset(struct ice_hw *hw);
enum ice_status ice_reset(struct ice_hw *hw, enum ice_reset_req req);
+enum ice_status ice_create_all_ctrlq(struct ice_hw *hw);
enum ice_status ice_init_all_ctrlq(struct ice_hw *hw);
void ice_shutdown_all_ctrlq(struct ice_hw *hw);
+void ice_destroy_all_ctrlq(struct ice_hw *hw);
enum ice_status
ice_clean_rq_elem(struct ice_hw *hw, struct ice_ctl_q_info *cq,
struct ice_rq_event_info *e, u16 *pending);
@@ -123,8 +125,8 @@ enum ice_status ice_replay_vsi(struct ice_hw *hw, u16 vsi_handle);
void ice_replay_post(struct ice_hw *hw);
void ice_output_fw_log(struct ice_hw *hw, struct ice_aq_desc *desc, void *buf);
void
-ice_stat_update40(struct ice_hw *hw, u32 hireg, u32 loreg,
- bool prev_stat_loaded, u64 *prev_stat, u64 *cur_stat);
+ice_stat_update40(struct ice_hw *hw, u32 reg, bool prev_stat_loaded,
+ u64 *prev_stat, u64 *cur_stat);
void
ice_stat_update32(struct ice_hw *hw, u32 reg, bool prev_stat_loaded,
u64 *prev_stat, u64 *cur_stat);
diff --git a/drivers/net/ethernet/intel/ice/ice_controlq.c b/drivers/net/ethernet/intel/ice/ice_controlq.c
index e91ac4df0242..2353166c654e 100644
--- a/drivers/net/ethernet/intel/ice/ice_controlq.c
+++ b/drivers/net/ethernet/intel/ice/ice_controlq.c
@@ -310,7 +310,7 @@ ice_cfg_rq_regs(struct ice_hw *hw, struct ice_ctl_q_info *cq)
* @cq: pointer to the specific Control queue
*
* This is the main initialization routine for the Control Send Queue
- * Prior to calling this function, drivers *MUST* set the following fields
+ * Prior to calling this function, the driver *MUST* set the following fields
* in the cq->structure:
* - cq->num_sq_entries
* - cq->sq_buf_size
@@ -369,7 +369,7 @@ init_ctrlq_exit:
* @cq: pointer to the specific Control queue
*
* The main initialization routine for the Admin Receive (Event) Queue.
- * Prior to calling this function, drivers *MUST* set the following fields
+ * Prior to calling this function, the driver *MUST* set the following fields
* in the cq->structure:
* - cq->num_rq_entries
* - cq->rq_buf_size
@@ -569,14 +569,8 @@ static enum ice_status ice_init_check_adminq(struct ice_hw *hw)
return 0;
init_ctrlq_free_rq:
- if (cq->rq.count) {
- ice_shutdown_rq(hw, cq);
- mutex_destroy(&cq->rq_lock);
- }
- if (cq->sq.count) {
- ice_shutdown_sq(hw, cq);
- mutex_destroy(&cq->sq_lock);
- }
+ ice_shutdown_rq(hw, cq);
+ ice_shutdown_sq(hw, cq);
return status;
}
@@ -585,12 +579,14 @@ init_ctrlq_free_rq:
* @hw: pointer to the hardware structure
* @q_type: specific Control queue type
*
- * Prior to calling this function, drivers *MUST* set the following fields
+ * Prior to calling this function, the driver *MUST* set the following fields
* in the cq->structure:
* - cq->num_sq_entries
* - cq->num_rq_entries
* - cq->rq_buf_size
* - cq->sq_buf_size
+ *
+ * NOTE: this function does not initialize the controlq locks.
*/
static enum ice_status ice_init_ctrlq(struct ice_hw *hw, enum ice_ctl_q q_type)
{
@@ -616,8 +612,6 @@ static enum ice_status ice_init_ctrlq(struct ice_hw *hw, enum ice_ctl_q q_type)
!cq->rq_buf_size || !cq->sq_buf_size) {
return ICE_ERR_CFG;
}
- mutex_init(&cq->sq_lock);
- mutex_init(&cq->rq_lock);
/* setup SQ command write back timeout */
cq->sq_cmd_timeout = ICE_CTL_Q_SQ_CMD_TIMEOUT;
@@ -625,7 +619,7 @@ static enum ice_status ice_init_ctrlq(struct ice_hw *hw, enum ice_ctl_q q_type)
/* allocate the ATQ */
ret_code = ice_init_sq(hw, cq);
if (ret_code)
- goto init_ctrlq_destroy_locks;
+ return ret_code;
/* allocate the ARQ */
ret_code = ice_init_rq(hw, cq);
@@ -637,9 +631,6 @@ static enum ice_status ice_init_ctrlq(struct ice_hw *hw, enum ice_ctl_q q_type)
init_ctrlq_free_sq:
ice_shutdown_sq(hw, cq);
-init_ctrlq_destroy_locks:
- mutex_destroy(&cq->sq_lock);
- mutex_destroy(&cq->rq_lock);
return ret_code;
}
@@ -647,12 +638,14 @@ init_ctrlq_destroy_locks:
* ice_init_all_ctrlq - main initialization routine for all control queues
* @hw: pointer to the hardware structure
*
- * Prior to calling this function, drivers *MUST* set the following fields
+ * Prior to calling this function, the driver *MUST* set the following fields
* in the cq->structure for all control queues:
* - cq->num_sq_entries
* - cq->num_rq_entries
* - cq->rq_buf_size
* - cq->sq_buf_size
+ *
+ * NOTE: this function does not initialize the controlq locks.
*/
enum ice_status ice_init_all_ctrlq(struct ice_hw *hw)
{
@@ -672,9 +665,47 @@ enum ice_status ice_init_all_ctrlq(struct ice_hw *hw)
}
/**
+ * ice_init_ctrlq_locks - Initialize locks for a control queue
+ * @cq: pointer to the control queue
+ *
+ * Initializes the send and receive queue locks for a given control queue.
+ */
+static void ice_init_ctrlq_locks(struct ice_ctl_q_info *cq)
+{
+ mutex_init(&cq->sq_lock);
+ mutex_init(&cq->rq_lock);
+}
+
+/**
+ * ice_create_all_ctrlq - main initialization routine for all control queues
+ * @hw: pointer to the hardware structure
+ *
+ * Prior to calling this function, the driver *MUST* set the following fields
+ * in the cq->structure for all control queues:
+ * - cq->num_sq_entries
+ * - cq->num_rq_entries
+ * - cq->rq_buf_size
+ * - cq->sq_buf_size
+ *
+ * This function creates all the control queue locks and then calls
+ * ice_init_all_ctrlq. It should be called once during driver load. If the
+ * driver needs to re-initialize control queues at run time it should call
+ * ice_init_all_ctrlq instead.
+ */
+enum ice_status ice_create_all_ctrlq(struct ice_hw *hw)
+{
+ ice_init_ctrlq_locks(&hw->adminq);
+ ice_init_ctrlq_locks(&hw->mailboxq);
+
+ return ice_init_all_ctrlq(hw);
+}
+
+/**
* ice_shutdown_ctrlq - shutdown routine for any control queue
* @hw: pointer to the hardware structure
* @q_type: specific Control queue type
+ *
+ * NOTE: this function does not destroy the control queue locks.
*/
static void ice_shutdown_ctrlq(struct ice_hw *hw, enum ice_ctl_q q_type)
{
@@ -693,19 +724,17 @@ static void ice_shutdown_ctrlq(struct ice_hw *hw, enum ice_ctl_q q_type)
return;
}
- if (cq->sq.count) {
- ice_shutdown_sq(hw, cq);
- mutex_destroy(&cq->sq_lock);
- }
- if (cq->rq.count) {
- ice_shutdown_rq(hw, cq);
- mutex_destroy(&cq->rq_lock);
- }
+ ice_shutdown_sq(hw, cq);
+ ice_shutdown_rq(hw, cq);
}
/**
* ice_shutdown_all_ctrlq - shutdown routine for all control queues
* @hw: pointer to the hardware structure
+ *
+ * NOTE: this function does not destroy the control queue locks. The driver
+ * may call this at runtime to shutdown and later restart control queues, such
+ * as in response to a reset event.
*/
void ice_shutdown_all_ctrlq(struct ice_hw *hw)
{
@@ -716,6 +745,37 @@ void ice_shutdown_all_ctrlq(struct ice_hw *hw)
}
/**
+ * ice_destroy_ctrlq_locks - Destroy locks for a control queue
+ * @cq: pointer to the control queue
+ *
+ * Destroys the send and receive queue locks for a given control queue.
+ */
+static void
+ice_destroy_ctrlq_locks(struct ice_ctl_q_info *cq)
+{
+ mutex_destroy(&cq->sq_lock);
+ mutex_destroy(&cq->rq_lock);
+}
+
+/**
+ * ice_destroy_all_ctrlq - exit routine for all control queues
+ * @hw: pointer to the hardware structure
+ *
+ * This function shuts down all the control queues and then destroys the
+ * control queue locks. It should be called once during driver unload. The
+ * driver should call ice_shutdown_all_ctrlq if it needs to shut down and
+ * reinitialize control queues, such as in response to a reset event.
+ */
+void ice_destroy_all_ctrlq(struct ice_hw *hw)
+{
+ /* shut down all the control queues first */
+ ice_shutdown_all_ctrlq(hw);
+
+ ice_destroy_ctrlq_locks(&hw->adminq);
+ ice_destroy_ctrlq_locks(&hw->mailboxq);
+}
+
+/**
* ice_clean_sq - cleans Admin send queue (ATQ)
* @hw: pointer to the hardware structure
* @cq: pointer to the specific Control queue
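The net effect of the controlq refactor above is a split between lock lifetime (driver load/unload) and queue lifetime (any reset). The intended call pairing, using the patch's own entry points:

/* probe:   ice_create_all_ctrlq(hw);   - mutex_init + queue init
 * reset:   ice_shutdown_all_ctrlq(hw); - queues down, locks kept
 * rebuild: ice_init_all_ctrlq(hw);     - queues back up, locks reused
 * remove:  ice_destroy_all_ctrlq(hw);  - queues down + mutex_destroy
 *
 * Keeping mutex_init()/mutex_destroy() out of the reset path avoids
 * re-initializing a mutex that another thread may still hold.
 */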
diff --git a/drivers/net/ethernet/intel/ice/ice_dcb.c b/drivers/net/ethernet/intel/ice/ice_dcb.c
index c2002ded65f6..d60c942249e8 100644
--- a/drivers/net/ethernet/intel/ice/ice_dcb.c
+++ b/drivers/net/ethernet/intel/ice/ice_dcb.c
@@ -954,7 +954,8 @@ enum ice_status ice_init_dcb(struct ice_hw *hw)
pi->dcbx_status = ice_get_dcbx_status(hw);
if (pi->dcbx_status == ICE_DCBX_STATUS_DONE ||
- pi->dcbx_status == ICE_DCBX_STATUS_IN_PROGRESS) {
+ pi->dcbx_status == ICE_DCBX_STATUS_IN_PROGRESS ||
+ pi->dcbx_status == ICE_DCBX_STATUS_NOT_STARTED) {
/* Get current DCBX configuration */
ret = ice_get_dcb_cfg(pi);
pi->is_sw_lldp = (hw->adminq.sq_last_status == ICE_AQ_RC_EPERM);
diff --git a/drivers/net/ethernet/intel/ice/ice_dcb_lib.c b/drivers/net/ethernet/intel/ice/ice_dcb_lib.c
index fe88b127ca42..734cef8eed9e 100644
--- a/drivers/net/ethernet/intel/ice/ice_dcb_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_dcb_lib.c
@@ -204,15 +204,86 @@ out:
}
/**
+ * ice_cfg_etsrec_defaults - Set default ETS recommended DCB config
+ * @pi: port information structure
+ */
+static void ice_cfg_etsrec_defaults(struct ice_port_info *pi)
+{
+ struct ice_dcbx_cfg *dcbcfg = &pi->local_dcbx_cfg;
+ u8 i;
+
+ /* Ensure ETS recommended DCB configuration is not already set */
+ if (dcbcfg->etsrec.maxtcs)
+ return;
+
+ /* In CEE mode, set the default to 1 TC */
+ dcbcfg->etsrec.maxtcs = 1;
+ for (i = 0; i < ICE_MAX_TRAFFIC_CLASS; i++) {
+ dcbcfg->etsrec.tcbwtable[i] = i ? 0 : 100;
+ dcbcfg->etsrec.tsatable[i] = i ? ICE_IEEE_TSA_STRICT :
+ ICE_IEEE_TSA_ETS;
+ }
+}
+
+/**
+ * ice_dcb_need_recfg - Check if DCB needs reconfig
+ * @pf: board private structure
+ * @old_cfg: current DCB config
+ * @new_cfg: new DCB config
+ */
+static bool
+ice_dcb_need_recfg(struct ice_pf *pf, struct ice_dcbx_cfg *old_cfg,
+ struct ice_dcbx_cfg *new_cfg)
+{
+ bool need_reconfig = false;
+
+ /* Check if ETS configuration has changed */
+ if (memcmp(&new_cfg->etscfg, &old_cfg->etscfg,
+ sizeof(new_cfg->etscfg))) {
+ /* If Priority Table has changed reconfig is needed */
+ if (memcmp(&new_cfg->etscfg.prio_table,
+ &old_cfg->etscfg.prio_table,
+ sizeof(new_cfg->etscfg.prio_table))) {
+ need_reconfig = true;
+ dev_dbg(&pf->pdev->dev, "ETS UP2TC changed.\n");
+ }
+
+ if (memcmp(&new_cfg->etscfg.tcbwtable,
+ &old_cfg->etscfg.tcbwtable,
+ sizeof(new_cfg->etscfg.tcbwtable)))
+ dev_dbg(&pf->pdev->dev, "ETS TC BW Table changed.\n");
+
+ if (memcmp(&new_cfg->etscfg.tsatable,
+ &old_cfg->etscfg.tsatable,
+ sizeof(new_cfg->etscfg.tsatable)))
+ dev_dbg(&pf->pdev->dev, "ETS TSA Table changed.\n");
+ }
+
+ /* Check if PFC configuration has changed */
+ if (memcmp(&new_cfg->pfc, &old_cfg->pfc, sizeof(new_cfg->pfc))) {
+ need_reconfig = true;
+ dev_dbg(&pf->pdev->dev, "PFC config change detected.\n");
+ }
+
+ /* Check if APP Table has changed */
+ if (memcmp(&new_cfg->app, &old_cfg->app, sizeof(new_cfg->app))) {
+ need_reconfig = true;
+ dev_dbg(&pf->pdev->dev, "APP Table change detected.\n");
+ }
+
+ dev_dbg(&pf->pdev->dev, "dcb need_reconfig=%d\n", need_reconfig);
+ return need_reconfig;
+}
+
+/**
* ice_dcb_rebuild - rebuild DCB post reset
* @pf: physical function instance
*/
void ice_dcb_rebuild(struct ice_pf *pf)
{
+ struct ice_dcbx_cfg *local_dcbx_cfg, *desired_dcbx_cfg, *prev_cfg;
struct ice_aqc_port_ets_elem buf = { 0 };
- struct ice_dcbx_cfg *prev_cfg;
enum ice_status ret;
- u8 willing;
ret = ice_query_port_ets(pf->hw.port_info, &buf, sizeof(buf), NULL);
if (ret) {
@@ -224,9 +295,15 @@ void ice_dcb_rebuild(struct ice_pf *pf)
if (!test_bit(ICE_FLAG_DCB_ENA, pf->flags))
return;
+ local_dcbx_cfg = &pf->hw.port_info->local_dcbx_cfg;
+ desired_dcbx_cfg = &pf->hw.port_info->desired_dcbx_cfg;
+
/* Save current willing state and force FW to unwilling */
- willing = pf->hw.port_info->local_dcbx_cfg.etscfg.willing;
- pf->hw.port_info->local_dcbx_cfg.etscfg.willing = 0x0;
+ local_dcbx_cfg->etscfg.willing = 0x0;
+ local_dcbx_cfg->pfc.willing = 0x0;
+ local_dcbx_cfg->app_mode = ICE_DCBX_APPS_NON_WILLING;
+
+ ice_cfg_etsrec_defaults(pf->hw.port_info);
ret = ice_set_dcb_cfg(pf->hw.port_info);
if (ret) {
dev_err(&pf->pdev->dev, "Failed to set DCB to unwilling\n");
@@ -234,8 +311,7 @@ void ice_dcb_rebuild(struct ice_pf *pf)
}
/* Retrieve DCB config and ensure same as current in SW */
- prev_cfg = devm_kmemdup(&pf->pdev->dev,
- &pf->hw.port_info->local_dcbx_cfg,
+ prev_cfg = devm_kmemdup(&pf->pdev->dev, local_dcbx_cfg,
sizeof(*prev_cfg), GFP_KERNEL);
if (!prev_cfg) {
dev_err(&pf->pdev->dev, "Failed to alloc space for DCB cfg\n");
@@ -243,22 +319,29 @@ void ice_dcb_rebuild(struct ice_pf *pf)
}
ice_init_dcb(&pf->hw);
- if (memcmp(prev_cfg, &pf->hw.port_info->local_dcbx_cfg,
- sizeof(*prev_cfg))) {
+ if (pf->hw.port_info->dcbx_status == ICE_DCBX_STATUS_DIS)
+ pf->hw.port_info->is_sw_lldp = true;
+ else
+ pf->hw.port_info->is_sw_lldp = false;
+
+ if (ice_dcb_need_recfg(pf, prev_cfg, local_dcbx_cfg)) {
/* difference in cfg detected - disable DCB till next MIB */
dev_err(&pf->pdev->dev, "Set local MIB not accurate\n");
- devm_kfree(&pf->pdev->dev, prev_cfg);
goto dcb_error;
}
/* fetched config congruent to previous configuration */
devm_kfree(&pf->pdev->dev, prev_cfg);
- /* Configuration replayed - reset willing state to previous */
- pf->hw.port_info->local_dcbx_cfg.etscfg.willing = willing;
+ /* Set the local desired config */
+ if (local_dcbx_cfg->dcbx_mode == ICE_DCBX_MODE_CEE)
+ memcpy(local_dcbx_cfg, desired_dcbx_cfg,
+ sizeof(*local_dcbx_cfg));
+
+ ice_cfg_etsrec_defaults(pf->hw.port_info);
ret = ice_set_dcb_cfg(pf->hw.port_info);
if (ret) {
- dev_err(&pf->pdev->dev, "Fail restoring prev willing state\n");
+ dev_err(&pf->pdev->dev, "Failed to set desired config\n");
goto dcb_error;
}
dev_info(&pf->pdev->dev, "DCB restored after reset\n");
@@ -364,35 +447,17 @@ int ice_init_pf_dcb(struct ice_pf *pf, bool locked)
struct device *dev = &pf->pdev->dev;
struct ice_port_info *port_info;
struct ice_hw *hw = &pf->hw;
- int sw_default = 0;
int err;
port_info = hw->port_info;
err = ice_init_dcb(hw);
if (err) {
- /* FW LLDP is not active, default to SW DCBX/LLDP */
- dev_info(&pf->pdev->dev, "FW LLDP is not active\n");
- hw->port_info->dcbx_status = ICE_DCBX_STATUS_NOT_STARTED;
- hw->port_info->is_sw_lldp = true;
- }
-
- if (port_info->dcbx_status == ICE_DCBX_STATUS_DIS)
- dev_info(&pf->pdev->dev, "DCBX disabled\n");
-
- /* LLDP disabled in FW */
- if (port_info->is_sw_lldp) {
- sw_default = 1;
- dev_info(&pf->pdev->dev, "DCBx/LLDP in SW mode.\n");
- clear_bit(ICE_FLAG_ENABLE_FW_LLDP, pf->flags);
- } else {
- set_bit(ICE_FLAG_ENABLE_FW_LLDP, pf->flags);
- }
-
- if (port_info->dcbx_status == ICE_DCBX_STATUS_NOT_STARTED)
- dev_info(&pf->pdev->dev, "DCBX not started\n");
-
- if (sw_default) {
+ /* FW LLDP is disabled, activate SW DCBX/LLDP mode */
+ dev_info(&pf->pdev->dev,
+ "FW LLDP is disabled, DCBx/LLDP in SW mode.\n");
+ port_info->is_sw_lldp = true;
+ clear_bit(ICE_FLAG_FW_LLDP_AGENT, pf->flags);
err = ice_dcb_sw_dflt_cfg(pf, locked);
if (err) {
dev_err(&pf->pdev->dev,
@@ -407,6 +472,9 @@ int ice_init_pf_dcb(struct ice_pf *pf, bool locked)
return 0;
}
+ port_info->is_sw_lldp = false;
+ set_bit(ICE_FLAG_FW_LLDP_AGENT, pf->flags);
+
/* DCBX in FW and LLDP enabled in FW */
pf->dcbx_cap = DCB_CAP_DCBX_LLD_MANAGED | DCB_CAP_DCBX_VER_IEEE;
@@ -502,55 +570,6 @@ ice_tx_prepare_vlan_flags_dcb(struct ice_ring *tx_ring,
}
/**
- * ice_dcb_need_recfg - Check if DCB needs reconfig
- * @pf: board private structure
- * @old_cfg: current DCB config
- * @new_cfg: new DCB config
- */
-static bool ice_dcb_need_recfg(struct ice_pf *pf, struct ice_dcbx_cfg *old_cfg,
- struct ice_dcbx_cfg *new_cfg)
-{
- bool need_reconfig = false;
-
- /* Check if ETS configuration has changed */
- if (memcmp(&new_cfg->etscfg, &old_cfg->etscfg,
- sizeof(new_cfg->etscfg))) {
- /* If Priority Table has changed reconfig is needed */
- if (memcmp(&new_cfg->etscfg.prio_table,
- &old_cfg->etscfg.prio_table,
- sizeof(new_cfg->etscfg.prio_table))) {
- need_reconfig = true;
- dev_dbg(&pf->pdev->dev, "ETS UP2TC changed.\n");
- }
-
- if (memcmp(&new_cfg->etscfg.tcbwtable,
- &old_cfg->etscfg.tcbwtable,
- sizeof(new_cfg->etscfg.tcbwtable)))
- dev_dbg(&pf->pdev->dev, "ETS TC BW Table changed.\n");
-
- if (memcmp(&new_cfg->etscfg.tsatable,
- &old_cfg->etscfg.tsatable,
- sizeof(new_cfg->etscfg.tsatable)))
- dev_dbg(&pf->pdev->dev, "ETS TSA Table changed.\n");
- }
-
- /* Check if PFC configuration has changed */
- if (memcmp(&new_cfg->pfc, &old_cfg->pfc, sizeof(new_cfg->pfc))) {
- need_reconfig = true;
- dev_dbg(&pf->pdev->dev, "PFC config change detected.\n");
- }
-
- /* Check if APP Table has changed */
- if (memcmp(&new_cfg->app, &old_cfg->app, sizeof(new_cfg->app))) {
- need_reconfig = true;
- dev_dbg(&pf->pdev->dev, "APP Table change detected.\n");
- }
-
- dev_dbg(&pf->pdev->dev, "dcb need_reconfig=%d\n", need_reconfig);
- return need_reconfig;
-}
-
-/**
* ice_dcb_process_lldp_set_mib_change - Process MIB change
* @pf: ptr to ice_pf
* @event: pointer to the admin queue receive event
diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool.c b/drivers/net/ethernet/intel/ice/ice_ethtool.c
index 52083a63dee6..f7dd0bd03d39 100644
--- a/drivers/net/ethernet/intel/ice/ice_ethtool.c
+++ b/drivers/net/ethernet/intel/ice/ice_ethtool.c
@@ -155,7 +155,7 @@ struct ice_priv_flag {
static const struct ice_priv_flag ice_gstrings_priv_flags[] = {
ICE_PRIV_FLAG("link-down-on-close", ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA),
- ICE_PRIV_FLAG("enable-fw-lldp", ICE_FLAG_ENABLE_FW_LLDP),
+ ICE_PRIV_FLAG("fw-lldp-agent", ICE_FLAG_FW_LLDP_AGENT),
};
#define ICE_PRIV_FLAG_ARRAY_SIZE ARRAY_SIZE(ice_gstrings_priv_flags)
@@ -1201,8 +1201,8 @@ static int ice_set_priv_flags(struct net_device *netdev, u32 flags)
bitmap_xor(change_flags, pf->flags, orig_flags, ICE_PF_FLAGS_NBITS);
- if (test_bit(ICE_FLAG_ENABLE_FW_LLDP, change_flags)) {
- if (!test_bit(ICE_FLAG_ENABLE_FW_LLDP, pf->flags)) {
+ if (test_bit(ICE_FLAG_FW_LLDP_AGENT, change_flags)) {
+ if (!test_bit(ICE_FLAG_FW_LLDP_AGENT, pf->flags)) {
enum ice_status status;
/* Disable FW LLDP engine */
@@ -1319,14 +1319,17 @@ ice_get_ethtool_stats(struct net_device *netdev,
struct ice_vsi *vsi = np->vsi;
struct ice_pf *pf = vsi->back;
struct ice_ring *ring;
- unsigned int j = 0;
+ unsigned int j;
int i = 0;
char *p;
+ ice_update_pf_stats(pf);
+ ice_update_vsi_stats(vsi);
+
for (j = 0; j < ICE_VSI_STATS_LEN; j++) {
p = (char *)vsi + ice_gstrings_vsi_stats[j].stat_offset;
data[i++] = (ice_gstrings_vsi_stats[j].sizeof_stat ==
- sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
+ sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
}
/* populate per queue stats */
@@ -1716,6 +1719,7 @@ ice_get_settings_link_up(struct ethtool_link_ksettings *ks,
struct net_device *netdev)
{
struct ice_netdev_priv *np = netdev_priv(netdev);
+ struct ice_port_info *pi = np->vsi->port_info;
struct ethtool_link_ksettings cap_ksettings;
struct ice_link_status *link_info;
struct ice_vsi *vsi = np->vsi;
@@ -2040,6 +2044,33 @@ ice_get_settings_link_up(struct ethtool_link_ksettings *ks,
break;
}
ks->base.duplex = DUPLEX_FULL;
+
+ if (link_info->an_info & ICE_AQ_AN_COMPLETED)
+ ethtool_link_ksettings_add_link_mode(ks, lp_advertising,
+ Autoneg);
+
+ /* Set flow control negotiated Rx/Tx pause */
+ switch (pi->fc.current_mode) {
+ case ICE_FC_FULL:
+ ethtool_link_ksettings_add_link_mode(ks, lp_advertising, Pause);
+ break;
+ case ICE_FC_TX_PAUSE:
+ ethtool_link_ksettings_add_link_mode(ks, lp_advertising, Pause);
+ ethtool_link_ksettings_add_link_mode(ks, lp_advertising,
+ Asym_Pause);
+ break;
+ case ICE_FC_RX_PAUSE:
+ ethtool_link_ksettings_add_link_mode(ks, lp_advertising,
+ Asym_Pause);
+ break;
+ case ICE_FC_PFC:
+ /* fall through */
+ default:
+ ethtool_link_ksettings_del_link_mode(ks, lp_advertising, Pause);
+ ethtool_link_ksettings_del_link_mode(ks, lp_advertising,
+ Asym_Pause);
+ break;
+ }
}
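
For reference, a minimal userspace sketch of the Pause/Asym_Pause mapping the switch above performs when filling lp_advertising; the enum and names here are illustrative stand-ins for the driver's ice_fc_mode values, not copied from it:

#include <stdio.h>

/* Toy table of the lp_advertising pause mapping above:
 * full -> Pause, tx_pause -> Pause + Asym_Pause,
 * rx_pause -> Asym_Pause, anything else -> neither bit.
 */
enum fc_mode { FC_NONE, FC_RX_PAUSE, FC_TX_PAUSE, FC_FULL };

int main(void)
{
	static const char * const name[] = {
		"none", "rx_pause", "tx_pause", "full"
	};
	int m;

	for (m = FC_NONE; m <= FC_FULL; m++) {
		int pause = (m == FC_FULL || m == FC_TX_PAUSE);
		int asym = (m == FC_TX_PAUSE || m == FC_RX_PAUSE);

		printf("%-8s -> Pause=%d Asym_Pause=%d\n", name[m], pause, asym);
	}
	return 0;
}
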
/**
@@ -2078,9 +2109,12 @@ ice_get_link_ksettings(struct net_device *netdev,
struct ice_aqc_get_phy_caps_data *caps;
struct ice_link_status *hw_link_info;
struct ice_vsi *vsi = np->vsi;
+ enum ice_status status;
+ int err = 0;
ethtool_link_ksettings_zero_link_mode(ks, supported);
ethtool_link_ksettings_zero_link_mode(ks, advertising);
+ ethtool_link_ksettings_zero_link_mode(ks, lp_advertising);
hw_link_info = &vsi->port_info->phy.link_info;
/* set speed and duplex */
@@ -2125,48 +2159,36 @@ ice_get_link_ksettings(struct net_device *netdev,
/* flow control is symmetric and always supported */
ethtool_link_ksettings_add_link_mode(ks, supported, Pause);
- switch (vsi->port_info->fc.req_mode) {
- case ICE_FC_FULL:
+ caps = devm_kzalloc(&vsi->back->pdev->dev, sizeof(*caps), GFP_KERNEL);
+ if (!caps)
+ return -ENOMEM;
+
+ status = ice_aq_get_phy_caps(vsi->port_info, false,
+ ICE_AQC_REPORT_SW_CFG, caps, NULL);
+ if (status) {
+ err = -EIO;
+ goto done;
+ }
+
+ /* Set the advertised flow control based on the PHY capability */
+ if ((caps->caps & ICE_AQC_PHY_EN_TX_LINK_PAUSE) &&
+ (caps->caps & ICE_AQC_PHY_EN_RX_LINK_PAUSE)) {
ethtool_link_ksettings_add_link_mode(ks, advertising, Pause);
- break;
- case ICE_FC_TX_PAUSE:
ethtool_link_ksettings_add_link_mode(ks, advertising,
Asym_Pause);
- break;
- case ICE_FC_RX_PAUSE:
+ } else if (caps->caps & ICE_AQC_PHY_EN_TX_LINK_PAUSE) {
+ ethtool_link_ksettings_add_link_mode(ks, advertising,
+ Asym_Pause);
+ } else if (caps->caps & ICE_AQC_PHY_EN_RX_LINK_PAUSE) {
ethtool_link_ksettings_add_link_mode(ks, advertising, Pause);
ethtool_link_ksettings_add_link_mode(ks, advertising,
Asym_Pause);
- break;
- case ICE_FC_PFC:
- default:
+ } else {
ethtool_link_ksettings_del_link_mode(ks, advertising, Pause);
ethtool_link_ksettings_del_link_mode(ks, advertising,
Asym_Pause);
- break;
}
- caps = devm_kzalloc(&vsi->back->pdev->dev, sizeof(*caps), GFP_KERNEL);
- if (!caps)
- goto done;
-
- if (ice_aq_get_phy_caps(vsi->port_info, false, ICE_AQC_REPORT_TOPO_CAP,
- caps, NULL))
- netdev_info(netdev, "Get phy capability failed.\n");
-
- /* Set supported FEC modes based on PHY capability */
- ethtool_link_ksettings_add_link_mode(ks, supported, FEC_NONE);
-
- if (caps->link_fec_options & ICE_AQC_PHY_FEC_10G_KR_40G_KR4_EN ||
- caps->link_fec_options & ICE_AQC_PHY_FEC_25G_KR_CLAUSE74_EN)
- ethtool_link_ksettings_add_link_mode(ks, supported, FEC_BASER);
- if (caps->link_fec_options & ICE_AQC_PHY_FEC_25G_RS_CLAUSE91_EN)
- ethtool_link_ksettings_add_link_mode(ks, supported, FEC_RS);
-
- if (ice_aq_get_phy_caps(vsi->port_info, false, ICE_AQC_REPORT_SW_CFG,
- caps, NULL))
- netdev_info(netdev, "Get phy capability failed.\n");
-
/* Set advertised FEC modes based on PHY capability */
ethtool_link_ksettings_add_link_mode(ks, advertising, FEC_NONE);
@@ -2178,9 +2200,25 @@ ice_get_link_ksettings(struct net_device *netdev,
caps->link_fec_options & ICE_AQC_PHY_FEC_25G_RS_544_REQ)
ethtool_link_ksettings_add_link_mode(ks, advertising, FEC_RS);
+ status = ice_aq_get_phy_caps(vsi->port_info, false,
+ ICE_AQC_REPORT_TOPO_CAP, caps, NULL);
+ if (status) {
+ err = -EIO;
+ goto done;
+ }
+
+ /* Set supported FEC modes based on PHY capability */
+ ethtool_link_ksettings_add_link_mode(ks, supported, FEC_NONE);
+
+ if (caps->link_fec_options & ICE_AQC_PHY_FEC_10G_KR_40G_KR4_EN ||
+ caps->link_fec_options & ICE_AQC_PHY_FEC_25G_KR_CLAUSE74_EN)
+ ethtool_link_ksettings_add_link_mode(ks, supported, FEC_BASER);
+ if (caps->link_fec_options & ICE_AQC_PHY_FEC_25G_RS_CLAUSE91_EN)
+ ethtool_link_ksettings_add_link_mode(ks, supported, FEC_RS);
+
done:
devm_kfree(&vsi->back->pdev->dev, caps);
- return 0;
+ return err;
}
/**
@@ -2763,6 +2801,11 @@ static int ice_nway_reset(struct net_device *netdev)
* ice_get_pauseparam - Get Flow Control status
* @netdev: network interface device structure
* @pause: ethernet pause (flow control) parameters
+ *
+ * Get the requested flow control status from the PHY capability.
+ * If autoneg is on, ethtool issues the ETHTOOL_GSET ioctl, which is
+ * handled by ice_get_link_ksettings; that function reports the
+ * negotiated Rx/Tx pause via lp_advertising.
*/
static void
ice_get_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause)
@@ -2816,6 +2859,7 @@ static int
ice_set_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause)
{
struct ice_netdev_priv *np = netdev_priv(netdev);
+ struct ice_aqc_get_phy_caps_data *pcaps;
struct ice_link_status *hw_link_info;
struct ice_pf *pf = np->vsi->back;
struct ice_dcbx_cfg *dcbx_cfg;
@@ -2826,6 +2870,7 @@ ice_set_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause)
u8 aq_failures;
bool link_up;
int err = 0;
+ u32 is_an;
pi = vsi->port_info;
hw_link_info = &pi->phy.link_info;
@@ -2840,7 +2885,30 @@ ice_set_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause)
return -EOPNOTSUPP;
}
- if (pause->autoneg != (hw_link_info->an_info & ICE_AQ_AN_COMPLETED)) {
+ /* Get pause param reports the configured and negotiated flow control
+ * pause when ETHTOOL_GLINKSETTINGS is defined. Since ETHTOOL_GLINKSETTINGS
+ * is defined, get pause param's pause->autoneg reports the SW-configured
+ * setting, so compare pause->autoneg with the SW-configured value to
+ * prevent the user from using set pause param to change autoneg.
+ */
+ pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL);
+ if (!pcaps)
+ return -ENOMEM;
+
+ /* Get current PHY config */
+ status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_SW_CFG, pcaps,
+ NULL);
+ if (status) {
+ kfree(pcaps);
+ return -EIO;
+ }
+
+ is_an = ((pcaps->caps & ICE_AQC_PHY_AN_MODE) ?
+ AUTONEG_ENABLE : AUTONEG_DISABLE);
+
+ kfree(pcaps);
+
+ if (pause->autoneg != is_an) {
netdev_info(netdev, "To change autoneg please use: ethtool -s <dev> autoneg <on|off>\n");
return -EOPNOTSUPP;
}
diff --git a/drivers/net/ethernet/intel/ice/ice_hw_autogen.h b/drivers/net/ethernet/intel/ice/ice_hw_autogen.h
index 6c5ce05742b1..6f78ff5534af 100644
--- a/drivers/net/ethernet/intel/ice/ice_hw_autogen.h
+++ b/drivers/net/ethernet/intel/ice/ice_hw_autogen.h
@@ -127,8 +127,11 @@
#define GLINT_DYN_CTL_CLEARPBA_M BIT(1)
#define GLINT_DYN_CTL_SWINT_TRIG_M BIT(2)
#define GLINT_DYN_CTL_ITR_INDX_S 3
+#define GLINT_DYN_CTL_ITR_INDX_M ICE_M(0x3, 3)
#define GLINT_DYN_CTL_INTERVAL_S 5
+#define GLINT_DYN_CTL_INTERVAL_M ICE_M(0xFFF, 5)
#define GLINT_DYN_CTL_SW_ITR_INDX_M ICE_M(0x3, 25)
+#define GLINT_DYN_CTL_WB_ON_ITR_M BIT(30)
#define GLINT_DYN_CTL_INTENA_MSK_M BIT(31)
#define GLINT_ITR(_i, _INT) (0x00154000 + ((_i) * 8192 + (_INT) * 4))
#define GLINT_RATE(_INT) (0x0015A000 + ((_INT) * 4))
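
A small compile-and-run sketch of how the new ITR_INDX/INTERVAL masks compose into a GLINT_DYN_CTL register value. ICE_M is assumed to expand to ((m) << (s)), as its uses above suggest, and the INTENA bit-0 define is an assumption added for illustration:

#include <stdint.h>
#include <stdio.h>

#define ICE_M(m, s)			((m) << (s))	/* assumed expansion */
#define GLINT_DYN_CTL_INTENA_M		(1u << 0)	/* assumed bit 0 */
#define GLINT_DYN_CTL_ITR_INDX_S	3
#define GLINT_DYN_CTL_ITR_INDX_M	ICE_M(0x3, 3)
#define GLINT_DYN_CTL_INTERVAL_S	5
#define GLINT_DYN_CTL_INTERVAL_M	ICE_M(0xFFF, 5)

int main(void)
{
	uint32_t itr_idx = 1, interval = 0x20;	/* example field values */
	uint32_t val;

	val = GLINT_DYN_CTL_INTENA_M |
	      ((itr_idx << GLINT_DYN_CTL_ITR_INDX_S) & GLINT_DYN_CTL_ITR_INDX_M) |
	      ((interval << GLINT_DYN_CTL_INTERVAL_S) & GLINT_DYN_CTL_INTERVAL_M);

	printf("GLINT_DYN_CTL = 0x%08x\n", val);	/* 0x00000409 */
	return 0;
}
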
@@ -281,14 +284,10 @@
#define GL_PWR_MODE_CTL 0x000B820C
#define GL_PWR_MODE_CTL_CAR_MAX_BW_S 30
#define GL_PWR_MODE_CTL_CAR_MAX_BW_M ICE_M(0x3, 30)
-#define GLPRT_BPRCH(_i) (0x00381384 + ((_i) * 8))
#define GLPRT_BPRCL(_i) (0x00381380 + ((_i) * 8))
-#define GLPRT_BPTCH(_i) (0x00381244 + ((_i) * 8))
#define GLPRT_BPTCL(_i) (0x00381240 + ((_i) * 8))
#define GLPRT_CRCERRS(_i) (0x00380100 + ((_i) * 8))
-#define GLPRT_GORCH(_i) (0x00380004 + ((_i) * 8))
#define GLPRT_GORCL(_i) (0x00380000 + ((_i) * 8))
-#define GLPRT_GOTCH(_i) (0x00380B44 + ((_i) * 8))
#define GLPRT_GOTCL(_i) (0x00380B40 + ((_i) * 8))
#define GLPRT_ILLERRC(_i) (0x003801C0 + ((_i) * 8))
#define GLPRT_LXOFFRXC(_i) (0x003802C0 + ((_i) * 8))
@@ -296,38 +295,22 @@
#define GLPRT_LXONRXC(_i) (0x00380280 + ((_i) * 8))
#define GLPRT_LXONTXC(_i) (0x00381140 + ((_i) * 8))
#define GLPRT_MLFC(_i) (0x00380040 + ((_i) * 8))
-#define GLPRT_MPRCH(_i) (0x00381344 + ((_i) * 8))
#define GLPRT_MPRCL(_i) (0x00381340 + ((_i) * 8))
-#define GLPRT_MPTCH(_i) (0x00381204 + ((_i) * 8))
#define GLPRT_MPTCL(_i) (0x00381200 + ((_i) * 8))
#define GLPRT_MRFC(_i) (0x00380080 + ((_i) * 8))
-#define GLPRT_PRC1023H(_i) (0x00380A04 + ((_i) * 8))
#define GLPRT_PRC1023L(_i) (0x00380A00 + ((_i) * 8))
-#define GLPRT_PRC127H(_i) (0x00380944 + ((_i) * 8))
#define GLPRT_PRC127L(_i) (0x00380940 + ((_i) * 8))
-#define GLPRT_PRC1522H(_i) (0x00380A44 + ((_i) * 8))
#define GLPRT_PRC1522L(_i) (0x00380A40 + ((_i) * 8))
-#define GLPRT_PRC255H(_i) (0x00380984 + ((_i) * 8))
#define GLPRT_PRC255L(_i) (0x00380980 + ((_i) * 8))
-#define GLPRT_PRC511H(_i) (0x003809C4 + ((_i) * 8))
#define GLPRT_PRC511L(_i) (0x003809C0 + ((_i) * 8))
-#define GLPRT_PRC64H(_i) (0x00380904 + ((_i) * 8))
#define GLPRT_PRC64L(_i) (0x00380900 + ((_i) * 8))
-#define GLPRT_PRC9522H(_i) (0x00380A84 + ((_i) * 8))
#define GLPRT_PRC9522L(_i) (0x00380A80 + ((_i) * 8))
-#define GLPRT_PTC1023H(_i) (0x00380C84 + ((_i) * 8))
#define GLPRT_PTC1023L(_i) (0x00380C80 + ((_i) * 8))
-#define GLPRT_PTC127H(_i) (0x00380BC4 + ((_i) * 8))
#define GLPRT_PTC127L(_i) (0x00380BC0 + ((_i) * 8))
-#define GLPRT_PTC1522H(_i) (0x00380CC4 + ((_i) * 8))
#define GLPRT_PTC1522L(_i) (0x00380CC0 + ((_i) * 8))
-#define GLPRT_PTC255H(_i) (0x00380C04 + ((_i) * 8))
#define GLPRT_PTC255L(_i) (0x00380C00 + ((_i) * 8))
-#define GLPRT_PTC511H(_i) (0x00380C44 + ((_i) * 8))
#define GLPRT_PTC511L(_i) (0x00380C40 + ((_i) * 8))
-#define GLPRT_PTC64H(_i) (0x00380B84 + ((_i) * 8))
#define GLPRT_PTC64L(_i) (0x00380B80 + ((_i) * 8))
-#define GLPRT_PTC9522H(_i) (0x00380D04 + ((_i) * 8))
#define GLPRT_PTC9522L(_i) (0x00380D00 + ((_i) * 8))
#define GLPRT_PXOFFRXC(_i, _j) (0x00380500 + ((_i) * 8 + (_j) * 64))
#define GLPRT_PXOFFTXC(_i, _j) (0x00380F40 + ((_i) * 8 + (_j) * 64))
@@ -340,32 +323,23 @@
#define GLPRT_RUC(_i) (0x00380200 + ((_i) * 8))
#define GLPRT_RXON2OFFCNT(_i, _j) (0x00380700 + ((_i) * 8 + (_j) * 64))
#define GLPRT_TDOLD(_i) (0x00381280 + ((_i) * 8))
-#define GLPRT_UPRCH(_i) (0x00381304 + ((_i) * 8))
#define GLPRT_UPRCL(_i) (0x00381300 + ((_i) * 8))
-#define GLPRT_UPTCH(_i) (0x003811C4 + ((_i) * 8))
#define GLPRT_UPTCL(_i) (0x003811C0 + ((_i) * 8))
-#define GLV_BPRCH(_i) (0x003B6004 + ((_i) * 8))
#define GLV_BPRCL(_i) (0x003B6000 + ((_i) * 8))
-#define GLV_BPTCH(_i) (0x0030E004 + ((_i) * 8))
#define GLV_BPTCL(_i) (0x0030E000 + ((_i) * 8))
-#define GLV_GORCH(_i) (0x003B0004 + ((_i) * 8))
#define GLV_GORCL(_i) (0x003B0000 + ((_i) * 8))
-#define GLV_GOTCH(_i) (0x00300004 + ((_i) * 8))
#define GLV_GOTCL(_i) (0x00300000 + ((_i) * 8))
-#define GLV_MPRCH(_i) (0x003B4004 + ((_i) * 8))
#define GLV_MPRCL(_i) (0x003B4000 + ((_i) * 8))
-#define GLV_MPTCH(_i) (0x0030C004 + ((_i) * 8))
#define GLV_MPTCL(_i) (0x0030C000 + ((_i) * 8))
#define GLV_RDPC(_i) (0x00294C04 + ((_i) * 4))
#define GLV_TEPC(_VSI) (0x00312000 + ((_VSI) * 4))
-#define GLV_UPRCH(_i) (0x003B2004 + ((_i) * 8))
#define GLV_UPRCL(_i) (0x003B2000 + ((_i) * 8))
-#define GLV_UPTCH(_i) (0x0030A004 + ((_i) * 8))
#define GLV_UPTCL(_i) (0x0030A000 + ((_i) * 8))
#define PF_VT_PFALLOC_HIF 0x0009DD80
#define VSIQF_HKEY_MAX_INDEX 12
#define VSIQF_HLUT_MAX_INDEX 15
#define VFINT_DYN_CTLN(_i) (0x00003800 + ((_i) * 4))
#define VFINT_DYN_CTLN_CLEARPBA_M BIT(1)
+#define PRTRPB_RDPC 0x000AC260
#endif /* _ICE_HW_AUTOGEN_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h b/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h
index 510a8c900e61..57ea6811fe2c 100644
--- a/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h
+++ b/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h
@@ -290,6 +290,7 @@ struct ice_rlan_ctx {
u8 tphdata_ena;
u8 tphhead_ena;
u16 lrxqthresh; /* bigger than needed, see above for reason */
+ u8 prefena; /* NOTE: normally must be set to 1 at init */
};
struct ice_ctx_ele {
diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c
index a19f5920733b..8d5d6635a123 100644
--- a/drivers/net/ethernet/intel/ice/ice_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_lib.c
@@ -1010,6 +1010,13 @@ static int ice_vsi_init(struct ice_vsi *vsi)
ICE_AQ_VSI_SEC_FLAG_ENA_MAC_ANTI_SPOOF;
}
+ /* Allow control frames out of main VSI */
+ if (vsi->type == ICE_VSI_PF) {
+ ctxt->info.sec_flags |= ICE_AQ_VSI_SEC_FLAG_ALLOW_DEST_OVRD;
+ ctxt->info.valid_sections |=
+ cpu_to_le16(ICE_AQ_VSI_PROP_SECURITY_VALID);
+ }
+
ret = ice_add_vsi(hw, vsi->idx, ctxt, NULL);
if (ret) {
dev_err(&pf->pdev->dev,
@@ -1129,12 +1136,7 @@ static int ice_vsi_alloc_q_vectors(struct ice_vsi *vsi)
return -EEXIST;
}
- if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags)) {
- num_q_vectors = vsi->num_q_vectors;
- } else {
- err = -EINVAL;
- goto err_out;
- }
+ num_q_vectors = vsi->num_q_vectors;
for (v_idx = 0; v_idx < num_q_vectors; v_idx++) {
err = ice_vsi_alloc_q_vector(vsi, v_idx);
@@ -1180,9 +1182,6 @@ static int ice_vsi_setup_vector_base(struct ice_vsi *vsi)
return -EEXIST;
}
- if (!test_bit(ICE_FLAG_MSIX_ENA, pf->flags))
- return -ENOENT;
-
num_q_vectors = vsi->num_q_vectors;
/* reserve slots from OS requested IRQs */
vsi->base_vector = ice_get_res(pf, pf->irq_tracker, num_q_vectors,
@@ -1477,40 +1476,32 @@ void ice_update_eth_stats(struct ice_vsi *vsi)
prev_es = &vsi->eth_stats_prev;
cur_es = &vsi->eth_stats;
- ice_stat_update40(hw, GLV_GORCH(vsi_num), GLV_GORCL(vsi_num),
- vsi->stat_offsets_loaded, &prev_es->rx_bytes,
- &cur_es->rx_bytes);
+ ice_stat_update40(hw, GLV_GORCL(vsi_num), vsi->stat_offsets_loaded,
+ &prev_es->rx_bytes, &cur_es->rx_bytes);
- ice_stat_update40(hw, GLV_UPRCH(vsi_num), GLV_UPRCL(vsi_num),
- vsi->stat_offsets_loaded, &prev_es->rx_unicast,
- &cur_es->rx_unicast);
+ ice_stat_update40(hw, GLV_UPRCL(vsi_num), vsi->stat_offsets_loaded,
+ &prev_es->rx_unicast, &cur_es->rx_unicast);
- ice_stat_update40(hw, GLV_MPRCH(vsi_num), GLV_MPRCL(vsi_num),
- vsi->stat_offsets_loaded, &prev_es->rx_multicast,
- &cur_es->rx_multicast);
+ ice_stat_update40(hw, GLV_MPRCL(vsi_num), vsi->stat_offsets_loaded,
+ &prev_es->rx_multicast, &cur_es->rx_multicast);
- ice_stat_update40(hw, GLV_BPRCH(vsi_num), GLV_BPRCL(vsi_num),
- vsi->stat_offsets_loaded, &prev_es->rx_broadcast,
- &cur_es->rx_broadcast);
+ ice_stat_update40(hw, GLV_BPRCL(vsi_num), vsi->stat_offsets_loaded,
+ &prev_es->rx_broadcast, &cur_es->rx_broadcast);
ice_stat_update32(hw, GLV_RDPC(vsi_num), vsi->stat_offsets_loaded,
&prev_es->rx_discards, &cur_es->rx_discards);
- ice_stat_update40(hw, GLV_GOTCH(vsi_num), GLV_GOTCL(vsi_num),
- vsi->stat_offsets_loaded, &prev_es->tx_bytes,
- &cur_es->tx_bytes);
+ ice_stat_update40(hw, GLV_GOTCL(vsi_num), vsi->stat_offsets_loaded,
+ &prev_es->tx_bytes, &cur_es->tx_bytes);
- ice_stat_update40(hw, GLV_UPTCH(vsi_num), GLV_UPTCL(vsi_num),
- vsi->stat_offsets_loaded, &prev_es->tx_unicast,
- &cur_es->tx_unicast);
+ ice_stat_update40(hw, GLV_UPTCL(vsi_num), vsi->stat_offsets_loaded,
+ &prev_es->tx_unicast, &cur_es->tx_unicast);
- ice_stat_update40(hw, GLV_MPTCH(vsi_num), GLV_MPTCL(vsi_num),
- vsi->stat_offsets_loaded, &prev_es->tx_multicast,
- &cur_es->tx_multicast);
+ ice_stat_update40(hw, GLV_MPTCL(vsi_num), vsi->stat_offsets_loaded,
+ &prev_es->tx_multicast, &cur_es->tx_multicast);
- ice_stat_update40(hw, GLV_BPTCH(vsi_num), GLV_BPTCL(vsi_num),
- vsi->stat_offsets_loaded, &prev_es->tx_broadcast,
- &cur_es->tx_broadcast);
+ ice_stat_update40(hw, GLV_BPTCL(vsi_num), vsi->stat_offsets_loaded,
+ &prev_es->tx_broadcast, &cur_es->tx_broadcast);
ice_stat_update32(hw, GLV_TEPC(vsi_num), vsi->stat_offsets_loaded,
&prev_es->tx_errors, &cur_es->tx_errors);
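
The hunks above drop the separate high/low register arguments: ice_stat_update40() now takes a single register and reads the full 40-bit counter in one access. Below is a userspace sketch of the wrap-aware delta computation such a helper performs; the internals are an assumption based on the usual Intel stat-update pattern, not copied from this patch:

#include <stdint.h>
#include <stdio.h>

#define ICE_40_BIT_MASK ((1ULL << 40) - 1)

static void stat_update40(uint64_t reg_val, int prev_loaded,
			  uint64_t *prev, uint64_t *cur)
{
	uint64_t new_data = reg_val & ICE_40_BIT_MASK;

	if (!prev_loaded)			/* first read after reset */
		*prev = new_data;
	if (new_data >= *prev)
		*cur = new_data - *prev;
	else					/* 40-bit counter wrapped */
		*cur = (new_data + (1ULL << 40)) - *prev;
}

int main(void)
{
	uint64_t prev = 0, cur = 0;

	stat_update40(100, 0, &prev, &cur);	/* baseline: cur = 0 */
	stat_update40(150, 1, &prev, &cur);	/* cur = 50 */
	stat_update40(10, 1, &prev, &cur);	/* wrapped: cur = 2^40 - 90 */
	printf("cur=%llu\n", (unsigned long long)cur);
	return 0;
}
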
@@ -2156,6 +2147,9 @@ ice_vsi_stop_tx_rings(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src,
if (status == ICE_ERR_RESET_ONGOING) {
dev_dbg(&pf->pdev->dev,
"Reset in progress. LAN Tx queues already disabled\n");
+ } else if (status == ICE_ERR_DOES_NOT_EXIST) {
+ dev_dbg(&pf->pdev->dev,
+ "LAN Tx queues does not exist, nothing to disabled\n");
} else if (status) {
dev_err(&pf->pdev->dev,
"Failed to disable LAN Tx queues, error: %d\n",
@@ -2519,7 +2513,7 @@ ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi,
/* configure VSI nodes based on number of queues and TC's */
for (i = 0; i < vsi->tc_cfg.numtc; i++)
- max_txqs[i] = pf->num_lan_tx;
+ max_txqs[i] = vsi->alloc_txq;
status = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc,
max_txqs);
@@ -2547,7 +2541,7 @@ ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi,
ice_cfg_sw_lldp(vsi, true, true);
/* Rx LLDP packets */
- if (!test_bit(ICE_FLAG_ENABLE_FW_LLDP, pf->flags))
+ if (!test_bit(ICE_FLAG_FW_LLDP_AGENT, pf->flags))
ice_cfg_sw_lldp(vsi, false, true);
}
@@ -2610,39 +2604,36 @@ void ice_vsi_free_irq(struct ice_vsi *vsi)
{
struct ice_pf *pf = vsi->back;
int base = vsi->base_vector;
+ int i;
- if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags)) {
- int i;
-
- if (!vsi->q_vectors || !vsi->irqs_ready)
- return;
+ if (!vsi->q_vectors || !vsi->irqs_ready)
+ return;
- ice_vsi_release_msix(vsi);
- if (vsi->type == ICE_VSI_VF)
- return;
+ ice_vsi_release_msix(vsi);
+ if (vsi->type == ICE_VSI_VF)
+ return;
- vsi->irqs_ready = false;
- ice_for_each_q_vector(vsi, i) {
- u16 vector = i + base;
- int irq_num;
+ vsi->irqs_ready = false;
+ ice_for_each_q_vector(vsi, i) {
+ u16 vector = i + base;
+ int irq_num;
- irq_num = pf->msix_entries[vector].vector;
+ irq_num = pf->msix_entries[vector].vector;
- /* free only the irqs that were actually requested */
- if (!vsi->q_vectors[i] ||
- !(vsi->q_vectors[i]->num_ring_tx ||
- vsi->q_vectors[i]->num_ring_rx))
- continue;
+ /* free only the irqs that were actually requested */
+ if (!vsi->q_vectors[i] ||
+ !(vsi->q_vectors[i]->num_ring_tx ||
+ vsi->q_vectors[i]->num_ring_rx))
+ continue;
- /* clear the affinity notifier in the IRQ descriptor */
- irq_set_affinity_notifier(irq_num, NULL);
+ /* clear the affinity notifier in the IRQ descriptor */
+ irq_set_affinity_notifier(irq_num, NULL);
- /* clear the affinity_mask in the IRQ descriptor */
- irq_set_affinity_hint(irq_num, NULL);
- synchronize_irq(irq_num);
- devm_free_irq(&pf->pdev->dev, irq_num,
- vsi->q_vectors[i]);
- }
+ /* clear the affinity_mask in the IRQ descriptor */
+ irq_set_affinity_hint(irq_num, NULL);
+ synchronize_irq(irq_num);
+ devm_free_irq(&pf->pdev->dev, irq_num,
+ vsi->q_vectors[i]);
}
}
@@ -2821,15 +2812,17 @@ void ice_vsi_dis_irq(struct ice_vsi *vsi)
}
/* disable each interrupt */
- if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags)) {
- ice_for_each_q_vector(vsi, i)
- wr32(hw, GLINT_DYN_CTL(vsi->q_vectors[i]->reg_idx), 0);
+ ice_for_each_q_vector(vsi, i)
+ wr32(hw, GLINT_DYN_CTL(vsi->q_vectors[i]->reg_idx), 0);
- ice_flush(hw);
+ ice_flush(hw);
- ice_for_each_q_vector(vsi, i)
- synchronize_irq(pf->msix_entries[i + base].vector);
- }
+ /* don't call synchronize_irq() for VFs from the host */
+ if (vsi->type == ICE_VSI_VF)
+ return;
+
+ ice_for_each_q_vector(vsi, i)
+ synchronize_irq(pf->msix_entries[i + base].vector);
}
/**
@@ -2895,7 +2888,7 @@ int ice_vsi_release(struct ice_vsi *vsi)
/* The Rx rule will only exist to remove if the LLDP FW
* engine is currently stopped
*/
- if (!test_bit(ICE_FLAG_ENABLE_FW_LLDP, pf->flags))
+ if (!test_bit(ICE_FLAG_FW_LLDP_AGENT, pf->flags))
ice_cfg_sw_lldp(vsi, false, false);
}
@@ -2986,6 +2979,10 @@ int ice_vsi_rebuild(struct ice_vsi *vsi)
if (ret)
goto err_rings;
+ ret = ice_vsi_setup_vector_base(vsi);
+ if (ret)
+ goto err_vectors;
+
ret = ice_vsi_set_q_vectors_reg_idx(vsi);
if (ret)
goto err_vectors;
@@ -3007,10 +3004,6 @@ int ice_vsi_rebuild(struct ice_vsi *vsi)
if (ret)
goto err_rings;
- ret = ice_vsi_setup_vector_base(vsi);
- if (ret)
- goto err_vectors;
-
ret = ice_vsi_set_q_vectors_reg_idx(vsi);
if (ret)
goto err_vectors;
@@ -3028,7 +3021,7 @@ int ice_vsi_rebuild(struct ice_vsi *vsi)
/* configure VSI nodes based on number of queues and TC's */
for (i = 0; i < vsi->tc_cfg.numtc; i++)
- max_txqs[i] = pf->num_lan_tx;
+ max_txqs[i] = vsi->alloc_txq;
status = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc,
max_txqs);
@@ -3145,7 +3138,7 @@ int ice_vsi_cfg_tc(struct ice_vsi *vsi, u8 ena_tc)
if (ena_tc & BIT(i))
num_tc++;
/* populate max_txqs per TC */
- max_txqs[i] = pf->num_lan_tx;
+ max_txqs[i] = vsi->alloc_txq;
}
vsi->tc_cfg.ena_tc = ena_tc;
@@ -3188,3 +3181,33 @@ out:
return ret;
}
#endif /* CONFIG_DCB */
+
+/**
+ * ice_vsi_cfg_mac_fltr - Add or remove a MAC address filter for a VSI
+ * @vsi: the VSI whose MAC filter is being configured
+ * @macaddr: the MAC address to be added or removed
+ * @set: true to add the MAC filter, false to remove it
+ *
+ * Adds or removes a MAC address filter entry for the given VSI
+ */
+enum ice_status
+ice_vsi_cfg_mac_fltr(struct ice_vsi *vsi, const u8 *macaddr, bool set)
+{
+ LIST_HEAD(tmp_add_list);
+ enum ice_status status;
+
+ /* Build a temporary MAC filter list to be added to or removed from the VSI */
+ if (ice_add_mac_to_list(vsi, &tmp_add_list, macaddr)) {
+ status = ICE_ERR_NO_MEMORY;
+ goto cfg_mac_fltr_exit;
+ }
+
+ if (set)
+ status = ice_add_mac(&vsi->back->hw, &tmp_add_list);
+ else
+ status = ice_remove_mac(&vsi->back->hw, &tmp_add_list);
+
+cfg_mac_fltr_exit:
+ ice_free_fltr_list(&vsi->back->pdev->dev, &tmp_add_list);
+ return status;
+}
diff --git a/drivers/net/ethernet/intel/ice/ice_lib.h b/drivers/net/ethernet/intel/ice/ice_lib.h
index 6e43ef03bfc3..969ba27cba95 100644
--- a/drivers/net/ethernet/intel/ice/ice_lib.h
+++ b/drivers/net/ethernet/intel/ice/ice_lib.h
@@ -95,4 +95,8 @@ void ice_vsi_free_tx_rings(struct ice_vsi *vsi);
int ice_vsi_manage_rss_lut(struct ice_vsi *vsi, bool ena);
u32 ice_intrl_usec_to_reg(u8 intrl, u8 gran);
+
+enum ice_status
+ice_vsi_cfg_mac_fltr(struct ice_vsi *vsi, const u8 *macaddr, bool set);
+
#endif /* !_ICE_LIB_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c
index 63db08d9bafa..f3923dec32b7 100644
--- a/drivers/net/ethernet/intel/ice/ice_main.c
+++ b/drivers/net/ethernet/intel/ice/ice_main.c
@@ -9,7 +9,7 @@
#include "ice_lib.h"
#include "ice_dcb_lib.h"
-#define DRV_VERSION "0.7.4-k"
+#define DRV_VERSION "0.7.5-k"
#define DRV_SUMMARY "Intel(R) Ethernet Connection E800 Series Linux Driver"
const char ice_drv_ver[] = DRV_VERSION;
static const char ice_driver_string[] = DRV_SUMMARY;
@@ -34,19 +34,17 @@ static const struct net_device_ops ice_netdev_ops;
static void ice_rebuild(struct ice_pf *pf);
static void ice_vsi_release_all(struct ice_pf *pf);
-static void ice_update_vsi_stats(struct ice_vsi *vsi);
-static void ice_update_pf_stats(struct ice_pf *pf);
/**
* ice_get_tx_pending - returns number of Tx descriptors not processed
* @ring: the ring of descriptors
*/
-static u32 ice_get_tx_pending(struct ice_ring *ring)
+static u16 ice_get_tx_pending(struct ice_ring *ring)
{
- u32 head, tail;
+ u16 head, tail;
head = ring->next_to_clean;
- tail = readl(ring->tail);
+ tail = ring->next_to_use;
if (head != tail)
return (head < tail) ?
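
This hunk is truncated mid-expression by the surrounding diff context. Assuming the elided branch completes with the usual ring arithmetic — tail - head when the ring has not wrapped, otherwise tail - head plus the ring size — a runnable sketch of the pending-descriptor count (RING_COUNT is illustrative, not the driver's value):

#include <stdio.h>

#define RING_COUNT 64	/* illustrative ring size */

static unsigned int tx_pending(unsigned int head, unsigned int tail)
{
	if (head == tail)
		return 0;
	return head < tail ? tail - head : tail - head + RING_COUNT;
}

int main(void)
{
	printf("%u\n", tx_pending(10, 30));	/* 20: no wrap */
	printf("%u\n", tx_pending(60, 4));	/* 8: wrapped past the end */
	return 0;
}
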
@@ -118,10 +116,9 @@ static void ice_check_for_hang_subtask(struct ice_pf *pf)
*/
static int ice_init_mac_fltr(struct ice_pf *pf)
{
- LIST_HEAD(tmp_add_list);
+ enum ice_status status;
u8 broadcast[ETH_ALEN];
struct ice_vsi *vsi;
- int status;
vsi = ice_find_vsi_by_type(pf, ICE_VSI_PF);
if (!vsi)
@@ -132,8 +129,7 @@ static int ice_init_mac_fltr(struct ice_pf *pf)
*/
/* Add a unicast MAC filter so the VSI can get its packets */
- status = ice_add_mac_to_list(vsi, &tmp_add_list,
- vsi->port_info->mac.perm_addr);
+ status = ice_vsi_cfg_mac_fltr(vsi, vsi->port_info->mac.perm_addr, true);
if (status)
goto unregister;
@@ -141,18 +137,11 @@ static int ice_init_mac_fltr(struct ice_pf *pf)
* MAC address to the list as well.
*/
eth_broadcast_addr(broadcast);
- status = ice_add_mac_to_list(vsi, &tmp_add_list, broadcast);
- if (status)
- goto free_mac_list;
-
- /* Program MAC filters for entries in tmp_add_list */
- status = ice_add_mac(&pf->hw, &tmp_add_list);
+ status = ice_vsi_cfg_mac_fltr(vsi, broadcast, true);
if (status)
- status = -ENOMEM;
-
-free_mac_list:
- ice_free_fltr_list(&pf->pdev->dev, &tmp_add_list);
+ goto unregister;
+ return 0;
unregister:
/* We aren't useful with no MAC filters, so unregister if we
* had an error
@@ -166,7 +155,7 @@ unregister:
vsi->netdev = NULL;
}
- return status;
+ return -EIO;
}
/**
@@ -488,6 +477,7 @@ static void
ice_prepare_for_reset(struct ice_pf *pf)
{
struct ice_hw *hw = &pf->hw;
+ u8 i;
/* already prepared for reset */
if (test_bit(__ICE_PREPARED_FOR_RESET, pf->state))
@@ -497,6 +487,10 @@ ice_prepare_for_reset(struct ice_pf *pf)
if (ice_check_sq_alive(hw, &hw->mailboxq))
ice_vc_notify_reset(pf);
+ /* Disable VFs until reset is completed */
+ for (i = 0; i < pf->num_alloc_vfs; i++)
+ clear_bit(ICE_VF_STATE_ENA, pf->vf[i].vf_states);
+
/* disable the VSIs and their queues that are not already DOWN */
ice_pf_dis_all_vsi(pf, false);
@@ -810,6 +804,20 @@ ice_link_event(struct ice_pf *pf, struct ice_port_info *pi, bool link_up,
if (!vsi || !vsi->port_info)
return -EINVAL;
+ /* turn off PHY if media was removed */
+ if (!test_bit(ICE_FLAG_NO_MEDIA, pf->flags) &&
+ !(pi->phy.link_info.link_info & ICE_AQ_MEDIA_AVAILABLE)) {
+ set_bit(ICE_FLAG_NO_MEDIA, pf->flags);
+
+ result = ice_aq_set_link_restart_an(pi, false, NULL);
+ if (result) {
+ dev_dbg(&pf->pdev->dev,
+ "Failed to set link down, VSI %d error %d\n",
+ vsi->vsi_num, result);
+ return result;
+ }
+ }
+
ice_vsi_link_event(vsi, link_up);
ice_print_link_msg(vsi, link_up);
@@ -1315,6 +1323,124 @@ static void ice_handle_mdd_event(struct ice_pf *pf)
}
/**
+ * ice_force_phys_link_state - Force the physical link state
+ * @vsi: VSI to force the physical link state to up/down
+ * @link_up: true/false indicates to set the physical link to up/down
+ *
+ * Force the physical link state by getting the current PHY capabilities from
+ * hardware and setting the PHY config based on the determined capabilities. If
+ * link changes a link event will be triggered because both the Enable Automatic
+ * Link Update and LESM Enable bits are set when setting the PHY capabilities.
+ *
+ * Returns 0 on success, negative on failure
+ */
+static int ice_force_phys_link_state(struct ice_vsi *vsi, bool link_up)
+{
+ struct ice_aqc_get_phy_caps_data *pcaps;
+ struct ice_aqc_set_phy_cfg_data *cfg;
+ struct ice_port_info *pi;
+ struct device *dev;
+ int retcode;
+
+ if (!vsi || !vsi->port_info || !vsi->back)
+ return -EINVAL;
+ if (vsi->type != ICE_VSI_PF)
+ return 0;
+
+ dev = &vsi->back->pdev->dev;
+
+ pi = vsi->port_info;
+
+ pcaps = devm_kzalloc(dev, sizeof(*pcaps), GFP_KERNEL);
+ if (!pcaps)
+ return -ENOMEM;
+
+ retcode = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_SW_CFG, pcaps,
+ NULL);
+ if (retcode) {
+ dev_err(dev,
+ "Failed to get phy capabilities, VSI %d error %d\n",
+ vsi->vsi_num, retcode);
+ retcode = -EIO;
+ goto out;
+ }
+
+ /* No change in link */
+ if (link_up == !!(pcaps->caps & ICE_AQC_PHY_EN_LINK) &&
+ link_up == !!(pi->phy.link_info.link_info & ICE_AQ_LINK_UP))
+ goto out;
+
+ cfg = devm_kzalloc(dev, sizeof(*cfg), GFP_KERNEL);
+ if (!cfg) {
+ retcode = -ENOMEM;
+ goto out;
+ }
+
+ cfg->phy_type_low = pcaps->phy_type_low;
+ cfg->phy_type_high = pcaps->phy_type_high;
+ cfg->caps = pcaps->caps | ICE_AQ_PHY_ENA_AUTO_LINK_UPDT;
+ cfg->low_power_ctrl = pcaps->low_power_ctrl;
+ cfg->eee_cap = pcaps->eee_cap;
+ cfg->eeer_value = pcaps->eeer_value;
+ cfg->link_fec_opt = pcaps->link_fec_options;
+ if (link_up)
+ cfg->caps |= ICE_AQ_PHY_ENA_LINK;
+ else
+ cfg->caps &= ~ICE_AQ_PHY_ENA_LINK;
+
+ retcode = ice_aq_set_phy_cfg(&vsi->back->hw, pi->lport, cfg, NULL);
+ if (retcode) {
+ dev_err(dev, "Failed to set phy config, VSI %d error %d\n",
+ vsi->vsi_num, retcode);
+ retcode = -EIO;
+ }
+
+ devm_kfree(dev, cfg);
+out:
+ devm_kfree(dev, pcaps);
+ return retcode;
+}
+
+/**
+ * ice_check_media_subtask - Check for media; bring link up if detected.
+ * @pf: pointer to PF struct
+ */
+static void ice_check_media_subtask(struct ice_pf *pf)
+{
+ struct ice_port_info *pi;
+ struct ice_vsi *vsi;
+ int err;
+
+ vsi = ice_find_vsi_by_type(pf, ICE_VSI_PF);
+ if (!vsi)
+ return;
+
+ /* No need to check for media if it's already present or the interface
+ * is down
+ */
+ if (!test_bit(ICE_FLAG_NO_MEDIA, pf->flags) ||
+ test_bit(__ICE_DOWN, vsi->state))
+ return;
+
+ /* Refresh link info and check if media is present */
+ pi = vsi->port_info;
+ err = ice_update_link_info(pi);
+ if (err)
+ return;
+
+ if (pi->phy.link_info.link_info & ICE_AQ_MEDIA_AVAILABLE) {
+ err = ice_force_phys_link_state(vsi, true);
+ if (err)
+ return;
+ clear_bit(ICE_FLAG_NO_MEDIA, pf->flags);
+
+ /* A Link Status Event will be generated; the event handler
+ * will complete bringing the interface up
+ */
+ }
+}
+
+/**
* ice_service_task - manage and run subtasks
* @work: pointer to work_struct contained by the PF struct
*/
@@ -1336,6 +1462,7 @@ static void ice_service_task(struct work_struct *work)
return;
}
+ ice_check_media_subtask(pf);
ice_check_for_hang_subtask(pf);
ice_sync_fltr_subtask(pf);
ice_handle_mdd_event(pf);
@@ -1369,8 +1496,8 @@ static void ice_set_ctrlq_len(struct ice_hw *hw)
hw->adminq.num_sq_entries = ICE_AQ_LEN;
hw->adminq.rq_buf_size = ICE_AQ_MAX_BUF_LEN;
hw->adminq.sq_buf_size = ICE_AQ_MAX_BUF_LEN;
- hw->mailboxq.num_rq_entries = ICE_MBXQ_LEN;
- hw->mailboxq.num_sq_entries = ICE_MBXQ_LEN;
+ hw->mailboxq.num_rq_entries = ICE_MBXRQ_LEN;
+ hw->mailboxq.num_sq_entries = ICE_MBXSQ_LEN;
hw->mailboxq.rq_buf_size = ICE_MBXQ_MAX_BUF_LEN;
hw->mailboxq.sq_buf_size = ICE_MBXQ_MAX_BUF_LEN;
}
@@ -1409,15 +1536,11 @@ static void ice_irq_affinity_release(struct kref __always_unused *ref) {}
*/
static int ice_vsi_ena_irq(struct ice_vsi *vsi)
{
- struct ice_pf *pf = vsi->back;
- struct ice_hw *hw = &pf->hw;
-
- if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags)) {
- int i;
+ struct ice_hw *hw = &vsi->back->hw;
+ int i;
- ice_for_each_q_vector(vsi, i)
- ice_irq_dynamic_ena(hw, vsi, vsi->q_vectors[i]);
- }
+ ice_for_each_q_vector(vsi, i)
+ ice_irq_dynamic_ena(hw, vsi, vsi->q_vectors[i]);
ice_flush(hw);
return 0;
@@ -1665,7 +1788,7 @@ static void ice_free_irq_msix_misc(struct ice_pf *pf)
wr32(hw, PFINT_OICR_ENA, 0);
ice_flush(hw);
- if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags) && pf->msix_entries) {
+ if (pf->msix_entries) {
synchronize_irq(pf->msix_entries[pf->oicr_idx].vector);
devm_free_irq(&pf->pdev->dev,
pf->msix_entries[pf->oicr_idx].vector, pf);
@@ -2091,7 +2214,6 @@ static void ice_deinit_pf(struct ice_pf *pf)
static void ice_init_pf(struct ice_pf *pf)
{
bitmap_zero(pf->flags, ICE_PF_FLAGS_NBITS);
- set_bit(ICE_FLAG_MSIX_ENA, pf->flags);
#ifdef CONFIG_PCI_IOV
if (pf->hw.func_caps.common_cap.sr_iov_1_1) {
struct ice_hw *hw = &pf->hw;
@@ -2191,7 +2313,6 @@ msix_err:
exit_err:
pf->num_lan_msix = 0;
- clear_bit(ICE_FLAG_MSIX_ENA, pf->flags);
return err;
}
@@ -2204,7 +2325,6 @@ static void ice_dis_msix(struct ice_pf *pf)
pci_disable_msix(pf->pdev);
devm_kfree(&pf->pdev->dev, pf->msix_entries);
pf->msix_entries = NULL;
- clear_bit(ICE_FLAG_MSIX_ENA, pf->flags);
}
/**
@@ -2213,8 +2333,7 @@ static void ice_dis_msix(struct ice_pf *pf)
*/
static void ice_clear_interrupt_scheme(struct ice_pf *pf)
{
- if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags))
- ice_dis_msix(pf);
+ ice_dis_msix(pf);
if (pf->irq_tracker) {
devm_kfree(&pf->pdev->dev, pf->irq_tracker);
@@ -2230,10 +2349,7 @@ static int ice_init_interrupt_scheme(struct ice_pf *pf)
{
int vectors;
- if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags))
- vectors = ice_ena_msix_range(pf);
- else
- return -ENODEV;
+ vectors = ice_ena_msix_range(pf);
if (vectors < 0)
return vectors;
@@ -2390,12 +2506,10 @@ ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent)
* the misc functionality and queue processing is combined in
* the same vector and that gets setup at open.
*/
- if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags)) {
- err = ice_req_irq_msix_misc(pf);
- if (err) {
- dev_err(dev, "setup of misc vector failed: %d\n", err);
- goto err_init_interrupt_unroll;
- }
+ err = ice_req_irq_msix_misc(pf);
+ if (err) {
+ dev_err(dev, "setup of misc vector failed: %d\n", err);
+ goto err_init_interrupt_unroll;
}
/* create switch struct for the switch element created by FW on boot */
@@ -2711,10 +2825,8 @@ static int ice_set_mac_address(struct net_device *netdev, void *pi)
struct ice_hw *hw = &pf->hw;
struct sockaddr *addr = pi;
enum ice_status status;
- LIST_HEAD(a_mac_list);
- LIST_HEAD(r_mac_list);
u8 flags = 0;
- int err;
+ int err = 0;
u8 *mac;
mac = (u8 *)addr->sa_data;
@@ -2737,42 +2849,23 @@ static int ice_set_mac_address(struct net_device *netdev, void *pi)
/* When we change the MAC address we also have to change the MAC address
* based filter rules that were created previously for the old MAC
* address. So first, we remove the old filter rule using ice_remove_mac
- * and then create a new filter rule using ice_add_mac. Note that for
- * both these operations, we first need to form a "list" of MAC
- * addresses (even though in this case, we have only 1 MAC address to be
- * added/removed) and this done using ice_add_mac_to_list. Depending on
- * the ensuing operation this "list" of MAC addresses is either to be
- * added or removed from the filter.
+ * and then create the new filter rule using ice_add_mac. Both the
+ * remove and the add operations are done through the
+ * ice_vsi_cfg_mac_fltr helper.
*/
- err = ice_add_mac_to_list(vsi, &r_mac_list, netdev->dev_addr);
- if (err) {
- err = -EADDRNOTAVAIL;
- goto free_lists;
- }
-
- status = ice_remove_mac(hw, &r_mac_list);
+ status = ice_vsi_cfg_mac_fltr(vsi, netdev->dev_addr, false);
if (status) {
err = -EADDRNOTAVAIL;
- goto free_lists;
- }
-
- err = ice_add_mac_to_list(vsi, &a_mac_list, mac);
- if (err) {
- err = -EADDRNOTAVAIL;
- goto free_lists;
+ goto err_update_filters;
}
- status = ice_add_mac(hw, &a_mac_list);
+ status = ice_vsi_cfg_mac_fltr(vsi, mac, true);
if (status) {
err = -EADDRNOTAVAIL;
- goto free_lists;
+ goto err_update_filters;
}
-free_lists:
- /* free list entries */
- ice_free_fltr_list(&pf->pdev->dev, &r_mac_list);
- ice_free_fltr_list(&pf->pdev->dev, &a_mac_list);
-
+err_update_filters:
if (err) {
netdev_err(netdev, "can't set MAC %pM. filter update failed\n",
mac);
@@ -2788,8 +2881,8 @@ free_lists:
flags = ICE_AQC_MAN_MAC_UPDATE_LAA_WOL;
status = ice_aq_manage_mac_write(hw, mac, flags, NULL);
if (status) {
- netdev_err(netdev, "can't set MAC %pM. write to firmware failed.\n",
- mac);
+ netdev_err(netdev, "can't set MAC %pM. write to firmware failed error %d\n",
+ mac, status);
}
return 0;
}
@@ -3008,10 +3101,7 @@ static int ice_up_complete(struct ice_vsi *vsi)
struct ice_pf *pf = vsi->back;
int err;
- if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags))
- ice_vsi_cfg_msix(vsi);
- else
- return -ENOTSUPP;
+ ice_vsi_cfg_msix(vsi);
/* Enable only Rx rings, Tx rings were enabled by the FW when the
* Tx queue group list was configured and the context bits were
@@ -3132,7 +3222,7 @@ static void ice_update_vsi_ring_stats(struct ice_vsi *vsi)
* ice_update_vsi_stats - Update VSI stats counters
* @vsi: the VSI to be updated
*/
-static void ice_update_vsi_stats(struct ice_vsi *vsi)
+void ice_update_vsi_stats(struct ice_vsi *vsi)
{
struct rtnl_link_stats64 *cur_ns = &vsi->net_stats;
struct ice_eth_stats *cur_es = &vsi->eth_stats;
@@ -3159,6 +3249,8 @@ static void ice_update_vsi_stats(struct ice_vsi *vsi)
cur_ns->rx_errors = pf->stats.crc_errors +
pf->stats.illegal_bytes;
cur_ns->rx_length_errors = pf->stats.rx_len_errors;
+ /* record drops from the port level */
+ cur_ns->rx_missed_errors = pf->stats.eth.rx_discards;
}
}
@@ -3166,7 +3258,7 @@ static void ice_update_vsi_stats(struct ice_vsi *vsi)
* ice_update_pf_stats - Update PF port stats counters
* @pf: PF whose stats needs to be updated
*/
-static void ice_update_pf_stats(struct ice_pf *pf)
+void ice_update_pf_stats(struct ice_pf *pf)
{
struct ice_hw_port_stats *prev_ps, *cur_ps;
struct ice_hw *hw = &pf->hw;
@@ -3176,96 +3268,86 @@ static void ice_update_pf_stats(struct ice_pf *pf)
cur_ps = &pf->stats;
pf_id = hw->pf_id;
- ice_stat_update40(hw, GLPRT_GORCH(pf_id), GLPRT_GORCL(pf_id),
- pf->stat_prev_loaded, &prev_ps->eth.rx_bytes,
+ ice_stat_update40(hw, GLPRT_GORCL(pf_id), pf->stat_prev_loaded,
+ &prev_ps->eth.rx_bytes,
&cur_ps->eth.rx_bytes);
- ice_stat_update40(hw, GLPRT_UPRCH(pf_id), GLPRT_UPRCL(pf_id),
- pf->stat_prev_loaded, &prev_ps->eth.rx_unicast,
+ ice_stat_update40(hw, GLPRT_UPRCL(pf_id), pf->stat_prev_loaded,
+ &prev_ps->eth.rx_unicast,
&cur_ps->eth.rx_unicast);
- ice_stat_update40(hw, GLPRT_MPRCH(pf_id), GLPRT_MPRCL(pf_id),
- pf->stat_prev_loaded, &prev_ps->eth.rx_multicast,
+ ice_stat_update40(hw, GLPRT_MPRCL(pf_id), pf->stat_prev_loaded,
+ &prev_ps->eth.rx_multicast,
&cur_ps->eth.rx_multicast);
- ice_stat_update40(hw, GLPRT_BPRCH(pf_id), GLPRT_BPRCL(pf_id),
- pf->stat_prev_loaded, &prev_ps->eth.rx_broadcast,
+ ice_stat_update40(hw, GLPRT_BPRCL(pf_id), pf->stat_prev_loaded,
+ &prev_ps->eth.rx_broadcast,
&cur_ps->eth.rx_broadcast);
- ice_stat_update40(hw, GLPRT_GOTCH(pf_id), GLPRT_GOTCL(pf_id),
- pf->stat_prev_loaded, &prev_ps->eth.tx_bytes,
+ ice_stat_update32(hw, PRTRPB_RDPC, pf->stat_prev_loaded,
+ &prev_ps->eth.rx_discards,
+ &cur_ps->eth.rx_discards);
+
+ ice_stat_update40(hw, GLPRT_GOTCL(pf_id), pf->stat_prev_loaded,
+ &prev_ps->eth.tx_bytes,
&cur_ps->eth.tx_bytes);
- ice_stat_update40(hw, GLPRT_UPTCH(pf_id), GLPRT_UPTCL(pf_id),
- pf->stat_prev_loaded, &prev_ps->eth.tx_unicast,
+ ice_stat_update40(hw, GLPRT_UPTCL(pf_id), pf->stat_prev_loaded,
+ &prev_ps->eth.tx_unicast,
&cur_ps->eth.tx_unicast);
- ice_stat_update40(hw, GLPRT_MPTCH(pf_id), GLPRT_MPTCL(pf_id),
- pf->stat_prev_loaded, &prev_ps->eth.tx_multicast,
+ ice_stat_update40(hw, GLPRT_MPTCL(pf_id), pf->stat_prev_loaded,
+ &prev_ps->eth.tx_multicast,
&cur_ps->eth.tx_multicast);
- ice_stat_update40(hw, GLPRT_BPTCH(pf_id), GLPRT_BPTCL(pf_id),
- pf->stat_prev_loaded, &prev_ps->eth.tx_broadcast,
+ ice_stat_update40(hw, GLPRT_BPTCL(pf_id), pf->stat_prev_loaded,
+ &prev_ps->eth.tx_broadcast,
&cur_ps->eth.tx_broadcast);
ice_stat_update32(hw, GLPRT_TDOLD(pf_id), pf->stat_prev_loaded,
&prev_ps->tx_dropped_link_down,
&cur_ps->tx_dropped_link_down);
- ice_stat_update40(hw, GLPRT_PRC64H(pf_id), GLPRT_PRC64L(pf_id),
- pf->stat_prev_loaded, &prev_ps->rx_size_64,
- &cur_ps->rx_size_64);
+ ice_stat_update40(hw, GLPRT_PRC64L(pf_id), pf->stat_prev_loaded,
+ &prev_ps->rx_size_64, &cur_ps->rx_size_64);
- ice_stat_update40(hw, GLPRT_PRC127H(pf_id), GLPRT_PRC127L(pf_id),
- pf->stat_prev_loaded, &prev_ps->rx_size_127,
- &cur_ps->rx_size_127);
+ ice_stat_update40(hw, GLPRT_PRC127L(pf_id), pf->stat_prev_loaded,
+ &prev_ps->rx_size_127, &cur_ps->rx_size_127);
- ice_stat_update40(hw, GLPRT_PRC255H(pf_id), GLPRT_PRC255L(pf_id),
- pf->stat_prev_loaded, &prev_ps->rx_size_255,
- &cur_ps->rx_size_255);
+ ice_stat_update40(hw, GLPRT_PRC255L(pf_id), pf->stat_prev_loaded,
+ &prev_ps->rx_size_255, &cur_ps->rx_size_255);
- ice_stat_update40(hw, GLPRT_PRC511H(pf_id), GLPRT_PRC511L(pf_id),
- pf->stat_prev_loaded, &prev_ps->rx_size_511,
- &cur_ps->rx_size_511);
+ ice_stat_update40(hw, GLPRT_PRC511L(pf_id), pf->stat_prev_loaded,
+ &prev_ps->rx_size_511, &cur_ps->rx_size_511);
- ice_stat_update40(hw, GLPRT_PRC1023H(pf_id),
- GLPRT_PRC1023L(pf_id), pf->stat_prev_loaded,
+ ice_stat_update40(hw, GLPRT_PRC1023L(pf_id), pf->stat_prev_loaded,
&prev_ps->rx_size_1023, &cur_ps->rx_size_1023);
- ice_stat_update40(hw, GLPRT_PRC1522H(pf_id),
- GLPRT_PRC1522L(pf_id), pf->stat_prev_loaded,
+ ice_stat_update40(hw, GLPRT_PRC1522L(pf_id), pf->stat_prev_loaded,
&prev_ps->rx_size_1522, &cur_ps->rx_size_1522);
- ice_stat_update40(hw, GLPRT_PRC9522H(pf_id),
- GLPRT_PRC9522L(pf_id), pf->stat_prev_loaded,
+ ice_stat_update40(hw, GLPRT_PRC9522L(pf_id), pf->stat_prev_loaded,
&prev_ps->rx_size_big, &cur_ps->rx_size_big);
- ice_stat_update40(hw, GLPRT_PTC64H(pf_id), GLPRT_PTC64L(pf_id),
- pf->stat_prev_loaded, &prev_ps->tx_size_64,
- &cur_ps->tx_size_64);
+ ice_stat_update40(hw, GLPRT_PTC64L(pf_id), pf->stat_prev_loaded,
+ &prev_ps->tx_size_64, &cur_ps->tx_size_64);
- ice_stat_update40(hw, GLPRT_PTC127H(pf_id), GLPRT_PTC127L(pf_id),
- pf->stat_prev_loaded, &prev_ps->tx_size_127,
- &cur_ps->tx_size_127);
+ ice_stat_update40(hw, GLPRT_PTC127L(pf_id), pf->stat_prev_loaded,
+ &prev_ps->tx_size_127, &cur_ps->tx_size_127);
- ice_stat_update40(hw, GLPRT_PTC255H(pf_id), GLPRT_PTC255L(pf_id),
- pf->stat_prev_loaded, &prev_ps->tx_size_255,
- &cur_ps->tx_size_255);
+ ice_stat_update40(hw, GLPRT_PTC255L(pf_id), pf->stat_prev_loaded,
+ &prev_ps->tx_size_255, &cur_ps->tx_size_255);
- ice_stat_update40(hw, GLPRT_PTC511H(pf_id), GLPRT_PTC511L(pf_id),
- pf->stat_prev_loaded, &prev_ps->tx_size_511,
- &cur_ps->tx_size_511);
+ ice_stat_update40(hw, GLPRT_PTC511L(pf_id), pf->stat_prev_loaded,
+ &prev_ps->tx_size_511, &cur_ps->tx_size_511);
- ice_stat_update40(hw, GLPRT_PTC1023H(pf_id),
- GLPRT_PTC1023L(pf_id), pf->stat_prev_loaded,
+ ice_stat_update40(hw, GLPRT_PTC1023L(pf_id), pf->stat_prev_loaded,
&prev_ps->tx_size_1023, &cur_ps->tx_size_1023);
- ice_stat_update40(hw, GLPRT_PTC1522H(pf_id),
- GLPRT_PTC1522L(pf_id), pf->stat_prev_loaded,
+ ice_stat_update40(hw, GLPRT_PTC1522L(pf_id), pf->stat_prev_loaded,
&prev_ps->tx_size_1522, &cur_ps->tx_size_1522);
- ice_stat_update40(hw, GLPRT_PTC9522H(pf_id),
- GLPRT_PTC9522L(pf_id), pf->stat_prev_loaded,
+ ice_stat_update40(hw, GLPRT_PTC9522L(pf_id), pf->stat_prev_loaded,
&prev_ps->tx_size_big, &cur_ps->tx_size_big);
ice_stat_update32(hw, GLPRT_LXONRXC(pf_id), pf->stat_prev_loaded,
@@ -3372,85 +3454,6 @@ static void ice_napi_disable_all(struct ice_vsi *vsi)
}
/**
- * ice_force_phys_link_state - Force the physical link state
- * @vsi: VSI to force the physical link state to up/down
- * @link_up: true/false indicates to set the physical link to up/down
- *
- * Force the physical link state by getting the current PHY capabilities from
- * hardware and setting the PHY config based on the determined capabilities. If
- * link changes a link event will be triggered because both the Enable Automatic
- * Link Update and LESM Enable bits are set when setting the PHY capabilities.
- *
- * Returns 0 on success, negative on failure
- */
-static int ice_force_phys_link_state(struct ice_vsi *vsi, bool link_up)
-{
- struct ice_aqc_get_phy_caps_data *pcaps;
- struct ice_aqc_set_phy_cfg_data *cfg;
- struct ice_port_info *pi;
- struct device *dev;
- int retcode;
-
- if (!vsi || !vsi->port_info || !vsi->back)
- return -EINVAL;
- if (vsi->type != ICE_VSI_PF)
- return 0;
-
- dev = &vsi->back->pdev->dev;
-
- pi = vsi->port_info;
-
- pcaps = devm_kzalloc(dev, sizeof(*pcaps), GFP_KERNEL);
- if (!pcaps)
- return -ENOMEM;
-
- retcode = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_SW_CFG, pcaps,
- NULL);
- if (retcode) {
- dev_err(dev,
- "Failed to get phy capabilities, VSI %d error %d\n",
- vsi->vsi_num, retcode);
- retcode = -EIO;
- goto out;
- }
-
- /* No change in link */
- if (link_up == !!(pcaps->caps & ICE_AQC_PHY_EN_LINK) &&
- link_up == !!(pi->phy.link_info.link_info & ICE_AQ_LINK_UP))
- goto out;
-
- cfg = devm_kzalloc(dev, sizeof(*cfg), GFP_KERNEL);
- if (!cfg) {
- retcode = -ENOMEM;
- goto out;
- }
-
- cfg->phy_type_low = pcaps->phy_type_low;
- cfg->phy_type_high = pcaps->phy_type_high;
- cfg->caps = pcaps->caps | ICE_AQ_PHY_ENA_AUTO_LINK_UPDT;
- cfg->low_power_ctrl = pcaps->low_power_ctrl;
- cfg->eee_cap = pcaps->eee_cap;
- cfg->eeer_value = pcaps->eeer_value;
- cfg->link_fec_opt = pcaps->link_fec_options;
- if (link_up)
- cfg->caps |= ICE_AQ_PHY_ENA_LINK;
- else
- cfg->caps &= ~ICE_AQ_PHY_ENA_LINK;
-
- retcode = ice_aq_set_phy_cfg(&vsi->back->hw, pi->lport, cfg, NULL);
- if (retcode) {
- dev_err(dev, "Failed to set phy config, VSI %d error %d\n",
- vsi->vsi_num, retcode);
- retcode = -EIO;
- }
-
- devm_kfree(dev, cfg);
-out:
- devm_kfree(dev, pcaps);
- return retcode;
-}
-
-/**
* ice_down - Shutdown the connection
* @vsi: The VSI being stopped
*/
@@ -3559,24 +3562,6 @@ int ice_vsi_setup_rx_rings(struct ice_vsi *vsi)
}
/**
- * ice_vsi_req_irq - Request IRQ from the OS
- * @vsi: The VSI IRQ is being requested for
- * @basename: name for the vector
- *
- * Return 0 on success and a negative value on error
- */
-static int ice_vsi_req_irq(struct ice_vsi *vsi, char *basename)
-{
- struct ice_pf *pf = vsi->back;
- int err = -EINVAL;
-
- if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags))
- err = ice_vsi_req_irq_msix(vsi, basename);
-
- return err;
-}
-
-/**
* ice_vsi_open - Called when a network interface is made active
* @vsi: the VSI to open
*
@@ -3605,7 +3590,7 @@ static int ice_vsi_open(struct ice_vsi *vsi)
snprintf(int_name, sizeof(int_name) - 1, "%s-%s",
dev_driver_string(&pf->pdev->dev), vsi->netdev->name);
- err = ice_vsi_req_irq(vsi, int_name);
+ err = ice_vsi_req_irq_msix(vsi, int_name);
if (err)
goto err_setup_rx;
@@ -3684,8 +3669,6 @@ static int ice_ena_vsi(struct ice_vsi *vsi, bool locked)
err = netd->netdev_ops->ndo_open(netd);
rtnl_unlock();
}
- } else {
- err = ice_vsi_open(vsi);
}
}
@@ -3842,12 +3825,10 @@ static void ice_rebuild(struct ice_pf *pf)
}
/* start misc vector */
- if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags)) {
- err = ice_req_irq_msix_misc(pf);
- if (err) {
- dev_err(dev, "misc vector setup failed: %d\n", err);
- goto err_vsi_rebuild;
- }
+ err = ice_req_irq_msix_misc(pf);
+ if (err) {
+ dev_err(dev, "misc vector setup failed: %d\n", err);
+ goto err_vsi_rebuild;
}
/* restart the VSIs that were rebuilt and running before the reset */
@@ -4244,9 +4225,7 @@ static void ice_tx_timeout(struct net_device *netdev)
head = (rd32(hw, QTX_COMM_HEAD(vsi->txq_map[hung_queue])) &
QTX_COMM_HEAD_HEAD_M) >> QTX_COMM_HEAD_HEAD_S;
/* Read interrupt register */
- if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags))
- val = rd32(hw,
- GLINT_DYN_CTL(tx_ring->q_vector->reg_idx));
+ val = rd32(hw, GLINT_DYN_CTL(tx_ring->q_vector->reg_idx));
netdev_info(netdev, "tx_timeout: VSI_num: %d, Q %d, NTC: 0x%x, HW_HEAD: 0x%x, NTU: 0x%x, INT: 0x%x\n",
vsi->vsi_num, hung_queue, tx_ring->next_to_clean,
@@ -4295,6 +4274,7 @@ int ice_open(struct net_device *netdev)
{
struct ice_netdev_priv *np = netdev_priv(netdev);
struct ice_vsi *vsi = np->vsi;
+ struct ice_port_info *pi;
int err;
if (test_bit(__ICE_NEEDS_RESTART, vsi->back->state)) {
@@ -4304,13 +4284,33 @@ int ice_open(struct net_device *netdev)
netif_carrier_off(netdev);
- err = ice_force_phys_link_state(vsi, true);
+ pi = vsi->port_info;
+ err = ice_update_link_info(pi);
if (err) {
- netdev_err(netdev,
- "Failed to set physical link up, error %d\n", err);
+ netdev_err(netdev, "Failed to get link info, error %d\n",
+ err);
return err;
}
+ /* Set PHY if there is media, otherwise, turn off PHY */
+ if (pi->phy.link_info.link_info & ICE_AQ_MEDIA_AVAILABLE) {
+ err = ice_force_phys_link_state(vsi, true);
+ if (err) {
+ netdev_err(netdev,
+ "Failed to set physical link up, error %d\n",
+ err);
+ return err;
+ }
+ } else {
+ err = ice_aq_set_link_restart_an(pi, false, NULL);
+ if (err) {
+ netdev_err(netdev, "Failed to set PHY state, VSI %d error %d\n",
+ vsi->vsi_num, err);
+ return err;
+ }
+ set_bit(ICE_FLAG_NO_MEDIA, vsi->back->flags);
+ }
+
err = ice_vsi_open(vsi);
if (err)
netdev_err(netdev, "Failed to open VSI 0x%04X on switch 0x%04X\n",
diff --git a/drivers/net/ethernet/intel/ice/ice_switch.c b/drivers/net/ethernet/intel/ice/ice_switch.c
index 8271fd651725..99cf527d2b1a 100644
--- a/drivers/net/ethernet/intel/ice/ice_switch.c
+++ b/drivers/net/ethernet/intel/ice/ice_switch.c
@@ -2137,6 +2137,38 @@ out:
}
/**
+ * ice_find_ucast_rule_entry - Search for a unicast MAC filter rule entry
+ * @hw: pointer to the hardware structure
+ * @recp_id: lookup type for which the specified rule needs to be searched
+ * @f_info: rule information
+ *
+ * Helper function to search for a unicast rule entry - this is to be used
+ * to remove unicast MAC filter that is not shared with other VSIs on the
+ * PF switch.
+ *
+ * Returns pointer to entry storing the rule if found
+ */
+static struct ice_fltr_mgmt_list_entry *
+ice_find_ucast_rule_entry(struct ice_hw *hw, u8 recp_id,
+ struct ice_fltr_info *f_info)
+{
+ struct ice_switch_info *sw = hw->switch_info;
+ struct ice_fltr_mgmt_list_entry *list_itr;
+ struct list_head *list_head;
+
+ list_head = &sw->recp_list[recp_id].filt_rules;
+ list_for_each_entry(list_itr, list_head, list_entry) {
+ if (!memcmp(&f_info->l_data, &list_itr->fltr_info.l_data,
+ sizeof(f_info->l_data)) &&
+ f_info->fwd_id.hw_vsi_id ==
+ list_itr->fltr_info.fwd_id.hw_vsi_id &&
+ f_info->flag == list_itr->fltr_info.flag)
+ return list_itr;
+ }
+ return NULL;
+}
+
+/**
* ice_remove_mac - remove a MAC address based filter rule
* @hw: pointer to the hardware structure
* @m_list: list of MAC addresses and forwarding information
@@ -2153,15 +2185,39 @@ enum ice_status
ice_remove_mac(struct ice_hw *hw, struct list_head *m_list)
{
struct ice_fltr_list_entry *list_itr, *tmp;
+ struct mutex *rule_lock; /* Lock to protect filter rule list */
if (!m_list)
return ICE_ERR_PARAM;
+ rule_lock = &hw->switch_info->recp_list[ICE_SW_LKUP_MAC].filt_rule_lock;
list_for_each_entry_safe(list_itr, tmp, m_list, list_entry) {
enum ice_sw_lkup_type l_type = list_itr->fltr_info.lkup_type;
+ u8 *add = &list_itr->fltr_info.l_data.mac.mac_addr[0];
+ u16 vsi_handle;
if (l_type != ICE_SW_LKUP_MAC)
return ICE_ERR_PARAM;
+
+ vsi_handle = list_itr->fltr_info.vsi_handle;
+ if (!ice_is_vsi_valid(hw, vsi_handle))
+ return ICE_ERR_PARAM;
+
+ list_itr->fltr_info.fwd_id.hw_vsi_id =
+ ice_get_hw_vsi_num(hw, vsi_handle);
+ if (is_unicast_ether_addr(add) && !hw->ucast_shared) {
+ /* Don't remove the unicast address that belongs to
+ * another VSI on the switch, since it is not being
+ * shared...
+ */
+ mutex_lock(rule_lock);
+ if (!ice_find_ucast_rule_entry(hw, ICE_SW_LKUP_MAC,
+ &list_itr->fltr_info)) {
+ mutex_unlock(rule_lock);
+ return ICE_ERR_DOES_NOT_EXIST;
+ }
+ mutex_unlock(rule_lock);
+ }
list_itr->status = ice_remove_rule_internal(hw,
ICE_SW_LKUP_MAC,
list_itr);
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.c b/drivers/net/ethernet/intel/ice/ice_txrx.c
index 3c83230434b6..5bf5c179a738 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx.c
+++ b/drivers/net/ethernet/intel/ice/ice_txrx.c
@@ -377,18 +377,28 @@ err:
*/
static void ice_release_rx_desc(struct ice_ring *rx_ring, u32 val)
{
+ u16 prev_ntu = rx_ring->next_to_use;
+
rx_ring->next_to_use = val;
/* update next to alloc since we have filled the ring */
rx_ring->next_to_alloc = val;
- /* Force memory writes to complete before letting h/w
- * know there are new descriptors to fetch. (Only
- * applicable for weak-ordered memory model archs,
- * such as IA-64).
+ /* QRX_TAIL will be updated with any tail value, but hardware ignores
+ * the lower 3 bits. This makes it so we only bump tail on meaningful
+ * boundaries. Also, this allows us to bump tail on intervals of 8 up to
+ * the budget depending on the current traffic load.
*/
- wmb();
- writel(val, rx_ring->tail);
+ val &= ~0x7;
+ if (prev_ntu != val) {
+ /* Force memory writes to complete before letting h/w
+ * know there are new descriptors to fetch. (Only
+ * applicable for weak-ordered memory model archs,
+ * such as IA-64).
+ */
+ wmb();
+ writel(val, rx_ring->tail);
+ }
}
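A quick illustration of the masking above (made-up values):

	u32 val = 13;		/* new next_to_use */
	u16 prev_ntu = 8;	/* value at entry */

	val &= ~0x7;		/* 13 -> 8: still on the same 8-descriptor
				 * boundary, so the wmb()/writel() pair is
				 * skipped; only at val = 16 does the masked
				 * value change and tail get bumped
				 */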
/**
@@ -445,7 +455,13 @@ ice_alloc_mapped_page(struct ice_ring *rx_ring, struct ice_rx_buf *bi)
* @rx_ring: ring to place buffers on
* @cleaned_count: number of buffers to replace
*
- * Returns false if all allocations were successful, true if any fail
+ * Returns false if all allocations were successful, true if any fail. Returning
+ * true signals to the caller that we didn't replace cleaned_count buffers and
+ * there is more work to do.
+ *
+ * First, try to clean "cleaned_count" Rx buffers. Then refill the cleaned Rx
+ * buffers. Finally, bump the tail at most once. Grouping like this lets us
+ * avoid multiple tail writes per call.
*/
bool ice_alloc_rx_bufs(struct ice_ring *rx_ring, u16 cleaned_count)
{
@@ -462,8 +478,9 @@ bool ice_alloc_rx_bufs(struct ice_ring *rx_ring, u16 cleaned_count)
bi = &rx_ring->rx_buf[ntu];
do {
+ /* if we fail here, we have work remaining */
if (!ice_alloc_mapped_page(rx_ring, bi))
- goto no_bufs;
+ break;
/* sync the buffer for use by the device */
dma_sync_single_range_for_device(rx_ring->dev, bi->dma,
@@ -494,16 +511,7 @@ bool ice_alloc_rx_bufs(struct ice_ring *rx_ring, u16 cleaned_count)
if (rx_ring->next_to_use != ntu)
ice_release_rx_desc(rx_ring, ntu);
- return false;
-
-no_bufs:
- if (rx_ring->next_to_use != ntu)
- ice_release_rx_desc(rx_ring, ntu);
-
- /* make sure to come back via polling to try again after
- * allocation failure
- */
- return true;
+ return !!cleaned_count;
}
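The folded return reads as "any buffer we could not refill means more work"; illustrative numbers:

	/* cleaned_count counts down once per successfully refilled buffer:
	 *   asked for 64, refilled all 64 -> cleaned_count == 0  -> false
	 *   asked for 64, refilled 40     -> cleaned_count == 24 -> true,
	 *   so the caller treats the pass as incomplete and comes back
	 *   via polling to retry the allocation
	 */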
/**
@@ -599,6 +607,8 @@ ice_add_rx_frag(struct ice_rx_buf *rx_buf, struct sk_buff *skb,
unsigned int truesize = ICE_RXBUF_2048;
#endif
+ if (!size)
+ return;
skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buf->page,
rx_buf->page_offset, size, truesize);
@@ -654,6 +664,8 @@ ice_get_rx_buf(struct ice_ring *rx_ring, struct sk_buff **skb,
prefetchw(rx_buf->page);
*skb = rx_buf->skb;
+ if (!size)
+ return rx_buf;
/* we are reusing so sync this buffer for CPU use */
dma_sync_single_range_for_cpu(rx_ring->dev, rx_buf->dma,
rx_buf->page_offset, size,
@@ -737,8 +749,11 @@ ice_construct_skb(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf,
*/
static void ice_put_rx_buf(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf)
{
- /* hand second half of page back to the ring */
+ if (!rx_buf)
+ return;
+
if (ice_can_reuse_rx_page(rx_buf)) {
+ /* hand second half of page back to the ring */
ice_reuse_rx_page(rx_ring, rx_buf);
rx_ring->rx_stats.page_reuse_count++;
} else {
@@ -990,7 +1005,7 @@ static int ice_clean_rx_irq(struct ice_ring *rx_ring, int budget)
{
unsigned int total_rx_bytes = 0, total_rx_pkts = 0;
u16 cleaned_count = ICE_DESC_UNUSED(rx_ring);
- bool failure = false;
+ bool failure;
/* start the loop to process Rx packets bounded by 'budget' */
while (likely(total_rx_pkts < (unsigned int)budget)) {
@@ -1002,13 +1017,6 @@ static int ice_clean_rx_irq(struct ice_ring *rx_ring, int budget)
u16 vlan_tag = 0;
u8 rx_ptype;
- /* return some buffers to hardware, one at a time is too slow */
- if (cleaned_count >= ICE_RX_BUF_WRITE) {
- failure = failure ||
- ice_alloc_rx_bufs(rx_ring, cleaned_count);
- cleaned_count = 0;
- }
-
/* get the Rx desc from Rx ring based on 'next_to_clean' */
rx_desc = ICE_RX_DESC(rx_ring, rx_ring->next_to_clean);
@@ -1030,8 +1038,9 @@ static int ice_clean_rx_irq(struct ice_ring *rx_ring, int budget)
size = le16_to_cpu(rx_desc->wb.pkt_len) &
ICE_RX_FLX_DESC_PKT_LEN_M;
+ /* retrieve a buffer from the ring */
rx_buf = ice_get_rx_buf(rx_ring, &skb, size);
- /* allocate (if needed) and populate skb */
+
if (skb)
ice_add_rx_frag(rx_buf, skb, size);
else
@@ -1040,7 +1049,8 @@ static int ice_clean_rx_irq(struct ice_ring *rx_ring, int budget)
/* exit if we failed to retrieve a buffer */
if (!skb) {
rx_ring->rx_stats.alloc_buf_failed++;
- rx_buf->pagecnt_bias++;
+ if (rx_buf)
+ rx_buf->pagecnt_bias++;
break;
}
@@ -1085,6 +1095,9 @@ static int ice_clean_rx_irq(struct ice_ring *rx_ring, int budget)
total_rx_pkts++;
}
+ /* return up to cleaned_count buffers to hardware */
+ failure = ice_alloc_rx_bufs(rx_ring, cleaned_count);
+
/* update queue and vector specific stats */
u64_stats_update_begin(&rx_ring->syncp);
rx_ring->stats.pkts += total_rx_pkts;
@@ -1351,6 +1364,23 @@ ice_update_ena_itr(struct ice_vsi *vsi, struct ice_q_vector *q_vector)
struct ice_ring_container *rx = &q_vector->rx;
u32 itr_val;
+ /* when exiting WB_ON_ITR, let's set a low ITR value and trigger
+ * interrupts to expire right away in case more work is already
+ * waiting
+ */
+ if (q_vector->itr_countdown == ICE_IN_WB_ON_ITR_MODE) {
+ itr_val = ice_buildreg_itr(rx->itr_idx, ICE_WB_ON_ITR_USECS);
+ wr32(&vsi->back->hw, GLINT_DYN_CTL(q_vector->reg_idx), itr_val);
+ /* set target back to last user set value */
+ rx->target_itr = rx->itr_setting;
+ /* set current to what we just wrote and dynamic if needed */
+ rx->current_itr = ICE_WB_ON_ITR_USECS |
+ (rx->itr_setting & ICE_ITR_DYNAMIC);
+ /* allow normal interrupt flow to start */
+ q_vector->itr_countdown = 0;
+ return;
+ }
+
/* This will do nothing if dynamic updates are not enabled */
ice_update_itr(q_vector, tx);
ice_update_itr(q_vector, rx);
@@ -1396,6 +1426,41 @@ ice_update_ena_itr(struct ice_vsi *vsi, struct ice_q_vector *q_vector)
}
/**
+ * ice_set_wb_on_itr - set WB_ON_ITR for this q_vector
+ * @vsi: pointer to the VSI structure
+ * @q_vector: q_vector to set WB_ON_ITR on
+ *
+ * We need to tell hardware to write back completed descriptors even when
+ * interrupts are disabled. Without WB_ON_ITR, completed descriptors are only
+ * written back once they fill a whole cache line, so a partially filled
+ * cache line may not be written back until the next interrupt fires.
+ *
+ * This sets the write-back frequency to 2 microseconds as that is the minimum
+ * value that's not 0 due to ITR granularity. It also sets the INTENA_MSK
+ * bit to make sure hardware knows we aren't meddling with the INTENA_M bit.
+ */
+static void
+ice_set_wb_on_itr(struct ice_vsi *vsi, struct ice_q_vector *q_vector)
+{
+ /* already in WB_ON_ITR mode no need to change it */
+ if (q_vector->itr_countdown == ICE_IN_WB_ON_ITR_MODE)
+ return;
+
+ if (q_vector->num_ring_rx)
+ wr32(&vsi->back->hw, GLINT_DYN_CTL(q_vector->reg_idx),
+ ICE_GLINT_DYN_CTL_WB_ON_ITR(ICE_WB_ON_ITR_USECS,
+ ICE_RX_ITR));
+
+ if (q_vector->num_ring_tx)
+ wr32(&vsi->back->hw, GLINT_DYN_CTL(q_vector->reg_idx),
+ ICE_GLINT_DYN_CTL_WB_ON_ITR(ICE_WB_ON_ITR_USECS,
+ ICE_TX_ITR));
+
+ q_vector->itr_countdown = ICE_IN_WB_ON_ITR_MODE;
+}
+
+/**
* ice_napi_poll - NAPI polling Rx/Tx cleanup routine
* @napi: napi struct with our devices info in it
* @budget: amount of work driver is allowed to do this pass, in packets
@@ -1409,10 +1474,9 @@ int ice_napi_poll(struct napi_struct *napi, int budget)
struct ice_q_vector *q_vector =
container_of(napi, struct ice_q_vector, napi);
struct ice_vsi *vsi = q_vector->vsi;
- struct ice_pf *pf = vsi->back;
bool clean_complete = true;
- int budget_per_ring = 0;
struct ice_ring *ring;
+ int budget_per_ring;
int work_done = 0;
/* Since the actual Tx work is minimal, we can give the Tx a larger
@@ -1426,11 +1490,16 @@ int ice_napi_poll(struct napi_struct *napi, int budget)
if (budget <= 0)
return budget;
- /* We attempt to distribute budget to each Rx queue fairly, but don't
- * allow the budget to go below 1 because that would exit polling early.
- */
- if (q_vector->num_ring_rx)
+ /* normally we have 1 Rx ring per q_vector */
+ if (unlikely(q_vector->num_ring_rx > 1))
+ /* We attempt to distribute budget to each Rx queue fairly, but
+ * don't allow the budget to go below 1 because that would exit
+ * polling early.
+ */
budget_per_ring = max(budget / q_vector->num_ring_rx, 1);
+ else
+ /* Max of 1 Rx ring in this q_vector so give it the budget */
+ budget_per_ring = budget;
ice_for_each_ring(ring, q_vector->rx) {
int cleaned;
@@ -1450,8 +1519,9 @@ int ice_napi_poll(struct napi_struct *napi, int budget)
* poll us due to busy-polling
*/
if (likely(napi_complete_done(napi, work_done)))
- if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags))
- ice_update_ena_itr(vsi, q_vector);
+ ice_update_ena_itr(vsi, q_vector);
+ else
+ ice_set_wb_on_itr(vsi, q_vector);
return min_t(int, work_done, budget - 1);
}
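Worked numbers for the budget split above (illustrative):

	/* budget = 64:
	 *   0 or 1 Rx ring  -> budget_per_ring = 64 (common case, no divide)
	 *   3 Rx rings      -> budget_per_ring = max(64 / 3, 1) = 21
	 *   128 Rx rings    -> budget_per_ring = max(0, 1) = 1, so polling
	 *                      is never exited early with a zero budget
	 */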
@@ -1521,7 +1591,7 @@ ice_tx_map(struct ice_ring *tx_ring, struct ice_tx_buf *first,
{
u64 td_offset, td_tag, td_cmd;
u16 i = tx_ring->next_to_use;
- struct skb_frag_struct *frag;
+ skb_frag_t *frag;
unsigned int data_len, size;
struct ice_tx_desc *tx_desc;
struct ice_tx_buf *tx_buf;
@@ -1923,7 +1993,7 @@ static unsigned int ice_txd_use_count(unsigned int size)
*/
static unsigned int ice_xmit_desc_count(struct sk_buff *skb)
{
- const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0];
+ const skb_frag_t *frag = &skb_shinfo(skb)->frags[0];
unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
unsigned int count = 0, size = skb_headlen(skb);
@@ -1954,7 +2024,7 @@ static unsigned int ice_xmit_desc_count(struct sk_buff *skb)
*/
static bool __ice_chk_linearize(struct sk_buff *skb)
{
- const struct skb_frag_struct *frag, *stale;
+ const skb_frag_t *frag, *stale;
int nr_frags, sum;
/* no need to check if number of frags is less than 7 */
@@ -2036,6 +2106,7 @@ static netdev_tx_t
ice_xmit_frame_ring(struct sk_buff *skb, struct ice_ring *tx_ring)
{
struct ice_tx_offload_params offload = { 0 };
+ struct ice_vsi *vsi = tx_ring->vsi;
struct ice_tx_buf *first;
unsigned int count;
int tso, csum;
@@ -2083,7 +2154,15 @@ ice_xmit_frame_ring(struct sk_buff *skb, struct ice_ring *tx_ring)
if (csum < 0)
goto out_drop;
- if (tso || offload.cd_tunnel_params) {
+ /* allow CONTROL frames to egress from the main VSI if FW LLDP is disabled */
+ if (unlikely(skb->priority == TC_PRIO_CONTROL &&
+ vsi->type == ICE_VSI_PF &&
+ vsi->port_info->is_sw_lldp))
+ offload.cd_qw1 |= (u64)(ICE_TX_DESC_DTYPE_CTX |
+ ICE_TX_CTX_DESC_SWTCH_UPLINK <<
+ ICE_TXD_CTX_QW1_CMD_S);
+
+ if (offload.cd_qw1 & ICE_TX_DESC_DTYPE_CTX) {
struct ice_tx_ctx_desc *cdesc;
int i = tx_ring->next_to_use;
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.h b/drivers/net/ethernet/intel/ice/ice_txrx.h
index ec76aba347b9..94a9280193e2 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx.h
+++ b/drivers/net/ethernet/intel/ice/ice_txrx.h
@@ -144,6 +144,19 @@ enum ice_rx_dtype {
#define ICE_DFLT_INTRL 0
#define ICE_MAX_INTRL 236
+#define ICE_WB_ON_ITR_USECS 2
+#define ICE_IN_WB_ON_ITR_MODE 255
+/* Sets WB_ON_ITR and assumes the INTENA bit is already cleared, which allows
+ * setting the MSK_M bit to tell hardware to ignore the INTENA_M bit. Also
+ * sets the write-back latency to the usecs passed in.
+ */
+#define ICE_GLINT_DYN_CTL_WB_ON_ITR(usecs, itr_idx) \
+ ((((usecs) << (GLINT_DYN_CTL_INTERVAL_S - ICE_ITR_GRAN_S)) & \
+ GLINT_DYN_CTL_INTERVAL_M) | \
+ (((itr_idx) << GLINT_DYN_CTL_ITR_INDX_S) & \
+ GLINT_DYN_CTL_ITR_INDX_M) | GLINT_DYN_CTL_INTENA_MSK_M | \
+ GLINT_DYN_CTL_WB_ON_ITR_M)
+
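An illustrative expansion of the macro, assuming the usual ice register layout (INTERVAL field at bit 5, ITR_INDX field at bit 3, WB_ON_ITR at bit 30, INTENA_MSK at bit 31) and the 2-microsecond ITR granularity implied by ICE_ITR_GRAN_S = 1; the exact constants are assumptions, not quoted from the header:

	/* ICE_GLINT_DYN_CTL_WB_ON_ITR(2, ICE_RX_ITR), with ICE_RX_ITR == 0:
	 *
	 *   interval: (2 << (5 - 1)) = 0x00000020 (field value 1 = 2 usecs)
	 *   itr_idx:  (0 << 3)       = 0x00000000
	 *   INTENA_MSK_M             = 0x80000000
	 *   WB_ON_ITR_M              = 0x40000000
	 *   ---------------------------------------
	 *   register value           = 0xC0000020
	 */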
/* Legacy or Advanced Mode Queue */
#define ICE_TX_ADVANCED 0
#define ICE_TX_LEGACY 1
diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c
index 5d24b539648f..86637d99ee77 100644
--- a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c
+++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c
@@ -297,13 +297,6 @@ void ice_free_vfs(struct ice_pf *pf)
if (test_bit(ICE_VF_STATE_INIT, pf->vf[i].vf_states)) {
/* disable VF qp mappings */
ice_dis_vf_mappings(&pf->vf[i]);
-
- /* Set this state so that assigned VF vectors can be
- * reclaimed by PF for reuse in ice_vsi_release(). No
- * need to clear this bit since pf->vf array is being
- * freed anyways after this for loop
- */
- set_bit(ICE_VF_STATE_CFG_INTR, pf->vf[i].vf_states);
ice_free_vf_res(&pf->vf[i]);
}
}
@@ -389,12 +382,15 @@ static void ice_trigger_vf_reset(struct ice_vf *vf, bool is_vflr)
wr32(hw, PF_PCI_CIAA,
VF_DEVICE_STATUS | (vf_abs_id << PF_PCI_CIAA_VF_NUM_S));
- for (i = 0; i < 100; i++) {
+ for (i = 0; i < ICE_PCI_CIAD_WAIT_COUNT; i++) {
reg = rd32(hw, PF_PCI_CIAD);
- if ((reg & VF_TRANS_PENDING_M) != 0)
- dev_err(&pf->pdev->dev,
- "VF %d PCI transactions stuck\n", vf->vf_id);
- udelay(1);
+ /* no transactions pending so stop polling */
+ if ((reg & VF_TRANS_PENDING_M) == 0)
+ break;
+
+ dev_err(&pf->pdev->dev,
+ "VF %d PCI transactions stuck\n", vf->vf_id);
+ udelay(ICE_PCI_CIAD_WAIT_DELAY_US);
}
}
@@ -481,19 +477,20 @@ ice_vf_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi, u16 vf_id)
}
/**
- * ice_calc_vf_first_vector_idx - Calculate absolute MSIX vector index in HW
+ * ice_calc_vf_first_vector_idx - Calculate MSIX vector index in the PF space
* @pf: pointer to PF structure
* @vf: pointer to VF that the first MSIX vector index is being calculated for
*
- * This returns the first MSIX vector index in HW that is used by this VF and
- * this will always be the OICR index in the AVF driver so any functionality
+ * This returns the first MSIX vector index in PF space that is used by this VF.
+ * This index is used when accessing PF-relative registers such as
+ * GLINT_VECT2FUNC and GLINT_DYN_CTL.
+ * This will always be the OICR index in the AVF driver so any functionality
* using vf->first_vector_idx for queue configuration will have to increment by
* 1 to avoid meddling with the OICR index.
*/
static int ice_calc_vf_first_vector_idx(struct ice_pf *pf, struct ice_vf *vf)
{
- return pf->hw.func_caps.common_cap.msix_vector_first_id +
- pf->sriov_base_vector + vf->vf_id * pf->num_vf_msix;
+ return pf->sriov_base_vector + vf->vf_id * pf->num_vf_msix;
}
/**
@@ -543,7 +540,10 @@ static int ice_alloc_vsi_res(struct ice_vf *vf)
status = ice_add_mac(&pf->hw, &tmp_add_list);
if (status)
- dev_err(&pf->pdev->dev, "could not add mac filters\n");
+ dev_err(&pf->pdev->dev,
+ "could not add mac filters error %d\n", status);
+ else
+ vf->num_mac = 1;
/* Clear this bit after VF initialization since we shouldn't reclaim
* and reassign interrupts for synchronous or asynchronous VFR events.
@@ -551,7 +551,6 @@ static int ice_alloc_vsi_res(struct ice_vf *vf)
* expect vector assignment to be changed unless there is a request for
* more vectors.
*/
- clear_bit(ICE_VF_STATE_CFG_INTR, vf->vf_states);
ice_alloc_vsi_res_exit:
ice_free_fltr_list(&pf->pdev->dev, &tmp_add_list);
return status;
@@ -567,11 +566,6 @@ static int ice_alloc_vf_res(struct ice_vf *vf)
int tx_rx_queue_left;
int status;
- /* setup VF VSI and necessary resources */
- status = ice_alloc_vsi_res(vf);
- if (status)
- goto ice_alloc_vf_res_exit;
-
/* Update number of VF queues, in case VF had requested for queue
* changes
*/
@@ -581,6 +575,11 @@ static int ice_alloc_vf_res(struct ice_vf *vf)
vf->num_req_qs != vf->num_vf_qs)
vf->num_vf_qs = vf->num_req_qs;
+ /* setup VF VSI and necessary resources */
+ status = ice_alloc_vsi_res(vf);
+ if (status)
+ goto ice_alloc_vf_res_exit;
+
if (vf->trusted)
set_bit(ICE_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps);
else
@@ -605,27 +604,30 @@ ice_alloc_vf_res_exit:
*/
static void ice_ena_vf_mappings(struct ice_vf *vf)
{
+ int abs_vf_id, abs_first, abs_last;
struct ice_pf *pf = vf->pf;
struct ice_vsi *vsi;
int first, last, v;
struct ice_hw *hw;
- int abs_vf_id;
u32 reg;
hw = &pf->hw;
vsi = pf->vsi[vf->lan_vsi_idx];
first = vf->first_vector_idx;
last = (first + pf->num_vf_msix) - 1;
+ abs_first = first + pf->hw.func_caps.common_cap.msix_vector_first_id;
+ abs_last = (abs_first + pf->num_vf_msix) - 1;
abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id;
/* VF Vector allocation */
- reg = (((first << VPINT_ALLOC_FIRST_S) & VPINT_ALLOC_FIRST_M) |
- ((last << VPINT_ALLOC_LAST_S) & VPINT_ALLOC_LAST_M) |
+ reg = (((abs_first << VPINT_ALLOC_FIRST_S) & VPINT_ALLOC_FIRST_M) |
+ ((abs_last << VPINT_ALLOC_LAST_S) & VPINT_ALLOC_LAST_M) |
VPINT_ALLOC_VALID_M);
wr32(hw, VPINT_ALLOC(vf->vf_id), reg);
- reg = (((first << VPINT_ALLOC_PCI_FIRST_S) & VPINT_ALLOC_PCI_FIRST_M) |
- ((last << VPINT_ALLOC_PCI_LAST_S) & VPINT_ALLOC_PCI_LAST_M) |
+ reg = (((abs_first << VPINT_ALLOC_PCI_FIRST_S)
+ & VPINT_ALLOC_PCI_FIRST_M) |
+ ((abs_last << VPINT_ALLOC_PCI_LAST_S) & VPINT_ALLOC_PCI_LAST_M) |
VPINT_ALLOC_PCI_VALID_M);
wr32(hw, VPINT_ALLOC_PCI(vf->vf_id), reg);
/* map the interrupts to its functions */
@@ -983,6 +985,47 @@ ice_vf_set_vsi_promisc(struct ice_vf *vf, struct ice_vsi *vsi, u8 promisc_m,
}
/**
+ * ice_config_res_vfs - Finalize allocation of VFs resources in one go
+ * @pf: pointer to the PF structure
+ *
+ * This function is called as the last part of resetting all VFs, or when
+ * configuring VFs for the first time, where there are no resources to be freed.
+ *
+ * Returns true if resources were properly allocated for all VFs, and false
+ * otherwise.
+ */
+static bool ice_config_res_vfs(struct ice_pf *pf)
+{
+ struct ice_hw *hw = &pf->hw;
+ int v;
+
+ if (ice_check_avail_res(pf)) {
+ dev_err(&pf->pdev->dev,
+ "Cannot allocate VF resources, try with fewer number of VFs\n");
+ return false;
+ }
+
+ /* rearm global interrupts */
+ if (test_and_clear_bit(__ICE_OICR_INTR_DIS, pf->state))
+ ice_irq_dynamic_ena(hw, NULL, NULL);
+
+ /* Finish resetting each VF and allocate resources */
+ for (v = 0; v < pf->num_alloc_vfs; v++) {
+ struct ice_vf *vf = &pf->vf[v];
+
+ vf->num_vf_qs = pf->num_vf_qps;
+ dev_dbg(&pf->pdev->dev,
+ "VF-id %d has %d queues configured\n",
+ vf->vf_id, vf->num_vf_qs);
+ ice_cleanup_and_realloc_vf(vf);
+ }
+
+ ice_flush(hw);
+ clear_bit(__ICE_VF_DIS, pf->state);
+
+ return true;
+}
+
+/**
* ice_reset_all_vfs - reset all allocated VFs in one go
* @pf: pointer to the PF structure
* @is_vflr: true if VFLR was issued, false if not
@@ -1031,7 +1074,6 @@ bool ice_reset_all_vfs(struct ice_pf *pf, bool is_vflr)
* finished resetting.
*/
for (i = 0, v = 0; i < 10 && v < pf->num_alloc_vfs; i++) {
- usleep_range(10000, 20000);
/* Check each VF in sequence */
while (v < pf->num_alloc_vfs) {
@@ -1039,8 +1081,11 @@ bool ice_reset_all_vfs(struct ice_pf *pf, bool is_vflr)
vf = &pf->vf[v];
reg = rd32(hw, VPGEN_VFRSTAT(vf->vf_id));
- if (!(reg & VPGEN_VFRSTAT_VFRD_M))
+ if (!(reg & VPGEN_VFRSTAT_VFRD_M)) {
+ /* only delay if the check failed */
+ usleep_range(10, 20);
break;
+ }
/* If the current VF has finished resetting, move on
* to the next VF in sequence.
@@ -1054,7 +1099,6 @@ bool ice_reset_all_vfs(struct ice_pf *pf, bool is_vflr)
*/
if (v < pf->num_alloc_vfs)
dev_warn(&pf->pdev->dev, "VF reset check timeout\n");
- usleep_range(10000, 20000);
/* free VF resources to begin resetting the VSI state */
for (v = 0; v < pf->num_alloc_vfs; v++) {
@@ -1074,25 +1118,8 @@ bool ice_reset_all_vfs(struct ice_pf *pf, bool is_vflr)
dev_err(&pf->pdev->dev,
"Failed to free MSIX resources used by SR-IOV\n");
- if (ice_check_avail_res(pf)) {
- dev_err(&pf->pdev->dev,
- "Cannot allocate VF resources, try with fewer number of VFs\n");
+ if (!ice_config_res_vfs(pf))
return false;
- }
-
- /* Finish the reset on each VF */
- for (v = 0; v < pf->num_alloc_vfs; v++) {
- vf = &pf->vf[v];
-
- vf->num_vf_qs = pf->num_vf_qps;
- dev_dbg(&pf->pdev->dev,
- "VF-id %d has %d queues configured\n",
- vf->vf_id, vf->num_vf_qs);
- ice_cleanup_and_realloc_vf(vf);
- }
-
- ice_flush(hw);
- clear_bit(__ICE_VF_DIS, pf->state);
return true;
}
@@ -1145,12 +1172,14 @@ static bool ice_reset_vf(struct ice_vf *vf, bool is_vflr)
* poll the status register to make sure that the reset
* completed successfully.
*/
- usleep_range(10000, 20000);
reg = rd32(hw, VPGEN_VFRSTAT(vf->vf_id));
if (reg & VPGEN_VFRSTAT_VFRD_M) {
rsd = true;
break;
}
+
+ /* only sleep if the reset is not done */
+ usleep_range(10, 20);
}
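Both the PF_PCI_CIAD loop above and the VPGEN_VFRSTAT loops here now share the same "check first, back off only on a miss" shape; a generic sketch of the pattern (helper name hypothetical):

	static bool ice_poll_reg(struct ice_hw *hw, u32 addr, u32 mask, int tries)
	{
		while (tries--) {
			if (rd32(hw, addr) & mask)
				return true;	/* done, no sleep at all */
			usleep_range(10, 20);	/* microseconds, not the old
						 * 10-20 ms per iteration
						 */
		}
		return false;
	}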
/* Display a warning if VF didn't manage to reset in time, but need to
@@ -1160,8 +1189,6 @@ static bool ice_reset_vf(struct ice_vf *vf, bool is_vflr)
dev_warn(&pf->pdev->dev, "VF reset check timeout on VF %d\n",
vf->vf_id);
- usleep_range(10000, 20000);
-
/* disable promiscuous modes in case they were enabled
* ignore any error if disabling process failed
*/
@@ -1257,7 +1284,7 @@ static int ice_alloc_vfs(struct ice_pf *pf, u16 num_alloc_vfs)
/* Disable global interrupt 0 so we don't try to handle the VFLR. */
wr32(hw, GLINT_DYN_CTL(pf->oicr_idx),
ICE_ITR_NONE << GLINT_DYN_CTL_ITR_INDX_S);
-
+ set_bit(__ICE_OICR_INTR_DIS, pf->state);
ice_flush(hw);
ret = pci_enable_sriov(pf->pdev, num_alloc_vfs);
@@ -1283,19 +1310,16 @@ static int ice_alloc_vfs(struct ice_pf *pf, u16 num_alloc_vfs)
/* assign default capabilities */
set_bit(ICE_VIRTCHNL_VF_CAP_L2, &vfs[i].vf_caps);
vfs[i].spoofchk = true;
-
- /* Set this state so that PF driver does VF vector assignment */
- set_bit(ICE_VF_STATE_CFG_INTR, vfs[i].vf_states);
}
pf->num_alloc_vfs = num_alloc_vfs;
- /* VF resources get allocated during reset */
- if (!ice_reset_all_vfs(pf, true)) {
+ /* VF resources get allocated with initialization */
+ if (!ice_config_res_vfs(pf)) {
ret = -EIO;
goto err_unroll_sriov;
}
- goto err_unroll_intr;
+ return ret;
err_unroll_sriov:
pf->vf = NULL;
@@ -1307,6 +1331,7 @@ err_pci_disable_sriov:
err_unroll_intr:
/* rearm interrupts here */
ice_irq_dynamic_ena(hw, NULL, NULL);
+ clear_bit(__ICE_OICR_INTR_DIS, pf->state);
return ret;
}
@@ -1490,10 +1515,10 @@ ice_vc_send_msg_to_vf(struct ice_vf *vf, u32 v_opcode,
aq_ret = ice_aq_send_msg_to_vf(&pf->hw, vf->vf_id, v_opcode, v_retval,
msg, msglen, NULL);
- if (aq_ret) {
+ if (aq_ret && pf->hw.mailboxq.sq_last_status != ICE_AQ_RC_ENOSYS) {
dev_info(&pf->pdev->dev,
- "Unable to send the message to VF %d aq_err %d\n",
- vf->vf_id, pf->hw.mailboxq.sq_last_status);
+ "Unable to send the message to VF %d ret %d aq_err %d\n",
+ vf->vf_id, aq_ret, pf->hw.mailboxq.sq_last_status);
return -EIO;
}
@@ -1712,18 +1737,18 @@ static int ice_vc_config_rss_key(struct ice_vf *vf, u8 *msg)
goto error_param;
}
- vsi = pf->vsi[vf->lan_vsi_idx];
- if (!vsi) {
+ if (vrk->key_len != ICE_VSIQF_HKEY_ARRAY_SIZE) {
v_ret = VIRTCHNL_STATUS_ERR_PARAM;
goto error_param;
}
- if (vrk->key_len != ICE_VSIQF_HKEY_ARRAY_SIZE) {
+ if (!test_bit(ICE_FLAG_RSS_ENA, vf->pf->flags)) {
v_ret = VIRTCHNL_STATUS_ERR_PARAM;
goto error_param;
}
- if (!test_bit(ICE_FLAG_RSS_ENA, vf->pf->flags)) {
+ vsi = pf->vsi[vf->lan_vsi_idx];
+ if (!vsi) {
v_ret = VIRTCHNL_STATUS_ERR_PARAM;
goto error_param;
}
@@ -1759,18 +1784,18 @@ static int ice_vc_config_rss_lut(struct ice_vf *vf, u8 *msg)
goto error_param;
}
- vsi = pf->vsi[vf->lan_vsi_idx];
- if (!vsi) {
+ if (vrl->lut_entries != ICE_VSIQF_HLUT_ARRAY_SIZE) {
v_ret = VIRTCHNL_STATUS_ERR_PARAM;
goto error_param;
}
- if (vrl->lut_entries != ICE_VSIQF_HLUT_ARRAY_SIZE) {
+ if (!test_bit(ICE_FLAG_RSS_ENA, vf->pf->flags)) {
v_ret = VIRTCHNL_STATUS_ERR_PARAM;
goto error_param;
}
- if (!test_bit(ICE_FLAG_RSS_ENA, vf->pf->flags)) {
+ vsi = pf->vsi[vf->lan_vsi_idx];
+ if (!vsi) {
v_ret = VIRTCHNL_STATUS_ERR_PARAM;
goto error_param;
}
@@ -1855,6 +1880,12 @@ static int ice_vc_ena_qs_msg(struct ice_vf *vf, u8 *msg)
goto error_param;
}
+ if (vqs->rx_queues > ICE_MAX_BASE_QS_PER_VF ||
+ vqs->tx_queues > ICE_MAX_BASE_QS_PER_VF) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
vsi = pf->vsi[vf->lan_vsi_idx];
if (!vsi) {
v_ret = VIRTCHNL_STATUS_ERR_PARAM;
@@ -1910,6 +1941,12 @@ static int ice_vc_dis_qs_msg(struct ice_vf *vf, u8 *msg)
goto error_param;
}
+ if (vqs->rx_queues > ICE_MAX_BASE_QS_PER_VF ||
+ vqs->tx_queues > ICE_MAX_BASE_QS_PER_VF) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
vsi = pf->vsi[vf->lan_vsi_idx];
if (!vsi) {
v_ret = VIRTCHNL_STATUS_ERR_PARAM;
@@ -1962,12 +1999,6 @@ static int ice_vc_cfg_irq_map_msg(struct ice_vf *vf, u8 *msg)
irqmap_info = (struct virtchnl_irq_map_info *)msg;
num_q_vectors_mapped = irqmap_info->num_vectors;
- vsi = pf->vsi[vf->lan_vsi_idx];
- if (!vsi) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto error_param;
- }
-
/* Check to make sure number of VF vectors mapped is not greater than
* number of VF vectors originally allocated, and check that
* there is actually at least a single VF queue vector mapped
@@ -1979,6 +2010,12 @@ static int ice_vc_cfg_irq_map_msg(struct ice_vf *vf, u8 *msg)
goto error_param;
}
+ vsi = pf->vsi[vf->lan_vsi_idx];
+ if (!vsi) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
for (i = 0; i < num_q_vectors_mapped; i++) {
struct ice_q_vector *q_vector;
@@ -2070,10 +2107,6 @@ static int ice_vc_cfg_qs_msg(struct ice_vf *vf, u8 *msg)
goto error_param;
}
- vsi = pf->vsi[vf->lan_vsi_idx];
- if (!vsi)
- goto error_param;
-
if (qci->num_queue_pairs > ICE_MAX_BASE_QS_PER_VF) {
dev_err(&pf->pdev->dev,
"VF-%d requesting more than supported number of queues: %d\n",
@@ -2082,11 +2115,18 @@ static int ice_vc_cfg_qs_msg(struct ice_vf *vf, u8 *msg)
goto error_param;
}
+ vsi = pf->vsi[vf->lan_vsi_idx];
+ if (!vsi) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
for (i = 0; i < qci->num_queue_pairs; i++) {
qpi = &qci->qpair[i];
if (qpi->txq.vsi_id != qci->vsi_id ||
qpi->rxq.vsi_id != qci->vsi_id ||
qpi->rxq.queue_id != qpi->txq.queue_id ||
+ qpi->txq.headwb_enabled ||
!ice_vc_isvalid_q_id(vf, qci->vsi_id, qpi->txq.queue_id)) {
v_ret = VIRTCHNL_STATUS_ERR_PARAM;
goto error_param;
@@ -2171,7 +2211,7 @@ ice_vc_handle_mac_addr_msg(struct ice_vf *vf, u8 *msg, bool set)
(struct virtchnl_ether_addr_list *)msg;
struct ice_pf *pf = vf->pf;
enum virtchnl_ops vc_op;
- LIST_HEAD(mac_list);
+ enum ice_status status;
struct ice_vsi *vsi;
int mac_count = 0;
int i;
@@ -2245,33 +2285,32 @@ ice_vc_handle_mac_addr_msg(struct ice_vf *vf, u8 *msg, bool set)
goto handle_mac_exit;
}
- /* get here if maddr is multicast or if VF can change MAC */
- if (ice_add_mac_to_list(vsi, &mac_list, al->list[i].addr)) {
- v_ret = VIRTCHNL_STATUS_ERR_NO_MEMORY;
+ /* program the updated filter list */
+ status = ice_vsi_cfg_mac_fltr(vsi, maddr, set);
+ if (status == ICE_ERR_DOES_NOT_EXIST ||
+ status == ICE_ERR_ALREADY_EXISTS) {
+ dev_info(&pf->pdev->dev,
+ "can't %s MAC filters %pM for VF %d, error %d\n",
+ set ? "add" : "remove", maddr, vf->vf_id,
+ status);
+ } else if (status) {
+ dev_err(&pf->pdev->dev,
+ "can't %s MAC filters for VF %d, error %d\n",
+ set ? "add" : "remove", vf->vf_id, status);
+ v_ret = ice_err_to_virt_err(status);
goto handle_mac_exit;
}
+
mac_count++;
}
- /* program the updated filter list */
+ /* Track number of MAC filters programmed for the VF VSI */
if (set)
- v_ret = ice_err_to_virt_err(ice_add_mac(&pf->hw, &mac_list));
+ vf->num_mac += mac_count;
else
- v_ret = ice_err_to_virt_err(ice_remove_mac(&pf->hw, &mac_list));
-
- if (v_ret) {
- dev_err(&pf->pdev->dev,
- "can't update MAC filters for VF %d, error %d\n",
- vf->vf_id, v_ret);
- } else {
- if (set)
- vf->num_mac += mac_count;
- else
- vf->num_mac -= mac_count;
- }
+ vf->num_mac -= mac_count;
handle_mac_exit:
- ice_free_fltr_list(&pf->pdev->dev, &mac_list);
/* send the response to the VF */
return ice_vc_send_msg_to_vf(vf, vc_op, v_ret, NULL, 0);
}
@@ -2315,11 +2354,11 @@ static int ice_vc_request_qs_msg(struct ice_vf *vf, u8 *msg)
enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
struct virtchnl_vf_res_request *vfres =
(struct virtchnl_vf_res_request *)msg;
- int req_queues = vfres->num_queue_pairs;
+ u16 req_queues = vfres->num_queue_pairs;
struct ice_pf *pf = vf->pf;
- int max_allowed_vf_queues;
- int tx_rx_queue_left;
- int cur_queues;
+ u16 max_allowed_vf_queues;
+ u16 tx_rx_queue_left;
+ u16 cur_queues;
if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
v_ret = VIRTCHNL_STATUS_ERR_PARAM;
@@ -2327,29 +2366,30 @@ static int ice_vc_request_qs_msg(struct ice_vf *vf, u8 *msg)
}
cur_queues = vf->num_vf_qs;
- tx_rx_queue_left = min_t(int, pf->q_left_tx, pf->q_left_rx);
+ tx_rx_queue_left = min_t(u16, pf->q_left_tx, pf->q_left_rx);
max_allowed_vf_queues = tx_rx_queue_left + cur_queues;
- if (req_queues <= 0) {
+ if (!req_queues) {
dev_err(&pf->pdev->dev,
- "VF %d tried to request %d queues. Ignoring.\n",
- vf->vf_id, req_queues);
+ "VF %d tried to request 0 queues. Ignoring.\n",
+ vf->vf_id);
} else if (req_queues > ICE_MAX_BASE_QS_PER_VF) {
dev_err(&pf->pdev->dev,
"VF %d tried to request more than %d queues.\n",
vf->vf_id, ICE_MAX_BASE_QS_PER_VF);
vfres->num_queue_pairs = ICE_MAX_BASE_QS_PER_VF;
- } else if (req_queues - cur_queues > tx_rx_queue_left) {
+ } else if (req_queues > cur_queues &&
+ req_queues - cur_queues > tx_rx_queue_left) {
dev_warn(&pf->pdev->dev,
- "VF %d requested %d more queues, but only %d left.\n",
+ "VF %d requested %u more queues, but only %u left.\n",
vf->vf_id, req_queues - cur_queues, tx_rx_queue_left);
- vfres->num_queue_pairs = min_t(int, max_allowed_vf_queues,
+ vfres->num_queue_pairs = min_t(u16, max_allowed_vf_queues,
ICE_MAX_BASE_QS_PER_VF);
} else {
/* request is successful, then reset VF */
vf->num_req_qs = req_queues;
ice_vc_dis_vf(vf);
dev_info(&pf->pdev->dev,
- "VF %d granted request of %d queues.\n",
+ "VF %d granted request of %u queues.\n",
vf->vf_id, req_queues);
return 0;
}
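A walk-through of the clamping with made-up numbers: cur_queues = 4 and tx_rx_queue_left = 8, so max_allowed_vf_queues = 12.

	/*   req_queues = 0  -> ignored, current allocation kept
	 *   req_queues = 20 -> above ICE_MAX_BASE_QS_PER_VF (16), offer 16
	 *   req_queues = 14 -> needs 10 more but only 8 left, offer
	 *                      min(12, 16) = 12
	 *   req_queues = 10 -> needs 6 more, fits -> VF is reset and granted 10
	 */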
@@ -2731,20 +2771,6 @@ void ice_vc_process_vf_msg(struct ice_pf *pf, struct ice_rq_event_info *event)
err = -EPERM;
else
err = -EINVAL;
- goto error_handler;
- }
-
- /* Perform additional checks specific to RSS and Virtchnl */
- if (v_opcode == VIRTCHNL_OP_CONFIG_RSS_KEY) {
- struct virtchnl_rss_key *vrk = (struct virtchnl_rss_key *)msg;
-
- if (vrk->key_len != ICE_VSIQF_HKEY_ARRAY_SIZE)
- err = -EINVAL;
- } else if (v_opcode == VIRTCHNL_OP_CONFIG_RSS_LUT) {
- struct virtchnl_rss_lut *vrl = (struct virtchnl_rss_lut *)msg;
-
- if (vrl->lut_entries != ICE_VSIQF_HLUT_ARRAY_SIZE)
- err = -EINVAL;
}
error_handler:
diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h
index c3ca522c245a..4d94853f119a 100644
--- a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h
+++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h
@@ -22,6 +22,23 @@
#define VF_DEVICE_STATUS 0xAA
#define VF_TRANS_PENDING_M 0x20
+/* wait defines for polling PF_PCI_CIAD register status */
+#define ICE_PCI_CIAD_WAIT_COUNT 100
+#define ICE_PCI_CIAD_WAIT_DELAY_US 1
+
+/* VF resource default values and limitations */
+#define ICE_MAX_VF_COUNT 256
+#define ICE_MAX_QS_PER_VF 256
+#define ICE_MIN_QS_PER_VF 1
+#define ICE_DFLT_QS_PER_VF 4
+#define ICE_NONQ_VECS_VF 1
+#define ICE_MAX_SCATTER_QS_PER_VF 16
+#define ICE_MAX_BASE_QS_PER_VF 16
+#define ICE_MAX_INTR_PER_VF 65
+#define ICE_MAX_POLICY_INTR_PER_VF 33
+#define ICE_MIN_INTR_PER_VF (ICE_MIN_QS_PER_VF + 1)
+#define ICE_DFLT_INTR_PER_VF (ICE_DFLT_QS_PER_VF + 1)
+
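The +1 in the derived interrupt defines is the non-queue (OICR/mailbox) vector reserved per VF, so for example:

	/* ICE_MIN_INTR_PER_VF  = ICE_MIN_QS_PER_VF  + 1 = 2
	 * ICE_DFLT_INTR_PER_VF = ICE_DFLT_QS_PER_VF + 1 = 5
	 * (ICE_NONQ_VECS_VF accounts for the same reserved vector)
	 */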
/* Specific VF states */
enum ice_vf_states {
ICE_VF_STATE_INIT = 0,
@@ -30,11 +47,6 @@ enum ice_vf_states {
ICE_VF_STATE_DIS,
ICE_VF_STATE_MC_PROMISC,
ICE_VF_STATE_UC_PROMISC,
- /* state to indicate if PF needs to do vector assignment for VF.
- * This needs to be set during first time VF initialization or later
- * when VF asks for more Vectors through virtchnl OP.
- */
- ICE_VF_STATE_CFG_INTR,
ICE_VF_STATES_NBITS
};
@@ -50,7 +62,8 @@ struct ice_vf {
s16 vf_id; /* VF ID in the PF space */
u16 lan_vsi_idx; /* index into PF struct */
- int first_vector_idx; /* first vector index of this VF */
+ /* first vector index of this VF in the PF space */
+ int first_vector_idx;
struct ice_sw *vf_sw_id; /* switch ID the VF VSIs connect to */
struct virtchnl_version_info vf_ver;
u32 driver_caps; /* reported by VF driver */
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index b4df3e319467..105b0624081a 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -4731,8 +4731,7 @@ static void igb_clean_rx_ring(struct igb_ring *rx_ring)
{
u16 i = rx_ring->next_to_clean;
- if (rx_ring->skb)
- dev_kfree_skb(rx_ring->skb);
+ dev_kfree_skb(rx_ring->skb);
rx_ring->skb = NULL;
/* Free all the Rx ring sk_buffs */
@@ -5918,7 +5917,7 @@ static int igb_tx_map(struct igb_ring *tx_ring,
struct sk_buff *skb = first->skb;
struct igb_tx_buffer *tx_buffer;
union e1000_adv_tx_desc *tx_desc;
- struct skb_frag_struct *frag;
+ skb_frag_t *frag;
dma_addr_t dma;
unsigned int data_len, size;
u32 tx_flags = first->tx_flags;
@@ -6074,7 +6073,8 @@ netdev_tx_t igb_xmit_frame_ring(struct sk_buff *skb,
* otherwise try next time
*/
for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
- count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size);
+ count += TXD_USE_COUNT(skb_frag_size(
+ &skb_shinfo(skb)->frags[f]));
if (igb_maybe_stop_tx(tx_ring, count + 3)) {
/* this is a hard error */
@@ -8879,8 +8879,7 @@ static int __maybe_unused igb_resume(struct device *dev)
static int __maybe_unused igb_runtime_idle(struct device *dev)
{
- struct pci_dev *pdev = to_pci_dev(dev);
- struct net_device *netdev = pci_get_drvdata(pdev);
+ struct net_device *netdev = dev_get_drvdata(dev);
struct igb_adapter *adapter = netdev_priv(netdev);
if (!igb_has_link(adapter))
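The drvdata conversions in this series (igb here, jme, skge, and sky2 below) rely on pci_get_drvdata() being a thin wrapper over the generic device field; the kernel defines it essentially as:

	static inline void *pci_get_drvdata(struct pci_dev *pdev)
	{
		return dev_get_drvdata(&pdev->dev);
	}

so dev_get_drvdata(dev) in a PM callback reads the exact same pointer that pci_set_drvdata() stored, minus the to_pci_dev() hop.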
diff --git a/drivers/net/ethernet/intel/igbvf/netdev.c b/drivers/net/ethernet/intel/igbvf/netdev.c
index 34cd30d7162f..0f2b68f4bb0f 100644
--- a/drivers/net/ethernet/intel/igbvf/netdev.c
+++ b/drivers/net/ethernet/intel/igbvf/netdev.c
@@ -2174,7 +2174,7 @@ static inline int igbvf_tx_map_adv(struct igbvf_adapter *adapter,
goto dma_error;
for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) {
- const struct skb_frag_struct *frag;
+ const skb_frag_t *frag;
count++;
i++;
diff --git a/drivers/net/ethernet/intel/igc/igc_base.c b/drivers/net/ethernet/intel/igc/igc_base.c
index 59258d791106..db289bcce21d 100644
--- a/drivers/net/ethernet/intel/igc/igc_base.c
+++ b/drivers/net/ethernet/intel/igc/igc_base.c
@@ -40,7 +40,7 @@ static s32 igc_reset_hw_base(struct igc_hw *hw)
ctrl = rd32(IGC_CTRL);
hw_dbg("Issuing a global reset to MAC\n");
- wr32(IGC_CTRL, ctrl | IGC_CTRL_RST);
+ wr32(IGC_CTRL, ctrl | IGC_CTRL_DEV_RST);
ret_val = igc_get_auto_rd_done(hw);
if (ret_val) {
@@ -209,6 +209,9 @@ static s32 igc_get_invariants_base(struct igc_hw *hw)
switch (hw->device_id) {
case IGC_DEV_ID_I225_LM:
case IGC_DEV_ID_I225_V:
+ case IGC_DEV_ID_I225_I:
+ case IGC_DEV_ID_I220_V:
+ case IGC_DEV_ID_I225_K:
mac->type = igc_i225;
break;
default:
diff --git a/drivers/net/ethernet/intel/igc/igc_defines.h b/drivers/net/ethernet/intel/igc/igc_defines.h
index fc0ccfe38a20..11b99acf4abe 100644
--- a/drivers/net/ethernet/intel/igc/igc_defines.h
+++ b/drivers/net/ethernet/intel/igc/igc_defines.h
@@ -54,7 +54,7 @@
#define IGC_ERR_SWFW_SYNC 13
/* Device Control */
-#define IGC_CTRL_RST 0x04000000 /* Global reset */
+#define IGC_CTRL_DEV_RST 0x20000000 /* Device reset */
#define IGC_CTRL_PHY_RST 0x80000000 /* PHY Reset */
#define IGC_CTRL_SLU 0x00000040 /* Set link up (Force Link) */
diff --git a/drivers/net/ethernet/intel/igc/igc_hw.h b/drivers/net/ethernet/intel/igc/igc_hw.h
index 1039a224ac80..abb2d72911ff 100644
--- a/drivers/net/ethernet/intel/igc/igc_hw.h
+++ b/drivers/net/ethernet/intel/igc/igc_hw.h
@@ -18,6 +18,9 @@
#define IGC_DEV_ID_I225_LM 0x15F2
#define IGC_DEV_ID_I225_V 0x15F3
+#define IGC_DEV_ID_I225_I 0x15F8
+#define IGC_DEV_ID_I220_V 0x15F7
+#define IGC_DEV_ID_I225_K 0x3100
#define IGC_FUNC_0 0
@@ -151,16 +154,10 @@ struct igc_phy_info {
u16 autoneg_advertised;
u16 autoneg_mask;
- u16 cable_length;
- u16 max_cable_length;
- u16 min_cable_length;
- u16 pair_length[4];
u8 mdix;
- bool disable_polarity_correction;
bool is_mdix;
- bool polarity_correction;
bool reset_disable;
bool speed_downgraded;
bool autoneg_wait_to_complete;
@@ -190,12 +187,7 @@ struct igc_fc_info {
};
struct igc_dev_spec_base {
- bool global_device_reset;
- bool eee_disable;
bool clear_semaphore_once;
- bool module_plugged;
- u8 media_port;
- bool mas_capable;
};
struct igc_hw {
diff --git a/drivers/net/ethernet/intel/igc/igc_main.c b/drivers/net/ethernet/intel/igc/igc_main.c
index aa9323e55406..251552855c40 100644
--- a/drivers/net/ethernet/intel/igc/igc_main.c
+++ b/drivers/net/ethernet/intel/igc/igc_main.c
@@ -36,6 +36,9 @@ static const struct igc_info *igc_info_tbl[] = {
static const struct pci_device_id igc_pci_tbl[] = {
{ PCI_VDEVICE(INTEL, IGC_DEV_ID_I225_LM), board_base },
{ PCI_VDEVICE(INTEL, IGC_DEV_ID_I225_V), board_base },
+ { PCI_VDEVICE(INTEL, IGC_DEV_ID_I225_I), board_base },
+ { PCI_VDEVICE(INTEL, IGC_DEV_ID_I220_V), board_base },
+ { PCI_VDEVICE(INTEL, IGC_DEV_ID_I225_K), board_base },
/* required last entry */
{0, }
};
@@ -349,8 +352,7 @@ static void igc_clean_rx_ring(struct igc_ring *rx_ring)
{
u16 i = rx_ring->next_to_clean;
- if (rx_ring->skb)
- dev_kfree_skb(rx_ring->skb);
+ dev_kfree_skb(rx_ring->skb);
rx_ring->skb = NULL;
/* Free all the Rx ring sk_buffs */
@@ -861,7 +863,7 @@ static int igc_tx_map(struct igc_ring *tx_ring,
struct igc_tx_buffer *tx_buffer;
union igc_adv_tx_desc *tx_desc;
u32 tx_flags = first->tx_flags;
- struct skb_frag_struct *frag;
+ skb_frag_t *frag;
u16 i = tx_ring->next_to_use;
unsigned int data_len, size;
dma_addr_t dma;
@@ -1015,7 +1017,8 @@ static netdev_tx_t igc_xmit_frame_ring(struct sk_buff *skb,
* otherwise try next time
*/
for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
- count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size);
+ count += TXD_USE_COUNT(skb_frag_size(
+ &skb_shinfo(skb)->frags[f]));
if (igc_maybe_stop_tx(tx_ring, count + 3)) {
/* this is a hard error */
diff --git a/drivers/net/ethernet/intel/ixgb/ixgb_main.c b/drivers/net/ethernet/intel/ixgb/ixgb_main.c
index e5ac2d3fd816..0940a0da16f2 100644
--- a/drivers/net/ethernet/intel/ixgb/ixgb_main.c
+++ b/drivers/net/ethernet/intel/ixgb/ixgb_main.c
@@ -1331,9 +1331,7 @@ ixgb_tx_map(struct ixgb_adapter *adapter, struct sk_buff *skb,
}
for (f = 0; f < nr_frags; f++) {
- const struct skb_frag_struct *frag;
-
- frag = &skb_shinfo(skb)->frags[f];
+ const skb_frag_t *frag = &skb_shinfo(skb)->frags[f];
len = skb_frag_size(frag);
offset = 0;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_debugfs.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_debugfs.c
index 50dfb02fa34c..171cdc552961 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_debugfs.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_debugfs.c
@@ -190,22 +190,12 @@ static const struct file_operations ixgbe_dbg_netdev_ops_fops = {
void ixgbe_dbg_adapter_init(struct ixgbe_adapter *adapter)
{
const char *name = pci_name(adapter->pdev);
- struct dentry *pfile;
+
adapter->ixgbe_dbg_adapter = debugfs_create_dir(name, ixgbe_dbg_root);
- if (adapter->ixgbe_dbg_adapter) {
- pfile = debugfs_create_file("reg_ops", 0600,
- adapter->ixgbe_dbg_adapter, adapter,
- &ixgbe_dbg_reg_ops_fops);
- if (!pfile)
- e_dev_err("debugfs reg_ops for %s failed\n", name);
- pfile = debugfs_create_file("netdev_ops", 0600,
- adapter->ixgbe_dbg_adapter, adapter,
- &ixgbe_dbg_netdev_ops_fops);
- if (!pfile)
- e_dev_err("debugfs netdev_ops for %s failed\n", name);
- } else {
- e_dev_err("debugfs entry for %s failed\n", name);
- }
+ debugfs_create_file("reg_ops", 0600, adapter->ixgbe_dbg_adapter,
+ adapter, &ixgbe_dbg_reg_ops_fops);
+ debugfs_create_file("netdev_ops", 0600, adapter->ixgbe_dbg_adapter,
+ adapter, &ixgbe_dbg_netdev_ops_fops);
}
/**
@@ -224,8 +214,6 @@ void ixgbe_dbg_adapter_exit(struct ixgbe_adapter *adapter)
void ixgbe_dbg_init(void)
{
ixgbe_dbg_root = debugfs_create_dir(ixgbe_driver_name, NULL);
- if (ixgbe_dbg_root == NULL)
- pr_err("init of debugfs failed\n");
}
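The dropped error handling here (and in the mvpp2 and skge hunks below) follows the debugfs convention that creation failures are non-fatal: a failed debugfs_create_dir() returns an ERR_PTR that later debugfs_create_* calls accept and quietly ignore. A hedged sketch (names and fops hypothetical):

	struct dentry *d = debugfs_create_dir("example", NULL);

	/* safe even if d is an ERR_PTR: the file is simply not created */
	debugfs_create_file("state", 0600, d, adapter, &example_fops);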
/**
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index 7882148abb43..17b7ae9f46ec 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -1785,7 +1785,7 @@ static bool ixgbe_is_non_eop(struct ixgbe_ring *rx_ring,
static void ixgbe_pull_tail(struct ixgbe_ring *rx_ring,
struct sk_buff *skb)
{
- struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0];
+ skb_frag_t *frag = &skb_shinfo(skb)->frags[0];
unsigned char *va;
unsigned int pull_len;
@@ -1807,7 +1807,7 @@ static void ixgbe_pull_tail(struct ixgbe_ring *rx_ring,
/* update all of the pointers */
skb_frag_size_sub(frag, pull_len);
- frag->page_offset += pull_len;
+ skb_frag_off_add(frag, pull_len);
skb->data_len -= pull_len;
skb->tail += pull_len;
}
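The skb_frag_struct -> skb_frag_t churn across this series swaps direct field access for accessors; the equivalences used in these hunks are:

	size = skb_frag_size(frag);		/* was frag->size */
	off  = skb_frag_off(frag);		/* was frag->page_offset */
	skb_frag_off_add(frag, pull_len);	/* was frag->page_offset += pull_len */
	addr = skb_frag_address(frag);		/* page_address(page) + offset */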
@@ -1840,11 +1840,11 @@ static void ixgbe_dma_sync_frag(struct ixgbe_ring *rx_ring,
skb_headlen(skb),
DMA_FROM_DEVICE);
} else {
- struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0];
+ skb_frag_t *frag = &skb_shinfo(skb)->frags[0];
dma_sync_single_range_for_cpu(rx_ring->dev,
IXGBE_CB(skb)->dma,
- frag->page_offset,
+ skb_frag_off(frag),
skb_frag_size(frag),
DMA_FROM_DEVICE);
}
@@ -8183,7 +8183,7 @@ static int ixgbe_tx_map(struct ixgbe_ring *tx_ring,
struct sk_buff *skb = first->skb;
struct ixgbe_tx_buffer *tx_buffer;
union ixgbe_adv_tx_desc *tx_desc;
- struct skb_frag_struct *frag;
+ skb_frag_t *frag;
dma_addr_t dma;
unsigned int data_len, size;
u32 tx_flags = first->tx_flags;
@@ -8602,7 +8602,8 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb,
* otherwise try next time
*/
for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
- count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size);
+ count += TXD_USE_COUNT(skb_frag_size(
+ &skb_shinfo(skb)->frags[f]));
if (ixgbe_maybe_stop_tx(tx_ring, count + 3)) {
tx_ring->tx_stats.tx_busy++;
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
index d2b41f9f87f8..8c011d4ce7a9 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
@@ -3949,7 +3949,7 @@ static void ixgbevf_tx_map(struct ixgbevf_ring *tx_ring,
struct sk_buff *skb = first->skb;
struct ixgbevf_tx_buffer *tx_buffer;
union ixgbe_adv_tx_desc *tx_desc;
- struct skb_frag_struct *frag;
+ skb_frag_t *frag;
dma_addr_t dma;
unsigned int data_len, size;
u32 tx_flags = first->tx_flags;
@@ -4134,8 +4134,11 @@ static int ixgbevf_xmit_frame_ring(struct sk_buff *skb,
* otherwise try next time
*/
#if PAGE_SIZE > IXGBE_MAX_DATA_PER_TXD
- for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
- count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size);
+ for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) {
+ skb_frag_t *frag = &skb_shinfo(skb)->frags[f];
+
+ count += TXD_USE_COUNT(skb_frag_size(frag));
+ }
#else
count += skb_shinfo(skb)->nr_frags;
#endif
diff --git a/drivers/net/ethernet/jme.c b/drivers/net/ethernet/jme.c
index 0b668357db4d..6d52cf5ce20e 100644
--- a/drivers/net/ethernet/jme.c
+++ b/drivers/net/ethernet/jme.c
@@ -2030,23 +2030,22 @@ jme_map_tx_skb(struct jme_adapter *jme, struct sk_buff *skb, int idx)
bool hidma = jme->dev->features & NETIF_F_HIGHDMA;
int i, nr_frags = skb_shinfo(skb)->nr_frags;
int mask = jme->tx_ring_mask;
- const struct skb_frag_struct *frag;
u32 len;
int ret = 0;
for (i = 0 ; i < nr_frags ; ++i) {
- frag = &skb_shinfo(skb)->frags[i];
+ const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+
ctxdesc = txdesc + ((idx + i + 2) & (mask));
ctxbi = txbi + ((idx + i + 2) & (mask));
ret = jme_fill_tx_map(jme->pdev, ctxdesc, ctxbi,
- skb_frag_page(frag),
- frag->page_offset, skb_frag_size(frag), hidma);
+ skb_frag_page(frag), skb_frag_off(frag),
+ skb_frag_size(frag), hidma);
if (ret) {
jme_drop_tx_map(jme, idx, i);
goto out;
}
-
}
len = skb_is_nonlinear(skb) ? skb_headlen(skb) : skb->len;
@@ -3193,8 +3192,7 @@ jme_shutdown(struct pci_dev *pdev)
static int
jme_suspend(struct device *dev)
{
- struct pci_dev *pdev = to_pci_dev(dev);
- struct net_device *netdev = pci_get_drvdata(pdev);
+ struct net_device *netdev = dev_get_drvdata(dev);
struct jme_adapter *jme = netdev_priv(netdev);
if (!netif_running(netdev))
@@ -3236,8 +3234,7 @@ jme_suspend(struct device *dev)
static int
jme_resume(struct device *dev)
{
- struct pci_dev *pdev = to_pci_dev(dev);
- struct net_device *netdev = pci_get_drvdata(pdev);
+ struct net_device *netdev = dev_get_drvdata(dev);
struct jme_adapter *jme = netdev_priv(netdev);
if (!netif_running(netdev))
diff --git a/drivers/net/ethernet/lantiq_xrx200.c b/drivers/net/ethernet/lantiq_xrx200.c
index cda641ef89af..900affbdcc0e 100644
--- a/drivers/net/ethernet/lantiq_xrx200.c
+++ b/drivers/net/ethernet/lantiq_xrx200.c
@@ -458,17 +458,11 @@ static int xrx200_probe(struct platform_device *pdev)
}
priv->chan_rx.dma.irq = platform_get_irq_byname(pdev, "rx");
- if (priv->chan_rx.dma.irq < 0) {
- dev_err(dev, "failed to get RX IRQ, %i\n",
- priv->chan_rx.dma.irq);
+ if (priv->chan_rx.dma.irq < 0)
return -ENOENT;
- }
priv->chan_tx.dma.irq = platform_get_irq_byname(pdev, "tx");
- if (priv->chan_tx.dma.irq < 0) {
- dev_err(dev, "failed to get TX IRQ, %i\n",
- priv->chan_tx.dma.irq);
+ if (priv->chan_tx.dma.irq < 0)
return -ENOENT;
- }
/* get the clock */
priv->clk = devm_clk_get(dev, NULL);
diff --git a/drivers/net/ethernet/marvell/mv643xx_eth.c b/drivers/net/ethernet/marvell/mv643xx_eth.c
index 88ea5ac83c93..82ea55ae5053 100644
--- a/drivers/net/ethernet/marvell/mv643xx_eth.c
+++ b/drivers/net/ethernet/marvell/mv643xx_eth.c
@@ -659,7 +659,7 @@ static inline unsigned int has_tiny_unaligned_frags(struct sk_buff *skb)
for (frag = 0; frag < skb_shinfo(skb)->nr_frags; frag++) {
const skb_frag_t *fragp = &skb_shinfo(skb)->frags[frag];
- if (skb_frag_size(fragp) <= 8 && fragp->page_offset & 7)
+ if (skb_frag_size(fragp) <= 8 && skb_frag_off(fragp) & 7)
return 1;
}
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index 895bfed26a8a..e49820675c8c 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -2350,10 +2350,10 @@ static int mvneta_tx_frag_process(struct mvneta_port *pp, struct sk_buff *skb,
for (i = 0; i < nr_frags; i++) {
skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
- void *addr = page_address(frag->page.p) + frag->page_offset;
+ void *addr = skb_frag_address(frag);
tx_desc = mvneta_txq_next_desc_get(txq);
- tx_desc->data_size = frag->size;
+ tx_desc->data_size = skb_frag_size(frag);
tx_desc->buf_phys_addr =
dma_map_single(pp->dev->dev.parent, addr,
@@ -4469,7 +4469,6 @@ static int mvneta_port_power_up(struct mvneta_port *pp, int phy_mode)
/* Device initialization routine */
static int mvneta_probe(struct platform_device *pdev)
{
- struct resource *res;
struct device_node *dn = pdev->dev.of_node;
struct device_node *bm_node;
struct mvneta_port *pp;
@@ -4553,8 +4552,7 @@ static int mvneta_probe(struct platform_device *pdev)
if (!IS_ERR(pp->clk_bus))
clk_prepare_enable(pp->clk_bus);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- pp->base = devm_ioremap_resource(&pdev->dev, res);
+ pp->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(pp->base)) {
err = PTR_ERR(pp->base);
goto err_clk;
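devm_platform_ioremap_resource(), used in the mvneta, mvneta_bm, mvpp2, and pxa168 hunks, folds the two-call idiom it replaces; its kernel implementation is essentially:

	void __iomem *devm_platform_ioremap_resource(struct platform_device *pdev,
						     unsigned int index)
	{
		struct resource *res;

		res = platform_get_resource(pdev, IORESOURCE_MEM, index);
		return devm_ioremap_resource(&pdev->dev, res);
	}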
diff --git a/drivers/net/ethernet/marvell/mvneta_bm.c b/drivers/net/ethernet/marvell/mvneta_bm.c
index 82ee2bcca6fd..46c942ef2287 100644
--- a/drivers/net/ethernet/marvell/mvneta_bm.c
+++ b/drivers/net/ethernet/marvell/mvneta_bm.c
@@ -411,15 +411,13 @@ static int mvneta_bm_probe(struct platform_device *pdev)
{
struct device_node *dn = pdev->dev.of_node;
struct mvneta_bm *priv;
- struct resource *res;
int err;
priv = devm_kzalloc(&pdev->dev, sizeof(struct mvneta_bm), GFP_KERNEL);
if (!priv)
return -ENOMEM;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- priv->reg_base = devm_ioremap_resource(&pdev->dev, res);
+ priv->reg_base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(priv->reg_base))
return PTR_ERR(priv->reg_base);
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2.h b/drivers/net/ethernet/marvell/mvpp2/mvpp2.h
index 4d9564ba68f6..ee3bab508ee8 100644
--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2.h
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2.h
@@ -829,9 +829,8 @@ struct mvpp2_pcpu_stats {
/* Per-CPU port control */
struct mvpp2_port_pcpu {
struct hrtimer tx_done_timer;
+ struct net_device *dev;
bool timer_scheduled;
- /* Tasklet for egress finalization */
- struct tasklet_struct tx_done_tasklet;
};
struct mvpp2_queue_vector {
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_debugfs.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_debugfs.c
index 274fb07362cb..4a3baa7e0142 100644
--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_debugfs.c
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_debugfs.c
@@ -452,8 +452,6 @@ static int mvpp2_dbgfs_flow_port_init(struct dentry *parent,
struct dentry *port_dir;
port_dir = debugfs_create_dir(port->dev->name, parent);
- if (IS_ERR(port_dir))
- return PTR_ERR(port_dir);
port_entry = &port->priv->dbgfs_entries->port_flow_entries[port->id];
@@ -480,8 +478,6 @@ static int mvpp2_dbgfs_flow_entry_init(struct dentry *parent,
sprintf(flow_entry_name, "%02d", flow);
flow_entry_dir = debugfs_create_dir(flow_entry_name, parent);
- if (!flow_entry_dir)
- return -ENOMEM;
entry = &priv->dbgfs_entries->flow_entries[flow];
@@ -514,8 +510,6 @@ static int mvpp2_dbgfs_flow_init(struct dentry *parent, struct mvpp2 *priv)
int i, ret;
flow_dir = debugfs_create_dir("flows", parent);
- if (!flow_dir)
- return -ENOMEM;
for (i = 0; i < MVPP2_N_PRS_FLOWS; i++) {
ret = mvpp2_dbgfs_flow_entry_init(flow_dir, priv, i);
@@ -539,8 +533,6 @@ static int mvpp2_dbgfs_prs_entry_init(struct dentry *parent,
sprintf(prs_entry_name, "%03d", tid);
prs_entry_dir = debugfs_create_dir(prs_entry_name, parent);
- if (!prs_entry_dir)
- return -ENOMEM;
entry = &priv->dbgfs_entries->prs_entries[tid];
@@ -578,8 +570,6 @@ static int mvpp2_dbgfs_prs_init(struct dentry *parent, struct mvpp2 *priv)
int i, ret;
prs_dir = debugfs_create_dir("parser", parent);
- if (!prs_dir)
- return -ENOMEM;
for (i = 0; i < MVPP2_PRS_TCAM_SRAM_SIZE; i++) {
ret = mvpp2_dbgfs_prs_entry_init(prs_dir, priv, i);
@@ -688,8 +678,6 @@ static int mvpp2_dbgfs_port_init(struct dentry *parent,
struct dentry *port_dir;
port_dir = debugfs_create_dir(port->dev->name, parent);
- if (IS_ERR(port_dir))
- return PTR_ERR(port_dir);
debugfs_create_file("parser_entries", 0444, port_dir, port,
&mvpp2_dbgfs_port_parser_fops);
@@ -716,15 +704,10 @@ void mvpp2_dbgfs_init(struct mvpp2 *priv, const char *name)
int ret, i;
mvpp2_root = debugfs_lookup(MVPP2_DRIVER_NAME, NULL);
- if (!mvpp2_root) {
+ if (!mvpp2_root)
mvpp2_root = debugfs_create_dir(MVPP2_DRIVER_NAME, NULL);
- if (IS_ERR(mvpp2_root))
- return;
- }
mvpp2_dir = debugfs_create_dir(name, mvpp2_root);
- if (IS_ERR(mvpp2_dir))
- return;
priv->dbgfs_dir = mvpp2_dir;
priv->dbgfs_entries = kzalloc(sizeof(*priv->dbgfs_entries), GFP_KERNEL);
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
index ccdd47f3b8fb..12e799e99803 100644
--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
@@ -2651,31 +2651,21 @@ handled:
return IRQ_HANDLED;
}
-static void mvpp2_timer_set(struct mvpp2_port_pcpu *port_pcpu)
-{
- ktime_t interval;
-
- if (!port_pcpu->timer_scheduled) {
- port_pcpu->timer_scheduled = true;
- interval = MVPP2_TXDONE_HRTIMER_PERIOD_NS;
- hrtimer_start(&port_pcpu->tx_done_timer, interval,
- HRTIMER_MODE_REL_PINNED);
- }
-}
-
-static void mvpp2_tx_proc_cb(unsigned long data)
+static enum hrtimer_restart mvpp2_hr_timer_cb(struct hrtimer *timer)
{
- struct net_device *dev = (struct net_device *)data;
- struct mvpp2_port *port = netdev_priv(dev);
+ struct net_device *dev;
+ struct mvpp2_port *port;
struct mvpp2_port_pcpu *port_pcpu;
unsigned int tx_todo, cause;
- port_pcpu = per_cpu_ptr(port->pcpu,
- mvpp2_cpu_to_thread(port->priv, smp_processor_id()));
+ port_pcpu = container_of(timer, struct mvpp2_port_pcpu, tx_done_timer);
+ dev = port_pcpu->dev;
if (!netif_running(dev))
- return;
+ return HRTIMER_NORESTART;
+
port_pcpu->timer_scheduled = false;
+ port = netdev_priv(dev);
/* Process all the Tx queues */
cause = (1 << port->ntxqs) - 1;
@@ -2683,18 +2673,13 @@ static void mvpp2_tx_proc_cb(unsigned long data)
mvpp2_cpu_to_thread(port->priv, smp_processor_id()));
/* Set the timer in case not all the packets were processed */
- if (tx_todo)
- mvpp2_timer_set(port_pcpu);
-}
-
-static enum hrtimer_restart mvpp2_hr_timer_cb(struct hrtimer *timer)
-{
- struct mvpp2_port_pcpu *port_pcpu = container_of(timer,
- struct mvpp2_port_pcpu,
- tx_done_timer);
-
- tasklet_schedule(&port_pcpu->tx_done_tasklet);
+ if (tx_todo && !port_pcpu->timer_scheduled) {
+ port_pcpu->timer_scheduled = true;
+ hrtimer_forward_now(&port_pcpu->tx_done_timer,
+ MVPP2_TXDONE_HRTIMER_PERIOD_NS);
+ return HRTIMER_RESTART;
+ }
return HRTIMER_NORESTART;
}
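With HRTIMER_MODE_REL_PINNED_SOFT the callback already runs in softirq context, which is what makes the tasklet indirection removable. The re-arm shape used above, reduced to a sketch (predicate and period name are hypothetical):

	static enum hrtimer_restart my_tx_done_cb(struct hrtimer *timer)
	{
		if (tx_work_pending()) {	/* hypothetical predicate */
			/* re-arm relative to now and stay armed */
			hrtimer_forward_now(timer, ns_to_ktime(MY_PERIOD_NS));
			return HRTIMER_RESTART;
		}
		return HRTIMER_NORESTART;	/* let the timer expire */
	}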
@@ -2923,14 +2908,15 @@ static int mvpp2_tx_frag_process(struct mvpp2_port *port, struct sk_buff *skb,
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
- void *addr = page_address(frag->page.p) + frag->page_offset;
+ void *addr = skb_frag_address(frag);
tx_desc = mvpp2_txq_next_desc_get(aggr_txq);
mvpp2_txdesc_txq_set(port, tx_desc, txq->id);
- mvpp2_txdesc_size_set(port, tx_desc, frag->size);
+ mvpp2_txdesc_size_set(port, tx_desc, skb_frag_size(frag));
buf_dma_addr = dma_map_single(port->dev->dev.parent, addr,
- frag->size, DMA_TO_DEVICE);
+ skb_frag_size(frag),
+ DMA_TO_DEVICE);
if (dma_mapping_error(port->dev->dev.parent, buf_dma_addr)) {
mvpp2_txq_desc_put(txq);
goto cleanup;
@@ -3181,7 +3167,12 @@ out:
txq_pcpu->count > 0) {
struct mvpp2_port_pcpu *port_pcpu = per_cpu_ptr(port->pcpu, thread);
- mvpp2_timer_set(port_pcpu);
+ if (!port_pcpu->timer_scheduled) {
+ port_pcpu->timer_scheduled = true;
+ hrtimer_start(&port_pcpu->tx_done_timer,
+ MVPP2_TXDONE_HRTIMER_PERIOD_NS,
+ HRTIMER_MODE_REL_PINNED_SOFT);
+ }
}
if (test_bit(thread, &port->priv->lock_map))
@@ -3618,7 +3609,6 @@ static int mvpp2_stop(struct net_device *dev)
hrtimer_cancel(&port_pcpu->tx_done_timer);
port_pcpu->timer_scheduled = false;
- tasklet_kill(&port_pcpu->tx_done_tasklet);
}
}
mvpp2_cleanup_rxqs(port);
@@ -5010,7 +5000,6 @@ static int mvpp2_port_probe(struct platform_device *pdev,
struct device_node *port_node = to_of_node(port_fwnode);
netdev_features_t features;
struct net_device *dev;
- struct resource *res;
struct phylink *phylink;
char *mac_from = "";
unsigned int ntxqs, nrxqs, thread;
@@ -5114,8 +5103,7 @@ static int mvpp2_port_probe(struct platform_device *pdev,
port->comphy = comphy;
if (priv->hw_version == MVPP21) {
- res = platform_get_resource(pdev, IORESOURCE_MEM, 2 + id);
- port->base = devm_ioremap_resource(&pdev->dev, res);
+ port->base = devm_platform_ioremap_resource(pdev, 2 + id);
if (IS_ERR(port->base)) {
err = PTR_ERR(port->base);
goto err_free_irq;
@@ -5184,13 +5172,10 @@ static int mvpp2_port_probe(struct platform_device *pdev,
port_pcpu = per_cpu_ptr(port->pcpu, thread);
hrtimer_init(&port_pcpu->tx_done_timer, CLOCK_MONOTONIC,
- HRTIMER_MODE_REL_PINNED);
+ HRTIMER_MODE_REL_PINNED_SOFT);
port_pcpu->tx_done_timer.function = mvpp2_hr_timer_cb;
port_pcpu->timer_scheduled = false;
-
- tasklet_init(&port_pcpu->tx_done_tasklet,
- mvpp2_tx_proc_cb,
- (unsigned long)dev);
+ port_pcpu->dev = dev;
}
}
@@ -5544,14 +5529,12 @@ static int mvpp2_probe(struct platform_device *pdev)
if (priv->hw_version == MVPP21)
queue_mode = MVPP2_QDIST_SINGLE_MODE;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- base = devm_ioremap_resource(&pdev->dev, res);
+ base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(base))
return PTR_ERR(base);
if (priv->hw_version == MVPP21) {
- res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
- priv->lms_base = devm_ioremap_resource(&pdev->dev, res);
+ priv->lms_base = devm_platform_ioremap_resource(pdev, 1);
if (IS_ERR(priv->lms_base))
return PTR_ERR(priv->lms_base);
} else {
diff --git a/drivers/net/ethernet/marvell/pxa168_eth.c b/drivers/net/ethernet/marvell/pxa168_eth.c
index 3aa998797bc1..51b77c2de400 100644
--- a/drivers/net/ethernet/marvell/pxa168_eth.c
+++ b/drivers/net/ethernet/marvell/pxa168_eth.c
@@ -1425,8 +1425,7 @@ static int pxa168_eth_probe(struct platform_device *pdev)
pep->dev = dev;
pep->clk = clk;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- pep->base = devm_ioremap_resource(&pdev->dev, res);
+ pep->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(pep->base)) {
err = -ENOMEM;
goto err_netdev;
diff --git a/drivers/net/ethernet/marvell/skge.c b/drivers/net/ethernet/marvell/skge.c
index 9ac854c2b371..0a2ec387a482 100644
--- a/drivers/net/ethernet/marvell/skge.c
+++ b/drivers/net/ethernet/marvell/skge.c
@@ -3731,7 +3731,6 @@ static int skge_device_event(struct notifier_block *unused,
{
struct net_device *dev = netdev_notifier_info_to_dev(ptr);
struct skge_port *skge;
- struct dentry *d;
if (dev->netdev_ops->ndo_open != &skge_up || !skge_debug)
goto done;
@@ -3739,33 +3738,20 @@ static int skge_device_event(struct notifier_block *unused,
skge = netdev_priv(dev);
switch (event) {
case NETDEV_CHANGENAME:
- if (skge->debugfs) {
- d = debugfs_rename(skge_debug, skge->debugfs,
- skge_debug, dev->name);
- if (d)
- skge->debugfs = d;
- else {
- netdev_info(dev, "rename failed\n");
- debugfs_remove(skge->debugfs);
- }
- }
+ if (skge->debugfs)
+ skge->debugfs = debugfs_rename(skge_debug,
+ skge->debugfs,
+ skge_debug, dev->name);
break;
case NETDEV_GOING_DOWN:
- if (skge->debugfs) {
- debugfs_remove(skge->debugfs);
- skge->debugfs = NULL;
- }
+ debugfs_remove(skge->debugfs);
+ skge->debugfs = NULL;
break;
case NETDEV_UP:
- d = debugfs_create_file(dev->name, 0444,
- skge_debug, dev,
- &skge_debug_fops);
- if (!d || IS_ERR(d))
- netdev_info(dev, "debugfs create failed\n");
- else
- skge->debugfs = d;
+ skge->debugfs = debugfs_create_file(dev->name, 0444, skge_debug,
+ dev, &skge_debug_fops);
break;
}
@@ -3780,15 +3766,8 @@ static struct notifier_block skge_notifier = {
static __init void skge_debug_init(void)
{
- struct dentry *ent;
+ skge_debug = debugfs_create_dir("skge", NULL);
- ent = debugfs_create_dir("skge", NULL);
- if (!ent || IS_ERR(ent)) {
- pr_info("debugfs create directory failed\n");
- return;
- }
-
- skge_debug = ent;
register_netdevice_notifier(&skge_notifier);
}
@@ -4078,8 +4057,7 @@ static void skge_remove(struct pci_dev *pdev)
#ifdef CONFIG_PM_SLEEP
static int skge_suspend(struct device *dev)
{
- struct pci_dev *pdev = to_pci_dev(dev);
- struct skge_hw *hw = pci_get_drvdata(pdev);
+ struct skge_hw *hw = dev_get_drvdata(dev);
int i;
if (!hw)
@@ -4103,8 +4081,7 @@ static int skge_suspend(struct device *dev)
static int skge_resume(struct device *dev)
{
- struct pci_dev *pdev = to_pci_dev(dev);
- struct skge_hw *hw = pci_get_drvdata(pdev);
+ struct skge_hw *hw = dev_get_drvdata(dev);
int i, err;
if (!hw)
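
The suspend/resume callbacks already receive a struct device, so the round trip through to_pci_dev() adds nothing: pci_get_drvdata() is a thin wrapper, as a sketch of its definition in include/linux/pci.h shows:

	/* Why dev_get_drvdata(dev) is equivalent in these PM callbacks */
	static inline void *pci_get_drvdata(struct pci_dev *pdev)
	{
		return dev_get_drvdata(&pdev->dev);
	}
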
diff --git a/drivers/net/ethernet/marvell/sky2.c b/drivers/net/ethernet/marvell/sky2.c
index a01c75ede871..c2e00bb587cd 100644
--- a/drivers/net/ethernet/marvell/sky2.c
+++ b/drivers/net/ethernet/marvell/sky2.c
@@ -5167,8 +5167,7 @@ static void sky2_remove(struct pci_dev *pdev)
static int sky2_suspend(struct device *dev)
{
- struct pci_dev *pdev = to_pci_dev(dev);
- struct sky2_hw *hw = pci_get_drvdata(pdev);
+ struct sky2_hw *hw = dev_get_drvdata(dev);
int i;
if (!hw)
diff --git a/drivers/net/ethernet/mediatek/Kconfig b/drivers/net/ethernet/mediatek/Kconfig
index 1f7fff81f24d..b76cf2e1c9dc 100644
--- a/drivers/net/ethernet/mediatek/Kconfig
+++ b/drivers/net/ethernet/mediatek/Kconfig
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0-only
config NET_VENDOR_MEDIATEK
bool "MediaTek ethernet driver"
- depends on ARCH_MEDIATEK || SOC_MT7621
+ depends on ARCH_MEDIATEK || SOC_MT7621 || SOC_MT7620
---help---
If you have a Mediatek SoC with ethernet, say Y.
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_path.c b/drivers/net/ethernet/mediatek/mtk_eth_path.c
index 7f05880cf9ef..28960e4c4e43 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_path.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_path.c
@@ -315,6 +315,10 @@ int mtk_setup_hw_path(struct mtk_eth *eth, int mac_id, int phymode)
{
int err;
+ /* No mux'ing for MT7628/88 */
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628))
+ return 0;
+
switch (phymode) {
case PHY_INTERFACE_MODE_TRGMII:
case PHY_INTERFACE_MODE_RGMII_TXID:
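
MTK_HAS_CAPS(), used for every SoC check in this series, requires all queried bits to be set, not just one of them (the macro itself appears in mtk_eth_soc.h further down). A small worked example:

	/* MTK_HAS_CAPS(caps, x) expands to (((caps) & (x)) == (x)) */
	u32 caps = MTK_QDMA | MTK_HWLRO;

	MTK_HAS_CAPS(caps, MTK_QDMA);			/* true: bit present */
	MTK_HAS_CAPS(caps, MTK_QDMA | MTK_SOC_MT7628);	/* false: MT7628 bit missing */
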
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
index c39d7f4ab1d4..8ddbb8dcf032 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
@@ -323,11 +323,14 @@ static int mtk_phy_connect(struct net_device *dev)
goto err_phy;
}
- /* put the gmac into the right mode */
- regmap_read(eth->ethsys, ETHSYS_SYSCFG0, &val);
- val &= ~SYSCFG0_GE_MODE(SYSCFG0_GE_MASK, mac->id);
- val |= SYSCFG0_GE_MODE(mac->ge_mode, mac->id);
- regmap_write(eth->ethsys, ETHSYS_SYSCFG0, val);
+ /* No MT7628/88 support for now */
+ if (!MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
+ /* put the gmac into the right mode */
+ regmap_read(eth->ethsys, ETHSYS_SYSCFG0, &val);
+ val &= ~SYSCFG0_GE_MODE(SYSCFG0_GE_MASK, mac->id);
+ val |= SYSCFG0_GE_MODE(mac->ge_mode, mac->id);
+ regmap_write(eth->ethsys, ETHSYS_SYSCFG0, val);
+ }
/* couple phydev to net_device */
if (mtk_phy_connect_node(eth, mac, np))
@@ -395,8 +398,8 @@ static inline void mtk_tx_irq_disable(struct mtk_eth *eth, u32 mask)
u32 val;
spin_lock_irqsave(&eth->tx_irq_lock, flags);
- val = mtk_r32(eth, MTK_QDMA_INT_MASK);
- mtk_w32(eth, val & ~mask, MTK_QDMA_INT_MASK);
+ val = mtk_r32(eth, eth->tx_int_mask_reg);
+ mtk_w32(eth, val & ~mask, eth->tx_int_mask_reg);
spin_unlock_irqrestore(&eth->tx_irq_lock, flags);
}
@@ -406,8 +409,8 @@ static inline void mtk_tx_irq_enable(struct mtk_eth *eth, u32 mask)
u32 val;
spin_lock_irqsave(&eth->tx_irq_lock, flags);
- val = mtk_r32(eth, MTK_QDMA_INT_MASK);
- mtk_w32(eth, val | mask, MTK_QDMA_INT_MASK);
+ val = mtk_r32(eth, eth->tx_int_mask_reg);
+ mtk_w32(eth, val | mask, eth->tx_int_mask_reg);
spin_unlock_irqrestore(&eth->tx_irq_lock, flags);
}
@@ -437,6 +440,7 @@ static int mtk_set_mac_address(struct net_device *dev, void *p)
{
int ret = eth_mac_addr(dev, p);
struct mtk_mac *mac = netdev_priv(dev);
+ struct mtk_eth *eth = mac->hw;
const char *macaddr = dev->dev_addr;
if (ret)
@@ -446,11 +450,19 @@ static int mtk_set_mac_address(struct net_device *dev, void *p)
return -EBUSY;
spin_lock_bh(&mac->hw->page_lock);
- mtk_w32(mac->hw, (macaddr[0] << 8) | macaddr[1],
- MTK_GDMA_MAC_ADRH(mac->id));
- mtk_w32(mac->hw, (macaddr[2] << 24) | (macaddr[3] << 16) |
- (macaddr[4] << 8) | macaddr[5],
- MTK_GDMA_MAC_ADRL(mac->id));
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
+ mtk_w32(mac->hw, (macaddr[0] << 8) | macaddr[1],
+ MT7628_SDM_MAC_ADRH);
+ mtk_w32(mac->hw, (macaddr[2] << 24) | (macaddr[3] << 16) |
+ (macaddr[4] << 8) | macaddr[5],
+ MT7628_SDM_MAC_ADRL);
+ } else {
+ mtk_w32(mac->hw, (macaddr[0] << 8) | macaddr[1],
+ MTK_GDMA_MAC_ADRH(mac->id));
+ mtk_w32(mac->hw, (macaddr[2] << 24) | (macaddr[3] << 16) |
+ (macaddr[4] << 8) | macaddr[5],
+ MTK_GDMA_MAC_ADRL(mac->id));
+ }
spin_unlock_bh(&mac->hw->page_lock);
return 0;
@@ -626,19 +638,47 @@ static inline struct mtk_tx_buf *mtk_desc_to_tx_buf(struct mtk_tx_ring *ring,
return &ring->buf[idx];
}
+static struct mtk_tx_dma *qdma_to_pdma(struct mtk_tx_ring *ring,
+ struct mtk_tx_dma *dma)
+{
+ return ring->dma_pdma - ring->dma + dma;
+}
+
+static int txd_to_idx(struct mtk_tx_ring *ring, struct mtk_tx_dma *dma)
+{
+ return ((void *)dma - (void *)ring->dma) / sizeof(*dma);
+}
+
static void mtk_tx_unmap(struct mtk_eth *eth, struct mtk_tx_buf *tx_buf)
{
- if (tx_buf->flags & MTK_TX_FLAGS_SINGLE0) {
- dma_unmap_single(eth->dev,
- dma_unmap_addr(tx_buf, dma_addr0),
- dma_unmap_len(tx_buf, dma_len0),
- DMA_TO_DEVICE);
- } else if (tx_buf->flags & MTK_TX_FLAGS_PAGE0) {
- dma_unmap_page(eth->dev,
- dma_unmap_addr(tx_buf, dma_addr0),
- dma_unmap_len(tx_buf, dma_len0),
- DMA_TO_DEVICE);
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
+ if (tx_buf->flags & MTK_TX_FLAGS_SINGLE0) {
+ dma_unmap_single(eth->dev,
+ dma_unmap_addr(tx_buf, dma_addr0),
+ dma_unmap_len(tx_buf, dma_len0),
+ DMA_TO_DEVICE);
+ } else if (tx_buf->flags & MTK_TX_FLAGS_PAGE0) {
+ dma_unmap_page(eth->dev,
+ dma_unmap_addr(tx_buf, dma_addr0),
+ dma_unmap_len(tx_buf, dma_len0),
+ DMA_TO_DEVICE);
+ }
+ } else {
+ if (dma_unmap_len(tx_buf, dma_len0)) {
+ dma_unmap_page(eth->dev,
+ dma_unmap_addr(tx_buf, dma_addr0),
+ dma_unmap_len(tx_buf, dma_len0),
+ DMA_TO_DEVICE);
+ }
+
+ if (dma_unmap_len(tx_buf, dma_len1)) {
+ dma_unmap_page(eth->dev,
+ dma_unmap_addr(tx_buf, dma_addr1),
+ dma_unmap_len(tx_buf, dma_len1),
+ DMA_TO_DEVICE);
+ }
}
+
tx_buf->flags = 0;
if (tx_buf->skb &&
(tx_buf->skb != (struct sk_buff *)MTK_DMA_DUMMY_DESC))
@@ -646,19 +686,45 @@ static void mtk_tx_unmap(struct mtk_eth *eth, struct mtk_tx_buf *tx_buf)
tx_buf->skb = NULL;
}
+static void setup_tx_buf(struct mtk_eth *eth, struct mtk_tx_buf *tx_buf,
+ struct mtk_tx_dma *txd, dma_addr_t mapped_addr,
+ size_t size, int idx)
+{
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
+ dma_unmap_addr_set(tx_buf, dma_addr0, mapped_addr);
+ dma_unmap_len_set(tx_buf, dma_len0, size);
+ } else {
+ if (idx & 1) {
+ txd->txd3 = mapped_addr;
+ txd->txd2 |= TX_DMA_PLEN1(size);
+ dma_unmap_addr_set(tx_buf, dma_addr1, mapped_addr);
+ dma_unmap_len_set(tx_buf, dma_len1, size);
+ } else {
+ tx_buf->skb = (struct sk_buff *)MTK_DMA_DUMMY_DESC;
+ txd->txd1 = mapped_addr;
+ txd->txd2 = TX_DMA_PLEN0(size);
+ dma_unmap_addr_set(tx_buf, dma_addr0, mapped_addr);
+ dma_unmap_len_set(tx_buf, dma_len0, size);
+ }
+ }
+}
+
static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev,
int tx_num, struct mtk_tx_ring *ring, bool gso)
{
struct mtk_mac *mac = netdev_priv(dev);
struct mtk_eth *eth = mac->hw;
struct mtk_tx_dma *itxd, *txd;
+ struct mtk_tx_dma *itxd_pdma, *txd_pdma;
struct mtk_tx_buf *itx_buf, *tx_buf;
dma_addr_t mapped_addr;
unsigned int nr_frags;
int i, n_desc = 1;
u32 txd4 = 0, fport;
+ int k = 0;
itxd = ring->next_free;
+ itxd_pdma = qdma_to_pdma(ring, itxd);
if (itxd == ring->last_free)
return -ENOMEM;
@@ -689,26 +755,37 @@ static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev,
itx_buf->flags |= MTK_TX_FLAGS_SINGLE0;
itx_buf->flags |= (!mac->id) ? MTK_TX_FLAGS_FPORT0 :
MTK_TX_FLAGS_FPORT1;
- dma_unmap_addr_set(itx_buf, dma_addr0, mapped_addr);
- dma_unmap_len_set(itx_buf, dma_len0, skb_headlen(skb));
+ setup_tx_buf(eth, itx_buf, itxd_pdma, mapped_addr, skb_headlen(skb),
+ k++);
/* TX SG offload */
txd = itxd;
+ txd_pdma = qdma_to_pdma(ring, txd);
nr_frags = skb_shinfo(skb)->nr_frags;
+
for (i = 0; i < nr_frags; i++) {
- struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
+ skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
unsigned int offset = 0;
int frag_size = skb_frag_size(frag);
while (frag_size) {
bool last_frag = false;
unsigned int frag_map_size;
+ bool new_desc = true;
+
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA) ||
+ (i & 0x1)) {
+ txd = mtk_qdma_phys_to_virt(ring, txd->txd2);
+ txd_pdma = qdma_to_pdma(ring, txd);
+ if (txd == ring->last_free)
+ goto err_dma;
+
+ n_desc++;
+ } else {
+ new_desc = false;
+ }
- txd = mtk_qdma_phys_to_virt(ring, txd->txd2);
- if (txd == ring->last_free)
- goto err_dma;
- n_desc++;
frag_map_size = min(frag_size, MTK_TX_DMA_BUF_LEN);
mapped_addr = skb_frag_dma_map(eth->dev, frag, offset,
frag_map_size,
@@ -727,14 +804,16 @@ static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev,
WRITE_ONCE(txd->txd4, fport);
tx_buf = mtk_desc_to_tx_buf(ring, txd);
- memset(tx_buf, 0, sizeof(*tx_buf));
+ if (new_desc)
+ memset(tx_buf, 0, sizeof(*tx_buf));
tx_buf->skb = (struct sk_buff *)MTK_DMA_DUMMY_DESC;
tx_buf->flags |= MTK_TX_FLAGS_PAGE0;
tx_buf->flags |= (!mac->id) ? MTK_TX_FLAGS_FPORT0 :
MTK_TX_FLAGS_FPORT1;
- dma_unmap_addr_set(tx_buf, dma_addr0, mapped_addr);
- dma_unmap_len_set(tx_buf, dma_len0, frag_map_size);
+ setup_tx_buf(eth, tx_buf, txd_pdma, mapped_addr,
+ frag_map_size, k++);
+
frag_size -= frag_map_size;
offset += frag_map_size;
}
@@ -746,6 +825,12 @@ static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev,
WRITE_ONCE(itxd->txd4, txd4);
WRITE_ONCE(itxd->txd3, (TX_DMA_SWC | TX_DMA_PLEN0(skb_headlen(skb)) |
(!nr_frags * TX_DMA_LS0)));
+ if (!MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
+ if (k & 0x1)
+ txd_pdma->txd2 |= TX_DMA_LS0;
+ else
+ txd_pdma->txd2 |= TX_DMA_LS1;
+ }
netdev_sent_queue(dev, skb->len);
skb_tx_timestamp(skb);
@@ -758,9 +843,15 @@ static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev,
*/
wmb();
- if (netif_xmit_stopped(netdev_get_tx_queue(dev, 0)) ||
- !netdev_xmit_more())
- mtk_w32(eth, txd->txd2, MTK_QTX_CTX_PTR);
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
+ if (netif_xmit_stopped(netdev_get_tx_queue(dev, 0)) ||
+ !netdev_xmit_more())
+ mtk_w32(eth, txd->txd2, MTK_QTX_CTX_PTR);
+ } else {
+ int next_idx = NEXT_DESP_IDX(txd_to_idx(ring, txd),
+ ring->dma_size);
+ mtk_w32(eth, next_idx, MT7628_TX_CTX_IDX0);
+ }
return 0;
@@ -772,7 +863,11 @@ err_dma:
mtk_tx_unmap(eth, tx_buf);
itxd->txd3 = TX_DMA_LS0 | TX_DMA_OWNER_CPU;
+ if (!MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
+ itxd_pdma->txd2 = TX_DMA_DESP2_DEF;
+
itxd = mtk_qdma_phys_to_virt(ring, itxd->txd2);
+ itxd_pdma = qdma_to_pdma(ring, itxd);
} while (itxd != txd);
return -ENOMEM;
@@ -781,13 +876,14 @@ err_dma:
static inline int mtk_cal_txd_req(struct sk_buff *skb)
{
int i, nfrags;
- struct skb_frag_struct *frag;
+ skb_frag_t *frag;
nfrags = 1;
if (skb_is_gso(skb)) {
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
frag = &skb_shinfo(skb)->frags[i];
- nfrags += DIV_ROUND_UP(frag->size, MTK_TX_DMA_BUF_LEN);
+ nfrags += DIV_ROUND_UP(skb_frag_size(frag),
+ MTK_TX_DMA_BUF_LEN);
}
} else {
nfrags += skb_shinfo(skb)->nr_frags;
@@ -902,7 +998,7 @@ static struct mtk_rx_ring *mtk_get_rx_ring(struct mtk_eth *eth)
for (i = 0; i < MTK_MAX_RX_RING_NUM; i++) {
ring = &eth->rx_ring[i];
- idx = NEXT_RX_DESP_IDX(ring->calc_idx, ring->dma_size);
+ idx = NEXT_DESP_IDX(ring->calc_idx, ring->dma_size);
if (ring->dma[idx].rxd2 & RX_DMA_DONE) {
ring->calc_idx_update = true;
return ring;
@@ -945,13 +1041,13 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget,
struct net_device *netdev;
unsigned int pktlen;
dma_addr_t dma_addr;
- int mac = 0;
+ int mac;
ring = mtk_get_rx_ring(eth);
if (unlikely(!ring))
goto rx_done;
- idx = NEXT_RX_DESP_IDX(ring->calc_idx, ring->dma_size);
+ idx = NEXT_DESP_IDX(ring->calc_idx, ring->dma_size);
rxd = &ring->dma[idx];
data = ring->data[idx];
@@ -960,9 +1056,13 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget,
break;
/* find out which mac the packet comes from. values start at 1 */
- mac = (trxd.rxd4 >> RX_DMA_FPORT_SHIFT) &
- RX_DMA_FPORT_MASK;
- mac--;
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
+ mac = 0;
+ } else {
+ mac = (trxd.rxd4 >> RX_DMA_FPORT_SHIFT) &
+ RX_DMA_FPORT_MASK;
+ mac--;
+ }
if (unlikely(mac < 0 || mac >= MTK_MAC_COUNT ||
!eth->netdev[mac]))
@@ -980,7 +1080,8 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget,
goto release_desc;
}
dma_addr = dma_map_single(eth->dev,
- new_data + NET_SKB_PAD,
+ new_data + NET_SKB_PAD +
+ eth->ip_align,
ring->buf_size,
DMA_FROM_DEVICE);
if (unlikely(dma_mapping_error(eth->dev, dma_addr))) {
@@ -1003,7 +1104,7 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget,
pktlen = RX_DMA_GET_PLEN0(trxd.rxd2);
skb->dev = netdev;
skb_put(skb, pktlen);
- if (trxd.rxd4 & RX_DMA_L4_VALID)
+ if (trxd.rxd4 & eth->rx_dma_l4_valid)
skb->ip_summed = CHECKSUM_UNNECESSARY;
else
skb_checksum_none_assert(skb);
@@ -1020,7 +1121,10 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget,
rxd->rxd1 = (unsigned int)dma_addr;
release_desc:
- rxd->rxd2 = RX_DMA_PLEN0(ring->buf_size);
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628))
+ rxd->rxd2 = RX_DMA_LSO;
+ else
+ rxd->rxd2 = RX_DMA_PLEN0(ring->buf_size);
ring->calc_idx = idx;
@@ -1039,19 +1143,14 @@ rx_done:
return done;
}
-static int mtk_poll_tx(struct mtk_eth *eth, int budget)
+static int mtk_poll_tx_qdma(struct mtk_eth *eth, int budget,
+ unsigned int *done, unsigned int *bytes)
{
struct mtk_tx_ring *ring = &eth->tx_ring;
struct mtk_tx_dma *desc;
struct sk_buff *skb;
struct mtk_tx_buf *tx_buf;
- unsigned int done[MTK_MAX_DEVS];
- unsigned int bytes[MTK_MAX_DEVS];
u32 cpu, dma;
- int total = 0, i;
-
- memset(done, 0, sizeof(done));
- memset(bytes, 0, sizeof(bytes));
cpu = mtk_r32(eth, MTK_QTX_CRX_PTR);
dma = mtk_r32(eth, MTK_QTX_DRX_PTR);
@@ -1089,6 +1188,62 @@ static int mtk_poll_tx(struct mtk_eth *eth, int budget)
mtk_w32(eth, cpu, MTK_QTX_CRX_PTR);
+ return budget;
+}
+
+static int mtk_poll_tx_pdma(struct mtk_eth *eth, int budget,
+ unsigned int *done, unsigned int *bytes)
+{
+ struct mtk_tx_ring *ring = &eth->tx_ring;
+ struct mtk_tx_dma *desc;
+ struct sk_buff *skb;
+ struct mtk_tx_buf *tx_buf;
+ u32 cpu, dma;
+
+ cpu = ring->cpu_idx;
+ dma = mtk_r32(eth, MT7628_TX_DTX_IDX0);
+
+ while ((cpu != dma) && budget) {
+ tx_buf = &ring->buf[cpu];
+ skb = tx_buf->skb;
+ if (!skb)
+ break;
+
+ if (skb != (struct sk_buff *)MTK_DMA_DUMMY_DESC) {
+ bytes[0] += skb->len;
+ done[0]++;
+ budget--;
+ }
+
+ mtk_tx_unmap(eth, tx_buf);
+
+ desc = &ring->dma[cpu];
+ ring->last_free = desc;
+ atomic_inc(&ring->free_count);
+
+ cpu = NEXT_DESP_IDX(cpu, ring->dma_size);
+ }
+
+ ring->cpu_idx = cpu;
+
+ return budget;
+}
+
+static int mtk_poll_tx(struct mtk_eth *eth, int budget)
+{
+ struct mtk_tx_ring *ring = &eth->tx_ring;
+ unsigned int done[MTK_MAX_DEVS];
+ unsigned int bytes[MTK_MAX_DEVS];
+ int total = 0, i;
+
+ memset(done, 0, sizeof(done));
+ memset(bytes, 0, sizeof(bytes));
+
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
+ budget = mtk_poll_tx_qdma(eth, budget, done, bytes);
+ else
+ budget = mtk_poll_tx_pdma(eth, budget, done, bytes);
+
for (i = 0; i < MTK_MAC_COUNT; i++) {
if (!eth->netdev[i] || !done[i])
continue;
@@ -1120,13 +1275,14 @@ static int mtk_napi_tx(struct napi_struct *napi, int budget)
u32 status, mask;
int tx_done = 0;
- mtk_handle_status_irq(eth);
- mtk_w32(eth, MTK_TX_DONE_INT, MTK_QMTK_INT_STATUS);
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
+ mtk_handle_status_irq(eth);
+ mtk_w32(eth, MTK_TX_DONE_INT, eth->tx_int_status_reg);
tx_done = mtk_poll_tx(eth, budget);
if (unlikely(netif_msg_intr(eth))) {
- status = mtk_r32(eth, MTK_QMTK_INT_STATUS);
- mask = mtk_r32(eth, MTK_QDMA_INT_MASK);
+ status = mtk_r32(eth, eth->tx_int_status_reg);
+ mask = mtk_r32(eth, eth->tx_int_mask_reg);
dev_info(eth->dev,
"done tx %d, intr 0x%08x/0x%x\n",
tx_done, status, mask);
@@ -1135,7 +1291,7 @@ static int mtk_napi_tx(struct napi_struct *napi, int budget)
if (tx_done == budget)
return budget;
- status = mtk_r32(eth, MTK_QMTK_INT_STATUS);
+ status = mtk_r32(eth, eth->tx_int_status_reg);
if (status & MTK_TX_DONE_INT)
return budget;
@@ -1202,6 +1358,24 @@ static int mtk_tx_alloc(struct mtk_eth *eth)
ring->dma[i].txd3 = TX_DMA_LS0 | TX_DMA_OWNER_CPU;
}
+ /* On MT7688 (PDMA only) this driver uses the ring->dma structs
+ * only as the framework. The real HW descriptors are the PDMA
+ * descriptors in ring->dma_pdma.
+ */
+ if (!MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
+ ring->dma_pdma = dma_alloc_coherent(eth->dev, MTK_DMA_SIZE * sz,
+ &ring->phys_pdma,
+ GFP_ATOMIC);
+ if (!ring->dma_pdma)
+ goto no_tx_mem;
+
+ for (i = 0; i < MTK_DMA_SIZE; i++) {
+ ring->dma_pdma[i].txd2 = TX_DMA_DESP2_DEF;
+ ring->dma_pdma[i].txd4 = 0;
+ }
+ }
+
+ ring->dma_size = MTK_DMA_SIZE;
atomic_set(&ring->free_count, MTK_DMA_SIZE - 2);
ring->next_free = &ring->dma[0];
ring->last_free = &ring->dma[MTK_DMA_SIZE - 1];
@@ -1212,15 +1386,23 @@ static int mtk_tx_alloc(struct mtk_eth *eth)
*/
wmb();
- mtk_w32(eth, ring->phys, MTK_QTX_CTX_PTR);
- mtk_w32(eth, ring->phys, MTK_QTX_DTX_PTR);
- mtk_w32(eth,
- ring->phys + ((MTK_DMA_SIZE - 1) * sz),
- MTK_QTX_CRX_PTR);
- mtk_w32(eth,
- ring->phys + ((MTK_DMA_SIZE - 1) * sz),
- MTK_QTX_DRX_PTR);
- mtk_w32(eth, (QDMA_RES_THRES << 8) | QDMA_RES_THRES, MTK_QTX_CFG(0));
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
+ mtk_w32(eth, ring->phys, MTK_QTX_CTX_PTR);
+ mtk_w32(eth, ring->phys, MTK_QTX_DTX_PTR);
+ mtk_w32(eth,
+ ring->phys + ((MTK_DMA_SIZE - 1) * sz),
+ MTK_QTX_CRX_PTR);
+ mtk_w32(eth,
+ ring->phys + ((MTK_DMA_SIZE - 1) * sz),
+ MTK_QTX_DRX_PTR);
+ mtk_w32(eth, (QDMA_RES_THRES << 8) | QDMA_RES_THRES,
+ MTK_QTX_CFG(0));
+ } else {
+ mtk_w32(eth, ring->phys_pdma, MT7628_TX_BASE_PTR0);
+ mtk_w32(eth, MTK_DMA_SIZE, MT7628_TX_MAX_CNT0);
+ mtk_w32(eth, 0, MT7628_TX_CTX_IDX0);
+ mtk_w32(eth, MT7628_PST_DTX_IDX0, MTK_PDMA_RST_IDX);
+ }
return 0;
@@ -1247,6 +1429,14 @@ static void mtk_tx_clean(struct mtk_eth *eth)
ring->phys);
ring->dma = NULL;
}
+
+ if (ring->dma_pdma) {
+ dma_free_coherent(eth->dev,
+ MTK_DMA_SIZE * sizeof(*ring->dma_pdma),
+ ring->dma_pdma,
+ ring->phys_pdma);
+ ring->dma_pdma = NULL;
+ }
}
static int mtk_rx_alloc(struct mtk_eth *eth, int ring_no, int rx_flag)
@@ -1294,14 +1484,17 @@ static int mtk_rx_alloc(struct mtk_eth *eth, int ring_no, int rx_flag)
for (i = 0; i < rx_dma_size; i++) {
dma_addr_t dma_addr = dma_map_single(eth->dev,
- ring->data[i] + NET_SKB_PAD,
+ ring->data[i] + NET_SKB_PAD + eth->ip_align,
ring->buf_size,
DMA_FROM_DEVICE);
if (unlikely(dma_mapping_error(eth->dev, dma_addr)))
return -ENOMEM;
ring->dma[i].rxd1 = (unsigned int)dma_addr;
- ring->dma[i].rxd2 = RX_DMA_PLEN0(ring->buf_size);
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628))
+ ring->dma[i].rxd2 = RX_DMA_LSO;
+ else
+ ring->dma[i].rxd2 = RX_DMA_PLEN0(ring->buf_size);
}
ring->dma_size = rx_dma_size;
ring->calc_idx_update = false;
@@ -1617,9 +1810,16 @@ static int mtk_dma_busy_wait(struct mtk_eth *eth)
unsigned long t_start = jiffies;
while (1) {
- if (!(mtk_r32(eth, MTK_QDMA_GLO_CFG) &
- (MTK_RX_DMA_BUSY | MTK_TX_DMA_BUSY)))
- return 0;
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
+ if (!(mtk_r32(eth, MTK_QDMA_GLO_CFG) &
+ (MTK_RX_DMA_BUSY | MTK_TX_DMA_BUSY)))
+ return 0;
+ } else {
+ if (!(mtk_r32(eth, MTK_PDMA_GLO_CFG) &
+ (MTK_RX_DMA_BUSY | MTK_TX_DMA_BUSY)))
+ return 0;
+ }
+
if (time_after(jiffies, t_start + MTK_DMA_BUSY_TIMEOUT))
break;
}
@@ -1636,20 +1836,24 @@ static int mtk_dma_init(struct mtk_eth *eth)
if (mtk_dma_busy_wait(eth))
return -EBUSY;
- /* QDMA needs scratch memory for internal reordering of the
- * descriptors
- */
- err = mtk_init_fq_dma(eth);
- if (err)
- return err;
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
+ /* QDMA needs scratch memory for internal reordering of the
+ * descriptors
+ */
+ err = mtk_init_fq_dma(eth);
+ if (err)
+ return err;
+ }
err = mtk_tx_alloc(eth);
if (err)
return err;
- err = mtk_rx_alloc(eth, 0, MTK_RX_FLAGS_QDMA);
- if (err)
- return err;
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
+ err = mtk_rx_alloc(eth, 0, MTK_RX_FLAGS_QDMA);
+ if (err)
+ return err;
+ }
err = mtk_rx_alloc(eth, 0, MTK_RX_FLAGS_NORMAL);
if (err)
@@ -1666,10 +1870,14 @@ static int mtk_dma_init(struct mtk_eth *eth)
return err;
}
- /* Enable random early drop and set drop threshold automatically */
- mtk_w32(eth, FC_THRES_DROP_MODE | FC_THRES_DROP_EN | FC_THRES_MIN,
- MTK_QDMA_FC_THRES);
- mtk_w32(eth, 0x0, MTK_QDMA_HRED2);
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
+ /* Enable random early drop and set drop threshold
+ * automatically
+ */
+ mtk_w32(eth, FC_THRES_DROP_MODE | FC_THRES_DROP_EN |
+ FC_THRES_MIN, MTK_QDMA_FC_THRES);
+ mtk_w32(eth, 0x0, MTK_QDMA_HRED2);
+ }
return 0;
}
@@ -1740,13 +1948,15 @@ static irqreturn_t mtk_handle_irq_tx(int irq, void *_eth)
static irqreturn_t mtk_handle_irq(int irq, void *_eth)
{
struct mtk_eth *eth = _eth;
+ u32 status;
+ status = mtk_r32(eth, MTK_PDMA_INT_STATUS);
if (mtk_r32(eth, MTK_PDMA_INT_MASK) & MTK_RX_DONE_INT) {
if (mtk_r32(eth, MTK_PDMA_INT_STATUS) & MTK_RX_DONE_INT)
mtk_handle_irq_rx(irq, _eth);
}
- if (mtk_r32(eth, MTK_QDMA_INT_MASK) & MTK_TX_DONE_INT) {
- if (mtk_r32(eth, MTK_QMTK_INT_STATUS) & MTK_TX_DONE_INT)
+ if (mtk_r32(eth, eth->tx_int_mask_reg) & MTK_TX_DONE_INT) {
+ if (mtk_r32(eth, eth->tx_int_status_reg) & MTK_TX_DONE_INT)
mtk_handle_irq_tx(irq, _eth);
}
@@ -1778,17 +1988,23 @@ static int mtk_start_dma(struct mtk_eth *eth)
return err;
}
- mtk_w32(eth,
- MTK_TX_WB_DDONE | MTK_TX_DMA_EN |
- MTK_DMA_SIZE_16DWORDS | MTK_NDP_CO_PRO |
- MTK_RX_DMA_EN | MTK_RX_2B_OFFSET |
- MTK_RX_BT_32DWORDS,
- MTK_QDMA_GLO_CFG);
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
+ mtk_w32(eth,
+ MTK_TX_WB_DDONE | MTK_TX_DMA_EN |
+ MTK_DMA_SIZE_16DWORDS | MTK_NDP_CO_PRO |
+ MTK_RX_DMA_EN | MTK_RX_2B_OFFSET |
+ MTK_RX_BT_32DWORDS,
+ MTK_QDMA_GLO_CFG);
- mtk_w32(eth,
- MTK_RX_DMA_EN | rx_2b_offset |
- MTK_RX_BT_32DWORDS | MTK_MULTI_EN,
- MTK_PDMA_GLO_CFG);
+ mtk_w32(eth,
+ MTK_RX_DMA_EN | rx_2b_offset |
+ MTK_RX_BT_32DWORDS | MTK_MULTI_EN,
+ MTK_PDMA_GLO_CFG);
+ } else {
+ mtk_w32(eth, MTK_TX_WB_DDONE | MTK_TX_DMA_EN | MTK_RX_DMA_EN |
+ MTK_MULTI_EN | MTK_PDMA_SIZE_8DWORDS,
+ MTK_PDMA_GLO_CFG);
+ }
return 0;
}
@@ -1816,7 +2032,6 @@ static int mtk_open(struct net_device *dev)
phy_start(dev->phydev);
netif_start_queue(dev);
-
return 0;
}
@@ -1860,7 +2075,8 @@ static int mtk_stop(struct net_device *dev)
napi_disable(&eth->tx_napi);
napi_disable(&eth->rx_napi);
- mtk_stop_dma(eth, MTK_QDMA_GLO_CFG);
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
+ mtk_stop_dma(eth, MTK_QDMA_GLO_CFG);
mtk_stop_dma(eth, MTK_PDMA_GLO_CFG);
mtk_dma_free(eth);
@@ -1922,6 +2138,24 @@ static int mtk_hw_init(struct mtk_eth *eth)
if (ret)
goto err_disable_pm;
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
+ ret = device_reset(eth->dev);
+ if (ret) {
+ dev_err(eth->dev, "MAC reset failed!\n");
+ goto err_disable_pm;
+ }
+
+ /* enable interrupt delay for RX */
+ mtk_w32(eth, MTK_PDMA_DELAY_RX_DELAY, MTK_PDMA_DELAY_INT);
+
+ /* disable delay and normal interrupt */
+ mtk_tx_irq_disable(eth, ~0);
+ mtk_rx_irq_disable(eth, ~0);
+
+ return 0;
+ }
+
+ /* Non-MT7628 handling... */
ethsys_reset(eth, RSTCTRL_FE);
ethsys_reset(eth, RSTCTRL_PPE);
@@ -2425,13 +2659,13 @@ static int mtk_add_mac(struct mtk_eth *eth, struct device_node *np)
eth->netdev[id]->netdev_ops = &mtk_netdev_ops;
eth->netdev[id]->base_addr = (unsigned long)eth->base;
- eth->netdev[id]->hw_features = MTK_HW_FEATURES;
+ eth->netdev[id]->hw_features = eth->soc->hw_features;
if (eth->hwlro)
eth->netdev[id]->hw_features |= NETIF_F_LRO;
- eth->netdev[id]->vlan_features = MTK_HW_FEATURES &
+ eth->netdev[id]->vlan_features = eth->soc->hw_features &
~(NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX);
- eth->netdev[id]->features |= MTK_HW_FEATURES;
+ eth->netdev[id]->features |= eth->soc->hw_features;
eth->netdev[id]->ethtool_ops = &mtk_ethtool_ops;
eth->netdev[id]->irq = eth->irq[0];
@@ -2446,7 +2680,6 @@ free_netdev:
static int mtk_probe(struct platform_device *pdev)
{
- struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
struct device_node *mac_np;
struct mtk_eth *eth;
int err;
@@ -2459,19 +2692,36 @@ static int mtk_probe(struct platform_device *pdev)
eth->soc = of_device_get_match_data(&pdev->dev);
eth->dev = &pdev->dev;
- eth->base = devm_ioremap_resource(&pdev->dev, res);
+ eth->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(eth->base))
return PTR_ERR(eth->base);
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
+ eth->tx_int_mask_reg = MTK_QDMA_INT_MASK;
+ eth->tx_int_status_reg = MTK_QDMA_INT_STATUS;
+ } else {
+ eth->tx_int_mask_reg = MTK_PDMA_INT_MASK;
+ eth->tx_int_status_reg = MTK_PDMA_INT_STATUS;
+ }
+
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
+ eth->rx_dma_l4_valid = RX_DMA_L4_VALID_PDMA;
+ eth->ip_align = NET_IP_ALIGN;
+ } else {
+ eth->rx_dma_l4_valid = RX_DMA_L4_VALID;
+ }
+
spin_lock_init(&eth->page_lock);
spin_lock_init(&eth->tx_irq_lock);
spin_lock_init(&eth->rx_irq_lock);
- eth->ethsys = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
- "mediatek,ethsys");
- if (IS_ERR(eth->ethsys)) {
- dev_err(&pdev->dev, "no ethsys regmap found\n");
- return PTR_ERR(eth->ethsys);
+ if (!MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
+ eth->ethsys = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
+ "mediatek,ethsys");
+ if (IS_ERR(eth->ethsys)) {
+ dev_err(&pdev->dev, "no ethsys regmap found\n");
+ return PTR_ERR(eth->ethsys);
+ }
}
if (MTK_HAS_CAPS(eth->soc->caps, MTK_INFRA)) {
@@ -2572,9 +2822,12 @@ static int mtk_probe(struct platform_device *pdev)
if (err)
goto err_free_dev;
- err = mtk_mdio_init(eth);
- if (err)
- goto err_free_dev;
+ /* No MT7628/88 support yet */
+ if (!MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
+ err = mtk_mdio_init(eth);
+ if (err)
+ goto err_free_dev;
+ }
for (i = 0; i < MTK_MAX_DEVS; i++) {
if (!eth->netdev[i])
@@ -2637,12 +2890,14 @@ static int mtk_remove(struct platform_device *pdev)
static const struct mtk_soc_data mt2701_data = {
.caps = MT7623_CAPS | MTK_HWLRO,
+ .hw_features = MTK_HW_FEATURES,
.required_clks = MT7623_CLKS_BITMAP,
.required_pctl = true,
};
static const struct mtk_soc_data mt7621_data = {
.caps = MT7621_CAPS,
+ .hw_features = MTK_HW_FEATURES,
.required_clks = MT7621_CLKS_BITMAP,
.required_pctl = false,
};
@@ -2650,12 +2905,14 @@ static const struct mtk_soc_data mt7621_data = {
static const struct mtk_soc_data mt7622_data = {
.ana_rgc3 = 0x2028,
.caps = MT7622_CAPS | MTK_HWLRO,
+ .hw_features = MTK_HW_FEATURES,
.required_clks = MT7622_CLKS_BITMAP,
.required_pctl = false,
};
static const struct mtk_soc_data mt7623_data = {
.caps = MT7623_CAPS | MTK_HWLRO,
+ .hw_features = MTK_HW_FEATURES,
.required_clks = MT7623_CLKS_BITMAP,
.required_pctl = true,
};
@@ -2663,16 +2920,25 @@ static const struct mtk_soc_data mt7623_data = {
static const struct mtk_soc_data mt7629_data = {
.ana_rgc3 = 0x128,
.caps = MT7629_CAPS | MTK_HWLRO,
+ .hw_features = MTK_HW_FEATURES,
.required_clks = MT7629_CLKS_BITMAP,
.required_pctl = false,
};
+static const struct mtk_soc_data rt5350_data = {
+ .caps = MT7628_CAPS,
+ .hw_features = MTK_HW_FEATURES_MT7628,
+ .required_clks = MT7628_CLKS_BITMAP,
+ .required_pctl = false,
+};
+
const struct of_device_id of_mtk_match[] = {
{ .compatible = "mediatek,mt2701-eth", .data = &mt2701_data},
{ .compatible = "mediatek,mt7621-eth", .data = &mt7621_data},
{ .compatible = "mediatek,mt7622-eth", .data = &mt7622_data},
{ .compatible = "mediatek,mt7623-eth", .data = &mt7623_data},
{ .compatible = "mediatek,mt7629-eth", .data = &mt7629_data},
+ { .compatible = "ralink,rt5350-eth", .data = &rt5350_data},
{},
};
MODULE_DEVICE_TABLE(of, of_mtk_match);
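
A worked example of the MAC register packing in mtk_set_mac_address() above, for the address 00:11:22:33:44:55 (the MT7628 SDM registers use the same layout, only the register offsets differ):

	u32 adrh = (0x00 << 8) | 0x11;					/* 0x00000011 */
	u32 adrl = (0x22 << 24) | (0x33 << 16) | (0x44 << 8) | 0x55;	/* 0x22334455 */
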
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.h b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
index bab94f763e2c..cc1466ae0926 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
@@ -39,7 +39,8 @@
NETIF_F_SG | NETIF_F_TSO | \
NETIF_F_TSO6 | \
NETIF_F_IPV6_CSUM)
-#define NEXT_RX_DESP_IDX(X, Y) (((X) + 1) & ((Y) - 1))
+#define MTK_HW_FEATURES_MT7628 (NETIF_F_SG | NETIF_F_RXCSUM)
+#define NEXT_DESP_IDX(X, Y) (((X) + 1) & ((Y) - 1))
#define MTK_MAX_RX_RING_NUM 4
#define MTK_HW_LRO_DMA_SIZE 8
@@ -118,6 +119,7 @@
/* PDMA Global Configuration Register */
#define MTK_PDMA_GLO_CFG 0xa04
#define MTK_MULTI_EN BIT(10)
+#define MTK_PDMA_SIZE_8DWORDS (1 << 4)
/* PDMA Reset Index Register */
#define MTK_PDMA_RST_IDX 0xa08
@@ -212,7 +214,7 @@
#define FC_THRES_MIN 0x4444
/* QDMA Interrupt Status Register */
-#define MTK_QMTK_INT_STATUS 0x1A18
+#define MTK_QDMA_INT_STATUS 0x1A18
#define MTK_RX_DONE_DLY BIT(30)
#define MTK_RX_DONE_INT3 BIT(19)
#define MTK_RX_DONE_INT2 BIT(18)
@@ -276,11 +278,18 @@
#define TX_DMA_OWNER_CPU BIT(31)
#define TX_DMA_LS0 BIT(30)
#define TX_DMA_PLEN0(_x) (((_x) & MTK_TX_DMA_BUF_LEN) << 16)
+#define TX_DMA_PLEN1(_x) ((_x) & MTK_TX_DMA_BUF_LEN)
#define TX_DMA_SWC BIT(14)
#define TX_DMA_SDL(_x) (((_x) & 0x3fff) << 16)
+/* PDMA on MT7628 */
+#define TX_DMA_DONE BIT(31)
+#define TX_DMA_LS1 BIT(14)
+#define TX_DMA_DESP2_DEF (TX_DMA_LS0 | TX_DMA_DONE)
+
/* QDMA descriptor rxd2 */
#define RX_DMA_DONE BIT(31)
+#define RX_DMA_LSO BIT(30)
#define RX_DMA_PLEN0(_x) (((_x) & 0x3fff) << 16)
#define RX_DMA_GET_PLEN0(_x) (((_x) >> 16) & 0x3fff)
@@ -289,6 +298,7 @@
/* QDMA descriptor rxd4 */
#define RX_DMA_L4_VALID BIT(24)
+#define RX_DMA_L4_VALID_PDMA BIT(30) /* when PDMA is used */
#define RX_DMA_FPORT_SHIFT 19
#define RX_DMA_FPORT_MASK 0x7
@@ -412,6 +422,19 @@
#define CO_QPHY_SEL BIT(0)
#define GEPHY_MAC_SEL BIT(1)
+/* MT7628/88 specific registers */
+#define MT7628_PDMA_OFFSET 0x0800
+#define MT7628_SDM_OFFSET 0x0c00
+
+#define MT7628_TX_BASE_PTR0 (MT7628_PDMA_OFFSET + 0x00)
+#define MT7628_TX_MAX_CNT0 (MT7628_PDMA_OFFSET + 0x04)
+#define MT7628_TX_CTX_IDX0 (MT7628_PDMA_OFFSET + 0x08)
+#define MT7628_TX_DTX_IDX0 (MT7628_PDMA_OFFSET + 0x0c)
+#define MT7628_PST_DTX_IDX0 BIT(0)
+
+#define MT7628_SDM_MAC_ADRL (MT7628_SDM_OFFSET + 0x0c)
+#define MT7628_SDM_MAC_ADRH (MT7628_SDM_OFFSET + 0x10)
+
struct mtk_rx_dma {
unsigned int rxd1;
unsigned int rxd2;
@@ -509,6 +532,7 @@ enum mtk_clks_map {
BIT(MTK_CLK_SGMII_CK) | \
BIT(MTK_CLK_ETH2PLL))
#define MT7621_CLKS_BITMAP (0)
+#define MT7628_CLKS_BITMAP (0)
#define MT7629_CLKS_BITMAP (BIT(MTK_CLK_ETHIF) | BIT(MTK_CLK_ESW) | \
BIT(MTK_CLK_GP0) | BIT(MTK_CLK_GP1) | \
BIT(MTK_CLK_GP2) | BIT(MTK_CLK_FE) | \
@@ -563,6 +587,10 @@ struct mtk_tx_ring {
struct mtk_tx_dma *last_free;
u16 thresh;
atomic_t free_count;
+ int dma_size;
+ struct mtk_tx_dma *dma_pdma; /* For MT7628/88 PDMA handling */
+ dma_addr_t phys_pdma;
+ int cpu_idx;
};
/* PDMA rx ring mode */
@@ -604,6 +632,8 @@ enum mkt_eth_capabilities {
MTK_HWLRO_BIT,
MTK_SHARED_INT_BIT,
MTK_TRGMII_MT7621_CLK_BIT,
+ MTK_QDMA_BIT,
+ MTK_SOC_MT7628_BIT,
/* MUX BITS*/
MTK_ETH_MUX_GDM1_TO_GMAC1_ESW_BIT,
@@ -634,6 +664,8 @@ enum mkt_eth_capabilities {
#define MTK_HWLRO BIT(MTK_HWLRO_BIT)
#define MTK_SHARED_INT BIT(MTK_SHARED_INT_BIT)
#define MTK_TRGMII_MT7621_CLK BIT(MTK_TRGMII_MT7621_CLK_BIT)
+#define MTK_QDMA BIT(MTK_QDMA_BIT)
+#define MTK_SOC_MT7628 BIT(MTK_SOC_MT7628_BIT)
#define MTK_ETH_MUX_GDM1_TO_GMAC1_ESW \
BIT(MTK_ETH_MUX_GDM1_TO_GMAC1_ESW_BIT)
@@ -687,26 +719,31 @@ enum mkt_eth_capabilities {
#define MTK_HAS_CAPS(caps, _x) (((caps) & (_x)) == (_x))
#define MT7621_CAPS (MTK_GMAC1_RGMII | MTK_GMAC1_TRGMII | \
- MTK_GMAC2_RGMII | MTK_SHARED_INT | MTK_TRGMII_MT7621_CLK)
+ MTK_GMAC2_RGMII | MTK_SHARED_INT | \
+ MTK_TRGMII_MT7621_CLK | MTK_QDMA)
#define MT7622_CAPS (MTK_GMAC1_RGMII | MTK_GMAC1_SGMII | MTK_GMAC2_RGMII | \
MTK_GMAC2_SGMII | MTK_GDM1_ESW | \
MTK_MUX_GDM1_TO_GMAC1_ESW | \
- MTK_MUX_GMAC1_GMAC2_TO_SGMII_RGMII)
+ MTK_MUX_GMAC1_GMAC2_TO_SGMII_RGMII | MTK_QDMA)
+
+#define MT7623_CAPS (MTK_GMAC1_RGMII | MTK_GMAC1_TRGMII | MTK_GMAC2_RGMII | \
+ MTK_QDMA)
-#define MT7623_CAPS (MTK_GMAC1_RGMII | MTK_GMAC1_TRGMII | MTK_GMAC2_RGMII)
+#define MT7628_CAPS (MTK_SHARED_INT | MTK_SOC_MT7628)
#define MT7629_CAPS (MTK_GMAC1_SGMII | MTK_GMAC2_SGMII | MTK_GMAC2_GEPHY | \
MTK_GDM1_ESW | MTK_MUX_GDM1_TO_GMAC1_ESW | \
MTK_MUX_GMAC2_GMAC0_TO_GEPHY | \
MTK_MUX_U3_GMAC2_TO_QPHY | \
- MTK_MUX_GMAC12_TO_GEPHY_SGMII)
+ MTK_MUX_GMAC12_TO_GEPHY_SGMII | MTK_QDMA)
/* struct mtk_soc_data - This is the structure holding all differences
* among various platforms
* @ana_rgc3: The offset for register ANA_RGC3 related to
* sgmiisys syscon
* @caps Flags showing the extra capabilities for the SoC
+ * @hw_features Flags showing the supported HW features
* @required_clks Flags showing the bitmap for required clocks on
* the target SoC
* @required_pctl A bool value to show whether the SoC requires
@@ -717,6 +754,7 @@ struct mtk_soc_data {
u32 caps;
u32 required_clks;
bool required_pctl;
+ netdev_features_t hw_features;
};
/* currently no SoC has more than 2 macs */
@@ -810,6 +848,11 @@ struct mtk_eth {
unsigned long state;
const struct mtk_soc_data *soc;
+
+ u32 tx_int_mask_reg;
+ u32 tx_int_status_reg;
+ u32 rx_dma_l4_valid;
+ int ip_align;
};
/* struct mtk_mac - the structure that holds the info about the MACs of the
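
The renamed NEXT_DESP_IDX() macro above relies on the ring sizes being powers of two, so the wrap-around is a mask instead of a modulo. For a 4-entry ring:

	/* NEXT_DESP_IDX(x, y) = ((x) + 1) & ((y) - 1) */
	NEXT_DESP_IDX(2, 4);	/* 3 */
	NEXT_DESP_IDX(3, 4);	/* 0: wraps with no division */
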
diff --git a/drivers/net/ethernet/mellanox/mlx4/crdump.c b/drivers/net/ethernet/mellanox/mlx4/crdump.c
index 88316c743820..eaf08f7ad128 100644
--- a/drivers/net/ethernet/mellanox/mlx4/crdump.c
+++ b/drivers/net/ethernet/mellanox/mlx4/crdump.c
@@ -99,8 +99,7 @@ static void mlx4_crdump_collect_crspace(struct mlx4_dev *dev,
readl(cr_space + offset);
err = devlink_region_snapshot_create(crdump->region_crspace,
- cr_res_size, crspace_data,
- id, &kvfree);
+ crspace_data, id, &kvfree);
if (err) {
kvfree(crspace_data);
mlx4_warn(dev, "crdump: devlink create %s snapshot id %d err %d\n",
@@ -139,9 +138,7 @@ static void mlx4_crdump_collect_fw_health(struct mlx4_dev *dev,
readl(health_buf_start + offset);
err = devlink_region_snapshot_create(crdump->region_fw_health,
- HEALTH_BUFFER_SIZE,
- health_data,
- id, &kvfree);
+ health_data, id, &kvfree);
if (err) {
kvfree(health_data);
mlx4_warn(dev, "crdump: devlink create %s snapshot id %d err %d\n",
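
The snapshot size argument drops out because the size is now a property of the region itself, fixed when the region is created. A hedged sketch of the assumed flow (region name and variables as used by this driver):

	/* Region creation records the size once ... */
	region = devlink_region_create(devlink, "cr-space", max_snapshots, size);
	/* ... so each snapshot only passes data, id and destructor. */
	err = devlink_region_snapshot_create(region, data, id, &kvfree);
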
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
index c1438ae52a11..40ec5acf79c0 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
@@ -2645,14 +2645,6 @@ out:
en_err(priv, "failed setting L2 tunnel configuration ret %d\n", ret);
return;
}
-
- /* set offloads */
- priv->dev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
- NETIF_F_RXCSUM |
- NETIF_F_TSO | NETIF_F_TSO6 |
- NETIF_F_GSO_UDP_TUNNEL |
- NETIF_F_GSO_UDP_TUNNEL_CSUM |
- NETIF_F_GSO_PARTIAL;
}
static void mlx4_en_del_vxlan_offloads(struct work_struct *work)
@@ -2660,14 +2652,6 @@ static void mlx4_en_del_vxlan_offloads(struct work_struct *work)
int ret;
struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv,
vxlan_del_task);
- /* unset offloads */
- priv->dev->hw_enc_features &= ~(NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
- NETIF_F_RXCSUM |
- NETIF_F_TSO | NETIF_F_TSO6 |
- NETIF_F_GSO_UDP_TUNNEL |
- NETIF_F_GSO_UDP_TUNNEL_CSUM |
- NETIF_F_GSO_PARTIAL);
-
ret = mlx4_SET_PORT_VXLAN(priv->mdev->dev, priv->port,
VXLAN_STEER_BY_OUTER_MAC, 0);
if (ret)
@@ -3415,6 +3399,23 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
if (mdev->LSO_support)
dev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6;
+ if (mdev->dev->caps.tunnel_offload_mode ==
+ MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) {
+ dev->hw_features |= NETIF_F_GSO_UDP_TUNNEL |
+ NETIF_F_GSO_UDP_TUNNEL_CSUM |
+ NETIF_F_GSO_PARTIAL;
+ dev->features |= NETIF_F_GSO_UDP_TUNNEL |
+ NETIF_F_GSO_UDP_TUNNEL_CSUM |
+ NETIF_F_GSO_PARTIAL;
+ dev->gso_partial_features = NETIF_F_GSO_UDP_TUNNEL_CSUM;
+ dev->hw_enc_features = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
+ NETIF_F_RXCSUM |
+ NETIF_F_TSO | NETIF_F_TSO6 |
+ NETIF_F_GSO_UDP_TUNNEL |
+ NETIF_F_GSO_UDP_TUNNEL_CSUM |
+ NETIF_F_GSO_PARTIAL;
+ }
+
dev->vlan_features = dev->hw_features;
dev->hw_features |= NETIF_F_RXCSUM | NETIF_F_RXHASH;
@@ -3483,16 +3484,6 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
priv->rss_hash_fn = ETH_RSS_HASH_TOP;
}
- if (mdev->dev->caps.tunnel_offload_mode == MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) {
- dev->hw_features |= NETIF_F_GSO_UDP_TUNNEL |
- NETIF_F_GSO_UDP_TUNNEL_CSUM |
- NETIF_F_GSO_PARTIAL;
- dev->features |= NETIF_F_GSO_UDP_TUNNEL |
- NETIF_F_GSO_UDP_TUNNEL_CSUM |
- NETIF_F_GSO_PARTIAL;
- dev->gso_partial_features = NETIF_F_GSO_UDP_TUNNEL_CSUM;
- }
-
/* MTU range: 68 - hw-specific max */
dev->min_mtu = ETH_MIN_MTU;
dev->max_mtu = priv->max_mtu;
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
index 36a92b19e613..4d5ca302c067 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
@@ -772,9 +772,7 @@ static bool mlx4_en_build_dma_wqe(struct mlx4_en_priv *priv,
/* Map fragments if any */
for (i_frag = shinfo->nr_frags - 1; i_frag >= 0; i_frag--) {
- const struct skb_frag_struct *frag;
-
- frag = &shinfo->frags[i_frag];
+ const skb_frag_t *frag = &shinfo->frags[i_frag];
byte_count = skb_frag_size(frag);
dma = skb_frag_dma_map(ddev, frag,
0, byte_count,
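
The skb_frag_struct references give way to the skb_frag_t typedef, with size reads going through the accessor; this is the pattern the conversions here and in mtk_cal_txd_req() earlier move to:

	skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
	unsigned int len = skb_frag_size(frag);	/* instead of frag->size */
	dma_addr_t dma = skb_frag_dma_map(dev, frag, 0, len, DMA_TO_DEVICE);
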
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
index 1f6e16d5ea6b..07c204bd3fc4 100644
--- a/drivers/net/ethernet/mellanox/mlx4/main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/main.c
@@ -2292,23 +2292,31 @@ static int mlx4_init_fw(struct mlx4_dev *dev)
static int mlx4_init_hca(struct mlx4_dev *dev)
{
struct mlx4_priv *priv = mlx4_priv(dev);
+ struct mlx4_init_hca_param *init_hca = NULL;
+ struct mlx4_dev_cap *dev_cap = NULL;
struct mlx4_adapter adapter;
- struct mlx4_dev_cap dev_cap;
struct mlx4_profile profile;
- struct mlx4_init_hca_param init_hca;
u64 icm_size;
struct mlx4_config_dev_params params;
int err;
if (!mlx4_is_slave(dev)) {
- err = mlx4_dev_cap(dev, &dev_cap);
+ dev_cap = kzalloc(sizeof(*dev_cap), GFP_KERNEL);
+ init_hca = kzalloc(sizeof(*init_hca), GFP_KERNEL);
+
+ if (!dev_cap || !init_hca) {
+ err = -ENOMEM;
+ goto out_free;
+ }
+
+ err = mlx4_dev_cap(dev, dev_cap);
if (err) {
mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting\n");
- return err;
+ goto out_free;
}
- choose_steering_mode(dev, &dev_cap);
- choose_tunnel_offload_mode(dev, &dev_cap);
+ choose_steering_mode(dev, dev_cap);
+ choose_tunnel_offload_mode(dev, dev_cap);
if (dev->caps.dmfs_high_steer_mode == MLX4_STEERING_DMFS_A0_STATIC &&
mlx4_is_master(dev))
@@ -2331,48 +2339,48 @@ static int mlx4_init_hca(struct mlx4_dev *dev)
MLX4_STEERING_MODE_DEVICE_MANAGED)
profile.num_mcg = MLX4_FS_NUM_MCG;
- icm_size = mlx4_make_profile(dev, &profile, &dev_cap,
- &init_hca);
+ icm_size = mlx4_make_profile(dev, &profile, dev_cap,
+ init_hca);
if ((long long) icm_size < 0) {
err = icm_size;
- return err;
+ goto out_free;
}
dev->caps.max_fmr_maps = (1 << (32 - ilog2(dev->caps.num_mpts))) - 1;
if (enable_4k_uar || !dev->persist->num_vfs) {
- init_hca.log_uar_sz = ilog2(dev->caps.num_uars) +
+ init_hca->log_uar_sz = ilog2(dev->caps.num_uars) +
PAGE_SHIFT - DEFAULT_UAR_PAGE_SHIFT;
- init_hca.uar_page_sz = DEFAULT_UAR_PAGE_SHIFT - 12;
+ init_hca->uar_page_sz = DEFAULT_UAR_PAGE_SHIFT - 12;
} else {
- init_hca.log_uar_sz = ilog2(dev->caps.num_uars);
- init_hca.uar_page_sz = PAGE_SHIFT - 12;
+ init_hca->log_uar_sz = ilog2(dev->caps.num_uars);
+ init_hca->uar_page_sz = PAGE_SHIFT - 12;
}
- init_hca.mw_enabled = 0;
+ init_hca->mw_enabled = 0;
if (dev->caps.flags & MLX4_DEV_CAP_FLAG_MEM_WINDOW ||
dev->caps.bmme_flags & MLX4_BMME_FLAG_TYPE_2_WIN)
- init_hca.mw_enabled = INIT_HCA_TPT_MW_ENABLE;
+ init_hca->mw_enabled = INIT_HCA_TPT_MW_ENABLE;
- err = mlx4_init_icm(dev, &dev_cap, &init_hca, icm_size);
+ err = mlx4_init_icm(dev, dev_cap, init_hca, icm_size);
if (err)
- return err;
+ goto out_free;
- err = mlx4_INIT_HCA(dev, &init_hca);
+ err = mlx4_INIT_HCA(dev, init_hca);
if (err) {
mlx4_err(dev, "INIT_HCA command failed, aborting\n");
goto err_free_icm;
}
- if (dev_cap.flags2 & MLX4_DEV_CAP_FLAG2_SYS_EQS) {
- err = mlx4_query_func(dev, &dev_cap);
+ if (dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_SYS_EQS) {
+ err = mlx4_query_func(dev, dev_cap);
if (err < 0) {
mlx4_err(dev, "QUERY_FUNC command failed, aborting.\n");
goto err_close;
} else if (err & MLX4_QUERY_FUNC_NUM_SYS_EQS) {
- dev->caps.num_eqs = dev_cap.max_eqs;
- dev->caps.reserved_eqs = dev_cap.reserved_eqs;
- dev->caps.reserved_uars = dev_cap.reserved_uars;
+ dev->caps.num_eqs = dev_cap->max_eqs;
+ dev->caps.reserved_eqs = dev_cap->reserved_eqs;
+ dev->caps.reserved_uars = dev_cap->reserved_uars;
}
}
@@ -2381,14 +2389,13 @@ static int mlx4_init_hca(struct mlx4_dev *dev)
* read HCA frequency by QUERY_HCA command
*/
if (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_TS) {
- memset(&init_hca, 0, sizeof(init_hca));
- err = mlx4_QUERY_HCA(dev, &init_hca);
+ err = mlx4_QUERY_HCA(dev, init_hca);
if (err) {
mlx4_err(dev, "QUERY_HCA command failed, disable timestamp\n");
dev->caps.flags2 &= ~MLX4_DEV_CAP_FLAG2_TS;
} else {
dev->caps.hca_core_clock =
- init_hca.hca_core_clock;
+ init_hca->hca_core_clock;
}
/* In case we got HCA frequency 0 - disable timestamping
@@ -2464,7 +2471,8 @@ static int mlx4_init_hca(struct mlx4_dev *dev)
priv->eq_table.inta_pin = adapter.inta_pin;
memcpy(dev->board_id, adapter.board_id, sizeof(dev->board_id));
- return 0;
+ err = 0;
+ goto out_free;
unmap_bf:
unmap_internal_clock(dev);
@@ -2483,6 +2491,10 @@ err_free_icm:
if (!mlx4_is_slave(dev))
mlx4_free_icms(dev);
+out_free:
+ kfree(dev_cap);
+ kfree(init_hca);
+
return err;
}
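
mlx4_init_hca() now keeps its large mlx4_dev_cap and mlx4_init_hca_param off the kernel stack: every early return becomes a goto to a unified exit, and kfree(NULL) being a no-op keeps that path safe on all branches. The shape of the pattern, as a minimal sketch with a hypothetical payload struct and helper:

	struct big_caps *caps = kzalloc(sizeof(*caps), GFP_KERNEL);	/* hypothetical type */
	int err = 0;

	if (!caps)
		return -ENOMEM;

	err = query_hw(caps);		/* hypothetical helper */
	if (err)
		goto out_free;		/* all failure paths funnel here */

out_free:
	kfree(caps);			/* kfree(NULL) is a no-op */
	return err;
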
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Makefile b/drivers/net/ethernet/mellanox/mlx5/core/Makefile
index 57d2cc666fe3..f4de9ccb5df1 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/Makefile
+++ b/drivers/net/ethernet/mellanox/mlx5/core/Makefile
@@ -23,8 +23,9 @@ mlx5_core-y := main.o cmd.o debugfs.o fw.o eq.o uar.o pagealloc.o \
#
mlx5_core-$(CONFIG_MLX5_CORE_EN) += en_main.o en_common.o en_fs.o en_ethtool.o \
en_tx.o en_rx.o en_dim.o en_txrx.o en/xdp.o en_stats.o \
- en_selftest.o en/port.o en/monitor_stats.o en/reporter_tx.o \
- en/params.o en/xsk/umem.o en/xsk/setup.o en/xsk/rx.o en/xsk/tx.o
+ en_selftest.o en/port.o en/monitor_stats.o en/health.o \
+ en/reporter_tx.o en/reporter_rx.o en/params.o en/xsk/umem.o \
+ en/xsk/setup.o en/xsk/rx.o en/xsk/tx.o
#
# Netdev extra
@@ -34,7 +35,8 @@ mlx5_core-$(CONFIG_MLX5_EN_RXNFC) += en_fs_ethtool.o
mlx5_core-$(CONFIG_MLX5_CORE_EN_DCB) += en_dcbnl.o en/port_buffer.o
mlx5_core-$(CONFIG_MLX5_ESWITCH) += en_rep.o en_tc.o en/tc_tun.o lib/port_tun.o lag_mp.o \
lib/geneve.o en/tc_tun_vxlan.o en/tc_tun_gre.o \
- en/tc_tun_geneve.o
+ en/tc_tun_geneve.o diag/en_tc_tracepoint.o
+mlx5_core-$(CONFIG_PCI_HYPERV_INTERFACE) += en/hv_vhca_stats.o
#
# Core extra
@@ -44,6 +46,7 @@ mlx5_core-$(CONFIG_MLX5_ESWITCH) += eswitch.o eswitch_offloads.o eswitch_offlo
mlx5_core-$(CONFIG_MLX5_MPFS) += lib/mpfs.o
mlx5_core-$(CONFIG_VXLAN) += lib/vxlan.o
mlx5_core-$(CONFIG_PTP_1588_CLOCK) += lib/clock.o
+mlx5_core-$(CONFIG_PCI_HYPERV_INTERFACE) += lib/hv.o lib/hv_vhca.o
#
# Ipoib netdev
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
index 8cdd7e66f8df..973f90888b1f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
@@ -1368,49 +1368,19 @@ static void clean_debug_files(struct mlx5_core_dev *dev)
debugfs_remove_recursive(dbg->dbg_root);
}
-static int create_debugfs_files(struct mlx5_core_dev *dev)
+static void create_debugfs_files(struct mlx5_core_dev *dev)
{
struct mlx5_cmd_debug *dbg = &dev->cmd.dbg;
- int err = -ENOMEM;
-
- if (!mlx5_debugfs_root)
- return 0;
dbg->dbg_root = debugfs_create_dir("cmd", dev->priv.dbg_root);
- if (!dbg->dbg_root)
- return err;
-
- dbg->dbg_in = debugfs_create_file("in", 0400, dbg->dbg_root,
- dev, &dfops);
- if (!dbg->dbg_in)
- goto err_dbg;
- dbg->dbg_out = debugfs_create_file("out", 0200, dbg->dbg_root,
- dev, &dfops);
- if (!dbg->dbg_out)
- goto err_dbg;
-
- dbg->dbg_outlen = debugfs_create_file("out_len", 0600, dbg->dbg_root,
- dev, &olfops);
- if (!dbg->dbg_outlen)
- goto err_dbg;
-
- dbg->dbg_status = debugfs_create_u8("status", 0600, dbg->dbg_root,
- &dbg->status);
- if (!dbg->dbg_status)
- goto err_dbg;
-
- dbg->dbg_run = debugfs_create_file("run", 0200, dbg->dbg_root, dev, &fops);
- if (!dbg->dbg_run)
- goto err_dbg;
+ debugfs_create_file("in", 0400, dbg->dbg_root, dev, &dfops);
+ debugfs_create_file("out", 0200, dbg->dbg_root, dev, &dfops);
+ debugfs_create_file("out_len", 0600, dbg->dbg_root, dev, &olfops);
+ debugfs_create_u8("status", 0600, dbg->dbg_root, &dbg->status);
+ debugfs_create_file("run", 0200, dbg->dbg_root, dev, &fops);
mlx5_cmdif_debugfs_init(dev);
-
- return 0;
-
-err_dbg:
- clean_debug_files(dev);
- return err;
}
static void mlx5_cmd_change_mod(struct mlx5_core_dev *dev, int mode)
@@ -2007,17 +1977,10 @@ int mlx5_cmd_init(struct mlx5_core_dev *dev)
goto err_cache;
}
- err = create_debugfs_files(dev);
- if (err) {
- err = -ENOMEM;
- goto err_wq;
- }
+ create_debugfs_files(dev);
return 0;
-err_wq:
- destroy_workqueue(cmd->wq);
-
err_cache:
destroy_msg_cache(dev);
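
The driver stops checking debugfs creation results, matching the current debugfs convention: failures are non-fatal, and passing an error-valued or NULL dentry as a parent to later calls degrades gracefully rather than crashing. A sketch of the resulting style (status_byte, priv and fops stand in for the driver's own objects):

	struct dentry *root = debugfs_create_dir("cmd", parent);

	/* No IS_ERR()/NULL checks: debugfs failure must not fail the driver. */
	debugfs_create_u8("status", 0600, root, &status_byte);
	debugfs_create_file("run", 0200, root, priv, &fops);

	debugfs_remove_recursive(root);	/* teardown is one call */
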
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/debugfs.c b/drivers/net/ethernet/mellanox/mlx5/core/debugfs.c
index a11e22d0b0cc..04854e5fbcd7 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/debugfs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/debugfs.c
@@ -92,8 +92,6 @@ EXPORT_SYMBOL(mlx5_debugfs_root);
void mlx5_register_debugfs(void)
{
mlx5_debugfs_root = debugfs_create_dir("mlx5", NULL);
- if (IS_ERR_OR_NULL(mlx5_debugfs_root))
- mlx5_debugfs_root = NULL;
}
void mlx5_unregister_debugfs(void)
@@ -101,45 +99,25 @@ void mlx5_unregister_debugfs(void)
debugfs_remove(mlx5_debugfs_root);
}
-int mlx5_qp_debugfs_init(struct mlx5_core_dev *dev)
+void mlx5_qp_debugfs_init(struct mlx5_core_dev *dev)
{
- if (!mlx5_debugfs_root)
- return 0;
-
atomic_set(&dev->num_qps, 0);
dev->priv.qp_debugfs = debugfs_create_dir("QPs", dev->priv.dbg_root);
- if (!dev->priv.qp_debugfs)
- return -ENOMEM;
-
- return 0;
}
void mlx5_qp_debugfs_cleanup(struct mlx5_core_dev *dev)
{
- if (!mlx5_debugfs_root)
- return;
-
debugfs_remove_recursive(dev->priv.qp_debugfs);
}
-int mlx5_eq_debugfs_init(struct mlx5_core_dev *dev)
+void mlx5_eq_debugfs_init(struct mlx5_core_dev *dev)
{
- if (!mlx5_debugfs_root)
- return 0;
-
dev->priv.eq_debugfs = debugfs_create_dir("EQs", dev->priv.dbg_root);
- if (!dev->priv.eq_debugfs)
- return -ENOMEM;
-
- return 0;
}
void mlx5_eq_debugfs_cleanup(struct mlx5_core_dev *dev)
{
- if (!mlx5_debugfs_root)
- return;
-
debugfs_remove_recursive(dev->priv.eq_debugfs);
}
@@ -183,85 +161,41 @@ static const struct file_operations stats_fops = {
.write = average_write,
};
-int mlx5_cmdif_debugfs_init(struct mlx5_core_dev *dev)
+void mlx5_cmdif_debugfs_init(struct mlx5_core_dev *dev)
{
struct mlx5_cmd_stats *stats;
struct dentry **cmd;
const char *namep;
- int err;
int i;
- if (!mlx5_debugfs_root)
- return 0;
-
cmd = &dev->priv.cmdif_debugfs;
*cmd = debugfs_create_dir("commands", dev->priv.dbg_root);
- if (!*cmd)
- return -ENOMEM;
for (i = 0; i < ARRAY_SIZE(dev->cmd.stats); i++) {
stats = &dev->cmd.stats[i];
namep = mlx5_command_str(i);
if (strcmp(namep, "unknown command opcode")) {
stats->root = debugfs_create_dir(namep, *cmd);
- if (!stats->root) {
- mlx5_core_warn(dev, "failed adding command %d\n",
- i);
- err = -ENOMEM;
- goto out;
- }
-
- stats->avg = debugfs_create_file("average", 0400,
- stats->root, stats,
- &stats_fops);
- if (!stats->avg) {
- mlx5_core_warn(dev, "failed creating debugfs file\n");
- err = -ENOMEM;
- goto out;
- }
-
- stats->count = debugfs_create_u64("n", 0400,
- stats->root,
- &stats->n);
- if (!stats->count) {
- mlx5_core_warn(dev, "failed creating debugfs file\n");
- err = -ENOMEM;
- goto out;
- }
+
+ debugfs_create_file("average", 0400, stats->root, stats,
+ &stats_fops);
+ debugfs_create_u64("n", 0400, stats->root, &stats->n);
}
}
-
- return 0;
-out:
- debugfs_remove_recursive(dev->priv.cmdif_debugfs);
- return err;
}
void mlx5_cmdif_debugfs_cleanup(struct mlx5_core_dev *dev)
{
- if (!mlx5_debugfs_root)
- return;
-
debugfs_remove_recursive(dev->priv.cmdif_debugfs);
}
-int mlx5_cq_debugfs_init(struct mlx5_core_dev *dev)
+void mlx5_cq_debugfs_init(struct mlx5_core_dev *dev)
{
- if (!mlx5_debugfs_root)
- return 0;
-
dev->priv.cq_debugfs = debugfs_create_dir("CQs", dev->priv.dbg_root);
- if (!dev->priv.cq_debugfs)
- return -ENOMEM;
-
- return 0;
}
void mlx5_cq_debugfs_cleanup(struct mlx5_core_dev *dev)
{
- if (!mlx5_debugfs_root)
- return;
-
debugfs_remove_recursive(dev->priv.cq_debugfs);
}
@@ -484,7 +418,6 @@ static int add_res_tree(struct mlx5_core_dev *dev, enum dbg_rsc_type type,
{
struct mlx5_rsc_debug *d;
char resn[32];
- int err;
int i;
d = kzalloc(struct_size(d, fields, nfile), GFP_KERNEL);
@@ -496,30 +429,15 @@ static int add_res_tree(struct mlx5_core_dev *dev, enum dbg_rsc_type type,
d->type = type;
sprintf(resn, "0x%x", rsn);
d->root = debugfs_create_dir(resn, root);
- if (!d->root) {
- err = -ENOMEM;
- goto out_free;
- }
for (i = 0; i < nfile; i++) {
d->fields[i].i = i;
- d->fields[i].dent = debugfs_create_file(field[i], 0400,
- d->root, &d->fields[i],
- &fops);
- if (!d->fields[i].dent) {
- err = -ENOMEM;
- goto out_rem;
- }
+ debugfs_create_file(field[i], 0400, d->root, &d->fields[i],
+ &fops);
}
*dbg = d;
return 0;
-out_rem:
- debugfs_remove_recursive(d->root);
-
-out_free:
- kfree(d);
- return err;
}
static void rem_res_tree(struct mlx5_rsc_debug *d)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/diag/en_rep_tracepoint.h b/drivers/net/ethernet/mellanox/mlx5/core/diag/en_rep_tracepoint.h
new file mode 100644
index 000000000000..1177860a2ee4
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/diag/en_rep_tracepoint.h
@@ -0,0 +1,54 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2019 Mellanox Technologies. */
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM mlx5
+
+#if !defined(_MLX5_EN_REP_TP_) || defined(TRACE_HEADER_MULTI_READ)
+#define _MLX5_EN_REP_TP_
+
+#include <linux/tracepoint.h>
+#include <linux/trace_seq.h>
+#include "en_rep.h"
+
+TRACE_EVENT(mlx5e_rep_neigh_update,
+ TP_PROTO(const struct mlx5e_neigh_hash_entry *nhe, const u8 *ha,
+ bool neigh_connected),
+ TP_ARGS(nhe, ha, neigh_connected),
+ TP_STRUCT__entry(__string(devname, nhe->m_neigh.dev->name)
+ __array(u8, ha, ETH_ALEN)
+ __array(u8, v4, 4)
+ __array(u8, v6, 16)
+ __field(bool, neigh_connected)
+ ),
+ TP_fast_assign(const struct mlx5e_neigh *mn = &nhe->m_neigh;
+ struct in6_addr *pin6;
+ __be32 *p32;
+
+ __assign_str(devname, mn->dev->name);
+ __entry->neigh_connected = neigh_connected;
+ memcpy(__entry->ha, ha, ETH_ALEN);
+
+ p32 = (__be32 *)__entry->v4;
+ pin6 = (struct in6_addr *)__entry->v6;
+ if (mn->family == AF_INET) {
+ *p32 = mn->dst_ip.v4;
+ ipv6_addr_set_v4mapped(*p32, pin6);
+ } else if (mn->family == AF_INET6) {
+ *pin6 = mn->dst_ip.v6;
+ }
+ ),
+ TP_printk("netdev: %s MAC: %pM IPv4: %pI4 IPv6: %pI6c neigh_connected=%d\n",
+ __get_str(devname), __entry->ha,
+ __entry->v4, __entry->v6, __entry->neigh_connected
+ )
+);
+
+#endif /* _MLX5_EN_REP_TP_ */
+
+/* This part must be outside protection */
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH ./diag
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE en_rep_tracepoint
+#include <trace/define_trace.h>
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/diag/en_tc_tracepoint.c b/drivers/net/ethernet/mellanox/mlx5/core/diag/en_tc_tracepoint.c
new file mode 100644
index 000000000000..c5dc6c50fa87
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/diag/en_tc_tracepoint.c
@@ -0,0 +1,58 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2019 Mellanox Technologies. */
+
+#define CREATE_TRACE_POINTS
+#include "en_tc_tracepoint.h"
+
+void put_ids_to_array(int *ids,
+ const struct flow_action_entry *entries,
+ unsigned int num)
+{
+ unsigned int i;
+
+ for (i = 0; i < num; i++)
+ ids[i] = entries[i].id;
+}
+
+#define NAME_SIZE 16
+
+static const char FLOWACT2STR[NUM_FLOW_ACTIONS][NAME_SIZE] = {
+ [FLOW_ACTION_ACCEPT] = "ACCEPT",
+ [FLOW_ACTION_DROP] = "DROP",
+ [FLOW_ACTION_TRAP] = "TRAP",
+ [FLOW_ACTION_GOTO] = "GOTO",
+ [FLOW_ACTION_REDIRECT] = "REDIRECT",
+ [FLOW_ACTION_MIRRED] = "MIRRED",
+ [FLOW_ACTION_VLAN_PUSH] = "VLAN_PUSH",
+ [FLOW_ACTION_VLAN_POP] = "VLAN_POP",
+ [FLOW_ACTION_VLAN_MANGLE] = "VLAN_MANGLE",
+ [FLOW_ACTION_TUNNEL_ENCAP] = "TUNNEL_ENCAP",
+ [FLOW_ACTION_TUNNEL_DECAP] = "TUNNEL_DECAP",
+ [FLOW_ACTION_MANGLE] = "MANGLE",
+ [FLOW_ACTION_ADD] = "ADD",
+ [FLOW_ACTION_CSUM] = "CSUM",
+ [FLOW_ACTION_MARK] = "MARK",
+ [FLOW_ACTION_WAKE] = "WAKE",
+ [FLOW_ACTION_QUEUE] = "QUEUE",
+ [FLOW_ACTION_SAMPLE] = "SAMPLE",
+ [FLOW_ACTION_POLICE] = "POLICE",
+ [FLOW_ACTION_CT] = "CT",
+};
+
+const char *parse_action(struct trace_seq *p,
+ int *ids,
+ unsigned int num)
+{
+ const char *ret = trace_seq_buffer_ptr(p);
+ unsigned int i;
+
+ for (i = 0; i < num; i++) {
+ if (ids[i] < NUM_FLOW_ACTIONS)
+ trace_seq_printf(p, "%s ", FLOWACT2STR[ids[i]]);
+ else
+ trace_seq_printf(p, "UNKNOWN ");
+ }
+
+ trace_seq_putc(p, 0);
+ return ret;
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/diag/en_tc_tracepoint.h b/drivers/net/ethernet/mellanox/mlx5/core/diag/en_tc_tracepoint.h
new file mode 100644
index 000000000000..d4e6cfaaade3
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/diag/en_tc_tracepoint.h
@@ -0,0 +1,114 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2019 Mellanox Technologies. */
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM mlx5
+
+#if !defined(_MLX5_TC_TP_) || defined(TRACE_HEADER_MULTI_READ)
+#define _MLX5_TC_TP_
+
+#include <linux/tracepoint.h>
+#include <linux/trace_seq.h>
+#include <net/flow_offload.h>
+#include "en_rep.h"
+
+#define __parse_action(ids, num) parse_action(p, ids, num)
+
+void put_ids_to_array(int *ids,
+ const struct flow_action_entry *entries,
+ unsigned int num);
+
+const char *parse_action(struct trace_seq *p,
+ int *ids,
+ unsigned int num);
+
+DECLARE_EVENT_CLASS(mlx5e_flower_template,
+ TP_PROTO(const struct flow_cls_offload *f),
+ TP_ARGS(f),
+ TP_STRUCT__entry(__field(void *, cookie)
+ __field(unsigned int, num)
+ __dynamic_array(int, ids, f->rule ?
+ f->rule->action.num_entries : 0)
+ ),
+ TP_fast_assign(__entry->cookie = (void *)f->cookie;
+ __entry->num = (f->rule ?
+ f->rule->action.num_entries : 0);
+ if (__entry->num)
+ put_ids_to_array(__get_dynamic_array(ids),
+ f->rule->action.entries,
+ f->rule->action.num_entries);
+ ),
+ TP_printk("cookie=%p actions= %s\n",
+ __entry->cookie, __entry->num ?
+ __parse_action(__get_dynamic_array(ids),
+ __entry->num) : "NULL"
+ )
+);
+
+DEFINE_EVENT(mlx5e_flower_template, mlx5e_configure_flower,
+ TP_PROTO(const struct flow_cls_offload *f),
+ TP_ARGS(f)
+ );
+
+DEFINE_EVENT(mlx5e_flower_template, mlx5e_delete_flower,
+ TP_PROTO(const struct flow_cls_offload *f),
+ TP_ARGS(f)
+ );
+
+TRACE_EVENT(mlx5e_stats_flower,
+ TP_PROTO(const struct flow_cls_offload *f),
+ TP_ARGS(f),
+ TP_STRUCT__entry(__field(void *, cookie)
+ __field(u64, bytes)
+ __field(u64, packets)
+ __field(u64, lastused)
+ ),
+ TP_fast_assign(__entry->cookie = (void *)f->cookie;
+ __entry->bytes = f->stats.bytes;
+ __entry->packets = f->stats.pkts;
+ __entry->lastused = f->stats.lastused;
+ ),
+ TP_printk("cookie=%p bytes=%llu packets=%llu lastused=%llu\n",
+ __entry->cookie, __entry->bytes,
+ __entry->packets, __entry->lastused
+ )
+);
+
+TRACE_EVENT(mlx5e_tc_update_neigh_used_value,
+ TP_PROTO(const struct mlx5e_neigh_hash_entry *nhe, bool neigh_used),
+ TP_ARGS(nhe, neigh_used),
+ TP_STRUCT__entry(__string(devname, nhe->m_neigh.dev->name)
+ __array(u8, v4, 4)
+ __array(u8, v6, 16)
+ __field(bool, neigh_used)
+ ),
+ TP_fast_assign(const struct mlx5e_neigh *mn = &nhe->m_neigh;
+ struct in6_addr *pin6;
+ __be32 *p32;
+
+ __assign_str(devname, mn->dev->name);
+ __entry->neigh_used = neigh_used;
+
+ p32 = (__be32 *)__entry->v4;
+ pin6 = (struct in6_addr *)__entry->v6;
+ if (mn->family == AF_INET) {
+ *p32 = mn->dst_ip.v4;
+ ipv6_addr_set_v4mapped(*p32, pin6);
+ } else if (mn->family == AF_INET6) {
+ *pin6 = mn->dst_ip.v6;
+ }
+ ),
+ TP_printk("netdev: %s IPv4: %pI4 IPv6: %pI6c neigh_used=%d\n",
+ __get_str(devname), __entry->v4, __entry->v6,
+ __entry->neigh_used
+ )
+);
+
+#endif /* _MLX5_TC_TP_ */
+
+/* This part must be outside protection */
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH ./diag
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE en_tc_tracepoint
+#include <trace/define_trace.h>
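
Both __dynamic_array() and TP_fast_assign above repeat the f->rule NULL check, because stats callbacks arrive without a rule attached. The guard pattern in a compilable form, with stand-in types rather than the flow_offload structs:

#include <stdio.h>
#include <stdlib.h>

struct rule { unsigned int num_entries; };
struct cls_offload { struct rule *rule; };

int main(void)
{
	struct cls_offload stats_cb = { .rule = NULL };	/* stats callbacks carry no rule */
	unsigned int num = stats_cb.rule ? stats_cb.rule->num_entries : 0;
	int *ids = num ? calloc(num, sizeof(*ids)) : NULL;

	printf("num=%u ids=%p\n", num, (void *)ids);	/* num=0, ids=(nil) */
	free(ids);	/* free(NULL) is a no-op */
	return 0;
}
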
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
index 65bec19a438f..446792799125 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
@@ -54,6 +54,7 @@
#include "mlx5_core.h"
#include "en_stats.h"
#include "en/fs.h"
+#include "lib/hv_vhca.h"
extern const struct net_device_ops mlx5e_netdev_ops;
struct page_pool;
@@ -300,6 +301,7 @@ struct mlx5e_dcbx_dp {
enum {
MLX5E_RQ_STATE_ENABLED,
+ MLX5E_RQ_STATE_RECOVERING,
MLX5E_RQ_STATE_AM,
MLX5E_RQ_STATE_NO_CSUM_COMPLETE,
MLX5E_RQ_STATE_CSUM_FULL, /* cqe_csum_full hw bit is set */
@@ -356,6 +358,7 @@ enum {
MLX5E_SQ_STATE_IPSEC,
MLX5E_SQ_STATE_AM,
MLX5E_SQ_STATE_TLS,
+ MLX5E_SQ_STATE_VLAN_NEED_L2_INLINE,
};
struct mlx5e_sq_wqe_info {
@@ -480,8 +483,6 @@ struct mlx5e_xdp_mpwqe {
struct mlx5e_tx_wqe *wqe;
u8 ds_count;
u8 pkt_count;
- u8 max_ds_count;
- u8 complete;
u8 inline_on;
};
@@ -552,6 +553,8 @@ struct mlx5e_icosq {
/* control path */
struct mlx5_wq_ctrl wq_ctrl;
struct mlx5e_channel *channel;
+
+ struct work_struct recover_work;
} ____cacheline_aligned_in_smp;
struct mlx5e_wqe_frag_info {
@@ -671,6 +674,8 @@ struct mlx5e_rq {
struct zero_copy_allocator zca;
struct xdp_umem *umem;
+ struct work_struct recover_work;
+
/* control */
struct mlx5_wq_ctrl wq_ctrl;
__be32 mkey_be;
@@ -778,6 +783,15 @@ struct mlx5e_modify_sq_param {
int rl_index;
};
+#if IS_ENABLED(CONFIG_PCI_HYPERV_INTERFACE)
+struct mlx5e_hv_vhca_stats_agent {
+ struct mlx5_hv_vhca_agent *agent;
+ struct delayed_work work;
+ u16 delay;
+ void *buf;
+};
+#endif
+
struct mlx5e_xsk {
/* UMEMs are stored separately from channels, because we don't want to
* lose them when channels are recreated. The kernel also stores UMEMs,
@@ -847,7 +861,11 @@ struct mlx5e_priv {
struct mlx5e_tls *tls;
#endif
struct devlink_health_reporter *tx_reporter;
+ struct devlink_health_reporter *rx_reporter;
struct mlx5e_xsk xsk;
+#if IS_ENABLED(CONFIG_PCI_HYPERV_INTERFACE)
+ struct mlx5e_hv_vhca_stats_agent stats_agent;
+#endif
};
struct mlx5e_profile {
@@ -888,6 +906,26 @@ bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget);
int mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget);
void mlx5e_free_txqsq_descs(struct mlx5e_txqsq *sq);
+static inline u32 mlx5e_rqwq_get_size(struct mlx5e_rq *rq)
+{
+ switch (rq->wq_type) {
+ case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
+ return mlx5_wq_ll_get_size(&rq->mpwqe.wq);
+ default:
+ return mlx5_wq_cyc_get_size(&rq->wqe.wq);
+ }
+}
+
+static inline u32 mlx5e_rqwq_get_cur_sz(struct mlx5e_rq *rq)
+{
+ switch (rq->wq_type) {
+ case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
+ return rq->mpwqe.wq.cur_sz;
+ default:
+ return rq->wqe.wq.cur_sz;
+ }
+}
+
bool mlx5e_check_fragmented_striding_rq_cap(struct mlx5_core_dev *mdev);
bool mlx5e_striding_rq_possible(struct mlx5_core_dev *mdev,
struct mlx5e_params *params);
@@ -1006,6 +1044,12 @@ void mlx5e_set_rx_cq_mode_params(struct mlx5e_params *params,
void mlx5e_set_rq_type(struct mlx5_core_dev *mdev, struct mlx5e_params *params);
void mlx5e_init_rq_type_params(struct mlx5_core_dev *mdev,
struct mlx5e_params *params);
+int mlx5e_modify_rq_state(struct mlx5e_rq *rq, int curr_state, int next_state);
+void mlx5e_activate_rq(struct mlx5e_rq *rq);
+void mlx5e_deactivate_rq(struct mlx5e_rq *rq);
+void mlx5e_free_rx_descs(struct mlx5e_rq *rq);
+void mlx5e_activate_icosq(struct mlx5e_icosq *icosq);
+void mlx5e_deactivate_icosq(struct mlx5e_icosq *icosq);
int mlx5e_modify_sq(struct mlx5_core_dev *mdev, u32 sqn,
struct mlx5e_modify_sq_param *p);
@@ -1135,7 +1179,6 @@ void mlx5e_build_rq_params(struct mlx5_core_dev *mdev,
struct mlx5e_params *params);
void mlx5e_build_rss_params(struct mlx5e_rss_params *rss_params,
u16 num_channels);
-u8 mlx5e_params_calculate_tx_min_inline(struct mlx5_core_dev *mdev);
void mlx5e_rx_dim_work(struct work_struct *work);
void mlx5e_tx_dim_work(struct work_struct *work);
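
The hv_vhca stats agent members added above only exist when CONFIG_PCI_HYPERV_INTERFACE is enabled, so struct mlx5e_priv changes layout with the kernel config. A small compilable model of such a guarded member (CONFIG_DEMO_HV is a stand-in macro, not a kernel symbol):

#include <stdio.h>

#define CONFIG_DEMO_HV 1	/* stand-in for CONFIG_PCI_HYPERV_INTERFACE */

struct demo_stats_agent {
	unsigned int delay;
	void *buf;
};

struct demo_priv {
	int num_channels;
#if CONFIG_DEMO_HV
	struct demo_stats_agent stats_agent;	/* compiled out when the config is off */
#endif
};

int main(void)
{
	/* sizeof() shows the member only exists under the config guard */
	printf("sizeof(struct demo_priv) = %zu\n", sizeof(struct demo_priv));
	return 0;
}
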
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h b/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h
index be5961ff24cc..ca2161b42c7f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h
@@ -10,11 +10,14 @@ enum {
};
struct mlx5e_tc_table {
+ /* protects flow table */
+ struct mutex t_lock;
struct mlx5_flow_table *t;
struct rhashtable ht;
- DECLARE_HASHTABLE(mod_hdr_tbl, 8);
+ struct mod_hdr_tbl mod_hdr;
+ struct mutex hairpin_tbl_lock; /* protects hairpin_tbl */
DECLARE_HASHTABLE(hairpin_tbl, 8);
struct notifier_block netdevice_nb;
@@ -132,12 +135,17 @@ struct mlx5e_ethtool_steering {
void mlx5e_ethtool_init_steering(struct mlx5e_priv *priv);
void mlx5e_ethtool_cleanup_steering(struct mlx5e_priv *priv);
-int mlx5e_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd);
-int mlx5e_get_rxnfc(struct net_device *dev,
- struct ethtool_rxnfc *info, u32 *rule_locs);
+int mlx5e_ethtool_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd);
+int mlx5e_ethtool_get_rxnfc(struct net_device *dev,
+ struct ethtool_rxnfc *info, u32 *rule_locs);
#else
static inline void mlx5e_ethtool_init_steering(struct mlx5e_priv *priv) { }
static inline void mlx5e_ethtool_cleanup_steering(struct mlx5e_priv *priv) { }
+static inline int mlx5e_ethtool_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
+{ return -EOPNOTSUPP; }
+static inline int mlx5e_ethtool_get_rxnfc(struct net_device *dev,
+ struct ethtool_rxnfc *info, u32 *rule_locs)
+{ return -EOPNOTSUPP; }
#endif /* CONFIG_MLX5_EN_RXNFC */
#ifdef CONFIG_MLX5_EN_ARFS
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/health.c b/drivers/net/ethernet/mellanox/mlx5/core/en/health.c
new file mode 100644
index 000000000000..1d6b58860da6
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/health.c
@@ -0,0 +1,205 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2019 Mellanox Technologies.
+
+#include "health.h"
+#include "lib/eq.h"
+
+int mlx5e_reporter_named_obj_nest_start(struct devlink_fmsg *fmsg, char *name)
+{
+ int err;
+
+ err = devlink_fmsg_pair_nest_start(fmsg, name);
+ if (err)
+ return err;
+
+ err = devlink_fmsg_obj_nest_start(fmsg);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+int mlx5e_reporter_named_obj_nest_end(struct devlink_fmsg *fmsg)
+{
+ int err;
+
+ err = devlink_fmsg_obj_nest_end(fmsg);
+ if (err)
+ return err;
+
+ err = devlink_fmsg_pair_nest_end(fmsg);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+int mlx5e_reporter_cq_diagnose(struct mlx5e_cq *cq, struct devlink_fmsg *fmsg)
+{
+ struct mlx5e_priv *priv = cq->channel->priv;
+ u32 out[MLX5_ST_SZ_DW(query_cq_out)] = {};
+ u8 hw_status;
+ void *cqc;
+ int err;
+
+ err = mlx5_core_query_cq(priv->mdev, &cq->mcq, out, sizeof(out));
+ if (err)
+ return err;
+
+ cqc = MLX5_ADDR_OF(query_cq_out, out, cq_context);
+ hw_status = MLX5_GET(cqc, cqc, status);
+
+ err = mlx5e_reporter_named_obj_nest_start(fmsg, "CQ");
+ if (err)
+ return err;
+
+ err = devlink_fmsg_u32_pair_put(fmsg, "cqn", cq->mcq.cqn);
+ if (err)
+ return err;
+
+ err = devlink_fmsg_u8_pair_put(fmsg, "HW status", hw_status);
+ if (err)
+ return err;
+
+ err = mlx5e_reporter_named_obj_nest_end(fmsg);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+int mlx5e_reporter_cq_common_diagnose(struct mlx5e_cq *cq, struct devlink_fmsg *fmsg)
+{
+ u8 cq_log_stride;
+ u32 cq_sz;
+ int err;
+
+ cq_sz = mlx5_cqwq_get_size(&cq->wq);
+ cq_log_stride = mlx5_cqwq_get_log_stride_size(&cq->wq);
+
+ err = mlx5e_reporter_named_obj_nest_start(fmsg, "CQ");
+ if (err)
+ return err;
+
+ err = devlink_fmsg_u64_pair_put(fmsg, "stride size", BIT(cq_log_stride));
+ if (err)
+ return err;
+
+ err = devlink_fmsg_u32_pair_put(fmsg, "size", cq_sz);
+ if (err)
+ return err;
+
+ err = mlx5e_reporter_named_obj_nest_end(fmsg);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+int mlx5e_health_create_reporters(struct mlx5e_priv *priv)
+{
+ int err;
+
+ err = mlx5e_reporter_tx_create(priv);
+ if (err)
+ return err;
+
+ err = mlx5e_reporter_rx_create(priv);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+void mlx5e_health_destroy_reporters(struct mlx5e_priv *priv)
+{
+ mlx5e_reporter_rx_destroy(priv);
+ mlx5e_reporter_tx_destroy(priv);
+}
+
+void mlx5e_health_channels_update(struct mlx5e_priv *priv)
+{
+ if (priv->tx_reporter)
+ devlink_health_reporter_state_update(priv->tx_reporter,
+ DEVLINK_HEALTH_REPORTER_STATE_HEALTHY);
+ if (priv->rx_reporter)
+ devlink_health_reporter_state_update(priv->rx_reporter,
+ DEVLINK_HEALTH_REPORTER_STATE_HEALTHY);
+}
+
+int mlx5e_health_sq_to_ready(struct mlx5e_channel *channel, u32 sqn)
+{
+ struct mlx5_core_dev *mdev = channel->mdev;
+ struct net_device *dev = channel->netdev;
+ struct mlx5e_modify_sq_param msp = {};
+ int err;
+
+ msp.curr_state = MLX5_SQC_STATE_ERR;
+ msp.next_state = MLX5_SQC_STATE_RST;
+
+ err = mlx5e_modify_sq(mdev, sqn, &msp);
+ if (err) {
+ netdev_err(dev, "Failed to move sq 0x%x to reset\n", sqn);
+ return err;
+ }
+
+ memset(&msp, 0, sizeof(msp));
+ msp.curr_state = MLX5_SQC_STATE_RST;
+ msp.next_state = MLX5_SQC_STATE_RDY;
+
+ err = mlx5e_modify_sq(mdev, sqn, &msp);
+ if (err) {
+ netdev_err(dev, "Failed to move sq 0x%x to ready\n", sqn);
+ return err;
+ }
+
+ return 0;
+}
+
+int mlx5e_health_recover_channels(struct mlx5e_priv *priv)
+{
+ int err = 0;
+
+ rtnl_lock();
+ mutex_lock(&priv->state_lock);
+
+ if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
+ goto out;
+
+ err = mlx5e_safe_reopen_channels(priv);
+
+out:
+ mutex_unlock(&priv->state_lock);
+ rtnl_unlock();
+
+ return err;
+}
+
+int mlx5e_health_channel_eq_recover(struct mlx5_eq_comp *eq, struct mlx5e_channel *channel)
+{
+ u32 eqe_count;
+
+ netdev_err(channel->netdev, "EQ 0x%x: Cons = 0x%x, irqn = 0x%x\n",
+ eq->core.eqn, eq->core.cons_index, eq->core.irqn);
+
+ eqe_count = mlx5_eq_poll_irq_disabled(eq);
+ if (!eqe_count)
+ return -EIO;
+
+ netdev_err(channel->netdev, "Recovered %d eqes on EQ 0x%x\n",
+ eqe_count, eq->core.eqn);
+
+ channel->stats->eq_rearm++;
+ return 0;
+}
+
+int mlx5e_health_report(struct mlx5e_priv *priv,
+ struct devlink_health_reporter *reporter, char *err_str,
+ struct mlx5e_err_ctx *err_ctx)
+{
+ if (!reporter) {
+ netdev_err(priv->netdev, "%s\n", err_str);
+ return err_ctx->recover(&err_ctx->ctx);
+ }
+ return devlink_health_report(reporter, err_str, err_ctx);
+}
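
mlx5e_reporter_named_obj_nest_start()/_end() pair a devlink pair-nest with an object-nest, and callers must close them in reverse order. A rough userspace model of that call discipline, with a plain text emitter standing in for devlink_fmsg:

#include <stdio.h>

/* Stand-ins for devlink_fmsg_*_nest_start/end: emit indented key/value text. */
static int depth;

static void nest_start(const char *name)
{
	printf("%*s%s: {\n", depth * 2, "", name);
	depth++;
}

static void nest_end(void)
{
	depth--;
	printf("%*s}\n", depth * 2, "");
}

static void u32_pair(const char *key, unsigned int val)
{
	printf("%*s%s = %u\n", depth * 2, "", key, val);
}

int main(void)
{
	nest_start("CQ");		/* mlx5e_reporter_named_obj_nest_start() */
	u32_pair("cqn", 0x1a);
	u32_pair("HW status", 0);
	nest_end();			/* mlx5e_reporter_named_obj_nest_end() */
	return 0;
}
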
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/health.h b/drivers/net/ethernet/mellanox/mlx5/core/en/health.h
new file mode 100644
index 000000000000..d3693fa547ac
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/health.h
@@ -0,0 +1,53 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2019 Mellanox Technologies. */
+
+#ifndef __MLX5E_EN_HEALTH_H
+#define __MLX5E_EN_HEALTH_H
+
+#include "en.h"
+
+#define MLX5E_RX_ERR_CQE(cqe) (get_cqe_opcode(cqe) != MLX5_CQE_RESP_SEND)
+
+static inline bool cqe_syndrome_needs_recover(u8 syndrome)
+{
+ return syndrome == MLX5_CQE_SYNDROME_LOCAL_LENGTH_ERR ||
+ syndrome == MLX5_CQE_SYNDROME_LOCAL_QP_OP_ERR ||
+ syndrome == MLX5_CQE_SYNDROME_LOCAL_PROT_ERR ||
+ syndrome == MLX5_CQE_SYNDROME_WR_FLUSH_ERR;
+}
+
+int mlx5e_reporter_tx_create(struct mlx5e_priv *priv);
+void mlx5e_reporter_tx_destroy(struct mlx5e_priv *priv);
+void mlx5e_reporter_tx_err_cqe(struct mlx5e_txqsq *sq);
+int mlx5e_reporter_tx_timeout(struct mlx5e_txqsq *sq);
+
+int mlx5e_reporter_cq_diagnose(struct mlx5e_cq *cq, struct devlink_fmsg *fmsg);
+int mlx5e_reporter_cq_common_diagnose(struct mlx5e_cq *cq, struct devlink_fmsg *fmsg);
+int mlx5e_reporter_named_obj_nest_start(struct devlink_fmsg *fmsg, char *name);
+int mlx5e_reporter_named_obj_nest_end(struct devlink_fmsg *fmsg);
+
+int mlx5e_reporter_rx_create(struct mlx5e_priv *priv);
+void mlx5e_reporter_rx_destroy(struct mlx5e_priv *priv);
+void mlx5e_reporter_icosq_cqe_err(struct mlx5e_icosq *icosq);
+void mlx5e_reporter_rq_cqe_err(struct mlx5e_rq *rq);
+void mlx5e_reporter_rx_timeout(struct mlx5e_rq *rq);
+
+#define MLX5E_REPORTER_PER_Q_MAX_LEN 256
+
+struct mlx5e_err_ctx {
+ int (*recover)(void *ctx);
+ void *ctx;
+};
+
+int mlx5e_health_sq_to_ready(struct mlx5e_channel *channel, u32 sqn);
+int mlx5e_health_channel_eq_recover(struct mlx5_eq_comp *eq, struct mlx5e_channel *channel);
+int mlx5e_health_recover_channels(struct mlx5e_priv *priv);
+int mlx5e_health_report(struct mlx5e_priv *priv,
+ struct devlink_health_reporter *reporter, char *err_str,
+ struct mlx5e_err_ctx *err_ctx);
+int mlx5e_health_create_reporters(struct mlx5e_priv *priv);
+void mlx5e_health_destroy_reporters(struct mlx5e_priv *priv);
+void mlx5e_health_channels_update(struct mlx5e_priv *priv);
+
+
+#endif
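
struct mlx5e_err_ctx bundles a recover callback with an opaque context so the TX and RX paths share one reporting entry point, and recovery still runs when no devlink reporter exists. A compilable sketch of that dispatch, using stub types in place of the driver's:

#include <stdio.h>

struct err_ctx {
	int (*recover)(void *ctx);
	void *ctx;
};

static int rq_recover(void *ctx)
{
	printf("recovering RQ %d\n", *(int *)ctx);
	return 0;
}

/* Mirrors mlx5e_health_report(): no reporter -> recover inline,
 * otherwise hand the context to the health infrastructure.
 */
static int health_report(void *reporter, const char *err_str, struct err_ctx *err_ctx)
{
	if (!reporter) {
		fprintf(stderr, "%s\n", err_str);
		return err_ctx->recover(err_ctx->ctx);
	}
	return 0;	/* devlink_health_report(reporter, ...) in the driver */
}

int main(void)
{
	int rqn = 7;
	struct err_ctx ec = { .recover = rq_recover, .ctx = &rqn };

	return health_report(NULL, "ERR CQE on RQ: 0x7", &ec);
}
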
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/hv_vhca_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en/hv_vhca_stats.c
new file mode 100644
index 000000000000..c37b4acd9bd5
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/hv_vhca_stats.c
@@ -0,0 +1,162 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+// Copyright (c) 2018 Mellanox Technologies
+
+#include "en.h"
+#include "en/hv_vhca_stats.h"
+#include "lib/hv_vhca.h"
+#include "lib/hv.h"
+
+struct mlx5e_hv_vhca_per_ring_stats {
+ u64 rx_packets;
+ u64 rx_bytes;
+ u64 tx_packets;
+ u64 tx_bytes;
+};
+
+static void
+mlx5e_hv_vhca_fill_ring_stats(struct mlx5e_priv *priv, int ch,
+ struct mlx5e_hv_vhca_per_ring_stats *data)
+{
+ struct mlx5e_channel_stats *stats;
+ int tc;
+
+ stats = &priv->channel_stats[ch];
+ data->rx_packets = stats->rq.packets;
+ data->rx_bytes = stats->rq.bytes;
+
+ for (tc = 0; tc < priv->max_opened_tc; tc++) {
+ data->tx_packets += stats->sq[tc].packets;
+ data->tx_bytes += stats->sq[tc].bytes;
+ }
+}
+
+static void mlx5e_hv_vhca_fill_stats(struct mlx5e_priv *priv, u64 *data,
+ int buf_len)
+{
+ int ch, i = 0;
+
+ for (ch = 0; ch < priv->max_nch; ch++) {
+ u64 *buf = data + i;
+
+ if (WARN_ON_ONCE(buf +
+ sizeof(struct mlx5e_hv_vhca_per_ring_stats) >
+ data + buf_len))
+ return;
+
+ mlx5e_hv_vhca_fill_ring_stats(priv, ch,
+ (struct mlx5e_hv_vhca_per_ring_stats *)buf);
+ i += sizeof(struct mlx5e_hv_vhca_per_ring_stats) / sizeof(u64);
+ }
+}
+
+static int mlx5e_hv_vhca_stats_buf_size(struct mlx5e_priv *priv)
+{
+ return (sizeof(struct mlx5e_hv_vhca_per_ring_stats) *
+ priv->max_nch);
+}
+
+static void mlx5e_hv_vhca_stats_work(struct work_struct *work)
+{
+ struct mlx5e_hv_vhca_stats_agent *sagent;
+ struct mlx5_hv_vhca_agent *agent;
+ struct delayed_work *dwork;
+ struct mlx5e_priv *priv;
+ int buf_len, rc;
+ void *buf;
+
+ dwork = to_delayed_work(work);
+ sagent = container_of(dwork, struct mlx5e_hv_vhca_stats_agent, work);
+ priv = container_of(sagent, struct mlx5e_priv, stats_agent);
+ buf_len = mlx5e_hv_vhca_stats_buf_size(priv);
+ agent = sagent->agent;
+ buf = sagent->buf;
+
+ memset(buf, 0, buf_len);
+ mlx5e_hv_vhca_fill_stats(priv, buf, buf_len);
+
+ rc = mlx5_hv_vhca_agent_write(agent, buf, buf_len);
+ if (rc) {
+ mlx5_core_err(priv->mdev,
+ "%s: Failed to write stats, err = %d\n",
+ __func__, rc);
+ return;
+ }
+
+ if (sagent->delay)
+ queue_delayed_work(priv->wq, &sagent->work, sagent->delay);
+}
+
+enum {
+ MLX5_HV_VHCA_STATS_VERSION = 1,
+ MLX5_HV_VHCA_STATS_UPDATE_ONCE = 0xFFFF,
+};
+
+static void mlx5e_hv_vhca_stats_control(struct mlx5_hv_vhca_agent *agent,
+ struct mlx5_hv_vhca_control_block *block)
+{
+ struct mlx5e_hv_vhca_stats_agent *sagent;
+ struct mlx5e_priv *priv;
+
+ priv = mlx5_hv_vhca_agent_priv(agent);
+ sagent = &priv->stats_agent;
+
+ block->version = MLX5_HV_VHCA_STATS_VERSION;
+ block->rings = priv->max_nch;
+
+ if (!block->command) {
+ cancel_delayed_work_sync(&priv->stats_agent.work);
+ return;
+ }
+
+ sagent->delay = block->command == MLX5_HV_VHCA_STATS_UPDATE_ONCE ? 0 :
+ msecs_to_jiffies(block->command * 100);
+
+ queue_delayed_work(priv->wq, &sagent->work, sagent->delay);
+}
+
+static void mlx5e_hv_vhca_stats_cleanup(struct mlx5_hv_vhca_agent *agent)
+{
+ struct mlx5e_priv *priv = mlx5_hv_vhca_agent_priv(agent);
+
+ cancel_delayed_work_sync(&priv->stats_agent.work);
+}
+
+int mlx5e_hv_vhca_stats_create(struct mlx5e_priv *priv)
+{
+ int buf_len = mlx5e_hv_vhca_stats_buf_size(priv);
+ struct mlx5_hv_vhca_agent *agent;
+
+ priv->stats_agent.buf = kvzalloc(buf_len, GFP_KERNEL);
+ if (!priv->stats_agent.buf)
+ return -ENOMEM;
+
+ agent = mlx5_hv_vhca_agent_create(priv->mdev->hv_vhca,
+ MLX5_HV_VHCA_AGENT_STATS,
+ mlx5e_hv_vhca_stats_control, NULL,
+ mlx5e_hv_vhca_stats_cleanup,
+ priv);
+
+ if (IS_ERR_OR_NULL(agent)) {
+ if (IS_ERR(agent))
+ netdev_warn(priv->netdev,
+ "Failed to create hv vhca stats agent, err = %ld\n",
+ PTR_ERR(agent));
+
+ kvfree(priv->stats_agent.buf);
+ return IS_ERR_OR_NULL(agent);
+ }
+
+ priv->stats_agent.agent = agent;
+ INIT_DELAYED_WORK(&priv->stats_agent.work, mlx5e_hv_vhca_stats_work);
+
+ return 0;
+}
+
+void mlx5e_hv_vhca_stats_destroy(struct mlx5e_priv *priv)
+{
+ if (IS_ERR_OR_NULL(priv->stats_agent.agent))
+ return;
+
+ mlx5_hv_vhca_agent_destroy(priv->stats_agent.agent);
+ kvfree(priv->stats_agent.buf);
+}
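
In the control path above, block->command doubles as the polling period in 100 ms units: 0 cancels the work, 0xFFFF requests a single update, anything else re-queues the work periodically. The decoding in a few runnable lines (plain milliseconds stand in for msecs_to_jiffies()):

#include <stdio.h>

#define STATS_UPDATE_ONCE 0xFFFF

/* Returns the re-queue delay in ms; 0 means run once and stop. */
static unsigned int command_to_delay_ms(unsigned int command)
{
	return command == STATS_UPDATE_ONCE ? 0 : command * 100;
}

int main(void)
{
	printf("%u\n", command_to_delay_ms(5));			/* 500 ms period */
	printf("%u\n", command_to_delay_ms(STATS_UPDATE_ONCE));	/* one shot */
	return 0;
}
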
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/hv_vhca_stats.h b/drivers/net/ethernet/mellanox/mlx5/core/en/hv_vhca_stats.h
new file mode 100644
index 000000000000..664463faf77b
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/hv_vhca_stats.h
@@ -0,0 +1,25 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2019 Mellanox Technologies. */
+
+#ifndef __MLX5_EN_STATS_VHCA_H__
+#define __MLX5_EN_STATS_VHCA_H__
+#include "en.h"
+
+#if IS_ENABLED(CONFIG_PCI_HYPERV_INTERFACE)
+
+int mlx5e_hv_vhca_stats_create(struct mlx5e_priv *priv);
+void mlx5e_hv_vhca_stats_destroy(struct mlx5e_priv *priv);
+
+#else
+
+static inline int mlx5e_hv_vhca_stats_create(struct mlx5e_priv *priv)
+{
+ return 0;
+}
+
+static inline void mlx5e_hv_vhca_stats_destroy(struct mlx5e_priv *priv)
+{
+}
+#endif
+
+#endif /* __MLX5_EN_STATS_VHCA_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter.h b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter.h
deleted file mode 100644
index e78e92753d73..000000000000
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter.h
+++ /dev/null
@@ -1,15 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright (c) 2019 Mellanox Technologies. */
-
-#ifndef __MLX5E_EN_REPORTER_H
-#define __MLX5E_EN_REPORTER_H
-
-#include <linux/mlx5/driver.h>
-#include "en.h"
-
-int mlx5e_tx_reporter_create(struct mlx5e_priv *priv);
-void mlx5e_tx_reporter_destroy(struct mlx5e_priv *priv);
-void mlx5e_tx_reporter_err_cqe(struct mlx5e_txqsq *sq);
-int mlx5e_tx_reporter_timeout(struct mlx5e_txqsq *sq);
-
-#endif
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c
new file mode 100644
index 000000000000..b860569d4247
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c
@@ -0,0 +1,404 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2019 Mellanox Technologies.
+
+#include "health.h"
+#include "params.h"
+
+static int mlx5e_query_rq_state(struct mlx5_core_dev *dev, u32 rqn, u8 *state)
+{
+ int outlen = MLX5_ST_SZ_BYTES(query_rq_out);
+ void *out;
+ void *rqc;
+ int err;
+
+ out = kvzalloc(outlen, GFP_KERNEL);
+ if (!out)
+ return -ENOMEM;
+
+ err = mlx5_core_query_rq(dev, rqn, out);
+ if (err)
+ goto out;
+
+ rqc = MLX5_ADDR_OF(query_rq_out, out, rq_context);
+ *state = MLX5_GET(rqc, rqc, state);
+
+out:
+ kvfree(out);
+ return err;
+}
+
+static int mlx5e_wait_for_icosq_flush(struct mlx5e_icosq *icosq)
+{
+ unsigned long exp_time = jiffies + msecs_to_jiffies(2000);
+
+ while (time_before(jiffies, exp_time)) {
+ if (icosq->cc == icosq->pc)
+ return 0;
+
+ msleep(20);
+ }
+
+ netdev_err(icosq->channel->netdev,
+ "Wait for ICOSQ 0x%x flush timeout (cc = 0x%x, pc = 0x%x)\n",
+ icosq->sqn, icosq->cc, icosq->pc);
+
+ return -ETIMEDOUT;
+}
+
+static void mlx5e_reset_icosq_cc_pc(struct mlx5e_icosq *icosq)
+{
+ WARN_ONCE(icosq->cc != icosq->pc, "ICOSQ 0x%x: cc (0x%x) != pc (0x%x)\n",
+ icosq->sqn, icosq->cc, icosq->pc);
+ icosq->cc = 0;
+ icosq->pc = 0;
+}
+
+static int mlx5e_rx_reporter_err_icosq_cqe_recover(void *ctx)
+{
+ struct mlx5_core_dev *mdev;
+ struct mlx5e_icosq *icosq;
+ struct net_device *dev;
+ struct mlx5e_rq *rq;
+ u8 state;
+ int err;
+
+ icosq = ctx;
+ rq = &icosq->channel->rq;
+ mdev = icosq->channel->mdev;
+ dev = icosq->channel->netdev;
+ err = mlx5_core_query_sq_state(mdev, icosq->sqn, &state);
+ if (err) {
+ netdev_err(dev, "Failed to query ICOSQ 0x%x state. err = %d\n",
+ icosq->sqn, err);
+ goto out;
+ }
+
+ if (state != MLX5_SQC_STATE_ERR)
+ goto out;
+
+ mlx5e_deactivate_rq(rq);
+ err = mlx5e_wait_for_icosq_flush(icosq);
+ if (err)
+ goto out;
+
+ mlx5e_deactivate_icosq(icosq);
+
+ /* At this point, both the rq and the icosq are disabled */
+
+ err = mlx5e_health_sq_to_ready(icosq->channel, icosq->sqn);
+ if (err)
+ goto out;
+
+ mlx5e_reset_icosq_cc_pc(icosq);
+ mlx5e_free_rx_descs(rq);
+ clear_bit(MLX5E_SQ_STATE_RECOVERING, &icosq->state);
+ mlx5e_activate_icosq(icosq);
+ mlx5e_activate_rq(rq);
+
+ rq->stats->recover++;
+ return 0;
+out:
+ clear_bit(MLX5E_SQ_STATE_RECOVERING, &icosq->state);
+ return err;
+}
+
+void mlx5e_reporter_icosq_cqe_err(struct mlx5e_icosq *icosq)
+{
+ struct mlx5e_priv *priv = icosq->channel->priv;
+ char err_str[MLX5E_REPORTER_PER_Q_MAX_LEN];
+ struct mlx5e_err_ctx err_ctx = {};
+
+ err_ctx.ctx = icosq;
+ err_ctx.recover = mlx5e_rx_reporter_err_icosq_cqe_recover;
+ sprintf(err_str, "ERR CQE on ICOSQ: 0x%x", icosq->sqn);
+
+ mlx5e_health_report(priv, priv->rx_reporter, err_str, &err_ctx);
+}
+
+static int mlx5e_rq_to_ready(struct mlx5e_rq *rq, int curr_state)
+{
+ struct net_device *dev = rq->netdev;
+ int err;
+
+ err = mlx5e_modify_rq_state(rq, curr_state, MLX5_RQC_STATE_RST);
+ if (err) {
+ netdev_err(dev, "Failed to move rq 0x%x to reset\n", rq->rqn);
+ return err;
+ }
+ err = mlx5e_modify_rq_state(rq, MLX5_RQC_STATE_RST, MLX5_RQC_STATE_RDY);
+ if (err) {
+ netdev_err(dev, "Failed to move rq 0x%x to ready\n", rq->rqn);
+ return err;
+ }
+
+ return 0;
+}
+
+static int mlx5e_rx_reporter_err_rq_cqe_recover(void *ctx)
+{
+ struct mlx5_core_dev *mdev;
+ struct net_device *dev;
+ struct mlx5e_rq *rq;
+ u8 state;
+ int err;
+
+ rq = ctx;
+ mdev = rq->mdev;
+ dev = rq->netdev;
+ err = mlx5e_query_rq_state(mdev, rq->rqn, &state);
+ if (err) {
+ netdev_err(dev, "Failed to query RQ 0x%x state. err = %d\n",
+ rq->rqn, err);
+ goto out;
+ }
+
+ if (state != MLX5_RQC_STATE_ERR)
+ goto out;
+
+ mlx5e_deactivate_rq(rq);
+ mlx5e_free_rx_descs(rq);
+
+ err = mlx5e_rq_to_ready(rq, MLX5_RQC_STATE_ERR);
+ if (err)
+ goto out;
+
+ clear_bit(MLX5E_RQ_STATE_RECOVERING, &rq->state);
+ mlx5e_activate_rq(rq);
+ rq->stats->recover++;
+ return 0;
+out:
+ clear_bit(MLX5E_RQ_STATE_RECOVERING, &rq->state);
+ return err;
+}
+
+void mlx5e_reporter_rq_cqe_err(struct mlx5e_rq *rq)
+{
+ struct mlx5e_priv *priv = rq->channel->priv;
+ char err_str[MLX5E_REPORTER_PER_Q_MAX_LEN];
+ struct mlx5e_err_ctx err_ctx = {};
+
+ err_ctx.ctx = rq;
+ err_ctx.recover = mlx5e_rx_reporter_err_rq_cqe_recover;
+ sprintf(err_str, "ERR CQE on RQ: 0x%x", rq->rqn);
+
+ mlx5e_health_report(priv, priv->rx_reporter, err_str, &err_ctx);
+}
+
+static int mlx5e_rx_reporter_timeout_recover(void *ctx)
+{
+ struct mlx5e_icosq *icosq;
+ struct mlx5_eq_comp *eq;
+ struct mlx5e_rq *rq;
+ int err;
+
+ rq = ctx;
+ icosq = &rq->channel->icosq;
+ eq = rq->cq.mcq.eq;
+ err = mlx5e_health_channel_eq_recover(eq, rq->channel);
+ if (err)
+ clear_bit(MLX5E_SQ_STATE_ENABLED, &icosq->state);
+
+ return err;
+}
+
+void mlx5e_reporter_rx_timeout(struct mlx5e_rq *rq)
+{
+ struct mlx5e_icosq *icosq = &rq->channel->icosq;
+ struct mlx5e_priv *priv = rq->channel->priv;
+ char err_str[MLX5E_REPORTER_PER_Q_MAX_LEN];
+ struct mlx5e_err_ctx err_ctx = {};
+
+ err_ctx.ctx = rq;
+ err_ctx.recover = mlx5e_rx_reporter_timeout_recover;
+ sprintf(err_str, "RX timeout on channel: %d, ICOSQ: 0x%x RQ: 0x%x, CQ: 0x%x\n",
+ icosq->channel->ix, icosq->sqn, rq->rqn, rq->cq.mcq.cqn);
+
+ mlx5e_health_report(priv, priv->rx_reporter, err_str, &err_ctx);
+}
+
+static int mlx5e_rx_reporter_recover_from_ctx(struct mlx5e_err_ctx *err_ctx)
+{
+ return err_ctx->recover(err_ctx->ctx);
+}
+
+static int mlx5e_rx_reporter_recover(struct devlink_health_reporter *reporter,
+ void *context)
+{
+ struct mlx5e_priv *priv = devlink_health_reporter_priv(reporter);
+ struct mlx5e_err_ctx *err_ctx = context;
+
+ return err_ctx ? mlx5e_rx_reporter_recover_from_ctx(err_ctx) :
+ mlx5e_health_recover_channels(priv);
+}
+
+static int mlx5e_rx_reporter_build_diagnose_output(struct mlx5e_rq *rq,
+ struct devlink_fmsg *fmsg)
+{
+ struct mlx5e_priv *priv = rq->channel->priv;
+ struct mlx5e_params *params;
+ struct mlx5e_icosq *icosq;
+ u8 icosq_hw_state;
+ int wqes_sz;
+ u8 hw_state;
+ u16 wq_head;
+ int err;
+
+ params = &priv->channels.params;
+ icosq = &rq->channel->icosq;
+ err = mlx5e_query_rq_state(priv->mdev, rq->rqn, &hw_state);
+ if (err)
+ return err;
+
+ err = mlx5_core_query_sq_state(priv->mdev, icosq->sqn, &icosq_hw_state);
+ if (err)
+ return err;
+
+ wqes_sz = mlx5e_rqwq_get_cur_sz(rq);
+ wq_head = params->rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ ?
+ rq->mpwqe.wq.head : mlx5_wq_cyc_get_head(&rq->wqe.wq);
+
+ err = devlink_fmsg_obj_nest_start(fmsg);
+ if (err)
+ return err;
+
+ err = devlink_fmsg_u32_pair_put(fmsg, "channel ix", rq->channel->ix);
+ if (err)
+ return err;
+
+ err = devlink_fmsg_u32_pair_put(fmsg, "rqn", rq->rqn);
+ if (err)
+ return err;
+
+ err = devlink_fmsg_u8_pair_put(fmsg, "HW state", hw_state);
+ if (err)
+ return err;
+
+ err = devlink_fmsg_u8_pair_put(fmsg, "SW state", rq->state);
+ if (err)
+ return err;
+
+ err = devlink_fmsg_u32_pair_put(fmsg, "posted WQEs", wqes_sz);
+ if (err)
+ return err;
+
+ err = devlink_fmsg_u32_pair_put(fmsg, "cc", wq_head);
+ if (err)
+ return err;
+
+ err = devlink_fmsg_u8_pair_put(fmsg, "ICOSQ HW state", icosq_hw_state);
+ if (err)
+ return err;
+
+ err = mlx5e_reporter_cq_diagnose(&rq->cq, fmsg);
+ if (err)
+ return err;
+
+ err = devlink_fmsg_obj_nest_end(fmsg);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+static int mlx5e_rx_reporter_diagnose(struct devlink_health_reporter *reporter,
+ struct devlink_fmsg *fmsg)
+{
+ struct mlx5e_priv *priv = devlink_health_reporter_priv(reporter);
+ struct mlx5e_params *params = &priv->channels.params;
+ struct mlx5e_rq *generic_rq;
+ u32 rq_stride, rq_sz;
+ int i, err = 0;
+
+ mutex_lock(&priv->state_lock);
+
+ if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
+ goto unlock;
+
+ generic_rq = &priv->channels.c[0]->rq;
+ rq_sz = mlx5e_rqwq_get_size(generic_rq);
+ rq_stride = BIT(mlx5e_mpwqe_get_log_stride_size(priv->mdev, params, NULL));
+
+ err = mlx5e_reporter_named_obj_nest_start(fmsg, "Common config");
+ if (err)
+ goto unlock;
+
+ err = mlx5e_reporter_named_obj_nest_start(fmsg, "RQ");
+ if (err)
+ goto unlock;
+
+ err = devlink_fmsg_u8_pair_put(fmsg, "type", params->rq_wq_type);
+ if (err)
+ goto unlock;
+
+ err = devlink_fmsg_u64_pair_put(fmsg, "stride size", rq_stride);
+ if (err)
+ goto unlock;
+
+ err = devlink_fmsg_u32_pair_put(fmsg, "size", rq_sz);
+ if (err)
+ goto unlock;
+
+ err = mlx5e_reporter_named_obj_nest_end(fmsg);
+ if (err)
+ goto unlock;
+
+ err = mlx5e_reporter_cq_common_diagnose(&generic_rq->cq, fmsg);
+ if (err)
+ goto unlock;
+
+ err = mlx5e_reporter_named_obj_nest_end(fmsg);
+ if (err)
+ goto unlock;
+
+ err = devlink_fmsg_arr_pair_nest_start(fmsg, "RQs");
+ if (err)
+ goto unlock;
+
+ for (i = 0; i < priv->channels.num; i++) {
+ struct mlx5e_rq *rq = &priv->channels.c[i]->rq;
+
+ err = mlx5e_rx_reporter_build_diagnose_output(rq, fmsg);
+ if (err)
+ goto unlock;
+ }
+ err = devlink_fmsg_arr_pair_nest_end(fmsg);
+ if (err)
+ goto unlock;
+unlock:
+ mutex_unlock(&priv->state_lock);
+ return err;
+}
+
+static const struct devlink_health_reporter_ops mlx5_rx_reporter_ops = {
+ .name = "rx",
+ .recover = mlx5e_rx_reporter_recover,
+ .diagnose = mlx5e_rx_reporter_diagnose,
+};
+
+#define MLX5E_REPORTER_RX_GRACEFUL_PERIOD 500
+
+int mlx5e_reporter_rx_create(struct mlx5e_priv *priv)
+{
+ struct devlink *devlink = priv_to_devlink(priv->mdev);
+ struct devlink_health_reporter *reporter;
+
+ reporter = devlink_health_reporter_create(devlink,
+ &mlx5_rx_reporter_ops,
+ MLX5E_REPORTER_RX_GRACEFUL_PERIOD,
+ true, priv);
+ if (IS_ERR(reporter)) {
+ netdev_warn(priv->netdev, "Failed to create rx reporter, err = %ld\n",
+ PTR_ERR(reporter));
+ return PTR_ERR(reporter);
+ }
+ priv->rx_reporter = reporter;
+ return 0;
+}
+
+void mlx5e_reporter_rx_destroy(struct mlx5e_priv *priv)
+{
+ if (!priv->rx_reporter)
+ return;
+
+ devlink_health_reporter_destroy(priv->rx_reporter);
+}
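
The ICOSQ recovery above is order-sensitive: the RQ is quiesced first (it is what asks the ICOSQ for UMR WQEs), the ICOSQ is then drained and moved through RST to RDY, and only afterwards are both re-activated. A stub-function sketch of that sequence; the steps are modeled, not the real mlx5e calls:

#include <stdio.h>

static void deactivate_rq(void)    { puts("1. stop RQ so no new UMRs are requested"); }
static int  wait_icosq_flush(void) { puts("2. wait for ICOSQ cc == pc"); return 0; }
static void deactivate_icosq(void) { puts("3. stop ICOSQ"); }
static int  icosq_to_ready(void)   { puts("4. modify SQ: ERR -> RST -> RDY"); return 0; }

int main(void)
{
	deactivate_rq();
	if (wait_icosq_flush())
		return 1;	/* flush timed out: abort, recovery bit is cleared */
	deactivate_icosq();
	if (icosq_to_ready())
		return 1;
	puts("5. reset cc/pc, free stale RX descriptors");
	puts("6. re-activate ICOSQ first, then the RQ");
	return 0;
}
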
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c
index c7f86453c638..bfed558637c2 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c
@@ -1,16 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2019 Mellanox Technologies. */
-#include <net/devlink.h>
-#include "reporter.h"
-#include "lib/eq.h"
-
-#define MLX5E_TX_REPORTER_PER_SQ_MAX_LEN 256
-
-struct mlx5e_tx_err_ctx {
- int (*recover)(struct mlx5e_txqsq *sq);
- struct mlx5e_txqsq *sq;
-};
+#include "health.h"
static int mlx5e_wait_for_sq_flush(struct mlx5e_txqsq *sq)
{
@@ -40,41 +31,20 @@ static void mlx5e_reset_txqsq_cc_pc(struct mlx5e_txqsq *sq)
sq->pc = 0;
}
-static int mlx5e_sq_to_ready(struct mlx5e_txqsq *sq, int curr_state)
+static int mlx5e_tx_reporter_err_cqe_recover(void *ctx)
{
- struct mlx5_core_dev *mdev = sq->channel->mdev;
- struct net_device *dev = sq->channel->netdev;
- struct mlx5e_modify_sq_param msp = {0};
+ struct mlx5_core_dev *mdev;
+ struct net_device *dev;
+ struct mlx5e_txqsq *sq;
+ u8 state;
int err;
- msp.curr_state = curr_state;
- msp.next_state = MLX5_SQC_STATE_RST;
-
- err = mlx5e_modify_sq(mdev, sq->sqn, &msp);
- if (err) {
- netdev_err(dev, "Failed to move sq 0x%x to reset\n", sq->sqn);
- return err;
- }
-
- memset(&msp, 0, sizeof(msp));
- msp.curr_state = MLX5_SQC_STATE_RST;
- msp.next_state = MLX5_SQC_STATE_RDY;
-
- err = mlx5e_modify_sq(mdev, sq->sqn, &msp);
- if (err) {
- netdev_err(dev, "Failed to move sq 0x%x to ready\n", sq->sqn);
- return err;
- }
-
- return 0;
-}
+ sq = ctx;
+ mdev = sq->channel->mdev;
+ dev = sq->channel->netdev;
-static int mlx5e_tx_reporter_err_cqe_recover(struct mlx5e_txqsq *sq)
-{
- struct mlx5_core_dev *mdev = sq->channel->mdev;
- struct net_device *dev = sq->channel->netdev;
- u8 state;
- int err;
+ if (!test_bit(MLX5E_SQ_STATE_RECOVERING, &sq->state))
+ return 0;
err = mlx5_core_query_sq_state(mdev, sq->sqn, &state);
if (err) {
@@ -97,7 +67,7 @@ static int mlx5e_tx_reporter_err_cqe_recover(struct mlx5e_txqsq *sq)
* pending WQEs. The SQ can now be safely reset.
*/
- err = mlx5e_sq_to_ready(sq, state);
+ err = mlx5e_health_sq_to_ready(sq->channel, sq->sqn);
if (err)
goto out;
@@ -112,115 +82,98 @@ out:
return err;
}
-static int mlx5_tx_health_report(struct devlink_health_reporter *tx_reporter,
- char *err_str,
- struct mlx5e_tx_err_ctx *err_ctx)
+void mlx5e_reporter_tx_err_cqe(struct mlx5e_txqsq *sq)
{
- if (IS_ERR_OR_NULL(tx_reporter)) {
- netdev_err(err_ctx->sq->channel->netdev, err_str);
- return err_ctx->recover(err_ctx->sq);
- }
-
- return devlink_health_report(tx_reporter, err_str, err_ctx);
-}
+ struct mlx5e_priv *priv = sq->channel->priv;
+ char err_str[MLX5E_REPORTER_PER_Q_MAX_LEN];
+ struct mlx5e_err_ctx err_ctx = {0};
-void mlx5e_tx_reporter_err_cqe(struct mlx5e_txqsq *sq)
-{
- char err_str[MLX5E_TX_REPORTER_PER_SQ_MAX_LEN];
- struct mlx5e_tx_err_ctx err_ctx = {0};
-
- err_ctx.sq = sq;
- err_ctx.recover = mlx5e_tx_reporter_err_cqe_recover;
+ err_ctx.ctx = sq;
+ err_ctx.recover = mlx5e_tx_reporter_err_cqe_recover;
sprintf(err_str, "ERR CQE on SQ: 0x%x", sq->sqn);
- mlx5_tx_health_report(sq->channel->priv->tx_reporter, err_str,
- &err_ctx);
+ mlx5e_health_report(priv, priv->tx_reporter, err_str, &err_ctx);
}
-static int mlx5e_tx_reporter_timeout_recover(struct mlx5e_txqsq *sq)
+static int mlx5e_tx_reporter_timeout_recover(void *ctx)
{
- struct mlx5_eq_comp *eq = sq->cq.mcq.eq;
- u32 eqe_count;
-
- netdev_err(sq->channel->netdev, "EQ 0x%x: Cons = 0x%x, irqn = 0x%x\n",
- eq->core.eqn, eq->core.cons_index, eq->core.irqn);
+ struct mlx5_eq_comp *eq;
+ struct mlx5e_txqsq *sq;
+ int err;
- eqe_count = mlx5_eq_poll_irq_disabled(eq);
- if (!eqe_count) {
+ sq = ctx;
+ eq = sq->cq.mcq.eq;
+ err = mlx5e_health_channel_eq_recover(eq, sq->channel);
+ if (err)
clear_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
- return -EIO;
- }
- netdev_err(sq->channel->netdev, "Recover %d eqes on EQ 0x%x\n",
- eqe_count, eq->core.eqn);
- sq->channel->stats->eq_rearm++;
- return 0;
+ return err;
}
-int mlx5e_tx_reporter_timeout(struct mlx5e_txqsq *sq)
+int mlx5e_reporter_tx_timeout(struct mlx5e_txqsq *sq)
{
- char err_str[MLX5E_TX_REPORTER_PER_SQ_MAX_LEN];
- struct mlx5e_tx_err_ctx err_ctx;
+ struct mlx5e_priv *priv = sq->channel->priv;
+ char err_str[MLX5E_REPORTER_PER_Q_MAX_LEN];
+ struct mlx5e_err_ctx err_ctx;
- err_ctx.sq = sq;
- err_ctx.recover = mlx5e_tx_reporter_timeout_recover;
+ err_ctx.ctx = sq;
+ err_ctx.recover = mlx5e_tx_reporter_timeout_recover;
sprintf(err_str,
"TX timeout on queue: %d, SQ: 0x%x, CQ: 0x%x, SQ Cons: 0x%x SQ Prod: 0x%x, usecs since last trans: %u\n",
sq->channel->ix, sq->sqn, sq->cq.mcq.cqn, sq->cc, sq->pc,
jiffies_to_usecs(jiffies - sq->txq->trans_start));
- return mlx5_tx_health_report(sq->channel->priv->tx_reporter, err_str,
- &err_ctx);
+ return mlx5e_health_report(priv, priv->tx_reporter, err_str, &err_ctx);
}
/* state lock cannot be grabbed within this function.
* It can cause a deadlock or a read-after-free.
*/
-static int mlx5e_tx_reporter_recover_from_ctx(struct mlx5e_tx_err_ctx *err_ctx)
-{
- return err_ctx->recover(err_ctx->sq);
-}
-
-static int mlx5e_tx_reporter_recover_all(struct mlx5e_priv *priv)
+static int mlx5e_tx_reporter_recover_from_ctx(struct mlx5e_err_ctx *err_ctx)
{
- int err = 0;
-
- rtnl_lock();
- mutex_lock(&priv->state_lock);
-
- if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
- goto out;
-
- err = mlx5e_safe_reopen_channels(priv);
-
-out:
- mutex_unlock(&priv->state_lock);
- rtnl_unlock();
-
- return err;
+ return err_ctx->recover(err_ctx->ctx);
}
static int mlx5e_tx_reporter_recover(struct devlink_health_reporter *reporter,
void *context)
{
struct mlx5e_priv *priv = devlink_health_reporter_priv(reporter);
- struct mlx5e_tx_err_ctx *err_ctx = context;
+ struct mlx5e_err_ctx *err_ctx = context;
return err_ctx ? mlx5e_tx_reporter_recover_from_ctx(err_ctx) :
- mlx5e_tx_reporter_recover_all(priv);
+ mlx5e_health_recover_channels(priv);
}
static int
mlx5e_tx_reporter_build_diagnose_output(struct devlink_fmsg *fmsg,
- u32 sqn, u8 state, bool stopped)
+ struct mlx5e_txqsq *sq, int tc)
{
+ struct mlx5e_priv *priv = sq->channel->priv;
+ bool stopped = netif_xmit_stopped(sq->txq);
+ u8 state;
int err;
+ err = mlx5_core_query_sq_state(priv->mdev, sq->sqn, &state);
+ if (err)
+ return err;
+
err = devlink_fmsg_obj_nest_start(fmsg);
if (err)
return err;
- err = devlink_fmsg_u32_pair_put(fmsg, "sqn", sqn);
+ err = devlink_fmsg_u32_pair_put(fmsg, "channel ix", sq->ch_ix);
+ if (err)
+ return err;
+
+ err = devlink_fmsg_u32_pair_put(fmsg, "tc", tc);
+ if (err)
+ return err;
+
+ err = devlink_fmsg_u32_pair_put(fmsg, "txq ix", sq->txq_ix);
+ if (err)
+ return err;
+
+ err = devlink_fmsg_u32_pair_put(fmsg, "sqn", sq->sqn);
if (err)
return err;
@@ -232,6 +185,18 @@ mlx5e_tx_reporter_build_diagnose_output(struct devlink_fmsg *fmsg,
if (err)
return err;
+ err = devlink_fmsg_u32_pair_put(fmsg, "cc", sq->cc);
+ if (err)
+ return err;
+
+ err = devlink_fmsg_u32_pair_put(fmsg, "pc", sq->pc);
+ if (err)
+ return err;
+
+ err = mlx5e_reporter_cq_diagnose(&sq->cq, fmsg);
+ if (err)
+ return err;
+
err = devlink_fmsg_obj_nest_end(fmsg);
if (err)
return err;
@@ -243,31 +208,61 @@ static int mlx5e_tx_reporter_diagnose(struct devlink_health_reporter *reporter,
struct devlink_fmsg *fmsg)
{
struct mlx5e_priv *priv = devlink_health_reporter_priv(reporter);
- int i, err = 0;
+ struct mlx5e_txqsq *generic_sq = priv->txq2sq[0];
+ u32 sq_stride, sq_sz;
+
+ int i, tc, err = 0;
mutex_lock(&priv->state_lock);
if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
goto unlock;
+ sq_sz = mlx5_wq_cyc_get_size(&generic_sq->wq);
+ sq_stride = MLX5_SEND_WQE_BB;
+
+ err = mlx5e_reporter_named_obj_nest_start(fmsg, "Common Config");
+ if (err)
+ goto unlock;
+
+ err = mlx5e_reporter_named_obj_nest_start(fmsg, "SQ");
+ if (err)
+ goto unlock;
+
+ err = devlink_fmsg_u64_pair_put(fmsg, "stride size", sq_stride);
+ if (err)
+ goto unlock;
+
+ err = devlink_fmsg_u32_pair_put(fmsg, "size", sq_sz);
+ if (err)
+ goto unlock;
+
+ err = mlx5e_reporter_cq_common_diagnose(&generic_sq->cq, fmsg);
+ if (err)
+ goto unlock;
+
+ err = mlx5e_reporter_named_obj_nest_end(fmsg);
+ if (err)
+ goto unlock;
+
+ err = mlx5e_reporter_named_obj_nest_end(fmsg);
+ if (err)
+ goto unlock;
+
err = devlink_fmsg_arr_pair_nest_start(fmsg, "SQs");
if (err)
goto unlock;
- for (i = 0; i < priv->channels.num * priv->channels.params.num_tc;
- i++) {
- struct mlx5e_txqsq *sq = priv->txq2sq[i];
- u8 state;
+ for (i = 0; i < priv->channels.num; i++) {
+ struct mlx5e_channel *c = priv->channels.c[i];
- err = mlx5_core_query_sq_state(priv->mdev, sq->sqn, &state);
- if (err)
- goto unlock;
+ for (tc = 0; tc < priv->channels.params.num_tc; tc++) {
+ struct mlx5e_txqsq *sq = &c->sq[tc];
- err = mlx5e_tx_reporter_build_diagnose_output(fmsg, sq->sqn,
- state,
- netif_xmit_stopped(sq->txq));
- if (err)
- goto unlock;
+ err = mlx5e_tx_reporter_build_diagnose_output(fmsg, sq, tc);
+ if (err)
+ goto unlock;
+ }
}
err = devlink_fmsg_arr_pair_nest_end(fmsg);
if (err)
@@ -286,25 +281,30 @@ static const struct devlink_health_reporter_ops mlx5_tx_reporter_ops = {
#define MLX5_REPORTER_TX_GRACEFUL_PERIOD 500
-int mlx5e_tx_reporter_create(struct mlx5e_priv *priv)
+int mlx5e_reporter_tx_create(struct mlx5e_priv *priv)
{
+ struct devlink_health_reporter *reporter;
struct mlx5_core_dev *mdev = priv->mdev;
- struct devlink *devlink = priv_to_devlink(mdev);
+ struct devlink *devlink;
- priv->tx_reporter =
+ devlink = priv_to_devlink(mdev);
+ reporter =
devlink_health_reporter_create(devlink, &mlx5_tx_reporter_ops,
MLX5_REPORTER_TX_GRACEFUL_PERIOD,
true, priv);
- if (IS_ERR(priv->tx_reporter))
+ if (IS_ERR(reporter)) {
netdev_warn(priv->netdev,
"Failed to create tx reporter, err = %ld\n",
- PTR_ERR(priv->tx_reporter));
- return IS_ERR_OR_NULL(priv->tx_reporter);
+ PTR_ERR(reporter));
+ return PTR_ERR(reporter);
+ }
+ priv->tx_reporter = reporter;
+ return 0;
}
-void mlx5e_tx_reporter_destroy(struct mlx5e_priv *priv)
+void mlx5e_reporter_tx_destroy(struct mlx5e_priv *priv)
{
- if (IS_ERR_OR_NULL(priv->tx_reporter))
+ if (!priv->tx_reporter)
return;
devlink_health_reporter_destroy(priv->tx_reporter);
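
The create path now propagates PTR_ERR(reporter) instead of the old IS_ERR_OR_NULL() boolean, so callers see a real errno. A userspace model of the error-pointer convention (ERR_PTR/PTR_ERR/IS_ERR re-implemented here for the demo, with a fake handle value):

#include <stdio.h>
#include <errno.h>
#include <stdint.h>

#define MAX_ERRNO 4095

static void *ERR_PTR(long err)     { return (void *)err; }
static long PTR_ERR(const void *p) { return (long)p; }
static int IS_ERR(const void *p)
{
	/* errnos live in the top 4095 addresses, never valid pointers */
	return (uintptr_t)p >= (uintptr_t)-MAX_ERRNO;
}

static void *reporter_create(int fail)
{
	return fail ? ERR_PTR(-ENOMEM) : (void *)0x1000;	/* fake handle */
}

int main(void)
{
	void *r = reporter_create(1);

	if (IS_ERR(r)) {
		printf("create failed, err = %ld\n", PTR_ERR(r));	/* -12 */
		return (int)-PTR_ERR(r);
	}
	return 0;
}
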
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c
index a6a52806be45..4c4620db3d31 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c
@@ -31,29 +31,36 @@ static int get_route_and_out_devs(struct mlx5e_priv *priv,
real_dev = is_vlan_dev(dev) ? vlan_dev_real_dev(dev) : dev;
uplink_dev = mlx5_eswitch_uplink_get_proto_dev(esw, REP_ETH);
- uplink_upper = netdev_master_upper_dev_get(uplink_dev);
+
+ rcu_read_lock();
+ uplink_upper = netdev_master_upper_dev_get_rcu(uplink_dev);
+ /* mlx5_lag_is_sriov() is a blocking function which can't be called
+ * while holding the RCU read lock. Hold a reference on the net_device
+ * for correctness' sake.
+ */
+ if (uplink_upper)
+ dev_hold(uplink_upper);
+ rcu_read_unlock();
+
dst_is_lag_dev = (uplink_upper &&
netif_is_lag_master(uplink_upper) &&
real_dev == uplink_upper &&
mlx5_lag_is_sriov(priv->mdev));
+ if (uplink_upper)
+ dev_put(uplink_upper);
/* if the egress device isn't on the same HW e-switch or
* it's a LAG device, use the uplink
*/
+ *route_dev = dev;
if (!netdev_port_same_parent_id(priv->netdev, real_dev) ||
- dst_is_lag_dev) {
- *route_dev = dev;
+ dst_is_lag_dev || is_vlan_dev(*route_dev))
*out_dev = uplink_dev;
- } else {
- *route_dev = dev;
- if (is_vlan_dev(*route_dev))
- *out_dev = uplink_dev;
- else if (mlx5e_eswitch_rep(dev) &&
- mlx5e_is_valid_eswitch_fwd_dev(priv, dev))
- *out_dev = *route_dev;
- else
- return -EOPNOTSUPP;
- }
+ else if (mlx5e_eswitch_rep(dev) &&
+ mlx5e_is_valid_eswitch_fwd_dev(priv, dev))
+ *out_dev = *route_dev;
+ else
+ return -EOPNOTSUPP;
if (!(mlx5e_eswitch_rep(*out_dev) &&
mlx5e_is_uplink_rep(netdev_priv(*out_dev))))
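
The tc_tun hunk above takes a reference on the upper netdev inside the RCU read section and drops the lock before calling the blocking mlx5_lag_is_sriov(), releasing the reference once done. A minimal refcount model of that hold-under-lock, use-outside-lock shape (the RCU lock itself is reduced to comments):

#include <stdio.h>

struct dev {
	const char *name;
	int refcnt;
};

static void dev_hold(struct dev *d) { d->refcnt++; }
static void dev_put(struct dev *d)  { d->refcnt--; }

int main(void)
{
	struct dev bond0 = { .name = "bond0", .refcnt = 1 };
	struct dev *upper;

	/* rcu_read_lock(): the lookup result is only stable inside here */
	upper = &bond0;
	if (upper)
		dev_hold(upper);	/* pin before leaving the read section */
	/* rcu_read_unlock() */

	/* blocking calls are legal now; the reference keeps upper alive */
	printf("checking LAG state of %s (refcnt=%d)\n", upper->name, upper->refcnt);

	if (upper)
		dev_put(upper);
	return 0;
}
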
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h b/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h
index ddfe19adb3d9..87be96747902 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h
@@ -6,7 +6,7 @@
#include "en.h"
-#define MLX5E_SQ_NOPS_ROOM MLX5_SEND_WQE_MAX_WQEBBS
+#define MLX5E_SQ_NOPS_ROOM (MLX5_SEND_WQE_MAX_WQEBBS - 1)
#define MLX5E_SQ_STOP_ROOM (MLX5_SEND_WQE_MAX_WQEBBS +\
MLX5E_SQ_NOPS_ROOM)
@@ -117,9 +117,27 @@ mlx5e_notify_hw(struct mlx5_wq_cyc *wq, u16 pc, void __iomem *uar_map,
mlx5_write64((__be32 *)ctrl, uar_map);
}
-static inline bool mlx5e_transport_inline_tx_wqe(struct mlx5e_tx_wqe *wqe)
+static inline bool mlx5e_transport_inline_tx_wqe(struct mlx5_wqe_ctrl_seg *cseg)
{
- return !!wqe->ctrl.tisn;
+ return cseg && !!cseg->tisn;
+}
+
+static inline u8
+mlx5e_tx_wqe_inline_mode(struct mlx5e_txqsq *sq, struct mlx5_wqe_ctrl_seg *cseg,
+ struct sk_buff *skb)
+{
+ u8 mode;
+
+ if (mlx5e_transport_inline_tx_wqe(cseg))
+ return MLX5_INLINE_MODE_TCP_UDP;
+
+ mode = sq->min_inline_mode;
+
+ if (skb_vlan_tag_present(skb) &&
+ test_bit(MLX5E_SQ_STATE_VLAN_NEED_L2_INLINE, &sq->state))
+ mode = max_t(u8, MLX5_INLINE_MODE_L2, mode);
+
+ return mode;
}
static inline void mlx5e_cq_arm(struct mlx5e_cq *cq)
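
mlx5e_tx_wqe_inline_mode() above promotes the inline mode to at least L2 when the skb carries a VLAN tag and the SQ has MLX5E_SQ_STATE_VLAN_NEED_L2_INLINE set, i.e. when HW cannot insert the VLAN header itself. The max_t() promotion in a runnable form (enum values mirror the MLX5_INLINE_MODE_* ordering):

#include <stdio.h>

enum { INLINE_MODE_NONE, INLINE_MODE_L2, INLINE_MODE_IP, INLINE_MODE_TCP_UDP };

static int tx_inline_mode(int min_inline_mode, int vlan_present, int need_l2)
{
	int mode = min_inline_mode;

	if (vlan_present && need_l2)
		mode = mode > INLINE_MODE_L2 ? mode : INLINE_MODE_L2; /* max_t() */
	return mode;
}

int main(void)
{
	printf("%d\n", tx_inline_mode(INLINE_MODE_NONE, 1, 1)); /* promoted to 1 (L2) */
	printf("%d\n", tx_inline_mode(INLINE_MODE_IP, 1, 1));   /* stays 2 (IP) */
	return 0;
}
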
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
index b0b982cf69bb..1ed5c33e022f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
@@ -179,33 +179,19 @@ static void mlx5e_xdp_mpwqe_session_start(struct mlx5e_xdpsq *sq)
struct mlx5e_xdp_mpwqe *session = &sq->mpwqe;
struct mlx5e_xdpsq_stats *stats = sq->stats;
struct mlx5_wq_cyc *wq = &sq->wq;
- u8 wqebbs;
- u16 pi;
-
- mlx5e_xdpsq_fetch_wqe(sq, &session->wqe);
-
- prefetchw(session->wqe->data);
- session->ds_count = MLX5E_XDP_TX_EMPTY_DS_COUNT;
- session->pkt_count = 0;
- session->complete = 0;
+ u16 pi, contig_wqebbs;
pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
+ contig_wqebbs = mlx5_wq_cyc_get_contig_wqebbs(wq, pi);
-/* The mult of MLX5_SEND_WQE_MAX_WQEBBS * MLX5_SEND_WQEBB_NUM_DS
- * (16 * 4 == 64) does not fit in the 6-bit DS field of Ctrl Segment.
- * We use a bound lower that MLX5_SEND_WQE_MAX_WQEBBS to let a
- * full-session WQE be cache-aligned.
- */
-#if L1_CACHE_BYTES < 128
-#define MLX5E_XDP_MPW_MAX_WQEBBS (MLX5_SEND_WQE_MAX_WQEBBS - 1)
-#else
-#define MLX5E_XDP_MPW_MAX_WQEBBS (MLX5_SEND_WQE_MAX_WQEBBS - 2)
-#endif
+ if (unlikely(contig_wqebbs < MLX5_SEND_WQE_MAX_WQEBBS))
+ mlx5e_fill_xdpsq_frag_edge(sq, wq, pi, contig_wqebbs);
- wqebbs = min_t(u16, mlx5_wq_cyc_get_contig_wqebbs(wq, pi),
- MLX5E_XDP_MPW_MAX_WQEBBS);
+ session->wqe = mlx5e_xdpsq_fetch_wqe(sq, &pi);
- session->max_ds_count = MLX5_SEND_WQEBB_NUM_DS * wqebbs;
+ prefetchw(session->wqe->data);
+ session->ds_count = MLX5E_XDP_TX_EMPTY_DS_COUNT;
+ session->pkt_count = 0;
mlx5e_xdp_update_inline_state(sq);
@@ -244,7 +230,7 @@ static int mlx5e_xmit_xdp_frame_check_mpwqe(struct mlx5e_xdpsq *sq)
{
if (unlikely(!sq->mpwqe.wqe)) {
if (unlikely(!mlx5e_wqc_has_room_for(&sq->wq, sq->cc, sq->pc,
- MLX5_SEND_WQE_MAX_WQEBBS))) {
+ MLX5E_XDPSQ_STOP_ROOM))) {
/* SQ is full, ring doorbell */
mlx5e_xmit_xdp_doorbell(sq);
sq->stats->full++;
@@ -285,8 +271,8 @@ static bool mlx5e_xmit_xdp_frame_mpwqe(struct mlx5e_xdpsq *sq,
mlx5e_xdp_mpwqe_add_dseg(sq, xdptxd, stats);
- if (unlikely(session->complete ||
- session->ds_count == session->max_ds_count))
+ if (unlikely(mlx5e_xdp_no_room_for_inline_pkt(session) ||
+ session->ds_count == MLX5E_XDP_MPW_MAX_NUM_DS))
mlx5e_xdp_mpwqe_complete(sq);
mlx5e_xdpi_fifo_push(&sq->db.xdpi_fifo, xdpi);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h
index b90923932668..36ac1e3816b9 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h
@@ -40,6 +40,26 @@
(sizeof(struct mlx5e_tx_wqe) / MLX5_SEND_WQE_DS)
#define MLX5E_XDP_TX_DS_COUNT (MLX5E_XDP_TX_EMPTY_DS_COUNT + 1 /* SG DS */)
+#define MLX5E_XDPSQ_STOP_ROOM (MLX5E_SQ_STOP_ROOM)
+
+#define MLX5E_XDP_INLINE_WQE_SZ_THRSD (256 - sizeof(struct mlx5_wqe_inline_seg))
+#define MLX5E_XDP_INLINE_WQE_MAX_DS_CNT \
+ DIV_ROUND_UP(MLX5E_XDP_INLINE_WQE_SZ_THRSD, MLX5_SEND_WQE_DS)
+
+/* The mult of MLX5_SEND_WQE_MAX_WQEBBS * MLX5_SEND_WQEBB_NUM_DS
+ * (16 * 4 == 64) does not fit in the 6-bit DS field of Ctrl Segment.
+ * We use a bound lower than MLX5_SEND_WQE_MAX_WQEBBS to let a
+ * full-session WQE be cache-aligned.
+ */
+#if L1_CACHE_BYTES < 128
+#define MLX5E_XDP_MPW_MAX_WQEBBS (MLX5_SEND_WQE_MAX_WQEBBS - 1)
+#else
+#define MLX5E_XDP_MPW_MAX_WQEBBS (MLX5_SEND_WQE_MAX_WQEBBS - 2)
+#endif
+
+#define MLX5E_XDP_MPW_MAX_NUM_DS \
+ (MLX5E_XDP_MPW_MAX_WQEBBS * MLX5_SEND_WQEBB_NUM_DS)
+
struct mlx5e_xsk_param;
int mlx5e_xdp_max_mtu(struct mlx5e_params *params, struct mlx5e_xsk_param *xsk);
bool mlx5e_xdp_handle(struct mlx5e_rq *rq, struct mlx5e_dma_info *di,
@@ -114,6 +134,30 @@ static inline void mlx5e_xdp_update_inline_state(struct mlx5e_xdpsq *sq)
session->inline_on = 1;
}
+static inline bool
+mlx5e_xdp_no_room_for_inline_pkt(struct mlx5e_xdp_mpwqe *session)
+{
+ return session->inline_on &&
+ session->ds_count + MLX5E_XDP_INLINE_WQE_MAX_DS_CNT > MLX5E_XDP_MPW_MAX_NUM_DS;
+}
+
+static inline void
+mlx5e_fill_xdpsq_frag_edge(struct mlx5e_xdpsq *sq, struct mlx5_wq_cyc *wq,
+ u16 pi, u16 nnops)
+{
+ struct mlx5e_xdp_wqe_info *edge_wi, *wi = &sq->db.wqe_info[pi];
+
+ edge_wi = wi + nnops;
+ /* fill sq frag edge with nops to avoid wqe wrapping two pages */
+ for (; wi < edge_wi; wi++) {
+ wi->num_wqebbs = 1;
+ wi->num_pkts = 0;
+ mlx5e_post_nop(wq, sq->sqn, &sq->pc);
+ }
+
+ sq->stats->nops += nnops;
+}
+
static inline void
mlx5e_xdp_mpwqe_add_dseg(struct mlx5e_xdpsq *sq,
struct mlx5e_xdp_xmit_data *xdptxd,
@@ -126,20 +170,12 @@ mlx5e_xdp_mpwqe_add_dseg(struct mlx5e_xdpsq *sq,
session->pkt_count++;
-#define MLX5E_XDP_INLINE_WQE_SZ_THRSD (256 - sizeof(struct mlx5_wqe_inline_seg))
-
if (session->inline_on && dma_len <= MLX5E_XDP_INLINE_WQE_SZ_THRSD) {
struct mlx5_wqe_inline_seg *inline_dseg =
(struct mlx5_wqe_inline_seg *)dseg;
u16 ds_len = sizeof(*inline_dseg) + dma_len;
u16 ds_cnt = DIV_ROUND_UP(ds_len, MLX5_SEND_WQE_DS);
- if (unlikely(session->ds_count + ds_cnt > session->max_ds_count)) {
- /* Not enough space for inline wqe, send with memory pointer */
- session->complete = true;
- goto no_inline;
- }
-
inline_dseg->byte_count = cpu_to_be32(dma_len | MLX5_INLINE_SEG);
memcpy(inline_dseg->data, xdptxd->data, dma_len);
@@ -148,21 +184,23 @@ mlx5e_xdp_mpwqe_add_dseg(struct mlx5e_xdpsq *sq,
return;
}
-no_inline:
dseg->addr = cpu_to_be64(xdptxd->dma_addr);
dseg->byte_count = cpu_to_be32(dma_len);
dseg->lkey = sq->mkey_be;
session->ds_count++;
}
-static inline void mlx5e_xdpsq_fetch_wqe(struct mlx5e_xdpsq *sq,
- struct mlx5e_tx_wqe **wqe)
+static inline struct mlx5e_tx_wqe *
+mlx5e_xdpsq_fetch_wqe(struct mlx5e_xdpsq *sq, u16 *pi)
{
struct mlx5_wq_cyc *wq = &sq->wq;
- u16 pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
+ struct mlx5e_tx_wqe *wqe;
+
+ *pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
+ wqe = mlx5_wq_cyc_get_wqe(wq, *pi);
+ memset(wqe, 0, sizeof(*wqe));
- *wqe = mlx5_wq_cyc_get_wqe(wq, pi);
- memset(*wqe, 0, sizeof(**wqe));
+ return wqe;
}
static inline void
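
The new MLX5E_XDP_MPW_MAX_NUM_DS bound exists because a full session of 16 WQEBBs x 4 DS = 64 descriptors overflows the 6-bit ds_count field (maximum 63), and one or two WQEBBs are shaved off so a full session stays cache-aligned. The arithmetic, checked in a few lines (L1_CACHE_BYTES fixed at 64 for the demo):

#include <stdio.h>

#define SEND_WQE_MAX_WQEBBS	16
#define SEND_WQEBB_NUM_DS	4
#define L1_CACHE_BYTES		64	/* assumption for the demo */

#if L1_CACHE_BYTES < 128
#define XDP_MPW_MAX_WQEBBS	(SEND_WQE_MAX_WQEBBS - 1)
#else
#define XDP_MPW_MAX_WQEBBS	(SEND_WQE_MAX_WQEBBS - 2)
#endif

int main(void)
{
	int max_ds = XDP_MPW_MAX_WQEBBS * SEND_WQEBB_NUM_DS;

	printf("full session: %d DS (exceeds 6-bit max of 63)\n",
	       SEND_WQE_MAX_WQEBBS * SEND_WQEBB_NUM_DS);	/* 64 */
	printf("bounded session: %d DS\n", max_ds);		/* 60 */
	return 0;
}
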
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c
index 7f78c004d12f..d360750b25b7 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c
@@ -60,24 +60,28 @@ int mlx5e_open_xsk(struct mlx5e_priv *priv, struct mlx5e_params *params,
struct mlx5e_xsk_param *xsk, struct xdp_umem *umem,
struct mlx5e_channel *c)
{
- struct mlx5e_channel_param cparam = {};
+ struct mlx5e_channel_param *cparam;
struct dim_cq_moder icocq_moder = {};
int err;
if (!mlx5e_validate_xsk_param(params, xsk, priv->mdev))
return -EINVAL;
- mlx5e_build_xsk_cparam(priv, params, xsk, &cparam);
+ cparam = kvzalloc(sizeof(*cparam), GFP_KERNEL);
+ if (!cparam)
+ return -ENOMEM;
- err = mlx5e_open_cq(c, params->rx_cq_moderation, &cparam.rx_cq, &c->xskrq.cq);
+ mlx5e_build_xsk_cparam(priv, params, xsk, cparam);
+
+ err = mlx5e_open_cq(c, params->rx_cq_moderation, &cparam->rx_cq, &c->xskrq.cq);
if (unlikely(err))
- return err;
+ goto err_free_cparam;
- err = mlx5e_open_rq(c, params, &cparam.rq, xsk, umem, &c->xskrq);
+ err = mlx5e_open_rq(c, params, &cparam->rq, xsk, umem, &c->xskrq);
if (unlikely(err))
goto err_close_rx_cq;
- err = mlx5e_open_cq(c, params->tx_cq_moderation, &cparam.tx_cq, &c->xsksq.cq);
+ err = mlx5e_open_cq(c, params->tx_cq_moderation, &cparam->tx_cq, &c->xsksq.cq);
if (unlikely(err))
goto err_close_rq;
@@ -87,21 +91,23 @@ int mlx5e_open_xsk(struct mlx5e_priv *priv, struct mlx5e_params *params,
* is disabled and then reenabled, but the SQ continues receiving CQEs
* from the old UMEM.
*/
- err = mlx5e_open_xdpsq(c, params, &cparam.xdp_sq, umem, &c->xsksq, true);
+ err = mlx5e_open_xdpsq(c, params, &cparam->xdp_sq, umem, &c->xsksq, true);
if (unlikely(err))
goto err_close_tx_cq;
- err = mlx5e_open_cq(c, icocq_moder, &cparam.icosq_cq, &c->xskicosq.cq);
+ err = mlx5e_open_cq(c, icocq_moder, &cparam->icosq_cq, &c->xskicosq.cq);
if (unlikely(err))
goto err_close_sq;
/* Create a dedicated SQ for posting NOPs whenever we need an IRQ to be
* triggered and NAPI to be called on the correct CPU.
*/
- err = mlx5e_open_icosq(c, params, &cparam.icosq, &c->xskicosq);
+ err = mlx5e_open_icosq(c, params, &cparam->icosq, &c->xskicosq);
if (unlikely(err))
goto err_close_icocq;
+ kvfree(cparam);
+
spin_lock_init(&c->xskicosq_lock);
set_bit(MLX5E_CHANNEL_STATE_XSK, c->state);
@@ -123,6 +129,9 @@ err_close_rq:
err_close_rx_cq:
mlx5e_close_cq(&c->xskrq.cq);
+err_free_cparam:
+ kvfree(cparam);
+
return err;
}
@@ -141,6 +150,7 @@ void mlx5e_close_xsk(struct mlx5e_channel *c)
void mlx5e_activate_xsk(struct mlx5e_channel *c)
{
+ mlx5e_activate_icosq(&c->xskicosq);
set_bit(MLX5E_RQ_STATE_ENABLED, &c->xskrq.state);
/* TX queue is created active. */
@@ -153,6 +163,7 @@ void mlx5e_deactivate_xsk(struct mlx5e_channel *c)
{
mlx5e_deactivate_rq(&c->xskrq);
/* TX queue is disabled on close. */
+ mlx5e_deactivate_icosq(&c->xskicosq);
}
static int mlx5e_redirect_xsk_rqt(struct mlx5e_priv *priv, u16 ix, u32 rqn)
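
mlx5e_open_xsk() above moves struct mlx5e_channel_param off the kernel stack onto the heap with kvzalloc(), since the struct is too large for a stack frame, and frees it on every exit path. A tiny userspace analogue of that pattern (the 4 KiB size is made up):

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

/* big enough that a kernel stack frame (often 8-16 KiB total) can't hold it */
struct channel_param { char blob[4096]; };

int main(void)
{
	struct channel_param *cparam = calloc(1, sizeof(*cparam)); /* kvzalloc() analogue */

	if (!cparam)
		return ENOMEM;

	printf("allocated %zu bytes off-stack\n", sizeof(*cparam));
	free(cparam);	/* kvfree() on both the success and error paths */
	return 0;
}
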
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/tx.c
index 35e188cf4ea4..fd2c75b4b519 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/tx.c
@@ -26,6 +26,13 @@ int mlx5e_xsk_async_xmit(struct net_device *dev, u32 qid)
return -ENXIO;
if (!napi_if_scheduled_mark_missed(&c->napi)) {
+ /* To avoid WQE overrun, don't post a NOP if XSKICOSQ is not
+ * active and not polled by NAPI. Return 0, because the upcoming
+ * activate will trigger the IRQ for us.
+ */
+ if (unlikely(!test_bit(MLX5E_SQ_STATE_ENABLED, &c->xskicosq.state)))
+ return 0;
+
spin_lock(&c->xskicosq_lock);
mlx5e_trigger_irq(&c->xskicosq);
spin_unlock(&c->xskicosq_lock);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_common.c b/drivers/net/ethernet/mellanox/mlx5/core/en_common.c
index 1539cf3de5dc..f7890e0ce96c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_common.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_common.c
@@ -180,15 +180,3 @@ out:
return err;
}
-
-u8 mlx5e_params_calculate_tx_min_inline(struct mlx5_core_dev *mdev)
-{
- u8 min_inline_mode;
-
- mlx5_query_min_inline(mdev, &min_inline_mode);
- if (min_inline_mode == MLX5_INLINE_MODE_NONE &&
- !MLX5_CAP_ETH(mdev, wqe_vlan_insert))
- min_inline_mode = MLX5_INLINE_MODE_L2;
-
- return min_inline_mode;
-}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c b/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
index 8dd31b5c740c..01f2918063af 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
@@ -1101,7 +1101,7 @@ void mlx5e_dcbnl_delete_app(struct mlx5e_priv *priv)
static void mlx5e_trust_update_tx_min_inline_mode(struct mlx5e_priv *priv,
struct mlx5e_params *params)
{
- params->tx_min_inline_mode = mlx5e_params_calculate_tx_min_inline(priv->mdev);
+ mlx5_query_min_inline(priv->mdev, &params->tx_min_inline_mode);
if (priv->dcbx_dp.trust_state == MLX5_QPTS_TRUST_DSCP &&
params->tx_min_inline_mode == MLX5_INLINE_MODE_L2)
params->tx_min_inline_mode = MLX5_INLINE_MODE_IP;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
index 20e628c907e5..7347d673f448 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
@@ -1958,21 +1958,27 @@ static u32 mlx5e_get_priv_flags(struct net_device *netdev)
return priv->channels.params.pflags;
}
-#ifndef CONFIG_MLX5_EN_RXNFC
-/* When CONFIG_MLX5_EN_RXNFC=n we only support ETHTOOL_GRXRINGS
- * otherwise this function will be defined from en_fs_ethtool.c
- */
static int mlx5e_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info, u32 *rule_locs)
{
struct mlx5e_priv *priv = netdev_priv(dev);
- if (info->cmd != ETHTOOL_GRXRINGS)
- return -EOPNOTSUPP;
- /* ring_count is needed by ethtool -x */
- info->data = priv->channels.params.num_channels;
- return 0;
+ /* ETHTOOL_GRXRINGS is needed by ethtool -x which is not part
+ * of rxnfc. We keep this logic out of mlx5e_ethtool_get_rxnfc,
+ * to avoid breaking "ethtool -x" when mlx5e_ethtool_get_rxnfc
+ * is compiled out via CONFIG_MLX5_EN_RXNFC=n.
+ */
+ if (info->cmd == ETHTOOL_GRXRINGS) {
+ info->data = priv->channels.params.num_channels;
+ return 0;
+ }
+
+ return mlx5e_ethtool_get_rxnfc(dev, info, rule_locs);
+}
+
+static int mlx5e_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
+{
+ return mlx5e_ethtool_set_rxnfc(dev, cmd);
}
-#endif
const struct ethtool_ops mlx5e_ethtool_ops = {
.get_drvinfo = mlx5e_get_drvinfo,
@@ -1993,9 +1999,7 @@ const struct ethtool_ops mlx5e_ethtool_ops = {
.get_rxfh = mlx5e_get_rxfh,
.set_rxfh = mlx5e_set_rxfh,
.get_rxnfc = mlx5e_get_rxnfc,
-#ifdef CONFIG_MLX5_EN_RXNFC
.set_rxnfc = mlx5e_set_rxnfc,
-#endif
.get_tunable = mlx5e_get_tunable,
.set_tunable = mlx5e_set_tunable,
.get_pauseparam = mlx5e_get_pauseparam,
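
The ethtool hunk above answers ETHTOOL_GRXRINGS before dispatching to the rxnfc backend, so "ethtool -x" keeps working even when CONFIG_MLX5_EN_RXNFC=n compiles the backend down to an -EOPNOTSUPP stub. A runnable model of that split (CONFIG_DEMO_RXNFC stands in for the Kconfig symbol):

#include <errno.h>
#include <stdio.h>

#define ETHTOOL_GRXRINGS 0x2d	/* same value as the uapi header */
#define CONFIG_DEMO_RXNFC 0	/* models CONFIG_MLX5_EN_RXNFC=n */

static int ethtool_get_rxnfc_backend(unsigned int cmd, unsigned int *data)
{
#if CONFIG_DEMO_RXNFC
	/* real classification-rule handling would live here */
	return 0;
#else
	(void)cmd; (void)data;
	return -EOPNOTSUPP;	/* static inline stub when compiled out */
#endif
}

static int get_rxnfc(unsigned int cmd, unsigned int *data)
{
	if (cmd == ETHTOOL_GRXRINGS) {	/* needed by "ethtool -x" regardless */
		*data = 8;		/* channel count stand-in */
		return 0;
	}
	return ethtool_get_rxnfc_backend(cmd, data);
}

int main(void)
{
	unsigned int rings = 0;

	printf("GRXRINGS -> %d, rings=%u\n", get_rxnfc(ETHTOOL_GRXRINGS, &rings), rings);
	printf("other    -> %d\n", get_rxnfc(0x2f, &rings));	/* -EOPNOTSUPP */
	return 0;
}
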
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c
index 94304abc49e9..eed7101e8bb7 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c
@@ -888,10 +888,10 @@ static int mlx5e_get_rss_hash_opt(struct mlx5e_priv *priv,
return 0;
}
-int mlx5e_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
+int mlx5e_ethtool_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
{
- int err = 0;
struct mlx5e_priv *priv = netdev_priv(dev);
+ int err = 0;
switch (cmd->cmd) {
case ETHTOOL_SRXCLSRLINS:
@@ -911,16 +911,13 @@ int mlx5e_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
return err;
}
-int mlx5e_get_rxnfc(struct net_device *dev,
- struct ethtool_rxnfc *info, u32 *rule_locs)
+int mlx5e_ethtool_get_rxnfc(struct net_device *dev,
+ struct ethtool_rxnfc *info, u32 *rule_locs)
{
struct mlx5e_priv *priv = netdev_priv(dev);
int err = 0;
switch (info->cmd) {
- case ETHTOOL_GRXRINGS:
- info->data = priv->channels.params.num_channels;
- break;
case ETHTOOL_GRXCLSRLCNT:
info->rule_cnt = priv->fs.ethtool.tot_num_rules;
break;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index 9d5f6e56188f..8592b98d0e70 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -56,12 +56,13 @@
#include "en/xdp.h"
#include "lib/eq.h"
#include "en/monitor_stats.h"
-#include "en/reporter.h"
+#include "en/health.h"
#include "en/params.h"
#include "en/xsk/umem.h"
#include "en/xsk/setup.h"
#include "en/xsk/rx.h"
#include "en/xsk/tx.h"
+#include "en/hv_vhca_stats.h"
bool mlx5e_check_fragmented_striding_rq_cap(struct mlx5_core_dev *mdev)
@@ -247,26 +248,6 @@ static inline void mlx5e_build_umr_wqe(struct mlx5e_rq *rq,
ucseg->mkey_mask = cpu_to_be64(MLX5_MKEY_MASK_FREE);
}
-static u32 mlx5e_rqwq_get_size(struct mlx5e_rq *rq)
-{
- switch (rq->wq_type) {
- case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
- return mlx5_wq_ll_get_size(&rq->mpwqe.wq);
- default:
- return mlx5_wq_cyc_get_size(&rq->wqe.wq);
- }
-}
-
-static u32 mlx5e_rqwq_get_cur_sz(struct mlx5e_rq *rq)
-{
- switch (rq->wq_type) {
- case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
- return rq->mpwqe.wq.cur_sz;
- default:
- return rq->wqe.wq.cur_sz;
- }
-}
-
static int mlx5e_rq_alloc_mpwqe_info(struct mlx5e_rq *rq,
struct mlx5e_channel *c)
{
@@ -382,6 +363,13 @@ static void mlx5e_free_di_list(struct mlx5e_rq *rq)
kvfree(rq->wqe.di);
}
+static void mlx5e_rq_err_cqe_work(struct work_struct *recover_work)
+{
+ struct mlx5e_rq *rq = container_of(recover_work, struct mlx5e_rq, recover_work);
+
+ mlx5e_reporter_rq_cqe_err(rq);
+}
+
static int mlx5e_alloc_rq(struct mlx5e_channel *c,
struct mlx5e_params *params,
struct mlx5e_xsk_param *xsk,
@@ -418,6 +406,7 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c,
rq->stats = &c->priv->channel_stats[c->ix].xskrq;
else
rq->stats = &c->priv->channel_stats[c->ix].rq;
+ INIT_WORK(&rq->recover_work, mlx5e_rq_err_cqe_work);
rq->xdp_prog = params->xdp_prog ? bpf_prog_inc(params->xdp_prog) : NULL;
if (IS_ERR(rq->xdp_prog)) {
@@ -720,8 +709,7 @@ static int mlx5e_create_rq(struct mlx5e_rq *rq,
return err;
}
-static int mlx5e_modify_rq_state(struct mlx5e_rq *rq, int curr_state,
- int next_state)
+int mlx5e_modify_rq_state(struct mlx5e_rq *rq, int curr_state, int next_state)
{
struct mlx5_core_dev *mdev = rq->mdev;
@@ -829,10 +817,11 @@ int mlx5e_wait_for_min_rx_wqes(struct mlx5e_rq *rq, int wait_time)
netdev_warn(c->netdev, "Failed to get min RX wqes on Channel[%d] RQN[0x%x] wq cur_sz(%d) min_rx_wqes(%d)\n",
c->ix, rq->rqn, mlx5e_rqwq_get_cur_sz(rq), min_wqes);
+ mlx5e_reporter_rx_timeout(rq);
return -ETIMEDOUT;
}
-static void mlx5e_free_rx_descs(struct mlx5e_rq *rq)
+void mlx5e_free_rx_descs(struct mlx5e_rq *rq)
{
__be16 wqe_ix_be;
u16 wqe_ix;
@@ -911,7 +900,7 @@ err_free_rq:
return err;
}
-static void mlx5e_activate_rq(struct mlx5e_rq *rq)
+void mlx5e_activate_rq(struct mlx5e_rq *rq)
{
set_bit(MLX5E_RQ_STATE_ENABLED, &rq->state);
mlx5e_trigger_irq(&rq->channel->icosq);
@@ -926,6 +915,8 @@ void mlx5e_deactivate_rq(struct mlx5e_rq *rq)
void mlx5e_close_rq(struct mlx5e_rq *rq)
{
cancel_work_sync(&rq->dim.work);
+ cancel_work_sync(&rq->channel->icosq.recover_work);
+ cancel_work_sync(&rq->recover_work);
mlx5e_destroy_rq(rq);
mlx5e_free_rx_descs(rq);
mlx5e_free_rq(rq);
@@ -1042,6 +1033,14 @@ static int mlx5e_alloc_icosq_db(struct mlx5e_icosq *sq, int numa)
return 0;
}
+static void mlx5e_icosq_err_cqe_work(struct work_struct *recover_work)
+{
+ struct mlx5e_icosq *sq = container_of(recover_work, struct mlx5e_icosq,
+ recover_work);
+
+ mlx5e_reporter_icosq_cqe_err(sq);
+}
+
static int mlx5e_alloc_icosq(struct mlx5e_channel *c,
struct mlx5e_sq_param *param,
struct mlx5e_icosq *sq)
@@ -1064,6 +1063,8 @@ static int mlx5e_alloc_icosq(struct mlx5e_channel *c,
if (err)
goto err_sq_wq_destroy;
+ INIT_WORK(&sq->recover_work, mlx5e_icosq_err_cqe_work);
+
return 0;
err_sq_wq_destroy:
@@ -1130,6 +1131,8 @@ static int mlx5e_alloc_txqsq(struct mlx5e_channel *c,
sq->stats = &c->priv->channel_stats[c->ix].sq[tc];
sq->stop_room = MLX5E_SQ_STOP_ROOM;
INIT_WORK(&sq->recover_work, mlx5e_tx_err_cqe_work);
+ if (!MLX5_CAP_ETH(mdev, wqe_vlan_insert))
+ set_bit(MLX5E_SQ_STATE_VLAN_NEED_L2_INLINE, &sq->state);
if (MLX5_IPSEC_DEV(c->priv->mdev))
set_bit(MLX5E_SQ_STATE_IPSEC, &sq->state);
if (mlx5_accel_is_tls_device(c->priv->mdev)) {
@@ -1377,7 +1380,7 @@ static void mlx5e_tx_err_cqe_work(struct work_struct *recover_work)
struct mlx5e_txqsq *sq = container_of(recover_work, struct mlx5e_txqsq,
recover_work);
- mlx5e_tx_reporter_err_cqe(sq);
+ mlx5e_reporter_tx_err_cqe(sq);
}
int mlx5e_open_icosq(struct mlx5e_channel *c, struct mlx5e_params *params,
@@ -1393,7 +1396,6 @@ int mlx5e_open_icosq(struct mlx5e_channel *c, struct mlx5e_params *params,
csp.cqn = sq->cq.mcq.cqn;
csp.wq_ctrl = &sq->wq_ctrl;
csp.min_inline_mode = params->tx_min_inline_mode;
- set_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
err = mlx5e_create_sq_rdy(c->mdev, param, &csp, &sq->sqn);
if (err)
goto err_free_icosq;
@@ -1407,12 +1409,22 @@ err_free_icosq:
return err;
}
-void mlx5e_close_icosq(struct mlx5e_icosq *sq)
+void mlx5e_activate_icosq(struct mlx5e_icosq *icosq)
{
- struct mlx5e_channel *c = sq->channel;
+ set_bit(MLX5E_SQ_STATE_ENABLED, &icosq->state);
+}
- clear_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
+void mlx5e_deactivate_icosq(struct mlx5e_icosq *icosq)
+{
+ struct mlx5e_channel *c = icosq->channel;
+
+ clear_bit(MLX5E_SQ_STATE_ENABLED, &icosq->state);
napi_synchronize(&c->napi);
+}
+
+void mlx5e_close_icosq(struct mlx5e_icosq *sq)
+{
+ struct mlx5e_channel *c = sq->channel;
mlx5e_destroy_sq(c->mdev, sq->sqn);
mlx5e_free_icosq(sq);
@@ -1989,6 +2001,7 @@ static void mlx5e_activate_channel(struct mlx5e_channel *c)
for (tc = 0; tc < c->num_tc; tc++)
mlx5e_activate_txqsq(&c->sq[tc]);
+ mlx5e_activate_icosq(&c->icosq);
mlx5e_activate_rq(&c->rq);
netif_set_xps_queue(c->netdev, c->xps_cpumask, c->ix);
@@ -2004,6 +2017,7 @@ static void mlx5e_deactivate_channel(struct mlx5e_channel *c)
mlx5e_deactivate_xsk(c);
mlx5e_deactivate_rq(&c->rq);
+ mlx5e_deactivate_icosq(&c->icosq);
for (tc = 0; tc < c->num_tc; tc++)
mlx5e_deactivate_txqsq(&c->sq[tc]);
}
@@ -2321,10 +2335,7 @@ int mlx5e_open_channels(struct mlx5e_priv *priv,
goto err_close_channels;
}
- if (!IS_ERR_OR_NULL(priv->tx_reporter))
- devlink_health_reporter_state_update(priv->tx_reporter,
- DEVLINK_HEALTH_REPORTER_STATE_HEALTHY);
-
+ mlx5e_health_channels_update(priv);
kvfree(cparam);
return 0;
@@ -3199,7 +3210,6 @@ static void mlx5e_cleanup_nic_tx(struct mlx5e_priv *priv)
{
int tc;
- mlx5e_tx_reporter_destroy(priv);
for (tc = 0; tc < priv->profile->max_tc; tc++)
mlx5e_destroy_tis(priv->mdev, priv->tisn[tc]);
}
@@ -3422,7 +3432,7 @@ out:
#ifdef CONFIG_MLX5_ESWITCH
static int mlx5e_setup_tc_cls_flower(struct mlx5e_priv *priv,
struct flow_cls_offload *cls_flower,
- int flags)
+ unsigned long flags)
{
switch (cls_flower->command) {
case FLOW_CLS_REPLACE:
@@ -3442,12 +3452,12 @@ static int mlx5e_setup_tc_cls_flower(struct mlx5e_priv *priv,
static int mlx5e_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
void *cb_priv)
{
+ unsigned long flags = MLX5_TC_FLAG(INGRESS) | MLX5_TC_FLAG(NIC_OFFLOAD);
struct mlx5e_priv *priv = cb_priv;
switch (type) {
case TC_SETUP_CLSFLOWER:
- return mlx5e_setup_tc_cls_flower(priv, type_data, MLX5E_TC_INGRESS |
- MLX5E_TC_NIC_OFFLOAD);
+ return mlx5e_setup_tc_cls_flower(priv, type_data, flags);
default:
return -EOPNOTSUPP;
}
@@ -3460,10 +3470,12 @@ static int mlx5e_setup_tc(struct net_device *dev, enum tc_setup_type type,
void *type_data)
{
struct mlx5e_priv *priv = netdev_priv(dev);
+ struct flow_block_offload *f = type_data;
switch (type) {
#ifdef CONFIG_MLX5_ESWITCH
case TC_SETUP_BLOCK:
+ f->unlocked_driver_cb = true;
return flow_block_cb_setup_simple(type_data,
&mlx5e_block_cb_list,
mlx5e_setup_tc_block_cb,
@@ -3640,7 +3652,7 @@ static int set_feature_tc_num_filters(struct net_device *netdev, bool enable)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
- if (!enable && mlx5e_tc_num_filters(priv, MLX5E_TC_NIC_OFFLOAD)) {
+ if (!enable && mlx5e_tc_num_filters(priv, MLX5_TC_FLAG(NIC_OFFLOAD))) {
netdev_err(netdev,
"Active offloaded tc filters, can't turn hw_tc_offload off\n");
return -EINVAL;
@@ -3781,9 +3793,10 @@ static netdev_features_t mlx5e_fix_features(struct net_device *netdev,
netdev_warn(netdev, "Dropping C-tag vlan stripping offload due to S-tag vlan\n");
}
if (!MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_STRIDING_RQ)) {
- features &= ~NETIF_F_LRO;
- if (params->lro_en)
+ if (features & NETIF_F_LRO) {
netdev_warn(netdev, "Disabling LRO, not supported in legacy RQ\n");
+ features &= ~NETIF_F_LRO;
+ }
}
if (MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_CQE_COMPRESS)) {
@@ -3950,7 +3963,8 @@ int mlx5e_hwstamp_set(struct mlx5e_priv *priv, struct ifreq *ifr)
case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
case HWTSTAMP_FILTER_NTP_ALL:
/* Disable CQE compression */
- netdev_warn(priv->netdev, "Disabling cqe compression");
+ if (MLX5E_GET_PFLAG(&priv->channels.params, MLX5E_PFLAG_RX_CQE_COMPRESS))
+ netdev_warn(priv->netdev, "Disabling RX cqe compression\n");
err = mlx5e_modify_rx_cqe_compression_locked(priv, false);
if (err) {
netdev_err(priv->netdev, "Failed disabling cqe compression err=%d\n", err);
@@ -4267,7 +4281,7 @@ static void mlx5e_tx_timeout_work(struct work_struct *work)
if (!netif_xmit_stopped(dev_queue))
continue;
- if (mlx5e_tx_reporter_timeout(sq))
+ if (mlx5e_reporter_tx_timeout(sq))
report_failed = true;
}
@@ -4768,7 +4782,7 @@ void mlx5e_build_nic_params(struct mlx5_core_dev *mdev,
mlx5e_set_tx_cq_mode_params(params, MLX5_CQ_PERIOD_MODE_START_FROM_EQE);
/* TX inline */
- params->tx_min_inline_mode = mlx5e_params_calculate_tx_min_inline(mdev);
+ mlx5_query_min_inline(mdev, &params->tx_min_inline_mode);
/* RSS */
mlx5e_build_rss_params(rss_params, params->num_channels);
@@ -4965,12 +4979,14 @@ static int mlx5e_nic_init(struct mlx5_core_dev *mdev,
mlx5_core_err(mdev, "TLS initialization failed, %d\n", err);
mlx5e_build_nic_netdev(netdev);
mlx5e_build_tc2txq_maps(priv);
+ mlx5e_health_create_reporters(priv);
return 0;
}
static void mlx5e_nic_cleanup(struct mlx5e_priv *priv)
{
+ mlx5e_health_destroy_reporters(priv);
mlx5e_tls_cleanup(priv);
mlx5e_ipsec_cleanup(priv);
mlx5e_netdev_cleanup(priv->netdev, priv);
@@ -5073,7 +5089,6 @@ static int mlx5e_init_nic_tx(struct mlx5e_priv *priv)
#ifdef CONFIG_MLX5_CORE_EN_DCB
mlx5e_dcbnl_initialize(priv);
#endif
- mlx5e_tx_reporter_create(priv);
return 0;
}
@@ -5097,6 +5112,7 @@ static void mlx5e_nic_enable(struct mlx5e_priv *priv)
if (mlx5e_monitor_counter_supported(priv))
mlx5e_monitor_counter_init(priv);
+ mlx5e_hv_vhca_stats_create(priv);
if (netdev->reg_state != NETREG_REGISTERED)
return;
#ifdef CONFIG_MLX5_CORE_EN_DCB
@@ -5129,6 +5145,7 @@ static void mlx5e_nic_disable(struct mlx5e_priv *priv)
queue_work(priv->wq, &priv->set_rx_mode_work);
+ mlx5e_hv_vhca_stats_destroy(priv);
if (mlx5e_monitor_counter_supported(priv))
mlx5e_monitor_counter_cleanup(priv);
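
The en_main.c changes hook each RQ and ICOSQ into the new en/health.h reporter machinery: a per-queue recover_work item, reporter calls on error CQEs and RX timeouts, and a separate activate/deactivate pair for the ICOSQ so it can be restarted independently of its RQ. The queueing side uses the usual run-once gate, as seen concretely in the en_rx.c hunk below:

    /* Queue at most one recovery work per queue; the RECOVERING bit is
     * presumably cleared by the reporter once the queue is healthy again. */
    if (!test_and_set_bit(MLX5E_RQ_STATE_RECOVERING, &rq->state))
    	queue_work(rq->channel->priv->wq, &rq->recover_work);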
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
index d0684fdb69e1..e7ac6233037d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
@@ -46,6 +46,8 @@
#include "en/tc_tun.h"
#include "fs_core.h"
#include "lib/port_tun.h"
+#define CREATE_TRACE_POINTS
+#include "diag/en_rep_tracepoint.h"
#define MLX5E_REP_PARAMS_DEF_LOG_SQ_SIZE \
max(0x7, MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE)
@@ -389,24 +391,17 @@ static const struct ethtool_ops mlx5e_uplink_rep_ethtool_ops = {
.set_pauseparam = mlx5e_uplink_rep_set_pauseparam,
};
-static int mlx5e_rep_get_port_parent_id(struct net_device *dev,
- struct netdev_phys_item_id *ppid)
+static void mlx5e_rep_get_port_parent_id(struct net_device *dev,
+ struct netdev_phys_item_id *ppid)
{
- struct mlx5_eswitch *esw;
struct mlx5e_priv *priv;
u64 parent_id;
priv = netdev_priv(dev);
- esw = priv->mdev->priv.eswitch;
-
- if (esw->mode == MLX5_ESWITCH_NONE)
- return -EOPNOTSUPP;
parent_id = mlx5_query_nic_system_image_guid(priv->mdev);
ppid->id_len = sizeof(parent_id);
memcpy(ppid->id, &parent_id, sizeof(parent_id));
-
- return 0;
}
static void mlx5e_sqs2vport_stop(struct mlx5_eswitch *esw,
@@ -531,47 +526,97 @@ void mlx5e_rep_queue_neigh_stats_work(struct mlx5e_priv *priv)
neigh_update->min_interval);
}
+static bool mlx5e_rep_neigh_entry_hold(struct mlx5e_neigh_hash_entry *nhe)
+{
+ return refcount_inc_not_zero(&nhe->refcnt);
+}
+
+static void mlx5e_rep_neigh_entry_remove(struct mlx5e_neigh_hash_entry *nhe);
+
+static void mlx5e_rep_neigh_entry_release(struct mlx5e_neigh_hash_entry *nhe)
+{
+ if (refcount_dec_and_test(&nhe->refcnt)) {
+ mlx5e_rep_neigh_entry_remove(nhe);
+ kfree_rcu(nhe, rcu);
+ }
+}
+
+static struct mlx5e_neigh_hash_entry *
+mlx5e_get_next_nhe(struct mlx5e_rep_priv *rpriv,
+ struct mlx5e_neigh_hash_entry *nhe)
+{
+ struct mlx5e_neigh_hash_entry *next = NULL;
+
+ rcu_read_lock();
+
+ for (next = nhe ?
+ list_next_or_null_rcu(&rpriv->neigh_update.neigh_list,
+ &nhe->neigh_list,
+ struct mlx5e_neigh_hash_entry,
+ neigh_list) :
+ list_first_or_null_rcu(&rpriv->neigh_update.neigh_list,
+ struct mlx5e_neigh_hash_entry,
+ neigh_list);
+ next;
+ next = list_next_or_null_rcu(&rpriv->neigh_update.neigh_list,
+ &next->neigh_list,
+ struct mlx5e_neigh_hash_entry,
+ neigh_list))
+ if (mlx5e_rep_neigh_entry_hold(next))
+ break;
+
+ rcu_read_unlock();
+
+ if (nhe)
+ mlx5e_rep_neigh_entry_release(nhe);
+
+ return next;
+}
+
static void mlx5e_rep_neigh_stats_work(struct work_struct *work)
{
struct mlx5e_rep_priv *rpriv = container_of(work, struct mlx5e_rep_priv,
neigh_update.neigh_stats_work.work);
struct net_device *netdev = rpriv->netdev;
struct mlx5e_priv *priv = netdev_priv(netdev);
- struct mlx5e_neigh_hash_entry *nhe;
+ struct mlx5e_neigh_hash_entry *nhe = NULL;
rtnl_lock();
if (!list_empty(&rpriv->neigh_update.neigh_list))
mlx5e_rep_queue_neigh_stats_work(priv);
- list_for_each_entry(nhe, &rpriv->neigh_update.neigh_list, neigh_list)
+ while ((nhe = mlx5e_get_next_nhe(rpriv, nhe)) != NULL)
mlx5e_tc_update_neigh_used_value(nhe);
rtnl_unlock();
}
-static void mlx5e_rep_neigh_entry_hold(struct mlx5e_neigh_hash_entry *nhe)
-{
- refcount_inc(&nhe->refcnt);
-}
-
-static void mlx5e_rep_neigh_entry_release(struct mlx5e_neigh_hash_entry *nhe)
-{
- if (refcount_dec_and_test(&nhe->refcnt))
- kfree(nhe);
-}
-
static void mlx5e_rep_update_flows(struct mlx5e_priv *priv,
struct mlx5e_encap_entry *e,
bool neigh_connected,
unsigned char ha[ETH_ALEN])
{
struct ethhdr *eth = (struct ethhdr *)e->encap_header;
+ struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
+ bool encap_connected;
+ LIST_HEAD(flow_list);
ASSERT_RTNL();
+ /* wait for encap to be fully initialized */
+ wait_for_completion(&e->res_ready);
+
+ mutex_lock(&esw->offloads.encap_tbl_lock);
+ encap_connected = !!(e->flags & MLX5_ENCAP_ENTRY_VALID);
+ if (e->compl_result || (encap_connected == neigh_connected &&
+ ether_addr_equal(e->h_dest, ha)))
+ goto unlock;
+
+ mlx5e_take_all_encap_flows(e, &flow_list);
+
if ((e->flags & MLX5_ENCAP_ENTRY_VALID) &&
(!neigh_connected || !ether_addr_equal(e->h_dest, ha)))
- mlx5e_tc_encap_flows_del(priv, e);
+ mlx5e_tc_encap_flows_del(priv, e, &flow_list);
if (neigh_connected && !(e->flags & MLX5_ENCAP_ENTRY_VALID)) {
ether_addr_copy(e->h_dest, ha);
@@ -581,8 +626,11 @@ static void mlx5e_rep_update_flows(struct mlx5e_priv *priv,
*/
ether_addr_copy(eth->h_source, e->route_dev->dev_addr);
- mlx5e_tc_encap_flows_add(priv, e);
+ mlx5e_tc_encap_flows_add(priv, e, &flow_list);
}
+unlock:
+ mutex_unlock(&esw->offloads.encap_tbl_lock);
+ mlx5e_put_encap_flow_list(priv, &flow_list);
}
static void mlx5e_rep_neigh_update(struct work_struct *work)
@@ -594,7 +642,6 @@ static void mlx5e_rep_neigh_update(struct work_struct *work)
unsigned char ha[ETH_ALEN];
struct mlx5e_priv *priv;
bool neigh_connected;
- bool encap_connected;
u8 nud_state, dead;
rtnl_lock();
@@ -612,13 +659,15 @@ static void mlx5e_rep_neigh_update(struct work_struct *work)
neigh_connected = (nud_state & NUD_VALID) && !dead;
+ trace_mlx5e_rep_neigh_update(nhe, ha, neigh_connected);
+
list_for_each_entry(e, &nhe->encap_list, encap_list) {
- encap_connected = !!(e->flags & MLX5_ENCAP_ENTRY_VALID);
- priv = netdev_priv(e->out_dev);
+ if (!mlx5e_encap_take(e))
+ continue;
- if (encap_connected != neigh_connected ||
- !ether_addr_equal(e->h_dest, ha))
- mlx5e_rep_update_flows(priv, e, neigh_connected, ha);
+ priv = netdev_priv(e->out_dev);
+ mlx5e_rep_update_flows(priv, e, neigh_connected, ha);
+ mlx5e_encap_put(priv, e);
}
mlx5e_rep_neigh_entry_release(nhe);
rtnl_unlock();
@@ -659,8 +708,8 @@ mlx5e_rep_indr_offload(struct net_device *netdev,
struct flow_cls_offload *flower,
struct mlx5e_rep_indr_block_priv *indr_priv)
{
+ unsigned long flags = MLX5_TC_FLAG(EGRESS) | MLX5_TC_FLAG(ESW_OFFLOAD);
struct mlx5e_priv *priv = netdev_priv(indr_priv->rpriv->netdev);
- int flags = MLX5E_TC_EGRESS | MLX5E_TC_ESW_OFFLOAD;
int err = 0;
switch (flower->command) {
@@ -714,6 +763,7 @@ mlx5e_rep_indr_setup_tc_block(struct net_device *netdev,
if (f->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
return -EOPNOTSUPP;
+ f->unlocked_driver_cb = true;
f->driver_block_list = &mlx5e_block_cb_list;
switch (f->command) {
@@ -722,10 +772,6 @@ mlx5e_rep_indr_setup_tc_block(struct net_device *netdev,
if (indr_priv)
return -EEXIST;
- if (flow_block_cb_is_busy(mlx5e_rep_indr_setup_block_cb,
- indr_priv, &mlx5e_block_cb_list))
- return -EBUSY;
-
indr_priv = kmalloc(sizeof(*indr_priv), GFP_KERNEL);
if (!indr_priv)
return -ENOMEM;
@@ -785,9 +831,9 @@ static int mlx5e_rep_indr_register_block(struct mlx5e_rep_priv *rpriv,
{
int err;
- err = __tc_indr_block_cb_register(netdev, rpriv,
- mlx5e_rep_indr_setup_tc_cb,
- rpriv);
+ err = __flow_indr_block_cb_register(netdev, rpriv,
+ mlx5e_rep_indr_setup_tc_cb,
+ rpriv);
if (err) {
struct mlx5e_priv *priv = netdev_priv(rpriv->netdev);
@@ -800,8 +846,8 @@ static int mlx5e_rep_indr_register_block(struct mlx5e_rep_priv *rpriv,
static void mlx5e_rep_indr_unregister_block(struct mlx5e_rep_priv *rpriv,
struct net_device *netdev)
{
- __tc_indr_block_cb_unregister(netdev, mlx5e_rep_indr_setup_tc_cb,
- rpriv);
+ __flow_indr_block_cb_unregister(netdev, mlx5e_rep_indr_setup_tc_cb,
+ rpriv);
}
static int mlx5e_nic_rep_netdevice_event(struct notifier_block *nb,
@@ -827,6 +873,28 @@ static int mlx5e_nic_rep_netdevice_event(struct notifier_block *nb,
return NOTIFY_OK;
}
+static void
+mlx5e_rep_queue_neigh_update_work(struct mlx5e_priv *priv,
+ struct mlx5e_neigh_hash_entry *nhe,
+ struct neighbour *n)
+{
+	/* Take a reference to ensure the neighbour and mlx5 encap
+	 * entry won't be destroyed until we drop the reference in
+	 * the delayed work.
+	 */
+	neigh_hold(n);
+
+	/* This assignment is valid as long as the neigh reference
+	 * is taken.
+	 */
+ nhe->n = n;
+
+ if (!queue_work(priv->wq, &nhe->neigh_update_work)) {
+ mlx5e_rep_neigh_entry_release(nhe);
+ neigh_release(n);
+ }
+}
+
static struct mlx5e_neigh_hash_entry *
mlx5e_rep_neigh_entry_lookup(struct mlx5e_priv *priv,
struct mlx5e_neigh *m_neigh);
@@ -859,34 +927,13 @@ static int mlx5e_rep_netevent_event(struct notifier_block *nb,
m_neigh.family = n->ops->family;
memcpy(&m_neigh.dst_ip, n->primary_key, n->tbl->key_len);
- /* We are in atomic context and can't take RTNL mutex, so use
- * spin_lock_bh to lookup the neigh table. bh is used since
- * netevent can be called from a softirq context.
- */
- spin_lock_bh(&neigh_update->encap_lock);
+ rcu_read_lock();
nhe = mlx5e_rep_neigh_entry_lookup(priv, &m_neigh);
- if (!nhe) {
- spin_unlock_bh(&neigh_update->encap_lock);
+ rcu_read_unlock();
+ if (!nhe)
return NOTIFY_DONE;
- }
-
- /* This assignment is valid as long as the the neigh reference
- * is taken
- */
- nhe->n = n;
-
- /* Take a reference to ensure the neighbour and mlx5 encap
- * entry won't be destructed until we drop the reference in
- * delayed work.
- */
- neigh_hold(n);
- mlx5e_rep_neigh_entry_hold(nhe);
- if (!queue_work(priv->wq, &nhe->neigh_update_work)) {
- mlx5e_rep_neigh_entry_release(nhe);
- neigh_release(n);
- }
- spin_unlock_bh(&neigh_update->encap_lock);
+ mlx5e_rep_queue_neigh_update_work(priv, nhe, n);
break;
case NETEVENT_DELAY_PROBE_TIME_UPDATE:
@@ -903,19 +950,15 @@ static int mlx5e_rep_netevent_event(struct notifier_block *nb,
#endif
return NOTIFY_DONE;
- /* We are in atomic context and can't take RTNL mutex,
- * so use spin_lock_bh to walk the neigh list and look for
- * the relevant device. bh is used since netevent can be
- * called from a softirq context.
- */
- spin_lock_bh(&neigh_update->encap_lock);
- list_for_each_entry(nhe, &neigh_update->neigh_list, neigh_list) {
+ rcu_read_lock();
+ list_for_each_entry_rcu(nhe, &neigh_update->neigh_list,
+ neigh_list) {
if (p->dev == nhe->m_neigh.dev) {
found = true;
break;
}
}
- spin_unlock_bh(&neigh_update->encap_lock);
+ rcu_read_unlock();
if (!found)
return NOTIFY_DONE;
@@ -946,7 +989,7 @@ static int mlx5e_rep_neigh_init(struct mlx5e_rep_priv *rpriv)
return err;
INIT_LIST_HEAD(&neigh_update->neigh_list);
- spin_lock_init(&neigh_update->encap_lock);
+ mutex_init(&neigh_update->encap_lock);
INIT_DELAYED_WORK(&neigh_update->neigh_stats_work,
mlx5e_rep_neigh_stats_work);
mlx5e_rep_neigh_update_init_interval(rpriv);
@@ -973,6 +1016,7 @@ static void mlx5e_rep_neigh_cleanup(struct mlx5e_rep_priv *rpriv)
cancel_delayed_work_sync(&rpriv->neigh_update.neigh_stats_work);
+ mutex_destroy(&neigh_update->encap_lock);
rhashtable_destroy(&neigh_update->neigh_ht);
}
@@ -988,28 +1032,27 @@ static int mlx5e_rep_neigh_entry_insert(struct mlx5e_priv *priv,
if (err)
return err;
- list_add(&nhe->neigh_list, &rpriv->neigh_update.neigh_list);
+ list_add_rcu(&nhe->neigh_list, &rpriv->neigh_update.neigh_list);
return err;
}
-static void mlx5e_rep_neigh_entry_remove(struct mlx5e_priv *priv,
- struct mlx5e_neigh_hash_entry *nhe)
+static void mlx5e_rep_neigh_entry_remove(struct mlx5e_neigh_hash_entry *nhe)
{
- struct mlx5e_rep_priv *rpriv = priv->ppriv;
+ struct mlx5e_rep_priv *rpriv = nhe->priv->ppriv;
- spin_lock_bh(&rpriv->neigh_update.encap_lock);
+ mutex_lock(&rpriv->neigh_update.encap_lock);
- list_del(&nhe->neigh_list);
+ list_del_rcu(&nhe->neigh_list);
rhashtable_remove_fast(&rpriv->neigh_update.neigh_ht,
&nhe->rhash_node,
mlx5e_neigh_ht_params);
- spin_unlock_bh(&rpriv->neigh_update.encap_lock);
+ mutex_unlock(&rpriv->neigh_update.encap_lock);
}
-/* This function must only be called under RTNL lock or under the
- * representor's encap_lock in case RTNL mutex can't be held.
+/* This function must only be called under the representor's encap_lock or
+ * inside an RCU read-side section.
*/
static struct mlx5e_neigh_hash_entry *
mlx5e_rep_neigh_entry_lookup(struct mlx5e_priv *priv,
@@ -1017,9 +1060,11 @@ mlx5e_rep_neigh_entry_lookup(struct mlx5e_priv *priv,
{
struct mlx5e_rep_priv *rpriv = priv->ppriv;
struct mlx5e_neigh_update_table *neigh_update = &rpriv->neigh_update;
+ struct mlx5e_neigh_hash_entry *nhe;
- return rhashtable_lookup_fast(&neigh_update->neigh_ht, m_neigh,
- mlx5e_neigh_ht_params);
+ nhe = rhashtable_lookup_fast(&neigh_update->neigh_ht, m_neigh,
+ mlx5e_neigh_ht_params);
+ return nhe && mlx5e_rep_neigh_entry_hold(nhe) ? nhe : NULL;
}
static int mlx5e_rep_neigh_entry_create(struct mlx5e_priv *priv,
@@ -1032,8 +1077,10 @@ static int mlx5e_rep_neigh_entry_create(struct mlx5e_priv *priv,
if (!*nhe)
return -ENOMEM;
+ (*nhe)->priv = priv;
memcpy(&(*nhe)->m_neigh, &e->m_neigh, sizeof(e->m_neigh));
INIT_WORK(&(*nhe)->neigh_update_work, mlx5e_rep_neigh_update);
+ spin_lock_init(&(*nhe)->encap_list_lock);
INIT_LIST_HEAD(&(*nhe)->encap_list);
refcount_set(&(*nhe)->refcnt, 1);
@@ -1047,19 +1094,6 @@ out_free:
return err;
}
-static void mlx5e_rep_neigh_entry_destroy(struct mlx5e_priv *priv,
- struct mlx5e_neigh_hash_entry *nhe)
-{
- /* The neigh hash entry must be removed from the hash table regardless
- * of the reference count value, so it won't be found by the next
- * neigh notification call. The neigh hash entry reference count is
- * incremented only during creation and neigh notification calls and
- * protects from freeing the nhe struct.
- */
- mlx5e_rep_neigh_entry_remove(priv, nhe);
- mlx5e_rep_neigh_entry_release(nhe);
-}
-
int mlx5e_rep_encap_entry_attach(struct mlx5e_priv *priv,
struct mlx5e_encap_entry *e)
{
@@ -1072,16 +1106,26 @@ int mlx5e_rep_encap_entry_attach(struct mlx5e_priv *priv,
err = mlx5_tun_entropy_refcount_inc(tun_entropy, e->reformat_type);
if (err)
return err;
+
+ mutex_lock(&rpriv->neigh_update.encap_lock);
nhe = mlx5e_rep_neigh_entry_lookup(priv, &e->m_neigh);
if (!nhe) {
err = mlx5e_rep_neigh_entry_create(priv, e, &nhe);
if (err) {
+ mutex_unlock(&rpriv->neigh_update.encap_lock);
mlx5_tun_entropy_refcount_dec(tun_entropy,
e->reformat_type);
return err;
}
}
- list_add(&e->encap_list, &nhe->encap_list);
+
+ e->nhe = nhe;
+ spin_lock(&nhe->encap_list_lock);
+ list_add_rcu(&e->encap_list, &nhe->encap_list);
+ spin_unlock(&nhe->encap_list_lock);
+
+ mutex_unlock(&rpriv->neigh_update.encap_lock);
+
return 0;
}
@@ -1091,13 +1135,16 @@ void mlx5e_rep_encap_entry_detach(struct mlx5e_priv *priv,
struct mlx5e_rep_priv *rpriv = priv->ppriv;
struct mlx5_rep_uplink_priv *uplink_priv = &rpriv->uplink_priv;
struct mlx5_tun_entropy *tun_entropy = &uplink_priv->tun_entropy;
- struct mlx5e_neigh_hash_entry *nhe;
- list_del(&e->encap_list);
- nhe = mlx5e_rep_neigh_entry_lookup(priv, &e->m_neigh);
+ if (!e->nhe)
+ return;
+
+ spin_lock(&e->nhe->encap_list_lock);
+ list_del_rcu(&e->encap_list);
+ spin_unlock(&e->nhe->encap_list_lock);
- if (list_empty(&nhe->encap_list))
- mlx5e_rep_neigh_entry_destroy(priv, nhe);
+ mlx5e_rep_neigh_entry_release(e->nhe);
+ e->nhe = NULL;
mlx5_tun_entropy_refcount_dec(tun_entropy, e->reformat_type);
}
@@ -1160,15 +1207,34 @@ mlx5e_rep_setup_tc_cls_flower(struct mlx5e_priv *priv,
}
}
+static
+int mlx5e_rep_setup_tc_cls_matchall(struct mlx5e_priv *priv,
+ struct tc_cls_matchall_offload *ma)
+{
+ switch (ma->command) {
+ case TC_CLSMATCHALL_REPLACE:
+ return mlx5e_tc_configure_matchall(priv, ma);
+ case TC_CLSMATCHALL_DESTROY:
+ return mlx5e_tc_delete_matchall(priv, ma);
+ case TC_CLSMATCHALL_STATS:
+ mlx5e_tc_stats_matchall(priv, ma);
+ return 0;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
static int mlx5e_rep_setup_tc_cb(enum tc_setup_type type, void *type_data,
void *cb_priv)
{
+ unsigned long flags = MLX5_TC_FLAG(INGRESS) | MLX5_TC_FLAG(ESW_OFFLOAD);
struct mlx5e_priv *priv = cb_priv;
switch (type) {
case TC_SETUP_CLSFLOWER:
- return mlx5e_rep_setup_tc_cls_flower(priv, type_data, MLX5E_TC_INGRESS |
- MLX5E_TC_ESW_OFFLOAD);
+ return mlx5e_rep_setup_tc_cls_flower(priv, type_data, flags);
+ case TC_SETUP_CLSMATCHALL:
+ return mlx5e_rep_setup_tc_cls_matchall(priv, type_data);
default:
return -EOPNOTSUPP;
}
@@ -1180,9 +1246,11 @@ static int mlx5e_rep_setup_tc(struct net_device *dev, enum tc_setup_type type,
void *type_data)
{
struct mlx5e_priv *priv = netdev_priv(dev);
+ struct flow_block_offload *f = type_data;
switch (type) {
case TC_SETUP_BLOCK:
+ f->unlocked_driver_cb = true;
return flow_block_cb_setup_simple(type_data,
&mlx5e_rep_block_cb_list,
mlx5e_rep_setup_tc_cb,
@@ -1564,6 +1632,7 @@ static int mlx5e_init_rep_tx(struct mlx5e_priv *priv)
if (rpriv->rep->vport == MLX5_VPORT_UPLINK) {
uplink_priv = &rpriv->uplink_priv;
+ mutex_init(&uplink_priv->unready_flows_lock);
INIT_LIST_HEAD(&uplink_priv->unready_flows);
/* init shared tc flow table */
@@ -1608,6 +1677,7 @@ static void mlx5e_cleanup_rep_tx(struct mlx5e_priv *priv)
/* delete shared tc flow table */
mlx5e_tc_esw_cleanup(&rpriv->uplink_priv.tc_ht);
+ mutex_destroy(&rpriv->uplink_priv.unready_flows_lock);
}
}
@@ -1731,37 +1801,46 @@ is_devlink_port_supported(const struct mlx5_core_dev *dev,
mlx5_eswitch_is_vf_vport(dev->priv.eswitch, rpriv->rep->vport);
}
+static unsigned int
+vport_to_devlink_port_index(const struct mlx5_core_dev *dev, u16 vport_num)
+{
+ return (MLX5_CAP_GEN(dev, vhca_id) << 16) | vport_num;
+}
+
static int register_devlink_port(struct mlx5_core_dev *dev,
struct mlx5e_rep_priv *rpriv)
{
struct devlink *devlink = priv_to_devlink(dev);
struct mlx5_eswitch_rep *rep = rpriv->rep;
struct netdev_phys_item_id ppid = {};
- int ret;
+ unsigned int dl_port_index = 0;
if (!is_devlink_port_supported(dev, rpriv))
return 0;
- ret = mlx5e_rep_get_port_parent_id(rpriv->netdev, &ppid);
- if (ret)
- return ret;
+ mlx5e_rep_get_port_parent_id(rpriv->netdev, &ppid);
- if (rep->vport == MLX5_VPORT_UPLINK)
+ if (rep->vport == MLX5_VPORT_UPLINK) {
devlink_port_attrs_set(&rpriv->dl_port,
DEVLINK_PORT_FLAVOUR_PHYSICAL,
PCI_FUNC(dev->pdev->devfn), false, 0,
&ppid.id[0], ppid.id_len);
- else if (rep->vport == MLX5_VPORT_PF)
+ dl_port_index = vport_to_devlink_port_index(dev, rep->vport);
+ } else if (rep->vport == MLX5_VPORT_PF) {
devlink_port_attrs_pci_pf_set(&rpriv->dl_port,
&ppid.id[0], ppid.id_len,
dev->pdev->devfn);
- else if (mlx5_eswitch_is_vf_vport(dev->priv.eswitch, rpriv->rep->vport))
+ dl_port_index = rep->vport;
+ } else if (mlx5_eswitch_is_vf_vport(dev->priv.eswitch,
+ rpriv->rep->vport)) {
devlink_port_attrs_pci_vf_set(&rpriv->dl_port,
&ppid.id[0], ppid.id_len,
dev->pdev->devfn,
rep->vport - 1);
+ dl_port_index = vport_to_devlink_port_index(dev, rep->vport);
+ }
- return devlink_port_register(devlink, &rpriv->dl_port, rep->vport);
+ return devlink_port_register(devlink, &rpriv->dl_port, dl_port_index);
}
static void unregister_devlink_port(struct mlx5_core_dev *dev,
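
mlx5e_get_next_nhe() above carries the core of the locking rework: the neigh list is now walked under RCU with a reference held on each visited entry, so the stats worker no longer needs encap_lock. Stripped of driver types, the pattern is the following sketch (illustrative, not driver API):

    #include <linux/rculist.h>
    #include <linux/refcount.h>
    #include <linux/slab.h>

    struct entry {
    	struct list_head list;
    	refcount_t refcnt;
    	struct rcu_head rcu;
    };

    /* Return the next live entry after @pos with a reference held, and
     * drop the reference on @pos; NULL terminates the walk. */
    static struct entry *next_entry_held(struct list_head *head, struct entry *pos)
    {
    	struct entry *e;

    	rcu_read_lock();
    	e = pos ? list_next_or_null_rcu(head, &pos->list, struct entry, list) :
    		  list_first_or_null_rcu(head, struct entry, list);
    	while (e && !refcount_inc_not_zero(&e->refcnt))
    		e = list_next_or_null_rcu(head, &e->list, struct entry, list);
    	rcu_read_unlock();

    	if (pos && refcount_dec_and_test(&pos->refcnt))
    		kfree_rcu(pos, rcu);
    	return e;
    }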
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h
index c56e6ee4350c..a0ae5069d8c3 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h
@@ -35,6 +35,7 @@
#include <net/ip_tunnels.h>
#include <linux/rhashtable.h>
+#include <linux/mutex.h>
#include "eswitch.h"
#include "en.h"
#include "lib/port_tun.h"
@@ -48,7 +49,7 @@ struct mlx5e_neigh_update_table {
*/
struct list_head neigh_list;
/* protect lookup/remove operations */
- spinlock_t encap_lock;
+ struct mutex encap_lock;
struct notifier_block netevent_nb;
struct delayed_work neigh_stats_work;
unsigned long min_interval; /* jiffies */
@@ -75,6 +76,8 @@ struct mlx5_rep_uplink_priv {
struct mlx5_tun_entropy tun_entropy;
+ /* protects unready_flows */
+ struct mutex unready_flows_lock;
struct list_head unready_flows;
struct work_struct reoffload_flows_work;
};
@@ -86,6 +89,7 @@ struct mlx5e_rep_priv {
struct mlx5_flow_handle *vport_rx_rule;
struct list_head vport_sqs_list;
struct mlx5_rep_uplink_priv uplink_priv; /* valid for uplink rep */
+ struct rtnl_link_stats64 prev_vf_vport_stats;
struct devlink_port dl_port;
};
@@ -107,6 +111,7 @@ struct mlx5e_neigh {
struct mlx5e_neigh_hash_entry {
struct rhash_head rhash_node;
struct mlx5e_neigh m_neigh;
+ struct mlx5e_priv *priv;
/* Save the neigh hash entry in a list on the representor in
* addition to the hash table. In order to iterate easily over the
@@ -114,6 +119,8 @@ struct mlx5e_neigh_hash_entry {
*/
struct list_head neigh_list;
+ /* protects encap list */
+ spinlock_t encap_list_lock;
/* encap list sharing the same neigh */
struct list_head encap_list;
@@ -134,6 +141,8 @@ struct mlx5e_neigh_hash_entry {
* 'used' value and avoid neigh deleting by the kernel.
*/
unsigned long reported_lastuse;
+
+ struct rcu_head rcu;
};
enum {
@@ -142,6 +151,8 @@ enum {
};
struct mlx5e_encap_entry {
+ /* attached neigh hash entry */
+ struct mlx5e_neigh_hash_entry *nhe;
/* neigh hash entry list of encaps sharing the same neigh */
struct list_head encap_list;
struct mlx5e_neigh m_neigh;
@@ -161,6 +172,10 @@ struct mlx5e_encap_entry {
u8 flags;
char *encap_header;
int encap_size;
+ refcount_t refcnt;
+ struct completion res_ready;
+ int compl_result;
+ struct rcu_head rcu;
};
struct mlx5e_rep_sq {
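
The refcnt/res_ready/compl_result triplet added to mlx5e_encap_entry (mirrored by the mod_hdr and hairpin entries in en_tc.c) supports a create-or-wait scheme: the creator publishes a partially initialized entry under the table lock, completes the hardware setup, records the outcome in compl_result, and fires res_ready; concurrent finders wait on the completion and then check the result. In outline (lookup helper hypothetical, error code as used by the mod_hdr path):

    e = lookup_and_take_ref(tbl, key);	/* hypothetical; under the table lock */
    if (e) {
    	wait_for_completion(&e->res_ready);
    	if (e->compl_result < 0) {	/* creator failed */
    		mlx5e_encap_put(priv, e);
    		return -EREMOTEIO;
    	}
    	/* entry is fully initialized and safe to use */
    }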
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
index ac6e586d403d..2fd2760d0bb7 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
@@ -48,6 +48,7 @@
#include "lib/clock.h"
#include "en/xdp.h"
#include "en/xsk/rx.h"
+#include "en/health.h"
static inline bool mlx5e_rx_hw_stamp(struct hwtstamp_config *config)
{
@@ -615,6 +616,8 @@ void mlx5e_poll_ico_cq(struct mlx5e_cq *cq)
if (unlikely(get_cqe_opcode(cqe) != MLX5_CQE_REQ)) {
netdev_WARN_ONCE(cq->channel->netdev,
"Bad OP in ICOSQ CQE: 0x%x\n", get_cqe_opcode(cqe));
+ if (!test_and_set_bit(MLX5E_SQ_STATE_RECOVERING, &sq->state))
+ queue_work(cq->channel->priv->wq, &sq->recover_work);
break;
}
do {
@@ -859,13 +862,24 @@ tail_padding_csum(struct sk_buff *skb, int offset,
}
static void
-mlx5e_skb_padding_csum(struct sk_buff *skb, int network_depth, __be16 proto,
- struct mlx5e_rq_stats *stats)
+mlx5e_skb_csum_fixup(struct sk_buff *skb, int network_depth, __be16 proto,
+ struct mlx5e_rq_stats *stats)
{
struct ipv6hdr *ip6;
struct iphdr *ip4;
int pkt_len;
+ /* Fixup vlan headers, if any */
+ if (network_depth > ETH_HLEN)
+ /* CQE csum is calculated from the IP header and does
+ * not cover VLAN headers (if present). This will add
+ * the checksum manually.
+ */
+ skb->csum = csum_partial(skb->data + ETH_HLEN,
+ network_depth - ETH_HLEN,
+ skb->csum);
+
+ /* Fixup tail padding, if any */
switch (proto) {
case htons(ETH_P_IP):
ip4 = (struct iphdr *)(skb->data + network_depth);
@@ -931,16 +945,7 @@ static inline void mlx5e_handle_csum(struct net_device *netdev,
return; /* CQE csum covers all received bytes */
/* csum might need some fixups ...*/
- if (network_depth > ETH_HLEN)
- /* CQE csum is calculated from the IP header and does
- * not cover VLAN headers (if present). This will add
- * the checksum manually.
- */
- skb->csum = csum_partial(skb->data + ETH_HLEN,
- network_depth - ETH_HLEN,
- skb->csum);
-
- mlx5e_skb_padding_csum(skb, network_depth, proto, stats);
+ mlx5e_skb_csum_fixup(skb, network_depth, proto, stats);
return;
}
@@ -1065,11 +1070,6 @@ mlx5e_skb_from_cqe_linear(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe,
prefetchw(va); /* xdp_frame data area */
prefetch(data);
- if (unlikely(get_cqe_opcode(cqe) != MLX5_CQE_RESP_SEND)) {
- rq->stats->wqe_err++;
- return NULL;
- }
-
rcu_read_lock();
consumed = mlx5e_xdp_handle(rq, di, va, &rx_headroom, &cqe_bcnt, false);
rcu_read_unlock();
@@ -1097,11 +1097,6 @@ mlx5e_skb_from_cqe_nonlinear(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe,
u16 byte_cnt = cqe_bcnt - headlen;
struct sk_buff *skb;
- if (unlikely(get_cqe_opcode(cqe) != MLX5_CQE_RESP_SEND)) {
- rq->stats->wqe_err++;
- return NULL;
- }
-
/* XDP is not supported in this configuration, as incoming packets
* might spread among multiple pages.
*/
@@ -1135,6 +1130,15 @@ mlx5e_skb_from_cqe_nonlinear(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe,
return skb;
}
+static void trigger_report(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
+{
+ struct mlx5_err_cqe *err_cqe = (struct mlx5_err_cqe *)cqe;
+
+ if (cqe_syndrome_needs_recover(err_cqe->syndrome) &&
+ !test_and_set_bit(MLX5E_RQ_STATE_RECOVERING, &rq->state))
+ queue_work(rq->channel->priv->wq, &rq->recover_work);
+}
+
void mlx5e_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
{
struct mlx5_wq_cyc *wq = &rq->wqe.wq;
@@ -1147,6 +1151,12 @@ void mlx5e_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
wi = get_frag(rq, ci);
cqe_bcnt = be32_to_cpu(cqe->byte_cnt);
+ if (unlikely(MLX5E_RX_ERR_CQE(cqe))) {
+ trigger_report(rq, cqe);
+ rq->stats->wqe_err++;
+ goto free_wqe;
+ }
+
skb = INDIRECT_CALL_2(rq->wqe.skb_from_cqe,
mlx5e_skb_from_cqe_linear,
mlx5e_skb_from_cqe_nonlinear,
@@ -1188,6 +1198,11 @@ void mlx5e_handle_rx_cqe_rep(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
wi = get_frag(rq, ci);
cqe_bcnt = be32_to_cpu(cqe->byte_cnt);
+ if (unlikely(MLX5E_RX_ERR_CQE(cqe))) {
+ rq->stats->wqe_err++;
+ goto free_wqe;
+ }
+
skb = rq->wqe.skb_from_cqe(rq, cqe, wi, cqe_bcnt);
if (!skb) {
/* probably for XDP */
@@ -1322,7 +1337,8 @@ void mlx5e_handle_rx_cqe_mpwrq(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
wi->consumed_strides += cstrides;
- if (unlikely(get_cqe_opcode(cqe) != MLX5_CQE_RESP_SEND)) {
+ if (unlikely(MLX5E_RX_ERR_CQE(cqe))) {
+ trigger_report(rq, cqe);
rq->stats->wqe_err++;
goto mpwrq_cqe_out;
}
@@ -1498,6 +1514,11 @@ void mlx5i_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
wi = get_frag(rq, ci);
cqe_bcnt = be32_to_cpu(cqe->byte_cnt);
+ if (unlikely(MLX5E_RX_ERR_CQE(cqe))) {
+ rq->stats->wqe_err++;
+ goto wq_free_wqe;
+ }
+
skb = INDIRECT_CALL_2(rq->wqe.skb_from_cqe,
mlx5e_skb_from_cqe_linear,
mlx5e_skb_from_cqe_nonlinear,
@@ -1533,26 +1554,27 @@ void mlx5e_ipsec_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
wi = get_frag(rq, ci);
cqe_bcnt = be32_to_cpu(cqe->byte_cnt);
+ if (unlikely(MLX5E_RX_ERR_CQE(cqe))) {
+ rq->stats->wqe_err++;
+ goto wq_free_wqe;
+ }
+
skb = INDIRECT_CALL_2(rq->wqe.skb_from_cqe,
mlx5e_skb_from_cqe_linear,
mlx5e_skb_from_cqe_nonlinear,
rq, cqe, wi, cqe_bcnt);
- if (unlikely(!skb)) {
- /* a DROP, save the page-reuse checks */
- mlx5e_free_rx_wqe(rq, wi, true);
- goto wq_cyc_pop;
- }
+ if (unlikely(!skb)) /* a DROP, save the page-reuse checks */
+ goto wq_free_wqe;
+
skb = mlx5e_ipsec_handle_rx_skb(rq->netdev, skb, &cqe_bcnt);
- if (unlikely(!skb)) {
- mlx5e_free_rx_wqe(rq, wi, true);
- goto wq_cyc_pop;
- }
+ if (unlikely(!skb))
+ goto wq_free_wqe;
mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb);
napi_gro_receive(rq->cq.napi, skb);
+wq_free_wqe:
mlx5e_free_rx_wqe(rq, wi, true);
-wq_cyc_pop:
mlx5_wq_cyc_pop(wq);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
index 57f9f346d213..18e4c162256a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
@@ -74,6 +74,7 @@ static const struct counter_desc sw_stats_desc[] = {
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx_xmit) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx_mpwqe) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx_inlnw) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx_nops) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx_full) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx_err) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx_cqe) },
@@ -90,6 +91,7 @@ static const struct counter_desc sw_stats_desc[] = {
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xdp_xmit) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xdp_mpwqe) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xdp_inlnw) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xdp_nops) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xdp_full) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xdp_err) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xdp_cqes) },
@@ -107,6 +109,7 @@ static const struct counter_desc sw_stats_desc[] = {
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cache_waive) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_congst_umr) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_arfs_err) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_recover) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, ch_events) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, ch_poll) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, ch_arm) },
@@ -200,6 +203,7 @@ static void mlx5e_grp_sw_update_stats(struct mlx5e_priv *priv)
s->rx_xdp_tx_xmit += xdpsq_stats->xmit;
s->rx_xdp_tx_mpwqe += xdpsq_stats->mpwqe;
s->rx_xdp_tx_inlnw += xdpsq_stats->inlnw;
+ s->rx_xdp_tx_nops += xdpsq_stats->nops;
s->rx_xdp_tx_full += xdpsq_stats->full;
s->rx_xdp_tx_err += xdpsq_stats->err;
s->rx_xdp_tx_cqe += xdpsq_stats->cqes;
@@ -217,6 +221,7 @@ static void mlx5e_grp_sw_update_stats(struct mlx5e_priv *priv)
s->rx_cache_waive += rq_stats->cache_waive;
s->rx_congst_umr += rq_stats->congst_umr;
s->rx_arfs_err += rq_stats->arfs_err;
+ s->rx_recover += rq_stats->recover;
s->ch_events += ch_stats->events;
s->ch_poll += ch_stats->poll;
s->ch_arm += ch_stats->arm;
@@ -227,6 +232,7 @@ static void mlx5e_grp_sw_update_stats(struct mlx5e_priv *priv)
s->tx_xdp_xmit += xdpsq_red_stats->xmit;
s->tx_xdp_mpwqe += xdpsq_red_stats->mpwqe;
s->tx_xdp_inlnw += xdpsq_red_stats->inlnw;
+ s->tx_xdp_nops += xdpsq_red_stats->nops;
s->tx_xdp_full += xdpsq_red_stats->full;
s->tx_xdp_err += xdpsq_red_stats->err;
s->tx_xdp_cqes += xdpsq_red_stats->cqes;
@@ -1294,6 +1300,7 @@ static const struct counter_desc rq_stats_desc[] = {
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cache_waive) },
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, congst_umr) },
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, arfs_err) },
+ { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, recover) },
};
static const struct counter_desc sq_stats_desc[] = {
@@ -1331,6 +1338,7 @@ static const struct counter_desc rq_xdpsq_stats_desc[] = {
{ MLX5E_DECLARE_RQ_XDPSQ_STAT(struct mlx5e_xdpsq_stats, xmit) },
{ MLX5E_DECLARE_RQ_XDPSQ_STAT(struct mlx5e_xdpsq_stats, mpwqe) },
{ MLX5E_DECLARE_RQ_XDPSQ_STAT(struct mlx5e_xdpsq_stats, inlnw) },
+ { MLX5E_DECLARE_RQ_XDPSQ_STAT(struct mlx5e_xdpsq_stats, nops) },
{ MLX5E_DECLARE_RQ_XDPSQ_STAT(struct mlx5e_xdpsq_stats, full) },
{ MLX5E_DECLARE_RQ_XDPSQ_STAT(struct mlx5e_xdpsq_stats, err) },
{ MLX5E_DECLARE_RQ_XDPSQ_STAT(struct mlx5e_xdpsq_stats, cqes) },
@@ -1340,6 +1348,7 @@ static const struct counter_desc xdpsq_stats_desc[] = {
{ MLX5E_DECLARE_XDPSQ_STAT(struct mlx5e_xdpsq_stats, xmit) },
{ MLX5E_DECLARE_XDPSQ_STAT(struct mlx5e_xdpsq_stats, mpwqe) },
{ MLX5E_DECLARE_XDPSQ_STAT(struct mlx5e_xdpsq_stats, inlnw) },
+ { MLX5E_DECLARE_XDPSQ_STAT(struct mlx5e_xdpsq_stats, nops) },
{ MLX5E_DECLARE_XDPSQ_STAT(struct mlx5e_xdpsq_stats, full) },
{ MLX5E_DECLARE_XDPSQ_STAT(struct mlx5e_xdpsq_stats, err) },
{ MLX5E_DECLARE_XDPSQ_STAT(struct mlx5e_xdpsq_stats, cqes) },
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
index 76ac111e14d0..c281e567711d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
@@ -81,6 +81,7 @@ struct mlx5e_sw_stats {
u64 rx_xdp_tx_xmit;
u64 rx_xdp_tx_mpwqe;
u64 rx_xdp_tx_inlnw;
+ u64 rx_xdp_tx_nops;
u64 rx_xdp_tx_full;
u64 rx_xdp_tx_err;
u64 rx_xdp_tx_cqe;
@@ -97,6 +98,7 @@ struct mlx5e_sw_stats {
u64 tx_xdp_xmit;
u64 tx_xdp_mpwqe;
u64 tx_xdp_inlnw;
+ u64 tx_xdp_nops;
u64 tx_xdp_full;
u64 tx_xdp_err;
u64 tx_xdp_cqes;
@@ -114,6 +116,7 @@ struct mlx5e_sw_stats {
u64 rx_cache_waive;
u64 rx_congst_umr;
u64 rx_arfs_err;
+ u64 rx_recover;
u64 ch_events;
u64 ch_poll;
u64 ch_arm;
@@ -247,6 +250,7 @@ struct mlx5e_rq_stats {
u64 cache_waive;
u64 congst_umr;
u64 arfs_err;
+ u64 recover;
};
struct mlx5e_sq_stats {
@@ -288,6 +292,7 @@ struct mlx5e_xdpsq_stats {
u64 xmit;
u64 mpwqe;
u64 inlnw;
+ u64 nops;
u64 full;
u64 err;
/* dirtied @completion */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
index 00b2d4a86159..5581a8045ede 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
@@ -38,6 +38,8 @@
#include <linux/mlx5/fs.h>
#include <linux/mlx5/device.h>
#include <linux/rhashtable.h>
+#include <linux/refcount.h>
+#include <linux/completion.h>
#include <net/tc_act/tc_mirred.h>
#include <net/tc_act/tc_vlan.h>
#include <net/tc_act/tc_tunnel_key.h>
@@ -54,6 +56,7 @@
#include "en/tc_tun.h"
#include "lib/devcom.h"
#include "lib/geneve.h"
+#include "diag/en_tc_tracepoint.h"
struct mlx5_nic_flow_attr {
u32 action;
@@ -65,19 +68,20 @@ struct mlx5_nic_flow_attr {
struct mlx5_fc *counter;
};
-#define MLX5E_TC_FLOW_BASE (MLX5E_TC_LAST_EXPORTED_BIT + 1)
+#define MLX5E_TC_FLOW_BASE (MLX5E_TC_FLAG_LAST_EXPORTED_BIT + 1)
enum {
- MLX5E_TC_FLOW_INGRESS = MLX5E_TC_INGRESS,
- MLX5E_TC_FLOW_EGRESS = MLX5E_TC_EGRESS,
- MLX5E_TC_FLOW_ESWITCH = MLX5E_TC_ESW_OFFLOAD,
- MLX5E_TC_FLOW_NIC = MLX5E_TC_NIC_OFFLOAD,
- MLX5E_TC_FLOW_OFFLOADED = BIT(MLX5E_TC_FLOW_BASE),
- MLX5E_TC_FLOW_HAIRPIN = BIT(MLX5E_TC_FLOW_BASE + 1),
- MLX5E_TC_FLOW_HAIRPIN_RSS = BIT(MLX5E_TC_FLOW_BASE + 2),
- MLX5E_TC_FLOW_SLOW = BIT(MLX5E_TC_FLOW_BASE + 3),
- MLX5E_TC_FLOW_DUP = BIT(MLX5E_TC_FLOW_BASE + 4),
- MLX5E_TC_FLOW_NOT_READY = BIT(MLX5E_TC_FLOW_BASE + 5),
+ MLX5E_TC_FLOW_FLAG_INGRESS = MLX5E_TC_FLAG_INGRESS_BIT,
+ MLX5E_TC_FLOW_FLAG_EGRESS = MLX5E_TC_FLAG_EGRESS_BIT,
+ MLX5E_TC_FLOW_FLAG_ESWITCH = MLX5E_TC_FLAG_ESW_OFFLOAD_BIT,
+ MLX5E_TC_FLOW_FLAG_NIC = MLX5E_TC_FLAG_NIC_OFFLOAD_BIT,
+ MLX5E_TC_FLOW_FLAG_OFFLOADED = MLX5E_TC_FLOW_BASE,
+ MLX5E_TC_FLOW_FLAG_HAIRPIN = MLX5E_TC_FLOW_BASE + 1,
+ MLX5E_TC_FLOW_FLAG_HAIRPIN_RSS = MLX5E_TC_FLOW_BASE + 2,
+ MLX5E_TC_FLOW_FLAG_SLOW = MLX5E_TC_FLOW_BASE + 3,
+ MLX5E_TC_FLOW_FLAG_DUP = MLX5E_TC_FLOW_BASE + 4,
+ MLX5E_TC_FLOW_FLAG_NOT_READY = MLX5E_TC_FLOW_BASE + 5,
+ MLX5E_TC_FLOW_FLAG_DELETED = MLX5E_TC_FLOW_BASE + 6,
};
#define MLX5E_TC_MAX_SPLITS 1
@@ -100,6 +104,7 @@ enum {
* container_of(helper item, containing struct type, helper field[index])
*/
struct encap_flow_item {
+ struct mlx5e_encap_entry *e; /* attached encap instance */
struct list_head list;
int index;
};
@@ -108,7 +113,7 @@ struct mlx5e_tc_flow {
struct rhash_head node;
struct mlx5e_priv *priv;
u64 cookie;
- u16 flags;
+ unsigned long flags;
struct mlx5_flow_handle *rule[MLX5E_TC_MAX_SPLITS + 1];
/* Flow can be associated with multiple encap IDs.
* The number of encaps is bounded by the number of supported
@@ -116,10 +121,17 @@ struct mlx5e_tc_flow {
*/
struct encap_flow_item encaps[MLX5_MAX_FLOW_FWD_VPORTS];
struct mlx5e_tc_flow *peer_flow;
+ struct mlx5e_mod_hdr_entry *mh; /* attached mod header instance */
struct list_head mod_hdr; /* flows sharing the same mod hdr ID */
+ struct mlx5e_hairpin_entry *hpe; /* attached hairpin instance */
struct list_head hairpin; /* flows sharing the same hairpin */
struct list_head peer; /* flows with peer flow */
struct list_head unready; /* flows not ready to be offloaded (e.g due to missing route) */
+ int tmp_efi_index;
+ struct list_head tmp_list; /* temporary flow list used by neigh update */
+ refcount_t refcnt;
+ struct rcu_head rcu_head;
+ struct completion init_done;
union {
struct mlx5_esw_flow_attr esw_attr[0];
struct mlx5_nic_flow_attr nic_attr[0];
@@ -157,12 +169,20 @@ struct mlx5e_hairpin_entry {
/* a node of a hash table which keeps all the hairpin entries */
struct hlist_node hairpin_hlist;
+ /* protects flows list */
+ spinlock_t flows_lock;
/* flows sharing the same hairpin */
struct list_head flows;
+	/* hpe's that were not fully initialized when the dead peer update
+	 * event function traversed them.
+ */
+ struct list_head dead_peer_wait_list;
u16 peer_vhca_id;
u8 prio;
struct mlx5e_hairpin *hp;
+ refcount_t refcnt;
+ struct completion res_ready;
};
struct mod_hdr_key {
@@ -174,16 +194,93 @@ struct mlx5e_mod_hdr_entry {
/* a node of a hash table which keeps all the mod_hdr entries */
struct hlist_node mod_hdr_hlist;
+ /* protects flows list */
+ spinlock_t flows_lock;
/* flows sharing the same mod_hdr entry */
struct list_head flows;
struct mod_hdr_key key;
u32 mod_hdr_id;
+
+ refcount_t refcnt;
+ struct completion res_ready;
+ int compl_result;
};
#define MLX5_MH_ACT_SZ MLX5_UN_SZ_BYTES(set_action_in_add_action_in_auto)
+static void mlx5e_tc_del_flow(struct mlx5e_priv *priv,
+ struct mlx5e_tc_flow *flow);
+
+static struct mlx5e_tc_flow *mlx5e_flow_get(struct mlx5e_tc_flow *flow)
+{
+ if (!flow || !refcount_inc_not_zero(&flow->refcnt))
+ return ERR_PTR(-EINVAL);
+ return flow;
+}
+
+static void mlx5e_flow_put(struct mlx5e_priv *priv,
+ struct mlx5e_tc_flow *flow)
+{
+ if (refcount_dec_and_test(&flow->refcnt)) {
+ mlx5e_tc_del_flow(priv, flow);
+ kfree_rcu(flow, rcu_head);
+ }
+}
+
+static void __flow_flag_set(struct mlx5e_tc_flow *flow, unsigned long flag)
+{
+ /* Complete all memory stores before setting bit. */
+ smp_mb__before_atomic();
+ set_bit(flag, &flow->flags);
+}
+
+#define flow_flag_set(flow, flag) __flow_flag_set(flow, MLX5E_TC_FLOW_FLAG_##flag)
+
+static bool __flow_flag_test_and_set(struct mlx5e_tc_flow *flow,
+ unsigned long flag)
+{
+ /* test_and_set_bit() provides all necessary barriers */
+ return test_and_set_bit(flag, &flow->flags);
+}
+
+#define flow_flag_test_and_set(flow, flag) \
+ __flow_flag_test_and_set(flow, \
+ MLX5E_TC_FLOW_FLAG_##flag)
+
+static void __flow_flag_clear(struct mlx5e_tc_flow *flow, unsigned long flag)
+{
+ /* Complete all memory stores before clearing bit. */
+ smp_mb__before_atomic();
+ clear_bit(flag, &flow->flags);
+}
+
+#define flow_flag_clear(flow, flag) __flow_flag_clear(flow, \
+ MLX5E_TC_FLOW_FLAG_##flag)
+
+static bool __flow_flag_test(struct mlx5e_tc_flow *flow, unsigned long flag)
+{
+ bool ret = test_bit(flag, &flow->flags);
+
+ /* Read fields of flow structure only after checking flags. */
+ smp_mb__after_atomic();
+ return ret;
+}
+
+#define flow_flag_test(flow, flag) __flow_flag_test(flow, \
+ MLX5E_TC_FLOW_FLAG_##flag)
+
+static bool mlx5e_is_eswitch_flow(struct mlx5e_tc_flow *flow)
+{
+ return flow_flag_test(flow, ESWITCH);
+}
+
+static bool mlx5e_is_offloaded_flow(struct mlx5e_tc_flow *flow)
+{
+ return flow_flag_test(flow, OFFLOADED);
+}
+
static inline u32 hash_mod_hdr_info(struct mod_hdr_key *key)
{
return jhash(key->actions,
@@ -199,15 +296,62 @@ static inline int cmp_mod_hdr_info(struct mod_hdr_key *a,
return memcmp(a->actions, b->actions, a->num_actions * MLX5_MH_ACT_SZ);
}
+static struct mod_hdr_tbl *
+get_mod_hdr_table(struct mlx5e_priv *priv, int namespace)
+{
+ struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
+
+ return namespace == MLX5_FLOW_NAMESPACE_FDB ? &esw->offloads.mod_hdr :
+ &priv->fs.tc.mod_hdr;
+}
+
+static struct mlx5e_mod_hdr_entry *
+mlx5e_mod_hdr_get(struct mod_hdr_tbl *tbl, struct mod_hdr_key *key, u32 hash_key)
+{
+ struct mlx5e_mod_hdr_entry *mh, *found = NULL;
+
+ hash_for_each_possible(tbl->hlist, mh, mod_hdr_hlist, hash_key) {
+ if (!cmp_mod_hdr_info(&mh->key, key)) {
+ refcount_inc(&mh->refcnt);
+ found = mh;
+ break;
+ }
+ }
+
+ return found;
+}
+
+static void mlx5e_mod_hdr_put(struct mlx5e_priv *priv,
+ struct mlx5e_mod_hdr_entry *mh,
+ int namespace)
+{
+ struct mod_hdr_tbl *tbl = get_mod_hdr_table(priv, namespace);
+
+ if (!refcount_dec_and_mutex_lock(&mh->refcnt, &tbl->lock))
+ return;
+ hash_del(&mh->mod_hdr_hlist);
+ mutex_unlock(&tbl->lock);
+
+ WARN_ON(!list_empty(&mh->flows));
+ if (mh->compl_result > 0)
+ mlx5_modify_header_dealloc(priv->mdev, mh->mod_hdr_id);
+
+ kfree(mh);
+}
+
+static int get_flow_name_space(struct mlx5e_tc_flow *flow)
+{
+ return mlx5e_is_eswitch_flow(flow) ?
+ MLX5_FLOW_NAMESPACE_FDB : MLX5_FLOW_NAMESPACE_KERNEL;
+}
static int mlx5e_attach_mod_hdr(struct mlx5e_priv *priv,
struct mlx5e_tc_flow *flow,
struct mlx5e_tc_flow_parse_attr *parse_attr)
{
- struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
int num_actions, actions_size, namespace, err;
struct mlx5e_mod_hdr_entry *mh;
+ struct mod_hdr_tbl *tbl;
struct mod_hdr_key key;
- bool found = false;
u32 hash_key;
num_actions = parse_attr->num_mod_hdr_actions;
@@ -218,80 +362,82 @@ static int mlx5e_attach_mod_hdr(struct mlx5e_priv *priv,
hash_key = hash_mod_hdr_info(&key);
- if (flow->flags & MLX5E_TC_FLOW_ESWITCH) {
- namespace = MLX5_FLOW_NAMESPACE_FDB;
- hash_for_each_possible(esw->offloads.mod_hdr_tbl, mh,
- mod_hdr_hlist, hash_key) {
- if (!cmp_mod_hdr_info(&mh->key, &key)) {
- found = true;
- break;
- }
- }
- } else {
- namespace = MLX5_FLOW_NAMESPACE_KERNEL;
- hash_for_each_possible(priv->fs.tc.mod_hdr_tbl, mh,
- mod_hdr_hlist, hash_key) {
- if (!cmp_mod_hdr_info(&mh->key, &key)) {
- found = true;
- break;
- }
- }
- }
+ namespace = get_flow_name_space(flow);
+ tbl = get_mod_hdr_table(priv, namespace);
- if (found)
+ mutex_lock(&tbl->lock);
+ mh = mlx5e_mod_hdr_get(tbl, &key, hash_key);
+ if (mh) {
+ mutex_unlock(&tbl->lock);
+ wait_for_completion(&mh->res_ready);
+
+ if (mh->compl_result < 0) {
+ err = -EREMOTEIO;
+ goto attach_header_err;
+ }
goto attach_flow;
+ }
mh = kzalloc(sizeof(*mh) + actions_size, GFP_KERNEL);
- if (!mh)
+ if (!mh) {
+ mutex_unlock(&tbl->lock);
return -ENOMEM;
+ }
mh->key.actions = (void *)mh + sizeof(*mh);
memcpy(mh->key.actions, key.actions, actions_size);
mh->key.num_actions = num_actions;
+ spin_lock_init(&mh->flows_lock);
INIT_LIST_HEAD(&mh->flows);
+ refcount_set(&mh->refcnt, 1);
+ init_completion(&mh->res_ready);
+
+ hash_add(tbl->hlist, &mh->mod_hdr_hlist, hash_key);
+ mutex_unlock(&tbl->lock);
err = mlx5_modify_header_alloc(priv->mdev, namespace,
mh->key.num_actions,
mh->key.actions,
&mh->mod_hdr_id);
- if (err)
- goto out_err;
-
- if (flow->flags & MLX5E_TC_FLOW_ESWITCH)
- hash_add(esw->offloads.mod_hdr_tbl, &mh->mod_hdr_hlist, hash_key);
- else
- hash_add(priv->fs.tc.mod_hdr_tbl, &mh->mod_hdr_hlist, hash_key);
+ if (err) {
+ mh->compl_result = err;
+ goto alloc_header_err;
+ }
+ mh->compl_result = 1;
+ complete_all(&mh->res_ready);
attach_flow:
+ flow->mh = mh;
+ spin_lock(&mh->flows_lock);
list_add(&flow->mod_hdr, &mh->flows);
- if (flow->flags & MLX5E_TC_FLOW_ESWITCH)
+ spin_unlock(&mh->flows_lock);
+ if (mlx5e_is_eswitch_flow(flow))
flow->esw_attr->mod_hdr_id = mh->mod_hdr_id;
else
flow->nic_attr->mod_hdr_id = mh->mod_hdr_id;
return 0;
-out_err:
- kfree(mh);
+alloc_header_err:
+ complete_all(&mh->res_ready);
+attach_header_err:
+ mlx5e_mod_hdr_put(priv, mh, namespace);
return err;
}
static void mlx5e_detach_mod_hdr(struct mlx5e_priv *priv,
struct mlx5e_tc_flow *flow)
{
- struct list_head *next = flow->mod_hdr.next;
+ /* flow wasn't fully initialized */
+ if (!flow->mh)
+ return;
+ spin_lock(&flow->mh->flows_lock);
list_del(&flow->mod_hdr);
+ spin_unlock(&flow->mh->flows_lock);
- if (list_empty(next)) {
- struct mlx5e_mod_hdr_entry *mh;
-
- mh = list_entry(next, struct mlx5e_mod_hdr_entry, flows);
-
- mlx5_modify_header_dealloc(priv->mdev, mh->mod_hdr_id);
- hash_del(&mh->mod_hdr_hlist);
- kfree(mh);
- }
+ mlx5e_mod_hdr_put(priv, flow->mh, get_flow_name_space(flow));
+ flow->mh = NULL;
}
static
@@ -555,13 +701,35 @@ static struct mlx5e_hairpin_entry *mlx5e_hairpin_get(struct mlx5e_priv *priv,
hash_for_each_possible(priv->fs.tc.hairpin_tbl, hpe,
hairpin_hlist, hash_key) {
- if (hpe->peer_vhca_id == peer_vhca_id && hpe->prio == prio)
+ if (hpe->peer_vhca_id == peer_vhca_id && hpe->prio == prio) {
+ refcount_inc(&hpe->refcnt);
return hpe;
+ }
}
return NULL;
}
+static void mlx5e_hairpin_put(struct mlx5e_priv *priv,
+ struct mlx5e_hairpin_entry *hpe)
+{
+ /* no more hairpin flows for us, release the hairpin pair */
+ if (!refcount_dec_and_mutex_lock(&hpe->refcnt, &priv->fs.tc.hairpin_tbl_lock))
+ return;
+ hash_del(&hpe->hairpin_hlist);
+ mutex_unlock(&priv->fs.tc.hairpin_tbl_lock);
+
+ if (!IS_ERR_OR_NULL(hpe->hp)) {
+ netdev_dbg(priv->netdev, "del hairpin: peer %s\n",
+ dev_name(hpe->hp->pair->peer_mdev->device));
+
+ mlx5e_hairpin_destroy(hpe->hp);
+ }
+
+ WARN_ON(!list_empty(&hpe->flows));
+ kfree(hpe);
+}
+
#define UNKNOWN_MATCH_PRIO 8
static int mlx5e_hairpin_get_prio(struct mlx5e_priv *priv,
@@ -627,17 +795,37 @@ static int mlx5e_hairpin_flow_add(struct mlx5e_priv *priv,
extack);
if (err)
return err;
+
+ mutex_lock(&priv->fs.tc.hairpin_tbl_lock);
hpe = mlx5e_hairpin_get(priv, peer_id, match_prio);
- if (hpe)
+ if (hpe) {
+ mutex_unlock(&priv->fs.tc.hairpin_tbl_lock);
+ wait_for_completion(&hpe->res_ready);
+
+ if (IS_ERR(hpe->hp)) {
+ err = -EREMOTEIO;
+ goto out_err;
+ }
goto attach_flow;
+ }
hpe = kzalloc(sizeof(*hpe), GFP_KERNEL);
- if (!hpe)
+ if (!hpe) {
+ mutex_unlock(&priv->fs.tc.hairpin_tbl_lock);
return -ENOMEM;
+ }
+ spin_lock_init(&hpe->flows_lock);
INIT_LIST_HEAD(&hpe->flows);
+ INIT_LIST_HEAD(&hpe->dead_peer_wait_list);
hpe->peer_vhca_id = peer_id;
hpe->prio = match_prio;
+ refcount_set(&hpe->refcnt, 1);
+ init_completion(&hpe->res_ready);
+
+ hash_add(priv->fs.tc.hairpin_tbl, &hpe->hairpin_hlist,
+ hash_hairpin_info(peer_id, match_prio));
+ mutex_unlock(&priv->fs.tc.hairpin_tbl_lock);
params.log_data_size = 15;
params.log_data_size = min_t(u8, params.log_data_size,
@@ -659,9 +847,11 @@ static int mlx5e_hairpin_flow_add(struct mlx5e_priv *priv,
params.num_channels = link_speed64;
hp = mlx5e_hairpin_create(priv, &params, peer_ifindex);
+ hpe->hp = hp;
+ complete_all(&hpe->res_ready);
if (IS_ERR(hp)) {
err = PTR_ERR(hp);
- goto create_hairpin_err;
+ goto out_err;
}
netdev_dbg(priv->netdev, "add hairpin: tirn %x rqn %x peer %s sqn %x prio %d (log) data %d packets %d\n",
@@ -669,46 +859,39 @@ static int mlx5e_hairpin_flow_add(struct mlx5e_priv *priv,
dev_name(hp->pair->peer_mdev->device),
hp->pair->sqn[0], match_prio, params.log_data_size, params.log_num_packets);
- hpe->hp = hp;
- hash_add(priv->fs.tc.hairpin_tbl, &hpe->hairpin_hlist,
- hash_hairpin_info(peer_id, match_prio));
-
attach_flow:
if (hpe->hp->num_channels > 1) {
- flow->flags |= MLX5E_TC_FLOW_HAIRPIN_RSS;
+ flow_flag_set(flow, HAIRPIN_RSS);
flow->nic_attr->hairpin_ft = hpe->hp->ttc.ft.t;
} else {
flow->nic_attr->hairpin_tirn = hpe->hp->tirn;
}
+
+ flow->hpe = hpe;
+ spin_lock(&hpe->flows_lock);
list_add(&flow->hairpin, &hpe->flows);
+ spin_unlock(&hpe->flows_lock);
return 0;
-create_hairpin_err:
- kfree(hpe);
+out_err:
+ mlx5e_hairpin_put(priv, hpe);
return err;
}
static void mlx5e_hairpin_flow_del(struct mlx5e_priv *priv,
struct mlx5e_tc_flow *flow)
{
- struct list_head *next = flow->hairpin.next;
+ /* flow wasn't fully initialized */
+ if (!flow->hpe)
+ return;
+ spin_lock(&flow->hpe->flows_lock);
list_del(&flow->hairpin);
+ spin_unlock(&flow->hpe->flows_lock);
- /* no more hairpin flows for us, release the hairpin pair */
- if (list_empty(next)) {
- struct mlx5e_hairpin_entry *hpe;
-
- hpe = list_entry(next, struct mlx5e_hairpin_entry, flows);
-
- netdev_dbg(priv->netdev, "del hairpin: peer %s\n",
- dev_name(hpe->hp->pair->peer_mdev->device));
-
- mlx5e_hairpin_destroy(hpe->hp);
- hash_del(&hpe->hairpin_hlist);
- kfree(hpe);
- }
+ mlx5e_hairpin_put(priv, flow->hpe);
+ flow->hpe = NULL;
}
static int
@@ -727,18 +910,17 @@ mlx5e_tc_add_nic_flow(struct mlx5e_priv *priv,
.flags = FLOW_ACT_NO_APPEND,
};
struct mlx5_fc *counter = NULL;
- bool table_created = false;
int err, dest_ix = 0;
flow_context->flags |= FLOW_CONTEXT_HAS_TAG;
flow_context->flow_tag = attr->flow_tag;
- if (flow->flags & MLX5E_TC_FLOW_HAIRPIN) {
+ if (flow_flag_test(flow, HAIRPIN)) {
err = mlx5e_hairpin_flow_add(priv, flow, parse_attr, extack);
- if (err) {
- goto err_add_hairpin_flow;
- }
- if (flow->flags & MLX5E_TC_FLOW_HAIRPIN_RSS) {
+ if (err)
+ return err;
+
+ if (flow_flag_test(flow, HAIRPIN_RSS)) {
dest[dest_ix].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
dest[dest_ix].ft = attr->hairpin_ft;
} else {
@@ -754,10 +936,9 @@ mlx5e_tc_add_nic_flow(struct mlx5e_priv *priv,
if (attr->action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
counter = mlx5_fc_create(dev, true);
- if (IS_ERR(counter)) {
- err = PTR_ERR(counter);
- goto err_fc_create;
- }
+ if (IS_ERR(counter))
+ return PTR_ERR(counter);
+
dest[dest_ix].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
dest[dest_ix].counter_id = mlx5_fc_id(counter);
dest_ix++;
@@ -769,9 +950,10 @@ mlx5e_tc_add_nic_flow(struct mlx5e_priv *priv,
flow_act.modify_id = attr->mod_hdr_id;
kfree(parse_attr->mod_hdr_actions);
if (err)
- goto err_create_mod_hdr_id;
+ return err;
}
+ mutex_lock(&priv->fs.tc.t_lock);
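+ /* t_lock serializes lazy creation of the shared tc flow table with
+ * its teardown in mlx5e_tc_del_nic_flow()
+ */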
if (IS_ERR_OR_NULL(priv->fs.tc.t)) {
int tc_grp_size, tc_tbl_size;
u32 max_flow_counter;
@@ -791,15 +973,13 @@ mlx5e_tc_add_nic_flow(struct mlx5e_priv *priv,
MLX5E_TC_TABLE_NUM_GROUPS,
MLX5E_TC_FT_LEVEL, 0);
if (IS_ERR(priv->fs.tc.t)) {
+ mutex_unlock(&priv->fs.tc.t_lock);
NL_SET_ERR_MSG_MOD(extack,
"Failed to create tc offload table\n");
netdev_err(priv->netdev,
"Failed to create tc offload table\n");
- err = PTR_ERR(priv->fs.tc.t);
- goto err_create_ft;
+ return PTR_ERR(priv->fs.tc.t);
}
-
- table_created = true;
}
if (attr->match_level != MLX5_MATCH_NONE)
@@ -807,29 +987,12 @@ mlx5e_tc_add_nic_flow(struct mlx5e_priv *priv,
flow->rule[0] = mlx5_add_flow_rules(priv->fs.tc.t, &parse_attr->spec,
&flow_act, dest, dest_ix);
+ mutex_unlock(&priv->fs.tc.t_lock);
- if (IS_ERR(flow->rule[0])) {
- err = PTR_ERR(flow->rule[0]);
- goto err_add_rule;
- }
+ if (IS_ERR(flow->rule[0]))
+ return PTR_ERR(flow->rule[0]);
return 0;
-
-err_add_rule:
- if (table_created) {
- mlx5_destroy_flow_table(priv->fs.tc.t);
- priv->fs.tc.t = NULL;
- }
-err_create_ft:
- if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
- mlx5e_detach_mod_hdr(priv, flow);
-err_create_mod_hdr_id:
- mlx5_fc_destroy(dev, counter);
-err_fc_create:
- if (flow->flags & MLX5E_TC_FLOW_HAIRPIN)
- mlx5e_hairpin_flow_del(priv, flow);
-err_add_hairpin_flow:
- return err;
}
static void mlx5e_tc_del_nic_flow(struct mlx5e_priv *priv,
@@ -839,18 +1002,21 @@ static void mlx5e_tc_del_nic_flow(struct mlx5e_priv *priv,
struct mlx5_fc *counter = NULL;
counter = attr->counter;
- mlx5_del_flow_rules(flow->rule[0]);
+ if (!IS_ERR_OR_NULL(flow->rule[0]))
+ mlx5_del_flow_rules(flow->rule[0]);
mlx5_fc_destroy(priv->mdev, counter);
- if (!mlx5e_tc_num_filters(priv, MLX5E_TC_NIC_OFFLOAD) && priv->fs.tc.t) {
+ mutex_lock(&priv->fs.tc.t_lock);
+ if (!mlx5e_tc_num_filters(priv, MLX5_TC_FLAG(NIC_OFFLOAD)) && priv->fs.tc.t) {
mlx5_destroy_flow_table(priv->fs.tc.t);
priv->fs.tc.t = NULL;
}
+ mutex_unlock(&priv->fs.tc.t_lock);
if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
mlx5e_detach_mod_hdr(priv, flow);
- if (flow->flags & MLX5E_TC_FLOW_HAIRPIN)
+ if (flow_flag_test(flow, HAIRPIN))
mlx5e_hairpin_flow_del(priv, flow);
}
@@ -885,7 +1051,6 @@ mlx5e_tc_offload_fdb_rules(struct mlx5_eswitch *esw,
}
}
- flow->flags |= MLX5E_TC_FLOW_OFFLOADED;
return rule;
}
@@ -894,7 +1059,7 @@ mlx5e_tc_unoffload_fdb_rules(struct mlx5_eswitch *esw,
struct mlx5e_tc_flow *flow,
struct mlx5_esw_flow_attr *attr)
{
- flow->flags &= ~MLX5E_TC_FLOW_OFFLOADED;
+ flow_flag_clear(flow, OFFLOADED);
if (attr->split_count)
mlx5_eswitch_del_fwd_rule(esw, flow->rule[1], attr);
@@ -917,7 +1082,7 @@ mlx5e_tc_offload_to_slow_path(struct mlx5_eswitch *esw,
rule = mlx5e_tc_offload_fdb_rules(esw, flow, spec, slow_attr);
if (!IS_ERR(rule))
- flow->flags |= MLX5E_TC_FLOW_SLOW;
+ flow_flag_set(flow, SLOW);
return rule;
}
@@ -932,7 +1097,26 @@ mlx5e_tc_unoffload_from_slow_path(struct mlx5_eswitch *esw,
slow_attr->split_count = 0;
slow_attr->dest_chain = FDB_SLOW_PATH_CHAIN;
mlx5e_tc_unoffload_fdb_rules(esw, flow, slow_attr);
- flow->flags &= ~MLX5E_TC_FLOW_SLOW;
+ flow_flag_clear(flow, SLOW);
+}
+
+/* Caller must obtain uplink_priv->unready_flows_lock mutex before calling this
+ * function.
+ */
+static void unready_flow_add(struct mlx5e_tc_flow *flow,
+ struct list_head *unready_flows)
+{
+ flow_flag_set(flow, NOT_READY);
+ list_add_tail(&flow->unready, unready_flows);
+}
+
+/* Caller must obtain uplink_priv->unready_flows_lock mutex before calling this
+ * function.
+ */
+static void unready_flow_del(struct mlx5e_tc_flow *flow)
+{
+ list_del(&flow->unready);
+ flow_flag_clear(flow, NOT_READY);
}
static void add_unready_flow(struct mlx5e_tc_flow *flow)
@@ -945,14 +1129,24 @@ static void add_unready_flow(struct mlx5e_tc_flow *flow)
rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
uplink_priv = &rpriv->uplink_priv;
- flow->flags |= MLX5E_TC_FLOW_NOT_READY;
- list_add_tail(&flow->unready, &uplink_priv->unready_flows);
+ mutex_lock(&uplink_priv->unready_flows_lock);
+ unready_flow_add(flow, &uplink_priv->unready_flows);
+ mutex_unlock(&uplink_priv->unready_flows_lock);
}
static void remove_unready_flow(struct mlx5e_tc_flow *flow)
{
- list_del(&flow->unready);
- flow->flags &= ~MLX5E_TC_FLOW_NOT_READY;
+ struct mlx5_rep_uplink_priv *uplink_priv;
+ struct mlx5e_rep_priv *rpriv;
+ struct mlx5_eswitch *esw;
+
+ esw = flow->priv->mdev->priv.eswitch;
+ rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
+ uplink_priv = &rpriv->uplink_priv;
+
+ mutex_lock(&uplink_priv->unready_flows_lock);
+ unready_flow_del(flow);
+ mutex_unlock(&uplink_priv->unready_flows_lock);
}
static int
@@ -980,14 +1174,12 @@ mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
if (attr->chain > max_chain) {
NL_SET_ERR_MSG(extack, "Requested chain is out of supported range");
- err = -EOPNOTSUPP;
- goto err_max_prio_chain;
+ return -EOPNOTSUPP;
}
if (attr->prio > max_prio) {
NL_SET_ERR_MSG(extack, "Requested priority is out of supported range");
- err = -EOPNOTSUPP;
- goto err_max_prio_chain;
+ return -EOPNOTSUPP;
}
for (out_index = 0; out_index < MLX5_MAX_FLOW_FWD_VPORTS; out_index++) {
@@ -1002,7 +1194,7 @@ mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
err = mlx5e_attach_encap(priv, flow, out_dev, out_index,
extack, &encap_dev, &encap_valid);
if (err)
- goto err_attach_encap;
+ return err;
out_priv = netdev_priv(encap_dev);
rpriv = out_priv->ppriv;
@@ -1012,21 +1204,19 @@ mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
err = mlx5_eswitch_add_vlan_action(esw, attr);
if (err)
- goto err_add_vlan;
+ return err;
if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) {
err = mlx5e_attach_mod_hdr(priv, flow, parse_attr);
kfree(parse_attr->mod_hdr_actions);
if (err)
- goto err_mod_hdr;
+ return err;
}
if (attr->action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
counter = mlx5_fc_create(attr->counter_dev, true);
- if (IS_ERR(counter)) {
- err = PTR_ERR(counter);
- goto err_create_counter;
- }
+ if (IS_ERR(counter))
+ return PTR_ERR(counter);
attr->counter = counter;
}
@@ -1044,27 +1234,12 @@ mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
flow->rule[0] = mlx5e_tc_offload_fdb_rules(esw, flow, &parse_attr->spec, attr);
}
- if (IS_ERR(flow->rule[0])) {
- err = PTR_ERR(flow->rule[0]);
- goto err_add_rule;
- }
+ if (IS_ERR(flow->rule[0]))
+ return PTR_ERR(flow->rule[0]);
+ else
+ flow_flag_set(flow, OFFLOADED);
return 0;
-
-err_add_rule:
- mlx5_fc_destroy(attr->counter_dev, counter);
-err_create_counter:
- if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
- mlx5e_detach_mod_hdr(priv, flow);
-err_mod_hdr:
- mlx5_eswitch_del_vlan_action(esw, attr);
-err_add_vlan:
- for (out_index = 0; out_index < MLX5_MAX_FLOW_FWD_VPORTS; out_index++)
- if (attr->dests[out_index].flags & MLX5_ESW_DEST_ENCAP)
- mlx5e_detach_encap(priv, flow, out_index);
-err_attach_encap:
-err_max_prio_chain:
- return err;
}
static bool mlx5_flow_has_geneve_opt(struct mlx5e_tc_flow *flow)
@@ -1088,14 +1263,14 @@ static void mlx5e_tc_del_fdb_flow(struct mlx5e_priv *priv,
struct mlx5_esw_flow_attr slow_attr;
int out_index;
- if (flow->flags & MLX5E_TC_FLOW_NOT_READY) {
+ if (flow_flag_test(flow, NOT_READY)) {
remove_unready_flow(flow);
kvfree(attr->parse_attr);
return;
}
- if (flow->flags & MLX5E_TC_FLOW_OFFLOADED) {
- if (flow->flags & MLX5E_TC_FLOW_SLOW)
+ if (mlx5e_is_offloaded_flow(flow)) {
+ if (flow_flag_test(flow, SLOW))
mlx5e_tc_unoffload_from_slow_path(esw, flow, &slow_attr);
else
mlx5e_tc_unoffload_fdb_rules(esw, flow, attr);
@@ -1119,13 +1294,13 @@ static void mlx5e_tc_del_fdb_flow(struct mlx5e_priv *priv,
}
void mlx5e_tc_encap_flows_add(struct mlx5e_priv *priv,
- struct mlx5e_encap_entry *e)
+ struct mlx5e_encap_entry *e,
+ struct list_head *flow_list)
{
struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
struct mlx5_esw_flow_attr slow_attr, *esw_attr;
struct mlx5_flow_handle *rule;
struct mlx5_flow_spec *spec;
- struct encap_flow_item *efi;
struct mlx5e_tc_flow *flow;
int err;
@@ -1142,16 +1317,17 @@ void mlx5e_tc_encap_flows_add(struct mlx5e_priv *priv,
e->flags |= MLX5_ENCAP_ENTRY_VALID;
mlx5e_rep_queue_neigh_stats_work(priv);
- list_for_each_entry(efi, &e->flows, list) {
+ list_for_each_entry(flow, flow_list, tmp_list) {
bool all_flow_encaps_valid = true;
int i;
- flow = container_of(efi, struct mlx5e_tc_flow, encaps[efi->index]);
+ if (!mlx5e_is_offloaded_flow(flow))
+ continue;
esw_attr = flow->esw_attr;
spec = &esw_attr->parse_attr->spec;
- esw_attr->dests[efi->index].encap_id = e->encap_id;
- esw_attr->dests[efi->index].flags |= MLX5_ESW_DEST_ENCAP_VALID;
+ esw_attr->dests[flow->tmp_efi_index].encap_id = e->encap_id;
+ esw_attr->dests[flow->tmp_efi_index].flags |= MLX5_ESW_DEST_ENCAP_VALID;
/* Flow can be associated with multiple encap entries.
* Before offloading the flow verify that all of them have
* a valid neighbour.
@@ -1177,30 +1353,32 @@ void mlx5e_tc_encap_flows_add(struct mlx5e_priv *priv,
}
mlx5e_tc_unoffload_from_slow_path(esw, flow, &slow_attr);
- flow->flags |= MLX5E_TC_FLOW_OFFLOADED; /* was unset when slow path rule removed */
flow->rule[0] = rule;
+ /* was unset when slow path rule removed */
+ flow_flag_set(flow, OFFLOADED);
}
}
void mlx5e_tc_encap_flows_del(struct mlx5e_priv *priv,
- struct mlx5e_encap_entry *e)
+ struct mlx5e_encap_entry *e,
+ struct list_head *flow_list)
{
struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
struct mlx5_esw_flow_attr slow_attr;
struct mlx5_flow_handle *rule;
struct mlx5_flow_spec *spec;
- struct encap_flow_item *efi;
struct mlx5e_tc_flow *flow;
int err;
- list_for_each_entry(efi, &e->flows, list) {
- flow = container_of(efi, struct mlx5e_tc_flow, encaps[efi->index]);
+ list_for_each_entry(flow, flow_list, tmp_list) {
+ if (!mlx5e_is_offloaded_flow(flow))
+ continue;
spec = &flow->esw_attr->parse_attr->spec;
/* update from encap rule to slow path rule */
rule = mlx5e_tc_offload_to_slow_path(esw, flow, spec, &slow_attr);
/* mark the flow's encap dest as non-valid */
- flow->esw_attr->dests[efi->index].flags &= ~MLX5_ESW_DEST_ENCAP_VALID;
+ flow->esw_attr->dests[flow->tmp_efi_index].flags &= ~MLX5_ESW_DEST_ENCAP_VALID;
if (IS_ERR(rule)) {
err = PTR_ERR(rule);
@@ -1210,8 +1388,9 @@ void mlx5e_tc_encap_flows_del(struct mlx5e_priv *priv,
}
mlx5e_tc_unoffload_fdb_rules(esw, flow, flow->esw_attr);
- flow->flags |= MLX5E_TC_FLOW_OFFLOADED; /* was unset when fast path rule removed */
flow->rule[0] = rule;
+ /* was unset when fast path rule removed */
+ flow_flag_set(flow, OFFLOADED);
}
/* we know that the encap is valid */
@@ -1221,17 +1400,90 @@ void mlx5e_tc_encap_flows_del(struct mlx5e_priv *priv,
static struct mlx5_fc *mlx5e_tc_get_counter(struct mlx5e_tc_flow *flow)
{
- if (flow->flags & MLX5E_TC_FLOW_ESWITCH)
+ if (mlx5e_is_eswitch_flow(flow))
return flow->esw_attr->counter;
else
return flow->nic_attr->counter;
}
+/* Take a reference to all flows attached to the encap entry and add them to
+ * flow_list, linked via the 'tmp_list' list_head in mlx5e_tc_flow.
+ */
+void mlx5e_take_all_encap_flows(struct mlx5e_encap_entry *e, struct list_head *flow_list)
+{
+ struct encap_flow_item *efi;
+ struct mlx5e_tc_flow *flow;
+
+ list_for_each_entry(efi, &e->flows, list) {
+ flow = container_of(efi, struct mlx5e_tc_flow, encaps[efi->index]);
+ if (IS_ERR(mlx5e_flow_get(flow)))
+ continue;
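+ /* init_done is completed in __mlx5e_add_fdb_flow() once
+ * mlx5e_tc_add_fdb_flow() has returned, so waiting here guarantees
+ * the flow's offload state is settled before it is exposed on
+ * flow_list
+ */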
+ wait_for_completion(&flow->init_done);
+
+ flow->tmp_efi_index = efi->index;
+ list_add(&flow->tmp_list, flow_list);
+ }
+}
+
+/* Release the flow references taken by mlx5e_take_all_encap_flows(),
+ * iterating over flow_list via the 'tmp_list' member. */
+void mlx5e_put_encap_flow_list(struct mlx5e_priv *priv, struct list_head *flow_list)
+{
+ struct mlx5e_tc_flow *flow, *tmp;
+
+ list_for_each_entry_safe(flow, tmp, flow_list, tmp_list)
+ mlx5e_flow_put(priv, flow);
+}
+
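+/* Find the next encap entry on @nhe->encap_list (starting after @e, or from
+ * the list head when @e is NULL) whose reference count can still be taken,
+ * and return it with the reference held; the reference on @e is released.
+ */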
+static struct mlx5e_encap_entry *
+mlx5e_get_next_valid_encap(struct mlx5e_neigh_hash_entry *nhe,
+ struct mlx5e_encap_entry *e)
+{
+ struct mlx5e_encap_entry *next = NULL;
+
+retry:
+ rcu_read_lock();
+
+ /* find encap with non-zero reference counter value */
+ for (next = e ?
+ list_next_or_null_rcu(&nhe->encap_list,
+ &e->encap_list,
+ struct mlx5e_encap_entry,
+ encap_list) :
+ list_first_or_null_rcu(&nhe->encap_list,
+ struct mlx5e_encap_entry,
+ encap_list);
+ next;
+ next = list_next_or_null_rcu(&nhe->encap_list,
+ &next->encap_list,
+ struct mlx5e_encap_entry,
+ encap_list))
+ if (mlx5e_encap_take(next))
+ break;
+
+ rcu_read_unlock();
+
+ /* release starting encap */
+ if (e)
+ mlx5e_encap_put(netdev_priv(e->out_dev), e);
+ if (!next)
+ return next;
+
+ /* wait for encap to be fully initialized */
+ wait_for_completion(&next->res_ready);
+ /* continue searching if encap entry is not in valid state after completion */
+ if (!(next->flags & MLX5_ENCAP_ENTRY_VALID)) {
+ e = next;
+ goto retry;
+ }
+
+ return next;
+}
+
void mlx5e_tc_update_neigh_used_value(struct mlx5e_neigh_hash_entry *nhe)
{
struct mlx5e_neigh *m_neigh = &nhe->m_neigh;
+ struct mlx5e_encap_entry *e = NULL;
struct mlx5e_tc_flow *flow;
- struct mlx5e_encap_entry *e;
struct mlx5_fc *counter;
struct neigh_table *tbl;
bool neigh_used = false;
@@ -1247,14 +1499,25 @@ void mlx5e_tc_update_neigh_used_value(struct mlx5e_neigh_hash_entry *nhe)
else
return;
- list_for_each_entry(e, &nhe->encap_list, encap_list) {
- struct encap_flow_item *efi;
- if (!(e->flags & MLX5_ENCAP_ENTRY_VALID))
- continue;
- list_for_each_entry(efi, &e->flows, list) {
+ /* mlx5e_get_next_valid_encap() releases the previous encap before
+ * returning the next one.
+ */
+ while ((e = mlx5e_get_next_valid_encap(nhe, e)) != NULL) {
+ struct mlx5e_priv *priv = netdev_priv(e->out_dev);
+ struct encap_flow_item *efi, *tmp;
+ struct mlx5_eswitch *esw;
+ LIST_HEAD(flow_list);
+
+ esw = priv->mdev->priv.eswitch;
+ mutex_lock(&esw->offloads.encap_tbl_lock);
+ list_for_each_entry_safe(efi, tmp, &e->flows, list) {
flow = container_of(efi, struct mlx5e_tc_flow,
encaps[efi->index]);
- if (flow->flags & MLX5E_TC_FLOW_OFFLOADED) {
+ if (IS_ERR(mlx5e_flow_get(flow)))
+ continue;
+ list_add(&flow->tmp_list, &flow_list);
+
+ if (mlx5e_is_offloaded_flow(flow)) {
counter = mlx5e_tc_get_counter(flow);
lastuse = mlx5_fc_query_lastuse(counter);
if (time_after((unsigned long)lastuse, nhe->reported_lastuse)) {
@@ -1263,10 +1526,18 @@ void mlx5e_tc_update_neigh_used_value(struct mlx5e_neigh_hash_entry *nhe)
}
}
}
- if (neigh_used)
+ mutex_unlock(&esw->offloads.encap_tbl_lock);
+
+ mlx5e_put_encap_flow_list(priv, &flow_list);
+ if (neigh_used) {
+ /* release current encap before breaking the loop */
+ mlx5e_encap_put(priv, e);
break;
+ }
}
+ trace_mlx5e_tc_update_neigh_used_value(nhe, neigh_used);
+
if (neigh_used) {
nhe->reported_lastuse = jiffies;
@@ -1282,40 +1553,69 @@ void mlx5e_tc_update_neigh_used_value(struct mlx5e_neigh_hash_entry *nhe)
}
}
-static void mlx5e_detach_encap(struct mlx5e_priv *priv,
- struct mlx5e_tc_flow *flow, int out_index)
+static void mlx5e_encap_dealloc(struct mlx5e_priv *priv, struct mlx5e_encap_entry *e)
{
- struct list_head *next = flow->encaps[out_index].list.next;
+ WARN_ON(!list_empty(&e->flows));
- list_del(&flow->encaps[out_index].list);
- if (list_empty(next)) {
- struct mlx5e_encap_entry *e;
-
- e = list_entry(next, struct mlx5e_encap_entry, flows);
+ if (e->compl_result > 0) {
mlx5e_rep_encap_entry_detach(netdev_priv(e->out_dev), e);
if (e->flags & MLX5_ENCAP_ENTRY_VALID)
mlx5_packet_reformat_dealloc(priv->mdev, e->encap_id);
+ }
- hash_del_rcu(&e->encap_hlist);
- kfree(e->encap_header);
- kfree(e);
+ kfree(e->encap_header);
+ kfree_rcu(e, rcu);
+}
+
+void mlx5e_encap_put(struct mlx5e_priv *priv, struct mlx5e_encap_entry *e)
+{
+ struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
+
+ if (!refcount_dec_and_mutex_lock(&e->refcnt, &esw->offloads.encap_tbl_lock))
+ return;
+ hash_del_rcu(&e->encap_hlist);
+ mutex_unlock(&esw->offloads.encap_tbl_lock);
+
+ mlx5e_encap_dealloc(priv, e);
+}
+
+static void mlx5e_detach_encap(struct mlx5e_priv *priv,
+ struct mlx5e_tc_flow *flow, int out_index)
+{
+ struct mlx5e_encap_entry *e = flow->encaps[out_index].e;
+ struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
+
+ /* flow wasn't fully initialized */
+ if (!e)
+ return;
+
+ mutex_lock(&esw->offloads.encap_tbl_lock);
+ list_del(&flow->encaps[out_index].list);
+ flow->encaps[out_index].e = NULL;
+ if (!refcount_dec_and_test(&e->refcnt)) {
+ mutex_unlock(&esw->offloads.encap_tbl_lock);
+ return;
}
+ hash_del_rcu(&e->encap_hlist);
+ mutex_unlock(&esw->offloads.encap_tbl_lock);
+
+ mlx5e_encap_dealloc(priv, e);
}
static void __mlx5e_tc_del_fdb_peer_flow(struct mlx5e_tc_flow *flow)
{
struct mlx5_eswitch *esw = flow->priv->mdev->priv.eswitch;
- if (!(flow->flags & MLX5E_TC_FLOW_ESWITCH) ||
- !(flow->flags & MLX5E_TC_FLOW_DUP))
+ if (!flow_flag_test(flow, ESWITCH) ||
+ !flow_flag_test(flow, DUP))
return;
mutex_lock(&esw->offloads.peer_mutex);
list_del(&flow->peer);
mutex_unlock(&esw->offloads.peer_mutex);
- flow->flags &= ~MLX5E_TC_FLOW_DUP;
+ flow_flag_clear(flow, DUP);
mlx5e_tc_del_fdb_flow(flow->peer_flow->priv, flow->peer_flow);
kvfree(flow->peer_flow);
@@ -1339,7 +1639,7 @@ static void mlx5e_tc_del_fdb_peer_flow(struct mlx5e_tc_flow *flow)
static void mlx5e_tc_del_flow(struct mlx5e_priv *priv,
struct mlx5e_tc_flow *flow)
{
- if (flow->flags & MLX5E_TC_FLOW_ESWITCH) {
+ if (mlx5e_is_eswitch_flow(flow)) {
mlx5e_tc_del_fdb_peer_flow(flow);
mlx5e_tc_del_fdb_flow(priv, flow);
} else {
@@ -1840,6 +2140,7 @@ static int parse_cls_flower(struct mlx5e_priv *priv,
struct mlx5_eswitch *esw = dev->priv.eswitch;
struct mlx5e_rep_priv *rpriv = priv->ppriv;
struct mlx5_eswitch_rep *rep;
+ bool is_eswitch_flow;
int err;
inner_match_level = MLX5_MATCH_NONE;
@@ -1850,7 +2151,8 @@ static int parse_cls_flower(struct mlx5e_priv *priv,
non_tunnel_match_level = (inner_match_level == MLX5_MATCH_NONE) ?
outer_match_level : inner_match_level;
- if (!err && (flow->flags & MLX5E_TC_FLOW_ESWITCH)) {
+ is_eswitch_flow = mlx5e_is_eswitch_flow(flow);
+ if (!err && is_eswitch_flow) {
rep = rpriv->rep;
if (rep->vport != MLX5_VPORT_UPLINK &&
(esw->offloads.inline_mode != MLX5_INLINE_MODE_NONE &&
@@ -1864,7 +2166,7 @@ static int parse_cls_flower(struct mlx5e_priv *priv,
}
}
- if (flow->flags & MLX5E_TC_FLOW_ESWITCH) {
+ if (is_eswitch_flow) {
flow->esw_attr->inner_match_level = inner_match_level;
flow->esw_attr->outer_match_level = outer_match_level;
} else {
@@ -2385,14 +2687,15 @@ static bool actions_match_supported(struct mlx5e_priv *priv,
{
u32 actions;
- if (flow->flags & MLX5E_TC_FLOW_ESWITCH)
+ if (mlx5e_is_eswitch_flow(flow))
actions = flow->esw_attr->action;
else
actions = flow->nic_attr->action;
- if (flow->flags & MLX5E_TC_FLOW_EGRESS &&
+ if (flow_flag_test(flow, EGRESS) &&
!((actions & MLX5_FLOW_CONTEXT_ACTION_DECAP) ||
- (actions & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP)))
+ (actions & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP) ||
+ (actions & MLX5_FLOW_CONTEXT_ACTION_DROP)))
return false;
if (actions & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
@@ -2542,7 +2845,7 @@ static int parse_tc_nic_actions(struct mlx5e_priv *priv,
if (priv->netdev->netdev_ops == peer_dev->netdev_ops &&
same_hw_devs(priv, netdev_priv(peer_dev))) {
parse_attr->mirred_ifindex[0] = peer_dev->ifindex;
- flow->flags |= MLX5E_TC_FLOW_HAIRPIN;
+ flow_flag_set(flow, HAIRPIN);
action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
MLX5_FLOW_CONTEXT_ACTION_COUNT;
} else {
@@ -2629,6 +2932,31 @@ static bool is_merged_eswitch_dev(struct mlx5e_priv *priv,
+bool mlx5e_encap_take(struct mlx5e_encap_entry *e)
+{
+ return refcount_inc_not_zero(&e->refcnt);
+}
+
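+/* Look up an encap entry by tunnel key and take a reference to it. Entries
+ * whose refcount already dropped to zero are skipped; mlx5e_encap_put() is
+ * about to unhash them.
+ */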
+static struct mlx5e_encap_entry *
+mlx5e_encap_get(struct mlx5e_priv *priv, struct encap_key *key,
+ uintptr_t hash_key)
+{
+ struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
+ struct mlx5e_encap_entry *e;
+ struct encap_key e_key;
+
+ hash_for_each_possible_rcu(esw->offloads.encap_tbl, e,
+ encap_hlist, hash_key) {
+ e_key.ip_tun_key = &e->tun_info->key;
+ e_key.tc_tunnel = e->tunnel;
+ if (!cmp_encap_info(&e_key, key) &&
+ mlx5e_encap_take(e))
+ return e;
+ }
+
+ return NULL;
+}
+
static int mlx5e_attach_encap(struct mlx5e_priv *priv,
struct mlx5e_tc_flow *flow,
struct net_device *mirred_dev,
@@ -2641,11 +2969,10 @@ static int mlx5e_attach_encap(struct mlx5e_priv *priv,
struct mlx5_esw_flow_attr *attr = flow->esw_attr;
struct mlx5e_tc_flow_parse_attr *parse_attr;
const struct ip_tunnel_info *tun_info;
- struct encap_key key, e_key;
+ struct encap_key key;
struct mlx5e_encap_entry *e;
unsigned short family;
uintptr_t hash_key;
- bool found = false;
int err = 0;
parse_attr = attr->parse_attr;
@@ -2660,42 +2987,60 @@ static int mlx5e_attach_encap(struct mlx5e_priv *priv,
hash_key = hash_encap_info(&key);
- hash_for_each_possible_rcu(esw->offloads.encap_tbl, e,
- encap_hlist, hash_key) {
- e_key.ip_tun_key = &e->tun_info->key;
- e_key.tc_tunnel = e->tunnel;
- if (!cmp_encap_info(&e_key, &key)) {
- found = true;
- break;
- }
- }
+ mutex_lock(&esw->offloads.encap_tbl_lock);
+ e = mlx5e_encap_get(priv, &key, hash_key);
/* must verify if encap is valid or not */
- if (found)
+ if (e) {
+ mutex_unlock(&esw->offloads.encap_tbl_lock);
+ wait_for_completion(&e->res_ready);
+
+ /* Protect against concurrent neigh update. */
+ mutex_lock(&esw->offloads.encap_tbl_lock);
+ if (e->compl_result < 0) {
+ err = -EREMOTEIO;
+ goto out_err;
+ }
goto attach_flow;
+ }
e = kzalloc(sizeof(*e), GFP_KERNEL);
- if (!e)
- return -ENOMEM;
+ if (!e) {
+ err = -ENOMEM;
+ goto out_err;
+ }
+
+ refcount_set(&e->refcnt, 1);
+ init_completion(&e->res_ready);
e->tun_info = tun_info;
err = mlx5e_tc_tun_init_encap_attr(mirred_dev, priv, e, extack);
- if (err)
+ if (err) {
+ kfree(e);
+ e = NULL;
goto out_err;
+ }
INIT_LIST_HEAD(&e->flows);
+ hash_add_rcu(esw->offloads.encap_tbl, &e->encap_hlist, hash_key);
+ mutex_unlock(&esw->offloads.encap_tbl_lock);
if (family == AF_INET)
err = mlx5e_tc_tun_create_header_ipv4(priv, mirred_dev, e);
else if (family == AF_INET6)
err = mlx5e_tc_tun_create_header_ipv6(priv, mirred_dev, e);
- if (err)
+ /* Protect against concurrent neigh update. */
+ mutex_lock(&esw->offloads.encap_tbl_lock);
+ complete_all(&e->res_ready);
+ if (err) {
+ e->compl_result = err;
goto out_err;
-
- hash_add_rcu(esw->offloads.encap_tbl, &e->encap_hlist, hash_key);
+ }
+ e->compl_result = 1;
attach_flow:
+ flow->encaps[out_index].e = e;
list_add(&flow->encaps[out_index].list, &e->flows);
flow->encaps[out_index].index = out_index;
*encap_dev = e->out_dev;
@@ -2706,11 +3051,14 @@ attach_flow:
} else {
*encap_valid = false;
}
+ mutex_unlock(&esw->offloads.encap_tbl_lock);
return err;
out_err:
- kfree(e);
+ mutex_unlock(&esw->offloads.encap_tbl_lock);
+ if (e)
+ mlx5e_encap_put(priv, e);
return err;
}
@@ -2890,12 +3238,16 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv,
if (netdev_port_same_parent_id(priv->netdev, out_dev)) {
struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
struct net_device *uplink_dev = mlx5_eswitch_uplink_get_proto_dev(esw, REP_ETH);
- struct net_device *uplink_upper = netdev_master_upper_dev_get(uplink_dev);
+ struct net_device *uplink_upper;
+ rcu_read_lock();
+ uplink_upper =
+ netdev_master_upper_dev_get_rcu(uplink_dev);
if (uplink_upper &&
netif_is_lag_master(uplink_upper) &&
uplink_upper == out_dev)
out_dev = uplink_dev;
+ rcu_read_unlock();
if (is_vlan_dev(out_dev)) {
err = add_vlan_push_action(priv, attr,
@@ -3066,19 +3418,19 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv,
return 0;
}
-static void get_flags(int flags, u16 *flow_flags)
+static void get_flags(int flags, unsigned long *flow_flags)
{
- u16 __flow_flags = 0;
+ unsigned long __flow_flags = 0;
- if (flags & MLX5E_TC_INGRESS)
- __flow_flags |= MLX5E_TC_FLOW_INGRESS;
- if (flags & MLX5E_TC_EGRESS)
- __flow_flags |= MLX5E_TC_FLOW_EGRESS;
+ if (flags & MLX5_TC_FLAG(INGRESS))
+ __flow_flags |= BIT(MLX5E_TC_FLOW_FLAG_INGRESS);
+ if (flags & MLX5_TC_FLAG(EGRESS))
+ __flow_flags |= BIT(MLX5E_TC_FLOW_FLAG_EGRESS);
- if (flags & MLX5E_TC_ESW_OFFLOAD)
- __flow_flags |= MLX5E_TC_FLOW_ESWITCH;
- if (flags & MLX5E_TC_NIC_OFFLOAD)
- __flow_flags |= MLX5E_TC_FLOW_NIC;
+ if (flags & MLX5_TC_FLAG(ESW_OFFLOAD))
+ __flow_flags |= BIT(MLX5E_TC_FLOW_FLAG_ESWITCH);
+ if (flags & MLX5_TC_FLAG(NIC_OFFLOAD))
+ __flow_flags |= BIT(MLX5E_TC_FLOW_FLAG_NIC);
*flow_flags = __flow_flags;
}
@@ -3090,12 +3442,13 @@ static const struct rhashtable_params tc_ht_params = {
.automatic_shrinking = true,
};
-static struct rhashtable *get_tc_ht(struct mlx5e_priv *priv, int flags)
+static struct rhashtable *get_tc_ht(struct mlx5e_priv *priv,
+ unsigned long flags)
{
struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
struct mlx5e_rep_priv *uplink_rpriv;
- if (flags & MLX5E_TC_ESW_OFFLOAD) {
+ if (flags & MLX5_TC_FLAG(ESW_OFFLOAD)) {
uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
return &uplink_rpriv->uplink_priv.tc_ht;
} else /* NIC offload */
@@ -3106,7 +3459,7 @@ static bool is_peer_flow_needed(struct mlx5e_tc_flow *flow)
{
struct mlx5_esw_flow_attr *attr = flow->esw_attr;
bool is_rep_ingress = attr->in_rep->vport != MLX5_VPORT_UPLINK &&
- flow->flags & MLX5E_TC_FLOW_INGRESS;
+ flow_flag_test(flow, INGRESS);
bool act_is_encap = !!(attr->action &
MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT);
bool esw_paired = mlx5_devcom_is_paired(attr->in_mdev->priv.devcom,
@@ -3125,13 +3478,13 @@ static bool is_peer_flow_needed(struct mlx5e_tc_flow *flow)
static int
mlx5e_alloc_flow(struct mlx5e_priv *priv, int attr_size,
- struct flow_cls_offload *f, u16 flow_flags,
+ struct flow_cls_offload *f, unsigned long flow_flags,
struct mlx5e_tc_flow_parse_attr **__parse_attr,
struct mlx5e_tc_flow **__flow)
{
struct mlx5e_tc_flow_parse_attr *parse_attr;
struct mlx5e_tc_flow *flow;
- int err;
+ int out_index, err;
flow = kzalloc(sizeof(*flow) + attr_size, GFP_KERNEL);
parse_attr = kvzalloc(sizeof(*parse_attr), GFP_KERNEL);
@@ -3143,6 +3496,12 @@ mlx5e_alloc_flow(struct mlx5e_priv *priv, int attr_size,
flow->cookie = f->cookie;
flow->flags = flow_flags;
flow->priv = priv;
+ for (out_index = 0; out_index < MLX5_MAX_FLOW_FWD_VPORTS; out_index++)
+ INIT_LIST_HEAD(&flow->encaps[out_index].list);
+ INIT_LIST_HEAD(&flow->mod_hdr);
+ INIT_LIST_HEAD(&flow->hairpin);
+ refcount_set(&flow->refcnt, 1);
+ init_completion(&flow->init_done);
*__flow = flow;
*__parse_attr = parse_attr;
@@ -3182,7 +3541,7 @@ mlx5e_flow_esw_attr_init(struct mlx5_esw_flow_attr *esw_attr,
static struct mlx5e_tc_flow *
__mlx5e_add_fdb_flow(struct mlx5e_priv *priv,
struct flow_cls_offload *f,
- u16 flow_flags,
+ unsigned long flow_flags,
struct net_device *filter_dev,
struct mlx5_eswitch_rep *in_rep,
struct mlx5_core_dev *in_mdev)
@@ -3193,7 +3552,7 @@ __mlx5e_add_fdb_flow(struct mlx5e_priv *priv,
struct mlx5e_tc_flow *flow;
int attr_size, err;
- flow_flags |= MLX5E_TC_FLOW_ESWITCH;
+ flow_flags |= BIT(MLX5E_TC_FLOW_FLAG_ESWITCH);
attr_size = sizeof(struct mlx5_esw_flow_attr);
err = mlx5e_alloc_flow(priv, attr_size, f, flow_flags,
&parse_attr, &flow);
@@ -3215,6 +3574,7 @@ __mlx5e_add_fdb_flow(struct mlx5e_priv *priv,
goto err_free;
err = mlx5e_tc_add_fdb_flow(priv, flow, extack);
+ complete_all(&flow->init_done);
if (err) {
if (!(err == -ENETUNREACH && mlx5_lag_is_multipath(in_mdev)))
goto err_free;
@@ -3225,15 +3585,14 @@ __mlx5e_add_fdb_flow(struct mlx5e_priv *priv,
return flow;
err_free:
- kfree(flow);
- kvfree(parse_attr);
+ mlx5e_flow_put(priv, flow);
out:
return ERR_PTR(err);
}
static int mlx5e_tc_add_fdb_peer_flow(struct flow_cls_offload *f,
struct mlx5e_tc_flow *flow,
- u16 flow_flags)
+ unsigned long flow_flags)
{
struct mlx5e_priv *priv = flow->priv, *peer_priv;
struct mlx5_eswitch *esw = priv->mdev->priv.eswitch, *peer_esw;
@@ -3271,7 +3630,7 @@ static int mlx5e_tc_add_fdb_peer_flow(struct flow_cls_offload *f,
}
flow->peer_flow = peer_flow;
- flow->flags |= MLX5E_TC_FLOW_DUP;
+ flow_flag_set(flow, DUP);
mutex_lock(&esw->offloads.peer_mutex);
list_add_tail(&flow->peer, &esw->offloads.peer_flows);
mutex_unlock(&esw->offloads.peer_mutex);
@@ -3284,7 +3643,7 @@ out:
static int
mlx5e_add_fdb_flow(struct mlx5e_priv *priv,
struct flow_cls_offload *f,
- u16 flow_flags,
+ unsigned long flow_flags,
struct net_device *filter_dev,
struct mlx5e_tc_flow **__flow)
{
@@ -3318,7 +3677,7 @@ out:
static int
mlx5e_add_nic_flow(struct mlx5e_priv *priv,
struct flow_cls_offload *f,
- u16 flow_flags,
+ unsigned long flow_flags,
struct net_device *filter_dev,
struct mlx5e_tc_flow **__flow)
{
@@ -3332,7 +3691,7 @@ mlx5e_add_nic_flow(struct mlx5e_priv *priv,
if (!tc_cls_can_offload_and_chain0(priv->netdev, &f->common))
return -EOPNOTSUPP;
- flow_flags |= MLX5E_TC_FLOW_NIC;
+ flow_flags |= BIT(MLX5E_TC_FLOW_FLAG_NIC);
attr_size = sizeof(struct mlx5_nic_flow_attr);
err = mlx5e_alloc_flow(priv, attr_size, f, flow_flags,
&parse_attr, &flow);
@@ -3353,14 +3712,14 @@ mlx5e_add_nic_flow(struct mlx5e_priv *priv,
if (err)
goto err_free;
- flow->flags |= MLX5E_TC_FLOW_OFFLOADED;
+ flow_flag_set(flow, OFFLOADED);
kvfree(parse_attr);
*__flow = flow;
return 0;
err_free:
- kfree(flow);
+ mlx5e_flow_put(priv, flow);
kvfree(parse_attr);
out:
return err;
@@ -3369,12 +3728,12 @@ out:
static int
mlx5e_tc_add_flow(struct mlx5e_priv *priv,
struct flow_cls_offload *f,
- int flags,
+ unsigned long flags,
struct net_device *filter_dev,
struct mlx5e_tc_flow **flow)
{
struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
- u16 flow_flags;
+ unsigned long flow_flags;
int err;
get_flags(flags, &flow_flags);
@@ -3393,14 +3752,16 @@ mlx5e_tc_add_flow(struct mlx5e_priv *priv,
}
int mlx5e_configure_flower(struct net_device *dev, struct mlx5e_priv *priv,
- struct flow_cls_offload *f, int flags)
+ struct flow_cls_offload *f, unsigned long flags)
{
struct netlink_ext_ack *extack = f->common.extack;
struct rhashtable *tc_ht = get_tc_ht(priv, flags);
struct mlx5e_tc_flow *flow;
int err = 0;
- flow = rhashtable_lookup_fast(tc_ht, &f->cookie, tc_ht_params);
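+ /* rhashtable_lookup() requires the caller to hold rcu_read_lock(),
+ * unlike rhashtable_lookup_fast(), which takes it internally
+ */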
+ rcu_read_lock();
+ flow = rhashtable_lookup(tc_ht, &f->cookie, tc_ht_params);
+ rcu_read_unlock();
if (flow) {
NL_SET_ERR_MSG_MOD(extack,
"flow cookie already exists, ignoring");
@@ -3411,55 +3772,68 @@ int mlx5e_configure_flower(struct net_device *dev, struct mlx5e_priv *priv,
goto out;
}
+ trace_mlx5e_configure_flower(f);
err = mlx5e_tc_add_flow(priv, f, flags, dev, &flow);
if (err)
goto out;
- err = rhashtable_insert_fast(tc_ht, &flow->node, tc_ht_params);
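+ /* unlike plain rhashtable_insert_fast(), the lookup_insert variant
+ * fails with -EEXIST if a concurrent add raced us with the same
+ * cookie, instead of inserting a duplicate
+ */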
+ err = rhashtable_lookup_insert_fast(tc_ht, &flow->node, tc_ht_params);
if (err)
goto err_free;
return 0;
err_free:
- mlx5e_tc_del_flow(priv, flow);
- kfree(flow);
+ mlx5e_flow_put(priv, flow);
out:
return err;
}
-#define DIRECTION_MASK (MLX5E_TC_INGRESS | MLX5E_TC_EGRESS)
-#define FLOW_DIRECTION_MASK (MLX5E_TC_FLOW_INGRESS | MLX5E_TC_FLOW_EGRESS)
-
static bool same_flow_direction(struct mlx5e_tc_flow *flow, int flags)
{
- if ((flow->flags & FLOW_DIRECTION_MASK) == (flags & DIRECTION_MASK))
- return true;
+ bool dir_ingress = !!(flags & MLX5_TC_FLAG(INGRESS));
+ bool dir_egress = !!(flags & MLX5_TC_FLAG(EGRESS));
- return false;
+ return flow_flag_test(flow, INGRESS) == dir_ingress &&
+ flow_flag_test(flow, EGRESS) == dir_egress;
}
int mlx5e_delete_flower(struct net_device *dev, struct mlx5e_priv *priv,
- struct flow_cls_offload *f, int flags)
+ struct flow_cls_offload *f, unsigned long flags)
{
struct rhashtable *tc_ht = get_tc_ht(priv, flags);
struct mlx5e_tc_flow *flow;
+ int err;
+ rcu_read_lock();
flow = rhashtable_lookup_fast(tc_ht, &f->cookie, tc_ht_params);
- if (!flow || !same_flow_direction(flow, flags))
- return -EINVAL;
+ if (!flow || !same_flow_direction(flow, flags)) {
+ err = -EINVAL;
+ goto errout;
+ }
+ /* Only delete the flow if it doesn't have MLX5E_TC_FLOW_FLAG_DELETED
+ * set.
+ */
+ if (flow_flag_test_and_set(flow, DELETED)) {
+ err = -EINVAL;
+ goto errout;
+ }
rhashtable_remove_fast(tc_ht, &flow->node, tc_ht_params);
+ rcu_read_unlock();
- mlx5e_tc_del_flow(priv, flow);
-
- kfree(flow);
+ trace_mlx5e_delete_flower(f);
+ mlx5e_flow_put(priv, flow);
return 0;
+
+errout:
+ rcu_read_unlock();
+ return err;
}
int mlx5e_stats_flower(struct net_device *dev, struct mlx5e_priv *priv,
- struct flow_cls_offload *f, int flags)
+ struct flow_cls_offload *f, unsigned long flags)
{
struct mlx5_devcom *devcom = priv->mdev->priv.devcom;
struct rhashtable *tc_ht = get_tc_ht(priv, flags);
@@ -3469,15 +3843,24 @@ int mlx5e_stats_flower(struct net_device *dev, struct mlx5e_priv *priv,
u64 lastuse = 0;
u64 packets = 0;
u64 bytes = 0;
+ int err = 0;
- flow = rhashtable_lookup_fast(tc_ht, &f->cookie, tc_ht_params);
- if (!flow || !same_flow_direction(flow, flags))
- return -EINVAL;
+ rcu_read_lock();
+ flow = mlx5e_flow_get(rhashtable_lookup(tc_ht, &f->cookie,
+ tc_ht_params));
+ rcu_read_unlock();
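+ /* mlx5e_flow_get() (added earlier in this patch) maps both a NULL
+ * lookup result and a flow whose refcount already dropped to zero to
+ * an ERR_PTR
+ */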
+ if (IS_ERR(flow))
+ return PTR_ERR(flow);
- if (flow->flags & MLX5E_TC_FLOW_OFFLOADED) {
+ if (!same_flow_direction(flow, flags)) {
+ err = -EINVAL;
+ goto errout;
+ }
+
+ if (mlx5e_is_offloaded_flow(flow)) {
counter = mlx5e_tc_get_counter(flow);
if (!counter)
- return 0;
+ goto errout;
mlx5_fc_query_cached(counter, &bytes, &packets, &lastuse);
}
@@ -3489,8 +3872,8 @@ int mlx5e_stats_flower(struct net_device *dev, struct mlx5e_priv *priv,
if (!peer_esw)
goto out;
- if ((flow->flags & MLX5E_TC_FLOW_DUP) &&
- (flow->peer_flow->flags & MLX5E_TC_FLOW_OFFLOADED)) {
+ if (flow_flag_test(flow, DUP) &&
+ flow_flag_test(flow->peer_flow, OFFLOADED)) {
u64 bytes2;
u64 packets2;
u64 lastuse2;
@@ -3509,15 +3892,118 @@ no_peer_counter:
mlx5_devcom_release_peer_data(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
out:
flow_stats_update(&f->stats, bytes, packets, lastuse);
+ trace_mlx5e_stats_flower(f);
+errout:
+ mlx5e_flow_put(priv, flow);
+ return err;
+}
+
+static int apply_police_params(struct mlx5e_priv *priv, u32 rate,
+ struct netlink_ext_ack *extack)
+{
+ struct mlx5e_rep_priv *rpriv = priv->ppriv;
+ struct mlx5_eswitch *esw;
+ u16 vport_num;
+ u32 rate_mbps;
+ int err;
+
+ esw = priv->mdev->priv.eswitch;
+ /* rate is given in bytes/sec.
+ * First convert to bits/sec and then round to the nearest mbit/sec.
+ * mbit means million bits.
+ * Moreover, if rate is non-zero we choose to configure to a minimum of
+ * 1 mbit/sec.
+ */
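+ /* e.g. rate = 300000 bytes/sec -> 2400000 bits/sec ->
+ * (2400000 + 500000) / 1000000 = 2 mbit/sec, while rate = 10000
+ * bytes/sec rounds down to 0 and is clamped to 1 mbit/sec
+ */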
+ rate_mbps = rate ? max_t(u32, (rate * 8 + 500000) / 1000000, 1) : 0;
+ vport_num = rpriv->rep->vport;
+
+ err = mlx5_esw_modify_vport_rate(esw, vport_num, rate_mbps);
+ if (err)
+ NL_SET_ERR_MSG_MOD(extack, "failed applying action to hardware");
+
+ return err;
+}
+
+static int scan_tc_matchall_fdb_actions(struct mlx5e_priv *priv,
+ struct flow_action *flow_action,
+ struct netlink_ext_ack *extack)
+{
+ struct mlx5e_rep_priv *rpriv = priv->ppriv;
+ const struct flow_action_entry *act;
+ int err;
+ int i;
+
+ if (!flow_action_has_entries(flow_action)) {
+ NL_SET_ERR_MSG_MOD(extack, "matchall called with no action");
+ return -EINVAL;
+ }
+
+ if (!flow_offload_has_one_action(flow_action)) {
+ NL_SET_ERR_MSG_MOD(extack, "matchall policing support only a single action");
+ return -EOPNOTSUPP;
+ }
+
+ flow_action_for_each(i, act, flow_action) {
+ switch (act->id) {
+ case FLOW_ACTION_POLICE:
+ err = apply_police_params(priv, act->police.rate_bytes_ps, extack);
+ if (err)
+ return err;
+
+ rpriv->prev_vf_vport_stats = priv->stats.vf_vport;
+ break;
+ default:
+ NL_SET_ERR_MSG_MOD(extack, "mlx5 supports only police action for matchall");
+ return -EOPNOTSUPP;
+ }
+ }
return 0;
}
+int mlx5e_tc_configure_matchall(struct mlx5e_priv *priv,
+ struct tc_cls_matchall_offload *ma)
+{
+ struct netlink_ext_ack *extack = ma->common.extack;
+ int prio = TC_H_MAJ(ma->common.prio) >> 16;
+
+ if (prio != 1) {
+ NL_SET_ERR_MSG_MOD(extack, "only priority 1 is supported");
+ return -EINVAL;
+ }
+
+ return scan_tc_matchall_fdb_actions(priv, &ma->rule->action, extack);
+}
+
+int mlx5e_tc_delete_matchall(struct mlx5e_priv *priv,
+ struct tc_cls_matchall_offload *ma)
+{
+ struct netlink_ext_ack *extack = ma->common.extack;
+
+ return apply_police_params(priv, 0, extack);
+}
+
+void mlx5e_tc_stats_matchall(struct mlx5e_priv *priv,
+ struct tc_cls_matchall_offload *ma)
+{
+ struct mlx5e_rep_priv *rpriv = priv->ppriv;
+ struct rtnl_link_stats64 cur_stats;
+ u64 dbytes;
+ u64 dpkts;
+
+ cur_stats = priv->stats.vf_vport;
+ dpkts = cur_stats.rx_packets - rpriv->prev_vf_vport_stats.rx_packets;
+ dbytes = cur_stats.rx_bytes - rpriv->prev_vf_vport_stats.rx_bytes;
+ rpriv->prev_vf_vport_stats = cur_stats;
+ flow_stats_update(&ma->stats, dpkts, dbytes, jiffies);
+}
+
static void mlx5e_tc_hairpin_update_dead_peer(struct mlx5e_priv *priv,
struct mlx5e_priv *peer_priv)
{
struct mlx5_core_dev *peer_mdev = peer_priv->mdev;
- struct mlx5e_hairpin_entry *hpe;
+ struct mlx5e_hairpin_entry *hpe, *tmp;
+ LIST_HEAD(init_wait_list);
u16 peer_vhca_id;
int bkt;
@@ -3526,9 +4012,18 @@ static void mlx5e_tc_hairpin_update_dead_peer(struct mlx5e_priv *priv,
peer_vhca_id = MLX5_CAP_GEN(peer_mdev, vhca_id);
- hash_for_each(priv->fs.tc.hairpin_tbl, bkt, hpe, hairpin_hlist) {
- if (hpe->peer_vhca_id == peer_vhca_id)
+ mutex_lock(&priv->fs.tc.hairpin_tbl_lock);
+ hash_for_each(priv->fs.tc.hairpin_tbl, bkt, hpe, hairpin_hlist)
+ if (refcount_inc_not_zero(&hpe->refcnt))
+ list_add(&hpe->dead_peer_wait_list, &init_wait_list);
+ mutex_unlock(&priv->fs.tc.hairpin_tbl_lock);
+
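+ /* references were taken under hairpin_tbl_lock above; waiting for
+ * each entry's initialization happens without holding the lock
+ */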
+ list_for_each_entry_safe(hpe, tmp, &init_wait_list, dead_peer_wait_list) {
+ wait_for_completion(&hpe->res_ready);
+ if (!IS_ERR_OR_NULL(hpe->hp) && hpe->peer_vhca_id == peer_vhca_id)
hpe->hp->pair->peer_gone = true;
+
+ mlx5e_hairpin_put(priv, hpe);
}
}
@@ -3564,7 +4059,10 @@ int mlx5e_tc_nic_init(struct mlx5e_priv *priv)
struct mlx5e_tc_table *tc = &priv->fs.tc;
int err;
- hash_init(tc->mod_hdr_tbl);
+ mutex_init(&tc->t_lock);
+ mutex_init(&tc->mod_hdr.lock);
+ hash_init(tc->mod_hdr.hlist);
+ mutex_init(&tc->hairpin_tbl_lock);
hash_init(tc->hairpin_tbl);
err = rhashtable_init(&tc->ht, &tc_ht_params);
@@ -3596,12 +4094,16 @@ void mlx5e_tc_nic_cleanup(struct mlx5e_priv *priv)
if (tc->netdevice_nb.notifier_call)
unregister_netdevice_notifier(&tc->netdevice_nb);
+ mutex_destroy(&tc->mod_hdr.lock);
+ mutex_destroy(&tc->hairpin_tbl_lock);
+
rhashtable_destroy(&tc->ht);
if (!IS_ERR_OR_NULL(tc->t)) {
mlx5_destroy_flow_table(tc->t);
tc->t = NULL;
}
+ mutex_destroy(&tc->t_lock);
}
int mlx5e_tc_esw_init(struct rhashtable *tc_ht)
@@ -3614,7 +4116,7 @@ void mlx5e_tc_esw_cleanup(struct rhashtable *tc_ht)
rhashtable_free_and_destroy(tc_ht, _mlx5e_tc_del_flow, NULL);
}
-int mlx5e_tc_num_filters(struct mlx5e_priv *priv, int flags)
+int mlx5e_tc_num_filters(struct mlx5e_priv *priv, unsigned long flags)
{
struct rhashtable *tc_ht = get_tc_ht(priv, flags);
@@ -3636,10 +4138,10 @@ void mlx5e_tc_reoffload_flows_work(struct work_struct *work)
reoffload_flows_work);
struct mlx5e_tc_flow *flow, *tmp;
- rtnl_lock();
+ mutex_lock(&rpriv->unready_flows_lock);
list_for_each_entry_safe(flow, tmp, &rpriv->unready_flows, unready) {
if (!mlx5e_tc_add_fdb_flow(flow->priv, flow, NULL))
- remove_unready_flow(flow);
+ unready_flow_del(flow);
}
- rtnl_unlock();
+ mutex_unlock(&rpriv->unready_flows_lock);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h
index 3ab39275ca7d..924c6ef86a14 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h
@@ -40,13 +40,15 @@
#ifdef CONFIG_MLX5_ESWITCH
enum {
- MLX5E_TC_INGRESS = BIT(0),
- MLX5E_TC_EGRESS = BIT(1),
- MLX5E_TC_NIC_OFFLOAD = BIT(2),
- MLX5E_TC_ESW_OFFLOAD = BIT(3),
- MLX5E_TC_LAST_EXPORTED_BIT = 3,
+ MLX5E_TC_FLAG_INGRESS_BIT,
+ MLX5E_TC_FLAG_EGRESS_BIT,
+ MLX5E_TC_FLAG_NIC_OFFLOAD_BIT,
+ MLX5E_TC_FLAG_ESW_OFFLOAD_BIT,
+ MLX5E_TC_FLAG_LAST_EXPORTED_BIT = MLX5E_TC_FLAG_ESW_OFFLOAD_BIT,
};
+#define MLX5_TC_FLAG(flag) BIT(MLX5E_TC_FLAG_##flag##_BIT)
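+/* e.g. MLX5_TC_FLAG(INGRESS) expands to BIT(MLX5E_TC_FLAG_INGRESS_BIT) */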
+
int mlx5e_tc_nic_init(struct mlx5e_priv *priv);
void mlx5e_tc_nic_cleanup(struct mlx5e_priv *priv);
@@ -54,23 +56,37 @@ int mlx5e_tc_esw_init(struct rhashtable *tc_ht);
void mlx5e_tc_esw_cleanup(struct rhashtable *tc_ht);
int mlx5e_configure_flower(struct net_device *dev, struct mlx5e_priv *priv,
- struct flow_cls_offload *f, int flags);
+ struct flow_cls_offload *f, unsigned long flags);
int mlx5e_delete_flower(struct net_device *dev, struct mlx5e_priv *priv,
- struct flow_cls_offload *f, int flags);
+ struct flow_cls_offload *f, unsigned long flags);
int mlx5e_stats_flower(struct net_device *dev, struct mlx5e_priv *priv,
- struct flow_cls_offload *f, int flags);
+ struct flow_cls_offload *f, unsigned long flags);
+
+int mlx5e_tc_configure_matchall(struct mlx5e_priv *priv,
+ struct tc_cls_matchall_offload *f);
+int mlx5e_tc_delete_matchall(struct mlx5e_priv *priv,
+ struct tc_cls_matchall_offload *f);
+void mlx5e_tc_stats_matchall(struct mlx5e_priv *priv,
+ struct tc_cls_matchall_offload *ma);
struct mlx5e_encap_entry;
void mlx5e_tc_encap_flows_add(struct mlx5e_priv *priv,
- struct mlx5e_encap_entry *e);
+ struct mlx5e_encap_entry *e,
+ struct list_head *flow_list);
void mlx5e_tc_encap_flows_del(struct mlx5e_priv *priv,
- struct mlx5e_encap_entry *e);
+ struct mlx5e_encap_entry *e,
+ struct list_head *flow_list);
+bool mlx5e_encap_take(struct mlx5e_encap_entry *e);
+void mlx5e_encap_put(struct mlx5e_priv *priv, struct mlx5e_encap_entry *e);
+
+void mlx5e_take_all_encap_flows(struct mlx5e_encap_entry *e, struct list_head *flow_list);
+void mlx5e_put_encap_flow_list(struct mlx5e_priv *priv, struct list_head *flow_list);
struct mlx5e_neigh_hash_entry;
void mlx5e_tc_update_neigh_used_value(struct mlx5e_neigh_hash_entry *nhe);
-int mlx5e_tc_num_filters(struct mlx5e_priv *priv, int flags);
+int mlx5e_tc_num_filters(struct mlx5e_priv *priv, unsigned long flags);
void mlx5e_tc_reoffload_flows_work(struct work_struct *work);
@@ -80,7 +96,11 @@ bool mlx5e_is_valid_eswitch_fwd_dev(struct mlx5e_priv *priv,
#else /* CONFIG_MLX5_ESWITCH */
static inline int mlx5e_tc_nic_init(struct mlx5e_priv *priv) { return 0; }
static inline void mlx5e_tc_nic_cleanup(struct mlx5e_priv *priv) {}
-static inline int mlx5e_tc_num_filters(struct mlx5e_priv *priv, int flags) { return 0; }
+static inline int mlx5e_tc_num_filters(struct mlx5e_priv *priv,
+ unsigned long flags)
+{
+ return 0;
+}
#endif
#endif /* __MLX5_EN_TC_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
index 600e92cb629a..d3a67a9b4eba 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
@@ -210,7 +210,7 @@ mlx5e_txwqe_build_dsegs(struct mlx5e_txqsq *sq, struct sk_buff *skb,
}
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
- struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
+ skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
int fsz = skb_frag_size(frag);
dma_addr = skb_frag_dma_map(sq->pdev, frag, 0, fsz,
@@ -292,8 +292,7 @@ netdev_tx_t mlx5e_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
num_bytes = skb->len + (skb_shinfo(skb)->gso_segs - 1) * ihs;
stats->packets += skb_shinfo(skb)->gso_segs;
} else {
- u8 mode = mlx5e_transport_inline_tx_wqe(wqe) ?
- MLX5_INLINE_MODE_TCP_UDP : sq->min_inline_mode;
+ u8 mode = mlx5e_tx_wqe_inline_mode(sq, &wqe->ctrl, skb);
opcode = MLX5_OPCODE_SEND;
mss = 0;
@@ -608,9 +607,11 @@ netdev_tx_t mlx5i_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
num_bytes = skb->len + (skb_shinfo(skb)->gso_segs - 1) * ihs;
stats->packets += skb_shinfo(skb)->gso_segs;
} else {
+ u8 mode = mlx5e_tx_wqe_inline_mode(sq, NULL, skb);
+
opcode = MLX5_OPCODE_SEND;
mss = 0;
- ihs = mlx5e_calc_min_inline(sq->min_inline_mode, skb);
+ ihs = mlx5e_calc_min_inline(mode, skb);
num_bytes = max_t(unsigned int, skb->len, ETH_ZLEN);
stats->packets++;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eq.c b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
index 41f25ea2e8d9..09d4c64b6e73 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
@@ -215,11 +215,7 @@ static int mlx5_eq_async_int(struct notifier_block *nb,
*/
dma_rmb();
- if (likely(eqe->type < MLX5_EVENT_TYPE_MAX))
- atomic_notifier_call_chain(&eqt->nh[eqe->type], eqe->type, eqe);
- else
- mlx5_core_warn_once(dev, "notifier_call_chain is not setup for eqe: %d\n", eqe->type);
-
+ atomic_notifier_call_chain(&eqt->nh[eqe->type], eqe->type, eqe);
atomic_notifier_call_chain(&eqt->nh[MLX5_EVENT_TYPE_NOTIFY_ANY], eqe->type, eqe);
++eq->cons_index;
@@ -415,7 +411,7 @@ void mlx5_eq_del_cq(struct mlx5_eq *eq, struct mlx5_core_cq *cq)
int mlx5_eq_table_init(struct mlx5_core_dev *dev)
{
struct mlx5_eq_table *eq_table;
- int i, err;
+ int i;
eq_table = kvzalloc(sizeof(*eq_table), GFP_KERNEL);
if (!eq_table)
@@ -423,9 +419,7 @@ int mlx5_eq_table_init(struct mlx5_core_dev *dev)
dev->priv.eq_table = eq_table;
- err = mlx5_eq_debugfs_init(dev);
- if (err)
- goto kvfree_eq_table;
+ mlx5_eq_debugfs_init(dev);
mutex_init(&eq_table->lock);
for (i = 0; i < MLX5_EVENT_TYPE_MAX; i++)
@@ -433,11 +427,6 @@ int mlx5_eq_table_init(struct mlx5_core_dev *dev)
eq_table->irq_table = dev->priv.irq_table;
return 0;
-
-kvfree_eq_table:
- kvfree(eq_table);
- dev->priv.eq_table = NULL;
- return err;
}
void mlx5_eq_table_cleanup(struct mlx5_core_dev *dev)
@@ -945,9 +934,6 @@ int mlx5_eq_notifier_register(struct mlx5_core_dev *dev, struct mlx5_nb *nb)
{
struct mlx5_eq_table *eqt = dev->priv.eq_table;
- if (nb->event_type >= MLX5_EVENT_TYPE_MAX)
- return -EINVAL;
-
return atomic_notifier_chain_register(&eqt->nh[nb->event_type], &nb->nb);
}
EXPORT_SYMBOL(mlx5_eq_notifier_register);
@@ -956,9 +942,6 @@ int mlx5_eq_notifier_unregister(struct mlx5_core_dev *dev, struct mlx5_nb *nb)
{
struct mlx5_eq_table *eqt = dev->priv.eq_table;
- if (nb->event_type >= MLX5_EVENT_TYPE_MAX)
- return -EINVAL;
-
return atomic_notifier_chain_unregister(&eqt->nh[nb->event_type], &nb->nb);
}
EXPORT_SYMBOL(mlx5_eq_notifier_unregister);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
index 1f3891fde2eb..f0692407f617 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
@@ -58,20 +58,9 @@ struct vport_addr {
bool mc_promisc;
};
-enum {
- UC_ADDR_CHANGE = BIT(0),
- MC_ADDR_CHANGE = BIT(1),
- PROMISC_CHANGE = BIT(3),
-};
-
static void esw_destroy_legacy_fdb_table(struct mlx5_eswitch *esw);
static void esw_cleanup_vepa_rules(struct mlx5_eswitch *esw);
-/* Vport context events */
-#define SRIOV_VPORT_EVENTS (UC_ADDR_CHANGE | \
- MC_ADDR_CHANGE | \
- PROMISC_CHANGE)
-
struct mlx5_vport *__must_check
mlx5_eswitch_get_vport(struct mlx5_eswitch *esw, u16 vport_num)
{
@@ -108,13 +97,13 @@ static int arm_vport_context_events_cmd(struct mlx5_core_dev *dev, u16 vport,
MLX5_SET(nic_vport_context, nic_vport_ctx, arm_change_event, 1);
- if (events_mask & UC_ADDR_CHANGE)
+ if (events_mask & MLX5_VPORT_UC_ADDR_CHANGE)
MLX5_SET(nic_vport_context, nic_vport_ctx,
event_on_uc_address_change, 1);
- if (events_mask & MC_ADDR_CHANGE)
+ if (events_mask & MLX5_VPORT_MC_ADDR_CHANGE)
MLX5_SET(nic_vport_context, nic_vport_ctx,
event_on_mc_address_change, 1);
- if (events_mask & PROMISC_CHANGE)
+ if (events_mask & MLX5_VPORT_PROMISC_CHANGE)
MLX5_SET(nic_vport_context, nic_vport_ctx,
event_on_promisc_change, 1);
@@ -463,6 +452,22 @@ static int esw_create_legacy_table(struct mlx5_eswitch *esw)
return err;
}
+#define MLX5_LEGACY_SRIOV_VPORT_EVENTS (MLX5_VPORT_UC_ADDR_CHANGE | \
+ MLX5_VPORT_MC_ADDR_CHANGE | \
+ MLX5_VPORT_PROMISC_CHANGE)
+
+static int esw_legacy_enable(struct mlx5_eswitch *esw)
+{
+ int ret;
+
+ ret = esw_create_legacy_table(esw);
+ if (ret)
+ return ret;
+
+ mlx5_eswitch_enable_pf_vf_vports(esw, MLX5_LEGACY_SRIOV_VPORT_EVENTS);
+ return 0;
+}
+
static void esw_destroy_legacy_table(struct mlx5_eswitch *esw)
{
esw_cleanup_vepa_rules(esw);
@@ -470,6 +475,19 @@ static void esw_destroy_legacy_table(struct mlx5_eswitch *esw)
esw_destroy_legacy_vepa_table(esw);
}
+static void esw_legacy_disable(struct mlx5_eswitch *esw)
+{
+ struct esw_mc_addr *mc_promisc;
+
+ mlx5_eswitch_disable_pf_vf_vports(esw);
+
+ mc_promisc = &esw->mc_promisc;
+ if (mc_promisc->uplink_rule)
+ mlx5_del_flow_rules(mc_promisc->uplink_rule);
+
+ esw_destroy_legacy_table(esw);
+}
+
/* E-Switch vport UC/MC lists management */
typedef int (*vport_addr_action)(struct mlx5_eswitch *esw,
struct vport_addr *vaddr);
@@ -901,21 +919,21 @@ static void esw_vport_change_handle_locked(struct mlx5_vport *vport)
esw_debug(dev, "vport[%d] Context Changed: perm mac: %pM\n",
vport->vport, mac);
- if (vport->enabled_events & UC_ADDR_CHANGE) {
+ if (vport->enabled_events & MLX5_VPORT_UC_ADDR_CHANGE) {
esw_update_vport_addr_list(esw, vport, MLX5_NVPRT_LIST_TYPE_UC);
esw_apply_vport_addr_list(esw, vport, MLX5_NVPRT_LIST_TYPE_UC);
}
- if (vport->enabled_events & MC_ADDR_CHANGE)
+ if (vport->enabled_events & MLX5_VPORT_MC_ADDR_CHANGE)
esw_update_vport_addr_list(esw, vport, MLX5_NVPRT_LIST_TYPE_MC);
- if (vport->enabled_events & PROMISC_CHANGE) {
+ if (vport->enabled_events & MLX5_VPORT_PROMISC_CHANGE) {
esw_update_vport_rx_mode(esw, vport);
if (!IS_ERR_OR_NULL(vport->allmulti_rule))
esw_update_vport_mc_promisc(esw, vport);
}
- if (vport->enabled_events & (PROMISC_CHANGE | MC_ADDR_CHANGE))
+ if (vport->enabled_events & (MLX5_VPORT_PROMISC_CHANGE | MLX5_VPORT_MC_ADDR_CHANGE))
esw_apply_vport_addr_list(esw, vport, MLX5_NVPRT_LIST_TYPE_MC);
esw_debug(esw->dev, "vport[%d] Context Changed: Done\n", vport->vport);
@@ -1393,18 +1411,49 @@ out:
return err;
}
+static bool element_type_supported(struct mlx5_eswitch *esw, int type)
+{
+ struct mlx5_core_dev *dev = esw->dev;
+
+ switch (type) {
+ case SCHEDULING_CONTEXT_ELEMENT_TYPE_TSAR:
+ return MLX5_CAP_QOS(dev, esw_element_type) &
+ ELEMENT_TYPE_CAP_MASK_TASR;
+ case SCHEDULING_CONTEXT_ELEMENT_TYPE_VPORT:
+ return MLX5_CAP_QOS(dev, esw_element_type) &
+ ELEMENT_TYPE_CAP_MASK_VPORT;
+ case SCHEDULING_CONTEXT_ELEMENT_TYPE_VPORT_TC:
+ return MLX5_CAP_QOS(dev, esw_element_type) &
+ ELEMENT_TYPE_CAP_MASK_VPORT_TC;
+ case SCHEDULING_CONTEXT_ELEMENT_TYPE_PARA_VPORT_TC:
+ return MLX5_CAP_QOS(dev, esw_element_type) &
+ ELEMENT_TYPE_CAP_MASK_PARA_VPORT_TC;
+ }
+ return false;
+}
+
/* Vport QoS management */
-static int esw_create_tsar(struct mlx5_eswitch *esw)
+static void esw_create_tsar(struct mlx5_eswitch *esw)
{
u32 tsar_ctx[MLX5_ST_SZ_DW(scheduling_context)] = {0};
struct mlx5_core_dev *dev = esw->dev;
+ __be32 *attr;
int err;
if (!MLX5_CAP_GEN(dev, qos) || !MLX5_CAP_QOS(dev, esw_scheduling))
- return 0;
+ return;
+
+ if (!element_type_supported(esw, SCHEDULING_CONTEXT_ELEMENT_TYPE_TSAR))
+ return;
if (esw->qos.enabled)
- return -EEXIST;
+ return;
+
+ MLX5_SET(scheduling_context, tsar_ctx, element_type,
+ SCHEDULING_CONTEXT_ELEMENT_TYPE_TSAR);
+
+ attr = MLX5_ADDR_OF(scheduling_context, tsar_ctx, element_attributes);
+ *attr = cpu_to_be32(TSAR_ELEMENT_TSAR_TYPE_DWRR << 16);
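+ /* the TSAR type is carried in the upper 16 bits of the first
+ * element_attributes dword
+ */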
err = mlx5_create_scheduling_element_cmd(dev,
SCHEDULING_HIERARCHY_E_SWITCH,
@@ -1412,11 +1461,10 @@ static int esw_create_tsar(struct mlx5_eswitch *esw)
&esw->qos.root_tsar_id);
if (err) {
esw_warn(esw->dev, "E-Switch create TSAR failed (%d)\n", err);
- return err;
+ return;
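+ /* best-effort: QoS stays disabled but eswitch enable proceeds */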
}
esw->qos.enabled = true;
- return 0;
}
static void esw_destroy_tsar(struct mlx5_eswitch *esw)
@@ -1537,6 +1585,22 @@ static int esw_vport_qos_config(struct mlx5_eswitch *esw,
return 0;
}
+int mlx5_esw_modify_vport_rate(struct mlx5_eswitch *esw, u16 vport_num,
+ u32 rate_mbps)
+{
+ u32 ctx[MLX5_ST_SZ_DW(scheduling_context)] = {};
+ struct mlx5_vport *vport;
+
+ vport = mlx5_eswitch_get_vport(esw, vport_num);
+ MLX5_SET(scheduling_context, ctx, max_average_bw, rate_mbps);
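+ /* the matchall delete path calls this with rate_mbps == 0 to clear
+ * the vport rate limit
+ */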
+
+ return mlx5_modify_scheduling_element_cmd(esw->dev,
+ SCHEDULING_HIERARCHY_E_SWITCH,
+ ctx,
+ vport->qos.esw_tsar_ix,
+ MODIFY_SCHEDULING_ELEMENT_IN_MODIFY_BITMASK_MAX_AVERAGE_BW);
+}
+
static void node_guid_gen_from_mac(u64 *node_guid, u8 mac[ETH_ALEN])
{
((u8 *)node_guid)[7] = mac[0];
@@ -1619,7 +1683,7 @@ static void esw_vport_destroy_drop_counters(struct mlx5_vport *vport)
}
static void esw_enable_vport(struct mlx5_eswitch *esw, struct mlx5_vport *vport,
- int enable_events)
+ enum mlx5_eswitch_vport_event enabled_events)
{
u16 vport_num = vport->vport;
@@ -1641,7 +1705,7 @@ static void esw_enable_vport(struct mlx5_eswitch *esw, struct mlx5_vport *vport,
esw_warn(esw->dev, "Failed to attach vport %d to eswitch rate limiter", vport_num);
/* Sync with current vport context */
- vport->enabled_events = enable_events;
+ vport->enabled_events = enabled_events;
vport->enabled = true;
/* Esw manager is trusted by default. Host PF (vport 0) is trusted as well
@@ -1770,11 +1834,46 @@ static void mlx5_eswitch_event_handlers_unregister(struct mlx5_eswitch *esw)
/* Public E-Switch API */
#define ESW_ALLOWED(esw) ((esw) && MLX5_ESWITCH_MANAGER((esw)->dev))
-int mlx5_eswitch_enable(struct mlx5_eswitch *esw, int mode)
+/* mlx5_eswitch_enable_pf_vf_vports() enables the vports of the PF, ECPF and
+ * VFs, whichever are present on the eswitch.
+ */
+void
+mlx5_eswitch_enable_pf_vf_vports(struct mlx5_eswitch *esw,
+ enum mlx5_eswitch_vport_event enabled_events)
{
struct mlx5_vport *vport;
+ int i;
+
+ /* Enable PF vport */
+ vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_PF);
+ esw_enable_vport(esw, vport, enabled_events);
+
+ /* Enable ECPF vports */
+ if (mlx5_ecpf_vport_exists(esw->dev)) {
+ vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_ECPF);
+ esw_enable_vport(esw, vport, enabled_events);
+ }
+
+ /* Enable VF vports */
+ mlx5_esw_for_each_vf_vport(esw, i, vport, esw->esw_funcs.num_vfs)
+ esw_enable_vport(esw, vport, enabled_events);
+}
+
+/* mlx5_eswitch_disable_pf_vf_vports() disables the vports of the PF, ECPF and
+ * VFs, whichever were previously enabled on the eswitch.
+ */
+void mlx5_eswitch_disable_pf_vf_vports(struct mlx5_eswitch *esw)
+{
+ struct mlx5_vport *vport;
+ int i;
+
+ mlx5_esw_for_all_vports_reverse(esw, i, vport)
+ esw_disable_vport(esw, vport);
+}
+
+int mlx5_eswitch_enable(struct mlx5_eswitch *esw, int mode)
+{
int err;
- int i, enabled_events;
if (!ESW_ALLOWED(esw) ||
!MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, ft_support)) {
@@ -1788,44 +1887,23 @@ int mlx5_eswitch_enable(struct mlx5_eswitch *esw, int mode)
if (!MLX5_CAP_ESW_EGRESS_ACL(esw->dev, ft_support))
esw_warn(esw->dev, "engress ACL is not supported by FW\n");
+ esw_create_tsar(esw);
+
esw->mode = mode;
mlx5_lag_update(esw->dev);
if (mode == MLX5_ESWITCH_LEGACY) {
- err = esw_create_legacy_table(esw);
- if (err)
- goto abort;
+ err = esw_legacy_enable(esw);
} else {
mlx5_reload_interface(esw->dev, MLX5_INTERFACE_PROTOCOL_ETH);
mlx5_reload_interface(esw->dev, MLX5_INTERFACE_PROTOCOL_IB);
- err = esw_offloads_init(esw);
+ err = esw_offloads_enable(esw);
}
if (err)
goto abort;
- err = esw_create_tsar(esw);
- if (err)
- esw_warn(esw->dev, "Failed to create eswitch TSAR");
-
- enabled_events = (mode == MLX5_ESWITCH_LEGACY) ? SRIOV_VPORT_EVENTS :
- UC_ADDR_CHANGE;
-
- /* Enable PF vport */
- vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_PF);
- esw_enable_vport(esw, vport, enabled_events);
-
- /* Enable ECPF vports */
- if (mlx5_ecpf_vport_exists(esw->dev)) {
- vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_ECPF);
- esw_enable_vport(esw, vport, enabled_events);
- }
-
- /* Enable VF vports */
- mlx5_esw_for_each_vf_vport(esw, i, vport, esw->esw_funcs.num_vfs)
- esw_enable_vport(esw, vport, enabled_events);
-
mlx5_eswitch_event_handlers_register(esw);
esw_info(esw->dev, "Enable: mode(%s), nvfs(%d), active vports(%d)\n",
@@ -1847,10 +1925,7 @@ abort:
void mlx5_eswitch_disable(struct mlx5_eswitch *esw)
{
- struct esw_mc_addr *mc_promisc;
- struct mlx5_vport *vport;
int old_mode;
- int i;
if (!ESW_ALLOWED(esw) || esw->mode == MLX5_ESWITCH_NONE)
return;
@@ -1859,21 +1934,14 @@ void mlx5_eswitch_disable(struct mlx5_eswitch *esw)
esw->mode == MLX5_ESWITCH_LEGACY ? "LEGACY" : "OFFLOADS",
esw->esw_funcs.num_vfs, esw->enabled_vports);
- mc_promisc = &esw->mc_promisc;
mlx5_eswitch_event_handlers_unregister(esw);
- mlx5_esw_for_all_vports(esw, i, vport)
- esw_disable_vport(esw, vport);
-
- if (mc_promisc && mc_promisc->uplink_rule)
- mlx5_del_flow_rules(mc_promisc->uplink_rule);
-
- esw_destroy_tsar(esw);
-
if (esw->mode == MLX5_ESWITCH_LEGACY)
- esw_destroy_legacy_table(esw);
+ esw_legacy_disable(esw);
else if (esw->mode == MLX5_ESWITCH_OFFLOADS)
- esw_offloads_cleanup(esw);
+ esw_offloads_disable(esw);
+
+ esw_destroy_tsar(esw);
old_mode = esw->mode;
esw->mode = MLX5_ESWITCH_NONE;
@@ -1931,8 +1999,11 @@ int mlx5_eswitch_init(struct mlx5_core_dev *dev)
if (err)
goto abort;
+ mutex_init(&esw->offloads.encap_tbl_lock);
hash_init(esw->offloads.encap_tbl);
- hash_init(esw->offloads.mod_hdr_tbl);
+ mutex_init(&esw->offloads.mod_hdr.lock);
+ hash_init(esw->offloads.mod_hdr.hlist);
+ atomic64_set(&esw->offloads.num_flows, 0);
mutex_init(&esw->state_lock);
mlx5_esw_for_all_vports(esw, i, vport) {
@@ -1968,6 +2039,8 @@ void mlx5_eswitch_cleanup(struct mlx5_eswitch *esw)
esw->dev->priv.eswitch = NULL;
destroy_workqueue(esw->work_queue);
esw_offloads_cleanup_reps(esw);
+ mutex_destroy(&esw->offloads.mod_hdr.lock);
+ mutex_destroy(&esw->offloads.encap_tbl_lock);
kfree(esw->vports);
kfree(esw);
}
@@ -2085,23 +2158,19 @@ int __mlx5_eswitch_set_vport_vlan(struct mlx5_eswitch *esw,
if (vlan > 4095 || qos > 7)
return -EINVAL;
- mutex_lock(&esw->state_lock);
-
err = modify_esw_vport_cvlan(esw->dev, vport, vlan, qos, set_flags);
if (err)
- goto unlock;
+ return err;
evport->info.vlan = vlan;
evport->info.qos = qos;
if (evport->enabled && esw->mode == MLX5_ESWITCH_LEGACY) {
err = esw_vport_ingress_config(esw, evport);
if (err)
- goto unlock;
+ return err;
err = esw_vport_egress_config(esw, evport);
}
-unlock:
- mutex_unlock(&esw->state_lock);
return err;
}
@@ -2109,11 +2178,16 @@ int mlx5_eswitch_set_vport_vlan(struct mlx5_eswitch *esw,
u16 vport, u16 vlan, u8 qos)
{
u8 set_flags = 0;
+ int err;
if (vlan || qos)
set_flags = SET_VLAN_STRIP | SET_VLAN_INSERT;
- return __mlx5_eswitch_set_vport_vlan(esw, vport, vlan, qos, set_flags);
+ mutex_lock(&esw->state_lock);
+ err = __mlx5_eswitch_set_vport_vlan(esw, vport, vlan, qos, set_flags);
+ mutex_unlock(&esw->state_lock);
+
+ return err;
}
int mlx5_eswitch_set_vport_spoofchk(struct mlx5_eswitch *esw,
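
The new mlx5_esw_modify_vport_rate() above patches only the max_average_bw
field of a vport's scheduling element instead of reconfiguring the whole
TSAR context. A minimal caller sketch (the wrapper name and error handling
are illustrative, not part of this patch):

	/* Sketch: set a vport's max rate; 0 is treated by the device
	 * as "unlimited" for max_average_bw. */
	static int example_set_vport_rate(struct mlx5_eswitch *esw,
					  u16 vport_num, u32 rate_mbps)
	{
		int err;

		err = mlx5_esw_modify_vport_rate(esw, vport_num, rate_mbps);
		if (err)
			esw_warn(esw->dev,
				 "vport %u: failed to set rate %u Mbps (%d)\n",
				 vport_num, rate_mbps, err);
		return err;
	}

Note that mlx5_esw_modify_vport_rate() assumes the vport's scheduling
element (qos.esw_tsar_ix) was already created by esw_vport_qos_config().
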
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
index 04685dbb280c..aba9e7a6ad3c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
@@ -35,6 +35,7 @@
#include <linux/if_ether.h>
#include <linux/if_link.h>
+#include <linux/atomic.h>
#include <net/devlink.h>
#include <linux/mlx5/device.h>
#include <linux/mlx5/eswitch.h>
@@ -101,6 +102,13 @@ struct mlx5_vport_info {
bool trusted;
};
+/* Vport context events */
+enum mlx5_eswitch_vport_event {
+ MLX5_VPORT_UC_ADDR_CHANGE = BIT(0),
+ MLX5_VPORT_MC_ADDR_CHANGE = BIT(1),
+ MLX5_VPORT_PROMISC_CHANGE = BIT(3),
+};
+
struct mlx5_vport {
struct mlx5_core_dev *dev;
int vport;
@@ -122,7 +130,7 @@ struct mlx5_vport {
} qos;
bool enabled;
- u16 enabled_events;
+ enum mlx5_eswitch_vport_event enabled_events;
};
enum offloads_fdb_flags {
@@ -173,13 +181,14 @@ struct mlx5_esw_offload {
struct mlx5_eswitch_rep *vport_reps;
struct list_head peer_flows;
struct mutex peer_mutex;
+ struct mutex encap_tbl_lock; /* protects encap_tbl */
DECLARE_HASHTABLE(encap_tbl, 8);
- DECLARE_HASHTABLE(mod_hdr_tbl, 8);
+ struct mod_hdr_tbl mod_hdr;
DECLARE_HASHTABLE(termtbl_tbl, 8);
struct mutex termtbl_mutex; /* protects termtbl hash */
const struct mlx5_eswitch_rep_ops *rep_ops[NUM_REP_TYPES];
u8 inline_mode;
- u64 num_flows;
+ atomic64_t num_flows;
enum devlink_eswitch_encap_mode encap;
};
@@ -207,8 +216,11 @@ enum {
struct mlx5_eswitch {
struct mlx5_core_dev *dev;
struct mlx5_nb nb;
+ /* legacy data structures */
struct mlx5_eswitch_fdb fdb_table;
struct hlist_head mc_table[MLX5_L2_ADDR_HASH_SIZE];
+ struct esw_mc_addr mc_promisc;
+ /* end of legacy */
struct workqueue_struct *work_queue;
struct mlx5_vport *vports;
u32 flags;
@@ -218,7 +230,6 @@ struct mlx5_eswitch {
* and async SRIOV admin state changes
*/
struct mutex state_lock;
- struct esw_mc_addr mc_promisc;
struct {
bool enabled;
@@ -233,8 +244,8 @@ struct mlx5_eswitch {
struct mlx5_esw_functions esw_funcs;
};
-void esw_offloads_cleanup(struct mlx5_eswitch *esw);
-int esw_offloads_init(struct mlx5_eswitch *esw);
+void esw_offloads_disable(struct mlx5_eswitch *esw);
+int esw_offloads_enable(struct mlx5_eswitch *esw);
void esw_offloads_cleanup_reps(struct mlx5_eswitch *esw);
int esw_offloads_init_reps(struct mlx5_eswitch *esw);
void esw_vport_cleanup_ingress_rules(struct mlx5_eswitch *esw,
@@ -251,6 +262,8 @@ void esw_vport_disable_ingress_acl(struct mlx5_eswitch *esw,
struct mlx5_vport *vport);
void esw_vport_del_ingress_acl_modify_metadata(struct mlx5_eswitch *esw,
struct mlx5_vport *vport);
+int mlx5_esw_modify_vport_rate(struct mlx5_eswitch *esw, u16 vport_num,
+ u32 rate_mbps);
/* E-Switch API */
int mlx5_eswitch_init(struct mlx5_core_dev *dev);
@@ -513,6 +526,11 @@ void mlx5e_tc_clean_fdb_peer_flows(struct mlx5_eswitch *esw);
(vport) = &(esw)->vports[i], \
(i) < (esw)->total_vports; (i)++)
+#define mlx5_esw_for_all_vports_reverse(esw, i, vport) \
+ for ((i) = (esw)->total_vports - 1; \
+ (vport) = &(esw)->vports[i], \
+ (i) >= MLX5_VPORT_PF; (i)--)
+
#define mlx5_esw_for_each_vf_vport(esw, i, vport, nvfs) \
for ((i) = MLX5_VPORT_FIRST_VF; \
(vport) = &(esw)->vports[(i)], \
@@ -574,6 +592,11 @@ bool mlx5_eswitch_is_vf_vport(const struct mlx5_eswitch *esw, u16 vport_num);
void mlx5_eswitch_update_num_of_vfs(struct mlx5_eswitch *esw, const int num_vfs);
int mlx5_esw_funcs_changed_handler(struct notifier_block *nb, unsigned long type, void *data);
+void
+mlx5_eswitch_enable_pf_vf_vports(struct mlx5_eswitch *esw,
+ enum mlx5_eswitch_vport_event enabled_events);
+void mlx5_eswitch_disable_pf_vf_vports(struct mlx5_eswitch *esw);
+
#else /* CONFIG_MLX5_ESWITCH */
/* eswitch API stubs */
static inline int mlx5_eswitch_init(struct mlx5_core_dev *dev) { return 0; }
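
The enabled_events field is now a typed bitmask, so each mode can state
explicitly which vport context events it arms. As a sketch (the legacy
event set below is an assumption based on the three enum bits; the
offloads path in this series passes MLX5_VPORT_UC_ADDR_CHANGE only, as
eswitch_offloads.c shows further down):

	/* Hypothetical shorthand for the legacy-mode event set */
	#define EXAMPLE_LEGACY_VPORT_EVENTS	(MLX5_VPORT_UC_ADDR_CHANGE | \
						 MLX5_VPORT_MC_ADDR_CHANGE | \
						 MLX5_VPORT_PROMISC_CHANGE)

	static void example_enable_vports(struct mlx5_eswitch *esw, bool legacy)
	{
		enum mlx5_eswitch_vport_event events;

		events = legacy ? EXAMPLE_LEGACY_VPORT_EVENTS :
				  MLX5_VPORT_UC_ADDR_CHANGE;
		mlx5_eswitch_enable_pf_vf_vports(esw, events);
	}
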
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
index 0323fd078271..7d3582ee66b7 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
@@ -229,7 +229,7 @@ mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
if (IS_ERR(rule))
goto err_add_rule;
else
- esw->offloads.num_flows++;
+ atomic64_inc(&esw->offloads.num_flows);
return rule;
@@ -294,7 +294,7 @@ mlx5_eswitch_add_fwd_rule(struct mlx5_eswitch *esw,
if (IS_ERR(rule))
goto add_err;
- esw->offloads.num_flows++;
+ atomic64_inc(&esw->offloads.num_flows);
return rule;
add_err:
@@ -322,7 +322,7 @@ __mlx5_eswitch_del_rule(struct mlx5_eswitch *esw,
mlx5_eswitch_termtbl_put(esw, attr->dests[i].termtbl);
}
- esw->offloads.num_flows--;
+ atomic64_dec(&esw->offloads.num_flows);
if (fwd_rule) {
esw_put_prio_table(esw, attr->chain, attr->prio, 1);
@@ -438,9 +438,11 @@ int mlx5_eswitch_add_vlan_action(struct mlx5_eswitch *esw,
fwd = !!((attr->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) &&
!attr->dest_chain);
+ mutex_lock(&esw->state_lock);
+
err = esw_add_vlan_action_check(attr, push, pop, fwd);
if (err)
- return err;
+ goto unlock;
attr->vlan_handled = false;
@@ -453,11 +455,11 @@ int mlx5_eswitch_add_vlan_action(struct mlx5_eswitch *esw,
attr->vlan_handled = true;
}
- return 0;
+ goto unlock;
}
if (!push && !pop)
- return 0;
+ goto unlock;
if (!(offloads->vlan_push_pop_refcount)) {
/* it's the 1st vlan rule, apply global vlan pop policy */
@@ -482,6 +484,8 @@ skip_set_push:
out:
if (!err)
attr->vlan_handled = true;
+unlock:
+ mutex_unlock(&esw->state_lock);
return err;
}
@@ -504,6 +508,8 @@ int mlx5_eswitch_del_vlan_action(struct mlx5_eswitch *esw,
pop = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP);
fwd = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST);
+ mutex_lock(&esw->state_lock);
+
vport = esw_vlan_action_get_vport(attr, push, pop);
if (!push && !pop && fwd) {
@@ -511,7 +517,7 @@ int mlx5_eswitch_del_vlan_action(struct mlx5_eswitch *esw,
if (attr->dests[0].rep->vport == MLX5_VPORT_UPLINK)
vport->vlan_refcount--;
- return 0;
+ goto out;
}
if (push) {
@@ -529,12 +535,13 @@ int mlx5_eswitch_del_vlan_action(struct mlx5_eswitch *esw,
skip_unset_push:
offloads->vlan_push_pop_refcount--;
if (offloads->vlan_push_pop_refcount)
- return 0;
+ goto out;
/* no more vlan rules, stop global vlan pop policy */
err = esw_set_global_vlan_pop(esw, 0);
out:
+ mutex_unlock(&esw->state_lock);
return err;
}
@@ -583,38 +590,15 @@ void mlx5_eswitch_del_send_to_vport_rule(struct mlx5_flow_handle *rule)
mlx5_del_flow_rules(rule);
}
-static int mlx5_eswitch_enable_passing_vport_metadata(struct mlx5_eswitch *esw)
+static int esw_set_passing_vport_metadata(struct mlx5_eswitch *esw, bool enable)
{
u32 out[MLX5_ST_SZ_DW(query_esw_vport_context_out)] = {};
u32 in[MLX5_ST_SZ_DW(modify_esw_vport_context_in)] = {};
u8 fdb_to_vport_reg_c_id;
int err;
- err = mlx5_eswitch_query_esw_vport_context(esw, esw->manager_vport,
- out, sizeof(out));
- if (err)
- return err;
-
- fdb_to_vport_reg_c_id = MLX5_GET(query_esw_vport_context_out, out,
- esw_vport_context.fdb_to_vport_reg_c_id);
-
- fdb_to_vport_reg_c_id |= MLX5_FDB_TO_VPORT_REG_C_0;
- MLX5_SET(modify_esw_vport_context_in, in,
- esw_vport_context.fdb_to_vport_reg_c_id, fdb_to_vport_reg_c_id);
-
- MLX5_SET(modify_esw_vport_context_in, in,
- field_select.fdb_to_vport_reg_c_id, 1);
-
- return mlx5_eswitch_modify_esw_vport_context(esw, esw->manager_vport,
- in, sizeof(in));
-}
-
-static int mlx5_eswitch_disable_passing_vport_metadata(struct mlx5_eswitch *esw)
-{
- u32 out[MLX5_ST_SZ_DW(query_esw_vport_context_out)] = {};
- u32 in[MLX5_ST_SZ_DW(modify_esw_vport_context_in)] = {};
- u8 fdb_to_vport_reg_c_id;
- int err;
+ if (!mlx5_eswitch_vport_match_metadata_enabled(esw))
+ return 0;
err = mlx5_eswitch_query_esw_vport_context(esw, esw->manager_vport,
out, sizeof(out));
@@ -624,7 +608,10 @@ static int mlx5_eswitch_disable_passing_vport_metadata(struct mlx5_eswitch *esw)
fdb_to_vport_reg_c_id = MLX5_GET(query_esw_vport_context_out, out,
esw_vport_context.fdb_to_vport_reg_c_id);
- fdb_to_vport_reg_c_id &= ~MLX5_FDB_TO_VPORT_REG_C_0;
+ if (enable)
+ fdb_to_vport_reg_c_id |= MLX5_FDB_TO_VPORT_REG_C_0;
+ else
+ fdb_to_vport_reg_c_id &= ~MLX5_FDB_TO_VPORT_REG_C_0;
MLX5_SET(modify_esw_vport_context_in, in,
esw_vport_context.fdb_to_vport_reg_c_id, fdb_to_vport_reg_c_id);
@@ -1402,10 +1389,9 @@ void esw_offloads_cleanup_reps(struct mlx5_eswitch *esw)
int esw_offloads_init_reps(struct mlx5_eswitch *esw)
{
int total_vports = esw->total_vports;
- struct mlx5_core_dev *dev = esw->dev;
struct mlx5_eswitch_rep *rep;
- u8 hw_id[ETH_ALEN], rep_type;
int vport_index;
+ u8 rep_type;
esw->offloads.vport_reps = kcalloc(total_vports,
sizeof(struct mlx5_eswitch_rep),
@@ -1413,12 +1399,9 @@ int esw_offloads_init_reps(struct mlx5_eswitch *esw)
if (!esw->offloads.vport_reps)
return -ENOMEM;
- mlx5_query_mac_address(dev, hw_id);
-
mlx5_esw_for_all_reps(esw, vport_index, rep) {
rep->vport = mlx5_eswitch_index_to_vport_num(esw, vport_index);
rep->vport_index = vport_index;
- ether_addr_copy(rep->hw_id, hw_id);
for (rep_type = 0; rep_type < NUM_REP_TYPES; rep_type++)
atomic_set(&rep->rep_data[rep_type].state,
@@ -2120,7 +2103,7 @@ int mlx5_esw_funcs_changed_handler(struct notifier_block *nb, unsigned long type
return NOTIFY_OK;
}
-int esw_offloads_init(struct mlx5_eswitch *esw)
+int esw_offloads_enable(struct mlx5_eswitch *esw)
{
int err;
@@ -2134,11 +2117,11 @@ int esw_offloads_init(struct mlx5_eswitch *esw)
if (err)
return err;
- if (mlx5_eswitch_vport_match_metadata_enabled(esw)) {
- err = mlx5_eswitch_enable_passing_vport_metadata(esw);
- if (err)
- goto err_vport_metadata;
- }
+ err = esw_set_passing_vport_metadata(esw, true);
+ if (err)
+ goto err_vport_metadata;
+
+ mlx5_eswitch_enable_pf_vf_vports(esw, MLX5_VPORT_UC_ADDR_CHANGE);
err = esw_offloads_load_all_reps(esw);
if (err)
@@ -2152,8 +2135,8 @@ int esw_offloads_init(struct mlx5_eswitch *esw)
return 0;
err_reps:
- if (mlx5_eswitch_vport_match_metadata_enabled(esw))
- mlx5_eswitch_disable_passing_vport_metadata(esw);
+ mlx5_eswitch_disable_pf_vf_vports(esw);
+ esw_set_passing_vport_metadata(esw, false);
err_vport_metadata:
esw_offloads_steering_cleanup(esw);
return err;
@@ -2178,13 +2161,13 @@ static int esw_offloads_stop(struct mlx5_eswitch *esw,
return err;
}
-void esw_offloads_cleanup(struct mlx5_eswitch *esw)
+void esw_offloads_disable(struct mlx5_eswitch *esw)
{
mlx5_rdma_disable_roce(esw->dev);
esw_offloads_devcom_cleanup(esw);
esw_offloads_unload_all_reps(esw);
- if (mlx5_eswitch_vport_match_metadata_enabled(esw))
- mlx5_eswitch_disable_passing_vport_metadata(esw);
+ mlx5_eswitch_disable_pf_vf_vports(esw);
+ esw_set_passing_vport_metadata(esw, false);
esw_offloads_steering_cleanup(esw);
esw->offloads.encap = DEVLINK_ESWITCH_ENCAP_MODE_NONE;
}
@@ -2345,7 +2328,7 @@ int mlx5_devlink_eswitch_inline_mode_set(struct devlink *devlink, u8 mode,
break;
}
- if (esw->offloads.num_flows > 0) {
+ if (atomic64_read(&esw->offloads.num_flows) > 0) {
NL_SET_ERR_MSG_MOD(extack,
"Can't set inline mode when flows are configured");
return -EOPNOTSUPP;
@@ -2455,7 +2438,7 @@ int mlx5_devlink_eswitch_encap_mode_set(struct devlink *devlink,
if (esw->offloads.encap == encap)
return 0;
- if (esw->offloads.num_flows > 0) {
+ if (atomic64_read(&esw->offloads.num_flows) > 0) {
NL_SET_ERR_MSG_MOD(extack,
"Can't set encapsulation when flows are configured");
return -EOPNOTSUPP;
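
With num_flows converted to atomic64_t, the devlink callbacks above can
take a lockless snapshot before rejecting a mode change. The pattern they
share, extracted here as an illustrative helper (the helper itself is not
part of this patch):

	static int example_reject_if_flows_offloaded(struct mlx5_eswitch *esw,
						     struct netlink_ext_ack *extack)
	{
		/* atomic64_read() needs no lock; a racing insertion is
		 * handled the same way it was before this series */
		if (atomic64_read(&esw->offloads.num_flows) > 0) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Can't reconfigure while flows are offloaded");
			return -EOPNOTSUPP;
		}
		return 0;
	}
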
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
index 7ac1249eadc3..b84a225bbe86 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
@@ -566,7 +566,9 @@ static int mlx5_cmd_delete_fte(struct mlx5_flow_root_namespace *ns,
return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}
-int mlx5_cmd_fc_alloc(struct mlx5_core_dev *dev, u32 *id)
+int mlx5_cmd_fc_bulk_alloc(struct mlx5_core_dev *dev,
+ enum mlx5_fc_bulk_alloc_bitmask alloc_bitmask,
+ u32 *id)
{
u32 in[MLX5_ST_SZ_DW(alloc_flow_counter_in)] = {0};
u32 out[MLX5_ST_SZ_DW(alloc_flow_counter_out)] = {0};
@@ -574,6 +576,7 @@ int mlx5_cmd_fc_alloc(struct mlx5_core_dev *dev, u32 *id)
MLX5_SET(alloc_flow_counter_in, in, opcode,
MLX5_CMD_OP_ALLOC_FLOW_COUNTER);
+ MLX5_SET(alloc_flow_counter_in, in, flow_counter_bulk, alloc_bitmask);
err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
if (!err)
@@ -581,6 +584,11 @@ int mlx5_cmd_fc_alloc(struct mlx5_core_dev *dev, u32 *id)
return err;
}
+int mlx5_cmd_fc_alloc(struct mlx5_core_dev *dev, u32 *id)
+{
+ return mlx5_cmd_fc_bulk_alloc(dev, 0, id);
+}
+
int mlx5_cmd_fc_free(struct mlx5_core_dev *dev, u32 id)
{
u32 in[MLX5_ST_SZ_DW(dealloc_flow_counter_in)] = {0};
@@ -615,67 +623,24 @@ int mlx5_cmd_fc_query(struct mlx5_core_dev *dev, u32 id,
return 0;
}
-struct mlx5_cmd_fc_bulk {
- u32 id;
- int num;
- int outlen;
- u32 out[0];
-};
-
-struct mlx5_cmd_fc_bulk *
-mlx5_cmd_fc_bulk_alloc(struct mlx5_core_dev *dev, u32 id, int num)
-{
- struct mlx5_cmd_fc_bulk *b;
- int outlen =
- MLX5_ST_SZ_BYTES(query_flow_counter_out) +
- MLX5_ST_SZ_BYTES(traffic_counter) * num;
-
- b = kzalloc(sizeof(*b) + outlen, GFP_KERNEL);
- if (!b)
- return NULL;
-
- b->id = id;
- b->num = num;
- b->outlen = outlen;
-
- return b;
-}
-
-void mlx5_cmd_fc_bulk_free(struct mlx5_cmd_fc_bulk *b)
+int mlx5_cmd_fc_get_bulk_query_out_len(int bulk_len)
{
- kfree(b);
+ return MLX5_ST_SZ_BYTES(query_flow_counter_out) +
+ MLX5_ST_SZ_BYTES(traffic_counter) * bulk_len;
}
-int
-mlx5_cmd_fc_bulk_query(struct mlx5_core_dev *dev, struct mlx5_cmd_fc_bulk *b)
+int mlx5_cmd_fc_bulk_query(struct mlx5_core_dev *dev, u32 base_id, int bulk_len,
+ u32 *out)
{
+ int outlen = mlx5_cmd_fc_get_bulk_query_out_len(bulk_len);
u32 in[MLX5_ST_SZ_DW(query_flow_counter_in)] = {0};
MLX5_SET(query_flow_counter_in, in, opcode,
MLX5_CMD_OP_QUERY_FLOW_COUNTER);
MLX5_SET(query_flow_counter_in, in, op_mod, 0);
- MLX5_SET(query_flow_counter_in, in, flow_counter_id, b->id);
- MLX5_SET(query_flow_counter_in, in, num_of_counters, b->num);
- return mlx5_cmd_exec(dev, in, sizeof(in), b->out, b->outlen);
-}
-
-void mlx5_cmd_fc_bulk_get(struct mlx5_core_dev *dev,
- struct mlx5_cmd_fc_bulk *b, u32 id,
- u64 *packets, u64 *bytes)
-{
- int index = id - b->id;
- void *stats;
-
- if (index < 0 || index >= b->num) {
- mlx5_core_warn(dev, "Flow counter id (0x%x) out of range (0x%x..0x%x). Counter ignored.\n",
- id, b->id, b->id + b->num - 1);
- return;
- }
-
- stats = MLX5_ADDR_OF(query_flow_counter_out, b->out,
- flow_statistics[index]);
- *packets = MLX5_GET64(traffic_counter, stats, packets);
- *bytes = MLX5_GET64(traffic_counter, stats, octets);
+ MLX5_SET(query_flow_counter_in, in, flow_counter_id, base_id);
+ MLX5_SET(query_flow_counter_in, in, num_of_counters, bulk_len);
+ return mlx5_cmd_exec(dev, in, sizeof(in), out, outlen);
}
int mlx5_packet_reformat_alloc(struct mlx5_core_dev *dev,
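
The reworked bulk-query interface leaves buffer ownership with the caller:
size the output with mlx5_cmd_fc_get_bulk_query_out_len() and parse it with
the usual MLX5_ADDR_OF()/MLX5_GET64() accessors. A one-shot sketch
(illustrative; real callers keep a preallocated buffer, as fs_counters.c
below does):

	static int example_bulk_read(struct mlx5_core_dev *dev, u32 base_id,
				     int bulk_len)
	{
		int outlen = mlx5_cmd_fc_get_bulk_query_out_len(bulk_len);
		void *stats;
		u32 *out;
		int err;

		out = kvzalloc(outlen, GFP_KERNEL);
		if (!out)
			return -ENOMEM;

		/* base_id must be 4-aligned, as the callers ensure */
		err = mlx5_cmd_fc_bulk_query(dev, base_id, bulk_len, out);
		if (!err) {
			stats = MLX5_ADDR_OF(query_flow_counter_out, out,
					     flow_statistics[0]);
			mlx5_core_dbg(dev, "counter 0x%x: %llu packets\n",
				      base_id,
				      MLX5_GET64(traffic_counter, stats, packets));
		}

		kvfree(out);
		return err;
	}
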
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.h b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.h
index e340f9af2f5a..bc4606306009 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.h
@@ -78,20 +78,16 @@ struct mlx5_flow_cmds {
};
int mlx5_cmd_fc_alloc(struct mlx5_core_dev *dev, u32 *id);
+int mlx5_cmd_fc_bulk_alloc(struct mlx5_core_dev *dev,
+ enum mlx5_fc_bulk_alloc_bitmask alloc_bitmask,
+ u32 *id);
int mlx5_cmd_fc_free(struct mlx5_core_dev *dev, u32 id);
int mlx5_cmd_fc_query(struct mlx5_core_dev *dev, u32 id,
u64 *packets, u64 *bytes);
-struct mlx5_cmd_fc_bulk;
-
-struct mlx5_cmd_fc_bulk *
-mlx5_cmd_fc_bulk_alloc(struct mlx5_core_dev *dev, u32 id, int num);
-void mlx5_cmd_fc_bulk_free(struct mlx5_cmd_fc_bulk *b);
-int
-mlx5_cmd_fc_bulk_query(struct mlx5_core_dev *dev, struct mlx5_cmd_fc_bulk *b);
-void mlx5_cmd_fc_bulk_get(struct mlx5_core_dev *dev,
- struct mlx5_cmd_fc_bulk *b, u32 id,
- u64 *packets, u64 *bytes);
+int mlx5_cmd_fc_get_bulk_query_out_len(int bulk_len);
+int mlx5_cmd_fc_bulk_query(struct mlx5_core_dev *dev, u32 base_id, int bulk_len,
+ u32 *out);
const struct mlx5_flow_cmds *mlx5_fs_cmd_get_default(enum fs_flow_table_type type);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c
index 1834d9f3aa1c..ab69effb056d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c
@@ -40,6 +40,8 @@
#define MLX5_FC_STATS_PERIOD msecs_to_jiffies(1000)
/* Max number of counters to query in bulk read is 32K */
#define MLX5_SW_MAX_COUNTERS_BULK BIT(15)
+#define MLX5_FC_POOL_MAX_THRESHOLD BIT(18)
+#define MLX5_FC_POOL_USED_BUFF_RATIO 10
struct mlx5_fc_cache {
u64 packets;
@@ -58,12 +60,18 @@ struct mlx5_fc {
u64 lastpackets;
u64 lastbytes;
+ struct mlx5_fc_bulk *bulk;
u32 id;
bool aging;
struct mlx5_fc_cache cache ____cacheline_aligned_in_smp;
};
+static void mlx5_fc_pool_init(struct mlx5_fc_pool *fc_pool, struct mlx5_core_dev *dev);
+static void mlx5_fc_pool_cleanup(struct mlx5_fc_pool *fc_pool);
+static struct mlx5_fc *mlx5_fc_pool_acquire_counter(struct mlx5_fc_pool *fc_pool);
+static void mlx5_fc_pool_release_counter(struct mlx5_fc_pool *fc_pool, struct mlx5_fc *fc);
+
/* locking scheme:
*
* It is the responsibility of the user to prevent concurrent calls or bad
@@ -75,7 +83,7 @@ struct mlx5_fc {
* access to counter list:
* - create (user context)
* - mlx5_fc_create() only adds to an addlist to be used by
- * mlx5_fc_stats_query_work(). addlist is a lockless single linked list
+ * mlx5_fc_stats_work(). addlist is a lockless singly linked list
* that doesn't require any additional synchronization when adding a
* single node.
* - spawn thread to do the actual destroy
@@ -136,81 +144,87 @@ static void mlx5_fc_stats_remove(struct mlx5_core_dev *dev,
spin_unlock(&fc_stats->counters_idr_lock);
}
-/* The function returns the last counter that was queried so the caller
- * function can continue calling it till all counters are queried.
- */
-static struct mlx5_fc *mlx5_fc_stats_query(struct mlx5_core_dev *dev,
- struct mlx5_fc *first,
- u32 last_id)
+static int get_max_bulk_query_len(struct mlx5_core_dev *dev)
{
- struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats;
- struct mlx5_fc *counter = NULL;
- struct mlx5_cmd_fc_bulk *b;
- bool more = false;
- u32 afirst_id;
- int num;
- int err;
+ return min_t(int, MLX5_SW_MAX_COUNTERS_BULK,
+ (1 << MLX5_CAP_GEN(dev, log_max_flow_counter_bulk)));
+}
- int max_bulk = min_t(int, MLX5_SW_MAX_COUNTERS_BULK,
- (1 << MLX5_CAP_GEN(dev, log_max_flow_counter_bulk)));
+static void update_counter_cache(int index, u32 *bulk_raw_data,
+ struct mlx5_fc_cache *cache)
+{
+ void *stats = MLX5_ADDR_OF(query_flow_counter_out, bulk_raw_data,
+ flow_statistics[index]);
+ u64 packets = MLX5_GET64(traffic_counter, stats, packets);
+ u64 bytes = MLX5_GET64(traffic_counter, stats, octets);
- /* first id must be aligned to 4 when using bulk query */
- afirst_id = first->id & ~0x3;
+ if (cache->packets == packets)
+ return;
- /* number of counters to query inc. the last counter */
- num = ALIGN(last_id - afirst_id + 1, 4);
- if (num > max_bulk) {
- num = max_bulk;
- last_id = afirst_id + num - 1;
- }
+ cache->packets = packets;
+ cache->bytes = bytes;
+ cache->lastuse = jiffies;
+}
- b = mlx5_cmd_fc_bulk_alloc(dev, afirst_id, num);
- if (!b) {
- mlx5_core_err(dev, "Error allocating resources for bulk query\n");
- return NULL;
- }
+static void mlx5_fc_stats_query_counter_range(struct mlx5_core_dev *dev,
+ struct mlx5_fc *first,
+ u32 last_id)
+{
+ struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats;
+ bool query_more_counters = (first->id <= last_id);
+ int max_bulk_len = get_max_bulk_query_len(dev);
+ u32 *data = fc_stats->bulk_query_out;
+ struct mlx5_fc *counter = first;
+ u32 bulk_base_id;
+ int bulk_len;
+ int err;
- err = mlx5_cmd_fc_bulk_query(dev, b);
- if (err) {
- mlx5_core_err(dev, "Error doing bulk query: %d\n", err);
- goto out;
- }
+ while (query_more_counters) {
+ /* first id must be aligned to 4 when using bulk query */
+ bulk_base_id = counter->id & ~0x3;
- counter = first;
- list_for_each_entry_from(counter, &fc_stats->counters, list) {
- struct mlx5_fc_cache *c = &counter->cache;
- u64 packets;
- u64 bytes;
+ /* number of counters to query inc. the last counter */
+ bulk_len = min_t(int, max_bulk_len,
+ ALIGN(last_id - bulk_base_id + 1, 4));
- if (counter->id > last_id) {
- more = true;
- break;
+ err = mlx5_cmd_fc_bulk_query(dev, bulk_base_id, bulk_len,
+ data);
+ if (err) {
+ mlx5_core_err(dev, "Error doing bulk query: %d\n", err);
+ return;
}
+ query_more_counters = false;
- mlx5_cmd_fc_bulk_get(dev, b,
- counter->id, &packets, &bytes);
+ list_for_each_entry_from(counter, &fc_stats->counters, list) {
+ int counter_index = counter->id - bulk_base_id;
+ struct mlx5_fc_cache *cache = &counter->cache;
- if (c->packets == packets)
- continue;
+ if (counter->id >= bulk_base_id + bulk_len) {
+ query_more_counters = true;
+ break;
+ }
- c->packets = packets;
- c->bytes = bytes;
- c->lastuse = jiffies;
+ update_counter_cache(counter_index, data, cache);
+ }
}
-
-out:
- mlx5_cmd_fc_bulk_free(b);
-
- return more ? counter : NULL;
}
-static void mlx5_free_fc(struct mlx5_core_dev *dev,
- struct mlx5_fc *counter)
+static void mlx5_fc_free(struct mlx5_core_dev *dev, struct mlx5_fc *counter)
{
mlx5_cmd_fc_free(dev, counter->id);
kfree(counter);
}
+static void mlx5_fc_release(struct mlx5_core_dev *dev, struct mlx5_fc *counter)
+{
+ struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats;
+
+ if (counter->bulk)
+ mlx5_fc_pool_release_counter(&fc_stats->fc_pool, counter);
+ else
+ mlx5_fc_free(dev, counter);
+}
+
static void mlx5_fc_stats_work(struct work_struct *work)
{
struct mlx5_core_dev *dev = container_of(work, struct mlx5_core_dev,
@@ -234,7 +248,7 @@ static void mlx5_fc_stats_work(struct work_struct *work)
llist_for_each_entry_safe(counter, tmp, dellist, dellist) {
mlx5_fc_stats_remove(dev, counter);
- mlx5_free_fc(dev, counter);
+ mlx5_fc_release(dev, counter);
}
if (time_before(now, fc_stats->next_query) ||
@@ -244,32 +258,62 @@ static void mlx5_fc_stats_work(struct work_struct *work)
counter = list_first_entry(&fc_stats->counters, struct mlx5_fc,
list);
- while (counter)
- counter = mlx5_fc_stats_query(dev, counter, last->id);
+ if (counter)
+ mlx5_fc_stats_query_counter_range(dev, counter, last->id);
fc_stats->next_query = now + fc_stats->sampling_interval;
}
-struct mlx5_fc *mlx5_fc_create(struct mlx5_core_dev *dev, bool aging)
+static struct mlx5_fc *mlx5_fc_single_alloc(struct mlx5_core_dev *dev)
{
- struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats;
struct mlx5_fc *counter;
int err;
counter = kzalloc(sizeof(*counter), GFP_KERNEL);
if (!counter)
return ERR_PTR(-ENOMEM);
- INIT_LIST_HEAD(&counter->list);
err = mlx5_cmd_fc_alloc(dev, &counter->id);
- if (err)
- goto err_out;
+ if (err) {
+ kfree(counter);
+ return ERR_PTR(err);
+ }
+
+ return counter;
+}
+
+static struct mlx5_fc *mlx5_fc_acquire(struct mlx5_core_dev *dev, bool aging)
+{
+ struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats;
+ struct mlx5_fc *counter;
+
+ if (aging && MLX5_CAP_GEN(dev, flow_counter_bulk_alloc) != 0) {
+ counter = mlx5_fc_pool_acquire_counter(&fc_stats->fc_pool);
+ if (!IS_ERR(counter))
+ return counter;
+ }
+
+ return mlx5_fc_single_alloc(dev);
+}
+
+struct mlx5_fc *mlx5_fc_create(struct mlx5_core_dev *dev, bool aging)
+{
+ struct mlx5_fc *counter = mlx5_fc_acquire(dev, aging);
+ struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats;
+ int err;
+
+ if (IS_ERR(counter))
+ return counter;
+
+ INIT_LIST_HEAD(&counter->list);
+ counter->aging = aging;
if (aging) {
u32 id = counter->id;
counter->cache.lastuse = jiffies;
- counter->aging = true;
+ counter->lastbytes = counter->cache.bytes;
+ counter->lastpackets = counter->cache.packets;
idr_preload(GFP_KERNEL);
spin_lock(&fc_stats->counters_idr_lock);
@@ -290,10 +334,7 @@ struct mlx5_fc *mlx5_fc_create(struct mlx5_core_dev *dev, bool aging)
return counter;
err_out_alloc:
- mlx5_cmd_fc_free(dev, counter->id);
-err_out:
- kfree(counter);
-
+ mlx5_fc_release(dev, counter);
return ERR_PTR(err);
}
EXPORT_SYMBOL(mlx5_fc_create);
@@ -317,13 +358,15 @@ void mlx5_fc_destroy(struct mlx5_core_dev *dev, struct mlx5_fc *counter)
return;
}
- mlx5_free_fc(dev, counter);
+ mlx5_fc_release(dev, counter);
}
EXPORT_SYMBOL(mlx5_fc_destroy);
int mlx5_init_fc_stats(struct mlx5_core_dev *dev)
{
struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats;
+ int max_bulk_len;
+ int max_out_len;
spin_lock_init(&fc_stats->counters_idr_lock);
idr_init(&fc_stats->counters_idr);
@@ -331,14 +374,25 @@ int mlx5_init_fc_stats(struct mlx5_core_dev *dev)
init_llist_head(&fc_stats->addlist);
init_llist_head(&fc_stats->dellist);
+ max_bulk_len = get_max_bulk_query_len(dev);
+ max_out_len = mlx5_cmd_fc_get_bulk_query_out_len(max_bulk_len);
+ fc_stats->bulk_query_out = kzalloc(max_out_len, GFP_KERNEL);
+ if (!fc_stats->bulk_query_out)
+ return -ENOMEM;
+
fc_stats->wq = create_singlethread_workqueue("mlx5_fc");
if (!fc_stats->wq)
- return -ENOMEM;
+ goto err_wq_create;
fc_stats->sampling_interval = MLX5_FC_STATS_PERIOD;
INIT_DELAYED_WORK(&fc_stats->work, mlx5_fc_stats_work);
+ mlx5_fc_pool_init(&fc_stats->fc_pool, dev);
return 0;
+
+err_wq_create:
+ kfree(fc_stats->bulk_query_out);
+ return -ENOMEM;
}
void mlx5_cleanup_fc_stats(struct mlx5_core_dev *dev)
@@ -352,14 +406,16 @@ void mlx5_cleanup_fc_stats(struct mlx5_core_dev *dev)
destroy_workqueue(dev->priv.fc_stats.wq);
dev->priv.fc_stats.wq = NULL;
- idr_destroy(&fc_stats->counters_idr);
-
tmplist = llist_del_all(&fc_stats->addlist);
llist_for_each_entry_safe(counter, tmp, tmplist, addlist)
- mlx5_free_fc(dev, counter);
+ mlx5_fc_release(dev, counter);
list_for_each_entry_safe(counter, tmp, &fc_stats->counters, list)
- mlx5_free_fc(dev, counter);
+ mlx5_fc_release(dev, counter);
+
+ mlx5_fc_pool_cleanup(&fc_stats->fc_pool);
+ idr_destroy(&fc_stats->counters_idr);
+ kfree(fc_stats->bulk_query_out);
}
int mlx5_fc_query(struct mlx5_core_dev *dev, struct mlx5_fc *counter,
@@ -406,3 +462,243 @@ void mlx5_fc_update_sampling_interval(struct mlx5_core_dev *dev,
fc_stats->sampling_interval = min_t(unsigned long, interval,
fc_stats->sampling_interval);
}
+
+/* Flow counter bulks */
+
+struct mlx5_fc_bulk {
+ struct list_head pool_list;
+ u32 base_id;
+ int bulk_len;
+ unsigned long *bitmask;
+ struct mlx5_fc fcs[0];
+};
+
+static void mlx5_fc_init(struct mlx5_fc *counter, struct mlx5_fc_bulk *bulk,
+ u32 id)
+{
+ counter->bulk = bulk;
+ counter->id = id;
+}
+
+static int mlx5_fc_bulk_get_free_fcs_amount(struct mlx5_fc_bulk *bulk)
+{
+ return bitmap_weight(bulk->bitmask, bulk->bulk_len);
+}
+
+static struct mlx5_fc_bulk *mlx5_fc_bulk_create(struct mlx5_core_dev *dev)
+{
+ enum mlx5_fc_bulk_alloc_bitmask alloc_bitmask;
+ struct mlx5_fc_bulk *bulk;
+ int err = -ENOMEM;
+ int bulk_len;
+ u32 base_id;
+ int i;
+
+ alloc_bitmask = MLX5_CAP_GEN(dev, flow_counter_bulk_alloc);
+ bulk_len = alloc_bitmask > 0 ? MLX5_FC_BULK_NUM_FCS(alloc_bitmask) : 1;
+
+ bulk = kzalloc(sizeof(*bulk) + bulk_len * sizeof(struct mlx5_fc),
+ GFP_KERNEL);
+ if (!bulk)
+ goto err_alloc_bulk;
+
+ bulk->bitmask = kcalloc(BITS_TO_LONGS(bulk_len), sizeof(unsigned long),
+ GFP_KERNEL);
+ if (!bulk->bitmask)
+ goto err_alloc_bitmask;
+
+ err = mlx5_cmd_fc_bulk_alloc(dev, alloc_bitmask, &base_id);
+ if (err)
+ goto err_mlx5_cmd_bulk_alloc;
+
+ bulk->base_id = base_id;
+ bulk->bulk_len = bulk_len;
+ for (i = 0; i < bulk_len; i++) {
+ mlx5_fc_init(&bulk->fcs[i], bulk, base_id + i);
+ set_bit(i, bulk->bitmask);
+ }
+
+ return bulk;
+
+err_mlx5_cmd_bulk_alloc:
+ kfree(bulk->bitmask);
+err_alloc_bitmask:
+ kfree(bulk);
+err_alloc_bulk:
+ return ERR_PTR(err);
+}
+
+static int
+mlx5_fc_bulk_destroy(struct mlx5_core_dev *dev, struct mlx5_fc_bulk *bulk)
+{
+ if (mlx5_fc_bulk_get_free_fcs_amount(bulk) < bulk->bulk_len) {
+ mlx5_core_err(dev, "Freeing bulk before all counters were released\n");
+ return -EBUSY;
+ }
+
+ mlx5_cmd_fc_free(dev, bulk->base_id);
+ kfree(bulk->bitmask);
+ kfree(bulk);
+
+ return 0;
+}
+
+static struct mlx5_fc *mlx5_fc_bulk_acquire_fc(struct mlx5_fc_bulk *bulk)
+{
+ int free_fc_index = find_first_bit(bulk->bitmask, bulk->bulk_len);
+
+ if (free_fc_index >= bulk->bulk_len)
+ return ERR_PTR(-ENOSPC);
+
+ clear_bit(free_fc_index, bulk->bitmask);
+ return &bulk->fcs[free_fc_index];
+}
+
+static int mlx5_fc_bulk_release_fc(struct mlx5_fc_bulk *bulk, struct mlx5_fc *fc)
+{
+ int fc_index = fc->id - bulk->base_id;
+
+ if (test_bit(fc_index, bulk->bitmask))
+ return -EINVAL;
+
+ set_bit(fc_index, bulk->bitmask);
+ return 0;
+}
+
+/* Flow counters pool API */
+
+static void mlx5_fc_pool_init(struct mlx5_fc_pool *fc_pool, struct mlx5_core_dev *dev)
+{
+ fc_pool->dev = dev;
+ mutex_init(&fc_pool->pool_lock);
+ INIT_LIST_HEAD(&fc_pool->fully_used);
+ INIT_LIST_HEAD(&fc_pool->partially_used);
+ INIT_LIST_HEAD(&fc_pool->unused);
+ fc_pool->available_fcs = 0;
+ fc_pool->used_fcs = 0;
+ fc_pool->threshold = 0;
+}
+
+static void mlx5_fc_pool_cleanup(struct mlx5_fc_pool *fc_pool)
+{
+ struct mlx5_core_dev *dev = fc_pool->dev;
+ struct mlx5_fc_bulk *bulk;
+ struct mlx5_fc_bulk *tmp;
+
+ list_for_each_entry_safe(bulk, tmp, &fc_pool->fully_used, pool_list)
+ mlx5_fc_bulk_destroy(dev, bulk);
+ list_for_each_entry_safe(bulk, tmp, &fc_pool->partially_used, pool_list)
+ mlx5_fc_bulk_destroy(dev, bulk);
+ list_for_each_entry_safe(bulk, tmp, &fc_pool->unused, pool_list)
+ mlx5_fc_bulk_destroy(dev, bulk);
+}
+
+static void mlx5_fc_pool_update_threshold(struct mlx5_fc_pool *fc_pool)
+{
+ fc_pool->threshold = min_t(int, MLX5_FC_POOL_MAX_THRESHOLD,
+ fc_pool->used_fcs / MLX5_FC_POOL_USED_BUFF_RATIO);
+}
+
+static struct mlx5_fc_bulk *
+mlx5_fc_pool_alloc_new_bulk(struct mlx5_fc_pool *fc_pool)
+{
+ struct mlx5_core_dev *dev = fc_pool->dev;
+ struct mlx5_fc_bulk *new_bulk;
+
+ new_bulk = mlx5_fc_bulk_create(dev);
+ if (!IS_ERR(new_bulk))
+ fc_pool->available_fcs += new_bulk->bulk_len;
+ mlx5_fc_pool_update_threshold(fc_pool);
+ return new_bulk;
+}
+
+static void
+mlx5_fc_pool_free_bulk(struct mlx5_fc_pool *fc_pool, struct mlx5_fc_bulk *bulk)
+{
+ struct mlx5_core_dev *dev = fc_pool->dev;
+
+ fc_pool->available_fcs -= bulk->bulk_len;
+ mlx5_fc_bulk_destroy(dev, bulk);
+ mlx5_fc_pool_update_threshold(fc_pool);
+}
+
+static struct mlx5_fc *
+mlx5_fc_pool_acquire_from_list(struct list_head *src_list,
+ struct list_head *next_list,
+ bool move_non_full_bulk)
+{
+ struct mlx5_fc_bulk *bulk;
+ struct mlx5_fc *fc;
+
+ if (list_empty(src_list))
+ return ERR_PTR(-ENODATA);
+
+ bulk = list_first_entry(src_list, struct mlx5_fc_bulk, pool_list);
+ fc = mlx5_fc_bulk_acquire_fc(bulk);
+ if (move_non_full_bulk || mlx5_fc_bulk_get_free_fcs_amount(bulk) == 0)
+ list_move(&bulk->pool_list, next_list);
+ return fc;
+}
+
+static struct mlx5_fc *
+mlx5_fc_pool_acquire_counter(struct mlx5_fc_pool *fc_pool)
+{
+ struct mlx5_fc_bulk *new_bulk;
+ struct mlx5_fc *fc;
+
+ mutex_lock(&fc_pool->pool_lock);
+
+ fc = mlx5_fc_pool_acquire_from_list(&fc_pool->partially_used,
+ &fc_pool->fully_used, false);
+ if (IS_ERR(fc))
+ fc = mlx5_fc_pool_acquire_from_list(&fc_pool->unused,
+ &fc_pool->partially_used,
+ true);
+ if (IS_ERR(fc)) {
+ new_bulk = mlx5_fc_pool_alloc_new_bulk(fc_pool);
+ if (IS_ERR(new_bulk)) {
+ fc = ERR_CAST(new_bulk);
+ goto out;
+ }
+ fc = mlx5_fc_bulk_acquire_fc(new_bulk);
+ list_add(&new_bulk->pool_list, &fc_pool->partially_used);
+ }
+ fc_pool->available_fcs--;
+ fc_pool->used_fcs++;
+
+out:
+ mutex_unlock(&fc_pool->pool_lock);
+ return fc;
+}
+
+static void
+mlx5_fc_pool_release_counter(struct mlx5_fc_pool *fc_pool, struct mlx5_fc *fc)
+{
+ struct mlx5_core_dev *dev = fc_pool->dev;
+ struct mlx5_fc_bulk *bulk = fc->bulk;
+ int bulk_free_fcs_amount;
+
+ mutex_lock(&fc_pool->pool_lock);
+
+ if (mlx5_fc_bulk_release_fc(bulk, fc)) {
+ mlx5_core_warn(dev, "Attempted to release a counter which is not acquired\n");
+ goto unlock;
+ }
+
+ fc_pool->available_fcs++;
+ fc_pool->used_fcs--;
+
+ bulk_free_fcs_amount = mlx5_fc_bulk_get_free_fcs_amount(bulk);
+ if (bulk_free_fcs_amount == 1)
+ list_move_tail(&bulk->pool_list, &fc_pool->partially_used);
+ if (bulk_free_fcs_amount == bulk->bulk_len) {
+ list_del(&bulk->pool_list);
+ if (fc_pool->available_fcs > fc_pool->threshold)
+ mlx5_fc_pool_free_bulk(fc_pool, bulk);
+ else
+ list_add(&bulk->pool_list, &fc_pool->unused);
+ }
+
+unlock:
+ mutex_unlock(&fc_pool->pool_lock);
+}
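
From the consumer's side the pool is invisible: mlx5_fc_create() with
aging enabled may hand out a counter carved from a shared bulk, and
mlx5_fc_destroy() returns it to the pool instead of issuing a firmware
dealloc. The public contract is unchanged, as this sketch shows:

	static int example_count_flow(struct mlx5_core_dev *dev)
	{
		struct mlx5_fc *counter;
		u64 packets, bytes;
		int err;

		/* aging=true makes the counter pool-eligible and keeps
		 * its cache updated by the periodic bulk query */
		counter = mlx5_fc_create(dev, true);
		if (IS_ERR(counter))
			return PTR_ERR(counter);

		/* ... attach the counter to a flow rule here ... */

		err = mlx5_fc_query(dev, counter, &packets, &bytes);

		mlx5_fc_destroy(dev, counter); /* releases to pool if bulked */
		return err;
	}
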
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/eq.h b/drivers/net/ethernet/mellanox/mlx5/core/lib/eq.h
index 3dfab91ae5f2..4be4d2d36218 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/eq.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/eq.h
@@ -87,7 +87,7 @@ void mlx5_eq_synchronize_cmd_irq(struct mlx5_core_dev *dev);
int mlx5_debug_eq_add(struct mlx5_core_dev *dev, struct mlx5_eq *eq);
void mlx5_debug_eq_remove(struct mlx5_core_dev *dev, struct mlx5_eq *eq);
-int mlx5_eq_debugfs_init(struct mlx5_core_dev *dev);
+void mlx5_eq_debugfs_init(struct mlx5_core_dev *dev);
void mlx5_eq_debugfs_cleanup(struct mlx5_core_dev *dev);
/* This function should only be called after mlx5_cmd_force_teardown_hca */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/hv.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/hv.c
new file mode 100644
index 000000000000..583dc7e2aca8
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/hv.c
@@ -0,0 +1,64 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+// Copyright (c) 2018 Mellanox Technologies
+
+#include <linux/hyperv.h>
+#include "mlx5_core.h"
+#include "lib/hv.h"
+
+static int mlx5_hv_config_common(struct mlx5_core_dev *dev, void *buf, int len,
+ int offset, bool read)
+{
+ int rc = -EOPNOTSUPP;
+ int bytes_returned;
+ int block_id;
+
+ if (offset % HV_CONFIG_BLOCK_SIZE_MAX || len != HV_CONFIG_BLOCK_SIZE_MAX)
+ return -EINVAL;
+
+ block_id = offset / HV_CONFIG_BLOCK_SIZE_MAX;
+
+ rc = read ?
+ hyperv_read_cfg_blk(dev->pdev, buf,
+ HV_CONFIG_BLOCK_SIZE_MAX, block_id,
+ &bytes_returned) :
+ hyperv_write_cfg_blk(dev->pdev, buf,
+ HV_CONFIG_BLOCK_SIZE_MAX, block_id);
+
+ /* Make sure len bytes were read successfully */
+ if (read && !rc && len != bytes_returned)
+ rc = -EIO;
+
+ if (rc) {
+ mlx5_core_err(dev, "Failed to %s hv config, err = %d, len = %d, offset = %d\n",
+ read ? "read" : "write", rc, len,
+ offset);
+ return rc;
+ }
+
+ return 0;
+}
+
+int mlx5_hv_read_config(struct mlx5_core_dev *dev, void *buf, int len,
+ int offset)
+{
+ return mlx5_hv_config_common(dev, buf, len, offset, true);
+}
+
+int mlx5_hv_write_config(struct mlx5_core_dev *dev, void *buf, int len,
+ int offset)
+{
+ return mlx5_hv_config_common(dev, buf, len, offset, false);
+}
+
+int mlx5_hv_register_invalidate(struct mlx5_core_dev *dev, void *context,
+ void (*block_invalidate)(void *context,
+ u64 block_mask))
+{
+ return hyperv_reg_block_invalidate(dev->pdev, context,
+ block_invalidate);
+}
+
+void mlx5_hv_unregister_invalidate(struct mlx5_core_dev *dev)
+{
+ hyperv_reg_block_invalidate(dev->pdev, NULL, NULL);
+}
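
mlx5_hv_config_common() only accepts whole, block-aligned transfers, so
every caller moves exactly HV_CONFIG_BLOCK_SIZE_MAX bytes at a
block-multiple offset. A conforming read looks like this sketch:

	static int example_read_block(struct mlx5_core_dev *dev, int block_id,
				      u8 buf[HV_CONFIG_BLOCK_SIZE_MAX])
	{
		/* len must equal HV_CONFIG_BLOCK_SIZE_MAX and offset must
		 * be a multiple of it, or the helper returns -EINVAL */
		return mlx5_hv_read_config(dev, buf, HV_CONFIG_BLOCK_SIZE_MAX,
					   block_id * HV_CONFIG_BLOCK_SIZE_MAX);
	}
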
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/hv.h b/drivers/net/ethernet/mellanox/mlx5/core/lib/hv.h
new file mode 100644
index 000000000000..f9a45573f459
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/hv.h
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2019 Mellanox Technologies. */
+
+#ifndef __LIB_HV_H__
+#define __LIB_HV_H__
+
+#if IS_ENABLED(CONFIG_PCI_HYPERV_INTERFACE)
+
+#include <linux/hyperv.h>
+#include <linux/mlx5/driver.h>
+
+int mlx5_hv_read_config(struct mlx5_core_dev *dev, void *buf, int len,
+ int offset);
+int mlx5_hv_write_config(struct mlx5_core_dev *dev, void *buf, int len,
+ int offset);
+int mlx5_hv_register_invalidate(struct mlx5_core_dev *dev, void *context,
+ void (*block_invalidate)(void *context,
+ u64 block_mask));
+void mlx5_hv_unregister_invalidate(struct mlx5_core_dev *dev);
+#endif
+
+#endif /* __LIB_HV_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/hv_vhca.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/hv_vhca.c
new file mode 100644
index 000000000000..4047629a876b
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/hv_vhca.c
@@ -0,0 +1,371 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+// Copyright (c) 2018 Mellanox Technologies
+
+#include <linux/hyperv.h>
+#include "mlx5_core.h"
+#include "lib/hv.h"
+#include "lib/hv_vhca.h"
+
+struct mlx5_hv_vhca {
+ struct mlx5_core_dev *dev;
+ struct workqueue_struct *work_queue;
+ struct mlx5_hv_vhca_agent *agents[MLX5_HV_VHCA_AGENT_MAX];
+ struct mutex agents_lock; /* Protect agents array */
+};
+
+struct mlx5_hv_vhca_work {
+ struct work_struct invalidate_work;
+ struct mlx5_hv_vhca *hv_vhca;
+ u64 block_mask;
+};
+
+struct mlx5_hv_vhca_data_block {
+ u16 sequence;
+ u16 offset;
+ u8 reserved[4];
+ u64 data[15];
+};
+
+struct mlx5_hv_vhca_agent {
+ enum mlx5_hv_vhca_agent_type type;
+ struct mlx5_hv_vhca *hv_vhca;
+ void *priv;
+ u16 seq;
+ void (*control)(struct mlx5_hv_vhca_agent *agent,
+ struct mlx5_hv_vhca_control_block *block);
+ void (*invalidate)(struct mlx5_hv_vhca_agent *agent,
+ u64 block_mask);
+ void (*cleanup)(struct mlx5_hv_vhca_agent *agent);
+};
+
+struct mlx5_hv_vhca *mlx5_hv_vhca_create(struct mlx5_core_dev *dev)
+{
+ struct mlx5_hv_vhca *hv_vhca = NULL;
+
+ hv_vhca = kzalloc(sizeof(*hv_vhca), GFP_KERNEL);
+ if (!hv_vhca)
+ return ERR_PTR(-ENOMEM);
+
+ hv_vhca->work_queue = create_singlethread_workqueue("mlx5_hv_vhca");
+ if (!hv_vhca->work_queue) {
+ kfree(hv_vhca);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ hv_vhca->dev = dev;
+ mutex_init(&hv_vhca->agents_lock);
+
+ return hv_vhca;
+}
+
+void mlx5_hv_vhca_destroy(struct mlx5_hv_vhca *hv_vhca)
+{
+ if (IS_ERR_OR_NULL(hv_vhca))
+ return;
+
+ destroy_workqueue(hv_vhca->work_queue);
+ kfree(hv_vhca);
+}
+
+static void mlx5_hv_vhca_invalidate_work(struct work_struct *work)
+{
+ struct mlx5_hv_vhca_work *hwork;
+ struct mlx5_hv_vhca *hv_vhca;
+ int i;
+
+ hwork = container_of(work, struct mlx5_hv_vhca_work, invalidate_work);
+ hv_vhca = hwork->hv_vhca;
+
+ mutex_lock(&hv_vhca->agents_lock);
+ for (i = 0; i < MLX5_HV_VHCA_AGENT_MAX; i++) {
+ struct mlx5_hv_vhca_agent *agent = hv_vhca->agents[i];
+
+ if (!agent || !agent->invalidate)
+ continue;
+
+ if (!(BIT(agent->type) & hwork->block_mask))
+ continue;
+
+ agent->invalidate(agent, hwork->block_mask);
+ }
+ mutex_unlock(&hv_vhca->agents_lock);
+
+ kfree(hwork);
+}
+
+void mlx5_hv_vhca_invalidate(void *context, u64 block_mask)
+{
+ struct mlx5_hv_vhca *hv_vhca = (struct mlx5_hv_vhca *)context;
+ struct mlx5_hv_vhca_work *work;
+
+ work = kzalloc(sizeof(*work), GFP_ATOMIC);
+ if (!work)
+ return;
+
+ INIT_WORK(&work->invalidate_work, mlx5_hv_vhca_invalidate_work);
+ work->hv_vhca = hv_vhca;
+ work->block_mask = block_mask;
+
+ queue_work(hv_vhca->work_queue, &work->invalidate_work);
+}
+
+#define AGENT_MASK(type) (type ? BIT(type - 1) : 0 /* control */)
+
+static void mlx5_hv_vhca_agents_control(struct mlx5_hv_vhca *hv_vhca,
+ struct mlx5_hv_vhca_control_block *block)
+{
+ int i;
+
+ for (i = 0; i < MLX5_HV_VHCA_AGENT_MAX; i++) {
+ struct mlx5_hv_vhca_agent *agent = hv_vhca->agents[i];
+
+ if (!agent || !agent->control)
+ continue;
+
+ if (!(AGENT_MASK(agent->type) & block->control))
+ continue;
+
+ agent->control(agent, block);
+ }
+}
+
+static void mlx5_hv_vhca_capabilities(struct mlx5_hv_vhca *hv_vhca,
+ u32 *capabilities)
+{
+ int i;
+
+ for (i = 0; i < MLX5_HV_VHCA_AGENT_MAX; i++) {
+ struct mlx5_hv_vhca_agent *agent = hv_vhca->agents[i];
+
+ if (agent)
+ *capabilities |= AGENT_MASK(agent->type);
+ }
+}
+
+static void
+mlx5_hv_vhca_control_agent_invalidate(struct mlx5_hv_vhca_agent *agent,
+ u64 block_mask)
+{
+ struct mlx5_hv_vhca *hv_vhca = agent->hv_vhca;
+ struct mlx5_core_dev *dev = hv_vhca->dev;
+ struct mlx5_hv_vhca_control_block *block;
+ u32 capabilities = 0;
+ int err;
+
+ block = kzalloc(sizeof(*block), GFP_KERNEL);
+ if (!block)
+ return;
+
+ err = mlx5_hv_read_config(dev, block, sizeof(*block), 0);
+ if (err)
+ goto free_block;
+
+ mlx5_hv_vhca_capabilities(hv_vhca, &capabilities);
+
+	/* If there are no capabilities, send an empty block in return */
+ if (!capabilities) {
+ memset(block, 0, sizeof(*block));
+ goto write;
+ }
+
+ if (block->capabilities != capabilities)
+ block->capabilities = capabilities;
+
+ if (block->control & ~capabilities)
+ goto free_block;
+
+ mlx5_hv_vhca_agents_control(hv_vhca, block);
+ block->command_ack = block->command;
+
+write:
+ mlx5_hv_write_config(dev, block, sizeof(*block), 0);
+
+free_block:
+ kfree(block);
+}
+
+static struct mlx5_hv_vhca_agent *
+mlx5_hv_vhca_control_agent_create(struct mlx5_hv_vhca *hv_vhca)
+{
+ return mlx5_hv_vhca_agent_create(hv_vhca, MLX5_HV_VHCA_AGENT_CONTROL,
+ NULL,
+ mlx5_hv_vhca_control_agent_invalidate,
+ NULL, NULL);
+}
+
+static void mlx5_hv_vhca_control_agent_destroy(struct mlx5_hv_vhca_agent *agent)
+{
+ mlx5_hv_vhca_agent_destroy(agent);
+}
+
+int mlx5_hv_vhca_init(struct mlx5_hv_vhca *hv_vhca)
+{
+ struct mlx5_hv_vhca_agent *agent;
+ int err;
+
+ if (IS_ERR_OR_NULL(hv_vhca))
+ return IS_ERR_OR_NULL(hv_vhca);
+
+ err = mlx5_hv_register_invalidate(hv_vhca->dev, hv_vhca,
+ mlx5_hv_vhca_invalidate);
+ if (err)
+ return err;
+
+ agent = mlx5_hv_vhca_control_agent_create(hv_vhca);
+ if (IS_ERR_OR_NULL(agent)) {
+ mlx5_hv_unregister_invalidate(hv_vhca->dev);
+ return IS_ERR_OR_NULL(agent);
+ }
+
+ hv_vhca->agents[MLX5_HV_VHCA_AGENT_CONTROL] = agent;
+
+ return 0;
+}
+
+void mlx5_hv_vhca_cleanup(struct mlx5_hv_vhca *hv_vhca)
+{
+ struct mlx5_hv_vhca_agent *agent;
+ int i;
+
+ if (IS_ERR_OR_NULL(hv_vhca))
+ return;
+
+ agent = hv_vhca->agents[MLX5_HV_VHCA_AGENT_CONTROL];
+ if (agent)
+ mlx5_hv_vhca_control_agent_destroy(agent);
+
+ mutex_lock(&hv_vhca->agents_lock);
+ for (i = 0; i < MLX5_HV_VHCA_AGENT_MAX; i++)
+ WARN_ON(hv_vhca->agents[i]);
+
+ mutex_unlock(&hv_vhca->agents_lock);
+
+ mlx5_hv_unregister_invalidate(hv_vhca->dev);
+}
+
+static void mlx5_hv_vhca_agents_update(struct mlx5_hv_vhca *hv_vhca)
+{
+ mlx5_hv_vhca_invalidate(hv_vhca, BIT(MLX5_HV_VHCA_AGENT_CONTROL));
+}
+
+struct mlx5_hv_vhca_agent *
+mlx5_hv_vhca_agent_create(struct mlx5_hv_vhca *hv_vhca,
+ enum mlx5_hv_vhca_agent_type type,
+ void (*control)(struct mlx5_hv_vhca_agent*,
+ struct mlx5_hv_vhca_control_block *block),
+ void (*invalidate)(struct mlx5_hv_vhca_agent*,
+ u64 block_mask),
+			  void (*cleanup)(struct mlx5_hv_vhca_agent *agent),
+ void *priv)
+{
+ struct mlx5_hv_vhca_agent *agent;
+
+ if (IS_ERR_OR_NULL(hv_vhca))
+ return ERR_PTR(-ENOMEM);
+
+ if (type >= MLX5_HV_VHCA_AGENT_MAX)
+ return ERR_PTR(-EINVAL);
+
+ mutex_lock(&hv_vhca->agents_lock);
+ if (hv_vhca->agents[type]) {
+ mutex_unlock(&hv_vhca->agents_lock);
+ return ERR_PTR(-EINVAL);
+ }
+ mutex_unlock(&hv_vhca->agents_lock);
+
+ agent = kzalloc(sizeof(*agent), GFP_KERNEL);
+ if (!agent)
+ return ERR_PTR(-ENOMEM);
+
+ agent->type = type;
+ agent->hv_vhca = hv_vhca;
+ agent->priv = priv;
+ agent->control = control;
+ agent->invalidate = invalidate;
+	agent->cleanup = cleanup;
+
+ mutex_lock(&hv_vhca->agents_lock);
+ hv_vhca->agents[type] = agent;
+ mutex_unlock(&hv_vhca->agents_lock);
+
+ mlx5_hv_vhca_agents_update(hv_vhca);
+
+ return agent;
+}
+
+void mlx5_hv_vhca_agent_destroy(struct mlx5_hv_vhca_agent *agent)
+{
+ struct mlx5_hv_vhca *hv_vhca = agent->hv_vhca;
+
+ mutex_lock(&hv_vhca->agents_lock);
+
+ if (WARN_ON(agent != hv_vhca->agents[agent->type])) {
+ mutex_unlock(&hv_vhca->agents_lock);
+ return;
+ }
+
+ hv_vhca->agents[agent->type] = NULL;
+ mutex_unlock(&hv_vhca->agents_lock);
+
+ if (agent->cleanup)
+ agent->cleanup(agent);
+
+ kfree(agent);
+
+ mlx5_hv_vhca_agents_update(hv_vhca);
+}
+
+static int mlx5_hv_vhca_data_block_prepare(struct mlx5_hv_vhca_agent *agent,
+ struct mlx5_hv_vhca_data_block *data_block,
+ void *src, int len, int *offset)
+{
+ int bytes = min_t(int, (int)sizeof(data_block->data), len);
+
+ data_block->sequence = agent->seq;
+ data_block->offset = (*offset)++;
+ memcpy(data_block->data, src, bytes);
+
+ return bytes;
+}
+
+static void mlx5_hv_vhca_agent_seq_update(struct mlx5_hv_vhca_agent *agent)
+{
+ agent->seq++;
+}
+
+int mlx5_hv_vhca_agent_write(struct mlx5_hv_vhca_agent *agent,
+ void *buf, int len)
+{
+ int offset = agent->type * HV_CONFIG_BLOCK_SIZE_MAX;
+ int block_offset = 0;
+ int total = 0;
+ int err;
+
+ while (len) {
+ struct mlx5_hv_vhca_data_block data_block = {0};
+ int bytes;
+
+ bytes = mlx5_hv_vhca_data_block_prepare(agent, &data_block,
+ buf + total,
+ len, &block_offset);
+ if (!bytes)
+ return -ENOMEM;
+
+ err = mlx5_hv_write_config(agent->hv_vhca->dev, &data_block,
+ sizeof(data_block), offset);
+ if (err)
+ return err;
+
+ total += bytes;
+ len -= bytes;
+ }
+
+ mlx5_hv_vhca_agent_seq_update(agent);
+
+ return 0;
+}
+
+void *mlx5_hv_vhca_agent_priv(struct mlx5_hv_vhca_agent *agent)
+{
+ return agent->priv;
+}
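
An agent owner wires its callbacks through mlx5_hv_vhca_agent_create() and
publishes data with mlx5_hv_vhca_agent_write(), which fragments the buffer
into data-block-sized chunks sharing one sequence number. A hypothetical
stats-style agent (struct example_state and both helpers are illustrative):

	struct example_state {
		void *buf;
		int len;
	};

	static void example_invalidate(struct mlx5_hv_vhca_agent *agent,
				       u64 block_mask)
	{
		struct example_state *st = mlx5_hv_vhca_agent_priv(agent);

		/* re-publish the whole state on any invalidation */
		mlx5_hv_vhca_agent_write(agent, st->buf, st->len);
	}

	static struct mlx5_hv_vhca_agent *
	example_register(struct mlx5_hv_vhca *hv_vhca, struct example_state *st)
	{
		return mlx5_hv_vhca_agent_create(hv_vhca,
						 MLX5_HV_VHCA_AGENT_STATS,
						 NULL, /* no control cb */
						 example_invalidate,
						 NULL, /* no cleanup cb */
						 st);
	}
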
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/hv_vhca.h b/drivers/net/ethernet/mellanox/mlx5/core/lib/hv_vhca.h
new file mode 100644
index 000000000000..4bad6a5fde56
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/hv_vhca.h
@@ -0,0 +1,104 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2019 Mellanox Technologies. */
+
+#ifndef __LIB_HV_VHCA_H__
+#define __LIB_HV_VHCA_H__
+
+#include "en.h"
+#include "lib/hv.h"
+
+struct mlx5_hv_vhca_agent;
+struct mlx5_hv_vhca;
+struct mlx5_hv_vhca_control_block;
+
+enum mlx5_hv_vhca_agent_type {
+ MLX5_HV_VHCA_AGENT_CONTROL = 0,
+ MLX5_HV_VHCA_AGENT_STATS = 1,
+ MLX5_HV_VHCA_AGENT_MAX = 32,
+};
+
+#if IS_ENABLED(CONFIG_PCI_HYPERV_INTERFACE)
+
+struct mlx5_hv_vhca_control_block {
+ u32 capabilities;
+ u32 control;
+ u16 command;
+ u16 command_ack;
+ u16 version;
+ u16 rings;
+ u32 reserved1[28];
+};
+
+struct mlx5_hv_vhca *mlx5_hv_vhca_create(struct mlx5_core_dev *dev);
+void mlx5_hv_vhca_destroy(struct mlx5_hv_vhca *hv_vhca);
+int mlx5_hv_vhca_init(struct mlx5_hv_vhca *hv_vhca);
+void mlx5_hv_vhca_cleanup(struct mlx5_hv_vhca *hv_vhca);
+void mlx5_hv_vhca_invalidate(void *context, u64 block_mask);
+
+struct mlx5_hv_vhca_agent *
+mlx5_hv_vhca_agent_create(struct mlx5_hv_vhca *hv_vhca,
+ enum mlx5_hv_vhca_agent_type type,
+ void (*control)(struct mlx5_hv_vhca_agent*,
+ struct mlx5_hv_vhca_control_block *block),
+ void (*invalidate)(struct mlx5_hv_vhca_agent*,
+ u64 block_mask),
+ void (*cleanup)(struct mlx5_hv_vhca_agent *agent),
+ void *context);
+
+void mlx5_hv_vhca_agent_destroy(struct mlx5_hv_vhca_agent *agent);
+int mlx5_hv_vhca_agent_write(struct mlx5_hv_vhca_agent *agent,
+ void *buf, int len);
+void *mlx5_hv_vhca_agent_priv(struct mlx5_hv_vhca_agent *agent);
+
+#else
+
+static inline struct mlx5_hv_vhca *
+mlx5_hv_vhca_create(struct mlx5_core_dev *dev)
+{
+ return NULL;
+}
+
+static inline void mlx5_hv_vhca_destroy(struct mlx5_hv_vhca *hv_vhca)
+{
+}
+
+static inline int mlx5_hv_vhca_init(struct mlx5_hv_vhca *hv_vhca)
+{
+ return 0;
+}
+
+static inline void mlx5_hv_vhca_cleanup(struct mlx5_hv_vhca *hv_vhca)
+{
+}
+
+static inline void mlx5_hv_vhca_invalidate(void *context,
+ u64 block_mask)
+{
+}
+
+static inline struct mlx5_hv_vhca_agent *
+mlx5_hv_vhca_agent_create(struct mlx5_hv_vhca *hv_vhca,
+ enum mlx5_hv_vhca_agent_type type,
+ void (*control)(struct mlx5_hv_vhca_agent*,
+ struct mlx5_hv_vhca_control_block *block),
+ void (*invalidate)(struct mlx5_hv_vhca_agent*,
+ u64 block_mask),
+ void (*cleanup)(struct mlx5_hv_vhca_agent *agent),
+ void *context)
+{
+ return NULL;
+}
+
+static inline void mlx5_hv_vhca_agent_destroy(struct mlx5_hv_vhca_agent *agent)
+{
+}
+
+static inline int
+mlx5_hv_vhca_agent_write(struct mlx5_hv_vhca_agent *agent,
+ void *buf, int len)
+{
+ return 0;
+}
+#endif
+
+#endif /* __LIB_HV_VHCA_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/vxlan.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/vxlan.c
index b9d4f4e19ff9..148b55c3db7a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/vxlan.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/vxlan.c
@@ -32,6 +32,7 @@
#include <linux/kernel.h>
#include <linux/module.h>
+#include <linux/refcount.h>
#include <linux/mlx5/driver.h>
#include <net/vxlan.h>
#include "mlx5_core.h"
@@ -48,7 +49,7 @@ struct mlx5_vxlan {
struct mlx5_vxlan_port {
struct hlist_node hlist;
- atomic_t refcount;
+ refcount_t refcount;
u16 udp_port;
};
@@ -113,7 +114,7 @@ int mlx5_vxlan_add_port(struct mlx5_vxlan *vxlan, u16 port)
vxlanp = mlx5_vxlan_lookup_port(vxlan, port);
if (vxlanp) {
- atomic_inc(&vxlanp->refcount);
+ refcount_inc(&vxlanp->refcount);
return 0;
}
@@ -137,7 +138,7 @@ int mlx5_vxlan_add_port(struct mlx5_vxlan *vxlan, u16 port)
}
vxlanp->udp_port = port;
- atomic_set(&vxlanp->refcount, 1);
+ refcount_set(&vxlanp->refcount, 1);
spin_lock_bh(&vxlan->lock);
hash_add(vxlan->htable, &vxlanp->hlist, port);
@@ -170,7 +171,7 @@ int mlx5_vxlan_del_port(struct mlx5_vxlan *vxlan, u16 port)
goto out_unlock;
}
- if (atomic_dec_and_test(&vxlanp->refcount)) {
+ if (refcount_dec_and_test(&vxlanp->refcount)) {
hash_del(&vxlanp->hlist);
remove = true;
}
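
The atomic_t to refcount_t conversion adds overflow/underflow checking for
free: refcount_inc() and refcount_dec_and_test() WARN and saturate instead
of silently wrapping. The add/del pairing is otherwise unchanged, as this
sketch of the release side shows:

	static bool example_put_port(struct mlx5_vxlan_port *vxlanp)
	{
		/* WARNs on underflow, where atomic_dec_and_test() would
		 * silently wrap; returns true for the last reference */
		return refcount_dec_and_test(&vxlanp->refcount);
	}
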
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index b15b27a497fc..61388ca7233b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -69,6 +69,7 @@
#include "lib/pci_vsc.h"
#include "diag/fw_tracer.h"
#include "ecpf.h"
+#include "lib/hv_vhca.h"
MODULE_AUTHOR("Eli Cohen <eli@mellanox.com>");
MODULE_DESCRIPTION("Mellanox 5th generation network adapters (ConnectX series) core driver");
@@ -826,11 +827,7 @@ static int mlx5_init_once(struct mlx5_core_dev *dev)
goto err_eq_cleanup;
}
- err = mlx5_cq_debugfs_init(dev);
- if (err) {
- mlx5_core_err(dev, "failed to initialize cq debugfs\n");
- goto err_events_cleanup;
- }
+ mlx5_cq_debugfs_init(dev);
mlx5_init_qp_table(dev);
@@ -874,6 +871,7 @@ static int mlx5_init_once(struct mlx5_core_dev *dev)
}
dev->tracer = mlx5_fw_tracer_create(dev);
+ dev->hv_vhca = mlx5_hv_vhca_create(dev);
return 0;
@@ -891,7 +889,6 @@ err_tables_cleanup:
mlx5_cleanup_mkey_table(dev);
mlx5_cleanup_qp_table(dev);
mlx5_cq_debugfs_cleanup(dev);
-err_events_cleanup:
mlx5_events_cleanup(dev);
err_eq_cleanup:
mlx5_eq_table_cleanup(dev);
@@ -905,6 +902,7 @@ err_devcom:
static void mlx5_cleanup_once(struct mlx5_core_dev *dev)
{
+ mlx5_hv_vhca_destroy(dev->hv_vhca);
mlx5_fw_tracer_destroy(dev->tracer);
mlx5_fpga_cleanup(dev);
mlx5_eswitch_cleanup(dev->priv.eswitch);
@@ -1072,6 +1070,8 @@ static int mlx5_load(struct mlx5_core_dev *dev)
goto err_fw_tracer;
}
+ mlx5_hv_vhca_init(dev->hv_vhca);
+
err = mlx5_fpga_device_start(dev);
if (err) {
mlx5_core_err(dev, "fpga device start failed %d\n", err);
@@ -1127,6 +1127,7 @@ err_tls_start:
err_ipsec_start:
mlx5_fpga_device_stop(dev);
err_fpga_start:
+ mlx5_hv_vhca_cleanup(dev->hv_vhca);
mlx5_fw_tracer_cleanup(dev->tracer);
err_fw_tracer:
mlx5_eq_table_destroy(dev);
@@ -1147,6 +1148,7 @@ static void mlx5_unload(struct mlx5_core_dev *dev)
mlx5_accel_ipsec_cleanup(dev);
mlx5_accel_tls_cleanup(dev);
mlx5_fpga_device_stop(dev);
+ mlx5_hv_vhca_cleanup(dev->hv_vhca);
mlx5_fw_tracer_cleanup(dev->tracer);
mlx5_eq_table_destroy(dev);
mlx5_irq_table_destroy(dev);
@@ -1217,8 +1219,10 @@ static int mlx5_unload_one(struct mlx5_core_dev *dev, bool cleanup)
{
int err = 0;
- if (cleanup)
+ if (cleanup) {
+ mlx5_unregister_device(dev);
mlx5_drain_health_wq(dev);
+ }
mutex_lock(&dev->intf_state_mutex);
if (!test_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state)) {
@@ -1369,7 +1373,6 @@ static void remove_one(struct pci_dev *pdev)
mlx5_crdump_disable(dev);
mlx5_devlink_unregister(devlink);
- mlx5_unregister_device(dev);
if (mlx5_unload_one(dev, true)) {
mlx5_core_err(dev, "mlx5_unload_one failed\n");
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
index 471bbc48bc1f..87b75b2207c4 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
@@ -146,7 +146,7 @@ u64 mlx5_read_internal_timer(struct mlx5_core_dev *dev,
void mlx5_cmd_trigger_completions(struct mlx5_core_dev *dev);
void mlx5_cmd_flush(struct mlx5_core_dev *dev);
-int mlx5_cq_debugfs_init(struct mlx5_core_dev *dev);
+void mlx5_cq_debugfs_init(struct mlx5_core_dev *dev);
void mlx5_cq_debugfs_cleanup(struct mlx5_core_dev *dev);
int mlx5_query_pcam_reg(struct mlx5_core_dev *dev, u32 *pcam, u8 feature_group,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/vport.c b/drivers/net/ethernet/mellanox/mlx5/core/vport.c
index c912d82ca64b..30f7848a6f88 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/vport.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/vport.c
@@ -122,12 +122,13 @@ void mlx5_query_min_inline(struct mlx5_core_dev *mdev,
u8 *min_inline_mode)
{
switch (MLX5_CAP_ETH(mdev, wqe_inline_mode)) {
+ case MLX5_CAP_INLINE_MODE_VPORT_CONTEXT:
+ if (!mlx5_query_nic_vport_min_inline(mdev, 0, min_inline_mode))
+ break;
+ /* fall through */
case MLX5_CAP_INLINE_MODE_L2:
*min_inline_mode = MLX5_INLINE_MODE_L2;
break;
- case MLX5_CAP_INLINE_MODE_VPORT_CONTEXT:
- mlx5_query_nic_vport_min_inline(mdev, 0, min_inline_mode);
- break;
case MLX5_CAP_INLINE_MODE_NOT_REQUIRED:
*min_inline_mode = MLX5_INLINE_MODE_NONE;
break;
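
The reordered switch makes MLX5_CAP_INLINE_MODE_VPORT_CONTEXT degrade
gracefully: if the vport context query fails, execution falls through to
the L2 case instead of leaving *min_inline_mode stale. Callers keep the
simple pattern (sketch):

	static u8 example_get_min_inline(struct mlx5_core_dev *mdev)
	{
		u8 min_inline = MLX5_INLINE_MODE_NONE;

		mlx5_query_min_inline(mdev, &min_inline);
		/* for the capability values handled above, min_inline is
		 * now always a valid MLX5_INLINE_MODE_* value */
		return min_inline;
	}
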
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/wq.c b/drivers/net/ethernet/mellanox/mlx5/core/wq.c
index 953cc8efba69..dd2315ce4441 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/wq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/wq.c
@@ -44,6 +44,11 @@ u32 mlx5_cqwq_get_size(struct mlx5_cqwq *wq)
return wq->fbc.sz_m1 + 1;
}
+u8 mlx5_cqwq_get_log_stride_size(struct mlx5_cqwq *wq)
+{
+ return wq->fbc.log_stride;
+}
+
u32 mlx5_wq_ll_get_size(struct mlx5_wq_ll *wq)
{
return (u32)wq->fbc.sz_m1 + 1;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/wq.h b/drivers/net/ethernet/mellanox/mlx5/core/wq.h
index f1ec58c9e9e3..55791f71a778 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/wq.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/wq.h
@@ -89,6 +89,7 @@ int mlx5_cqwq_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
void *cqc, struct mlx5_cqwq *wq,
struct mlx5_wq_ctrl *wq_ctrl);
u32 mlx5_cqwq_get_size(struct mlx5_cqwq *wq);
+u8 mlx5_cqwq_get_log_stride_size(struct mlx5_cqwq *wq);
int mlx5_wq_ll_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
void *wqc, struct mlx5_wq_ll *wq,
diff --git a/drivers/net/ethernet/mellanox/mlxsw/Kconfig b/drivers/net/ethernet/mellanox/mlxsw/Kconfig
index 06c80343d9ed..f458fd1ce9f8 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/Kconfig
+++ b/drivers/net/ethernet/mellanox/mlxsw/Kconfig
@@ -71,7 +71,7 @@ config MLXSW_SWITCHX2
module will be called mlxsw_switchx2.
config MLXSW_SPECTRUM
- tristate "Mellanox Technologies Spectrum support"
+ tristate "Mellanox Technologies Spectrum family support"
depends on MLXSW_CORE && MLXSW_PCI && NET_SWITCHDEV && VLAN_8021Q
depends on PSAMPLE || PSAMPLE=n
depends on BRIDGE || BRIDGE=n
@@ -87,8 +87,8 @@ config MLXSW_SPECTRUM
select NET_PTP_CLASSIFY if PTP_1588_CLOCK
default m
---help---
- This driver supports Mellanox Technologies Spectrum Ethernet
- Switch ASICs.
+ This driver supports Mellanox Technologies
+ Spectrum/Spectrum-2/Spectrum-3 Ethernet Switch ASICs.
To compile this driver as a module, choose M here: the
module will be called mlxsw_spectrum.
diff --git a/drivers/net/ethernet/mellanox/mlxsw/Makefile b/drivers/net/ethernet/mellanox/mlxsw/Makefile
index 171b36bd8a4e..0e86a581d45b 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/Makefile
+++ b/drivers/net/ethernet/mellanox/mlxsw/Makefile
@@ -29,7 +29,7 @@ mlxsw_spectrum-objs := spectrum.o spectrum_buffers.o \
spectrum_mr_tcam.o spectrum_mr.o \
spectrum_qdisc.o spectrum_span.o \
spectrum_nve.o spectrum_nve_vxlan.o \
- spectrum_dpipe.o
+ spectrum_dpipe.o spectrum_trap.o
mlxsw_spectrum-$(CONFIG_MLXSW_SPECTRUM_DCB) += spectrum_dcb.o
mlxsw_spectrum-$(CONFIG_PTP_1588_CLOCK) += spectrum_ptp.o
obj-$(CONFIG_MLXSW_MINIMAL) += mlxsw_minimal.o
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.c b/drivers/net/ethernet/mellanox/mlxsw/core.c
index 17ceac7505e5..963a2b4b61b1 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/core.c
@@ -1017,6 +1017,54 @@ static int mlxsw_devlink_flash_update(struct devlink *devlink,
component, extack);
}
+static int mlxsw_devlink_trap_init(struct devlink *devlink,
+ const struct devlink_trap *trap,
+ void *trap_ctx)
+{
+ struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
+ struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
+
+ if (!mlxsw_driver->trap_init)
+ return -EOPNOTSUPP;
+ return mlxsw_driver->trap_init(mlxsw_core, trap, trap_ctx);
+}
+
+static void mlxsw_devlink_trap_fini(struct devlink *devlink,
+ const struct devlink_trap *trap,
+ void *trap_ctx)
+{
+ struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
+ struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
+
+ if (!mlxsw_driver->trap_fini)
+ return;
+ mlxsw_driver->trap_fini(mlxsw_core, trap, trap_ctx);
+}
+
+static int mlxsw_devlink_trap_action_set(struct devlink *devlink,
+ const struct devlink_trap *trap,
+ enum devlink_trap_action action)
+{
+ struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
+ struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
+
+ if (!mlxsw_driver->trap_action_set)
+ return -EOPNOTSUPP;
+ return mlxsw_driver->trap_action_set(mlxsw_core, trap, action);
+}
+
+static int
+mlxsw_devlink_trap_group_init(struct devlink *devlink,
+ const struct devlink_trap_group *group)
+{
+ struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
+ struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
+
+ if (!mlxsw_driver->trap_group_init)
+ return -EOPNOTSUPP;
+ return mlxsw_driver->trap_group_init(mlxsw_core, group);
+}
+
static const struct devlink_ops mlxsw_devlink_ops = {
.reload = mlxsw_devlink_core_bus_device_reload,
.port_type_set = mlxsw_devlink_port_type_set,
@@ -1034,6 +1082,10 @@ static const struct devlink_ops mlxsw_devlink_ops = {
.sb_occ_tc_port_bind_get = mlxsw_devlink_sb_occ_tc_port_bind_get,
.info_get = mlxsw_devlink_info_get,
.flash_update = mlxsw_devlink_flash_update,
+ .trap_init = mlxsw_devlink_trap_init,
+ .trap_fini = mlxsw_devlink_trap_fini,
+ .trap_action_set = mlxsw_devlink_trap_action_set,
+ .trap_group_init = mlxsw_devlink_trap_group_init,
};
static int
@@ -1477,6 +1529,18 @@ void mlxsw_core_trap_unregister(struct mlxsw_core *mlxsw_core,
}
EXPORT_SYMBOL(mlxsw_core_trap_unregister);
+int mlxsw_core_trap_action_set(struct mlxsw_core *mlxsw_core,
+ const struct mlxsw_listener *listener,
+ enum mlxsw_reg_hpkt_action action)
+{
+ char hpkt_pl[MLXSW_REG_HPKT_LEN];
+
+ mlxsw_reg_hpkt_pack(hpkt_pl, action, listener->trap_id,
+ listener->trap_group, listener->is_ctrl);
+ return mlxsw_reg_write(mlxsw_core, MLXSW_REG(hpkt), hpkt_pl);
+}
+EXPORT_SYMBOL(mlxsw_core_trap_action_set);
+
static u64 mlxsw_core_tid_get(struct mlxsw_core *mlxsw_core)
{
return atomic64_inc_return(&mlxsw_core->emad.tid);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.h b/drivers/net/ethernet/mellanox/mlxsw/core.h
index 8efcff4b59cb..b65a17d49e43 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/core.h
@@ -128,6 +128,9 @@ int mlxsw_core_trap_register(struct mlxsw_core *mlxsw_core,
void mlxsw_core_trap_unregister(struct mlxsw_core *mlxsw_core,
const struct mlxsw_listener *listener,
void *priv);
+int mlxsw_core_trap_action_set(struct mlxsw_core *mlxsw_core,
+ const struct mlxsw_listener *listener,
+ enum mlxsw_reg_hpkt_action action);
typedef void mlxsw_reg_trans_cb_t(struct mlxsw_core *mlxsw_core, char *payload,
size_t payload_len, unsigned long cb_priv);
@@ -289,6 +292,15 @@ struct mlxsw_driver {
int (*flash_update)(struct mlxsw_core *mlxsw_core,
const char *file_name, const char *component,
struct netlink_ext_ack *extack);
+ int (*trap_init)(struct mlxsw_core *mlxsw_core,
+ const struct devlink_trap *trap, void *trap_ctx);
+ void (*trap_fini)(struct mlxsw_core *mlxsw_core,
+ const struct devlink_trap *trap, void *trap_ctx);
+ int (*trap_action_set)(struct mlxsw_core *mlxsw_core,
+ const struct devlink_trap *trap,
+ enum devlink_trap_action action);
+ int (*trap_group_init)(struct mlxsw_core *mlxsw_core,
+ const struct devlink_trap_group *group);
void (*txhdr_construct)(struct sk_buff *skb,
const struct mlxsw_tx_info *tx_info);
int (*resources_register)(struct mlxsw_core *mlxsw_core);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci.h b/drivers/net/ethernet/mellanox/mlxsw/pci.h
index 946339e13eb9..5b1323645a5d 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/pci.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/pci.h
@@ -9,6 +9,7 @@
#define PCI_DEVICE_ID_MELLANOX_SWITCHX2 0xc738
#define PCI_DEVICE_ID_MELLANOX_SPECTRUM 0xcb84
#define PCI_DEVICE_ID_MELLANOX_SPECTRUM2 0xcf6c
+#define PCI_DEVICE_ID_MELLANOX_SPECTRUM3 0xcf70
#define PCI_DEVICE_ID_MELLANOX_SWITCHIB 0xcb20
#define PCI_DEVICE_ID_MELLANOX_SWITCHIB2 0xcf08
diff --git a/drivers/net/ethernet/mellanox/mlxsw/reg.h b/drivers/net/ethernet/mellanox/mlxsw/reg.h
index ead36702549a..baa20cdd65df 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/reg.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/reg.h
@@ -5422,6 +5422,14 @@ enum mlxsw_reg_htgt_trap_group {
MLXSW_REG_HTGT_TRAP_GROUP_SP_LBERROR,
MLXSW_REG_HTGT_TRAP_GROUP_SP_PTP0,
MLXSW_REG_HTGT_TRAP_GROUP_SP_PTP1,
+
+ __MLXSW_REG_HTGT_TRAP_GROUP_MAX,
+ MLXSW_REG_HTGT_TRAP_GROUP_MAX = __MLXSW_REG_HTGT_TRAP_GROUP_MAX - 1
+};
+
+enum mlxsw_reg_htgt_discard_trap_group {
+ MLXSW_REG_HTGT_DISCARD_TRAP_GROUP_BASE = MLXSW_REG_HTGT_TRAP_GROUP_MAX,
+ MLXSW_REG_HTGT_TRAP_GROUP_SP_L2_DISCARDS,
};
/* reg_htgt_trap_group
@@ -5559,6 +5567,8 @@ enum mlxsw_reg_hpkt_action {
MLXSW_REG_HPKT_ACTION_DISCARD,
MLXSW_REG_HPKT_ACTION_SOFT_DISCARD,
MLXSW_REG_HPKT_ACTION_TRAP_AND_SOFT_DISCARD,
+ MLXSW_REG_HPKT_ACTION_TRAP_EXCEPTION_TO_CPU,
+ MLXSW_REG_HPKT_ACTION_SET_FW_DEFAULT = 15,
};
/* reg_hpkt_action
@@ -5569,6 +5579,8 @@ enum mlxsw_reg_hpkt_action {
* 3 - Discard.
* 4 - Soft discard (allow other traps to act on the packet).
* 5 - Trap and soft discard (allow other traps to overwrite this trap).
+ * 6 - Trap to CPU (CPU receives sole copy) and count it as error.
+ * 15 - Restore the firmware's default action.
* Access: RW
*
* Note: Must be set to 0 (forward) for event trap IDs, as they are already
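Two enum idioms in the htgt hunk above deserve a note: the __MAX/MAX pair lets later code refer to the last real trap group, and the discard enum continues numbering from it so the two ID spaces can never overlap. In miniature (illustrative names):

    enum first  { A, B, __FIRST_MAX, FIRST_MAX = __FIRST_MAX - 1 };
    enum second { SECOND_BASE = FIRST_MAX, C };  /* C == FIRST_MAX + 1 */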
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
index eda9c23e87b2..7de9833fc60b 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
@@ -65,6 +65,7 @@ static const struct mlxsw_fw_rev mlxsw_sp1_fw_rev = {
static const char mlxsw_sp1_driver_name[] = "mlxsw_spectrum";
static const char mlxsw_sp2_driver_name[] = "mlxsw_spectrum2";
+static const char mlxsw_sp3_driver_name[] = "mlxsw_spectrum3";
static const char mlxsw_sp_driver_version[] = "1.0";
static const unsigned char mlxsw_sp1_mac_mask[ETH_ALEN] = {
@@ -1625,7 +1626,7 @@ mlxsw_sp_setup_tc_block_flower_bind(struct mlxsw_sp_port *mlxsw_sp_port,
}
flow_block_cb_incref(block_cb);
err = mlxsw_sp_acl_block_bind(mlxsw_sp, acl_block,
- mlxsw_sp_port, ingress);
+ mlxsw_sp_port, ingress, f->extack);
if (err)
goto err_block_bind;
@@ -4664,6 +4665,12 @@ static int mlxsw_sp_init(struct mlxsw_core *mlxsw_core,
goto err_traps_init;
}
+ err = mlxsw_sp_devlink_traps_init(mlxsw_sp);
+ if (err) {
+ dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize devlink traps\n");
+ goto err_devlink_traps_init;
+ }
+
err = mlxsw_sp_buffers_init(mlxsw_sp);
if (err) {
dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize buffers\n");
@@ -4797,6 +4804,8 @@ err_span_init:
err_lag_init:
mlxsw_sp_buffers_fini(mlxsw_sp);
err_buffers_init:
+ mlxsw_sp_devlink_traps_fini(mlxsw_sp);
+err_devlink_traps_init:
mlxsw_sp_traps_fini(mlxsw_sp);
err_traps_init:
mlxsw_sp_fids_fini(mlxsw_sp);
@@ -4869,6 +4878,7 @@ static void mlxsw_sp_fini(struct mlxsw_core *mlxsw_core)
mlxsw_sp_span_fini(mlxsw_sp);
mlxsw_sp_lag_fini(mlxsw_sp);
mlxsw_sp_buffers_fini(mlxsw_sp);
+ mlxsw_sp_devlink_traps_fini(mlxsw_sp);
mlxsw_sp_traps_fini(mlxsw_sp);
mlxsw_sp_fids_fini(mlxsw_sp);
mlxsw_sp_kvdl_fini(mlxsw_sp);
@@ -5026,6 +5036,26 @@ static int mlxsw_sp1_resources_kvd_register(struct mlxsw_core *mlxsw_core)
return 0;
}
+static int mlxsw_sp2_resources_kvd_register(struct mlxsw_core *mlxsw_core)
+{
+ struct devlink *devlink = priv_to_devlink(mlxsw_core);
+ struct devlink_resource_size_params kvd_size_params;
+ u32 kvd_size;
+
+ if (!MLXSW_CORE_RES_VALID(mlxsw_core, KVD_SIZE))
+ return -EIO;
+
+ kvd_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE);
+ devlink_resource_size_params_init(&kvd_size_params, kvd_size, kvd_size,
+ MLXSW_SP_KVD_GRANULARITY,
+ DEVLINK_RESOURCE_UNIT_ENTRY);
+
+ return devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD,
+ kvd_size, MLXSW_SP_RESOURCE_KVD,
+ DEVLINK_RESOURCE_ID_PARENT_TOP,
+ &kvd_size_params);
+}
+
static int mlxsw_sp1_resources_register(struct mlxsw_core *mlxsw_core)
{
return mlxsw_sp1_resources_kvd_register(mlxsw_core);
@@ -5033,7 +5063,7 @@ static int mlxsw_sp1_resources_register(struct mlxsw_core *mlxsw_core)
static int mlxsw_sp2_resources_register(struct mlxsw_core *mlxsw_core)
{
- return 0;
+ return mlxsw_sp2_resources_kvd_register(mlxsw_core);
}
static int mlxsw_sp_kvd_sizes_get(struct mlxsw_core *mlxsw_core,
@@ -5230,6 +5260,10 @@ static struct mlxsw_driver mlxsw_sp1_driver = {
.sb_occ_port_pool_get = mlxsw_sp_sb_occ_port_pool_get,
.sb_occ_tc_port_bind_get = mlxsw_sp_sb_occ_tc_port_bind_get,
.flash_update = mlxsw_sp_flash_update,
+ .trap_init = mlxsw_sp_trap_init,
+ .trap_fini = mlxsw_sp_trap_fini,
+ .trap_action_set = mlxsw_sp_trap_action_set,
+ .trap_group_init = mlxsw_sp_trap_group_init,
.txhdr_construct = mlxsw_sp_txhdr_construct,
.resources_register = mlxsw_sp1_resources_register,
.kvd_sizes_get = mlxsw_sp_kvd_sizes_get,
@@ -5260,6 +5294,43 @@ static struct mlxsw_driver mlxsw_sp2_driver = {
.sb_occ_port_pool_get = mlxsw_sp_sb_occ_port_pool_get,
.sb_occ_tc_port_bind_get = mlxsw_sp_sb_occ_tc_port_bind_get,
.flash_update = mlxsw_sp_flash_update,
+ .trap_init = mlxsw_sp_trap_init,
+ .trap_fini = mlxsw_sp_trap_fini,
+ .trap_action_set = mlxsw_sp_trap_action_set,
+ .trap_group_init = mlxsw_sp_trap_group_init,
+ .txhdr_construct = mlxsw_sp_txhdr_construct,
+ .resources_register = mlxsw_sp2_resources_register,
+ .params_register = mlxsw_sp2_params_register,
+ .params_unregister = mlxsw_sp2_params_unregister,
+ .ptp_transmitted = mlxsw_sp_ptp_transmitted,
+ .txhdr_len = MLXSW_TXHDR_LEN,
+ .profile = &mlxsw_sp2_config_profile,
+ .res_query_enabled = true,
+};
+
+static struct mlxsw_driver mlxsw_sp3_driver = {
+ .kind = mlxsw_sp3_driver_name,
+ .priv_size = sizeof(struct mlxsw_sp),
+ .init = mlxsw_sp2_init,
+ .fini = mlxsw_sp_fini,
+ .basic_trap_groups_set = mlxsw_sp_basic_trap_groups_set,
+ .port_split = mlxsw_sp_port_split,
+ .port_unsplit = mlxsw_sp_port_unsplit,
+ .sb_pool_get = mlxsw_sp_sb_pool_get,
+ .sb_pool_set = mlxsw_sp_sb_pool_set,
+ .sb_port_pool_get = mlxsw_sp_sb_port_pool_get,
+ .sb_port_pool_set = mlxsw_sp_sb_port_pool_set,
+ .sb_tc_pool_bind_get = mlxsw_sp_sb_tc_pool_bind_get,
+ .sb_tc_pool_bind_set = mlxsw_sp_sb_tc_pool_bind_set,
+ .sb_occ_snapshot = mlxsw_sp_sb_occ_snapshot,
+ .sb_occ_max_clear = mlxsw_sp_sb_occ_max_clear,
+ .sb_occ_port_pool_get = mlxsw_sp_sb_occ_port_pool_get,
+ .sb_occ_tc_port_bind_get = mlxsw_sp_sb_occ_tc_port_bind_get,
+ .flash_update = mlxsw_sp_flash_update,
+ .trap_init = mlxsw_sp_trap_init,
+ .trap_fini = mlxsw_sp_trap_fini,
+ .trap_action_set = mlxsw_sp_trap_action_set,
+ .trap_group_init = mlxsw_sp_trap_group_init,
.txhdr_construct = mlxsw_sp_txhdr_construct,
.resources_register = mlxsw_sp2_resources_register,
.params_register = mlxsw_sp2_params_register,
@@ -6304,6 +6375,16 @@ static struct pci_driver mlxsw_sp2_pci_driver = {
.id_table = mlxsw_sp2_pci_id_table,
};
+static const struct pci_device_id mlxsw_sp3_pci_id_table[] = {
+ {PCI_VDEVICE(MELLANOX, PCI_DEVICE_ID_MELLANOX_SPECTRUM3), 0},
+ {0, },
+};
+
+static struct pci_driver mlxsw_sp3_pci_driver = {
+ .name = mlxsw_sp3_driver_name,
+ .id_table = mlxsw_sp3_pci_id_table,
+};
+
static int __init mlxsw_sp_module_init(void)
{
int err;
@@ -6319,6 +6400,10 @@ static int __init mlxsw_sp_module_init(void)
if (err)
goto err_sp2_core_driver_register;
+ err = mlxsw_core_driver_register(&mlxsw_sp3_driver);
+ if (err)
+ goto err_sp3_core_driver_register;
+
err = mlxsw_pci_driver_register(&mlxsw_sp1_pci_driver);
if (err)
goto err_sp1_pci_driver_register;
@@ -6327,11 +6412,19 @@ static int __init mlxsw_sp_module_init(void)
if (err)
goto err_sp2_pci_driver_register;
+ err = mlxsw_pci_driver_register(&mlxsw_sp3_pci_driver);
+ if (err)
+ goto err_sp3_pci_driver_register;
+
return 0;
+err_sp3_pci_driver_register:
+ mlxsw_pci_driver_unregister(&mlxsw_sp2_pci_driver);
err_sp2_pci_driver_register:
mlxsw_pci_driver_unregister(&mlxsw_sp1_pci_driver);
err_sp1_pci_driver_register:
+ mlxsw_core_driver_unregister(&mlxsw_sp3_driver);
+err_sp3_core_driver_register:
mlxsw_core_driver_unregister(&mlxsw_sp2_driver);
err_sp2_core_driver_register:
mlxsw_core_driver_unregister(&mlxsw_sp1_driver);
@@ -6343,8 +6436,10 @@ err_sp1_core_driver_register:
static void __exit mlxsw_sp_module_exit(void)
{
+ mlxsw_pci_driver_unregister(&mlxsw_sp3_pci_driver);
mlxsw_pci_driver_unregister(&mlxsw_sp2_pci_driver);
mlxsw_pci_driver_unregister(&mlxsw_sp1_pci_driver);
+ mlxsw_core_driver_unregister(&mlxsw_sp3_driver);
mlxsw_core_driver_unregister(&mlxsw_sp2_driver);
mlxsw_core_driver_unregister(&mlxsw_sp1_driver);
unregister_inet6addr_validator_notifier(&mlxsw_sp_inet6addr_valid_nb);
@@ -6359,4 +6454,5 @@ MODULE_AUTHOR("Jiri Pirko <jiri@mellanox.com>");
MODULE_DESCRIPTION("Mellanox Spectrum driver");
MODULE_DEVICE_TABLE(pci, mlxsw_sp1_pci_id_table);
MODULE_DEVICE_TABLE(pci, mlxsw_sp2_pci_id_table);
+MODULE_DEVICE_TABLE(pci, mlxsw_sp3_pci_id_table);
MODULE_FIRMWARE(MLXSW_SP1_FW_FILENAME);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
index 6664119fb0c8..20c14bba9ccb 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
@@ -623,7 +623,8 @@ struct mlxsw_sp_acl_rule_info {
unsigned int priority;
struct mlxsw_afk_element_values values;
struct mlxsw_afa_block *act_block;
- u8 action_created:1;
+ u8 action_created:1,
+ egress_bind_blocker:1;
unsigned int counter_index;
};
@@ -642,6 +643,7 @@ struct mlxsw_sp_acl_block {
struct mlxsw_sp *mlxsw_sp;
unsigned int rule_count;
unsigned int disable_count;
+ unsigned int egress_blocker_rule_count;
struct net *net;
};
@@ -657,7 +659,8 @@ void mlxsw_sp_acl_block_destroy(struct mlxsw_sp_acl_block *block);
int mlxsw_sp_acl_block_bind(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_block *block,
struct mlxsw_sp_port *mlxsw_sp_port,
- bool ingress);
+ bool ingress,
+ struct netlink_ext_ack *extack);
int mlxsw_sp_acl_block_unbind(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_block *block,
struct mlxsw_sp_port *mlxsw_sp_port,
@@ -955,4 +958,17 @@ void mlxsw_sp_nve_fini(struct mlxsw_sp *mlxsw_sp);
int mlxsw_sp_nve_inc_parsing_depth_get(struct mlxsw_sp *mlxsw_sp);
void mlxsw_sp_nve_inc_parsing_depth_put(struct mlxsw_sp *mlxsw_sp);
+/* spectrum_trap.c */
+int mlxsw_sp_devlink_traps_init(struct mlxsw_sp *mlxsw_sp);
+void mlxsw_sp_devlink_traps_fini(struct mlxsw_sp *mlxsw_sp);
+int mlxsw_sp_trap_init(struct mlxsw_core *mlxsw_core,
+ const struct devlink_trap *trap, void *trap_ctx);
+void mlxsw_sp_trap_fini(struct mlxsw_core *mlxsw_core,
+ const struct devlink_trap *trap, void *trap_ctx);
+int mlxsw_sp_trap_action_set(struct mlxsw_core *mlxsw_core,
+ const struct devlink_trap *trap,
+ enum devlink_trap_action action);
+int mlxsw_sp_trap_group_init(struct mlxsw_core *mlxsw_core,
+ const struct devlink_trap_group *group);
+
#endif
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c
index 84a87d059333..150b3a144b83 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c
@@ -239,7 +239,8 @@ mlxsw_sp_acl_block_lookup(struct mlxsw_sp_acl_block *block,
int mlxsw_sp_acl_block_bind(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_block *block,
struct mlxsw_sp_port *mlxsw_sp_port,
- bool ingress)
+ bool ingress,
+ struct netlink_ext_ack *extack)
{
struct mlxsw_sp_acl_block_binding *binding;
int err;
@@ -247,6 +248,11 @@ int mlxsw_sp_acl_block_bind(struct mlxsw_sp *mlxsw_sp,
if (WARN_ON(mlxsw_sp_acl_block_lookup(block, mlxsw_sp_port, ingress)))
return -EEXIST;
+ if (!ingress && block->egress_blocker_rule_count) {
+ NL_SET_ERR_MSG_MOD(extack, "Block cannot be bound to egress because it contains unsupported rules");
+ return -EOPNOTSUPP;
+ }
+
binding = kzalloc(sizeof(*binding), GFP_KERNEL);
if (!binding)
return -ENOMEM;
@@ -672,6 +678,7 @@ int mlxsw_sp_acl_rule_add(struct mlxsw_sp *mlxsw_sp,
{
struct mlxsw_sp_acl_ruleset *ruleset = rule->ruleset;
const struct mlxsw_sp_acl_profile_ops *ops = ruleset->ht_key.ops;
+ struct mlxsw_sp_acl_block *block = ruleset->ht_key.block;
int err;
err = ops->rule_add(mlxsw_sp, ruleset->priv, rule->priv, rule->rulei);
@@ -689,14 +696,14 @@ int mlxsw_sp_acl_rule_add(struct mlxsw_sp *mlxsw_sp,
* one, to be directly bound to device. The rest of the
* rulesets are bound by "Goto action set".
*/
- err = mlxsw_sp_acl_ruleset_block_bind(mlxsw_sp, ruleset,
- ruleset->ht_key.block);
+ err = mlxsw_sp_acl_ruleset_block_bind(mlxsw_sp, ruleset, block);
if (err)
goto err_ruleset_block_bind;
}
list_add_tail(&rule->list, &mlxsw_sp->acl->rules);
- ruleset->ht_key.block->rule_count++;
+ block->rule_count++;
+ block->egress_blocker_rule_count += rule->rulei->egress_bind_blocker;
return 0;
err_ruleset_block_bind:
@@ -712,7 +719,9 @@ void mlxsw_sp_acl_rule_del(struct mlxsw_sp *mlxsw_sp,
{
struct mlxsw_sp_acl_ruleset *ruleset = rule->ruleset;
const struct mlxsw_sp_acl_profile_ops *ops = ruleset->ht_key.ops;
+ struct mlxsw_sp_acl_block *block = ruleset->ht_key.block;
+ block->egress_blocker_rule_count -= rule->rulei->egress_bind_blocker;
ruleset->ht_key.block->rule_count--;
list_del(&rule->list);
if (!ruleset->ht_key.chain_index &&
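The egress_blocker_rule_count added here turns the per-rule one-bit flag into an O(1) bind-time check, sparing a walk over every rule in the block. The invariant, condensed from the hunks above:

    block->egress_blocker_rule_count += rulei->egress_bind_blocker;  /* rule add */
    block->egress_blocker_rule_count -= rulei->egress_bind_blocker;  /* rule del */
    /* bind: !ingress && count != 0  ->  -EOPNOTSUPP, with an extack message */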
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c
index 202e9a246019..0ad1a24abfc6 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c
@@ -78,6 +78,16 @@ static int mlxsw_sp_flower_parse_actions(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_fid *fid;
u16 fid_index;
+ if (mlxsw_sp_acl_block_is_egress_bound(block)) {
+ NL_SET_ERR_MSG_MOD(extack, "Redirect action is not supported on egress");
+ return -EOPNOTSUPP;
+ }
+
+ /* Forbid the block containing this rulei from being bound
+ * to egress in the future.
+ */
+ rulei->egress_bind_blocker = 1;
+
fid = mlxsw_sp_acl_dummy_fid(mlxsw_sp);
fid_index = mlxsw_sp_fid_index(fid);
err = mlxsw_sp_acl_rulei_act_fid_set(mlxsw_sp, rulei,
@@ -257,6 +267,12 @@ static int mlxsw_sp_flower_parse_tcp(struct mlxsw_sp *mlxsw_sp,
flow_rule_match_tcp(rule, &match);
+ if (match.mask->flags & htons(0x0E00)) {
+ NL_SET_ERR_MSG_MOD(f->common.extack, "TCP flags match not supported on reserved bits");
+ dev_err(mlxsw_sp->bus_info->dev, "TCP flags match not supported on reserved bits\n");
+ return -EINVAL;
+ }
+
mlxsw_sp_acl_rulei_keymask_u32(rulei, MLXSW_AFK_ELEMENT_TCP_FLAGS,
ntohs(match.key->flags),
ntohs(match.mask->flags));
@@ -390,6 +406,12 @@ static int mlxsw_sp_flower_parse(struct mlxsw_sp *mlxsw_sp,
NL_SET_ERR_MSG_MOD(f->common.extack, "vlan_id key is not supported on egress");
return -EOPNOTSUPP;
}
+
+ /* Forbid block with this rulei to be bound
+ * to egress in future.
+ */
+ rulei->egress_bind_blocker = 1;
+
if (match.mask->vlan_id != 0)
mlxsw_sp_acl_rulei_keymask_u32(rulei,
MLXSW_AFK_ELEMENT_VID,
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
index e618be7ce6c6..a330b369e899 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
@@ -2943,7 +2943,7 @@ static u32 mlxsw_sp_nexthop_group_hash_obj(const void *data, u32 len, u32 seed)
val = nh_grp->count;
for (i = 0; i < nh_grp->count; i++) {
nh = &nh_grp->nexthops[i];
- val ^= nh->ifindex;
+ val ^= jhash(&nh->ifindex, sizeof(nh->ifindex), seed);
}
return jhash(&val, sizeof(val), seed);
default:
@@ -2961,7 +2961,7 @@ mlxsw_sp_nexthop6_group_hash(struct mlxsw_sp_fib6_entry *fib6_entry, u32 seed)
list_for_each_entry(mlxsw_sp_rt6, &fib6_entry->rt6_list, list) {
dev = mlxsw_sp_rt6->rt->fib6_nh->fib_nh_dev;
- val ^= dev->ifindex;
+ val ^= jhash(&dev->ifindex, sizeof(dev->ifindex), seed);
}
return jhash(&val, sizeof(val), seed);
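Hashing each ifindex before folding it in removes structured collisions that raw XOR permits: for instance 3 ^ 5 == 1 ^ 7 == 6, so nexthop groups over interfaces {3, 5} and {1, 7} previously hashed identically. The fixed fold, as a simplified sketch:

    u32 val = 0;

    for (i = 0; i < count; i++)  /* still order-independent, but well mixed */
            val ^= jhash(&ifindex[i], sizeof(ifindex[i]), seed);
    return jhash(&val, sizeof(val), seed);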
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.c
new file mode 100644
index 000000000000..899450b28621
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.c
@@ -0,0 +1,267 @@
+// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
+/* Copyright (c) 2019 Mellanox Technologies. All rights reserved */
+
+#include <linux/kernel.h>
+#include <net/devlink.h>
+#include <uapi/linux/devlink.h>
+
+#include "core.h"
+#include "reg.h"
+#include "spectrum.h"
+
+#define MLXSW_SP_TRAP_METADATA DEVLINK_TRAP_METADATA_TYPE_F_IN_PORT
+
+static void mlxsw_sp_rx_drop_listener(struct sk_buff *skb, u8 local_port,
+ void *priv);
+
+#define MLXSW_SP_TRAP_DROP(_id, _group_id) \
+ DEVLINK_TRAP_GENERIC(DROP, DROP, _id, \
+ DEVLINK_TRAP_GROUP_GENERIC(_group_id), \
+ MLXSW_SP_TRAP_METADATA)
+
+#define MLXSW_SP_RXL_DISCARD(_id, _group_id) \
+ MLXSW_RXL(mlxsw_sp_rx_drop_listener, DISCARD_##_id, SET_FW_DEFAULT, \
+ false, SP_##_group_id, DISCARD)
+
+static struct devlink_trap mlxsw_sp_traps_arr[] = {
+ MLXSW_SP_TRAP_DROP(SMAC_MC, L2_DROPS),
+ MLXSW_SP_TRAP_DROP(VLAN_TAG_MISMATCH, L2_DROPS),
+ MLXSW_SP_TRAP_DROP(INGRESS_VLAN_FILTER, L2_DROPS),
+ MLXSW_SP_TRAP_DROP(INGRESS_STP_FILTER, L2_DROPS),
+ MLXSW_SP_TRAP_DROP(EMPTY_TX_LIST, L2_DROPS),
+ MLXSW_SP_TRAP_DROP(PORT_LOOPBACK_FILTER, L2_DROPS),
+};
+
+static struct mlxsw_listener mlxsw_sp_listeners_arr[] = {
+ MLXSW_SP_RXL_DISCARD(ING_PACKET_SMAC_MC, L2_DISCARDS),
+ MLXSW_SP_RXL_DISCARD(ING_SWITCH_VTAG_ALLOW, L2_DISCARDS),
+ MLXSW_SP_RXL_DISCARD(ING_SWITCH_VLAN, L2_DISCARDS),
+ MLXSW_SP_RXL_DISCARD(ING_SWITCH_STP, L2_DISCARDS),
+ MLXSW_SP_RXL_DISCARD(LOOKUP_SWITCH_UC, L2_DISCARDS),
+ MLXSW_SP_RXL_DISCARD(LOOKUP_SWITCH_MC_NULL, L2_DISCARDS),
+ MLXSW_SP_RXL_DISCARD(LOOKUP_SWITCH_LB, L2_DISCARDS),
+};
+
+/* Mapping between hardware trap and devlink trap. Multiple hardware traps can
+ * be mapped to the same devlink trap. The order matches that of
+ * 'mlxsw_sp_listeners_arr'.
+ */
+static u16 mlxsw_sp_listener_devlink_map[] = {
+ DEVLINK_TRAP_GENERIC_ID_SMAC_MC,
+ DEVLINK_TRAP_GENERIC_ID_VLAN_TAG_MISMATCH,
+ DEVLINK_TRAP_GENERIC_ID_INGRESS_VLAN_FILTER,
+ DEVLINK_TRAP_GENERIC_ID_INGRESS_STP_FILTER,
+ DEVLINK_TRAP_GENERIC_ID_EMPTY_TX_LIST,
+ DEVLINK_TRAP_GENERIC_ID_EMPTY_TX_LIST,
+ DEVLINK_TRAP_GENERIC_ID_PORT_LOOPBACK_FILTER,
+};
+
+static int mlxsw_sp_rx_listener(struct mlxsw_sp *mlxsw_sp, struct sk_buff *skb,
+ u8 local_port,
+ struct mlxsw_sp_port *mlxsw_sp_port)
+{
+ struct mlxsw_sp_port_pcpu_stats *pcpu_stats;
+
+ if (unlikely(!mlxsw_sp_port)) {
+ dev_warn_ratelimited(mlxsw_sp->bus_info->dev, "Port %d: skb received for non-existent port\n",
+ local_port);
+ kfree_skb(skb);
+ return -EINVAL;
+ }
+
+ skb->dev = mlxsw_sp_port->dev;
+
+ pcpu_stats = this_cpu_ptr(mlxsw_sp_port->pcpu_stats);
+ u64_stats_update_begin(&pcpu_stats->syncp);
+ pcpu_stats->rx_packets++;
+ pcpu_stats->rx_bytes += skb->len;
+ u64_stats_update_end(&pcpu_stats->syncp);
+
+ skb->protocol = eth_type_trans(skb, skb->dev);
+
+ return 0;
+}
+
+static void mlxsw_sp_rx_drop_listener(struct sk_buff *skb, u8 local_port,
+ void *trap_ctx)
+{
+ struct devlink_port *in_devlink_port;
+ struct mlxsw_sp_port *mlxsw_sp_port;
+ struct mlxsw_sp *mlxsw_sp;
+ struct devlink *devlink;
+
+ mlxsw_sp = devlink_trap_ctx_priv(trap_ctx);
+ mlxsw_sp_port = mlxsw_sp->ports[local_port];
+
+ if (mlxsw_sp_rx_listener(mlxsw_sp, skb, local_port, mlxsw_sp_port))
+ return;
+
+ devlink = priv_to_devlink(mlxsw_sp->core);
+ in_devlink_port = mlxsw_core_port_devlink_port_get(mlxsw_sp->core,
+ local_port);
+ devlink_trap_report(devlink, skb, trap_ctx, in_devlink_port);
+ consume_skb(skb);
+}
+
+int mlxsw_sp_devlink_traps_init(struct mlxsw_sp *mlxsw_sp)
+{
+ struct devlink *devlink = priv_to_devlink(mlxsw_sp->core);
+
+ if (WARN_ON(ARRAY_SIZE(mlxsw_sp_listener_devlink_map) !=
+ ARRAY_SIZE(mlxsw_sp_listeners_arr)))
+ return -EINVAL;
+
+ return devlink_traps_register(devlink, mlxsw_sp_traps_arr,
+ ARRAY_SIZE(mlxsw_sp_traps_arr),
+ mlxsw_sp);
+}
+
+void mlxsw_sp_devlink_traps_fini(struct mlxsw_sp *mlxsw_sp)
+{
+ struct devlink *devlink = priv_to_devlink(mlxsw_sp->core);
+
+ devlink_traps_unregister(devlink, mlxsw_sp_traps_arr,
+ ARRAY_SIZE(mlxsw_sp_traps_arr));
+}
+
+int mlxsw_sp_trap_init(struct mlxsw_core *mlxsw_core,
+ const struct devlink_trap *trap, void *trap_ctx)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(mlxsw_sp_listener_devlink_map); i++) {
+ struct mlxsw_listener *listener;
+ int err;
+
+ if (mlxsw_sp_listener_devlink_map[i] != trap->id)
+ continue;
+ listener = &mlxsw_sp_listeners_arr[i];
+
+ err = mlxsw_core_trap_register(mlxsw_core, listener, trap_ctx);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+void mlxsw_sp_trap_fini(struct mlxsw_core *mlxsw_core,
+ const struct devlink_trap *trap, void *trap_ctx)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(mlxsw_sp_listener_devlink_map); i++) {
+ struct mlxsw_listener *listener;
+
+ if (mlxsw_sp_listener_devlink_map[i] != trap->id)
+ continue;
+ listener = &mlxsw_sp_listeners_arr[i];
+
+ mlxsw_core_trap_unregister(mlxsw_core, listener, trap_ctx);
+ }
+}
+
+int mlxsw_sp_trap_action_set(struct mlxsw_core *mlxsw_core,
+ const struct devlink_trap *trap,
+ enum devlink_trap_action action)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(mlxsw_sp_listener_devlink_map); i++) {
+ enum mlxsw_reg_hpkt_action hw_action;
+ struct mlxsw_listener *listener;
+ int err;
+
+ if (mlxsw_sp_listener_devlink_map[i] != trap->id)
+ continue;
+ listener = &mlxsw_sp_listeners_arr[i];
+
+ switch (action) {
+ case DEVLINK_TRAP_ACTION_DROP:
+ hw_action = MLXSW_REG_HPKT_ACTION_SET_FW_DEFAULT;
+ break;
+ case DEVLINK_TRAP_ACTION_TRAP:
+ hw_action = MLXSW_REG_HPKT_ACTION_TRAP_EXCEPTION_TO_CPU;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ err = mlxsw_core_trap_action_set(mlxsw_core, listener,
+ hw_action);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+#define MLXSW_SP_DISCARD_POLICER_ID (MLXSW_REG_HTGT_TRAP_GROUP_MAX + 1)
+
+static int
+mlxsw_sp_trap_group_policer_init(struct mlxsw_sp *mlxsw_sp,
+ const struct devlink_trap_group *group)
+{
+ enum mlxsw_reg_qpcr_ir_units ir_units;
+ char qpcr_pl[MLXSW_REG_QPCR_LEN];
+ u16 policer_id;
+ u8 burst_size;
+ bool is_bytes;
+ u32 rate;
+
+ switch (group->id) {
+ case DEVLINK_TRAP_GROUP_GENERIC_ID_L2_DROPS:
+ policer_id = MLXSW_SP_DISCARD_POLICER_ID;
+ ir_units = MLXSW_REG_QPCR_IR_UNITS_M;
+ is_bytes = false;
+ rate = 10 * 1024; /* 10Kpps */
+ burst_size = 7;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ mlxsw_reg_qpcr_pack(qpcr_pl, policer_id, ir_units, is_bytes, rate,
+ burst_size);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qpcr), qpcr_pl);
+}
+
+static int
+__mlxsw_sp_trap_group_init(struct mlxsw_sp *mlxsw_sp,
+ const struct devlink_trap_group *group)
+{
+ char htgt_pl[MLXSW_REG_HTGT_LEN];
+ u8 priority, tc, group_id;
+ u16 policer_id;
+
+ switch (group->id) {
+ case DEVLINK_TRAP_GROUP_GENERIC_ID_L2_DROPS:
+ group_id = MLXSW_REG_HTGT_TRAP_GROUP_SP_L2_DISCARDS;
+ policer_id = MLXSW_SP_DISCARD_POLICER_ID;
+ priority = 0;
+ tc = 1;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ mlxsw_reg_htgt_pack(htgt_pl, group_id, policer_id, priority, tc);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(htgt), htgt_pl);
+}
+
+int mlxsw_sp_trap_group_init(struct mlxsw_core *mlxsw_core,
+ const struct devlink_trap_group *group)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
+ int err;
+
+ err = mlxsw_sp_trap_group_policer_init(mlxsw_sp, group);
+ if (err)
+ return err;
+
+ err = __mlxsw_sp_trap_group_init(mlxsw_sp, group);
+ if (err)
+ return err;
+
+ return 0;
+}
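With this file in place, the registered traps become manageable from user space. Assuming an iproute2 build with devlink trap support (device name illustrative), a session might look like:

    $ devlink trap show pci/0000:03:00.0
    $ devlink trap set pci/0000:03:00.0 trap source_mac_is_multicast action trap
    $ devlink -s trap show pci/0000:03:00.0 trap source_mac_is_multicast

Setting the action to "trap" maps to MLXSW_REG_HPKT_ACTION_TRAP_EXCEPTION_TO_CPU in mlxsw_sp_trap_action_set() above; "drop" restores MLXSW_REG_HPKT_ACTION_SET_FW_DEFAULT.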
diff --git a/drivers/net/ethernet/mellanox/mlxsw/trap.h b/drivers/net/ethernet/mellanox/mlxsw/trap.h
index 19202bdb5105..7618f084cae9 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/trap.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/trap.h
@@ -66,6 +66,13 @@ enum {
MLXSW_TRAP_ID_NVE_ENCAP_ARP = 0xBD,
MLXSW_TRAP_ID_ROUTER_ALERT_IPV4 = 0xD6,
MLXSW_TRAP_ID_ROUTER_ALERT_IPV6 = 0xD7,
+ MLXSW_TRAP_ID_DISCARD_ING_PACKET_SMAC_MC = 0x140,
+ MLXSW_TRAP_ID_DISCARD_ING_SWITCH_VTAG_ALLOW = 0x148,
+ MLXSW_TRAP_ID_DISCARD_ING_SWITCH_VLAN = 0x149,
+ MLXSW_TRAP_ID_DISCARD_ING_SWITCH_STP = 0x14A,
+ MLXSW_TRAP_ID_DISCARD_LOOKUP_SWITCH_UC = 0x150,
+ MLXSW_TRAP_ID_DISCARD_LOOKUP_SWITCH_MC_NULL = 0x151,
+ MLXSW_TRAP_ID_DISCARD_LOOKUP_SWITCH_LB = 0x152,
MLXSW_TRAP_ID_ACL0 = 0x1C0,
/* Multicast trap used for routes with trap action */
MLXSW_TRAP_ID_ACL1 = 0x1C1,
diff --git a/drivers/net/ethernet/micrel/ks8842.c b/drivers/net/ethernet/micrel/ks8842.c
index ccd06702cc56..da329ca115cc 100644
--- a/drivers/net/ethernet/micrel/ks8842.c
+++ b/drivers/net/ethernet/micrel/ks8842.c
@@ -580,9 +580,7 @@ out:
dma_unmap_single(adapter->dev, sg_dma_address(sg),
DMA_BUFFER_SIZE, DMA_FROM_DEVICE);
sg_dma_address(sg) = 0;
- if (ctl->skb)
- dev_kfree_skb(ctl->skb);
-
+ dev_kfree_skb(ctl->skb);
ctl->skb = NULL;
printk(KERN_ERR DRV_NAME": Failed to start RX DMA: %d\n", err);
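The dropped NULL check is safe to remove because dev_kfree_skb(), like kfree(), silently accepts a NULL pointer. The shape of the guarantee (a sketch of the pattern, not the exact skbuff internals):

    static inline void free_skb_sketch(struct sk_buff *skb)
    {
            if (unlikely(!skb))
                    return;         /* NULL is silently ignored */
            /* ... drop the reference and free the buffer ... */
    }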
diff --git a/drivers/net/ethernet/micrel/ks8851_mll.c b/drivers/net/ethernet/micrel/ks8851_mll.c
index e52b015e31a9..a41a90c589db 100644
--- a/drivers/net/ethernet/micrel/ks8851_mll.c
+++ b/drivers/net/ethernet/micrel/ks8851_mll.c
@@ -1225,7 +1225,6 @@ MODULE_DEVICE_TABLE(of, ks8851_ml_dt_ids);
static int ks8851_probe(struct platform_device *pdev)
{
int err;
- struct resource *io_d, *io_c;
struct net_device *netdev;
struct ks_net *ks;
u16 id, data;
@@ -1240,15 +1239,13 @@ static int ks8851_probe(struct platform_device *pdev)
ks = netdev_priv(netdev);
ks->netdev = netdev;
- io_d = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- ks->hw_addr = devm_ioremap_resource(&pdev->dev, io_d);
+ ks->hw_addr = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(ks->hw_addr)) {
err = PTR_ERR(ks->hw_addr);
goto err_free;
}
- io_c = platform_get_resource(pdev, IORESOURCE_MEM, 1);
- ks->hw_addr_cmd = devm_ioremap_resource(&pdev->dev, io_c);
+ ks->hw_addr_cmd = devm_platform_ioremap_resource(pdev, 1);
if (IS_ERR(ks->hw_addr_cmd)) {
err = PTR_ERR(ks->hw_addr_cmd);
goto err_free;
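devm_platform_ioremap_resource() collapses the former two-step lookup into a single call; it is a straightforward wrapper equivalent to the code it replaces:

    void __iomem *devm_platform_ioremap_resource(struct platform_device *pdev,
                                                 unsigned int index)
    {
            struct resource *res;

            res = platform_get_resource(pdev, IORESOURCE_MEM, index);
            return devm_ioremap_resource(&pdev->dev, res);
    }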
diff --git a/drivers/net/ethernet/microchip/lan743x_main.c b/drivers/net/ethernet/microchip/lan743x_main.c
index 13e6bf13ac4d..15a8be6bad27 100644
--- a/drivers/net/ethernet/microchip/lan743x_main.c
+++ b/drivers/net/ethernet/microchip/lan743x_main.c
@@ -1434,7 +1434,7 @@ static void lan743x_tx_frame_add_lso(struct lan743x_tx *tx,
}
static int lan743x_tx_frame_add_fragment(struct lan743x_tx *tx,
- const struct skb_frag_struct *fragment,
+ const skb_frag_t *fragment,
unsigned int frame_length)
{
/* called only from within lan743x_tx_xmit_frame
@@ -1607,9 +1607,8 @@ static netdev_tx_t lan743x_tx_xmit_frame(struct lan743x_tx *tx,
goto finish;
for (j = 0; j < nr_frags; j++) {
- const struct skb_frag_struct *frag;
+ const skb_frag_t *frag = &(skb_shinfo(skb)->frags[j]);
- frag = &(skb_shinfo(skb)->frags[j]);
if (lan743x_tx_frame_add_fragment(tx, frag, frame_length)) {
/* upon error no need to call
* lan743x_tx_frame_end
diff --git a/drivers/net/ethernet/microchip/lan743x_ptp.c b/drivers/net/ethernet/microchip/lan743x_ptp.c
index b2109eca81fd..57b26c2acf87 100644
--- a/drivers/net/ethernet/microchip/lan743x_ptp.c
+++ b/drivers/net/ethernet/microchip/lan743x_ptp.c
@@ -963,8 +963,7 @@ void lan743x_ptp_close(struct lan743x_adapter *adapter)
index++) {
struct sk_buff *skb = ptp->tx_ts_skb_queue[index];
- if (skb)
- dev_kfree_skb(skb);
+ dev_kfree_skb(skb);
ptp->tx_ts_skb_queue[index] = NULL;
ptp->tx_ts_seconds_queue[index] = 0;
ptp->tx_ts_nseconds_queue[index] = 0;
diff --git a/drivers/net/ethernet/mscc/ocelot.c b/drivers/net/ethernet/mscc/ocelot.c
index 6932e615d4b0..4d1bce4389c7 100644
--- a/drivers/net/ethernet/mscc/ocelot.c
+++ b/drivers/net/ethernet/mscc/ocelot.c
@@ -14,6 +14,7 @@
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/phy.h>
+#include <linux/ptp_clock_kernel.h>
#include <linux/skbuff.h>
#include <linux/iopoll.h>
#include <net/arp.h>
@@ -538,7 +539,7 @@ static int ocelot_port_stop(struct net_device *dev)
*/
static int ocelot_gen_ifh(u32 *ifh, struct frame_info *info)
{
- ifh[0] = IFH_INJ_BYPASS;
+ ifh[0] = IFH_INJ_BYPASS | ((0x1ff & info->rew_op) << 21);
ifh[1] = (0xf00 & info->port) >> 8;
ifh[2] = (0xff & info->port) << 24;
ifh[3] = (info->tag_type << 16) | info->vid;
@@ -548,6 +549,7 @@ static int ocelot_gen_ifh(u32 *ifh, struct frame_info *info)
static int ocelot_port_xmit(struct sk_buff *skb, struct net_device *dev)
{
+ struct skb_shared_info *shinfo = skb_shinfo(skb);
struct ocelot_port *port = netdev_priv(dev);
struct ocelot *ocelot = port->ocelot;
u32 val, ifh[IFH_LEN];
@@ -566,6 +568,14 @@ static int ocelot_port_xmit(struct sk_buff *skb, struct net_device *dev)
info.port = BIT(port->chip_port);
info.tag_type = IFH_TAG_TYPE_C;
info.vid = skb_vlan_tag_get(skb);
+
+ /* Check if timestamping is needed */
+ if (ocelot->ptp && shinfo->tx_flags & SKBTX_HW_TSTAMP) {
+ info.rew_op = port->ptp_cmd;
+ if (port->ptp_cmd == IFH_REW_OP_TWO_STEP_PTP)
+ info.rew_op |= (port->ts_id % 4) << 3;
+ }
+
ocelot_gen_ifh(ifh, &info);
for (i = 0; i < IFH_LEN; i++)
@@ -596,11 +606,58 @@ static int ocelot_port_xmit(struct sk_buff *skb, struct net_device *dev)
dev->stats.tx_packets++;
dev->stats.tx_bytes += skb->len;
- dev_kfree_skb_any(skb);
+ if (ocelot->ptp && shinfo->tx_flags & SKBTX_HW_TSTAMP &&
+ port->ptp_cmd == IFH_REW_OP_TWO_STEP_PTP) {
+ struct ocelot_skb *oskb =
+ kzalloc(sizeof(struct ocelot_skb), GFP_ATOMIC);
+
+ if (unlikely(!oskb))
+ goto out;
+
+ skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
+
+ oskb->skb = skb;
+ oskb->id = port->ts_id % 4;
+ port->ts_id++;
+
+ list_add_tail(&oskb->head, &port->skbs);
+
+ return NETDEV_TX_OK;
+ }
+
+out:
+ dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
}
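For two-step timestamping the skb cannot be released at transmit time: it is parked on the port's skbs list under a 2-bit timestamp ID (ts_id % 4, matching the ID injected into the frame header) and consumed only once the hardware reports the stamp. The lifecycle, in short:

    - xmit: oskb->id = port->ts_id % 4, and the skb joins port->skbs;
    - PTP-ready IRQ (below): the ID from SYS_PTP_STATUS selects the entry,
      skb_tstamp_tx() delivers the timestamp, and the entry is freed;
    - ocelot_deinit(): leftover entries the hardware never reported are freed.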
+void ocelot_get_hwtimestamp(struct ocelot *ocelot, struct timespec64 *ts)
+{
+ unsigned long flags;
+ u32 val;
+
+ spin_lock_irqsave(&ocelot->ptp_clock_lock, flags);
+
+ /* Read current PTP time to get seconds */
+ val = ocelot_read_rix(ocelot, PTP_PIN_CFG, TOD_ACC_PIN);
+
+ val &= ~(PTP_PIN_CFG_SYNC | PTP_PIN_CFG_ACTION_MASK | PTP_PIN_CFG_DOM);
+ val |= PTP_PIN_CFG_ACTION(PTP_PIN_ACTION_SAVE);
+ ocelot_write_rix(ocelot, val, PTP_PIN_CFG, TOD_ACC_PIN);
+ ts->tv_sec = ocelot_read_rix(ocelot, PTP_PIN_TOD_SEC_LSB, TOD_ACC_PIN);
+
+ /* Read packet HW timestamp from FIFO */
+ val = ocelot_read(ocelot, SYS_PTP_TXSTAMP);
+ ts->tv_nsec = SYS_PTP_TXSTAMP_PTP_TXSTAMP(val);
+
+ /* Sec has incremented since the ts was registered */
+ if ((ts->tv_sec & 0x1) != !!(val & SYS_PTP_TXSTAMP_PTP_TXSTAMP_SEC))
+ ts->tv_sec--;
+
+ spin_unlock_irqrestore(&ocelot->ptp_clock_lock, flags);
+}
+EXPORT_SYMBOL(ocelot_get_hwtimestamp);
+
static int ocelot_mc_unsync(struct net_device *dev, const unsigned char *addr)
{
struct ocelot_port *port = netdev_priv(dev);
@@ -917,6 +974,97 @@ static int ocelot_get_port_parent_id(struct net_device *dev,
return 0;
}
+static int ocelot_hwstamp_get(struct ocelot_port *port, struct ifreq *ifr)
+{
+ struct ocelot *ocelot = port->ocelot;
+
+ return copy_to_user(ifr->ifr_data, &ocelot->hwtstamp_config,
+ sizeof(ocelot->hwtstamp_config)) ? -EFAULT : 0;
+}
+
+static int ocelot_hwstamp_set(struct ocelot_port *port, struct ifreq *ifr)
+{
+ struct ocelot *ocelot = port->ocelot;
+ struct hwtstamp_config cfg;
+
+ if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg)))
+ return -EFAULT;
+
+ /* reserved for future extensions */
+ if (cfg.flags)
+ return -EINVAL;
+
+ /* Tx type sanity check */
+ switch (cfg.tx_type) {
+ case HWTSTAMP_TX_ON:
+ port->ptp_cmd = IFH_REW_OP_TWO_STEP_PTP;
+ break;
+ case HWTSTAMP_TX_ONESTEP_SYNC:
+ /* IFH_REW_OP_ONE_STEP_PTP updates the correction field; here we
+ * need to update the origin time instead.
+ */
+ port->ptp_cmd = IFH_REW_OP_ORIGIN_PTP;
+ break;
+ case HWTSTAMP_TX_OFF:
+ port->ptp_cmd = 0;
+ break;
+ default:
+ return -ERANGE;
+ }
+
+ mutex_lock(&ocelot->ptp_lock);
+
+ switch (cfg.rx_filter) {
+ case HWTSTAMP_FILTER_NONE:
+ break;
+ case HWTSTAMP_FILTER_ALL:
+ case HWTSTAMP_FILTER_SOME:
+ case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
+ case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
+ case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
+ case HWTSTAMP_FILTER_NTP_ALL:
+ case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
+ cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
+ break;
+ default:
+ mutex_unlock(&ocelot->ptp_lock);
+ return -ERANGE;
+ }
+
+ /* Commit back the result & save it */
+ memcpy(&ocelot->hwtstamp_config, &cfg, sizeof(cfg));
+ mutex_unlock(&ocelot->ptp_lock);
+
+ return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0;
+}
+
+static int ocelot_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+{
+ struct ocelot_port *port = netdev_priv(dev);
+ struct ocelot *ocelot = port->ocelot;
+
+ /* The function is only used for PTP operations for now */
+ if (!ocelot->ptp)
+ return -EOPNOTSUPP;
+
+ switch (cmd) {
+ case SIOCSHWTSTAMP:
+ return ocelot_hwstamp_set(port, ifr);
+ case SIOCGHWTSTAMP:
+ return ocelot_hwstamp_get(port, ifr);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
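A minimal user-space sketch of exercising this ioctl through the standard hwtstamp API (interface name and error handling illustrative):

    #include <linux/net_tstamp.h>
    #include <linux/sockios.h>
    #include <net/if.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <sys/socket.h>

    int enable_tx_timestamps(const char *ifname)
    {
            struct hwtstamp_config cfg = {
                    .tx_type = HWTSTAMP_TX_ON,          /* two-step Tx stamping */
                    .rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT,
            };
            struct ifreq ifr = {};
            int fd = socket(AF_INET, SOCK_DGRAM, 0);

            strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
            ifr.ifr_data = (char *)&cfg;
            return ioctl(fd, SIOCSHWTSTAMP, &ifr);      /* driver may adjust cfg */
    }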
static const struct net_device_ops ocelot_port_netdev_ops = {
.ndo_open = ocelot_port_open,
.ndo_stop = ocelot_port_stop,
@@ -933,6 +1081,7 @@ static const struct net_device_ops ocelot_port_netdev_ops = {
.ndo_set_features = ocelot_set_features,
.ndo_get_port_parent_id = ocelot_get_port_parent_id,
.ndo_setup_tc = ocelot_setup_tc,
+ .ndo_do_ioctl = ocelot_ioctl,
};
static void ocelot_get_strings(struct net_device *netdev, u32 sset, u8 *data)
@@ -1014,12 +1163,37 @@ static int ocelot_get_sset_count(struct net_device *dev, int sset)
return ocelot->num_stats;
}
+static int ocelot_get_ts_info(struct net_device *dev,
+ struct ethtool_ts_info *info)
+{
+ struct ocelot_port *ocelot_port = netdev_priv(dev);
+ struct ocelot *ocelot = ocelot_port->ocelot;
+
+ if (!ocelot->ptp)
+ return ethtool_op_get_ts_info(dev, info);
+
+ info->phc_index = ocelot->ptp_clock ?
+ ptp_clock_index(ocelot->ptp_clock) : -1;
+ info->so_timestamping |= SOF_TIMESTAMPING_TX_SOFTWARE |
+ SOF_TIMESTAMPING_RX_SOFTWARE |
+ SOF_TIMESTAMPING_SOFTWARE |
+ SOF_TIMESTAMPING_TX_HARDWARE |
+ SOF_TIMESTAMPING_RX_HARDWARE |
+ SOF_TIMESTAMPING_RAW_HARDWARE;
+ info->tx_types = BIT(HWTSTAMP_TX_OFF) | BIT(HWTSTAMP_TX_ON) |
+ BIT(HWTSTAMP_TX_ONESTEP_SYNC);
+ info->rx_filters = BIT(HWTSTAMP_FILTER_NONE) | BIT(HWTSTAMP_FILTER_ALL);
+
+ return 0;
+}
+
static const struct ethtool_ops ocelot_ethtool_ops = {
.get_strings = ocelot_get_strings,
.get_ethtool_stats = ocelot_get_ethtool_stats,
.get_sset_count = ocelot_get_sset_count,
.get_link_ksettings = phy_ethtool_get_link_ksettings,
.set_link_ksettings = phy_ethtool_set_link_ksettings,
+ .get_ts_info = ocelot_get_ts_info,
};
static int ocelot_port_attr_stp_state_set(struct ocelot_port *ocelot_port,
@@ -1629,6 +1803,196 @@ struct notifier_block ocelot_switchdev_blocking_nb __read_mostly = {
};
EXPORT_SYMBOL(ocelot_switchdev_blocking_nb);
+int ocelot_ptp_gettime64(struct ptp_clock_info *ptp, struct timespec64 *ts)
+{
+ struct ocelot *ocelot = container_of(ptp, struct ocelot, ptp_info);
+ unsigned long flags;
+ time64_t s;
+ u32 val;
+ s64 ns;
+
+ spin_lock_irqsave(&ocelot->ptp_clock_lock, flags);
+
+ val = ocelot_read_rix(ocelot, PTP_PIN_CFG, TOD_ACC_PIN);
+ val &= ~(PTP_PIN_CFG_SYNC | PTP_PIN_CFG_ACTION_MASK | PTP_PIN_CFG_DOM);
+ val |= PTP_PIN_CFG_ACTION(PTP_PIN_ACTION_SAVE);
+ ocelot_write_rix(ocelot, val, PTP_PIN_CFG, TOD_ACC_PIN);
+
+ s = ocelot_read_rix(ocelot, PTP_PIN_TOD_SEC_MSB, TOD_ACC_PIN) & 0xffff;
+ s <<= 32;
+ s += ocelot_read_rix(ocelot, PTP_PIN_TOD_SEC_LSB, TOD_ACC_PIN);
+ ns = ocelot_read_rix(ocelot, PTP_PIN_TOD_NSEC, TOD_ACC_PIN);
+
+ spin_unlock_irqrestore(&ocelot->ptp_clock_lock, flags);
+
+ /* The hardware encodes -16..-1 ns as 0x3ffffff0..0x3fffffff;
+ * fold those values back into the previous second. */
+ if (ns >= 0x3ffffff0 && ns <= 0x3fffffff) {
+ s--;
+ ns &= 0xf;
+ ns += 999999984;
+ }
+
+ set_normalized_timespec64(ts, s, ns);
+ return 0;
+}
+EXPORT_SYMBOL(ocelot_ptp_gettime64);
+
+static int ocelot_ptp_settime64(struct ptp_clock_info *ptp,
+ const struct timespec64 *ts)
+{
+ struct ocelot *ocelot = container_of(ptp, struct ocelot, ptp_info);
+ unsigned long flags;
+ u32 val;
+
+ spin_lock_irqsave(&ocelot->ptp_clock_lock, flags);
+
+ val = ocelot_read_rix(ocelot, PTP_PIN_CFG, TOD_ACC_PIN);
+ val &= ~(PTP_PIN_CFG_SYNC | PTP_PIN_CFG_ACTION_MASK | PTP_PIN_CFG_DOM);
+ val |= PTP_PIN_CFG_ACTION(PTP_PIN_ACTION_IDLE);
+
+ ocelot_write_rix(ocelot, val, PTP_PIN_CFG, TOD_ACC_PIN);
+
+ ocelot_write_rix(ocelot, lower_32_bits(ts->tv_sec), PTP_PIN_TOD_SEC_LSB,
+ TOD_ACC_PIN);
+ ocelot_write_rix(ocelot, upper_32_bits(ts->tv_sec), PTP_PIN_TOD_SEC_MSB,
+ TOD_ACC_PIN);
+ ocelot_write_rix(ocelot, ts->tv_nsec, PTP_PIN_TOD_NSEC, TOD_ACC_PIN);
+
+ val = ocelot_read_rix(ocelot, PTP_PIN_CFG, TOD_ACC_PIN);
+ val &= ~(PTP_PIN_CFG_SYNC | PTP_PIN_CFG_ACTION_MASK | PTP_PIN_CFG_DOM);
+ val |= PTP_PIN_CFG_ACTION(PTP_PIN_ACTION_LOAD);
+
+ ocelot_write_rix(ocelot, val, PTP_PIN_CFG, TOD_ACC_PIN);
+
+ spin_unlock_irqrestore(&ocelot->ptp_clock_lock, flags);
+ return 0;
+}
+
+static int ocelot_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
+{
+ if (delta > -(NSEC_PER_SEC / 2) && delta < (NSEC_PER_SEC / 2)) {
+ struct ocelot *ocelot = container_of(ptp, struct ocelot, ptp_info);
+ unsigned long flags;
+ u32 val;
+
+ spin_lock_irqsave(&ocelot->ptp_clock_lock, flags);
+
+ val = ocelot_read_rix(ocelot, PTP_PIN_CFG, TOD_ACC_PIN);
+ val &= ~(PTP_PIN_CFG_SYNC | PTP_PIN_CFG_ACTION_MASK | PTP_PIN_CFG_DOM);
+ val |= PTP_PIN_CFG_ACTION(PTP_PIN_ACTION_IDLE);
+
+ ocelot_write_rix(ocelot, val, PTP_PIN_CFG, TOD_ACC_PIN);
+
+ ocelot_write_rix(ocelot, 0, PTP_PIN_TOD_SEC_LSB, TOD_ACC_PIN);
+ ocelot_write_rix(ocelot, 0, PTP_PIN_TOD_SEC_MSB, TOD_ACC_PIN);
+ ocelot_write_rix(ocelot, delta, PTP_PIN_TOD_NSEC, TOD_ACC_PIN);
+
+ val = ocelot_read_rix(ocelot, PTP_PIN_CFG, TOD_ACC_PIN);
+ val &= ~(PTP_PIN_CFG_SYNC | PTP_PIN_CFG_ACTION_MASK | PTP_PIN_CFG_DOM);
+ val |= PTP_PIN_CFG_ACTION(PTP_PIN_ACTION_DELTA);
+
+ ocelot_write_rix(ocelot, val, PTP_PIN_CFG, TOD_ACC_PIN);
+
+ spin_unlock_irqrestore(&ocelot->ptp_clock_lock, flags);
+ } else {
+ /* Fall back to ocelot_ptp_settime64(), which is not exact. */
+ struct timespec64 ts;
+ u64 now;
+
+ ocelot_ptp_gettime64(ptp, &ts);
+
+ now = ktime_to_ns(timespec64_to_ktime(ts));
+ ts = ns_to_timespec64(now + delta);
+
+ ocelot_ptp_settime64(ptp, &ts);
+ }
+ return 0;
+}
+
+static int ocelot_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
+{
+ struct ocelot *ocelot = container_of(ptp, struct ocelot, ptp_info);
+ u32 unit = 0, direction = 0;
+ unsigned long flags;
+ u64 adj = 0;
+
+ spin_lock_irqsave(&ocelot->ptp_clock_lock, flags);
+
+ if (!scaled_ppm)
+ goto disable_adj;
+
+ if (scaled_ppm < 0) {
+ direction = PTP_CFG_CLK_ADJ_CFG_DIR;
+ scaled_ppm = -scaled_ppm;
+ }
+
+ adj = PSEC_PER_SEC << 16;
+ do_div(adj, scaled_ppm);
+ do_div(adj, 1000);
+
+ /* If the adjustment value is too large, use ns instead */
+ if (adj >= (1L << 30)) {
+ unit = PTP_CFG_CLK_ADJ_FREQ_NS;
+ do_div(adj, 1000);
+ }
+
+ /* Still too big */
+ if (adj >= (1L << 30))
+ goto disable_adj;
+
+ ocelot_write(ocelot, unit | adj, PTP_CLK_CFG_ADJ_FREQ);
+ ocelot_write(ocelot, PTP_CFG_CLK_ADJ_CFG_ENA | direction,
+ PTP_CLK_CFG_ADJ_CFG);
+
+ spin_unlock_irqrestore(&ocelot->ptp_clock_lock, flags);
+ return 0;
+
+disable_adj:
+ ocelot_write(ocelot, 0, PTP_CLK_CFG_ADJ_CFG);
+
+ spin_unlock_irqrestore(&ocelot->ptp_clock_lock, flags);
+ return 0;
+}
+
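The conversion above turns the PTP core's scaled_ppm (parts per million with a 16-bit binary fraction) into the period between single corrections:

    adj = (PSEC_PER_SEC << 16) / scaled_ppm / 1000 = 10^9 / ppm  (in picoseconds)

For example, a 10 ppm request arrives as scaled_ppm = 10 * 65536 = 655360, giving adj = 10^8 ps: one nanosecond of correction every 100 microseconds, which is exactly 10 ppm. When the period does not fit the register's 30 bits, it is divided by 1000 and flagged as nanoseconds via PTP_CFG_CLK_ADJ_FREQ_NS (this reading of the unit semantics is inferred from that fallback).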
+static struct ptp_clock_info ocelot_ptp_clock_info = {
+ .owner = THIS_MODULE,
+ .name = "ocelot ptp",
+ .max_adj = 0x7fffffff,
+ .n_alarm = 0,
+ .n_ext_ts = 0,
+ .n_per_out = 0,
+ .n_pins = 0,
+ .pps = 0,
+ .gettime64 = ocelot_ptp_gettime64,
+ .settime64 = ocelot_ptp_settime64,
+ .adjtime = ocelot_ptp_adjtime,
+ .adjfine = ocelot_ptp_adjfine,
+};
+
+static int ocelot_init_timestamp(struct ocelot *ocelot)
+{
+ ocelot->ptp_info = ocelot_ptp_clock_info;
+ ocelot->ptp_clock = ptp_clock_register(&ocelot->ptp_info, ocelot->dev);
+ if (IS_ERR(ocelot->ptp_clock))
+ return PTR_ERR(ocelot->ptp_clock);
+ /* Check if PHC support is missing at the configuration level */
+ if (!ocelot->ptp_clock)
+ return 0;
+
+ ocelot_write(ocelot, SYS_PTP_CFG_PTP_STAMP_WID(30), SYS_PTP_CFG);
+ ocelot_write(ocelot, 0xffffffff, ANA_TABLES_PTP_ID_LOW);
+ ocelot_write(ocelot, 0xffffffff, ANA_TABLES_PTP_ID_HIGH);
+
+ ocelot_write(ocelot, PTP_CFG_MISC_PTP_EN, PTP_CFG_MISC);
+
+ /* There is no device reconfiguration; PTP Rx stamping is always
+ * enabled.
+ */
+ ocelot->hwtstamp_config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
+
+ return 0;
+}
+
int ocelot_probe_port(struct ocelot *ocelot, u8 port,
void __iomem *regs,
struct phy_device *phy)
@@ -1661,6 +2025,8 @@ int ocelot_probe_port(struct ocelot *ocelot, u8 port,
ocelot_mact_learn(ocelot, PGID_CPU, dev->dev_addr, ocelot_port->pvid,
ENTRYTYPE_LOCKED);
+ INIT_LIST_HEAD(&ocelot_port->skbs);
+
err = register_netdev(dev);
if (err) {
dev_err(ocelot->dev, "register_netdev failed\n");
@@ -1684,7 +2050,7 @@ EXPORT_SYMBOL(ocelot_probe_port);
int ocelot_init(struct ocelot *ocelot)
{
u32 port;
- int i, cpu = ocelot->num_phys_ports;
+ int i, ret, cpu = ocelot->num_phys_ports;
char queue_name[32];
ocelot->lags = devm_kcalloc(ocelot->dev, ocelot->num_phys_ports,
@@ -1699,6 +2065,8 @@ int ocelot_init(struct ocelot *ocelot)
return -ENOMEM;
mutex_init(&ocelot->stats_lock);
+ mutex_init(&ocelot->ptp_lock);
+ spin_lock_init(&ocelot->ptp_clock_lock);
snprintf(queue_name, sizeof(queue_name), "%s-stats",
dev_name(ocelot->dev));
ocelot->stats_queue = create_singlethread_workqueue(queue_name);
@@ -1812,16 +2180,43 @@ int ocelot_init(struct ocelot *ocelot)
INIT_DELAYED_WORK(&ocelot->stats_work, ocelot_check_stats_work);
queue_delayed_work(ocelot->stats_queue, &ocelot->stats_work,
OCELOT_STATS_CHECK_DELAY);
+
+ if (ocelot->ptp) {
+ ret = ocelot_init_timestamp(ocelot);
+ if (ret) {
+ dev_err(ocelot->dev,
+ "Timestamp initialization failed\n");
+ return ret;
+ }
+ }
+
return 0;
}
EXPORT_SYMBOL(ocelot_init);
void ocelot_deinit(struct ocelot *ocelot)
{
+ struct list_head *pos, *tmp;
+ struct ocelot_port *port;
+ struct ocelot_skb *entry;
+ int i;
+
cancel_delayed_work(&ocelot->stats_work);
destroy_workqueue(ocelot->stats_queue);
mutex_destroy(&ocelot->stats_lock);
ocelot_ace_deinit();
+
+ for (i = 0; i < ocelot->num_phys_ports; i++) {
+ port = ocelot->ports[i];
+
+ list_for_each_safe(pos, tmp, &port->skbs) {
+ entry = list_entry(pos, struct ocelot_skb, head);
+
+ list_del(pos);
+ dev_kfree_skb_any(entry->skb);
+ kfree(entry);
+ }
+ }
}
EXPORT_SYMBOL(ocelot_deinit);
diff --git a/drivers/net/ethernet/mscc/ocelot.h b/drivers/net/ethernet/mscc/ocelot.h
index f7eeb4806897..e40773c01a44 100644
--- a/drivers/net/ethernet/mscc/ocelot.h
+++ b/drivers/net/ethernet/mscc/ocelot.h
@@ -11,9 +11,11 @@
#include <linux/bitops.h>
#include <linux/etherdevice.h>
#include <linux/if_vlan.h>
+#include <linux/net_tstamp.h>
#include <linux/phy.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
+#include <linux/ptp_clock_kernel.h>
#include <linux/regmap.h>
#include "ocelot_ana.h"
@@ -23,6 +25,7 @@
#include "ocelot_sys.h"
#include "ocelot_qs.h"
#include "ocelot_tc.h"
+#include "ocelot_ptp.h"
#define PGID_AGGR 64
#define PGID_SRC 80
@@ -38,14 +41,17 @@
#define OCELOT_STATS_CHECK_DELAY (2 * HZ)
+#define OCELOT_PTP_QUEUE_SZ 128
+
#define IFH_LEN 4
struct frame_info {
u32 len;
u16 port;
u16 vid;
- u8 cpuq;
u8 tag_type;
+ u16 rew_op;
+ u32 timestamp; /* rew_val */
};
#define IFH_INJ_BYPASS BIT(31)
@@ -54,6 +60,12 @@ struct frame_info {
#define IFH_TAG_TYPE_C 0
#define IFH_TAG_TYPE_S 1
+#define IFH_REW_OP_NOOP 0x0
+#define IFH_REW_OP_DSCP 0x1
+#define IFH_REW_OP_ONE_STEP_PTP 0x2
+#define IFH_REW_OP_TWO_STEP_PTP 0x3
+#define IFH_REW_OP_ORIGIN_PTP 0x5
+
#define OCELOT_SPEED_2500 0
#define OCELOT_SPEED_1000 1
#define OCELOT_SPEED_100 2
@@ -71,6 +83,7 @@ enum ocelot_target {
SYS,
S2,
HSIO,
+ PTP,
TARGET_MAX,
};
@@ -343,6 +356,13 @@ enum ocelot_reg {
S2_CACHE_ACTION_DAT,
S2_CACHE_CNT_DAT,
S2_CACHE_TG_DAT,
+ PTP_PIN_CFG = PTP << TARGET_OFFSET,
+ PTP_PIN_TOD_SEC_MSB,
+ PTP_PIN_TOD_SEC_LSB,
+ PTP_PIN_TOD_NSEC,
+ PTP_CFG_MISC,
+ PTP_CLK_CFG_ADJ_CFG,
+ PTP_CLK_CFG_ADJ_FREQ,
};
enum ocelot_regfield {
@@ -393,6 +413,13 @@ enum ocelot_regfield {
REGFIELD_MAX
};
+enum ocelot_clk_pins {
+ ALT_PPS_PIN = 1,
+ EXT_CLK_PIN,
+ ALT_LDST_PIN,
+ TOD_ACC_PIN
+};
+
struct ocelot_multicast {
struct list_head list;
unsigned char addr[ETH_ALEN];
@@ -442,6 +469,13 @@ struct ocelot {
u64 *stats;
struct delayed_work stats_work;
struct workqueue_struct *stats_queue;
+
+ u8 ptp:1;
+ struct ptp_clock *ptp_clock;
+ struct ptp_clock_info ptp_info;
+ struct hwtstamp_config hwtstamp_config;
+ struct mutex ptp_lock; /* Protects the PTP interface state */
+ spinlock_t ptp_clock_lock; /* Protects the PTP clock */
};
struct ocelot_port {
@@ -465,6 +499,16 @@ struct ocelot_port {
struct phy *serdes;
struct ocelot_port_tc tc;
+
+ u8 ptp_cmd;
+ struct list_head skbs;
+ u8 ts_id;
+};
+
+struct ocelot_skb {
+ struct list_head head;
+ struct sk_buff *skb;
+ u8 id;
};
u32 __ocelot_read_ix(struct ocelot *ocelot, u32 reg, u32 offset);
@@ -509,4 +553,7 @@ extern struct notifier_block ocelot_netdevice_nb;
extern struct notifier_block ocelot_switchdev_nb;
extern struct notifier_block ocelot_switchdev_blocking_nb;
+int ocelot_ptp_gettime64(struct ptp_clock_info *ptp, struct timespec64 *ts);
+void ocelot_get_hwtimestamp(struct ocelot *ocelot, struct timespec64 *ts);
+
#endif
diff --git a/drivers/net/ethernet/mscc/ocelot_board.c b/drivers/net/ethernet/mscc/ocelot_board.c
index 2451d4a96490..b063eb78fa0c 100644
--- a/drivers/net/ethernet/mscc/ocelot_board.c
+++ b/drivers/net/ethernet/mscc/ocelot_board.c
@@ -16,24 +16,27 @@
#include "ocelot.h"
-static int ocelot_parse_ifh(u32 *ifh, struct frame_info *info)
+#define IFH_EXTRACT_BITFIELD64(x, o, w) (((x) >> (o)) & GENMASK_ULL((w) - 1, 0))
+
+static int ocelot_parse_ifh(u32 *_ifh, struct frame_info *info)
{
- int i;
u8 llen, wlen;
+ u64 ifh[2];
+
+ ifh[0] = be64_to_cpu(((__force __be64 *)_ifh)[0]);
+ ifh[1] = be64_to_cpu(((__force __be64 *)_ifh)[1]);
- /* The IFH is in network order, switch to CPU order */
- for (i = 0; i < IFH_LEN; i++)
- ifh[i] = ntohl((__force __be32)ifh[i]);
+ wlen = IFH_EXTRACT_BITFIELD64(ifh[0], 7, 8);
+ llen = IFH_EXTRACT_BITFIELD64(ifh[0], 15, 6);
- wlen = (ifh[1] >> 7) & 0xff;
- llen = (ifh[1] >> 15) & 0x3f;
info->len = OCELOT_BUFFER_CELL_SZ * wlen + llen - 80;
- info->port = (ifh[2] & GENMASK(14, 11)) >> 11;
+ info->timestamp = IFH_EXTRACT_BITFIELD64(ifh[0], 21, 32);
+
+ info->port = IFH_EXTRACT_BITFIELD64(ifh[1], 43, 4);
- info->cpuq = (ifh[3] & GENMASK(27, 20)) >> 20;
- info->tag_type = (ifh[3] & BIT(16)) >> 16;
- info->vid = ifh[3] & GENMASK(11, 0);
+ info->tag_type = IFH_EXTRACT_BITFIELD64(ifh[1], 16, 1);
+ info->vid = IFH_EXTRACT_BITFIELD64(ifh[1], 0, 12);
return 0;
}
@@ -91,13 +94,14 @@ static irqreturn_t ocelot_xtr_irq_handler(int irq, void *arg)
return IRQ_NONE;
do {
- struct sk_buff *skb;
+ struct skb_shared_hwtstamps *shhwtstamps;
+ u64 tod_in_ns, full_ts_in_ns;
+ struct frame_info info = {};
struct net_device *dev;
- u32 *buf;
+ u32 ifh[4], val, *buf;
+ struct timespec64 ts;
int sz, len, buf_len;
- u32 ifh[4];
- u32 val;
- struct frame_info info;
+ struct sk_buff *skb;
for (i = 0; i < IFH_LEN; i++) {
err = ocelot_rx_frame_word(ocelot, grp, true, &ifh[i]);
@@ -144,6 +148,22 @@ static irqreturn_t ocelot_xtr_irq_handler(int irq, void *arg)
break;
}
+ if (ocelot->ptp) {
+ ocelot_ptp_gettime64(&ocelot->ptp_info, &ts);
+
+ tod_in_ns = ktime_set(ts.tv_sec, ts.tv_nsec);
+ if ((tod_in_ns & 0xffffffff) < info.timestamp)
+ full_ts_in_ns = (((tod_in_ns >> 32) - 1) << 32) |
+ info.timestamp;
+ else
+ full_ts_in_ns = (tod_in_ns & GENMASK_ULL(63, 32)) |
+ info.timestamp;
+
+ shhwtstamps = skb_hwtstamps(skb);
+ memset(shhwtstamps, 0, sizeof(struct skb_shared_hwtstamps));
+ shhwtstamps->hwtstamp = full_ts_in_ns;
+ }
+
/* Everything we see on an interface that is in the HW bridge
* has already been forwarded.
*/
@@ -163,6 +183,66 @@ static irqreturn_t ocelot_xtr_irq_handler(int irq, void *arg)
return IRQ_HANDLED;
}
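The hardware delivers only the low 32 bits of the nanosecond time in the extraction header; the handler above reconstructs a full timestamp by borrowing the upper bits from the current time of day and stepping back one carry when the low half has wrapped since the frame was stamped. An equivalent helper, as a sketch (hypothetical name):

    static u64 ocelot_extend_ts32(u64 tod_in_ns, u32 stamp)
    {
            u64 hi = tod_in_ns & GENMASK_ULL(63, 32);

            if ((u32)tod_in_ns < stamp)     /* low half wrapped after stamping */
                    hi -= BIT_ULL(32);
            return hi | stamp;
    }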
+static irqreturn_t ocelot_ptp_rdy_irq_handler(int irq, void *arg)
+{
+ int budget = OCELOT_PTP_QUEUE_SZ;
+ struct ocelot *ocelot = arg;
+
+ while (budget--) {
+ struct skb_shared_hwtstamps shhwtstamps;
+ struct list_head *pos, *tmp;
+ struct sk_buff *skb = NULL;
+ struct ocelot_skb *entry;
+ struct ocelot_port *port;
+ struct timespec64 ts;
+ u32 val, id, txport;
+
+ val = ocelot_read(ocelot, SYS_PTP_STATUS);
+
+ /* Check if a timestamp can be retrieved */
+ if (!(val & SYS_PTP_STATUS_PTP_MESS_VLD))
+ break;
+
+ WARN_ON(val & SYS_PTP_STATUS_PTP_OVFL);
+
+ /* Retrieve the ts ID and Tx port */
+ id = SYS_PTP_STATUS_PTP_MESS_ID_X(val);
+ txport = SYS_PTP_STATUS_PTP_MESS_TXPORT_X(val);
+
+ /* Retrieve its associated skb */
+ port = ocelot->ports[txport];
+
+ list_for_each_safe(pos, tmp, &port->skbs) {
+ entry = list_entry(pos, struct ocelot_skb, head);
+ if (entry->id != id)
+ continue;
+
+ skb = entry->skb;
+
+ list_del(pos);
+ kfree(entry);
+ }
+
+ /* Next ts */
+ ocelot_write(ocelot, SYS_PTP_NXT_PTP_NXT, SYS_PTP_NXT);
+
+ if (unlikely(!skb))
+ continue;
+
+ /* Get the h/w timestamp */
+ ocelot_get_hwtimestamp(ocelot, &ts);
+
+ /* Set the timestamp into the skb */
+ memset(&shhwtstamps, 0, sizeof(shhwtstamps));
+ shhwtstamps.hwtstamp = ktime_set(ts.tv_sec, ts.tv_nsec);
+ skb_tstamp_tx(skb, &shhwtstamps);
+
+ dev_kfree_skb_any(skb);
+ }
+
+ return IRQ_HANDLED;
+}
+
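[Editor's note] The enqueue side that pairs with this handler is not part of the hunk; presumably the TX path saves each timestamp-requesting skb together with the ts id that the hardware will later report in SYS_PTP_STATUS_PTP_MESS_ID. A hedged sketch of that bookkeeping (everything beyond struct ocelot_skb's fields is an assumption):

  struct ocelot_skb *entry = kmalloc(sizeof(*entry), GFP_ATOMIC);

  if (entry) {
  	entry->skb = skb;   /* matched and freed by the handler above */
  	entry->id = ts_id;  /* id programmed into the TX injection header */
  	list_add_tail(&entry->head, &port->skbs);
  }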
static const struct of_device_id mscc_ocelot_match[] = {
{ .compatible = "mscc,vsc7514-switch" },
{ }
@@ -171,17 +251,18 @@ MODULE_DEVICE_TABLE(of, mscc_ocelot_match);
static int mscc_ocelot_probe(struct platform_device *pdev)
{
- int err, irq;
- unsigned int i;
struct device_node *np = pdev->dev.of_node;
struct device_node *ports, *portnp;
+ int err, irq_xtr, irq_ptp_rdy;
struct ocelot *ocelot;
struct regmap *hsio;
+ unsigned int i;
u32 val;
struct {
enum ocelot_target id;
char *name;
+ u8 optional:1;
} res[] = {
{ SYS, "sys" },
{ REW, "rew" },
@@ -189,6 +270,7 @@ static int mscc_ocelot_probe(struct platform_device *pdev)
{ ANA, "ana" },
{ QS, "qs" },
{ S2, "s2" },
+ { PTP, "ptp", 1 },
};
if (!np && !pdev->dev.platform_data)
@@ -205,8 +287,14 @@ static int mscc_ocelot_probe(struct platform_device *pdev)
struct regmap *target;
target = ocelot_io_platform_init(ocelot, pdev, res[i].name);
- if (IS_ERR(target))
+ if (IS_ERR(target)) {
+ if (res[i].optional) {
+ ocelot->targets[res[i].id] = NULL;
+ continue;
+ }
+
return PTR_ERR(target);
+ }
ocelot->targets[res[i].id] = target;
}
@@ -223,16 +311,29 @@ static int mscc_ocelot_probe(struct platform_device *pdev)
if (err)
return err;
- irq = platform_get_irq_byname(pdev, "xtr");
- if (irq < 0)
+ irq_xtr = platform_get_irq_byname(pdev, "xtr");
+ if (irq_xtr < 0)
return -ENODEV;
- err = devm_request_threaded_irq(&pdev->dev, irq, NULL,
+ err = devm_request_threaded_irq(&pdev->dev, irq_xtr, NULL,
ocelot_xtr_irq_handler, IRQF_ONESHOT,
"frame extraction", ocelot);
if (err)
return err;
+ irq_ptp_rdy = platform_get_irq_byname(pdev, "ptp_rdy");
+ if (irq_ptp_rdy > 0 && ocelot->targets[PTP]) {
+ err = devm_request_threaded_irq(&pdev->dev, irq_ptp_rdy, NULL,
+ ocelot_ptp_rdy_irq_handler,
+ IRQF_ONESHOT, "ptp ready",
+ ocelot);
+ if (err)
+ return err;
+
+ /* Both the PTP interrupt and the PTP bank are available */
+ ocelot->ptp = 1;
+ }
+
regmap_field_write(ocelot->regfields[SYS_RESET_CFG_MEM_INIT], 1);
regmap_field_write(ocelot->regfields[SYS_RESET_CFG_MEM_ENA], 1);
diff --git a/drivers/net/ethernet/mscc/ocelot_ptp.h b/drivers/net/ethernet/mscc/ocelot_ptp.h
new file mode 100644
index 000000000000..9ede14a12573
--- /dev/null
+++ b/drivers/net/ethernet/mscc/ocelot_ptp.h
@@ -0,0 +1,41 @@
+/* SPDX-License-Identifier: (GPL-2.0 OR MIT) */
+/*
+ * Microsemi Ocelot Switch driver
+ *
+ * License: Dual MIT/GPL
+ * Copyright (c) 2017 Microsemi Corporation
+ */
+
+#ifndef _MSCC_OCELOT_PTP_H_
+#define _MSCC_OCELOT_PTP_H_
+
+#define PTP_PIN_CFG_RSZ 0x20
+#define PTP_PIN_TOD_SEC_MSB_RSZ PTP_PIN_CFG_RSZ
+#define PTP_PIN_TOD_SEC_LSB_RSZ PTP_PIN_CFG_RSZ
+#define PTP_PIN_TOD_NSEC_RSZ PTP_PIN_CFG_RSZ
+
+#define PTP_PIN_CFG_DOM BIT(0)
+#define PTP_PIN_CFG_SYNC BIT(2)
+#define PTP_PIN_CFG_ACTION(x) ((x) << 3)
+#define PTP_PIN_CFG_ACTION_MASK PTP_PIN_CFG_ACTION(0x7)
+
+enum {
+ PTP_PIN_ACTION_IDLE = 0,
+ PTP_PIN_ACTION_LOAD,
+ PTP_PIN_ACTION_SAVE,
+ PTP_PIN_ACTION_CLOCK,
+ PTP_PIN_ACTION_DELTA,
+ PTP_PIN_ACTION_NOSYNC,
+ PTP_PIN_ACTION_SYNC,
+};
+
+#define PTP_CFG_MISC_PTP_EN BIT(2)
+
+#define PSEC_PER_SEC 1000000000000LL
+
+#define PTP_CFG_CLK_ADJ_CFG_ENA BIT(0)
+#define PTP_CFG_CLK_ADJ_CFG_DIR BIT(1)
+
+#define PTP_CFG_CLK_ADJ_FREQ_NS BIT(30)
+
+#endif
diff --git a/drivers/net/ethernet/mscc/ocelot_regs.c b/drivers/net/ethernet/mscc/ocelot_regs.c
index 6c387f994ec5..e59977d20400 100644
--- a/drivers/net/ethernet/mscc/ocelot_regs.c
+++ b/drivers/net/ethernet/mscc/ocelot_regs.c
@@ -234,6 +234,16 @@ static const u32 ocelot_s2_regmap[] = {
REG(S2_CACHE_TG_DAT, 0x000388),
};
+static const u32 ocelot_ptp_regmap[] = {
+ REG(PTP_PIN_CFG, 0x000000),
+ REG(PTP_PIN_TOD_SEC_MSB, 0x000004),
+ REG(PTP_PIN_TOD_SEC_LSB, 0x000008),
+ REG(PTP_PIN_TOD_NSEC, 0x00000c),
+ REG(PTP_CFG_MISC, 0x0000a0),
+ REG(PTP_CLK_CFG_ADJ_CFG, 0x0000a4),
+ REG(PTP_CLK_CFG_ADJ_FREQ, 0x0000a8),
+};
+
static const u32 *ocelot_regmap[] = {
[ANA] = ocelot_ana_regmap,
[QS] = ocelot_qs_regmap,
@@ -241,6 +251,7 @@ static const u32 *ocelot_regmap[] = {
[REW] = ocelot_rew_regmap,
[SYS] = ocelot_sys_regmap,
[S2] = ocelot_s2_regmap,
+ [PTP] = ocelot_ptp_regmap,
};
static const struct reg_field ocelot_regfields[] = {
diff --git a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
index 337b0cbfd153..c979f38a2e0c 100644
--- a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
+++ b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
@@ -1286,7 +1286,7 @@ myri10ge_vlan_rx(struct net_device *dev, void *addr, struct sk_buff *skb)
{
u8 *va;
struct vlan_ethhdr *veh;
- struct skb_frag_struct *frag;
+ skb_frag_t *frag;
__wsum vsum;
va = addr;
@@ -1306,8 +1306,8 @@ myri10ge_vlan_rx(struct net_device *dev, void *addr, struct sk_buff *skb)
skb->len -= VLAN_HLEN;
skb->data_len -= VLAN_HLEN;
frag = skb_shinfo(skb)->frags;
- frag->page_offset += VLAN_HLEN;
- skb_frag_size_set(frag, skb_frag_size(frag) - VLAN_HLEN);
+ skb_frag_off_add(frag, VLAN_HLEN);
+ skb_frag_size_sub(frag, VLAN_HLEN);
}
}
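[Editor's note] These conversions follow from skb_frag_t being turned into an alias of struct bio_vec in this series, which retires open-coded page_offset/size arithmetic in favour of accessors. Assuming the bio_vec-backed layout, the two helpers used here reduce to:

  static inline void skb_frag_off_add(skb_frag_t *frag, int delta)
  {
  	frag->bv_offset += delta;   /* was: frag->page_offset += delta */
  }

  static inline void skb_frag_size_sub(skb_frag_t *frag, int delta)
  {
  	frag->bv_len -= delta;      /* was: frag->size -= delta */
  }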
@@ -1318,7 +1318,7 @@ myri10ge_rx_done(struct myri10ge_slice_state *ss, int len, __wsum csum)
{
struct myri10ge_priv *mgp = ss->mgp;
struct sk_buff *skb;
- struct skb_frag_struct *rx_frags;
+ skb_frag_t *rx_frags;
struct myri10ge_rx_buf *rx;
int i, idx, remainder, bytes;
struct pci_dev *pdev = mgp->pdev;
@@ -1351,7 +1351,7 @@ myri10ge_rx_done(struct myri10ge_slice_state *ss, int len, __wsum csum)
return 0;
}
rx_frags = skb_shinfo(skb)->frags;
- /* Fill skb_frag_struct(s) with data from our receive */
+ /* Fill skb_frag_t(s) with data from our receive */
for (i = 0, remainder = len; remainder > 0; i++) {
myri10ge_unmap_rx_page(pdev, &rx->info[idx], bytes);
skb_fill_page_desc(skb, i, rx->info[idx].page,
@@ -1364,8 +1364,8 @@ myri10ge_rx_done(struct myri10ge_slice_state *ss, int len, __wsum csum)
}
/* remove padding */
- rx_frags[0].page_offset += MXGEFW_PAD;
- rx_frags[0].size -= MXGEFW_PAD;
+ skb_frag_off_add(&rx_frags[0], MXGEFW_PAD);
+ skb_frag_size_sub(&rx_frags[0], MXGEFW_PAD);
len -= MXGEFW_PAD;
skb->len = len;
@@ -2628,7 +2628,7 @@ static netdev_tx_t myri10ge_xmit(struct sk_buff *skb,
struct myri10ge_slice_state *ss;
struct mcp_kreq_ether_send *req;
struct myri10ge_tx_buf *tx;
- struct skb_frag_struct *frag;
+ skb_frag_t *frag;
struct netdev_queue *netdev_queue;
dma_addr_t bus;
u32 low;
@@ -3037,7 +3037,6 @@ static int myri10ge_set_mac_address(struct net_device *dev, void *addr)
static int myri10ge_change_mtu(struct net_device *dev, int new_mtu)
{
struct myri10ge_priv *mgp = netdev_priv(dev);
- int error = 0;
netdev_info(dev, "changing mtu from %d to %d\n", dev->mtu, new_mtu);
if (mgp->running) {
@@ -3049,7 +3048,7 @@ static int myri10ge_change_mtu(struct net_device *dev, int new_mtu)
} else
dev->mtu = new_mtu;
- return error;
+ return 0;
}
/*
diff --git a/drivers/net/ethernet/netronome/nfp/flower/action.c b/drivers/net/ethernet/netronome/nfp/flower/action.c
index 5a54fe848de4..1b019fdfcd97 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/action.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/action.c
@@ -2,10 +2,12 @@
/* Copyright (C) 2017-2018 Netronome Systems, Inc. */
#include <linux/bitfield.h>
+#include <linux/mpls.h>
#include <net/pkt_cls.h>
#include <net/tc_act/tc_csum.h>
#include <net/tc_act/tc_gact.h>
#include <net/tc_act/tc_mirred.h>
+#include <net/tc_act/tc_mpls.h>
#include <net/tc_act/tc_pedit.h>
#include <net/tc_act/tc_vlan.h>
#include <net/tc_act/tc_tunnel_key.h>
@@ -25,6 +27,80 @@
NFP_FL_TUNNEL_KEY | \
NFP_FL_TUNNEL_GENEVE_OPT)
+static int
+nfp_fl_push_mpls(struct nfp_fl_push_mpls *push_mpls,
+ const struct flow_action_entry *act,
+ struct netlink_ext_ack *extack)
+{
+ size_t act_size = sizeof(struct nfp_fl_push_mpls);
+ u32 mpls_lse = 0;
+
+ push_mpls->head.jump_id = NFP_FL_ACTION_OPCODE_PUSH_MPLS;
+ push_mpls->head.len_lw = act_size >> NFP_FL_LW_SIZ;
+
+ /* BOS is optional in the TC action but required for offload. */
+ if (act->mpls_push.bos != ACT_MPLS_BOS_NOT_SET) {
+ mpls_lse |= act->mpls_push.bos << MPLS_LS_S_SHIFT;
+ } else {
+ NL_SET_ERR_MSG_MOD(extack, "unsupported offload: BOS field must explicitly be set for MPLS push");
+ return -EOPNOTSUPP;
+ }
+
+ /* Leave the MPLS TC at its default of 0 if not explicitly set. */
+ if (act->mpls_push.tc != ACT_MPLS_TC_NOT_SET)
+ mpls_lse |= act->mpls_push.tc << MPLS_LS_TC_SHIFT;
+
+ /* Proto, label and TTL are enforced and verified for MPLS push. */
+ mpls_lse |= act->mpls_push.label << MPLS_LS_LABEL_SHIFT;
+ mpls_lse |= act->mpls_push.ttl << MPLS_LS_TTL_SHIFT;
+ push_mpls->ethtype = act->mpls_push.proto;
+ push_mpls->lse = cpu_to_be32(mpls_lse);
+
+ return 0;
+}
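[Editor's note] The LSE assembled here is the standard RFC 3032 label stack entry; the shift constants come from <linux/mpls.h> (label at bit 12, TC at bit 9, bottom-of-stack at bit 8, TTL at bit 0). For example, label 100 with BOS set and TTL 64:

  u32 lse = (100 << MPLS_LS_LABEL_SHIFT) |  /* bits [31:12] */
  	    (1 << MPLS_LS_S_SHIFT) |        /* bit 8        */
  	    (64 << MPLS_LS_TTL_SHIFT);      /* bits [7:0]   */
  /* lse == 0x00064140 */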
+
+static void
+nfp_fl_pop_mpls(struct nfp_fl_pop_mpls *pop_mpls,
+ const struct flow_action_entry *act)
+{
+ size_t act_size = sizeof(struct nfp_fl_pop_mpls);
+
+ pop_mpls->head.jump_id = NFP_FL_ACTION_OPCODE_POP_MPLS;
+ pop_mpls->head.len_lw = act_size >> NFP_FL_LW_SIZ;
+ pop_mpls->ethtype = act->mpls_pop.proto;
+}
+
+static void
+nfp_fl_set_mpls(struct nfp_fl_set_mpls *set_mpls,
+ const struct flow_action_entry *act)
+{
+ size_t act_size = sizeof(struct nfp_fl_set_mpls);
+ u32 mpls_lse = 0, mpls_mask = 0;
+
+ set_mpls->head.jump_id = NFP_FL_ACTION_OPCODE_SET_MPLS;
+ set_mpls->head.len_lw = act_size >> NFP_FL_LW_SIZ;
+
+ if (act->mpls_mangle.label != ACT_MPLS_LABEL_NOT_SET) {
+ mpls_lse |= act->mpls_mangle.label << MPLS_LS_LABEL_SHIFT;
+ mpls_mask |= MPLS_LS_LABEL_MASK;
+ }
+ if (act->mpls_mangle.tc != ACT_MPLS_TC_NOT_SET) {
+ mpls_lse |= act->mpls_mangle.tc << MPLS_LS_TC_SHIFT;
+ mpls_mask |= MPLS_LS_TC_MASK;
+ }
+ if (act->mpls_mangle.bos != ACT_MPLS_BOS_NOT_SET) {
+ mpls_lse |= act->mpls_mangle.bos << MPLS_LS_S_SHIFT;
+ mpls_mask |= MPLS_LS_S_MASK;
+ }
+ if (act->mpls_mangle.ttl) {
+ mpls_lse |= act->mpls_mangle.ttl << MPLS_LS_TTL_SHIFT;
+ mpls_mask |= MPLS_LS_TTL_MASK;
+ }
+
+ set_mpls->lse = cpu_to_be32(mpls_lse);
+ set_mpls->lse_mask = cpu_to_be32(mpls_mask);
+}
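[Editor's note] The lse/lse_mask pair presumably lets the firmware rewrite only the fields the TC rule actually mangles; the assumed datapath semantics, in C terms:

  /* Assumed firmware-side application of the masked rewrite: */
  new_lse = (old_lse & ~lse_mask) | (lse & lse_mask);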
+
static void nfp_fl_pop_vlan(struct nfp_fl_pop_vlan *pop_vlan)
{
size_t act_size = sizeof(struct nfp_fl_pop_vlan);
@@ -97,7 +173,7 @@ nfp_fl_output(struct nfp_app *app, struct nfp_fl_output *output,
struct nfp_fl_payload *nfp_flow,
bool last, struct net_device *in_dev,
enum nfp_flower_tun_type tun_type, int *tun_out_cnt,
- struct netlink_ext_ack *extack)
+ bool pkt_host, struct netlink_ext_ack *extack)
{
size_t act_size = sizeof(struct nfp_fl_output);
struct nfp_flower_priv *priv = app->priv;
@@ -142,6 +218,20 @@ nfp_fl_output(struct nfp_app *app, struct nfp_fl_output *output,
return gid;
}
output->port = cpu_to_be32(NFP_FL_LAG_OUT | gid);
+ } else if (nfp_flower_internal_port_can_offload(app, out_dev)) {
+ if (!(priv->flower_ext_feats & NFP_FL_FEATS_PRE_TUN_RULES)) {
+ NL_SET_ERR_MSG_MOD(extack, "unsupported offload: pre-tunnel rules not supported in loaded firmware");
+ return -EOPNOTSUPP;
+ }
+
+ if (nfp_flow->pre_tun_rule.dev || !pkt_host) {
+ NL_SET_ERR_MSG_MOD(extack, "unsupported offload: pre-tunnel rules require single egress dev and ptype HOST action");
+ return -EOPNOTSUPP;
+ }
+
+ nfp_flow->pre_tun_rule.dev = out_dev;
+
+ return 0;
} else {
/* Set action output parameters. */
output->flags = cpu_to_be16(tmp_flags);
@@ -809,7 +899,7 @@ nfp_flower_output_action(struct nfp_app *app,
struct nfp_fl_payload *nfp_fl, int *a_len,
struct net_device *netdev, bool last,
enum nfp_flower_tun_type *tun_type, int *tun_out_cnt,
- int *out_cnt, u32 *csum_updated,
+ int *out_cnt, u32 *csum_updated, bool pkt_host,
struct netlink_ext_ack *extack)
{
struct nfp_flower_priv *priv = app->priv;
@@ -831,7 +921,7 @@ nfp_flower_output_action(struct nfp_app *app,
output = (struct nfp_fl_output *)&nfp_fl->action_data[*a_len];
err = nfp_fl_output(app, output, act, nfp_fl, last, netdev, *tun_type,
- tun_out_cnt, extack);
+ tun_out_cnt, pkt_host, extack);
if (err)
return err;
@@ -863,30 +953,37 @@ nfp_flower_loop_action(struct nfp_app *app, const struct flow_action_entry *act,
struct net_device *netdev,
enum nfp_flower_tun_type *tun_type, int *tun_out_cnt,
int *out_cnt, u32 *csum_updated,
- struct nfp_flower_pedit_acts *set_act,
+ struct nfp_flower_pedit_acts *set_act, bool *pkt_host,
struct netlink_ext_ack *extack, int act_idx)
{
struct nfp_fl_set_ipv4_tun *set_tun;
struct nfp_fl_pre_tunnel *pre_tun;
struct nfp_fl_push_vlan *psh_v;
+ struct nfp_fl_push_mpls *psh_m;
struct nfp_fl_pop_vlan *pop_v;
+ struct nfp_fl_pop_mpls *pop_m;
+ struct nfp_fl_set_mpls *set_m;
int err;
switch (act->id) {
case FLOW_ACTION_DROP:
nfp_fl->meta.shortcut = cpu_to_be32(NFP_FL_SC_ACT_DROP);
break;
+ case FLOW_ACTION_REDIRECT_INGRESS:
case FLOW_ACTION_REDIRECT:
err = nfp_flower_output_action(app, act, nfp_fl, a_len, netdev,
true, tun_type, tun_out_cnt,
- out_cnt, csum_updated, extack);
+ out_cnt, csum_updated, *pkt_host,
+ extack);
if (err)
return err;
break;
+ case FLOW_ACTION_MIRRED_INGRESS:
case FLOW_ACTION_MIRRED:
err = nfp_flower_output_action(app, act, nfp_fl, a_len, netdev,
false, tun_type, tun_out_cnt,
- out_cnt, csum_updated, extack);
+ out_cnt, csum_updated, *pkt_host,
+ extack);
if (err)
return err;
break;
@@ -975,6 +1072,54 @@ nfp_flower_loop_action(struct nfp_app *app, const struct flow_action_entry *act,
*/
*csum_updated &= ~act->csum_flags;
break;
+ case FLOW_ACTION_MPLS_PUSH:
+ if (*a_len +
+ sizeof(struct nfp_fl_push_mpls) > NFP_FL_MAX_A_SIZ) {
+ NL_SET_ERR_MSG_MOD(extack, "unsupported offload: maximum allowed action list size exceeded at push MPLS");
+ return -EOPNOTSUPP;
+ }
+
+ psh_m = (struct nfp_fl_push_mpls *)&nfp_fl->action_data[*a_len];
+ nfp_fl->meta.shortcut = cpu_to_be32(NFP_FL_SC_ACT_NULL);
+
+ err = nfp_fl_push_mpls(psh_m, act, extack);
+ if (err)
+ return err;
+ *a_len += sizeof(struct nfp_fl_push_mpls);
+ break;
+ case FLOW_ACTION_MPLS_POP:
+ if (*a_len +
+ sizeof(struct nfp_fl_pop_mpls) > NFP_FL_MAX_A_SIZ) {
+ NL_SET_ERR_MSG_MOD(extack, "unsupported offload: maximum allowed action list size exceeded at pop MPLS");
+ return -EOPNOTSUPP;
+ }
+
+ pop_m = (struct nfp_fl_pop_mpls *)&nfp_fl->action_data[*a_len];
+ nfp_fl->meta.shortcut = cpu_to_be32(NFP_FL_SC_ACT_NULL);
+
+ nfp_fl_pop_mpls(pop_m, act);
+ *a_len += sizeof(struct nfp_fl_pop_mpls);
+ break;
+ case FLOW_ACTION_MPLS_MANGLE:
+ if (*a_len +
+ sizeof(struct nfp_fl_set_mpls) > NFP_FL_MAX_A_SIZ) {
+ NL_SET_ERR_MSG_MOD(extack, "unsupported offload: maximum allowed action list size exceeded at set MPLS");
+ return -EOPNOTSUPP;
+ }
+
+ set_m = (struct nfp_fl_set_mpls *)&nfp_fl->action_data[*a_len];
+ nfp_fl->meta.shortcut = cpu_to_be32(NFP_FL_SC_ACT_NULL);
+
+ nfp_fl_set_mpls(set_m, act);
+ *a_len += sizeof(struct nfp_fl_set_mpls);
+ break;
+ case FLOW_ACTION_PTYPE:
+ /* TC ptype skbedit sets PACKET_HOST for ingress redirect. */
+ if (act->ptype != PACKET_HOST)
+ return -EOPNOTSUPP;
+
+ *pkt_host = true;
+ break;
default:
/* Currently we do not handle any other actions. */
NL_SET_ERR_MSG_MOD(extack, "unsupported offload: unsupported action in action list");
@@ -1030,6 +1175,7 @@ int nfp_flower_compile_action(struct nfp_app *app,
struct nfp_flower_pedit_acts set_act;
enum nfp_flower_tun_type tun_type;
struct flow_action_entry *act;
+ bool pkt_host = false;
u32 csum_updated = 0;
memset(nfp_flow->action_data, 0, NFP_FL_MAX_A_SIZ);
@@ -1046,7 +1192,7 @@ int nfp_flower_compile_action(struct nfp_app *app,
err = nfp_flower_loop_action(app, act, flow, nfp_flow, &act_len,
netdev, &tun_type, &tun_out_cnt,
&out_cnt, &csum_updated,
- &set_act, extack, i);
+ &set_act, &pkt_host, extack, i);
if (err)
return err;
act_cnt++;
diff --git a/drivers/net/ethernet/netronome/nfp/flower/cmsg.h b/drivers/net/ethernet/netronome/nfp/flower/cmsg.h
index 0f1706ae5bfc..7eb2ec8969c3 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/cmsg.h
+++ b/drivers/net/ethernet/netronome/nfp/flower/cmsg.h
@@ -68,8 +68,11 @@
#define NFP_FL_ACTION_OPCODE_OUTPUT 0
#define NFP_FL_ACTION_OPCODE_PUSH_VLAN 1
#define NFP_FL_ACTION_OPCODE_POP_VLAN 2
+#define NFP_FL_ACTION_OPCODE_PUSH_MPLS 3
+#define NFP_FL_ACTION_OPCODE_POP_MPLS 4
#define NFP_FL_ACTION_OPCODE_SET_IPV4_TUNNEL 6
#define NFP_FL_ACTION_OPCODE_SET_ETHERNET 7
+#define NFP_FL_ACTION_OPCODE_SET_MPLS 8
#define NFP_FL_ACTION_OPCODE_SET_IPV4_ADDRS 9
#define NFP_FL_ACTION_OPCODE_SET_IPV4_TTL_TOS 10
#define NFP_FL_ACTION_OPCODE_SET_IPV6_SRC 11
@@ -217,7 +220,8 @@ struct nfp_fl_set_ipv4_tun {
__be16 tun_flags;
u8 ttl;
u8 tos;
- __be32 extra;
+ __be16 outer_vlan_tpid;
+ __be16 outer_vlan_tci;
u8 tun_len;
u8 res2;
__be16 tun_proto;
@@ -232,6 +236,24 @@ struct nfp_fl_push_geneve {
u8 opt_data[];
};
+struct nfp_fl_push_mpls {
+ struct nfp_fl_act_head head;
+ __be16 ethtype;
+ __be32 lse;
+};
+
+struct nfp_fl_pop_mpls {
+ struct nfp_fl_act_head head;
+ __be16 ethtype;
+};
+
+struct nfp_fl_set_mpls {
+ struct nfp_fl_act_head head;
+ __be16 reserved;
+ __be32 lse_mask;
+ __be32 lse;
+};
+
/* Metadata with L2 (1W/4B)
* ----------------------------------------------------------------
* 3 2 1
@@ -462,6 +484,7 @@ enum nfp_flower_cmsg_type_port {
NFP_FLOWER_CMSG_TYPE_QOS_MOD = 18,
NFP_FLOWER_CMSG_TYPE_QOS_DEL = 19,
NFP_FLOWER_CMSG_TYPE_QOS_STATS = 20,
+ NFP_FLOWER_CMSG_TYPE_PRE_TUN_RULE = 21,
NFP_FLOWER_CMSG_TYPE_MAX = 32,
};
diff --git a/drivers/net/ethernet/netronome/nfp/flower/main.c b/drivers/net/ethernet/netronome/nfp/flower/main.c
index eb846133943b..7a20447cca19 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/main.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/main.c
@@ -781,6 +781,7 @@ static int nfp_flower_init(struct nfp_app *app)
INIT_LIST_HEAD(&app_priv->indr_block_cb_priv);
INIT_LIST_HEAD(&app_priv->non_repr_priv);
+ app_priv->pre_tun_rule_cnt = 0;
return 0;
diff --git a/drivers/net/ethernet/netronome/nfp/flower/main.h b/drivers/net/ethernet/netronome/nfp/flower/main.h
index af9441d5787f..31d94592a7c0 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/main.h
+++ b/drivers/net/ethernet/netronome/nfp/flower/main.h
@@ -42,6 +42,7 @@ struct nfp_app;
#define NFP_FL_FEATS_VLAN_PCP BIT(3)
#define NFP_FL_FEATS_VF_RLIM BIT(4)
#define NFP_FL_FEATS_FLOW_MOD BIT(5)
+#define NFP_FL_FEATS_PRE_TUN_RULES BIT(6)
#define NFP_FL_FEATS_FLOW_MERGE BIT(30)
#define NFP_FL_FEATS_LAG BIT(31)
@@ -162,6 +163,7 @@ struct nfp_fl_internal_ports {
* @qos_stats_work: Workqueue for qos stats processing
* @qos_rate_limiters: Current active qos rate limiters
* @qos_stats_lock: Lock on qos stats updates
+ * @pre_tun_rule_cnt: Number of pre-tunnel rules offloaded
*/
struct nfp_flower_priv {
struct nfp_app *app;
@@ -193,6 +195,7 @@ struct nfp_flower_priv {
struct delayed_work qos_stats_work;
unsigned int qos_rate_limiters;
spinlock_t qos_stats_lock; /* Protect the qos stats */
+ int pre_tun_rule_cnt;
};
/**
@@ -218,6 +221,7 @@ struct nfp_fl_qos {
* @block_shared: Flag indicating if offload applies to shared blocks
* @mac_list: List entry of reprs that share the same offloaded MAC
* @qos_table: Stored info on filters implementing qos
+ * @on_bridge: Indicates if the repr is attached to a bridge
*/
struct nfp_flower_repr_priv {
struct nfp_repr *nfp_repr;
@@ -227,6 +231,7 @@ struct nfp_flower_repr_priv {
bool block_shared;
struct list_head mac_list;
struct nfp_fl_qos qos_table;
+ bool on_bridge;
};
/**
@@ -280,6 +285,11 @@ struct nfp_fl_payload {
char *action_data;
struct list_head linked_flows;
bool in_hw;
+ struct {
+ struct net_device *dev;
+ __be16 vlan_tci;
+ __be16 port_idx;
+ } pre_tun_rule;
};
struct nfp_fl_payload_link {
@@ -333,6 +343,11 @@ static inline bool nfp_flower_is_merge_flow(struct nfp_fl_payload *flow_pay)
return flow_pay->tc_flower_cookie == (unsigned long)flow_pay;
}
+static inline bool nfp_flower_is_supported_bridge(struct net_device *netdev)
+{
+ return netif_is_ovs_master(netdev);
+}
+
int nfp_flower_metadata_init(struct nfp_app *app, u64 host_ctx_count,
unsigned int host_ctx_split);
void nfp_flower_metadata_cleanup(struct nfp_app *app);
@@ -415,4 +430,8 @@ void
nfp_flower_non_repr_priv_put(struct nfp_app *app, struct net_device *netdev);
u32 nfp_flower_get_port_id_from_netdev(struct nfp_app *app,
struct net_device *netdev);
+int nfp_flower_xmit_pre_tun_flow(struct nfp_app *app,
+ struct nfp_fl_payload *flow);
+int nfp_flower_xmit_pre_tun_del_flow(struct nfp_app *app,
+ struct nfp_fl_payload *flow);
#endif
diff --git a/drivers/net/ethernet/netronome/nfp/flower/offload.c b/drivers/net/ethernet/netronome/nfp/flower/offload.c
index 9917d64694c6..b0708460e342 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/offload.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/offload.c
@@ -61,6 +61,11 @@
NFP_FLOWER_LAYER_IPV4 | \
NFP_FLOWER_LAYER_IPV6)
+#define NFP_FLOWER_PRE_TUN_RULE_FIELDS \
+ (NFP_FLOWER_LAYER_PORT | \
+ NFP_FLOWER_LAYER_MAC | \
+ NFP_FLOWER_LAYER_IPV4)
+
struct nfp_flower_merge_check {
union {
struct {
@@ -489,6 +494,7 @@ nfp_flower_allocate_new(struct nfp_fl_key_ls *key_layer)
flow_pay->meta.flags = 0;
INIT_LIST_HEAD(&flow_pay->linked_flows);
flow_pay->in_hw = false;
+ flow_pay->pre_tun_rule.dev = NULL;
return flow_pay;
@@ -732,28 +738,62 @@ nfp_flower_copy_pre_actions(char *act_dst, char *act_src, int len,
return act_off;
}
-static int nfp_fl_verify_post_tun_acts(char *acts, int len)
+static int
+nfp_fl_verify_post_tun_acts(char *acts, int len, struct nfp_fl_push_vlan **vlan)
{
struct nfp_fl_act_head *a;
unsigned int act_off = 0;
while (act_off < len) {
a = (struct nfp_fl_act_head *)&acts[act_off];
- if (a->jump_id != NFP_FL_ACTION_OPCODE_OUTPUT)
+
+ if (a->jump_id == NFP_FL_ACTION_OPCODE_PUSH_VLAN && !act_off)
+ *vlan = (struct nfp_fl_push_vlan *)a;
+ else if (a->jump_id != NFP_FL_ACTION_OPCODE_OUTPUT)
return -EOPNOTSUPP;
act_off += a->len_lw << NFP_FL_LW_SIZ;
}
+ /* Ensure any VLAN push also has an egress action. */
+ if (*vlan && act_off <= sizeof(struct nfp_fl_push_vlan))
+ return -EOPNOTSUPP;
+
return 0;
}
static int
+nfp_fl_push_vlan_after_tun(char *acts, int len, struct nfp_fl_push_vlan *vlan)
+{
+ struct nfp_fl_set_ipv4_tun *tun;
+ struct nfp_fl_act_head *a;
+ unsigned int act_off = 0;
+
+ while (act_off < len) {
+ a = (struct nfp_fl_act_head *)&acts[act_off];
+
+ if (a->jump_id == NFP_FL_ACTION_OPCODE_SET_IPV4_TUNNEL) {
+ tun = (struct nfp_fl_set_ipv4_tun *)a;
+ tun->outer_vlan_tpid = vlan->vlan_tpid;
+ tun->outer_vlan_tci = vlan->vlan_tci;
+
+ return 0;
+ }
+
+ act_off += a->len_lw << NFP_FL_LW_SIZ;
+ }
+
+ /* Return error if no tunnel action is found. */
+ return -EOPNOTSUPP;
+}
+
+static int
nfp_flower_merge_action(struct nfp_fl_payload *sub_flow1,
struct nfp_fl_payload *sub_flow2,
struct nfp_fl_payload *merge_flow)
{
unsigned int sub1_act_len, sub2_act_len, pre_off1, pre_off2;
+ struct nfp_fl_push_vlan *post_tun_push_vlan = NULL;
bool tunnel_act = false;
char *merge_act;
int err;
@@ -790,18 +830,36 @@ nfp_flower_merge_action(struct nfp_fl_payload *sub_flow1,
sub2_act_len -= pre_off2;
/* FW does a tunnel push when egressing, therefore, if sub_flow 1 pushes
- * a tunnel, sub_flow 2 can only have output actions for a valid merge.
+ * a tunnel, there are restrictions on what sub_flow 2 actions lead to a
+ * valid merge.
*/
if (tunnel_act) {
char *post_tun_acts = &sub_flow2->action_data[pre_off2];
- err = nfp_fl_verify_post_tun_acts(post_tun_acts, sub2_act_len);
+ err = nfp_fl_verify_post_tun_acts(post_tun_acts, sub2_act_len,
+ &post_tun_push_vlan);
if (err)
return err;
+
+ if (post_tun_push_vlan) {
+ pre_off2 += sizeof(*post_tun_push_vlan);
+ sub2_act_len -= sizeof(*post_tun_push_vlan);
+ }
}
/* Copy remaining actions from sub_flows 1 and 2. */
memcpy(merge_act, sub_flow1->action_data + pre_off1, sub1_act_len);
+
+ if (post_tun_push_vlan) {
+ /* Update tunnel action in merge to include VLAN push. */
+ err = nfp_fl_push_vlan_after_tun(merge_act, sub1_act_len,
+ post_tun_push_vlan);
+ if (err)
+ return err;
+
+ merge_flow->meta.act_len -= sizeof(*post_tun_push_vlan);
+ }
+
merge_act += sub1_act_len;
memcpy(merge_act, sub_flow2->action_data + pre_off2, sub2_act_len);
@@ -945,6 +1003,106 @@ err_destroy_merge_flow:
}
/**
+ * nfp_flower_validate_pre_tun_rule() - validate a flow as a pre-tunnel rule
+ * @app: Pointer to the APP handle
+ * @flow: Pointer to NFP flow representation of rule
+ * @extack: Netlink extended ACK report
+ *
+ * Verifies the flow as a pre-tunnel rule.
+ *
+ * Return: negative value on error, 0 if verified.
+ */
+static int
+nfp_flower_validate_pre_tun_rule(struct nfp_app *app,
+ struct nfp_fl_payload *flow,
+ struct netlink_ext_ack *extack)
+{
+ struct nfp_flower_meta_tci *meta_tci;
+ struct nfp_flower_mac_mpls *mac;
+ struct nfp_fl_act_head *act;
+ u8 *mask = flow->mask_data;
+ bool vlan = false;
+ int act_offset;
+ u8 key_layer;
+
+ meta_tci = (struct nfp_flower_meta_tci *)flow->unmasked_data;
+ if (meta_tci->tci & cpu_to_be16(NFP_FLOWER_MASK_VLAN_PRESENT)) {
+ u16 vlan_tci = be16_to_cpu(meta_tci->tci);
+
+ vlan_tci &= ~NFP_FLOWER_MASK_VLAN_PRESENT;
+ flow->pre_tun_rule.vlan_tci = cpu_to_be16(vlan_tci);
+ vlan = true;
+ } else {
+ flow->pre_tun_rule.vlan_tci = cpu_to_be16(0xffff);
+ }
+
+ key_layer = meta_tci->nfp_flow_key_layer;
+ if (key_layer & ~NFP_FLOWER_PRE_TUN_RULE_FIELDS) {
+ NL_SET_ERR_MSG_MOD(extack, "unsupported pre-tunnel rule: too many match fields");
+ return -EOPNOTSUPP;
+ }
+
+ if (!(key_layer & NFP_FLOWER_LAYER_MAC)) {
+ NL_SET_ERR_MSG_MOD(extack, "unsupported pre-tunnel rule: MAC fields match required");
+ return -EOPNOTSUPP;
+ }
+
+ /* Skip fields known to exist. */
+ mask += sizeof(struct nfp_flower_meta_tci);
+ mask += sizeof(struct nfp_flower_in_port);
+
+ /* Ensure destination MAC address is fully matched. */
+ mac = (struct nfp_flower_mac_mpls *)mask;
+ if (!is_broadcast_ether_addr(&mac->mac_dst[0])) {
+ NL_SET_ERR_MSG_MOD(extack, "unsupported pre-tunnel rule: dest MAC field must not be masked");
+ return -EOPNOTSUPP;
+ }
+
+ if (key_layer & NFP_FLOWER_LAYER_IPV4) {
+ int ip_flags = offsetof(struct nfp_flower_ipv4, ip_ext.flags);
+ int ip_proto = offsetof(struct nfp_flower_ipv4, ip_ext.proto);
+ int i;
+
+ mask += sizeof(struct nfp_flower_mac_mpls);
+
+ /* Ensure proto and flags are the only IP layer fields. */
+ for (i = 0; i < sizeof(struct nfp_flower_ipv4); i++)
+ if (mask[i] && i != ip_flags && i != ip_proto) {
+ NL_SET_ERR_MSG_MOD(extack, "unsupported pre-tunnel rule: only flags and proto can be matched in ip header");
+ return -EOPNOTSUPP;
+ }
+ }
+
+ /* Action must be a single egress or pop_vlan and egress. */
+ act_offset = 0;
+ act = (struct nfp_fl_act_head *)&flow->action_data[act_offset];
+ if (vlan) {
+ if (act->jump_id != NFP_FL_ACTION_OPCODE_POP_VLAN) {
+ NL_SET_ERR_MSG_MOD(extack, "unsupported pre-tunnel rule: match on VLAN must have VLAN pop as first action");
+ return -EOPNOTSUPP;
+ }
+
+ act_offset += act->len_lw << NFP_FL_LW_SIZ;
+ act = (struct nfp_fl_act_head *)&flow->action_data[act_offset];
+ }
+
+ if (act->jump_id != NFP_FL_ACTION_OPCODE_OUTPUT) {
+ NL_SET_ERR_MSG_MOD(extack, "unsupported pre-tunnel rule: non egress action detected where egress was expected");
+ return -EOPNOTSUPP;
+ }
+
+ act_offset += act->len_lw << NFP_FL_LW_SIZ;
+
+ /* Ensure there are no more actions after egress. */
+ if (act_offset != flow->meta.act_len) {
+ NL_SET_ERR_MSG_MOD(extack, "unsupported pre-tunnel rule: egress is not the last action");
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+/**
* nfp_flower_add_offload() - Adds a new flow to hardware.
* @app: Pointer to the APP handle
* @netdev: netdev structure.
@@ -994,6 +1152,12 @@ nfp_flower_add_offload(struct nfp_app *app, struct net_device *netdev,
if (err)
goto err_destroy_flow;
+ if (flow_pay->pre_tun_rule.dev) {
+ err = nfp_flower_validate_pre_tun_rule(app, flow_pay, extack);
+ if (err)
+ goto err_destroy_flow;
+ }
+
err = nfp_compile_flow_metadata(app, flow, flow_pay, netdev, extack);
if (err)
goto err_destroy_flow;
@@ -1006,8 +1170,11 @@ nfp_flower_add_offload(struct nfp_app *app, struct net_device *netdev,
goto err_release_metadata;
}
- err = nfp_flower_xmit_flow(app, flow_pay,
- NFP_FLOWER_CMSG_TYPE_FLOW_ADD);
+ if (flow_pay->pre_tun_rule.dev)
+ err = nfp_flower_xmit_pre_tun_flow(app, flow_pay);
+ else
+ err = nfp_flower_xmit_flow(app, flow_pay,
+ NFP_FLOWER_CMSG_TYPE_FLOW_ADD);
if (err)
goto err_remove_rhash;
@@ -1149,8 +1316,11 @@ nfp_flower_del_offload(struct nfp_app *app, struct net_device *netdev,
goto err_free_merge_flow;
}
- err = nfp_flower_xmit_flow(app, nfp_flow,
- NFP_FLOWER_CMSG_TYPE_FLOW_DEL);
+ if (nfp_flow->pre_tun_rule.dev)
+ err = nfp_flower_xmit_pre_tun_del_flow(app, nfp_flow);
+ else
+ err = nfp_flower_xmit_flow(app, nfp_flow,
+ NFP_FLOWER_CMSG_TYPE_FLOW_DEL);
/* Fall through on error. */
err_free_merge_flow:
@@ -1486,16 +1656,17 @@ int nfp_flower_reg_indir_block_handler(struct nfp_app *app,
return NOTIFY_OK;
if (event == NETDEV_REGISTER) {
- err = __tc_indr_block_cb_register(netdev, app,
- nfp_flower_indr_setup_tc_cb,
- app);
+ err = __flow_indr_block_cb_register(netdev, app,
+ nfp_flower_indr_setup_tc_cb,
+ app);
if (err)
nfp_flower_cmsg_warn(app,
"Indirect block reg failed - %s\n",
netdev->name);
} else if (event == NETDEV_UNREGISTER) {
- __tc_indr_block_cb_unregister(netdev,
- nfp_flower_indr_setup_tc_cb, app);
+ __flow_indr_block_cb_unregister(netdev,
+ nfp_flower_indr_setup_tc_cb,
+ app);
}
return NOTIFY_OK;
diff --git a/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c b/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c
index a7a80f4b722a..def8c198b016 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c
@@ -15,6 +15,24 @@
#define NFP_FL_MAX_ROUTES 32
+#define NFP_TUN_PRE_TUN_RULE_LIMIT 32
+#define NFP_TUN_PRE_TUN_RULE_DEL 0x1
+#define NFP_TUN_PRE_TUN_IDX_BIT 0x8
+
+/**
+ * struct nfp_tun_pre_tun_rule - rule matched before decap
+ * @flags: options for the rule offload
+ * @port_idx: index of destination MAC address for the rule
+ * @vlan_tci: VLAN info associated with MAC
+ * @host_ctx_id: stats context of rule to update
+ */
+struct nfp_tun_pre_tun_rule {
+ __be32 flags;
+ __be16 port_idx;
+ __be16 vlan_tci;
+ __be32 host_ctx_id;
+};
+
/**
* struct nfp_tun_active_tuns - periodic message of active tunnels
* @seq: sequence number of the message
@@ -124,11 +142,12 @@ enum nfp_flower_mac_offload_cmd {
/**
* struct nfp_tun_offloaded_mac - hashtable entry for an offloaded MAC
- * @ht_node: Hashtable entry
- * @addr: Offloaded MAC address
- * @index: Offloaded index for given MAC address
- * @ref_count: Number of devs using this MAC address
- * @repr_list: List of reprs sharing this MAC address
+ * @ht_node: Hashtable entry
+ * @addr: Offloaded MAC address
+ * @index: Offloaded index for given MAC address
+ * @ref_count: Number of devs using this MAC address
+ * @repr_list: List of reprs sharing this MAC address
+ * @bridge_count: Number of bridge/internal devs with MAC
*/
struct nfp_tun_offloaded_mac {
struct rhash_head ht_node;
@@ -136,6 +155,7 @@ struct nfp_tun_offloaded_mac {
u16 index;
int ref_count;
struct list_head repr_list;
+ int bridge_count;
};
static const struct rhashtable_params offloaded_macs_params = {
@@ -556,6 +576,8 @@ nfp_tunnel_offloaded_macs_inc_ref_and_link(struct nfp_tun_offloaded_mac *entry,
list_del(&repr_priv->mac_list);
list_add_tail(&repr_priv->mac_list, &entry->repr_list);
+ } else if (nfp_flower_is_supported_bridge(netdev)) {
+ entry->bridge_count++;
}
entry->ref_count++;
@@ -572,20 +594,35 @@ nfp_tunnel_add_shared_mac(struct nfp_app *app, struct net_device *netdev,
entry = nfp_tunnel_lookup_offloaded_macs(app, netdev->dev_addr);
if (entry && nfp_tunnel_is_mac_idx_global(entry->index)) {
- nfp_tunnel_offloaded_macs_inc_ref_and_link(entry, netdev, mod);
- return 0;
+ if (entry->bridge_count ||
+ !nfp_flower_is_supported_bridge(netdev)) {
+ nfp_tunnel_offloaded_macs_inc_ref_and_link(entry,
+ netdev, mod);
+ return 0;
+ }
+
+ /* MAC is global but matches need to go to pre_tun table. */
+ nfp_mac_idx = entry->index | NFP_TUN_PRE_TUN_IDX_BIT;
}
- /* Assign a global index if non-repr or MAC address is now shared. */
- if (entry || !port) {
- ida_idx = ida_simple_get(&priv->tun.mac_off_ids, 0,
- NFP_MAX_MAC_INDEX, GFP_KERNEL);
- if (ida_idx < 0)
- return ida_idx;
+ if (!nfp_mac_idx) {
+ /* Assign a global index if non-repr or MAC is now shared. */
+ if (entry || !port) {
+ ida_idx = ida_simple_get(&priv->tun.mac_off_ids, 0,
+ NFP_MAX_MAC_INDEX, GFP_KERNEL);
+ if (ida_idx < 0)
+ return ida_idx;
- nfp_mac_idx = nfp_tunnel_get_global_mac_idx_from_ida(ida_idx);
- } else {
- nfp_mac_idx = nfp_tunnel_get_mac_idx_from_phy_port_id(port);
+ nfp_mac_idx =
+ nfp_tunnel_get_global_mac_idx_from_ida(ida_idx);
+
+ if (nfp_flower_is_supported_bridge(netdev))
+ nfp_mac_idx |= NFP_TUN_PRE_TUN_IDX_BIT;
+
+ } else {
+ nfp_mac_idx =
+ nfp_tunnel_get_mac_idx_from_phy_port_id(port);
+ }
}
if (!entry) {
@@ -654,6 +691,25 @@ nfp_tunnel_del_shared_mac(struct nfp_app *app, struct net_device *netdev,
list_del(&repr_priv->mac_list);
}
+ if (nfp_flower_is_supported_bridge(netdev)) {
+ entry->bridge_count--;
+
+ if (!entry->bridge_count && entry->ref_count) {
+ u16 nfp_mac_idx;
+
+ nfp_mac_idx = entry->index & ~NFP_TUN_PRE_TUN_IDX_BIT;
+ if (__nfp_tunnel_offload_mac(app, mac, nfp_mac_idx,
+ false)) {
+ nfp_flower_cmsg_warn(app, "MAC offload index revert failed on %s.\n",
+ netdev_name(netdev));
+ return 0;
+ }
+
+ entry->index = nfp_mac_idx;
+ return 0;
+ }
+ }
+
/* If MAC is now used by 1 repr set the offloaded MAC index to port. */
if (entry->ref_count == 1 && list_is_singular(&entry->repr_list)) {
u16 nfp_mac_idx;
@@ -713,6 +769,9 @@ nfp_tunnel_offload_mac(struct nfp_app *app, struct net_device *netdev,
return 0;
repr_priv = repr->app_priv;
+ if (repr_priv->on_bridge)
+ return 0;
+
mac_offloaded = &repr_priv->mac_offloaded;
off_mac = &repr_priv->offloaded_mac_addr[0];
port = nfp_repr_get_port_id(netdev);
@@ -828,10 +887,119 @@ int nfp_tunnel_mac_event_handler(struct nfp_app *app,
if (err)
nfp_flower_cmsg_warn(app, "Failed to offload MAC change on %s.\n",
netdev_name(netdev));
+ } else if (event == NETDEV_CHANGEUPPER) {
+ /* If a repr is attached to a bridge then tunnel packets
+ * entering the physical port are directed through the bridge
+ * datapath and cannot be directly detunneled. Therefore,
+ * associated offloaded MACs and indexes should not be used
+ * by fw for detunneling.
+ */
+ struct netdev_notifier_changeupper_info *info = ptr;
+ struct net_device *upper = info->upper_dev;
+ struct nfp_flower_repr_priv *repr_priv;
+ struct nfp_repr *repr;
+
+ if (!nfp_netdev_is_nfp_repr(netdev) ||
+ !nfp_flower_is_supported_bridge(upper))
+ return NOTIFY_OK;
+
+ repr = netdev_priv(netdev);
+ if (repr->app != app)
+ return NOTIFY_OK;
+
+ repr_priv = repr->app_priv;
+
+ if (info->linking) {
+ if (nfp_tunnel_offload_mac(app, netdev,
+ NFP_TUNNEL_MAC_OFFLOAD_DEL))
+ nfp_flower_cmsg_warn(app, "Failed to delete offloaded MAC on %s.\n",
+ netdev_name(netdev));
+ repr_priv->on_bridge = true;
+ } else {
+ repr_priv->on_bridge = false;
+
+ if (!(netdev->flags & IFF_UP))
+ return NOTIFY_OK;
+
+ if (nfp_tunnel_offload_mac(app, netdev,
+ NFP_TUNNEL_MAC_OFFLOAD_ADD))
+ nfp_flower_cmsg_warn(app, "Failed to offload MAC on %s.\n",
+ netdev_name(netdev));
+ }
}
return NOTIFY_OK;
}
+int nfp_flower_xmit_pre_tun_flow(struct nfp_app *app,
+ struct nfp_fl_payload *flow)
+{
+ struct nfp_flower_priv *app_priv = app->priv;
+ struct nfp_tun_offloaded_mac *mac_entry;
+ struct nfp_tun_pre_tun_rule payload;
+ struct net_device *internal_dev;
+ int err;
+
+ if (app_priv->pre_tun_rule_cnt == NFP_TUN_PRE_TUN_RULE_LIMIT)
+ return -ENOSPC;
+
+ memset(&payload, 0, sizeof(struct nfp_tun_pre_tun_rule));
+
+ internal_dev = flow->pre_tun_rule.dev;
+ payload.vlan_tci = flow->pre_tun_rule.vlan_tci;
+ payload.host_ctx_id = flow->meta.host_ctx_id;
+
+ /* Lookup MAC index for the pre-tunnel rule egress device.
+ * Note that because the device is always an internal port, it will
+ * have a constant global index and so does not need to be tracked.
+ */
+ mac_entry = nfp_tunnel_lookup_offloaded_macs(app,
+ internal_dev->dev_addr);
+ if (!mac_entry)
+ return -ENOENT;
+
+ payload.port_idx = cpu_to_be16(mac_entry->index);
+
+ /* Copy mac id and vlan to flow - dev may not exist at delete time. */
+ flow->pre_tun_rule.vlan_tci = payload.vlan_tci;
+ flow->pre_tun_rule.port_idx = payload.port_idx;
+
+ err = nfp_flower_xmit_tun_conf(app, NFP_FLOWER_CMSG_TYPE_PRE_TUN_RULE,
+ sizeof(struct nfp_tun_pre_tun_rule),
+ (unsigned char *)&payload, GFP_KERNEL);
+ if (err)
+ return err;
+
+ app_priv->pre_tun_rule_cnt++;
+
+ return 0;
+}
+
+int nfp_flower_xmit_pre_tun_del_flow(struct nfp_app *app,
+ struct nfp_fl_payload *flow)
+{
+ struct nfp_flower_priv *app_priv = app->priv;
+ struct nfp_tun_pre_tun_rule payload;
+ u32 tmp_flags = 0;
+ int err;
+
+ memset(&payload, 0, sizeof(struct nfp_tun_pre_tun_rule));
+
+ tmp_flags |= NFP_TUN_PRE_TUN_RULE_DEL;
+ payload.flags = cpu_to_be32(tmp_flags);
+ payload.vlan_tci = flow->pre_tun_rule.vlan_tci;
+ payload.port_idx = flow->pre_tun_rule.port_idx;
+
+ err = nfp_flower_xmit_tun_conf(app, NFP_FLOWER_CMSG_TYPE_PRE_TUN_RULE,
+ sizeof(struct nfp_tun_pre_tun_rule),
+ (unsigned char *)&payload, GFP_KERNEL);
+ if (err)
+ return err;
+
+ app_priv->pre_tun_rule_cnt--;
+
+ return 0;
+}
+
int nfp_tunnel_config_start(struct nfp_app *app)
{
struct nfp_flower_priv *priv = app->priv;
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_main.c b/drivers/net/ethernet/netronome/nfp/nfp_main.c
index 60e57f08de80..81679647e842 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_main.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_main.c
@@ -815,6 +815,8 @@ static void __exit nfp_main_exit(void)
module_init(nfp_main_init);
module_exit(nfp_main_exit);
+MODULE_FIRMWARE("netronome/nic_AMDA0058-0011_2x40.nffw");
+MODULE_FIRMWARE("netronome/nic_AMDA0058-0012_2x40.nffw");
MODULE_FIRMWARE("netronome/nic_AMDA0081-0001_1x40.nffw");
MODULE_FIRMWARE("netronome/nic_AMDA0081-0001_4x10.nffw");
MODULE_FIRMWARE("netronome/nic_AMDA0096-0001_2x10.nffw");
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
index 9903805717da..6f97b554f7da 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
@@ -975,7 +975,7 @@ static int nfp_net_prep_tx_meta(struct sk_buff *skb, u64 tls_handle)
static int nfp_net_tx(struct sk_buff *skb, struct net_device *netdev)
{
struct nfp_net *nn = netdev_priv(netdev);
- const struct skb_frag_struct *frag;
+ const skb_frag_t *frag;
int f, nr_frags, wr_idx, md_bytes;
struct nfp_net_tx_ring *tx_ring;
struct nfp_net_r_vector *r_vec;
@@ -1155,7 +1155,7 @@ static void nfp_net_tx_complete(struct nfp_net_tx_ring *tx_ring, int budget)
todo = D_IDX(tx_ring, qcp_rd_p - tx_ring->qcp_rd_p);
while (todo--) {
- const struct skb_frag_struct *frag;
+ const skb_frag_t *frag;
struct nfp_net_tx_buf *tx_buf;
struct sk_buff *skb;
int fidx, nr_frags;
@@ -1270,7 +1270,7 @@ static bool nfp_net_xdp_complete(struct nfp_net_tx_ring *tx_ring)
static void
nfp_net_tx_ring_reset(struct nfp_net_dp *dp, struct nfp_net_tx_ring *tx_ring)
{
- const struct skb_frag_struct *frag;
+ const skb_frag_t *frag;
struct netdev_queue *nd_q;
while (!tx_ring->is_xdp && tx_ring->rd_p != tx_ring->wr_p) {
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_debugfs.c b/drivers/net/ethernet/netronome/nfp/nfp_net_debugfs.c
index ab7f2498e1c4..553c708694e8 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_debugfs.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_debugfs.c
@@ -159,19 +159,13 @@ void nfp_net_debugfs_vnic_add(struct nfp_net *nn, struct dentry *ddir)
else
strcpy(name, "ctrl-vnic");
nn->debugfs_dir = debugfs_create_dir(name, ddir);
- if (IS_ERR_OR_NULL(nn->debugfs_dir))
- return;
/* Create queue debugging sub-tree */
queues = debugfs_create_dir("queue", nn->debugfs_dir);
- if (IS_ERR_OR_NULL(queues))
- return;
rx = debugfs_create_dir("rx", queues);
tx = debugfs_create_dir("tx", queues);
xdp = debugfs_create_dir("xdp", queues);
- if (IS_ERR_OR_NULL(rx) || IS_ERR_OR_NULL(tx) || IS_ERR_OR_NULL(xdp))
- return;
for (i = 0; i < min(nn->max_rx_rings, nn->max_r_vecs); i++) {
sprintf(name, "%d", i);
@@ -190,16 +184,7 @@ void nfp_net_debugfs_vnic_add(struct nfp_net *nn, struct dentry *ddir)
struct dentry *nfp_net_debugfs_device_add(struct pci_dev *pdev)
{
- struct dentry *dev_dir;
-
- if (IS_ERR_OR_NULL(nfp_dir))
- return NULL;
-
- dev_dir = debugfs_create_dir(pci_name(pdev), nfp_dir);
- if (IS_ERR_OR_NULL(dev_dir))
- return NULL;
-
- return dev_dir;
+ return debugfs_create_dir(pci_name(pdev), nfp_dir);
}
void nfp_net_debugfs_dir_clean(struct dentry **dir)
diff --git a/drivers/net/ethernet/nuvoton/w90p910_ether.c b/drivers/net/ethernet/nuvoton/w90p910_ether.c
index 3d73970b3a2e..219b0b863c89 100644
--- a/drivers/net/ethernet/nuvoton/w90p910_ether.c
+++ b/drivers/net/ethernet/nuvoton/w90p910_ether.c
@@ -993,14 +993,12 @@ static int w90p910_ether_probe(struct platform_device *pdev)
ether->txirq = platform_get_irq(pdev, 0);
if (ether->txirq < 0) {
- dev_err(&pdev->dev, "failed to get ether tx irq\n");
error = -ENXIO;
goto failed_free_io;
}
ether->rxirq = platform_get_irq(pdev, 1);
if (ether->rxirq < 0) {
- dev_err(&pdev->dev, "failed to get ether rx irq\n");
error = -ENXIO;
goto failed_free_io;
}
diff --git a/drivers/net/ethernet/nvidia/forcedeth.c b/drivers/net/ethernet/nvidia/forcedeth.c
index b327b29f5d57..ecca794c55e2 100644
--- a/drivers/net/ethernet/nvidia/forcedeth.c
+++ b/drivers/net/ethernet/nvidia/forcedeth.c
@@ -6126,8 +6126,7 @@ static void nv_remove(struct pci_dev *pci_dev)
#ifdef CONFIG_PM_SLEEP
static int nv_suspend(struct device *device)
{
- struct pci_dev *pdev = to_pci_dev(device);
- struct net_device *dev = pci_get_drvdata(pdev);
+ struct net_device *dev = dev_get_drvdata(device);
struct fe_priv *np = netdev_priv(dev);
u8 __iomem *base = get_hwbase(dev);
int i;
diff --git a/drivers/net/ethernet/packetengines/yellowfin.c b/drivers/net/ethernet/packetengines/yellowfin.c
index 6f8d6584f809..5113ee647090 100644
--- a/drivers/net/ethernet/packetengines/yellowfin.c
+++ b/drivers/net/ethernet/packetengines/yellowfin.c
@@ -1258,8 +1258,7 @@ static int yellowfin_close(struct net_device *dev)
yp->rx_skbuff[i] = NULL;
}
for (i = 0; i < TX_RING_SIZE; i++) {
- if (yp->tx_skbuff[i])
- dev_kfree_skb(yp->tx_skbuff[i]);
+ dev_kfree_skb(yp->tx_skbuff[i]);
yp->tx_skbuff[i] = NULL;
}
diff --git a/drivers/net/ethernet/qlogic/Kconfig b/drivers/net/ethernet/qlogic/Kconfig
index a391cf6ee4b2..55a29ec76680 100644
--- a/drivers/net/ethernet/qlogic/Kconfig
+++ b/drivers/net/ethernet/qlogic/Kconfig
@@ -66,15 +66,6 @@ config QLCNIC_HWMON
This data is available via the hwmon sysfs interface.
-config QLGE
- tristate "QLogic QLGE 10Gb Ethernet Driver Support"
- depends on PCI
- ---help---
- This driver supports QLogic ISP8XXX 10Gb Ethernet cards.
-
- To compile this driver as a module, choose M here: the module
- will be called qlge.
-
config NETXEN_NIC
tristate "NetXen Multi port (1/10) Gigabit Ethernet NIC"
depends on PCI
diff --git a/drivers/net/ethernet/qlogic/Makefile b/drivers/net/ethernet/qlogic/Makefile
index 6cd2e333a5fc..1ae4a0743bd5 100644
--- a/drivers/net/ethernet/qlogic/Makefile
+++ b/drivers/net/ethernet/qlogic/Makefile
@@ -5,7 +5,6 @@
obj-$(CONFIG_QLA3XXX) += qla3xxx.o
obj-$(CONFIG_QLCNIC) += qlcnic/
-obj-$(CONFIG_QLGE) += qlge/
obj-$(CONFIG_NETXEN_NIC) += netxen/
obj-$(CONFIG_QED) += qed/
obj-$(CONFIG_QEDE)+= qede/
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
index 58e2eaf77014..c692a41e4548 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
@@ -1980,7 +1980,7 @@ netxen_map_tx_skb(struct pci_dev *pdev,
struct sk_buff *skb, struct netxen_cmd_buffer *pbuf)
{
struct netxen_skb_frag *nf;
- struct skb_frag_struct *frag;
+ skb_frag_t *frag;
int i, nr_frags;
dma_addr_t map;
@@ -2043,7 +2043,7 @@ netxen_nic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
struct pci_dev *pdev;
int i, k;
int delta = 0;
- struct skb_frag_struct *frag;
+ skb_frag_t *frag;
u32 producer;
int frag_count;
diff --git a/drivers/net/ethernet/qlogic/qed/qed_hsi.h b/drivers/net/ethernet/qlogic/qed/qed_hsi.h
index e054f6c69e3a..557a12ef9815 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_hsi.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_hsi.h
@@ -12580,6 +12580,8 @@ struct public_drv_mb {
#define DRV_MSG_CODE_BW_UPDATE_ACK 0x32000000
#define DRV_MSG_CODE_NIG_DRAIN 0x30000000
#define DRV_MSG_CODE_S_TAG_UPDATE_ACK 0x3b000000
+#define DRV_MSG_CODE_GET_NVM_CFG_OPTION 0x003e0000
+#define DRV_MSG_CODE_SET_NVM_CFG_OPTION 0x003f0000
#define DRV_MSG_CODE_INITIATE_PF_FLR 0x02010000
#define DRV_MSG_CODE_VF_DISABLED_DONE 0xc0000000
#define DRV_MSG_CODE_CFG_VF_MSIX 0xc0010000
@@ -12748,6 +12750,21 @@ struct public_drv_mb {
#define DRV_MB_PARAM_FEATURE_SUPPORT_PORT_EEE 0x00000002
#define DRV_MB_PARAM_FEATURE_SUPPORT_FUNC_VLINK 0x00010000
+#define DRV_MB_PARAM_NVM_CFG_OPTION_ID_SHIFT 0
+#define DRV_MB_PARAM_NVM_CFG_OPTION_ID_MASK 0x0000FFFF
+#define DRV_MB_PARAM_NVM_CFG_OPTION_ALL_SHIFT 16
+#define DRV_MB_PARAM_NVM_CFG_OPTION_ALL_MASK 0x00010000
+#define DRV_MB_PARAM_NVM_CFG_OPTION_INIT_SHIFT 17
+#define DRV_MB_PARAM_NVM_CFG_OPTION_INIT_MASK 0x00020000
+#define DRV_MB_PARAM_NVM_CFG_OPTION_COMMIT_SHIFT 18
+#define DRV_MB_PARAM_NVM_CFG_OPTION_COMMIT_MASK 0x00040000
+#define DRV_MB_PARAM_NVM_CFG_OPTION_FREE_SHIFT 19
+#define DRV_MB_PARAM_NVM_CFG_OPTION_FREE_MASK 0x00080000
+#define DRV_MB_PARAM_NVM_CFG_OPTION_ENTITY_SEL_SHIFT 20
+#define DRV_MB_PARAM_NVM_CFG_OPTION_ENTITY_SEL_MASK 0x00100000
+#define DRV_MB_PARAM_NVM_CFG_OPTION_ENTITY_ID_SHIFT 24
+#define DRV_MB_PARAM_NVM_CFG_OPTION_ENTITY_ID_MASK 0x0f000000
+
u32 fw_mb_header;
#define FW_MSG_CODE_MASK 0xffff0000
#define FW_MSG_CODE_UNSUPPORTED 0x00000000
diff --git a/drivers/net/ethernet/qlogic/qed/qed_l2.c b/drivers/net/ethernet/qlogic/qed/qed_l2.c
index 9f36e7948222..1a5fc2ae351c 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_l2.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_l2.c
@@ -1631,10 +1631,9 @@ static void __qed_get_vport_pstats_addrlen(struct qed_hwfn *p_hwfn,
}
}
-static void __qed_get_vport_pstats(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt,
- struct qed_eth_stats *p_stats,
- u16 statistics_bin)
+static noinline_for_stack void
+__qed_get_vport_pstats(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
+ struct qed_eth_stats *p_stats, u16 statistics_bin)
{
struct eth_pstorm_per_queue_stat pstats;
u32 pstats_addr = 0, pstats_len = 0;
@@ -1661,10 +1660,9 @@ static void __qed_get_vport_pstats(struct qed_hwfn *p_hwfn,
HILO_64_REGPAIR(pstats.error_drop_pkts);
}
-static void __qed_get_vport_tstats(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt,
- struct qed_eth_stats *p_stats,
- u16 statistics_bin)
+static noinline_for_stack void
+__qed_get_vport_tstats(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
+ struct qed_eth_stats *p_stats, u16 statistics_bin)
{
struct tstorm_per_port_stat tstats;
u32 tstats_addr, tstats_len;
@@ -1709,10 +1707,9 @@ static void __qed_get_vport_ustats_addrlen(struct qed_hwfn *p_hwfn,
}
}
-static void __qed_get_vport_ustats(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt,
- struct qed_eth_stats *p_stats,
- u16 statistics_bin)
+static noinline_for_stack
+void __qed_get_vport_ustats(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
+ struct qed_eth_stats *p_stats, u16 statistics_bin)
{
struct eth_ustorm_per_queue_stat ustats;
u32 ustats_addr = 0, ustats_len = 0;
@@ -1751,10 +1748,9 @@ static void __qed_get_vport_mstats_addrlen(struct qed_hwfn *p_hwfn,
}
}
-static void __qed_get_vport_mstats(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt,
- struct qed_eth_stats *p_stats,
- u16 statistics_bin)
+static noinline_for_stack void
+__qed_get_vport_mstats(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
+ struct qed_eth_stats *p_stats, u16 statistics_bin)
{
struct eth_mstorm_per_queue_stat mstats;
u32 mstats_addr = 0, mstats_len = 0;
@@ -1780,9 +1776,9 @@ static void __qed_get_vport_mstats(struct qed_hwfn *p_hwfn,
HILO_64_REGPAIR(mstats.tpa_coalesced_bytes);
}
-static void __qed_get_vport_port_stats(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt,
- struct qed_eth_stats *p_stats)
+static noinline_for_stack void
+__qed_get_vport_port_stats(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
+ struct qed_eth_stats *p_stats)
{
struct qed_eth_stats_common *p_common = &p_stats->common;
struct port_stats port_stats;
diff --git a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c
index 1efff7f68ef6..7891f8c5a1bc 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_main.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_main.c
@@ -67,6 +67,8 @@
#define QED_ROCE_QPS (8192)
#define QED_ROCE_DPIS (8)
#define QED_RDMA_SRQS QED_ROCE_QPS
+#define QED_NVM_CFG_SET_FLAGS 0xE
+#define QED_NVM_CFG_SET_PF_FLAGS 0x1E
static char version[] =
"QLogic FastLinQ 4xxxx Core Module qed " DRV_MODULE_VERSION "\n";
@@ -1690,6 +1692,7 @@ static void qed_fill_link_capability(struct qed_hwfn *hwfn,
switch (media_type) {
case MEDIA_DA_TWINAX:
+ *if_capability |= QED_LM_FIBRE_BIT;
if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_20G)
*if_capability |= QED_LM_20000baseKR2_Full_BIT;
/* For DAC media multiple speed capabilities are supported*/
@@ -1709,6 +1712,7 @@ static void qed_fill_link_capability(struct qed_hwfn *hwfn,
*if_capability |= QED_LM_100000baseCR4_Full_BIT;
break;
case MEDIA_BASE_T:
+ *if_capability |= QED_LM_TP_BIT;
if (board_cfg & NVM_CFG1_PORT_PORT_TYPE_EXT_PHY) {
if (capability &
NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G) {
@@ -1720,6 +1724,7 @@ static void qed_fill_link_capability(struct qed_hwfn *hwfn,
}
}
if (board_cfg & NVM_CFG1_PORT_PORT_TYPE_MODULE) {
+ *if_capability |= QED_LM_FIBRE_BIT;
if (tcvr_type == ETH_TRANSCEIVER_TYPE_1000BASET)
*if_capability |= QED_LM_1000baseT_Full_BIT;
if (tcvr_type == ETH_TRANSCEIVER_TYPE_10G_BASET)
@@ -1730,6 +1735,7 @@ static void qed_fill_link_capability(struct qed_hwfn *hwfn,
case MEDIA_SFPP_10G_FIBER:
case MEDIA_XFP_FIBER:
case MEDIA_MODULE_FIBER:
+ *if_capability |= QED_LM_FIBRE_BIT;
if (capability &
NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G) {
if ((tcvr_type == ETH_TRANSCEIVER_TYPE_1G_LX) ||
@@ -1772,6 +1778,7 @@ static void qed_fill_link_capability(struct qed_hwfn *hwfn,
break;
case MEDIA_KR:
+ *if_capability |= QED_LM_Backplane_BIT;
if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_20G)
*if_capability |= QED_LM_20000baseKR2_Full_BIT;
if (capability &
@@ -1823,7 +1830,6 @@ static void qed_fill_link(struct qed_hwfn *hwfn,
if_link->link_up = true;
/* TODO - at the moment assume supported and advertised speed equal */
- if_link->supported_caps = QED_LM_FIBRE_BIT;
if (link_caps.default_speed_autoneg)
if_link->supported_caps |= QED_LM_Autoneg_BIT;
if (params.pause.autoneg ||
@@ -2229,6 +2235,69 @@ static int qed_nvm_flash_image_validate(struct qed_dev *cdev,
return 0;
}
+/* Binary file format -
+ * /----------------------------------------------------------------------\
+ * 0B | 0x5 [command index] |
+ * 4B | Entity ID | Reserved | Number of config attributes |
+ * 8B | Config ID | Length | Value |
+ * | |
+ * \----------------------------------------------------------------------/
+ * There can be several cfg_id-Length-Value sets as specified by 'Number of...'.
+ * Entity ID - A non-zero entity value for which the config needs to be updated.
+ *
+ * The API parses config attributes from the user-provided buffer and flashes
+ * them to the respective NVM path using the Management FW interface.
+ */
+static int qed_nvm_flash_cfg_write(struct qed_dev *cdev, const u8 **data)
+{
+ struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
+ u8 entity_id, len, buf[32];
+ struct qed_ptt *ptt;
+ u16 cfg_id, count;
+ int rc = 0, i;
+ u32 flags;
+
+ ptt = qed_ptt_acquire(hwfn);
+ if (!ptt)
+ return -EAGAIN;
+
+ /* NVM CFG ID attribute header */
+ *data += 4;
+ entity_id = **data;
+ *data += 2;
+ count = *((u16 *)*data);
+ *data += 2;
+
+ DP_VERBOSE(cdev, NETIF_MSG_DRV,
+ "Read config ids: entity id %02x num_attrs = %d\n",
+ entity_id, count);
+ /* NVM CFG ID attributes */
+ for (i = 0; i < count; i++) {
+ cfg_id = *((u16 *)*data);
+ *data += 2;
+ len = **data;
+ (*data)++;
+ memcpy(buf, *data, len);
+ *data += len;
+
+ flags = entity_id ? QED_NVM_CFG_SET_PF_FLAGS :
+ QED_NVM_CFG_SET_FLAGS;
+
+ DP_VERBOSE(cdev, NETIF_MSG_DRV,
+ "cfg_id = %d len = %d\n", cfg_id, len);
+ rc = qed_mcp_nvm_set_cfg(hwfn, ptt, cfg_id, entity_id, flags,
+ buf, len);
+ if (rc) {
+ DP_ERR(cdev, "Error %d configuring %d\n", rc, cfg_id);
+ break;
+ }
+ }
+
+ qed_ptt_release(hwfn, ptt);
+
+ return rc;
+}
+
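[Editor's note] For reference, the record layout the parser above consumes, reconstructed from its pointer arithmetic (field names are illustrative; multi-byte values are read via direct u16 casts, i.e. host/little-endian):

  /* offset 0: u32 cmd        -- 0x5, skipped by '*data += 4'
   * offset 4: u8  entity_id  -- non-zero selects the per-PF flags
   * offset 5: u8  reserved
   * offset 6: u16 count      -- number of attribute records
   * then, 'count' times:
   *           u16 cfg_id
   *           u8  len        -- assumed to fit the 32-byte buf[]
   *           u8  value[len]
   */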
static int qed_nvm_flash(struct qed_dev *cdev, const char *name)
{
const struct firmware *image;
@@ -2270,6 +2339,9 @@ static int qed_nvm_flash(struct qed_dev *cdev, const char *name)
rc = qed_nvm_flash_image_access(cdev, &data,
&check_resp);
break;
+ case QED_NVM_FLASH_CMD_NVM_CFG_ID:
+ rc = qed_nvm_flash_cfg_write(cdev, &data);
+ break;
default:
DP_ERR(cdev, "Unknown command %08x\n", cmd_type);
rc = -EINVAL;
diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.c b/drivers/net/ethernet/qlogic/qed/qed_mcp.c
index 758702c1ce9c..89462c4a5022 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_mcp.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.c
@@ -3750,3 +3750,35 @@ int qed_mcp_get_ppfid_bitmap(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
return 0;
}
+
+int qed_mcp_nvm_set_cfg(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
+ u16 option_id, u8 entity_id, u16 flags, u8 *p_buf,
+ u32 len)
+{
+ u32 mb_param = 0, resp, param;
+
+ QED_MFW_SET_FIELD(mb_param, DRV_MB_PARAM_NVM_CFG_OPTION_ID, option_id);
+ if (flags & QED_NVM_CFG_OPTION_ALL)
+ QED_MFW_SET_FIELD(mb_param,
+ DRV_MB_PARAM_NVM_CFG_OPTION_ALL, 1);
+ if (flags & QED_NVM_CFG_OPTION_INIT)
+ QED_MFW_SET_FIELD(mb_param,
+ DRV_MB_PARAM_NVM_CFG_OPTION_INIT, 1);
+ if (flags & QED_NVM_CFG_OPTION_COMMIT)
+ QED_MFW_SET_FIELD(mb_param,
+ DRV_MB_PARAM_NVM_CFG_OPTION_COMMIT, 1);
+ if (flags & QED_NVM_CFG_OPTION_FREE)
+ QED_MFW_SET_FIELD(mb_param,
+ DRV_MB_PARAM_NVM_CFG_OPTION_FREE, 1);
+ if (flags & QED_NVM_CFG_OPTION_ENTITY_SEL) {
+ QED_MFW_SET_FIELD(mb_param,
+ DRV_MB_PARAM_NVM_CFG_OPTION_ENTITY_SEL, 1);
+ QED_MFW_SET_FIELD(mb_param,
+ DRV_MB_PARAM_NVM_CFG_OPTION_ENTITY_ID,
+ entity_id);
+ }
+
+ return qed_mcp_nvm_wr_cmd(p_hwfn, p_ptt,
+ DRV_MSG_CODE_SET_NVM_CFG_OPTION,
+ mb_param, &resp, &param, len, (u32 *)p_buf);
+}
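[Editor's note] Against these bits, the two flag words defined in qed_main.c decode as: QED_NVM_CFG_SET_FLAGS (0xE) = INIT | COMMIT | FREE, while QED_NVM_CFG_SET_PF_FLAGS (0x1E) additionally sets ENTITY_SEL so the entity id is folded into the mailbox param. A hypothetical call (option id 0x10 is illustrative, not taken from this patch):

  u8 val = 1;
  int rc;

  rc = qed_mcp_nvm_set_cfg(hwfn, ptt, /* option_id */ 0x10, entity_id,
  			   QED_NVM_CFG_OPTION_INIT |
  			   QED_NVM_CFG_OPTION_COMMIT |
  			   QED_NVM_CFG_OPTION_FREE |
  			   QED_NVM_CFG_OPTION_ENTITY_SEL,
  			   &val, sizeof(val));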
diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.h b/drivers/net/ethernet/qlogic/qed/qed_mcp.h
index e4f8fe4bd062..83649a82977b 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_mcp.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.h
@@ -251,6 +251,12 @@ union qed_mfw_tlv_data {
struct qed_mfw_tlv_iscsi iscsi;
};
+#define QED_NVM_CFG_OPTION_ALL BIT(0)
+#define QED_NVM_CFG_OPTION_INIT BIT(1)
+#define QED_NVM_CFG_OPTION_COMMIT BIT(2)
+#define QED_NVM_CFG_OPTION_FREE BIT(3)
+#define QED_NVM_CFG_OPTION_ENTITY_SEL BIT(4)
+
/**
* @brief - returns the link params of the hw function
*
@@ -1202,4 +1208,18 @@ int qed_mcp_get_engine_config(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
*/
int qed_mcp_get_ppfid_bitmap(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
+/**
+ * @brief Set NVM config attribute value.
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param option_id
+ * @param entity_id
+ * @param flags
+ * @param p_buf
+ * @param len
+ */
+int qed_mcp_nvm_set_cfg(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
+ u16 option_id, u8 entity_id, u16 flags, u8 *p_buf,
+ u32 len);
#endif
diff --git a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c
index e85f9fef930c..abcee474909a 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c
@@ -424,12 +424,13 @@ struct qede_link_mode_mapping {
};
static const struct qede_link_mode_mapping qed_lm_map[] = {
+ {QED_LM_FIBRE_BIT, ETHTOOL_LINK_MODE_FIBRE_BIT},
{QED_LM_Autoneg_BIT, ETHTOOL_LINK_MODE_Autoneg_BIT},
{QED_LM_Asym_Pause_BIT, ETHTOOL_LINK_MODE_Asym_Pause_BIT},
{QED_LM_Pause_BIT, ETHTOOL_LINK_MODE_Pause_BIT},
{QED_LM_1000baseT_Full_BIT, ETHTOOL_LINK_MODE_1000baseT_Full_BIT},
{QED_LM_10000baseT_Full_BIT, ETHTOOL_LINK_MODE_10000baseT_Full_BIT},
- {QED_LM_2500baseX_Full_BIT, ETHTOOL_LINK_MODE_2500baseX_Full_BIT},
+ {QED_LM_TP_BIT, ETHTOOL_LINK_MODE_TP_BIT},
{QED_LM_Backplane_BIT, ETHTOOL_LINK_MODE_Backplane_BIT},
{QED_LM_1000baseKX_Full_BIT, ETHTOOL_LINK_MODE_1000baseKX_Full_BIT},
{QED_LM_10000baseKX4_Full_BIT, ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT},
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
index 14f26bf3b388..ac61f614de37 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
@@ -581,7 +581,7 @@ static int qlcnic_map_tx_skb(struct pci_dev *pdev, struct sk_buff *skb,
struct qlcnic_cmd_buffer *pbuf)
{
struct qlcnic_skb_frag *nf;
- struct skb_frag_struct *frag;
+ skb_frag_t *frag;
int i, nr_frags;
dma_addr_t map;
diff --git a/drivers/net/ethernet/qlogic/qlge/Makefile b/drivers/net/ethernet/qlogic/qlge/Makefile
deleted file mode 100644
index 1dc2568e820c..000000000000
--- a/drivers/net/ethernet/qlogic/qlge/Makefile
+++ /dev/null
@@ -1,8 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0-only
-#
-# Makefile for the Qlogic 10GbE PCI Express ethernet driver
-#
-
-obj-$(CONFIG_QLGE) += qlge.o
-
-qlge-objs := qlge_main.o qlge_dbg.o qlge_mpi.o qlge_ethtool.o
diff --git a/drivers/net/ethernet/qlogic/qlge/qlge.h b/drivers/net/ethernet/qlogic/qlge/qlge.h
deleted file mode 100644
index ad7c5eb8a3b6..000000000000
--- a/drivers/net/ethernet/qlogic/qlge/qlge.h
+++ /dev/null
@@ -1,2353 +0,0 @@
-/*
- * QLogic QLA41xx NIC HBA Driver
- * Copyright (c) 2003-2006 QLogic Corporation
- *
- * See LICENSE.qlge for copyright and licensing details.
- */
-#ifndef _QLGE_H_
-#define _QLGE_H_
-
-#include <linux/interrupt.h>
-#include <linux/pci.h>
-#include <linux/netdevice.h>
-#include <linux/rtnetlink.h>
-#include <linux/if_vlan.h>
-
-/*
- * General definitions...
- */
-#define DRV_NAME "qlge"
-#define DRV_STRING "QLogic 10 Gigabit PCI-E Ethernet Driver "
-#define DRV_VERSION "1.00.00.35"
-
-#define WQ_ADDR_ALIGN 0x3 /* 4 byte alignment */
-
-#define QLGE_VENDOR_ID 0x1077
-#define QLGE_DEVICE_ID_8012 0x8012
-#define QLGE_DEVICE_ID_8000 0x8000
-#define QLGE_MEZZ_SSYS_ID_068 0x0068
-#define QLGE_MEZZ_SSYS_ID_180 0x0180
-#define MAX_CPUS 8
-#define MAX_TX_RINGS MAX_CPUS
-#define MAX_RX_RINGS ((MAX_CPUS * 2) + 1)
-
-#define NUM_TX_RING_ENTRIES 256
-#define NUM_RX_RING_ENTRIES 256
-
-#define NUM_SMALL_BUFFERS 512
-#define NUM_LARGE_BUFFERS 512
-#define DB_PAGE_SIZE 4096
-
-/* Calculate the number of (4k) pages required to
- * contain a buffer queue of the given length.
- */
-#define MAX_DB_PAGES_PER_BQ(x) \
- (((x * sizeof(u64)) / DB_PAGE_SIZE) + \
- (((x * sizeof(u64)) % DB_PAGE_SIZE) ? 1 : 0))
-
-#define RX_RING_SHADOW_SPACE (sizeof(u64) + \
- MAX_DB_PAGES_PER_BQ(NUM_SMALL_BUFFERS) * sizeof(u64) + \
- MAX_DB_PAGES_PER_BQ(NUM_LARGE_BUFFERS) * sizeof(u64))
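
(A quick sanity check of the arithmetic above, using the defaults in this header: with NUM_SMALL_BUFFERS = 512, the queue needs 512 * sizeof(u64) = 4096 bytes, exactly one DB_PAGE_SIZE page with no remainder, so MAX_DB_PAGES_PER_BQ(512) = 1. The same holds for NUM_LARGE_BUFFERS, giving RX_RING_SHADOW_SPACE = 8 + (1 * 8) + (1 * 8) = 24 bytes.)
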
-#define LARGE_BUFFER_MAX_SIZE 8192
-#define LARGE_BUFFER_MIN_SIZE 2048
-
-#define MAX_CQ 128
-#define DFLT_COALESCE_WAIT 100 /* 100 usec wait for coalescing */
-#define MAX_INTER_FRAME_WAIT 10 /* 10 usec max interframe-wait for coalescing */
-#define DFLT_INTER_FRAME_WAIT (MAX_INTER_FRAME_WAIT/2)
-#define UDELAY_COUNT 3
-#define UDELAY_DELAY 100
-
-
-#define TX_DESC_PER_IOCB 8
-
-#if ((MAX_SKB_FRAGS - TX_DESC_PER_IOCB) + 2) > 0
-#define TX_DESC_PER_OAL ((MAX_SKB_FRAGS - TX_DESC_PER_IOCB) + 2)
-#else /* all other page sizes */
-#define TX_DESC_PER_OAL 0
-#endif
-
-/* Word shifting for converting 64-bit
- * address to a series of 16-bit words.
- * This is used for some MPI firmware
- * mailbox commands.
- */
-#define LSW(x) ((u16)(x))
-#define MSW(x) ((u16)((u32)(x) >> 16))
-#define LSD(x) ((u32)((u64)(x)))
-#define MSD(x) ((u32)((((u64)(x)) >> 32)))
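
(Worked example with an illustrative value: for addr = 0x0000001234abcd00, MSD(addr) = 0x00000012 and LSD(addr) = 0x34abcd00; splitting the low dword further, MSW(LSD(addr)) = 0x34ab and LSW(LSD(addr)) = 0xcd00 -- the kind of decomposition the MPI mailbox commands consume.)
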
-
-/* MPI test register definitions. This register
- * is used for determining alternate NIC function's
- * PCI->func number.
- */
-enum {
- MPI_TEST_FUNC_PORT_CFG = 0x1002,
- MPI_TEST_FUNC_PRB_CTL = 0x100e,
- MPI_TEST_FUNC_PRB_EN = 0x18a20000,
- MPI_TEST_FUNC_RST_STS = 0x100a,
- MPI_TEST_FUNC_RST_FRC = 0x00000003,
- MPI_TEST_NIC_FUNC_MASK = 0x00000007,
- MPI_TEST_NIC1_FUNCTION_ENABLE = (1 << 0),
- MPI_TEST_NIC1_FUNCTION_MASK = 0x0000000e,
- MPI_TEST_NIC1_FUNC_SHIFT = 1,
- MPI_TEST_NIC2_FUNCTION_ENABLE = (1 << 4),
- MPI_TEST_NIC2_FUNCTION_MASK = 0x000000e0,
- MPI_TEST_NIC2_FUNC_SHIFT = 5,
- MPI_TEST_FC1_FUNCTION_ENABLE = (1 << 8),
- MPI_TEST_FC1_FUNCTION_MASK = 0x00000e00,
- MPI_TEST_FC1_FUNCTION_SHIFT = 9,
- MPI_TEST_FC2_FUNCTION_ENABLE = (1 << 12),
- MPI_TEST_FC2_FUNCTION_MASK = 0x0000e000,
- MPI_TEST_FC2_FUNCTION_SHIFT = 13,
-
- MPI_NIC_READ = 0x00000000,
- MPI_NIC_REG_BLOCK = 0x00020000,
- MPI_NIC_FUNCTION_SHIFT = 6,
-};
-
-/*
- * Processor Address Register (PROC_ADDR) bit definitions.
- */
-enum {
-
- /* Misc. stuff */
- MAILBOX_COUNT = 16,
- MAILBOX_TIMEOUT = 5,
-
- PROC_ADDR_RDY = (1 << 31),
- PROC_ADDR_R = (1 << 30),
- PROC_ADDR_ERR = (1 << 29),
- PROC_ADDR_DA = (1 << 28),
- PROC_ADDR_FUNC0_MBI = 0x00001180,
- PROC_ADDR_FUNC0_MBO = (PROC_ADDR_FUNC0_MBI + MAILBOX_COUNT),
- PROC_ADDR_FUNC0_CTL = 0x000011a1,
- PROC_ADDR_FUNC2_MBI = 0x00001280,
- PROC_ADDR_FUNC2_MBO = (PROC_ADDR_FUNC2_MBI + MAILBOX_COUNT),
- PROC_ADDR_FUNC2_CTL = 0x000012a1,
- PROC_ADDR_MPI_RISC = 0x00000000,
- PROC_ADDR_MDE = 0x00010000,
- PROC_ADDR_REGBLOCK = 0x00020000,
- PROC_ADDR_RISC_REG = 0x00030000,
-};
-
-/*
- * System Register (SYS) bit definitions.
- */
-enum {
- SYS_EFE = (1 << 0),
- SYS_FAE = (1 << 1),
- SYS_MDC = (1 << 2),
- SYS_DST = (1 << 3),
- SYS_DWC = (1 << 4),
- SYS_EVW = (1 << 5),
- SYS_OMP_DLY_MASK = 0x3f000000,
- /*
- * There are no values defined as of edit #15.
- */
- SYS_ODI = (1 << 14),
-};
-
-/*
- * Reset/Failover Register (RST_FO) bit definitions.
- */
-enum {
- RST_FO_TFO = (1 << 0),
- RST_FO_RR_MASK = 0x00060000,
- RST_FO_RR_CQ_CAM = 0x00000000,
- RST_FO_RR_DROP = 0x00000002,
- RST_FO_RR_DQ = 0x00000004,
- RST_FO_RR_RCV_FUNC_CQ = 0x00000006,
- RST_FO_FRB = (1 << 12),
- RST_FO_MOP = (1 << 13),
- RST_FO_REG = (1 << 14),
- RST_FO_FR = (1 << 15),
-};
-
-/*
- * Function Specific Control Register (FSC) bit definitions.
- */
-enum {
- FSC_DBRST_MASK = 0x00070000,
- FSC_DBRST_256 = 0x00000000,
- FSC_DBRST_512 = 0x00000001,
- FSC_DBRST_768 = 0x00000002,
- FSC_DBRST_1024 = 0x00000003,
- FSC_DBL_MASK = 0x00180000,
- FSC_DBL_DBRST = 0x00000000,
- FSC_DBL_MAX_PLD = 0x00000008,
- FSC_DBL_MAX_BRST = 0x00000010,
- FSC_DBL_128_BYTES = 0x00000018,
- FSC_EC = (1 << 5),
- FSC_EPC_MASK = 0x00c00000,
- FSC_EPC_INBOUND = (1 << 6),
- FSC_EPC_OUTBOUND = (1 << 7),
- FSC_VM_PAGESIZE_MASK = 0x07000000,
- FSC_VM_PAGE_2K = 0x00000100,
- FSC_VM_PAGE_4K = 0x00000200,
- FSC_VM_PAGE_8K = 0x00000300,
- FSC_VM_PAGE_64K = 0x00000600,
- FSC_SH = (1 << 11),
- FSC_DSB = (1 << 12),
- FSC_STE = (1 << 13),
- FSC_FE = (1 << 15),
-};
-
-/*
- * Host Command Status Register (CSR) bit definitions.
- */
-enum {
- CSR_ERR_STS_MASK = 0x0000003f,
- /*
- * There are no valued defined as of edit #15.
- */
- CSR_RR = (1 << 8),
- CSR_HRI = (1 << 9),
- CSR_RP = (1 << 10),
- CSR_CMD_PARM_SHIFT = 22,
- CSR_CMD_NOP = 0x00000000,
- CSR_CMD_SET_RST = 0x10000000,
- CSR_CMD_CLR_RST = 0x20000000,
- CSR_CMD_SET_PAUSE = 0x30000000,
- CSR_CMD_CLR_PAUSE = 0x40000000,
- CSR_CMD_SET_H2R_INT = 0x50000000,
- CSR_CMD_CLR_H2R_INT = 0x60000000,
- CSR_CMD_PAR_EN = 0x70000000,
- CSR_CMD_SET_BAD_PAR = 0x80000000,
- CSR_CMD_CLR_BAD_PAR = 0x90000000,
- CSR_CMD_CLR_R2PCI_INT = 0xa0000000,
-};
-
-/*
- * Configuration Register (CFG) bit definitions.
- */
-enum {
- CFG_LRQ = (1 << 0),
- CFG_DRQ = (1 << 1),
- CFG_LR = (1 << 2),
- CFG_DR = (1 << 3),
- CFG_LE = (1 << 5),
- CFG_LCQ = (1 << 6),
- CFG_DCQ = (1 << 7),
- CFG_Q_SHIFT = 8,
- CFG_Q_MASK = 0x7f000000,
-};
-
-/*
- * Status Register (STS) bit definitions.
- */
-enum {
- STS_FE = (1 << 0),
- STS_PI = (1 << 1),
- STS_PL0 = (1 << 2),
- STS_PL1 = (1 << 3),
- STS_PI0 = (1 << 4),
- STS_PI1 = (1 << 5),
- STS_FUNC_ID_MASK = 0x000000c0,
- STS_FUNC_ID_SHIFT = 6,
- STS_F0E = (1 << 8),
- STS_F1E = (1 << 9),
- STS_F2E = (1 << 10),
- STS_F3E = (1 << 11),
- STS_NFE = (1 << 12),
-};
-
-/*
- * Interrupt Enable Register (INTR_EN) bit definitions.
- */
-enum {
- INTR_EN_INTR_MASK = 0x007f0000,
- INTR_EN_TYPE_MASK = 0x03000000,
- INTR_EN_TYPE_ENABLE = 0x00000100,
- INTR_EN_TYPE_DISABLE = 0x00000200,
- INTR_EN_TYPE_READ = 0x00000300,
- INTR_EN_IHD = (1 << 13),
- INTR_EN_IHD_MASK = (INTR_EN_IHD << 16),
- INTR_EN_EI = (1 << 14),
- INTR_EN_EN = (1 << 15),
-};
-
-/*
- * Interrupt Mask Register (INTR_MASK) bit definitions.
- */
-enum {
- INTR_MASK_PI = (1 << 0),
- INTR_MASK_HL0 = (1 << 1),
- INTR_MASK_LH0 = (1 << 2),
- INTR_MASK_HL1 = (1 << 3),
- INTR_MASK_LH1 = (1 << 4),
- INTR_MASK_SE = (1 << 5),
- INTR_MASK_LSC = (1 << 6),
- INTR_MASK_MC = (1 << 7),
- INTR_MASK_LINK_IRQS = INTR_MASK_LSC | INTR_MASK_SE | INTR_MASK_MC,
-};
-
-/*
- * Register (REV_ID) bit definitions.
- */
-enum {
- REV_ID_MASK = 0x0000000f,
- REV_ID_NICROLL_SHIFT = 0,
- REV_ID_NICREV_SHIFT = 4,
- REV_ID_XGROLL_SHIFT = 8,
- REV_ID_XGREV_SHIFT = 12,
- REV_ID_CHIPREV_SHIFT = 28,
-};
-
-/*
- * Force ECC Error Register (FRC_ECC_ERR) bit definitions.
- */
-enum {
- FRC_ECC_ERR_VW = (1 << 12),
- FRC_ECC_ERR_VB = (1 << 13),
- FRC_ECC_ERR_NI = (1 << 14),
- FRC_ECC_ERR_NO = (1 << 15),
- FRC_ECC_PFE_SHIFT = 16,
- FRC_ECC_ERR_DO = (1 << 18),
- FRC_ECC_P14 = (1 << 19),
-};
-
-/*
- * Error Status Register (ERR_STS) bit definitions.
- */
-enum {
- ERR_STS_NOF = (1 << 0),
- ERR_STS_NIF = (1 << 1),
- ERR_STS_DRP = (1 << 2),
- ERR_STS_XGP = (1 << 3),
- ERR_STS_FOU = (1 << 4),
- ERR_STS_FOC = (1 << 5),
- ERR_STS_FOF = (1 << 6),
- ERR_STS_FIU = (1 << 7),
- ERR_STS_FIC = (1 << 8),
- ERR_STS_FIF = (1 << 9),
- ERR_STS_MOF = (1 << 10),
- ERR_STS_TA = (1 << 11),
- ERR_STS_MA = (1 << 12),
- ERR_STS_MPE = (1 << 13),
- ERR_STS_SCE = (1 << 14),
- ERR_STS_STE = (1 << 15),
- ERR_STS_FOW = (1 << 16),
- ERR_STS_UE = (1 << 17),
- ERR_STS_MCH = (1 << 26),
- ERR_STS_LOC_SHIFT = 27,
-};
-
-/*
- * RAM Debug Address Register (RAM_DBG_ADDR) bit definitions.
- */
-enum {
- RAM_DBG_ADDR_FW = (1 << 30),
- RAM_DBG_ADDR_FR = (1 << 31),
-};
-
-/*
- * Semaphore Register (SEM) bit definitions.
- */
-enum {
- /*
- * Example:
- * reg = SEM_XGMAC0_MASK | (SEM_SET << SEM_XGMAC0_SHIFT)
- */
- SEM_CLEAR = 0,
- SEM_SET = 1,
- SEM_FORCE = 3,
- SEM_XGMAC0_SHIFT = 0,
- SEM_XGMAC1_SHIFT = 2,
- SEM_ICB_SHIFT = 4,
- SEM_MAC_ADDR_SHIFT = 6,
- SEM_FLASH_SHIFT = 8,
- SEM_PROBE_SHIFT = 10,
- SEM_RT_IDX_SHIFT = 12,
- SEM_PROC_REG_SHIFT = 14,
- SEM_XGMAC0_MASK = 0x00030000,
- SEM_XGMAC1_MASK = 0x000c0000,
- SEM_ICB_MASK = 0x00300000,
- SEM_MAC_ADDR_MASK = 0x00c00000,
- SEM_FLASH_MASK = 0x03000000,
- SEM_PROBE_MASK = 0x0c000000,
- SEM_RT_IDX_MASK = 0x30000000,
- SEM_PROC_REG_MASK = 0xc0000000,
-};
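
(Following the example embedded in the enum above, a request to take the XGMAC0 semaphore would be built roughly as the sketch below and written to the SEM control register; this is an illustration, not driver code.)

	u32 reg = SEM_XGMAC0_MASK | (SEM_SET << SEM_XGMAC0_SHIFT);
	/* = 0x00030000 | (1 << 0): the high-word mask selects which
	 * two-bit field the write updates; the low bits carry SEM_SET.
	 */
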
-
-/*
- * 10G MAC Address Register (XGMAC_ADDR) bit definitions.
- */
-enum {
- XGMAC_ADDR_RDY = (1 << 31),
- XGMAC_ADDR_R = (1 << 30),
- XGMAC_ADDR_XME = (1 << 29),
-
- /* XGMAC control registers */
- PAUSE_SRC_LO = 0x00000100,
- PAUSE_SRC_HI = 0x00000104,
- GLOBAL_CFG = 0x00000108,
- GLOBAL_CFG_RESET = (1 << 0),
- GLOBAL_CFG_JUMBO = (1 << 6),
- GLOBAL_CFG_TX_STAT_EN = (1 << 10),
- GLOBAL_CFG_RX_STAT_EN = (1 << 11),
- TX_CFG = 0x0000010c,
- TX_CFG_RESET = (1 << 0),
- TX_CFG_EN = (1 << 1),
- TX_CFG_PREAM = (1 << 2),
- RX_CFG = 0x00000110,
- RX_CFG_RESET = (1 << 0),
- RX_CFG_EN = (1 << 1),
- RX_CFG_PREAM = (1 << 2),
- FLOW_CTL = 0x0000011c,
- PAUSE_OPCODE = 0x00000120,
- PAUSE_TIMER = 0x00000124,
- PAUSE_FRM_DEST_LO = 0x00000128,
- PAUSE_FRM_DEST_HI = 0x0000012c,
- MAC_TX_PARAMS = 0x00000134,
- MAC_TX_PARAMS_JUMBO = (1 << 31),
- MAC_TX_PARAMS_SIZE_SHIFT = 16,
- MAC_RX_PARAMS = 0x00000138,
- MAC_SYS_INT = 0x00000144,
- MAC_SYS_INT_MASK = 0x00000148,
- MAC_MGMT_INT = 0x0000014c,
- MAC_MGMT_IN_MASK = 0x00000150,
- EXT_ARB_MODE = 0x000001fc,
-
- /* XGMAC TX statistics registers */
- TX_PKTS = 0x00000200,
- TX_BYTES = 0x00000208,
- TX_MCAST_PKTS = 0x00000210,
- TX_BCAST_PKTS = 0x00000218,
- TX_UCAST_PKTS = 0x00000220,
- TX_CTL_PKTS = 0x00000228,
- TX_PAUSE_PKTS = 0x00000230,
- TX_64_PKT = 0x00000238,
- TX_65_TO_127_PKT = 0x00000240,
- TX_128_TO_255_PKT = 0x00000248,
- TX_256_511_PKT = 0x00000250,
- TX_512_TO_1023_PKT = 0x00000258,
- TX_1024_TO_1518_PKT = 0x00000260,
- TX_1519_TO_MAX_PKT = 0x00000268,
- TX_UNDERSIZE_PKT = 0x00000270,
- TX_OVERSIZE_PKT = 0x00000278,
-
- /* XGMAC statistics control registers */
- RX_HALF_FULL_DET = 0x000002a0,
- TX_HALF_FULL_DET = 0x000002a4,
- RX_OVERFLOW_DET = 0x000002a8,
- TX_OVERFLOW_DET = 0x000002ac,
- RX_HALF_FULL_MASK = 0x000002b0,
- TX_HALF_FULL_MASK = 0x000002b4,
- RX_OVERFLOW_MASK = 0x000002b8,
- TX_OVERFLOW_MASK = 0x000002bc,
- STAT_CNT_CTL = 0x000002c0,
- STAT_CNT_CTL_CLEAR_TX = (1 << 0),
- STAT_CNT_CTL_CLEAR_RX = (1 << 1),
- AUX_RX_HALF_FULL_DET = 0x000002d0,
- AUX_TX_HALF_FULL_DET = 0x000002d4,
- AUX_RX_OVERFLOW_DET = 0x000002d8,
- AUX_TX_OVERFLOW_DET = 0x000002dc,
- AUX_RX_HALF_FULL_MASK = 0x000002f0,
- AUX_TX_HALF_FULL_MASK = 0x000002f4,
- AUX_RX_OVERFLOW_MASK = 0x000002f8,
- AUX_TX_OVERFLOW_MASK = 0x000002fc,
-
- /* XGMAC RX statistics registers */
- RX_BYTES = 0x00000300,
- RX_BYTES_OK = 0x00000308,
- RX_PKTS = 0x00000310,
- RX_PKTS_OK = 0x00000318,
- RX_BCAST_PKTS = 0x00000320,
- RX_MCAST_PKTS = 0x00000328,
- RX_UCAST_PKTS = 0x00000330,
- RX_UNDERSIZE_PKTS = 0x00000338,
- RX_OVERSIZE_PKTS = 0x00000340,
- RX_JABBER_PKTS = 0x00000348,
- RX_UNDERSIZE_FCERR_PKTS = 0x00000350,
- RX_DROP_EVENTS = 0x00000358,
- RX_FCERR_PKTS = 0x00000360,
- RX_ALIGN_ERR = 0x00000368,
- RX_SYMBOL_ERR = 0x00000370,
- RX_MAC_ERR = 0x00000378,
- RX_CTL_PKTS = 0x00000380,
- RX_PAUSE_PKTS = 0x00000388,
- RX_64_PKTS = 0x00000390,
- RX_65_TO_127_PKTS = 0x00000398,
- RX_128_255_PKTS = 0x000003a0,
- RX_256_511_PKTS = 0x000003a8,
- RX_512_TO_1023_PKTS = 0x000003b0,
- RX_1024_TO_1518_PKTS = 0x000003b8,
- RX_1519_TO_MAX_PKTS = 0x000003c0,
- RX_LEN_ERR_PKTS = 0x000003c8,
-
- /* XGMAC MDIO control registers */
- MDIO_TX_DATA = 0x00000400,
- MDIO_RX_DATA = 0x00000410,
- MDIO_CMD = 0x00000420,
- MDIO_PHY_ADDR = 0x00000430,
- MDIO_PORT = 0x00000440,
- MDIO_STATUS = 0x00000450,
-
- XGMAC_REGISTER_END = 0x00000740,
-};
-
-/*
- * Enhanced Transmission Schedule Registers (NIC_ETS,CNA_ETS) bit definitions.
- */
-enum {
- ETS_QUEUE_SHIFT = 29,
- ETS_REF = (1 << 26),
- ETS_RS = (1 << 27),
- ETS_P = (1 << 28),
- ETS_FC_COS_SHIFT = 23,
-};
-
-/*
- * Flash Address Register (FLASH_ADDR) bit definitions.
- */
-enum {
- FLASH_ADDR_RDY = (1 << 31),
- FLASH_ADDR_R = (1 << 30),
- FLASH_ADDR_ERR = (1 << 29),
-};
-
-/*
- * Stop CQ Processing Register (CQ_STOP) bit definitions.
- */
-enum {
- CQ_STOP_QUEUE_MASK = (0x007f0000),
- CQ_STOP_TYPE_MASK = (0x03000000),
- CQ_STOP_TYPE_START = 0x00000100,
- CQ_STOP_TYPE_STOP = 0x00000200,
- CQ_STOP_TYPE_READ = 0x00000300,
- CQ_STOP_EN = (1 << 15),
-};
-
-/*
- * MAC Protocol Address Index Register (MAC_ADDR_IDX) bit definitions.
- */
-enum {
- MAC_ADDR_IDX_SHIFT = 4,
- MAC_ADDR_TYPE_SHIFT = 16,
- MAC_ADDR_TYPE_COUNT = 10,
- MAC_ADDR_TYPE_MASK = 0x000f0000,
- MAC_ADDR_TYPE_CAM_MAC = 0x00000000,
- MAC_ADDR_TYPE_MULTI_MAC = 0x00010000,
- MAC_ADDR_TYPE_VLAN = 0x00020000,
- MAC_ADDR_TYPE_MULTI_FLTR = 0x00030000,
- MAC_ADDR_TYPE_FC_MAC = 0x00040000,
- MAC_ADDR_TYPE_MGMT_MAC = 0x00050000,
- MAC_ADDR_TYPE_MGMT_VLAN = 0x00060000,
- MAC_ADDR_TYPE_MGMT_V4 = 0x00070000,
- MAC_ADDR_TYPE_MGMT_V6 = 0x00080000,
- MAC_ADDR_TYPE_MGMT_TU_DP = 0x00090000,
- MAC_ADDR_ADR = (1 << 25),
- MAC_ADDR_RS = (1 << 26),
- MAC_ADDR_E = (1 << 27),
- MAC_ADDR_MR = (1 << 30),
- MAC_ADDR_MW = (1 << 31),
- MAX_MULTICAST_ENTRIES = 32,
-
- /* Entry count and words per entry
- * for each address type in the filter.
- */
- MAC_ADDR_MAX_CAM_ENTRIES = 512,
- MAC_ADDR_MAX_CAM_WCOUNT = 3,
- MAC_ADDR_MAX_MULTICAST_ENTRIES = 32,
- MAC_ADDR_MAX_MULTICAST_WCOUNT = 2,
- MAC_ADDR_MAX_VLAN_ENTRIES = 4096,
- MAC_ADDR_MAX_VLAN_WCOUNT = 1,
- MAC_ADDR_MAX_MCAST_FLTR_ENTRIES = 4096,
- MAC_ADDR_MAX_MCAST_FLTR_WCOUNT = 1,
- MAC_ADDR_MAX_FC_MAC_ENTRIES = 4,
- MAC_ADDR_MAX_FC_MAC_WCOUNT = 2,
- MAC_ADDR_MAX_MGMT_MAC_ENTRIES = 8,
- MAC_ADDR_MAX_MGMT_MAC_WCOUNT = 2,
- MAC_ADDR_MAX_MGMT_VLAN_ENTRIES = 16,
- MAC_ADDR_MAX_MGMT_VLAN_WCOUNT = 1,
- MAC_ADDR_MAX_MGMT_V4_ENTRIES = 4,
- MAC_ADDR_MAX_MGMT_V4_WCOUNT = 1,
- MAC_ADDR_MAX_MGMT_V6_ENTRIES = 4,
- MAC_ADDR_MAX_MGMT_V6_WCOUNT = 4,
- MAC_ADDR_MAX_MGMT_TU_DP_ENTRIES = 4,
- MAC_ADDR_MAX_MGMT_TU_DP_WCOUNT = 1,
-};
-
-/*
- * MAC Protocol Address Index Register (SPLT_HDR) bit definitions.
- */
-enum {
- SPLT_HDR_EP = (1 << 31),
-};
-
-/*
- * FCoE Receive Configuration Register (FC_RCV_CFG) bit definitions.
- */
-enum {
- FC_RCV_CFG_ECT = (1 << 15),
- FC_RCV_CFG_DFH = (1 << 20),
- FC_RCV_CFG_DVF = (1 << 21),
- FC_RCV_CFG_RCE = (1 << 27),
- FC_RCV_CFG_RFE = (1 << 28),
- FC_RCV_CFG_TEE = (1 << 29),
- FC_RCV_CFG_TCE = (1 << 30),
- FC_RCV_CFG_TFE = (1 << 31),
-};
-
-/*
- * NIC Receive Configuration Register (NIC_RCV_CFG) bit definitions.
- */
-enum {
- NIC_RCV_CFG_PPE = (1 << 0),
- NIC_RCV_CFG_VLAN_MASK = 0x00060000,
- NIC_RCV_CFG_VLAN_ALL = 0x00000000,
- NIC_RCV_CFG_VLAN_MATCH_ONLY = 0x00000002,
- NIC_RCV_CFG_VLAN_MATCH_AND_NON = 0x00000004,
- NIC_RCV_CFG_VLAN_NONE_AND_NON = 0x00000006,
- NIC_RCV_CFG_RV = (1 << 3),
- NIC_RCV_CFG_DFQ_MASK = (0x7f000000),
- NIC_RCV_CFG_DFQ_SHIFT = 8,
- NIC_RCV_CFG_DFQ = 0, /* HARDCODE default queue to 0. */
-};
-
-/*
- * Mgmt Receive Configuration Register (MGMT_RCV_CFG) bit definitions.
- */
-enum {
- MGMT_RCV_CFG_ARP = (1 << 0),
- MGMT_RCV_CFG_DHC = (1 << 1),
- MGMT_RCV_CFG_DHS = (1 << 2),
- MGMT_RCV_CFG_NP = (1 << 3),
- MGMT_RCV_CFG_I6N = (1 << 4),
- MGMT_RCV_CFG_I6R = (1 << 5),
- MGMT_RCV_CFG_DH6 = (1 << 6),
- MGMT_RCV_CFG_UD1 = (1 << 7),
- MGMT_RCV_CFG_UD0 = (1 << 8),
- MGMT_RCV_CFG_BCT = (1 << 9),
- MGMT_RCV_CFG_MCT = (1 << 10),
- MGMT_RCV_CFG_DM = (1 << 11),
- MGMT_RCV_CFG_RM = (1 << 12),
- MGMT_RCV_CFG_STL = (1 << 13),
- MGMT_RCV_CFG_VLAN_MASK = 0xc0000000,
- MGMT_RCV_CFG_VLAN_ALL = 0x00000000,
- MGMT_RCV_CFG_VLAN_MATCH_ONLY = 0x00004000,
- MGMT_RCV_CFG_VLAN_MATCH_AND_NON = 0x00008000,
- MGMT_RCV_CFG_VLAN_NONE_AND_NON = 0x0000c000,
-};
-
-/*
- * Routing Index Register (RT_IDX) bit definitions.
- */
-enum {
- RT_IDX_IDX_SHIFT = 8,
- RT_IDX_TYPE_MASK = 0x000f0000,
- RT_IDX_TYPE_SHIFT = 16,
- RT_IDX_TYPE_RT = 0x00000000,
- RT_IDX_TYPE_RT_INV = 0x00010000,
- RT_IDX_TYPE_NICQ = 0x00020000,
- RT_IDX_TYPE_NICQ_INV = 0x00030000,
- RT_IDX_DST_MASK = 0x00700000,
- RT_IDX_DST_RSS = 0x00000000,
- RT_IDX_DST_CAM_Q = 0x00100000,
- RT_IDX_DST_COS_Q = 0x00200000,
- RT_IDX_DST_DFLT_Q = 0x00300000,
- RT_IDX_DST_DEST_Q = 0x00400000,
- RT_IDX_RS = (1 << 26),
- RT_IDX_E = (1 << 27),
- RT_IDX_MR = (1 << 30),
- RT_IDX_MW = (1 << 31),
-
- /* Nic Queue format - type 2 bits */
- RT_IDX_BCAST = (1 << 0),
- RT_IDX_MCAST = (1 << 1),
- RT_IDX_MCAST_MATCH = (1 << 2),
- RT_IDX_MCAST_REG_MATCH = (1 << 3),
- RT_IDX_MCAST_HASH_MATCH = (1 << 4),
- RT_IDX_FC_MACH = (1 << 5),
- RT_IDX_ETH_FCOE = (1 << 6),
- RT_IDX_CAM_HIT = (1 << 7),
- RT_IDX_CAM_BIT0 = (1 << 8),
- RT_IDX_CAM_BIT1 = (1 << 9),
- RT_IDX_VLAN_TAG = (1 << 10),
- RT_IDX_VLAN_MATCH = (1 << 11),
- RT_IDX_VLAN_FILTER = (1 << 12),
- RT_IDX_ETH_SKIP1 = (1 << 13),
- RT_IDX_ETH_SKIP2 = (1 << 14),
- RT_IDX_BCAST_MCAST_MATCH = (1 << 15),
- RT_IDX_802_3 = (1 << 16),
- RT_IDX_LLDP = (1 << 17),
- RT_IDX_UNUSED018 = (1 << 18),
- RT_IDX_UNUSED019 = (1 << 19),
- RT_IDX_UNUSED20 = (1 << 20),
- RT_IDX_UNUSED21 = (1 << 21),
- RT_IDX_ERR = (1 << 22),
- RT_IDX_VALID = (1 << 23),
- RT_IDX_TU_CSUM_ERR = (1 << 24),
- RT_IDX_IP_CSUM_ERR = (1 << 25),
- RT_IDX_MAC_ERR = (1 << 26),
- RT_IDX_RSS_TCP6 = (1 << 27),
- RT_IDX_RSS_TCP4 = (1 << 28),
- RT_IDX_RSS_IPV6 = (1 << 29),
- RT_IDX_RSS_IPV4 = (1 << 30),
- RT_IDX_RSS_MATCH = (1 << 31),
-
- /* Hierarchy for the NIC Queue Mask */
- RT_IDX_ALL_ERR_SLOT = 0,
- RT_IDX_MAC_ERR_SLOT = 0,
- RT_IDX_IP_CSUM_ERR_SLOT = 1,
- RT_IDX_TCP_UDP_CSUM_ERR_SLOT = 2,
- RT_IDX_BCAST_SLOT = 3,
- RT_IDX_MCAST_MATCH_SLOT = 4,
- RT_IDX_ALLMULTI_SLOT = 5,
- RT_IDX_UNUSED6_SLOT = 6,
- RT_IDX_UNUSED7_SLOT = 7,
- RT_IDX_RSS_MATCH_SLOT = 8,
- RT_IDX_RSS_IPV4_SLOT = 8,
- RT_IDX_RSS_IPV6_SLOT = 9,
- RT_IDX_RSS_TCP4_SLOT = 10,
- RT_IDX_RSS_TCP6_SLOT = 11,
- RT_IDX_CAM_HIT_SLOT = 12,
- RT_IDX_UNUSED013 = 13,
- RT_IDX_UNUSED014 = 14,
- RT_IDX_PROMISCUOUS_SLOT = 15,
- RT_IDX_MAX_RT_SLOTS = 8,
- RT_IDX_MAX_NIC_SLOTS = 16,
-};
-
-/*
- * Serdes Address Register (XG_SERDES_ADDR) bit definitions.
- */
-enum {
- XG_SERDES_ADDR_RDY = (1 << 31),
- XG_SERDES_ADDR_R = (1 << 30),
-
- XG_SERDES_ADDR_STS = 0x00001E06,
- XG_SERDES_ADDR_XFI1_PWR_UP = 0x00000005,
- XG_SERDES_ADDR_XFI2_PWR_UP = 0x0000000a,
- XG_SERDES_ADDR_XAUI_PWR_DOWN = 0x00000001,
-
- /* Serdes coredump definitions. */
- XG_SERDES_XAUI_AN_START = 0x00000000,
- XG_SERDES_XAUI_AN_END = 0x00000034,
- XG_SERDES_XAUI_HSS_PCS_START = 0x00000800,
- XG_SERDES_XAUI_HSS_PCS_END = 0x0000880,
- XG_SERDES_XFI_AN_START = 0x00001000,
- XG_SERDES_XFI_AN_END = 0x00001034,
- XG_SERDES_XFI_TRAIN_START = 0x10001050,
- XG_SERDES_XFI_TRAIN_END = 0x1000107C,
- XG_SERDES_XFI_HSS_PCS_START = 0x00001800,
- XG_SERDES_XFI_HSS_PCS_END = 0x00001838,
- XG_SERDES_XFI_HSS_TX_START = 0x00001c00,
- XG_SERDES_XFI_HSS_TX_END = 0x00001c1f,
- XG_SERDES_XFI_HSS_RX_START = 0x00001c40,
- XG_SERDES_XFI_HSS_RX_END = 0x00001c5f,
- XG_SERDES_XFI_HSS_PLL_START = 0x00001e00,
- XG_SERDES_XFI_HSS_PLL_END = 0x00001e1f,
-};
-
-/*
- * NIC Probe Mux Address Register (PRB_MX_ADDR) bit definitions.
- */
-enum {
- PRB_MX_ADDR_ARE = (1 << 16),
- PRB_MX_ADDR_UP = (1 << 15),
- PRB_MX_ADDR_SWP = (1 << 14),
-
- /* Module select values. */
- PRB_MX_ADDR_MAX_MODS = 21,
- PRB_MX_ADDR_MOD_SEL_SHIFT = 9,
- PRB_MX_ADDR_MOD_SEL_TBD = 0,
- PRB_MX_ADDR_MOD_SEL_IDE1 = 1,
- PRB_MX_ADDR_MOD_SEL_IDE2 = 2,
- PRB_MX_ADDR_MOD_SEL_FRB = 3,
- PRB_MX_ADDR_MOD_SEL_ODE1 = 4,
- PRB_MX_ADDR_MOD_SEL_ODE2 = 5,
- PRB_MX_ADDR_MOD_SEL_DA1 = 6,
- PRB_MX_ADDR_MOD_SEL_DA2 = 7,
- PRB_MX_ADDR_MOD_SEL_IMP1 = 8,
- PRB_MX_ADDR_MOD_SEL_IMP2 = 9,
- PRB_MX_ADDR_MOD_SEL_OMP1 = 10,
- PRB_MX_ADDR_MOD_SEL_OMP2 = 11,
- PRB_MX_ADDR_MOD_SEL_ORS1 = 12,
- PRB_MX_ADDR_MOD_SEL_ORS2 = 13,
- PRB_MX_ADDR_MOD_SEL_REG = 14,
- PRB_MX_ADDR_MOD_SEL_MAC1 = 16,
- PRB_MX_ADDR_MOD_SEL_MAC2 = 17,
- PRB_MX_ADDR_MOD_SEL_VQM1 = 18,
- PRB_MX_ADDR_MOD_SEL_VQM2 = 19,
- PRB_MX_ADDR_MOD_SEL_MOP = 20,
- /* Bit fields indicating which modules
- * are valid for each clock domain.
- */
- PRB_MX_ADDR_VALID_SYS_MOD = 0x000f7ff7,
- PRB_MX_ADDR_VALID_PCI_MOD = 0x000040c1,
- PRB_MX_ADDR_VALID_XGM_MOD = 0x00037309,
- PRB_MX_ADDR_VALID_FC_MOD = 0x00003001,
- PRB_MX_ADDR_VALID_TOTAL = 34,
-
- /* Clock domain values. */
- PRB_MX_ADDR_CLOCK_SHIFT = 6,
- PRB_MX_ADDR_SYS_CLOCK = 0,
- PRB_MX_ADDR_PCI_CLOCK = 2,
- PRB_MX_ADDR_FC_CLOCK = 5,
- PRB_MX_ADDR_XGM_CLOCK = 6,
-
- PRB_MX_ADDR_MAX_MUX = 64,
-};
-
-/*
- * Control Register Set Map
- */
-enum {
- PROC_ADDR = 0, /* Use semaphore */
- PROC_DATA = 0x04, /* Use semaphore */
- SYS = 0x08,
- RST_FO = 0x0c,
- FSC = 0x10,
- CSR = 0x14,
- LED = 0x18,
- ICB_RID = 0x1c, /* Use semaphore */
- ICB_L = 0x20, /* Use semaphore */
- ICB_H = 0x24, /* Use semaphore */
- CFG = 0x28,
- BIOS_ADDR = 0x2c,
- STS = 0x30,
- INTR_EN = 0x34,
- INTR_MASK = 0x38,
- ISR1 = 0x3c,
- ISR2 = 0x40,
- ISR3 = 0x44,
- ISR4 = 0x48,
- REV_ID = 0x4c,
- FRC_ECC_ERR = 0x50,
- ERR_STS = 0x54,
- RAM_DBG_ADDR = 0x58,
- RAM_DBG_DATA = 0x5c,
- ECC_ERR_CNT = 0x60,
- SEM = 0x64,
- GPIO_1 = 0x68, /* Use semaphore */
- GPIO_2 = 0x6c, /* Use semaphore */
- GPIO_3 = 0x70, /* Use semaphore */
- RSVD2 = 0x74,
- XGMAC_ADDR = 0x78, /* Use semaphore */
- XGMAC_DATA = 0x7c, /* Use semaphore */
- NIC_ETS = 0x80,
- CNA_ETS = 0x84,
- FLASH_ADDR = 0x88, /* Use semaphore */
- FLASH_DATA = 0x8c, /* Use semaphore */
- CQ_STOP = 0x90,
- PAGE_TBL_RID = 0x94,
- WQ_PAGE_TBL_LO = 0x98,
- WQ_PAGE_TBL_HI = 0x9c,
- CQ_PAGE_TBL_LO = 0xa0,
- CQ_PAGE_TBL_HI = 0xa4,
- MAC_ADDR_IDX = 0xa8, /* Use semaphore */
- MAC_ADDR_DATA = 0xac, /* Use semaphore */
- COS_DFLT_CQ1 = 0xb0,
- COS_DFLT_CQ2 = 0xb4,
- ETYPE_SKIP1 = 0xb8,
- ETYPE_SKIP2 = 0xbc,
- SPLT_HDR = 0xc0,
- FC_PAUSE_THRES = 0xc4,
- NIC_PAUSE_THRES = 0xc8,
- FC_ETHERTYPE = 0xcc,
- FC_RCV_CFG = 0xd0,
- NIC_RCV_CFG = 0xd4,
- FC_COS_TAGS = 0xd8,
- NIC_COS_TAGS = 0xdc,
- MGMT_RCV_CFG = 0xe0,
- RT_IDX = 0xe4,
- RT_DATA = 0xe8,
- RSVD7 = 0xec,
- XG_SERDES_ADDR = 0xf0,
- XG_SERDES_DATA = 0xf4,
- PRB_MX_ADDR = 0xf8, /* Use semaphore */
- PRB_MX_DATA = 0xfc, /* Use semaphore */
-};
-
-#ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
-#define SMALL_BUFFER_SIZE 256
-#define SMALL_BUF_MAP_SIZE SMALL_BUFFER_SIZE
-#define SPLT_SETTING FSC_DBRST_1024
-#define SPLT_LEN 0
-#define QLGE_SB_PAD 0
-#else
-#define SMALL_BUFFER_SIZE 512
-#define SMALL_BUF_MAP_SIZE (SMALL_BUFFER_SIZE / 2)
-#define SPLT_SETTING FSC_SH
-#define SPLT_LEN (SPLT_HDR_EP | \
- min(SMALL_BUF_MAP_SIZE, 1023))
-#define QLGE_SB_PAD 32
-#endif
-
-/*
- * CAM output format.
- */
-enum {
- CAM_OUT_ROUTE_FC = 0,
- CAM_OUT_ROUTE_NIC = 1,
- CAM_OUT_FUNC_SHIFT = 2,
- CAM_OUT_RV = (1 << 4),
- CAM_OUT_SH = (1 << 15),
- CAM_OUT_CQ_ID_SHIFT = 5,
-};
-
-/*
- * Mailbox definitions
- */
-enum {
- /* Asynchronous Event Notifications */
- AEN_SYS_ERR = 0x00008002,
- AEN_LINK_UP = 0x00008011,
- AEN_LINK_DOWN = 0x00008012,
- AEN_IDC_CMPLT = 0x00008100,
- AEN_IDC_REQ = 0x00008101,
- AEN_IDC_EXT = 0x00008102,
- AEN_DCBX_CHG = 0x00008110,
- AEN_AEN_LOST = 0x00008120,
- AEN_AEN_SFP_IN = 0x00008130,
- AEN_AEN_SFP_OUT = 0x00008131,
- AEN_FW_INIT_DONE = 0x00008400,
- AEN_FW_INIT_FAIL = 0x00008401,
-
- /* Mailbox Command Opcodes. */
- MB_CMD_NOP = 0x00000000,
- MB_CMD_EX_FW = 0x00000002,
- MB_CMD_MB_TEST = 0x00000006,
- MB_CMD_CSUM_TEST = 0x00000007, /* Verify Checksum */
- MB_CMD_ABOUT_FW = 0x00000008,
- MB_CMD_COPY_RISC_RAM = 0x0000000a,
- MB_CMD_LOAD_RISC_RAM = 0x0000000b,
- MB_CMD_DUMP_RISC_RAM = 0x0000000c,
- MB_CMD_WRITE_RAM = 0x0000000d,
- MB_CMD_INIT_RISC_RAM = 0x0000000e,
- MB_CMD_READ_RAM = 0x0000000f,
- MB_CMD_STOP_FW = 0x00000014,
- MB_CMD_MAKE_SYS_ERR = 0x0000002a,
- MB_CMD_WRITE_SFP = 0x00000030,
- MB_CMD_READ_SFP = 0x00000031,
- MB_CMD_INIT_FW = 0x00000060,
- MB_CMD_GET_IFCB = 0x00000061,
- MB_CMD_GET_FW_STATE = 0x00000069,
- MB_CMD_IDC_REQ = 0x00000100, /* Inter-Driver Communication */
- MB_CMD_IDC_ACK = 0x00000101, /* Inter-Driver Communication */
- MB_CMD_SET_WOL_MODE = 0x00000110, /* Wake On Lan */
- MB_WOL_DISABLE = 0,
- MB_WOL_MAGIC_PKT = (1 << 1),
- MB_WOL_FLTR = (1 << 2),
- MB_WOL_UCAST = (1 << 3),
- MB_WOL_MCAST = (1 << 4),
- MB_WOL_BCAST = (1 << 5),
- MB_WOL_LINK_UP = (1 << 6),
- MB_WOL_LINK_DOWN = (1 << 7),
- MB_WOL_MODE_ON = (1 << 16), /* Wake on Lan Mode on */
- MB_CMD_SET_WOL_FLTR = 0x00000111, /* Wake On Lan Filter */
- MB_CMD_CLEAR_WOL_FLTR = 0x00000112, /* Wake On Lan Filter */
- MB_CMD_SET_WOL_MAGIC = 0x00000113, /* Wake On Lan Magic Packet */
- MB_CMD_CLEAR_WOL_MAGIC = 0x00000114,/* Wake On Lan Magic Packet */
- MB_CMD_SET_WOL_IMMED = 0x00000115,
- MB_CMD_PORT_RESET = 0x00000120,
- MB_CMD_SET_PORT_CFG = 0x00000122,
- MB_CMD_GET_PORT_CFG = 0x00000123,
- MB_CMD_GET_LINK_STS = 0x00000124,
- MB_CMD_SET_LED_CFG = 0x00000125, /* Set LED Configuration Register */
- QL_LED_BLINK = 0x03e803e8,
- MB_CMD_GET_LED_CFG = 0x00000126, /* Get LED Configuration Register */
- MB_CMD_SET_MGMNT_TFK_CTL = 0x00000160, /* Set Mgmnt Traffic Control */
- MB_SET_MPI_TFK_STOP = (1 << 0),
- MB_SET_MPI_TFK_RESUME = (1 << 1),
- MB_CMD_GET_MGMNT_TFK_CTL = 0x00000161, /* Get Mgmnt Traffic Control */
- MB_GET_MPI_TFK_STOPPED = (1 << 0),
- MB_GET_MPI_TFK_FIFO_EMPTY = (1 << 1),
- /* Sub-commands for IDC request.
- * This describes the reason for the
- * IDC request.
- */
- MB_CMD_IOP_NONE = 0x0000,
- MB_CMD_IOP_PREP_UPDATE_MPI = 0x0001,
- MB_CMD_IOP_COMP_UPDATE_MPI = 0x0002,
- MB_CMD_IOP_PREP_LINK_DOWN = 0x0010,
- MB_CMD_IOP_DVR_START = 0x0100,
- MB_CMD_IOP_FLASH_ACC = 0x0101,
- MB_CMD_IOP_RESTART_MPI = 0x0102,
- MB_CMD_IOP_CORE_DUMP_MPI = 0x0103,
-
- /* Mailbox Command Status. */
- MB_CMD_STS_GOOD = 0x00004000, /* Success. */
- MB_CMD_STS_INTRMDT = 0x00001000, /* Intermediate Complete. */
- MB_CMD_STS_INVLD_CMD = 0x00004001, /* Invalid. */
- MB_CMD_STS_XFC_ERR = 0x00004002, /* Interface Error. */
- MB_CMD_STS_CSUM_ERR = 0x00004003, /* Csum Error. */
- MB_CMD_STS_ERR = 0x00004005, /* System Error. */
- MB_CMD_STS_PARAM_ERR = 0x00004006, /* Parameter Error. */
-};
-
-struct mbox_params {
- u32 mbox_in[MAILBOX_COUNT];
- u32 mbox_out[MAILBOX_COUNT];
- int in_count;
- int out_count;
-};
-
-struct flash_params_8012 {
- u8 dev_id_str[4];
- __le16 size;
- __le16 csum;
- __le16 ver;
- __le16 sub_dev_id;
- u8 mac_addr[6];
- __le16 res;
-};
-
-/* 8000 device's flash is a different structure
- * at a different offset in flash.
- */
-#define FUNC0_FLASH_OFFSET 0x140200
-#define FUNC1_FLASH_OFFSET 0x140600
-
-/* Flash related data structures. */
-struct flash_params_8000 {
- u8 dev_id_str[4]; /* "8000" */
- __le16 ver;
- __le16 size;
- __le16 csum;
- __le16 reserved0;
- __le16 total_size;
- __le16 entry_count;
- u8 data_type0;
- u8 data_size0;
- u8 mac_addr[6];
- u8 data_type1;
- u8 data_size1;
- u8 mac_addr1[6];
- u8 data_type2;
- u8 data_size2;
- __le16 vlan_id;
- u8 data_type3;
- u8 data_size3;
- __le16 last;
- u8 reserved1[464];
- __le16 subsys_ven_id;
- __le16 subsys_dev_id;
- u8 reserved2[4];
-};
-
-union flash_params {
- struct flash_params_8012 flash_params_8012;
- struct flash_params_8000 flash_params_8000;
-};
-
-/*
- * doorbell space for the rx ring context
- */
-struct rx_doorbell_context {
- u32 cnsmr_idx; /* 0x00 */
- u32 valid; /* 0x04 */
- u32 reserved[4]; /* 0x08-0x14 */
- u32 lbq_prod_idx; /* 0x18 */
- u32 sbq_prod_idx; /* 0x1c */
-};
-
-/*
- * doorbell space for the tx ring context
- */
-struct tx_doorbell_context {
- u32 prod_idx; /* 0x00 */
- u32 valid; /* 0x04 */
- u32 reserved[4]; /* 0x08-0x14 */
- u32 lbq_prod_idx; /* 0x18 */
- u32 sbq_prod_idx; /* 0x1c */
-};
-
-/* DATA STRUCTURES SHARED WITH HARDWARE. */
-struct tx_buf_desc {
- __le64 addr;
- __le32 len;
-#define TX_DESC_LEN_MASK 0x000fffff
-#define TX_DESC_C 0x40000000
-#define TX_DESC_E 0x80000000
-} __packed;
-
-/*
- * IOCB Definitions...
- */
-
-#define OPCODE_OB_MAC_IOCB 0x01
-#define OPCODE_OB_MAC_TSO_IOCB 0x02
-#define OPCODE_IB_MAC_IOCB 0x20
-#define OPCODE_IB_MPI_IOCB 0x21
-#define OPCODE_IB_AE_IOCB 0x3f
-
-struct ob_mac_iocb_req {
- u8 opcode;
- u8 flags1;
-#define OB_MAC_IOCB_REQ_OI 0x01
-#define OB_MAC_IOCB_REQ_I 0x02
-#define OB_MAC_IOCB_REQ_D 0x08
-#define OB_MAC_IOCB_REQ_F 0x10
- u8 flags2;
- u8 flags3;
-#define OB_MAC_IOCB_DFP 0x02
-#define OB_MAC_IOCB_V 0x04
- __le32 reserved1[2];
- __le16 frame_len;
-#define OB_MAC_IOCB_LEN_MASK 0x3ffff
- __le16 reserved2;
- u32 tid;
- u32 txq_idx;
- __le32 reserved3;
- __le16 vlan_tci;
- __le16 reserved4;
- struct tx_buf_desc tbd[TX_DESC_PER_IOCB];
-} __packed;
-
-struct ob_mac_iocb_rsp {
- u8 opcode; /* */
- u8 flags1; /* */
-#define OB_MAC_IOCB_RSP_OI 0x01 /* */
-#define OB_MAC_IOCB_RSP_I 0x02 /* */
-#define OB_MAC_IOCB_RSP_E 0x08 /* */
-#define OB_MAC_IOCB_RSP_S 0x10 /* too Short */
-#define OB_MAC_IOCB_RSP_L 0x20 /* too Large */
-#define OB_MAC_IOCB_RSP_P 0x40 /* Padded */
- u8 flags2; /* */
- u8 flags3; /* */
-#define OB_MAC_IOCB_RSP_B 0x80 /* */
- u32 tid;
- u32 txq_idx;
- __le32 reserved[13];
-} __packed;
-
-struct ob_mac_tso_iocb_req {
- u8 opcode;
- u8 flags1;
-#define OB_MAC_TSO_IOCB_OI 0x01
-#define OB_MAC_TSO_IOCB_I 0x02
-#define OB_MAC_TSO_IOCB_D 0x08
-#define OB_MAC_TSO_IOCB_IP4 0x40
-#define OB_MAC_TSO_IOCB_IP6 0x80
- u8 flags2;
-#define OB_MAC_TSO_IOCB_LSO 0x20
-#define OB_MAC_TSO_IOCB_UC 0x40
-#define OB_MAC_TSO_IOCB_TC 0x80
- u8 flags3;
-#define OB_MAC_TSO_IOCB_IC 0x01
-#define OB_MAC_TSO_IOCB_DFP 0x02
-#define OB_MAC_TSO_IOCB_V 0x04
- __le32 reserved1[2];
- __le32 frame_len;
- u32 tid;
- u32 txq_idx;
- __le16 total_hdrs_len;
- __le16 net_trans_offset;
-#define OB_MAC_TRANSPORT_HDR_SHIFT 6
- __le16 vlan_tci;
- __le16 mss;
- struct tx_buf_desc tbd[TX_DESC_PER_IOCB];
-} __packed;
-
-struct ob_mac_tso_iocb_rsp {
- u8 opcode;
- u8 flags1;
-#define OB_MAC_TSO_IOCB_RSP_OI 0x01
-#define OB_MAC_TSO_IOCB_RSP_I 0x02
-#define OB_MAC_TSO_IOCB_RSP_E 0x08
-#define OB_MAC_TSO_IOCB_RSP_S 0x10
-#define OB_MAC_TSO_IOCB_RSP_L 0x20
-#define OB_MAC_TSO_IOCB_RSP_P 0x40
- u8 flags2; /* */
- u8 flags3; /* */
-#define OB_MAC_TSO_IOCB_RSP_B 0x8000
- u32 tid;
- u32 txq_idx;
- __le32 reserved2[13];
-} __packed;
-
-struct ib_mac_iocb_rsp {
- u8 opcode; /* 0x20 */
- u8 flags1;
-#define IB_MAC_IOCB_RSP_OI 0x01 /* Override intr delay */
-#define IB_MAC_IOCB_RSP_I 0x02 /* Disable Intr Generation */
-#define IB_MAC_CSUM_ERR_MASK 0x1c /* A mask to use for csum errs */
-#define IB_MAC_IOCB_RSP_TE 0x04 /* Checksum error */
-#define IB_MAC_IOCB_RSP_NU 0x08 /* No checksum rcvd */
-#define IB_MAC_IOCB_RSP_IE 0x10 /* IPv4 checksum error */
-#define IB_MAC_IOCB_RSP_M_MASK 0x60 /* Multicast info */
-#define IB_MAC_IOCB_RSP_M_NONE 0x00 /* Not mcast frame */
-#define IB_MAC_IOCB_RSP_M_HASH 0x20 /* HASH mcast frame */
-#define IB_MAC_IOCB_RSP_M_REG 0x40 /* Registered mcast frame */
-#define IB_MAC_IOCB_RSP_M_PROM 0x60 /* Promiscuous mcast frame */
-#define IB_MAC_IOCB_RSP_B 0x80 /* Broadcast frame */
- u8 flags2;
-#define IB_MAC_IOCB_RSP_P 0x01 /* Promiscuous frame */
-#define IB_MAC_IOCB_RSP_V 0x02 /* Vlan tag present */
-#define IB_MAC_IOCB_RSP_ERR_MASK 0x1c /* */
-#define IB_MAC_IOCB_RSP_ERR_CODE_ERR 0x04
-#define IB_MAC_IOCB_RSP_ERR_OVERSIZE 0x08
-#define IB_MAC_IOCB_RSP_ERR_UNDERSIZE 0x10
-#define IB_MAC_IOCB_RSP_ERR_PREAMBLE 0x14
-#define IB_MAC_IOCB_RSP_ERR_FRAME_LEN 0x18
-#define IB_MAC_IOCB_RSP_ERR_CRC 0x1c
-#define IB_MAC_IOCB_RSP_U 0x20 /* UDP packet */
-#define IB_MAC_IOCB_RSP_T 0x40 /* TCP packet */
-#define IB_MAC_IOCB_RSP_FO 0x80 /* Failover port */
- u8 flags3;
-#define IB_MAC_IOCB_RSP_RSS_MASK 0x07 /* RSS mask */
-#define IB_MAC_IOCB_RSP_M_NONE 0x00 /* No RSS match */
-#define IB_MAC_IOCB_RSP_M_IPV4 0x04 /* IPv4 RSS match */
-#define IB_MAC_IOCB_RSP_M_IPV6 0x02 /* IPv6 RSS match */
-#define IB_MAC_IOCB_RSP_M_TCP_V4 0x05 /* TCP with IPv4 */
-#define IB_MAC_IOCB_RSP_M_TCP_V6 0x03 /* TCP with IPv6 */
-#define IB_MAC_IOCB_RSP_V4 0x08 /* IPV4 */
-#define IB_MAC_IOCB_RSP_V6 0x10 /* IPV6 */
-#define IB_MAC_IOCB_RSP_IH 0x20 /* Split after IP header */
-#define IB_MAC_IOCB_RSP_DS 0x40 /* data is in small buffer */
-#define IB_MAC_IOCB_RSP_DL 0x80 /* data is in large buffer */
- __le32 data_len; /* */
- __le64 data_addr; /* */
- __le32 rss; /* */
- __le16 vlan_id; /* 12 bits */
-#define IB_MAC_IOCB_RSP_C 0x1000 /* VLAN CFI bit */
-#define IB_MAC_IOCB_RSP_COS_SHIFT 12 /* class of service value */
-#define IB_MAC_IOCB_RSP_VLAN_MASK 0x0ffff
-
- __le16 reserved1;
- __le32 reserved2[6];
- u8 reserved3[3];
- u8 flags4;
-#define IB_MAC_IOCB_RSP_HV 0x20
-#define IB_MAC_IOCB_RSP_HS 0x40
-#define IB_MAC_IOCB_RSP_HL 0x80
- __le32 hdr_len; /* */
- __le64 hdr_addr; /* */
-} __packed;
-
-struct ib_ae_iocb_rsp {
- u8 opcode;
- u8 flags1;
-#define IB_AE_IOCB_RSP_OI 0x01
-#define IB_AE_IOCB_RSP_I 0x02
- u8 event;
-#define LINK_UP_EVENT 0x00
-#define LINK_DOWN_EVENT 0x01
-#define CAM_LOOKUP_ERR_EVENT 0x06
-#define SOFT_ECC_ERROR_EVENT 0x07
-#define MGMT_ERR_EVENT 0x08
-#define TEN_GIG_MAC_EVENT 0x09
-#define GPI0_H2L_EVENT 0x10
-#define GPI0_L2H_EVENT 0x20
-#define GPI1_H2L_EVENT 0x11
-#define GPI1_L2H_EVENT 0x21
-#define PCI_ERR_ANON_BUF_RD 0x40
- u8 q_id;
- __le32 reserved[15];
-} __packed;
-
-/*
- * These structures are for generic
- * handling of ib and ob iocbs.
- */
-struct ql_net_rsp_iocb {
- u8 opcode;
- u8 flags0;
- __le16 length;
- __le32 tid;
- __le32 reserved[14];
-} __packed;
-
-struct net_req_iocb {
- u8 opcode;
- u8 flags0;
- __le16 flags1;
- __le32 tid;
- __le32 reserved1[30];
-} __packed;
-
-/*
- * tx ring initialization control block for chip.
- * It is defined as:
- * "Work Queue Initialization Control Block"
- */
-struct wqicb {
- __le16 len;
-#define Q_LEN_V (1 << 4)
-#define Q_LEN_CPP_CONT 0x0000
-#define Q_LEN_CPP_16 0x0001
-#define Q_LEN_CPP_32 0x0002
-#define Q_LEN_CPP_64 0x0003
-#define Q_LEN_CPP_512 0x0006
- __le16 flags;
-#define Q_PRI_SHIFT 1
-#define Q_FLAGS_LC 0x1000
-#define Q_FLAGS_LB 0x2000
-#define Q_FLAGS_LI 0x4000
-#define Q_FLAGS_LO 0x8000
- __le16 cq_id_rss;
-#define Q_CQ_ID_RSS_RV 0x8000
- __le16 rid;
- __le64 addr;
- __le64 cnsmr_idx_addr;
-} __packed;
-
-/*
- * rx ring initialization control block for chip.
- * It is defined as:
- * "Completion Queue Initialization Control Block"
- */
-struct cqicb {
- u8 msix_vect;
- u8 reserved1;
- u8 reserved2;
- u8 flags;
-#define FLAGS_LV 0x08
-#define FLAGS_LS 0x10
-#define FLAGS_LL 0x20
-#define FLAGS_LI 0x40
-#define FLAGS_LC 0x80
- __le16 len;
-#define LEN_V (1 << 4)
-#define LEN_CPP_CONT 0x0000
-#define LEN_CPP_32 0x0001
-#define LEN_CPP_64 0x0002
-#define LEN_CPP_128 0x0003
- __le16 rid;
- __le64 addr;
- __le64 prod_idx_addr;
- __le16 pkt_delay;
- __le16 irq_delay;
- __le64 lbq_addr;
- __le16 lbq_buf_size;
- __le16 lbq_len; /* entry count */
- __le64 sbq_addr;
- __le16 sbq_buf_size;
- __le16 sbq_len; /* entry count */
-} __packed;
-
-struct ricb {
- u8 base_cq;
-#define RSS_L4K 0x80
- u8 flags;
-#define RSS_L6K 0x01
-#define RSS_LI 0x02
-#define RSS_LB 0x04
-#define RSS_LM 0x08
-#define RSS_RI4 0x10
-#define RSS_RT4 0x20
-#define RSS_RI6 0x40
-#define RSS_RT6 0x80
- __le16 mask;
- u8 hash_cq_id[1024];
- __le32 ipv6_hash_key[10];
- __le32 ipv4_hash_key[4];
-} __packed;
-
-/* SOFTWARE/DRIVER DATA STRUCTURES. */
-
-struct oal {
- struct tx_buf_desc oal[TX_DESC_PER_OAL];
-};
-
-struct map_list {
- DEFINE_DMA_UNMAP_ADDR(mapaddr);
- DEFINE_DMA_UNMAP_LEN(maplen);
-};
-
-struct tx_ring_desc {
- struct sk_buff *skb;
- struct ob_mac_iocb_req *queue_entry;
- u32 index;
- struct oal oal;
- struct map_list map[MAX_SKB_FRAGS + 2];
- int map_cnt;
- struct tx_ring_desc *next;
-};
-
-struct page_chunk {
- struct page *page; /* master page */
- char *va; /* virt addr for this chunk */
- u64 map; /* mapping for master */
- unsigned int offset; /* offset for this chunk */
- unsigned int last_flag; /* flag set for last chunk in page */
-};
-
-struct bq_desc {
- union {
- struct page_chunk pg_chunk;
- struct sk_buff *skb;
- } p;
- __le64 *addr;
- u32 index;
- DEFINE_DMA_UNMAP_ADDR(mapaddr);
- DEFINE_DMA_UNMAP_LEN(maplen);
-};
-
-#define QL_TXQ_IDX(qdev, skb) (smp_processor_id()%(qdev->tx_ring_count))
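
(QL_TXQ_IDX() simply maps the submitting CPU onto a TX ring by modulo: with tx_ring_count = 8, a call from CPU 10 selects ring 10 % 8 = 2. Note the skb argument is accepted but unused.)
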
-
-struct tx_ring {
- /*
- * queue info.
- */
- struct wqicb wqicb; /* structure used to inform chip of new queue */
- void *wq_base; /* pci_alloc:virtual addr for tx */
- dma_addr_t wq_base_dma; /* pci_alloc:dma addr for tx */
- __le32 *cnsmr_idx_sh_reg; /* shadow copy of consumer idx */
- dma_addr_t cnsmr_idx_sh_reg_dma; /* dma-shadow copy of consumer */
- u32 wq_size; /* size in bytes of queue area */
- u32 wq_len; /* number of entries in queue */
- void __iomem *prod_idx_db_reg; /* doorbell area index reg at offset 0x00 */
- void __iomem *valid_db_reg; /* doorbell area valid reg at offset 0x04 */
- u16 prod_idx; /* current value for prod idx */
- u16 cq_id; /* completion (rx) queue for tx completions */
- u8 wq_id; /* queue id for this entry */
- u8 reserved1[3];
- struct tx_ring_desc *q; /* descriptor list for the queue */
- spinlock_t lock;
- atomic_t tx_count; /* counts down for every outstanding IO */
- struct delayed_work tx_work;
- struct ql_adapter *qdev;
- u64 tx_packets;
- u64 tx_bytes;
- u64 tx_errors;
-};
-
-/*
- * Type of inbound queue.
- */
-enum {
- DEFAULT_Q = 2, /* Handles slow queue and chip/MPI events. */
- TX_Q = 3, /* Handles outbound completions. */
- RX_Q = 4, /* Handles inbound completions. */
-};
-
-struct rx_ring {
- struct cqicb cqicb; /* The chip's completion queue init control block. */
-
- /* Completion queue elements. */
- void *cq_base;
- dma_addr_t cq_base_dma;
- u32 cq_size;
- u32 cq_len;
- u16 cq_id;
- __le32 *prod_idx_sh_reg; /* Shadowed producer register. */
- dma_addr_t prod_idx_sh_reg_dma;
- void __iomem *cnsmr_idx_db_reg; /* PCI doorbell mem area + 0 */
- u32 cnsmr_idx; /* current sw idx */
- struct ql_net_rsp_iocb *curr_entry; /* next entry on queue */
- void __iomem *valid_db_reg; /* PCI doorbell mem area + 0x04 */
-
- /* Large buffer queue elements. */
- u32 lbq_len; /* entry count */
- u32 lbq_size; /* size in bytes of queue */
- u32 lbq_buf_size;
- void *lbq_base;
- dma_addr_t lbq_base_dma;
- void *lbq_base_indirect;
- dma_addr_t lbq_base_indirect_dma;
- struct page_chunk pg_chunk; /* current page for chunks */
- struct bq_desc *lbq; /* array of control blocks */
- void __iomem *lbq_prod_idx_db_reg; /* PCI doorbell mem area + 0x18 */
- u32 lbq_prod_idx; /* current sw prod idx */
- u32 lbq_curr_idx; /* next entry we expect */
- u32 lbq_clean_idx; /* beginning of new descs */
- u32 lbq_free_cnt; /* free buffer desc cnt */
-
- /* Small buffer queue elements. */
- u32 sbq_len; /* entry count */
- u32 sbq_size; /* size in bytes of queue */
- u32 sbq_buf_size;
- void *sbq_base;
- dma_addr_t sbq_base_dma;
- void *sbq_base_indirect;
- dma_addr_t sbq_base_indirect_dma;
- struct bq_desc *sbq; /* array of control blocks */
- void __iomem *sbq_prod_idx_db_reg; /* PCI doorbell mem area + 0x1c */
- u32 sbq_prod_idx; /* current sw prod idx */
- u32 sbq_curr_idx; /* next entry we expect */
- u32 sbq_clean_idx; /* beginning of new descs */
- u32 sbq_free_cnt; /* free buffer desc cnt */
-
- /* Misc. handler elements. */
- u32 type; /* Type of queue, tx, rx. */
- u32 irq; /* Which vector this ring is assigned. */
- u32 cpu; /* Which CPU this should run on. */
- char name[IFNAMSIZ + 5];
- struct napi_struct napi;
- u8 reserved;
- struct ql_adapter *qdev;
- u64 rx_packets;
- u64 rx_multicast;
- u64 rx_bytes;
- u64 rx_dropped;
- u64 rx_errors;
-};
-
-/*
- * RSS Initialization Control Block
- */
-struct hash_id {
- u8 value[4];
-};
-
-struct nic_stats {
- /*
- * These stats come from offset 200h to 278h
- * in the XGMAC register.
- */
- u64 tx_pkts;
- u64 tx_bytes;
- u64 tx_mcast_pkts;
- u64 tx_bcast_pkts;
- u64 tx_ucast_pkts;
- u64 tx_ctl_pkts;
- u64 tx_pause_pkts;
- u64 tx_64_pkt;
- u64 tx_65_to_127_pkt;
- u64 tx_128_to_255_pkt;
- u64 tx_256_511_pkt;
- u64 tx_512_to_1023_pkt;
- u64 tx_1024_to_1518_pkt;
- u64 tx_1519_to_max_pkt;
- u64 tx_undersize_pkt;
- u64 tx_oversize_pkt;
-
- /*
- * These stats come from offset 300h to 3C8h
- * in the XGMAC register.
- */
- u64 rx_bytes;
- u64 rx_bytes_ok;
- u64 rx_pkts;
- u64 rx_pkts_ok;
- u64 rx_bcast_pkts;
- u64 rx_mcast_pkts;
- u64 rx_ucast_pkts;
- u64 rx_undersize_pkts;
- u64 rx_oversize_pkts;
- u64 rx_jabber_pkts;
- u64 rx_undersize_fcerr_pkts;
- u64 rx_drop_events;
- u64 rx_fcerr_pkts;
- u64 rx_align_err;
- u64 rx_symbol_err;
- u64 rx_mac_err;
- u64 rx_ctl_pkts;
- u64 rx_pause_pkts;
- u64 rx_64_pkts;
- u64 rx_65_to_127_pkts;
- u64 rx_128_255_pkts;
- u64 rx_256_511_pkts;
- u64 rx_512_to_1023_pkts;
- u64 rx_1024_to_1518_pkts;
- u64 rx_1519_to_max_pkts;
- u64 rx_len_err_pkts;
- /* Receive Mac Err stats */
- u64 rx_code_err;
- u64 rx_oversize_err;
- u64 rx_undersize_err;
- u64 rx_preamble_err;
- u64 rx_frame_len_err;
- u64 rx_crc_err;
- u64 rx_err_count;
- /*
- * These stats come from offset 500h to 5C8h
- * in the XGMAC register.
- */
- u64 tx_cbfc_pause_frames0;
- u64 tx_cbfc_pause_frames1;
- u64 tx_cbfc_pause_frames2;
- u64 tx_cbfc_pause_frames3;
- u64 tx_cbfc_pause_frames4;
- u64 tx_cbfc_pause_frames5;
- u64 tx_cbfc_pause_frames6;
- u64 tx_cbfc_pause_frames7;
- u64 rx_cbfc_pause_frames0;
- u64 rx_cbfc_pause_frames1;
- u64 rx_cbfc_pause_frames2;
- u64 rx_cbfc_pause_frames3;
- u64 rx_cbfc_pause_frames4;
- u64 rx_cbfc_pause_frames5;
- u64 rx_cbfc_pause_frames6;
- u64 rx_cbfc_pause_frames7;
- u64 rx_nic_fifo_drop;
-};
-
-/* Firmware coredump internal register address/length pairs. */
-enum {
- MPI_CORE_REGS_ADDR = 0x00030000,
- MPI_CORE_REGS_CNT = 127,
- MPI_CORE_SH_REGS_CNT = 16,
- TEST_REGS_ADDR = 0x00001000,
- TEST_REGS_CNT = 23,
- RMII_REGS_ADDR = 0x00001040,
- RMII_REGS_CNT = 64,
- FCMAC1_REGS_ADDR = 0x00001080,
- FCMAC2_REGS_ADDR = 0x000010c0,
- FCMAC_REGS_CNT = 64,
- FC1_MBX_REGS_ADDR = 0x00001100,
- FC2_MBX_REGS_ADDR = 0x00001240,
- FC_MBX_REGS_CNT = 64,
- IDE_REGS_ADDR = 0x00001140,
- IDE_REGS_CNT = 64,
- NIC1_MBX_REGS_ADDR = 0x00001180,
- NIC2_MBX_REGS_ADDR = 0x00001280,
- NIC_MBX_REGS_CNT = 64,
- SMBUS_REGS_ADDR = 0x00001200,
- SMBUS_REGS_CNT = 64,
- I2C_REGS_ADDR = 0x00001fc0,
- I2C_REGS_CNT = 64,
- MEMC_REGS_ADDR = 0x00003000,
- MEMC_REGS_CNT = 256,
- PBUS_REGS_ADDR = 0x00007c00,
- PBUS_REGS_CNT = 256,
- MDE_REGS_ADDR = 0x00010000,
- MDE_REGS_CNT = 6,
- CODE_RAM_ADDR = 0x00020000,
- CODE_RAM_CNT = 0x2000,
- MEMC_RAM_ADDR = 0x00100000,
- MEMC_RAM_CNT = 0x2000,
-};
-
-#define MPI_COREDUMP_COOKIE 0x5555aaaa
-struct mpi_coredump_global_header {
- u32 cookie;
- u8 idString[16];
- u32 timeLo;
- u32 timeHi;
- u32 imageSize;
- u32 headerSize;
- u8 info[220];
-};
-
-struct mpi_coredump_segment_header {
- u32 cookie;
- u32 segNum;
- u32 segSize;
- u32 extra;
- u8 description[16];
-};
-
-/* Firmware coredump header segment numbers. */
-enum {
- CORE_SEG_NUM = 1,
- TEST_LOGIC_SEG_NUM = 2,
- RMII_SEG_NUM = 3,
- FCMAC1_SEG_NUM = 4,
- FCMAC2_SEG_NUM = 5,
- FC1_MBOX_SEG_NUM = 6,
- IDE_SEG_NUM = 7,
- NIC1_MBOX_SEG_NUM = 8,
- SMBUS_SEG_NUM = 9,
- FC2_MBOX_SEG_NUM = 10,
- NIC2_MBOX_SEG_NUM = 11,
- I2C_SEG_NUM = 12,
- MEMC_SEG_NUM = 13,
- PBUS_SEG_NUM = 14,
- MDE_SEG_NUM = 15,
- NIC1_CONTROL_SEG_NUM = 16,
- NIC2_CONTROL_SEG_NUM = 17,
- NIC1_XGMAC_SEG_NUM = 18,
- NIC2_XGMAC_SEG_NUM = 19,
- WCS_RAM_SEG_NUM = 20,
- MEMC_RAM_SEG_NUM = 21,
- XAUI_AN_SEG_NUM = 22,
- XAUI_HSS_PCS_SEG_NUM = 23,
- XFI_AN_SEG_NUM = 24,
- XFI_TRAIN_SEG_NUM = 25,
- XFI_HSS_PCS_SEG_NUM = 26,
- XFI_HSS_TX_SEG_NUM = 27,
- XFI_HSS_RX_SEG_NUM = 28,
- XFI_HSS_PLL_SEG_NUM = 29,
- MISC_NIC_INFO_SEG_NUM = 30,
- INTR_STATES_SEG_NUM = 31,
- CAM_ENTRIES_SEG_NUM = 32,
- ROUTING_WORDS_SEG_NUM = 33,
- ETS_SEG_NUM = 34,
- PROBE_DUMP_SEG_NUM = 35,
- ROUTING_INDEX_SEG_NUM = 36,
- MAC_PROTOCOL_SEG_NUM = 37,
- XAUI2_AN_SEG_NUM = 38,
- XAUI2_HSS_PCS_SEG_NUM = 39,
- XFI2_AN_SEG_NUM = 40,
- XFI2_TRAIN_SEG_NUM = 41,
- XFI2_HSS_PCS_SEG_NUM = 42,
- XFI2_HSS_TX_SEG_NUM = 43,
- XFI2_HSS_RX_SEG_NUM = 44,
- XFI2_HSS_PLL_SEG_NUM = 45,
- SEM_REGS_SEG_NUM = 50
-
-};
-
-/* There are 64 generic NIC registers. */
-#define NIC_REGS_DUMP_WORD_COUNT 64
-/* XGMAC word count. */
-#define XGMAC_DUMP_WORD_COUNT (XGMAC_REGISTER_END / 4)
-/* Word counts for the SERDES blocks. */
-#define XG_SERDES_XAUI_AN_COUNT 14
-#define XG_SERDES_XAUI_HSS_PCS_COUNT 33
-#define XG_SERDES_XFI_AN_COUNT 14
-#define XG_SERDES_XFI_TRAIN_COUNT 12
-#define XG_SERDES_XFI_HSS_PCS_COUNT 15
-#define XG_SERDES_XFI_HSS_TX_COUNT 32
-#define XG_SERDES_XFI_HSS_RX_COUNT 32
-#define XG_SERDES_XFI_HSS_PLL_COUNT 32
-
-/* There are 2 CNA ETS and 8 NIC ETS registers. */
-#define ETS_REGS_DUMP_WORD_COUNT 10
-
-/* Each probe mux entry stores the probe type plus 64 entries
- * that are each 64 bits in length. There are a total of
- * 34 (PRB_MX_ADDR_VALID_TOTAL) valid probes.
- */
-#define PRB_MX_ADDR_PRB_WORD_COUNT (1 + (PRB_MX_ADDR_MAX_MUX * 2))
-#define PRB_MX_DUMP_TOT_COUNT (PRB_MX_ADDR_PRB_WORD_COUNT * \
- PRB_MX_ADDR_VALID_TOTAL)
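
(Expanding the constants above: each probe contributes 1 + (PRB_MX_ADDR_MAX_MUX * 2) = 1 + 128 = 129 words, so the probe dump totals 129 * 34 = 4386 words.)
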
-/* Each routing entry consists of 4 32-bit words.
- * They are route type, index, index word, and result.
- * There are 2 route blocks with 8 entries each and
- * 2 NIC blocks with 16 entries each.
- * The total is 48 entries with 4 words each.
- */
-#define RT_IDX_DUMP_ENTRIES 48
-#define RT_IDX_DUMP_WORDS_PER_ENTRY 4
-#define RT_IDX_DUMP_TOT_WORDS (RT_IDX_DUMP_ENTRIES * \
- RT_IDX_DUMP_WORDS_PER_ENTRY)
-/* There are 10 address blocks in filter, each with
- * different entry counts and different word-count-per-entry.
- */
-#define MAC_ADDR_DUMP_ENTRIES \
- ((MAC_ADDR_MAX_CAM_ENTRIES * MAC_ADDR_MAX_CAM_WCOUNT) + \
- (MAC_ADDR_MAX_MULTICAST_ENTRIES * MAC_ADDR_MAX_MULTICAST_WCOUNT) + \
- (MAC_ADDR_MAX_VLAN_ENTRIES * MAC_ADDR_MAX_VLAN_WCOUNT) + \
- (MAC_ADDR_MAX_MCAST_FLTR_ENTRIES * MAC_ADDR_MAX_MCAST_FLTR_WCOUNT) + \
- (MAC_ADDR_MAX_FC_MAC_ENTRIES * MAC_ADDR_MAX_FC_MAC_WCOUNT) + \
- (MAC_ADDR_MAX_MGMT_MAC_ENTRIES * MAC_ADDR_MAX_MGMT_MAC_WCOUNT) + \
- (MAC_ADDR_MAX_MGMT_VLAN_ENTRIES * MAC_ADDR_MAX_MGMT_VLAN_WCOUNT) + \
- (MAC_ADDR_MAX_MGMT_V4_ENTRIES * MAC_ADDR_MAX_MGMT_V4_WCOUNT) + \
- (MAC_ADDR_MAX_MGMT_V6_ENTRIES * MAC_ADDR_MAX_MGMT_V6_WCOUNT) + \
- (MAC_ADDR_MAX_MGMT_TU_DP_ENTRIES * MAC_ADDR_MAX_MGMT_TU_DP_WCOUNT))
-#define MAC_ADDR_DUMP_WORDS_PER_ENTRY 2
-#define MAC_ADDR_DUMP_TOT_WORDS (MAC_ADDR_DUMP_ENTRIES * \
- MAC_ADDR_DUMP_WORDS_PER_ENTRY)
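
(Expanded, the sum above is (512 * 3) + (32 * 2) + 4096 + 4096 + (4 * 2) + (8 * 2) + 16 + 4 + (4 * 4) + 4 = 9856, so MAC_ADDR_DUMP_TOT_WORDS works out to 9856 * 2 = 19712 words.)
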
-/* Maximum of 4 functions whose semaphore registers are
- * in the coredump.
- */
-#define MAX_SEMAPHORE_FUNCTIONS 4
-/* Defines for access the MPI shadow registers. */
-#define RISC_124 0x0003007c
-#define RISC_127 0x0003007f
-#define SHADOW_OFFSET 0xb0000000
-#define SHADOW_REG_SHIFT 20
-
-struct ql_nic_misc {
- u32 rx_ring_count;
- u32 tx_ring_count;
- u32 intr_count;
- u32 function;
-};
-
-struct ql_reg_dump {
-
- /* segment 0 */
- struct mpi_coredump_global_header mpi_global_header;
-
- /* segment 16 */
- struct mpi_coredump_segment_header nic_regs_seg_hdr;
- u32 nic_regs[64];
-
- /* segment 30 */
- struct mpi_coredump_segment_header misc_nic_seg_hdr;
- struct ql_nic_misc misc_nic_info;
-
- /* segment 31 */
- /* one interrupt state for each CQ */
- struct mpi_coredump_segment_header intr_states_seg_hdr;
- u32 intr_states[MAX_CPUS];
-
- /* segment 32 */
- /* 3 cam words each for 16 unicast,
- * 2 cam words for each of 32 multicast.
- */
- struct mpi_coredump_segment_header cam_entries_seg_hdr;
- u32 cam_entries[(16 * 3) + (32 * 3)];
-
- /* segment 33 */
- struct mpi_coredump_segment_header nic_routing_words_seg_hdr;
- u32 nic_routing_words[16];
-
- /* segment 34 */
- struct mpi_coredump_segment_header ets_seg_hdr;
- u32 ets[8+2];
-};
-
-struct ql_mpi_coredump {
- /* segment 0 */
- struct mpi_coredump_global_header mpi_global_header;
-
- /* segment 1 */
- struct mpi_coredump_segment_header core_regs_seg_hdr;
- u32 mpi_core_regs[MPI_CORE_REGS_CNT];
- u32 mpi_core_sh_regs[MPI_CORE_SH_REGS_CNT];
-
- /* segment 2 */
- struct mpi_coredump_segment_header test_logic_regs_seg_hdr;
- u32 test_logic_regs[TEST_REGS_CNT];
-
- /* segment 3 */
- struct mpi_coredump_segment_header rmii_regs_seg_hdr;
- u32 rmii_regs[RMII_REGS_CNT];
-
- /* segment 4 */
- struct mpi_coredump_segment_header fcmac1_regs_seg_hdr;
- u32 fcmac1_regs[FCMAC_REGS_CNT];
-
- /* segment 5 */
- struct mpi_coredump_segment_header fcmac2_regs_seg_hdr;
- u32 fcmac2_regs[FCMAC_REGS_CNT];
-
- /* segment 6 */
- struct mpi_coredump_segment_header fc1_mbx_regs_seg_hdr;
- u32 fc1_mbx_regs[FC_MBX_REGS_CNT];
-
- /* segment 7 */
- struct mpi_coredump_segment_header ide_regs_seg_hdr;
- u32 ide_regs[IDE_REGS_CNT];
-
- /* segment 8 */
- struct mpi_coredump_segment_header nic1_mbx_regs_seg_hdr;
- u32 nic1_mbx_regs[NIC_MBX_REGS_CNT];
-
- /* segment 9 */
- struct mpi_coredump_segment_header smbus_regs_seg_hdr;
- u32 smbus_regs[SMBUS_REGS_CNT];
-
- /* segment 10 */
- struct mpi_coredump_segment_header fc2_mbx_regs_seg_hdr;
- u32 fc2_mbx_regs[FC_MBX_REGS_CNT];
-
- /* segment 11 */
- struct mpi_coredump_segment_header nic2_mbx_regs_seg_hdr;
- u32 nic2_mbx_regs[NIC_MBX_REGS_CNT];
-
- /* segment 12 */
- struct mpi_coredump_segment_header i2c_regs_seg_hdr;
- u32 i2c_regs[I2C_REGS_CNT];
- /* segment 13 */
- struct mpi_coredump_segment_header memc_regs_seg_hdr;
- u32 memc_regs[MEMC_REGS_CNT];
-
- /* segment 14 */
- struct mpi_coredump_segment_header pbus_regs_seg_hdr;
- u32 pbus_regs[PBUS_REGS_CNT];
-
- /* segment 15 */
- struct mpi_coredump_segment_header mde_regs_seg_hdr;
- u32 mde_regs[MDE_REGS_CNT];
-
- /* segment 16 */
- struct mpi_coredump_segment_header nic_regs_seg_hdr;
- u32 nic_regs[NIC_REGS_DUMP_WORD_COUNT];
-
- /* segment 17 */
- struct mpi_coredump_segment_header nic2_regs_seg_hdr;
- u32 nic2_regs[NIC_REGS_DUMP_WORD_COUNT];
-
- /* segment 18 */
- struct mpi_coredump_segment_header xgmac1_seg_hdr;
- u32 xgmac1[XGMAC_DUMP_WORD_COUNT];
-
- /* segment 19 */
- struct mpi_coredump_segment_header xgmac2_seg_hdr;
- u32 xgmac2[XGMAC_DUMP_WORD_COUNT];
-
- /* segment 20 */
- struct mpi_coredump_segment_header code_ram_seg_hdr;
- u32 code_ram[CODE_RAM_CNT];
-
- /* segment 21 */
- struct mpi_coredump_segment_header memc_ram_seg_hdr;
- u32 memc_ram[MEMC_RAM_CNT];
-
- /* segment 22 */
- struct mpi_coredump_segment_header xaui_an_hdr;
- u32 serdes_xaui_an[XG_SERDES_XAUI_AN_COUNT];
-
- /* segment 23 */
- struct mpi_coredump_segment_header xaui_hss_pcs_hdr;
- u32 serdes_xaui_hss_pcs[XG_SERDES_XAUI_HSS_PCS_COUNT];
-
- /* segment 24 */
- struct mpi_coredump_segment_header xfi_an_hdr;
- u32 serdes_xfi_an[XG_SERDES_XFI_AN_COUNT];
-
- /* segment 25 */
- struct mpi_coredump_segment_header xfi_train_hdr;
- u32 serdes_xfi_train[XG_SERDES_XFI_TRAIN_COUNT];
-
- /* segment 26 */
- struct mpi_coredump_segment_header xfi_hss_pcs_hdr;
- u32 serdes_xfi_hss_pcs[XG_SERDES_XFI_HSS_PCS_COUNT];
-
- /* segment 27 */
- struct mpi_coredump_segment_header xfi_hss_tx_hdr;
- u32 serdes_xfi_hss_tx[XG_SERDES_XFI_HSS_TX_COUNT];
-
- /* segment 28 */
- struct mpi_coredump_segment_header xfi_hss_rx_hdr;
- u32 serdes_xfi_hss_rx[XG_SERDES_XFI_HSS_RX_COUNT];
-
- /* segment 29 */
- struct mpi_coredump_segment_header xfi_hss_pll_hdr;
- u32 serdes_xfi_hss_pll[XG_SERDES_XFI_HSS_PLL_COUNT];
-
- /* segment 30 */
- struct mpi_coredump_segment_header misc_nic_seg_hdr;
- struct ql_nic_misc misc_nic_info;
-
- /* segment 31 */
- /* one interrupt state for each CQ */
- struct mpi_coredump_segment_header intr_states_seg_hdr;
- u32 intr_states[MAX_RX_RINGS];
-
- /* segment 32 */
- /* 3 cam words each for 16 unicast,
- * 2 cam words for each of 32 multicast.
- */
- struct mpi_coredump_segment_header cam_entries_seg_hdr;
- u32 cam_entries[(16 * 3) + (32 * 3)];
-
- /* segment 33 */
- struct mpi_coredump_segment_header nic_routing_words_seg_hdr;
- u32 nic_routing_words[16];
- /* segment 34 */
- struct mpi_coredump_segment_header ets_seg_hdr;
- u32 ets[ETS_REGS_DUMP_WORD_COUNT];
-
- /* segment 35 */
- struct mpi_coredump_segment_header probe_dump_seg_hdr;
- u32 probe_dump[PRB_MX_DUMP_TOT_COUNT];
-
- /* segment 36 */
- struct mpi_coredump_segment_header routing_reg_seg_hdr;
- u32 routing_regs[RT_IDX_DUMP_TOT_WORDS];
-
- /* segment 37 */
- struct mpi_coredump_segment_header mac_prot_reg_seg_hdr;
- u32 mac_prot_regs[MAC_ADDR_DUMP_TOT_WORDS];
-
- /* segment 38 */
- struct mpi_coredump_segment_header xaui2_an_hdr;
- u32 serdes2_xaui_an[XG_SERDES_XAUI_AN_COUNT];
-
- /* segment 39 */
- struct mpi_coredump_segment_header xaui2_hss_pcs_hdr;
- u32 serdes2_xaui_hss_pcs[XG_SERDES_XAUI_HSS_PCS_COUNT];
-
- /* segment 40 */
- struct mpi_coredump_segment_header xfi2_an_hdr;
- u32 serdes2_xfi_an[XG_SERDES_XFI_AN_COUNT];
-
- /* segment 41 */
- struct mpi_coredump_segment_header xfi2_train_hdr;
- u32 serdes2_xfi_train[XG_SERDES_XFI_TRAIN_COUNT];
-
- /* segment 42 */
- struct mpi_coredump_segment_header xfi2_hss_pcs_hdr;
- u32 serdes2_xfi_hss_pcs[XG_SERDES_XFI_HSS_PCS_COUNT];
-
- /* segment 43 */
- struct mpi_coredump_segment_header xfi2_hss_tx_hdr;
- u32 serdes2_xfi_hss_tx[XG_SERDES_XFI_HSS_TX_COUNT];
-
- /* segment 44 */
- struct mpi_coredump_segment_header xfi2_hss_rx_hdr;
- u32 serdes2_xfi_hss_rx[XG_SERDES_XFI_HSS_RX_COUNT];
-
- /* segment 45 */
- struct mpi_coredump_segment_header xfi2_hss_pll_hdr;
- u32 serdes2_xfi_hss_pll[XG_SERDES_XFI_HSS_PLL_COUNT];
-
- /* segment 50 */
- /* semaphore register for all 5 functions */
- struct mpi_coredump_segment_header sem_regs_seg_hdr;
- u32 sem_regs[MAX_SEMAPHORE_FUNCTIONS];
-};
-
-/*
- * intr_context structure is used during initialization
- * to hook the interrupts. It is also used in a single
- * irq environment as a context to the ISR.
- */
-struct intr_context {
- struct ql_adapter *qdev;
- u32 intr;
- u32 irq_mask; /* Mask of which rings the vector services. */
- u32 hooked;
- u32 intr_en_mask; /* value/mask used to enable this intr */
- u32 intr_dis_mask; /* value/mask used to disable this intr */
- u32 intr_read_mask; /* value/mask used to read this intr */
- char name[IFNAMSIZ * 2];
- atomic_t irq_cnt; /* irq_cnt is used in single vector
- * environment. It's incremented for each
- * irq handler that is scheduled. When each
- * handler finishes it decrements irq_cnt and
- * enables interrupts if it's zero. */
- irq_handler_t handler;
-};
-
-/* adapter flags definitions. */
-enum {
- QL_ADAPTER_UP = 0, /* Adapter has been brought up. */
- QL_LEGACY_ENABLED = 1,
- QL_MSI_ENABLED = 2,
- QL_MSIX_ENABLED = 3,
- QL_DMA64 = 4,
- QL_PROMISCUOUS = 5,
- QL_ALLMULTI = 6,
- QL_PORT_CFG = 7,
- QL_CAM_RT_SET = 8,
- QL_SELFTEST = 9,
- QL_LB_LINK_UP = 10,
- QL_FRC_COREDUMP = 11,
- QL_EEH_FATAL = 12,
- QL_ASIC_RECOVERY = 14, /* We are in asic recovery. */
-};
-
-/* link_status bit definitions */
-enum {
- STS_LOOPBACK_MASK = 0x00000700,
- STS_LOOPBACK_PCS = 0x00000100,
- STS_LOOPBACK_HSS = 0x00000200,
- STS_LOOPBACK_EXT = 0x00000300,
- STS_PAUSE_MASK = 0x000000c0,
- STS_PAUSE_STD = 0x00000040,
- STS_PAUSE_PRI = 0x00000080,
- STS_SPEED_MASK = 0x00000038,
- STS_SPEED_100Mb = 0x00000000,
- STS_SPEED_1Gb = 0x00000008,
- STS_SPEED_10Gb = 0x00000010,
- STS_LINK_TYPE_MASK = 0x00000007,
- STS_LINK_TYPE_XFI = 0x00000001,
- STS_LINK_TYPE_XAUI = 0x00000002,
- STS_LINK_TYPE_XFI_BP = 0x00000003,
- STS_LINK_TYPE_XAUI_BP = 0x00000004,
- STS_LINK_TYPE_10GBASET = 0x00000005,
-};
-
-/* link_config bit definitions */
-enum {
- CFG_JUMBO_FRAME_SIZE = 0x00010000,
- CFG_PAUSE_MASK = 0x00000060,
- CFG_PAUSE_STD = 0x00000020,
- CFG_PAUSE_PRI = 0x00000040,
- CFG_DCBX = 0x00000010,
- CFG_LOOPBACK_MASK = 0x00000007,
- CFG_LOOPBACK_PCS = 0x00000002,
- CFG_LOOPBACK_HSS = 0x00000004,
- CFG_LOOPBACK_EXT = 0x00000006,
- CFG_DEFAULT_MAX_FRAME_SIZE = 0x00002580,
-};
-
-struct nic_operations {
-
- int (*get_flash) (struct ql_adapter *);
- int (*port_initialize) (struct ql_adapter *);
-};
-
-/*
- * The main Adapter structure definition.
- * This structure has all fields relevant to the hardware.
- */
-struct ql_adapter {
- struct ricb ricb;
- unsigned long flags;
- u32 wol;
-
- struct nic_stats nic_stats;
-
- unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
-
- /* PCI Configuration information for this device */
- struct pci_dev *pdev;
- struct net_device *ndev; /* Parent NET device */
-
- /* Hardware information */
- u32 chip_rev_id;
- u32 fw_rev_id;
- u32 func; /* PCI function for this adapter */
- u32 alt_func; /* PCI function for alternate adapter */
- u32 port; /* Port number for this adapter */
-
- spinlock_t adapter_lock;
- spinlock_t hw_lock;
- spinlock_t stats_lock;
-
- /* PCI Bus Relative Register Addresses */
- void __iomem *reg_base;
- void __iomem *doorbell_area;
- u32 doorbell_area_size;
-
- u32 msg_enable;
-
- /* Page for Shadow Registers */
- void *rx_ring_shadow_reg_area;
- dma_addr_t rx_ring_shadow_reg_dma;
- void *tx_ring_shadow_reg_area;
- dma_addr_t tx_ring_shadow_reg_dma;
-
- u32 mailbox_in;
- u32 mailbox_out;
- struct mbox_params idc_mbc;
- struct mutex mpi_mutex;
-
- int tx_ring_size;
- int rx_ring_size;
- u32 intr_count;
- struct msix_entry *msi_x_entry;
- struct intr_context intr_context[MAX_RX_RINGS];
-
- int tx_ring_count; /* One per online CPU. */
- u32 rss_ring_count; /* One per irq vector. */
- /*
- * rx_ring_count =
- * (CPU count * outbound completion rx_ring) +
- * (irq_vector_cnt * inbound (RSS) completion rx_ring)
- */
- int rx_ring_count;
- int ring_mem_size;
- void *ring_mem;
-
- struct rx_ring rx_ring[MAX_RX_RINGS];
- struct tx_ring tx_ring[MAX_TX_RINGS];
- unsigned int lbq_buf_order;
-
- int rx_csum;
- u32 default_rx_queue;
-
- u16 rx_coalesce_usecs; /* cqicb->int_delay */
- u16 rx_max_coalesced_frames; /* cqicb->pkt_int_delay */
- u16 tx_coalesce_usecs; /* cqicb->int_delay */
- u16 tx_max_coalesced_frames; /* cqicb->pkt_int_delay */
-
- u32 xg_sem_mask;
- u32 port_link_up;
- u32 port_init;
- u32 link_status;
- struct ql_mpi_coredump *mpi_coredump;
- u32 core_is_dumped;
- u32 link_config;
- u32 led_config;
- u32 max_frame_size;
-
- union flash_params flash;
-
- struct workqueue_struct *workqueue;
- struct delayed_work asic_reset_work;
- struct delayed_work mpi_reset_work;
- struct delayed_work mpi_work;
- struct delayed_work mpi_port_cfg_work;
- struct delayed_work mpi_idc_work;
- struct delayed_work mpi_core_to_log;
- struct completion ide_completion;
- const struct nic_operations *nic_ops;
- u16 device_id;
- struct timer_list timer;
- atomic_t lb_count;
- /* Keep local copy of current mac address. */
- char current_mac_addr[ETH_ALEN];
-};
-
-/*
- * Typical Register accessor for memory mapped device.
- */
-static inline u32 ql_read32(const struct ql_adapter *qdev, int reg)
-{
- return readl(qdev->reg_base + reg);
-}
-
-/*
- * Typical Register accessor for memory mapped device.
- */
-static inline void ql_write32(const struct ql_adapter *qdev, int reg, u32 val)
-{
- writel(val, qdev->reg_base + reg);
-}
-
-/*
- * Doorbell Registers:
- * Doorbell registers are virtual registers in the PCI memory space.
- * The space is allocated by the chip during PCI initialization. The
- * device driver finds the doorbell address in BAR 3 in PCI config space.
- * The registers are used to control outbound and inbound queues. For
- * example, the producer index for an outbound queue. Each queue uses
- * one 4k chunk of memory. The lower half of the space is for outbound
- * queues. The upper half is for inbound queues.
- */
-static inline void ql_write_db_reg(u32 val, void __iomem *addr)
-{
- writel(val, addr);
-}
-
-/*
- * Doorbell Registers:
- * Relaxed variant of ql_write_db_reg() above; same doorbell layout.
- * Caller has to guarantee ordering.
- */
-static inline void ql_write_db_reg_relaxed(u32 val, void __iomem *addr)
-{
- writel_relaxed(val, addr);
-}
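
(Editor's sketch, not part of the original file: a producer-index update
built on the relaxed helper, with the caller supplying the ordering the
comment above requires. The tx_ring field names are the ones used by
ql_dump_tx_ring() in qlge_dbg.c; the wrapper itself is hypothetical.)

static inline void ql_ring_tx_db_sketch(struct tx_ring *tx_ring)
{
	/* Make the new descriptors visible before ringing the doorbell. */
	wmb();
	ql_write_db_reg_relaxed(tx_ring->prod_idx, tx_ring->prod_idx_db_reg);
}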
-
-/*
- * Shadow Registers:
- * Outbound queues have a consumer index that is maintained by the chip.
- * Inbound queues have a producer index that is maintained by the chip.
- * For lower overhead, these registers are "shadowed" to host memory
- * which allows the device driver to track the queue progress without
- * PCI reads. When an entry is placed on an inbound queue, the chip will
- * update the relevant index register and then copy the value to the
- * shadow register in host memory.
- */
-static inline u32 ql_read_sh_reg(__le32 *addr)
-{
- u32 reg;
- reg = le32_to_cpu(*addr);
- rmb();
- return reg;
-}
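
(Editor's sketch, not part of the original file: polling a chip-maintained
consumer index through its shadow copy instead of a PCI read. The field
name is the one used by ql_dump_tx_ring() in qlge_dbg.c; the helper is
hypothetical.)

static inline u32 ql_tx_cnsmr_idx_sketch(struct tx_ring *tx_ring)
{
	return ql_read_sh_reg(tx_ring->cnsmr_idx_sh_reg);
}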
-
-extern char qlge_driver_name[];
-extern const char qlge_driver_version[];
-extern const struct ethtool_ops qlge_ethtool_ops;
-
-int ql_sem_spinlock(struct ql_adapter *qdev, u32 sem_mask);
-void ql_sem_unlock(struct ql_adapter *qdev, u32 sem_mask);
-int ql_read_xgmac_reg(struct ql_adapter *qdev, u32 reg, u32 *data);
-int ql_get_mac_addr_reg(struct ql_adapter *qdev, u32 type, u16 index,
- u32 *value);
-int ql_get_routing_reg(struct ql_adapter *qdev, u32 index, u32 *value);
-int ql_write_cfg(struct ql_adapter *qdev, void *ptr, int size, u32 bit,
- u16 q_id);
-void ql_queue_fw_error(struct ql_adapter *qdev);
-void ql_mpi_work(struct work_struct *work);
-void ql_mpi_reset_work(struct work_struct *work);
-void ql_mpi_core_to_log(struct work_struct *work);
-int ql_wait_reg_rdy(struct ql_adapter *qdev, u32 reg, u32 bit, u32 ebit);
-void ql_queue_asic_error(struct ql_adapter *qdev);
-u32 ql_enable_completion_interrupt(struct ql_adapter *qdev, u32 intr);
-void ql_set_ethtool_ops(struct net_device *ndev);
-int ql_read_xgmac_reg64(struct ql_adapter *qdev, u32 reg, u64 *data);
-void ql_mpi_idc_work(struct work_struct *work);
-void ql_mpi_port_cfg_work(struct work_struct *work);
-int ql_mb_get_fw_state(struct ql_adapter *qdev);
-int ql_cam_route_initialize(struct ql_adapter *qdev);
-int ql_read_mpi_reg(struct ql_adapter *qdev, u32 reg, u32 *data);
-int ql_write_mpi_reg(struct ql_adapter *qdev, u32 reg, u32 data);
-int ql_unpause_mpi_risc(struct ql_adapter *qdev);
-int ql_pause_mpi_risc(struct ql_adapter *qdev);
-int ql_hard_reset_mpi_risc(struct ql_adapter *qdev);
-int ql_soft_reset_mpi_risc(struct ql_adapter *qdev);
-int ql_dump_risc_ram_area(struct ql_adapter *qdev, void *buf, u32 ram_addr,
- int word_count);
-int ql_core_dump(struct ql_adapter *qdev, struct ql_mpi_coredump *mpi_coredump);
-int ql_mb_about_fw(struct ql_adapter *qdev);
-int ql_mb_wol_set_magic(struct ql_adapter *qdev, u32 enable_wol);
-int ql_mb_wol_mode(struct ql_adapter *qdev, u32 wol);
-int ql_mb_set_led_cfg(struct ql_adapter *qdev, u32 led_config);
-int ql_mb_get_led_cfg(struct ql_adapter *qdev);
-void ql_link_on(struct ql_adapter *qdev);
-void ql_link_off(struct ql_adapter *qdev);
-int ql_mb_set_mgmnt_traffic_ctl(struct ql_adapter *qdev, u32 control);
-int ql_mb_get_port_cfg(struct ql_adapter *qdev);
-int ql_mb_set_port_cfg(struct ql_adapter *qdev);
-int ql_wait_fifo_empty(struct ql_adapter *qdev);
-void ql_get_dump(struct ql_adapter *qdev, void *buff);
-netdev_tx_t ql_lb_send(struct sk_buff *skb, struct net_device *ndev);
-void ql_check_lb_frame(struct ql_adapter *, struct sk_buff *);
-int ql_own_firmware(struct ql_adapter *qdev);
-int ql_clean_lb_rx_ring(struct rx_ring *rx_ring, int budget);
-
-/* #define QL_ALL_DUMP */
-/* #define QL_REG_DUMP */
-/* #define QL_DEV_DUMP */
-/* #define QL_CB_DUMP */
-/* #define QL_IB_DUMP */
-/* #define QL_OB_DUMP */
-
-#ifdef QL_REG_DUMP
-void ql_dump_xgmac_control_regs(struct ql_adapter *qdev);
-void ql_dump_routing_entries(struct ql_adapter *qdev);
-void ql_dump_regs(struct ql_adapter *qdev);
-#define QL_DUMP_REGS(qdev) ql_dump_regs(qdev)
-#define QL_DUMP_ROUTE(qdev) ql_dump_routing_entries(qdev)
-#define QL_DUMP_XGMAC_CONTROL_REGS(qdev) ql_dump_xgmac_control_regs(qdev)
-#else
-#define QL_DUMP_REGS(qdev)
-#define QL_DUMP_ROUTE(qdev)
-#define QL_DUMP_XGMAC_CONTROL_REGS(qdev)
-#endif
-
-#ifdef QL_STAT_DUMP
-void ql_dump_stat(struct ql_adapter *qdev);
-#define QL_DUMP_STAT(qdev) ql_dump_stat(qdev)
-#else
-#define QL_DUMP_STAT(qdev)
-#endif
-
-#ifdef QL_DEV_DUMP
-void ql_dump_qdev(struct ql_adapter *qdev);
-#define QL_DUMP_QDEV(qdev) ql_dump_qdev(qdev)
-#else
-#define QL_DUMP_QDEV(qdev)
-#endif
-
-#ifdef QL_CB_DUMP
-void ql_dump_wqicb(struct wqicb *wqicb);
-void ql_dump_tx_ring(struct tx_ring *tx_ring);
-void ql_dump_ricb(struct ricb *ricb);
-void ql_dump_cqicb(struct cqicb *cqicb);
-void ql_dump_rx_ring(struct rx_ring *rx_ring);
-void ql_dump_hw_cb(struct ql_adapter *qdev, int size, u32 bit, u16 q_id);
-#define QL_DUMP_RICB(ricb) ql_dump_ricb(ricb)
-#define QL_DUMP_WQICB(wqicb) ql_dump_wqicb(wqicb)
-#define QL_DUMP_TX_RING(tx_ring) ql_dump_tx_ring(tx_ring)
-#define QL_DUMP_CQICB(cqicb) ql_dump_cqicb(cqicb)
-#define QL_DUMP_RX_RING(rx_ring) ql_dump_rx_ring(rx_ring)
-#define QL_DUMP_HW_CB(qdev, size, bit, q_id) \
- ql_dump_hw_cb(qdev, size, bit, q_id)
-#else
-#define QL_DUMP_RICB(ricb)
-#define QL_DUMP_WQICB(wqicb)
-#define QL_DUMP_TX_RING(tx_ring)
-#define QL_DUMP_CQICB(cqicb)
-#define QL_DUMP_RX_RING(rx_ring)
-#define QL_DUMP_HW_CB(qdev, size, bit, q_id)
-#endif
-
-#ifdef QL_OB_DUMP
-void ql_dump_tx_desc(struct tx_buf_desc *tbd);
-void ql_dump_ob_mac_iocb(struct ob_mac_iocb_req *ob_mac_iocb);
-void ql_dump_ob_mac_rsp(struct ob_mac_iocb_rsp *ob_mac_rsp);
-#define QL_DUMP_OB_MAC_IOCB(ob_mac_iocb) ql_dump_ob_mac_iocb(ob_mac_iocb)
-#define QL_DUMP_OB_MAC_RSP(ob_mac_rsp) ql_dump_ob_mac_rsp(ob_mac_rsp)
-#else
-#define QL_DUMP_OB_MAC_IOCB(ob_mac_iocb)
-#define QL_DUMP_OB_MAC_RSP(ob_mac_rsp)
-#endif
-
-#ifdef QL_IB_DUMP
-void ql_dump_ib_mac_rsp(struct ib_mac_iocb_rsp *ib_mac_rsp);
-#define QL_DUMP_IB_MAC_RSP(ib_mac_rsp) ql_dump_ib_mac_rsp(ib_mac_rsp)
-#else
-#define QL_DUMP_IB_MAC_RSP(ib_mac_rsp)
-#endif
-
-#ifdef QL_ALL_DUMP
-void ql_dump_all(struct ql_adapter *qdev);
-#define QL_DUMP_ALL(qdev) ql_dump_all(qdev)
-#else
-#define QL_DUMP_ALL(qdev)
-#endif
-
-#endif /* _QLGE_H_ */
diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_dbg.c b/drivers/net/ethernet/qlogic/qlge/qlge_dbg.c
deleted file mode 100644
index 31389ab8bdf7..000000000000
--- a/drivers/net/ethernet/qlogic/qlge/qlge_dbg.c
+++ /dev/null
@@ -1,2024 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/slab.h>
-
-#include "qlge.h"
-
-/* Read a NIC register from the alternate function. */
-static u32 ql_read_other_func_reg(struct ql_adapter *qdev,
- u32 reg)
-{
- u32 register_to_read;
- u32 reg_val;
- int status;
-
- register_to_read = MPI_NIC_REG_BLOCK
- | MPI_NIC_READ
- | (qdev->alt_func << MPI_NIC_FUNCTION_SHIFT)
- | reg;
- status = ql_read_mpi_reg(qdev, register_to_read, &reg_val);
- if (status != 0)
- return 0xffffffff;
-
- return reg_val;
-}
-
-/* Write a NIC register from the alternate function. */
-static int ql_write_other_func_reg(struct ql_adapter *qdev,
- u32 reg, u32 reg_val)
-{
- u32 register_to_write;
- int status = 0;
-
- register_to_write = MPI_NIC_REG_BLOCK
- | MPI_NIC_READ
- | (qdev->alt_func << MPI_NIC_FUNCTION_SHIFT)
- | reg;
- status = ql_write_mpi_reg(qdev, register_to_write, reg_val);
-
- return status;
-}
-
-static int ql_wait_other_func_reg_rdy(struct ql_adapter *qdev, u32 reg,
- u32 bit, u32 err_bit)
-{
- u32 temp;
- int count = 10;
-
- while (count) {
- temp = ql_read_other_func_reg(qdev, reg);
-
- /* check for errors */
- if (temp & err_bit)
- return -1;
- else if (temp & bit)
- return 0;
- mdelay(10);
- count--;
- }
- return -1;
-}
-
-static int ql_read_other_func_serdes_reg(struct ql_adapter *qdev, u32 reg,
- u32 *data)
-{
- int status;
-
- /* wait for reg to come ready */
- status = ql_wait_other_func_reg_rdy(qdev, XG_SERDES_ADDR / 4,
- XG_SERDES_ADDR_RDY, 0);
- if (status)
- goto exit;
-
- /* set up for reg read */
- ql_write_other_func_reg(qdev, XG_SERDES_ADDR/4, reg | PROC_ADDR_R);
-
- /* wait for reg to come ready */
- status = ql_wait_other_func_reg_rdy(qdev, XG_SERDES_ADDR / 4,
- XG_SERDES_ADDR_RDY, 0);
- if (status)
- goto exit;
-
- /* get the data */
- *data = ql_read_other_func_reg(qdev, (XG_SERDES_DATA / 4));
-exit:
- return status;
-}
-
-/* Read out the SERDES registers */
-static int ql_read_serdes_reg(struct ql_adapter *qdev, u32 reg, u32 *data)
-{
- int status;
-
- /* wait for reg to come ready */
- status = ql_wait_reg_rdy(qdev, XG_SERDES_ADDR, XG_SERDES_ADDR_RDY, 0);
- if (status)
- goto exit;
-
- /* set up for reg read */
- ql_write32(qdev, XG_SERDES_ADDR, reg | PROC_ADDR_R);
-
- /* wait for reg to come ready */
- status = ql_wait_reg_rdy(qdev, XG_SERDES_ADDR, XG_SERDES_ADDR_RDY, 0);
- if (status)
- goto exit;
-
- /* get the data */
- *data = ql_read32(qdev, XG_SERDES_DATA);
-exit:
- return status;
-}
-
-static void ql_get_both_serdes(struct ql_adapter *qdev, u32 addr,
- u32 *direct_ptr, u32 *indirect_ptr,
- unsigned int direct_valid, unsigned int indirect_valid)
-{
- unsigned int status;
-
- status = 1;
- if (direct_valid)
- status = ql_read_serdes_reg(qdev, addr, direct_ptr);
- /* Dead fill any failures or invalids. */
- if (status)
- *direct_ptr = 0xDEADBEEF;
-
- status = 1;
- if (indirect_valid)
- status = ql_read_other_func_serdes_reg(
- qdev, addr, indirect_ptr);
- /* Dead fill any failures or invalids. */
- if (status)
- *indirect_ptr = 0xDEADBEEF;
-}
-
-static int ql_get_serdes_regs(struct ql_adapter *qdev,
- struct ql_mpi_coredump *mpi_coredump)
-{
- int status;
- unsigned int xfi_direct_valid, xfi_indirect_valid, xaui_direct_valid;
- unsigned int xaui_indirect_valid, i;
- u32 *direct_ptr, temp;
- u32 *indirect_ptr;
-
- xfi_direct_valid = xfi_indirect_valid = 0;
- xaui_direct_valid = xaui_indirect_valid = 1;
-
- /* The XAUI needs to be read out per port */
- status = ql_read_other_func_serdes_reg(qdev,
- XG_SERDES_XAUI_HSS_PCS_START, &temp);
- if (status)
- temp = XG_SERDES_ADDR_XAUI_PWR_DOWN;
-
- if ((temp & XG_SERDES_ADDR_XAUI_PWR_DOWN) ==
- XG_SERDES_ADDR_XAUI_PWR_DOWN)
- xaui_indirect_valid = 0;
-
- status = ql_read_serdes_reg(qdev, XG_SERDES_XAUI_HSS_PCS_START, &temp);
-
- if (status)
- temp = XG_SERDES_ADDR_XAUI_PWR_DOWN;
-
- if ((temp & XG_SERDES_ADDR_XAUI_PWR_DOWN) ==
- XG_SERDES_ADDR_XAUI_PWR_DOWN)
- xaui_direct_valid = 0;
-
- /*
- * The XFI register is shared, so we only need to read from one
- * function and then check the bits.
- */
- status = ql_read_serdes_reg(qdev, XG_SERDES_ADDR_STS, &temp);
- if (status)
- temp = 0;
-
- if ((temp & XG_SERDES_ADDR_XFI1_PWR_UP) ==
- XG_SERDES_ADDR_XFI1_PWR_UP) {
- /* now see if I'm NIC 1 or NIC 2 */
- if (qdev->func & 1)
- /* I'm NIC 2, so the indirect (NIC1) xfi is up. */
- xfi_indirect_valid = 1;
- else
- xfi_direct_valid = 1;
- }
- if ((temp & XG_SERDES_ADDR_XFI2_PWR_UP) ==
- XG_SERDES_ADDR_XFI2_PWR_UP) {
- /* now see if I'm NIC 1 or NIC 2 */
- if (qdev->func & 1)
- /* I'm NIC 2, so the indirect (NIC1) xfi is up. */
- xfi_direct_valid = 1;
- else
- xfi_indirect_valid = 1;
- }
-
- /* Get XAUI_AN register block. */
- if (qdev->func & 1) {
- /* Function 2 is direct */
- direct_ptr = mpi_coredump->serdes2_xaui_an;
- indirect_ptr = mpi_coredump->serdes_xaui_an;
- } else {
- /* Function 1 is direct */
- direct_ptr = mpi_coredump->serdes_xaui_an;
- indirect_ptr = mpi_coredump->serdes2_xaui_an;
- }
-
- for (i = 0; i <= 0x000000034; i += 4, direct_ptr++, indirect_ptr++)
- ql_get_both_serdes(qdev, i, direct_ptr, indirect_ptr,
- xaui_direct_valid, xaui_indirect_valid);
-
- /* Get XAUI_HSS_PCS register block. */
- if (qdev->func & 1) {
- direct_ptr =
- mpi_coredump->serdes2_xaui_hss_pcs;
- indirect_ptr =
- mpi_coredump->serdes_xaui_hss_pcs;
- } else {
- direct_ptr =
- mpi_coredump->serdes_xaui_hss_pcs;
- indirect_ptr =
- mpi_coredump->serdes2_xaui_hss_pcs;
- }
-
- for (i = 0x800; i <= 0x880; i += 4, direct_ptr++, indirect_ptr++)
- ql_get_both_serdes(qdev, i, direct_ptr, indirect_ptr,
- xaui_direct_valid, xaui_indirect_valid);
-
- /* Get XAUI_XFI_AN register block. */
- if (qdev->func & 1) {
- direct_ptr = mpi_coredump->serdes2_xfi_an;
- indirect_ptr = mpi_coredump->serdes_xfi_an;
- } else {
- direct_ptr = mpi_coredump->serdes_xfi_an;
- indirect_ptr = mpi_coredump->serdes2_xfi_an;
- }
-
- for (i = 0x1000; i <= 0x1034; i += 4, direct_ptr++, indirect_ptr++)
- ql_get_both_serdes(qdev, i, direct_ptr, indirect_ptr,
- xfi_direct_valid, xfi_indirect_valid);
-
- /* Get XAUI_XFI_TRAIN register block. */
- if (qdev->func & 1) {
- direct_ptr = mpi_coredump->serdes2_xfi_train;
- indirect_ptr =
- mpi_coredump->serdes_xfi_train;
- } else {
- direct_ptr = mpi_coredump->serdes_xfi_train;
- indirect_ptr =
- mpi_coredump->serdes2_xfi_train;
- }
-
- for (i = 0x1050; i <= 0x107c; i += 4, direct_ptr++, indirect_ptr++)
- ql_get_both_serdes(qdev, i, direct_ptr, indirect_ptr,
- xfi_direct_valid, xfi_indirect_valid);
-
- /* Get XAUI_XFI_HSS_PCS register block. */
- if (qdev->func & 1) {
- direct_ptr =
- mpi_coredump->serdes2_xfi_hss_pcs;
- indirect_ptr =
- mpi_coredump->serdes_xfi_hss_pcs;
- } else {
- direct_ptr =
- mpi_coredump->serdes_xfi_hss_pcs;
- indirect_ptr =
- mpi_coredump->serdes2_xfi_hss_pcs;
- }
-
- for (i = 0x1800; i <= 0x1838; i += 4, direct_ptr++, indirect_ptr++)
- ql_get_both_serdes(qdev, i, direct_ptr, indirect_ptr,
- xfi_direct_valid, xfi_indirect_valid);
-
- /* Get XAUI_XFI_HSS_TX register block. */
- if (qdev->func & 1) {
- direct_ptr =
- mpi_coredump->serdes2_xfi_hss_tx;
- indirect_ptr =
- mpi_coredump->serdes_xfi_hss_tx;
- } else {
- direct_ptr = mpi_coredump->serdes_xfi_hss_tx;
- indirect_ptr =
- mpi_coredump->serdes2_xfi_hss_tx;
- }
- for (i = 0x1c00; i <= 0x1c1f; i++, direct_ptr++, indirect_ptr++)
- ql_get_both_serdes(qdev, i, direct_ptr, indirect_ptr,
- xfi_direct_valid, xfi_indirect_valid);
-
- /* Get XAUI_XFI_HSS_RX register block. */
- if (qdev->func & 1) {
- direct_ptr =
- mpi_coredump->serdes2_xfi_hss_rx;
- indirect_ptr =
- mpi_coredump->serdes_xfi_hss_rx;
- } else {
- direct_ptr = mpi_coredump->serdes_xfi_hss_rx;
- indirect_ptr =
- mpi_coredump->serdes2_xfi_hss_rx;
- }
-
- for (i = 0x1c40; i <= 0x1c5f; i++, direct_ptr++, indirect_ptr++)
- ql_get_both_serdes(qdev, i, direct_ptr, indirect_ptr,
- xfi_direct_valid, xfi_indirect_valid);
-
- /* Get XAUI_XFI_HSS_PLL register block. */
- if (qdev->func & 1) {
- direct_ptr =
- mpi_coredump->serdes2_xfi_hss_pll;
- indirect_ptr =
- mpi_coredump->serdes_xfi_hss_pll;
- } else {
- direct_ptr =
- mpi_coredump->serdes_xfi_hss_pll;
- indirect_ptr =
- mpi_coredump->serdes2_xfi_hss_pll;
- }
- for (i = 0x1e00; i <= 0x1e1f; i++, direct_ptr++, indirect_ptr++)
- ql_get_both_serdes(qdev, i, direct_ptr, indirect_ptr,
- xfi_direct_valid, xfi_indirect_valid);
- return 0;
-}
-
-static int ql_read_other_func_xgmac_reg(struct ql_adapter *qdev, u32 reg,
- u32 *data)
-{
- int status = 0;
-
- /* wait for reg to come ready */
- status = ql_wait_other_func_reg_rdy(qdev, XGMAC_ADDR / 4,
- XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
- if (status)
- goto exit;
-
- /* set up for reg read */
- ql_write_other_func_reg(qdev, XGMAC_ADDR / 4, reg | XGMAC_ADDR_R);
-
- /* wait for reg to come ready */
- status = ql_wait_other_func_reg_rdy(qdev, XGMAC_ADDR / 4,
- XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
- if (status)
- goto exit;
-
- /* get the data */
- *data = ql_read_other_func_reg(qdev, XGMAC_DATA / 4);
-exit:
- return status;
-}
-
-/* Read the 400 xgmac control/statistics registers
- * skipping unused locations.
- */
-static int ql_get_xgmac_regs(struct ql_adapter *qdev, u32 *buf,
- unsigned int other_function)
-{
- int status = 0;
- int i;
-
- for (i = PAUSE_SRC_LO; i < XGMAC_REGISTER_END; i += 4, buf++) {
- /* We're reading 400 xgmac registers, but we filter out
- * several locations that are non-responsive to reads.
- */
- if ((i == 0x00000114) ||
- (i == 0x00000118) ||
- (i == 0x0000013c) ||
- (i == 0x00000140) ||
- (i > 0x00000150 && i < 0x000001fc) ||
- (i > 0x00000278 && i < 0x000002a0) ||
- (i > 0x000002c0 && i < 0x000002cf) ||
- (i > 0x000002dc && i < 0x000002f0) ||
- (i > 0x000003c8 && i < 0x00000400) ||
- (i > 0x00000400 && i < 0x00000410) ||
- (i > 0x00000410 && i < 0x00000420) ||
- (i > 0x00000420 && i < 0x00000430) ||
- (i > 0x00000430 && i < 0x00000440) ||
- (i > 0x00000440 && i < 0x00000450) ||
- (i > 0x00000450 && i < 0x00000500) ||
- (i > 0x0000054c && i < 0x00000568) ||
- (i > 0x000005c8 && i < 0x00000600)) {
- /* Keep the dump slot defined for skipped locations. */
- *buf = 0xdeadbeef;
- continue;
- }
-
- if (other_function)
- status = ql_read_other_func_xgmac_reg(qdev, i, buf);
- else
- status = ql_read_xgmac_reg(qdev, i, buf);
-
- /* Dead fill any failed reads. */
- if (status)
- *buf = 0xdeadbeef;
- }
- return status;
-}
-
-static int ql_get_ets_regs(struct ql_adapter *qdev, u32 *buf)
-{
- int status = 0;
- int i;
-
- for (i = 0; i < 8; i++, buf++) {
- ql_write32(qdev, NIC_ETS, i << 29 | 0x08000000);
- *buf = ql_read32(qdev, NIC_ETS);
- }
-
- for (i = 0; i < 2; i++, buf++) {
- ql_write32(qdev, CNA_ETS, i << 29 | 0x08000000);
- *buf = ql_read32(qdev, CNA_ETS);
- }
-
- return status;
-}
-
-static void ql_get_intr_states(struct ql_adapter *qdev, u32 *buf)
-{
- int i;
-
- for (i = 0; i < qdev->rx_ring_count; i++, buf++) {
- ql_write32(qdev, INTR_EN,
- qdev->intr_context[i].intr_read_mask);
- *buf = ql_read32(qdev, INTR_EN);
- }
-}
-
-static int ql_get_cam_entries(struct ql_adapter *qdev, u32 *buf)
-{
- int i, status;
- u32 value[3];
-
- status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
- if (status)
- return status;
-
- for (i = 0; i < 16; i++) {
- status = ql_get_mac_addr_reg(qdev,
- MAC_ADDR_TYPE_CAM_MAC, i, value);
- if (status) {
- netif_err(qdev, drv, qdev->ndev,
- "Failed read of mac index register\n");
- goto err;
- }
- *buf++ = value[0]; /* lower MAC address */
- *buf++ = value[1]; /* upper MAC address */
- *buf++ = value[2]; /* output */
- }
- for (i = 0; i < 32; i++) {
- status = ql_get_mac_addr_reg(qdev,
- MAC_ADDR_TYPE_MULTI_MAC, i, value);
- if (status) {
- netif_err(qdev, drv, qdev->ndev,
- "Failed read of mac index register\n");
- goto err;
- }
- *buf++ = value[0]; /* lower Mcast address */
- *buf++ = value[1]; /* upper Mcast address */
- }
-err:
- ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
- return status;
-}
-
-static int ql_get_routing_entries(struct ql_adapter *qdev, u32 *buf)
-{
- int status;
- u32 value, i;
-
- status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
- if (status)
- return status;
-
- for (i = 0; i < 16; i++) {
- status = ql_get_routing_reg(qdev, i, &value);
- if (status) {
- netif_err(qdev, drv, qdev->ndev,
- "Failed read of routing index register\n");
- goto err;
- } else {
- *buf++ = value;
- }
- }
-err:
- ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
- return status;
-}
-
-/* Read the MPI Processor shadow registers */
-static int ql_get_mpi_shadow_regs(struct ql_adapter *qdev, u32 *buf)
-{
- u32 i;
- int status;
-
- for (i = 0; i < MPI_CORE_SH_REGS_CNT; i++, buf++) {
- status = ql_write_mpi_reg(qdev, RISC_124,
- (SHADOW_OFFSET | i << SHADOW_REG_SHIFT));
- if (status)
- goto end;
- status = ql_read_mpi_reg(qdev, RISC_127, buf);
- if (status)
- goto end;
- }
-end:
- return status;
-}
-
-/* Read the MPI Processor core registers */
-static int ql_get_mpi_regs(struct ql_adapter *qdev, u32 *buf,
- u32 offset, u32 count)
-{
- int i, status = 0;
-
- for (i = 0; i < count; i++, buf++) {
- status = ql_read_mpi_reg(qdev, offset + i, buf);
- if (status)
- return status;
- }
- return status;
-}
-
-/* Read the ASIC probe dump */
-static unsigned int *ql_get_probe(struct ql_adapter *qdev, u32 clock,
- u32 valid, u32 *buf)
-{
- u32 module, mux_sel, probe, lo_val, hi_val;
-
- for (module = 0; module < PRB_MX_ADDR_MAX_MODS; module++) {
- if (!((valid >> module) & 1))
- continue;
- for (mux_sel = 0; mux_sel < PRB_MX_ADDR_MAX_MUX; mux_sel++) {
- probe = clock
- | PRB_MX_ADDR_ARE
- | mux_sel
- | (module << PRB_MX_ADDR_MOD_SEL_SHIFT);
- ql_write32(qdev, PRB_MX_ADDR, probe);
- lo_val = ql_read32(qdev, PRB_MX_DATA);
- if (mux_sel == 0) {
- *buf = probe;
- buf++;
- }
- probe |= PRB_MX_ADDR_UP;
- ql_write32(qdev, PRB_MX_ADDR, probe);
- hi_val = ql_read32(qdev, PRB_MX_DATA);
- *buf = lo_val;
- buf++;
- *buf = hi_val;
- buf++;
- }
- }
- return buf;
-}
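
(Editor's note, not part of the original file: for each valid module the
loop above emits one probe-select word plus a lo/hi pair per mux select,
so a caller could size the per-module output as below; the helper is
hypothetical.)

static inline size_t ql_probe_words_per_module_sketch(void)
{
	return 1 + 2 * PRB_MX_ADDR_MAX_MUX;
}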
-
-static int ql_get_probe_dump(struct ql_adapter *qdev, unsigned int *buf)
-{
- /* First we have to enable the probe mux */
- ql_write_mpi_reg(qdev, MPI_TEST_FUNC_PRB_CTL, MPI_TEST_FUNC_PRB_EN);
- buf = ql_get_probe(qdev, PRB_MX_ADDR_SYS_CLOCK,
- PRB_MX_ADDR_VALID_SYS_MOD, buf);
- buf = ql_get_probe(qdev, PRB_MX_ADDR_PCI_CLOCK,
- PRB_MX_ADDR_VALID_PCI_MOD, buf);
- buf = ql_get_probe(qdev, PRB_MX_ADDR_XGM_CLOCK,
- PRB_MX_ADDR_VALID_XGM_MOD, buf);
- buf = ql_get_probe(qdev, PRB_MX_ADDR_FC_CLOCK,
- PRB_MX_ADDR_VALID_FC_MOD, buf);
- return 0;
-}
-
-/* Read out the routing index registers */
-static int ql_get_routing_index_registers(struct ql_adapter *qdev, u32 *buf)
-{
- int status;
- u32 type, index, index_max;
- u32 result_index;
- u32 result_data;
- u32 val;
-
- status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
- if (status)
- return status;
-
- for (type = 0; type < 4; type++) {
- if (type < 2)
- index_max = 8;
- else
- index_max = 16;
- for (index = 0; index < index_max; index++) {
- val = RT_IDX_RS
- | (type << RT_IDX_TYPE_SHIFT)
- | (index << RT_IDX_IDX_SHIFT);
- ql_write32(qdev, RT_IDX, val);
- result_index = 0;
- while ((result_index & RT_IDX_MR) == 0)
- result_index = ql_read32(qdev, RT_IDX);
- result_data = ql_read32(qdev, RT_DATA);
- *buf = type;
- buf++;
- *buf = index;
- buf++;
- *buf = result_index;
- buf++;
- *buf = result_data;
- buf++;
- }
- }
- ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
- return status;
-}
-
-/* Read out the MAC protocol registers */
-static void ql_get_mac_protocol_registers(struct ql_adapter *qdev, u32 *buf)
-{
- u32 result_index, result_data;
- u32 type;
- u32 index;
- u32 offset;
- u32 val;
- u32 initial_val = MAC_ADDR_RS;
- u32 max_index;
- u32 max_offset;
-
- for (type = 0; type < MAC_ADDR_TYPE_COUNT; type++) {
- switch (type) {
-
- case 0: /* CAM */
- initial_val |= MAC_ADDR_ADR;
- max_index = MAC_ADDR_MAX_CAM_ENTRIES;
- max_offset = MAC_ADDR_MAX_CAM_WCOUNT;
- break;
- case 1: /* Multicast MAC Address */
- max_index = MAC_ADDR_MAX_CAM_WCOUNT;
- max_offset = MAC_ADDR_MAX_CAM_WCOUNT;
- break;
- case 2: /* VLAN filter mask */
- case 3: /* MC filter mask */
- max_index = MAC_ADDR_MAX_CAM_WCOUNT;
- max_offset = MAC_ADDR_MAX_CAM_WCOUNT;
- break;
- case 4: /* FC MAC addresses */
- max_index = MAC_ADDR_MAX_FC_MAC_ENTRIES;
- max_offset = MAC_ADDR_MAX_FC_MAC_WCOUNT;
- break;
- case 5: /* Mgmt MAC addresses */
- max_index = MAC_ADDR_MAX_MGMT_MAC_ENTRIES;
- max_offset = MAC_ADDR_MAX_MGMT_MAC_WCOUNT;
- break;
- case 6: /* Mgmt VLAN addresses */
- max_index = MAC_ADDR_MAX_MGMT_VLAN_ENTRIES;
- max_offset = MAC_ADDR_MAX_MGMT_VLAN_WCOUNT;
- break;
- case 7: /* Mgmt IPv4 address */
- max_index = MAC_ADDR_MAX_MGMT_V4_ENTRIES;
- max_offset = MAC_ADDR_MAX_MGMT_V4_WCOUNT;
- break;
- case 8: /* Mgmt IPv6 address */
- max_index = MAC_ADDR_MAX_MGMT_V6_ENTRIES;
- max_offset = MAC_ADDR_MAX_MGMT_V6_WCOUNT;
- break;
- case 9: /* Mgmt TCP/UDP Dest port */
- max_index = MAC_ADDR_MAX_MGMT_TU_DP_ENTRIES;
- max_offset = MAC_ADDR_MAX_MGMT_TU_DP_WCOUNT;
- break;
- default:
- pr_err("Bad type!!! 0x%08x\n", type);
- max_index = 0;
- max_offset = 0;
- break;
- }
- for (index = 0; index < max_index; index++) {
- for (offset = 0; offset < max_offset; offset++) {
- val = initial_val
- | (type << MAC_ADDR_TYPE_SHIFT)
- | (index << MAC_ADDR_IDX_SHIFT)
- | (offset);
- ql_write32(qdev, MAC_ADDR_IDX, val);
- result_index = 0;
- while ((result_index & MAC_ADDR_MR) == 0) {
- result_index = ql_read32(qdev,
- MAC_ADDR_IDX);
- }
- result_data = ql_read32(qdev, MAC_ADDR_DATA);
- *buf = result_index;
- buf++;
- *buf = result_data;
- buf++;
- }
- }
- }
-}
-
-static void ql_get_sem_registers(struct ql_adapter *qdev, u32 *buf)
-{
- u32 func_num, reg, reg_val;
- int status;
-
- for (func_num = 0; func_num < MAX_SEMAPHORE_FUNCTIONS; func_num++) {
- reg = MPI_NIC_REG_BLOCK
- | (func_num << MPI_NIC_FUNCTION_SHIFT)
- | (SEM / 4);
- status = ql_read_mpi_reg(qdev, reg, &reg_val);
- *buf = reg_val;
- /* if the read failed then dead fill the element. */
- if (status)
- *buf = 0xdeadbeef;
- buf++;
- }
-}
-
-/* Create a coredump segment header */
-static void ql_build_coredump_seg_header(
- struct mpi_coredump_segment_header *seg_hdr,
- u32 seg_number, u32 seg_size, u8 *desc)
-{
- memset(seg_hdr, 0, sizeof(struct mpi_coredump_segment_header));
- seg_hdr->cookie = MPI_COREDUMP_COOKIE;
- seg_hdr->segNum = seg_number;
- seg_hdr->segSize = seg_size;
- strncpy(seg_hdr->description, desc, (sizeof(seg_hdr->description)) - 1);
-}
-
-/*
- * This function should be called when a coredump / probedump
- * is to be extracted from the HBA. It is assumed there is a
- * qdev structure that contains the base address of the register
- * space for this function as well as a coredump structure that
- * will contain the dump.
- */
-int ql_core_dump(struct ql_adapter *qdev, struct ql_mpi_coredump *mpi_coredump)
-{
- int status;
- int i;
-
- if (!mpi_coredump) {
- netif_err(qdev, drv, qdev->ndev, "No memory allocated\n");
- return -EINVAL;
- }
-
- /* Try to get the spinlock, but don't worry if
- * it isn't available. If the firmware died it
- * might be holding the sem.
- */
- ql_sem_spinlock(qdev, SEM_PROC_REG_MASK);
-
- status = ql_pause_mpi_risc(qdev);
- if (status) {
- netif_err(qdev, drv, qdev->ndev,
- "Failed RISC pause. Status = 0x%.08x\n", status);
- goto err;
- }
-
- /* Insert the global header */
- memset(&(mpi_coredump->mpi_global_header), 0,
- sizeof(struct mpi_coredump_global_header));
- mpi_coredump->mpi_global_header.cookie = MPI_COREDUMP_COOKIE;
- mpi_coredump->mpi_global_header.headerSize =
- sizeof(struct mpi_coredump_global_header);
- mpi_coredump->mpi_global_header.imageSize =
- sizeof(struct ql_mpi_coredump);
- strncpy(mpi_coredump->mpi_global_header.idString, "MPI Coredump",
- sizeof(mpi_coredump->mpi_global_header.idString));
-
- /* Get generic NIC reg dump */
- ql_build_coredump_seg_header(&mpi_coredump->nic_regs_seg_hdr,
- NIC1_CONTROL_SEG_NUM,
- sizeof(struct mpi_coredump_segment_header) +
- sizeof(mpi_coredump->nic_regs), "NIC1 Registers");
-
- ql_build_coredump_seg_header(&mpi_coredump->nic2_regs_seg_hdr,
- NIC2_CONTROL_SEG_NUM,
- sizeof(struct mpi_coredump_segment_header) +
- sizeof(mpi_coredump->nic2_regs), "NIC2 Registers");
-
- /* Get XGMac registers. (Segment 18, Rev C. step 21) */
- ql_build_coredump_seg_header(&mpi_coredump->xgmac1_seg_hdr,
- NIC1_XGMAC_SEG_NUM,
- sizeof(struct mpi_coredump_segment_header) +
- sizeof(mpi_coredump->xgmac1), "NIC1 XGMac Registers");
-
- ql_build_coredump_seg_header(&mpi_coredump->xgmac2_seg_hdr,
- NIC2_XGMAC_SEG_NUM,
- sizeof(struct mpi_coredump_segment_header) +
- sizeof(mpi_coredump->xgmac2), "NIC2 XGMac Registers");
-
- if (qdev->func & 1) {
- /* Odd means our function is NIC 2 */
- for (i = 0; i < NIC_REGS_DUMP_WORD_COUNT; i++)
- mpi_coredump->nic2_regs[i] =
- ql_read32(qdev, i * sizeof(u32));
-
- for (i = 0; i < NIC_REGS_DUMP_WORD_COUNT; i++)
- mpi_coredump->nic_regs[i] =
- ql_read_other_func_reg(qdev, (i * sizeof(u32)) / 4);
-
- ql_get_xgmac_regs(qdev, &mpi_coredump->xgmac2[0], 0);
- ql_get_xgmac_regs(qdev, &mpi_coredump->xgmac1[0], 1);
- } else {
- /* Even means our function is NIC 1 */
- for (i = 0; i < NIC_REGS_DUMP_WORD_COUNT; i++)
- mpi_coredump->nic_regs[i] =
- ql_read32(qdev, i * sizeof(u32));
- for (i = 0; i < NIC_REGS_DUMP_WORD_COUNT; i++)
- mpi_coredump->nic2_regs[i] =
- ql_read_other_func_reg(qdev, (i * sizeof(u32)) / 4);
-
- ql_get_xgmac_regs(qdev, &mpi_coredump->xgmac1[0], 0);
- ql_get_xgmac_regs(qdev, &mpi_coredump->xgmac2[0], 1);
- }
-
- /* Rev C. Step 20a */
- ql_build_coredump_seg_header(&mpi_coredump->xaui_an_hdr,
- XAUI_AN_SEG_NUM,
- sizeof(struct mpi_coredump_segment_header) +
- sizeof(mpi_coredump->serdes_xaui_an),
- "XAUI AN Registers");
-
- /* Rev C. Step 20b */
- ql_build_coredump_seg_header(&mpi_coredump->xaui_hss_pcs_hdr,
- XAUI_HSS_PCS_SEG_NUM,
- sizeof(struct mpi_coredump_segment_header) +
- sizeof(mpi_coredump->serdes_xaui_hss_pcs),
- "XAUI HSS PCS Registers");
-
- ql_build_coredump_seg_header(&mpi_coredump->xfi_an_hdr, XFI_AN_SEG_NUM,
- sizeof(struct mpi_coredump_segment_header) +
- sizeof(mpi_coredump->serdes_xfi_an),
- "XFI AN Registers");
-
- ql_build_coredump_seg_header(&mpi_coredump->xfi_train_hdr,
- XFI_TRAIN_SEG_NUM,
- sizeof(struct mpi_coredump_segment_header) +
- sizeof(mpi_coredump->serdes_xfi_train),
- "XFI TRAIN Registers");
-
- ql_build_coredump_seg_header(&mpi_coredump->xfi_hss_pcs_hdr,
- XFI_HSS_PCS_SEG_NUM,
- sizeof(struct mpi_coredump_segment_header) +
- sizeof(mpi_coredump->serdes_xfi_hss_pcs),
- "XFI HSS PCS Registers");
-
- ql_build_coredump_seg_header(&mpi_coredump->xfi_hss_tx_hdr,
- XFI_HSS_TX_SEG_NUM,
- sizeof(struct mpi_coredump_segment_header) +
- sizeof(mpi_coredump->serdes_xfi_hss_tx),
- "XFI HSS TX Registers");
-
- ql_build_coredump_seg_header(&mpi_coredump->xfi_hss_rx_hdr,
- XFI_HSS_RX_SEG_NUM,
- sizeof(struct mpi_coredump_segment_header) +
- sizeof(mpi_coredump->serdes_xfi_hss_rx),
- "XFI HSS RX Registers");
-
- ql_build_coredump_seg_header(&mpi_coredump->xfi_hss_pll_hdr,
- XFI_HSS_PLL_SEG_NUM,
- sizeof(struct mpi_coredump_segment_header) +
- sizeof(mpi_coredump->serdes_xfi_hss_pll),
- "XFI HSS PLL Registers");
-
- ql_build_coredump_seg_header(&mpi_coredump->xaui2_an_hdr,
- XAUI2_AN_SEG_NUM,
- sizeof(struct mpi_coredump_segment_header) +
- sizeof(mpi_coredump->serdes2_xaui_an),
- "XAUI2 AN Registers");
-
- ql_build_coredump_seg_header(&mpi_coredump->xaui2_hss_pcs_hdr,
- XAUI2_HSS_PCS_SEG_NUM,
- sizeof(struct mpi_coredump_segment_header) +
- sizeof(mpi_coredump->serdes2_xaui_hss_pcs),
- "XAUI2 HSS PCS Registers");
-
- ql_build_coredump_seg_header(&mpi_coredump->xfi2_an_hdr,
- XFI2_AN_SEG_NUM,
- sizeof(struct mpi_coredump_segment_header) +
- sizeof(mpi_coredump->serdes2_xfi_an),
- "XFI2 AN Registers");
-
- ql_build_coredump_seg_header(&mpi_coredump->xfi2_train_hdr,
- XFI2_TRAIN_SEG_NUM,
- sizeof(struct mpi_coredump_segment_header) +
- sizeof(mpi_coredump->serdes2_xfi_train),
- "XFI2 TRAIN Registers");
-
- ql_build_coredump_seg_header(&mpi_coredump->xfi2_hss_pcs_hdr,
- XFI2_HSS_PCS_SEG_NUM,
- sizeof(struct mpi_coredump_segment_header) +
- sizeof(mpi_coredump->serdes2_xfi_hss_pcs),
- "XFI2 HSS PCS Registers");
-
- ql_build_coredump_seg_header(&mpi_coredump->xfi2_hss_tx_hdr,
- XFI2_HSS_TX_SEG_NUM,
- sizeof(struct mpi_coredump_segment_header) +
- sizeof(mpi_coredump->serdes2_xfi_hss_tx),
- "XFI2 HSS TX Registers");
-
- ql_build_coredump_seg_header(&mpi_coredump->xfi2_hss_rx_hdr,
- XFI2_HSS_RX_SEG_NUM,
- sizeof(struct mpi_coredump_segment_header) +
- sizeof(mpi_coredump->serdes2_xfi_hss_rx),
- "XFI2 HSS RX Registers");
-
- ql_build_coredump_seg_header(&mpi_coredump->xfi2_hss_pll_hdr,
- XFI2_HSS_PLL_SEG_NUM,
- sizeof(struct mpi_coredump_segment_header) +
- sizeof(mpi_coredump->serdes2_xfi_hss_pll),
- "XFI2 HSS PLL Registers");
-
- status = ql_get_serdes_regs(qdev, mpi_coredump);
- if (status) {
- netif_err(qdev, drv, qdev->ndev,
- "Failed Dump of Serdes Registers. Status = 0x%.08x\n",
- status);
- goto err;
- }
-
- ql_build_coredump_seg_header(&mpi_coredump->core_regs_seg_hdr,
- CORE_SEG_NUM,
- sizeof(mpi_coredump->core_regs_seg_hdr) +
- sizeof(mpi_coredump->mpi_core_regs) +
- sizeof(mpi_coredump->mpi_core_sh_regs),
- "Core Registers");
-
- /* Get the MPI Core Registers */
- status = ql_get_mpi_regs(qdev, &mpi_coredump->mpi_core_regs[0],
- MPI_CORE_REGS_ADDR, MPI_CORE_REGS_CNT);
- if (status)
- goto err;
- /* Get the 16 MPI shadow registers */
- status = ql_get_mpi_shadow_regs(qdev,
- &mpi_coredump->mpi_core_sh_regs[0]);
- if (status)
- goto err;
-
- /* Get the Test Logic Registers */
- ql_build_coredump_seg_header(&mpi_coredump->test_logic_regs_seg_hdr,
- TEST_LOGIC_SEG_NUM,
- sizeof(struct mpi_coredump_segment_header)
- + sizeof(mpi_coredump->test_logic_regs),
- "Test Logic Regs");
- status = ql_get_mpi_regs(qdev, &mpi_coredump->test_logic_regs[0],
- TEST_REGS_ADDR, TEST_REGS_CNT);
- if (status)
- goto err;
-
- /* Get the RMII Registers */
- ql_build_coredump_seg_header(&mpi_coredump->rmii_regs_seg_hdr,
- RMII_SEG_NUM,
- sizeof(struct mpi_coredump_segment_header)
- + sizeof(mpi_coredump->rmii_regs),
- "RMII Registers");
- status = ql_get_mpi_regs(qdev, &mpi_coredump->rmii_regs[0],
- RMII_REGS_ADDR, RMII_REGS_CNT);
- if (status)
- goto err;
-
- /* Get the FCMAC1 Registers */
- ql_build_coredump_seg_header(&mpi_coredump->fcmac1_regs_seg_hdr,
- FCMAC1_SEG_NUM,
- sizeof(struct mpi_coredump_segment_header)
- + sizeof(mpi_coredump->fcmac1_regs),
- "FCMAC1 Registers");
- status = ql_get_mpi_regs(qdev, &mpi_coredump->fcmac1_regs[0],
- FCMAC1_REGS_ADDR, FCMAC_REGS_CNT);
- if (status)
- goto err;
-
- /* Get the FCMAC2 Registers */
-
- ql_build_coredump_seg_header(&mpi_coredump->fcmac2_regs_seg_hdr,
- FCMAC2_SEG_NUM,
- sizeof(struct mpi_coredump_segment_header)
- + sizeof(mpi_coredump->fcmac2_regs),
- "FCMAC2 Registers");
-
- status = ql_get_mpi_regs(qdev, &mpi_coredump->fcmac2_regs[0],
- FCMAC2_REGS_ADDR, FCMAC_REGS_CNT);
- if (status)
- goto err;
-
- /* Get the FC1 MBX Registers */
- ql_build_coredump_seg_header(&mpi_coredump->fc1_mbx_regs_seg_hdr,
- FC1_MBOX_SEG_NUM,
- sizeof(struct mpi_coredump_segment_header)
- + sizeof(mpi_coredump->fc1_mbx_regs),
- "FC1 MBox Regs");
- status = ql_get_mpi_regs(qdev, &mpi_coredump->fc1_mbx_regs[0],
- FC1_MBX_REGS_ADDR, FC_MBX_REGS_CNT);
- if (status)
- goto err;
-
- /* Get the IDE Registers */
- ql_build_coredump_seg_header(&mpi_coredump->ide_regs_seg_hdr,
- IDE_SEG_NUM,
- sizeof(struct mpi_coredump_segment_header)
- + sizeof(mpi_coredump->ide_regs),
- "IDE Registers");
- status = ql_get_mpi_regs(qdev, &mpi_coredump->ide_regs[0],
- IDE_REGS_ADDR, IDE_REGS_CNT);
- if (status)
- goto err;
-
- /* Get the NIC1 MBX Registers */
- ql_build_coredump_seg_header(&mpi_coredump->nic1_mbx_regs_seg_hdr,
- NIC1_MBOX_SEG_NUM,
- sizeof(struct mpi_coredump_segment_header)
- + sizeof(mpi_coredump->nic1_mbx_regs),
- "NIC1 MBox Regs");
- status = ql_get_mpi_regs(qdev, &mpi_coredump->nic1_mbx_regs[0],
- NIC1_MBX_REGS_ADDR, NIC_MBX_REGS_CNT);
- if (status)
- goto err;
-
- /* Get the SMBus Registers */
- ql_build_coredump_seg_header(&mpi_coredump->smbus_regs_seg_hdr,
- SMBUS_SEG_NUM,
- sizeof(struct mpi_coredump_segment_header)
- + sizeof(mpi_coredump->smbus_regs),
- "SMBus Registers");
- status = ql_get_mpi_regs(qdev, &mpi_coredump->smbus_regs[0],
- SMBUS_REGS_ADDR, SMBUS_REGS_CNT);
- if (status)
- goto err;
-
- /* Get the FC2 MBX Registers */
- ql_build_coredump_seg_header(&mpi_coredump->fc2_mbx_regs_seg_hdr,
- FC2_MBOX_SEG_NUM,
- sizeof(struct mpi_coredump_segment_header)
- + sizeof(mpi_coredump->fc2_mbx_regs),
- "FC2 MBox Regs");
- status = ql_get_mpi_regs(qdev, &mpi_coredump->fc2_mbx_regs[0],
- FC2_MBX_REGS_ADDR, FC_MBX_REGS_CNT);
- if (status)
- goto err;
-
- /* Get the NIC2 MBX Registers */
- ql_build_coredump_seg_header(&mpi_coredump->nic2_mbx_regs_seg_hdr,
- NIC2_MBOX_SEG_NUM,
- sizeof(struct mpi_coredump_segment_header)
- + sizeof(mpi_coredump->nic2_mbx_regs),
- "NIC2 MBox Regs");
- status = ql_get_mpi_regs(qdev, &mpi_coredump->nic2_mbx_regs[0],
- NIC2_MBX_REGS_ADDR, NIC_MBX_REGS_CNT);
- if (status)
- goto err;
-
- /* Get the I2C Registers */
- ql_build_coredump_seg_header(&mpi_coredump->i2c_regs_seg_hdr,
- I2C_SEG_NUM,
- sizeof(struct mpi_coredump_segment_header)
- + sizeof(mpi_coredump->i2c_regs),
- "I2C Registers");
- status = ql_get_mpi_regs(qdev, &mpi_coredump->i2c_regs[0],
- I2C_REGS_ADDR, I2C_REGS_CNT);
- if (status)
- goto err;
-
- /* Get the MEMC Registers */
- ql_build_coredump_seg_header(&mpi_coredump->memc_regs_seg_hdr,
- MEMC_SEG_NUM,
- sizeof(struct mpi_coredump_segment_header)
- + sizeof(mpi_coredump->memc_regs),
- "MEMC Registers");
- status = ql_get_mpi_regs(qdev, &mpi_coredump->memc_regs[0],
- MEMC_REGS_ADDR, MEMC_REGS_CNT);
- if (status)
- goto err;
-
- /* Get the PBus Registers */
- ql_build_coredump_seg_header(&mpi_coredump->pbus_regs_seg_hdr,
- PBUS_SEG_NUM,
- sizeof(struct mpi_coredump_segment_header)
- + sizeof(mpi_coredump->pbus_regs),
- "PBUS Registers");
- status = ql_get_mpi_regs(qdev, &mpi_coredump->pbus_regs[0],
- PBUS_REGS_ADDR, PBUS_REGS_CNT);
- if (status)
- goto err;
-
- /* Get the MDE Registers */
- ql_build_coredump_seg_header(&mpi_coredump->mde_regs_seg_hdr,
- MDE_SEG_NUM,
- sizeof(struct mpi_coredump_segment_header)
- + sizeof(mpi_coredump->mde_regs),
- "MDE Registers");
- status = ql_get_mpi_regs(qdev, &mpi_coredump->mde_regs[0],
- MDE_REGS_ADDR, MDE_REGS_CNT);
- if (status)
- goto err;
-
- ql_build_coredump_seg_header(&mpi_coredump->misc_nic_seg_hdr,
- MISC_NIC_INFO_SEG_NUM,
- sizeof(struct mpi_coredump_segment_header)
- + sizeof(mpi_coredump->misc_nic_info),
- "MISC NIC INFO");
- mpi_coredump->misc_nic_info.rx_ring_count = qdev->rx_ring_count;
- mpi_coredump->misc_nic_info.tx_ring_count = qdev->tx_ring_count;
- mpi_coredump->misc_nic_info.intr_count = qdev->intr_count;
- mpi_coredump->misc_nic_info.function = qdev->func;
-
- /* Segment 31 */
- /* Get indexed register values. */
- ql_build_coredump_seg_header(&mpi_coredump->intr_states_seg_hdr,
- INTR_STATES_SEG_NUM,
- sizeof(struct mpi_coredump_segment_header)
- + sizeof(mpi_coredump->intr_states),
- "INTR States");
- ql_get_intr_states(qdev, &mpi_coredump->intr_states[0]);
-
- ql_build_coredump_seg_header(&mpi_coredump->cam_entries_seg_hdr,
- CAM_ENTRIES_SEG_NUM,
- sizeof(struct mpi_coredump_segment_header)
- + sizeof(mpi_coredump->cam_entries),
- "CAM Entries");
- status = ql_get_cam_entries(qdev, &mpi_coredump->cam_entries[0]);
- if (status)
- goto err;
-
- ql_build_coredump_seg_header(&mpi_coredump->nic_routing_words_seg_hdr,
- ROUTING_WORDS_SEG_NUM,
- sizeof(struct mpi_coredump_segment_header)
- + sizeof(mpi_coredump->nic_routing_words),
- "Routing Words");
- status = ql_get_routing_entries(qdev,
- &mpi_coredump->nic_routing_words[0]);
- if (status)
- goto err;
-
- /* Segment 34 (Rev C. step 23) */
- ql_build_coredump_seg_header(&mpi_coredump->ets_seg_hdr,
- ETS_SEG_NUM,
- sizeof(struct mpi_coredump_segment_header)
- + sizeof(mpi_coredump->ets),
- "ETS Registers");
- status = ql_get_ets_regs(qdev, &mpi_coredump->ets[0]);
- if (status)
- goto err;
-
- ql_build_coredump_seg_header(&mpi_coredump->probe_dump_seg_hdr,
- PROBE_DUMP_SEG_NUM,
- sizeof(struct mpi_coredump_segment_header)
- + sizeof(mpi_coredump->probe_dump),
- "Probe Dump");
- ql_get_probe_dump(qdev, &mpi_coredump->probe_dump[0]);
-
- ql_build_coredump_seg_header(&mpi_coredump->routing_reg_seg_hdr,
- ROUTING_INDEX_SEG_NUM,
- sizeof(struct mpi_coredump_segment_header)
- + sizeof(mpi_coredump->routing_regs),
- "Routing Regs");
- status = ql_get_routing_index_registers(qdev,
- &mpi_coredump->routing_regs[0]);
- if (status)
- goto err;
-
- ql_build_coredump_seg_header(&mpi_coredump->mac_prot_reg_seg_hdr,
- MAC_PROTOCOL_SEG_NUM,
- sizeof(struct mpi_coredump_segment_header)
- + sizeof(mpi_coredump->mac_prot_regs),
- "MAC Prot Regs");
- ql_get_mac_protocol_registers(qdev, &mpi_coredump->mac_prot_regs[0]);
-
- /* Get the semaphore registers for all 5 functions */
- ql_build_coredump_seg_header(&mpi_coredump->sem_regs_seg_hdr,
- SEM_REGS_SEG_NUM,
- sizeof(struct mpi_coredump_segment_header) +
- sizeof(mpi_coredump->sem_regs), "Sem Registers");
-
- ql_get_sem_registers(qdev, &mpi_coredump->sem_regs[0]);
-
- /* Prevent the mpi restarting while we dump the memory.*/
- ql_write_mpi_reg(qdev, MPI_TEST_FUNC_RST_STS, MPI_TEST_FUNC_RST_FRC);
-
- /* clear the pause */
- status = ql_unpause_mpi_risc(qdev);
- if (status) {
- netif_err(qdev, drv, qdev->ndev,
- "Failed RISC unpause. Status = 0x%.08x\n", status);
- goto err;
- }
-
- /* Reset the RISC so we can dump RAM */
- status = ql_hard_reset_mpi_risc(qdev);
- if (status) {
- netif_err(qdev, drv, qdev->ndev,
- "Failed RISC reset. Status = 0x%.08x\n", status);
- goto err;
- }
-
- ql_build_coredump_seg_header(&mpi_coredump->code_ram_seg_hdr,
- WCS_RAM_SEG_NUM,
- sizeof(struct mpi_coredump_segment_header)
- + sizeof(mpi_coredump->code_ram),
- "WCS RAM");
- status = ql_dump_risc_ram_area(qdev, &mpi_coredump->code_ram[0],
- CODE_RAM_ADDR, CODE_RAM_CNT);
- if (status) {
- netif_err(qdev, drv, qdev->ndev,
- "Failed Dump of CODE RAM. Status = 0x%.08x\n",
- status);
- goto err;
- }
-
- /* Insert the segment header */
- ql_build_coredump_seg_header(&mpi_coredump->memc_ram_seg_hdr,
- MEMC_RAM_SEG_NUM,
- sizeof(struct mpi_coredump_segment_header)
- + sizeof(mpi_coredump->memc_ram),
- "MEMC RAM");
- status = ql_dump_risc_ram_area(qdev, &mpi_coredump->memc_ram[0],
- MEMC_RAM_ADDR, MEMC_RAM_CNT);
- if (status) {
- netif_err(qdev, drv, qdev->ndev,
- "Failed Dump of MEMC RAM. Status = 0x%.08x\n",
- status);
- goto err;
- }
-err:
- ql_sem_unlock(qdev, SEM_PROC_REG_MASK); /* does flush too */
- return status;
-}
-
-static void ql_get_core_dump(struct ql_adapter *qdev)
-{
- if (!ql_own_firmware(qdev)) {
- netif_err(qdev, drv, qdev->ndev, "Don't own firmware!\n");
- return;
- }
-
- if (!netif_running(qdev->ndev)) {
- netif_err(qdev, ifup, qdev->ndev,
- "Force Coredump can only be done from interface that is up\n");
- return;
- }
- ql_queue_fw_error(qdev);
-}
-
-static void ql_gen_reg_dump(struct ql_adapter *qdev,
- struct ql_reg_dump *mpi_coredump)
-{
- int i, status;
-
- memset(&(mpi_coredump->mpi_global_header), 0,
- sizeof(struct mpi_coredump_global_header));
- mpi_coredump->mpi_global_header.cookie = MPI_COREDUMP_COOKIE;
- mpi_coredump->mpi_global_header.headerSize =
- sizeof(struct mpi_coredump_global_header);
- mpi_coredump->mpi_global_header.imageSize =
- sizeof(struct ql_reg_dump);
- strncpy(mpi_coredump->mpi_global_header.idString, "MPI Coredump",
- sizeof(mpi_coredump->mpi_global_header.idString));
-
- /* segment 16 */
- ql_build_coredump_seg_header(&mpi_coredump->misc_nic_seg_hdr,
- MISC_NIC_INFO_SEG_NUM,
- sizeof(struct mpi_coredump_segment_header)
- + sizeof(mpi_coredump->misc_nic_info),
- "MISC NIC INFO");
- mpi_coredump->misc_nic_info.rx_ring_count = qdev->rx_ring_count;
- mpi_coredump->misc_nic_info.tx_ring_count = qdev->tx_ring_count;
- mpi_coredump->misc_nic_info.intr_count = qdev->intr_count;
- mpi_coredump->misc_nic_info.function = qdev->func;
-
- /* Segment 16, Rev C. Step 18 */
- ql_build_coredump_seg_header(&mpi_coredump->nic_regs_seg_hdr,
- NIC1_CONTROL_SEG_NUM,
- sizeof(struct mpi_coredump_segment_header)
- + sizeof(mpi_coredump->nic_regs),
- "NIC Registers");
- /* Get generic reg dump */
- for (i = 0; i < 64; i++)
- mpi_coredump->nic_regs[i] = ql_read32(qdev, i * sizeof(u32));
-
- /* Segment 31 */
- /* Get indexed register values. */
- ql_build_coredump_seg_header(&mpi_coredump->intr_states_seg_hdr,
- INTR_STATES_SEG_NUM,
- sizeof(struct mpi_coredump_segment_header)
- + sizeof(mpi_coredump->intr_states),
- "INTR States");
- ql_get_intr_states(qdev, &mpi_coredump->intr_states[0]);
-
- ql_build_coredump_seg_header(&mpi_coredump->cam_entries_seg_hdr,
- CAM_ENTRIES_SEG_NUM,
- sizeof(struct mpi_coredump_segment_header)
- + sizeof(mpi_coredump->cam_entries),
- "CAM Entries");
- status = ql_get_cam_entries(qdev, &mpi_coredump->cam_entries[0]);
- if (status)
- return;
-
- ql_build_coredump_seg_header(&mpi_coredump->nic_routing_words_seg_hdr,
- ROUTING_WORDS_SEG_NUM,
- sizeof(struct mpi_coredump_segment_header)
- + sizeof(mpi_coredump->nic_routing_words),
- "Routing Words");
- status = ql_get_routing_entries(qdev,
- &mpi_coredump->nic_routing_words[0]);
- if (status)
- return;
-
- /* Segment 34 (Rev C. step 23) */
- ql_build_coredump_seg_header(&mpi_coredump->ets_seg_hdr,
- ETS_SEG_NUM,
- sizeof(struct mpi_coredump_segment_header)
- + sizeof(mpi_coredump->ets),
- "ETS Registers");
- status = ql_get_ets_regs(qdev, &mpi_coredump->ets[0]);
- if (status)
- return;
-}
-
-void ql_get_dump(struct ql_adapter *qdev, void *buff)
-{
- /*
- * If force dump is set, the dump has already been taken and is
- * stored in our internal buffer, so just start the spool to dump
- * it to the log file and take a snapshot of the general regs for
- * the user's buffer. If force is not set, take a complete dump
- * straight to the user's buffer instead.
- */
-
- if (!test_bit(QL_FRC_COREDUMP, &qdev->flags)) {
- if (!ql_core_dump(qdev, buff))
- ql_soft_reset_mpi_risc(qdev);
- else
- netif_err(qdev, drv, qdev->ndev, "coredump failed!\n");
- } else {
- ql_gen_reg_dump(qdev, buff);
- ql_get_core_dump(qdev);
- }
-}
-
-/* Coredump to messages log file using separate worker thread */
-void ql_mpi_core_to_log(struct work_struct *work)
-{
- struct ql_adapter *qdev =
- container_of(work, struct ql_adapter, mpi_core_to_log.work);
- u32 *tmp, count;
- int i;
-
- count = sizeof(struct ql_mpi_coredump) / sizeof(u32);
- tmp = (u32 *)qdev->mpi_coredump;
- netif_printk(qdev, drv, KERN_DEBUG, qdev->ndev,
- "Core is dumping to log file!\n");
-
- for (i = 0; i < count; i += 8) {
- pr_err("%.08x: %.08x %.08x %.08x %.08x %.08x "
- "%.08x %.08x %.08x\n", i,
- tmp[i + 0],
- tmp[i + 1],
- tmp[i + 2],
- tmp[i + 3],
- tmp[i + 4],
- tmp[i + 5],
- tmp[i + 6],
- tmp[i + 7]);
- msleep(5);
- }
-}
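
(Editor's note, not part of the original file: the loop above prints eight
words per line, sleeping between bursts; a hypothetical variant could emit
the same buffer through the kernel's generic hex-dump helper, 32 bytes per
row in 4-byte groups.)

static void ql_mpi_core_to_log_hexdump_sketch(struct ql_adapter *qdev)
{
	print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 32, 4,
		       qdev->mpi_coredump,
		       sizeof(struct ql_mpi_coredump), false);
}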
-
-#ifdef QL_REG_DUMP
-static void ql_dump_intr_states(struct ql_adapter *qdev)
-{
- int i;
- u32 value;
- for (i = 0; i < qdev->intr_count; i++) {
- ql_write32(qdev, INTR_EN, qdev->intr_context[i].intr_read_mask);
- value = ql_read32(qdev, INTR_EN);
- pr_err("%s: Interrupt %d is %s\n",
- qdev->ndev->name, i,
- (value & INTR_EN_EN ? "enabled" : "disabled"));
- }
-}
-
-#define DUMP_XGMAC(qdev, reg) \
-do { \
- u32 data; \
- ql_read_xgmac_reg(qdev, reg, &data); \
- pr_err("%s: %s = 0x%.08x\n", qdev->ndev->name, #reg, data); \
-} while (0)
-
-void ql_dump_xgmac_control_regs(struct ql_adapter *qdev)
-{
- if (ql_sem_spinlock(qdev, qdev->xg_sem_mask)) {
- pr_err("%s: Couldn't get xgmac sem\n", __func__);
- return;
- }
- DUMP_XGMAC(qdev, PAUSE_SRC_LO);
- DUMP_XGMAC(qdev, PAUSE_SRC_HI);
- DUMP_XGMAC(qdev, GLOBAL_CFG);
- DUMP_XGMAC(qdev, TX_CFG);
- DUMP_XGMAC(qdev, RX_CFG);
- DUMP_XGMAC(qdev, FLOW_CTL);
- DUMP_XGMAC(qdev, PAUSE_OPCODE);
- DUMP_XGMAC(qdev, PAUSE_TIMER);
- DUMP_XGMAC(qdev, PAUSE_FRM_DEST_LO);
- DUMP_XGMAC(qdev, PAUSE_FRM_DEST_HI);
- DUMP_XGMAC(qdev, MAC_TX_PARAMS);
- DUMP_XGMAC(qdev, MAC_RX_PARAMS);
- DUMP_XGMAC(qdev, MAC_SYS_INT);
- DUMP_XGMAC(qdev, MAC_SYS_INT_MASK);
- DUMP_XGMAC(qdev, MAC_MGMT_INT);
- DUMP_XGMAC(qdev, MAC_MGMT_IN_MASK);
- DUMP_XGMAC(qdev, EXT_ARB_MODE);
- ql_sem_unlock(qdev, qdev->xg_sem_mask);
-}
-
-static void ql_dump_ets_regs(struct ql_adapter *qdev)
-{
-}
-
-static void ql_dump_cam_entries(struct ql_adapter *qdev)
-{
- int i;
- u32 value[3];
-
- i = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
- if (i)
- return;
- for (i = 0; i < 4; i++) {
- if (ql_get_mac_addr_reg(qdev, MAC_ADDR_TYPE_CAM_MAC, i, value)) {
- pr_err("%s: Failed read of mac index register\n",
- __func__);
- return;
- } else {
- if (value[0])
- pr_err("%s: CAM index %d CAM Lookup Lower = 0x%.08x:%.08x, Output = 0x%.08x\n",
- qdev->ndev->name, i, value[1], value[0],
- value[2]);
- }
- }
- for (i = 0; i < 32; i++) {
- if (ql_get_mac_addr_reg
- (qdev, MAC_ADDR_TYPE_MULTI_MAC, i, value)) {
- pr_err("%s: Failed read of mac index register\n",
- __func__);
- return;
- } else {
- if (value[0])
- pr_err("%s: MCAST index %d CAM Lookup Lower = 0x%.08x:%.08x\n",
- qdev->ndev->name, i, value[1], value[0]);
- }
- }
- ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
-}
-
-void ql_dump_routing_entries(struct ql_adapter *qdev)
-{
- int i;
- u32 value;
- i = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
- if (i)
- return;
- for (i = 0; i < 16; i++) {
- value = 0;
- if (ql_get_routing_reg(qdev, i, &value)) {
- pr_err("%s: Failed read of routing index register\n",
- __func__);
- return;
- } else {
- if (value)
- pr_err("%s: Routing Mask %d = 0x%.08x\n",
- qdev->ndev->name, i, value);
- }
- }
- ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
-}
-
-#define DUMP_REG(qdev, reg) \
- pr_err("%-32s= 0x%x\n", #reg, ql_read32(qdev, reg))
-
-void ql_dump_regs(struct ql_adapter *qdev)
-{
- pr_err("reg dump for function #%d\n", qdev->func);
- DUMP_REG(qdev, SYS);
- DUMP_REG(qdev, RST_FO);
- DUMP_REG(qdev, FSC);
- DUMP_REG(qdev, CSR);
- DUMP_REG(qdev, ICB_RID);
- DUMP_REG(qdev, ICB_L);
- DUMP_REG(qdev, ICB_H);
- DUMP_REG(qdev, CFG);
- DUMP_REG(qdev, BIOS_ADDR);
- DUMP_REG(qdev, STS);
- DUMP_REG(qdev, INTR_EN);
- DUMP_REG(qdev, INTR_MASK);
- DUMP_REG(qdev, ISR1);
- DUMP_REG(qdev, ISR2);
- DUMP_REG(qdev, ISR3);
- DUMP_REG(qdev, ISR4);
- DUMP_REG(qdev, REV_ID);
- DUMP_REG(qdev, FRC_ECC_ERR);
- DUMP_REG(qdev, ERR_STS);
- DUMP_REG(qdev, RAM_DBG_ADDR);
- DUMP_REG(qdev, RAM_DBG_DATA);
- DUMP_REG(qdev, ECC_ERR_CNT);
- DUMP_REG(qdev, SEM);
- DUMP_REG(qdev, GPIO_1);
- DUMP_REG(qdev, GPIO_2);
- DUMP_REG(qdev, GPIO_3);
- DUMP_REG(qdev, XGMAC_ADDR);
- DUMP_REG(qdev, XGMAC_DATA);
- DUMP_REG(qdev, NIC_ETS);
- DUMP_REG(qdev, CNA_ETS);
- DUMP_REG(qdev, FLASH_ADDR);
- DUMP_REG(qdev, FLASH_DATA);
- DUMP_REG(qdev, CQ_STOP);
- DUMP_REG(qdev, PAGE_TBL_RID);
- DUMP_REG(qdev, WQ_PAGE_TBL_LO);
- DUMP_REG(qdev, WQ_PAGE_TBL_HI);
- DUMP_REG(qdev, CQ_PAGE_TBL_LO);
- DUMP_REG(qdev, CQ_PAGE_TBL_HI);
- DUMP_REG(qdev, COS_DFLT_CQ1);
- DUMP_REG(qdev, COS_DFLT_CQ2);
- DUMP_REG(qdev, SPLT_HDR);
- DUMP_REG(qdev, FC_PAUSE_THRES);
- DUMP_REG(qdev, NIC_PAUSE_THRES);
- DUMP_REG(qdev, FC_ETHERTYPE);
- DUMP_REG(qdev, FC_RCV_CFG);
- DUMP_REG(qdev, NIC_RCV_CFG);
- DUMP_REG(qdev, FC_COS_TAGS);
- DUMP_REG(qdev, NIC_COS_TAGS);
- DUMP_REG(qdev, MGMT_RCV_CFG);
- DUMP_REG(qdev, XG_SERDES_ADDR);
- DUMP_REG(qdev, XG_SERDES_DATA);
- DUMP_REG(qdev, PRB_MX_ADDR);
- DUMP_REG(qdev, PRB_MX_DATA);
- ql_dump_intr_states(qdev);
- ql_dump_xgmac_control_regs(qdev);
- ql_dump_ets_regs(qdev);
- ql_dump_cam_entries(qdev);
- ql_dump_routing_entries(qdev);
-}
-#endif
-
-#ifdef QL_STAT_DUMP
-
-#define DUMP_STAT(qdev, stat) \
- pr_err("%s = %ld\n", #stat, (unsigned long)qdev->nic_stats.stat)
-
-void ql_dump_stat(struct ql_adapter *qdev)
-{
- pr_err("%s: Enter\n", __func__);
- DUMP_STAT(qdev, tx_pkts);
- DUMP_STAT(qdev, tx_bytes);
- DUMP_STAT(qdev, tx_mcast_pkts);
- DUMP_STAT(qdev, tx_bcast_pkts);
- DUMP_STAT(qdev, tx_ucast_pkts);
- DUMP_STAT(qdev, tx_ctl_pkts);
- DUMP_STAT(qdev, tx_pause_pkts);
- DUMP_STAT(qdev, tx_64_pkt);
- DUMP_STAT(qdev, tx_65_to_127_pkt);
- DUMP_STAT(qdev, tx_128_to_255_pkt);
- DUMP_STAT(qdev, tx_256_511_pkt);
- DUMP_STAT(qdev, tx_512_to_1023_pkt);
- DUMP_STAT(qdev, tx_1024_to_1518_pkt);
- DUMP_STAT(qdev, tx_1519_to_max_pkt);
- DUMP_STAT(qdev, tx_undersize_pkt);
- DUMP_STAT(qdev, tx_oversize_pkt);
- DUMP_STAT(qdev, rx_bytes);
- DUMP_STAT(qdev, rx_bytes_ok);
- DUMP_STAT(qdev, rx_pkts);
- DUMP_STAT(qdev, rx_pkts_ok);
- DUMP_STAT(qdev, rx_bcast_pkts);
- DUMP_STAT(qdev, rx_mcast_pkts);
- DUMP_STAT(qdev, rx_ucast_pkts);
- DUMP_STAT(qdev, rx_undersize_pkts);
- DUMP_STAT(qdev, rx_oversize_pkts);
- DUMP_STAT(qdev, rx_jabber_pkts);
- DUMP_STAT(qdev, rx_undersize_fcerr_pkts);
- DUMP_STAT(qdev, rx_drop_events);
- DUMP_STAT(qdev, rx_fcerr_pkts);
- DUMP_STAT(qdev, rx_align_err);
- DUMP_STAT(qdev, rx_symbol_err);
- DUMP_STAT(qdev, rx_mac_err);
- DUMP_STAT(qdev, rx_ctl_pkts);
- DUMP_STAT(qdev, rx_pause_pkts);
- DUMP_STAT(qdev, rx_64_pkts);
- DUMP_STAT(qdev, rx_65_to_127_pkts);
- DUMP_STAT(qdev, rx_128_255_pkts);
- DUMP_STAT(qdev, rx_256_511_pkts);
- DUMP_STAT(qdev, rx_512_to_1023_pkts);
- DUMP_STAT(qdev, rx_1024_to_1518_pkts);
- DUMP_STAT(qdev, rx_1519_to_max_pkts);
- DUMP_STAT(qdev, rx_len_err_pkts);
-}
-#endif
-
-#ifdef QL_DEV_DUMP
-
-#define DUMP_QDEV_FIELD(qdev, type, field) \
- pr_err("qdev->%-24s = " type "\n", #field, qdev->field)
-#define DUMP_QDEV_DMA_FIELD(qdev, field) \
- pr_err("qdev->%-24s = %llx\n", #field, (unsigned long long)qdev->field)
-#define DUMP_QDEV_ARRAY(qdev, type, array, index, field) \
- pr_err("%s[%d].%s = " type "\n", \
- #array, index, #field, qdev->array[index].field);
-void ql_dump_qdev(struct ql_adapter *qdev)
-{
- int i;
- DUMP_QDEV_FIELD(qdev, "%lx", flags);
- DUMP_QDEV_FIELD(qdev, "%p", vlgrp);
- DUMP_QDEV_FIELD(qdev, "%p", pdev);
- DUMP_QDEV_FIELD(qdev, "%p", ndev);
- DUMP_QDEV_FIELD(qdev, "%d", chip_rev_id);
- DUMP_QDEV_FIELD(qdev, "%p", reg_base);
- DUMP_QDEV_FIELD(qdev, "%p", doorbell_area);
- DUMP_QDEV_FIELD(qdev, "%d", doorbell_area_size);
- DUMP_QDEV_FIELD(qdev, "%x", msg_enable);
- DUMP_QDEV_FIELD(qdev, "%p", rx_ring_shadow_reg_area);
- DUMP_QDEV_DMA_FIELD(qdev, rx_ring_shadow_reg_dma);
- DUMP_QDEV_FIELD(qdev, "%p", tx_ring_shadow_reg_area);
- DUMP_QDEV_DMA_FIELD(qdev, tx_ring_shadow_reg_dma);
- DUMP_QDEV_FIELD(qdev, "%d", intr_count);
- if (qdev->msi_x_entry)
- for (i = 0; i < qdev->intr_count; i++) {
- DUMP_QDEV_ARRAY(qdev, "%d", msi_x_entry, i, vector);
- DUMP_QDEV_ARRAY(qdev, "%d", msi_x_entry, i, entry);
- }
- for (i = 0; i < qdev->intr_count; i++) {
- DUMP_QDEV_ARRAY(qdev, "%p", intr_context, i, qdev);
- DUMP_QDEV_ARRAY(qdev, "%d", intr_context, i, intr);
- DUMP_QDEV_ARRAY(qdev, "%d", intr_context, i, hooked);
- DUMP_QDEV_ARRAY(qdev, "0x%08x", intr_context, i, intr_en_mask);
- DUMP_QDEV_ARRAY(qdev, "0x%08x", intr_context, i, intr_dis_mask);
- DUMP_QDEV_ARRAY(qdev, "0x%08x", intr_context, i, intr_read_mask);
- }
- DUMP_QDEV_FIELD(qdev, "%d", tx_ring_count);
- DUMP_QDEV_FIELD(qdev, "%d", rx_ring_count);
- DUMP_QDEV_FIELD(qdev, "%d", ring_mem_size);
- DUMP_QDEV_FIELD(qdev, "%p", ring_mem);
- DUMP_QDEV_FIELD(qdev, "%d", intr_count);
- DUMP_QDEV_FIELD(qdev, "%p", tx_ring);
- DUMP_QDEV_FIELD(qdev, "%d", rss_ring_count);
- DUMP_QDEV_FIELD(qdev, "%p", rx_ring);
- DUMP_QDEV_FIELD(qdev, "%d", default_rx_queue);
- DUMP_QDEV_FIELD(qdev, "0x%08x", xg_sem_mask);
- DUMP_QDEV_FIELD(qdev, "0x%08x", port_link_up);
- DUMP_QDEV_FIELD(qdev, "0x%08x", port_init);
-}
-#endif
-
-#ifdef QL_CB_DUMP
-void ql_dump_wqicb(struct wqicb *wqicb)
-{
- pr_err("Dumping wqicb stuff...\n");
- pr_err("wqicb->len = 0x%x\n", le16_to_cpu(wqicb->len));
- pr_err("wqicb->flags = %x\n", le16_to_cpu(wqicb->flags));
- pr_err("wqicb->cq_id_rss = %d\n",
- le16_to_cpu(wqicb->cq_id_rss));
- pr_err("wqicb->rid = 0x%x\n", le16_to_cpu(wqicb->rid));
- pr_err("wqicb->wq_addr = 0x%llx\n",
- (unsigned long long) le64_to_cpu(wqicb->addr));
- pr_err("wqicb->wq_cnsmr_idx_addr = 0x%llx\n",
- (unsigned long long) le64_to_cpu(wqicb->cnsmr_idx_addr));
-}
-
-void ql_dump_tx_ring(struct tx_ring *tx_ring)
-{
- if (tx_ring == NULL)
- return;
- pr_err("===================== Dumping tx_ring %d ===============\n",
- tx_ring->wq_id);
- pr_err("tx_ring->base = %p\n", tx_ring->wq_base);
- pr_err("tx_ring->base_dma = 0x%llx\n",
- (unsigned long long) tx_ring->wq_base_dma);
- pr_err("tx_ring->cnsmr_idx_sh_reg, addr = 0x%p, value = %d\n",
- tx_ring->cnsmr_idx_sh_reg,
- tx_ring->cnsmr_idx_sh_reg
- ? ql_read_sh_reg(tx_ring->cnsmr_idx_sh_reg) : 0);
- pr_err("tx_ring->size = %d\n", tx_ring->wq_size);
- pr_err("tx_ring->len = %d\n", tx_ring->wq_len);
- pr_err("tx_ring->prod_idx_db_reg = %p\n", tx_ring->prod_idx_db_reg);
- pr_err("tx_ring->valid_db_reg = %p\n", tx_ring->valid_db_reg);
- pr_err("tx_ring->prod_idx = %d\n", tx_ring->prod_idx);
- pr_err("tx_ring->cq_id = %d\n", tx_ring->cq_id);
- pr_err("tx_ring->wq_id = %d\n", tx_ring->wq_id);
- pr_err("tx_ring->q = %p\n", tx_ring->q);
- pr_err("tx_ring->tx_count = %d\n", atomic_read(&tx_ring->tx_count));
-}
-
-void ql_dump_ricb(struct ricb *ricb)
-{
- int i;
- pr_err("===================== Dumping ricb ===============\n");
- pr_err("Dumping ricb stuff...\n");
-
- pr_err("ricb->base_cq = %d\n", ricb->base_cq & 0x1f);
- pr_err("ricb->flags = %s%s%s%s%s%s%s%s%s\n",
- ricb->base_cq & RSS_L4K ? "RSS_L4K " : "",
- ricb->flags & RSS_L6K ? "RSS_L6K " : "",
- ricb->flags & RSS_LI ? "RSS_LI " : "",
- ricb->flags & RSS_LB ? "RSS_LB " : "",
- ricb->flags & RSS_LM ? "RSS_LM " : "",
- ricb->flags & RSS_RI4 ? "RSS_RI4 " : "",
- ricb->flags & RSS_RT4 ? "RSS_RT4 " : "",
- ricb->flags & RSS_RI6 ? "RSS_RI6 " : "",
- ricb->flags & RSS_RT6 ? "RSS_RT6 " : "");
- pr_err("ricb->mask = 0x%.04x\n", le16_to_cpu(ricb->mask));
- for (i = 0; i < 16; i++)
- pr_err("ricb->hash_cq_id[%d] = 0x%.08x\n", i,
- le32_to_cpu(ricb->hash_cq_id[i]));
- for (i = 0; i < 10; i++)
- pr_err("ricb->ipv6_hash_key[%d] = 0x%.08x\n", i,
- le32_to_cpu(ricb->ipv6_hash_key[i]));
- for (i = 0; i < 4; i++)
- pr_err("ricb->ipv4_hash_key[%d] = 0x%.08x\n", i,
- le32_to_cpu(ricb->ipv4_hash_key[i]));
-}
-
-void ql_dump_cqicb(struct cqicb *cqicb)
-{
- pr_err("Dumping cqicb stuff...\n");
-
- pr_err("cqicb->msix_vect = %d\n", cqicb->msix_vect);
- pr_err("cqicb->flags = %x\n", cqicb->flags);
- pr_err("cqicb->len = %d\n", le16_to_cpu(cqicb->len));
- pr_err("cqicb->addr = 0x%llx\n",
- (unsigned long long) le64_to_cpu(cqicb->addr));
- pr_err("cqicb->prod_idx_addr = 0x%llx\n",
- (unsigned long long) le64_to_cpu(cqicb->prod_idx_addr));
- pr_err("cqicb->pkt_delay = 0x%.04x\n",
- le16_to_cpu(cqicb->pkt_delay));
- pr_err("cqicb->irq_delay = 0x%.04x\n",
- le16_to_cpu(cqicb->irq_delay));
- pr_err("cqicb->lbq_addr = 0x%llx\n",
- (unsigned long long) le64_to_cpu(cqicb->lbq_addr));
- pr_err("cqicb->lbq_buf_size = 0x%.04x\n",
- le16_to_cpu(cqicb->lbq_buf_size));
- pr_err("cqicb->lbq_len = 0x%.04x\n",
- le16_to_cpu(cqicb->lbq_len));
- pr_err("cqicb->sbq_addr = 0x%llx\n",
- (unsigned long long) le64_to_cpu(cqicb->sbq_addr));
- pr_err("cqicb->sbq_buf_size = 0x%.04x\n",
- le16_to_cpu(cqicb->sbq_buf_size));
- pr_err("cqicb->sbq_len = 0x%.04x\n",
- le16_to_cpu(cqicb->sbq_len));
-}
-
-void ql_dump_rx_ring(struct rx_ring *rx_ring)
-{
- if (rx_ring == NULL)
- return;
- pr_err("===================== Dumping rx_ring %d ===============\n",
- rx_ring->cq_id);
- pr_err("Dumping rx_ring %d, type = %s%s%s\n",
- rx_ring->cq_id, rx_ring->type == DEFAULT_Q ? "DEFAULT" : "",
- rx_ring->type == TX_Q ? "OUTBOUND COMPLETIONS" : "",
- rx_ring->type == RX_Q ? "INBOUND_COMPLETIONS" : "");
- pr_err("rx_ring->cqicb = %p\n", &rx_ring->cqicb);
- pr_err("rx_ring->cq_base = %p\n", rx_ring->cq_base);
- pr_err("rx_ring->cq_base_dma = %llx\n",
- (unsigned long long) rx_ring->cq_base_dma);
- pr_err("rx_ring->cq_size = %d\n", rx_ring->cq_size);
- pr_err("rx_ring->cq_len = %d\n", rx_ring->cq_len);
- pr_err("rx_ring->prod_idx_sh_reg, addr = 0x%p, value = %d\n",
- rx_ring->prod_idx_sh_reg,
- rx_ring->prod_idx_sh_reg
- ? ql_read_sh_reg(rx_ring->prod_idx_sh_reg) : 0);
- pr_err("rx_ring->prod_idx_sh_reg_dma = %llx\n",
- (unsigned long long) rx_ring->prod_idx_sh_reg_dma);
- pr_err("rx_ring->cnsmr_idx_db_reg = %p\n",
- rx_ring->cnsmr_idx_db_reg);
- pr_err("rx_ring->cnsmr_idx = %d\n", rx_ring->cnsmr_idx);
- pr_err("rx_ring->curr_entry = %p\n", rx_ring->curr_entry);
- pr_err("rx_ring->valid_db_reg = %p\n", rx_ring->valid_db_reg);
-
- pr_err("rx_ring->lbq_base = %p\n", rx_ring->lbq_base);
- pr_err("rx_ring->lbq_base_dma = %llx\n",
- (unsigned long long) rx_ring->lbq_base_dma);
- pr_err("rx_ring->lbq_base_indirect = %p\n",
- rx_ring->lbq_base_indirect);
- pr_err("rx_ring->lbq_base_indirect_dma = %llx\n",
- (unsigned long long) rx_ring->lbq_base_indirect_dma);
- pr_err("rx_ring->lbq = %p\n", rx_ring->lbq);
- pr_err("rx_ring->lbq_len = %d\n", rx_ring->lbq_len);
- pr_err("rx_ring->lbq_size = %d\n", rx_ring->lbq_size);
- pr_err("rx_ring->lbq_prod_idx_db_reg = %p\n",
- rx_ring->lbq_prod_idx_db_reg);
- pr_err("rx_ring->lbq_prod_idx = %d\n", rx_ring->lbq_prod_idx);
- pr_err("rx_ring->lbq_curr_idx = %d\n", rx_ring->lbq_curr_idx);
- pr_err("rx_ring->lbq_clean_idx = %d\n", rx_ring->lbq_clean_idx);
- pr_err("rx_ring->lbq_free_cnt = %d\n", rx_ring->lbq_free_cnt);
- pr_err("rx_ring->lbq_buf_size = %d\n", rx_ring->lbq_buf_size);
-
- pr_err("rx_ring->sbq_base = %p\n", rx_ring->sbq_base);
- pr_err("rx_ring->sbq_base_dma = %llx\n",
- (unsigned long long) rx_ring->sbq_base_dma);
- pr_err("rx_ring->sbq_base_indirect = %p\n",
- rx_ring->sbq_base_indirect);
- pr_err("rx_ring->sbq_base_indirect_dma = %llx\n",
- (unsigned long long) rx_ring->sbq_base_indirect_dma);
- pr_err("rx_ring->sbq = %p\n", rx_ring->sbq);
- pr_err("rx_ring->sbq_len = %d\n", rx_ring->sbq_len);
- pr_err("rx_ring->sbq_size = %d\n", rx_ring->sbq_size);
- pr_err("rx_ring->sbq_prod_idx_db_reg addr = %p\n",
- rx_ring->sbq_prod_idx_db_reg);
- pr_err("rx_ring->sbq_prod_idx = %d\n", rx_ring->sbq_prod_idx);
- pr_err("rx_ring->sbq_curr_idx = %d\n", rx_ring->sbq_curr_idx);
- pr_err("rx_ring->sbq_clean_idx = %d\n", rx_ring->sbq_clean_idx);
- pr_err("rx_ring->sbq_free_cnt = %d\n", rx_ring->sbq_free_cnt);
- pr_err("rx_ring->sbq_buf_size = %d\n", rx_ring->sbq_buf_size);
- pr_err("rx_ring->cq_id = %d\n", rx_ring->cq_id);
- pr_err("rx_ring->irq = %d\n", rx_ring->irq);
- pr_err("rx_ring->cpu = %d\n", rx_ring->cpu);
- pr_err("rx_ring->qdev = %p\n", rx_ring->qdev);
-}
-
-void ql_dump_hw_cb(struct ql_adapter *qdev, int size, u32 bit, u16 q_id)
-{
- void *ptr;
-
- pr_err("%s: Enter\n", __func__);
-
- ptr = kmalloc(size, GFP_ATOMIC);
- if (ptr == NULL)
- return;
-
- if (ql_write_cfg(qdev, ptr, size, bit, q_id)) {
- pr_err("%s: Failed to upload control block!\n", __func__);
- goto fail_it;
- }
- switch (bit) {
- case CFG_DRQ:
- ql_dump_wqicb((struct wqicb *)ptr);
- break;
- case CFG_DCQ:
- ql_dump_cqicb((struct cqicb *)ptr);
- break;
- case CFG_DR:
- ql_dump_ricb((struct ricb *)ptr);
- break;
- default:
- pr_err("%s: Invalid bit value = %x\n", __func__, bit);
- break;
- }
-fail_it:
- kfree(ptr);
-}
-#endif
-
-#ifdef QL_OB_DUMP
-void ql_dump_tx_desc(struct tx_buf_desc *tbd)
-{
- int i;
-
- /* The IOCB carries up to three buffer descriptors; dump them all.
- * Convert from little endian before masking and testing flag bits
- * so the output is also correct on big endian hosts.
- */
- for (i = 0; i < 3; i++, tbd++) {
- u32 len = le32_to_cpu(tbd->len);
-
- pr_err("tbd->addr = 0x%llx\n",
- (unsigned long long) le64_to_cpu(tbd->addr));
- pr_err("tbd->len = %d\n", len & TX_DESC_LEN_MASK);
- pr_err("tbd->flags = %s %s\n",
- len & TX_DESC_C ? "C" : ".",
- len & TX_DESC_E ? "E" : ".");
- }
-}
-
-void ql_dump_ob_mac_iocb(struct ob_mac_iocb_req *ob_mac_iocb)
-{
- struct ob_mac_tso_iocb_req *ob_mac_tso_iocb =
- (struct ob_mac_tso_iocb_req *)ob_mac_iocb;
- struct tx_buf_desc *tbd;
-
- pr_err("%s\n", __func__);
- pr_err("opcode = %s\n",
- (ob_mac_iocb->opcode == OPCODE_OB_MAC_IOCB) ? "MAC" : "TSO");
- pr_err("flags1 = %s %s %s %s %s\n",
- ob_mac_tso_iocb->flags1 & OB_MAC_TSO_IOCB_OI ? "OI" : "",
- ob_mac_tso_iocb->flags1 & OB_MAC_TSO_IOCB_I ? "I" : "",
- ob_mac_tso_iocb->flags1 & OB_MAC_TSO_IOCB_D ? "D" : "",
- ob_mac_tso_iocb->flags1 & OB_MAC_TSO_IOCB_IP4 ? "IP4" : "",
- ob_mac_tso_iocb->flags1 & OB_MAC_TSO_IOCB_IP6 ? "IP6" : "");
- pr_err("flags2 = %s %s %s\n",
- ob_mac_tso_iocb->flags2 & OB_MAC_TSO_IOCB_LSO ? "LSO" : "",
- ob_mac_tso_iocb->flags2 & OB_MAC_TSO_IOCB_UC ? "UC" : "",
- ob_mac_tso_iocb->flags2 & OB_MAC_TSO_IOCB_TC ? "TC" : "");
- pr_err("flags3 = %s %s %s\n",
- ob_mac_tso_iocb->flags3 & OB_MAC_TSO_IOCB_IC ? "IC" : "",
- ob_mac_tso_iocb->flags3 & OB_MAC_TSO_IOCB_DFP ? "DFP" : "",
- ob_mac_tso_iocb->flags3 & OB_MAC_TSO_IOCB_V ? "V" : "");
- pr_err("tid = %x\n", ob_mac_iocb->tid);
- pr_err("txq_idx = %d\n", ob_mac_iocb->txq_idx);
- pr_err("vlan_tci = %x\n", ob_mac_tso_iocb->vlan_tci);
- if (ob_mac_iocb->opcode == OPCODE_OB_MAC_TSO_IOCB) {
- pr_err("frame_len = %d\n",
- le32_to_cpu(ob_mac_tso_iocb->frame_len));
- pr_err("mss = %d\n",
- le16_to_cpu(ob_mac_tso_iocb->mss));
- pr_err("prot_hdr_len = %d\n",
- le16_to_cpu(ob_mac_tso_iocb->total_hdrs_len));
- pr_err("hdr_offset = 0x%.04x\n",
- le16_to_cpu(ob_mac_tso_iocb->net_trans_offset));
- } else {
- pr_err("frame_len = %d\n",
- le16_to_cpu(ob_mac_iocb->frame_len));
- }
- tbd = &ob_mac_iocb->tbd[0];
- ql_dump_tx_desc(tbd);
-}
-
-void ql_dump_ob_mac_rsp(struct ob_mac_iocb_rsp *ob_mac_rsp)
-{
- pr_err("%s\n", __func__);
- pr_err("opcode = %d\n", ob_mac_rsp->opcode);
- pr_err("flags = %s %s %s %s %s %s %s\n",
- ob_mac_rsp->flags1 & OB_MAC_IOCB_RSP_OI ? "OI" : ".",
- ob_mac_rsp->flags1 & OB_MAC_IOCB_RSP_I ? "I" : ".",
- ob_mac_rsp->flags1 & OB_MAC_IOCB_RSP_E ? "E" : ".",
- ob_mac_rsp->flags1 & OB_MAC_IOCB_RSP_S ? "S" : ".",
- ob_mac_rsp->flags1 & OB_MAC_IOCB_RSP_L ? "L" : ".",
- ob_mac_rsp->flags1 & OB_MAC_IOCB_RSP_P ? "P" : ".",
- ob_mac_rsp->flags2 & OB_MAC_IOCB_RSP_B ? "B" : ".");
- pr_err("tid = %x\n", ob_mac_rsp->tid);
-}
-#endif
-
-#ifdef QL_IB_DUMP
-void ql_dump_ib_mac_rsp(struct ib_mac_iocb_rsp *ib_mac_rsp)
-{
- pr_err("%s\n", __func__);
- pr_err("opcode = 0x%x\n", ib_mac_rsp->opcode);
- pr_err("flags1 = %s%s%s%s%s%s\n",
- ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_OI ? "OI " : "",
- ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_I ? "I " : "",
- ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_TE ? "TE " : "",
- ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_NU ? "NU " : "",
- ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_IE ? "IE " : "",
- ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_B ? "B " : "");
-
- if (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK)
- pr_err("%s%s%s Multicast\n",
- (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
- IB_MAC_IOCB_RSP_M_HASH ? "Hash" : "",
- (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
- IB_MAC_IOCB_RSP_M_REG ? "Registered" : "",
- (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
- IB_MAC_IOCB_RSP_M_PROM ? "Promiscuous" : "");
-
- pr_err("flags2 = %s%s%s%s%s\n",
- (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_P) ? "P " : "",
- (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ? "V " : "",
- (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) ? "U " : "",
- (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) ? "T " : "",
- (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_FO) ? "FO " : "");
-
- if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK)
- pr_err("%s%s%s%s%s error\n",
- (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) ==
- IB_MAC_IOCB_RSP_ERR_OVERSIZE ? "oversize" : "",
- (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) ==
- IB_MAC_IOCB_RSP_ERR_UNDERSIZE ? "undersize" : "",
- (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) ==
- IB_MAC_IOCB_RSP_ERR_PREAMBLE ? "preamble" : "",
- (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) ==
- IB_MAC_IOCB_RSP_ERR_FRAME_LEN ? "frame length" : "",
- (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) ==
- IB_MAC_IOCB_RSP_ERR_CRC ? "CRC" : "");
-
- pr_err("flags3 = %s%s\n",
- ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DS ? "DS " : "",
- ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL ? "DL " : "");
-
- if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_RSS_MASK)
- pr_err("RSS flags = %s%s%s%s\n",
- ((ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_RSS_MASK) ==
- IB_MAC_IOCB_RSP_M_IPV4) ? "IPv4 RSS" : "",
- ((ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_RSS_MASK) ==
- IB_MAC_IOCB_RSP_M_IPV6) ? "IPv6 RSS " : "",
- ((ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_RSS_MASK) ==
- IB_MAC_IOCB_RSP_M_TCP_V4) ? "TCP/IPv4 RSS" : "",
- ((ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_RSS_MASK) ==
- IB_MAC_IOCB_RSP_M_TCP_V6) ? "TCP/IPv6 RSS" : "");
-
- pr_err("data_len = %d\n",
- le32_to_cpu(ib_mac_rsp->data_len));
- pr_err("data_addr = 0x%llx\n",
- (unsigned long long) le64_to_cpu(ib_mac_rsp->data_addr));
- if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_RSS_MASK)
- pr_err("rss = %x\n",
- le32_to_cpu(ib_mac_rsp->rss));
- if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V)
- pr_err("vlan_id = %x\n",
- le16_to_cpu(ib_mac_rsp->vlan_id));
-
- pr_err("flags4 = %s%s%s\n",
- ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV ? "HV " : "",
- ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS ? "HS " : "",
- ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HL ? "HL " : "");
-
- if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV) {
- pr_err("hdr length = %d\n",
- le32_to_cpu(ib_mac_rsp->hdr_len));
- pr_err("hdr addr = 0x%llx\n",
- (unsigned long long) le64_to_cpu(ib_mac_rsp->hdr_addr));
- }
-}
-#endif
-
-#ifdef QL_ALL_DUMP
-void ql_dump_all(struct ql_adapter *qdev)
-{
- int i;
-
- QL_DUMP_REGS(qdev);
- QL_DUMP_QDEV(qdev);
- for (i = 0; i < qdev->tx_ring_count; i++) {
- QL_DUMP_TX_RING(&qdev->tx_ring[i]);
- QL_DUMP_WQICB((struct wqicb *)&qdev->tx_ring[i]);
- }
- for (i = 0; i < qdev->rx_ring_count; i++) {
- QL_DUMP_RX_RING(&qdev->rx_ring[i]);
- QL_DUMP_CQICB((struct cqicb *)&qdev->rx_ring[i]);
- }
-}
-#endif
diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_ethtool.c b/drivers/net/ethernet/qlogic/qlge/qlge_ethtool.c
deleted file mode 100644
index a6886cc5654c..000000000000
--- a/drivers/net/ethernet/qlogic/qlge/qlge_ethtool.c
+++ /dev/null
@@ -1,735 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-#include <linux/kernel.h>
-#include <linux/types.h>
-#include <linux/module.h>
-#include <linux/list.h>
-#include <linux/pci.h>
-#include <linux/dma-mapping.h>
-#include <linux/pagemap.h>
-#include <linux/sched.h>
-#include <linux/dmapool.h>
-#include <linux/mempool.h>
-#include <linux/spinlock.h>
-#include <linux/kthread.h>
-#include <linux/interrupt.h>
-#include <linux/errno.h>
-#include <linux/ioport.h>
-#include <linux/in.h>
-#include <linux/ip.h>
-#include <linux/ipv6.h>
-#include <net/ipv6.h>
-#include <linux/tcp.h>
-#include <linux/udp.h>
-#include <linux/if_arp.h>
-#include <linux/if_ether.h>
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-#include <linux/ethtool.h>
-#include <linux/skbuff.h>
-#include <linux/rtnetlink.h>
-#include <linux/if_vlan.h>
-#include <linux/delay.h>
-#include <linux/mm.h>
-#include <linux/vmalloc.h>
-
-#include "qlge.h"
-
-struct ql_stats {
- char stat_string[ETH_GSTRING_LEN];
- int sizeof_stat;
- int stat_offset;
-};
-
-#define QL_SIZEOF(m) FIELD_SIZEOF(struct ql_adapter, m)
-#define QL_OFF(m) offsetof(struct ql_adapter, m)
-
-static const struct ql_stats ql_gstrings_stats[] = {
- {"tx_pkts", QL_SIZEOF(nic_stats.tx_pkts), QL_OFF(nic_stats.tx_pkts)},
- {"tx_bytes", QL_SIZEOF(nic_stats.tx_bytes), QL_OFF(nic_stats.tx_bytes)},
- {"tx_mcast_pkts", QL_SIZEOF(nic_stats.tx_mcast_pkts),
- QL_OFF(nic_stats.tx_mcast_pkts)},
- {"tx_bcast_pkts", QL_SIZEOF(nic_stats.tx_bcast_pkts),
- QL_OFF(nic_stats.tx_bcast_pkts)},
- {"tx_ucast_pkts", QL_SIZEOF(nic_stats.tx_ucast_pkts),
- QL_OFF(nic_stats.tx_ucast_pkts)},
- {"tx_ctl_pkts", QL_SIZEOF(nic_stats.tx_ctl_pkts),
- QL_OFF(nic_stats.tx_ctl_pkts)},
- {"tx_pause_pkts", QL_SIZEOF(nic_stats.tx_pause_pkts),
- QL_OFF(nic_stats.tx_pause_pkts)},
- {"tx_64_pkts", QL_SIZEOF(nic_stats.tx_64_pkt),
- QL_OFF(nic_stats.tx_64_pkt)},
- {"tx_65_to_127_pkts", QL_SIZEOF(nic_stats.tx_65_to_127_pkt),
- QL_OFF(nic_stats.tx_65_to_127_pkt)},
- {"tx_128_to_255_pkts", QL_SIZEOF(nic_stats.tx_128_to_255_pkt),
- QL_OFF(nic_stats.tx_128_to_255_pkt)},
- {"tx_256_511_pkts", QL_SIZEOF(nic_stats.tx_256_511_pkt),
- QL_OFF(nic_stats.tx_256_511_pkt)},
- {"tx_512_to_1023_pkts", QL_SIZEOF(nic_stats.tx_512_to_1023_pkt),
- QL_OFF(nic_stats.tx_512_to_1023_pkt)},
- {"tx_1024_to_1518_pkts", QL_SIZEOF(nic_stats.tx_1024_to_1518_pkt),
- QL_OFF(nic_stats.tx_1024_to_1518_pkt)},
- {"tx_1519_to_max_pkts", QL_SIZEOF(nic_stats.tx_1519_to_max_pkt),
- QL_OFF(nic_stats.tx_1519_to_max_pkt)},
- {"tx_undersize_pkts", QL_SIZEOF(nic_stats.tx_undersize_pkt),
- QL_OFF(nic_stats.tx_undersize_pkt)},
- {"tx_oversize_pkts", QL_SIZEOF(nic_stats.tx_oversize_pkt),
- QL_OFF(nic_stats.tx_oversize_pkt)},
- {"rx_bytes", QL_SIZEOF(nic_stats.rx_bytes), QL_OFF(nic_stats.rx_bytes)},
- {"rx_bytes_ok", QL_SIZEOF(nic_stats.rx_bytes_ok),
- QL_OFF(nic_stats.rx_bytes_ok)},
- {"rx_pkts", QL_SIZEOF(nic_stats.rx_pkts), QL_OFF(nic_stats.rx_pkts)},
- {"rx_pkts_ok", QL_SIZEOF(nic_stats.rx_pkts_ok),
- QL_OFF(nic_stats.rx_pkts_ok)},
- {"rx_bcast_pkts", QL_SIZEOF(nic_stats.rx_bcast_pkts),
- QL_OFF(nic_stats.rx_bcast_pkts)},
- {"rx_mcast_pkts", QL_SIZEOF(nic_stats.rx_mcast_pkts),
- QL_OFF(nic_stats.rx_mcast_pkts)},
- {"rx_ucast_pkts", QL_SIZEOF(nic_stats.rx_ucast_pkts),
- QL_OFF(nic_stats.rx_ucast_pkts)},
- {"rx_undersize_pkts", QL_SIZEOF(nic_stats.rx_undersize_pkts),
- QL_OFF(nic_stats.rx_undersize_pkts)},
- {"rx_oversize_pkts", QL_SIZEOF(nic_stats.rx_oversize_pkts),
- QL_OFF(nic_stats.rx_oversize_pkts)},
- {"rx_jabber_pkts", QL_SIZEOF(nic_stats.rx_jabber_pkts),
- QL_OFF(nic_stats.rx_jabber_pkts)},
- {"rx_undersize_fcerr_pkts",
- QL_SIZEOF(nic_stats.rx_undersize_fcerr_pkts),
- QL_OFF(nic_stats.rx_undersize_fcerr_pkts)},
- {"rx_drop_events", QL_SIZEOF(nic_stats.rx_drop_events),
- QL_OFF(nic_stats.rx_drop_events)},
- {"rx_fcerr_pkts", QL_SIZEOF(nic_stats.rx_fcerr_pkts),
- QL_OFF(nic_stats.rx_fcerr_pkts)},
- {"rx_align_err", QL_SIZEOF(nic_stats.rx_align_err),
- QL_OFF(nic_stats.rx_align_err)},
- {"rx_symbol_err", QL_SIZEOF(nic_stats.rx_symbol_err),
- QL_OFF(nic_stats.rx_symbol_err)},
- {"rx_mac_err", QL_SIZEOF(nic_stats.rx_mac_err),
- QL_OFF(nic_stats.rx_mac_err)},
- {"rx_ctl_pkts", QL_SIZEOF(nic_stats.rx_ctl_pkts),
- QL_OFF(nic_stats.rx_ctl_pkts)},
- {"rx_pause_pkts", QL_SIZEOF(nic_stats.rx_pause_pkts),
- QL_OFF(nic_stats.rx_pause_pkts)},
- {"rx_64_pkts", QL_SIZEOF(nic_stats.rx_64_pkts),
- QL_OFF(nic_stats.rx_64_pkts)},
- {"rx_65_to_127_pkts", QL_SIZEOF(nic_stats.rx_65_to_127_pkts),
- QL_OFF(nic_stats.rx_65_to_127_pkts)},
- {"rx_128_255_pkts", QL_SIZEOF(nic_stats.rx_128_255_pkts),
- QL_OFF(nic_stats.rx_128_255_pkts)},
- {"rx_256_511_pkts", QL_SIZEOF(nic_stats.rx_256_511_pkts),
- QL_OFF(nic_stats.rx_256_511_pkts)},
- {"rx_512_to_1023_pkts", QL_SIZEOF(nic_stats.rx_512_to_1023_pkts),
- QL_OFF(nic_stats.rx_512_to_1023_pkts)},
- {"rx_1024_to_1518_pkts", QL_SIZEOF(nic_stats.rx_1024_to_1518_pkts),
- QL_OFF(nic_stats.rx_1024_to_1518_pkts)},
- {"rx_1519_to_max_pkts", QL_SIZEOF(nic_stats.rx_1519_to_max_pkts),
- QL_OFF(nic_stats.rx_1519_to_max_pkts)},
- {"rx_len_err_pkts", QL_SIZEOF(nic_stats.rx_len_err_pkts),
- QL_OFF(nic_stats.rx_len_err_pkts)},
- {"rx_code_err", QL_SIZEOF(nic_stats.rx_code_err),
- QL_OFF(nic_stats.rx_code_err)},
- {"rx_oversize_err", QL_SIZEOF(nic_stats.rx_oversize_err),
- QL_OFF(nic_stats.rx_oversize_err)},
- {"rx_undersize_err", QL_SIZEOF(nic_stats.rx_undersize_err),
- QL_OFF(nic_stats.rx_undersize_err)},
- {"rx_preamble_err", QL_SIZEOF(nic_stats.rx_preamble_err),
- QL_OFF(nic_stats.rx_preamble_err)},
- {"rx_frame_len_err", QL_SIZEOF(nic_stats.rx_frame_len_err),
- QL_OFF(nic_stats.rx_frame_len_err)},
- {"rx_crc_err", QL_SIZEOF(nic_stats.rx_crc_err),
- QL_OFF(nic_stats.rx_crc_err)},
- {"rx_err_count", QL_SIZEOF(nic_stats.rx_err_count),
- QL_OFF(nic_stats.rx_err_count)},
- {"tx_cbfc_pause_frames0", QL_SIZEOF(nic_stats.tx_cbfc_pause_frames0),
- QL_OFF(nic_stats.tx_cbfc_pause_frames0)},
- {"tx_cbfc_pause_frames1", QL_SIZEOF(nic_stats.tx_cbfc_pause_frames1),
- QL_OFF(nic_stats.tx_cbfc_pause_frames1)},
- {"tx_cbfc_pause_frames2", QL_SIZEOF(nic_stats.tx_cbfc_pause_frames2),
- QL_OFF(nic_stats.tx_cbfc_pause_frames2)},
- {"tx_cbfc_pause_frames3", QL_SIZEOF(nic_stats.tx_cbfc_pause_frames3),
- QL_OFF(nic_stats.tx_cbfc_pause_frames3)},
- {"tx_cbfc_pause_frames4", QL_SIZEOF(nic_stats.tx_cbfc_pause_frames4),
- QL_OFF(nic_stats.tx_cbfc_pause_frames4)},
- {"tx_cbfc_pause_frames5", QL_SIZEOF(nic_stats.tx_cbfc_pause_frames5),
- QL_OFF(nic_stats.tx_cbfc_pause_frames5)},
- {"tx_cbfc_pause_frames6", QL_SIZEOF(nic_stats.tx_cbfc_pause_frames6),
- QL_OFF(nic_stats.tx_cbfc_pause_frames6)},
- {"tx_cbfc_pause_frames7", QL_SIZEOF(nic_stats.tx_cbfc_pause_frames7),
- QL_OFF(nic_stats.tx_cbfc_pause_frames7)},
- {"rx_cbfc_pause_frames0", QL_SIZEOF(nic_stats.rx_cbfc_pause_frames0),
- QL_OFF(nic_stats.rx_cbfc_pause_frames0)},
- {"rx_cbfc_pause_frames1", QL_SIZEOF(nic_stats.rx_cbfc_pause_frames1),
- QL_OFF(nic_stats.rx_cbfc_pause_frames1)},
- {"rx_cbfc_pause_frames2", QL_SIZEOF(nic_stats.rx_cbfc_pause_frames2),
- QL_OFF(nic_stats.rx_cbfc_pause_frames2)},
- {"rx_cbfc_pause_frames3", QL_SIZEOF(nic_stats.rx_cbfc_pause_frames3),
- QL_OFF(nic_stats.rx_cbfc_pause_frames3)},
- {"rx_cbfc_pause_frames4", QL_SIZEOF(nic_stats.rx_cbfc_pause_frames4),
- QL_OFF(nic_stats.rx_cbfc_pause_frames4)},
- {"rx_cbfc_pause_frames5", QL_SIZEOF(nic_stats.rx_cbfc_pause_frames5),
- QL_OFF(nic_stats.rx_cbfc_pause_frames5)},
- {"rx_cbfc_pause_frames6", QL_SIZEOF(nic_stats.rx_cbfc_pause_frames6),
- QL_OFF(nic_stats.rx_cbfc_pause_frames6)},
- {"rx_cbfc_pause_frames7", QL_SIZEOF(nic_stats.rx_cbfc_pause_frames7),
- QL_OFF(nic_stats.rx_cbfc_pause_frames7)},
- {"rx_nic_fifo_drop", QL_SIZEOF(nic_stats.rx_nic_fifo_drop),
- QL_OFF(nic_stats.rx_nic_fifo_drop)},
-};
-
-static const char ql_gstrings_test[][ETH_GSTRING_LEN] = {
- "Loopback test (offline)"
-};
-#define QLGE_TEST_LEN (sizeof(ql_gstrings_test) / ETH_GSTRING_LEN)
-#define QLGE_STATS_LEN ARRAY_SIZE(ql_gstrings_stats)
-#define QLGE_RCV_MAC_ERR_STATS 7
-
-static int ql_update_ring_coalescing(struct ql_adapter *qdev)
-{
- int i, status = 0;
- struct rx_ring *rx_ring;
- struct cqicb *cqicb;
-
- if (!netif_running(qdev->ndev))
- return status;
-
- /* Skip the default queue, and update the outbound handler
- * queues if they changed.
- */
- cqicb = (struct cqicb *)&qdev->rx_ring[qdev->rss_ring_count];
- if (le16_to_cpu(cqicb->irq_delay) != qdev->tx_coalesce_usecs ||
- le16_to_cpu(cqicb->pkt_delay) !=
- qdev->tx_max_coalesced_frames) {
- for (i = qdev->rss_ring_count; i < qdev->rx_ring_count; i++) {
- rx_ring = &qdev->rx_ring[i];
- cqicb = (struct cqicb *)rx_ring;
- cqicb->irq_delay = cpu_to_le16(qdev->tx_coalesce_usecs);
- cqicb->pkt_delay =
- cpu_to_le16(qdev->tx_max_coalesced_frames);
- cqicb->flags = FLAGS_LI;
- status = ql_write_cfg(qdev, cqicb, sizeof(*cqicb),
- CFG_LCQ, rx_ring->cq_id);
- if (status) {
- netif_err(qdev, ifup, qdev->ndev,
- "Failed to load CQICB.\n");
- goto exit;
- }
- }
- }
-
- /* Update the inbound (RSS) handler queues if they changed. */
- cqicb = (struct cqicb *)&qdev->rx_ring[0];
- if (le16_to_cpu(cqicb->irq_delay) != qdev->rx_coalesce_usecs ||
- le16_to_cpu(cqicb->pkt_delay) !=
- qdev->rx_max_coalesced_frames) {
- for (i = 0; i < qdev->rss_ring_count; i++) {
- rx_ring = &qdev->rx_ring[i];
- cqicb = (struct cqicb *)rx_ring;
- cqicb->irq_delay = cpu_to_le16(qdev->rx_coalesce_usecs);
- cqicb->pkt_delay =
- cpu_to_le16(qdev->rx_max_coalesced_frames);
- cqicb->flags = FLAGS_LI;
- status = ql_write_cfg(qdev, cqicb, sizeof(*cqicb),
- CFG_LCQ, rx_ring->cq_id);
- if (status) {
- netif_err(qdev, ifup, qdev->ndev,
- "Failed to load CQICB.\n");
- goto exit;
- }
- }
- }
-exit:
- return status;
-}
-
-static void ql_update_stats(struct ql_adapter *qdev)
-{
- u32 i;
- u64 data;
- u64 *iter = &qdev->nic_stats.tx_pkts;
-
- spin_lock(&qdev->stats_lock);
- if (ql_sem_spinlock(qdev, qdev->xg_sem_mask)) {
- netif_err(qdev, drv, qdev->ndev,
- "Couldn't get xgmac sem.\n");
- goto quit;
- }
- /*
- * Get TX statistics.
- */
- for (i = 0x200; i < 0x280; i += 8) {
- if (ql_read_xgmac_reg64(qdev, i, &data)) {
- netif_err(qdev, drv, qdev->ndev,
- "Error reading status register 0x%.04x.\n",
- i);
- goto end;
- } else
- *iter = data;
- iter++;
- }
-
- /*
- * Get RX statistics.
- */
- for (i = 0x300; i < 0x3d0; i += 8) {
- if (ql_read_xgmac_reg64(qdev, i, &data)) {
- netif_err(qdev, drv, qdev->ndev,
- "Error reading status register 0x%.04x.\n",
- i);
- goto end;
- } else
- *iter = data;
- iter++;
- }
-
- /* Skip over the receive mac error statistics. */
- iter += QLGE_RCV_MAC_ERR_STATS;
-
- /*
- * Get Per-priority TX pause frame counter statistics.
- */
- for (i = 0x500; i < 0x540; i += 8) {
- if (ql_read_xgmac_reg64(qdev, i, &data)) {
- netif_err(qdev, drv, qdev->ndev,
- "Error reading status register 0x%.04x.\n",
- i);
- goto end;
- } else
- *iter = data;
- iter++;
- }
-
- /*
- * Get Per-priority RX pause frame counter statistics.
- */
- for (i = 0x568; i < 0x5a8; i += 8) {
- if (ql_read_xgmac_reg64(qdev, i, &data)) {
- netif_err(qdev, drv, qdev->ndev,
- "Error reading status register 0x%.04x.\n",
- i);
- goto end;
- } else
- *iter = data;
- iter++;
- }
-
- /*
- * Get RX NIC FIFO DROP statistics.
- */
- if (ql_read_xgmac_reg64(qdev, 0x5b8, &data)) {
- netif_err(qdev, drv, qdev->ndev,
- "Error reading status register 0x5b8.\n");
- goto end;
- } else
- *iter = data;
-end:
- ql_sem_unlock(qdev, qdev->xg_sem_mask);
-quit:
- spin_unlock(&qdev->stats_lock);
-
- QL_DUMP_STAT(qdev);
-}
-
-static void ql_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
-{
- int index;
- switch (stringset) {
- case ETH_SS_TEST:
- memcpy(buf, *ql_gstrings_test, QLGE_TEST_LEN * ETH_GSTRING_LEN);
- break;
- case ETH_SS_STATS:
- for (index = 0; index < QLGE_STATS_LEN; index++) {
- memcpy(buf + index * ETH_GSTRING_LEN,
- ql_gstrings_stats[index].stat_string,
- ETH_GSTRING_LEN);
- }
- break;
- }
-}
-
-static int ql_get_sset_count(struct net_device *dev, int sset)
-{
- switch (sset) {
- case ETH_SS_TEST:
- return QLGE_TEST_LEN;
- case ETH_SS_STATS:
- return QLGE_STATS_LEN;
- default:
- return -EOPNOTSUPP;
- }
-}
-
-static void
-ql_get_ethtool_stats(struct net_device *ndev,
- struct ethtool_stats *stats, u64 *data)
-{
- struct ql_adapter *qdev = netdev_priv(ndev);
- int index, length;
-
- length = QLGE_STATS_LEN;
- ql_update_stats(qdev);
-
- for (index = 0; index < length; index++) {
- char *p = (char *)qdev +
- ql_gstrings_stats[index].stat_offset;
- *data++ = (ql_gstrings_stats[index].sizeof_stat ==
- sizeof(u64)) ? *(u64 *)p : (*(u32 *)p);
- }
-}
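-
-/* Sketch of how the table drives the copy above (illustrative, not in
- * the original source): for the "tx_pkts" entry, stat_offset is
- * offsetof(struct ql_adapter, nic_stats.tx_pkts), so
- * p = (char *)qdev + QL_OFF(nic_stats.tx_pkts);
- * *data++ = *(u64 *)p;
- * reads that counter straight out of the adapter structure.
- */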
-
-static int ql_get_link_ksettings(struct net_device *ndev,
- struct ethtool_link_ksettings *ecmd)
-{
- struct ql_adapter *qdev = netdev_priv(ndev);
- u32 supported, advertising;
-
- supported = SUPPORTED_10000baseT_Full;
- advertising = ADVERTISED_10000baseT_Full;
-
- if ((qdev->link_status & STS_LINK_TYPE_MASK) ==
- STS_LINK_TYPE_10GBASET) {
- supported |= (SUPPORTED_TP | SUPPORTED_Autoneg);
- advertising |= (ADVERTISED_TP | ADVERTISED_Autoneg);
- ecmd->base.port = PORT_TP;
- ecmd->base.autoneg = AUTONEG_ENABLE;
- } else {
- supported |= SUPPORTED_FIBRE;
- advertising |= ADVERTISED_FIBRE;
- ecmd->base.port = PORT_FIBRE;
- }
-
- ecmd->base.speed = SPEED_10000;
- ecmd->base.duplex = DUPLEX_FULL;
-
- ethtool_convert_legacy_u32_to_link_mode(ecmd->link_modes.supported,
- supported);
- ethtool_convert_legacy_u32_to_link_mode(ecmd->link_modes.advertising,
- advertising);
-
- return 0;
-}
-
-static void ql_get_drvinfo(struct net_device *ndev,
- struct ethtool_drvinfo *drvinfo)
-{
- struct ql_adapter *qdev = netdev_priv(ndev);
- strlcpy(drvinfo->driver, qlge_driver_name, sizeof(drvinfo->driver));
- strlcpy(drvinfo->version, qlge_driver_version,
- sizeof(drvinfo->version));
- snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
- "v%d.%d.%d",
- (qdev->fw_rev_id & 0x00ff0000) >> 16,
- (qdev->fw_rev_id & 0x0000ff00) >> 8,
- (qdev->fw_rev_id & 0x000000ff));
- strlcpy(drvinfo->bus_info, pci_name(qdev->pdev),
- sizeof(drvinfo->bus_info));
-}
-
-static void ql_get_wol(struct net_device *ndev, struct ethtool_wolinfo *wol)
-{
- struct ql_adapter *qdev = netdev_priv(ndev);
- unsigned short ssys_dev = qdev->pdev->subsystem_device;
-
- /* WOL is only supported for mezz card. */
- if (ssys_dev == QLGE_MEZZ_SSYS_ID_068 ||
- ssys_dev == QLGE_MEZZ_SSYS_ID_180) {
- wol->supported = WAKE_MAGIC;
- wol->wolopts = qdev->wol;
- }
-}
-
-static int ql_set_wol(struct net_device *ndev, struct ethtool_wolinfo *wol)
-{
- struct ql_adapter *qdev = netdev_priv(ndev);
- unsigned short ssys_dev = qdev->pdev->subsystem_device;
-
- /* WOL is only supported for mezz card. */
- if (ssys_dev != QLGE_MEZZ_SSYS_ID_068 &&
- ssys_dev != QLGE_MEZZ_SSYS_ID_180) {
- netif_info(qdev, drv, qdev->ndev,
- "WOL is only supported for mezz card\n");
- return -EOPNOTSUPP;
- }
- if (wol->wolopts & ~WAKE_MAGIC)
- return -EINVAL;
- qdev->wol = wol->wolopts;
-
- netif_info(qdev, drv, qdev->ndev, "Set wol option 0x%x\n", qdev->wol);
- return 0;
-}
-
-static int ql_set_phys_id(struct net_device *ndev,
- enum ethtool_phys_id_state state)
-{
- struct ql_adapter *qdev = netdev_priv(ndev);
-
- switch (state) {
- case ETHTOOL_ID_ACTIVE:
- /* Save the current LED settings */
- if (ql_mb_get_led_cfg(qdev))
- return -EIO;
-
- /* Start blinking */
- ql_mb_set_led_cfg(qdev, QL_LED_BLINK);
- return 0;
-
- case ETHTOOL_ID_INACTIVE:
- /* Restore LED settings */
- if (ql_mb_set_led_cfg(qdev, qdev->led_config))
- return -EIO;
- return 0;
-
- default:
- return -EINVAL;
- }
-}
-
-static int ql_start_loopback(struct ql_adapter *qdev)
-{
- if (netif_carrier_ok(qdev->ndev)) {
- set_bit(QL_LB_LINK_UP, &qdev->flags);
- netif_carrier_off(qdev->ndev);
- } else
- clear_bit(QL_LB_LINK_UP, &qdev->flags);
- qdev->link_config |= CFG_LOOPBACK_PCS;
- return ql_mb_set_port_cfg(qdev);
-}
-
-static void ql_stop_loopback(struct ql_adapter *qdev)
-{
- qdev->link_config &= ~CFG_LOOPBACK_PCS;
- ql_mb_set_port_cfg(qdev);
- if (test_bit(QL_LB_LINK_UP, &qdev->flags)) {
- netif_carrier_on(qdev->ndev);
- clear_bit(QL_LB_LINK_UP, &qdev->flags);
- }
-}
-
-static void ql_create_lb_frame(struct sk_buff *skb,
- unsigned int frame_size)
-{
- memset(skb->data, 0xFF, frame_size);
- frame_size &= ~1;
- memset(&skb->data[frame_size / 2], 0xAA, frame_size / 2 - 1);
- memset(&skb->data[frame_size / 2 + 10], 0xBE, 1);
- memset(&skb->data[frame_size / 2 + 12], 0xAF, 1);
-}
-
-void ql_check_lb_frame(struct ql_adapter *qdev,
- struct sk_buff *skb)
-{
- unsigned int frame_size = skb->len;
-
- if ((*(skb->data + 3) == 0xFF) &&
- (*(skb->data + frame_size / 2 + 10) == 0xBE) &&
- (*(skb->data + frame_size / 2 + 12) == 0xAF)) {
- atomic_dec(&qdev->lb_count);
- return;
- }
-}
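-
-/* Loopback frame layout implied by the two helpers above (sketch;
- * offsets derived from ql_create_lb_frame for an even frame_size):
- *
- * bytes 0 .. size/2 - 1: 0xFF fill
- * bytes size/2 .. size - 2: 0xAA fill
- * byte size/2 + 10: 0xBE marker
- * byte size/2 + 12: 0xAF marker
- *
- * ql_check_lb_frame() verifies only byte 3 and the two markers.
- */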
-
-static int ql_run_loopback_test(struct ql_adapter *qdev)
-{
- int i;
- netdev_tx_t rc;
- struct sk_buff *skb;
- unsigned int size = SMALL_BUF_MAP_SIZE;
-
- for (i = 0; i < 64; i++) {
- skb = netdev_alloc_skb(qdev->ndev, size);
- if (!skb)
- return -ENOMEM;
-
- skb->queue_mapping = 0;
- skb_put(skb, size);
- ql_create_lb_frame(skb, size);
- rc = ql_lb_send(skb, qdev->ndev);
- if (rc != NETDEV_TX_OK)
- return -EPIPE;
- atomic_inc(&qdev->lb_count);
- }
- /* Give queue time to settle before testing results. */
- msleep(2);
- ql_clean_lb_rx_ring(&qdev->rx_ring[0], 128);
- return atomic_read(&qdev->lb_count) ? -EIO : 0;
-}
-
-static int ql_loopback_test(struct ql_adapter *qdev, u64 *data)
-{
- *data = ql_start_loopback(qdev);
- if (*data)
- goto out;
- *data = ql_run_loopback_test(qdev);
-out:
- ql_stop_loopback(qdev);
- return *data;
-}
-
-static void ql_self_test(struct net_device *ndev,
- struct ethtool_test *eth_test, u64 *data)
-{
- struct ql_adapter *qdev = netdev_priv(ndev);
-
- memset(data, 0, sizeof(u64) * QLGE_TEST_LEN);
-
- if (netif_running(ndev)) {
- set_bit(QL_SELFTEST, &qdev->flags);
- if (eth_test->flags == ETH_TEST_FL_OFFLINE) {
- /* Offline tests */
- if (ql_loopback_test(qdev, &data[0]))
- eth_test->flags |= ETH_TEST_FL_FAILED;
-
- } else {
- /* Online tests */
- data[0] = 0;
- }
- clear_bit(QL_SELFTEST, &qdev->flags);
- /* Give link time to come up after
- * port configuration changes.
- */
- msleep_interruptible(4 * 1000);
- } else {
- netif_err(qdev, drv, qdev->ndev,
- "is down, Loopback test will fail.\n");
- eth_test->flags |= ETH_TEST_FL_FAILED;
- }
-}
-
-static int ql_get_regs_len(struct net_device *ndev)
-{
- struct ql_adapter *qdev = netdev_priv(ndev);
-
- if (!test_bit(QL_FRC_COREDUMP, &qdev->flags))
- return sizeof(struct ql_mpi_coredump);
- else
- return sizeof(struct ql_reg_dump);
-}
-
-static void ql_get_regs(struct net_device *ndev,
- struct ethtool_regs *regs, void *p)
-{
- struct ql_adapter *qdev = netdev_priv(ndev);
-
- ql_get_dump(qdev, p);
- qdev->core_is_dumped = 0;
- if (!test_bit(QL_FRC_COREDUMP, &qdev->flags))
- regs->len = sizeof(struct ql_mpi_coredump);
- else
- regs->len = sizeof(struct ql_reg_dump);
-}
-
-static int ql_get_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
-{
- struct ql_adapter *qdev = netdev_priv(dev);
-
- c->rx_coalesce_usecs = qdev->rx_coalesce_usecs;
- c->tx_coalesce_usecs = qdev->tx_coalesce_usecs;
-
- /* This chip coalesces as follows:
- * If a packet arrives, hold off interrupts until
- * cqicb->int_delay expires, but if no other packets arrive don't
- * wait longer than cqicb->pkt_int_delay. But ethtool doesn't use a
- * timer to coalesce on a frame basis. So, we have to take ethtool's
- * max_coalesced_frames value and convert it to a delay in microseconds.
- * We do this by assuming a base throughput of 1,000,000 frames per
- * second at 1024 bytes. That means one frame per usec, so it's a
- * simple one-to-one ratio.
- */
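- /* Worked example of that assumption (illustrative): a request of
- * rx_max_coalesced_frames = 5 is treated as a 5 usec pkt_delay,
- * since 1,000,000 frames/sec works out to one frame per usec.
- */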
- c->rx_max_coalesced_frames = qdev->rx_max_coalesced_frames;
- c->tx_max_coalesced_frames = qdev->tx_max_coalesced_frames;
-
- return 0;
-}
-
-static int ql_set_coalesce(struct net_device *ndev, struct ethtool_coalesce *c)
-{
- struct ql_adapter *qdev = netdev_priv(ndev);
-
- /* Validate user parameters. */
- if (c->rx_coalesce_usecs > qdev->rx_ring_size / 2)
- return -EINVAL;
- /* Don't wait more than 10 usec. */
- if (c->rx_max_coalesced_frames > MAX_INTER_FRAME_WAIT)
- return -EINVAL;
- if (c->tx_coalesce_usecs > qdev->tx_ring_size / 2)
- return -EINVAL;
- if (c->tx_max_coalesced_frames > MAX_INTER_FRAME_WAIT)
- return -EINVAL;
-
- /* Verify a change took place before updating the hardware. */
- if (qdev->rx_coalesce_usecs == c->rx_coalesce_usecs &&
- qdev->tx_coalesce_usecs == c->tx_coalesce_usecs &&
- qdev->rx_max_coalesced_frames == c->rx_max_coalesced_frames &&
- qdev->tx_max_coalesced_frames == c->tx_max_coalesced_frames)
- return 0;
-
- qdev->rx_coalesce_usecs = c->rx_coalesce_usecs;
- qdev->tx_coalesce_usecs = c->tx_coalesce_usecs;
- qdev->rx_max_coalesced_frames = c->rx_max_coalesced_frames;
- qdev->tx_max_coalesced_frames = c->tx_max_coalesced_frames;
-
- return ql_update_ring_coalescing(qdev);
-}
-
-static void ql_get_pauseparam(struct net_device *netdev,
- struct ethtool_pauseparam *pause)
-{
- struct ql_adapter *qdev = netdev_priv(netdev);
-
- ql_mb_get_port_cfg(qdev);
- if (qdev->link_config & CFG_PAUSE_STD) {
- pause->rx_pause = 1;
- pause->tx_pause = 1;
- }
-}
-
-static int ql_set_pauseparam(struct net_device *netdev,
- struct ethtool_pauseparam *pause)
-{
- struct ql_adapter *qdev = netdev_priv(netdev);
- int status = 0;
-
- if ((pause->rx_pause) && (pause->tx_pause))
- qdev->link_config |= CFG_PAUSE_STD;
- else if (!pause->rx_pause && !pause->tx_pause)
- qdev->link_config &= ~CFG_PAUSE_STD;
- else
- return -EINVAL;
-
- status = ql_mb_set_port_cfg(qdev);
- return status;
-}
-
-static u32 ql_get_msglevel(struct net_device *ndev)
-{
- struct ql_adapter *qdev = netdev_priv(ndev);
- return qdev->msg_enable;
-}
-
-static void ql_set_msglevel(struct net_device *ndev, u32 value)
-{
- struct ql_adapter *qdev = netdev_priv(ndev);
- qdev->msg_enable = value;
-}
-
-const struct ethtool_ops qlge_ethtool_ops = {
- .get_drvinfo = ql_get_drvinfo,
- .get_wol = ql_get_wol,
- .set_wol = ql_set_wol,
- .get_regs_len = ql_get_regs_len,
- .get_regs = ql_get_regs,
- .get_msglevel = ql_get_msglevel,
- .set_msglevel = ql_set_msglevel,
- .get_link = ethtool_op_get_link,
- .set_phys_id = ql_set_phys_id,
- .self_test = ql_self_test,
- .get_pauseparam = ql_get_pauseparam,
- .set_pauseparam = ql_set_pauseparam,
- .get_coalesce = ql_get_coalesce,
- .set_coalesce = ql_set_coalesce,
- .get_sset_count = ql_get_sset_count,
- .get_strings = ql_get_strings,
- .get_ethtool_stats = ql_get_ethtool_stats,
- .get_link_ksettings = ql_get_link_ksettings,
-};
-
diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_main.c b/drivers/net/ethernet/qlogic/qlge/qlge_main.c
deleted file mode 100644
index 6cae33072496..000000000000
--- a/drivers/net/ethernet/qlogic/qlge/qlge_main.c
+++ /dev/null
@@ -1,5027 +0,0 @@
-/*
- * QLogic qlge NIC HBA Driver
- * Copyright (c) 2003-2008 QLogic Corporation
- * See LICENSE.qlge for copyright and licensing details.
- * Author: Linux qlge network device driver by
- * Ron Mercer <ron.mercer@qlogic.com>
- */
-#include <linux/kernel.h>
-#include <linux/bitops.h>
-#include <linux/types.h>
-#include <linux/module.h>
-#include <linux/list.h>
-#include <linux/pci.h>
-#include <linux/dma-mapping.h>
-#include <linux/pagemap.h>
-#include <linux/sched.h>
-#include <linux/slab.h>
-#include <linux/dmapool.h>
-#include <linux/mempool.h>
-#include <linux/spinlock.h>
-#include <linux/kthread.h>
-#include <linux/interrupt.h>
-#include <linux/errno.h>
-#include <linux/ioport.h>
-#include <linux/in.h>
-#include <linux/ip.h>
-#include <linux/ipv6.h>
-#include <net/ipv6.h>
-#include <linux/tcp.h>
-#include <linux/udp.h>
-#include <linux/if_arp.h>
-#include <linux/if_ether.h>
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-#include <linux/ethtool.h>
-#include <linux/if_vlan.h>
-#include <linux/skbuff.h>
-#include <linux/delay.h>
-#include <linux/mm.h>
-#include <linux/vmalloc.h>
-#include <linux/prefetch.h>
-#include <net/ip6_checksum.h>
-
-#include "qlge.h"
-
-char qlge_driver_name[] = DRV_NAME;
-const char qlge_driver_version[] = DRV_VERSION;
-
-MODULE_AUTHOR("Ron Mercer <ron.mercer@qlogic.com>");
-MODULE_DESCRIPTION(DRV_STRING);
-MODULE_LICENSE("GPL");
-MODULE_VERSION(DRV_VERSION);
-
-static const u32 default_msg =
- NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK |
-/* NETIF_MSG_TIMER | */
- NETIF_MSG_IFDOWN |
- NETIF_MSG_IFUP |
- NETIF_MSG_RX_ERR |
- NETIF_MSG_TX_ERR |
-/* NETIF_MSG_TX_QUEUED | */
-/* NETIF_MSG_INTR | NETIF_MSG_TX_DONE | NETIF_MSG_RX_STATUS | */
-/* NETIF_MSG_PKTDATA | */
- NETIF_MSG_HW | NETIF_MSG_WOL | 0;
-
-static int debug = -1; /* defaults above */
-module_param(debug, int, 0664);
-MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
-
-#define MSIX_IRQ 0
-#define MSI_IRQ 1
-#define LEG_IRQ 2
-static int qlge_irq_type = MSIX_IRQ;
-module_param(qlge_irq_type, int, 0664);
-MODULE_PARM_DESC(qlge_irq_type, "0 = MSI-X, 1 = MSI, 2 = Legacy.");
-
-static int qlge_mpi_coredump;
-module_param(qlge_mpi_coredump, int, 0);
-MODULE_PARM_DESC(qlge_mpi_coredump,
- "Option to enable MPI firmware dump. "
- "Default is OFF - do not allocate memory.");
-
-static int qlge_force_coredump;
-module_param(qlge_force_coredump, int, 0);
-MODULE_PARM_DESC(qlge_force_coredump,
- "Option to allow forcing a firmware core dump. "
- "Default is OFF - do not allow.");
-
-static const struct pci_device_id qlge_pci_tbl[] = {
- {PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QLGE_DEVICE_ID_8012)},
- {PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QLGE_DEVICE_ID_8000)},
- /* required last entry */
- {0,}
-};
-
-MODULE_DEVICE_TABLE(pci, qlge_pci_tbl);
-
-static int ql_wol(struct ql_adapter *);
-static void qlge_set_multicast_list(struct net_device *);
-static int ql_adapter_down(struct ql_adapter *);
-static int ql_adapter_up(struct ql_adapter *);
-
-/* This hardware semaphore grants exclusive access to
- * resources shared between the NIC driver, MPI firmware,
- * FCoE firmware and the FC driver.
- */
-static int ql_sem_trylock(struct ql_adapter *qdev, u32 sem_mask)
-{
- u32 sem_bits = 0;
-
- switch (sem_mask) {
- case SEM_XGMAC0_MASK:
- sem_bits = SEM_SET << SEM_XGMAC0_SHIFT;
- break;
- case SEM_XGMAC1_MASK:
- sem_bits = SEM_SET << SEM_XGMAC1_SHIFT;
- break;
- case SEM_ICB_MASK:
- sem_bits = SEM_SET << SEM_ICB_SHIFT;
- break;
- case SEM_MAC_ADDR_MASK:
- sem_bits = SEM_SET << SEM_MAC_ADDR_SHIFT;
- break;
- case SEM_FLASH_MASK:
- sem_bits = SEM_SET << SEM_FLASH_SHIFT;
- break;
- case SEM_PROBE_MASK:
- sem_bits = SEM_SET << SEM_PROBE_SHIFT;
- break;
- case SEM_RT_IDX_MASK:
- sem_bits = SEM_SET << SEM_RT_IDX_SHIFT;
- break;
- case SEM_PROC_REG_MASK:
- sem_bits = SEM_SET << SEM_PROC_REG_SHIFT;
- break;
- default:
- netif_alert(qdev, probe, qdev->ndev, "bad Semaphore mask!.\n");
- return -EINVAL;
- }
-
- ql_write32(qdev, SEM, sem_bits | sem_mask);
- return !(ql_read32(qdev, SEM) & sem_bits);
-}
-
-int ql_sem_spinlock(struct ql_adapter *qdev, u32 sem_mask)
-{
- unsigned int wait_count = 30;
- do {
- if (!ql_sem_trylock(qdev, sem_mask))
- return 0;
- udelay(100);
- } while (--wait_count);
- return -ETIMEDOUT;
-}
-
-void ql_sem_unlock(struct ql_adapter *qdev, u32 sem_mask)
-{
- ql_write32(qdev, SEM, sem_mask);
- ql_read32(qdev, SEM); /* flush */
-}
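-
-/* Typical usage of this semaphore pair, as in the flash routines
- * below (sketch):
- *
- * if (ql_sem_spinlock(qdev, SEM_FLASH_MASK))
- * return -ETIMEDOUT;
- * ...access the shared resource...
- * ql_sem_unlock(qdev, SEM_FLASH_MASK);
- */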
-
-/* This function waits for a specific bit to come ready
- * in a given register. It is used mostly by the initialize
- * process, but is also used by kernel API callbacks such as
- * netdev->set_multi, netdev->set_mac_address, netdev->vlan_rx_add_vid.
- */
-int ql_wait_reg_rdy(struct ql_adapter *qdev, u32 reg, u32 bit, u32 err_bit)
-{
- u32 temp;
- int count = UDELAY_COUNT;
-
- while (count) {
- temp = ql_read32(qdev, reg);
-
- /* check for errors */
- if (temp & err_bit) {
- netif_alert(qdev, probe, qdev->ndev,
- "register 0x%.08x access error, value = 0x%.08x!.\n",
- reg, temp);
- return -EIO;
- } else if (temp & bit)
- return 0;
- udelay(UDELAY_DELAY);
- count--;
- }
- netif_alert(qdev, probe, qdev->ndev,
- "Timed out waiting for reg %x to come ready.\n", reg);
- return -ETIMEDOUT;
-}
-
-/* The CFG register is used to download TX and RX control blocks
- * to the chip. This function waits for an operation to complete.
- */
-static int ql_wait_cfg(struct ql_adapter *qdev, u32 bit)
-{
- int count = UDELAY_COUNT;
- u32 temp;
-
- while (count) {
- temp = ql_read32(qdev, CFG);
- if (temp & CFG_LE)
- return -EIO;
- if (!(temp & bit))
- return 0;
- udelay(UDELAY_DELAY);
- count--;
- }
- return -ETIMEDOUT;
-}
-
-/* Used to issue init control blocks to hw. Maps control block,
- * sets address, triggers download, waits for completion.
- */
-int ql_write_cfg(struct ql_adapter *qdev, void *ptr, int size, u32 bit,
- u16 q_id)
-{
- u64 map;
- int status = 0;
- int direction;
- u32 mask;
- u32 value;
-
- direction =
- (bit & (CFG_LRQ | CFG_LR | CFG_LCQ)) ? PCI_DMA_TODEVICE :
- PCI_DMA_FROMDEVICE;
-
- map = pci_map_single(qdev->pdev, ptr, size, direction);
- if (pci_dma_mapping_error(qdev->pdev, map)) {
- netif_err(qdev, ifup, qdev->ndev, "Couldn't map DMA area.\n");
- return -ENOMEM;
- }
-
- status = ql_sem_spinlock(qdev, SEM_ICB_MASK);
- if (status) {
- pci_unmap_single(qdev->pdev, map, size, direction);
- return status;
- }
-
- status = ql_wait_cfg(qdev, bit);
- if (status) {
- netif_err(qdev, ifup, qdev->ndev,
- "Timed out waiting for CFG to come ready.\n");
- goto exit;
- }
-
- ql_write32(qdev, ICB_L, (u32) map);
- ql_write32(qdev, ICB_H, (u32) (map >> 32));
-
- mask = CFG_Q_MASK | (bit << 16);
- value = bit | (q_id << CFG_Q_SHIFT);
- ql_write32(qdev, CFG, (mask | value));
-
- /*
- * Wait for the bit to clear after signaling hw.
- */
- status = ql_wait_cfg(qdev, bit);
-exit:
- ql_sem_unlock(qdev, SEM_ICB_MASK); /* does flush too */
- pci_unmap_single(qdev->pdev, map, size, direction);
- return status;
-}
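-
-/* Example use (taken from the coalescing update in qlge_ethtool.c):
- *
- * status = ql_write_cfg(qdev, cqicb, sizeof(*cqicb),
- * CFG_LCQ, rx_ring->cq_id);
- *
- * i.e. download a completion queue control block to the hardware.
- */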
-
-/* Get a specific MAC address from the CAM. Used for debug and reg dump. */
-int ql_get_mac_addr_reg(struct ql_adapter *qdev, u32 type, u16 index,
- u32 *value)
-{
- u32 offset = 0;
- int status;
-
- switch (type) {
- case MAC_ADDR_TYPE_MULTI_MAC:
- case MAC_ADDR_TYPE_CAM_MAC:
- {
- status =
- ql_wait_reg_rdy(qdev,
- MAC_ADDR_IDX, MAC_ADDR_MW, 0);
- if (status)
- goto exit;
- ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
- (index << MAC_ADDR_IDX_SHIFT) | /* index */
- MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */
- status =
- ql_wait_reg_rdy(qdev,
- MAC_ADDR_IDX, MAC_ADDR_MR, 0);
- if (status)
- goto exit;
- *value++ = ql_read32(qdev, MAC_ADDR_DATA);
- status =
- ql_wait_reg_rdy(qdev,
- MAC_ADDR_IDX, MAC_ADDR_MW, 0);
- if (status)
- goto exit;
- ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
- (index << MAC_ADDR_IDX_SHIFT) | /* index */
- MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */
- status =
- ql_wait_reg_rdy(qdev,
- MAC_ADDR_IDX, MAC_ADDR_MR, 0);
- if (status)
- goto exit;
- *value++ = ql_read32(qdev, MAC_ADDR_DATA);
- if (type == MAC_ADDR_TYPE_CAM_MAC) {
- status =
- ql_wait_reg_rdy(qdev,
- MAC_ADDR_IDX, MAC_ADDR_MW, 0);
- if (status)
- goto exit;
- ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
- (index << MAC_ADDR_IDX_SHIFT) | /* index */
- MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */
- status =
- ql_wait_reg_rdy(qdev, MAC_ADDR_IDX,
- MAC_ADDR_MR, 0);
- if (status)
- goto exit;
- *value++ = ql_read32(qdev, MAC_ADDR_DATA);
- }
- break;
- }
- case MAC_ADDR_TYPE_VLAN:
- case MAC_ADDR_TYPE_MULTI_FLTR:
- default:
- netif_crit(qdev, ifup, qdev->ndev,
- "Address type %d not yet supported.\n", type);
- status = -EPERM;
- }
-exit:
- return status;
-}
-
-/* Set up a MAC, multicast or VLAN address for the
- * inbound frame matching.
- */
-static int ql_set_mac_addr_reg(struct ql_adapter *qdev, u8 *addr, u32 type,
- u16 index)
-{
- u32 offset = 0;
- int status = 0;
-
- switch (type) {
- case MAC_ADDR_TYPE_MULTI_MAC:
- {
- u32 upper = (addr[0] << 8) | addr[1];
- u32 lower = (addr[2] << 24) | (addr[3] << 16) |
- (addr[4] << 8) | (addr[5]);
-
- status =
- ql_wait_reg_rdy(qdev,
- MAC_ADDR_IDX, MAC_ADDR_MW, 0);
- if (status)
- goto exit;
- ql_write32(qdev, MAC_ADDR_IDX, (offset++) |
- (index << MAC_ADDR_IDX_SHIFT) |
- type | MAC_ADDR_E);
- ql_write32(qdev, MAC_ADDR_DATA, lower);
- status =
- ql_wait_reg_rdy(qdev,
- MAC_ADDR_IDX, MAC_ADDR_MW, 0);
- if (status)
- goto exit;
- ql_write32(qdev, MAC_ADDR_IDX, (offset++) |
- (index << MAC_ADDR_IDX_SHIFT) |
- type | MAC_ADDR_E);
-
- ql_write32(qdev, MAC_ADDR_DATA, upper);
- status =
- ql_wait_reg_rdy(qdev,
- MAC_ADDR_IDX, MAC_ADDR_MW, 0);
- if (status)
- goto exit;
- break;
- }
- case MAC_ADDR_TYPE_CAM_MAC:
- {
- u32 cam_output;
- u32 upper = (addr[0] << 8) | addr[1];
- u32 lower =
- (addr[2] << 24) | (addr[3] << 16) | (addr[4] << 8) |
- (addr[5]);
- status =
- ql_wait_reg_rdy(qdev,
- MAC_ADDR_IDX, MAC_ADDR_MW, 0);
- if (status)
- goto exit;
- ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
- (index << MAC_ADDR_IDX_SHIFT) | /* index */
- type); /* type */
- ql_write32(qdev, MAC_ADDR_DATA, lower);
- status =
- ql_wait_reg_rdy(qdev,
- MAC_ADDR_IDX, MAC_ADDR_MW, 0);
- if (status)
- goto exit;
- ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
- (index << MAC_ADDR_IDX_SHIFT) | /* index */
- type); /* type */
- ql_write32(qdev, MAC_ADDR_DATA, upper);
- status =
- ql_wait_reg_rdy(qdev,
- MAC_ADDR_IDX, MAC_ADDR_MW, 0);
- if (status)
- goto exit;
- ql_write32(qdev, MAC_ADDR_IDX, (offset) | /* offset */
- (index << MAC_ADDR_IDX_SHIFT) | /* index */
- type); /* type */
- /* This field should also include the queue id
- * and possibly the function id. Right now we
- * hardcode the route field to NIC core.
- */
- cam_output = (CAM_OUT_ROUTE_NIC |
- (qdev->func << CAM_OUT_FUNC_SHIFT) |
- (0 << CAM_OUT_CQ_ID_SHIFT));
- if (qdev->ndev->features & NETIF_F_HW_VLAN_CTAG_RX)
- cam_output |= CAM_OUT_RV;
- /* route to NIC core */
- ql_write32(qdev, MAC_ADDR_DATA, cam_output);
- break;
- }
- case MAC_ADDR_TYPE_VLAN:
- {
- u32 enable_bit = *((u32 *) &addr[0]);
- /* For VLAN, the addr actually holds a bit that
- * either enables or disables the vlan id we are
- * addressing. It's either MAC_ADDR_E on or off.
- * That's bit-27 we're talking about.
- */
- status =
- ql_wait_reg_rdy(qdev,
- MAC_ADDR_IDX, MAC_ADDR_MW, 0);
- if (status)
- goto exit;
- ql_write32(qdev, MAC_ADDR_IDX, offset | /* offset */
- (index << MAC_ADDR_IDX_SHIFT) | /* index */
- type | /* type */
- enable_bit); /* enable/disable */
- break;
- }
- case MAC_ADDR_TYPE_MULTI_FLTR:
- default:
- netif_crit(qdev, ifup, qdev->ndev,
- "Address type %d not yet supported.\n", type);
- status = -EPERM;
- }
-exit:
- return status;
-}
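-
-/* Byte packing used by the CAM cases above (worked example, not from
- * the original source): for the address 00:11:22:33:44:55,
- * upper = (0x00 << 8) | 0x11 = 0x00000011
- * lower = (0x22 << 24) | (0x33 << 16) | (0x44 << 8) | 0x55 = 0x22334455
- */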
-
-/* Set or clear MAC address in hardware. We sometimes
- * have to clear it to prevent wrong frame routing
- * especially in a bonding environment.
- */
-static int ql_set_mac_addr(struct ql_adapter *qdev, int set)
-{
- int status;
- char zero_mac_addr[ETH_ALEN];
- char *addr;
-
- if (set) {
- addr = &qdev->current_mac_addr[0];
- netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
- "Set Mac addr %pM\n", addr);
- } else {
- eth_zero_addr(zero_mac_addr);
- addr = &zero_mac_addr[0];
- netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
- "Clearing MAC address\n");
- }
- status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
- if (status)
- return status;
- status = ql_set_mac_addr_reg(qdev, (u8 *) addr,
- MAC_ADDR_TYPE_CAM_MAC, qdev->func * MAX_CQ);
- ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
- if (status)
- netif_err(qdev, ifup, qdev->ndev,
- "Failed to init mac address.\n");
- return status;
-}
-
-void ql_link_on(struct ql_adapter *qdev)
-{
- netif_err(qdev, link, qdev->ndev, "Link is up.\n");
- netif_carrier_on(qdev->ndev);
- ql_set_mac_addr(qdev, 1);
-}
-
-void ql_link_off(struct ql_adapter *qdev)
-{
- netif_err(qdev, link, qdev->ndev, "Link is down.\n");
- netif_carrier_off(qdev->ndev);
- ql_set_mac_addr(qdev, 0);
-}
-
-/* Get a specific frame routing value from the CAM.
- * Used for debug and reg dump.
- */
-int ql_get_routing_reg(struct ql_adapter *qdev, u32 index, u32 *value)
-{
- int status = 0;
-
- status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MW, 0);
- if (status)
- goto exit;
-
- ql_write32(qdev, RT_IDX,
- RT_IDX_TYPE_NICQ | RT_IDX_RS | (index << RT_IDX_IDX_SHIFT));
- status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MR, 0);
- if (status)
- goto exit;
- *value = ql_read32(qdev, RT_DATA);
-exit:
- return status;
-}
-
-/* The NIC function for this chip has 16 routing indexes. Each one can be used
- * to route different frame types to various inbound queues. We send broadcast/
- * multicast/error frames to the default queue for slow handling,
- * and CAM hit/RSS frames to the fast handling queues.
- */
-static int ql_set_routing_reg(struct ql_adapter *qdev, u32 index, u32 mask,
- int enable)
-{
- int status = -EINVAL; /* Return error if no mask match. */
- u32 value = 0;
-
- switch (mask) {
- case RT_IDX_CAM_HIT:
- {
- value = RT_IDX_DST_CAM_Q | /* dest */
- RT_IDX_TYPE_NICQ | /* type */
- (RT_IDX_CAM_HIT_SLOT << RT_IDX_IDX_SHIFT);/* index */
- break;
- }
- case RT_IDX_VALID: /* Promiscuous Mode frames. */
- {
- value = RT_IDX_DST_DFLT_Q | /* dest */
- RT_IDX_TYPE_NICQ | /* type */
- (RT_IDX_PROMISCUOUS_SLOT << RT_IDX_IDX_SHIFT);/* index */
- break;
- }
- case RT_IDX_ERR: /* Pass up MAC,IP,TCP/UDP error frames. */
- {
- value = RT_IDX_DST_DFLT_Q | /* dest */
- RT_IDX_TYPE_NICQ | /* type */
- (RT_IDX_ALL_ERR_SLOT << RT_IDX_IDX_SHIFT);/* index */
- break;
- }
- case RT_IDX_IP_CSUM_ERR: /* Pass up IP CSUM error frames. */
- {
- value = RT_IDX_DST_DFLT_Q | /* dest */
- RT_IDX_TYPE_NICQ | /* type */
- (RT_IDX_IP_CSUM_ERR_SLOT <<
- RT_IDX_IDX_SHIFT); /* index */
- break;
- }
- case RT_IDX_TU_CSUM_ERR: /* Pass up TCP/UDP CSUM error frames. */
- {
- value = RT_IDX_DST_DFLT_Q | /* dest */
- RT_IDX_TYPE_NICQ | /* type */
- (RT_IDX_TCP_UDP_CSUM_ERR_SLOT <<
- RT_IDX_IDX_SHIFT); /* index */
- break;
- }
- case RT_IDX_BCAST: /* Pass up Broadcast frames to default Q. */
- {
- value = RT_IDX_DST_DFLT_Q | /* dest */
- RT_IDX_TYPE_NICQ | /* type */
- (RT_IDX_BCAST_SLOT << RT_IDX_IDX_SHIFT);/* index */
- break;
- }
- case RT_IDX_MCAST: /* Pass up All Multicast frames. */
- {
- value = RT_IDX_DST_DFLT_Q | /* dest */
- RT_IDX_TYPE_NICQ | /* type */
- (RT_IDX_ALLMULTI_SLOT << RT_IDX_IDX_SHIFT);/* index */
- break;
- }
- case RT_IDX_MCAST_MATCH: /* Pass up matched Multicast frames. */
- {
- value = RT_IDX_DST_DFLT_Q | /* dest */
- RT_IDX_TYPE_NICQ | /* type */
- (RT_IDX_MCAST_MATCH_SLOT << RT_IDX_IDX_SHIFT);/* index */
- break;
- }
- case RT_IDX_RSS_MATCH: /* Pass up matched RSS frames. */
- {
- value = RT_IDX_DST_RSS | /* dest */
- RT_IDX_TYPE_NICQ | /* type */
- (RT_IDX_RSS_MATCH_SLOT << RT_IDX_IDX_SHIFT);/* index */
- break;
- }
- case 0: /* Clear the E-bit on an entry. */
- {
- value = RT_IDX_DST_DFLT_Q | /* dest */
- RT_IDX_TYPE_NICQ | /* type */
- (index << RT_IDX_IDX_SHIFT);/* index */
- break;
- }
- default:
- netif_err(qdev, ifup, qdev->ndev,
- "Mask type %d not yet supported.\n", mask);
- status = -EPERM;
- goto exit;
- }
-
- if (value) {
- status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MW, 0);
- if (status)
- goto exit;
- value |= (enable ? RT_IDX_E : 0);
- ql_write32(qdev, RT_IDX, value);
- ql_write32(qdev, RT_DATA, enable ? mask : 0);
- }
-exit:
- return status;
-}
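-
-/* Example call (illustrative; this mirrors how the routing table is
- * programmed during initialization elsewhere in the driver): enable
- * the broadcast slot so broadcast frames go to the default queue:
- *
- * status = ql_set_routing_reg(qdev, RT_IDX_BCAST_SLOT,
- * RT_IDX_BCAST, 1);
- */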
-
-static void ql_enable_interrupts(struct ql_adapter *qdev)
-{
- ql_write32(qdev, INTR_EN, (INTR_EN_EI << 16) | INTR_EN_EI);
-}
-
-static void ql_disable_interrupts(struct ql_adapter *qdev)
-{
- ql_write32(qdev, INTR_EN, (INTR_EN_EI << 16));
-}
-
-/* If we're running with multiple MSI-X vectors then we enable on the fly.
- * Otherwise, we may have multiple outstanding workers and don't want to
- * enable until the last one finishes. In this case, the irq_cnt gets
- * incremented every time we queue a worker and decremented every time
- * a worker finishes. Once it hits zero we enable the interrupt.
- */
-u32 ql_enable_completion_interrupt(struct ql_adapter *qdev, u32 intr)
-{
- u32 var = 0;
- unsigned long hw_flags = 0;
- struct intr_context *ctx = qdev->intr_context + intr;
-
- if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags) && intr)) {
- /* Always enable if we're running multiple MSI-X vectors
- * and it's not the default (zeroth) interrupt.
- */
- ql_write32(qdev, INTR_EN,
- ctx->intr_en_mask);
- var = ql_read32(qdev, STS);
- return var;
- }
-
- spin_lock_irqsave(&qdev->hw_lock, hw_flags);
- if (atomic_dec_and_test(&ctx->irq_cnt)) {
- ql_write32(qdev, INTR_EN,
- ctx->intr_en_mask);
- var = ql_read32(qdev, STS);
- }
- spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
- return var;
-}
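-
-/* irq_cnt lifecycle for the non-MSI-X path (sketch): the disable
- * routine below does atomic_inc(&ctx->irq_cnt) per outstanding
- * worker, and the enable routine re-enables only when
- * atomic_dec_and_test() brings the count back to zero, e.g.
- *
- * disable -> irq_cnt 0 -> 1 (interrupt masked)
- * disable -> irq_cnt 1 -> 2
- * enable -> irq_cnt 2 -> 1 (still masked)
- * enable -> irq_cnt 1 -> 0 (INTR_EN written, unmasked)
- */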
-
-static u32 ql_disable_completion_interrupt(struct ql_adapter *qdev, u32 intr)
-{
- u32 var = 0;
- struct intr_context *ctx;
-
- /* HW disables for us if we're running multiple MSI-X vectors
- * and it's not the default (zeroth) interrupt.
- */
- if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags) && intr))
- return 0;
-
- ctx = qdev->intr_context + intr;
- spin_lock(&qdev->hw_lock);
- if (!atomic_read(&ctx->irq_cnt)) {
- ql_write32(qdev, INTR_EN,
- ctx->intr_dis_mask);
- var = ql_read32(qdev, STS);
- }
- atomic_inc(&ctx->irq_cnt);
- spin_unlock(&qdev->hw_lock);
- return var;
-}
-
-static void ql_enable_all_completion_interrupts(struct ql_adapter *qdev)
-{
- int i;
- for (i = 0; i < qdev->intr_count; i++) {
- /* The enable call does an atomic_dec_and_test
- * and enables only if the result is zero.
- * So we precharge it here.
- */
- if (unlikely(!test_bit(QL_MSIX_ENABLED, &qdev->flags) ||
- i == 0))
- atomic_set(&qdev->intr_context[i].irq_cnt, 1);
- ql_enable_completion_interrupt(qdev, i);
- }
-}
-
-static int ql_validate_flash(struct ql_adapter *qdev, u32 size, const char *str)
-{
- int status, i;
- u16 csum = 0;
- __le16 *flash = (__le16 *)&qdev->flash;
-
- status = strncmp((char *)&qdev->flash, str, 4);
- if (status) {
- netif_err(qdev, ifup, qdev->ndev, "Invalid flash signature.\n");
- return status;
- }
-
- for (i = 0; i < size; i++)
- csum += le16_to_cpu(*flash++);
-
- if (csum)
- netif_err(qdev, ifup, qdev->ndev,
- "Invalid flash checksum, csum = 0x%.04x.\n", csum);
-
- return csum;
-}
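-
-/* Note on the checksum above: the flash image stores a 16-bit word
- * chosen so that the sum of every __le16 word in the region, checksum
- * word included, wraps to zero modulo 2^16 (e.g. if the other words
- * sum to 0xfffe, the stored checksum is 0x0002).  Any non-zero result
- * therefore indicates a corrupt image.
- */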
-
-static int ql_read_flash_word(struct ql_adapter *qdev, int offset, __le32 *data)
-{
- int status = 0;
- /* wait for reg to come ready */
- status = ql_wait_reg_rdy(qdev,
- FLASH_ADDR, FLASH_ADDR_RDY, FLASH_ADDR_ERR);
- if (status)
- goto exit;
- /* set up for reg read */
- ql_write32(qdev, FLASH_ADDR, FLASH_ADDR_R | offset);
- /* wait for reg to come ready */
- status = ql_wait_reg_rdy(qdev,
- FLASH_ADDR, FLASH_ADDR_RDY, FLASH_ADDR_ERR);
- if (status)
- goto exit;
- /* This data is stored on flash as an array of
- * __le32. Since ql_read32() returns cpu endian
- * we need to swap it back.
- */
- *data = cpu_to_le32(ql_read32(qdev, FLASH_DATA));
-exit:
- return status;
-}
-
-static int ql_get_8000_flash_params(struct ql_adapter *qdev)
-{
- u32 i, size;
- int status;
- __le32 *p = (__le32 *)&qdev->flash;
- u32 offset;
- u8 mac_addr[6];
-
- /* Get flash offset for function and adjust
- * for dword access.
- */
- if (!qdev->port)
- offset = FUNC0_FLASH_OFFSET / sizeof(u32);
- else
- offset = FUNC1_FLASH_OFFSET / sizeof(u32);
-
- if (ql_sem_spinlock(qdev, SEM_FLASH_MASK))
- return -ETIMEDOUT;
-
- size = sizeof(struct flash_params_8000) / sizeof(u32);
- for (i = 0; i < size; i++, p++) {
-		status = ql_read_flash_word(qdev, i + offset, p);
- if (status) {
- netif_err(qdev, ifup, qdev->ndev,
- "Error reading flash.\n");
- goto exit;
- }
- }
-
- status = ql_validate_flash(qdev,
- sizeof(struct flash_params_8000) / sizeof(u16),
- "8000");
- if (status) {
- netif_err(qdev, ifup, qdev->ndev, "Invalid flash.\n");
- status = -EINVAL;
- goto exit;
- }
-
- /* Extract either manufacturer or BOFM modified
- * MAC address.
- */
- if (qdev->flash.flash_params_8000.data_type1 == 2)
- memcpy(mac_addr,
- qdev->flash.flash_params_8000.mac_addr1,
- qdev->ndev->addr_len);
- else
- memcpy(mac_addr,
- qdev->flash.flash_params_8000.mac_addr,
- qdev->ndev->addr_len);
-
- if (!is_valid_ether_addr(mac_addr)) {
- netif_err(qdev, ifup, qdev->ndev, "Invalid MAC address.\n");
- status = -EINVAL;
- goto exit;
- }
-
- memcpy(qdev->ndev->dev_addr,
- mac_addr,
- qdev->ndev->addr_len);
-
-exit:
- ql_sem_unlock(qdev, SEM_FLASH_MASK);
- return status;
-}
-
-static int ql_get_8012_flash_params(struct ql_adapter *qdev)
-{
- int i;
- int status;
- __le32 *p = (__le32 *)&qdev->flash;
- u32 offset = 0;
- u32 size = sizeof(struct flash_params_8012) / sizeof(u32);
-
- /* Second function's parameters follow the first
- * function's.
- */
- if (qdev->port)
- offset = size;
-
- if (ql_sem_spinlock(qdev, SEM_FLASH_MASK))
- return -ETIMEDOUT;
-
- for (i = 0; i < size; i++, p++) {
-		status = ql_read_flash_word(qdev, i + offset, p);
- if (status) {
- netif_err(qdev, ifup, qdev->ndev,
- "Error reading flash.\n");
- goto exit;
- }
- }
-
- status = ql_validate_flash(qdev,
- sizeof(struct flash_params_8012) / sizeof(u16),
- "8012");
- if (status) {
- netif_err(qdev, ifup, qdev->ndev, "Invalid flash.\n");
- status = -EINVAL;
- goto exit;
- }
-
- if (!is_valid_ether_addr(qdev->flash.flash_params_8012.mac_addr)) {
- status = -EINVAL;
- goto exit;
- }
-
- memcpy(qdev->ndev->dev_addr,
- qdev->flash.flash_params_8012.mac_addr,
- qdev->ndev->addr_len);
-
-exit:
- ql_sem_unlock(qdev, SEM_FLASH_MASK);
- return status;
-}
-
-/* xgmac registers are located behind the xgmac_addr and xgmac_data
- * register pair. Each read/write requires us to wait for the ready
- * bit before reading/writing the data.
- */
-static int ql_write_xgmac_reg(struct ql_adapter *qdev, u32 reg, u32 data)
-{
- int status;
- /* wait for reg to come ready */
- status = ql_wait_reg_rdy(qdev,
- XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
- if (status)
- return status;
- /* write the data to the data reg */
- ql_write32(qdev, XGMAC_DATA, data);
- /* trigger the write */
- ql_write32(qdev, XGMAC_ADDR, reg);
- return status;
-}
-
-/* xgmac registers are located behind the xgmac_addr and xgmac_data
- * register pair. Each read/write requires us to wait for the ready
- * bit before reading/writing the data.
- */
-int ql_read_xgmac_reg(struct ql_adapter *qdev, u32 reg, u32 *data)
-{
- int status = 0;
- /* wait for reg to come ready */
- status = ql_wait_reg_rdy(qdev,
- XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
- if (status)
- goto exit;
- /* set up for reg read */
- ql_write32(qdev, XGMAC_ADDR, reg | XGMAC_ADDR_R);
- /* wait for reg to come ready */
- status = ql_wait_reg_rdy(qdev,
- XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
- if (status)
- goto exit;
- /* get the data */
- *data = ql_read32(qdev, XGMAC_DATA);
-exit:
- return status;
-}
-
-/* This is used for reading the 64-bit statistics regs. */
-int ql_read_xgmac_reg64(struct ql_adapter *qdev, u32 reg, u64 *data)
-{
- int status = 0;
- u32 hi = 0;
- u32 lo = 0;
-
- status = ql_read_xgmac_reg(qdev, reg, &lo);
- if (status)
- goto exit;
-
- status = ql_read_xgmac_reg(qdev, reg + 4, &hi);
- if (status)
- goto exit;
-
- *data = (u64) lo | ((u64) hi << 32);
-
-exit:
- return status;
-}
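-
-/* A minimal usage sketch for the helper above (the 0x200 offset and
- * the variable name are illustrative only, not registers defined in
- * this driver):
- *
- *	u64 frames = 0;
- *
- *	if (!ql_read_xgmac_reg64(qdev, 0x200, &frames))
- *		pr_info("xgmac counter: %llu\n", frames);
- *
- * The low dword is read first; since the two halves are fetched with
- * separate transactions, callers should treat the result as a
- * snapshot rather than an atomic 64-bit read.
- */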
-
-static int ql_8000_port_initialize(struct ql_adapter *qdev)
-{
- int status;
- /*
- * Get MPI firmware version for driver banner
-	 * and ethtool info.
- */
- status = ql_mb_about_fw(qdev);
- if (status)
- goto exit;
- status = ql_mb_get_fw_state(qdev);
- if (status)
- goto exit;
- /* Wake up a worker to get/set the TX/RX frame sizes. */
- queue_delayed_work(qdev->workqueue, &qdev->mpi_port_cfg_work, 0);
-exit:
- return status;
-}
-
-/* Take the MAC Core out of reset.
- * Enable statistics counting.
- * Take the transmitter/receiver out of reset.
- * This functionality may be done in the MPI firmware at a
- * later date.
- */
-static int ql_8012_port_initialize(struct ql_adapter *qdev)
-{
- int status = 0;
- u32 data;
-
- if (ql_sem_trylock(qdev, qdev->xg_sem_mask)) {
- /* Another function has the semaphore, so
- * wait for the port init bit to come ready.
- */
- netif_info(qdev, link, qdev->ndev,
- "Another function has the semaphore, so wait for the port init bit to come ready.\n");
- status = ql_wait_reg_rdy(qdev, STS, qdev->port_init, 0);
- if (status) {
- netif_crit(qdev, link, qdev->ndev,
- "Port initialize timed out.\n");
- }
- return status;
- }
-
-	netif_info(qdev, link, qdev->ndev, "Got xgmac semaphore!\n");
- /* Set the core reset. */
- status = ql_read_xgmac_reg(qdev, GLOBAL_CFG, &data);
- if (status)
- goto end;
- data |= GLOBAL_CFG_RESET;
- status = ql_write_xgmac_reg(qdev, GLOBAL_CFG, data);
- if (status)
- goto end;
-
- /* Clear the core reset and turn on jumbo for receiver. */
- data &= ~GLOBAL_CFG_RESET; /* Clear core reset. */
- data |= GLOBAL_CFG_JUMBO; /* Turn on jumbo. */
- data |= GLOBAL_CFG_TX_STAT_EN;
- data |= GLOBAL_CFG_RX_STAT_EN;
- status = ql_write_xgmac_reg(qdev, GLOBAL_CFG, data);
- if (status)
- goto end;
-
-	/* Enable the transmitter and clear its reset. */
- status = ql_read_xgmac_reg(qdev, TX_CFG, &data);
- if (status)
- goto end;
- data &= ~TX_CFG_RESET; /* Clear the TX MAC reset. */
- data |= TX_CFG_EN; /* Enable the transmitter. */
- status = ql_write_xgmac_reg(qdev, TX_CFG, data);
- if (status)
- goto end;
-
-	/* Enable the receiver and clear its reset. */
- status = ql_read_xgmac_reg(qdev, RX_CFG, &data);
- if (status)
- goto end;
- data &= ~RX_CFG_RESET; /* Clear the RX MAC reset. */
- data |= RX_CFG_EN; /* Enable the receiver. */
- status = ql_write_xgmac_reg(qdev, RX_CFG, data);
- if (status)
- goto end;
-
-	/* Turn on jumbo: set the max frame length to 0x2580 (9600 bytes). */
- status =
- ql_write_xgmac_reg(qdev, MAC_TX_PARAMS, MAC_TX_PARAMS_JUMBO | (0x2580 << 16));
- if (status)
- goto end;
- status =
- ql_write_xgmac_reg(qdev, MAC_RX_PARAMS, 0x2580);
- if (status)
- goto end;
-
- /* Signal to the world that the port is enabled. */
- ql_write32(qdev, STS, ((qdev->port_init << 16) | qdev->port_init));
-end:
- ql_sem_unlock(qdev, qdev->xg_sem_mask);
- return status;
-}
-
-static inline unsigned int ql_lbq_block_size(struct ql_adapter *qdev)
-{
- return PAGE_SIZE << qdev->lbq_buf_order;
-}
-
-/* Get the next large buffer. */
-static struct bq_desc *ql_get_curr_lbuf(struct rx_ring *rx_ring)
-{
- struct bq_desc *lbq_desc = &rx_ring->lbq[rx_ring->lbq_curr_idx];
- rx_ring->lbq_curr_idx++;
- if (rx_ring->lbq_curr_idx == rx_ring->lbq_len)
- rx_ring->lbq_curr_idx = 0;
- rx_ring->lbq_free_cnt++;
- return lbq_desc;
-}
-
-static struct bq_desc *ql_get_curr_lchunk(struct ql_adapter *qdev,
- struct rx_ring *rx_ring)
-{
- struct bq_desc *lbq_desc = ql_get_curr_lbuf(rx_ring);
-
- pci_dma_sync_single_for_cpu(qdev->pdev,
- dma_unmap_addr(lbq_desc, mapaddr),
- rx_ring->lbq_buf_size,
- PCI_DMA_FROMDEVICE);
-
- /* If it's the last chunk of our master page then
- * we unmap it.
- */
- if ((lbq_desc->p.pg_chunk.offset + rx_ring->lbq_buf_size)
- == ql_lbq_block_size(qdev))
- pci_unmap_page(qdev->pdev,
- lbq_desc->p.pg_chunk.map,
- ql_lbq_block_size(qdev),
- PCI_DMA_FROMDEVICE);
- return lbq_desc;
-}
-
-/* Get the next small buffer. */
-static struct bq_desc *ql_get_curr_sbuf(struct rx_ring *rx_ring)
-{
- struct bq_desc *sbq_desc = &rx_ring->sbq[rx_ring->sbq_curr_idx];
- rx_ring->sbq_curr_idx++;
- if (rx_ring->sbq_curr_idx == rx_ring->sbq_len)
- rx_ring->sbq_curr_idx = 0;
- rx_ring->sbq_free_cnt++;
- return sbq_desc;
-}
-
-/* Update an rx ring index. */
-static void ql_update_cq(struct rx_ring *rx_ring)
-{
- rx_ring->cnsmr_idx++;
- rx_ring->curr_entry++;
- if (unlikely(rx_ring->cnsmr_idx == rx_ring->cq_len)) {
- rx_ring->cnsmr_idx = 0;
- rx_ring->curr_entry = rx_ring->cq_base;
- }
-}
-
-static void ql_write_cq_idx(struct rx_ring *rx_ring)
-{
- ql_write_db_reg(rx_ring->cnsmr_idx, rx_ring->cnsmr_idx_db_reg);
-}
-
-static int ql_get_next_chunk(struct ql_adapter *qdev, struct rx_ring *rx_ring,
- struct bq_desc *lbq_desc)
-{
- if (!rx_ring->pg_chunk.page) {
- u64 map;
- rx_ring->pg_chunk.page = alloc_pages(__GFP_COMP | GFP_ATOMIC,
- qdev->lbq_buf_order);
- if (unlikely(!rx_ring->pg_chunk.page)) {
- netif_err(qdev, drv, qdev->ndev,
- "page allocation failed.\n");
- return -ENOMEM;
- }
- rx_ring->pg_chunk.offset = 0;
- map = pci_map_page(qdev->pdev, rx_ring->pg_chunk.page,
- 0, ql_lbq_block_size(qdev),
- PCI_DMA_FROMDEVICE);
- if (pci_dma_mapping_error(qdev->pdev, map)) {
- __free_pages(rx_ring->pg_chunk.page,
- qdev->lbq_buf_order);
- rx_ring->pg_chunk.page = NULL;
- netif_err(qdev, drv, qdev->ndev,
- "PCI mapping failed.\n");
- return -ENOMEM;
- }
- rx_ring->pg_chunk.map = map;
- rx_ring->pg_chunk.va = page_address(rx_ring->pg_chunk.page);
- }
-
- /* Copy the current master pg_chunk info
- * to the current descriptor.
- */
- lbq_desc->p.pg_chunk = rx_ring->pg_chunk;
-
- /* Adjust the master page chunk for next
- * buffer get.
- */
- rx_ring->pg_chunk.offset += rx_ring->lbq_buf_size;
- if (rx_ring->pg_chunk.offset == ql_lbq_block_size(qdev)) {
- rx_ring->pg_chunk.page = NULL;
- lbq_desc->p.pg_chunk.last_flag = 1;
- } else {
- rx_ring->pg_chunk.va += rx_ring->lbq_buf_size;
- get_page(rx_ring->pg_chunk.page);
- lbq_desc->p.pg_chunk.last_flag = 0;
- }
- return 0;
-}
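-
-/* Page-chunk lifecycle for the code above, with illustrative numbers
- * (assuming 4 KiB pages, lbq_buf_order = 1 and 2 KiB buffers): one
- * 8 KiB master block yields four chunks.  Every chunk except the last
- * takes an extra get_page() reference, and the DMA unmap of the whole
- * block is deferred until the last chunk is consumed in
- * ql_get_curr_lchunk().
- */
-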
-/* Process (refill) a large buffer queue. */
-static void ql_update_lbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
-{
- u32 clean_idx = rx_ring->lbq_clean_idx;
- u32 start_idx = clean_idx;
- struct bq_desc *lbq_desc;
- u64 map;
- int i;
-
- while (rx_ring->lbq_free_cnt > 32) {
- for (i = (rx_ring->lbq_clean_idx % 16); i < 16; i++) {
- netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
- "lbq: try cleaning clean_idx = %d.\n",
- clean_idx);
- lbq_desc = &rx_ring->lbq[clean_idx];
- if (ql_get_next_chunk(qdev, rx_ring, lbq_desc)) {
- rx_ring->lbq_clean_idx = clean_idx;
- netif_err(qdev, ifup, qdev->ndev,
-					  "Could not get a page chunk, i = %d, clean_idx = %d.\n",
- i, clean_idx);
- return;
- }
-
- map = lbq_desc->p.pg_chunk.map +
- lbq_desc->p.pg_chunk.offset;
- dma_unmap_addr_set(lbq_desc, mapaddr, map);
- dma_unmap_len_set(lbq_desc, maplen,
- rx_ring->lbq_buf_size);
- *lbq_desc->addr = cpu_to_le64(map);
-
- pci_dma_sync_single_for_device(qdev->pdev, map,
- rx_ring->lbq_buf_size,
- PCI_DMA_FROMDEVICE);
- clean_idx++;
- if (clean_idx == rx_ring->lbq_len)
- clean_idx = 0;
- }
-
- rx_ring->lbq_clean_idx = clean_idx;
- rx_ring->lbq_prod_idx += 16;
- if (rx_ring->lbq_prod_idx == rx_ring->lbq_len)
- rx_ring->lbq_prod_idx = 0;
- rx_ring->lbq_free_cnt -= 16;
- }
-
- if (start_idx != clean_idx) {
- netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
- "lbq: updating prod idx = %d.\n",
- rx_ring->lbq_prod_idx);
- ql_write_db_reg(rx_ring->lbq_prod_idx,
- rx_ring->lbq_prod_idx_db_reg);
- }
-}
-
-/* Process (refill) a small buffer queue. */
-static void ql_update_sbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
-{
- u32 clean_idx = rx_ring->sbq_clean_idx;
- u32 start_idx = clean_idx;
- struct bq_desc *sbq_desc;
- u64 map;
- int i;
-
- while (rx_ring->sbq_free_cnt > 16) {
- for (i = (rx_ring->sbq_clean_idx % 16); i < 16; i++) {
- sbq_desc = &rx_ring->sbq[clean_idx];
- netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
- "sbq: try cleaning clean_idx = %d.\n",
- clean_idx);
- if (sbq_desc->p.skb == NULL) {
- netif_printk(qdev, rx_status, KERN_DEBUG,
- qdev->ndev,
- "sbq: getting new skb for index %d.\n",
- sbq_desc->index);
- sbq_desc->p.skb =
- netdev_alloc_skb(qdev->ndev,
- SMALL_BUFFER_SIZE);
- if (sbq_desc->p.skb == NULL) {
- rx_ring->sbq_clean_idx = clean_idx;
- return;
- }
- skb_reserve(sbq_desc->p.skb, QLGE_SB_PAD);
- map = pci_map_single(qdev->pdev,
- sbq_desc->p.skb->data,
- rx_ring->sbq_buf_size,
- PCI_DMA_FROMDEVICE);
- if (pci_dma_mapping_error(qdev->pdev, map)) {
- netif_err(qdev, ifup, qdev->ndev,
- "PCI mapping failed.\n");
- rx_ring->sbq_clean_idx = clean_idx;
- dev_kfree_skb_any(sbq_desc->p.skb);
- sbq_desc->p.skb = NULL;
- return;
- }
- dma_unmap_addr_set(sbq_desc, mapaddr, map);
- dma_unmap_len_set(sbq_desc, maplen,
- rx_ring->sbq_buf_size);
- *sbq_desc->addr = cpu_to_le64(map);
- }
-
- clean_idx++;
- if (clean_idx == rx_ring->sbq_len)
- clean_idx = 0;
- }
- rx_ring->sbq_clean_idx = clean_idx;
- rx_ring->sbq_prod_idx += 16;
- if (rx_ring->sbq_prod_idx == rx_ring->sbq_len)
- rx_ring->sbq_prod_idx = 0;
- rx_ring->sbq_free_cnt -= 16;
- }
-
- if (start_idx != clean_idx) {
- netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
- "sbq: updating prod idx = %d.\n",
- rx_ring->sbq_prod_idx);
- ql_write_db_reg(rx_ring->sbq_prod_idx,
- rx_ring->sbq_prod_idx_db_reg);
- }
-}
-
-static void ql_update_buffer_queues(struct ql_adapter *qdev,
- struct rx_ring *rx_ring)
-{
- ql_update_sbq(qdev, rx_ring);
- ql_update_lbq(qdev, rx_ring);
-}
-
-/* Unmaps tx buffers. Can be called from send() if a pci mapping
- * fails at some stage, or from the interrupt when a tx completes.
- */
-static void ql_unmap_send(struct ql_adapter *qdev,
- struct tx_ring_desc *tx_ring_desc, int mapped)
-{
- int i;
- for (i = 0; i < mapped; i++) {
- if (i == 0 || (i == 7 && mapped > 7)) {
- /*
- * Unmap the skb->data area, or the
- * external sglist (AKA the Outbound
- * Address List (OAL)).
-			 * If it's the zeroth element, then it's
-			 * the skb->data area.  If it's the 7th
-			 * element and there are more than 6 frags,
-			 * then it's an OAL.
- */
- if (i == 7) {
- netif_printk(qdev, tx_done, KERN_DEBUG,
- qdev->ndev,
- "unmapping OAL area.\n");
- }
- pci_unmap_single(qdev->pdev,
- dma_unmap_addr(&tx_ring_desc->map[i],
- mapaddr),
- dma_unmap_len(&tx_ring_desc->map[i],
- maplen),
- PCI_DMA_TODEVICE);
- } else {
- netif_printk(qdev, tx_done, KERN_DEBUG, qdev->ndev,
- "unmapping frag %d.\n", i);
- pci_unmap_page(qdev->pdev,
- dma_unmap_addr(&tx_ring_desc->map[i],
- mapaddr),
- dma_unmap_len(&tx_ring_desc->map[i],
- maplen), PCI_DMA_TODEVICE);
- }
- }
-}
-
-/* Map the buffers for this transmit. This will return
- * NETDEV_TX_BUSY or NETDEV_TX_OK based on success.
- */
-static int ql_map_send(struct ql_adapter *qdev,
- struct ob_mac_iocb_req *mac_iocb_ptr,
- struct sk_buff *skb, struct tx_ring_desc *tx_ring_desc)
-{
- int len = skb_headlen(skb);
- dma_addr_t map;
- int frag_idx, err, map_idx = 0;
- struct tx_buf_desc *tbd = mac_iocb_ptr->tbd;
- int frag_cnt = skb_shinfo(skb)->nr_frags;
-
- if (frag_cnt) {
- netif_printk(qdev, tx_queued, KERN_DEBUG, qdev->ndev,
- "frag_cnt = %d.\n", frag_cnt);
- }
- /*
- * Map the skb buffer first.
- */
- map = pci_map_single(qdev->pdev, skb->data, len, PCI_DMA_TODEVICE);
-
- err = pci_dma_mapping_error(qdev->pdev, map);
- if (err) {
- netif_err(qdev, tx_queued, qdev->ndev,
- "PCI mapping failed with error: %d\n", err);
-
- return NETDEV_TX_BUSY;
- }
-
- tbd->len = cpu_to_le32(len);
- tbd->addr = cpu_to_le64(map);
- dma_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr, map);
- dma_unmap_len_set(&tx_ring_desc->map[map_idx], maplen, len);
- map_idx++;
-
- /*
- * This loop fills the remainder of the 8 address descriptors
- * in the IOCB. If there are more than 7 fragments, then the
- * eighth address desc will point to an external list (OAL).
- * When this happens, the remainder of the frags will be stored
- * in this list.
- */
- for (frag_idx = 0; frag_idx < frag_cnt; frag_idx++, map_idx++) {
- skb_frag_t *frag = &skb_shinfo(skb)->frags[frag_idx];
- tbd++;
- if (frag_idx == 6 && frag_cnt > 7) {
- /* Let's tack on an sglist.
- * Our control block will now
- * look like this:
- * iocb->seg[0] = skb->data
- * iocb->seg[1] = frag[0]
- * iocb->seg[2] = frag[1]
- * iocb->seg[3] = frag[2]
- * iocb->seg[4] = frag[3]
- * iocb->seg[5] = frag[4]
- * iocb->seg[6] = frag[5]
- * iocb->seg[7] = ptr to OAL (external sglist)
- * oal->seg[0] = frag[6]
- * oal->seg[1] = frag[7]
- * oal->seg[2] = frag[8]
- * oal->seg[3] = frag[9]
- * oal->seg[4] = frag[10]
- * etc...
- */
- /* Tack on the OAL in the eighth segment of IOCB. */
- map = pci_map_single(qdev->pdev, &tx_ring_desc->oal,
- sizeof(struct oal),
- PCI_DMA_TODEVICE);
- err = pci_dma_mapping_error(qdev->pdev, map);
- if (err) {
- netif_err(qdev, tx_queued, qdev->ndev,
- "PCI mapping outbound address list with error: %d\n",
- err);
- goto map_error;
- }
-
- tbd->addr = cpu_to_le64(map);
- /*
- * The length is the number of fragments
-			 * that remain to be mapped times the size
-			 * of one tx_buf_desc entry in the OAL.
- */
- tbd->len =
- cpu_to_le32((sizeof(struct tx_buf_desc) *
- (frag_cnt - frag_idx)) | TX_DESC_C);
- dma_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr,
- map);
- dma_unmap_len_set(&tx_ring_desc->map[map_idx], maplen,
- sizeof(struct oal));
- tbd = (struct tx_buf_desc *)&tx_ring_desc->oal;
- map_idx++;
- }
-
- map = skb_frag_dma_map(&qdev->pdev->dev, frag, 0, skb_frag_size(frag),
- DMA_TO_DEVICE);
-
- err = dma_mapping_error(&qdev->pdev->dev, map);
- if (err) {
- netif_err(qdev, tx_queued, qdev->ndev,
- "PCI mapping frags failed with error: %d.\n",
- err);
- goto map_error;
- }
-
- tbd->addr = cpu_to_le64(map);
- tbd->len = cpu_to_le32(skb_frag_size(frag));
- dma_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr, map);
- dma_unmap_len_set(&tx_ring_desc->map[map_idx], maplen,
- skb_frag_size(frag));
- }
- /* Save the number of segments we've mapped. */
- tx_ring_desc->map_cnt = map_idx;
- /* Terminate the last segment. */
- tbd->len = cpu_to_le32(le32_to_cpu(tbd->len) | TX_DESC_E);
- return NETDEV_TX_OK;
-
-map_error:
- /*
-	 * map_idx counts the segments that mapped successfully before
-	 * the failure (the skb->data area, any frags, and the OAL), so
-	 * we pass it in and exactly those segments get unmapped.
- */
- ql_unmap_send(qdev, tx_ring_desc, map_idx);
- return NETDEV_TX_BUSY;
-}
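-
-/* A worked example of the OAL length math above: with frag_cnt = 10,
- * the OAL is tacked on at frag_idx 6, so the eighth IOCB segment gets
- * len = sizeof(struct tx_buf_desc) * (10 - 6), i.e. room for frags
- * 6..9, with the TX_DESC_C flag OR'd in.  Frag 6 itself then lands in
- * oal->seg[0] on the next pass of the loop.
- */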
-
-/* Categorizing receive firmware frame errors */
-static void ql_categorize_rx_err(struct ql_adapter *qdev, u8 rx_err,
- struct rx_ring *rx_ring)
-{
- struct nic_stats *stats = &qdev->nic_stats;
-
- stats->rx_err_count++;
- rx_ring->rx_errors++;
-
- switch (rx_err & IB_MAC_IOCB_RSP_ERR_MASK) {
- case IB_MAC_IOCB_RSP_ERR_CODE_ERR:
- stats->rx_code_err++;
- break;
- case IB_MAC_IOCB_RSP_ERR_OVERSIZE:
- stats->rx_oversize_err++;
- break;
- case IB_MAC_IOCB_RSP_ERR_UNDERSIZE:
- stats->rx_undersize_err++;
- break;
- case IB_MAC_IOCB_RSP_ERR_PREAMBLE:
- stats->rx_preamble_err++;
- break;
- case IB_MAC_IOCB_RSP_ERR_FRAME_LEN:
- stats->rx_frame_len_err++;
- break;
- case IB_MAC_IOCB_RSP_ERR_CRC:
-		stats->rx_crc_err++;
-		break;
- default:
- break;
- }
-}
-
-/**
- * ql_update_mac_hdr_len - helper routine to update the mac header length
- * based on vlan tags if present
- */
-static void ql_update_mac_hdr_len(struct ql_adapter *qdev,
- struct ib_mac_iocb_rsp *ib_mac_rsp,
- void *page, size_t *len)
-{
- u16 *tags;
-
- if (qdev->ndev->features & NETIF_F_HW_VLAN_CTAG_RX)
- return;
- if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) {
- tags = (u16 *)page;
- /* Look for stacked vlan tags in ethertype field */
- if (tags[6] == ETH_P_8021Q &&
- tags[8] == ETH_P_8021Q)
- *len += 2 * VLAN_HLEN;
- else
- *len += VLAN_HLEN;
- }
-}
-
-/* Process an inbound completion from an rx ring. */
-static void ql_process_mac_rx_gro_page(struct ql_adapter *qdev,
- struct rx_ring *rx_ring,
- struct ib_mac_iocb_rsp *ib_mac_rsp,
- u32 length,
- u16 vlan_id)
-{
- struct sk_buff *skb;
- struct bq_desc *lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
- struct napi_struct *napi = &rx_ring->napi;
-
- /* Frame error, so drop the packet. */
- if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
- ql_categorize_rx_err(qdev, ib_mac_rsp->flags2, rx_ring);
- put_page(lbq_desc->p.pg_chunk.page);
- return;
- }
- napi->dev = qdev->ndev;
-
- skb = napi_get_frags(napi);
- if (!skb) {
- netif_err(qdev, drv, qdev->ndev,
- "Couldn't get an skb, exiting.\n");
- rx_ring->rx_dropped++;
- put_page(lbq_desc->p.pg_chunk.page);
- return;
- }
- prefetch(lbq_desc->p.pg_chunk.va);
- __skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
- lbq_desc->p.pg_chunk.page,
- lbq_desc->p.pg_chunk.offset,
- length);
-
- skb->len += length;
- skb->data_len += length;
- skb->truesize += length;
- skb_shinfo(skb)->nr_frags++;
-
- rx_ring->rx_packets++;
- rx_ring->rx_bytes += length;
- skb->ip_summed = CHECKSUM_UNNECESSARY;
- skb_record_rx_queue(skb, rx_ring->cq_id);
- if (vlan_id != 0xffff)
- __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_id);
- napi_gro_frags(napi);
-}
-
-/* Process an inbound completion from an rx ring. */
-static void ql_process_mac_rx_page(struct ql_adapter *qdev,
- struct rx_ring *rx_ring,
- struct ib_mac_iocb_rsp *ib_mac_rsp,
- u32 length,
- u16 vlan_id)
-{
- struct net_device *ndev = qdev->ndev;
- struct sk_buff *skb = NULL;
- void *addr;
- struct bq_desc *lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
- struct napi_struct *napi = &rx_ring->napi;
- size_t hlen = ETH_HLEN;
-
- skb = netdev_alloc_skb(ndev, length);
- if (!skb) {
- rx_ring->rx_dropped++;
- put_page(lbq_desc->p.pg_chunk.page);
- return;
- }
-
- addr = lbq_desc->p.pg_chunk.va;
- prefetch(addr);
-
- /* Frame error, so drop the packet. */
- if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
- ql_categorize_rx_err(qdev, ib_mac_rsp->flags2, rx_ring);
- goto err_out;
- }
-
-	/* Update the MAC header length. */
- ql_update_mac_hdr_len(qdev, ib_mac_rsp, addr, &hlen);
-
- /* The max framesize filter on this chip is set higher than
- * MTU since FCoE uses 2k frames.
- */
- if (skb->len > ndev->mtu + hlen) {
- netif_err(qdev, drv, qdev->ndev,
-			  "Frame too long, dropping.\n");
- rx_ring->rx_dropped++;
- goto err_out;
- }
- skb_put_data(skb, addr, hlen);
- netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
- "%d bytes of headers and data in large. Chain page to new skb and pull tail.\n",
- length);
- skb_fill_page_desc(skb, 0, lbq_desc->p.pg_chunk.page,
- lbq_desc->p.pg_chunk.offset + hlen,
- length - hlen);
- skb->len += length - hlen;
- skb->data_len += length - hlen;
- skb->truesize += length - hlen;
-
- rx_ring->rx_packets++;
- rx_ring->rx_bytes += skb->len;
- skb->protocol = eth_type_trans(skb, ndev);
- skb_checksum_none_assert(skb);
-
- if ((ndev->features & NETIF_F_RXCSUM) &&
- !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
- /* TCP frame. */
- if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
- netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
- "TCP checksum done!\n");
- skb->ip_summed = CHECKSUM_UNNECESSARY;
- } else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
- (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
- /* Unfragmented ipv4 UDP frame. */
- struct iphdr *iph =
- (struct iphdr *)((u8 *)addr + hlen);
- if (!(iph->frag_off &
- htons(IP_MF|IP_OFFSET))) {
- skb->ip_summed = CHECKSUM_UNNECESSARY;
- netif_printk(qdev, rx_status, KERN_DEBUG,
- qdev->ndev,
- "UDP checksum done!\n");
- }
- }
- }
-
- skb_record_rx_queue(skb, rx_ring->cq_id);
- if (vlan_id != 0xffff)
- __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_id);
- if (skb->ip_summed == CHECKSUM_UNNECESSARY)
- napi_gro_receive(napi, skb);
- else
- netif_receive_skb(skb);
- return;
-err_out:
- dev_kfree_skb_any(skb);
- put_page(lbq_desc->p.pg_chunk.page);
-}
-
-/* Process an inbound completion from an rx ring. */
-static void ql_process_mac_rx_skb(struct ql_adapter *qdev,
- struct rx_ring *rx_ring,
- struct ib_mac_iocb_rsp *ib_mac_rsp,
- u32 length,
- u16 vlan_id)
-{
- struct net_device *ndev = qdev->ndev;
- struct sk_buff *skb = NULL;
- struct sk_buff *new_skb = NULL;
- struct bq_desc *sbq_desc = ql_get_curr_sbuf(rx_ring);
-
- skb = sbq_desc->p.skb;
- /* Allocate new_skb and copy */
- new_skb = netdev_alloc_skb(qdev->ndev, length + NET_IP_ALIGN);
- if (new_skb == NULL) {
- rx_ring->rx_dropped++;
- return;
- }
- skb_reserve(new_skb, NET_IP_ALIGN);
-
- pci_dma_sync_single_for_cpu(qdev->pdev,
- dma_unmap_addr(sbq_desc, mapaddr),
- dma_unmap_len(sbq_desc, maplen),
- PCI_DMA_FROMDEVICE);
-
- skb_put_data(new_skb, skb->data, length);
-
- pci_dma_sync_single_for_device(qdev->pdev,
- dma_unmap_addr(sbq_desc, mapaddr),
- dma_unmap_len(sbq_desc, maplen),
- PCI_DMA_FROMDEVICE);
- skb = new_skb;
-
- /* Frame error, so drop the packet. */
- if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
- ql_categorize_rx_err(qdev, ib_mac_rsp->flags2, rx_ring);
- dev_kfree_skb_any(skb);
- return;
- }
-
- /* loopback self test for ethtool */
- if (test_bit(QL_SELFTEST, &qdev->flags)) {
- ql_check_lb_frame(qdev, skb);
- dev_kfree_skb_any(skb);
- return;
- }
-
- /* The max framesize filter on this chip is set higher than
- * MTU since FCoE uses 2k frames.
- */
- if (skb->len > ndev->mtu + ETH_HLEN) {
- dev_kfree_skb_any(skb);
- rx_ring->rx_dropped++;
- return;
- }
-
- prefetch(skb->data);
- if (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) {
- netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
- "%s Multicast.\n",
- (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
- IB_MAC_IOCB_RSP_M_HASH ? "Hash" :
- (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
- IB_MAC_IOCB_RSP_M_REG ? "Registered" :
- (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
- IB_MAC_IOCB_RSP_M_PROM ? "Promiscuous" : "");
- }
- if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_P)
- netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
- "Promiscuous Packet.\n");
-
- rx_ring->rx_packets++;
- rx_ring->rx_bytes += skb->len;
- skb->protocol = eth_type_trans(skb, ndev);
- skb_checksum_none_assert(skb);
-
- /* If rx checksum is on, and there are no
- * csum or frame errors.
- */
- if ((ndev->features & NETIF_F_RXCSUM) &&
- !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
- /* TCP frame. */
- if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
- netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
- "TCP checksum done!\n");
- skb->ip_summed = CHECKSUM_UNNECESSARY;
- } else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
- (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
- /* Unfragmented ipv4 UDP frame. */
- struct iphdr *iph = (struct iphdr *) skb->data;
- if (!(iph->frag_off &
- htons(IP_MF|IP_OFFSET))) {
- skb->ip_summed = CHECKSUM_UNNECESSARY;
- netif_printk(qdev, rx_status, KERN_DEBUG,
- qdev->ndev,
- "UDP checksum done!\n");
- }
- }
- }
-
- skb_record_rx_queue(skb, rx_ring->cq_id);
- if (vlan_id != 0xffff)
- __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_id);
- if (skb->ip_summed == CHECKSUM_UNNECESSARY)
- napi_gro_receive(&rx_ring->napi, skb);
- else
- netif_receive_skb(skb);
-}
-
-static void ql_realign_skb(struct sk_buff *skb, int len)
-{
- void *temp_addr = skb->data;
-
- /* Undo the skb_reserve(skb,32) we did before
- * giving to hardware, and realign data on
- * a 2-byte boundary.
- */
- skb->data -= QLGE_SB_PAD - NET_IP_ALIGN;
- skb->tail -= QLGE_SB_PAD - NET_IP_ALIGN;
- memmove(skb->data, temp_addr, len);
-}
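-
-/* Concretely, assuming QLGE_SB_PAD is 32 and NET_IP_ALIGN is 2, the
- * data is moved back by 30 bytes: the 14-byte Ethernet header then
- * starts at a 2-byte offset, which leaves the IP header that follows
- * it aligned on a 4-byte boundary.
- */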
-
-/*
- * This function builds an skb for the given inbound
- * completion. It will be rewritten for readability in the near
- * future, but for now it works well.
- */
-static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev,
- struct rx_ring *rx_ring,
- struct ib_mac_iocb_rsp *ib_mac_rsp)
-{
- struct bq_desc *lbq_desc;
- struct bq_desc *sbq_desc;
- struct sk_buff *skb = NULL;
- u32 length = le32_to_cpu(ib_mac_rsp->data_len);
- u32 hdr_len = le32_to_cpu(ib_mac_rsp->hdr_len);
- size_t hlen = ETH_HLEN;
-
- /*
- * Handle the header buffer if present.
- */
- if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV &&
- ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
- netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
- "Header of %d bytes in small buffer.\n", hdr_len);
- /*
- * Headers fit nicely into a small buffer.
- */
- sbq_desc = ql_get_curr_sbuf(rx_ring);
- pci_unmap_single(qdev->pdev,
- dma_unmap_addr(sbq_desc, mapaddr),
- dma_unmap_len(sbq_desc, maplen),
- PCI_DMA_FROMDEVICE);
- skb = sbq_desc->p.skb;
- ql_realign_skb(skb, hdr_len);
- skb_put(skb, hdr_len);
- sbq_desc->p.skb = NULL;
- }
-
- /*
- * Handle the data buffer(s).
- */
- if (unlikely(!length)) { /* Is there data too? */
- netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
- "No Data buffer in this packet.\n");
- return skb;
- }
-
- if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DS) {
- if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
- netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
- "Headers in small, data of %d bytes in small, combine them.\n",
- length);
- /*
- * Data is less than small buffer size so it's
- * stuffed in a small buffer.
- * For this case we append the data
- * from the "data" small buffer to the "header" small
- * buffer.
- */
- sbq_desc = ql_get_curr_sbuf(rx_ring);
- pci_dma_sync_single_for_cpu(qdev->pdev,
- dma_unmap_addr
- (sbq_desc, mapaddr),
- dma_unmap_len
- (sbq_desc, maplen),
- PCI_DMA_FROMDEVICE);
- skb_put_data(skb, sbq_desc->p.skb->data, length);
- pci_dma_sync_single_for_device(qdev->pdev,
- dma_unmap_addr
- (sbq_desc,
- mapaddr),
- dma_unmap_len
- (sbq_desc,
- maplen),
- PCI_DMA_FROMDEVICE);
- } else {
- netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
- "%d bytes in a single small buffer.\n",
- length);
- sbq_desc = ql_get_curr_sbuf(rx_ring);
- skb = sbq_desc->p.skb;
- ql_realign_skb(skb, length);
- skb_put(skb, length);
- pci_unmap_single(qdev->pdev,
- dma_unmap_addr(sbq_desc,
- mapaddr),
- dma_unmap_len(sbq_desc,
- maplen),
- PCI_DMA_FROMDEVICE);
- sbq_desc->p.skb = NULL;
- }
- } else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) {
- if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
- netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
- "Header in small, %d bytes in large. Chain large to small!\n",
- length);
- /*
- * The data is in a single large buffer. We
- * chain it to the header buffer's skb and let
- * it rip.
- */
- lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
- netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
- "Chaining page at offset = %d, for %d bytes to skb.\n",
- lbq_desc->p.pg_chunk.offset, length);
- skb_fill_page_desc(skb, 0, lbq_desc->p.pg_chunk.page,
- lbq_desc->p.pg_chunk.offset,
- length);
- skb->len += length;
- skb->data_len += length;
- skb->truesize += length;
- } else {
- /*
- * The headers and data are in a single large buffer. We
- * copy it to a new skb and let it go. This can happen with
- * jumbo mtu on a non-TCP/UDP frame.
- */
- lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
- skb = netdev_alloc_skb(qdev->ndev, length);
- if (skb == NULL) {
- netif_printk(qdev, probe, KERN_DEBUG, qdev->ndev,
- "No skb available, drop the packet.\n");
- return NULL;
- }
- pci_unmap_page(qdev->pdev,
- dma_unmap_addr(lbq_desc,
- mapaddr),
- dma_unmap_len(lbq_desc, maplen),
- PCI_DMA_FROMDEVICE);
- skb_reserve(skb, NET_IP_ALIGN);
- netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
- "%d bytes of headers and data in large. Chain page to new skb and pull tail.\n",
- length);
- skb_fill_page_desc(skb, 0,
- lbq_desc->p.pg_chunk.page,
- lbq_desc->p.pg_chunk.offset,
- length);
- skb->len += length;
- skb->data_len += length;
- skb->truesize += length;
- ql_update_mac_hdr_len(qdev, ib_mac_rsp,
- lbq_desc->p.pg_chunk.va,
- &hlen);
- __pskb_pull_tail(skb, hlen);
- }
- } else {
- /*
- * The data is in a chain of large buffers
- * pointed to by a small buffer. We loop
-		 * through and chain them to our small header
- * buffer's skb.
-		 * frags: there are at most 18 frags and our small
-		 * buffer will hold 32 of them.  In practice we use
-		 * at most 3 for our 9000 byte jumbo frames, so if
-		 * the MTU goes up we could eventually be in trouble.
- */
- int size, i = 0;
- sbq_desc = ql_get_curr_sbuf(rx_ring);
- pci_unmap_single(qdev->pdev,
- dma_unmap_addr(sbq_desc, mapaddr),
- dma_unmap_len(sbq_desc, maplen),
- PCI_DMA_FROMDEVICE);
- if (!(ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS)) {
- /*
-			 * This is a non-TCP/UDP IP frame, so
- * the headers aren't split into a small
- * buffer. We have to use the small buffer
- * that contains our sg list as our skb to
- * send upstairs. Copy the sg list here to
- * a local buffer and use it to find the
- * pages to chain.
- */
- netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
- "%d bytes of headers & data in chain of large.\n",
- length);
- skb = sbq_desc->p.skb;
- sbq_desc->p.skb = NULL;
- skb_reserve(skb, NET_IP_ALIGN);
- }
- do {
- lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
- size = (length < rx_ring->lbq_buf_size) ? length :
- rx_ring->lbq_buf_size;
-
- netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
- "Adding page %d to skb for %d bytes.\n",
- i, size);
- skb_fill_page_desc(skb, i,
- lbq_desc->p.pg_chunk.page,
- lbq_desc->p.pg_chunk.offset,
- size);
- skb->len += size;
- skb->data_len += size;
- skb->truesize += size;
- length -= size;
- i++;
- } while (length > 0);
- ql_update_mac_hdr_len(qdev, ib_mac_rsp, lbq_desc->p.pg_chunk.va,
- &hlen);
- __pskb_pull_tail(skb, hlen);
- }
- return skb;
-}
-
-/* Process an inbound completion from an rx ring. */
-static void ql_process_mac_split_rx_intr(struct ql_adapter *qdev,
- struct rx_ring *rx_ring,
- struct ib_mac_iocb_rsp *ib_mac_rsp,
- u16 vlan_id)
-{
- struct net_device *ndev = qdev->ndev;
- struct sk_buff *skb = NULL;
-
- QL_DUMP_IB_MAC_RSP(ib_mac_rsp);
-
- skb = ql_build_rx_skb(qdev, rx_ring, ib_mac_rsp);
- if (unlikely(!skb)) {
- netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
- "No skb available, drop packet.\n");
- rx_ring->rx_dropped++;
- return;
- }
-
- /* Frame error, so drop the packet. */
- if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
- ql_categorize_rx_err(qdev, ib_mac_rsp->flags2, rx_ring);
- dev_kfree_skb_any(skb);
- return;
- }
-
- /* The max framesize filter on this chip is set higher than
- * MTU since FCoE uses 2k frames.
- */
- if (skb->len > ndev->mtu + ETH_HLEN) {
- dev_kfree_skb_any(skb);
- rx_ring->rx_dropped++;
- return;
- }
-
- /* loopback self test for ethtool */
- if (test_bit(QL_SELFTEST, &qdev->flags)) {
- ql_check_lb_frame(qdev, skb);
- dev_kfree_skb_any(skb);
- return;
- }
-
- prefetch(skb->data);
- if (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) {
- netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, "%s Multicast.\n",
- (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
- IB_MAC_IOCB_RSP_M_HASH ? "Hash" :
- (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
- IB_MAC_IOCB_RSP_M_REG ? "Registered" :
- (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
- IB_MAC_IOCB_RSP_M_PROM ? "Promiscuous" : "");
- rx_ring->rx_multicast++;
- }
- if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_P) {
- netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
- "Promiscuous Packet.\n");
- }
-
- skb->protocol = eth_type_trans(skb, ndev);
- skb_checksum_none_assert(skb);
-
- /* If rx checksum is on, and there are no
- * csum or frame errors.
- */
- if ((ndev->features & NETIF_F_RXCSUM) &&
- !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
- /* TCP frame. */
- if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
- netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
- "TCP checksum done!\n");
- skb->ip_summed = CHECKSUM_UNNECESSARY;
- } else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
- (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
- /* Unfragmented ipv4 UDP frame. */
- struct iphdr *iph = (struct iphdr *) skb->data;
- if (!(iph->frag_off &
- htons(IP_MF|IP_OFFSET))) {
- skb->ip_summed = CHECKSUM_UNNECESSARY;
- netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
-						     "UDP checksum done!\n");
- }
- }
- }
-
- rx_ring->rx_packets++;
- rx_ring->rx_bytes += skb->len;
- skb_record_rx_queue(skb, rx_ring->cq_id);
- if (vlan_id != 0xffff)
- __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_id);
- if (skb->ip_summed == CHECKSUM_UNNECESSARY)
- napi_gro_receive(&rx_ring->napi, skb);
- else
- netif_receive_skb(skb);
-}
-
-/* Process an inbound completion from an rx ring. */
-static unsigned long ql_process_mac_rx_intr(struct ql_adapter *qdev,
- struct rx_ring *rx_ring,
- struct ib_mac_iocb_rsp *ib_mac_rsp)
-{
- u32 length = le32_to_cpu(ib_mac_rsp->data_len);
- u16 vlan_id = ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) &&
- (qdev->ndev->features & NETIF_F_HW_VLAN_CTAG_RX)) ?
- ((le16_to_cpu(ib_mac_rsp->vlan_id) &
- IB_MAC_IOCB_RSP_VLAN_MASK)) : 0xffff;
-
- QL_DUMP_IB_MAC_RSP(ib_mac_rsp);
-
- if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV) {
- /* The data and headers are split into
- * separate buffers.
- */
- ql_process_mac_split_rx_intr(qdev, rx_ring, ib_mac_rsp,
- vlan_id);
- } else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DS) {
- /* The data fit in a single small buffer.
- * Allocate a new skb, copy the data and
- * return the buffer to the free pool.
- */
- ql_process_mac_rx_skb(qdev, rx_ring, ib_mac_rsp,
- length, vlan_id);
- } else if ((ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) &&
- !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK) &&
- (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T)) {
- /* TCP packet in a page chunk that's been checksummed.
- * Tack it on to our GRO skb and let it go.
- */
- ql_process_mac_rx_gro_page(qdev, rx_ring, ib_mac_rsp,
- length, vlan_id);
- } else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) {
- /* Non-TCP packet in a page chunk. Allocate an
- * skb, tack it on frags, and send it up.
- */
- ql_process_mac_rx_page(qdev, rx_ring, ib_mac_rsp,
- length, vlan_id);
- } else {
- /* Non-TCP/UDP large frames that span multiple buffers
-		 * can be processed correctly by the split frame logic.
- */
- ql_process_mac_split_rx_intr(qdev, rx_ring, ib_mac_rsp,
- vlan_id);
- }
-
- return (unsigned long)length;
-}
-
-/* Process an outbound completion from an rx ring. */
-static void ql_process_mac_tx_intr(struct ql_adapter *qdev,
- struct ob_mac_iocb_rsp *mac_rsp)
-{
- struct tx_ring *tx_ring;
- struct tx_ring_desc *tx_ring_desc;
-
- QL_DUMP_OB_MAC_RSP(mac_rsp);
- tx_ring = &qdev->tx_ring[mac_rsp->txq_idx];
- tx_ring_desc = &tx_ring->q[mac_rsp->tid];
- ql_unmap_send(qdev, tx_ring_desc, tx_ring_desc->map_cnt);
- tx_ring->tx_bytes += (tx_ring_desc->skb)->len;
- tx_ring->tx_packets++;
- dev_kfree_skb(tx_ring_desc->skb);
- tx_ring_desc->skb = NULL;
-
- if (unlikely(mac_rsp->flags1 & (OB_MAC_IOCB_RSP_E |
- OB_MAC_IOCB_RSP_S |
- OB_MAC_IOCB_RSP_L |
- OB_MAC_IOCB_RSP_P | OB_MAC_IOCB_RSP_B))) {
- if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_E) {
- netif_warn(qdev, tx_done, qdev->ndev,
- "Total descriptor length did not match transfer length.\n");
- }
- if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_S) {
- netif_warn(qdev, tx_done, qdev->ndev,
- "Frame too short to be valid, not sent.\n");
- }
- if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_L) {
- netif_warn(qdev, tx_done, qdev->ndev,
- "Frame too long, but sent anyway.\n");
- }
- if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_B) {
- netif_warn(qdev, tx_done, qdev->ndev,
- "PCI backplane error. Frame not sent.\n");
- }
- }
- atomic_inc(&tx_ring->tx_count);
-}
-
-/* Fire up a handler to reset the MPI processor. */
-void ql_queue_fw_error(struct ql_adapter *qdev)
-{
- ql_link_off(qdev);
- queue_delayed_work(qdev->workqueue, &qdev->mpi_reset_work, 0);
-}
-
-void ql_queue_asic_error(struct ql_adapter *qdev)
-{
- ql_link_off(qdev);
- ql_disable_interrupts(qdev);
- /* Clear adapter up bit to signal the recovery
- * process that it shouldn't kill the reset worker
- * thread
- */
- clear_bit(QL_ADAPTER_UP, &qdev->flags);
-	/* Set the asic recovery bit to tell the reset process that we
-	 * are in fatal-error recovery rather than a normal close.
-	 */
- set_bit(QL_ASIC_RECOVERY, &qdev->flags);
- queue_delayed_work(qdev->workqueue, &qdev->asic_reset_work, 0);
-}
-
-static void ql_process_chip_ae_intr(struct ql_adapter *qdev,
- struct ib_ae_iocb_rsp *ib_ae_rsp)
-{
- switch (ib_ae_rsp->event) {
- case MGMT_ERR_EVENT:
- netif_err(qdev, rx_err, qdev->ndev,
- "Management Processor Fatal Error.\n");
- ql_queue_fw_error(qdev);
- return;
-
- case CAM_LOOKUP_ERR_EVENT:
-		netdev_err(qdev->ndev, "CAM lookup returned multiple hits.\n");
- netdev_err(qdev->ndev, "This event shouldn't occur.\n");
- ql_queue_asic_error(qdev);
- return;
-
- case SOFT_ECC_ERROR_EVENT:
- netdev_err(qdev->ndev, "Soft ECC error detected.\n");
- ql_queue_asic_error(qdev);
- break;
-
- case PCI_ERR_ANON_BUF_RD:
- netdev_err(qdev->ndev, "PCI error occurred when reading "
- "anonymous buffers from rx_ring %d.\n",
- ib_ae_rsp->q_id);
- ql_queue_asic_error(qdev);
- break;
-
- default:
- netif_err(qdev, drv, qdev->ndev, "Unexpected event %d.\n",
- ib_ae_rsp->event);
- ql_queue_asic_error(qdev);
- break;
- }
-}
-
-static int ql_clean_outbound_rx_ring(struct rx_ring *rx_ring)
-{
- struct ql_adapter *qdev = rx_ring->qdev;
- u32 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
- struct ob_mac_iocb_rsp *net_rsp = NULL;
- int count = 0;
-
- struct tx_ring *tx_ring;
- /* While there are entries in the completion queue. */
- while (prod != rx_ring->cnsmr_idx) {
-
- netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
- "cq_id = %d, prod = %d, cnsmr = %d\n",
- rx_ring->cq_id, prod, rx_ring->cnsmr_idx);
-
- net_rsp = (struct ob_mac_iocb_rsp *)rx_ring->curr_entry;
- rmb();
- switch (net_rsp->opcode) {
-
- case OPCODE_OB_MAC_TSO_IOCB:
- case OPCODE_OB_MAC_IOCB:
- ql_process_mac_tx_intr(qdev, net_rsp);
- break;
- default:
- netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
- "Hit default case, not handled! dropping the packet, opcode = %x.\n",
- net_rsp->opcode);
- }
- count++;
- ql_update_cq(rx_ring);
- prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
- }
- if (!net_rsp)
- return 0;
- ql_write_cq_idx(rx_ring);
- tx_ring = &qdev->tx_ring[net_rsp->txq_idx];
- if (__netif_subqueue_stopped(qdev->ndev, tx_ring->wq_id)) {
- if ((atomic_read(&tx_ring->tx_count) > (tx_ring->wq_len / 4)))
- /*
- * The queue got stopped because the tx_ring was full.
- * Wake it up, because it's now at least 25% empty.
- */
- netif_wake_subqueue(qdev->ndev, tx_ring->wq_id);
- }
-
- return count;
-}
-
-static int ql_clean_inbound_rx_ring(struct rx_ring *rx_ring, int budget)
-{
- struct ql_adapter *qdev = rx_ring->qdev;
- u32 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
- struct ql_net_rsp_iocb *net_rsp;
- int count = 0;
-
- /* While there are entries in the completion queue. */
- while (prod != rx_ring->cnsmr_idx) {
-
- netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
- "cq_id = %d, prod = %d, cnsmr = %d\n",
- rx_ring->cq_id, prod, rx_ring->cnsmr_idx);
-
- net_rsp = rx_ring->curr_entry;
- rmb();
- switch (net_rsp->opcode) {
- case OPCODE_IB_MAC_IOCB:
- ql_process_mac_rx_intr(qdev, rx_ring,
- (struct ib_mac_iocb_rsp *)
- net_rsp);
- break;
-
- case OPCODE_IB_AE_IOCB:
- ql_process_chip_ae_intr(qdev, (struct ib_ae_iocb_rsp *)
- net_rsp);
- break;
- default:
- netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
- "Hit default case, not handled! dropping the packet, opcode = %x.\n",
- net_rsp->opcode);
- break;
- }
- count++;
- ql_update_cq(rx_ring);
- prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
- if (count == budget)
- break;
- }
- ql_update_buffer_queues(qdev, rx_ring);
- ql_write_cq_idx(rx_ring);
- return count;
-}
-
-static int ql_napi_poll_msix(struct napi_struct *napi, int budget)
-{
- struct rx_ring *rx_ring = container_of(napi, struct rx_ring, napi);
- struct ql_adapter *qdev = rx_ring->qdev;
- struct rx_ring *trx_ring;
- int i, work_done = 0;
- struct intr_context *ctx = &qdev->intr_context[rx_ring->cq_id];
-
- netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
- "Enter, NAPI POLL cq_id = %d.\n", rx_ring->cq_id);
-
- /* Service the TX rings first. They start
- * right after the RSS rings. */
- for (i = qdev->rss_ring_count; i < qdev->rx_ring_count; i++) {
- trx_ring = &qdev->rx_ring[i];
- /* If this TX completion ring belongs to this vector and
- * it's not empty then service it.
- */
- if ((ctx->irq_mask & (1 << trx_ring->cq_id)) &&
- (ql_read_sh_reg(trx_ring->prod_idx_sh_reg) !=
- trx_ring->cnsmr_idx)) {
- netif_printk(qdev, intr, KERN_DEBUG, qdev->ndev,
- "%s: Servicing TX completion ring %d.\n",
- __func__, trx_ring->cq_id);
- ql_clean_outbound_rx_ring(trx_ring);
- }
- }
-
- /*
- * Now service the RSS ring if it's active.
- */
- if (ql_read_sh_reg(rx_ring->prod_idx_sh_reg) !=
- rx_ring->cnsmr_idx) {
- netif_printk(qdev, intr, KERN_DEBUG, qdev->ndev,
- "%s: Servicing RX completion ring %d.\n",
- __func__, rx_ring->cq_id);
- work_done = ql_clean_inbound_rx_ring(rx_ring, budget);
- }
-
- if (work_done < budget) {
- napi_complete_done(napi, work_done);
- ql_enable_completion_interrupt(qdev, rx_ring->irq);
- }
- return work_done;
-}
-
-static void qlge_vlan_mode(struct net_device *ndev, netdev_features_t features)
-{
- struct ql_adapter *qdev = netdev_priv(ndev);
-
- if (features & NETIF_F_HW_VLAN_CTAG_RX) {
- ql_write32(qdev, NIC_RCV_CFG, NIC_RCV_CFG_VLAN_MASK |
- NIC_RCV_CFG_VLAN_MATCH_AND_NON);
- } else {
- ql_write32(qdev, NIC_RCV_CFG, NIC_RCV_CFG_VLAN_MASK);
- }
-}
-
-/**
- * qlge_update_hw_vlan_features - helper routine to reinitialize the adapter
- * based on the features to enable/disable hardware vlan accel
- */
-static int qlge_update_hw_vlan_features(struct net_device *ndev,
- netdev_features_t features)
-{
- struct ql_adapter *qdev = netdev_priv(ndev);
- int status = 0;
- bool need_restart = netif_running(ndev);
-
- if (need_restart) {
- status = ql_adapter_down(qdev);
- if (status) {
- netif_err(qdev, link, qdev->ndev,
- "Failed to bring down the adapter\n");
- return status;
- }
- }
-
-	/* Update the features with the requested changes. */
- ndev->features = features;
-
- if (need_restart) {
- status = ql_adapter_up(qdev);
- if (status) {
- netif_err(qdev, link, qdev->ndev,
- "Failed to bring up the adapter\n");
- return status;
- }
- }
-
- return status;
-}
-
-static int qlge_set_features(struct net_device *ndev,
- netdev_features_t features)
-{
- netdev_features_t changed = ndev->features ^ features;
- int err;
-
- if (changed & NETIF_F_HW_VLAN_CTAG_RX) {
- /* Update the behavior of vlan accel in the adapter */
- err = qlge_update_hw_vlan_features(ndev, features);
- if (err)
- return err;
-
- qlge_vlan_mode(ndev, features);
- }
-
- return 0;
-}
-
-static int __qlge_vlan_rx_add_vid(struct ql_adapter *qdev, u16 vid)
-{
- u32 enable_bit = MAC_ADDR_E;
- int err;
-
- err = ql_set_mac_addr_reg(qdev, (u8 *) &enable_bit,
- MAC_ADDR_TYPE_VLAN, vid);
- if (err)
- netif_err(qdev, ifup, qdev->ndev,
- "Failed to init vlan address.\n");
- return err;
-}
-
-static int qlge_vlan_rx_add_vid(struct net_device *ndev, __be16 proto, u16 vid)
-{
- struct ql_adapter *qdev = netdev_priv(ndev);
- int status;
- int err;
-
- status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
- if (status)
- return status;
-
- err = __qlge_vlan_rx_add_vid(qdev, vid);
- set_bit(vid, qdev->active_vlans);
-
- ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
-
- return err;
-}
-
-static int __qlge_vlan_rx_kill_vid(struct ql_adapter *qdev, u16 vid)
-{
- u32 enable_bit = 0;
- int err;
-
- err = ql_set_mac_addr_reg(qdev, (u8 *) &enable_bit,
- MAC_ADDR_TYPE_VLAN, vid);
- if (err)
- netif_err(qdev, ifup, qdev->ndev,
- "Failed to clear vlan address.\n");
- return err;
-}
-
-static int qlge_vlan_rx_kill_vid(struct net_device *ndev, __be16 proto, u16 vid)
-{
- struct ql_adapter *qdev = netdev_priv(ndev);
- int status;
- int err;
-
- status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
- if (status)
- return status;
-
- err = __qlge_vlan_rx_kill_vid(qdev, vid);
- clear_bit(vid, qdev->active_vlans);
-
- ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
-
- return err;
-}
-
-static void qlge_restore_vlan(struct ql_adapter *qdev)
-{
- int status;
- u16 vid;
-
- status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
- if (status)
- return;
-
- for_each_set_bit(vid, qdev->active_vlans, VLAN_N_VID)
- __qlge_vlan_rx_add_vid(qdev, vid);
-
- ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
-}
-
-/* MSI-X Multiple Vector Interrupt Handler for inbound completions. */
-static irqreturn_t qlge_msix_rx_isr(int irq, void *dev_id)
-{
- struct rx_ring *rx_ring = dev_id;
- napi_schedule(&rx_ring->napi);
- return IRQ_HANDLED;
-}
-
-/* This handles a fatal error, MPI activity, and the default
- * rx_ring in an MSI-X multiple vector environment.
- * In an MSI/legacy environment it also processes the rest
- * of the rx_rings.
- */
-static irqreturn_t qlge_isr(int irq, void *dev_id)
-{
- struct rx_ring *rx_ring = dev_id;
- struct ql_adapter *qdev = rx_ring->qdev;
- struct intr_context *intr_context = &qdev->intr_context[0];
- u32 var;
- int work_done = 0;
-
- spin_lock(&qdev->hw_lock);
- if (atomic_read(&qdev->intr_context[0].irq_cnt)) {
- netif_printk(qdev, intr, KERN_DEBUG, qdev->ndev,
- "Shared Interrupt, Not ours!\n");
- spin_unlock(&qdev->hw_lock);
- return IRQ_NONE;
- }
- spin_unlock(&qdev->hw_lock);
-
- var = ql_disable_completion_interrupt(qdev, intr_context->intr);
-
- /*
- * Check for fatal error.
- */
- if (var & STS_FE) {
- ql_queue_asic_error(qdev);
- netdev_err(qdev->ndev, "Got fatal error, STS = %x.\n", var);
- var = ql_read32(qdev, ERR_STS);
- netdev_err(qdev->ndev, "Resetting chip. "
- "Error Status Register = 0x%x\n", var);
- return IRQ_HANDLED;
- }
-
- /*
- * Check MPI processor activity.
- */
- if ((var & STS_PI) &&
- (ql_read32(qdev, INTR_MASK) & INTR_MASK_PI)) {
- /*
- * We've got an async event or mailbox completion.
- * Handle it and clear the source of the interrupt.
- */
- netif_err(qdev, intr, qdev->ndev,
- "Got MPI processor interrupt.\n");
- ql_disable_completion_interrupt(qdev, intr_context->intr);
- ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16));
- queue_delayed_work_on(smp_processor_id(),
- qdev->workqueue, &qdev->mpi_work, 0);
- work_done++;
- }
-
- /*
- * Get the bit-mask that shows the active queues for this
- * pass. Compare it to the queues that this irq services
- * and call napi if there's a match.
- */
- var = ql_read32(qdev, ISR1);
- if (var & intr_context->irq_mask) {
- netif_info(qdev, intr, qdev->ndev,
- "Waking handler for rx_ring[0].\n");
- ql_disable_completion_interrupt(qdev, intr_context->intr);
- napi_schedule(&rx_ring->napi);
- work_done++;
- }
- ql_enable_completion_interrupt(qdev, intr_context->intr);
- return work_done ? IRQ_HANDLED : IRQ_NONE;
-}
-
-static int ql_tso(struct sk_buff *skb, struct ob_mac_tso_iocb_req *mac_iocb_ptr)
-{
- if (skb_is_gso(skb)) {
- int err;
- __be16 l3_proto = vlan_get_protocol(skb);
-
- err = skb_cow_head(skb, 0);
- if (err < 0)
- return err;
-
- mac_iocb_ptr->opcode = OPCODE_OB_MAC_TSO_IOCB;
- mac_iocb_ptr->flags3 |= OB_MAC_TSO_IOCB_IC;
- mac_iocb_ptr->frame_len = cpu_to_le32((u32) skb->len);
- mac_iocb_ptr->total_hdrs_len =
- cpu_to_le16(skb_transport_offset(skb) + tcp_hdrlen(skb));
- mac_iocb_ptr->net_trans_offset =
- cpu_to_le16(skb_network_offset(skb) |
- skb_transport_offset(skb)
- << OB_MAC_TRANSPORT_HDR_SHIFT);
- mac_iocb_ptr->mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
- mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_LSO;
- if (likely(l3_proto == htons(ETH_P_IP))) {
- struct iphdr *iph = ip_hdr(skb);
- iph->check = 0;
- mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP4;
- tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
- iph->daddr, 0,
- IPPROTO_TCP,
- 0);
- } else if (l3_proto == htons(ETH_P_IPV6)) {
- mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP6;
- tcp_hdr(skb)->check =
- ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
- &ipv6_hdr(skb)->daddr,
- 0, IPPROTO_TCP, 0);
- }
- return 1;
- }
- return 0;
-}
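-
-/* For TSO the driver seeds tcp->check with a pseudo-header checksum
- * computed over a zero length (the third argument to
- * csum_tcpudp_magic() above); the hardware then folds in the real
- * payload length and data checksum for each segment it emits.
- */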
-
-static void ql_hw_csum_setup(struct sk_buff *skb,
- struct ob_mac_tso_iocb_req *mac_iocb_ptr)
-{
- int len;
- struct iphdr *iph = ip_hdr(skb);
- __sum16 *check;
- mac_iocb_ptr->opcode = OPCODE_OB_MAC_TSO_IOCB;
- mac_iocb_ptr->frame_len = cpu_to_le32((u32) skb->len);
- mac_iocb_ptr->net_trans_offset =
- cpu_to_le16(skb_network_offset(skb) |
- skb_transport_offset(skb) << OB_MAC_TRANSPORT_HDR_SHIFT);
-
- mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP4;
- len = (ntohs(iph->tot_len) - (iph->ihl << 2));
- if (likely(iph->protocol == IPPROTO_TCP)) {
- check = &(tcp_hdr(skb)->check);
- mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_TC;
- mac_iocb_ptr->total_hdrs_len =
- cpu_to_le16(skb_transport_offset(skb) +
- (tcp_hdr(skb)->doff << 2));
- } else {
- check = &(udp_hdr(skb)->check);
- mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_UC;
- mac_iocb_ptr->total_hdrs_len =
- cpu_to_le16(skb_transport_offset(skb) +
- sizeof(struct udphdr));
- }
- *check = ~csum_tcpudp_magic(iph->saddr,
- iph->daddr, len, iph->protocol, 0);
-}
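-
-/* Contrast with ql_tso() above: for plain checksum offload the
- * pseudo-header sum is seeded with the real L4 length (tot_len minus
- * the IP header), since the frame goes out as a single packet and no
- * per-segment fixup is needed.
- */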
-
-static netdev_tx_t qlge_send(struct sk_buff *skb, struct net_device *ndev)
-{
- struct tx_ring_desc *tx_ring_desc;
- struct ob_mac_iocb_req *mac_iocb_ptr;
- struct ql_adapter *qdev = netdev_priv(ndev);
- int tso;
- struct tx_ring *tx_ring;
- u32 tx_ring_idx = (u32) skb->queue_mapping;
-
- tx_ring = &qdev->tx_ring[tx_ring_idx];
-
- if (skb_padto(skb, ETH_ZLEN))
- return NETDEV_TX_OK;
-
- if (unlikely(atomic_read(&tx_ring->tx_count) < 2)) {
- netif_info(qdev, tx_queued, qdev->ndev,
- "%s: BUG! shutting down tx queue %d due to lack of resources.\n",
- __func__, tx_ring_idx);
- netif_stop_subqueue(ndev, tx_ring->wq_id);
- tx_ring->tx_errors++;
- return NETDEV_TX_BUSY;
- }
- tx_ring_desc = &tx_ring->q[tx_ring->prod_idx];
- mac_iocb_ptr = tx_ring_desc->queue_entry;
- memset((void *)mac_iocb_ptr, 0, sizeof(*mac_iocb_ptr));
-
- mac_iocb_ptr->opcode = OPCODE_OB_MAC_IOCB;
- mac_iocb_ptr->tid = tx_ring_desc->index;
- /* We use the upper 32-bits to store the tx queue for this IO.
- * When we get the completion we can use it to establish the context.
- */
- mac_iocb_ptr->txq_idx = tx_ring_idx;
- tx_ring_desc->skb = skb;
-
- mac_iocb_ptr->frame_len = cpu_to_le16((u16) skb->len);
-
- if (skb_vlan_tag_present(skb)) {
- netif_printk(qdev, tx_queued, KERN_DEBUG, qdev->ndev,
- "Adding a vlan tag %d.\n", skb_vlan_tag_get(skb));
- mac_iocb_ptr->flags3 |= OB_MAC_IOCB_V;
- mac_iocb_ptr->vlan_tci = cpu_to_le16(skb_vlan_tag_get(skb));
- }
- tso = ql_tso(skb, (struct ob_mac_tso_iocb_req *)mac_iocb_ptr);
- if (tso < 0) {
- dev_kfree_skb_any(skb);
- return NETDEV_TX_OK;
- } else if (unlikely(!tso) && (skb->ip_summed == CHECKSUM_PARTIAL)) {
- ql_hw_csum_setup(skb,
- (struct ob_mac_tso_iocb_req *)mac_iocb_ptr);
- }
- if (ql_map_send(qdev, mac_iocb_ptr, skb, tx_ring_desc) !=
- NETDEV_TX_OK) {
- netif_err(qdev, tx_queued, qdev->ndev,
- "Could not map the segments.\n");
- tx_ring->tx_errors++;
- return NETDEV_TX_BUSY;
- }
- QL_DUMP_OB_MAC_IOCB(mac_iocb_ptr);
- tx_ring->prod_idx++;
- if (tx_ring->prod_idx == tx_ring->wq_len)
- tx_ring->prod_idx = 0;
- wmb();
-
- ql_write_db_reg_relaxed(tx_ring->prod_idx, tx_ring->prod_idx_db_reg);
- netif_printk(qdev, tx_queued, KERN_DEBUG, qdev->ndev,
- "tx queued, slot %d, len %d\n",
- tx_ring->prod_idx, skb->len);
-
- atomic_dec(&tx_ring->tx_count);
-
- if (unlikely(atomic_read(&tx_ring->tx_count) < 2)) {
- netif_stop_subqueue(ndev, tx_ring->wq_id);
- if ((atomic_read(&tx_ring->tx_count) > (tx_ring->wq_len / 4)))
- /*
- * The queue got stopped because the tx_ring was full.
- * Wake it up, because it's now at least 25% empty.
- */
- netif_wake_subqueue(qdev->ndev, tx_ring->wq_id);
- }
- return NETDEV_TX_OK;
-}
-
-static void ql_free_shadow_space(struct ql_adapter *qdev)
-{
- if (qdev->rx_ring_shadow_reg_area) {
- pci_free_consistent(qdev->pdev,
- PAGE_SIZE,
- qdev->rx_ring_shadow_reg_area,
- qdev->rx_ring_shadow_reg_dma);
- qdev->rx_ring_shadow_reg_area = NULL;
- }
- if (qdev->tx_ring_shadow_reg_area) {
- pci_free_consistent(qdev->pdev,
- PAGE_SIZE,
- qdev->tx_ring_shadow_reg_area,
- qdev->tx_ring_shadow_reg_dma);
- qdev->tx_ring_shadow_reg_area = NULL;
- }
-}
-
-static int ql_alloc_shadow_space(struct ql_adapter *qdev)
-{
- qdev->rx_ring_shadow_reg_area =
- pci_zalloc_consistent(qdev->pdev, PAGE_SIZE,
- &qdev->rx_ring_shadow_reg_dma);
- if (qdev->rx_ring_shadow_reg_area == NULL) {
- netif_err(qdev, ifup, qdev->ndev,
- "Allocation of RX shadow space failed.\n");
- return -ENOMEM;
- }
-
- qdev->tx_ring_shadow_reg_area =
- pci_zalloc_consistent(qdev->pdev, PAGE_SIZE,
- &qdev->tx_ring_shadow_reg_dma);
- if (qdev->tx_ring_shadow_reg_area == NULL) {
- netif_err(qdev, ifup, qdev->ndev,
- "Allocation of TX shadow space failed.\n");
- goto err_wqp_sh_area;
- }
- return 0;
-
-err_wqp_sh_area:
- pci_free_consistent(qdev->pdev,
- PAGE_SIZE,
- qdev->rx_ring_shadow_reg_area,
- qdev->rx_ring_shadow_reg_dma);
- return -ENOMEM;
-}
-
-static void ql_init_tx_ring(struct ql_adapter *qdev, struct tx_ring *tx_ring)
-{
- struct tx_ring_desc *tx_ring_desc;
- int i;
- struct ob_mac_iocb_req *mac_iocb_ptr;
-
- mac_iocb_ptr = tx_ring->wq_base;
- tx_ring_desc = tx_ring->q;
- for (i = 0; i < tx_ring->wq_len; i++) {
- tx_ring_desc->index = i;
- tx_ring_desc->skb = NULL;
- tx_ring_desc->queue_entry = mac_iocb_ptr;
- mac_iocb_ptr++;
- tx_ring_desc++;
- }
- atomic_set(&tx_ring->tx_count, tx_ring->wq_len);
-}
-
-static void ql_free_tx_resources(struct ql_adapter *qdev,
- struct tx_ring *tx_ring)
-{
- if (tx_ring->wq_base) {
- pci_free_consistent(qdev->pdev, tx_ring->wq_size,
- tx_ring->wq_base, tx_ring->wq_base_dma);
- tx_ring->wq_base = NULL;
- }
- kfree(tx_ring->q);
- tx_ring->q = NULL;
-}
-
-static int ql_alloc_tx_resources(struct ql_adapter *qdev,
- struct tx_ring *tx_ring)
-{
- tx_ring->wq_base =
- pci_alloc_consistent(qdev->pdev, tx_ring->wq_size,
- &tx_ring->wq_base_dma);
-
-	if ((tx_ring->wq_base == NULL) ||
-	    (tx_ring->wq_base_dma & WQ_ADDR_ALIGN))
-		goto pci_alloc_err;
-
- tx_ring->q =
- kmalloc_array(tx_ring->wq_len, sizeof(struct tx_ring_desc),
- GFP_KERNEL);
- if (tx_ring->q == NULL)
- goto err;
-
- return 0;
-err:
- pci_free_consistent(qdev->pdev, tx_ring->wq_size,
- tx_ring->wq_base, tx_ring->wq_base_dma);
- tx_ring->wq_base = NULL;
-pci_alloc_err:
- netif_err(qdev, ifup, qdev->ndev, "tx_ring alloc failed.\n");
- return -ENOMEM;
-}
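-
-/*
- * The (wq_base_dma & WQ_ADDR_ALIGN) test above treats the constant as an
- * alignment mask: any low bit set means the DMA buffer does not sit on
- * the boundary the hardware requires. A standalone sketch of the idiom,
- * with an illustrative 4 KB requirement (not the driver's actual
- * constant):
- */
-#define SKETCH_ADDR_ALIGN	(4096 - 1)	/* mask for 4 KB alignment */
-
-static inline bool sketch_dma_is_aligned(dma_addr_t addr)
-{
-	return (addr & SKETCH_ADDR_ALIGN) == 0;
-}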
-
-static void ql_free_lbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring)
-{
-	struct bq_desc *lbq_desc;
-	u32 curr_idx, clean_idx;
-
- curr_idx = rx_ring->lbq_curr_idx;
- clean_idx = rx_ring->lbq_clean_idx;
- while (curr_idx != clean_idx) {
- lbq_desc = &rx_ring->lbq[curr_idx];
-
- if (lbq_desc->p.pg_chunk.last_flag) {
- pci_unmap_page(qdev->pdev,
- lbq_desc->p.pg_chunk.map,
- ql_lbq_block_size(qdev),
- PCI_DMA_FROMDEVICE);
- lbq_desc->p.pg_chunk.last_flag = 0;
- }
-
- put_page(lbq_desc->p.pg_chunk.page);
- lbq_desc->p.pg_chunk.page = NULL;
-
- if (++curr_idx == rx_ring->lbq_len)
- curr_idx = 0;
-
- }
- if (rx_ring->pg_chunk.page) {
- pci_unmap_page(qdev->pdev, rx_ring->pg_chunk.map,
- ql_lbq_block_size(qdev), PCI_DMA_FROMDEVICE);
- put_page(rx_ring->pg_chunk.page);
- rx_ring->pg_chunk.page = NULL;
- }
-}
-
-static void ql_free_sbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring)
-{
- int i;
- struct bq_desc *sbq_desc;
-
- for (i = 0; i < rx_ring->sbq_len; i++) {
- sbq_desc = &rx_ring->sbq[i];
- if (sbq_desc == NULL) {
- netif_err(qdev, ifup, qdev->ndev,
- "sbq_desc %d is NULL.\n", i);
- return;
- }
- if (sbq_desc->p.skb) {
- pci_unmap_single(qdev->pdev,
- dma_unmap_addr(sbq_desc, mapaddr),
- dma_unmap_len(sbq_desc, maplen),
- PCI_DMA_FROMDEVICE);
- dev_kfree_skb(sbq_desc->p.skb);
- sbq_desc->p.skb = NULL;
- }
- }
-}
-
-/* Free all large and small rx buffers associated
- * with the completion queues for this device.
- */
-static void ql_free_rx_buffers(struct ql_adapter *qdev)
-{
- int i;
- struct rx_ring *rx_ring;
-
- for (i = 0; i < qdev->rx_ring_count; i++) {
- rx_ring = &qdev->rx_ring[i];
- if (rx_ring->lbq)
- ql_free_lbq_buffers(qdev, rx_ring);
- if (rx_ring->sbq)
- ql_free_sbq_buffers(qdev, rx_ring);
- }
-}
-
-static void ql_alloc_rx_buffers(struct ql_adapter *qdev)
-{
- struct rx_ring *rx_ring;
- int i;
-
- for (i = 0; i < qdev->rx_ring_count; i++) {
- rx_ring = &qdev->rx_ring[i];
- if (rx_ring->type != TX_Q)
- ql_update_buffer_queues(qdev, rx_ring);
- }
-}
-
-static void ql_init_lbq_ring(struct ql_adapter *qdev,
- struct rx_ring *rx_ring)
-{
- int i;
- struct bq_desc *lbq_desc;
- __le64 *bq = rx_ring->lbq_base;
-
- memset(rx_ring->lbq, 0, rx_ring->lbq_len * sizeof(struct bq_desc));
- for (i = 0; i < rx_ring->lbq_len; i++) {
- lbq_desc = &rx_ring->lbq[i];
- memset(lbq_desc, 0, sizeof(*lbq_desc));
- lbq_desc->index = i;
- lbq_desc->addr = bq;
- bq++;
- }
-}
-
-static void ql_init_sbq_ring(struct ql_adapter *qdev,
- struct rx_ring *rx_ring)
-{
- int i;
- struct bq_desc *sbq_desc;
- __le64 *bq = rx_ring->sbq_base;
-
- memset(rx_ring->sbq, 0, rx_ring->sbq_len * sizeof(struct bq_desc));
- for (i = 0; i < rx_ring->sbq_len; i++) {
- sbq_desc = &rx_ring->sbq[i];
- memset(sbq_desc, 0, sizeof(*sbq_desc));
- sbq_desc->index = i;
- sbq_desc->addr = bq;
- bq++;
- }
-}
-
-static void ql_free_rx_resources(struct ql_adapter *qdev,
- struct rx_ring *rx_ring)
-{
- /* Free the small buffer queue. */
- if (rx_ring->sbq_base) {
- pci_free_consistent(qdev->pdev,
- rx_ring->sbq_size,
- rx_ring->sbq_base, rx_ring->sbq_base_dma);
- rx_ring->sbq_base = NULL;
- }
-
- /* Free the small buffer queue control blocks. */
- kfree(rx_ring->sbq);
- rx_ring->sbq = NULL;
-
- /* Free the large buffer queue. */
- if (rx_ring->lbq_base) {
- pci_free_consistent(qdev->pdev,
- rx_ring->lbq_size,
- rx_ring->lbq_base, rx_ring->lbq_base_dma);
- rx_ring->lbq_base = NULL;
- }
-
- /* Free the large buffer queue control blocks. */
- kfree(rx_ring->lbq);
- rx_ring->lbq = NULL;
-
- /* Free the rx queue. */
- if (rx_ring->cq_base) {
- pci_free_consistent(qdev->pdev,
- rx_ring->cq_size,
- rx_ring->cq_base, rx_ring->cq_base_dma);
- rx_ring->cq_base = NULL;
- }
-}
-
-/* Allocate queues and buffers for this completion queue based
- * on the values in the parameter structure. */
-static int ql_alloc_rx_resources(struct ql_adapter *qdev,
- struct rx_ring *rx_ring)
-{
-
- /*
- * Allocate the completion queue for this rx_ring.
- */
- rx_ring->cq_base =
- pci_alloc_consistent(qdev->pdev, rx_ring->cq_size,
- &rx_ring->cq_base_dma);
-
- if (rx_ring->cq_base == NULL) {
- netif_err(qdev, ifup, qdev->ndev, "rx_ring alloc failed.\n");
- return -ENOMEM;
- }
-
- if (rx_ring->sbq_len) {
- /*
- * Allocate small buffer queue.
- */
- rx_ring->sbq_base =
- pci_alloc_consistent(qdev->pdev, rx_ring->sbq_size,
- &rx_ring->sbq_base_dma);
-
- if (rx_ring->sbq_base == NULL) {
- netif_err(qdev, ifup, qdev->ndev,
- "Small buffer queue allocation failed.\n");
- goto err_mem;
- }
-
- /*
- * Allocate small buffer queue control blocks.
- */
- rx_ring->sbq = kmalloc_array(rx_ring->sbq_len,
- sizeof(struct bq_desc),
- GFP_KERNEL);
- if (rx_ring->sbq == NULL)
- goto err_mem;
-
- ql_init_sbq_ring(qdev, rx_ring);
- }
-
- if (rx_ring->lbq_len) {
- /*
- * Allocate large buffer queue.
- */
- rx_ring->lbq_base =
- pci_alloc_consistent(qdev->pdev, rx_ring->lbq_size,
- &rx_ring->lbq_base_dma);
-
- if (rx_ring->lbq_base == NULL) {
- netif_err(qdev, ifup, qdev->ndev,
- "Large buffer queue allocation failed.\n");
- goto err_mem;
- }
- /*
- * Allocate large buffer queue control blocks.
- */
- rx_ring->lbq = kmalloc_array(rx_ring->lbq_len,
- sizeof(struct bq_desc),
- GFP_KERNEL);
- if (rx_ring->lbq == NULL)
- goto err_mem;
-
- ql_init_lbq_ring(qdev, rx_ring);
- }
-
- return 0;
-
-err_mem:
- ql_free_rx_resources(qdev, rx_ring);
- return -ENOMEM;
-}
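-
-/*
- * Note the error path above: a single err_mem label unwinds by calling
- * the full teardown routine. That only works because every free checks
- * for NULL (kfree(NULL) is already a no-op) and clears the pointer
- * afterwards, so teardown is safe on a partially built ring. A minimal
- * sketch of the convention, with illustrative names and plain kmalloc
- * memory rather than the driver's DMA allocations:
- */
-struct sketch_ring_mem {
-	void *cq;
-	void *sbq;
-};
-
-static void sketch_free_ring_mem(struct sketch_ring_mem *m)
-{
-	kfree(m->sbq);
-	m->sbq = NULL;		/* repeated teardown stays harmless */
-	kfree(m->cq);
-	m->cq = NULL;
-}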
-
-static void ql_tx_ring_clean(struct ql_adapter *qdev)
-{
- struct tx_ring *tx_ring;
- struct tx_ring_desc *tx_ring_desc;
- int i, j;
-
- /*
- * Loop through all queues and free
- * any resources.
- */
- for (j = 0; j < qdev->tx_ring_count; j++) {
- tx_ring = &qdev->tx_ring[j];
- for (i = 0; i < tx_ring->wq_len; i++) {
- tx_ring_desc = &tx_ring->q[i];
- if (tx_ring_desc && tx_ring_desc->skb) {
- netif_err(qdev, ifdown, qdev->ndev,
- "Freeing lost SKB %p, from queue %d, index %d.\n",
- tx_ring_desc->skb, j,
- tx_ring_desc->index);
- ql_unmap_send(qdev, tx_ring_desc,
- tx_ring_desc->map_cnt);
- dev_kfree_skb(tx_ring_desc->skb);
- tx_ring_desc->skb = NULL;
- }
- }
- }
-}
-
-static void ql_free_mem_resources(struct ql_adapter *qdev)
-{
- int i;
-
- for (i = 0; i < qdev->tx_ring_count; i++)
- ql_free_tx_resources(qdev, &qdev->tx_ring[i]);
- for (i = 0; i < qdev->rx_ring_count; i++)
- ql_free_rx_resources(qdev, &qdev->rx_ring[i]);
- ql_free_shadow_space(qdev);
-}
-
-static int ql_alloc_mem_resources(struct ql_adapter *qdev)
-{
- int i;
-
- /* Allocate space for our shadow registers and such. */
- if (ql_alloc_shadow_space(qdev))
- return -ENOMEM;
-
- for (i = 0; i < qdev->rx_ring_count; i++) {
- if (ql_alloc_rx_resources(qdev, &qdev->rx_ring[i]) != 0) {
- netif_err(qdev, ifup, qdev->ndev,
- "RX resource allocation failed.\n");
- goto err_mem;
- }
- }
- /* Allocate tx queue resources */
- for (i = 0; i < qdev->tx_ring_count; i++) {
- if (ql_alloc_tx_resources(qdev, &qdev->tx_ring[i]) != 0) {
- netif_err(qdev, ifup, qdev->ndev,
- "TX resource allocation failed.\n");
- goto err_mem;
- }
- }
- return 0;
-
-err_mem:
- ql_free_mem_resources(qdev);
- return -ENOMEM;
-}
-
-/* Set up the rx ring control block and pass it to the chip.
- * The control block is defined as
- * "Completion Queue Initialization Control Block", or cqicb.
- */
-static int ql_start_rx_ring(struct ql_adapter *qdev, struct rx_ring *rx_ring)
-{
- struct cqicb *cqicb = &rx_ring->cqicb;
- void *shadow_reg = qdev->rx_ring_shadow_reg_area +
- (rx_ring->cq_id * RX_RING_SHADOW_SPACE);
- u64 shadow_reg_dma = qdev->rx_ring_shadow_reg_dma +
- (rx_ring->cq_id * RX_RING_SHADOW_SPACE);
- void __iomem *doorbell_area =
- qdev->doorbell_area + (DB_PAGE_SIZE * (128 + rx_ring->cq_id));
- int err = 0;
- u16 bq_len;
- u64 tmp;
- __le64 *base_indirect_ptr;
- int page_entries;
-
- /* Set up the shadow registers for this ring. */
- rx_ring->prod_idx_sh_reg = shadow_reg;
- rx_ring->prod_idx_sh_reg_dma = shadow_reg_dma;
- *rx_ring->prod_idx_sh_reg = 0;
- shadow_reg += sizeof(u64);
- shadow_reg_dma += sizeof(u64);
- rx_ring->lbq_base_indirect = shadow_reg;
- rx_ring->lbq_base_indirect_dma = shadow_reg_dma;
- shadow_reg += (sizeof(u64) * MAX_DB_PAGES_PER_BQ(rx_ring->lbq_len));
- shadow_reg_dma += (sizeof(u64) * MAX_DB_PAGES_PER_BQ(rx_ring->lbq_len));
- rx_ring->sbq_base_indirect = shadow_reg;
- rx_ring->sbq_base_indirect_dma = shadow_reg_dma;
-
- /* PCI doorbell mem area + 0x00 for consumer index register */
- rx_ring->cnsmr_idx_db_reg = (u32 __iomem *) doorbell_area;
- rx_ring->cnsmr_idx = 0;
- rx_ring->curr_entry = rx_ring->cq_base;
-
- /* PCI doorbell mem area + 0x04 for valid register */
- rx_ring->valid_db_reg = doorbell_area + 0x04;
-
- /* PCI doorbell mem area + 0x18 for large buffer consumer */
- rx_ring->lbq_prod_idx_db_reg = (u32 __iomem *) (doorbell_area + 0x18);
-
- /* PCI doorbell mem area + 0x1c */
- rx_ring->sbq_prod_idx_db_reg = (u32 __iomem *) (doorbell_area + 0x1c);
-
- memset((void *)cqicb, 0, sizeof(struct cqicb));
- cqicb->msix_vect = rx_ring->irq;
-
- bq_len = (rx_ring->cq_len == 65536) ? 0 : (u16) rx_ring->cq_len;
- cqicb->len = cpu_to_le16(bq_len | LEN_V | LEN_CPP_CONT);
-
- cqicb->addr = cpu_to_le64(rx_ring->cq_base_dma);
-
- cqicb->prod_idx_addr = cpu_to_le64(rx_ring->prod_idx_sh_reg_dma);
-
- /*
- * Set up the control block load flags.
- */
- cqicb->flags = FLAGS_LC | /* Load queue base address */
- FLAGS_LV | /* Load MSI-X vector */
- FLAGS_LI; /* Load irq delay values */
- if (rx_ring->lbq_len) {
- cqicb->flags |= FLAGS_LL; /* Load lbq values */
- tmp = (u64)rx_ring->lbq_base_dma;
- base_indirect_ptr = rx_ring->lbq_base_indirect;
- page_entries = 0;
- do {
- *base_indirect_ptr = cpu_to_le64(tmp);
- tmp += DB_PAGE_SIZE;
- base_indirect_ptr++;
- page_entries++;
- } while (page_entries < MAX_DB_PAGES_PER_BQ(rx_ring->lbq_len));
- cqicb->lbq_addr =
- cpu_to_le64(rx_ring->lbq_base_indirect_dma);
- bq_len = (rx_ring->lbq_buf_size == 65536) ? 0 :
- (u16) rx_ring->lbq_buf_size;
- cqicb->lbq_buf_size = cpu_to_le16(bq_len);
- bq_len = (rx_ring->lbq_len == 65536) ? 0 :
- (u16) rx_ring->lbq_len;
- cqicb->lbq_len = cpu_to_le16(bq_len);
- rx_ring->lbq_prod_idx = 0;
- rx_ring->lbq_curr_idx = 0;
- rx_ring->lbq_clean_idx = 0;
- rx_ring->lbq_free_cnt = rx_ring->lbq_len;
- }
- if (rx_ring->sbq_len) {
- cqicb->flags |= FLAGS_LS; /* Load sbq values */
- tmp = (u64)rx_ring->sbq_base_dma;
- base_indirect_ptr = rx_ring->sbq_base_indirect;
- page_entries = 0;
- do {
- *base_indirect_ptr = cpu_to_le64(tmp);
- tmp += DB_PAGE_SIZE;
- base_indirect_ptr++;
- page_entries++;
- } while (page_entries < MAX_DB_PAGES_PER_BQ(rx_ring->sbq_len));
- cqicb->sbq_addr =
- cpu_to_le64(rx_ring->sbq_base_indirect_dma);
- cqicb->sbq_buf_size =
- cpu_to_le16((u16)(rx_ring->sbq_buf_size));
- bq_len = (rx_ring->sbq_len == 65536) ? 0 :
- (u16) rx_ring->sbq_len;
- cqicb->sbq_len = cpu_to_le16(bq_len);
- rx_ring->sbq_prod_idx = 0;
- rx_ring->sbq_curr_idx = 0;
- rx_ring->sbq_clean_idx = 0;
- rx_ring->sbq_free_cnt = rx_ring->sbq_len;
- }
- switch (rx_ring->type) {
- case TX_Q:
- cqicb->irq_delay = cpu_to_le16(qdev->tx_coalesce_usecs);
- cqicb->pkt_delay = cpu_to_le16(qdev->tx_max_coalesced_frames);
- break;
- case RX_Q:
- /* Inbound completion handling rx_rings run in
- * separate NAPI contexts.
- */
- netif_napi_add(qdev->ndev, &rx_ring->napi, ql_napi_poll_msix,
- 64);
- cqicb->irq_delay = cpu_to_le16(qdev->rx_coalesce_usecs);
- cqicb->pkt_delay = cpu_to_le16(qdev->rx_max_coalesced_frames);
- break;
- default:
- netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
- "Invalid rx_ring->type = %d.\n", rx_ring->type);
- }
- err = ql_write_cfg(qdev, cqicb, sizeof(struct cqicb),
- CFG_LCQ, rx_ring->cq_id);
- if (err) {
- netif_err(qdev, ifup, qdev->ndev, "Failed to load CQICB.\n");
- return err;
- }
- return err;
-}
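-
-/*
- * The two do/while loops above split one physically contiguous queue
- * into DB_PAGE_SIZE chunks and record each chunk's bus address in a
- * little-endian indirection list that the chip walks. A standalone
- * sketch of that fill (the sketch_* name and the 4 KB page size are
- * illustrative):
- */
-#define SKETCH_DB_PAGE_SIZE	4096
-
-static void sketch_fill_indirect(__le64 *list, u64 base_dma, int n_pages)
-{
-	int i;
-
-	for (i = 0; i < n_pages; i++)
-		list[i] = cpu_to_le64(base_dma + (u64)i * SKETCH_DB_PAGE_SIZE);
-}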
-
-static int ql_start_tx_ring(struct ql_adapter *qdev, struct tx_ring *tx_ring)
-{
- struct wqicb *wqicb = (struct wqicb *)tx_ring;
- void __iomem *doorbell_area =
- qdev->doorbell_area + (DB_PAGE_SIZE * tx_ring->wq_id);
- void *shadow_reg = qdev->tx_ring_shadow_reg_area +
- (tx_ring->wq_id * sizeof(u64));
- u64 shadow_reg_dma = qdev->tx_ring_shadow_reg_dma +
- (tx_ring->wq_id * sizeof(u64));
- int err = 0;
-
- /*
- * Assign doorbell registers for this tx_ring.
- */
- /* TX PCI doorbell mem area for tx producer index */
- tx_ring->prod_idx_db_reg = (u32 __iomem *) doorbell_area;
- tx_ring->prod_idx = 0;
- /* TX PCI doorbell mem area + 0x04 */
- tx_ring->valid_db_reg = doorbell_area + 0x04;
-
- /*
- * Assign shadow registers for this tx_ring.
- */
- tx_ring->cnsmr_idx_sh_reg = shadow_reg;
- tx_ring->cnsmr_idx_sh_reg_dma = shadow_reg_dma;
-
- wqicb->len = cpu_to_le16(tx_ring->wq_len | Q_LEN_V | Q_LEN_CPP_CONT);
- wqicb->flags = cpu_to_le16(Q_FLAGS_LC |
- Q_FLAGS_LB | Q_FLAGS_LI | Q_FLAGS_LO);
- wqicb->cq_id_rss = cpu_to_le16(tx_ring->cq_id);
- wqicb->rid = 0;
- wqicb->addr = cpu_to_le64(tx_ring->wq_base_dma);
-
- wqicb->cnsmr_idx_addr = cpu_to_le64(tx_ring->cnsmr_idx_sh_reg_dma);
-
- ql_init_tx_ring(qdev, tx_ring);
-
- err = ql_write_cfg(qdev, wqicb, sizeof(*wqicb), CFG_LRQ,
- (u16) tx_ring->wq_id);
- if (err) {
- netif_err(qdev, ifup, qdev->ndev, "Failed to load tx_ring.\n");
- return err;
- }
- return err;
-}
-
-static void ql_disable_msix(struct ql_adapter *qdev)
-{
- if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
- pci_disable_msix(qdev->pdev);
- clear_bit(QL_MSIX_ENABLED, &qdev->flags);
- kfree(qdev->msi_x_entry);
- qdev->msi_x_entry = NULL;
- } else if (test_bit(QL_MSI_ENABLED, &qdev->flags)) {
- pci_disable_msi(qdev->pdev);
- clear_bit(QL_MSI_ENABLED, &qdev->flags);
- }
-}
-
-/* We start by asking for the number of vectors stored in
- * qdev->intr_count. pci_enable_msix_range() may grant fewer,
- * in which case we adjust intr_count to what we actually got.
- */
-static void ql_enable_msix(struct ql_adapter *qdev)
-{
- int i, err;
-
- /* Get the MSIX vectors. */
- if (qlge_irq_type == MSIX_IRQ) {
- /* Try to alloc space for the msix struct,
- * if it fails then go to MSI/legacy.
- */
- qdev->msi_x_entry = kcalloc(qdev->intr_count,
- sizeof(struct msix_entry),
- GFP_KERNEL);
- if (!qdev->msi_x_entry) {
- qlge_irq_type = MSI_IRQ;
- goto msi;
- }
-
- for (i = 0; i < qdev->intr_count; i++)
- qdev->msi_x_entry[i].entry = i;
-
- err = pci_enable_msix_range(qdev->pdev, qdev->msi_x_entry,
- 1, qdev->intr_count);
- if (err < 0) {
- kfree(qdev->msi_x_entry);
- qdev->msi_x_entry = NULL;
- netif_warn(qdev, ifup, qdev->ndev,
- "MSI-X Enable failed, trying MSI.\n");
- qlge_irq_type = MSI_IRQ;
- } else {
- qdev->intr_count = err;
- set_bit(QL_MSIX_ENABLED, &qdev->flags);
- netif_info(qdev, ifup, qdev->ndev,
- "MSI-X Enabled, got %d vectors.\n",
- qdev->intr_count);
- return;
- }
- }
-msi:
- qdev->intr_count = 1;
- if (qlge_irq_type == MSI_IRQ) {
- if (!pci_enable_msi(qdev->pdev)) {
- set_bit(QL_MSI_ENABLED, &qdev->flags);
- netif_info(qdev, ifup, qdev->ndev,
- "Running with MSI interrupts.\n");
- return;
- }
- }
- qlge_irq_type = LEG_IRQ;
- netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
- "Running with legacy interrupts.\n");
-}
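-
-/*
- * The function above is the classic interrupt fallback ladder: try MSI-X
- * for the full vector count, degrade to single-vector MSI, and finally
- * settle for the legacy INTx line. A compressed sketch of that shape
- * (the sketch_* names are illustrative; entries[].entry must be
- * pre-filled by the caller):
- */
-enum sketch_irq_type { SKETCH_MSIX, SKETCH_MSI, SKETCH_LEGACY };
-
-static enum sketch_irq_type sketch_pick_irq(struct pci_dev *pdev,
-					    struct msix_entry *entries,
-					    int want)
-{
-	if (pci_enable_msix_range(pdev, entries, 1, want) > 0)
-		return SKETCH_MSIX;	/* got 1..want vectors */
-	if (!pci_enable_msi(pdev))
-		return SKETCH_MSI;
-	return SKETCH_LEGACY;		/* INTx always remains available */
-}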
-
-/* Each vector services 1 RSS ring and 1 or more
- * TX completion rings. This function loops through
- * the TX completion rings and assigns the vector that
- * will service it. An example would be if there are
- * 2 vectors (so 2 RSS rings) and 8 TX completion rings.
- * This would mean that vector 0 would service RSS ring 0
- * and TX completion rings 0,1,2 and 3. Vector 1 would
- * service RSS ring 1 and TX completion rings 4,5,6 and 7.
- */
-static void ql_set_tx_vect(struct ql_adapter *qdev)
-{
- int i, j, vect;
- u32 tx_rings_per_vector = qdev->tx_ring_count / qdev->intr_count;
-
- if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) {
- /* Assign irq vectors to TX rx_rings.*/
- for (vect = 0, j = 0, i = qdev->rss_ring_count;
- i < qdev->rx_ring_count; i++) {
- if (j == tx_rings_per_vector) {
- vect++;
- j = 0;
- }
- qdev->rx_ring[i].irq = vect;
- j++;
- }
- } else {
-		/* For a single vector, all rings have an irq
- * of zero.
- */
- for (i = 0; i < qdev->rx_ring_count; i++)
- qdev->rx_ring[i].irq = 0;
- }
-}
-
-/* Set the interrupt mask for this vector. Each vector
- * will service 1 RSS ring and 1 or more TX completion
- * rings. This function sets up a bit mask per vector
- * that indicates which rings it services.
- */
-static void ql_set_irq_mask(struct ql_adapter *qdev, struct intr_context *ctx)
-{
- int j, vect = ctx->intr;
- u32 tx_rings_per_vector = qdev->tx_ring_count / qdev->intr_count;
-
- if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) {
- /* Add the RSS ring serviced by this vector
- * to the mask.
- */
- ctx->irq_mask = (1 << qdev->rx_ring[vect].cq_id);
- /* Add the TX ring(s) serviced by this vector
- * to the mask. */
- for (j = 0; j < tx_rings_per_vector; j++) {
- ctx->irq_mask |=
- (1 << qdev->rx_ring[qdev->rss_ring_count +
- (vect * tx_rings_per_vector) + j].cq_id);
- }
- } else {
-		/* For a single vector we just set the bit for
-		 * each queue's ID in the mask.
- */
- for (j = 0; j < qdev->rx_ring_count; j++)
- ctx->irq_mask |= (1 << qdev->rx_ring[j].cq_id);
- }
-}
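-
-/*
- * Using the worked example from the comment before ql_set_tx_vect()
- * (2 vectors, 8 TX completion rings, tx_rings_per_vector = 4), the
- * vector chosen by that loop can also be written as one division. A
- * sketch of the arithmetic (sketch_tx_vector() is illustrative):
- */
-static inline int sketch_tx_vector(int tx_cq_index, int rss_ring_count,
-				   int tx_rings_per_vector)
-{
-	/* TX completion rings follow the RSS rings in the rx_ring array:
-	 * with rss_ring_count = 2 and tx_rings_per_vector = 4, indexes
-	 * 2..5 map to vector 0 and indexes 6..9 map to vector 1.
-	 */
-	return (tx_cq_index - rss_ring_count) / tx_rings_per_vector;
-}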
-
-/*
- * Here we build the intr_context structures based on
- * our rx_ring count and intr vector count.
- * The intr_context structure is used to hook each vector
- * to possibly different handlers.
- */
-static void ql_resolve_queues_to_irqs(struct ql_adapter *qdev)
-{
- int i = 0;
- struct intr_context *intr_context = &qdev->intr_context[0];
-
- if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) {
-		/* Each rx_ring has its
- * own intr_context since we have separate
- * vectors for each queue.
- */
- for (i = 0; i < qdev->intr_count; i++, intr_context++) {
- qdev->rx_ring[i].irq = i;
- intr_context->intr = i;
- intr_context->qdev = qdev;
- /* Set up this vector's bit-mask that indicates
- * which queues it services.
- */
- ql_set_irq_mask(qdev, intr_context);
- /*
-			 * We set up each vector's enable/disable/read bits so
-			 * there are no bit/mask calculations in the critical path.
- */
- intr_context->intr_en_mask =
- INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
- INTR_EN_TYPE_ENABLE | INTR_EN_IHD_MASK | INTR_EN_IHD
- | i;
- intr_context->intr_dis_mask =
- INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
- INTR_EN_TYPE_DISABLE | INTR_EN_IHD_MASK |
- INTR_EN_IHD | i;
- intr_context->intr_read_mask =
- INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
- INTR_EN_TYPE_READ | INTR_EN_IHD_MASK | INTR_EN_IHD |
- i;
- if (i == 0) {
- /* The first vector/queue handles
- * broadcast/multicast, fatal errors,
- * and firmware events. This in addition
- * to normal inbound NAPI processing.
- */
- intr_context->handler = qlge_isr;
- sprintf(intr_context->name, "%s-rx-%d",
- qdev->ndev->name, i);
- } else {
- /*
- * Inbound queues handle unicast frames only.
- */
- intr_context->handler = qlge_msix_rx_isr;
- sprintf(intr_context->name, "%s-rx-%d",
- qdev->ndev->name, i);
- }
- }
- } else {
- /*
- * All rx_rings use the same intr_context since
- * there is only one vector.
- */
- intr_context->intr = 0;
- intr_context->qdev = qdev;
- /*
-		 * We set up each vector's enable/disable/read bits so
-		 * there are no bit/mask calculations in the critical path.
- */
- intr_context->intr_en_mask =
- INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK | INTR_EN_TYPE_ENABLE;
- intr_context->intr_dis_mask =
- INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
- INTR_EN_TYPE_DISABLE;
- intr_context->intr_read_mask =
- INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK | INTR_EN_TYPE_READ;
- /*
- * Single interrupt means one handler for all rings.
- */
- intr_context->handler = qlge_isr;
- sprintf(intr_context->name, "%s-single_irq", qdev->ndev->name);
- /* Set up this vector's bit-mask that indicates
- * which queues it services. In this case there is
- * a single vector so it will service all RSS and
- * TX completion rings.
- */
- ql_set_irq_mask(qdev, intr_context);
- }
- /* Tell the TX completion rings which MSIx vector
- * they will be using.
- */
- ql_set_tx_vect(qdev);
-}
-
-static void ql_free_irq(struct ql_adapter *qdev)
-{
- int i;
- struct intr_context *intr_context = &qdev->intr_context[0];
-
- for (i = 0; i < qdev->intr_count; i++, intr_context++) {
- if (intr_context->hooked) {
- if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
- free_irq(qdev->msi_x_entry[i].vector,
- &qdev->rx_ring[i]);
- } else {
- free_irq(qdev->pdev->irq, &qdev->rx_ring[0]);
- }
- }
- }
- ql_disable_msix(qdev);
-}
-
-static int ql_request_irq(struct ql_adapter *qdev)
-{
- int i;
- int status = 0;
- struct pci_dev *pdev = qdev->pdev;
- struct intr_context *intr_context = &qdev->intr_context[0];
-
- ql_resolve_queues_to_irqs(qdev);
-
- for (i = 0; i < qdev->intr_count; i++, intr_context++) {
- atomic_set(&intr_context->irq_cnt, 0);
- if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
- status = request_irq(qdev->msi_x_entry[i].vector,
- intr_context->handler,
- 0,
- intr_context->name,
- &qdev->rx_ring[i]);
- if (status) {
- netif_err(qdev, ifup, qdev->ndev,
- "Failed request for MSIX interrupt %d.\n",
- i);
- goto err_irq;
- }
- } else {
- netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
- "trying msi or legacy interrupts.\n");
- netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
- "%s: irq = %d.\n", __func__, pdev->irq);
- netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
- "%s: context->name = %s.\n", __func__,
- intr_context->name);
- netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
- "%s: dev_id = 0x%p.\n", __func__,
- &qdev->rx_ring[0]);
- status =
- request_irq(pdev->irq, qlge_isr,
- test_bit(QL_MSI_ENABLED,
- &qdev->
- flags) ? 0 : IRQF_SHARED,
- intr_context->name, &qdev->rx_ring[0]);
- if (status)
- goto err_irq;
-
- netif_err(qdev, ifup, qdev->ndev,
- "Hooked intr %d, queue type %s, with name %s.\n",
- i,
- qdev->rx_ring[0].type == DEFAULT_Q ?
- "DEFAULT_Q" :
- qdev->rx_ring[0].type == TX_Q ? "TX_Q" :
- qdev->rx_ring[0].type == RX_Q ? "RX_Q" : "",
- intr_context->name);
- }
- intr_context->hooked = 1;
- }
- return status;
-err_irq:
- netif_err(qdev, ifup, qdev->ndev, "Failed to get the interrupts!!!\n");
- ql_free_irq(qdev);
- return status;
-}
-
-static int ql_start_rss(struct ql_adapter *qdev)
-{
- static const u8 init_hash_seed[] = {
- 0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2,
- 0x41, 0x67, 0x25, 0x3d, 0x43, 0xa3, 0x8f, 0xb0,
- 0xd0, 0xca, 0x2b, 0xcb, 0xae, 0x7b, 0x30, 0xb4,
- 0x77, 0xcb, 0x2d, 0xa3, 0x80, 0x30, 0xf2, 0x0c,
- 0x6a, 0x42, 0xb7, 0x3b, 0xbe, 0xac, 0x01, 0xfa
- };
- struct ricb *ricb = &qdev->ricb;
- int status = 0;
- int i;
- u8 *hash_id = (u8 *) ricb->hash_cq_id;
-
- memset((void *)ricb, 0, sizeof(*ricb));
-
- ricb->base_cq = RSS_L4K;
- ricb->flags =
- (RSS_L6K | RSS_LI | RSS_LB | RSS_LM | RSS_RT4 | RSS_RT6);
- ricb->mask = cpu_to_le16((u16)(0x3ff));
-
- /*
- * Fill out the Indirection Table.
- */
- for (i = 0; i < 1024; i++)
- hash_id[i] = (i & (qdev->rss_ring_count - 1));
-
- memcpy((void *)&ricb->ipv6_hash_key[0], init_hash_seed, 40);
- memcpy((void *)&ricb->ipv4_hash_key[0], init_hash_seed, 16);
-
- status = ql_write_cfg(qdev, ricb, sizeof(*ricb), CFG_LR, 0);
- if (status) {
- netif_err(qdev, ifup, qdev->ndev, "Failed to load RICB.\n");
- return status;
- }
- return status;
-}
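-
-/*
- * The indirection-table fill above uses "i & (count - 1)", which spreads
- * hash buckets evenly only when the RSS ring count is a power of two;
- * the general form is a modulo. A standalone sketch (the sketch_* name
- * is illustrative):
- */
-static void sketch_fill_indir_tbl(u8 *tbl, int tbl_len, int ring_count)
-{
-	int i;
-
-	for (i = 0; i < tbl_len; i++)
-		tbl[i] = i % ring_count; /* == (i & (ring_count - 1))
-					  * when ring_count is 2^n */
-}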
-
-static int ql_clear_routing_entries(struct ql_adapter *qdev)
-{
- int i, status = 0;
-
- status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
- if (status)
- return status;
- /* Clear all the entries in the routing table. */
- for (i = 0; i < 16; i++) {
- status = ql_set_routing_reg(qdev, i, 0, 0);
- if (status) {
- netif_err(qdev, ifup, qdev->ndev,
- "Failed to init routing register for CAM packets.\n");
- break;
- }
- }
- ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
- return status;
-}
-
-/* Initialize the frame-to-queue routing. */
-static int ql_route_initialize(struct ql_adapter *qdev)
-{
- int status = 0;
-
- /* Clear all the entries in the routing table. */
- status = ql_clear_routing_entries(qdev);
- if (status)
- return status;
-
- status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
- if (status)
- return status;
-
- status = ql_set_routing_reg(qdev, RT_IDX_IP_CSUM_ERR_SLOT,
- RT_IDX_IP_CSUM_ERR, 1);
- if (status) {
- netif_err(qdev, ifup, qdev->ndev,
- "Failed to init routing register "
- "for IP CSUM error packets.\n");
- goto exit;
- }
- status = ql_set_routing_reg(qdev, RT_IDX_TCP_UDP_CSUM_ERR_SLOT,
- RT_IDX_TU_CSUM_ERR, 1);
- if (status) {
- netif_err(qdev, ifup, qdev->ndev,
- "Failed to init routing register "
- "for TCP/UDP CSUM error packets.\n");
- goto exit;
- }
- status = ql_set_routing_reg(qdev, RT_IDX_BCAST_SLOT, RT_IDX_BCAST, 1);
- if (status) {
- netif_err(qdev, ifup, qdev->ndev,
- "Failed to init routing register for broadcast packets.\n");
- goto exit;
- }
- /* If we have more than one inbound queue, then turn on RSS in the
- * routing block.
- */
- if (qdev->rss_ring_count > 1) {
- status = ql_set_routing_reg(qdev, RT_IDX_RSS_MATCH_SLOT,
- RT_IDX_RSS_MATCH, 1);
- if (status) {
- netif_err(qdev, ifup, qdev->ndev,
- "Failed to init routing register for MATCH RSS packets.\n");
- goto exit;
- }
- }
-
- status = ql_set_routing_reg(qdev, RT_IDX_CAM_HIT_SLOT,
- RT_IDX_CAM_HIT, 1);
- if (status)
- netif_err(qdev, ifup, qdev->ndev,
- "Failed to init routing register for CAM packets.\n");
-exit:
- ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
- return status;
-}
-
-int ql_cam_route_initialize(struct ql_adapter *qdev)
-{
- int status, set;
-
-	/* Check if the link is up and use that to
-	 * determine whether we are setting or clearing
- * the MAC address in the CAM.
- */
- set = ql_read32(qdev, STS);
- set &= qdev->port_link_up;
- status = ql_set_mac_addr(qdev, set);
- if (status) {
- netif_err(qdev, ifup, qdev->ndev, "Failed to init mac address.\n");
- return status;
- }
-
- status = ql_route_initialize(qdev);
- if (status)
- netif_err(qdev, ifup, qdev->ndev, "Failed to init routing table.\n");
-
- return status;
-}
-
-static int ql_adapter_initialize(struct ql_adapter *qdev)
-{
- u32 value, mask;
- int i;
- int status = 0;
-
- /*
- * Set up the System register to halt on errors.
- */
- value = SYS_EFE | SYS_FAE;
- mask = value << 16;
- ql_write32(qdev, SYS, mask | value);
-
- /* Set the default queue, and VLAN behavior. */
- value = NIC_RCV_CFG_DFQ;
- mask = NIC_RCV_CFG_DFQ_MASK;
- if (qdev->ndev->features & NETIF_F_HW_VLAN_CTAG_RX) {
- value |= NIC_RCV_CFG_RV;
- mask |= (NIC_RCV_CFG_RV << 16);
- }
- ql_write32(qdev, NIC_RCV_CFG, (mask | value));
-
- /* Set the MPI interrupt to enabled. */
- ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16) | INTR_MASK_PI);
-
- /* Enable the function, set pagesize, enable error checking. */
- value = FSC_FE | FSC_EPC_INBOUND | FSC_EPC_OUTBOUND |
- FSC_EC | FSC_VM_PAGE_4K;
- value |= SPLT_SETTING;
-
- /* Set/clear header splitting. */
- mask = FSC_VM_PAGESIZE_MASK |
- FSC_DBL_MASK | FSC_DBRST_MASK | (value << 16);
- ql_write32(qdev, FSC, mask | value);
-
- ql_write32(qdev, SPLT_HDR, SPLT_LEN);
-
-	/* Set RX packet routing to use the port/PCI function on which
-	 * the packet arrived, in addition to the usual frame routing.
- * This is helpful on bonding where both interfaces can have
- * the same MAC address.
- */
- ql_write32(qdev, RST_FO, RST_FO_RR_MASK | RST_FO_RR_RCV_FUNC_CQ);
- /* Reroute all packets to our Interface.
- * They may have been routed to MPI firmware
- * due to WOL.
- */
- value = ql_read32(qdev, MGMT_RCV_CFG);
- value &= ~MGMT_RCV_CFG_RM;
- mask = 0xffff0000;
-
- /* Sticky reg needs clearing due to WOL. */
- ql_write32(qdev, MGMT_RCV_CFG, mask);
- ql_write32(qdev, MGMT_RCV_CFG, mask | value);
-
-	/* Default WOL is enabled on Mezz cards */
- if (qdev->pdev->subsystem_device == 0x0068 ||
- qdev->pdev->subsystem_device == 0x0180)
- qdev->wol = WAKE_MAGIC;
-
- /* Start up the rx queues. */
- for (i = 0; i < qdev->rx_ring_count; i++) {
- status = ql_start_rx_ring(qdev, &qdev->rx_ring[i]);
- if (status) {
- netif_err(qdev, ifup, qdev->ndev,
- "Failed to start rx ring[%d].\n", i);
- return status;
- }
- }
-
- /* If there is more than one inbound completion queue
- * then download a RICB to configure RSS.
- */
- if (qdev->rss_ring_count > 1) {
- status = ql_start_rss(qdev);
- if (status) {
- netif_err(qdev, ifup, qdev->ndev, "Failed to start RSS.\n");
- return status;
- }
- }
-
- /* Start up the tx queues. */
- for (i = 0; i < qdev->tx_ring_count; i++) {
- status = ql_start_tx_ring(qdev, &qdev->tx_ring[i]);
- if (status) {
- netif_err(qdev, ifup, qdev->ndev,
- "Failed to start tx ring[%d].\n", i);
- return status;
- }
- }
-
- /* Initialize the port and set the max framesize. */
- status = qdev->nic_ops->port_initialize(qdev);
- if (status)
- netif_err(qdev, ifup, qdev->ndev, "Failed to start port.\n");
-
- /* Set up the MAC address and frame routing filter. */
- status = ql_cam_route_initialize(qdev);
- if (status) {
- netif_err(qdev, ifup, qdev->ndev,
- "Failed to init CAM/Routing tables.\n");
- return status;
- }
-
- /* Start NAPI for the RSS queues. */
- for (i = 0; i < qdev->rss_ring_count; i++)
- napi_enable(&qdev->rx_ring[i].napi);
-
- return status;
-}
-
-/* Issue soft reset to chip. */
-static int ql_adapter_reset(struct ql_adapter *qdev)
-{
- u32 value;
- int status = 0;
- unsigned long end_jiffies;
-
- /* Clear all the entries in the routing table. */
- status = ql_clear_routing_entries(qdev);
- if (status) {
- netif_err(qdev, ifup, qdev->ndev, "Failed to clear routing bits.\n");
- return status;
- }
-
-	/* If the recovery bit is set, skip the mailbox command and
-	 * clear the bit; otherwise we are in the normal reset process.
- */
- if (!test_bit(QL_ASIC_RECOVERY, &qdev->flags)) {
- /* Stop management traffic. */
- ql_mb_set_mgmnt_traffic_ctl(qdev, MB_SET_MPI_TFK_STOP);
-
- /* Wait for the NIC and MGMNT FIFOs to empty. */
- ql_wait_fifo_empty(qdev);
- } else
- clear_bit(QL_ASIC_RECOVERY, &qdev->flags);
-
- ql_write32(qdev, RST_FO, (RST_FO_FR << 16) | RST_FO_FR);
-
- end_jiffies = jiffies + usecs_to_jiffies(30);
- do {
- value = ql_read32(qdev, RST_FO);
- if ((value & RST_FO_FR) == 0)
- break;
- cpu_relax();
- } while (time_before(jiffies, end_jiffies));
-
- if (value & RST_FO_FR) {
- netif_err(qdev, ifdown, qdev->ndev,
- "ETIMEDOUT!!! errored out of resetting the chip!\n");
- status = -ETIMEDOUT;
- }
-
- /* Resume management traffic. */
- ql_mb_set_mgmnt_traffic_ctl(qdev, MB_SET_MPI_TFK_RESUME);
- return status;
-}
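-
-/*
- * The reset wait above is a bounded register poll: spin with cpu_relax()
- * until the hardware clears the bit or a jiffies deadline passes, then
- * decide between success and -ETIMEDOUT from a final read. A distilled
- * sketch (sketch_poll_clear() and the read_reg callback are
- * illustrative):
- */
-static int sketch_poll_clear(u32 (*read_reg)(void), u32 bit,
-			     unsigned long timeout_us)
-{
-	unsigned long end = jiffies + usecs_to_jiffies(timeout_us);
-
-	do {
-		if (!(read_reg() & bit))
-			return 0;
-		cpu_relax();
-	} while (time_before(jiffies, end));
-
-	return (read_reg() & bit) ? -ETIMEDOUT : 0;
-}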
-
-static void ql_display_dev_info(struct net_device *ndev)
-{
- struct ql_adapter *qdev = netdev_priv(ndev);
-
- netif_info(qdev, probe, qdev->ndev,
- "Function #%d, Port %d, NIC Roll %d, NIC Rev = %d, "
- "XG Roll = %d, XG Rev = %d.\n",
- qdev->func,
- qdev->port,
- qdev->chip_rev_id & 0x0000000f,
- qdev->chip_rev_id >> 4 & 0x0000000f,
- qdev->chip_rev_id >> 8 & 0x0000000f,
- qdev->chip_rev_id >> 12 & 0x0000000f);
- netif_info(qdev, probe, qdev->ndev,
- "MAC address %pM\n", ndev->dev_addr);
-}
-
-static int ql_wol(struct ql_adapter *qdev)
-{
- int status = 0;
- u32 wol = MB_WOL_DISABLE;
-
- /* The CAM is still intact after a reset, but if we
- * are doing WOL, then we may need to program the
- * routing regs. We would also need to issue the mailbox
- * commands to instruct the MPI what to do per the ethtool
- * settings.
- */
-
- if (qdev->wol & (WAKE_ARP | WAKE_MAGICSECURE | WAKE_PHY | WAKE_UCAST |
- WAKE_MCAST | WAKE_BCAST)) {
- netif_err(qdev, ifdown, qdev->ndev,
- "Unsupported WOL parameter. qdev->wol = 0x%x.\n",
- qdev->wol);
- return -EINVAL;
- }
-
- if (qdev->wol & WAKE_MAGIC) {
- status = ql_mb_wol_set_magic(qdev, 1);
- if (status) {
- netif_err(qdev, ifdown, qdev->ndev,
- "Failed to set magic packet on %s.\n",
- qdev->ndev->name);
- return status;
- } else
- netif_info(qdev, drv, qdev->ndev,
- "Enabled magic packet successfully on %s.\n",
- qdev->ndev->name);
-
- wol |= MB_WOL_MAGIC_PKT;
- }
-
- if (qdev->wol) {
- wol |= MB_WOL_MODE_ON;
- status = ql_mb_wol_mode(qdev, wol);
- netif_err(qdev, drv, qdev->ndev,
- "WOL %s (wol code 0x%x) on %s\n",
- (status == 0) ? "Successfully set" : "Failed",
- wol, qdev->ndev->name);
- }
-
- return status;
-}
-
-static void ql_cancel_all_work_sync(struct ql_adapter *qdev)
-{
-
- /* Don't kill the reset worker thread if we
- * are in the process of recovery.
- */
- if (test_bit(QL_ADAPTER_UP, &qdev->flags))
- cancel_delayed_work_sync(&qdev->asic_reset_work);
- cancel_delayed_work_sync(&qdev->mpi_reset_work);
- cancel_delayed_work_sync(&qdev->mpi_work);
- cancel_delayed_work_sync(&qdev->mpi_idc_work);
- cancel_delayed_work_sync(&qdev->mpi_core_to_log);
- cancel_delayed_work_sync(&qdev->mpi_port_cfg_work);
-}
-
-static int ql_adapter_down(struct ql_adapter *qdev)
-{
- int i, status = 0;
-
- ql_link_off(qdev);
-
- ql_cancel_all_work_sync(qdev);
-
- for (i = 0; i < qdev->rss_ring_count; i++)
- napi_disable(&qdev->rx_ring[i].napi);
-
- clear_bit(QL_ADAPTER_UP, &qdev->flags);
-
- ql_disable_interrupts(qdev);
-
- ql_tx_ring_clean(qdev);
-
-	/* Call netif_napi_del() from a common point.
- */
- for (i = 0; i < qdev->rss_ring_count; i++)
- netif_napi_del(&qdev->rx_ring[i].napi);
-
- status = ql_adapter_reset(qdev);
- if (status)
- netif_err(qdev, ifdown, qdev->ndev, "reset(func #%d) FAILED!\n",
- qdev->func);
- ql_free_rx_buffers(qdev);
-
- return status;
-}
-
-static int ql_adapter_up(struct ql_adapter *qdev)
-{
- int err = 0;
-
- err = ql_adapter_initialize(qdev);
- if (err) {
- netif_info(qdev, ifup, qdev->ndev, "Unable to initialize adapter.\n");
- goto err_init;
- }
- set_bit(QL_ADAPTER_UP, &qdev->flags);
- ql_alloc_rx_buffers(qdev);
- /* If the port is initialized and the
-	 * link is up, then turn on the carrier.
- */
- if ((ql_read32(qdev, STS) & qdev->port_init) &&
- (ql_read32(qdev, STS) & qdev->port_link_up))
- ql_link_on(qdev);
- /* Restore rx mode. */
- clear_bit(QL_ALLMULTI, &qdev->flags);
- clear_bit(QL_PROMISCUOUS, &qdev->flags);
- qlge_set_multicast_list(qdev->ndev);
-
- /* Restore vlan setting. */
- qlge_restore_vlan(qdev);
-
- ql_enable_interrupts(qdev);
- ql_enable_all_completion_interrupts(qdev);
- netif_tx_start_all_queues(qdev->ndev);
-
- return 0;
-err_init:
- ql_adapter_reset(qdev);
- return err;
-}
-
-static void ql_release_adapter_resources(struct ql_adapter *qdev)
-{
- ql_free_mem_resources(qdev);
- ql_free_irq(qdev);
-}
-
-static int ql_get_adapter_resources(struct ql_adapter *qdev)
-{
- int status = 0;
-
- if (ql_alloc_mem_resources(qdev)) {
- netif_err(qdev, ifup, qdev->ndev, "Unable to allocate memory.\n");
- return -ENOMEM;
- }
- status = ql_request_irq(qdev);
- return status;
-}
-
-static int qlge_close(struct net_device *ndev)
-{
- struct ql_adapter *qdev = netdev_priv(ndev);
-
-	/* If we hit the pci_channel_io_perm_failure
-	 * condition, then we have already
- * brought the adapter down.
- */
- if (test_bit(QL_EEH_FATAL, &qdev->flags)) {
- netif_err(qdev, drv, qdev->ndev, "EEH fatal did unload.\n");
- clear_bit(QL_EEH_FATAL, &qdev->flags);
- return 0;
- }
-
- /*
- * Wait for device to recover from a reset.
- * (Rarely happens, but possible.)
- */
- while (!test_bit(QL_ADAPTER_UP, &qdev->flags))
- msleep(1);
- ql_adapter_down(qdev);
- ql_release_adapter_resources(qdev);
- return 0;
-}
-
-static int ql_configure_rings(struct ql_adapter *qdev)
-{
- int i;
- struct rx_ring *rx_ring;
- struct tx_ring *tx_ring;
- int cpu_cnt = min(MAX_CPUS, (int)num_online_cpus());
- unsigned int lbq_buf_len = (qdev->ndev->mtu > 1500) ?
- LARGE_BUFFER_MAX_SIZE : LARGE_BUFFER_MIN_SIZE;
-
- qdev->lbq_buf_order = get_order(lbq_buf_len);
-
- /* In a perfect world we have one RSS ring for each CPU
-	 * and each has its own vector. To do that we ask for
- * cpu_cnt vectors. ql_enable_msix() will adjust the
- * vector count to what we actually get. We then
- * allocate an RSS ring for each.
- * Essentially, we are doing min(cpu_count, msix_vector_count).
- */
- qdev->intr_count = cpu_cnt;
- ql_enable_msix(qdev);
- /* Adjust the RSS ring count to the actual vector count. */
- qdev->rss_ring_count = qdev->intr_count;
- qdev->tx_ring_count = cpu_cnt;
- qdev->rx_ring_count = qdev->tx_ring_count + qdev->rss_ring_count;
-
- for (i = 0; i < qdev->tx_ring_count; i++) {
- tx_ring = &qdev->tx_ring[i];
- memset((void *)tx_ring, 0, sizeof(*tx_ring));
- tx_ring->qdev = qdev;
- tx_ring->wq_id = i;
- tx_ring->wq_len = qdev->tx_ring_size;
- tx_ring->wq_size =
- tx_ring->wq_len * sizeof(struct ob_mac_iocb_req);
-
- /*
-		 * The completion queue IDs for the tx rings start
- * immediately after the rss rings.
- */
- tx_ring->cq_id = qdev->rss_ring_count + i;
- }
-
- for (i = 0; i < qdev->rx_ring_count; i++) {
- rx_ring = &qdev->rx_ring[i];
- memset((void *)rx_ring, 0, sizeof(*rx_ring));
- rx_ring->qdev = qdev;
- rx_ring->cq_id = i;
- rx_ring->cpu = i % cpu_cnt; /* CPU to run handler on. */
- if (i < qdev->rss_ring_count) {
- /*
- * Inbound (RSS) queues.
- */
- rx_ring->cq_len = qdev->rx_ring_size;
- rx_ring->cq_size =
- rx_ring->cq_len * sizeof(struct ql_net_rsp_iocb);
- rx_ring->lbq_len = NUM_LARGE_BUFFERS;
- rx_ring->lbq_size =
- rx_ring->lbq_len * sizeof(__le64);
- rx_ring->lbq_buf_size = (u16)lbq_buf_len;
- rx_ring->sbq_len = NUM_SMALL_BUFFERS;
- rx_ring->sbq_size =
- rx_ring->sbq_len * sizeof(__le64);
- rx_ring->sbq_buf_size = SMALL_BUF_MAP_SIZE;
- rx_ring->type = RX_Q;
- } else {
- /*
- * Outbound queue handles outbound completions only.
- */
- /* outbound cq is same size as tx_ring it services. */
- rx_ring->cq_len = qdev->tx_ring_size;
- rx_ring->cq_size =
- rx_ring->cq_len * sizeof(struct ql_net_rsp_iocb);
- rx_ring->lbq_len = 0;
- rx_ring->lbq_size = 0;
- rx_ring->lbq_buf_size = 0;
- rx_ring->sbq_len = 0;
- rx_ring->sbq_size = 0;
- rx_ring->sbq_buf_size = 0;
- rx_ring->type = TX_Q;
- }
- }
- return 0;
-}
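-
-/*
- * The topology set up above: one RSS (inbound) completion queue per
- * granted interrupt vector, one TX work queue per CPU, and one outbound
- * completion queue per TX ring, with the outbound CQ IDs starting right
- * after the RSS ones. A sketch of that bookkeeping (sketch_* names are
- * illustrative):
- */
-struct sketch_topology {
-	int rss_rings;	/* == interrupt vectors actually granted */
-	int tx_rings;	/* == online CPU count */
-	int rx_rings;	/* RSS CQs plus outbound-completion CQs */
-};
-
-static void sketch_size_rings(struct sketch_topology *t, int cpus,
-			      int vectors_granted)
-{
-	t->rss_rings = vectors_granted;
-	t->tx_rings = cpus;
-	t->rx_rings = t->tx_rings + t->rss_rings;
-}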
-
-static int qlge_open(struct net_device *ndev)
-{
- int err = 0;
- struct ql_adapter *qdev = netdev_priv(ndev);
-
- err = ql_adapter_reset(qdev);
- if (err)
- return err;
-
- err = ql_configure_rings(qdev);
- if (err)
- return err;
-
- err = ql_get_adapter_resources(qdev);
- if (err)
- goto error_up;
-
- err = ql_adapter_up(qdev);
- if (err)
- goto error_up;
-
- return err;
-
-error_up:
- ql_release_adapter_resources(qdev);
- return err;
-}
-
-static int ql_change_rx_buffers(struct ql_adapter *qdev)
-{
- struct rx_ring *rx_ring;
- int i, status;
- u32 lbq_buf_len;
-
- /* Wait for an outstanding reset to complete. */
- if (!test_bit(QL_ADAPTER_UP, &qdev->flags)) {
- int i = 4;
-
- while (--i && !test_bit(QL_ADAPTER_UP, &qdev->flags)) {
- netif_err(qdev, ifup, qdev->ndev,
- "Waiting for adapter UP...\n");
- ssleep(1);
- }
-
- if (!i) {
- netif_err(qdev, ifup, qdev->ndev,
- "Timed out waiting for adapter UP\n");
- return -ETIMEDOUT;
- }
- }
-
- status = ql_adapter_down(qdev);
- if (status)
- goto error;
-
- /* Get the new rx buffer size. */
- lbq_buf_len = (qdev->ndev->mtu > 1500) ?
- LARGE_BUFFER_MAX_SIZE : LARGE_BUFFER_MIN_SIZE;
- qdev->lbq_buf_order = get_order(lbq_buf_len);
-
- for (i = 0; i < qdev->rss_ring_count; i++) {
- rx_ring = &qdev->rx_ring[i];
- /* Set the new size. */
- rx_ring->lbq_buf_size = lbq_buf_len;
- }
-
- status = ql_adapter_up(qdev);
- if (status)
- goto error;
-
- return status;
-error:
- netif_alert(qdev, ifup, qdev->ndev,
- "Driver up/down cycle failed, closing device.\n");
- set_bit(QL_ADAPTER_UP, &qdev->flags);
- dev_close(qdev->ndev);
- return status;
-}
-
-static int qlge_change_mtu(struct net_device *ndev, int new_mtu)
-{
- struct ql_adapter *qdev = netdev_priv(ndev);
- int status;
-
- if (ndev->mtu == 1500 && new_mtu == 9000) {
- netif_err(qdev, ifup, qdev->ndev, "Changing to jumbo MTU.\n");
- } else if (ndev->mtu == 9000 && new_mtu == 1500) {
- netif_err(qdev, ifup, qdev->ndev, "Changing to normal MTU.\n");
- } else
- return -EINVAL;
-
- queue_delayed_work(qdev->workqueue,
- &qdev->mpi_port_cfg_work, 3*HZ);
-
- ndev->mtu = new_mtu;
-
-	if (!netif_running(qdev->ndev))
-		return 0;
-
- status = ql_change_rx_buffers(qdev);
- if (status) {
- netif_err(qdev, ifup, qdev->ndev,
- "Changing MTU failed.\n");
- }
-
- return status;
-}
-
-static struct net_device_stats *qlge_get_stats(struct net_device *ndev)
-{
- struct ql_adapter *qdev = netdev_priv(ndev);
- struct rx_ring *rx_ring = &qdev->rx_ring[0];
- struct tx_ring *tx_ring = &qdev->tx_ring[0];
- unsigned long pkts, mcast, dropped, errors, bytes;
- int i;
-
- /* Get RX stats. */
- pkts = mcast = dropped = errors = bytes = 0;
- for (i = 0; i < qdev->rss_ring_count; i++, rx_ring++) {
- pkts += rx_ring->rx_packets;
- bytes += rx_ring->rx_bytes;
- dropped += rx_ring->rx_dropped;
- errors += rx_ring->rx_errors;
- mcast += rx_ring->rx_multicast;
- }
- ndev->stats.rx_packets = pkts;
- ndev->stats.rx_bytes = bytes;
- ndev->stats.rx_dropped = dropped;
- ndev->stats.rx_errors = errors;
- ndev->stats.multicast = mcast;
-
- /* Get TX stats. */
- pkts = errors = bytes = 0;
- for (i = 0; i < qdev->tx_ring_count; i++, tx_ring++) {
- pkts += tx_ring->tx_packets;
- bytes += tx_ring->tx_bytes;
- errors += tx_ring->tx_errors;
- }
- ndev->stats.tx_packets = pkts;
- ndev->stats.tx_bytes = bytes;
- ndev->stats.tx_errors = errors;
- return &ndev->stats;
-}
-
-static void qlge_set_multicast_list(struct net_device *ndev)
-{
- struct ql_adapter *qdev = netdev_priv(ndev);
- struct netdev_hw_addr *ha;
- int i, status;
-
- status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
- if (status)
- return;
- /*
- * Set or clear promiscuous mode if a
- * transition is taking place.
- */
- if (ndev->flags & IFF_PROMISC) {
- if (!test_bit(QL_PROMISCUOUS, &qdev->flags)) {
- if (ql_set_routing_reg
- (qdev, RT_IDX_PROMISCUOUS_SLOT, RT_IDX_VALID, 1)) {
- netif_err(qdev, hw, qdev->ndev,
- "Failed to set promiscuous mode.\n");
- } else {
- set_bit(QL_PROMISCUOUS, &qdev->flags);
- }
- }
- } else {
- if (test_bit(QL_PROMISCUOUS, &qdev->flags)) {
- if (ql_set_routing_reg
- (qdev, RT_IDX_PROMISCUOUS_SLOT, RT_IDX_VALID, 0)) {
- netif_err(qdev, hw, qdev->ndev,
- "Failed to clear promiscuous mode.\n");
- } else {
- clear_bit(QL_PROMISCUOUS, &qdev->flags);
- }
- }
- }
-
- /*
- * Set or clear all multicast mode if a
- * transition is taking place.
- */
- if ((ndev->flags & IFF_ALLMULTI) ||
- (netdev_mc_count(ndev) > MAX_MULTICAST_ENTRIES)) {
- if (!test_bit(QL_ALLMULTI, &qdev->flags)) {
- if (ql_set_routing_reg
- (qdev, RT_IDX_ALLMULTI_SLOT, RT_IDX_MCAST, 1)) {
- netif_err(qdev, hw, qdev->ndev,
- "Failed to set all-multi mode.\n");
- } else {
- set_bit(QL_ALLMULTI, &qdev->flags);
- }
- }
- } else {
- if (test_bit(QL_ALLMULTI, &qdev->flags)) {
- if (ql_set_routing_reg
- (qdev, RT_IDX_ALLMULTI_SLOT, RT_IDX_MCAST, 0)) {
- netif_err(qdev, hw, qdev->ndev,
- "Failed to clear all-multi mode.\n");
- } else {
- clear_bit(QL_ALLMULTI, &qdev->flags);
- }
- }
- }
-
- if (!netdev_mc_empty(ndev)) {
- status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
- if (status)
- goto exit;
- i = 0;
- netdev_for_each_mc_addr(ha, ndev) {
- if (ql_set_mac_addr_reg(qdev, (u8 *) ha->addr,
- MAC_ADDR_TYPE_MULTI_MAC, i)) {
- netif_err(qdev, hw, qdev->ndev,
-					  "Failed to load multicast address.\n");
- ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
- goto exit;
- }
- i++;
- }
- ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
- if (ql_set_routing_reg
- (qdev, RT_IDX_MCAST_MATCH_SLOT, RT_IDX_MCAST_MATCH, 1)) {
- netif_err(qdev, hw, qdev->ndev,
- "Failed to set multicast match mode.\n");
- } else {
- set_bit(QL_ALLMULTI, &qdev->flags);
- }
- }
-exit:
- ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
-}
-
-static int qlge_set_mac_address(struct net_device *ndev, void *p)
-{
- struct ql_adapter *qdev = netdev_priv(ndev);
- struct sockaddr *addr = p;
- int status;
-
- if (!is_valid_ether_addr(addr->sa_data))
- return -EADDRNOTAVAIL;
- memcpy(ndev->dev_addr, addr->sa_data, ndev->addr_len);
- /* Update local copy of current mac address. */
- memcpy(qdev->current_mac_addr, ndev->dev_addr, ndev->addr_len);
-
- status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
- if (status)
- return status;
- status = ql_set_mac_addr_reg(qdev, (u8 *) ndev->dev_addr,
- MAC_ADDR_TYPE_CAM_MAC, qdev->func * MAX_CQ);
- if (status)
- netif_err(qdev, hw, qdev->ndev, "Failed to load MAC address.\n");
- ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
- return status;
-}
-
-static void qlge_tx_timeout(struct net_device *ndev)
-{
- struct ql_adapter *qdev = netdev_priv(ndev);
- ql_queue_asic_error(qdev);
-}
-
-static void ql_asic_reset_work(struct work_struct *work)
-{
- struct ql_adapter *qdev =
- container_of(work, struct ql_adapter, asic_reset_work.work);
- int status;
- rtnl_lock();
- status = ql_adapter_down(qdev);
- if (status)
- goto error;
-
- status = ql_adapter_up(qdev);
- if (status)
- goto error;
-
- /* Restore rx mode. */
- clear_bit(QL_ALLMULTI, &qdev->flags);
- clear_bit(QL_PROMISCUOUS, &qdev->flags);
- qlge_set_multicast_list(qdev->ndev);
-
- rtnl_unlock();
- return;
-error:
- netif_alert(qdev, ifup, qdev->ndev,
- "Driver up/down cycle failed, closing device\n");
-
- set_bit(QL_ADAPTER_UP, &qdev->flags);
- dev_close(qdev->ndev);
- rtnl_unlock();
-}
-
-static const struct nic_operations qla8012_nic_ops = {
- .get_flash = ql_get_8012_flash_params,
- .port_initialize = ql_8012_port_initialize,
-};
-
-static const struct nic_operations qla8000_nic_ops = {
- .get_flash = ql_get_8000_flash_params,
- .port_initialize = ql_8000_port_initialize,
-};
-
-/* Find the PCIe function number for the other NIC
- * on this chip. Since both NIC functions share a
- * common firmware we have the lowest enabled function
- * do any common work. Examples would be resetting
- * after a fatal firmware error, or doing a firmware
- * coredump.
- */
-static int ql_get_alt_pcie_func(struct ql_adapter *qdev)
-{
- int status = 0;
- u32 temp;
- u32 nic_func1, nic_func2;
-
- status = ql_read_mpi_reg(qdev, MPI_TEST_FUNC_PORT_CFG,
- &temp);
- if (status)
- return status;
-
- nic_func1 = ((temp >> MPI_TEST_NIC1_FUNC_SHIFT) &
- MPI_TEST_NIC_FUNC_MASK);
- nic_func2 = ((temp >> MPI_TEST_NIC2_FUNC_SHIFT) &
- MPI_TEST_NIC_FUNC_MASK);
-
- if (qdev->func == nic_func1)
- qdev->alt_func = nic_func2;
- else if (qdev->func == nic_func2)
- qdev->alt_func = nic_func1;
- else
- status = -EIO;
-
- return status;
-}
-
-static int ql_get_board_info(struct ql_adapter *qdev)
-{
- int status;
- qdev->func =
- (ql_read32(qdev, STS) & STS_FUNC_ID_MASK) >> STS_FUNC_ID_SHIFT;
- if (qdev->func > 3)
- return -EIO;
-
- status = ql_get_alt_pcie_func(qdev);
- if (status)
- return status;
-
- qdev->port = (qdev->func < qdev->alt_func) ? 0 : 1;
- if (qdev->port) {
- qdev->xg_sem_mask = SEM_XGMAC1_MASK;
- qdev->port_link_up = STS_PL1;
- qdev->port_init = STS_PI1;
- qdev->mailbox_in = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC2_MBI;
- qdev->mailbox_out = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC2_MBO;
- } else {
- qdev->xg_sem_mask = SEM_XGMAC0_MASK;
- qdev->port_link_up = STS_PL0;
- qdev->port_init = STS_PI0;
- qdev->mailbox_in = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC0_MBI;
- qdev->mailbox_out = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC0_MBO;
- }
- qdev->chip_rev_id = ql_read32(qdev, REV_ID);
- qdev->device_id = qdev->pdev->device;
- if (qdev->device_id == QLGE_DEVICE_ID_8012)
- qdev->nic_ops = &qla8012_nic_ops;
- else if (qdev->device_id == QLGE_DEVICE_ID_8000)
- qdev->nic_ops = &qla8000_nic_ops;
- return status;
-}
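-
-/*
- * ql_get_board_info() above derives everything from register bitfields:
- * the PCI function comes from a masked, shifted STS read, and the port
- * number falls out of comparing our function with the partner NIC's. A
- * sketch of the extraction idiom (the field layout below is
- * illustrative, not the real STS format):
- */
-#define SKETCH_FUNC_MASK	0x000000c0
-#define SKETCH_FUNC_SHIFT	6
-
-static inline u32 sketch_sts_func(u32 sts)
-{
-	return (sts & SKETCH_FUNC_MASK) >> SKETCH_FUNC_SHIFT;
-}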
-
-static void ql_release_all(struct pci_dev *pdev)
-{
- struct net_device *ndev = pci_get_drvdata(pdev);
- struct ql_adapter *qdev = netdev_priv(ndev);
-
- if (qdev->workqueue) {
- destroy_workqueue(qdev->workqueue);
- qdev->workqueue = NULL;
- }
-
- if (qdev->reg_base)
- iounmap(qdev->reg_base);
- if (qdev->doorbell_area)
- iounmap(qdev->doorbell_area);
- vfree(qdev->mpi_coredump);
- pci_release_regions(pdev);
-}
-
-static int ql_init_device(struct pci_dev *pdev, struct net_device *ndev,
- int cards_found)
-{
- struct ql_adapter *qdev = netdev_priv(ndev);
- int err = 0;
-
- memset((void *)qdev, 0, sizeof(*qdev));
- err = pci_enable_device(pdev);
- if (err) {
- dev_err(&pdev->dev, "PCI device enable failed.\n");
- return err;
- }
-
- qdev->ndev = ndev;
- qdev->pdev = pdev;
- pci_set_drvdata(pdev, ndev);
-
- /* Set PCIe read request size */
- err = pcie_set_readrq(pdev, 4096);
- if (err) {
- dev_err(&pdev->dev, "Set readrq failed.\n");
- goto err_out1;
- }
-
- err = pci_request_regions(pdev, DRV_NAME);
- if (err) {
- dev_err(&pdev->dev, "PCI region request failed.\n");
-		goto err_out1;
- }
-
- pci_set_master(pdev);
- if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
- set_bit(QL_DMA64, &qdev->flags);
- err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
- } else {
- err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
- if (!err)
- err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
- }
-
- if (err) {
- dev_err(&pdev->dev, "No usable DMA configuration.\n");
- goto err_out2;
- }
-
- /* Set PCIe reset type for EEH to fundamental. */
- pdev->needs_freset = 1;
- pci_save_state(pdev);
- qdev->reg_base =
- ioremap_nocache(pci_resource_start(pdev, 1),
- pci_resource_len(pdev, 1));
- if (!qdev->reg_base) {
- dev_err(&pdev->dev, "Register mapping failed.\n");
- err = -ENOMEM;
- goto err_out2;
- }
-
- qdev->doorbell_area_size = pci_resource_len(pdev, 3);
- qdev->doorbell_area =
- ioremap_nocache(pci_resource_start(pdev, 3),
- pci_resource_len(pdev, 3));
- if (!qdev->doorbell_area) {
- dev_err(&pdev->dev, "Doorbell register mapping failed.\n");
- err = -ENOMEM;
- goto err_out2;
- }
-
- err = ql_get_board_info(qdev);
- if (err) {
- dev_err(&pdev->dev, "Register access failed.\n");
- err = -EIO;
- goto err_out2;
- }
- qdev->msg_enable = netif_msg_init(debug, default_msg);
- spin_lock_init(&qdev->hw_lock);
- spin_lock_init(&qdev->stats_lock);
-
- if (qlge_mpi_coredump) {
- qdev->mpi_coredump =
- vmalloc(sizeof(struct ql_mpi_coredump));
- if (qdev->mpi_coredump == NULL) {
- err = -ENOMEM;
- goto err_out2;
- }
- if (qlge_force_coredump)
- set_bit(QL_FRC_COREDUMP, &qdev->flags);
- }
- /* make sure the EEPROM is good */
- err = qdev->nic_ops->get_flash(qdev);
- if (err) {
- dev_err(&pdev->dev, "Invalid FLASH.\n");
- goto err_out2;
- }
-
- /* Keep local copy of current mac address. */
- memcpy(qdev->current_mac_addr, ndev->dev_addr, ndev->addr_len);
-
- /* Set up the default ring sizes. */
- qdev->tx_ring_size = NUM_TX_RING_ENTRIES;
- qdev->rx_ring_size = NUM_RX_RING_ENTRIES;
-
- /* Set up the coalescing parameters. */
- qdev->rx_coalesce_usecs = DFLT_COALESCE_WAIT;
- qdev->tx_coalesce_usecs = DFLT_COALESCE_WAIT;
- qdev->rx_max_coalesced_frames = DFLT_INTER_FRAME_WAIT;
- qdev->tx_max_coalesced_frames = DFLT_INTER_FRAME_WAIT;
-
- /*
- * Set up the operating parameters.
- */
- qdev->workqueue = alloc_ordered_workqueue("%s", WQ_MEM_RECLAIM,
- ndev->name);
- if (!qdev->workqueue) {
- err = -ENOMEM;
- goto err_out2;
- }
-
- INIT_DELAYED_WORK(&qdev->asic_reset_work, ql_asic_reset_work);
- INIT_DELAYED_WORK(&qdev->mpi_reset_work, ql_mpi_reset_work);
- INIT_DELAYED_WORK(&qdev->mpi_work, ql_mpi_work);
- INIT_DELAYED_WORK(&qdev->mpi_port_cfg_work, ql_mpi_port_cfg_work);
- INIT_DELAYED_WORK(&qdev->mpi_idc_work, ql_mpi_idc_work);
- INIT_DELAYED_WORK(&qdev->mpi_core_to_log, ql_mpi_core_to_log);
- init_completion(&qdev->ide_completion);
- mutex_init(&qdev->mpi_mutex);
-
- if (!cards_found) {
- dev_info(&pdev->dev, "%s\n", DRV_STRING);
- dev_info(&pdev->dev, "Driver name: %s, Version: %s.\n",
- DRV_NAME, DRV_VERSION);
- }
- return 0;
-err_out2:
- ql_release_all(pdev);
-err_out1:
- pci_disable_device(pdev);
- return err;
-}
-
-static const struct net_device_ops qlge_netdev_ops = {
- .ndo_open = qlge_open,
- .ndo_stop = qlge_close,
- .ndo_start_xmit = qlge_send,
- .ndo_change_mtu = qlge_change_mtu,
- .ndo_get_stats = qlge_get_stats,
- .ndo_set_rx_mode = qlge_set_multicast_list,
- .ndo_set_mac_address = qlge_set_mac_address,
- .ndo_validate_addr = eth_validate_addr,
- .ndo_tx_timeout = qlge_tx_timeout,
- .ndo_set_features = qlge_set_features,
- .ndo_vlan_rx_add_vid = qlge_vlan_rx_add_vid,
- .ndo_vlan_rx_kill_vid = qlge_vlan_rx_kill_vid,
-};
-
-static void ql_timer(struct timer_list *t)
-{
- struct ql_adapter *qdev = from_timer(qdev, t, timer);
- u32 var = 0;
-
- var = ql_read32(qdev, STS);
- if (pci_channel_offline(qdev->pdev)) {
- netif_err(qdev, ifup, qdev->ndev, "EEH STS = 0x%.08x.\n", var);
- return;
- }
-
- mod_timer(&qdev->timer, jiffies + (5*HZ));
-}
-
-static int qlge_probe(struct pci_dev *pdev,
- const struct pci_device_id *pci_entry)
-{
- struct net_device *ndev = NULL;
- struct ql_adapter *qdev = NULL;
-	static int cards_found;
- int err = 0;
-
- ndev = alloc_etherdev_mq(sizeof(struct ql_adapter),
- min(MAX_CPUS, netif_get_num_default_rss_queues()));
- if (!ndev)
- return -ENOMEM;
-
- err = ql_init_device(pdev, ndev, cards_found);
- if (err < 0) {
- free_netdev(ndev);
- return err;
- }
-
- qdev = netdev_priv(ndev);
- SET_NETDEV_DEV(ndev, &pdev->dev);
- ndev->hw_features = NETIF_F_SG |
- NETIF_F_IP_CSUM |
- NETIF_F_TSO |
- NETIF_F_TSO_ECN |
- NETIF_F_HW_VLAN_CTAG_TX |
- NETIF_F_HW_VLAN_CTAG_RX |
- NETIF_F_HW_VLAN_CTAG_FILTER |
- NETIF_F_RXCSUM;
- ndev->features = ndev->hw_features;
- ndev->vlan_features = ndev->hw_features;
-	/* vlan gets same features (except the vlan offload and filter bits) */
- ndev->vlan_features &= ~(NETIF_F_HW_VLAN_CTAG_FILTER |
- NETIF_F_HW_VLAN_CTAG_TX |
- NETIF_F_HW_VLAN_CTAG_RX);
-
- if (test_bit(QL_DMA64, &qdev->flags))
- ndev->features |= NETIF_F_HIGHDMA;
-
- /*
- * Set up net_device structure.
- */
- ndev->tx_queue_len = qdev->tx_ring_size;
- ndev->irq = pdev->irq;
-
- ndev->netdev_ops = &qlge_netdev_ops;
- ndev->ethtool_ops = &qlge_ethtool_ops;
- ndev->watchdog_timeo = 10 * HZ;
-
- /* MTU range: this driver only supports 1500 or 9000, so this only
- * filters out values above or below, and we'll rely on
- * qlge_change_mtu to make sure only 1500 or 9000 are allowed
- */
- ndev->min_mtu = ETH_DATA_LEN;
- ndev->max_mtu = 9000;
-
- err = register_netdev(ndev);
- if (err) {
- dev_err(&pdev->dev, "net device registration failed.\n");
- ql_release_all(pdev);
- pci_disable_device(pdev);
- free_netdev(ndev);
- return err;
- }
- /* Start up the timer to trigger EEH if
- * the bus goes dead
- */
- timer_setup(&qdev->timer, ql_timer, TIMER_DEFERRABLE);
- mod_timer(&qdev->timer, jiffies + (5*HZ));
- ql_link_off(qdev);
- ql_display_dev_info(ndev);
- atomic_set(&qdev->lb_count, 0);
- cards_found++;
- return 0;
-}
-
-netdev_tx_t ql_lb_send(struct sk_buff *skb, struct net_device *ndev)
-{
- return qlge_send(skb, ndev);
-}
-
-int ql_clean_lb_rx_ring(struct rx_ring *rx_ring, int budget)
-{
- return ql_clean_inbound_rx_ring(rx_ring, budget);
-}
-
-static void qlge_remove(struct pci_dev *pdev)
-{
- struct net_device *ndev = pci_get_drvdata(pdev);
- struct ql_adapter *qdev = netdev_priv(ndev);
- del_timer_sync(&qdev->timer);
- ql_cancel_all_work_sync(qdev);
- unregister_netdev(ndev);
- ql_release_all(pdev);
- pci_disable_device(pdev);
- free_netdev(ndev);
-}
-
-/* Clean up resources without touching hardware. */
-static void ql_eeh_close(struct net_device *ndev)
-{
- int i;
- struct ql_adapter *qdev = netdev_priv(ndev);
-
- if (netif_carrier_ok(ndev)) {
- netif_carrier_off(ndev);
- netif_stop_queue(ndev);
- }
-
- /* Cancel all outstanding work */
- ql_cancel_all_work_sync(qdev);
-
- for (i = 0; i < qdev->rss_ring_count; i++)
- netif_napi_del(&qdev->rx_ring[i].napi);
-
- clear_bit(QL_ADAPTER_UP, &qdev->flags);
- ql_tx_ring_clean(qdev);
- ql_free_rx_buffers(qdev);
- ql_release_adapter_resources(qdev);
-}
-
-/*
- * This callback is called by the PCI subsystem whenever
- * a PCI bus error is detected.
- */
-static pci_ers_result_t qlge_io_error_detected(struct pci_dev *pdev,
- enum pci_channel_state state)
-{
- struct net_device *ndev = pci_get_drvdata(pdev);
- struct ql_adapter *qdev = netdev_priv(ndev);
-
- switch (state) {
- case pci_channel_io_normal:
- return PCI_ERS_RESULT_CAN_RECOVER;
- case pci_channel_io_frozen:
- netif_device_detach(ndev);
- del_timer_sync(&qdev->timer);
- if (netif_running(ndev))
- ql_eeh_close(ndev);
- pci_disable_device(pdev);
- return PCI_ERS_RESULT_NEED_RESET;
- case pci_channel_io_perm_failure:
- dev_err(&pdev->dev,
- "%s: pci_channel_io_perm_failure.\n", __func__);
- del_timer_sync(&qdev->timer);
- ql_eeh_close(ndev);
- set_bit(QL_EEH_FATAL, &qdev->flags);
- return PCI_ERS_RESULT_DISCONNECT;
- }
-
- /* Request a slot reset. */
- return PCI_ERS_RESULT_NEED_RESET;
-}
-
-/*
- * This callback is called after the PCI bus has been reset.
- * Basically, this tries to restart the card from scratch.
- * This is a shortened version of the device probe/discovery code;
- * it resembles the first half of the qlge_probe() routine.
- */
-static pci_ers_result_t qlge_io_slot_reset(struct pci_dev *pdev)
-{
- struct net_device *ndev = pci_get_drvdata(pdev);
- struct ql_adapter *qdev = netdev_priv(ndev);
-
- pdev->error_state = pci_channel_io_normal;
-
- pci_restore_state(pdev);
- if (pci_enable_device(pdev)) {
- netif_err(qdev, ifup, qdev->ndev,
- "Cannot re-enable PCI device after reset.\n");
- return PCI_ERS_RESULT_DISCONNECT;
- }
- pci_set_master(pdev);
-
- if (ql_adapter_reset(qdev)) {
- netif_err(qdev, drv, qdev->ndev, "reset FAILED!\n");
- set_bit(QL_EEH_FATAL, &qdev->flags);
- return PCI_ERS_RESULT_DISCONNECT;
- }
-
- return PCI_ERS_RESULT_RECOVERED;
-}
-
-static void qlge_io_resume(struct pci_dev *pdev)
-{
- struct net_device *ndev = pci_get_drvdata(pdev);
- struct ql_adapter *qdev = netdev_priv(ndev);
- int err = 0;
-
- if (netif_running(ndev)) {
- err = qlge_open(ndev);
- if (err) {
- netif_err(qdev, ifup, qdev->ndev,
- "Device initialization failed after reset.\n");
- return;
- }
- } else {
- netif_err(qdev, ifup, qdev->ndev,
- "Device was not running prior to EEH.\n");
- }
- mod_timer(&qdev->timer, jiffies + (5*HZ));
- netif_device_attach(ndev);
-}
-
-static const struct pci_error_handlers qlge_err_handler = {
- .error_detected = qlge_io_error_detected,
- .slot_reset = qlge_io_slot_reset,
- .resume = qlge_io_resume,
-};
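Taken together, these three callbacks implement the stock PCI error-recovery contract; the core (AER/EEH) drives them in a fixed order. A comment-only sketch of the sequence as this driver plays it:

	/*
	 * 1. error_detected(pdev, state)
	 *      pci_channel_io_normal       -> CAN_RECOVER (keep the device up)
	 *      pci_channel_io_frozen       -> detach, close, NEED_RESET
	 *      pci_channel_io_perm_failure -> DISCONNECT (device is lost)
	 * 2. slot_reset(pdev)  -> re-enable PCI, reset the HW;
	 *                         RECOVERED on success, else DISCONNECT
	 * 3. resume(pdev)      -> reopen the interface, re-arm the heartbeat
	 */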
-
-static int qlge_suspend(struct pci_dev *pdev, pm_message_t state)
-{
- struct net_device *ndev = pci_get_drvdata(pdev);
- struct ql_adapter *qdev = netdev_priv(ndev);
- int err;
-
- netif_device_detach(ndev);
- del_timer_sync(&qdev->timer);
-
- if (netif_running(ndev)) {
- err = ql_adapter_down(qdev);
- if (err)
- return err;
- }
-
- ql_wol(qdev);
- err = pci_save_state(pdev);
- if (err)
- return err;
-
- pci_disable_device(pdev);
-
- pci_set_power_state(pdev, pci_choose_state(pdev, state));
-
- return 0;
-}
-
-#ifdef CONFIG_PM
-static int qlge_resume(struct pci_dev *pdev)
-{
- struct net_device *ndev = pci_get_drvdata(pdev);
- struct ql_adapter *qdev = netdev_priv(ndev);
- int err;
-
- pci_set_power_state(pdev, PCI_D0);
- pci_restore_state(pdev);
- err = pci_enable_device(pdev);
- if (err) {
- netif_err(qdev, ifup, qdev->ndev, "Cannot enable PCI device from suspend\n");
- return err;
- }
- pci_set_master(pdev);
-
- pci_enable_wake(pdev, PCI_D3hot, 0);
- pci_enable_wake(pdev, PCI_D3cold, 0);
-
- if (netif_running(ndev)) {
- err = ql_adapter_up(qdev);
- if (err)
- return err;
- }
-
- mod_timer(&qdev->timer, jiffies + (5*HZ));
- netif_device_attach(ndev);
-
- return 0;
-}
-#endif /* CONFIG_PM */
-
-static void qlge_shutdown(struct pci_dev *pdev)
-{
- qlge_suspend(pdev, PMSG_SUSPEND);
-}
-
-static struct pci_driver qlge_driver = {
- .name = DRV_NAME,
- .id_table = qlge_pci_tbl,
- .probe = qlge_probe,
- .remove = qlge_remove,
-#ifdef CONFIG_PM
- .suspend = qlge_suspend,
- .resume = qlge_resume,
-#endif
- .shutdown = qlge_shutdown,
- .err_handler = &qlge_err_handler
-};
-
-module_pci_driver(qlge_driver);
diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_mpi.c b/drivers/net/ethernet/qlogic/qlge/qlge_mpi.c
deleted file mode 100644
index 957c72985a06..000000000000
--- a/drivers/net/ethernet/qlogic/qlge/qlge_mpi.c
+++ /dev/null
@@ -1,1285 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-#include "qlge.h"
-
-int ql_unpause_mpi_risc(struct ql_adapter *qdev)
-{
- u32 tmp;
-
- /* Un-pause the RISC */
- tmp = ql_read32(qdev, CSR);
- if (!(tmp & CSR_RP))
- return -EIO;
-
- ql_write32(qdev, CSR, CSR_CMD_CLR_PAUSE);
- return 0;
-}
-
-int ql_pause_mpi_risc(struct ql_adapter *qdev)
-{
- u32 tmp;
- int count = UDELAY_COUNT;
-
- /* Pause the RISC */
- ql_write32(qdev, CSR, CSR_CMD_SET_PAUSE);
- do {
- tmp = ql_read32(qdev, CSR);
- if (tmp & CSR_RP)
- break;
- mdelay(UDELAY_DELAY);
- count--;
- } while (count);
- return (count == 0) ? -ETIMEDOUT : 0;
-}
-
-int ql_hard_reset_mpi_risc(struct ql_adapter *qdev)
-{
- u32 tmp;
- int count = UDELAY_COUNT;
-
- /* Reset the RISC */
- ql_write32(qdev, CSR, CSR_CMD_SET_RST);
- do {
- tmp = ql_read32(qdev, CSR);
- if (tmp & CSR_RR) {
- ql_write32(qdev, CSR, CSR_CMD_CLR_RST);
- break;
- }
- mdelay(UDELAY_DELAY);
- count--;
- } while (count);
- return (count == 0) ? -ETIMEDOUT : 0;
-}
-
-int ql_read_mpi_reg(struct ql_adapter *qdev, u32 reg, u32 *data)
-{
- int status;
- /* wait for reg to come ready */
- status = ql_wait_reg_rdy(qdev, PROC_ADDR, PROC_ADDR_RDY, PROC_ADDR_ERR);
- if (status)
- goto exit;
- /* set up for reg read */
- ql_write32(qdev, PROC_ADDR, reg | PROC_ADDR_R);
- /* wait for reg to come ready */
- status = ql_wait_reg_rdy(qdev, PROC_ADDR, PROC_ADDR_RDY, PROC_ADDR_ERR);
- if (status)
- goto exit;
- /* get the data */
- *data = ql_read32(qdev, PROC_DATA);
-exit:
- return status;
-}
-
-int ql_write_mpi_reg(struct ql_adapter *qdev, u32 reg, u32 data)
-{
- int status = 0;
- /* wait for reg to come ready */
- status = ql_wait_reg_rdy(qdev, PROC_ADDR, PROC_ADDR_RDY, PROC_ADDR_ERR);
- if (status)
- goto exit;
- /* write the data to the data reg */
- ql_write32(qdev, PROC_DATA, data);
- /* trigger the write */
- ql_write32(qdev, PROC_ADDR, reg);
- /* wait for reg to come ready */
- status = ql_wait_reg_rdy(qdev, PROC_ADDR, PROC_ADDR_RDY, PROC_ADDR_ERR);
- if (status)
- goto exit;
-exit:
- return status;
-}
-
-int ql_soft_reset_mpi_risc(struct ql_adapter *qdev)
-{
- int status;
- status = ql_write_mpi_reg(qdev, 0x00001010, 1);
- return status;
-}
-
-/* Determine if we are in charge of the firmware. We are
- * if we are the lower of the 2 NIC PCIe functions, or if
- * we are the higher function and the lower function
- * is not enabled.
- */
-int ql_own_firmware(struct ql_adapter *qdev)
-{
- u32 temp;
-
- /* If we are the lower of the 2 NIC functions
- * on the chip then we are responsible for
- * core dump and firmware reset after an error.
- */
- if (qdev->func < qdev->alt_func)
- return 1;
-
- /* If we are the higher of the 2 NIC functions
- * on the chip and the lower function is not
- * enabled, then we are responsible for
- * core dump and firmware reset after an error.
- */
- temp = ql_read32(qdev, STS);
- if (!(temp & (1 << (8 + qdev->alt_func))))
- return 1;
-
- return 0;
-
-}
-
-static int ql_get_mb_sts(struct ql_adapter *qdev, struct mbox_params *mbcp)
-{
- int i, status;
-
- status = ql_sem_spinlock(qdev, SEM_PROC_REG_MASK);
- if (status)
- return -EBUSY;
- for (i = 0; i < mbcp->out_count; i++) {
- status =
- ql_read_mpi_reg(qdev, qdev->mailbox_out + i,
- &mbcp->mbox_out[i]);
- if (status) {
- netif_err(qdev, drv, qdev->ndev, "Failed mailbox read.\n");
- break;
- }
- }
- ql_sem_unlock(qdev, SEM_PROC_REG_MASK); /* does flush too */
- return status;
-}
-
-/* Wait for a single mailbox command to complete.
- * Returns zero on success.
- */
-static int ql_wait_mbx_cmd_cmplt(struct ql_adapter *qdev)
-{
- int count = 100;
- u32 value;
-
- do {
- value = ql_read32(qdev, STS);
- if (value & STS_PI)
- return 0;
- mdelay(UDELAY_DELAY); /* 100ms */
- } while (--count);
- return -ETIMEDOUT;
-}
-
-/* Execute a single mailbox command.
- * Caller must hold PROC_ADDR semaphore.
- */
-static int ql_exec_mb_cmd(struct ql_adapter *qdev, struct mbox_params *mbcp)
-{
- int i, status;
-
- /*
- * Make sure there's nothing pending.
- * This shouldn't happen.
- */
- if (ql_read32(qdev, CSR) & CSR_HRI)
- return -EIO;
-
- status = ql_sem_spinlock(qdev, SEM_PROC_REG_MASK);
- if (status)
- return status;
-
- /*
- * Fill the outbound mailboxes.
- */
- for (i = 0; i < mbcp->in_count; i++) {
- status = ql_write_mpi_reg(qdev, qdev->mailbox_in + i,
- mbcp->mbox_in[i]);
- if (status)
- goto end;
- }
- /*
- * Wake up the MPI firmware.
- */
- ql_write32(qdev, CSR, CSR_CMD_SET_H2R_INT);
-end:
- ql_sem_unlock(qdev, SEM_PROC_REG_MASK);
- return status;
-}
-
-/* We are being asked by firmware to accept
- * a change to the port. This is only
- * a change to max frame sizes (Tx/Rx), pause
- * parameters, or loopback mode. We wake up a worker
- * to handle processing this since a mailbox command
- * will need to be sent to ACK the request.
- */
-static int ql_idc_req_aen(struct ql_adapter *qdev)
-{
- int status;
- struct mbox_params *mbcp = &qdev->idc_mbc;
-
- netif_err(qdev, drv, qdev->ndev, "Enter!\n");
- /* Get the status data and start up a thread to
- * handle the request.
- */
- mbcp->out_count = 4;
- status = ql_get_mb_sts(qdev, mbcp);
- if (status) {
- netif_err(qdev, drv, qdev->ndev,
- "Could not read MPI, resetting ASIC!\n");
- ql_queue_asic_error(qdev);
- } else {
- /* Begin polled mode early so
- * we don't get another interrupt
- * when we leave mpi_worker.
- */
- ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16));
- queue_delayed_work(qdev->workqueue, &qdev->mpi_idc_work, 0);
- }
- return status;
-}
-
-/* Process an inter-device event completion.
- * If good, signal the caller's completion.
- */
-static int ql_idc_cmplt_aen(struct ql_adapter *qdev)
-{
- int status;
- struct mbox_params *mbcp = &qdev->idc_mbc;
- mbcp->out_count = 4;
- status = ql_get_mb_sts(qdev, mbcp);
- if (status) {
- netif_err(qdev, drv, qdev->ndev,
- "Could not read MPI, resetting RISC!\n");
- ql_queue_fw_error(qdev);
- } else
- /* Wake up the sleeping mpi_idc_work thread that is
- * waiting for this event.
- */
- complete(&qdev->ide_completion);
-
- return status;
-}
-
-static void ql_link_up(struct ql_adapter *qdev, struct mbox_params *mbcp)
-{
- int status;
- mbcp->out_count = 2;
-
- status = ql_get_mb_sts(qdev, mbcp);
- if (status) {
- netif_err(qdev, drv, qdev->ndev,
- "%s: Could not get mailbox status.\n", __func__);
- return;
- }
-
- qdev->link_status = mbcp->mbox_out[1];
- netif_err(qdev, drv, qdev->ndev, "Link Up.\n");
-
- /* If we're coming back from an IDC event
- * then set up the CAM and frame routing.
- */
- if (test_bit(QL_CAM_RT_SET, &qdev->flags)) {
- status = ql_cam_route_initialize(qdev);
- if (status) {
- netif_err(qdev, ifup, qdev->ndev,
- "Failed to init CAM/Routing tables.\n");
- return;
- } else
- clear_bit(QL_CAM_RT_SET, &qdev->flags);
- }
-
- /* Queue up a worker to check the frame
- * size information, and fix it if it's not
- * to our liking.
- */
- if (!test_bit(QL_PORT_CFG, &qdev->flags)) {
- netif_err(qdev, drv, qdev->ndev, "Queue Port Config Worker!\n");
- set_bit(QL_PORT_CFG, &qdev->flags);
- /* Begin polled mode early so
- * we don't get another interrupt
- * when we leave mpi_worker dpc.
- */
- ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16));
- queue_delayed_work(qdev->workqueue,
- &qdev->mpi_port_cfg_work, 0);
- }
-
- ql_link_on(qdev);
-}
-
-static void ql_link_down(struct ql_adapter *qdev, struct mbox_params *mbcp)
-{
- int status;
-
- mbcp->out_count = 3;
-
- status = ql_get_mb_sts(qdev, mbcp);
- if (status)
- netif_err(qdev, drv, qdev->ndev, "Link down AEN broken!\n");
-
- ql_link_off(qdev);
-}
-
-static int ql_sfp_in(struct ql_adapter *qdev, struct mbox_params *mbcp)
-{
- int status;
-
- mbcp->out_count = 5;
-
- status = ql_get_mb_sts(qdev, mbcp);
- if (status)
- netif_err(qdev, drv, qdev->ndev, "SFP in AEN broken!\n");
- else
- netif_err(qdev, drv, qdev->ndev, "SFP insertion detected.\n");
-
- return status;
-}
-
-static int ql_sfp_out(struct ql_adapter *qdev, struct mbox_params *mbcp)
-{
- int status;
-
- mbcp->out_count = 1;
-
- status = ql_get_mb_sts(qdev, mbcp);
- if (status)
- netif_err(qdev, drv, qdev->ndev, "SFP out AEN broken!\n");
- else
- netif_err(qdev, drv, qdev->ndev, "SFP removal detected.\n");
-
- return status;
-}
-
-static int ql_aen_lost(struct ql_adapter *qdev, struct mbox_params *mbcp)
-{
- int status;
-
- mbcp->out_count = 6;
-
- status = ql_get_mb_sts(qdev, mbcp);
- if (status)
- netif_err(qdev, drv, qdev->ndev, "Lost AEN broken!\n");
- else {
- int i;
- netif_err(qdev, drv, qdev->ndev, "Lost AEN detected.\n");
- for (i = 0; i < mbcp->out_count; i++)
- netif_err(qdev, drv, qdev->ndev, "mbox_out[%d] = 0x%.08x.\n",
- i, mbcp->mbox_out[i]);
-
- }
-
- return status;
-}
-
-static void ql_init_fw_done(struct ql_adapter *qdev, struct mbox_params *mbcp)
-{
- int status;
-
- mbcp->out_count = 2;
-
- status = ql_get_mb_sts(qdev, mbcp);
- if (status) {
- netif_err(qdev, drv, qdev->ndev, "Firmware did not initialize!\n");
- } else {
- netif_err(qdev, drv, qdev->ndev, "Firmware Revision = 0x%.08x.\n",
- mbcp->mbox_out[1]);
- qdev->fw_rev_id = mbcp->mbox_out[1];
- status = ql_cam_route_initialize(qdev);
- if (status)
- netif_err(qdev, ifup, qdev->ndev,
- "Failed to init CAM/Routing tables.\n");
- }
-}
-
-/* Process an async event and clear it unless it's an
- * error condition.
- * This can get called iteratively from the mpi_work thread
- * when events arrive via an interrupt.
- * It also gets called when a mailbox command is polling for
- * its completion. */
-static int ql_mpi_handler(struct ql_adapter *qdev, struct mbox_params *mbcp)
-{
- int status;
- int orig_count = mbcp->out_count;
-
- /* Just get mailbox zero for now. */
- mbcp->out_count = 1;
- status = ql_get_mb_sts(qdev, mbcp);
- if (status) {
- netif_err(qdev, drv, qdev->ndev,
- "Could not read MPI, resetting ASIC!\n");
- ql_queue_asic_error(qdev);
- goto end;
- }
-
- switch (mbcp->mbox_out[0]) {
-
- /* This case is only active when we arrive here
- * as a result of issuing a mailbox command to
- * the firmware.
- */
- case MB_CMD_STS_INTRMDT:
- case MB_CMD_STS_GOOD:
- case MB_CMD_STS_INVLD_CMD:
- case MB_CMD_STS_XFC_ERR:
- case MB_CMD_STS_CSUM_ERR:
- case MB_CMD_STS_ERR:
- case MB_CMD_STS_PARAM_ERR:
- /* We can only get mailbox status if we're polling from an
- * unfinished command. Get the rest of the status data and
- * return back to the caller.
- * We only end up here when we're polling for a mailbox
- * command completion.
- */
- mbcp->out_count = orig_count;
- status = ql_get_mb_sts(qdev, mbcp);
- return status;
-
- /* We are being asked by firmware to accept
- * a change to the port. This is only
- * a change to max frame sizes (Tx/Rx), pause
- * parameters, or loopback mode.
- */
- case AEN_IDC_REQ:
- status = ql_idc_req_aen(qdev);
- break;
-
- /* Process an inbound IDC event.
- * This will happen when we're trying to
- * change tx/rx max frame size, change pause
- * parameters or loopback mode.
- */
- case AEN_IDC_CMPLT:
- case AEN_IDC_EXT:
- status = ql_idc_cmplt_aen(qdev);
- break;
-
- case AEN_LINK_UP:
- ql_link_up(qdev, mbcp);
- break;
-
- case AEN_LINK_DOWN:
- ql_link_down(qdev, mbcp);
- break;
-
- case AEN_FW_INIT_DONE:
- /* If we're in the process of executing the firmware,
- * then convert the status to normal mailbox status.
- */
- if (mbcp->mbox_in[0] == MB_CMD_EX_FW) {
- mbcp->out_count = orig_count;
- status = ql_get_mb_sts(qdev, mbcp);
- mbcp->mbox_out[0] = MB_CMD_STS_GOOD;
- return status;
- }
- ql_init_fw_done(qdev, mbcp);
- break;
-
- case AEN_AEN_SFP_IN:
- ql_sfp_in(qdev, mbcp);
- break;
-
- case AEN_AEN_SFP_OUT:
- ql_sfp_out(qdev, mbcp);
- break;
-
- /* This event can arrive at boot time or after an
- * MPI reset if the firmware failed to initialize.
- */
- case AEN_FW_INIT_FAIL:
- /* If we're in the process of executing the firmware,
- * then convert the status to normal mailbox status.
- */
- if (mbcp->mbox_in[0] == MB_CMD_EX_FW) {
- mbcp->out_count = orig_count;
- status = ql_get_mb_sts(qdev, mbcp);
- mbcp->mbox_out[0] = MB_CMD_STS_ERR;
- return status;
- }
- netif_err(qdev, drv, qdev->ndev,
- "Firmware initialization failed.\n");
- status = -EIO;
- ql_queue_fw_error(qdev);
- break;
-
- case AEN_SYS_ERR:
- netif_err(qdev, drv, qdev->ndev, "System Error.\n");
- ql_queue_fw_error(qdev);
- status = -EIO;
- break;
-
- case AEN_AEN_LOST:
- ql_aen_lost(qdev, mbcp);
- break;
-
- case AEN_DCBX_CHG:
- /* Need to support AEN 8110 */
- break;
- default:
- netif_err(qdev, drv, qdev->ndev,
- "Unsupported AE %.08x.\n", mbcp->mbox_out[0]);
- /* Clear the MPI firmware status. */
- }
-end:
- ql_write32(qdev, CSR, CSR_CMD_CLR_R2PCI_INT);
- /* Restore the original mailbox count to
- * what the caller asked for. This can get
- * changed when a mailbox command is waiting
- * for a response and an AEN arrives and
- * is handled.
- */
- mbcp->out_count = orig_count;
- return status;
-}
-
-/* Execute a single mailbox command.
- * mbcp->mbox_in is an array of u32. Each
- * element in the array contains the value for its
- * respective mailbox register.
- */
-static int ql_mailbox_command(struct ql_adapter *qdev, struct mbox_params *mbcp)
-{
- int status;
- unsigned long count;
-
- mutex_lock(&qdev->mpi_mutex);
-
- /* Begin polled mode for MPI */
- ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16));
-
- /* Load the mailbox registers and wake up MPI RISC. */
- status = ql_exec_mb_cmd(qdev, mbcp);
- if (status)
- goto end;
-
-
- /* If we're generating a system error, then there's nothing
- * to wait for.
- */
- if (mbcp->mbox_in[0] == MB_CMD_MAKE_SYS_ERR)
- goto end;
-
- /* Wait for the command to complete. We loop
- * here because some AEN might arrive while
- * we're waiting for the mailbox command to
- * complete. If more than 5 seconds expire we can
- * assume something is wrong. */
- count = jiffies + HZ * MAILBOX_TIMEOUT;
- do {
- /* Wait for the interrupt to come in. */
- status = ql_wait_mbx_cmd_cmplt(qdev);
- if (status)
- continue;
-
- /* Process the event. If it's an AEN, it
- * will be handled in-line or a worker
- * will be spawned. If it's our completion
- * we will catch it below.
- */
- status = ql_mpi_handler(qdev, mbcp);
- if (status)
- goto end;
-
- /* It's either the completion for our mailbox
- * command complete or an AEN. If it's our
- * completion then get out.
- */
- if (((mbcp->mbox_out[0] & 0x0000f000) ==
- MB_CMD_STS_GOOD) ||
- ((mbcp->mbox_out[0] & 0x0000f000) ==
- MB_CMD_STS_INTRMDT))
- goto done;
- } while (time_before(jiffies, count));
-
- netif_err(qdev, drv, qdev->ndev,
- "Timed out waiting for mailbox complete.\n");
- status = -ETIMEDOUT;
- goto end;
-
-done:
-
- /* Now we can clear the interrupt condition
- * and look at our status.
- */
- ql_write32(qdev, CSR, CSR_CMD_CLR_R2PCI_INT);
-
- if (((mbcp->mbox_out[0] & 0x0000f000) !=
- MB_CMD_STS_GOOD) &&
- ((mbcp->mbox_out[0] & 0x0000f000) !=
- MB_CMD_STS_INTRMDT)) {
- status = -EIO;
- }
-end:
- /* End polled mode for MPI */
- ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16) | INTR_MASK_PI);
- mutex_unlock(&qdev->mpi_mutex);
- return status;
-}
-
-/* Get MPI firmware version. This will be used for
- * driver banner and for ethtool info.
- * Returns zero on success.
- */
-int ql_mb_about_fw(struct ql_adapter *qdev)
-{
- struct mbox_params mbc;
- struct mbox_params *mbcp = &mbc;
- int status = 0;
-
- memset(mbcp, 0, sizeof(struct mbox_params));
-
- mbcp->in_count = 1;
- mbcp->out_count = 3;
-
- mbcp->mbox_in[0] = MB_CMD_ABOUT_FW;
-
- status = ql_mailbox_command(qdev, mbcp);
- if (status)
- return status;
-
- if (mbcp->mbox_out[0] != MB_CMD_STS_GOOD) {
- netif_err(qdev, drv, qdev->ndev,
- "Failed about firmware command\n");
- status = -EIO;
- }
-
- /* Store the firmware version */
- qdev->fw_rev_id = mbcp->mbox_out[1];
-
- return status;
-}
-
-/* Get functional state for MPI firmware.
- * Returns zero on success.
- */
-int ql_mb_get_fw_state(struct ql_adapter *qdev)
-{
- struct mbox_params mbc;
- struct mbox_params *mbcp = &mbc;
- int status = 0;
-
- memset(mbcp, 0, sizeof(struct mbox_params));
-
- mbcp->in_count = 1;
- mbcp->out_count = 2;
-
- mbcp->mbox_in[0] = MB_CMD_GET_FW_STATE;
-
- status = ql_mailbox_command(qdev, mbcp);
- if (status)
- return status;
-
- if (mbcp->mbox_out[0] != MB_CMD_STS_GOOD) {
- netif_err(qdev, drv, qdev->ndev,
- "Failed Get Firmware State.\n");
- status = -EIO;
- }
-
- /* If bit zero is set in mbx 1 then the firmware is
- * running, but not initialized. This should never
- * happen.
- */
- if (mbcp->mbox_out[1] & 1) {
- netif_err(qdev, drv, qdev->ndev,
- "Firmware waiting for initialization.\n");
- status = -EIO;
- }
-
- return status;
-}
-
-/* Send an ACK mailbox command to the firmware to
- * let it continue with the change.
- */
-static int ql_mb_idc_ack(struct ql_adapter *qdev)
-{
- struct mbox_params mbc;
- struct mbox_params *mbcp = &mbc;
- int status = 0;
-
- memset(mbcp, 0, sizeof(struct mbox_params));
-
- mbcp->in_count = 5;
- mbcp->out_count = 1;
-
- mbcp->mbox_in[0] = MB_CMD_IDC_ACK;
- mbcp->mbox_in[1] = qdev->idc_mbc.mbox_out[1];
- mbcp->mbox_in[2] = qdev->idc_mbc.mbox_out[2];
- mbcp->mbox_in[3] = qdev->idc_mbc.mbox_out[3];
- mbcp->mbox_in[4] = qdev->idc_mbc.mbox_out[4];
-
- status = ql_mailbox_command(qdev, mbcp);
- if (status)
- return status;
-
- if (mbcp->mbox_out[0] != MB_CMD_STS_GOOD) {
- netif_err(qdev, drv, qdev->ndev, "Failed IDC ACK send.\n");
- status = -EIO;
- }
- return status;
-}
-
-/* Set link settings and maximum frame size settings
- * for the current port.
- * Most likely will block.
- */
-int ql_mb_set_port_cfg(struct ql_adapter *qdev)
-{
- struct mbox_params mbc;
- struct mbox_params *mbcp = &mbc;
- int status = 0;
-
- memset(mbcp, 0, sizeof(struct mbox_params));
-
- mbcp->in_count = 3;
- mbcp->out_count = 1;
-
- mbcp->mbox_in[0] = MB_CMD_SET_PORT_CFG;
- mbcp->mbox_in[1] = qdev->link_config;
- mbcp->mbox_in[2] = qdev->max_frame_size;
-
-
- status = ql_mailbox_command(qdev, mbcp);
- if (status)
- return status;
-
- if (mbcp->mbox_out[0] == MB_CMD_STS_INTRMDT) {
- netif_err(qdev, drv, qdev->ndev,
- "Port Config sent, wait for IDC.\n");
- } else if (mbcp->mbox_out[0] != MB_CMD_STS_GOOD) {
- netif_err(qdev, drv, qdev->ndev,
- "Failed Set Port Configuration.\n");
- status = -EIO;
- }
- return status;
-}
-
-static int ql_mb_dump_ram(struct ql_adapter *qdev, u64 req_dma, u32 addr,
- u32 size)
-{
- int status = 0;
- struct mbox_params mbc;
- struct mbox_params *mbcp = &mbc;
-
- memset(mbcp, 0, sizeof(struct mbox_params));
-
- mbcp->in_count = 9;
- mbcp->out_count = 1;
-
- mbcp->mbox_in[0] = MB_CMD_DUMP_RISC_RAM;
- mbcp->mbox_in[1] = LSW(addr);
- mbcp->mbox_in[2] = MSW(req_dma);
- mbcp->mbox_in[3] = LSW(req_dma);
- mbcp->mbox_in[4] = MSW(size);
- mbcp->mbox_in[5] = LSW(size);
- mbcp->mbox_in[6] = MSW(MSD(req_dma));
- mbcp->mbox_in[7] = LSW(MSD(req_dma));
- mbcp->mbox_in[8] = MSW(addr);
-
-
- status = ql_mailbox_command(qdev, mbcp);
- if (status)
- return status;
-
- if (mbcp->mbox_out[0] != MB_CMD_STS_GOOD) {
- netif_err(qdev, drv, qdev->ndev, "Failed to dump risc RAM.\n");
- status = -EIO;
- }
- return status;
-}
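The nine-word layout above spreads a 32-bit RISC address, a 32-bit length, and a 64-bit DMA handle across 16-bit mailbox words. A worked sketch of the address split, assuming the qlge.h helpers behave as their names suggest (LSW/MSW = low/high 16 bits of a 32-bit value, MSD = high 32 bits of a 64-bit value):

	u64 req_dma = 0x0000123487654321ULL;	/* example handle */

	mbcp->mbox_in[2] = MSW(req_dma);	/* 0x8765: bits 31..16 */
	mbcp->mbox_in[3] = LSW(req_dma);	/* 0x4321: bits 15..0  */
	mbcp->mbox_in[6] = MSW(MSD(req_dma));	/* 0x0000: bits 63..48 */
	mbcp->mbox_in[7] = LSW(MSD(req_dma));	/* 0x1234: bits 47..32 */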
-
-/* Issue a mailbox command to dump RISC RAM. */
-int ql_dump_risc_ram_area(struct ql_adapter *qdev, void *buf,
- u32 ram_addr, int word_count)
-{
- int status;
- char *my_buf;
- dma_addr_t buf_dma;
-
- my_buf = pci_alloc_consistent(qdev->pdev, word_count * sizeof(u32),
- &buf_dma);
- if (!my_buf)
- return -EIO;
-
- status = ql_mb_dump_ram(qdev, buf_dma, ram_addr, word_count);
- if (!status)
- memcpy(buf, my_buf, word_count * sizeof(u32));
-
- pci_free_consistent(qdev->pdev, word_count * sizeof(u32), my_buf,
- buf_dma);
- return status;
-}
-
-/* Get link settings and maximum frame size settings
- * for the current port.
- * Most likely will block.
- */
-int ql_mb_get_port_cfg(struct ql_adapter *qdev)
-{
- struct mbox_params mbc;
- struct mbox_params *mbcp = &mbc;
- int status = 0;
-
- memset(mbcp, 0, sizeof(struct mbox_params));
-
- mbcp->in_count = 1;
- mbcp->out_count = 3;
-
- mbcp->mbox_in[0] = MB_CMD_GET_PORT_CFG;
-
- status = ql_mailbox_command(qdev, mbcp);
- if (status)
- return status;
-
- if (mbcp->mbox_out[0] != MB_CMD_STS_GOOD) {
- netif_err(qdev, drv, qdev->ndev,
- "Failed Get Port Configuration.\n");
- status = -EIO;
- } else {
- netif_printk(qdev, drv, KERN_DEBUG, qdev->ndev,
- "Passed Get Port Configuration.\n");
- qdev->link_config = mbcp->mbox_out[1];
- qdev->max_frame_size = mbcp->mbox_out[2];
- }
- return status;
-}
-
-int ql_mb_wol_mode(struct ql_adapter *qdev, u32 wol)
-{
- struct mbox_params mbc;
- struct mbox_params *mbcp = &mbc;
- int status;
-
- memset(mbcp, 0, sizeof(struct mbox_params));
-
- mbcp->in_count = 2;
- mbcp->out_count = 1;
-
- mbcp->mbox_in[0] = MB_CMD_SET_WOL_MODE;
- mbcp->mbox_in[1] = wol;
-
-
- status = ql_mailbox_command(qdev, mbcp);
- if (status)
- return status;
-
- if (mbcp->mbox_out[0] != MB_CMD_STS_GOOD) {
- netif_err(qdev, drv, qdev->ndev, "Failed to set WOL mode.\n");
- status = -EIO;
- }
- return status;
-}
-
-int ql_mb_wol_set_magic(struct ql_adapter *qdev, u32 enable_wol)
-{
- struct mbox_params mbc;
- struct mbox_params *mbcp = &mbc;
- int status;
- u8 *addr = qdev->ndev->dev_addr;
-
- memset(mbcp, 0, sizeof(struct mbox_params));
-
- mbcp->in_count = 8;
- mbcp->out_count = 1;
-
- mbcp->mbox_in[0] = MB_CMD_SET_WOL_MAGIC;
- if (enable_wol) {
- mbcp->mbox_in[1] = (u32)addr[0];
- mbcp->mbox_in[2] = (u32)addr[1];
- mbcp->mbox_in[3] = (u32)addr[2];
- mbcp->mbox_in[4] = (u32)addr[3];
- mbcp->mbox_in[5] = (u32)addr[4];
- mbcp->mbox_in[6] = (u32)addr[5];
- mbcp->mbox_in[7] = 0;
- } else {
- mbcp->mbox_in[1] = 0;
- mbcp->mbox_in[2] = 1;
- mbcp->mbox_in[3] = 1;
- mbcp->mbox_in[4] = 1;
- mbcp->mbox_in[5] = 1;
- mbcp->mbox_in[6] = 1;
- mbcp->mbox_in[7] = 0;
- }
-
- status = ql_mailbox_command(qdev, mbcp);
- if (status)
- return status;
-
- if (mbcp->mbox_out[0] != MB_CMD_STS_GOOD) {
- netif_err(qdev, drv, qdev->ndev, "Failed to set WOL mode.\n");
- status = -EIO;
- }
- return status;
-}
-
-/* IDC - Inter Device Communication...
- * Some firmware commands require the consent of the adjacent
- * FCoE function. This function waits for the OK, or a
- * counter-request for a little more time.
- * The firmware will complete the request if the other
- * function doesn't respond.
- */
-static int ql_idc_wait(struct ql_adapter *qdev)
-{
- int status = -ETIMEDOUT;
- long wait_time = 1 * HZ;
- struct mbox_params *mbcp = &qdev->idc_mbc;
- do {
- /* Wait here for the command to complete
- * via the IDC process.
- */
- wait_time =
- wait_for_completion_timeout(&qdev->ide_completion,
- wait_time);
- if (!wait_time) {
- netif_err(qdev, drv, qdev->ndev, "IDC Timeout.\n");
- break;
- }
- /* Now examine the response from the IDC process.
- * We might have a good completion or a request for
- * more wait time.
- */
- if (mbcp->mbox_out[0] == AEN_IDC_EXT) {
- netif_err(qdev, drv, qdev->ndev,
- "IDC Time Extension from function.\n");
- wait_time += (mbcp->mbox_out[1] >> 8) & 0x0000000f;
- } else if (mbcp->mbox_out[0] == AEN_IDC_CMPLT) {
- netif_err(qdev, drv, qdev->ndev, "IDC Success.\n");
- status = 0;
- break;
- } else {
- netif_err(qdev, drv, qdev->ndev,
- "IDC: Invalid State 0x%.04x.\n",
- mbcp->mbox_out[0]);
- status = -EIO;
- break;
- }
- } while (wait_time);
-
- return status;
-}
-
-int ql_mb_set_led_cfg(struct ql_adapter *qdev, u32 led_config)
-{
- struct mbox_params mbc;
- struct mbox_params *mbcp = &mbc;
- int status;
-
- memset(mbcp, 0, sizeof(struct mbox_params));
-
- mbcp->in_count = 2;
- mbcp->out_count = 1;
-
- mbcp->mbox_in[0] = MB_CMD_SET_LED_CFG;
- mbcp->mbox_in[1] = led_config;
-
-
- status = ql_mailbox_command(qdev, mbcp);
- if (status)
- return status;
-
- if (mbcp->mbox_out[0] != MB_CMD_STS_GOOD) {
- netif_err(qdev, drv, qdev->ndev,
- "Failed to set LED Configuration.\n");
- status = -EIO;
- }
-
- return status;
-}
-
-int ql_mb_get_led_cfg(struct ql_adapter *qdev)
-{
- struct mbox_params mbc;
- struct mbox_params *mbcp = &mbc;
- int status;
-
- memset(mbcp, 0, sizeof(struct mbox_params));
-
- mbcp->in_count = 1;
- mbcp->out_count = 2;
-
- mbcp->mbox_in[0] = MB_CMD_GET_LED_CFG;
-
- status = ql_mailbox_command(qdev, mbcp);
- if (status)
- return status;
-
- if (mbcp->mbox_out[0] != MB_CMD_STS_GOOD) {
- netif_err(qdev, drv, qdev->ndev,
- "Failed to get LED Configuration.\n");
- status = -EIO;
- } else
- qdev->led_config = mbcp->mbox_out[1];
-
- return status;
-}
-
-int ql_mb_set_mgmnt_traffic_ctl(struct ql_adapter *qdev, u32 control)
-{
- struct mbox_params mbc;
- struct mbox_params *mbcp = &mbc;
- int status;
-
- memset(mbcp, 0, sizeof(struct mbox_params));
-
- mbcp->in_count = 1;
- mbcp->out_count = 2;
-
- mbcp->mbox_in[0] = MB_CMD_SET_MGMNT_TFK_CTL;
- mbcp->mbox_in[1] = control;
-
- status = ql_mailbox_command(qdev, mbcp);
- if (status)
- return status;
-
- if (mbcp->mbox_out[0] == MB_CMD_STS_GOOD)
- return status;
-
- if (mbcp->mbox_out[0] == MB_CMD_STS_INVLD_CMD) {
- netif_err(qdev, drv, qdev->ndev,
- "Command not supported by firmware.\n");
- status = -EINVAL;
- } else if (mbcp->mbox_out[0] == MB_CMD_STS_ERR) {
- /* This indicates that the firmware is
- * already in the state we are trying to
- * change it to.
- */
- netif_err(qdev, drv, qdev->ndev,
- "Command parameters make no change.\n");
- }
- return status;
-}
-
-/* Returns a negative error code or the mailbox command status. */
-static int ql_mb_get_mgmnt_traffic_ctl(struct ql_adapter *qdev, u32 *control)
-{
- struct mbox_params mbc;
- struct mbox_params *mbcp = &mbc;
- int status;
-
- memset(mbcp, 0, sizeof(struct mbox_params));
- *control = 0;
-
- mbcp->in_count = 1;
- mbcp->out_count = 1;
-
- mbcp->mbox_in[0] = MB_CMD_GET_MGMNT_TFK_CTL;
-
- status = ql_mailbox_command(qdev, mbcp);
- if (status)
- return status;
-
- if (mbcp->mbox_out[0] == MB_CMD_STS_GOOD) {
- *control = mbcp->mbox_in[1];
- return status;
- }
-
- if (mbcp->mbox_out[0] == MB_CMD_STS_INVLD_CMD) {
- netif_err(qdev, drv, qdev->ndev,
- "Command not supported by firmware.\n");
- status = -EINVAL;
- } else if (mbcp->mbox_out[0] == MB_CMD_STS_ERR) {
- netif_err(qdev, drv, qdev->ndev,
- "Failed to get MPI traffic control.\n");
- status = -EIO;
- }
- return status;
-}
-
-int ql_wait_fifo_empty(struct ql_adapter *qdev)
-{
- int count = 5;
- u32 mgmnt_fifo_empty;
- u32 nic_fifo_empty;
-
- do {
- nic_fifo_empty = ql_read32(qdev, STS) & STS_NFE;
- ql_mb_get_mgmnt_traffic_ctl(qdev, &mgmnt_fifo_empty);
- mgmnt_fifo_empty &= MB_GET_MPI_TFK_FIFO_EMPTY;
- if (nic_fifo_empty && mgmnt_fifo_empty)
- return 0;
- msleep(100);
- } while (count-- > 0);
- return -ETIMEDOUT;
-}
-
-/* API called in work thread context to set new TX/RX
- * maximum frame size values to match MTU.
- */
-static int ql_set_port_cfg(struct ql_adapter *qdev)
-{
- int status;
- status = ql_mb_set_port_cfg(qdev);
- if (status)
- return status;
- status = ql_idc_wait(qdev);
- return status;
-}
-
-/* The following routines are worker threads that process
- * events that may sleep waiting for completion.
- */
-
-/* This thread gets the maximum TX and RX frame size values
- * from the firmware and, if necessary, changes them to match
- * the MTU setting.
- */
-void ql_mpi_port_cfg_work(struct work_struct *work)
-{
- struct ql_adapter *qdev =
- container_of(work, struct ql_adapter, mpi_port_cfg_work.work);
- int status;
-
- status = ql_mb_get_port_cfg(qdev);
- if (status) {
- netif_err(qdev, drv, qdev->ndev,
- "Bug: Failed to get port config data.\n");
- goto err;
- }
-
- if (qdev->link_config & CFG_JUMBO_FRAME_SIZE &&
- qdev->max_frame_size ==
- CFG_DEFAULT_MAX_FRAME_SIZE)
- goto end;
-
- qdev->link_config |= CFG_JUMBO_FRAME_SIZE;
- qdev->max_frame_size = CFG_DEFAULT_MAX_FRAME_SIZE;
- status = ql_set_port_cfg(qdev);
- if (status) {
- netif_err(qdev, drv, qdev->ndev,
- "Bug: Failed to set port config data.\n");
- goto err;
- }
-end:
- clear_bit(QL_PORT_CFG, &qdev->flags);
- return;
-err:
- ql_queue_fw_error(qdev);
- goto end;
-}
-
-/* Process an inter-device request. This is issued by
- * the firmware in response to another function requesting
- * a change to the port. We set a flag to indicate a change
- * has been made and then send a mailbox command ACKing
- * the change request.
- */
-void ql_mpi_idc_work(struct work_struct *work)
-{
- struct ql_adapter *qdev =
- container_of(work, struct ql_adapter, mpi_idc_work.work);
- int status;
- struct mbox_params *mbcp = &qdev->idc_mbc;
- u32 aen;
- int timeout;
-
- aen = mbcp->mbox_out[1] >> 16;
- timeout = (mbcp->mbox_out[1] >> 8) & 0xf;
-
- switch (aen) {
- default:
- netif_err(qdev, drv, qdev->ndev,
- "Bug: Unhandled IDC action.\n");
- break;
- case MB_CMD_PORT_RESET:
- case MB_CMD_STOP_FW:
- ql_link_off(qdev);
- /* Fall through */
- case MB_CMD_SET_PORT_CFG:
- /* Signal the resulting link up AEN
- * that the frame routing and mac addr
- * needs to be set.
- */
- set_bit(QL_CAM_RT_SET, &qdev->flags);
- /* Do ACK if required */
- if (timeout) {
- status = ql_mb_idc_ack(qdev);
- if (status)
- netif_err(qdev, drv, qdev->ndev,
- "Bug: No pending IDC!\n");
- } else {
- netif_printk(qdev, drv, KERN_DEBUG, qdev->ndev,
- "IDC ACK not required\n");
- status = 0; /* success */
- }
- break;
-
- /* These sub-commands, issued by another (FCoE)
- * function, request an operation
- * on the shared resource (MPI environment).
- * We currently don't issue these so we just
- * ACK the request.
- */
- case MB_CMD_IOP_RESTART_MPI:
- case MB_CMD_IOP_PREP_LINK_DOWN:
- /* Drop the link, reload the routing
- * table when link comes up.
- */
- ql_link_off(qdev);
- set_bit(QL_CAM_RT_SET, &qdev->flags);
- /* Fall through. */
- case MB_CMD_IOP_DVR_START:
- case MB_CMD_IOP_FLASH_ACC:
- case MB_CMD_IOP_CORE_DUMP_MPI:
- case MB_CMD_IOP_PREP_UPDATE_MPI:
- case MB_CMD_IOP_COMP_UPDATE_MPI:
- case MB_CMD_IOP_NONE: /* an IDC without params */
- /* Do ACK if required */
- if (timeout) {
- status = ql_mb_idc_ack(qdev);
- if (status)
- netif_err(qdev, drv, qdev->ndev,
- "Bug: No pending IDC!\n");
- } else {
- netif_printk(qdev, drv, KERN_DEBUG, qdev->ndev,
- "IDC ACK not required\n");
- status = 0; /* success */
- }
- break;
- }
-}
-
-void ql_mpi_work(struct work_struct *work)
-{
- struct ql_adapter *qdev =
- container_of(work, struct ql_adapter, mpi_work.work);
- struct mbox_params mbc;
- struct mbox_params *mbcp = &mbc;
- int err = 0;
-
- mutex_lock(&qdev->mpi_mutex);
- /* Begin polled mode for MPI */
- ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16));
-
- while (ql_read32(qdev, STS) & STS_PI) {
- memset(mbcp, 0, sizeof(struct mbox_params));
- mbcp->out_count = 1;
- /* Don't continue if an async event
- * did not complete properly.
- */
- err = ql_mpi_handler(qdev, mbcp);
- if (err)
- break;
- }
-
- /* End polled mode for MPI */
- ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16) | INTR_MASK_PI);
- mutex_unlock(&qdev->mpi_mutex);
- ql_enable_completion_interrupt(qdev, 0);
-}
-
-void ql_mpi_reset_work(struct work_struct *work)
-{
- struct ql_adapter *qdev =
- container_of(work, struct ql_adapter, mpi_reset_work.work);
- cancel_delayed_work_sync(&qdev->mpi_work);
- cancel_delayed_work_sync(&qdev->mpi_port_cfg_work);
- cancel_delayed_work_sync(&qdev->mpi_idc_work);
- /* If we're not the dominant NIC function,
- * then there is nothing to do.
- */
- if (!ql_own_firmware(qdev)) {
- netif_err(qdev, drv, qdev->ndev, "Don't own firmware!\n");
- return;
- }
-
- if (qdev->mpi_coredump && !ql_core_dump(qdev, qdev->mpi_coredump)) {
- netif_err(qdev, drv, qdev->ndev, "Core is dumped!\n");
- qdev->core_is_dumped = 1;
- queue_delayed_work(qdev->workqueue,
- &qdev->mpi_core_to_log, 5 * HZ);
- }
- ql_soft_reset_mpi_risc(qdev);
-}
diff --git a/drivers/net/ethernet/qualcomm/emac/emac-mac.c b/drivers/net/ethernet/qualcomm/emac/emac-mac.c
index 707665b62eb7..bebe38d74d66 100644
--- a/drivers/net/ethernet/qualcomm/emac/emac-mac.c
+++ b/drivers/net/ethernet/qualcomm/emac/emac-mac.c
@@ -1385,15 +1385,13 @@ static void emac_tx_fill_tpd(struct emac_adapter *adpt,
}
for (i = 0; i < nr_frags; i++) {
- struct skb_frag_struct *frag;
-
- frag = &skb_shinfo(skb)->frags[i];
+ skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
tpbuf = GET_TPD_BUFFER(tx_q, tx_q->tpd.produce_idx);
- tpbuf->length = frag->size;
- tpbuf->dma_addr = dma_map_page(adpt->netdev->dev.parent,
- frag->page.p, frag->page_offset,
- tpbuf->length, DMA_TO_DEVICE);
+ tpbuf->length = skb_frag_size(frag);
+ tpbuf->dma_addr = skb_frag_dma_map(adpt->netdev->dev.parent,
+ frag, 0, tpbuf->length,
+ DMA_TO_DEVICE);
ret = dma_mapping_error(adpt->netdev->dev.parent,
tpbuf->dma_addr);
if (ret)
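The new helper hides the frag's own page pointer and internal offset, which the old open-coded dma_map_page() had to dig out by hand. In kernels of this vintage skb_frag_dma_map() is roughly this wrapper (sketch, not part of the patch):

	static inline dma_addr_t skb_frag_dma_map(struct device *dev,
						  const skb_frag_t *frag,
						  size_t offset, size_t size,
						  enum dma_data_direction dir)
	{
		return dma_map_page(dev, skb_frag_page(frag),
				    skb_frag_off(frag) + offset, size, dir);
	}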
diff --git a/drivers/net/ethernet/qualcomm/emac/emac.c b/drivers/net/ethernet/qualcomm/emac/emac.c
index 59c2349b59df..c84ab052ef26 100644
--- a/drivers/net/ethernet/qualcomm/emac/emac.c
+++ b/drivers/net/ethernet/qualcomm/emac/emac.c
@@ -544,7 +544,6 @@ static int emac_probe_resources(struct platform_device *pdev,
struct emac_adapter *adpt)
{
struct net_device *netdev = adpt->netdev;
- struct resource *res;
char maddr[ETH_ALEN];
int ret = 0;
@@ -556,22 +555,17 @@ static int emac_probe_resources(struct platform_device *pdev,
/* Core 0 interrupt */
ret = platform_get_irq(pdev, 0);
- if (ret < 0) {
- dev_err(&pdev->dev,
- "error: missing core0 irq resource (error=%i)\n", ret);
+ if (ret < 0)
return ret;
- }
adpt->irq.irq = ret;
/* base register address */
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- adpt->base = devm_ioremap_resource(&pdev->dev, res);
+ adpt->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(adpt->base))
return PTR_ERR(adpt->base);
/* CSR register address */
- res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
- adpt->csr = devm_ioremap_resource(&pdev->dev, res);
+ adpt->csr = devm_platform_ioremap_resource(pdev, 1);
if (IS_ERR(adpt->csr))
return PTR_ERR(adpt->csr);
diff --git a/drivers/net/ethernet/qualcomm/qca_debug.c b/drivers/net/ethernet/qualcomm/qca_debug.c
index bcb890b18a94..702aa217a27a 100644
--- a/drivers/net/ethernet/qualcomm/qca_debug.c
+++ b/drivers/net/ethernet/qualcomm/qca_debug.c
@@ -131,17 +131,10 @@ DEFINE_SHOW_ATTRIBUTE(qcaspi_info);
void
qcaspi_init_device_debugfs(struct qcaspi *qca)
{
- struct dentry *device_root;
+ qca->device_root = debugfs_create_dir(dev_name(&qca->net_dev->dev),
+ NULL);
- device_root = debugfs_create_dir(dev_name(&qca->net_dev->dev), NULL);
- qca->device_root = device_root;
-
- if (IS_ERR(device_root) || !device_root) {
- pr_warn("failed to create debugfs directory for %s\n",
- dev_name(&qca->net_dev->dev));
- return;
- }
- debugfs_create_file("info", S_IFREG | 0444, device_root, qca,
+ debugfs_create_file("info", S_IFREG | 0444, qca->device_root, qca,
&qcaspi_info_fops);
}
diff --git a/drivers/net/ethernet/qualcomm/qca_spi.c b/drivers/net/ethernet/qualcomm/qca_spi.c
index b28360bc2255..5ecf61df78bd 100644
--- a/drivers/net/ethernet/qualcomm/qca_spi.c
+++ b/drivers/net/ethernet/qualcomm/qca_spi.c
@@ -837,8 +837,7 @@ qcaspi_netdev_uninit(struct net_device *dev)
kfree(qca->rx_buffer);
qca->buffer_size = 0;
- if (qca->rx_skb)
- dev_kfree_skb(qca->rx_skb);
+ dev_kfree_skb(qca->rx_skb);
}
static const struct net_device_ops qcaspi_netdev_ops = {
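Dropping the NULL check is safe because dev_kfree_skb() expands to consume_skb(), which bails out on a NULL skb; the next hunk makes the same cleanup in qca_uart.c. Roughly, in kernels of this era:

	#define dev_kfree_skb(a)	consume_skb(a)

	void consume_skb(struct sk_buff *skb)
	{
		if (!skb_unref(skb))		/* skb_unref(NULL) is false */
			return;

		trace_consume_skb(skb);
		__kfree_skb(skb);
	}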
diff --git a/drivers/net/ethernet/qualcomm/qca_uart.c b/drivers/net/ethernet/qualcomm/qca_uart.c
index 590616846cd1..0981068504fa 100644
--- a/drivers/net/ethernet/qualcomm/qca_uart.c
+++ b/drivers/net/ethernet/qualcomm/qca_uart.c
@@ -285,8 +285,7 @@ static void qcauart_netdev_uninit(struct net_device *dev)
{
struct qcauart *qca = netdev_priv(dev);
- if (qca->rx_skb)
- dev_kfree_skb(qca->rx_skb);
+ dev_kfree_skb(qca->rx_skb);
}
static const struct net_device_ops qcauart_netdev_ops = {
diff --git a/drivers/net/ethernet/realtek/r8169_main.c b/drivers/net/ethernet/realtek/r8169_main.c
index bae0074ab9aa..faa4041cfb11 100644
--- a/drivers/net/ethernet/realtek/r8169_main.c
+++ b/drivers/net/ethernet/realtek/r8169_main.c
@@ -61,7 +61,7 @@
/* Maximum number of multicast addresses to filter (vs. Rx-all-multicast).
The RTL chips use a 64 element hash table based on the Ethernet CRC. */
-static const int multicast_filter_limit = 32;
+#define MC_FILTER_LIMIT 32
#define TX_DMA_BURST 7 /* Maximum PCI burst, '7' is unlimited */
#define InterFrameGap 0x03 /* 3 means InterFrameGap = the shortest one */
@@ -271,7 +271,6 @@ enum rtl_registers {
Config3 = 0x54,
Config4 = 0x55,
Config5 = 0x56,
- MultiIntr = 0x5c,
PHYAR = 0x60,
PHYstatus = 0x6c,
RxMaxSize = 0xda,
@@ -539,11 +538,11 @@ enum rtl_tx_desc_bit_1 {
TD1_GTSENV4 = (1 << 26), /* Giant Send for IPv4 */
TD1_GTSENV6 = (1 << 25), /* Giant Send for IPv6 */
#define GTTCPHO_SHIFT 18
-#define GTTCPHO_MAX 0x7fU
+#define GTTCPHO_MAX 0x7f
/* Second doubleword. */
#define TCPHO_SHIFT 18
-#define TCPHO_MAX 0x3ffU
+#define TCPHO_MAX 0x3ff
#define TD1_MSS_SHIFT 18 /* MSS position (11 bits) */
TD1_IPv6_CS = (1 << 28), /* Calculate IPv6 checksum */
TD1_IPv4_CS = (1 << 29), /* Calculate IPv4 checksum */
@@ -569,6 +568,11 @@ enum rtl_rx_desc_bit {
#define RsvdMask 0x3fffc000
+#define RTL_GSO_MAX_SIZE_V1 32000
+#define RTL_GSO_MAX_SEGS_V1 24
+#define RTL_GSO_MAX_SIZE_V2 64000
+#define RTL_GSO_MAX_SEGS_V2 64
+
struct TxDesc {
__le32 opts1;
__le32 opts2;
@@ -638,7 +642,7 @@ struct rtl8169_private {
struct RxDesc *RxDescArray; /* 256-aligned Rx descriptor ring */
dma_addr_t TxPhyAddr;
dma_addr_t RxPhyAddr;
- void *Rx_databuff[NUM_RX_DESC]; /* Rx data buffers */
+ struct page *Rx_databuff[NUM_RX_DESC]; /* Rx data buffers */
struct ring_info tx_skb[NUM_TX_DESC]; /* Tx data buffers */
u16 cp_cmd;
u16 irq_mask;
@@ -729,6 +733,13 @@ static bool rtl_is_8168evl_up(struct rtl8169_private *tp)
tp->mac_version != RTL_GIGA_MAC_VER_39;
}
+static bool rtl_supports_eee(struct rtl8169_private *tp)
+{
+ return tp->mac_version >= RTL_GIGA_MAC_VER_34 &&
+ tp->mac_version != RTL_GIGA_MAC_VER_37 &&
+ tp->mac_version != RTL_GIGA_MAC_VER_39;
+}
+
struct rtl_cond {
bool (*check)(struct rtl8169_private *);
const char *msg;
@@ -846,6 +857,14 @@ static u16 r8168_mac_ocp_read(struct rtl8169_private *tp, u32 reg)
return RTL_R32(tp, OCPDR);
}
+static void r8168_mac_ocp_modify(struct rtl8169_private *tp, u32 reg, u16 mask,
+ u16 set)
+{
+ u16 data = r8168_mac_ocp_read(tp, reg);
+
+ r8168_mac_ocp_write(tp, reg, (data & ~mask) | set);
+}
+
#define OCP_STD_PHY_BASE 0xa400
static void r8168g_mdio_write(struct rtl8169_private *tp, int reg, int value)
@@ -1414,18 +1433,22 @@ static void __rtl8169_set_wol(struct rtl8169_private *tp, u32 wolopts)
}
switch (tp->mac_version) {
- case RTL_GIGA_MAC_VER_02 ... RTL_GIGA_MAC_VER_17:
+ case RTL_GIGA_MAC_VER_02 ... RTL_GIGA_MAC_VER_06:
options = RTL_R8(tp, Config1) & ~PMEnable;
if (wolopts)
options |= PMEnable;
RTL_W8(tp, Config1, options);
break;
- default:
+ case RTL_GIGA_MAC_VER_34:
+ case RTL_GIGA_MAC_VER_37:
+ case RTL_GIGA_MAC_VER_39 ... RTL_GIGA_MAC_VER_51:
options = RTL_R8(tp, Config2) & ~PME_SIGNAL;
if (wolopts)
options |= PME_SIGNAL;
RTL_W8(tp, Config2, options);
break;
+ default:
+ break;
}
rtl_lock_config_regs(tp);
@@ -1929,144 +1952,40 @@ static int rtl_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
return 0;
}
-static int rtl_get_eee_supp(struct rtl8169_private *tp)
-{
- struct phy_device *phydev = tp->phydev;
- int ret;
-
- switch (tp->mac_version) {
- case RTL_GIGA_MAC_VER_34:
- case RTL_GIGA_MAC_VER_35:
- case RTL_GIGA_MAC_VER_36:
- case RTL_GIGA_MAC_VER_38:
- ret = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_PCS_EEE_ABLE);
- break;
- case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_51:
- ret = phy_read_paged(phydev, 0x0a5c, 0x12);
- break;
- default:
- ret = -EPROTONOSUPPORT;
- break;
- }
-
- return ret;
-}
-
-static int rtl_get_eee_lpadv(struct rtl8169_private *tp)
-{
- struct phy_device *phydev = tp->phydev;
- int ret;
-
- switch (tp->mac_version) {
- case RTL_GIGA_MAC_VER_34:
- case RTL_GIGA_MAC_VER_35:
- case RTL_GIGA_MAC_VER_36:
- case RTL_GIGA_MAC_VER_38:
- ret = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_EEE_LPABLE);
- break;
- case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_51:
- ret = phy_read_paged(phydev, 0x0a5d, 0x11);
- break;
- default:
- ret = -EPROTONOSUPPORT;
- break;
- }
-
- return ret;
-}
-
-static int rtl_get_eee_adv(struct rtl8169_private *tp)
-{
- struct phy_device *phydev = tp->phydev;
- int ret;
-
- switch (tp->mac_version) {
- case RTL_GIGA_MAC_VER_34:
- case RTL_GIGA_MAC_VER_35:
- case RTL_GIGA_MAC_VER_36:
- case RTL_GIGA_MAC_VER_38:
- ret = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_EEE_ADV);
- break;
- case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_51:
- ret = phy_read_paged(phydev, 0x0a5d, 0x10);
- break;
- default:
- ret = -EPROTONOSUPPORT;
- break;
- }
-
- return ret;
-}
-
-static int rtl_set_eee_adv(struct rtl8169_private *tp, int val)
-{
- struct phy_device *phydev = tp->phydev;
- int ret = 0;
-
- switch (tp->mac_version) {
- case RTL_GIGA_MAC_VER_34:
- case RTL_GIGA_MAC_VER_35:
- case RTL_GIGA_MAC_VER_36:
- case RTL_GIGA_MAC_VER_38:
- ret = phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_EEE_ADV, val);
- break;
- case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_51:
- phy_write_paged(phydev, 0x0a5d, 0x10, val);
- break;
- default:
- ret = -EPROTONOSUPPORT;
- break;
- }
-
- return ret;
-}
-
static int rtl8169_get_eee(struct net_device *dev, struct ethtool_eee *data)
{
struct rtl8169_private *tp = netdev_priv(dev);
struct device *d = tp_to_dev(tp);
int ret;
+ if (!rtl_supports_eee(tp))
+ return -EOPNOTSUPP;
+
pm_runtime_get_noresume(d);
if (!pm_runtime_active(d)) {
ret = -EOPNOTSUPP;
- goto out;
+ } else {
+ ret = phy_ethtool_get_eee(tp->phydev, data);
}
- /* Get Supported EEE */
- ret = rtl_get_eee_supp(tp);
- if (ret < 0)
- goto out;
- data->supported = mmd_eee_cap_to_ethtool_sup_t(ret);
-
- /* Get advertisement EEE */
- ret = rtl_get_eee_adv(tp);
- if (ret < 0)
- goto out;
- data->advertised = mmd_eee_adv_to_ethtool_adv_t(ret);
- data->eee_enabled = !!data->advertised;
-
- /* Get LP advertisement EEE */
- ret = rtl_get_eee_lpadv(tp);
- if (ret < 0)
- goto out;
- data->lp_advertised = mmd_eee_adv_to_ethtool_adv_t(ret);
- data->eee_active = !!(data->advertised & data->lp_advertised);
-out:
pm_runtime_put_noidle(d);
- return ret < 0 ? ret : 0;
+
+ return ret;
}
static int rtl8169_set_eee(struct net_device *dev, struct ethtool_eee *data)
{
struct rtl8169_private *tp = netdev_priv(dev);
struct device *d = tp_to_dev(tp);
- int old_adv, adv = 0, cap, ret;
+ int ret;
+
+ if (!rtl_supports_eee(tp))
+ return -EOPNOTSUPP;
pm_runtime_get_noresume(d);
- if (!dev->phydev || !pm_runtime_active(d)) {
+ if (!pm_runtime_active(d)) {
ret = -EOPNOTSUPP;
goto out;
}
@@ -2077,38 +1996,10 @@ static int rtl8169_set_eee(struct net_device *dev, struct ethtool_eee *data)
goto out;
}
- /* Get Supported EEE */
- ret = rtl_get_eee_supp(tp);
- if (ret < 0)
- goto out;
- cap = ret;
-
- ret = rtl_get_eee_adv(tp);
- if (ret < 0)
- goto out;
- old_adv = ret;
-
- if (data->eee_enabled) {
- adv = !data->advertised ? cap :
- ethtool_adv_to_mmd_eee_adv_t(data->advertised) & cap;
- /* Mask prohibited EEE modes */
- adv &= ~dev->phydev->eee_broken_modes;
- }
-
- if (old_adv != adv) {
- ret = rtl_set_eee_adv(tp, adv);
- if (ret < 0)
- goto out;
-
- /* Restart autonegotiation so the new modes get sent to the
- * link partner.
- */
- ret = phy_restart_aneg(dev->phydev);
- }
-
+ ret = phy_ethtool_set_eee(tp->phydev, data);
out:
pm_runtime_put_noidle(d);
- return ret < 0 ? ret : 0;
+ return ret;
}
static const struct ethtool_ops rtl8169_ethtool_ops = {
@@ -2135,10 +2026,11 @@ static const struct ethtool_ops rtl8169_ethtool_ops = {
static void rtl_enable_eee(struct rtl8169_private *tp)
{
- int supported = rtl_get_eee_supp(tp);
+ struct phy_device *phydev = tp->phydev;
+ int supported = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_PCS_EEE_ABLE);
if (supported > 0)
- rtl_set_eee_adv(tp, supported);
+ phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_EEE_ADV, supported);
}
static void rtl8169_get_mac_version(struct rtl8169_private *tp)
@@ -2324,6 +2216,16 @@ static void rtl8168g_config_eee_phy(struct rtl8169_private *tp)
phy_modify_paged(tp->phydev, 0x0a43, 0x11, 0, BIT(4));
}
+static void rtl8168h_config_eee_phy(struct rtl8169_private *tp)
+{
+ struct phy_device *phydev = tp->phydev;
+
+ rtl8168g_config_eee_phy(tp);
+
+ phy_modify_paged(phydev, 0xa4a, 0x11, 0x0000, 0x0200);
+ phy_modify_paged(phydev, 0xa42, 0x14, 0x0000, 0x0080);
+}
+
static void rtl8169s_hw_phy_config(struct rtl8169_private *tp)
{
static const struct phy_reg phy_reg_init[] = {
@@ -3391,7 +3293,7 @@ static void rtl8168h_1_hw_phy_config(struct rtl8169_private *tp)
phy_modify_paged(tp->phydev, 0x0a44, 0x11, BIT(7), 0);
rtl8168g_disable_aldps(tp);
- rtl8168g_config_eee_phy(tp);
+ rtl8168h_config_eee_phy(tp);
rtl_enable_eee(tp);
}
@@ -4146,54 +4048,46 @@ static void rtl8169_set_magic_reg(struct rtl8169_private *tp, unsigned mac_versi
static void rtl_set_rx_mode(struct net_device *dev)
{
+ u32 rx_mode = AcceptBroadcast | AcceptMyPhys | AcceptMulticast;
+ /* Multicast hash filter */
+ u32 mc_filter[2] = { 0xffffffff, 0xffffffff };
struct rtl8169_private *tp = netdev_priv(dev);
- u32 mc_filter[2]; /* Multicast hash filter */
- int rx_mode;
- u32 tmp = 0;
+ u32 tmp;
if (dev->flags & IFF_PROMISC) {
/* Unconditionally log net taps. */
netif_notice(tp, link, dev, "Promiscuous mode enabled\n");
- rx_mode =
- AcceptBroadcast | AcceptMulticast | AcceptMyPhys |
- AcceptAllPhys;
- mc_filter[1] = mc_filter[0] = 0xffffffff;
- } else if ((netdev_mc_count(dev) > multicast_filter_limit) ||
- (dev->flags & IFF_ALLMULTI)) {
- /* Too many to filter perfectly -- accept all multicasts. */
- rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys;
- mc_filter[1] = mc_filter[0] = 0xffffffff;
+ rx_mode |= AcceptAllPhys;
+ } else if (netdev_mc_count(dev) > MC_FILTER_LIMIT ||
+ dev->flags & IFF_ALLMULTI ||
+ tp->mac_version == RTL_GIGA_MAC_VER_35) {
+ /* accept all multicasts */
+ } else if (netdev_mc_empty(dev)) {
+ rx_mode &= ~AcceptMulticast;
} else {
struct netdev_hw_addr *ha;
- rx_mode = AcceptBroadcast | AcceptMyPhys;
mc_filter[1] = mc_filter[0] = 0;
netdev_for_each_mc_addr(ha, dev) {
- int bit_nr = ether_crc(ETH_ALEN, ha->addr) >> 26;
- mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
- rx_mode |= AcceptMulticast;
+ u32 bit_nr = ether_crc(ETH_ALEN, ha->addr) >> 26;
+ mc_filter[bit_nr >> 5] |= BIT(bit_nr & 31);
+ }
+
+ if (tp->mac_version > RTL_GIGA_MAC_VER_06) {
+ tmp = mc_filter[0];
+ mc_filter[0] = swab32(mc_filter[1]);
+ mc_filter[1] = swab32(tmp);
}
}
if (dev->features & NETIF_F_RXALL)
rx_mode |= (AcceptErr | AcceptRunt);
- tmp = (RTL_R32(tp, RxConfig) & ~RX_CONFIG_ACCEPT_MASK) | rx_mode;
-
- if (tp->mac_version > RTL_GIGA_MAC_VER_06) {
- u32 data = mc_filter[0];
-
- mc_filter[0] = swab32(mc_filter[1]);
- mc_filter[1] = swab32(data);
- }
-
- if (tp->mac_version == RTL_GIGA_MAC_VER_35)
- mc_filter[1] = mc_filter[0] = 0xffffffff;
-
RTL_W32(tp, MAR0 + 4, mc_filter[1]);
RTL_W32(tp, MAR0 + 0, mc_filter[0]);
- RTL_W32(tp, RxConfig, tmp);
+ tmp = RTL_R32(tp, RxConfig);
+ RTL_W32(tp, RxConfig, (tmp & ~RX_CONFIG_ACCEPT_MASK) | rx_mode);
}
DECLARE_RTL_COND(rtl_csiar_cond)
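For reference, each multicast address lands in the 64-entry hash by taking the top six bits of its Ethernet CRC: the upper bit picks MAR word 0 or 1, the lower five pick the bit within the word. A self-contained restatement with a hypothetical helper name:

	/* Hypothetical helper mirroring the mapping in rtl_set_rx_mode(). */
	static void rtl_hash_mc_addr(u32 mc_filter[2], const u8 *addr)
	{
		u32 bit_nr = ether_crc(ETH_ALEN, addr) >> 26;	/* 0..63 */

		mc_filter[bit_nr >> 5] |= BIT(bit_nr & 31);
	}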
@@ -4407,7 +4301,7 @@ static void rtl_hw_start_8168c_2(struct rtl8169_private *tp)
{
static const struct ephy_info e_info_8168c_2[] = {
{ 0x01, 0, 0x0001 },
- { 0x03, 0x0400, 0x0220 }
+ { 0x03, 0x0400, 0x0020 }
};
rtl_set_def_aspm_entry_latency(tp);
@@ -4454,7 +4348,8 @@ static void rtl_hw_start_8168d_4(struct rtl8169_private *tp)
static const struct ephy_info e_info_8168d_4[] = {
{ 0x0b, 0x0000, 0x0048 },
{ 0x19, 0x0020, 0x0050 },
- { 0x0c, 0x0100, 0x0020 }
+ { 0x0c, 0x0100, 0x0020 },
+ { 0x10, 0x0004, 0x0000 },
};
rtl_set_def_aspm_entry_latency(tp);
@@ -4504,7 +4399,9 @@ static void rtl_hw_start_8168e_2(struct rtl8169_private *tp)
{
static const struct ephy_info e_info_8168e_2[] = {
{ 0x09, 0x0000, 0x0080 },
- { 0x19, 0x0000, 0x0224 }
+ { 0x19, 0x0000, 0x0224 },
+ { 0x00, 0x0000, 0x0004 },
+ { 0x0c, 0x3df0, 0x0200 },
};
rtl_set_def_aspm_entry_latency(tp);
@@ -4566,7 +4463,9 @@ static void rtl_hw_start_8168f_1(struct rtl8169_private *tp)
{ 0x06, 0x00c0, 0x0020 },
{ 0x08, 0x0001, 0x0002 },
{ 0x09, 0x0000, 0x0080 },
- { 0x19, 0x0000, 0x0224 }
+ { 0x19, 0x0000, 0x0224 },
+ { 0x00, 0x0000, 0x0004 },
+ { 0x0c, 0x3df0, 0x0200 },
};
rtl_hw_start_8168f(tp);
@@ -4581,8 +4480,9 @@ static void rtl_hw_start_8411(struct rtl8169_private *tp)
static const struct ephy_info e_info_8168f_1[] = {
{ 0x06, 0x00c0, 0x0020 },
{ 0x0f, 0xffff, 0x5200 },
- { 0x1e, 0x0000, 0x4000 },
- { 0x19, 0x0000, 0x0224 }
+ { 0x19, 0x0000, 0x0224 },
+ { 0x00, 0x0000, 0x0004 },
+ { 0x0c, 0x3df0, 0x0200 },
};
rtl_hw_start_8168f(tp);
@@ -4621,8 +4521,8 @@ static void rtl_hw_start_8168g(struct rtl8169_private *tp)
static void rtl_hw_start_8168g_1(struct rtl8169_private *tp)
{
static const struct ephy_info e_info_8168g_1[] = {
- { 0x00, 0x0000, 0x0008 },
- { 0x0c, 0x37d0, 0x0820 },
+ { 0x00, 0x0008, 0x0000 },
+ { 0x0c, 0x3ff0, 0x0820 },
{ 0x1e, 0x0000, 0x0001 },
{ 0x19, 0x8000, 0x0000 }
};
@@ -4638,10 +4538,15 @@ static void rtl_hw_start_8168g_1(struct rtl8169_private *tp)
static void rtl_hw_start_8168g_2(struct rtl8169_private *tp)
{
static const struct ephy_info e_info_8168g_2[] = {
- { 0x00, 0x0000, 0x0008 },
- { 0x0c, 0x3df0, 0x0200 },
- { 0x19, 0xffff, 0xfc00 },
- { 0x1e, 0xffff, 0x20eb }
+ { 0x00, 0x0008, 0x0000 },
+ { 0x0c, 0x3ff0, 0x0820 },
+ { 0x19, 0xffff, 0x7c00 },
+ { 0x1e, 0xffff, 0x20eb },
+ { 0x0d, 0xffff, 0x1666 },
+ { 0x00, 0xffff, 0x10a3 },
+ { 0x06, 0xffff, 0xf050 },
+ { 0x04, 0x0000, 0x0010 },
+ { 0x1d, 0x4000, 0x0000 },
};
rtl_hw_start_8168g(tp);
@@ -4655,11 +4560,16 @@ static void rtl_hw_start_8168g_2(struct rtl8169_private *tp)
static void rtl_hw_start_8411_2(struct rtl8169_private *tp)
{
static const struct ephy_info e_info_8411_2[] = {
- { 0x00, 0x0000, 0x0008 },
- { 0x0c, 0x3df0, 0x0200 },
- { 0x0f, 0xffff, 0x5200 },
- { 0x19, 0x0020, 0x0000 },
- { 0x1e, 0x0000, 0x2000 }
+ { 0x00, 0x0008, 0x0000 },
+ { 0x0c, 0x37d0, 0x0820 },
+ { 0x1e, 0x0000, 0x0001 },
+ { 0x19, 0x8021, 0x0000 },
+ { 0x1e, 0x0000, 0x2000 },
+ { 0x0d, 0x0100, 0x0200 },
+ { 0x00, 0x0000, 0x0080 },
+ { 0x06, 0x0000, 0x0010 },
+ { 0x04, 0x0000, 0x0010 },
+ { 0x1d, 0x0000, 0x4000 },
};
rtl_hw_start_8168g(tp);
@@ -4809,16 +4719,15 @@ static void rtl_hw_start_8411_2(struct rtl8169_private *tp)
static void rtl_hw_start_8168h_1(struct rtl8169_private *tp)
{
- int rg_saw_cnt;
- u32 data;
static const struct ephy_info e_info_8168h_1[] = {
{ 0x1e, 0x0800, 0x0001 },
{ 0x1d, 0x0000, 0x0800 },
{ 0x05, 0xffff, 0x2089 },
{ 0x06, 0xffff, 0x5881 },
- { 0x04, 0xffff, 0x154a },
+ { 0x04, 0xffff, 0x854a },
{ 0x01, 0xffff, 0x068b }
};
+ int rg_saw_cnt;
/* disable aspm and clock request before access ephy */
rtl_hw_aspm_clkreq_enable(tp, false);
@@ -4863,31 +4772,13 @@ static void rtl_hw_start_8168h_1(struct rtl8169_private *tp)
sw_cnt_1ms_ini = 16000000/rg_saw_cnt;
sw_cnt_1ms_ini &= 0x0fff;
- data = r8168_mac_ocp_read(tp, 0xd412);
- data &= ~0x0fff;
- data |= sw_cnt_1ms_ini;
- r8168_mac_ocp_write(tp, 0xd412, data);
+ r8168_mac_ocp_modify(tp, 0xd412, 0x0fff, sw_cnt_1ms_ini);
}
- data = r8168_mac_ocp_read(tp, 0xe056);
- data &= ~0xf0;
- data |= 0x70;
- r8168_mac_ocp_write(tp, 0xe056, data);
-
- data = r8168_mac_ocp_read(tp, 0xe052);
- data &= ~0x6000;
- data |= 0x8008;
- r8168_mac_ocp_write(tp, 0xe052, data);
-
- data = r8168_mac_ocp_read(tp, 0xe0d6);
- data &= ~0x01ff;
- data |= 0x017f;
- r8168_mac_ocp_write(tp, 0xe0d6, data);
-
- data = r8168_mac_ocp_read(tp, 0xd420);
- data &= ~0x0fff;
- data |= 0x047f;
- r8168_mac_ocp_write(tp, 0xd420, data);
+ r8168_mac_ocp_modify(tp, 0xe056, 0x00f0, 0x0070);
+ r8168_mac_ocp_modify(tp, 0xe052, 0x6000, 0x8008);
+ r8168_mac_ocp_modify(tp, 0xe0d6, 0x01ff, 0x017f);
+ r8168_mac_ocp_modify(tp, 0xd420, 0x0fff, 0x047f);
r8168_mac_ocp_write(tp, 0xe63e, 0x0001);
r8168_mac_ocp_write(tp, 0xe63e, 0x0000);
@@ -4969,12 +4860,11 @@ static void rtl_hw_start_8168ep_2(struct rtl8169_private *tp)
static void rtl_hw_start_8168ep_3(struct rtl8169_private *tp)
{
- u32 data;
static const struct ephy_info e_info_8168ep_3[] = {
- { 0x00, 0xffff, 0x10a3 },
- { 0x19, 0xffff, 0x7c00 },
- { 0x1e, 0xffff, 0x20eb },
- { 0x0d, 0xffff, 0x1666 }
+ { 0x00, 0x0000, 0x0080 },
+ { 0x0d, 0x0100, 0x0200 },
+ { 0x19, 0x8021, 0x0000 },
+ { 0x1e, 0x0000, 0x2000 },
};
/* disable aspm and clock request before access ephy */
@@ -4986,18 +4876,9 @@ static void rtl_hw_start_8168ep_3(struct rtl8169_private *tp)
RTL_W8(tp, DLLPR, RTL_R8(tp, DLLPR) & ~PFM_EN);
RTL_W8(tp, MISC_1, RTL_R8(tp, MISC_1) & ~PFM_D3COLD_EN);
- data = r8168_mac_ocp_read(tp, 0xd3e2);
- data &= 0xf000;
- data |= 0x0271;
- r8168_mac_ocp_write(tp, 0xd3e2, data);
-
- data = r8168_mac_ocp_read(tp, 0xd3e4);
- data &= 0xff00;
- r8168_mac_ocp_write(tp, 0xd3e4, data);
-
- data = r8168_mac_ocp_read(tp, 0xe860);
- data |= 0x0080;
- r8168_mac_ocp_write(tp, 0xe860, data);
+ r8168_mac_ocp_modify(tp, 0xd3e2, 0x0fff, 0x0271);
+ r8168_mac_ocp_modify(tp, 0xd3e4, 0x00ff, 0x0000);
+ r8168_mac_ocp_modify(tp, 0xe860, 0x0000, 0x0080);
rtl_hw_aspm_clkreq_enable(tp, true);
}
@@ -5240,10 +5121,7 @@ static void rtl_hw_start(struct rtl8169_private *tp)
RTL_W8(tp, ChipCmd, CmdTxEnb | CmdRxEnb);
rtl_init_rxcfg(tp);
rtl_set_tx_config_registers(tp);
-
rtl_set_rx_mode(tp->dev);
- /* no early-rx interrupts */
- RTL_W16(tp, MultiIntr, RTL_R16(tp, MultiIntr) & 0xf000);
rtl_irq_enable(tp);
}
@@ -5268,17 +5146,6 @@ static inline void rtl8169_make_unusable_by_asic(struct RxDesc *desc)
desc->opts1 &= ~cpu_to_le32(DescOwn | RsvdMask);
}
-static void rtl8169_free_rx_databuff(struct rtl8169_private *tp,
- void **data_buff, struct RxDesc *desc)
-{
- dma_unmap_single(tp_to_dev(tp), le64_to_cpu(desc->addr),
- R8169_RX_BUF_SIZE, DMA_FROM_DEVICE);
-
- kfree(*data_buff);
- *data_buff = NULL;
- rtl8169_make_unusable_by_asic(desc);
-}
-
static inline void rtl8169_mark_to_asic(struct RxDesc *desc)
{
u32 eor = le32_to_cpu(desc->opts1) & RingEnd;
@@ -5289,49 +5156,43 @@ static inline void rtl8169_mark_to_asic(struct RxDesc *desc)
desc->opts1 = cpu_to_le32(DescOwn | eor | R8169_RX_BUF_SIZE);
}
-static struct sk_buff *rtl8169_alloc_rx_data(struct rtl8169_private *tp,
- struct RxDesc *desc)
+static struct page *rtl8169_alloc_rx_data(struct rtl8169_private *tp,
+ struct RxDesc *desc)
{
- void *data;
- dma_addr_t mapping;
struct device *d = tp_to_dev(tp);
int node = dev_to_node(d);
+ dma_addr_t mapping;
+ struct page *data;
- data = kmalloc_node(R8169_RX_BUF_SIZE, GFP_KERNEL, node);
+ data = alloc_pages_node(node, GFP_KERNEL, get_order(R8169_RX_BUF_SIZE));
if (!data)
return NULL;
- /* Memory should be properly aligned, but better check. */
- if (!IS_ALIGNED((unsigned long)data, 8)) {
- netdev_err_once(tp->dev, "RX buffer not 8-byte-aligned\n");
- goto err_out;
- }
-
- mapping = dma_map_single(d, data, R8169_RX_BUF_SIZE, DMA_FROM_DEVICE);
+ mapping = dma_map_page(d, data, 0, R8169_RX_BUF_SIZE, DMA_FROM_DEVICE);
if (unlikely(dma_mapping_error(d, mapping))) {
if (net_ratelimit())
netif_err(tp, drv, tp->dev, "Failed to map RX DMA!\n");
- goto err_out;
+ __free_pages(data, get_order(R8169_RX_BUF_SIZE));
+ return NULL;
}
desc->addr = cpu_to_le64(mapping);
rtl8169_mark_to_asic(desc);
- return data;
-err_out:
- kfree(data);
- return NULL;
+ return data;
}
static void rtl8169_rx_clear(struct rtl8169_private *tp)
{
unsigned int i;
- for (i = 0; i < NUM_RX_DESC; i++) {
- if (tp->Rx_databuff[i]) {
- rtl8169_free_rx_databuff(tp, tp->Rx_databuff + i,
- tp->RxDescArray + i);
- }
+ for (i = 0; i < NUM_RX_DESC && tp->Rx_databuff[i]; i++) {
+ dma_unmap_page(tp_to_dev(tp),
+ le64_to_cpu(tp->RxDescArray[i].addr),
+ R8169_RX_BUF_SIZE, DMA_FROM_DEVICE);
+ __free_pages(tp->Rx_databuff[i], get_order(R8169_RX_BUF_SIZE));
+ tp->Rx_databuff[i] = NULL;
+ rtl8169_make_unusable_by_asic(tp->RxDescArray + i);
}
}
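RX buffers switch from kmalloc'd blobs to whole pages, so allocation and teardown must agree on the page order: alloc_pages_node()/dma_map_page() on one side, dma_unmap_page()/__free_pages() with the same get_order() on the other. get_order() rounds up to the smallest power-of-two page count, e.g. on 4 KiB pages get_order(16384) == 2 (four pages). A minimal sketch of the pairing (illustrative names, not driver code):

    static struct page *rx_buf_alloc(int node, size_t size)
    {
            return alloc_pages_node(node, GFP_KERNEL, get_order(size));
    }

    static void rx_buf_free(struct page *page, size_t size)
    {
            __free_pages(page, get_order(size));
    }

The changed loop condition in rtl8169_rx_clear() (stop at the first NULL slot) relies on rtl8169_rx_fill() populating the ring in order and unwinding on failure.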
@@ -5345,7 +5206,7 @@ static int rtl8169_rx_fill(struct rtl8169_private *tp)
unsigned int i;
for (i = 0; i < NUM_RX_DESC; i++) {
- void *data;
+ struct page *data;
data = rtl8169_alloc_rx_data(tp, tp->RxDescArray + i);
if (!data) {
@@ -5507,44 +5368,6 @@ static bool rtl_test_hw_pad_bug(struct rtl8169_private *tp, struct sk_buff *skb)
return skb->len < ETH_ZLEN && tp->mac_version == RTL_GIGA_MAC_VER_34;
}
-static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb,
- struct net_device *dev);
-/* r8169_csum_workaround()
- * The hw limits the value of the transport offset. When the offset is out of
- * range, calculate the checksum by sw.
- */
-static void r8169_csum_workaround(struct rtl8169_private *tp,
- struct sk_buff *skb)
-{
- if (skb_is_gso(skb)) {
- netdev_features_t features = tp->dev->features;
- struct sk_buff *segs, *nskb;
-
- features &= ~(NETIF_F_SG | NETIF_F_IPV6_CSUM | NETIF_F_TSO6);
- segs = skb_gso_segment(skb, features);
- if (IS_ERR(segs) || !segs)
- goto drop;
-
- do {
- nskb = segs;
- segs = segs->next;
- nskb->next = NULL;
- rtl8169_start_xmit(nskb, tp->dev);
- } while (segs);
-
- dev_consume_skb_any(skb);
- } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
- if (skb_checksum_help(skb) < 0)
- goto drop;
-
- rtl8169_start_xmit(skb, tp->dev);
- } else {
-drop:
- tp->dev->stats.tx_dropped++;
- dev_kfree_skb_any(skb);
- }
-}
-
/* msdn_giant_send_check()
* According to the document of microsoft, the TCP Pseudo Header excludes the
* packet length for IPv6 TCP large packets.
@@ -5594,13 +5417,6 @@ static bool rtl8169_tso_csum_v2(struct rtl8169_private *tp,
u32 mss = skb_shinfo(skb)->gso_size;
if (mss) {
- if (transport_offset > GTTCPHO_MAX) {
- netif_warn(tp, tx_err, tp->dev,
- "Invalid transport offset 0x%x for TSO\n",
- transport_offset);
- return false;
- }
-
switch (vlan_get_protocol(skb)) {
case htons(ETH_P_IP):
opts[0] |= TD1_GTSENV4;
@@ -5623,16 +5439,6 @@ static bool rtl8169_tso_csum_v2(struct rtl8169_private *tp,
} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
u8 ip_protocol;
- if (unlikely(rtl_test_hw_pad_bug(tp, skb)))
- return !(skb_checksum_help(skb) || eth_skb_pad(skb));
-
- if (transport_offset > TCPHO_MAX) {
- netif_warn(tp, tx_err, tp->dev,
- "Invalid transport offset 0x%x\n",
- transport_offset);
- return false;
- }
-
switch (vlan_get_protocol(skb)) {
case htons(ETH_P_IP):
opts[1] |= TD1_IPv4_CS;
@@ -5695,6 +5501,8 @@ static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb,
struct device *d = tp_to_dev(tp);
dma_addr_t mapping;
u32 opts[2], len;
+ bool stop_queue;
+ bool door_bell;
int frags;
if (unlikely(!rtl_tx_slots_avail(tp, skb_shinfo(skb)->nr_frags))) {
@@ -5709,10 +5517,8 @@ static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb,
opts[0] = DescOwn;
if (rtl_chip_supports_csum_v2(tp)) {
- if (!rtl8169_tso_csum_v2(tp, skb, opts)) {
- r8169_csum_workaround(tp, skb);
- return NETDEV_TX_OK;
- }
+ if (!rtl8169_tso_csum_v2(tp, skb, opts))
+ goto err_dma_0;
} else {
rtl8169_tso_csum_v1(skb, opts);
}
@@ -5740,13 +5546,13 @@ static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb,
txd->opts2 = cpu_to_le32(opts[1]);
- netdev_sent_queue(dev, skb->len);
-
skb_tx_timestamp(skb);
/* Force memory writes to complete before releasing descriptor */
dma_wmb();
+ door_bell = __netdev_sent_queue(dev, skb->len, netdev_xmit_more());
+
txd->opts1 = rtl8169_get_txd_opts1(opts[0], len, entry);
/* Force all memory writes to complete before notifying device */
@@ -5754,14 +5560,20 @@ static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb,
tp->cur_tx += frags + 1;
- RTL_W8(tp, TxPoll, NPQ);
-
- if (!rtl_tx_slots_avail(tp, MAX_SKB_FRAGS)) {
+ stop_queue = !rtl_tx_slots_avail(tp, MAX_SKB_FRAGS);
+ if (unlikely(stop_queue)) {
/* Avoid wrongly optimistic queue wake-up: rtl_tx thread must
* not miss a ring update when it notices a stopped queue.
*/
smp_wmb();
netif_stop_queue(dev);
+ door_bell = true;
+ }
+
+ if (door_bell)
+ RTL_W8(tp, TxPoll, NPQ);
+
+ if (unlikely(stop_queue)) {
/* Sync with rtl_tx:
* - publish queue status and cur_tx ring index (write barrier)
* - refresh dirty_tx ring index (read barrier).
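The per-packet TxPoll write becomes a coalesced doorbell: __netdev_sent_queue() returns true when the bell must ring, i.e. when the stack is not batching (netdev_xmit_more() is false) or when BQL just stopped the queue; a ring that fills up also forces the kick, since no later xmit will come along to ring it. Generic shape of the pattern (sketch with placeholder names):

    bool kick = __netdev_sent_queue(dev, skb->len, netdev_xmit_more());

    if (unlikely(!ring_has_room(ring))) {   /* ring_has_room(): placeholder */
            netif_stop_queue(dev);
            kick = true;
    }
    if (kick)
            ring_doorbell(ring);            /* ring_doorbell(): placeholder */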
@@ -5789,6 +5601,39 @@ err_stop_0:
return NETDEV_TX_BUSY;
}
+static netdev_features_t rtl8169_features_check(struct sk_buff *skb,
+ struct net_device *dev,
+ netdev_features_t features)
+{
+ int transport_offset = skb_transport_offset(skb);
+ struct rtl8169_private *tp = netdev_priv(dev);
+
+ if (skb_is_gso(skb)) {
+ if (transport_offset > GTTCPHO_MAX &&
+ rtl_chip_supports_csum_v2(tp))
+ features &= ~NETIF_F_ALL_TSO;
+ } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
+ if (skb->len < ETH_ZLEN) {
+ switch (tp->mac_version) {
+ case RTL_GIGA_MAC_VER_11:
+ case RTL_GIGA_MAC_VER_12:
+ case RTL_GIGA_MAC_VER_17:
+ case RTL_GIGA_MAC_VER_34:
+ features &= ~NETIF_F_CSUM_MASK;
+ break;
+ default:
+ break;
+ }
+ }
+
+ if (transport_offset > TCPHO_MAX &&
+ rtl_chip_supports_csum_v2(tp))
+ features &= ~NETIF_F_CSUM_MASK;
+ }
+
+ return vlan_features_check(skb, features);
+}
+
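The removed r8169_csum_workaround() segmented or checksummed problem packets inside the driver; with .ndo_features_check the driver instead vetoes the offending offload per skb, and the core falls back to software GSO/checksumming before ndo_start_xmit() ever sees the packet. Simplified view of the calling side (paraphrased from net/core/dev.c; treat as a sketch):

    netdev_features_t netif_skb_features(struct sk_buff *skb)
    {
            struct net_device *dev = skb->dev;
            netdev_features_t features = dev->features;

            /* ... protocol- and VLAN-specific trimming elided ... */
            if (dev->netdev_ops->ndo_features_check)
                    features &= dev->netdev_ops->ndo_features_check(skb, dev,
                                                                    features);
            return features;
    }

This is also why rtl8169_tso_csum_v2() can drop its transport-offset checks above: such skbs no longer reach it with the offload bits set.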
static void rtl8169_pcierr_interrupt(struct net_device *dev)
{
struct rtl8169_private *tp = netdev_priv(dev);
@@ -5908,24 +5753,6 @@ static inline void rtl8169_rx_csum(struct sk_buff *skb, u32 opts1)
skb_checksum_none_assert(skb);
}
-static struct sk_buff *rtl8169_try_rx_copy(void *data,
- struct rtl8169_private *tp,
- int pkt_size,
- dma_addr_t addr)
-{
- struct sk_buff *skb;
- struct device *d = tp_to_dev(tp);
-
- dma_sync_single_for_cpu(d, addr, pkt_size, DMA_FROM_DEVICE);
- prefetch(data);
- skb = napi_alloc_skb(&tp->napi, pkt_size);
- if (skb)
- skb_copy_to_linear_data(skb, data, pkt_size);
- dma_sync_single_for_device(d, addr, pkt_size, DMA_FROM_DEVICE);
-
- return skb;
-}
-
static int rtl_rx(struct net_device *dev, struct rtl8169_private *tp, u32 budget)
{
unsigned int cur_rx, rx_left;
@@ -5935,6 +5762,7 @@ static int rtl_rx(struct net_device *dev, struct rtl8169_private *tp, u32 budget
for (rx_left = min(budget, NUM_RX_DESC); rx_left > 0; rx_left--, cur_rx++) {
unsigned int entry = cur_rx % NUM_RX_DESC;
+ const void *rx_buf = page_address(tp->Rx_databuff[entry]);
struct RxDesc *desc = tp->RxDescArray + entry;
u32 status;
@@ -5961,17 +5789,13 @@ static int rtl_rx(struct net_device *dev, struct rtl8169_private *tp, u32 budget
goto process_pkt;
}
} else {
+ unsigned int pkt_size;
struct sk_buff *skb;
- dma_addr_t addr;
- int pkt_size;
process_pkt:
- addr = le64_to_cpu(desc->addr);
+ pkt_size = status & GENMASK(13, 0);
if (likely(!(dev->features & NETIF_F_RXFCS)))
- pkt_size = (status & 0x00003fff) - 4;
- else
- pkt_size = status & 0x00003fff;
-
+ pkt_size -= ETH_FCS_LEN;
/*
* The driver does not support incoming fragmented
* frames. They are seen as a symptom of over-mtu
@@ -5983,15 +5807,25 @@ process_pkt:
goto release_descriptor;
}
- skb = rtl8169_try_rx_copy(tp->Rx_databuff[entry],
- tp, pkt_size, addr);
- if (!skb) {
+ skb = napi_alloc_skb(&tp->napi, pkt_size);
+ if (unlikely(!skb)) {
dev->stats.rx_dropped++;
goto release_descriptor;
}
+ dma_sync_single_for_cpu(tp_to_dev(tp),
+ le64_to_cpu(desc->addr),
+ pkt_size, DMA_FROM_DEVICE);
+ prefetch(rx_buf);
+ skb_copy_to_linear_data(skb, rx_buf, pkt_size);
+ skb->tail += pkt_size;
+ skb->len = pkt_size;
+
+ dma_sync_single_for_device(tp_to_dev(tp),
+ le64_to_cpu(desc->addr),
+ pkt_size, DMA_FROM_DEVICE);
+
rtl8169_rx_csum(skb, status);
- skb_put(skb, pkt_size);
skb->protocol = eth_type_trans(skb, dev);
rtl8169_rx_vlan_tag(desc, skb);
@@ -6332,7 +6166,7 @@ rtl8169_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
stats->multicast = dev->stats.multicast;
/*
- * Fetch additonal counter values missing in stats collected by driver
+ * Fetch additional counter values missing in stats collected by driver
* from tally counters.
*/
if (pm_runtime_active(&pdev->dev))
@@ -6556,6 +6390,7 @@ static const struct net_device_ops rtl_netdev_ops = {
.ndo_stop = rtl8169_close,
.ndo_get_stats64 = rtl8169_get_stats64,
.ndo_start_xmit = rtl8169_start_xmit,
+ .ndo_features_check = rtl8169_features_check,
.ndo_tx_timeout = rtl8169_tx_timeout,
.ndo_validate_addr = eth_validate_addr,
.ndo_change_mtu = rtl8169_change_mtu,
@@ -6692,8 +6527,6 @@ static int r8169_mdio_register(struct rtl8169_private *tp)
static void rtl_hw_init_8168g(struct rtl8169_private *tp)
{
- u32 data;
-
tp->ocp_base = OCP_STD_PHY_BASE;
RTL_W32(tp, MISC, RTL_R32(tp, MISC) | RXDV_GATED_EN);
@@ -6708,16 +6541,12 @@ static void rtl_hw_init_8168g(struct rtl8169_private *tp)
msleep(1);
RTL_W8(tp, MCU, RTL_R8(tp, MCU) & ~NOW_IS_OOB);
- data = r8168_mac_ocp_read(tp, 0xe8de);
- data &= ~(1 << 14);
- r8168_mac_ocp_write(tp, 0xe8de, data);
+ r8168_mac_ocp_modify(tp, 0xe8de, BIT(14), 0);
if (!rtl_udelay_loop_wait_high(tp, &rtl_link_list_ready_cond, 100, 42))
return;
- data = r8168_mac_ocp_read(tp, 0xe8de);
- data |= (1 << 15);
- r8168_mac_ocp_write(tp, 0xe8de, data);
+ r8168_mac_ocp_modify(tp, 0xe8de, 0, BIT(15));
rtl_udelay_loop_wait_high(tp, &rtl_link_list_ready_cond, 100, 42);
}
@@ -6917,11 +6746,9 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
netif_napi_add(dev, &tp->napi, rtl8169_poll, NAPI_POLL_WEIGHT);
- /* don't enable SG, IP_CSUM and TSO by default - it might not work
- * properly for all devices */
- dev->features |= NETIF_F_RXCSUM |
- NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
-
+ dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO |
+ NETIF_F_RXCSUM | NETIF_F_HW_VLAN_CTAG_TX |
+ NETIF_F_HW_VLAN_CTAG_RX;
dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO |
NETIF_F_RXCSUM | NETIF_F_HW_VLAN_CTAG_TX |
NETIF_F_HW_VLAN_CTAG_RX;
@@ -6939,8 +6766,22 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
/* Disallow toggling */
dev->hw_features &= ~NETIF_F_HW_VLAN_CTAG_RX;
- if (rtl_chip_supports_csum_v2(tp))
+ if (rtl_chip_supports_csum_v2(tp)) {
dev->hw_features |= NETIF_F_IPV6_CSUM | NETIF_F_TSO6;
+ dev->features |= NETIF_F_IPV6_CSUM | NETIF_F_TSO6;
+ dev->gso_max_size = RTL_GSO_MAX_SIZE_V2;
+ dev->gso_max_segs = RTL_GSO_MAX_SEGS_V2;
+ } else {
+ dev->gso_max_size = RTL_GSO_MAX_SIZE_V1;
+ dev->gso_max_segs = RTL_GSO_MAX_SEGS_V1;
+ }
+
+ /* RTL8168e-vl has a HW issue with TSO */
+ if (tp->mac_version == RTL_GIGA_MAC_VER_34) {
+ dev->vlan_features &= ~(NETIF_F_ALL_TSO | NETIF_F_SG);
+ dev->hw_features &= ~(NETIF_F_ALL_TSO | NETIF_F_SG);
+ dev->features &= ~(NETIF_F_ALL_TSO | NETIF_F_SG);
+ }
dev->hw_features |= NETIF_F_RXALL;
dev->hw_features |= NETIF_F_RXFCS;
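For context: dev->features is what is active after probe, dev->hw_features is the superset the user may toggle later (e.g. `ethtool -K eth0 tso off`), and dev->vlan_features is what VLAN sub-devices inherit. This hunk therefore turns SG/TSO/checksumming on by default rather than merely offering them, while the RTL_GIGA_MAC_VER_34 (RTL8168evl) quirk strips TSO/SG from all three sets because of the hardware bug noted in the comment.

    /* Feature-set roles (summary, not code from this patch):
     *   dev->features      - offloads currently enabled
     *   dev->hw_features   - offloads toggleable via ethtool -K
     *   dev->vlan_features - offloads inherited by VLAN devices
     */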
diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_platform.c b/drivers/net/ethernet/samsung/sxgbe/sxgbe_platform.c
index d2c48116f181..2412c87561e0 100644
--- a/drivers/net/ethernet/samsung/sxgbe/sxgbe_platform.c
+++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_platform.c
@@ -78,7 +78,6 @@ static int sxgbe_platform_probe(struct platform_device *pdev)
{
int ret;
int i, chan;
- struct resource *res;
struct device *dev = &pdev->dev;
void __iomem *addr;
struct sxgbe_priv_data *priv = NULL;
@@ -88,8 +87,7 @@ static int sxgbe_platform_probe(struct platform_device *pdev)
struct device_node *node = dev->of_node;
/* Get memory resource */
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- addr = devm_ioremap_resource(dev, res);
+ addr = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(addr))
return PTR_ERR(addr);
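devm_platform_ioremap_resource(pdev, index) is simply the two-call sequence it replaces, folded into one helper; it still returns an ERR_PTR on failure, so the IS_ERR()/PTR_ERR() handling stays as-is:

    /* What devm_platform_ioremap_resource(pdev, 0) does, roughly: */
    struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
    void __iomem *addr = devm_ioremap_resource(&pdev->dev, res);

The same conversion repeats below for anarion, dwc-qos, meson, meson8b and sni_ave.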
diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c
index 16d6952c312a..0ec13f520e90 100644
--- a/drivers/net/ethernet/sfc/ef10.c
+++ b/drivers/net/ethernet/sfc/ef10.c
@@ -508,7 +508,7 @@ static ssize_t efx_ef10_show_link_control_flag(struct device *dev,
struct device_attribute *attr,
char *buf)
{
- struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev));
+ struct efx_nic *efx = dev_get_drvdata(dev);
return sprintf(buf, "%d\n",
((efx->mcdi->fn_flags) &
@@ -520,7 +520,7 @@ static ssize_t efx_ef10_show_primary_flag(struct device *dev,
struct device_attribute *attr,
char *buf)
{
- struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev));
+ struct efx_nic *efx = dev_get_drvdata(dev);
return sprintf(buf, "%d\n",
((efx->mcdi->fn_flags) &
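pci_set_drvdata() stores the pointer in the embedded struct device, so in sysfs attribute callbacks, which already receive that struct device, dev_get_drvdata(dev) yields the same pointer as pci_get_drvdata(to_pci_dev(dev)) without the container_of round trip:

    /* Equivalent lookups (the first is what the old code did): */
    struct efx_nic *a = pci_get_drvdata(to_pci_dev(dev));
    struct efx_nic *b = dev_get_drvdata(dev);   /* a == b */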
diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c
index ab58b837df47..2fef7402233e 100644
--- a/drivers/net/ethernet/sfc/efx.c
+++ b/drivers/net/ethernet/sfc/efx.c
@@ -2517,7 +2517,7 @@ static struct notifier_block efx_netdev_notifier = {
static ssize_t
show_phy_type(struct device *dev, struct device_attribute *attr, char *buf)
{
- struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev));
+ struct efx_nic *efx = dev_get_drvdata(dev);
return sprintf(buf, "%d\n", efx->phy_type);
}
static DEVICE_ATTR(phy_type, 0444, show_phy_type, NULL);
@@ -2526,7 +2526,7 @@ static DEVICE_ATTR(phy_type, 0444, show_phy_type, NULL);
static ssize_t show_mcdi_log(struct device *dev, struct device_attribute *attr,
char *buf)
{
- struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev));
+ struct efx_nic *efx = dev_get_drvdata(dev);
struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
return scnprintf(buf, PAGE_SIZE, "%d\n", mcdi->logging_enabled);
@@ -2534,7 +2534,7 @@ static ssize_t show_mcdi_log(struct device *dev, struct device_attribute *attr,
static ssize_t set_mcdi_log(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
- struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev));
+ struct efx_nic *efx = dev_get_drvdata(dev);
struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
bool enable = count > 0 && *buf != '0';
@@ -3654,7 +3654,7 @@ static int efx_pci_sriov_configure(struct pci_dev *dev, int num_vfs)
static int efx_pm_freeze(struct device *dev)
{
- struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev));
+ struct efx_nic *efx = dev_get_drvdata(dev);
rtnl_lock();
@@ -3675,7 +3675,7 @@ static int efx_pm_freeze(struct device *dev)
static int efx_pm_thaw(struct device *dev)
{
int rc;
- struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev));
+ struct efx_nic *efx = dev_get_drvdata(dev);
rtnl_lock();
diff --git a/drivers/net/ethernet/sfc/falcon/efx.c b/drivers/net/ethernet/sfc/falcon/efx.c
index 9b15c39ac670..eecc348b1c32 100644
--- a/drivers/net/ethernet/sfc/falcon/efx.c
+++ b/drivers/net/ethernet/sfc/falcon/efx.c
@@ -2256,7 +2256,7 @@ static struct notifier_block ef4_netdev_notifier = {
static ssize_t
show_phy_type(struct device *dev, struct device_attribute *attr, char *buf)
{
- struct ef4_nic *efx = pci_get_drvdata(to_pci_dev(dev));
+ struct ef4_nic *efx = dev_get_drvdata(dev);
return sprintf(buf, "%d\n", efx->phy_type);
}
static DEVICE_ATTR(phy_type, 0444, show_phy_type, NULL);
@@ -2999,7 +2999,7 @@ static int ef4_pci_probe(struct pci_dev *pci_dev,
static int ef4_pm_freeze(struct device *dev)
{
- struct ef4_nic *efx = pci_get_drvdata(to_pci_dev(dev));
+ struct ef4_nic *efx = dev_get_drvdata(dev);
rtnl_lock();
@@ -3020,7 +3020,7 @@ static int ef4_pm_freeze(struct device *dev)
static int ef4_pm_thaw(struct device *dev)
{
int rc;
- struct ef4_nic *efx = pci_get_drvdata(to_pci_dev(dev));
+ struct ef4_nic *efx = dev_get_drvdata(dev);
rtnl_lock();
diff --git a/drivers/net/ethernet/sfc/falcon/falcon_boards.c b/drivers/net/ethernet/sfc/falcon/falcon_boards.c
index 839189dab98e..605f486fa675 100644
--- a/drivers/net/ethernet/sfc/falcon/falcon_boards.c
+++ b/drivers/net/ethernet/sfc/falcon/falcon_boards.c
@@ -357,7 +357,7 @@ fail_on:
static ssize_t show_phy_flash_cfg(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct ef4_nic *efx = pci_get_drvdata(to_pci_dev(dev));
+ struct ef4_nic *efx = dev_get_drvdata(dev);
return sprintf(buf, "%d\n", !!(efx->phy_mode & PHY_MODE_SPECIAL));
}
@@ -365,7 +365,7 @@ static ssize_t set_phy_flash_cfg(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
- struct ef4_nic *efx = pci_get_drvdata(to_pci_dev(dev));
+ struct ef4_nic *efx = dev_get_drvdata(dev);
enum ef4_phy_mode old_mode, new_mode;
int err;
@@ -454,13 +454,13 @@ static int sfe4001_init(struct ef4_nic *efx)
#if IS_ENABLED(CONFIG_SENSORS_LM90)
board->hwmon_client =
- i2c_new_device(&board->i2c_adap, &sfe4001_hwmon_info);
+ i2c_new_client_device(&board->i2c_adap, &sfe4001_hwmon_info);
#else
board->hwmon_client =
- i2c_new_dummy(&board->i2c_adap, sfe4001_hwmon_info.addr);
+ i2c_new_dummy_device(&board->i2c_adap, sfe4001_hwmon_info.addr);
#endif
- if (!board->hwmon_client)
- return -EIO;
+ if (IS_ERR(board->hwmon_client))
+ return PTR_ERR(board->hwmon_client);
/* Raise board/PHY high limit from 85 to 90 degrees Celsius */
rc = i2c_smbus_write_byte_data(board->hwmon_client,
@@ -468,9 +468,9 @@ static int sfe4001_init(struct ef4_nic *efx)
if (rc)
goto fail_hwmon;
- board->ioexp_client = i2c_new_dummy(&board->i2c_adap, PCA9539);
- if (!board->ioexp_client) {
- rc = -EIO;
+ board->ioexp_client = i2c_new_dummy_device(&board->i2c_adap, PCA9539);
+ if (IS_ERR(board->ioexp_client)) {
+ rc = PTR_ERR(board->ioexp_client);
goto fail_hwmon;
}
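The old i2c_new_device()/i2c_new_dummy() return NULL on any failure; their replacements return an ERR_PTR carrying the actual error. Hence the checks flip from !ptr to IS_ERR(ptr), and the callers can propagate the real errno instead of a blanket -EIO (generic names in this sketch):

    client = i2c_new_dummy_device(adap, addr);
    if (IS_ERR(client))
            return PTR_ERR(client);   /* e.g. -EBUSY, not always -EIO */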
diff --git a/drivers/net/ethernet/sfc/falcon/rx.c b/drivers/net/ethernet/sfc/falcon/rx.c
index fd850d3d8ec0..05ea3523890a 100644
--- a/drivers/net/ethernet/sfc/falcon/rx.c
+++ b/drivers/net/ethernet/sfc/falcon/rx.c
@@ -424,7 +424,6 @@ ef4_rx_packet_gro(struct ef4_channel *channel, struct ef4_rx_buffer *rx_buf,
unsigned int n_frags, u8 *eh)
{
struct napi_struct *napi = &channel->napi_str;
- gro_result_t gro_result;
struct ef4_nic *efx = channel->efx;
struct sk_buff *skb;
@@ -460,9 +459,7 @@ ef4_rx_packet_gro(struct ef4_channel *channel, struct ef4_rx_buffer *rx_buf,
skb_record_rx_queue(skb, channel->rx_queue.core_index);
- gro_result = napi_gro_frags(napi);
- if (gro_result != GRO_DROP)
- channel->irq_mod_score += 2;
+ napi_gro_frags(napi);
}
/* Allocate and construct an SKB around page fragments */
diff --git a/drivers/net/ethernet/sfc/rx.c b/drivers/net/ethernet/sfc/rx.c
index d5db045535d3..85ec07f5a674 100644
--- a/drivers/net/ethernet/sfc/rx.c
+++ b/drivers/net/ethernet/sfc/rx.c
@@ -412,7 +412,6 @@ efx_rx_packet_gro(struct efx_channel *channel, struct efx_rx_buffer *rx_buf,
unsigned int n_frags, u8 *eh)
{
struct napi_struct *napi = &channel->napi_str;
- gro_result_t gro_result;
struct efx_nic *efx = channel->efx;
struct sk_buff *skb;
@@ -449,9 +448,7 @@ efx_rx_packet_gro(struct efx_channel *channel, struct efx_rx_buffer *rx_buf,
skb_record_rx_queue(skb, channel->rx_queue.core_index);
- gro_result = napi_gro_frags(napi);
- if (gro_result != GRO_DROP)
- channel->irq_mod_score += 2;
+ napi_gro_frags(napi);
}
/* Allocate and construct an SKB around page fragments */
diff --git a/drivers/net/ethernet/sfc/tx.c b/drivers/net/ethernet/sfc/tx.c
index 31ec56091a5d..65e81ec1b314 100644
--- a/drivers/net/ethernet/sfc/tx.c
+++ b/drivers/net/ethernet/sfc/tx.c
@@ -274,7 +274,7 @@ static void efx_skb_copy_bits_to_pio(struct efx_nic *efx, struct sk_buff *skb,
vaddr = kmap_atomic(skb_frag_page(f));
- efx_memcpy_toio_aligned_cb(efx, piobuf, vaddr + f->page_offset,
+ efx_memcpy_toio_aligned_cb(efx, piobuf, vaddr + skb_frag_off(f),
skb_frag_size(f), copy_buf);
kunmap_atomic(vaddr);
}
diff --git a/drivers/net/ethernet/sgi/meth.c b/drivers/net/ethernet/sgi/meth.c
index 00660dd820e2..539bc5db989c 100644
--- a/drivers/net/ethernet/sgi/meth.c
+++ b/drivers/net/ethernet/sgi/meth.c
@@ -247,8 +247,7 @@ static void meth_free_tx_ring(struct meth_private *priv)
/* Remove any pending skb */
for (i = 0; i < TX_RING_ENTRIES; i++) {
- if (priv->tx_skbs[i])
- dev_kfree_skb(priv->tx_skbs[i]);
+ dev_kfree_skb(priv->tx_skbs[i]);
priv->tx_skbs[i] = NULL;
}
dma_free_coherent(&priv->pdev->dev, TX_RING_BUFFER_SIZE, priv->tx_ring,
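dev_kfree_skb() already returns early for a NULL skb (kfree_skb() checks the pointer), so the if wrapped around it was dead weight; the smc91x hunk below makes the identical cleanup:

    /* These are equivalent; the helper is NULL-safe like kfree(): */
    if (skb)
            dev_kfree_skb(skb);

    dev_kfree_skb(skb);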
diff --git a/drivers/net/ethernet/sis/sis900.c b/drivers/net/ethernet/sis/sis900.c
index 6e07f5ebacfc..85eaccbbbac1 100644
--- a/drivers/net/ethernet/sis/sis900.c
+++ b/drivers/net/ethernet/sis/sis900.c
@@ -191,6 +191,8 @@ struct sis900_private {
unsigned int tx_full; /* The Tx queue is full. */
u8 host_bridge_rev;
u8 chipset_rev;
+ /* EEPROM data */
+ int eeprom_size;
};
MODULE_AUTHOR("Jim Huang <cmhuang@sis.com.tw>, Ollie Lho <ollie@sis.com.tw>");
@@ -475,6 +477,8 @@ static int sis900_probe(struct pci_dev *pci_dev,
sis_priv->pci_dev = pci_dev;
spin_lock_init(&sis_priv->lock);
+ sis_priv->eeprom_size = 24;
+
pci_set_drvdata(pci_dev, net_dev);
ring_space = pci_alloc_consistent(pci_dev, TX_TOTAL_SIZE, &ring_dma);
@@ -2122,6 +2126,68 @@ static void sis900_get_wol(struct net_device *net_dev, struct ethtool_wolinfo *w
wol->supported = (WAKE_PHY | WAKE_MAGIC);
}
+static int sis900_get_eeprom_len(struct net_device *dev)
+{
+ struct sis900_private *sis_priv = netdev_priv(dev);
+
+ return sis_priv->eeprom_size;
+}
+
+static int sis900_read_eeprom(struct net_device *net_dev, u8 *buf)
+{
+ struct sis900_private *sis_priv = netdev_priv(net_dev);
+ void __iomem *ioaddr = sis_priv->ioaddr;
+ int wait, ret = -EAGAIN;
+ u16 signature;
+ u16 *ebuf = (u16 *)buf;
+ int i;
+
+ if (sis_priv->chipset_rev == SIS96x_900_REV) {
+ sw32(mear, EEREQ);
+ for (wait = 0; wait < 2000; wait++) {
+ if (sr32(mear) & EEGNT) {
+ /* read 16 bits, and index by 16 bits */
+ for (i = 0; i < sis_priv->eeprom_size / 2; i++)
+ ebuf[i] = (u16)read_eeprom(ioaddr, i);
+ ret = 0;
+ break;
+ }
+ udelay(1);
+ }
+ sw32(mear, EEDONE);
+ } else {
+ signature = (u16)read_eeprom(ioaddr, EEPROMSignature);
+ if (signature != 0xffff && signature != 0x0000) {
+ /* read 16 bits, and index by 16 bits */
+ for (i = 0; i < sis_priv->eeprom_size / 2; i++)
+ ebuf[i] = (u16)read_eeprom(ioaddr, i);
+ ret = 0;
+ }
+ }
+ return ret;
+}
+
+#define SIS900_EEPROM_MAGIC 0xBABE
+static int sis900_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
+{
+ struct sis900_private *sis_priv = netdev_priv(dev);
+ u8 *eebuf;
+ int res;
+
+ eebuf = kmalloc(sis_priv->eeprom_size, GFP_KERNEL);
+ if (!eebuf)
+ return -ENOMEM;
+
+ eeprom->magic = SIS900_EEPROM_MAGIC;
+ spin_lock_irq(&sis_priv->lock);
+ res = sis900_read_eeprom(dev, eebuf);
+ spin_unlock_irq(&sis_priv->lock);
+ if (!res)
+ memcpy(data, eebuf + eeprom->offset, eeprom->len);
+ kfree(eebuf);
+ return res;
+}
+
static const struct ethtool_ops sis900_ethtool_ops = {
.get_drvinfo = sis900_get_drvinfo,
.get_msglevel = sis900_get_msglevel,
@@ -2132,6 +2198,8 @@ static const struct ethtool_ops sis900_ethtool_ops = {
.set_wol = sis900_set_wol,
.get_link_ksettings = sis900_get_link_ksettings,
.set_link_ksettings = sis900_set_link_ksettings,
+ .get_eeprom_len = sis900_get_eeprom_len,
+ .get_eeprom = sis900_get_eeprom,
};
/**
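These two ethtool ops back `ethtool -e` for sis900: the core calls .get_eeprom_len to bound the request, validates offset/len against it, then calls .get_eeprom, which must fill eeprom->magic so userspace can match dumps (and guard writes) against the right device. A minimal userspace read over the same ioctl interface (sketch; assumes interface name eth0, error handling trimmed):

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <sys/socket.h>
    #include <net/if.h>
    #include <linux/ethtool.h>
    #include <linux/sockios.h>

    int main(void)
    {
            struct ethtool_eeprom *ee = calloc(1, sizeof(*ee) + 24);
            struct ifreq ifr = { 0 };
            int fd = socket(AF_INET, SOCK_DGRAM, 0);

            strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
            ee->cmd = ETHTOOL_GEEPROM;
            ee->offset = 0;
            ee->len = 24;                   /* sis900's advertised size */
            ifr.ifr_data = (void *)ee;
            if (ioctl(fd, SIOCETHTOOL, &ifr) == 0)
                    printf("magic 0x%x len %u\n", ee->magic, ee->len);
            free(ee);
            return 0;
    }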
diff --git a/drivers/net/ethernet/smsc/smc91x.c b/drivers/net/ethernet/smsc/smc91x.c
index 601e76ad99a0..3a6761131f4c 100644
--- a/drivers/net/ethernet/smsc/smc91x.c
+++ b/drivers/net/ethernet/smsc/smc91x.c
@@ -378,8 +378,7 @@ static void smc_shutdown(struct net_device *dev)
pending_skb = lp->pending_tx_skb;
lp->pending_tx_skb = NULL;
spin_unlock_irq(&lp->lock);
- if (pending_skb)
- dev_kfree_skb(pending_skb);
+ dev_kfree_skb(pending_skb);
/* and tell the card to stay away from that nasty outside world */
SMC_SELECT_BANK(lp, 0);
diff --git a/drivers/net/ethernet/socionext/sni_ave.c b/drivers/net/ethernet/socionext/sni_ave.c
index 51a7b48db4bc..10d0c3e478ab 100644
--- a/drivers/net/ethernet/socionext/sni_ave.c
+++ b/drivers/net/ethernet/socionext/sni_ave.c
@@ -1553,7 +1553,6 @@ static int ave_probe(struct platform_device *pdev)
struct ave_private *priv;
struct net_device *ndev;
struct device_node *np;
- struct resource *res;
const void *mac_addr;
void __iomem *base;
const char *name;
@@ -1573,13 +1572,10 @@ static int ave_probe(struct platform_device *pdev)
}
irq = platform_get_irq(pdev, 0);
- if (irq < 0) {
- dev_err(dev, "IRQ not found\n");
+ if (irq < 0)
return irq;
- }
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- base = devm_ioremap_resource(dev, res);
+ base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(base))
return PTR_ERR(base);
diff --git a/drivers/net/ethernet/stmicro/stmmac/common.h b/drivers/net/ethernet/stmicro/stmmac/common.h
index ed872eed1cab..49aa56ca09cc 100644
--- a/drivers/net/ethernet/stmicro/stmmac/common.h
+++ b/drivers/net/ethernet/stmicro/stmmac/common.h
@@ -75,6 +75,7 @@ struct stmmac_extra_stats {
unsigned long rx_missed_cntr;
unsigned long rx_overflow_cntr;
unsigned long rx_vlan;
+ unsigned long rx_split_hdr_pkt_n;
/* Tx/Rx IRQ error info */
unsigned long tx_undeflow_irq;
unsigned long tx_process_stopped_irq;
@@ -354,6 +355,11 @@ struct dma_features {
unsigned int frpbs;
unsigned int frpes;
unsigned int addr64;
+ unsigned int rssen;
+ unsigned int vlhash;
+ unsigned int sphen;
+ unsigned int vlins;
+ unsigned int dvlan;
};
/* GMAC TX FIFO is 8K, Rx FIFO is 16K */
@@ -381,6 +387,16 @@ struct dma_features {
#define JUMBO_LEN 9000
+/* Receive Side Scaling */
+#define STMMAC_RSS_HASH_KEY_SIZE 40
+#define STMMAC_RSS_MAX_TABLE_SIZE 256
+
+/* VLAN */
+#define STMMAC_VLAN_NONE 0x0
+#define STMMAC_VLAN_REMOVE 0x1
+#define STMMAC_VLAN_INSERT 0x2
+#define STMMAC_VLAN_REPLACE 0x3
+
extern const struct stmmac_desc_ops enh_desc_ops;
extern const struct stmmac_desc_ops ndesc_ops;
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-anarion.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-anarion.c
index 6ce3a7fb41ab..527f93320a5a 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-anarion.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-anarion.c
@@ -62,12 +62,10 @@ static void anarion_gmac_exit(struct platform_device *pdev, void *priv)
static struct anarion_gmac *anarion_config_dt(struct platform_device *pdev)
{
int phy_mode;
- struct resource *res;
void __iomem *ctl_block;
struct anarion_gmac *gmac;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
- ctl_block = devm_ioremap_resource(&pdev->dev, res);
+ ctl_block = devm_platform_ioremap_resource(pdev, 1);
if (IS_ERR(ctl_block)) {
dev_err(&pdev->dev, "Cannot get reset region (%ld)!\n",
PTR_ERR(ctl_block));
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c
index 3a14cdd01f5f..dd9967aeda22 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c
@@ -333,6 +333,9 @@ static void *tegra_eqos_probe(struct platform_device *pdev,
usleep_range(2000, 4000);
gpiod_set_value(eqos->reset, 0);
+ /* MDIO bus was already reset just above */
+ data->mdio_bus_data->needs_reset = false;
+
eqos->rst = devm_reset_control_get(&pdev->dev, "eqos");
if (IS_ERR(eqos->rst)) {
err = PTR_ERR(eqos->rst);
@@ -415,7 +418,6 @@ static int dwc_eth_dwmac_probe(struct platform_device *pdev)
const struct dwc_eth_dwmac_data *data;
struct plat_stmmacenet_data *plat_dat;
struct stmmac_resources stmmac_res;
- struct resource *res;
void *priv;
int ret;
@@ -428,17 +430,11 @@ static int dwc_eth_dwmac_probe(struct platform_device *pdev)
* resource initialization is done in the glue logic.
*/
stmmac_res.irq = platform_get_irq(pdev, 0);
- if (stmmac_res.irq < 0) {
- if (stmmac_res.irq != -EPROBE_DEFER)
- dev_err(&pdev->dev,
- "IRQ configuration information not found\n");
-
+ if (stmmac_res.irq < 0)
return stmmac_res.irq;
- }
stmmac_res.wol_irq = stmmac_res.irq;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- stmmac_res.addr = devm_ioremap_resource(&pdev->dev, res);
+ stmmac_res.addr = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(stmmac_res.addr))
return PTR_ERR(stmmac_res.addr);
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-meson.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-meson.c
index 88eb16954627..bbc16b5a410a 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-meson.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-meson.c
@@ -46,7 +46,6 @@ static int meson6_dwmac_probe(struct platform_device *pdev)
struct plat_stmmacenet_data *plat_dat;
struct stmmac_resources stmmac_res;
struct meson_dwmac *dwmac;
- struct resource *res;
int ret;
ret = stmmac_get_platform_resources(pdev, &stmmac_res);
@@ -63,8 +62,7 @@ static int meson6_dwmac_probe(struct platform_device *pdev)
goto err_remove_config_dt;
}
- res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
- dwmac->reg = devm_ioremap_resource(&pdev->dev, res);
+ dwmac->reg = devm_platform_ioremap_resource(pdev, 1);
if (IS_ERR(dwmac->reg)) {
ret = PTR_ERR(dwmac->reg);
goto err_remove_config_dt;
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c
index 786ca4a7bf36..9cda29e4b89d 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c
@@ -308,7 +308,6 @@ static int meson8b_dwmac_probe(struct platform_device *pdev)
{
struct plat_stmmacenet_data *plat_dat;
struct stmmac_resources stmmac_res;
- struct resource *res;
struct meson8b_dwmac *dwmac;
int ret;
@@ -332,8 +331,7 @@ static int meson8b_dwmac_probe(struct platform_device *pdev)
ret = -EINVAL;
goto err_remove_config_dt;
}
- res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
- dwmac->regs = devm_ioremap_resource(&pdev->dev, res);
+ dwmac->regs = devm_platform_ioremap_resource(pdev, 1);
if (IS_ERR(dwmac->regs)) {
ret = PTR_ERR(dwmac->regs);
goto err_remove_config_dt;
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h
index 3174b701aa90..7357b8bdc128 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h
+++ b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h
@@ -32,6 +32,9 @@
#define XGMAC_CONFIG_ARPEN BIT(31)
#define XGMAC_CONFIG_GPSL GENMASK(29, 16)
#define XGMAC_CONFIG_GPSL_SHIFT 16
+#define XGMAC_CONFIG_HDSMS GENMASK(14, 12)
+#define XGMAC_CONFIG_HDSMS_SHIFT 12
+#define XGMAC_CONFIG_HDSMS_256 (0x2 << XGMAC_CONFIG_HDSMS_SHIFT)
#define XGMAC_CONFIG_S2KP BIT(11)
#define XGMAC_CONFIG_LM BIT(10)
#define XGMAC_CONFIG_IPC BIT(9)
@@ -44,6 +47,7 @@
#define XGMAC_CORE_INIT_RX 0
#define XGMAC_PACKET_FILTER 0x00000008
#define XGMAC_FILTER_RA BIT(31)
+#define XGMAC_FILTER_VTFE BIT(16)
#define XGMAC_FILTER_HPF BIT(10)
#define XGMAC_FILTER_PCF BIT(7)
#define XGMAC_FILTER_PM BIT(4)
@@ -51,6 +55,19 @@
#define XGMAC_FILTER_PR BIT(0)
#define XGMAC_HASH_TABLE(x) (0x00000010 + (x) * 4)
#define XGMAC_MAX_HASH_TABLE 8
+#define XGMAC_VLAN_TAG 0x00000050
+#define XGMAC_VLAN_EDVLP BIT(26)
+#define XGMAC_VLAN_VTHM BIT(25)
+#define XGMAC_VLAN_DOVLTC BIT(20)
+#define XGMAC_VLAN_ESVL BIT(18)
+#define XGMAC_VLAN_ETV BIT(16)
+#define XGMAC_VLAN_VID GENMASK(15, 0)
+#define XGMAC_VLAN_HASH_TABLE 0x00000058
+#define XGMAC_VLAN_INCL 0x00000060
+#define XGMAC_VLAN_VLTI BIT(20)
+#define XGMAC_VLAN_CSVL BIT(19)
+#define XGMAC_VLAN_VLC GENMASK(17, 16)
+#define XGMAC_VLAN_VLC_SHIFT 16
#define XGMAC_RXQ_CTRL0 0x000000a0
#define XGMAC_RXQEN(x) GENMASK((x) * 2 + 1, (x) * 2)
#define XGMAC_RXQEN_SHIFT(x) ((x) * 2)
@@ -59,6 +76,7 @@
#define XGMAC_PSRQ(x) GENMASK((x) * 8 + 7, (x) * 8)
#define XGMAC_PSRQ_SHIFT(x) ((x) * 8)
#define XGMAC_INT_STATUS 0x000000b0
+#define XGMAC_LPIIS BIT(5)
#define XGMAC_PMTIS BIT(4)
#define XGMAC_INT_EN 0x000000b4
#define XGMAC_TSIE BIT(12)
@@ -76,19 +94,34 @@
#define XGMAC_RWKPKTEN BIT(2)
#define XGMAC_MGKPKTEN BIT(1)
#define XGMAC_PWRDWN BIT(0)
+#define XGMAC_LPI_CTRL 0x000000d0
+#define XGMAC_TXCGE BIT(21)
+#define XGMAC_LPITXA BIT(19)
+#define XGMAC_PLS BIT(17)
+#define XGMAC_LPITXEN BIT(16)
+#define XGMAC_RLPIEX BIT(3)
+#define XGMAC_RLPIEN BIT(2)
+#define XGMAC_TLPIEX BIT(1)
+#define XGMAC_TLPIEN BIT(0)
+#define XGMAC_LPI_TIMER_CTRL 0x000000d4
#define XGMAC_HW_FEATURE0 0x0000011c
#define XGMAC_HWFEAT_SAVLANINS BIT(27)
#define XGMAC_HWFEAT_RXCOESEL BIT(16)
#define XGMAC_HWFEAT_TXCOESEL BIT(14)
+#define XGMAC_HWFEAT_EEESEL BIT(13)
#define XGMAC_HWFEAT_TSSEL BIT(12)
#define XGMAC_HWFEAT_AVSEL BIT(11)
#define XGMAC_HWFEAT_RAVSEL BIT(10)
#define XGMAC_HWFEAT_ARPOFFSEL BIT(9)
+#define XGMAC_HWFEAT_MMCSEL BIT(8)
#define XGMAC_HWFEAT_MGKSEL BIT(7)
#define XGMAC_HWFEAT_RWKSEL BIT(6)
+#define XGMAC_HWFEAT_VLHASH BIT(4)
#define XGMAC_HWFEAT_GMIISEL BIT(1)
#define XGMAC_HW_FEATURE1 0x00000120
+#define XGMAC_HWFEAT_RSSEN BIT(20)
#define XGMAC_HWFEAT_TSOEN BIT(18)
+#define XGMAC_HWFEAT_SPHEN BIT(17)
#define XGMAC_HWFEAT_ADDR64 GENMASK(15, 14)
#define XGMAC_HWFEAT_TXFIFOSIZE GENMASK(10, 6)
#define XGMAC_HWFEAT_RXFIFOSIZE GENMASK(4, 0)
@@ -98,6 +131,16 @@
#define XGMAC_HWFEAT_RXCHCNT GENMASK(15, 12)
#define XGMAC_HWFEAT_TXQCNT GENMASK(9, 6)
#define XGMAC_HWFEAT_RXQCNT GENMASK(3, 0)
+#define XGMAC_HW_FEATURE3 0x00000128
+#define XGMAC_HWFEAT_ASP GENMASK(15, 14)
+#define XGMAC_HWFEAT_DVLAN BIT(13)
+#define XGMAC_HWFEAT_FRPES GENMASK(12, 11)
+#define XGMAC_HWFEAT_FRPPB GENMASK(10, 9)
+#define XGMAC_HWFEAT_FRPSEL BIT(3)
+#define XGMAC_MAC_DPP_FSM_INT_STATUS 0x00000150
+#define XGMAC_MAC_FSM_CONTROL 0x00000158
+#define XGMAC_PRTYEN BIT(1)
+#define XGMAC_TMOUTEN BIT(0)
#define XGMAC_MDIO_ADDR 0x00000200
#define XGMAC_MDIO_DATA 0x00000204
#define XGMAC_MDIO_C22P 0x00000220
@@ -108,14 +151,45 @@
#define XGMAC_DCS_SHIFT 16
#define XGMAC_ADDRx_LOW(x) (0x00000304 + (x) * 0x8)
#define XGMAC_ARP_ADDR 0x00000c10
+#define XGMAC_RSS_CTRL 0x00000c80
+#define XGMAC_UDP4TE BIT(3)
+#define XGMAC_TCP4TE BIT(2)
+#define XGMAC_IP2TE BIT(1)
+#define XGMAC_RSSE BIT(0)
+#define XGMAC_RSS_ADDR 0x00000c88
+#define XGMAC_RSSIA_SHIFT 8
+#define XGMAC_ADDRT BIT(2)
+#define XGMAC_CT BIT(1)
+#define XGMAC_OB BIT(0)
+#define XGMAC_RSS_DATA 0x00000c8c
#define XGMAC_TIMESTAMP_STATUS 0x00000d20
#define XGMAC_TXTSC BIT(15)
#define XGMAC_TXTIMESTAMP_NSEC 0x00000d30
#define XGMAC_TXTSSTSLO GENMASK(30, 0)
#define XGMAC_TXTIMESTAMP_SEC 0x00000d34
+#define XGMAC_PPS_CONTROL 0x00000d70
+#define XGMAC_PPS_MAXIDX(x) ((((x) + 1) * 8) - 1)
+#define XGMAC_PPS_MINIDX(x) ((x) * 8)
+#define XGMAC_PPSx_MASK(x) \
+ GENMASK(XGMAC_PPS_MAXIDX(x), XGMAC_PPS_MINIDX(x))
+#define XGMAC_TRGTMODSELx(x, val) \
+ GENMASK(XGMAC_PPS_MAXIDX(x) - 1, XGMAC_PPS_MAXIDX(x) - 2) & \
+ ((val) << (XGMAC_PPS_MAXIDX(x) - 2))
+#define XGMAC_PPSCMDx(x, val) \
+ GENMASK(XGMAC_PPS_MINIDX(x) + 3, XGMAC_PPS_MINIDX(x)) & \
+ ((val) << XGMAC_PPS_MINIDX(x))
+#define XGMAC_PPSCMD_START 0x2
+#define XGMAC_PPSCMD_STOP 0x5
+#define XGMAC_PPSEN0 BIT(4)
+#define XGMAC_PPSx_TARGET_TIME_SEC(x) (0x00000d80 + (x) * 0x10)
+#define XGMAC_PPSx_TARGET_TIME_NSEC(x) (0x00000d84 + (x) * 0x10)
+#define XGMAC_TRGTBUSY0 BIT(31)
+#define XGMAC_PPSx_INTERVAL(x) (0x00000d88 + (x) * 0x10)
+#define XGMAC_PPSx_WIDTH(x) (0x00000d8c + (x) * 0x10)
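Each PPS output owns one byte of XGMAC_PPS_CONTROL, and the macros above carve that byte into fields. Worked expansion for output 0 (illustration):

    /* XGMAC_PPSx_MASK(0)                   == GENMASK(7, 0)
     * XGMAC_PPSCMDx(0, XGMAC_PPSCMD_START) == GENMASK(3, 0) & (0x2 << 0) == 0x2
     * XGMAC_TRGTMODSELx(0, v)              == GENMASK(6, 5) & ((v) << 5)
     * For output 1 the same fields shift up into bits 15:8, and so on. */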
/* MTL Registers */
#define XGMAC_MTL_OPMODE 0x00001000
+#define XGMAC_FRPE BIT(15)
#define XGMAC_ETSALG GENMASK(6, 5)
#define XGMAC_WRR (0x0 << 5)
#define XGMAC_WFQ (0x1 << 5)
@@ -124,8 +198,32 @@
#define XGMAC_MTL_INT_STATUS 0x00001020
#define XGMAC_MTL_RXQ_DMA_MAP0 0x00001030
#define XGMAC_MTL_RXQ_DMA_MAP1 0x00001034
-#define XGMAC_QxMDMACH(x) GENMASK((x) * 8 + 3, (x) * 8)
+#define XGMAC_QxMDMACH(x) GENMASK((x) * 8 + 7, (x) * 8)
#define XGMAC_QxMDMACH_SHIFT(x) ((x) * 8)
+#define XGMAC_QDDMACH BIT(7)
+#define XGMAC_TC_PRTY_MAP0 0x00001040
+#define XGMAC_TC_PRTY_MAP1 0x00001044
+#define XGMAC_PSTC(x) GENMASK((x) * 8 + 7, (x) * 8)
+#define XGMAC_PSTC_SHIFT(x) ((x) * 8)
+#define XGMAC_MTL_RXP_CONTROL_STATUS 0x000010a0
+#define XGMAC_RXPI BIT(31)
+#define XGMAC_NPE GENMASK(23, 16)
+#define XGMAC_NVE GENMASK(7, 0)
+#define XGMAC_MTL_RXP_IACC_CTRL_ST 0x000010b0
+#define XGMAC_STARTBUSY BIT(31)
+#define XGMAC_WRRDN BIT(16)
+#define XGMAC_ADDR GENMASK(9, 0)
+#define XGMAC_MTL_RXP_IACC_DATA 0x000010b4
+#define XGMAC_MTL_ECC_CONTROL 0x000010c0
+#define XGMAC_MTL_SAFETY_INT_STATUS 0x000010c4
+#define XGMAC_MEUIS BIT(1)
+#define XGMAC_MECIS BIT(0)
+#define XGMAC_MTL_ECC_INT_ENABLE 0x000010c8
+#define XGMAC_RPCEIE BIT(12)
+#define XGMAC_ECEIE BIT(8)
+#define XGMAC_RXCEIE BIT(4)
+#define XGMAC_TXCEIE BIT(0)
+#define XGMAC_MTL_ECC_INT_STATUS 0x000010cc
#define XGMAC_MTL_TXQ_OPMODE(x) (0x00001100 + (0x80 * (x)))
#define XGMAC_TQS GENMASK(25, 16)
#define XGMAC_TQS_SHIFT 16
@@ -164,6 +262,7 @@
#define XGMAC_RXOVFIS BIT(16)
#define XGMAC_ABPSIS BIT(1)
#define XGMAC_TXUNFIS BIT(0)
+#define XGMAC_MAC_REGSIZE (XGMAC_MTL_QINT_STATUS(15) / 4)
/* DMA Registers */
#define XGMAC_DMA_MODE 0x00003000
@@ -190,7 +289,18 @@
#define XGMAC_TDPS GENMASK(29, 0)
#define XGMAC_RX_EDMA_CTRL 0x00003044
#define XGMAC_RDPS GENMASK(29, 0)
+#define XGMAC_DMA_SAFETY_INT_STATUS 0x00003064
+#define XGMAC_MCSIS BIT(31)
+#define XGMAC_MSUIS BIT(29)
+#define XGMAC_MSCIS BIT(28)
+#define XGMAC_DEUIS BIT(1)
+#define XGMAC_DECIS BIT(0)
+#define XGMAC_DMA_ECC_INT_ENABLE 0x00003068
+#define XGMAC_DCEIE BIT(1)
+#define XGMAC_TCEIE BIT(0)
+#define XGMAC_DMA_ECC_INT_STATUS 0x0000306c
#define XGMAC_DMA_CH_CONTROL(x) (0x00003100 + (0x80 * (x)))
+#define XGMAC_SPH BIT(24)
#define XGMAC_PBLx8 BIT(16)
#define XGMAC_DMA_CH_TX_CONTROL(x) (0x00003104 + (0x80 * (x)))
#define XGMAC_TxPBL GENMASK(21, 16)
@@ -230,12 +340,17 @@
#define XGMAC_TBU BIT(2)
#define XGMAC_TPS BIT(1)
#define XGMAC_TI BIT(0)
+#define XGMAC_REGSIZE ((0x0000317c + (0x80 * 15)) / 4)
/* Descriptors */
+#define XGMAC_TDES2_IVT GENMASK(31, 16)
+#define XGMAC_TDES2_IVT_SHIFT 16
#define XGMAC_TDES2_IOC BIT(31)
#define XGMAC_TDES2_TTSE BIT(30)
#define XGMAC_TDES2_B2L GENMASK(29, 16)
#define XGMAC_TDES2_B2L_SHIFT 16
+#define XGMAC_TDES2_VTIR GENMASK(15, 14)
+#define XGMAC_TDES2_VTIR_SHIFT 14
#define XGMAC_TDES2_B1L GENMASK(13, 0)
#define XGMAC_TDES3_OWN BIT(31)
#define XGMAC_TDES3_CTXT BIT(30)
@@ -244,18 +359,33 @@
#define XGMAC_TDES3_CPC GENMASK(27, 26)
#define XGMAC_TDES3_CPC_SHIFT 26
#define XGMAC_TDES3_TCMSSV BIT(26)
+#define XGMAC_TDES3_SAIC GENMASK(25, 23)
+#define XGMAC_TDES3_SAIC_SHIFT 23
#define XGMAC_TDES3_THL GENMASK(22, 19)
#define XGMAC_TDES3_THL_SHIFT 19
+#define XGMAC_TDES3_IVTIR GENMASK(19, 18)
+#define XGMAC_TDES3_IVTIR_SHIFT 18
#define XGMAC_TDES3_TSE BIT(18)
+#define XGMAC_TDES3_IVLTV BIT(17)
#define XGMAC_TDES3_CIC GENMASK(17, 16)
#define XGMAC_TDES3_CIC_SHIFT 16
#define XGMAC_TDES3_TPL GENMASK(17, 0)
+#define XGMAC_TDES3_VLTV BIT(16)
+#define XGMAC_TDES3_VT GENMASK(15, 0)
#define XGMAC_TDES3_FL GENMASK(14, 0)
+#define XGMAC_RDES2_HL GENMASK(9, 0)
#define XGMAC_RDES3_OWN BIT(31)
#define XGMAC_RDES3_CTXT BIT(30)
#define XGMAC_RDES3_IOC BIT(30)
#define XGMAC_RDES3_LD BIT(28)
#define XGMAC_RDES3_CDA BIT(27)
+#define XGMAC_RDES3_RSV BIT(26)
+#define XGMAC_RDES3_L34T GENMASK(23, 20)
+#define XGMAC_RDES3_L34T_SHIFT 20
+#define XGMAC_L34T_IP4TCP 0x1
+#define XGMAC_L34T_IP4UDP 0x2
+#define XGMAC_L34T_IP6TCP 0x9
+#define XGMAC_L34T_IP6UDP 0xA
#define XGMAC_RDES3_ES BIT(15)
#define XGMAC_RDES3_PL GENMASK(13, 0)
#define XGMAC_RDES3_TSD BIT(6)
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c
index 85c68b7ee8c6..e534a3aaf4a3 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c
@@ -6,7 +6,9 @@
#include <linux/bitrev.h>
#include <linux/crc32.h>
+#include <linux/iopoll.h>
#include "stmmac.h"
+#include "stmmac_ptp.h"
#include "dwxgmac2.h"
static void dwxgmac2_core_init(struct mac_device_info *hw,
@@ -118,6 +120,23 @@ static void dwxgmac2_rx_queue_prio(struct mac_device_info *hw, u32 prio,
writel(value, ioaddr + reg);
}
+static void dwxgmac2_tx_queue_prio(struct mac_device_info *hw, u32 prio,
+ u32 queue)
+{
+ void __iomem *ioaddr = hw->pcsr;
+ u32 value, reg;
+
+ reg = (queue < 4) ? XGMAC_TC_PRTY_MAP0 : XGMAC_TC_PRTY_MAP1;
+ if (queue >= 4)
+ queue -= 4;
+
+ value = readl(ioaddr + reg);
+ value &= ~XGMAC_PSTC(queue);
+ value |= (prio << XGMAC_PSTC_SHIFT(queue)) & XGMAC_PSTC(queue);
+
+ writel(value, ioaddr + reg);
+}
+
static void dwxgmac2_prog_mtl_rx_algorithms(struct mac_device_info *hw,
u32 rx_alg)
{
@@ -144,7 +163,9 @@ static void dwxgmac2_prog_mtl_tx_algorithms(struct mac_device_info *hw,
u32 tx_alg)
{
void __iomem *ioaddr = hw->pcsr;
+ bool ets = true;
u32 value;
+ int i;
value = readl(ioaddr + XGMAC_MTL_OPMODE);
value &= ~XGMAC_ETSALG;
@@ -160,10 +181,28 @@ static void dwxgmac2_prog_mtl_tx_algorithms(struct mac_device_info *hw,
value |= XGMAC_DWRR;
break;
default:
+ ets = false;
break;
}
writel(value, ioaddr + XGMAC_MTL_OPMODE);
+
+ /* Set ETS if desired */
+ for (i = 0; i < MTL_MAX_TX_QUEUES; i++) {
+ value = readl(ioaddr + XGMAC_MTL_TCx_ETS_CONTROL(i));
+ value &= ~XGMAC_TSA;
+ if (ets)
+ value |= XGMAC_ETS;
+ writel(value, ioaddr + XGMAC_MTL_TCx_ETS_CONTROL(i));
+ }
+}
+
+static void dwxgmac2_set_mtl_tx_queue_weight(struct mac_device_info *hw,
+ u32 weight, u32 queue)
+{
+ void __iomem *ioaddr = hw->pcsr;
+
+ writel(weight, ioaddr + XGMAC_MTL_TCx_QUANTUM_WEIGHT(queue));
}
static void dwxgmac2_map_mtl_to_dma(struct mac_device_info *hw, u32 queue,
@@ -200,11 +239,21 @@ static void dwxgmac2_config_cbs(struct mac_device_info *hw,
writel(value, ioaddr + XGMAC_MTL_TCx_ETS_CONTROL(queue));
}
+static void dwxgmac2_dump_regs(struct mac_device_info *hw, u32 *reg_space)
+{
+ void __iomem *ioaddr = hw->pcsr;
+ int i;
+
+ for (i = 0; i < XGMAC_MAC_REGSIZE; i++)
+ reg_space[i] = readl(ioaddr + i * 4);
+}
+
static int dwxgmac2_host_irq_status(struct mac_device_info *hw,
struct stmmac_extra_stats *x)
{
void __iomem *ioaddr = hw->pcsr;
u32 stat, en;
+ int ret = 0;
en = readl(ioaddr + XGMAC_INT_EN);
stat = readl(ioaddr + XGMAC_INT_STATUS);
@@ -216,7 +265,24 @@ static int dwxgmac2_host_irq_status(struct mac_device_info *hw,
readl(ioaddr + XGMAC_PMT);
}
- return 0;
+ if (stat & XGMAC_LPIIS) {
+ u32 lpi = readl(ioaddr + XGMAC_LPI_CTRL);
+
+ if (lpi & XGMAC_TLPIEN) {
+ ret |= CORE_IRQ_TX_PATH_IN_LPI_MODE;
+ x->irq_tx_path_in_lpi_mode_n++;
+ }
+ if (lpi & XGMAC_TLPIEX) {
+ ret |= CORE_IRQ_TX_PATH_EXIT_LPI_MODE;
+ x->irq_tx_path_exit_lpi_mode_n++;
+ }
+ if (lpi & XGMAC_RLPIEN)
+ x->irq_rx_path_in_lpi_mode_n++;
+ if (lpi & XGMAC_RLPIEX)
+ x->irq_rx_path_exit_lpi_mode_n++;
+ }
+
+ return ret;
}
static int dwxgmac2_host_mtl_irq_status(struct mac_device_info *hw, u32 chan)
@@ -309,6 +375,53 @@ static void dwxgmac2_get_umac_addr(struct mac_device_info *hw,
addr[5] = (hi_addr >> 8) & 0xff;
}
+static void dwxgmac2_set_eee_mode(struct mac_device_info *hw,
+ bool en_tx_lpi_clockgating)
+{
+ void __iomem *ioaddr = hw->pcsr;
+ u32 value;
+
+ value = readl(ioaddr + XGMAC_LPI_CTRL);
+
+ value |= XGMAC_LPITXEN | XGMAC_LPITXA;
+ if (en_tx_lpi_clockgating)
+ value |= XGMAC_TXCGE;
+
+ writel(value, ioaddr + XGMAC_LPI_CTRL);
+}
+
+static void dwxgmac2_reset_eee_mode(struct mac_device_info *hw)
+{
+ void __iomem *ioaddr = hw->pcsr;
+ u32 value;
+
+ value = readl(ioaddr + XGMAC_LPI_CTRL);
+ value &= ~(XGMAC_LPITXEN | XGMAC_LPITXA | XGMAC_TXCGE);
+ writel(value, ioaddr + XGMAC_LPI_CTRL);
+}
+
+static void dwxgmac2_set_eee_pls(struct mac_device_info *hw, int link)
+{
+ void __iomem *ioaddr = hw->pcsr;
+ u32 value;
+
+ value = readl(ioaddr + XGMAC_LPI_CTRL);
+ if (link)
+ value |= XGMAC_PLS;
+ else
+ value &= ~XGMAC_PLS;
+ writel(value, ioaddr + XGMAC_LPI_CTRL);
+}
+
+static void dwxgmac2_set_eee_timer(struct mac_device_info *hw, int ls, int tw)
+{
+ void __iomem *ioaddr = hw->pcsr;
+ u32 value;
+
+ value = (tw & 0xffff) | ((ls & 0x3ff) << 16);
+ writel(value, ioaddr + XGMAC_LPI_TIMER_CTRL);
+}
+
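XGMAC_LPI_TIMER_CTRL packs both EEE timers into one register: the TW (wake) timer in bits 15:0 and the LS (link status) timer in bits 25:16, which is exactly what the expression in dwxgmac2_set_eee_timer() builds. Worked example:

    /* ls = 1000, tw = 30:
     * value = (30 & 0xffff) | ((1000 & 0x3ff) << 16)
     *       = 0x0000001e | 0x03e80000 = 0x03e8001e
     */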
static void dwxgmac2_set_mchash(void __iomem *ioaddr, u32 *mcfilterbits,
int mcbitslog2)
{
@@ -402,36 +515,694 @@ static void dwxgmac2_set_mac_loopback(void __iomem *ioaddr, bool enable)
writel(value, ioaddr + XGMAC_RX_CONFIG);
}
+static int dwxgmac2_rss_write_reg(void __iomem *ioaddr, bool is_key, int idx,
+ u32 val)
+{
+ u32 ctrl = 0;
+
+ writel(val, ioaddr + XGMAC_RSS_DATA);
+ ctrl |= idx << XGMAC_RSSIA_SHIFT;
+ ctrl |= is_key ? XGMAC_ADDRT : 0x0;
+ ctrl |= XGMAC_OB;
+ writel(ctrl, ioaddr + XGMAC_RSS_ADDR);
+
+ return readl_poll_timeout(ioaddr + XGMAC_RSS_ADDR, ctrl,
+ !(ctrl & XGMAC_OB), 100, 10000);
+}
+
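dwxgmac2_rss_write_reg() feeds the hardware one key/table word at a time: write the word to XGMAC_RSS_DATA, point XGMAC_RSS_ADDR at the slot (ADDRT selects key vs. table), set the OB busy bit, and wait for the MAC to clear it. readl_poll_timeout() from <linux/iopoll.h> does the waiting; its general shape is:

    /* Re-reads the register into val until cond holds; sleeps sleep_us
     * between reads, returns -ETIMEDOUT after timeout_us. REG/BUSY_BIT
     * are placeholders. */
    ret = readl_poll_timeout(ioaddr + REG, val, !(val & BUSY_BIT),
                             100 /* sleep_us */, 10000 /* timeout_us */);

So each RSS word write above waits at most 10 ms, polling every 100 us.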
+static int dwxgmac2_rss_configure(struct mac_device_info *hw,
+ struct stmmac_rss *cfg, u32 num_rxq)
+{
+ void __iomem *ioaddr = hw->pcsr;
+ u32 *key = (u32 *)cfg->key;
+ int i, ret;
+ u32 value;
+
+ value = readl(ioaddr + XGMAC_RSS_CTRL);
+ if (!cfg->enable) {
+ value &= ~XGMAC_RSSE;
+ writel(value, ioaddr + XGMAC_RSS_CTRL);
+ return 0;
+ }
+
+ for (i = 0; i < (sizeof(cfg->key) / sizeof(u32)); i++) {
+ ret = dwxgmac2_rss_write_reg(ioaddr, true, i, *key++);
+ if (ret)
+ return ret;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(cfg->table); i++) {
+ ret = dwxgmac2_rss_write_reg(ioaddr, false, i, cfg->table[i]);
+ if (ret)
+ return ret;
+ }
+
+ for (i = 0; i < num_rxq; i++)
+ dwxgmac2_map_mtl_to_dma(hw, i, XGMAC_QDDMACH);
+
+ value |= XGMAC_UDP4TE | XGMAC_TCP4TE | XGMAC_IP2TE | XGMAC_RSSE;
+ writel(value, ioaddr + XGMAC_RSS_CTRL);
+ return 0;
+}
+
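The configure flow: disabling just clears RSSE; enabling programs the 40-byte hash key word by word, then the 256-entry indirection table, routes every queue's MTL-to-DMA mapping through QDDMACH so the hash result selects the channel, and finally turns on hashing for IPv4 and TCP/UDP over IPv4. A common way for the caller to seed the table is a round-robin spread over the active RX queues (sketch; field names as used by this series):

    struct stmmac_rss cfg = { .enable = true };
    unsigned int i;

    netdev_rss_key_fill(cfg.key, sizeof(cfg.key));  /* random hash key */
    for (i = 0; i < ARRAY_SIZE(cfg.table); i++)
            cfg.table[i] = i % num_rxq;             /* num_rxq: assumed */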
+static void dwxgmac2_update_vlan_hash(struct mac_device_info *hw, u32 hash,
+ bool is_double)
+{
+ void __iomem *ioaddr = hw->pcsr;
+
+ writel(hash, ioaddr + XGMAC_VLAN_HASH_TABLE);
+
+ if (hash) {
+ u32 value = readl(ioaddr + XGMAC_PACKET_FILTER);
+
+ value |= XGMAC_FILTER_VTFE;
+
+ writel(value, ioaddr + XGMAC_PACKET_FILTER);
+
+ value |= XGMAC_VLAN_VTHM | XGMAC_VLAN_ETV;
+ if (is_double) {
+ value |= XGMAC_VLAN_EDVLP;
+ value |= XGMAC_VLAN_ESVL;
+ value |= XGMAC_VLAN_DOVLTC;
+ }
+
+ writel(value, ioaddr + XGMAC_VLAN_TAG);
+ } else {
+ u32 value = readl(ioaddr + XGMAC_PACKET_FILTER);
+
+ value &= ~XGMAC_FILTER_VTFE;
+
+ writel(value, ioaddr + XGMAC_PACKET_FILTER);
+
+ value = readl(ioaddr + XGMAC_VLAN_TAG);
+
+ value &= ~(XGMAC_VLAN_VTHM | XGMAC_VLAN_ETV);
+ value &= ~(XGMAC_VLAN_EDVLP | XGMAC_VLAN_ESVL);
+ value &= ~XGMAC_VLAN_DOVLTC;
+ value &= ~XGMAC_VLAN_VID;
+
+ writel(value, ioaddr + XGMAC_VLAN_TAG);
+ }
+}
+
+struct dwxgmac3_error_desc {
+ bool valid;
+ const char *desc;
+ const char *detailed_desc;
+};
+
+#define STAT_OFF(field) offsetof(struct stmmac_safety_stats, field)
+
+static void dwxgmac3_log_error(struct net_device *ndev, u32 value, bool corr,
+ const char *module_name,
+ const struct dwxgmac3_error_desc *desc,
+ unsigned long field_offset,
+ struct stmmac_safety_stats *stats)
+{
+ unsigned long loc, mask;
+ u8 *bptr = (u8 *)stats;
+ unsigned long *ptr;
+
+ ptr = (unsigned long *)(bptr + field_offset);
+
+ mask = value;
+ for_each_set_bit(loc, &mask, 32) {
+ netdev_err(ndev, "Found %s error in %s: '%s: %s'\n", corr ?
+ "correctable" : "uncorrectable", module_name,
+ desc[loc].desc, desc[loc].detailed_desc);
+
+ /* Update counters */
+ ptr[loc]++;
+ }
+}
+
+static const struct dwxgmac3_error_desc dwxgmac3_mac_errors[32]= {
+ { true, "ATPES", "Application Transmit Interface Parity Check Error" },
+ { true, "DPES", "Descriptor Cache Data Path Parity Check Error" },
+ { true, "TPES", "TSO Data Path Parity Check Error" },
+ { true, "TSOPES", "TSO Header Data Path Parity Check Error" },
+ { true, "MTPES", "MTL Data Path Parity Check Error" },
+ { true, "MTSPES", "MTL TX Status Data Path Parity Check Error" },
+ { true, "MTBUPES", "MAC TBU Data Path Parity Check Error" },
+ { true, "MTFCPES", "MAC TFC Data Path Parity Check Error" },
+ { true, "ARPES", "Application Receive Interface Data Path Parity Check Error" },
+ { true, "MRWCPES", "MTL RWC Data Path Parity Check Error" },
+ { true, "MRRCPES", "MTL RCC Data Path Parity Check Error" },
+ { true, "CWPES", "CSR Write Data Path Parity Check Error" },
+ { true, "ASRPES", "AXI Slave Read Data Path Parity Check Error" },
+ { true, "TTES", "TX FSM Timeout Error" },
+ { true, "RTES", "RX FSM Timeout Error" },
+ { true, "CTES", "CSR FSM Timeout Error" },
+ { true, "ATES", "APP FSM Timeout Error" },
+ { true, "PTES", "PTP FSM Timeout Error" },
+ { false, "UNKNOWN", "Unknown Error" }, /* 18 */
+ { false, "UNKNOWN", "Unknown Error" }, /* 19 */
+ { false, "UNKNOWN", "Unknown Error" }, /* 20 */
+ { true, "MSTTES", "Master Read/Write Timeout Error" },
+ { true, "SLVTES", "Slave Read/Write Timeout Error" },
+ { true, "ATITES", "Application Timeout on ATI Interface Error" },
+ { true, "ARITES", "Application Timeout on ARI Interface Error" },
+ { true, "FSMPES", "FSM State Parity Error" },
+ { false, "UNKNOWN", "Unknown Error" }, /* 26 */
+ { false, "UNKNOWN", "Unknown Error" }, /* 27 */
+ { false, "UNKNOWN", "Unknown Error" }, /* 28 */
+ { false, "UNKNOWN", "Unknown Error" }, /* 29 */
+ { false, "UNKNOWN", "Unknown Error" }, /* 30 */
+ { true, "CPI", "Control Register Parity Check Error" },
+};
+
+static void dwxgmac3_handle_mac_err(struct net_device *ndev,
+ void __iomem *ioaddr, bool correctable,
+ struct stmmac_safety_stats *stats)
+{
+ u32 value;
+
+ value = readl(ioaddr + XGMAC_MAC_DPP_FSM_INT_STATUS);
+ writel(value, ioaddr + XGMAC_MAC_DPP_FSM_INT_STATUS);
+
+ dwxgmac3_log_error(ndev, value, correctable, "MAC",
+ dwxgmac3_mac_errors, STAT_OFF(mac_errors), stats);
+}
+
+static const struct dwxgmac3_error_desc dwxgmac3_mtl_errors[32]= {
+ { true, "TXCES", "MTL TX Memory Error" },
+ { true, "TXAMS", "MTL TX Memory Address Mismatch Error" },
+ { true, "TXUES", "MTL TX Memory Error" },
+ { false, "UNKNOWN", "Unknown Error" }, /* 3 */
+ { true, "RXCES", "MTL RX Memory Error" },
+ { true, "RXAMS", "MTL RX Memory Address Mismatch Error" },
+ { true, "RXUES", "MTL RX Memory Error" },
+ { false, "UNKNOWN", "Unknown Error" }, /* 7 */
+ { true, "ECES", "MTL EST Memory Error" },
+ { true, "EAMS", "MTL EST Memory Address Mismatch Error" },
+ { true, "EUES", "MTL EST Memory Error" },
+ { false, "UNKNOWN", "Unknown Error" }, /* 11 */
+ { true, "RPCES", "MTL RX Parser Memory Error" },
+ { true, "RPAMS", "MTL RX Parser Memory Address Mismatch Error" },
+ { true, "RPUES", "MTL RX Parser Memory Error" },
+ { false, "UNKNOWN", "Unknown Error" }, /* 15 */
+ { false, "UNKNOWN", "Unknown Error" }, /* 16 */
+ { false, "UNKNOWN", "Unknown Error" }, /* 17 */
+ { false, "UNKNOWN", "Unknown Error" }, /* 18 */
+ { false, "UNKNOWN", "Unknown Error" }, /* 19 */
+ { false, "UNKNOWN", "Unknown Error" }, /* 20 */
+ { false, "UNKNOWN", "Unknown Error" }, /* 21 */
+ { false, "UNKNOWN", "Unknown Error" }, /* 22 */
+ { false, "UNKNOWN", "Unknown Error" }, /* 23 */
+ { false, "UNKNOWN", "Unknown Error" }, /* 24 */
+ { false, "UNKNOWN", "Unknown Error" }, /* 25 */
+ { false, "UNKNOWN", "Unknown Error" }, /* 26 */
+ { false, "UNKNOWN", "Unknown Error" }, /* 27 */
+ { false, "UNKNOWN", "Unknown Error" }, /* 28 */
+ { false, "UNKNOWN", "Unknown Error" }, /* 29 */
+ { false, "UNKNOWN", "Unknown Error" }, /* 30 */
+ { false, "UNKNOWN", "Unknown Error" }, /* 31 */
+};
+
+static void dwxgmac3_handle_mtl_err(struct net_device *ndev,
+ void __iomem *ioaddr, bool correctable,
+ struct stmmac_safety_stats *stats)
+{
+ u32 value;
+
+ value = readl(ioaddr + XGMAC_MTL_ECC_INT_STATUS);
+ writel(value, ioaddr + XGMAC_MTL_ECC_INT_STATUS);
+
+ dwxgmac3_log_error(ndev, value, correctable, "MTL",
+ dwxgmac3_mtl_errors, STAT_OFF(mtl_errors), stats);
+}
+
+static const struct dwxgmac3_error_desc dwxgmac3_dma_errors[32]= {
+ { true, "TCES", "DMA TSO Memory Error" },
+ { true, "TAMS", "DMA TSO Memory Address Mismatch Error" },
+ { true, "TUES", "DMA TSO Memory Error" },
+ { false, "UNKNOWN", "Unknown Error" }, /* 3 */
+ { true, "DCES", "DMA DCACHE Memory Error" },
+ { true, "DAMS", "DMA DCACHE Address Mismatch Error" },
+ { true, "DUES", "DMA DCACHE Memory Error" },
+ { false, "UNKNOWN", "Unknown Error" }, /* 7 */
+ { false, "UNKNOWN", "Unknown Error" }, /* 8 */
+ { false, "UNKNOWN", "Unknown Error" }, /* 9 */
+ { false, "UNKNOWN", "Unknown Error" }, /* 10 */
+ { false, "UNKNOWN", "Unknown Error" }, /* 11 */
+ { false, "UNKNOWN", "Unknown Error" }, /* 12 */
+ { false, "UNKNOWN", "Unknown Error" }, /* 13 */
+ { false, "UNKNOWN", "Unknown Error" }, /* 14 */
+ { false, "UNKNOWN", "Unknown Error" }, /* 15 */
+ { false, "UNKNOWN", "Unknown Error" }, /* 16 */
+ { false, "UNKNOWN", "Unknown Error" }, /* 17 */
+ { false, "UNKNOWN", "Unknown Error" }, /* 18 */
+ { false, "UNKNOWN", "Unknown Error" }, /* 19 */
+ { false, "UNKNOWN", "Unknown Error" }, /* 20 */
+ { false, "UNKNOWN", "Unknown Error" }, /* 21 */
+ { false, "UNKNOWN", "Unknown Error" }, /* 22 */
+ { false, "UNKNOWN", "Unknown Error" }, /* 23 */
+ { false, "UNKNOWN", "Unknown Error" }, /* 24 */
+ { false, "UNKNOWN", "Unknown Error" }, /* 25 */
+ { false, "UNKNOWN", "Unknown Error" }, /* 26 */
+ { false, "UNKNOWN", "Unknown Error" }, /* 27 */
+ { false, "UNKNOWN", "Unknown Error" }, /* 28 */
+ { false, "UNKNOWN", "Unknown Error" }, /* 29 */
+ { false, "UNKNOWN", "Unknown Error" }, /* 30 */
+ { false, "UNKNOWN", "Unknown Error" }, /* 31 */
+};
+
+static void dwxgmac3_handle_dma_err(struct net_device *ndev,
+ void __iomem *ioaddr, bool correctable,
+ struct stmmac_safety_stats *stats)
+{
+ u32 value;
+
+ value = readl(ioaddr + XGMAC_DMA_ECC_INT_STATUS);
+ writel(value, ioaddr + XGMAC_DMA_ECC_INT_STATUS);
+
+ dwxgmac3_log_error(ndev, value, correctable, "DMA",
+ dwxgmac3_dma_errors, STAT_OFF(dma_errors), stats);
+}
+
+static int dwxgmac3_safety_feat_config(void __iomem *ioaddr, unsigned int asp)
+{
+ u32 value;
+
+ if (!asp)
+ return -EINVAL;
+
+ /* 1. Enable Safety Features */
+ writel(0x0, ioaddr + XGMAC_MTL_ECC_CONTROL);
+
+ /* 2. Enable MTL Safety Interrupts */
+ value = readl(ioaddr + XGMAC_MTL_ECC_INT_ENABLE);
+ value |= XGMAC_RPCEIE; /* RX Parser Memory Correctable Error */
+ value |= XGMAC_ECEIE; /* EST Memory Correctable Error */
+ value |= XGMAC_RXCEIE; /* RX Memory Correctable Error */
+ value |= XGMAC_TXCEIE; /* TX Memory Correctable Error */
+ writel(value, ioaddr + XGMAC_MTL_ECC_INT_ENABLE);
+
+ /* 3. Enable DMA Safety Interrupts */
+ value = readl(ioaddr + XGMAC_DMA_ECC_INT_ENABLE);
+ value |= XGMAC_DCEIE; /* Descriptor Cache Memory Correctable Error */
+ value |= XGMAC_TCEIE; /* TSO Memory Correctable Error */
+ writel(value, ioaddr + XGMAC_DMA_ECC_INT_ENABLE);
+
+ /* Only ECC Protection for External Memory feature is selected */
+ if (asp <= 0x1)
+ return 0;
+
+ /* 4. Enable Parity and Timeout for FSM */
+ value = readl(ioaddr + XGMAC_MAC_FSM_CONTROL);
+ value |= XGMAC_PRTYEN; /* FSM Parity Feature */
+ value |= XGMAC_TMOUTEN; /* FSM Timeout Feature */
+ writel(value, ioaddr + XGMAC_MAC_FSM_CONTROL);
+
+ return 0;
+}
+
+static int dwxgmac3_safety_feat_irq_status(struct net_device *ndev,
+ void __iomem *ioaddr,
+ unsigned int asp,
+ struct stmmac_safety_stats *stats)
+{
+ bool err, corr;
+ u32 mtl, dma;
+ int ret = 0;
+
+ if (!asp)
+ return -EINVAL;
+
+ mtl = readl(ioaddr + XGMAC_MTL_SAFETY_INT_STATUS);
+ dma = readl(ioaddr + XGMAC_DMA_SAFETY_INT_STATUS);
+
+ err = (mtl & XGMAC_MCSIS) || (dma & XGMAC_MCSIS);
+ corr = false;
+ if (err) {
+ dwxgmac3_handle_mac_err(ndev, ioaddr, corr, stats);
+ ret |= !corr;
+ }
+
+ err = (mtl & (XGMAC_MEUIS | XGMAC_MECIS)) ||
+ (dma & (XGMAC_MSUIS | XGMAC_MSCIS));
+ corr = (mtl & XGMAC_MECIS) || (dma & XGMAC_MSCIS);
+ if (err) {
+ dwxgmac3_handle_mtl_err(ndev, ioaddr, corr, stats);
+ ret |= !corr;
+ }
+
+ err = dma & (XGMAC_DEUIS | XGMAC_DECIS);
+ corr = dma & XGMAC_DECIS;
+ if (err) {
+ dwxgmac3_handle_dma_err(ndev, ioaddr, corr, stats);
+ ret |= !corr;
+ }
+
+ return ret;
+}
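+
+/*
+ * Expected usage (hypothetical caller sketch, names assumed): the IRQ
+ * path treats any non-zero return as an unrecovered, uncorrectable error
+ * that warrants a restart, e.g.:
+ *
+ *   if (stmmac_safety_feat_irq_status(priv, priv->dev, priv->ioaddr,
+ *                                     priv->dma_cap.asp, &priv->sstats))
+ *           stmmac_global_err(priv); // schedule a device reset
+ */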
+
+static const struct dwxgmac3_error {
+ const struct dwxgmac3_error_desc *desc;
+} dwxgmac3_all_errors[] = {
+ { dwxgmac3_mac_errors },
+ { dwxgmac3_mtl_errors },
+ { dwxgmac3_dma_errors },
+};
+
+static int dwxgmac3_safety_feat_dump(struct stmmac_safety_stats *stats,
+ int index, unsigned long *count,
+ const char **desc)
+{
+ int module = index / 32, offset = index % 32;
+ unsigned long *ptr = (unsigned long *)stats;
+
+ if (module >= ARRAY_SIZE(dwxgmac3_all_errors))
+ return -EINVAL;
+ if (!dwxgmac3_all_errors[module].desc[offset].valid)
+ return -EINVAL;
+ if (count)
+ *count = *(ptr + index);
+ if (desc)
+ *desc = dwxgmac3_all_errors[module].desc[offset].desc;
+ return 0;
+}
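+
+/*
+ * The flat index packs module and bit: index / 32 selects the table
+ * (0 = MAC, 1 = MTL, 2 = DMA) and index % 32 selects the error line. A
+ * hypothetical ethtool-stats style consumer (sketch only):
+ *
+ *   unsigned long count;
+ *   const char *desc;
+ *   int i;
+ *
+ *   for (i = 0; i < 3 * 32; i++)
+ *           if (!dwxgmac3_safety_feat_dump(stats, i, &count, &desc))
+ *                   seq_printf(seq, "%s: %lu\n", desc, count);
+ */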
+
+static int dwxgmac3_rxp_disable(void __iomem *ioaddr)
+{
+ u32 val = readl(ioaddr + XGMAC_MTL_OPMODE);
+
+ val &= ~XGMAC_FRPE;
+ writel(val, ioaddr + XGMAC_MTL_OPMODE);
+
+ return 0;
+}
+
+static void dwxgmac3_rxp_enable(void __iomem *ioaddr)
+{
+ u32 val;
+
+ val = readl(ioaddr + XGMAC_MTL_OPMODE);
+ val |= XGMAC_FRPE;
+ writel(val, ioaddr + XGMAC_MTL_OPMODE);
+}
+
+static int dwxgmac3_rxp_update_single_entry(void __iomem *ioaddr,
+ struct stmmac_tc_entry *entry,
+ int pos)
+{
+ int ret, i;
+
+ for (i = 0; i < (sizeof(entry->val) / sizeof(u32)); i++) {
+ int real_pos = pos * (sizeof(entry->val) / sizeof(u32)) + i;
+ u32 val;
+
+ /* Wait for ready */
+ ret = readl_poll_timeout(ioaddr + XGMAC_MTL_RXP_IACC_CTRL_ST,
+ val, !(val & XGMAC_STARTBUSY), 1, 10000);
+ if (ret)
+ return ret;
+
+ /* Write data */
+ val = *((u32 *)&entry->val + i);
+ writel(val, ioaddr + XGMAC_MTL_RXP_IACC_DATA);
+
+ /* Write pos */
+ val = real_pos & XGMAC_ADDR;
+ writel(val, ioaddr + XGMAC_MTL_RXP_IACC_CTRL_ST);
+
+ /* Write OP */
+ val |= XGMAC_WRRDN;
+ writel(val, ioaddr + XGMAC_MTL_RXP_IACC_CTRL_ST);
+
+ /* Start Write */
+ val |= XGMAC_STARTBUSY;
+ writel(val, ioaddr + XGMAC_MTL_RXP_IACC_CTRL_ST);
+
+ /* Wait for done */
+ ret = readl_poll_timeout(ioaddr + XGMAC_MTL_RXP_IACC_CTRL_ST,
+ val, !(val & XGMAC_STARTBUSY), 1, 10000);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
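+
+/*
+ * The instruction-memory access protocol above is: wait for !STARTBUSY,
+ * latch the data word, program the word address, select the direction
+ * (XGMAC_WRRDN set means write), then pulse STARTBUSY and poll it back
+ * to zero. A read-back would presumably mirror this with WRRDN left
+ * clear (untested sketch):
+ *
+ *   writel(real_pos & XGMAC_ADDR, ioaddr + XGMAC_MTL_RXP_IACC_CTRL_ST);
+ *   writel((real_pos & XGMAC_ADDR) | XGMAC_STARTBUSY,
+ *          ioaddr + XGMAC_MTL_RXP_IACC_CTRL_ST);
+ *   // poll for !XGMAC_STARTBUSY, then:
+ *   val = readl(ioaddr + XGMAC_MTL_RXP_IACC_DATA);
+ */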
+
+static struct stmmac_tc_entry *
+dwxgmac3_rxp_get_next_entry(struct stmmac_tc_entry *entries,
+ unsigned int count, u32 curr_prio)
+{
+ struct stmmac_tc_entry *entry;
+ u32 min_prio = ~0x0;
+ int i, min_prio_idx;
+ bool found = false;
+
+ for (i = count - 1; i >= 0; i--) {
+ entry = &entries[i];
+
+ /* Do not update unused entries */
+ if (!entry->in_use)
+ continue;
+ /* Do not update already updated entries (i.e. fragments) */
+ if (entry->in_hw)
+ continue;
+ /* Let last entry be updated last */
+ if (entry->is_last)
+ continue;
+ /* Do not return fragments */
+ if (entry->is_frag)
+ continue;
+ /* Check if we already checked this prio */
+ if (entry->prio < curr_prio)
+ continue;
+ /* Check if this is the minimum prio */
+ if (entry->prio < min_prio) {
+ min_prio = entry->prio;
+ min_prio_idx = i;
+ found = true;
+ }
+ }
+
+ if (found)
+ return &entries[min_prio_idx];
+ return NULL;
+}
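+
+/*
+ * Ordering example (illustrative): for in-use entries with prios
+ * {5, 2, 9, 2}, successive calls return them as 2, 2, 5, 9: lowest
+ * prio first, with ties resolved towards the highest array index by the
+ * reverse scan. Fragments and the final catch-all entry are handled
+ * separately by the caller.
+ */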
+
+static int dwxgmac3_rxp_config(void __iomem *ioaddr,
+ struct stmmac_tc_entry *entries,
+ unsigned int count)
+{
+ struct stmmac_tc_entry *entry, *frag;
+ int i, ret, nve = 0;
+ u32 curr_prio = 0;
+ u32 old_val, val;
+
+ /* Force disable RX */
+ old_val = readl(ioaddr + XGMAC_RX_CONFIG);
+ val = old_val & ~XGMAC_CONFIG_RE;
+ writel(val, ioaddr + XGMAC_RX_CONFIG);
+
+ /* Disable RX Parser */
+ ret = dwxgmac3_rxp_disable(ioaddr);
+ if (ret)
+ goto re_enable;
+
+ /* Set all entries as NOT in HW */
+ for (i = 0; i < count; i++) {
+ entry = &entries[i];
+ entry->in_hw = false;
+ }
+
+ /* Program entries lowest-priority first (see the ordering sketch above) */
+ while (1) {
+ entry = dwxgmac3_rxp_get_next_entry(entries, count, curr_prio);
+ if (!entry)
+ break;
+
+ curr_prio = entry->prio;
+ frag = entry->frag_ptr;
+
+ /* Set special fragment requirements */
+ if (frag) {
+ entry->val.af = 0;
+ entry->val.rf = 0;
+ entry->val.nc = 1;
+ entry->val.ok_index = nve + 2;
+ }
+
+ ret = dwxgmac3_rxp_update_single_entry(ioaddr, entry, nve);
+ if (ret)
+ goto re_enable;
+
+ entry->table_pos = nve++;
+ entry->in_hw = true;
+
+ if (frag && !frag->in_hw) {
+ ret = dwxgmac3_rxp_update_single_entry(ioaddr, frag, nve);
+ if (ret)
+ goto re_enable;
+ frag->table_pos = nve++;
+ frag->in_hw = true;
+ }
+ }
+
+ if (!nve)
+ goto re_enable;
+
+ /* Finally, program the catch-all ("all pass") last entry */
+ for (i = 0; i < count; i++) {
+ entry = &entries[i];
+ if (!entry->is_last)
+ continue;
+
+ ret = dwxgmac3_rxp_update_single_entry(ioaddr, entry, nve);
+ if (ret)
+ goto re_enable;
+
+ entry->table_pos = nve++;
+ }
+
+ /* Assume n. of parsable entries == n. of valid entries */
+ val = (nve << 16) & XGMAC_NPE;
+ val |= nve & XGMAC_NVE;
+ writel(val, ioaddr + XGMAC_MTL_RXP_CONTROL_STATUS);
+
+ /* Enable RX Parser */
+ dwxgmac3_rxp_enable(ioaddr);
+
+re_enable:
+ /* Re-enable RX */
+ writel(old_val, ioaddr + XGMAC_RX_CONFIG);
+ return ret;
+}
+
+static int dwxgmac2_get_mac_tx_timestamp(struct mac_device_info *hw, u64 *ts)
+{
+ void __iomem *ioaddr = hw->pcsr;
+ u32 value;
+
+ if (readl_poll_timeout_atomic(ioaddr + XGMAC_TIMESTAMP_STATUS,
+ value, value & XGMAC_TXTSC, 100, 10000))
+ return -EBUSY;
+
+ *ts = readl(ioaddr + XGMAC_TXTIMESTAMP_NSEC) & XGMAC_TXTSSTSLO;
+ *ts += readl(ioaddr + XGMAC_TXTIMESTAMP_SEC) * 1000000000ULL;
+ return 0;
+}
+
+static int dwxgmac2_flex_pps_config(void __iomem *ioaddr, int index,
+ struct stmmac_pps_cfg *cfg, bool enable,
+ u32 sub_second_inc, u32 systime_flags)
+{
+ u32 tnsec = readl(ioaddr + XGMAC_PPSx_TARGET_TIME_NSEC(index));
+ u32 val = readl(ioaddr + XGMAC_PPS_CONTROL);
+ u64 period;
+
+ if (!cfg->available)
+ return -EINVAL;
+ if (tnsec & XGMAC_TRGTBUSY0)
+ return -EBUSY;
+ if (!sub_second_inc || !systime_flags)
+ return -EINVAL;
+
+ val &= ~XGMAC_PPSx_MASK(index);
+
+ if (!enable) {
+ val |= XGMAC_PPSCMDx(index, XGMAC_PPSCMD_STOP);
+ writel(val, ioaddr + XGMAC_PPS_CONTROL);
+ return 0;
+ }
+
+ val |= XGMAC_PPSCMDx(index, XGMAC_PPSCMD_START);
+ val |= XGMAC_TRGTMODSELx(index, XGMAC_PPSCMD_START);
+ val |= XGMAC_PPSEN0;
+
+ writel(cfg->start.tv_sec, ioaddr + XGMAC_PPSx_TARGET_TIME_SEC(index));
+
+ if (!(systime_flags & PTP_TCR_TSCTRLSSR))
+ cfg->start.tv_nsec = (cfg->start.tv_nsec * 1000) / 465;
+ writel(cfg->start.tv_nsec, ioaddr + XGMAC_PPSx_TARGET_TIME_NSEC(index));
+
+ period = cfg->period.tv_sec * 1000000000;
+ period += cfg->period.tv_nsec;
+
+ do_div(period, sub_second_inc);
+
+ if (period <= 1)
+ return -EINVAL;
+
+ writel(period - 1, ioaddr + XGMAC_PPSx_INTERVAL(index));
+
+ period >>= 1;
+ if (period <= 1)
+ return -EINVAL;
+
+ writel(period - 1, ioaddr + XGMAC_PPSx_WIDTH(index));
+
+ /* Finally, activate it */
+ writel(val, ioaddr + XGMAC_PPS_CONTROL);
+ return 0;
+}
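+
+/*
+ * Worked example (numbers assumed for illustration): with a PTP clock
+ * granularity of sub_second_inc = 4ns and cfg->period = 1s, period
+ * becomes 1000000000 / 4 = 250000000 ticks, so PPSx_INTERVAL is
+ * programmed with 249999999 and PPSx_WIDTH with 124999999, i.e. a 1Hz
+ * output with a roughly 50% duty cycle.
+ */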
+
+static void dwxgmac2_sarc_configure(void __iomem *ioaddr, int val)
+{
+ u32 value = readl(ioaddr + XGMAC_TX_CONFIG);
+
+ value &= ~XGMAC_CONFIG_SARC;
+ value |= val << XGMAC_CONFIG_SARC_SHIFT;
+
+ writel(value, ioaddr + XGMAC_TX_CONFIG);
+}
+
+static void dwxgmac2_enable_vlan(struct mac_device_info *hw, u32 type)
+{
+ void __iomem *ioaddr = hw->pcsr;
+ u32 value;
+
+ value = readl(ioaddr + XGMAC_VLAN_INCL);
+ value |= XGMAC_VLAN_VLTI;
+ value |= XGMAC_VLAN_CSVL; /* Only use SVLAN */
+ value &= ~XGMAC_VLAN_VLC;
+ value |= (type << XGMAC_VLAN_VLC_SHIFT) & XGMAC_VLAN_VLC;
+ writel(value, ioaddr + XGMAC_VLAN_INCL);
+}
+
const struct stmmac_ops dwxgmac210_ops = {
.core_init = dwxgmac2_core_init,
.set_mac = dwxgmac2_set_mac,
.rx_ipc = dwxgmac2_rx_ipc,
.rx_queue_enable = dwxgmac2_rx_queue_enable,
.rx_queue_prio = dwxgmac2_rx_queue_prio,
- .tx_queue_prio = NULL,
+ .tx_queue_prio = dwxgmac2_tx_queue_prio,
.rx_queue_routing = NULL,
.prog_mtl_rx_algorithms = dwxgmac2_prog_mtl_rx_algorithms,
.prog_mtl_tx_algorithms = dwxgmac2_prog_mtl_tx_algorithms,
- .set_mtl_tx_queue_weight = NULL,
+ .set_mtl_tx_queue_weight = dwxgmac2_set_mtl_tx_queue_weight,
.map_mtl_to_dma = dwxgmac2_map_mtl_to_dma,
.config_cbs = dwxgmac2_config_cbs,
- .dump_regs = NULL,
+ .dump_regs = dwxgmac2_dump_regs,
.host_irq_status = dwxgmac2_host_irq_status,
.host_mtl_irq_status = dwxgmac2_host_mtl_irq_status,
.flow_ctrl = dwxgmac2_flow_ctrl,
.pmt = dwxgmac2_pmt,
.set_umac_addr = dwxgmac2_set_umac_addr,
.get_umac_addr = dwxgmac2_get_umac_addr,
- .set_eee_mode = NULL,
- .reset_eee_mode = NULL,
- .set_eee_timer = NULL,
- .set_eee_pls = NULL,
+ .set_eee_mode = dwxgmac2_set_eee_mode,
+ .reset_eee_mode = dwxgmac2_reset_eee_mode,
+ .set_eee_timer = dwxgmac2_set_eee_timer,
+ .set_eee_pls = dwxgmac2_set_eee_pls,
.pcs_ctrl_ane = NULL,
.pcs_rane = NULL,
.pcs_get_adv_lp = NULL,
.debug = NULL,
.set_filter = dwxgmac2_set_filter,
+ .safety_feat_config = dwxgmac3_safety_feat_config,
+ .safety_feat_irq_status = dwxgmac3_safety_feat_irq_status,
+ .safety_feat_dump = dwxgmac3_safety_feat_dump,
.set_mac_loopback = dwxgmac2_set_mac_loopback,
+ .rss_configure = dwxgmac2_rss_configure,
+ .update_vlan_hash = dwxgmac2_update_vlan_hash,
+ .rxp_config = dwxgmac3_rxp_config,
+ .get_mac_tx_timestamp = dwxgmac2_get_mac_tx_timestamp,
+ .flex_pps_config = dwxgmac2_flex_pps_config,
+ .sarc_configure = dwxgmac2_sarc_configure,
+ .enable_vlan = dwxgmac2_enable_vlan,
};
int dwxgmac2_setup(struct stmmac_priv *priv)
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_descs.c b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_descs.c
index c4c45402b8f8..ae48154f933c 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_descs.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_descs.c
@@ -26,16 +26,17 @@ static int dwxgmac2_get_rx_status(void *data, struct stmmac_extra_stats *x,
struct dma_desc *p)
{
unsigned int rdes3 = le32_to_cpu(p->des3);
- int ret = good_frame;
if (unlikely(rdes3 & XGMAC_RDES3_OWN))
return dma_own;
+ if (unlikely(rdes3 & XGMAC_RDES3_CTXT))
+ return discard_frame;
if (likely(!(rdes3 & XGMAC_RDES3_LD)))
+ return rx_not_ls;
+ if (unlikely((rdes3 & XGMAC_RDES3_ES) && (rdes3 & XGMAC_RDES3_LD)))
return discard_frame;
- if (unlikely(rdes3 & XGMAC_RDES3_ES))
- ret = discard_frame;
- return ret;
+ return good_frame;
}
static int dwxgmac2_get_tx_len(struct dma_desc *p)
@@ -55,7 +56,7 @@ static void dwxgmac2_set_tx_owner(struct dma_desc *p)
static void dwxgmac2_set_rx_owner(struct dma_desc *p, int disable_rx_ic)
{
- p->des3 = cpu_to_le32(XGMAC_RDES3_OWN);
+ p->des3 |= cpu_to_le32(XGMAC_RDES3_OWN);
if (!disable_rx_ic)
p->des3 |= cpu_to_le32(XGMAC_RDES3_IOC);
@@ -98,11 +99,17 @@ static int dwxgmac2_rx_check_timestamp(void *desc)
unsigned int rdes3 = le32_to_cpu(p->des3);
bool desc_valid, ts_valid;
+ dma_rmb();
+
desc_valid = !(rdes3 & XGMAC_RDES3_OWN) && (rdes3 & XGMAC_RDES3_CTXT);
ts_valid = !(rdes3 & XGMAC_RDES3_TSD) && (rdes3 & XGMAC_RDES3_TSA);
- if (likely(desc_valid && ts_valid))
+ if (likely(desc_valid && ts_valid)) {
+ if ((p->des0 == 0xffffffff) && (p->des1 == 0xffffffff))
+ return -EINVAL;
return 0;
+ }
+
return -EINVAL;
}
@@ -113,13 +120,10 @@ static int dwxgmac2_get_rx_timestamp_status(void *desc, void *next_desc,
unsigned int rdes3 = le32_to_cpu(p->des3);
int ret = -EBUSY;
- if (likely(rdes3 & XGMAC_RDES3_CDA)) {
+ if (likely(rdes3 & XGMAC_RDES3_CDA))
ret = dwxgmac2_rx_check_timestamp(next_desc);
- if (ret)
- return ret;
- }
- return ret;
+ return !ret;
}
static void dwxgmac2_init_rx_desc(struct dma_desc *p, int disable_rx_ic,
@@ -144,7 +148,7 @@ static void dwxgmac2_prepare_tx_desc(struct dma_desc *p, int is_fs, int len,
p->des2 |= cpu_to_le32(len & XGMAC_TDES2_B1L);
- tdes3 = tot_pkt_len & XGMAC_TDES3_FL;
+ tdes3 |= tot_pkt_len & XGMAC_TDES3_FL;
if (is_fs)
tdes3 |= XGMAC_TDES3_FD;
else
@@ -254,6 +258,86 @@ static void dwxgmac2_clear(struct dma_desc *p)
p->des3 = 0;
}
+static int dwxgmac2_get_rx_hash(struct dma_desc *p, u32 *hash,
+ enum pkt_hash_types *type)
+{
+ unsigned int rdes3 = le32_to_cpu(p->des3);
+ u32 ptype;
+
+ if (rdes3 & XGMAC_RDES3_RSV) {
+ ptype = (rdes3 & XGMAC_RDES3_L34T) >> XGMAC_RDES3_L34T_SHIFT;
+
+ switch (ptype) {
+ case XGMAC_L34T_IP4TCP:
+ case XGMAC_L34T_IP4UDP:
+ case XGMAC_L34T_IP6TCP:
+ case XGMAC_L34T_IP6UDP:
+ *type = PKT_HASH_TYPE_L4;
+ break;
+ default:
+ *type = PKT_HASH_TYPE_L3;
+ break;
+ }
+
+ *hash = le32_to_cpu(p->des1);
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static int dwxgmac2_get_rx_header_len(struct dma_desc *p, unsigned int *len)
+{
+ *len = le32_to_cpu(p->des2) & XGMAC_RDES2_HL;
+ return 0;
+}
+
+static void dwxgmac2_set_sec_addr(struct dma_desc *p, dma_addr_t addr)
+{
+ p->des2 = cpu_to_le32(lower_32_bits(addr));
+ p->des3 = cpu_to_le32(upper_32_bits(addr));
+}
+
+static void dwxgmac2_set_sarc(struct dma_desc *p, u32 sarc_type)
+{
+ sarc_type <<= XGMAC_TDES3_SAIC_SHIFT;
+
+ p->des3 |= cpu_to_le32(sarc_type & XGMAC_TDES3_SAIC);
+}
+
+static void dwxgmac2_set_vlan_tag(struct dma_desc *p, u16 tag, u16 inner_tag,
+ u32 inner_type)
+{
+ p->des0 = 0;
+ p->des1 = 0;
+ p->des2 = 0;
+ p->des3 = 0;
+
+ /* Inner VLAN */
+ if (inner_type) {
+ u32 des = inner_tag << XGMAC_TDES2_IVT_SHIFT;
+
+ des &= XGMAC_TDES2_IVT;
+ p->des2 = cpu_to_le32(des);
+
+ des = inner_type << XGMAC_TDES3_IVTIR_SHIFT;
+ des &= XGMAC_TDES3_IVTIR;
+ p->des3 = cpu_to_le32(des | XGMAC_TDES3_IVLTV);
+ }
+
+ /* Outer VLAN */
+ p->des3 |= cpu_to_le32(tag & XGMAC_TDES3_VT);
+ p->des3 |= cpu_to_le32(XGMAC_TDES3_VLTV);
+
+ p->des3 |= cpu_to_le32(XGMAC_TDES3_CTXT);
+}
+
+static void dwxgmac2_set_vlan(struct dma_desc *p, u32 type)
+{
+ type <<= XGMAC_TDES2_VTIR_SHIFT;
+ p->des2 |= cpu_to_le32(type & XGMAC_TDES2_VTIR);
+}
+
const struct stmmac_desc_ops dwxgmac210_desc_ops = {
.tx_status = dwxgmac2_get_tx_status,
.rx_status = dwxgmac2_get_rx_status,
@@ -277,4 +361,10 @@ const struct stmmac_desc_ops dwxgmac210_desc_ops = {
.get_addr = dwxgmac2_get_addr,
.set_addr = dwxgmac2_set_addr,
.clear = dwxgmac2_clear,
+ .get_rx_hash = dwxgmac2_get_rx_hash,
+ .get_rx_header_len = dwxgmac2_get_rx_header_len,
+ .set_sec_addr = dwxgmac2_set_sec_addr,
+ .set_sarc = dwxgmac2_set_sarc,
+ .set_vlan_tag = dwxgmac2_set_vlan_tag,
+ .set_vlan = dwxgmac2_set_vlan,
};
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c
index a4f236e3593e..64956465c030 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c
@@ -128,6 +128,14 @@ static void dwxgmac2_dma_axi(void __iomem *ioaddr, struct stmmac_axi *axi)
writel(XGMAC_RDPS, ioaddr + XGMAC_RX_EDMA_CTRL);
}
+static void dwxgmac2_dma_dump_regs(void __iomem *ioaddr, u32 *reg_space)
+{
+ int i;
+
+ for (i = (XGMAC_DMA_MODE / 4); i < XGMAC_REGSIZE; i++)
+ reg_space[i] = readl(ioaddr + i * 4);
+}
+
static void dwxgmac2_dma_rx_mode(void __iomem *ioaddr, int mode,
u32 channel, int fifosz, u8 qmode)
{
@@ -351,18 +359,24 @@ static void dwxgmac2_get_hw_feature(void __iomem *ioaddr,
/* MAC HW feature 0 */
hw_cap = readl(ioaddr + XGMAC_HW_FEATURE0);
+ dma_cap->vlins = (hw_cap & XGMAC_HWFEAT_SAVLANINS) >> 27;
dma_cap->rx_coe = (hw_cap & XGMAC_HWFEAT_RXCOESEL) >> 16;
dma_cap->tx_coe = (hw_cap & XGMAC_HWFEAT_TXCOESEL) >> 14;
+ dma_cap->eee = (hw_cap & XGMAC_HWFEAT_EEESEL) >> 13;
dma_cap->atime_stamp = (hw_cap & XGMAC_HWFEAT_TSSEL) >> 12;
dma_cap->av = (hw_cap & XGMAC_HWFEAT_AVSEL) >> 11;
dma_cap->av &= (hw_cap & XGMAC_HWFEAT_RAVSEL) >> 10;
+ dma_cap->rmon = (hw_cap & XGMAC_HWFEAT_MMCSEL) >> 8;
dma_cap->pmt_magic_frame = (hw_cap & XGMAC_HWFEAT_MGKSEL) >> 7;
dma_cap->pmt_remote_wake_up = (hw_cap & XGMAC_HWFEAT_RWKSEL) >> 6;
+ dma_cap->vlhash = (hw_cap & XGMAC_HWFEAT_VLHASH) >> 4;
dma_cap->mbps_1000 = (hw_cap & XGMAC_HWFEAT_GMIISEL) >> 1;
/* MAC HW feature 1 */
hw_cap = readl(ioaddr + XGMAC_HW_FEATURE1);
+ dma_cap->rssen = (hw_cap & XGMAC_HWFEAT_RSSEN) >> 20;
dma_cap->tsoen = (hw_cap & XGMAC_HWFEAT_TSOEN) >> 18;
+ dma_cap->sphen = (hw_cap & XGMAC_HWFEAT_SPHEN) >> 17;
dma_cap->addr64 = (hw_cap & XGMAC_HWFEAT_ADDR64) >> 14;
switch (dma_cap->addr64) {
@@ -396,6 +410,14 @@ static void dwxgmac2_get_hw_feature(void __iomem *ioaddr,
((hw_cap & XGMAC_HWFEAT_TXQCNT) >> 6) + 1;
dma_cap->number_rx_queues =
((hw_cap & XGMAC_HWFEAT_RXQCNT) >> 0) + 1;
+
+ /* MAC HW feature 3 */
+ hw_cap = readl(ioaddr + XGMAC_HW_FEATURE3);
+ dma_cap->asp = (hw_cap & XGMAC_HWFEAT_ASP) >> 14;
+ dma_cap->dvlan = (hw_cap & XGMAC_HWFEAT_DVLAN) >> 13;
+ dma_cap->frpes = (hw_cap & XGMAC_HWFEAT_FRPES) >> 11;
+ dma_cap->frpbs = (hw_cap & XGMAC_HWFEAT_FRPPB) >> 9;
+ dma_cap->frpsel = (hw_cap & XGMAC_HWFEAT_FRPSEL) >> 3;
}
static void dwxgmac2_rx_watchdog(void __iomem *ioaddr, u32 riwt, u32 nchan)
@@ -462,6 +484,22 @@ static void dwxgmac2_set_bfsize(void __iomem *ioaddr, int bfsize, u32 chan)
writel(value, ioaddr + XGMAC_DMA_CH_RX_CONTROL(chan));
}
+static void dwxgmac2_enable_sph(void __iomem *ioaddr, bool en, u32 chan)
+{
+ u32 value = readl(ioaddr + XGMAC_RX_CONFIG);
+
+ value &= ~XGMAC_CONFIG_HDSMS;
+ value |= XGMAC_CONFIG_HDSMS_256; /* Segment max 256 bytes */
+ writel(value, ioaddr + XGMAC_RX_CONFIG);
+
+ value = readl(ioaddr + XGMAC_DMA_CH_CONTROL(chan));
+ if (en)
+ value |= XGMAC_SPH;
+ else
+ value &= ~XGMAC_SPH;
+ writel(value, ioaddr + XGMAC_DMA_CH_CONTROL(chan));
+}
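+
+/*
+ * Split Header: with XGMAC_CONFIG_HDSMS_256 the DMA writes at most the
+ * first 256 bytes (the protocol headers) of a packet into buffer 1 and
+ * the remaining payload into buffer 2. This is why stmmac_init_rx_buffers()
+ * in stmmac_main.c allocates a second page per descriptor
+ * (buf->sec_page) whenever priv->sph is set.
+ */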
+
const struct stmmac_dma_ops dwxgmac210_dma_ops = {
.reset = dwxgmac2_dma_reset,
.init = dwxgmac2_dma_init,
@@ -469,7 +507,7 @@ const struct stmmac_dma_ops dwxgmac210_dma_ops = {
.init_rx_chan = dwxgmac2_dma_init_rx_chan,
.init_tx_chan = dwxgmac2_dma_init_tx_chan,
.axi = dwxgmac2_dma_axi,
- .dump_regs = NULL,
+ .dump_regs = dwxgmac2_dma_dump_regs,
.dma_rx_mode = dwxgmac2_dma_rx_mode,
.dma_tx_mode = dwxgmac2_dma_tx_mode,
.enable_dma_irq = dwxgmac2_enable_dma_irq,
@@ -488,4 +526,5 @@ const struct stmmac_dma_ops dwxgmac210_dma_ops = {
.enable_tso = dwxgmac2_enable_tso,
.qmode = dwxgmac2_qmode,
.set_bfsize = dwxgmac2_set_bfsize,
+ .enable_sph = dwxgmac2_enable_sph,
};
diff --git a/drivers/net/ethernet/stmicro/stmmac/hwif.c b/drivers/net/ethernet/stmicro/stmmac/hwif.c
index 6c61b753b55e..3af2e5015245 100644
--- a/drivers/net/ethernet/stmicro/stmmac/hwif.c
+++ b/drivers/net/ethernet/stmicro/stmmac/hwif.c
@@ -201,7 +201,7 @@ static const struct stmmac_hwif_entry {
.min_id = DWXGMAC_CORE_2_10,
.regs = {
.ptp_off = PTP_XGMAC_OFFSET,
- .mmc_off = 0,
+ .mmc_off = MMC_XGMAC_OFFSET,
},
.desc = &dwxgmac210_desc_ops,
.dma = &dwxgmac210_dma_ops,
@@ -209,7 +209,7 @@ static const struct stmmac_hwif_entry {
.hwtimestamp = &stmmac_ptp,
.mode = NULL,
.tc = &dwmac510_tc_ops,
- .mmc = NULL,
+ .mmc = &dwxgmac_mmc_ops,
.setup = dwxgmac2_setup,
.quirks = NULL,
},
diff --git a/drivers/net/ethernet/stmicro/stmmac/hwif.h b/drivers/net/ethernet/stmicro/stmmac/hwif.h
index 278c0dbec9d9..9435b312495d 100644
--- a/drivers/net/ethernet/stmicro/stmmac/hwif.h
+++ b/drivers/net/ethernet/stmicro/stmmac/hwif.h
@@ -86,6 +86,15 @@ struct stmmac_desc_ops {
void (*set_addr)(struct dma_desc *p, dma_addr_t addr);
/* clear descriptor */
void (*clear)(struct dma_desc *p);
+ /* RSS */
+ int (*get_rx_hash)(struct dma_desc *p, u32 *hash,
+ enum pkt_hash_types *type);
+ int (*get_rx_header_len)(struct dma_desc *p, unsigned int *len);
+ void (*set_sec_addr)(struct dma_desc *p, dma_addr_t addr);
+ void (*set_sarc)(struct dma_desc *p, u32 sarc_type);
+ void (*set_vlan_tag)(struct dma_desc *p, u16 tag, u16 inner_tag,
+ u32 inner_type);
+ void (*set_vlan)(struct dma_desc *p, u32 type);
};
#define stmmac_init_rx_desc(__priv, __args...) \
@@ -136,6 +145,18 @@ struct stmmac_desc_ops {
stmmac_do_void_callback(__priv, desc, set_addr, __args)
#define stmmac_clear_desc(__priv, __args...) \
stmmac_do_void_callback(__priv, desc, clear, __args)
+#define stmmac_get_rx_hash(__priv, __args...) \
+ stmmac_do_callback(__priv, desc, get_rx_hash, __args)
+#define stmmac_get_rx_header_len(__priv, __args...) \
+ stmmac_do_callback(__priv, desc, get_rx_header_len, __args)
+#define stmmac_set_desc_sec_addr(__priv, __args...) \
+ stmmac_do_void_callback(__priv, desc, set_sec_addr, __args)
+#define stmmac_set_desc_sarc(__priv, __args...) \
+ stmmac_do_void_callback(__priv, desc, set_sarc, __args)
+#define stmmac_set_desc_vlan_tag(__priv, __args...) \
+ stmmac_do_void_callback(__priv, desc, set_vlan_tag, __args)
+#define stmmac_set_desc_vlan(__priv, __args...) \
+ stmmac_do_void_callback(__priv, desc, set_vlan, __args)
struct stmmac_dma_cfg;
struct dma_features;
@@ -186,6 +207,7 @@ struct stmmac_dma_ops {
void (*enable_tso)(void __iomem *ioaddr, bool en, u32 chan);
void (*qmode)(void __iomem *ioaddr, u32 channel, u8 qmode);
void (*set_bfsize)(void __iomem *ioaddr, int bfsize, u32 chan);
+ void (*enable_sph)(void __iomem *ioaddr, bool en, u32 chan);
};
#define stmmac_reset(__priv, __args...) \
@@ -242,6 +264,8 @@ struct stmmac_dma_ops {
stmmac_do_void_callback(__priv, dma, qmode, __args)
#define stmmac_set_dma_bfsize(__priv, __args...) \
stmmac_do_void_callback(__priv, dma, set_bfsize, __args)
+#define stmmac_enable_sph(__priv, __args...) \
+ stmmac_do_void_callback(__priv, dma, enable_sph, __args)
struct mac_device_info;
struct net_device;
@@ -249,6 +273,7 @@ struct rgmii_adv;
struct stmmac_safety_stats;
struct stmmac_tc_entry;
struct stmmac_pps_cfg;
+struct stmmac_rss;
/* Helpers to program the MAC core */
struct stmmac_ops {
@@ -327,6 +352,17 @@ struct stmmac_ops {
u32 sub_second_inc, u32 systime_flags);
/* Loopback for selftests */
void (*set_mac_loopback)(void __iomem *ioaddr, bool enable);
+ /* RSS */
+ int (*rss_configure)(struct mac_device_info *hw,
+ struct stmmac_rss *cfg, u32 num_rxq);
+ /* VLAN */
+ void (*update_vlan_hash)(struct mac_device_info *hw, u32 hash,
+ bool is_double);
+ void (*enable_vlan)(struct mac_device_info *hw, u32 type);
+ /* TX Timestamp */
+ int (*get_mac_tx_timestamp)(struct mac_device_info *hw, u64 *ts);
+ /* Source Address Insertion / Replacement */
+ void (*sarc_configure)(void __iomem *ioaddr, int val);
};
#define stmmac_core_init(__priv, __args...) \
@@ -397,6 +433,16 @@ struct stmmac_ops {
stmmac_do_callback(__priv, mac, flex_pps_config, __args)
#define stmmac_set_mac_loopback(__priv, __args...) \
stmmac_do_void_callback(__priv, mac, set_mac_loopback, __args)
+#define stmmac_rss_configure(__priv, __args...) \
+ stmmac_do_callback(__priv, mac, rss_configure, __args)
+#define stmmac_update_vlan_hash(__priv, __args...) \
+ stmmac_do_void_callback(__priv, mac, update_vlan_hash, __args)
+#define stmmac_enable_vlan(__priv, __args...) \
+ stmmac_do_void_callback(__priv, mac, enable_vlan, __args)
+#define stmmac_get_mac_tx_timestamp(__priv, __args...) \
+ stmmac_do_callback(__priv, mac, get_mac_tx_timestamp, __args)
+#define stmmac_sarc_configure(__priv, __args...) \
+ stmmac_do_void_callback(__priv, mac, sarc_configure, __args)
/* PTP and HW Timer helpers */
struct stmmac_hwtimestamp {
@@ -503,6 +549,7 @@ extern const struct stmmac_ops dwxgmac210_ops;
extern const struct stmmac_dma_ops dwxgmac210_dma_ops;
extern const struct stmmac_desc_ops dwxgmac210_desc_ops;
extern const struct stmmac_mmc_ops dwmac_mmc_ops;
+extern const struct stmmac_mmc_ops dwxgmac_mmc_ops;
#define GMAC_VERSION 0x00000020 /* GMAC CORE Version */
#define GMAC4_VERSION 0x00000110 /* GMAC4+ CORE Version */
diff --git a/drivers/net/ethernet/stmicro/stmmac/mmc.h b/drivers/net/ethernet/stmicro/stmmac/mmc.h
index 3587ceb9faf5..a0c05925883e 100644
--- a/drivers/net/ethernet/stmicro/stmmac/mmc.h
+++ b/drivers/net/ethernet/stmicro/stmmac/mmc.h
@@ -24,6 +24,7 @@
#define MMC_GMAC4_OFFSET 0x700
#define MMC_GMAC3_X_OFFSET 0x100
+#define MMC_XGMAC_OFFSET 0x800
struct stmmac_counters {
unsigned int mmc_tx_octetcount_gb;
@@ -116,6 +117,14 @@ struct stmmac_counters {
unsigned int mmc_rx_tcp_err_octets;
unsigned int mmc_rx_icmp_gd_octets;
unsigned int mmc_rx_icmp_err_octets;
+
+ /* FPE */
+ unsigned int mmc_tx_fpe_fragment_cntr;
+ unsigned int mmc_tx_hold_req_cntr;
+ unsigned int mmc_rx_packet_assembly_err_cntr;
+ unsigned int mmc_rx_packet_smd_err_cntr;
+ unsigned int mmc_rx_packet_assembly_ok_cntr;
+ unsigned int mmc_rx_fpe_fragment_cntr;
};
#endif /* __MMC_H__ */
diff --git a/drivers/net/ethernet/stmicro/stmmac/mmc_core.c b/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
index a471db6d7b11..a223584f5f9a 100644
--- a/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
@@ -119,6 +119,64 @@
#define MMC_RX_ICMP_GD_OCTETS 0x180
#define MMC_RX_ICMP_ERR_OCTETS 0x184
+/* XGMAC MMC Registers */
+#define MMC_XGMAC_TX_OCTET_GB 0x14
+#define MMC_XGMAC_TX_PKT_GB 0x1c
+#define MMC_XGMAC_TX_BROAD_PKT_G 0x24
+#define MMC_XGMAC_TX_MULTI_PKT_G 0x2c
+#define MMC_XGMAC_TX_64OCT_GB 0x34
+#define MMC_XGMAC_TX_65OCT_GB 0x3c
+#define MMC_XGMAC_TX_128OCT_GB 0x44
+#define MMC_XGMAC_TX_256OCT_GB 0x4c
+#define MMC_XGMAC_TX_512OCT_GB 0x54
+#define MMC_XGMAC_TX_1024OCT_GB 0x5c
+#define MMC_XGMAC_TX_UNI_PKT_GB 0x64
+#define MMC_XGMAC_TX_MULTI_PKT_GB 0x6c
+#define MMC_XGMAC_TX_BROAD_PKT_GB 0x74
+#define MMC_XGMAC_TX_UNDER 0x7c
+#define MMC_XGMAC_TX_OCTET_G 0x84
+#define MMC_XGMAC_TX_PKT_G 0x8c
+#define MMC_XGMAC_TX_PAUSE 0x94
+#define MMC_XGMAC_TX_VLAN_PKT_G 0x9c
+#define MMC_XGMAC_TX_LPI_USEC 0xa4
+#define MMC_XGMAC_TX_LPI_TRAN 0xa8
+
+#define MMC_XGMAC_RX_PKT_GB 0x100
+#define MMC_XGMAC_RX_OCTET_GB 0x108
+#define MMC_XGMAC_RX_OCTET_G 0x110
+#define MMC_XGMAC_RX_BROAD_PKT_G 0x118
+#define MMC_XGMAC_RX_MULTI_PKT_G 0x120
+#define MMC_XGMAC_RX_CRC_ERR 0x128
+#define MMC_XGMAC_RX_RUNT_ERR 0x130
+#define MMC_XGMAC_RX_JABBER_ERR 0x134
+#define MMC_XGMAC_RX_UNDER 0x138
+#define MMC_XGMAC_RX_OVER 0x13c
+#define MMC_XGMAC_RX_64OCT_GB 0x140
+#define MMC_XGMAC_RX_65OCT_GB 0x148
+#define MMC_XGMAC_RX_128OCT_GB 0x150
+#define MMC_XGMAC_RX_256OCT_GB 0x158
+#define MMC_XGMAC_RX_512OCT_GB 0x160
+#define MMC_XGMAC_RX_1024OCT_GB 0x168
+#define MMC_XGMAC_RX_UNI_PKT_G 0x170
+#define MMC_XGMAC_RX_LENGTH_ERR 0x178
+#define MMC_XGMAC_RX_RANGE 0x180
+#define MMC_XGMAC_RX_PAUSE 0x188
+#define MMC_XGMAC_RX_FIFOOVER_PKT 0x190
+#define MMC_XGMAC_RX_VLAN_PKT_GB 0x198
+#define MMC_XGMAC_RX_WATCHDOG_ERR 0x1a0
+#define MMC_XGMAC_RX_LPI_USEC 0x1a4
+#define MMC_XGMAC_RX_LPI_TRAN 0x1a8
+#define MMC_XGMAC_RX_DISCARD_PKT_GB 0x1ac
+#define MMC_XGMAC_RX_DISCARD_OCT_GB 0x1b4
+#define MMC_XGMAC_RX_ALIGN_ERR_PKT 0x1bc
+
+#define MMC_XGMAC_TX_FPE_FRAG 0x208
+#define MMC_XGMAC_TX_HOLD_REQ 0x20c
+#define MMC_XGMAC_RX_PKT_ASSEMBLY_ERR 0x228
+#define MMC_XGMAC_RX_PKT_SMD_ERR 0x22c
+#define MMC_XGMAC_RX_PKT_ASSEMBLY_OK 0x230
+#define MMC_XGMAC_RX_FPE_FRAG 0x234
+
static void dwmac_mmc_ctrl(void __iomem *mmcaddr, unsigned int mode)
{
u32 value = readl(mmcaddr + MMC_CNTRL);
@@ -263,3 +321,137 @@ const struct stmmac_mmc_ops dwmac_mmc_ops = {
.intr_all_mask = dwmac_mmc_intr_all_mask,
.read = dwmac_mmc_read,
};
+
+static void dwxgmac_mmc_ctrl(void __iomem *mmcaddr, unsigned int mode)
+{
+ u32 value = readl(mmcaddr + MMC_CNTRL);
+
+ value |= (mode & 0x3F);
+
+ writel(value, mmcaddr + MMC_CNTRL);
+}
+
+static void dwxgmac_mmc_intr_all_mask(void __iomem *mmcaddr)
+{
+ writel(MMC_DEFAULT_MASK, mmcaddr + MMC_RX_INTR_MASK);
+ writel(MMC_DEFAULT_MASK, mmcaddr + MMC_TX_INTR_MASK);
+}
+
+static void dwxgmac_read_mmc_reg(void __iomem *addr, u32 reg, u32 *dest)
+{
+ u64 tmp = 0;
+
+ tmp += readl(addr + reg);
+ tmp += ((u64)readl(addr + reg + 0x4)) << 32;
+ if (tmp > GENMASK(31, 0))
+ *dest = ~0x0;
+ else
+ *dest = *dest + tmp;
+}
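+
+/*
+ * The XGMAC MMC counters are 64 bits wide (two 32-bit halves) while the
+ * shared stmmac_counters fields are 32 bits. Saturation example: a
+ * hardware value of 0x100000005 exceeds GENMASK(31, 0), so *dest pins
+ * at 0xffffffff instead of silently truncating to 5.
+ */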
+
+/* This reads the MAC core counters (if actually supported).
+ * By default the MMC core is programmed to reset each
+ * counter after a read. So all the fields of the mmc struct
+ * have to be incremented.
+ */
+static void dwxgmac_mmc_read(void __iomem *mmcaddr, struct stmmac_counters *mmc)
+{
+ dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_TX_OCTET_GB,
+ &mmc->mmc_tx_octetcount_gb);
+ dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_TX_PKT_GB,
+ &mmc->mmc_tx_framecount_gb);
+ dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_TX_BROAD_PKT_G,
+ &mmc->mmc_tx_broadcastframe_g);
+ dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_TX_MULTI_PKT_G,
+ &mmc->mmc_tx_multicastframe_g);
+ dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_TX_64OCT_GB,
+ &mmc->mmc_tx_64_octets_gb);
+ dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_TX_65OCT_GB,
+ &mmc->mmc_tx_65_to_127_octets_gb);
+ dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_TX_128OCT_GB,
+ &mmc->mmc_tx_128_to_255_octets_gb);
+ dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_TX_256OCT_GB,
+ &mmc->mmc_tx_256_to_511_octets_gb);
+ dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_TX_512OCT_GB,
+ &mmc->mmc_tx_512_to_1023_octets_gb);
+ dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_TX_1024OCT_GB,
+ &mmc->mmc_tx_1024_to_max_octets_gb);
+ dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_TX_UNI_PKT_GB,
+ &mmc->mmc_tx_unicast_gb);
+ dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_TX_MULTI_PKT_GB,
+ &mmc->mmc_tx_multicast_gb);
+ dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_TX_BROAD_PKT_GB,
+ &mmc->mmc_tx_broadcast_gb);
+ dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_TX_UNDER,
+ &mmc->mmc_tx_underflow_error);
+ dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_TX_OCTET_G,
+ &mmc->mmc_tx_octetcount_g);
+ dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_TX_PKT_G,
+ &mmc->mmc_tx_framecount_g);
+ dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_TX_PAUSE,
+ &mmc->mmc_tx_pause_frame);
+ dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_TX_VLAN_PKT_G,
+ &mmc->mmc_tx_vlan_frame_g);
+
+ /* MMC RX counter registers */
+ dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_RX_PKT_GB,
+ &mmc->mmc_rx_framecount_gb);
+ dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_RX_OCTET_GB,
+ &mmc->mmc_rx_octetcount_gb);
+ dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_RX_OCTET_G,
+ &mmc->mmc_rx_octetcount_g);
+ dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_RX_BROAD_PKT_G,
+ &mmc->mmc_rx_broadcastframe_g);
+ dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_RX_MULTI_PKT_G,
+ &mmc->mmc_rx_multicastframe_g);
+ dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_RX_CRC_ERR,
+ &mmc->mmc_rx_crc_error);
+ mmc->mmc_rx_run_error += readl(mmcaddr + MMC_XGMAC_RX_RUNT_ERR);
+ mmc->mmc_rx_jabber_error += readl(mmcaddr + MMC_XGMAC_RX_JABBER_ERR);
+ mmc->mmc_rx_undersize_g += readl(mmcaddr + MMC_XGMAC_RX_UNDER);
+ mmc->mmc_rx_oversize_g += readl(mmcaddr + MMC_XGMAC_RX_OVER);
+ dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_RX_64OCT_GB,
+ &mmc->mmc_rx_64_octets_gb);
+ dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_RX_65OCT_GB,
+ &mmc->mmc_rx_65_to_127_octets_gb);
+ dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_RX_128OCT_GB,
+ &mmc->mmc_rx_128_to_255_octets_gb);
+ dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_RX_256OCT_GB,
+ &mmc->mmc_rx_256_to_511_octets_gb);
+ dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_RX_512OCT_GB,
+ &mmc->mmc_rx_512_to_1023_octets_gb);
+ dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_RX_1024OCT_GB,
+ &mmc->mmc_rx_1024_to_max_octets_gb);
+ dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_RX_UNI_PKT_G,
+ &mmc->mmc_rx_unicast_g);
+ dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_RX_LENGTH_ERR,
+ &mmc->mmc_rx_length_error);
+ dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_RX_RANGE,
+ &mmc->mmc_rx_autofrangetype);
+ dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_RX_PAUSE,
+ &mmc->mmc_rx_pause_frames);
+ dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_RX_FIFOOVER_PKT,
+ &mmc->mmc_rx_fifo_overflow);
+ dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_RX_VLAN_PKT_GB,
+ &mmc->mmc_rx_vlan_frames_gb);
+ mmc->mmc_rx_watchdog_error += readl(mmcaddr + MMC_XGMAC_RX_WATCHDOG_ERR);
+
+ mmc->mmc_tx_fpe_fragment_cntr += readl(mmcaddr + MMC_XGMAC_TX_FPE_FRAG);
+ mmc->mmc_tx_hold_req_cntr += readl(mmcaddr + MMC_XGMAC_TX_HOLD_REQ);
+ mmc->mmc_rx_packet_assembly_err_cntr +=
+ readl(mmcaddr + MMC_XGMAC_RX_PKT_ASSEMBLY_ERR);
+ mmc->mmc_rx_packet_smd_err_cntr +=
+ readl(mmcaddr + MMC_XGMAC_RX_PKT_SMD_ERR);
+ mmc->mmc_rx_packet_assembly_ok_cntr +=
+ readl(mmcaddr + MMC_XGMAC_RX_PKT_ASSEMBLY_OK);
+ mmc->mmc_rx_fpe_fragment_cntr +=
+ readl(mmcaddr + MMC_XGMAC_RX_FPE_FRAG);
+}
+
+const struct stmmac_mmc_ops dwxgmac_mmc_ops = {
+ .ctrl = dwxgmac_mmc_ctrl,
+ .intr_all_mask = dwxgmac_mmc_intr_all_mask,
+ .read = dwxgmac_mmc_read,
+};
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac.h b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
index 5cd966c154f3..dcb2e29a5717 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac.h
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
@@ -13,6 +13,7 @@
#define DRV_MODULE_VERSION "Jan_2016"
#include <linux/clk.h>
+#include <linux/if_vlan.h>
#include <linux/stmmac.h>
#include <linux/phylink.h>
#include <linux/pci.h>
@@ -57,7 +58,9 @@ struct stmmac_tx_queue {
struct stmmac_rx_buffer {
struct page *page;
+ struct page *sec_page;
dma_addr_t addr;
+ dma_addr_t sec_addr;
};
struct stmmac_rx_queue {
@@ -73,6 +76,12 @@ struct stmmac_rx_queue {
u32 rx_zeroc_thresh;
dma_addr_t dma_rx_phy;
u32 rx_tail_addr;
+ unsigned int state_saved;
+ struct {
+ struct sk_buff *skb;
+ unsigned int len;
+ unsigned int error;
+ } state;
};
struct stmmac_channel {
@@ -113,6 +122,12 @@ struct stmmac_pps_cfg {
struct timespec64 period;
};
+struct stmmac_rss {
+ int enable;
+ u8 key[STMMAC_RSS_HASH_KEY_SIZE];
+ u32 table[STMMAC_RSS_MAX_TABLE_SIZE];
+};
+
struct stmmac_priv {
/* Frequently used values are kept adjacent for cache effect */
u32 tx_coal_frames;
@@ -123,6 +138,8 @@ struct stmmac_priv {
int hwts_tx_en;
bool tx_path_in_lpi_mode;
bool tso;
+ int sph;
+ u32 sarc_type;
unsigned int dma_buf_sz;
unsigned int rx_copybreak;
@@ -185,11 +202,10 @@ struct stmmac_priv {
spinlock_t ptp_lock;
void __iomem *mmcaddr;
void __iomem *ptpaddr;
+ unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
#ifdef CONFIG_DEBUG_FS
struct dentry *dbgfs_dir;
- struct dentry *dbgfs_rings_status;
- struct dentry *dbgfs_dma_cap;
#endif
unsigned long state;
@@ -203,6 +219,9 @@ struct stmmac_priv {
/* Pulse Per Second output */
struct stmmac_pps_cfg pps[STMMAC_PPS_MAX];
+
+ /* Receive Side Scaling */
+ struct stmmac_rss rss;
};
enum stmmac_state {
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
index 6efb66820d4c..1c450105e5a6 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
@@ -18,10 +18,12 @@
#include "stmmac.h"
#include "dwmac_dma.h"
+#include "dwxgmac2.h"
#define REG_SPACE_SIZE 0x1060
#define MAC100_ETHTOOL_NAME "st_mac100"
#define GMAC_ETHTOOL_NAME "st_gmac"
+#define XGMAC_ETHTOOL_NAME "st_xgmac"
#define ETHTOOL_DMA_OFFSET 55
@@ -65,6 +67,7 @@ static const struct stmmac_stats stmmac_gstrings_stats[] = {
STMMAC_STAT(rx_missed_cntr),
STMMAC_STAT(rx_overflow_cntr),
STMMAC_STAT(rx_vlan),
+ STMMAC_STAT(rx_split_hdr_pkt_n),
/* Tx/Rx IRQ error info */
STMMAC_STAT(tx_undeflow_irq),
STMMAC_STAT(tx_process_stopped_irq),
@@ -243,6 +246,12 @@ static const struct stmmac_stats stmmac_mmc[] = {
STMMAC_MMC_STAT(mmc_rx_tcp_err_octets),
STMMAC_MMC_STAT(mmc_rx_icmp_gd_octets),
STMMAC_MMC_STAT(mmc_rx_icmp_err_octets),
+ STMMAC_MMC_STAT(mmc_tx_fpe_fragment_cntr),
+ STMMAC_MMC_STAT(mmc_tx_hold_req_cntr),
+ STMMAC_MMC_STAT(mmc_rx_packet_assembly_err_cntr),
+ STMMAC_MMC_STAT(mmc_rx_packet_smd_err_cntr),
+ STMMAC_MMC_STAT(mmc_rx_packet_assembly_ok_cntr),
+ STMMAC_MMC_STAT(mmc_rx_fpe_fragment_cntr),
};
#define STMMAC_MMC_STATS_LEN ARRAY_SIZE(stmmac_mmc)
@@ -253,6 +262,8 @@ static void stmmac_ethtool_getdrvinfo(struct net_device *dev,
if (priv->plat->has_gmac || priv->plat->has_gmac4)
strlcpy(info->driver, GMAC_ETHTOOL_NAME, sizeof(info->driver));
+ else if (priv->plat->has_xgmac)
+ strlcpy(info->driver, XGMAC_ETHTOOL_NAME, sizeof(info->driver));
else
strlcpy(info->driver, MAC100_ETHTOOL_NAME,
sizeof(info->driver));
@@ -398,23 +409,28 @@ static int stmmac_check_if_running(struct net_device *dev)
static int stmmac_ethtool_get_regs_len(struct net_device *dev)
{
+ struct stmmac_priv *priv = netdev_priv(dev);
+
+ if (priv->plat->has_xgmac)
+ return XGMAC_REGSIZE * 4;
return REG_SPACE_SIZE;
}
static void stmmac_ethtool_gregs(struct net_device *dev,
struct ethtool_regs *regs, void *space)
{
- u32 *reg_space = (u32 *) space;
-
struct stmmac_priv *priv = netdev_priv(dev);
-
- memset(reg_space, 0x0, REG_SPACE_SIZE);
+ u32 *reg_space = (u32 *) space;
stmmac_dump_mac_regs(priv, priv->hw, reg_space);
stmmac_dump_dma_regs(priv, priv->ioaddr, reg_space);
- /* Copy DMA registers to where ethtool expects them */
- memcpy(&reg_space[ETHTOOL_DMA_OFFSET], &reg_space[DMA_BUS_MODE / 4],
- NUM_DWMAC1000_DMA_REGS * 4);
+
+ if (!priv->plat->has_xgmac) {
+ /* Copy DMA registers to where ethtool expects them */
+ memcpy(&reg_space[ETHTOOL_DMA_OFFSET],
+ &reg_space[DMA_BUS_MODE / 4],
+ NUM_DWMAC1000_DMA_REGS * 4);
+ }
}
static int stmmac_nway_reset(struct net_device *dev)
@@ -758,6 +774,76 @@ static int stmmac_set_coalesce(struct net_device *dev,
return 0;
}
+static int stmmac_get_rxnfc(struct net_device *dev,
+ struct ethtool_rxnfc *rxnfc, u32 *rule_locs)
+{
+ struct stmmac_priv *priv = netdev_priv(dev);
+
+ switch (rxnfc->cmd) {
+ case ETHTOOL_GRXRINGS:
+ rxnfc->data = priv->plat->rx_queues_to_use;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static u32 stmmac_get_rxfh_key_size(struct net_device *dev)
+{
+ struct stmmac_priv *priv = netdev_priv(dev);
+
+ return sizeof(priv->rss.key);
+}
+
+static u32 stmmac_get_rxfh_indir_size(struct net_device *dev)
+{
+ struct stmmac_priv *priv = netdev_priv(dev);
+
+ return ARRAY_SIZE(priv->rss.table);
+}
+
+static int stmmac_get_rxfh(struct net_device *dev, u32 *indir, u8 *key,
+ u8 *hfunc)
+{
+ struct stmmac_priv *priv = netdev_priv(dev);
+ int i;
+
+ if (indir) {
+ for (i = 0; i < ARRAY_SIZE(priv->rss.table); i++)
+ indir[i] = priv->rss.table[i];
+ }
+
+ if (key)
+ memcpy(key, priv->rss.key, sizeof(priv->rss.key));
+ if (hfunc)
+ *hfunc = ETH_RSS_HASH_TOP;
+
+ return 0;
+}
+
+static int stmmac_set_rxfh(struct net_device *dev, const u32 *indir,
+ const u8 *key, const u8 hfunc)
+{
+ struct stmmac_priv *priv = netdev_priv(dev);
+ int i;
+
+ if ((hfunc != ETH_RSS_HASH_NO_CHANGE) && (hfunc != ETH_RSS_HASH_TOP))
+ return -EOPNOTSUPP;
+
+ if (indir) {
+ for (i = 0; i < ARRAY_SIZE(priv->rss.table); i++)
+ priv->rss.table[i] = indir[i];
+ }
+
+ if (key)
+ memcpy(priv->rss.key, key, sizeof(priv->rss.key));
+
+ return stmmac_rss_configure(priv, priv->hw, &priv->rss,
+ priv->plat->rx_queues_to_use);
+}
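+
+/*
+ * These hooks surface through the standard ethtool RXFH interface,
+ * e.g. (hypothetical invocations):
+ *
+ *   ethtool -x eth0            # dump the RSS key and indirection table
+ *   ethtool -X eth0 equal 4    # spread flows evenly over 4 RX queues
+ */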
+
static int stmmac_get_ts_info(struct net_device *dev,
struct ethtool_ts_info *info)
{
@@ -849,6 +935,11 @@ static const struct ethtool_ops stmmac_ethtool_ops = {
.get_eee = stmmac_ethtool_op_get_eee,
.set_eee = stmmac_ethtool_op_set_eee,
.get_sset_count = stmmac_get_sset_count,
+ .get_rxnfc = stmmac_get_rxnfc,
+ .get_rxfh_key_size = stmmac_get_rxfh_key_size,
+ .get_rxfh_indir_size = stmmac_get_rxfh_indir_size,
+ .get_rxfh = stmmac_get_rxfh,
+ .set_rxfh = stmmac_set_rxfh,
.get_ts_info = stmmac_get_ts_info,
.get_coalesce = stmmac_get_coalesce,
.set_coalesce = stmmac_set_coalesce,
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index fd54c7c87485..06ccd216ae90 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -105,7 +105,7 @@ MODULE_PARM_DESC(chain_mode, "To use chain instead of ring mode");
static irqreturn_t stmmac_interrupt(int irq, void *dev_id);
#ifdef CONFIG_DEBUG_FS
-static int stmmac_init_fs(struct net_device *dev);
+static void stmmac_init_fs(struct net_device *dev);
static void stmmac_exit_fs(struct net_device *dev);
#endif
@@ -432,6 +432,7 @@ static void stmmac_get_tx_hwtstamp(struct stmmac_priv *priv,
struct dma_desc *p, struct sk_buff *skb)
{
struct skb_shared_hwtstamps shhwtstamp;
+ bool found = false;
u64 ns = 0;
if (!priv->hwts_tx_en)
@@ -443,9 +444,13 @@ static void stmmac_get_tx_hwtstamp(struct stmmac_priv *priv,
/* check tx tstamp status */
if (stmmac_get_tx_timestamp_status(priv, p)) {
- /* get the valid tstamp */
stmmac_get_timestamp(priv, p, priv->adv_ts, &ns);
+ found = true;
+ } else if (!stmmac_get_mac_tx_timestamp(priv, priv->hw, &ns)) {
+ found = true;
+ }
+ if (found) {
memset(&shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
shhwtstamp.hwtstamp = ns_to_ktime(ns);
@@ -453,8 +458,6 @@ static void stmmac_get_tx_hwtstamp(struct stmmac_priv *priv,
/* pass tstamp to stack */
skb_tstamp_tx(skb, &shhwtstamp);
}
-
- return;
}
/* stmmac_get_rx_hwtstamp - get HW RX timestamps
@@ -1198,6 +1201,17 @@ static int stmmac_init_rx_buffers(struct stmmac_priv *priv, struct dma_desc *p,
if (!buf->page)
return -ENOMEM;
+ if (priv->sph) {
+ buf->sec_page = page_pool_dev_alloc_pages(rx_q->page_pool);
+ if (!buf->sec_page)
+ return -ENOMEM;
+
+ buf->sec_addr = page_pool_get_dma_addr(buf->sec_page);
+ stmmac_set_desc_sec_addr(priv, p, buf->sec_addr);
+ } else {
+ buf->sec_page = NULL;
+ }
+
buf->addr = page_pool_get_dma_addr(buf->page);
stmmac_set_desc_addr(priv, p, buf->addr);
if (priv->dma_buf_sz == BUF_SIZE_16KiB)
@@ -1220,6 +1234,10 @@ static void stmmac_free_rx_buffer(struct stmmac_priv *priv, u32 queue, int i)
if (buf->page)
page_pool_put_page(rx_q->page_pool, buf->page, false);
buf->page = NULL;
+
+ if (buf->sec_page)
+ page_pool_put_page(rx_q->page_pool, buf->sec_page, false);
+ buf->sec_page = NULL;
}
/**
@@ -2417,6 +2435,22 @@ static void stmmac_mac_config_rx_queues_routing(struct stmmac_priv *priv)
}
}
+static void stmmac_mac_config_rss(struct stmmac_priv *priv)
+{
+ if (!priv->dma_cap.rssen || !priv->plat->rss_en) {
+ priv->rss.enable = false;
+ return;
+ }
+
+ if (priv->dev->features & NETIF_F_RXHASH)
+ priv->rss.enable = true;
+ else
+ priv->rss.enable = false;
+
+ stmmac_rss_configure(priv, priv->hw, &priv->rss,
+ priv->plat->rx_queues_to_use);
+}
+
/**
* stmmac_mtl_configuration - Configure MTL
* @priv: driver private structure
@@ -2461,6 +2495,10 @@ static void stmmac_mtl_configuration(struct stmmac_priv *priv)
/* Set RX routing */
if (rx_queues_count > 1)
stmmac_mac_config_rx_queues_routing(priv);
+
+ /* Receive Side Scaling */
+ if (rx_queues_count > 1)
+ stmmac_mac_config_rss(priv);
}
static void stmmac_safety_feat_configuration(struct stmmac_priv *priv)
@@ -2573,6 +2611,16 @@ static int stmmac_hw_setup(struct net_device *dev, bool init_ptp)
stmmac_enable_tso(priv, priv->ioaddr, 1, chan);
}
+ /* Enable Split Header */
+ if (priv->sph && priv->hw->rx_csum) {
+ for (chan = 0; chan < rx_cnt; chan++)
+ stmmac_enable_sph(priv, priv->ioaddr, 1, chan);
+ }
+
+ /* VLAN Tag Insertion */
+ if (priv->dma_cap.vlins)
+ stmmac_enable_vlan(priv, priv->hw, STMMAC_VLAN_INSERT);
+
/* Start the ball rolling... */
stmmac_start_all_dma(priv);
@@ -2750,6 +2798,33 @@ static int stmmac_release(struct net_device *dev)
return 0;
}
+static bool stmmac_vlan_insert(struct stmmac_priv *priv, struct sk_buff *skb,
+ struct stmmac_tx_queue *tx_q)
+{
+ u16 tag = 0x0, inner_tag = 0x0;
+ u32 inner_type = 0x0;
+ struct dma_desc *p;
+
+ if (!priv->dma_cap.vlins)
+ return false;
+ if (!skb_vlan_tag_present(skb))
+ return false;
+ if (skb->vlan_proto == htons(ETH_P_8021AD)) {
+ inner_tag = skb_vlan_tag_get(skb);
+ inner_type = STMMAC_VLAN_INSERT;
+ }
+
+ tag = skb_vlan_tag_get(skb);
+
+ p = tx_q->dma_tx + tx_q->cur_tx;
+ if (stmmac_set_desc_vlan_tag(priv, p, tag, inner_tag, inner_type))
+ return false;
+
+ stmmac_set_tx_owner(priv, p);
+ tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, DMA_TX_SIZE);
+ return true;
+}
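+
+/*
+ * Note the descriptor cost: on success one context descriptor carrying
+ * the tag(s) is consumed ahead of the packet, so cur_tx advances by one
+ * before the data descriptors are filled. The callers below then only
+ * flag the first data descriptor with stmmac_set_desc_vlan().
+ */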
+
/**
* stmmac_tso_allocator - close entry point of the driver
* @priv: driver private structure
@@ -2829,12 +2904,13 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
struct stmmac_priv *priv = netdev_priv(dev);
int nfrags = skb_shinfo(skb)->nr_frags;
u32 queue = skb_get_queue_mapping(skb);
- unsigned int first_entry;
struct stmmac_tx_queue *tx_q;
+ unsigned int first_entry;
int tmp_pay_len = 0;
u32 pay_len, mss;
u8 proto_hdr_len;
dma_addr_t des;
+ bool has_vlan;
int i;
tx_q = &priv->tx_queue[queue];
@@ -2876,12 +2952,18 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
skb->data_len);
}
+ /* Check if VLAN can be inserted by HW */
+ has_vlan = stmmac_vlan_insert(priv, skb, tx_q);
+
first_entry = tx_q->cur_tx;
WARN_ON(tx_q->tx_skbuff[first_entry]);
desc = tx_q->dma_tx + first_entry;
first = desc;
+ if (has_vlan)
+ stmmac_set_desc_vlan(priv, first, STMMAC_VLAN_INSERT);
+
/* first descriptor: fill Headers on Buf1 */
des = dma_map_single(priv->device, skb->data, skb_headlen(skb),
DMA_TO_DEVICE);
@@ -2960,6 +3042,9 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
priv->xstats.tx_set_ic_bit++;
}
+ if (priv->sarc_type)
+ stmmac_set_desc_sarc(priv, first, priv->sarc_type);
+
skb_tx_timestamp(skb);
if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
@@ -3038,6 +3123,7 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
unsigned int first_entry;
unsigned int enh_desc;
dma_addr_t des;
+ bool has_vlan;
int entry;
tx_q = &priv->tx_queue[queue];
@@ -3063,6 +3149,9 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
return NETDEV_TX_BUSY;
}
+ /* Check if VLAN can be inserted by HW */
+ has_vlan = stmmac_vlan_insert(priv, skb, tx_q);
+
entry = tx_q->cur_tx;
first_entry = entry;
WARN_ON(tx_q->tx_skbuff[first_entry]);
@@ -3076,6 +3165,9 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
first = desc;
+ if (has_vlan)
+ stmmac_set_desc_vlan(priv, first, STMMAC_VLAN_INSERT);
+
enh_desc = priv->plat->enh_desc;
/* To program the descriptors according to the size of the frame */
if (enh_desc)
@@ -3173,6 +3265,9 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
priv->xstats.tx_set_ic_bit++;
}
+ if (priv->sarc_type)
+ stmmac_set_desc_sarc(priv, first, priv->sarc_type);
+
skb_tx_timestamp(skb);
/* Ready to fill the first descriptor and set the OWN bit w/o any
@@ -3292,6 +3387,17 @@ static inline void stmmac_rx_refill(struct stmmac_priv *priv, u32 queue)
break;
}
+ if (priv->sph && !buf->sec_page) {
+ buf->sec_page = page_pool_dev_alloc_pages(rx_q->page_pool);
+ if (!buf->sec_page)
+ break;
+
+ buf->sec_addr = page_pool_get_dma_addr(buf->sec_page);
+
+ dma_sync_single_for_device(priv->device, buf->sec_addr,
+ len, DMA_FROM_DEVICE);
+ }
+
buf->addr = page_pool_get_dma_addr(buf->page);
/* Sync whole allocation to device. This will invalidate old
@@ -3301,6 +3407,7 @@ static inline void stmmac_rx_refill(struct stmmac_priv *priv, u32 queue)
DMA_FROM_DEVICE);
stmmac_set_desc_addr(priv, p, buf->addr);
+ stmmac_set_desc_sec_addr(priv, p, buf->sec_addr);
stmmac_refill_desc3(priv, rx_q, p);
rx_q->rx_count_frames++;
@@ -3330,9 +3437,10 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
{
struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
struct stmmac_channel *ch = &priv->channel[queue];
+ unsigned int count = 0, error = 0, len = 0;
+ int status = 0, coe = priv->hw->rx_csum;
unsigned int next_entry = rx_q->cur_rx;
- int coe = priv->hw->rx_csum;
- unsigned int count = 0;
+ struct sk_buff *skb = NULL;
if (netif_msg_rx_status(priv)) {
void *rx_head;
@@ -3346,10 +3454,30 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
stmmac_display_ring(priv, rx_head, DMA_RX_SIZE, true);
}
while (count < limit) {
+ unsigned int hlen = 0, prev_len = 0;
+ enum pkt_hash_types hash_type;
struct stmmac_rx_buffer *buf;
struct dma_desc *np, *p;
- int entry, status;
+ unsigned int sec_len;
+ int entry;
+ u32 hash;
+
+ if (!count && rx_q->state_saved) {
+ skb = rx_q->state.skb;
+ error = rx_q->state.error;
+ len = rx_q->state.len;
+ } else {
+ rx_q->state_saved = false;
+ skb = NULL;
+ error = 0;
+ len = 0;
+ }
+ if (count >= limit)
+ break;
+
+read_again:
+ sec_len = 0;
entry = next_entry;
buf = &rx_q->buf_pool[entry];
@@ -3376,6 +3504,7 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
np = rx_q->dma_rx + next_entry;
prefetch(np);
+ prefetch(page_address(buf->page));
if (priv->extend_desc)
stmmac_rx_extended_status(priv, &priv->dev->stats,
@@ -3384,26 +3513,23 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
page_pool_recycle_direct(rx_q->page_pool, buf->page);
priv->dev->stats.rx_errors++;
buf->page = NULL;
- } else {
- struct sk_buff *skb;
- int frame_len;
- unsigned int des;
+ error = 1;
+ }
- stmmac_get_desc_addr(priv, p, &des);
- frame_len = stmmac_get_rx_frame_len(priv, p, coe);
+ if (unlikely(error && (status & rx_not_ls)))
+ goto read_again;
+ if (unlikely(error)) {
+ dev_kfree_skb(skb);
+ continue;
+ }
- /* If frame length is greater than skb buffer size
- * (preallocated during init) then the packet is
- * ignored
- */
- if (frame_len > priv->dma_buf_sz) {
- if (net_ratelimit())
- netdev_err(priv->dev,
- "len %d larger than size (%d)\n",
- frame_len, priv->dma_buf_sz);
- priv->dev->stats.rx_length_errors++;
- continue;
- }
+ /* Buffer is good. Go on. */
+
+ if (likely(status & rx_not_ls)) {
+ len += priv->dma_buf_sz;
+ } else {
+ prev_len = len;
+ len = stmmac_get_rx_frame_len(priv, p, coe);
/* ACS is set; GMAC core strips PAD/FCS for IEEE 802.3
* Type frames (LLC/LLC-SNAP)
@@ -3414,53 +3540,97 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
*/
if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00) ||
unlikely(status != llc_snap))
- frame_len -= ETH_FCS_LEN;
+ len -= ETH_FCS_LEN;
+ }
+
+ if (!skb) {
+ int ret = stmmac_get_rx_header_len(priv, p, &hlen);
- if (netif_msg_rx_status(priv)) {
- netdev_dbg(priv->dev, "\tdesc: %p [entry %d] buff=0x%x\n",
- p, entry, des);
- netdev_dbg(priv->dev, "frame size %d, COE: %d\n",
- frame_len, status);
+ if (priv->sph && !ret && (hlen > 0)) {
+ sec_len = len;
+ if (!(status & rx_not_ls))
+ sec_len = sec_len - hlen;
+ len = hlen;
+
+ prefetch(page_address(buf->sec_page));
+ priv->xstats.rx_split_hdr_pkt_n++;
}
- skb = netdev_alloc_skb_ip_align(priv->dev, frame_len);
- if (unlikely(!skb)) {
+ skb = napi_alloc_skb(&ch->rx_napi, len);
+ if (!skb) {
priv->dev->stats.rx_dropped++;
continue;
}
- dma_sync_single_for_cpu(priv->device, buf->addr,
- frame_len, DMA_FROM_DEVICE);
+ dma_sync_single_for_cpu(priv->device, buf->addr, len,
+ DMA_FROM_DEVICE);
skb_copy_to_linear_data(skb, page_address(buf->page),
- frame_len);
- skb_put(skb, frame_len);
-
- if (netif_msg_pktdata(priv)) {
- netdev_dbg(priv->dev, "frame received (%dbytes)",
- frame_len);
- print_pkt(skb->data, frame_len);
- }
+ len);
+ skb_put(skb, len);
- stmmac_get_rx_hwtstamp(priv, p, np, skb);
+ /* Data payload copied into SKB, page ready for recycle */
+ page_pool_recycle_direct(rx_q->page_pool, buf->page);
+ buf->page = NULL;
+ } else {
+ unsigned int buf_len = len - prev_len;
- stmmac_rx_vlan(priv->dev, skb);
+ if (likely(status & rx_not_ls))
+ buf_len = priv->dma_buf_sz;
- skb->protocol = eth_type_trans(skb, priv->dev);
+ dma_sync_single_for_cpu(priv->device, buf->addr,
+ buf_len, DMA_FROM_DEVICE);
+ skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
+ buf->page, 0, buf_len,
+ priv->dma_buf_sz);
- if (unlikely(!coe))
- skb_checksum_none_assert(skb);
- else
- skb->ip_summed = CHECKSUM_UNNECESSARY;
+ /* Data payload appended into SKB */
+ page_pool_release_page(rx_q->page_pool, buf->page);
+ buf->page = NULL;
+ }
- napi_gro_receive(&ch->rx_napi, skb);
+ if (sec_len > 0) {
+ dma_sync_single_for_cpu(priv->device, buf->sec_addr,
+ sec_len, DMA_FROM_DEVICE);
+ skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
+ buf->sec_page, 0, sec_len,
+ priv->dma_buf_sz);
- /* Data payload copied into SKB, page ready for recycle */
- page_pool_recycle_direct(rx_q->page_pool, buf->page);
- buf->page = NULL;
+ len += sec_len;
- priv->dev->stats.rx_packets++;
- priv->dev->stats.rx_bytes += frame_len;
+ /* Data payload appended into SKB */
+ page_pool_release_page(rx_q->page_pool, buf->sec_page);
+ buf->sec_page = NULL;
}
+
+ if (likely(status & rx_not_ls))
+ goto read_again;
+
+ /* Got entire packet into SKB. Finish it. */
+
+ stmmac_get_rx_hwtstamp(priv, p, np, skb);
+ stmmac_rx_vlan(priv->dev, skb);
+ skb->protocol = eth_type_trans(skb, priv->dev);
+
+ if (unlikely(!coe))
+ skb_checksum_none_assert(skb);
+ else
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+
+ if (!stmmac_get_rx_hash(priv, p, &hash, &hash_type))
+ skb_set_hash(skb, hash, hash_type);
+
+ skb_record_rx_queue(skb, queue);
+ napi_gro_receive(&ch->rx_napi, skb);
+
+ priv->dev->stats.rx_packets++;
+ priv->dev->stats.rx_bytes += len;
+ }
+
+ if (status & rx_not_ls) {
+ rx_q->state_saved = true;
+ rx_q->state.skb = skb;
+ rx_q->state.error = error;
+ rx_q->state.len = len;
}
stmmac_rx_refill(priv, queue);
@@ -3606,6 +3776,8 @@ static int stmmac_set_features(struct net_device *netdev,
netdev_features_t features)
{
struct stmmac_priv *priv = netdev_priv(netdev);
+ bool sph_en;
+ u32 chan;
/* Keep the COE Type in case of csum is supporting */
if (features & NETIF_F_RXCSUM)
@@ -3617,6 +3789,10 @@ static int stmmac_set_features(struct net_device *netdev,
*/
stmmac_rx_ipc(priv, priv->hw);
+ sph_en = (priv->hw->rx_csum > 0) && priv->sph;
+ for (chan = 0; chan < priv->plat->rx_queues_to_use; chan++)
+ stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan);
+
return 0;
}
@@ -3962,54 +4138,102 @@ static int stmmac_dma_cap_show(struct seq_file *seq, void *v)
}
DEFINE_SHOW_ATTRIBUTE(stmmac_dma_cap);
-static int stmmac_init_fs(struct net_device *dev)
+static void stmmac_init_fs(struct net_device *dev)
{
struct stmmac_priv *priv = netdev_priv(dev);
/* Create per netdev entries */
priv->dbgfs_dir = debugfs_create_dir(dev->name, stmmac_fs_dir);
- if (!priv->dbgfs_dir || IS_ERR(priv->dbgfs_dir)) {
- netdev_err(priv->dev, "ERROR failed to create debugfs directory\n");
+ /* Entry to report DMA RX/TX rings */
+ debugfs_create_file("descriptors_status", 0444, priv->dbgfs_dir, dev,
+ &stmmac_rings_status_fops);
- return -ENOMEM;
+ /* Entry to report the DMA HW features */
+ debugfs_create_file("dma_cap", 0444, priv->dbgfs_dir, dev,
+ &stmmac_dma_cap_fops);
+}
+
+static void stmmac_exit_fs(struct net_device *dev)
+{
+ struct stmmac_priv *priv = netdev_priv(dev);
+
+ debugfs_remove_recursive(priv->dbgfs_dir);
+}
+#endif /* CONFIG_DEBUG_FS */
+
+static u32 stmmac_vid_crc32_le(__le16 vid_le)
+{
+ unsigned char *data = (unsigned char *)&vid_le;
+ unsigned char data_byte = 0;
+ u32 crc = ~0x0;
+ u32 temp = 0;
+ int i, bits;
+
+ bits = get_bitmask_order(VLAN_VID_MASK);
+ for (i = 0; i < bits; i++) {
+ if ((i % 8) == 0)
+ data_byte = data[i / 8];
+
+ temp = ((crc & 1) ^ data_byte) & 1;
+ crc >>= 1;
+ data_byte >>= 1;
+
+ if (temp)
+ crc ^= 0xedb88320;
}
- /* Entry to report DMA RX/TX rings */
- priv->dbgfs_rings_status =
- debugfs_create_file("descriptors_status", 0444,
- priv->dbgfs_dir, dev,
- &stmmac_rings_status_fops);
+ return crc;
+}
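+
+/*
+ * As with the legacy GMAC multicast filter, only the top nibble of the
+ * bit-reversed, inverted CRC is used: bitrev32(~crc) >> 28 in
+ * stmmac_vlan_update() below yields a 4-bit index into a 16-entry VLAN
+ * hash table, and one bit is ORed into the 16-bit hash per active VID.
+ */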
- if (!priv->dbgfs_rings_status || IS_ERR(priv->dbgfs_rings_status)) {
- netdev_err(priv->dev, "ERROR creating stmmac ring debugfs file\n");
- debugfs_remove_recursive(priv->dbgfs_dir);
+static int stmmac_vlan_update(struct stmmac_priv *priv, bool is_double)
+{
+ u32 crc, hash = 0;
+ u16 vid;
- return -ENOMEM;
+ for_each_set_bit(vid, priv->active_vlans, VLAN_N_VID) {
+ __le16 vid_le = cpu_to_le16(vid);
+ crc = bitrev32(~stmmac_vid_crc32_le(vid_le)) >> 28;
+ hash |= (1 << crc);
}
- /* Entry to report the DMA HW features */
- priv->dbgfs_dma_cap = debugfs_create_file("dma_cap", 0444,
- priv->dbgfs_dir,
- dev, &stmmac_dma_cap_fops);
+ return stmmac_update_vlan_hash(priv, priv->hw, hash, is_double);
+}
+
+static int stmmac_vlan_rx_add_vid(struct net_device *ndev, __be16 proto, u16 vid)
+{
+ struct stmmac_priv *priv = netdev_priv(ndev);
+ bool is_double = false;
+ int ret;
- if (!priv->dbgfs_dma_cap || IS_ERR(priv->dbgfs_dma_cap)) {
- netdev_err(priv->dev, "ERROR creating stmmac MMC debugfs file\n");
- debugfs_remove_recursive(priv->dbgfs_dir);
+ if (!priv->dma_cap.vlhash)
+ return -EOPNOTSUPP;
+ if (be16_to_cpu(proto) == ETH_P_8021AD)
+ is_double = true;
- return -ENOMEM;
+ set_bit(vid, priv->active_vlans);
+ ret = stmmac_vlan_update(priv, is_double);
+ if (ret) {
+ clear_bit(vid, priv->active_vlans);
+ return ret;
}
- return 0;
+ return ret;
}
-static void stmmac_exit_fs(struct net_device *dev)
+static int stmmac_vlan_rx_kill_vid(struct net_device *ndev, __be16 proto, u16 vid)
{
- struct stmmac_priv *priv = netdev_priv(dev);
+ struct stmmac_priv *priv = netdev_priv(ndev);
+ bool is_double = false;
- debugfs_remove_recursive(priv->dbgfs_dir);
+ if (!priv->dma_cap.vlhash)
+ return -EOPNOTSUPP;
+ if (be16_to_cpu(proto) == ETH_P_8021AD)
+ is_double = true;
+
+ clear_bit(vid, priv->active_vlans);
+ return stmmac_vlan_update(priv, is_double);
}
-#endif /* CONFIG_DEBUG_FS */
static const struct net_device_ops stmmac_netdev_ops = {
.ndo_open = stmmac_open,
@@ -4027,6 +4251,8 @@ static const struct net_device_ops stmmac_netdev_ops = {
.ndo_poll_controller = stmmac_poll_controller,
#endif
.ndo_set_mac_address = stmmac_set_mac_address,
+ .ndo_vlan_rx_add_vid = stmmac_vlan_rx_add_vid,
+ .ndo_vlan_rx_kill_vid = stmmac_vlan_rx_kill_vid,
};
static void stmmac_reset_subtask(struct stmmac_priv *priv)
@@ -4175,8 +4401,8 @@ int stmmac_dvr_probe(struct device *device,
{
struct net_device *ndev = NULL;
struct stmmac_priv *priv;
- u32 queue, maxq;
- int ret = 0;
+ u32 queue, rxq, maxq;
+ int i, ret = 0;
ndev = devm_alloc_etherdev_mqs(device, sizeof(struct stmmac_priv),
MTL_MAX_TX_QUEUES, MTL_MAX_RX_QUEUES);
@@ -4259,6 +4485,12 @@ int stmmac_dvr_probe(struct device *device,
dev_info(priv->device, "TSO feature enabled\n");
}
+ if (priv->dma_cap.sphen) {
+ ndev->hw_features |= NETIF_F_GRO;
+ priv->sph = true;
+ dev_info(priv->device, "SPH feature enabled\n");
+ }
+
if (priv->dma_cap.addr64) {
ret = dma_set_mask_and_coherent(device,
DMA_BIT_MASK(priv->dma_cap.addr64));
@@ -4281,9 +4513,27 @@ int stmmac_dvr_probe(struct device *device,
#ifdef STMMAC_VLAN_TAG_USED
/* Both mac100 and gmac support receive VLAN tag detection */
ndev->features |= NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX;
+ if (priv->dma_cap.vlhash) {
+ ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
+ ndev->features |= NETIF_F_HW_VLAN_STAG_FILTER;
+ }
+ if (priv->dma_cap.vlins) {
+ ndev->features |= NETIF_F_HW_VLAN_CTAG_TX;
+ if (priv->dma_cap.dvlan)
+ ndev->features |= NETIF_F_HW_VLAN_STAG_TX;
+ }
#endif
priv->msg_enable = netif_msg_init(debug, default_msg_level);
+ /* Initialize RSS */
+ rxq = priv->plat->rx_queues_to_use;
+ netdev_rss_key_fill(priv->rss.key, sizeof(priv->rss.key));
+ for (i = 0; i < ARRAY_SIZE(priv->rss.table); i++)
+ priv->rss.table[i] = ethtool_rxfh_indir_default(i, rxq);
+
+ if (priv->dma_cap.rssen && priv->plat->rss_en)
+ ndev->features |= NETIF_F_RXHASH;
+
/* MTU range: 46 - hw-specific max */
ndev->min_mtu = ETH_ZLEN - ETH_HLEN;
if ((priv->plat->enh_desc) || (priv->synopsys_id >= DWMAC_CORE_4_00))
@@ -4368,10 +4618,7 @@ int stmmac_dvr_probe(struct device *device,
}
#ifdef CONFIG_DEBUG_FS
- ret = stmmac_init_fs(ndev);
- if (ret < 0)
- netdev_warn(priv->dev, "%s: failed debugFS registration\n",
- __func__);
+ stmmac_init_fs(ndev);
#endif
return ret;
@@ -4617,16 +4864,8 @@ static int __init stmmac_init(void)
{
#ifdef CONFIG_DEBUG_FS
/* Create debugfs main directory if it doesn't exist yet */
- if (!stmmac_fs_dir) {
+ if (!stmmac_fs_dir)
stmmac_fs_dir = debugfs_create_dir(STMMAC_RESOURCE_NAME, NULL);
-
- if (!stmmac_fs_dir || IS_ERR(stmmac_fs_dir)) {
- pr_err("ERROR %s, debugfs create directory failed\n",
- STMMAC_RESOURCE_NAME);
-
- return -ENOMEM;
- }
- }
#endif
return 0;
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
index 4304c1abc5d1..40c42637ad75 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
@@ -348,7 +348,9 @@ int stmmac_mdio_register(struct net_device *ndev)
max_addr = PHY_MAX_ADDR;
}
- new_bus->reset = &stmmac_mdio_reset;
+ if (mdio_bus_data->needs_reset)
+ new_bus->reset = &stmmac_mdio_reset;
+
snprintf(new_bus->id, MII_BUS_ID_SIZE, "%s-%x",
new_bus->name, priv->plat->bus_id);
new_bus->priv = ndev;
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
index 86f9c07a38cf..d5d08e11c353 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
@@ -63,6 +63,7 @@ static void common_default_data(struct plat_stmmacenet_data *plat)
plat->has_gmac = 1;
plat->force_sf_dma_mode = 1;
+ plat->mdio_bus_data->needs_reset = true;
plat->mdio_bus_data->phy_mask = 0;
/* Set default value for multicast hash bins */
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
index 154daf4d1072..eaf8f08f2e91 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
@@ -342,10 +342,16 @@ static int stmmac_dt_phy(struct plat_stmmacenet_data *plat,
mdio = true;
}
- if (mdio)
+ if (mdio) {
plat->mdio_bus_data =
devm_kzalloc(dev, sizeof(struct stmmac_mdio_bus_data),
GFP_KERNEL);
+ if (!plat->mdio_bus_data)
+ return -ENOMEM;
+
+ plat->mdio_bus_data->needs_reset = true;
+ }
+
return 0;
}
@@ -522,13 +528,15 @@ stmmac_probe_config_dt(struct platform_device *pdev, const char **mac)
}
/* clock setup */
- plat->stmmac_clk = devm_clk_get(&pdev->dev,
- STMMAC_RESOURCE_NAME);
- if (IS_ERR(plat->stmmac_clk)) {
- dev_warn(&pdev->dev, "Cannot get CSR clock\n");
- plat->stmmac_clk = NULL;
+ if (!of_device_is_compatible(np, "snps,dwc-qos-ethernet-4.10")) {
+ plat->stmmac_clk = devm_clk_get(&pdev->dev,
+ STMMAC_RESOURCE_NAME);
+ if (IS_ERR(plat->stmmac_clk)) {
+ dev_warn(&pdev->dev, "Cannot get CSR clock\n");
+ plat->stmmac_clk = NULL;
+ }
+ clk_prepare_enable(plat->stmmac_clk);
}
- clk_prepare_enable(plat->stmmac_clk);
plat->pclk = devm_clk_get(&pdev->dev, "pclk");
if (IS_ERR(plat->pclk)) {
@@ -609,13 +617,8 @@ int stmmac_get_platform_resources(struct platform_device *pdev,
* probe if needed before we went too far with resource allocation.
*/
stmmac_res->irq = platform_get_irq_byname(pdev, "macirq");
- if (stmmac_res->irq < 0) {
- if (stmmac_res->irq != -EPROBE_DEFER) {
- dev_err(&pdev->dev,
- "MAC IRQ configuration information not found\n");
- }
+ if (stmmac_res->irq < 0)
return stmmac_res->irq;
- }
/* On some platforms e.g. SPEAr the wake up irq differs from the mac irq
* The external wake up irq can be passed through the platform code
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c
index a97b1ea76438..ecc8602c6799 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c
@@ -11,8 +11,10 @@
#include <linux/ip.h>
#include <linux/phy.h>
#include <linux/udp.h>
+#include <net/pkt_cls.h>
#include <net/tcp.h>
#include <net/udp.h>
+#include <net/tc_act/tc_gact.h>
#include "stmmac.h"
struct stmmachdr {
@@ -43,6 +45,7 @@ struct stmmac_packet_attrs {
int size;
int remove_sa;
u8 id;
+ int sarc;
};
static u8 stmmac_test_next_id;
@@ -228,8 +231,11 @@ static int stmmac_test_loopback_validate(struct sk_buff *skb,
if (!ether_addr_equal(ehdr->h_dest, tpriv->packet->dst))
goto out;
}
- if (tpriv->packet->src) {
- if (!ether_addr_equal(ehdr->h_source, orig_ndev->dev_addr))
+ if (tpriv->packet->sarc) {
+ if (!ether_addr_equal(ehdr->h_source, ehdr->h_dest))
+ goto out;
+ } else if (tpriv->packet->src) {
+ if (!ether_addr_equal(ehdr->h_source, tpriv->packet->src))
goto out;
}
@@ -290,7 +296,9 @@ static int __stmmac_test_loopback(struct stmmac_priv *priv,
tpriv->pt.dev = priv->dev;
tpriv->pt.af_packet_priv = tpriv;
tpriv->packet = attr;
- dev_add_pack(&tpriv->pt);
+
+ if (!attr->dont_wait)
+ dev_add_pack(&tpriv->pt);
skb = stmmac_test_get_udp_skb(priv, attr);
if (!skb) {
@@ -313,7 +321,8 @@ static int __stmmac_test_loopback(struct stmmac_priv *priv,
ret = !tpriv->ok;
cleanup:
- dev_remove_pack(&tpriv->pt);
+ if (!attr->dont_wait)
+ dev_remove_pack(&tpriv->pt);
kfree(tpriv);
return ret;
}
@@ -700,6 +709,465 @@ cleanup:
return ret;
}
+static int stmmac_test_rss(struct stmmac_priv *priv)
+{
+ struct stmmac_packet_attrs attr = { };
+
+ if (!priv->dma_cap.rssen || !priv->rss.enable)
+ return -EOPNOTSUPP;
+
+ attr.dst = priv->dev->dev_addr;
+ attr.exp_hash = true;
+ attr.sport = 0x321;
+ attr.dport = 0x123;
+
+ return __stmmac_test_loopback(priv, &attr);
+}
+
+static int stmmac_test_vlan_validate(struct sk_buff *skb,
+ struct net_device *ndev,
+ struct packet_type *pt,
+ struct net_device *orig_ndev)
+{
+ struct stmmac_test_priv *tpriv = pt->af_packet_priv;
+ struct stmmachdr *shdr;
+ struct ethhdr *ehdr;
+ struct udphdr *uhdr;
+ struct iphdr *ihdr;
+ u16 proto;
+
+ proto = tpriv->double_vlan ? ETH_P_8021AD : ETH_P_8021Q;
+
+ skb = skb_unshare(skb, GFP_ATOMIC);
+ if (!skb)
+ goto out;
+
+ if (skb_linearize(skb))
+ goto out;
+ if (skb_headlen(skb) < (STMMAC_TEST_PKT_SIZE - ETH_HLEN))
+ goto out;
+ if (tpriv->vlan_id) {
+ if (skb->vlan_proto != htons(proto))
+ goto out;
+ if (skb->vlan_tci != tpriv->vlan_id)
+ goto out;
+ }
+
+ ehdr = (struct ethhdr *)skb_mac_header(skb);
+ if (!ether_addr_equal(ehdr->h_dest, tpriv->packet->dst))
+ goto out;
+
+ ihdr = ip_hdr(skb);
+ if (tpriv->double_vlan)
+ ihdr = (struct iphdr *)(skb_network_header(skb) + 4);
+ if (ihdr->protocol != IPPROTO_UDP)
+ goto out;
+
+ uhdr = (struct udphdr *)((u8 *)ihdr + 4 * ihdr->ihl);
+ if (uhdr->dest != htons(tpriv->packet->dport))
+ goto out;
+
+ shdr = (struct stmmachdr *)((u8 *)uhdr + sizeof(*uhdr));
+ if (shdr->magic != cpu_to_be64(STMMAC_TEST_PKT_MAGIC))
+ goto out;
+
+ tpriv->ok = true;
+ complete(&tpriv->comp);
+
+out:
+ kfree_skb(skb);
+ return 0;
+}
+
+static int stmmac_test_vlanfilt(struct stmmac_priv *priv)
+{
+ struct stmmac_packet_attrs attr = { };
+ struct stmmac_test_priv *tpriv;
+ struct sk_buff *skb = NULL;
+ int ret = 0, i;
+
+ if (!priv->dma_cap.vlhash)
+ return -EOPNOTSUPP;
+
+ tpriv = kzalloc(sizeof(*tpriv), GFP_KERNEL);
+ if (!tpriv)
+ return -ENOMEM;
+
+ tpriv->ok = false;
+ init_completion(&tpriv->comp);
+
+ tpriv->pt.type = htons(ETH_P_IP);
+ tpriv->pt.func = stmmac_test_vlan_validate;
+ tpriv->pt.dev = priv->dev;
+ tpriv->pt.af_packet_priv = tpriv;
+ tpriv->packet = &attr;
+
+ /*
+ * As we use HASH filtering, false positives may appear. This is a
+ * specially chosen ID so that adjacent IDs (+4) have different
+ * HASH values.
+ */
+ tpriv->vlan_id = 0x123;
+ dev_add_pack(&tpriv->pt);
+
+ ret = vlan_vid_add(priv->dev, htons(ETH_P_8021Q), tpriv->vlan_id);
+ if (ret)
+ goto cleanup;
+
+ for (i = 0; i < 4; i++) {
+ attr.vlan = 1;
+ attr.vlan_id_out = tpriv->vlan_id + i;
+ attr.dst = priv->dev->dev_addr;
+ attr.sport = 9;
+ attr.dport = 9;
+
+ skb = stmmac_test_get_udp_skb(priv, &attr);
+ if (!skb) {
+ ret = -ENOMEM;
+ goto vlan_del;
+ }
+
+ skb_set_queue_mapping(skb, 0);
+ ret = dev_queue_xmit(skb);
+ if (ret)
+ goto vlan_del;
+
+ wait_for_completion_timeout(&tpriv->comp, STMMAC_LB_TIMEOUT);
+ ret = !tpriv->ok;
+ if (ret && !i) {
+ goto vlan_del;
+ } else if (!ret && i) {
+ ret = -1;
+ goto vlan_del;
+ } else {
+ ret = 0;
+ }
+
+ tpriv->ok = false;
+ }
+
+vlan_del:
+ vlan_vid_del(priv->dev, htons(ETH_P_8021Q), tpriv->vlan_id);
+cleanup:
+ dev_remove_pack(&tpriv->pt);
+ kfree(tpriv);
+ return ret;
+}
+
+static int stmmac_test_dvlanfilt(struct stmmac_priv *priv)
+{
+ struct stmmac_packet_attrs attr = { };
+ struct stmmac_test_priv *tpriv;
+ struct sk_buff *skb = NULL;
+ int ret = 0, i;
+
+ if (!priv->dma_cap.vlhash)
+ return -EOPNOTSUPP;
+
+ tpriv = kzalloc(sizeof(*tpriv), GFP_KERNEL);
+ if (!tpriv)
+ return -ENOMEM;
+
+ tpriv->ok = false;
+ tpriv->double_vlan = true;
+ init_completion(&tpriv->comp);
+
+ tpriv->pt.type = htons(ETH_P_8021Q);
+ tpriv->pt.func = stmmac_test_vlan_validate;
+ tpriv->pt.dev = priv->dev;
+ tpriv->pt.af_packet_priv = tpriv;
+ tpriv->packet = &attr;
+
+ /*
+ * As we use HASH filtering, false positives may appear. This is a
+ * specially chosen ID so that adjacent IDs (+4) have different
+ * HASH values.
+ */
+ tpriv->vlan_id = 0x123;
+ dev_add_pack(&tpriv->pt);
+
+ ret = vlan_vid_add(priv->dev, htons(ETH_P_8021AD), tpriv->vlan_id);
+ if (ret)
+ goto cleanup;
+
+ for (i = 0; i < 4; i++) {
+ attr.vlan = 2;
+ attr.vlan_id_out = tpriv->vlan_id + i;
+ attr.dst = priv->dev->dev_addr;
+ attr.sport = 9;
+ attr.dport = 9;
+
+ skb = stmmac_test_get_udp_skb(priv, &attr);
+ if (!skb) {
+ ret = -ENOMEM;
+ goto vlan_del;
+ }
+
+ skb_set_queue_mapping(skb, 0);
+ ret = dev_queue_xmit(skb);
+ if (ret)
+ goto vlan_del;
+
+ wait_for_completion_timeout(&tpriv->comp, STMMAC_LB_TIMEOUT);
+ ret = !tpriv->ok;
+ if (ret && !i) {
+ goto vlan_del;
+ } else if (!ret && i) {
+ ret = -1;
+ goto vlan_del;
+ } else {
+ ret = 0;
+ }
+
+ tpriv->ok = false;
+ }
+
+vlan_del:
+ vlan_vid_del(priv->dev, htons(ETH_P_8021AD), tpriv->vlan_id);
+cleanup:
+ dev_remove_pack(&tpriv->pt);
+ kfree(tpriv);
+ return ret;
+}
+
+#ifdef CONFIG_NET_CLS_ACT
+static int stmmac_test_rxp(struct stmmac_priv *priv)
+{
+ unsigned char addr[ETH_ALEN] = {0xde, 0xad, 0xbe, 0xef, 0x00, 0x00};
+ struct tc_cls_u32_offload cls_u32 = { };
+ struct stmmac_packet_attrs attr = { };
+ struct tc_action **actions, *act;
+ struct tc_u32_sel *sel;
+ struct tcf_exts *exts;
+ int ret, i, nk = 1;
+
+ if (!tc_can_offload(priv->dev))
+ return -EOPNOTSUPP;
+ if (!priv->dma_cap.frpsel)
+ return -EOPNOTSUPP;
+
+ sel = kzalloc(sizeof(*sel) + nk * sizeof(struct tc_u32_key), GFP_KERNEL);
+ if (!sel)
+ return -ENOMEM;
+
+ exts = kzalloc(sizeof(*exts), GFP_KERNEL);
+ if (!exts) {
+ ret = -ENOMEM;
+ goto cleanup_sel;
+ }
+
+ actions = kzalloc(nk * sizeof(*actions), GFP_KERNEL);
+ if (!actions) {
+ ret = -ENOMEM;
+ goto cleanup_exts;
+ }
+
+ act = kzalloc(nk * sizeof(*act), GFP_KERNEL);
+ if (!act) {
+ ret = -ENOMEM;
+ goto cleanup_actions;
+ }
+
+ cls_u32.command = TC_CLSU32_NEW_KNODE;
+ cls_u32.common.chain_index = 0;
+ cls_u32.common.protocol = htons(ETH_P_ALL);
+ cls_u32.knode.exts = exts;
+ cls_u32.knode.sel = sel;
+ cls_u32.knode.handle = 0x123;
+
+ exts->nr_actions = nk;
+ exts->actions = actions;
+ for (i = 0; i < nk; i++) {
+ struct tcf_gact *gact = to_gact(&act[i]);
+
+ actions[i] = &act[i];
+ gact->tcf_action = TC_ACT_SHOT;
+ }
+
+ sel->nkeys = nk;
+ sel->offshift = 0;
+ sel->keys[0].off = 6;
+ sel->keys[0].val = htonl(0xdeadbeef);
+ sel->keys[0].mask = ~0x0;
+
+ ret = stmmac_tc_setup_cls_u32(priv, priv, &cls_u32);
+ if (ret)
+ goto cleanup_act;
+
+ attr.dst = priv->dev->dev_addr;
+ attr.src = addr;
+
+ ret = __stmmac_test_loopback(priv, &attr);
+ ret = !ret; /* Shall NOT receive packet */
+
+ cls_u32.command = TC_CLSU32_DELETE_KNODE;
+ stmmac_tc_setup_cls_u32(priv, priv, &cls_u32);
+
+cleanup_act:
+ kfree(act);
+cleanup_actions:
+ kfree(actions);
+cleanup_exts:
+ kfree(exts);
+cleanup_sel:
+ kfree(sel);
+ return ret;
+}
+#else
+static int stmmac_test_rxp(struct stmmac_priv *priv)
+{
+ return -EOPNOTSUPP;
+}
+#endif
+
+static int stmmac_test_desc_sai(struct stmmac_priv *priv)
+{
+ unsigned char src[ETH_ALEN] = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00};
+ struct stmmac_packet_attrs attr = { };
+ int ret;
+
+ attr.remove_sa = true;
+ attr.sarc = true;
+ attr.src = src;
+ attr.dst = priv->dev->dev_addr;
+
+ priv->sarc_type = 0x1;
+
+ ret = __stmmac_test_loopback(priv, &attr);
+
+ priv->sarc_type = 0x0;
+ return ret;
+}
+
+static int stmmac_test_desc_sar(struct stmmac_priv *priv)
+{
+ unsigned char src[ETH_ALEN] = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00};
+ struct stmmac_packet_attrs attr = { };
+ int ret;
+
+ attr.sarc = true;
+ attr.src = src;
+ attr.dst = priv->dev->dev_addr;
+
+ priv->sarc_type = 0x2;
+
+ ret = __stmmac_test_loopback(priv, &attr);
+
+ priv->sarc_type = 0x0;
+ return ret;
+}
+
+static int stmmac_test_reg_sai(struct stmmac_priv *priv)
+{
+ unsigned char src[ETH_ALEN] = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00};
+ struct stmmac_packet_attrs attr = { };
+ int ret;
+
+ attr.remove_sa = true;
+ attr.sarc = true;
+ attr.src = src;
+ attr.dst = priv->dev->dev_addr;
+
+ if (stmmac_sarc_configure(priv, priv->ioaddr, 0x2))
+ return -EOPNOTSUPP;
+
+ ret = __stmmac_test_loopback(priv, &attr);
+
+ stmmac_sarc_configure(priv, priv->ioaddr, 0x0);
+ return ret;
+}
+
+static int stmmac_test_reg_sar(struct stmmac_priv *priv)
+{
+ unsigned char src[ETH_ALEN] = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00};
+ struct stmmac_packet_attrs attr = { };
+ int ret;
+
+ attr.sarc = true;
+ attr.src = src;
+ attr.dst = priv->dev->dev_addr;
+
+ if (stmmac_sarc_configure(priv, priv->ioaddr, 0x3))
+ return -EOPNOTSUPP;
+
+ ret = __stmmac_test_loopback(priv, &attr);
+
+ stmmac_sarc_configure(priv, priv->ioaddr, 0x0);
+ return ret;
+}
+
+static int stmmac_test_vlanoff_common(struct stmmac_priv *priv, bool svlan)
+{
+ struct stmmac_packet_attrs attr = { };
+ struct stmmac_test_priv *tpriv;
+ struct sk_buff *skb = NULL;
+ int ret = 0;
+ u16 proto;
+
+ if (!priv->dma_cap.vlins)
+ return -EOPNOTSUPP;
+
+ tpriv = kzalloc(sizeof(*tpriv), GFP_KERNEL);
+ if (!tpriv)
+ return -ENOMEM;
+
+ proto = svlan ? ETH_P_8021AD : ETH_P_8021Q;
+
+ tpriv->ok = false;
+ tpriv->double_vlan = svlan;
+ init_completion(&tpriv->comp);
+
+ tpriv->pt.type = svlan ? htons(ETH_P_8021Q) : htons(ETH_P_IP);
+ tpriv->pt.func = stmmac_test_vlan_validate;
+ tpriv->pt.dev = priv->dev;
+ tpriv->pt.af_packet_priv = tpriv;
+ tpriv->packet = &attr;
+ tpriv->vlan_id = 0x123;
+ dev_add_pack(&tpriv->pt);
+
+ ret = vlan_vid_add(priv->dev, htons(proto), tpriv->vlan_id);
+ if (ret)
+ goto cleanup;
+
+ attr.dst = priv->dev->dev_addr;
+
+ skb = stmmac_test_get_udp_skb(priv, &attr);
+ if (!skb) {
+ ret = -ENOMEM;
+ goto vlan_del;
+ }
+
+ __vlan_hwaccel_put_tag(skb, htons(proto), tpriv->vlan_id);
+ skb->protocol = htons(proto);
+
+ skb_set_queue_mapping(skb, 0);
+ ret = dev_queue_xmit(skb);
+ if (ret)
+ goto vlan_del;
+
+ wait_for_completion_timeout(&tpriv->comp, STMMAC_LB_TIMEOUT);
+ ret = tpriv->ok ? 0 : -ETIMEDOUT;
+
+vlan_del:
+ vlan_vid_del(priv->dev, htons(proto), tpriv->vlan_id);
+cleanup:
+ dev_remove_pack(&tpriv->pt);
+ kfree(tpriv);
+ return ret;
+}
+
+static int stmmac_test_vlanoff(struct stmmac_priv *priv)
+{
+ return stmmac_test_vlanoff_common(priv, false);
+}
+
+static int stmmac_test_svlanoff(struct stmmac_priv *priv)
+{
+ if (!priv->dma_cap.dvlan)
+ return -EOPNOTSUPP;
+ return stmmac_test_vlanoff_common(priv, true);
+}
+
#define STMMAC_LOOPBACK_NONE 0
#define STMMAC_LOOPBACK_MAC 1
#define STMMAC_LOOPBACK_PHY 2
@@ -745,6 +1213,46 @@ static const struct stmmac_test {
.name = "Flow Control ",
.lb = STMMAC_LOOPBACK_PHY,
.fn = stmmac_test_flowctrl,
+ }, {
+ .name = "RSS ",
+ .lb = STMMAC_LOOPBACK_PHY,
+ .fn = stmmac_test_rss,
+ }, {
+ .name = "VLAN Filtering ",
+ .lb = STMMAC_LOOPBACK_PHY,
+ .fn = stmmac_test_vlanfilt,
+ }, {
+ .name = "Double VLAN Filtering",
+ .lb = STMMAC_LOOPBACK_PHY,
+ .fn = stmmac_test_dvlanfilt,
+ }, {
+ .name = "Flexible RX Parser ",
+ .lb = STMMAC_LOOPBACK_PHY,
+ .fn = stmmac_test_rxp,
+ }, {
+ .name = "SA Insertion (desc) ",
+ .lb = STMMAC_LOOPBACK_PHY,
+ .fn = stmmac_test_desc_sai,
+ }, {
+ .name = "SA Replacement (desc)",
+ .lb = STMMAC_LOOPBACK_PHY,
+ .fn = stmmac_test_desc_sar,
+ }, {
+ .name = "SA Insertion (reg) ",
+ .lb = STMMAC_LOOPBACK_PHY,
+ .fn = stmmac_test_reg_sai,
+ }, {
+ .name = "SA Replacement (reg)",
+ .lb = STMMAC_LOOPBACK_PHY,
+ .fn = stmmac_test_reg_sar,
+ }, {
+ .name = "VLAN TX Insertion ",
+ .lb = STMMAC_LOOPBACK_PHY,
+ .fn = stmmac_test_vlanoff,
+ }, {
+ .name = "SVLAN TX Insertion ",
+ .lb = STMMAC_LOOPBACK_PHY,
+ .fn = stmmac_test_svlanoff,
},
};
diff --git a/drivers/net/ethernet/sun/cassini.c b/drivers/net/ethernet/sun/cassini.c
index 6fc05c106afc..c91876f8c536 100644
--- a/drivers/net/ethernet/sun/cassini.c
+++ b/drivers/net/ethernet/sun/cassini.c
@@ -2034,7 +2034,7 @@ static int cas_rx_process_pkt(struct cas *cp, struct cas_rx_comp *rxc,
__skb_frag_set_page(frag, page->buffer);
__skb_frag_ref(frag);
- frag->page_offset = off;
+ skb_frag_off_set(frag, off);
skb_frag_size_set(frag, hlen - swivel);
/* any more data? */
@@ -2058,7 +2058,7 @@ static int cas_rx_process_pkt(struct cas *cp, struct cas_rx_comp *rxc,
__skb_frag_set_page(frag, page->buffer);
__skb_frag_ref(frag);
- frag->page_offset = 0;
+ skb_frag_off_set(frag, 0);
skb_frag_size_set(frag, hlen);
RX_USED_ADD(page, hlen + cp->crc_size);
}
@@ -2816,7 +2816,7 @@ static inline int cas_xmit_tx_ringN(struct cas *cp, int ring,
mapping = skb_frag_dma_map(&cp->pdev->dev, fragp, 0, len,
DMA_TO_DEVICE);
- tabort = cas_calc_tabort(cp, fragp->page_offset, len);
+ tabort = cas_calc_tabort(cp, skb_frag_off(fragp), len);
if (unlikely(tabort)) {
void *addr;
@@ -2827,7 +2827,7 @@ static inline int cas_xmit_tx_ringN(struct cas *cp, int ring,
addr = cas_page_map(skb_frag_page(fragp));
memcpy(tx_tiny_buf(cp, ring, entry),
- addr + fragp->page_offset + len - tabort,
+ addr + skb_frag_off(fragp) + len - tabort,
tabort);
cas_page_unmap(addr);
mapping = tx_tiny_map(cp, ring, entry, tentry);
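This hunk, and the niu, sunvnet, netcp and netvsc hunks below, belong to the tree-wide move from open-coded frag->page_offset to the skb_frag_off()/skb_frag_off_set() accessors (the xlgmac and tehuti hunks switch the struct skb_frag_struct spelling to the skb_frag_t typedef for the same reason). Hiding the field behind accessors lets the skb_frag_t layout change later without touching drivers. A sketch of the accessor pair, simplified from include/linux/skbuff.h of this era:

	static inline unsigned int skb_frag_off(const skb_frag_t *frag)
	{
		return frag->page_offset;
	}

	static inline void skb_frag_off_set(skb_frag_t *frag,
					    unsigned int offset)
	{
		frag->page_offset = offset;
	}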
diff --git a/drivers/net/ethernet/sun/niu.c b/drivers/net/ethernet/sun/niu.c
index 0bc5863bffeb..f5fd1f3c07cc 100644
--- a/drivers/net/ethernet/sun/niu.c
+++ b/drivers/net/ethernet/sun/niu.c
@@ -6695,7 +6695,7 @@ static netdev_tx_t niu_start_xmit(struct sk_buff *skb,
len = skb_frag_size(frag);
mapping = np->ops->map_page(np->device, skb_frag_page(frag),
- frag->page_offset, len,
+ skb_frag_off(frag), len,
DMA_TO_DEVICE);
rp->tx_buffs[prod].skb = NULL;
diff --git a/drivers/net/ethernet/sun/sunvnet_common.c b/drivers/net/ethernet/sun/sunvnet_common.c
index baa3088b475c..8b94d9ad9e2b 100644
--- a/drivers/net/ethernet/sun/sunvnet_common.c
+++ b/drivers/net/ethernet/sun/sunvnet_common.c
@@ -1088,7 +1088,7 @@ static inline int vnet_skb_map(struct ldc_channel *lp, struct sk_buff *skb,
vaddr = kmap_atomic(skb_frag_page(f));
blen = skb_frag_size(f);
blen += 8 - (blen & 7);
- err = ldc_map_single(lp, vaddr + f->page_offset,
+ err = ldc_map_single(lp, vaddr + skb_frag_off(f),
blen, cookies + nc, ncookies - nc,
map_perm);
kunmap_atomic(vaddr);
@@ -1124,7 +1124,7 @@ static inline struct sk_buff *vnet_skb_shape(struct sk_buff *skb, int ncookies)
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
skb_frag_t *f = &skb_shinfo(skb)->frags[i];
- docopy |= f->page_offset & 7;
+ docopy |= skb_frag_off(f) & 7;
}
if (((unsigned long)skb->data & 7) != VNET_PACKET_SKIP ||
skb_tailroom(skb) < pad ||
@@ -1532,8 +1532,7 @@ out_dropped:
else if (port)
del_timer(&port->clean_timer);
rcu_read_unlock();
- if (skb)
- dev_kfree_skb(skb);
+ dev_kfree_skb(skb);
vnet_free_skbs(freeskbs);
dev->stats.tx_dropped++;
return NETDEV_TX_OK;
diff --git a/drivers/net/ethernet/synopsys/dwc-xlgmac-desc.c b/drivers/net/ethernet/synopsys/dwc-xlgmac-desc.c
index 031cf9c3435a..8c4195a9a2cc 100644
--- a/drivers/net/ethernet/synopsys/dwc-xlgmac-desc.c
+++ b/drivers/net/ethernet/synopsys/dwc-xlgmac-desc.c
@@ -503,7 +503,7 @@ static int xlgmac_map_tx_skb(struct xlgmac_channel *channel,
struct xlgmac_desc_data *desc_data;
unsigned int offset, datalen, len;
struct xlgmac_pkt_info *pkt_info;
- struct skb_frag_struct *frag;
+ skb_frag_t *frag;
unsigned int tso, vlan;
dma_addr_t skb_dma;
unsigned int i;
diff --git a/drivers/net/ethernet/synopsys/dwc-xlgmac-net.c b/drivers/net/ethernet/synopsys/dwc-xlgmac-net.c
index 1f8e9601592a..a1f5a1e61040 100644
--- a/drivers/net/ethernet/synopsys/dwc-xlgmac-net.c
+++ b/drivers/net/ethernet/synopsys/dwc-xlgmac-net.c
@@ -116,7 +116,7 @@ static void xlgmac_prep_tx_pkt(struct xlgmac_pdata *pdata,
struct sk_buff *skb,
struct xlgmac_pkt_info *pkt_info)
{
- struct skb_frag_struct *frag;
+ skb_frag_t *frag;
unsigned int context_desc;
unsigned int len;
unsigned int i;
diff --git a/drivers/net/ethernet/tehuti/tehuti.c b/drivers/net/ethernet/tehuti/tehuti.c
index 5d6960fe3309..0f8a924fc60c 100644
--- a/drivers/net/ethernet/tehuti/tehuti.c
+++ b/drivers/net/ethernet/tehuti/tehuti.c
@@ -1501,7 +1501,7 @@ bdx_tx_map_skb(struct bdx_priv *priv, struct sk_buff *skb,
bdx_tx_db_inc_wptr(db);
for (i = 0; i < nr_frags; i++) {
- const struct skb_frag_struct *frag;
+ const skb_frag_t *frag;
frag = &skb_shinfo(skb)->frags[i];
db->wptr->len = skb_frag_size(frag);
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
index a46b8b2e44e1..f298d714efd6 100644
--- a/drivers/net/ethernet/ti/cpsw.c
+++ b/drivers/net/ethernet/ti/cpsw.c
@@ -2764,7 +2764,7 @@ static int cpsw_probe(struct platform_device *pdev)
struct net_device *ndev;
struct cpsw_priv *priv;
void __iomem *ss_regs;
- struct resource *res, *ss_res;
+ struct resource *ss_res;
struct gpio_descs *mode;
const struct soc_device_attribute *soc;
struct cpsw_common *cpsw;
@@ -2799,8 +2799,7 @@ static int cpsw_probe(struct platform_device *pdev)
return PTR_ERR(ss_regs);
cpsw->regs = ss_regs;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
- cpsw->wr_regs = devm_ioremap_resource(dev, res);
+ cpsw->wr_regs = devm_platform_ioremap_resource(pdev, 1);
if (IS_ERR(cpsw->wr_regs))
return PTR_ERR(cpsw->wr_regs);
diff --git a/drivers/net/ethernet/ti/netcp_core.c b/drivers/net/ethernet/ti/netcp_core.c
index 642843945031..1b2702f74455 100644
--- a/drivers/net/ethernet/ti/netcp_core.c
+++ b/drivers/net/ethernet/ti/netcp_core.c
@@ -1116,7 +1116,7 @@ netcp_tx_map_skb(struct sk_buff *skb, struct netcp_intf *netcp)
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
struct page *page = skb_frag_page(frag);
- u32 page_offset = frag->page_offset;
+ u32 page_offset = skb_frag_off(frag);
u32 buf_len = skb_frag_size(frag);
dma_addr_t desc_dma;
u32 desc_dma_32;
diff --git a/drivers/net/ethernet/via/via-rhine.c b/drivers/net/ethernet/via/via-rhine.c
index ab55416a10fa..ed12dbd156f0 100644
--- a/drivers/net/ethernet/via/via-rhine.c
+++ b/drivers/net/ethernet/via/via-rhine.c
@@ -1127,15 +1127,13 @@ static int rhine_init_one_platform(struct platform_device *pdev)
const struct of_device_id *match;
const u32 *quirks;
int irq;
- struct resource *res;
void __iomem *ioaddr;
match = of_match_device(rhine_of_tbl, &pdev->dev);
if (!match)
return -EINVAL;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- ioaddr = devm_ioremap_resource(&pdev->dev, res);
+ ioaddr = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(ioaddr))
return PTR_ERR(ioaddr);
diff --git a/drivers/net/hamradio/baycom_epp.c b/drivers/net/hamradio/baycom_epp.c
index 9303aeb2595f..4476491b58f9 100644
--- a/drivers/net/hamradio/baycom_epp.c
+++ b/drivers/net/hamradio/baycom_epp.c
@@ -961,8 +961,7 @@ static int epp_close(struct net_device *dev)
parport_write_control(pp, 0); /* reset the adapter */
parport_release(bc->pdev);
parport_unregister_device(bc->pdev);
- if (bc->skb)
- dev_kfree_skb(bc->skb);
+ dev_kfree_skb(bc->skb);
bc->skb = NULL;
printk(KERN_INFO "%s: close epp at iobase 0x%lx irq %u\n",
bc_drvname, dev->base_addr, dev->irq);
diff --git a/drivers/net/hamradio/hdlcdrv.c b/drivers/net/hamradio/hdlcdrv.c
index c6f83e0df0a3..df495b5595f5 100644
--- a/drivers/net/hamradio/hdlcdrv.c
+++ b/drivers/net/hamradio/hdlcdrv.c
@@ -475,8 +475,7 @@ static int hdlcdrv_close(struct net_device *dev)
if (s->ops && s->ops->close)
i = s->ops->close(dev);
- if (s->skb)
- dev_kfree_skb(s->skb);
+ dev_kfree_skb(s->skb);
s->skb = NULL;
s->opened = 0;
return i;
diff --git a/drivers/net/hamradio/mkiss.c b/drivers/net/hamradio/mkiss.c
index 442018ccd65e..c5bfa19ddb93 100644
--- a/drivers/net/hamradio/mkiss.c
+++ b/drivers/net/hamradio/mkiss.c
@@ -25,6 +25,7 @@
#include <linux/skbuff.h>
#include <linux/if_arp.h>
#include <linux/jiffies.h>
+#include <linux/refcount.h>
#include <net/ax25.h>
@@ -70,7 +71,7 @@ struct mkiss {
#define CRC_MODE_FLEX_TEST 3
#define CRC_MODE_SMACK_TEST 4
- atomic_t refcnt;
+ refcount_t refcnt;
struct completion dead;
};
@@ -668,7 +669,7 @@ static struct mkiss *mkiss_get(struct tty_struct *tty)
read_lock(&disc_data_lock);
ax = tty->disc_data;
if (ax)
- atomic_inc(&ax->refcnt);
+ refcount_inc(&ax->refcnt);
read_unlock(&disc_data_lock);
return ax;
@@ -676,7 +677,7 @@ static struct mkiss *mkiss_get(struct tty_struct *tty)
static void mkiss_put(struct mkiss *ax)
{
- if (atomic_dec_and_test(&ax->refcnt))
+ if (refcount_dec_and_test(&ax->refcnt))
complete(&ax->dead);
}
@@ -704,7 +705,7 @@ static int mkiss_open(struct tty_struct *tty)
ax->dev = dev;
spin_lock_init(&ax->buflock);
- atomic_set(&ax->refcnt, 1);
+ refcount_set(&ax->refcnt, 1);
init_completion(&ax->dead);
ax->tty = tty;
@@ -784,7 +785,7 @@ static void mkiss_close(struct tty_struct *tty)
* We have now ensured that nobody can start using ap from now on, but
* we have to wait for all existing users to finish.
*/
- if (!atomic_dec_and_test(&ax->refcnt))
+ if (!refcount_dec_and_test(&ax->refcnt))
wait_for_completion(&ax->dead);
/*
* Halt the transmit queue so that a new transmit cannot scribble
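The atomic_t to refcount_t conversion above changes no fast-path cost but gains saturation semantics: refcount_inc() and refcount_dec_and_test() WARN and saturate instead of silently wrapping, turning reference-count overflows and use-after-free into loud diagnostics. A minimal sketch of the get/put lifetime pattern mkiss follows, with hypothetical obj_* names:

	#include <linux/refcount.h>
	#include <linux/completion.h>

	struct obj {
		refcount_t refcnt;
		struct completion dead;
	};

	static void obj_init(struct obj *o)
	{
		refcount_set(&o->refcnt, 1);	/* owner's reference */
		init_completion(&o->dead);
	}

	static void obj_get(struct obj *o)
	{
		refcount_inc(&o->refcnt);	/* WARNs on a 0 -> 1 resurrection */
	}

	static void obj_put(struct obj *o)
	{
		if (refcount_dec_and_test(&o->refcnt))
			complete(&o->dead);	/* last user unblocks teardown */
	}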
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
index e8fce6d715ef..0a6cd2f1111f 100644
--- a/drivers/net/hyperv/netvsc_drv.c
+++ b/drivers/net/hyperv/netvsc_drv.c
@@ -435,7 +435,7 @@ static u32 init_page_array(void *hdr, u32 len, struct sk_buff *skb,
skb_frag_t *frag = skb_shinfo(skb)->frags + i;
slots_used += fill_pg_buf(skb_frag_page(frag),
- frag->page_offset,
+ skb_frag_off(frag),
skb_frag_size(frag), &pb[slots_used]);
}
return slots_used;
@@ -449,7 +449,7 @@ static int count_skb_frag_slots(struct sk_buff *skb)
for (i = 0; i < frags; i++) {
skb_frag_t *frag = skb_shinfo(skb)->frags + i;
unsigned long size = skb_frag_size(frag);
- unsigned long offset = frag->page_offset;
+ unsigned long offset = skb_frag_off(frag);
/* Skip unused frames from start of page */
offset &= ~PAGE_MASK;
diff --git a/drivers/net/ieee802154/adf7242.c b/drivers/net/ieee802154/adf7242.c
index c9392d70e639..5a37514e4234 100644
--- a/drivers/net/ieee802154/adf7242.c
+++ b/drivers/net/ieee802154/adf7242.c
@@ -1158,23 +1158,16 @@ static int adf7242_stats_show(struct seq_file *file, void *offset)
return 0;
}
-static int adf7242_debugfs_init(struct adf7242_local *lp)
+static void adf7242_debugfs_init(struct adf7242_local *lp)
{
char debugfs_dir_name[DNAME_INLINE_LEN + 1] = "adf7242-";
- struct dentry *stats;
strncat(debugfs_dir_name, dev_name(&lp->spi->dev), DNAME_INLINE_LEN);
lp->debugfs_root = debugfs_create_dir(debugfs_dir_name, NULL);
- if (IS_ERR_OR_NULL(lp->debugfs_root))
- return PTR_ERR_OR_ZERO(lp->debugfs_root);
- stats = debugfs_create_devm_seqfile(&lp->spi->dev, "status",
- lp->debugfs_root,
- adf7242_stats_show);
- return PTR_ERR_OR_ZERO(stats);
-
- return 0;
+ debugfs_create_devm_seqfile(&lp->spi->dev, "status", lp->debugfs_root,
+ adf7242_stats_show);
}
static const s32 adf7242_powers[] = {
diff --git a/drivers/net/ieee802154/at86rf230.c b/drivers/net/ieee802154/at86rf230.c
index 595cf7e2a651..7d67f41387f5 100644
--- a/drivers/net/ieee802154/at86rf230.c
+++ b/drivers/net/ieee802154/at86rf230.c
@@ -1626,24 +1626,16 @@ static int at86rf230_stats_show(struct seq_file *file, void *offset)
}
DEFINE_SHOW_ATTRIBUTE(at86rf230_stats);
-static int at86rf230_debugfs_init(struct at86rf230_local *lp)
+static void at86rf230_debugfs_init(struct at86rf230_local *lp)
{
char debugfs_dir_name[DNAME_INLINE_LEN + 1] = "at86rf230-";
- struct dentry *stats;
strncat(debugfs_dir_name, dev_name(&lp->spi->dev), DNAME_INLINE_LEN);
at86rf230_debugfs_root = debugfs_create_dir(debugfs_dir_name, NULL);
- if (!at86rf230_debugfs_root)
- return -ENOMEM;
-
- stats = debugfs_create_file("trac_stats", 0444,
- at86rf230_debugfs_root, lp,
- &at86rf230_stats_fops);
- if (!stats)
- return -ENOMEM;
- return 0;
+ debugfs_create_file("trac_stats", 0444, at86rf230_debugfs_root, lp,
+ &at86rf230_stats_fops);
}
static void at86rf230_debugfs_remove(void)
@@ -1651,7 +1643,7 @@ static void at86rf230_debugfs_remove(void)
debugfs_remove_recursive(at86rf230_debugfs_root);
}
#else
-static int at86rf230_debugfs_init(struct at86rf230_local *lp) { return 0; }
+static void at86rf230_debugfs_init(struct at86rf230_local *lp) { }
static void at86rf230_debugfs_remove(void) { }
#endif
@@ -1751,9 +1743,7 @@ static int at86rf230_probe(struct spi_device *spi)
/* going into sleep by default */
at86rf230_sleep(lp);
- rc = at86rf230_debugfs_init(lp);
- if (rc)
- goto free_dev;
+ at86rf230_debugfs_init(lp);
rc = ieee802154_register_hw(lp->hw);
if (rc)
diff --git a/drivers/net/ieee802154/ca8210.c b/drivers/net/ieee802154/ca8210.c
index b188fce3f641..11402dc347db 100644
--- a/drivers/net/ieee802154/ca8210.c
+++ b/drivers/net/ieee802154/ca8210.c
@@ -3019,14 +3019,7 @@ static int ca8210_test_interface_init(struct ca8210_priv *priv)
priv,
&test_int_fops
);
- if (IS_ERR(test->ca8210_dfs_spi_int)) {
- dev_err(
- &priv->spi->dev,
- "Error %ld when creating debugfs node\n",
- PTR_ERR(test->ca8210_dfs_spi_int)
- );
- return PTR_ERR(test->ca8210_dfs_spi_int);
- }
+
debugfs_create_symlink("ca8210", NULL, node_name);
init_waitqueue_head(&test->readq);
return kfifo_alloc(
diff --git a/drivers/net/ipvlan/ipvlan_main.c b/drivers/net/ipvlan/ipvlan_main.c
index 1c96bed5a7c4..887bbba4631e 100644
--- a/drivers/net/ipvlan/ipvlan_main.c
+++ b/drivers/net/ipvlan/ipvlan_main.c
@@ -126,6 +126,7 @@ static int ipvlan_init(struct net_device *dev)
(phy_dev->state & IPVLAN_STATE_MASK);
dev->features = phy_dev->features & IPVLAN_FEATURES;
dev->features |= NETIF_F_LLTX | NETIF_F_VLAN_CHALLENGED;
+ dev->hw_enc_features |= dev->features;
dev->gso_max_size = phy_dev->gso_max_size;
dev->gso_max_segs = phy_dev->gso_max_segs;
dev->hard_header_len = phy_dev->hard_header_len;
diff --git a/drivers/net/netdevsim/dev.c b/drivers/net/netdevsim/dev.c
index bcc40a236624..39cdb6c18ec0 100644
--- a/drivers/net/netdevsim/dev.c
+++ b/drivers/net/netdevsim/dev.c
@@ -17,16 +17,60 @@
#include <linux/debugfs.h>
#include <linux/device.h>
+#include <linux/etherdevice.h>
+#include <linux/inet.h>
+#include <linux/jiffies.h>
+#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/random.h>
#include <linux/rtnetlink.h>
+#include <linux/workqueue.h>
#include <net/devlink.h>
+#include <net/ip.h>
+#include <uapi/linux/devlink.h>
+#include <uapi/linux/ip.h>
+#include <uapi/linux/udp.h>
#include "netdevsim.h"
static struct dentry *nsim_dev_ddir;
+#define NSIM_DEV_DUMMY_REGION_SIZE (1024 * 32)
+
+static ssize_t nsim_dev_take_snapshot_write(struct file *file,
+ const char __user *data,
+ size_t count, loff_t *ppos)
+{
+ struct nsim_dev *nsim_dev = file->private_data;
+ void *dummy_data;
+ int err;
+ u32 id;
+
+ dummy_data = kmalloc(NSIM_DEV_DUMMY_REGION_SIZE, GFP_KERNEL);
+ if (!dummy_data)
+ return -ENOMEM;
+
+ get_random_bytes(dummy_data, NSIM_DEV_DUMMY_REGION_SIZE);
+
+ id = devlink_region_shapshot_id_get(priv_to_devlink(nsim_dev));
+ err = devlink_region_snapshot_create(nsim_dev->dummy_region,
+ dummy_data, id, kfree);
+ if (err) {
+ pr_err("Failed to create region snapshot\n");
+ kfree(dummy_data);
+ return err;
+ }
+
+ return count;
+}
+
+static const struct file_operations nsim_dev_take_snapshot_fops = {
+ .open = simple_open,
+ .write = nsim_dev_take_snapshot_write,
+ .llseek = generic_file_llseek,
+};
+
static int nsim_dev_debugfs_init(struct nsim_dev *nsim_dev)
{
char dev_ddir_name[16];
@@ -40,6 +84,12 @@ static int nsim_dev_debugfs_init(struct nsim_dev *nsim_dev)
return PTR_ERR_OR_ZERO(nsim_dev->ports_ddir) ?: -EINVAL;
debugfs_create_bool("fw_update_status", 0600, nsim_dev->ddir,
&nsim_dev->fw_update_status);
+ debugfs_create_u32("max_macs", 0600, nsim_dev->ddir,
+ &nsim_dev->max_macs);
+ debugfs_create_bool("test1", 0600, nsim_dev->ddir,
+ &nsim_dev->test1);
+ debugfs_create_file("take_snapshot", 0200, nsim_dev->ddir, nsim_dev,
+ &nsim_dev_take_snapshot_fops);
return 0;
}
@@ -193,6 +243,284 @@ out:
return err;
}
+enum nsim_devlink_param_id {
+ NSIM_DEVLINK_PARAM_ID_BASE = DEVLINK_PARAM_GENERIC_ID_MAX,
+ NSIM_DEVLINK_PARAM_ID_TEST1,
+};
+
+static const struct devlink_param nsim_devlink_params[] = {
+ DEVLINK_PARAM_GENERIC(MAX_MACS,
+ BIT(DEVLINK_PARAM_CMODE_DRIVERINIT),
+ NULL, NULL, NULL),
+ DEVLINK_PARAM_DRIVER(NSIM_DEVLINK_PARAM_ID_TEST1,
+ "test1", DEVLINK_PARAM_TYPE_BOOL,
+ BIT(DEVLINK_PARAM_CMODE_DRIVERINIT),
+ NULL, NULL, NULL),
+};
+
+static void nsim_devlink_set_params_init_values(struct nsim_dev *nsim_dev,
+ struct devlink *devlink)
+{
+ union devlink_param_value value;
+
+ value.vu32 = nsim_dev->max_macs;
+ devlink_param_driverinit_value_set(devlink,
+ DEVLINK_PARAM_GENERIC_ID_MAX_MACS,
+ value);
+ value.vbool = nsim_dev->test1;
+ devlink_param_driverinit_value_set(devlink,
+ NSIM_DEVLINK_PARAM_ID_TEST1,
+ value);
+}
+
+static void nsim_devlink_param_load_driverinit_values(struct devlink *devlink)
+{
+ struct nsim_dev *nsim_dev = devlink_priv(devlink);
+ union devlink_param_value saved_value;
+ int err;
+
+ err = devlink_param_driverinit_value_get(devlink,
+ DEVLINK_PARAM_GENERIC_ID_MAX_MACS,
+ &saved_value);
+ if (!err)
+ nsim_dev->max_macs = saved_value.vu32;
+ err = devlink_param_driverinit_value_get(devlink,
+ NSIM_DEVLINK_PARAM_ID_TEST1,
+ &saved_value);
+ if (!err)
+ nsim_dev->test1 = saved_value.vbool;
+}
+
+#define NSIM_DEV_DUMMY_REGION_SNAPSHOT_MAX 16
+
+static int nsim_dev_dummy_region_init(struct nsim_dev *nsim_dev,
+ struct devlink *devlink)
+{
+ nsim_dev->dummy_region =
+ devlink_region_create(devlink, "dummy",
+ NSIM_DEV_DUMMY_REGION_SNAPSHOT_MAX,
+ NSIM_DEV_DUMMY_REGION_SIZE);
+ return PTR_ERR_OR_ZERO(nsim_dev->dummy_region);
+}
+
+static void nsim_dev_dummy_region_exit(struct nsim_dev *nsim_dev)
+{
+ devlink_region_destroy(nsim_dev->dummy_region);
+}
+
+struct nsim_trap_item {
+ void *trap_ctx;
+ enum devlink_trap_action action;
+};
+
+struct nsim_trap_data {
+ struct delayed_work trap_report_dw;
+ struct nsim_trap_item *trap_items_arr;
+ struct nsim_dev *nsim_dev;
+ spinlock_t trap_lock; /* Protects trap_items_arr */
+};
+
+/* All driver-specific traps must be documented in
+ * Documentation/networking/devlink-trap-netdevsim.rst
+ */
+enum {
+ NSIM_TRAP_ID_BASE = DEVLINK_TRAP_GENERIC_ID_MAX,
+ NSIM_TRAP_ID_FID_MISS,
+};
+
+#define NSIM_TRAP_NAME_FID_MISS "fid_miss"
+
+#define NSIM_TRAP_METADATA DEVLINK_TRAP_METADATA_TYPE_F_IN_PORT
+
+#define NSIM_TRAP_DROP(_id, _group_id) \
+ DEVLINK_TRAP_GENERIC(DROP, DROP, _id, \
+ DEVLINK_TRAP_GROUP_GENERIC(_group_id), \
+ NSIM_TRAP_METADATA)
+#define NSIM_TRAP_EXCEPTION(_id, _group_id) \
+ DEVLINK_TRAP_GENERIC(EXCEPTION, TRAP, _id, \
+ DEVLINK_TRAP_GROUP_GENERIC(_group_id), \
+ NSIM_TRAP_METADATA)
+#define NSIM_TRAP_DRIVER_EXCEPTION(_id, _group_id) \
+ DEVLINK_TRAP_DRIVER(EXCEPTION, TRAP, NSIM_TRAP_ID_##_id, \
+ NSIM_TRAP_NAME_##_id, \
+ DEVLINK_TRAP_GROUP_GENERIC(_group_id), \
+ NSIM_TRAP_METADATA)
+
+static const struct devlink_trap nsim_traps_arr[] = {
+ NSIM_TRAP_DROP(SMAC_MC, L2_DROPS),
+ NSIM_TRAP_DROP(VLAN_TAG_MISMATCH, L2_DROPS),
+ NSIM_TRAP_DROP(INGRESS_VLAN_FILTER, L2_DROPS),
+ NSIM_TRAP_DROP(INGRESS_STP_FILTER, L2_DROPS),
+ NSIM_TRAP_DROP(EMPTY_TX_LIST, L2_DROPS),
+ NSIM_TRAP_DROP(PORT_LOOPBACK_FILTER, L2_DROPS),
+ NSIM_TRAP_DRIVER_EXCEPTION(FID_MISS, L2_DROPS),
+ NSIM_TRAP_DROP(BLACKHOLE_ROUTE, L3_DROPS),
+ NSIM_TRAP_EXCEPTION(TTL_ERROR, L3_DROPS),
+ NSIM_TRAP_DROP(TAIL_DROP, BUFFER_DROPS),
+};
+
+#define NSIM_TRAP_L4_DATA_LEN 100
+
+static struct sk_buff *nsim_dev_trap_skb_build(void)
+{
+ int tot_len, data_len = NSIM_TRAP_L4_DATA_LEN;
+ struct sk_buff *skb;
+ struct udphdr *udph;
+ struct ethhdr *eth;
+ struct iphdr *iph;
+
+ skb = alloc_skb(NLMSG_GOODSIZE, GFP_ATOMIC);
+ if (!skb)
+ return NULL;
+ tot_len = sizeof(struct iphdr) + sizeof(struct udphdr) + data_len;
+
+ eth = skb_put(skb, sizeof(struct ethhdr));
+ eth_random_addr(eth->h_dest);
+ eth_random_addr(eth->h_source);
+ eth->h_proto = htons(ETH_P_IP);
+ skb->protocol = htons(ETH_P_IP);
+
+ iph = skb_put(skb, sizeof(struct iphdr));
+ iph->protocol = IPPROTO_UDP;
+ iph->saddr = in_aton("192.0.2.1");
+ iph->daddr = in_aton("198.51.100.1");
+ iph->version = 0x4;
+ iph->frag_off = 0;
+ iph->ihl = 0x5;
+ iph->tot_len = htons(tot_len);
+ iph->ttl = 100;
+ iph->check = 0;
+ iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl);
+
+ udph = skb_put_zero(skb, sizeof(struct udphdr) + data_len);
+ get_random_bytes(&udph->source, sizeof(u16));
+ get_random_bytes(&udph->dest, sizeof(u16));
+ udph->len = htons(sizeof(struct udphdr) + data_len);
+
+ return skb;
+}
+
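nsim_dev_trap_skb_build() above assembles the IPv4 header by hand and closes it with ip_fast_csum(), the 16-bit one's-complement checksum computed over ihl 32-bit words with the check field zeroed first. A portable userspace sketch of the same RFC 791 computation:

	#include <stdint.h>
	#include <stddef.h>

	/* One's-complement sum of the header viewed as 16-bit words;
	 * the caller zeroes the checksum field beforehand, exactly as
	 * the builder above does with iph->check = 0.
	 */
	static uint16_t ip_hdr_csum(const void *hdr, size_t ihl_words)
	{
		const uint16_t *p = hdr;
		uint32_t sum = 0;
		size_t i;

		for (i = 0; i < ihl_words * 2; i++)
			sum += p[i];
		while (sum >> 16)	/* fold the carries back in */
			sum = (sum & 0xffff) + (sum >> 16);
		return (uint16_t)~sum;
	}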
+static void nsim_dev_trap_report(struct nsim_dev_port *nsim_dev_port)
+{
+ struct nsim_dev *nsim_dev = nsim_dev_port->ns->nsim_dev;
+ struct devlink *devlink = priv_to_devlink(nsim_dev);
+ struct nsim_trap_data *nsim_trap_data;
+ int i;
+
+ nsim_trap_data = nsim_dev->trap_data;
+
+ spin_lock(&nsim_trap_data->trap_lock);
+ for (i = 0; i < ARRAY_SIZE(nsim_traps_arr); i++) {
+ struct nsim_trap_item *nsim_trap_item;
+ struct sk_buff *skb;
+
+ nsim_trap_item = &nsim_trap_data->trap_items_arr[i];
+ if (nsim_trap_item->action == DEVLINK_TRAP_ACTION_DROP)
+ continue;
+
+ skb = nsim_dev_trap_skb_build();
+ if (!skb)
+ continue;
+ skb->dev = nsim_dev_port->ns->netdev;
+
+ /* Trapped packets are usually passed to devlink in softIRQ,
+ * but in this case they are generated in a workqueue. Disable
+ * softIRQs to prevent lockdep from complaining about
+ * "incosistent lock state".
+ */
+ local_bh_disable();
+ devlink_trap_report(devlink, skb, nsim_trap_item->trap_ctx,
+ &nsim_dev_port->devlink_port);
+ local_bh_enable();
+ consume_skb(skb);
+ }
+ spin_unlock(&nsim_trap_data->trap_lock);
+}
+
+#define NSIM_TRAP_REPORT_INTERVAL_MS 100
+
+static void nsim_dev_trap_report_work(struct work_struct *work)
+{
+ struct nsim_trap_data *nsim_trap_data;
+ struct nsim_dev_port *nsim_dev_port;
+ struct nsim_dev *nsim_dev;
+
+ nsim_trap_data = container_of(work, struct nsim_trap_data,
+ trap_report_dw.work);
+ nsim_dev = nsim_trap_data->nsim_dev;
+
+ /* For each running port and enabled packet trap, generate a UDP
+ * packet with a random 5-tuple and report it.
+ */
+ mutex_lock(&nsim_dev->port_list_lock);
+ list_for_each_entry(nsim_dev_port, &nsim_dev->port_list, list) {
+ if (!netif_running(nsim_dev_port->ns->netdev))
+ continue;
+
+ nsim_dev_trap_report(nsim_dev_port);
+ }
+ mutex_unlock(&nsim_dev->port_list_lock);
+
+ schedule_delayed_work(&nsim_dev->trap_data->trap_report_dw,
+ msecs_to_jiffies(NSIM_TRAP_REPORT_INTERVAL_MS));
+}
+
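The report work above is a self-rescheduling delayed work: each run re-queues itself NSIM_TRAP_REPORT_INTERVAL_MS later, and nsim_dev_traps_exit() breaks the cycle with cancel_delayed_work_sync(), which is guaranteed to return with the work idle even when the handler is its own re-queuer. A minimal sketch of the pattern with hypothetical poll_* names:

	#include <linux/workqueue.h>
	#include <linux/jiffies.h>

	#define POLL_MS	100

	static struct delayed_work poll_dw;

	static void poll_work(struct work_struct *work)
	{
		/* ... periodic work ... */
		schedule_delayed_work(&poll_dw, msecs_to_jiffies(POLL_MS));
	}

	static void poll_start(void)
	{
		INIT_DELAYED_WORK(&poll_dw, poll_work);
		schedule_delayed_work(&poll_dw, msecs_to_jiffies(POLL_MS));
	}

	static void poll_stop(void)
	{
		/* Safe against self-requeueing: cancels pending work and
		 * waits for a running instance before returning.
		 */
		cancel_delayed_work_sync(&poll_dw);
	}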
+static int nsim_dev_traps_init(struct devlink *devlink)
+{
+ struct nsim_dev *nsim_dev = devlink_priv(devlink);
+ struct nsim_trap_data *nsim_trap_data;
+ int err;
+
+ nsim_trap_data = kzalloc(sizeof(*nsim_trap_data), GFP_KERNEL);
+ if (!nsim_trap_data)
+ return -ENOMEM;
+
+ nsim_trap_data->trap_items_arr = kcalloc(ARRAY_SIZE(nsim_traps_arr),
+ sizeof(struct nsim_trap_item),
+ GFP_KERNEL);
+ if (!nsim_trap_data->trap_items_arr) {
+ err = -ENOMEM;
+ goto err_trap_data_free;
+ }
+
+ /* The lock is used to protect the action state of the registered
+ * traps. The action value is written by the user and read in the
+ * delayed work when iterating over all the traps.
+ */
+ spin_lock_init(&nsim_trap_data->trap_lock);
+ nsim_trap_data->nsim_dev = nsim_dev;
+ nsim_dev->trap_data = nsim_trap_data;
+
+ err = devlink_traps_register(devlink, nsim_traps_arr,
+ ARRAY_SIZE(nsim_traps_arr), NULL);
+ if (err)
+ goto err_trap_items_free;
+
+ INIT_DELAYED_WORK(&nsim_dev->trap_data->trap_report_dw,
+ nsim_dev_trap_report_work);
+ schedule_delayed_work(&nsim_dev->trap_data->trap_report_dw,
+ msecs_to_jiffies(NSIM_TRAP_REPORT_INTERVAL_MS));
+
+ return 0;
+
+err_trap_items_free:
+ kfree(nsim_trap_data->trap_items_arr);
+err_trap_data_free:
+ kfree(nsim_trap_data);
+ return err;
+}
+
+static void nsim_dev_traps_exit(struct devlink *devlink)
+{
+ struct nsim_dev *nsim_dev = devlink_priv(devlink);
+
+ cancel_delayed_work_sync(&nsim_dev->trap_data->trap_report_dw);
+ devlink_traps_unregister(devlink, nsim_traps_arr,
+ ARRAY_SIZE(nsim_traps_arr));
+ kfree(nsim_dev->trap_data->trap_items_arr);
+ kfree(nsim_dev->trap_data);
+}
+
static int nsim_dev_reload(struct devlink *devlink,
struct netlink_ext_ack *extack)
{
@@ -214,6 +542,7 @@ static int nsim_dev_reload(struct devlink *devlink,
return err;
}
}
+ nsim_devlink_param_load_driverinit_values(devlink);
return 0;
}
@@ -258,11 +587,66 @@ static int nsim_dev_flash_update(struct devlink *devlink, const char *file_name,
return 0;
}
+static struct nsim_trap_item *
+nsim_dev_trap_item_lookup(struct nsim_dev *nsim_dev, u16 trap_id)
+{
+ struct nsim_trap_data *nsim_trap_data = nsim_dev->trap_data;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(nsim_traps_arr); i++) {
+ if (nsim_traps_arr[i].id == trap_id)
+ return &nsim_trap_data->trap_items_arr[i];
+ }
+
+ return NULL;
+}
+
+static int nsim_dev_devlink_trap_init(struct devlink *devlink,
+ const struct devlink_trap *trap,
+ void *trap_ctx)
+{
+ struct nsim_dev *nsim_dev = devlink_priv(devlink);
+ struct nsim_trap_item *nsim_trap_item;
+
+ nsim_trap_item = nsim_dev_trap_item_lookup(nsim_dev, trap->id);
+ if (WARN_ON(!nsim_trap_item))
+ return -ENOENT;
+
+ nsim_trap_item->trap_ctx = trap_ctx;
+ nsim_trap_item->action = trap->init_action;
+
+ return 0;
+}
+
+static int
+nsim_dev_devlink_trap_action_set(struct devlink *devlink,
+ const struct devlink_trap *trap,
+ enum devlink_trap_action action)
+{
+ struct nsim_dev *nsim_dev = devlink_priv(devlink);
+ struct nsim_trap_item *nsim_trap_item;
+
+ nsim_trap_item = nsim_dev_trap_item_lookup(nsim_dev, trap->id);
+ if (WARN_ON(!nsim_trap_item))
+ return -ENOENT;
+
+ spin_lock(&nsim_dev->trap_data->trap_lock);
+ nsim_trap_item->action = action;
+ spin_unlock(&nsim_dev->trap_data->trap_lock);
+
+ return 0;
+}
+
static const struct devlink_ops nsim_dev_devlink_ops = {
.reload = nsim_dev_reload,
.flash_update = nsim_dev_flash_update,
+ .trap_init = nsim_dev_devlink_trap_init,
+ .trap_action_set = nsim_dev_devlink_trap_action_set,
};
+#define NSIM_DEV_MAX_MACS_DEFAULT 32
+#define NSIM_DEV_TEST1_DEFAULT true
+
static struct nsim_dev *
nsim_dev_create(struct nsim_bus_dev *nsim_bus_dev, unsigned int port_count)
{
@@ -280,6 +664,8 @@ nsim_dev_create(struct nsim_bus_dev *nsim_bus_dev, unsigned int port_count)
INIT_LIST_HEAD(&nsim_dev->port_list);
mutex_init(&nsim_dev->port_list_lock);
nsim_dev->fw_update_status = true;
+ nsim_dev->max_macs = NSIM_DEV_MAX_MACS_DEFAULT;
+ nsim_dev->test1 = NSIM_DEV_TEST1_DEFAULT;
err = nsim_dev_resources_register(devlink);
if (err)
@@ -289,18 +675,40 @@ nsim_dev_create(struct nsim_bus_dev *nsim_bus_dev, unsigned int port_count)
if (err)
goto err_resources_unregister;
- err = nsim_dev_debugfs_init(nsim_dev);
+ err = devlink_params_register(devlink, nsim_devlink_params,
+ ARRAY_SIZE(nsim_devlink_params));
if (err)
goto err_dl_unregister;
+ nsim_devlink_set_params_init_values(nsim_dev, devlink);
+
+ err = nsim_dev_dummy_region_init(nsim_dev, devlink);
+ if (err)
+ goto err_params_unregister;
+
+ err = nsim_dev_traps_init(devlink);
+ if (err)
+ goto err_dummy_region_exit;
+
+ err = nsim_dev_debugfs_init(nsim_dev);
+ if (err)
+ goto err_traps_exit;
err = nsim_bpf_dev_init(nsim_dev);
if (err)
goto err_debugfs_exit;
+ devlink_params_publish(devlink);
return nsim_dev;
err_debugfs_exit:
nsim_dev_debugfs_exit(nsim_dev);
+err_traps_exit:
+ nsim_dev_traps_exit(devlink);
+err_dummy_region_exit:
+ nsim_dev_dummy_region_exit(nsim_dev);
+err_params_unregister:
+ devlink_params_unregister(devlink, nsim_devlink_params,
+ ARRAY_SIZE(nsim_devlink_params));
err_dl_unregister:
devlink_unregister(devlink);
err_resources_unregister:
@@ -316,6 +724,10 @@ static void nsim_dev_destroy(struct nsim_dev *nsim_dev)
nsim_bpf_dev_exit(nsim_dev);
nsim_dev_debugfs_exit(nsim_dev);
+ nsim_dev_traps_exit(devlink);
+ nsim_dev_dummy_region_exit(nsim_dev);
+ devlink_params_unregister(devlink, nsim_devlink_params,
+ ARRAY_SIZE(nsim_devlink_params));
devlink_unregister(devlink);
devlink_resources_unregister(devlink, NULL);
mutex_destroy(&nsim_dev->port_list_lock);
diff --git a/drivers/net/netdevsim/netdevsim.h b/drivers/net/netdevsim/netdevsim.h
index 9404637d34b7..66bf13765ad0 100644
--- a/drivers/net/netdevsim/netdevsim.h
+++ b/drivers/net/netdevsim/netdevsim.h
@@ -145,6 +145,7 @@ struct nsim_dev_port {
struct nsim_dev {
struct nsim_bus_dev *nsim_bus_dev;
struct nsim_fib_data *fib_data;
+ struct nsim_trap_data *trap_data;
struct dentry *ddir;
struct dentry *ports_ddir;
struct bpf_offload_dev *bpf_dev;
@@ -158,6 +159,9 @@ struct nsim_dev {
struct list_head port_list;
struct mutex port_list_lock; /* protects port list */
bool fw_update_status;
+ u32 max_macs;
+ bool test1;
+ struct devlink_region *dummy_region;
};
int nsim_dev_init(void);
diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig
index 20f14c5fbb7e..03be30cde552 100644
--- a/drivers/net/phy/Kconfig
+++ b/drivers/net/phy/Kconfig
@@ -21,6 +21,19 @@ config MDIO_BUS
if MDIO_BUS
+config MDIO_ASPEED
+ tristate "ASPEED MDIO bus controller"
+ depends on ARCH_ASPEED || COMPILE_TEST
+ depends on OF_MDIO && HAS_IOMEM
+ help
+ This module provides a driver for the independent MDIO bus
+ controllers found in the ASPEED AST2600 SoC. This is a driver for the
+ third revision of the ASPEED MDIO register interface - the first two
+ revisions are the "old" and "new" interfaces found in the AST2400 and
+ AST2500, embedded in the MAC. For legacy reasons, FTGMAC100 driver
+ continues to drive the embedded MDIO controller for the AST2400 and
+ AST2500 SoCs, so say N if AST2600 support is not required.
+
config MDIO_BCM_IPROC
tristate "Broadcom iProc MDIO bus controller"
depends on ARCH_BCM_IPROC || COMPILE_TEST
@@ -159,8 +172,8 @@ config MDIO_MSCC_MIIM
config MDIO_OCTEON
tristate "Octeon and some ThunderX SOCs MDIO buses"
- depends on 64BIT
- depends on HAS_IOMEM && OF_MDIO
+ depends on (64BIT && OF_MDIO) || COMPILE_TEST
+ depends on HAS_IOMEM
select MDIO_CAVIUM
help
This module provides a driver for the Octeon and ThunderX MDIO
@@ -244,6 +257,15 @@ config SFP
depends on HWMON || HWMON=n
select MDIO_I2C
+config ADIN_PHY
+ tristate "Analog Devices Industrial Ethernet PHYs"
+ help
+ Adds support for the Analog Devices Industrial Ethernet PHYs.
+ Currently supports the:
+ - ADIN1200 - Robust, Industrial, Low Power 10/100 Ethernet PHY
+ - ADIN1300 - Robust, Industrial, Low Latency 10/100/1000 Gigabit
+ Ethernet PHY
+
config AMD_PHY
tristate "AMD PHYs"
---help---
diff --git a/drivers/net/phy/Makefile b/drivers/net/phy/Makefile
index 839acb292c38..a03437e091f3 100644
--- a/drivers/net/phy/Makefile
+++ b/drivers/net/phy/Makefile
@@ -22,6 +22,7 @@ libphy-$(CONFIG_LED_TRIGGER_PHY) += phy_led_triggers.o
obj-$(CONFIG_PHYLINK) += phylink.o
obj-$(CONFIG_PHYLIB) += libphy.o
+obj-$(CONFIG_MDIO_ASPEED) += mdio-aspeed.o
obj-$(CONFIG_MDIO_BCM_IPROC) += mdio-bcm-iproc.o
obj-$(CONFIG_MDIO_BCM_UNIMAC) += mdio-bcm-unimac.o
obj-$(CONFIG_MDIO_BITBANG) += mdio-bitbang.o
@@ -46,6 +47,7 @@ obj-$(CONFIG_SFP) += sfp.o
sfp-obj-$(CONFIG_SFP) += sfp-bus.o
obj-y += $(sfp-obj-y) $(sfp-obj-m)
+obj-$(CONFIG_ADIN_PHY) += adin.o
obj-$(CONFIG_AMD_PHY) += amd.o
aquantia-objs += aquantia_main.o
ifdef CONFIG_HWMON
diff --git a/drivers/net/phy/adin.c b/drivers/net/phy/adin.c
new file mode 100644
index 000000000000..4dec83df048d
--- /dev/null
+++ b/drivers/net/phy/adin.c
@@ -0,0 +1,720 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Driver for Analog Devices Industrial Ethernet PHYs
+ *
+ * Copyright 2019 Analog Devices Inc.
+ */
+#include <linux/kernel.h>
+#include <linux/bitfield.h>
+#include <linux/delay.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/mii.h>
+#include <linux/phy.h>
+#include <linux/property.h>
+
+#define PHY_ID_ADIN1200 0x0283bc20
+#define PHY_ID_ADIN1300 0x0283bc30
+
+#define ADIN1300_MII_EXT_REG_PTR 0x0010
+#define ADIN1300_MII_EXT_REG_DATA 0x0011
+
+#define ADIN1300_PHY_CTRL1 0x0012
+#define ADIN1300_AUTO_MDI_EN BIT(10)
+#define ADIN1300_MAN_MDIX_EN BIT(9)
+
+#define ADIN1300_RX_ERR_CNT 0x0014
+
+#define ADIN1300_PHY_CTRL2 0x0016
+#define ADIN1300_DOWNSPEED_AN_100_EN BIT(11)
+#define ADIN1300_DOWNSPEED_AN_10_EN BIT(10)
+#define ADIN1300_GROUP_MDIO_EN BIT(6)
+#define ADIN1300_DOWNSPEEDS_EN \
+ (ADIN1300_DOWNSPEED_AN_100_EN | ADIN1300_DOWNSPEED_AN_10_EN)
+
+#define ADIN1300_PHY_CTRL3 0x0017
+#define ADIN1300_LINKING_EN BIT(13)
+#define ADIN1300_DOWNSPEED_RETRIES_MSK GENMASK(12, 10)
+
+#define ADIN1300_INT_MASK_REG 0x0018
+#define ADIN1300_INT_MDIO_SYNC_EN BIT(9)
+#define ADIN1300_INT_ANEG_STAT_CHNG_EN BIT(8)
+#define ADIN1300_INT_ANEG_PAGE_RX_EN BIT(6)
+#define ADIN1300_INT_IDLE_ERR_CNT_EN BIT(5)
+#define ADIN1300_INT_MAC_FIFO_OU_EN BIT(4)
+#define ADIN1300_INT_RX_STAT_CHNG_EN BIT(3)
+#define ADIN1300_INT_LINK_STAT_CHNG_EN BIT(2)
+#define ADIN1300_INT_SPEED_CHNG_EN BIT(1)
+#define ADIN1300_INT_HW_IRQ_EN BIT(0)
+#define ADIN1300_INT_MASK_EN \
+ (ADIN1300_INT_LINK_STAT_CHNG_EN | ADIN1300_INT_HW_IRQ_EN)
+#define ADIN1300_INT_STATUS_REG 0x0019
+
+#define ADIN1300_PHY_STATUS1 0x001a
+#define ADIN1300_PAIR_01_SWAP BIT(11)
+
+/* EEE register addresses, accessible via Clause 22 access using
+ * ADIN1300_MII_EXT_REG_PTR & ADIN1300_MII_EXT_REG_DATA.
+ * The bit-fields are the same as specified by IEEE for EEE.
+ */
+#define ADIN1300_EEE_CAP_REG 0x8000
+#define ADIN1300_EEE_ADV_REG 0x8001
+#define ADIN1300_EEE_LPABLE_REG 0x8002
+#define ADIN1300_CLOCK_STOP_REG 0x9400
+#define ADIN1300_LPI_WAKE_ERR_CNT_REG 0xa000
+
+#define ADIN1300_GE_SOFT_RESET_REG 0xff0c
+#define ADIN1300_GE_SOFT_RESET BIT(0)
+
+#define ADIN1300_GE_RGMII_CFG_REG 0xff23
+#define ADIN1300_GE_RGMII_RX_MSK GENMASK(8, 6)
+#define ADIN1300_GE_RGMII_RX_SEL(x) \
+ FIELD_PREP(ADIN1300_GE_RGMII_RX_MSK, x)
+#define ADIN1300_GE_RGMII_GTX_MSK GENMASK(5, 3)
+#define ADIN1300_GE_RGMII_GTX_SEL(x) \
+ FIELD_PREP(ADIN1300_GE_RGMII_GTX_MSK, x)
+#define ADIN1300_GE_RGMII_RXID_EN BIT(2)
+#define ADIN1300_GE_RGMII_TXID_EN BIT(1)
+#define ADIN1300_GE_RGMII_EN BIT(0)
+
+/* RGMII internal delay settings for rx and tx for ADIN1300 */
+#define ADIN1300_RGMII_1_60_NS 0x0001
+#define ADIN1300_RGMII_1_80_NS 0x0002
+#define ADIN1300_RGMII_2_00_NS 0x0000
+#define ADIN1300_RGMII_2_20_NS 0x0006
+#define ADIN1300_RGMII_2_40_NS 0x0007
+
+#define ADIN1300_GE_RMII_CFG_REG 0xff24
+#define ADIN1300_GE_RMII_FIFO_DEPTH_MSK GENMASK(6, 4)
+#define ADIN1300_GE_RMII_FIFO_DEPTH_SEL(x) \
+ FIELD_PREP(ADIN1300_GE_RMII_FIFO_DEPTH_MSK, x)
+#define ADIN1300_GE_RMII_EN BIT(0)
+
+/* RMII fifo depth values */
+#define ADIN1300_RMII_4_BITS 0x0000
+#define ADIN1300_RMII_8_BITS 0x0001
+#define ADIN1300_RMII_12_BITS 0x0002
+#define ADIN1300_RMII_16_BITS 0x0003
+#define ADIN1300_RMII_20_BITS 0x0004
+#define ADIN1300_RMII_24_BITS 0x0005
+
+/**
+ * struct adin_cfg_reg_map - map a config value to a register value
+ * @cfg: value in device configuration
+ * @reg: value in the register
+ */
+struct adin_cfg_reg_map {
+ int cfg;
+ int reg;
+};
+
+static const struct adin_cfg_reg_map adin_rgmii_delays[] = {
+ { 1600, ADIN1300_RGMII_1_60_NS },
+ { 1800, ADIN1300_RGMII_1_80_NS },
+ { 2000, ADIN1300_RGMII_2_00_NS },
+ { 2200, ADIN1300_RGMII_2_20_NS },
+ { 2400, ADIN1300_RGMII_2_40_NS },
+ { },
+};
+
+static const struct adin_cfg_reg_map adin_rmii_fifo_depths[] = {
+ { 4, ADIN1300_RMII_4_BITS },
+ { 8, ADIN1300_RMII_8_BITS },
+ { 12, ADIN1300_RMII_12_BITS },
+ { 16, ADIN1300_RMII_16_BITS },
+ { 20, ADIN1300_RMII_20_BITS },
+ { 24, ADIN1300_RMII_24_BITS },
+ { },
+};
+
+/**
+ * struct adin_clause45_mmd_map - map to convert Clause 45 regs to Clause 22
+ * @devad: device address used in Clause 45 access
+ * @cl45_regnum: register address defined by Clause 45
+ * @adin_regnum: equivalent register address accessible via Clause 22
+ */
+struct adin_clause45_mmd_map {
+ int devad;
+ u16 cl45_regnum;
+ u16 adin_regnum;
+};
+
+static struct adin_clause45_mmd_map adin_clause45_mmd_map[] = {
+ { MDIO_MMD_PCS, MDIO_PCS_EEE_ABLE, ADIN1300_EEE_CAP_REG },
+ { MDIO_MMD_AN, MDIO_AN_EEE_LPABLE, ADIN1300_EEE_LPABLE_REG },
+ { MDIO_MMD_AN, MDIO_AN_EEE_ADV, ADIN1300_EEE_ADV_REG },
+ { MDIO_MMD_PCS, MDIO_CTRL1, ADIN1300_CLOCK_STOP_REG },
+ { MDIO_MMD_PCS, MDIO_PCS_EEE_WK_ERR, ADIN1300_LPI_WAKE_ERR_CNT_REG },
+};
+
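The map above lets the driver emulate Clause 45 MMD registers through plain Clause 22 MDIO: the extended register address goes into ADIN1300_MII_EXT_REG_PTR, after which ADIN1300_MII_EXT_REG_DATA carries the data. A hedged sketch of the read half of that indirection (illustrative, not a verbatim copy of this driver's helper):

	static int adin_ext_read(struct phy_device *phydev, u16 regnum)
	{
		int ret;

		ret = phy_write(phydev, ADIN1300_MII_EXT_REG_PTR, regnum);
		if (ret < 0)
			return ret;

		return phy_read(phydev, ADIN1300_MII_EXT_REG_DATA);
	}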
+struct adin_hw_stat {
+ const char *string;
+ u16 reg1;
+ u16 reg2;
+};
+
+static struct adin_hw_stat adin_hw_stats[] = {
+ { "total_frames_checked_count", 0x940A, 0x940B }, /* hi + lo */
+ { "length_error_frames_count", 0x940C },
+ { "alignment_error_frames_count", 0x940D },
+ { "symbol_error_count", 0x940E },
+ { "oversized_frames_count", 0x940F },
+ { "undersized_frames_count", 0x9410 },
+ { "odd_nibble_frames_count", 0x9411 },
+ { "odd_preamble_packet_count", 0x9412 },
+ { "dribble_bits_frames_count", 0x9413 },
+ { "false_carrier_events_count", 0x9414 },
+};
+
+/**
+ * struct adin_priv - ADIN PHY driver private data
+ * @stats: statistic counters for the PHY
+ */
+struct adin_priv {
+ u64 stats[ARRAY_SIZE(adin_hw_stats)];
+};
+
+static int adin_lookup_reg_value(const struct adin_cfg_reg_map *tbl, int cfg)
+{
+ size_t i;
+
+ for (i = 0; tbl[i].cfg; i++) {
+ if (tbl[i].cfg == cfg)
+ return tbl[i].reg;
+ }
+
+ return -EINVAL;
+}
+
+static u32 adin_get_reg_value(struct phy_device *phydev,
+ const char *prop_name,
+ const struct adin_cfg_reg_map *tbl,
+ u32 dflt)
+{
+ struct device *dev = &phydev->mdio.dev;
+ u32 val;
+ int rc;
+
+ if (device_property_read_u32(dev, prop_name, &val))
+ return dflt;
+
+ rc = adin_lookup_reg_value(tbl, val);
+ if (rc < 0) {
+ phydev_warn(phydev,
+ "Unsupported value %u for %s using default (%u)\n",
+ val, prop_name, dflt);
+ return dflt;
+ }
+
+ return rc;
+}
+
+static int adin_config_rgmii_mode(struct phy_device *phydev)
+{
+ u32 val;
+ int reg;
+
+ if (!phy_interface_is_rgmii(phydev))
+ return phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1,
+ ADIN1300_GE_RGMII_CFG_REG,
+ ADIN1300_GE_RGMII_EN);
+
+ reg = phy_read_mmd(phydev, MDIO_MMD_VEND1, ADIN1300_GE_RGMII_CFG_REG);
+ if (reg < 0)
+ return reg;
+
+ reg |= ADIN1300_GE_RGMII_EN;
+
+ if (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID ||
+ phydev->interface == PHY_INTERFACE_MODE_RGMII_RXID) {
+ reg |= ADIN1300_GE_RGMII_RXID_EN;
+
+ val = adin_get_reg_value(phydev, "adi,rx-internal-delay-ps",
+ adin_rgmii_delays,
+ ADIN1300_RGMII_2_00_NS);
+ reg &= ~ADIN1300_GE_RGMII_RX_MSK;
+ reg |= ADIN1300_GE_RGMII_RX_SEL(val);
+ } else {
+ reg &= ~ADIN1300_GE_RGMII_RXID_EN;
+ }
+
+ if (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID ||
+ phydev->interface == PHY_INTERFACE_MODE_RGMII_TXID) {
+ reg |= ADIN1300_GE_RGMII_TXID_EN;
+
+ val = adin_get_reg_value(phydev, "adi,tx-internal-delay-ps",
+ adin_rgmii_delays,
+ ADIN1300_RGMII_2_00_NS);
+ reg &= ~ADIN1300_GE_RGMII_GTX_MSK;
+ reg |= ADIN1300_GE_RGMII_GTX_SEL(val);
+ } else {
+ reg &= ~ADIN1300_GE_RGMII_TXID_EN;
+ }
+
+ return phy_write_mmd(phydev, MDIO_MMD_VEND1,
+ ADIN1300_GE_RGMII_CFG_REG, reg);
+}
+
+static int adin_config_rmii_mode(struct phy_device *phydev)
+{
+ u32 val;
+ int reg;
+
+ if (phydev->interface != PHY_INTERFACE_MODE_RMII)
+ return phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1,
+ ADIN1300_GE_RMII_CFG_REG,
+ ADIN1300_GE_RMII_EN);
+
+ reg = phy_read_mmd(phydev, MDIO_MMD_VEND1, ADIN1300_GE_RMII_CFG_REG);
+ if (reg < 0)
+ return reg;
+
+ reg |= ADIN1300_GE_RMII_EN;
+
+ val = adin_get_reg_value(phydev, "adi,fifo-depth-bits",
+ adin_rmii_fifo_depths,
+ ADIN1300_RMII_8_BITS);
+
+ reg &= ~ADIN1300_GE_RMII_FIFO_DEPTH_MSK;
+ reg |= ADIN1300_GE_RMII_FIFO_DEPTH_SEL(val);
+
+ return phy_write_mmd(phydev, MDIO_MMD_VEND1,
+ ADIN1300_GE_RMII_CFG_REG, reg);
+}
+
+static int adin_get_downshift(struct phy_device *phydev, u8 *data)
+{
+ int val, cnt, enable;
+
+ val = phy_read(phydev, ADIN1300_PHY_CTRL2);
+ if (val < 0)
+ return val;
+
+ cnt = phy_read(phydev, ADIN1300_PHY_CTRL3);
+ if (cnt < 0)
+ return cnt;
+
+ enable = FIELD_GET(ADIN1300_DOWNSPEEDS_EN, val);
+ cnt = FIELD_GET(ADIN1300_DOWNSPEED_RETRIES_MSK, cnt);
+
+ *data = (enable && cnt) ? cnt : DOWNSHIFT_DEV_DISABLE;
+
+ return 0;
+}
+
+static int adin_set_downshift(struct phy_device *phydev, u8 cnt)
+{
+ u16 val;
+ int rc;
+
+ if (cnt == DOWNSHIFT_DEV_DISABLE)
+ return phy_clear_bits(phydev, ADIN1300_PHY_CTRL2,
+ ADIN1300_DOWNSPEEDS_EN);
+
+ if (cnt > 7)
+ return -E2BIG;
+
+ val = FIELD_PREP(ADIN1300_DOWNSPEED_RETRIES_MSK, cnt);
+ val |= ADIN1300_LINKING_EN;
+
+ rc = phy_modify(phydev, ADIN1300_PHY_CTRL3,
+ ADIN1300_LINKING_EN | ADIN1300_DOWNSPEED_RETRIES_MSK,
+ val);
+ if (rc < 0)
+ return rc;
+
+ return phy_set_bits(phydev, ADIN1300_PHY_CTRL2,
+ ADIN1300_DOWNSPEEDS_EN);
+}
+
+static int adin_get_tunable(struct phy_device *phydev,
+ struct ethtool_tunable *tuna, void *data)
+{
+ switch (tuna->id) {
+ case ETHTOOL_PHY_DOWNSHIFT:
+ return adin_get_downshift(phydev, data);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int adin_set_tunable(struct phy_device *phydev,
+ struct ethtool_tunable *tuna, const void *data)
+{
+ switch (tuna->id) {
+ case ETHTOOL_PHY_DOWNSHIFT:
+ return adin_set_downshift(phydev, *(const u8 *)data);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int adin_config_init(struct phy_device *phydev)
+{
+ int rc;
+
+ phydev->mdix_ctrl = ETH_TP_MDI_AUTO;
+
+ rc = adin_config_rgmii_mode(phydev);
+ if (rc < 0)
+ return rc;
+
+ rc = adin_config_rmii_mode(phydev);
+ if (rc < 0)
+ return rc;
+
+ rc = adin_set_downshift(phydev, 4);
+ if (rc < 0)
+ return rc;
+
+ phydev_dbg(phydev, "PHY is using mode '%s'\n",
+ phy_modes(phydev->interface));
+
+ return 0;
+}
+
+static int adin_phy_ack_intr(struct phy_device *phydev)
+{
+ /* Clear pending interrupts */
+ int rc = phy_read(phydev, ADIN1300_INT_STATUS_REG);
+
+ return rc < 0 ? rc : 0;
+}
+
+static int adin_phy_config_intr(struct phy_device *phydev)
+{
+ if (phydev->interrupts == PHY_INTERRUPT_ENABLED)
+ return phy_set_bits(phydev, ADIN1300_INT_MASK_REG,
+ ADIN1300_INT_MASK_EN);
+
+ return phy_clear_bits(phydev, ADIN1300_INT_MASK_REG,
+ ADIN1300_INT_MASK_EN);
+}
+
+static int adin_cl45_to_adin_reg(struct phy_device *phydev, int devad,
+ u16 cl45_regnum)
+{
+ struct adin_clause45_mmd_map *m;
+ int i;
+
+ if (devad == MDIO_MMD_VEND1)
+ return cl45_regnum;
+
+ for (i = 0; i < ARRAY_SIZE(adin_clause45_mmd_map); i++) {
+ m = &adin_clause45_mmd_map[i];
+ if (m->devad == devad && m->cl45_regnum == cl45_regnum)
+ return m->adin_regnum;
+ }
+
+ phydev_err(phydev,
+ "No translation available for devad: %d reg: %04x\n",
+ devad, cl45_regnum);
+
+ return -EINVAL;
+}
+
+static int adin_read_mmd(struct phy_device *phydev, int devad, u16 regnum)
+{
+ struct mii_bus *bus = phydev->mdio.bus;
+ int phy_addr = phydev->mdio.addr;
+ int adin_regnum;
+ int err;
+
+ adin_regnum = adin_cl45_to_adin_reg(phydev, devad, regnum);
+ if (adin_regnum < 0)
+ return adin_regnum;
+
+ err = __mdiobus_write(bus, phy_addr, ADIN1300_MII_EXT_REG_PTR,
+ adin_regnum);
+ if (err)
+ return err;
+
+ return __mdiobus_read(bus, phy_addr, ADIN1300_MII_EXT_REG_DATA);
+}
+
+static int adin_write_mmd(struct phy_device *phydev, int devad, u16 regnum,
+ u16 val)
+{
+ struct mii_bus *bus = phydev->mdio.bus;
+ int phy_addr = phydev->mdio.addr;
+ int adin_regnum;
+ int err;
+
+ adin_regnum = adin_cl45_to_adin_reg(phydev, devad, regnum);
+ if (adin_regnum < 0)
+ return adin_regnum;
+
+ err = __mdiobus_write(bus, phy_addr, ADIN1300_MII_EXT_REG_PTR,
+ adin_regnum);
+ if (err)
+ return err;
+
+ return __mdiobus_write(bus, phy_addr, ADIN1300_MII_EXT_REG_DATA, val);
+}
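
Together these two helpers emulate Clause 45 access through the vendor's Clause 22 pointer/data register pair. A rough sketch of the flow for one EEE capability read, assuming the mapping table above (demo_eee_cap_read() is invented):

	static int demo_eee_cap_read(struct phy_device *phydev)
	{
		/* phy_read_mmd(MDIO_MMD_PCS, MDIO_PCS_EEE_ABLE) lands in
		 * adin_read_mmd(), which:
		 *   1) translates the (devad, regnum) pair via
		 *      adin_clause45_mmd_map to ADIN1300_EEE_CAP_REG;
		 *   2) writes that address to ADIN1300_MII_EXT_REG_PTR;
		 *   3) reads the value from ADIN1300_MII_EXT_REG_DATA.
		 */
		return phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_PCS_EEE_ABLE);
	}
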
+
+static int adin_config_mdix(struct phy_device *phydev)
+{
+ bool auto_en, mdix_en;
+ int reg;
+
+ mdix_en = false;
+ auto_en = false;
+ switch (phydev->mdix_ctrl) {
+ case ETH_TP_MDI:
+ break;
+ case ETH_TP_MDI_X:
+ mdix_en = true;
+ break;
+ case ETH_TP_MDI_AUTO:
+ auto_en = true;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ reg = phy_read(phydev, ADIN1300_PHY_CTRL1);
+ if (reg < 0)
+ return reg;
+
+ if (mdix_en)
+ reg |= ADIN1300_MAN_MDIX_EN;
+ else
+ reg &= ~ADIN1300_MAN_MDIX_EN;
+
+ if (auto_en)
+ reg |= ADIN1300_AUTO_MDI_EN;
+ else
+ reg &= ~ADIN1300_AUTO_MDI_EN;
+
+ return phy_write(phydev, ADIN1300_PHY_CTRL1, reg);
+}
+
+static int adin_config_aneg(struct phy_device *phydev)
+{
+ int ret;
+
+ ret = adin_config_mdix(phydev);
+ if (ret)
+ return ret;
+
+ return genphy_config_aneg(phydev);
+}
+
+static int adin_mdix_update(struct phy_device *phydev)
+{
+ bool auto_en, mdix_en;
+ bool swapped;
+ int reg;
+
+ reg = phy_read(phydev, ADIN1300_PHY_CTRL1);
+ if (reg < 0)
+ return reg;
+
+ auto_en = !!(reg & ADIN1300_AUTO_MDI_EN);
+ mdix_en = !!(reg & ADIN1300_MAN_MDIX_EN);
+
+ /* If MDI/MDIX is forced, just read it from the control reg */
+ if (!auto_en) {
+ if (mdix_en)
+ phydev->mdix = ETH_TP_MDI_X;
+ else
+ phydev->mdix = ETH_TP_MDI;
+ return 0;
+ }
+
+ /*
+ * Otherwise, we need to deduce it from the PHY status 1 reg.
+ * When Auto-MDI is enabled, the ADIN1300_MAN_MDIX_EN bit implies
+ * a preference for MDIX when it is set.
+ */
+ reg = phy_read(phydev, ADIN1300_PHY_STATUS1);
+ if (reg < 0)
+ return reg;
+
+ swapped = !!(reg & ADIN1300_PAIR_01_SWAP);
+
+ if (mdix_en != swapped)
+ phydev->mdix = ETH_TP_MDI_X;
+ else
+ phydev->mdix = ETH_TP_MDI;
+
+ return 0;
+}
+
+static int adin_read_status(struct phy_device *phydev)
+{
+ int ret;
+
+ ret = adin_mdix_update(phydev);
+ if (ret < 0)
+ return ret;
+
+ return genphy_read_status(phydev);
+}
+
+static int adin_soft_reset(struct phy_device *phydev)
+{
+ int rc;
+
+ /* The reset bit is self-clearing, set it and wait */
+ rc = phy_set_bits_mmd(phydev, MDIO_MMD_VEND1,
+ ADIN1300_GE_SOFT_RESET_REG,
+ ADIN1300_GE_SOFT_RESET);
+ if (rc < 0)
+ return rc;
+
+ msleep(10);
+
+ /* If we get a read error, something may be wrong */
+ rc = phy_read_mmd(phydev, MDIO_MMD_VEND1,
+ ADIN1300_GE_SOFT_RESET_REG);
+
+ return rc < 0 ? rc : 0;
+}
+
+static int adin_get_sset_count(struct phy_device *phydev)
+{
+ return ARRAY_SIZE(adin_hw_stats);
+}
+
+static void adin_get_strings(struct phy_device *phydev, u8 *data)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(adin_hw_stats); i++) {
+ strlcpy(&data[i * ETH_GSTRING_LEN],
+ adin_hw_stats[i].string, ETH_GSTRING_LEN);
+ }
+}
+
+static int adin_read_mmd_stat_regs(struct phy_device *phydev,
+ struct adin_hw_stat *stat,
+ u32 *val)
+{
+ int ret;
+
+ ret = phy_read_mmd(phydev, MDIO_MMD_VEND1, stat->reg1);
+ if (ret < 0)
+ return ret;
+
+ *val = (ret & 0xffff);
+
+ if (stat->reg2 == 0)
+ return 0;
+
+ ret = phy_read_mmd(phydev, MDIO_MMD_VEND1, stat->reg2);
+ if (ret < 0)
+ return ret;
+
+ *val <<= 16;
+ *val |= (ret & 0xffff);
+
+ return 0;
+}
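
A worked example of the hi/lo combine above (demo_combine_hi_lo() is invented; the values are arbitrary):

	static u32 demo_combine_hi_lo(u16 hi, u16 lo)
	{
		u32 val = hi;	/* e.g. 0x0001 from reg1 (0x940A) */

		val <<= 16;	/* make room for the low half     */
		val |= lo;	/* e.g. 0x2345 from reg2 (0x940B) */

		return val;	/* 0x00012345                     */
	}
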
+
+static u64 adin_get_stat(struct phy_device *phydev, int i)
+{
+ struct adin_hw_stat *stat = &adin_hw_stats[i];
+ struct adin_priv *priv = phydev->priv;
+ u32 val;
+ int ret;
+
+ if (stat->reg1 > 0x1f) {
+ ret = adin_read_mmd_stat_regs(phydev, stat, &val);
+ if (ret < 0)
+ return (u64)(~0);
+ } else {
+ ret = phy_read(phydev, stat->reg1);
+ if (ret < 0)
+ return (u64)(~0);
+ val = (ret & 0xffff);
+ }
+
+ priv->stats[i] += val;
+
+ return priv->stats[i];
+}
+
+static void adin_get_stats(struct phy_device *phydev,
+ struct ethtool_stats *stats, u64 *data)
+{
+ int i, rc;
+
+ /* latch copies of all the frame-checker counters */
+ rc = phy_read(phydev, ADIN1300_RX_ERR_CNT);
+ if (rc < 0)
+ return;
+
+ for (i = 0; i < ARRAY_SIZE(adin_hw_stats); i++)
+ data[i] = adin_get_stat(phydev, i);
+}
+
+static int adin_probe(struct phy_device *phydev)
+{
+ struct device *dev = &phydev->mdio.dev;
+ struct adin_priv *priv;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ phydev->priv = priv;
+
+ return 0;
+}
+
+static struct phy_driver adin_driver[] = {
+ {
+ PHY_ID_MATCH_MODEL(PHY_ID_ADIN1200),
+ .name = "ADIN1200",
+ .probe = adin_probe,
+ .config_init = adin_config_init,
+ .soft_reset = adin_soft_reset,
+ .config_aneg = adin_config_aneg,
+ .read_status = adin_read_status,
+ .get_tunable = adin_get_tunable,
+ .set_tunable = adin_set_tunable,
+ .ack_interrupt = adin_phy_ack_intr,
+ .config_intr = adin_phy_config_intr,
+ .get_sset_count = adin_get_sset_count,
+ .get_strings = adin_get_strings,
+ .get_stats = adin_get_stats,
+ .resume = genphy_resume,
+ .suspend = genphy_suspend,
+ .read_mmd = adin_read_mmd,
+ .write_mmd = adin_write_mmd,
+ },
+ {
+ PHY_ID_MATCH_MODEL(PHY_ID_ADIN1300),
+ .name = "ADIN1300",
+ .probe = adin_probe,
+ .config_init = adin_config_init,
+ .soft_reset = adin_soft_reset,
+ .config_aneg = adin_config_aneg,
+ .read_status = adin_read_status,
+ .get_tunable = adin_get_tunable,
+ .set_tunable = adin_set_tunable,
+ .ack_interrupt = adin_phy_ack_intr,
+ .config_intr = adin_phy_config_intr,
+ .get_sset_count = adin_get_sset_count,
+ .get_strings = adin_get_strings,
+ .get_stats = adin_get_stats,
+ .resume = genphy_resume,
+ .suspend = genphy_suspend,
+ .read_mmd = adin_read_mmd,
+ .write_mmd = adin_write_mmd,
+ },
+};
+
+module_phy_driver(adin_driver);
+
+static struct mdio_device_id __maybe_unused adin_tbl[] = {
+ { PHY_ID_MATCH_MODEL(PHY_ID_ADIN1200) },
+ { PHY_ID_MATCH_MODEL(PHY_ID_ADIN1300) },
+ { }
+};
+
+MODULE_DEVICE_TABLE(mdio, adin_tbl);
+MODULE_DESCRIPTION("Analog Devices Industrial Ethernet PHY driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/phy/at803x.c b/drivers/net/phy/at803x.c
index 6ad8b1c63c34..2aa7b2e60046 100644
--- a/drivers/net/phy/at803x.c
+++ b/drivers/net/phy/at803x.c
@@ -249,10 +249,6 @@ static int at803x_config_init(struct phy_device *phydev)
{
int ret;
- ret = genphy_config_init(phydev);
- if (ret < 0)
- return ret;
-
/* The RX and TX delay default is:
* after HW reset: RX delay enabled and TX delay disabled
* after SW reset: RX delay enabled, while TX delay retains the
diff --git a/drivers/net/phy/dp83822.c b/drivers/net/phy/dp83822.c
index 7ed4760fb155..8a4b1d167ce2 100644
--- a/drivers/net/phy/dp83822.c
+++ b/drivers/net/phy/dp83822.c
@@ -254,13 +254,8 @@ static int dp83822_config_intr(struct phy_device *phydev)
static int dp83822_config_init(struct phy_device *phydev)
{
- int err;
int value;
- err = genphy_config_init(phydev);
- if (err < 0)
- return err;
-
value = DP83822_WOL_MAGIC_EN | DP83822_WOL_SECURE_ON | DP83822_WOL_EN;
return phy_write_mmd(phydev, DP83822_DEVADDR, MII_DP83822_WOL_CFG,
diff --git a/drivers/net/phy/dp83848.c b/drivers/net/phy/dp83848.c
index 6f9bc7d91f17..54c7c1b44e4d 100644
--- a/drivers/net/phy/dp83848.c
+++ b/drivers/net/phy/dp83848.c
@@ -68,13 +68,8 @@ static int dp83848_config_intr(struct phy_device *phydev)
static int dp83848_config_init(struct phy_device *phydev)
{
- int err;
int val;
- err = genphy_config_init(phydev);
- if (err < 0)
- return err;
-
/* DP83620 always reports Auto Negotiation Ability on BMSR. Instead,
* we check initial value of BMCR Auto negotiation enable bit
*/
@@ -113,13 +108,13 @@ MODULE_DEVICE_TABLE(mdio, dp83848_tbl);
static struct phy_driver dp83848_driver[] = {
DP83848_PHY_DRIVER(TI_DP83848C_PHY_ID, "TI DP83848C 10/100 Mbps PHY",
- genphy_config_init),
+ NULL),
DP83848_PHY_DRIVER(NS_DP83848C_PHY_ID, "NS DP83848C 10/100 Mbps PHY",
- genphy_config_init),
+ NULL),
DP83848_PHY_DRIVER(TI_DP83620_PHY_ID, "TI DP83620 10/100 Mbps PHY",
dp83848_config_init),
DP83848_PHY_DRIVER(TLK10X_PHY_ID, "TI TLK10X 10/100 Mbps PHY",
- genphy_config_init),
+ NULL),
};
module_phy_driver(dp83848_driver);
diff --git a/drivers/net/phy/dp83tc811.c b/drivers/net/phy/dp83tc811.c
index ac27da16824d..06f08832ebcd 100644
--- a/drivers/net/phy/dp83tc811.c
+++ b/drivers/net/phy/dp83tc811.c
@@ -277,10 +277,6 @@ static int dp83811_config_init(struct phy_device *phydev)
{
int value, err;
- err = genphy_config_init(phydev);
- if (err < 0)
- return err;
-
value = phy_read(phydev, MII_DP83811_SGMII_CTRL);
if (phydev->interface == PHY_INTERFACE_MODE_SGMII) {
err = phy_write(phydev, MII_DP83811_SGMII_CTRL,
diff --git a/drivers/net/phy/mdio-aspeed.c b/drivers/net/phy/mdio-aspeed.c
new file mode 100644
index 000000000000..cad820568f75
--- /dev/null
+++ b/drivers/net/phy/mdio-aspeed.c
@@ -0,0 +1,157 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/* Copyright (C) 2019 IBM Corp. */
+
+#include <linux/bitfield.h>
+#include <linux/delay.h>
+#include <linux/iopoll.h>
+#include <linux/mdio.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_mdio.h>
+#include <linux/phy.h>
+#include <linux/platform_device.h>
+
+#define DRV_NAME "mdio-aspeed"
+
+#define ASPEED_MDIO_CTRL 0x0
+#define ASPEED_MDIO_CTRL_FIRE BIT(31)
+#define ASPEED_MDIO_CTRL_ST BIT(28)
+#define ASPEED_MDIO_CTRL_ST_C45 0
+#define ASPEED_MDIO_CTRL_ST_C22 1
+#define ASPEED_MDIO_CTRL_OP GENMASK(27, 26)
+#define MDIO_C22_OP_WRITE 0b01
+#define MDIO_C22_OP_READ 0b10
+#define ASPEED_MDIO_CTRL_PHYAD GENMASK(25, 21)
+#define ASPEED_MDIO_CTRL_REGAD GENMASK(20, 16)
+#define ASPEED_MDIO_CTRL_MIIWDATA GENMASK(15, 0)
+
+#define ASPEED_MDIO_DATA 0x4
+#define ASPEED_MDIO_DATA_MDC_THRES GENMASK(31, 24)
+#define ASPEED_MDIO_DATA_MDIO_EDGE BIT(23)
+#define ASPEED_MDIO_DATA_MDIO_LATCH GENMASK(22, 20)
+#define ASPEED_MDIO_DATA_IDLE BIT(16)
+#define ASPEED_MDIO_DATA_MIIRDATA GENMASK(15, 0)
+
+#define ASPEED_MDIO_INTERVAL_US 100
+#define ASPEED_MDIO_TIMEOUT_US (ASPEED_MDIO_INTERVAL_US * 10)
+
+struct aspeed_mdio {
+ void __iomem *base;
+};
+
+static int aspeed_mdio_read(struct mii_bus *bus, int addr, int regnum)
+{
+ struct aspeed_mdio *ctx = bus->priv;
+ u32 ctrl;
+ u32 data;
+ int rc;
+
+ dev_dbg(&bus->dev, "%s: addr: %d, regnum: %d\n", __func__, addr,
+ regnum);
+
+ /* Just clause 22 for the moment */
+ if (regnum & MII_ADDR_C45)
+ return -EOPNOTSUPP;
+
+ ctrl = ASPEED_MDIO_CTRL_FIRE
+ | FIELD_PREP(ASPEED_MDIO_CTRL_ST, ASPEED_MDIO_CTRL_ST_C22)
+ | FIELD_PREP(ASPEED_MDIO_CTRL_OP, MDIO_C22_OP_READ)
+ | FIELD_PREP(ASPEED_MDIO_CTRL_PHYAD, addr)
+ | FIELD_PREP(ASPEED_MDIO_CTRL_REGAD, regnum);
+
+ iowrite32(ctrl, ctx->base + ASPEED_MDIO_CTRL);
+
+ rc = readl_poll_timeout(ctx->base + ASPEED_MDIO_DATA, data,
+ data & ASPEED_MDIO_DATA_IDLE,
+ ASPEED_MDIO_INTERVAL_US,
+ ASPEED_MDIO_TIMEOUT_US);
+ if (rc < 0)
+ return rc;
+
+ return FIELD_GET(ASPEED_MDIO_DATA_MIIRDATA, data);
+}
+
+static int aspeed_mdio_write(struct mii_bus *bus, int addr, int regnum, u16 val)
+{
+ struct aspeed_mdio *ctx = bus->priv;
+ u32 ctrl;
+
+ dev_dbg(&bus->dev, "%s: addr: %d, regnum: %d, val: 0x%x\n",
+ __func__, addr, regnum, val);
+
+ /* Just clause 22 for the moment */
+ if (regnum & MII_ADDR_C45)
+ return -EOPNOTSUPP;
+
+ ctrl = ASPEED_MDIO_CTRL_FIRE
+ | FIELD_PREP(ASPEED_MDIO_CTRL_ST, ASPEED_MDIO_CTRL_ST_C22)
+ | FIELD_PREP(ASPEED_MDIO_CTRL_OP, MDIO_C22_OP_WRITE)
+ | FIELD_PREP(ASPEED_MDIO_CTRL_PHYAD, addr)
+ | FIELD_PREP(ASPEED_MDIO_CTRL_REGAD, regnum)
+ | FIELD_PREP(ASPEED_MDIO_CTRL_MIIWDATA, val);
+
+ iowrite32(ctrl, ctx->base + ASPEED_MDIO_CTRL);
+
+ return readl_poll_timeout(ctx->base + ASPEED_MDIO_CTRL, ctrl,
+ !(ctrl & ASPEED_MDIO_CTRL_FIRE),
+ ASPEED_MDIO_INTERVAL_US,
+ ASPEED_MDIO_TIMEOUT_US);
+}
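
Both transfer paths rely on readl_poll_timeout() from <linux/iopoll.h>, which re-reads a register until a condition holds or the timeout expires. A minimal sketch of its contract, reusing the constants above (demo_wait_idle() is invented):

	/*
	 * readl_poll_timeout(addr, val, cond, sleep_us, timeout_us)
	 * repeatedly does val = readl(addr) until cond is true.
	 * It returns 0 on success and -ETIMEDOUT if cond never held.
	 */
	static int demo_wait_idle(struct aspeed_mdio *ctx)
	{
		u32 data;

		return readl_poll_timeout(ctx->base + ASPEED_MDIO_DATA, data,
					  data & ASPEED_MDIO_DATA_IDLE,
					  ASPEED_MDIO_INTERVAL_US,
					  ASPEED_MDIO_TIMEOUT_US);
	}
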
+
+static int aspeed_mdio_probe(struct platform_device *pdev)
+{
+ struct aspeed_mdio *ctx;
+ struct mii_bus *bus;
+ int rc;
+
+ bus = devm_mdiobus_alloc_size(&pdev->dev, sizeof(*ctx));
+ if (!bus)
+ return -ENOMEM;
+
+ ctx = bus->priv;
+ ctx->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(ctx->base))
+ return PTR_ERR(ctx->base);
+
+ bus->name = DRV_NAME;
+ snprintf(bus->id, MII_BUS_ID_SIZE, "%s%d", pdev->name, pdev->id);
+ bus->parent = &pdev->dev;
+ bus->read = aspeed_mdio_read;
+ bus->write = aspeed_mdio_write;
+
+ rc = of_mdiobus_register(bus, pdev->dev.of_node);
+ if (rc) {
+ dev_err(&pdev->dev, "Cannot register MDIO bus!\n");
+ return rc;
+ }
+
+ platform_set_drvdata(pdev, bus);
+
+ return 0;
+}
+
+static int aspeed_mdio_remove(struct platform_device *pdev)
+{
+ mdiobus_unregister(platform_get_drvdata(pdev));
+
+ return 0;
+}
+
+static const struct of_device_id aspeed_mdio_of_match[] = {
+ { .compatible = "aspeed,ast2600-mdio", },
+ { },
+};
+
+static struct platform_driver aspeed_mdio_driver = {
+ .driver = {
+ .name = DRV_NAME,
+ .of_match_table = aspeed_mdio_of_match,
+ },
+ .probe = aspeed_mdio_probe,
+ .remove = aspeed_mdio_remove,
+};
+
+module_platform_driver(aspeed_mdio_driver);
+
+MODULE_AUTHOR("Andrew Jeffery <andrew@aj.id.au>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/phy/mdio-cavium.h b/drivers/net/phy/mdio-cavium.h
index ed5f9bb5448d..b7f89ad27465 100644
--- a/drivers/net/phy/mdio-cavium.h
+++ b/drivers/net/phy/mdio-cavium.h
@@ -108,6 +108,8 @@ static inline u64 oct_mdio_readq(u64 addr)
return cvmx_read_csr(addr);
}
#else
+#include <linux/io-64-nonatomic-lo-hi.h>
+
#define oct_mdio_writeq(val, addr) writeq(val, (void *)addr)
#define oct_mdio_readq(addr) readq((void *)addr)
#endif
diff --git a/drivers/net/phy/mdio-xgene.c b/drivers/net/phy/mdio-xgene.c
index 717cc2a056e8..34990eaa3298 100644
--- a/drivers/net/phy/mdio-xgene.c
+++ b/drivers/net/phy/mdio-xgene.c
@@ -328,7 +328,6 @@ static int xgene_mdio_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct mii_bus *mdio_bus;
const struct of_device_id *of_id;
- struct resource *res;
struct xgene_mdio_pdata *pdata;
void __iomem *csr_base;
int mdio_id = 0, ret = 0;
@@ -355,8 +354,7 @@ static int xgene_mdio_probe(struct platform_device *pdev)
pdata->mdio_id = mdio_id;
pdata->dev = dev;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- csr_base = devm_ioremap_resource(dev, res);
+ csr_base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(csr_base))
return PTR_ERR(csr_base);
pdata->mac_csr_addr = csr_base;
diff --git a/drivers/net/phy/meson-gxl.c b/drivers/net/phy/meson-gxl.c
index fa80d6dce8ee..e8f2ca625837 100644
--- a/drivers/net/phy/meson-gxl.c
+++ b/drivers/net/phy/meson-gxl.c
@@ -136,7 +136,7 @@ static int meson_gxl_config_init(struct phy_device *phydev)
if (ret)
return ret;
- return genphy_config_init(phydev);
+ return 0;
}
/* This function is provided to cope with the possible failures of this phy
diff --git a/drivers/net/phy/microchip.c b/drivers/net/phy/microchip.c
index eb1b3287fe08..a644e8e5071c 100644
--- a/drivers/net/phy/microchip.c
+++ b/drivers/net/phy/microchip.c
@@ -305,7 +305,6 @@ static int lan88xx_config_init(struct phy_device *phydev)
{
int val;
- genphy_config_init(phydev);
/* Zerodetect delay enable */
val = phy_read_mmd(phydev, MDIO_MMD_PCS,
PHY_ARDENNES_MMD_DEV_3_PHY_CFG);
diff --git a/drivers/net/phy/microchip_t1.c b/drivers/net/phy/microchip_t1.c
index 3d09b471632c..001def4509c2 100644
--- a/drivers/net/phy/microchip_t1.c
+++ b/drivers/net/phy/microchip_t1.c
@@ -48,7 +48,6 @@ static struct phy_driver microchip_t1_phy_driver[] = {
.features = PHY_BASIC_T1_FEATURES,
- .config_init = genphy_config_init,
.config_aneg = genphy_config_aneg,
.ack_interrupt = lan87xx_phy_ack_interrupt,
diff --git a/drivers/net/phy/mscc.c b/drivers/net/phy/mscc.c
index 645d354ffb48..7ada1fd9ca71 100644
--- a/drivers/net/phy/mscc.c
+++ b/drivers/net/phy/mscc.c
@@ -1725,7 +1725,7 @@ static int vsc8584_config_init(struct phy_device *phydev)
return ret;
}
- return genphy_config_init(phydev);
+ return 0;
err:
mutex_unlock(&phydev->mdio.bus->mdio_lock);
@@ -1767,7 +1767,7 @@ static int vsc85xx_config_init(struct phy_device *phydev)
return rc;
}
- return genphy_config_init(phydev);
+ return 0;
}
static int vsc8584_did_interrupt(struct phy_device *phydev)
diff --git a/drivers/net/phy/phy-core.c b/drivers/net/phy/phy-core.c
index 16667fbac8bf..369903d9b6ec 100644
--- a/drivers/net/phy/phy-core.c
+++ b/drivers/net/phy/phy-core.c
@@ -207,14 +207,14 @@ size_t phy_speeds(unsigned int *speeds, size_t size,
return count;
}
-static int __set_phy_supported(struct phy_device *phydev, u32 max_speed)
+static int __set_linkmode_max_speed(u32 max_speed, unsigned long *addr)
{
const struct phy_setting *p;
int i;
for (i = 0, p = settings; i < ARRAY_SIZE(settings); i++, p++) {
if (p->speed > max_speed)
- linkmode_clear_bit(p->bit, phydev->supported);
+ linkmode_clear_bit(p->bit, addr);
else
break;
}
@@ -222,6 +222,11 @@ static int __set_phy_supported(struct phy_device *phydev, u32 max_speed)
return 0;
}
+static int __set_phy_supported(struct phy_device *phydev, u32 max_speed)
+{
+ return __set_linkmode_max_speed(max_speed, phydev->supported);
+}
+
int phy_set_max_speed(struct phy_device *phydev, u32 max_speed)
{
int err;
@@ -310,6 +315,34 @@ void phy_resolve_aneg_linkmode(struct phy_device *phydev)
}
EXPORT_SYMBOL_GPL(phy_resolve_aneg_linkmode);
+static int phy_resolve_min_speed(struct phy_device *phydev, bool fdx_only)
+{
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(common);
+ int i = ARRAY_SIZE(settings);
+
+ linkmode_and(common, phydev->lp_advertising, phydev->advertising);
+
+ while (--i >= 0) {
+ if (test_bit(settings[i].bit, common)) {
+ if (fdx_only && settings[i].duplex != DUPLEX_FULL)
+ continue;
+ return settings[i].speed;
+ }
+ }
+
+ return SPEED_UNKNOWN;
+}
+
+int phy_speed_down_core(struct phy_device *phydev)
+{
+ int min_common_speed = phy_resolve_min_speed(phydev, true);
+
+ if (min_common_speed == SPEED_UNKNOWN)
+ return -EINVAL;
+
+ return __set_linkmode_max_speed(min_common_speed, phydev->advertising);
+}
+
static void mmd_phy_indirect(struct mii_bus *bus, int phy_addr, int devad,
u16 regnum)
{
@@ -783,24 +816,43 @@ int phy_write_paged(struct phy_device *phydev, int page, u32 regnum, u16 val)
EXPORT_SYMBOL(phy_write_paged);
/**
- * phy_modify_paged() - Convenience function for modifying a paged register
+ * phy_modify_paged_changed() - Function for modifying a paged register
* @phydev: a pointer to a &struct phy_device
* @page: the page for the phy
* @regnum: register number
* @mask: bit mask of bits to clear
* @set: bit mask of bits to set
*
- * Same rules as for phy_read() and phy_write().
+ * Returns negative errno, 0 if there was no change, and 1 in case of change
*/
-int phy_modify_paged(struct phy_device *phydev, int page, u32 regnum,
- u16 mask, u16 set)
+int phy_modify_paged_changed(struct phy_device *phydev, int page, u32 regnum,
+ u16 mask, u16 set)
{
int ret = 0, oldpage;
oldpage = phy_select_page(phydev, page);
if (oldpage >= 0)
- ret = __phy_modify(phydev, regnum, mask, set);
+ ret = __phy_modify_changed(phydev, regnum, mask, set);
return phy_restore_page(phydev, oldpage, ret);
}
+EXPORT_SYMBOL(phy_modify_paged_changed);
+
+/**
+ * phy_modify_paged() - Convenience function for modifying a paged register
+ * @phydev: a pointer to a &struct phy_device
+ * @page: the page for the phy
+ * @regnum: register number
+ * @mask: bit mask of bits to clear
+ * @set: bit mask of bits to set
+ *
+ * Same rules as for phy_read() and phy_write().
+ */
+int phy_modify_paged(struct phy_device *phydev, int page, u32 regnum,
+ u16 mask, u16 set)
+{
+ int ret = phy_modify_paged_changed(phydev, page, regnum, mask, set);
+
+ return ret < 0 ? ret : 0;
+}
EXPORT_SYMBOL(phy_modify_paged);
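
A sketch of why the _changed variant is exported: a caller can use the 0/1 return to decide whether auto-negotiation needs a restart, which is exactly what the RTL8125 code later in this patch does (demo_config_aneg() is invented; RTL_ADV_2500FULL comes from the realtek hunk below):

	static int demo_config_aneg(struct phy_device *phydev)
	{
		int ret;

		/* 1 if the register content actually changed, else 0 */
		ret = phy_modify_paged_changed(phydev, 0xa5d, 0x12,
					       RTL_ADV_2500FULL,
					       RTL_ADV_2500FULL);
		if (ret < 0)
			return ret;

		/* only restart aneg when the advertisement changed */
		return __genphy_config_aneg(phydev, ret);
	}
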
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
index ef7aa738e0dc..f3adea9ef400 100644
--- a/drivers/net/phy/phy.c
+++ b/drivers/net/phy/phy.c
@@ -608,38 +608,21 @@ static int phy_poll_aneg_done(struct phy_device *phydev)
*/
int phy_speed_down(struct phy_device *phydev, bool sync)
{
- __ETHTOOL_DECLARE_LINK_MODE_MASK(adv_old);
- __ETHTOOL_DECLARE_LINK_MODE_MASK(adv);
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(adv_tmp);
int ret;
if (phydev->autoneg != AUTONEG_ENABLE)
return 0;
- linkmode_copy(adv_old, phydev->advertising);
- linkmode_copy(adv, phydev->lp_advertising);
- linkmode_and(adv, adv, phydev->supported);
-
- if (linkmode_test_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT, adv) ||
- linkmode_test_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT, adv)) {
- linkmode_clear_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT,
- phydev->advertising);
- linkmode_clear_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT,
- phydev->advertising);
- linkmode_clear_bit(ETHTOOL_LINK_MODE_1000baseT_Half_BIT,
- phydev->advertising);
- linkmode_clear_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
- phydev->advertising);
- } else if (linkmode_test_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT,
- adv) ||
- linkmode_test_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT,
- adv)) {
- linkmode_clear_bit(ETHTOOL_LINK_MODE_1000baseT_Half_BIT,
- phydev->advertising);
- linkmode_clear_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
- phydev->advertising);
- }
+ linkmode_copy(adv_tmp, phydev->advertising);
+
+ ret = phy_speed_down_core(phydev);
+ if (ret)
+ return ret;
- if (linkmode_equal(phydev->advertising, adv_old))
+ linkmode_copy(phydev->adv_old, adv_tmp);
+
+ if (linkmode_equal(phydev->advertising, adv_tmp))
return 0;
ret = phy_config_aneg(phydev);
@@ -658,30 +641,19 @@ EXPORT_SYMBOL_GPL(phy_speed_down);
*/
int phy_speed_up(struct phy_device *phydev)
{
- __ETHTOOL_DECLARE_LINK_MODE_MASK(all_speeds) = { 0, };
- __ETHTOOL_DECLARE_LINK_MODE_MASK(not_speeds);
- __ETHTOOL_DECLARE_LINK_MODE_MASK(supported);
- __ETHTOOL_DECLARE_LINK_MODE_MASK(adv_old);
- __ETHTOOL_DECLARE_LINK_MODE_MASK(speeds);
-
- linkmode_copy(adv_old, phydev->advertising);
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(adv_tmp);
if (phydev->autoneg != AUTONEG_ENABLE)
return 0;
- linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT, all_speeds);
- linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT, all_speeds);
- linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT, all_speeds);
- linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT, all_speeds);
- linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Half_BIT, all_speeds);
- linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT, all_speeds);
+ if (linkmode_empty(phydev->adv_old))
+ return 0;
- linkmode_andnot(not_speeds, adv_old, all_speeds);
- linkmode_copy(supported, phydev->supported);
- linkmode_and(speeds, supported, all_speeds);
- linkmode_or(phydev->advertising, not_speeds, speeds);
+ linkmode_copy(adv_tmp, phydev->advertising);
+ linkmode_copy(phydev->advertising, phydev->adv_old);
+ linkmode_zero(phydev->adv_old);
- if (linkmode_equal(phydev->advertising, adv_old))
+ if (linkmode_equal(phydev->advertising, adv_tmp))
return 0;
return phy_config_aneg(phydev);
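
The reworked pair now follows a simple save/restore protocol around phydev->adv_old (added by this series). A sketch of the intended call sequence, assuming a MAC driver that lowers link speed across suspend for Wake-on-LAN (demo_wol_cycle() is invented):

	static int demo_wol_cycle(struct phy_device *phydev)
	{
		int ret;

		/* advertise only the lowest common full-duplex speed;
		 * the previous advertisement is stashed in adv_old
		 */
		ret = phy_speed_down(phydev, false);
		if (ret)
			return ret;

		/* ... system sleeps; the WoL link stays up slowly ... */

		/* restore the stashed advertisement and renegotiate */
		return phy_speed_up(phydev);
	}
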
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
index 27ebc2c6c2d0..d347ddcac45b 100644
--- a/drivers/net/phy/phy_device.c
+++ b/drivers/net/phy/phy_device.c
@@ -1564,24 +1564,20 @@ EXPORT_SYMBOL(phy_reset_after_clk_enable);
*/
static int genphy_config_advert(struct phy_device *phydev)
{
- u32 advertise;
- int bmsr, adv;
- int err, changed = 0;
+ int err, bmsr, changed = 0;
+ u32 adv;
/* Only allow advertising what this PHY supports */
linkmode_and(phydev->advertising, phydev->advertising,
phydev->supported);
- if (!ethtool_convert_link_mode_to_legacy_u32(&advertise,
- phydev->advertising))
- phydev_warn(phydev, "PHY advertising (%*pb) more modes than genphy supports, some modes not advertised.\n",
- __ETHTOOL_LINK_MODE_MASK_NBITS,
- phydev->advertising);
+
+ adv = linkmode_adv_to_mii_adv_t(phydev->advertising);
/* Setup standard advertisement */
err = phy_modify_changed(phydev, MII_ADVERTISE,
ADVERTISE_ALL | ADVERTISE_100BASE4 |
ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM,
- ethtool_adv_to_mii_adv_t(advertise));
+ adv);
if (err < 0)
return err;
if (err > 0)
@@ -1598,13 +1594,7 @@ static int genphy_config_advert(struct phy_device *phydev)
if (!(bmsr & BMSR_ESTATEN))
return changed;
- /* Configure gigabit if it's supported */
- adv = 0;
- if (linkmode_test_bit(ETHTOOL_LINK_MODE_1000baseT_Half_BIT,
- phydev->supported) ||
- linkmode_test_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
- phydev->supported))
- adv = ethtool_adv_to_mii_ctrl1000_t(advertise);
+ adv = linkmode_adv_to_mii_ctrl1000_t(phydev->advertising);
err = phy_modify_changed(phydev, MII_CTRL1000,
ADVERTISE_1000FULL | ADVERTISE_1000HALF,
@@ -1681,18 +1671,20 @@ int genphy_restart_aneg(struct phy_device *phydev)
EXPORT_SYMBOL(genphy_restart_aneg);
/**
- * genphy_config_aneg - restart auto-negotiation or write BMCR
+ * __genphy_config_aneg - restart auto-negotiation or write BMCR
* @phydev: target phy_device struct
+ * @changed: whether autoneg is requested
*
* Description: If auto-negotiation is enabled, we configure the
* advertising, and then restart auto-negotiation. If it is not
* enabled, then we write the BMCR.
*/
-int genphy_config_aneg(struct phy_device *phydev)
+int __genphy_config_aneg(struct phy_device *phydev, bool changed)
{
- int err, changed;
+ int err;
- changed = genphy_config_eee_advert(phydev);
+ if (genphy_config_eee_advert(phydev))
+ changed = true;
if (AUTONEG_ENABLE != phydev->autoneg)
return genphy_setup_forced(phydev);
@@ -1700,10 +1692,10 @@ int genphy_config_aneg(struct phy_device *phydev)
err = genphy_config_advert(phydev);
if (err < 0) /* error */
return err;
+ else if (err)
+ changed = true;
- changed |= err;
-
- if (changed == 0) {
+ if (!changed) {
/* Advertisement hasn't changed, but maybe aneg was never on to
* begin with? Or maybe phy was isolated?
*/
@@ -1713,18 +1705,15 @@ int genphy_config_aneg(struct phy_device *phydev)
return ctl;
if (!(ctl & BMCR_ANENABLE) || (ctl & BMCR_ISOLATE))
- changed = 1; /* do restart aneg */
+ changed = true; /* do restart aneg */
}
/* Only restart aneg if we are advertising something different
* than we were before.
*/
- if (changed > 0)
- return genphy_restart_aneg(phydev);
-
- return 0;
+ return changed ? genphy_restart_aneg(phydev) : 0;
}
-EXPORT_SYMBOL(genphy_config_aneg);
+EXPORT_SYMBOL(__genphy_config_aneg);
/**
* genphy_aneg_done - return auto-negotiation status
@@ -1805,7 +1794,7 @@ EXPORT_SYMBOL(genphy_update_link);
*/
int genphy_read_status(struct phy_device *phydev)
{
- int adv, lpa, lpagb, err, old_link = phydev->link;
+ int lpa, lpagb, err, old_link = phydev->link;
/* Update the link, but return if there was an error */
err = genphy_update_link(phydev);
@@ -1821,19 +1810,18 @@ int genphy_read_status(struct phy_device *phydev)
phydev->pause = 0;
phydev->asym_pause = 0;
- linkmode_zero(phydev->lp_advertising);
-
if (phydev->autoneg == AUTONEG_ENABLE && phydev->autoneg_complete) {
if (phydev->is_gigabit_capable) {
lpagb = phy_read(phydev, MII_STAT1000);
if (lpagb < 0)
return lpagb;
- adv = phy_read(phydev, MII_CTRL1000);
- if (adv < 0)
- return adv;
-
if (lpagb & LPA_1000MSFAIL) {
+ int adv = phy_read(phydev, MII_CTRL1000);
+
+ if (adv < 0)
+ return adv;
+
if (adv & CTL1000_ENABLE_MASTER)
phydev_err(phydev, "Master/Slave resolution failed, maybe conflicting manual settings?\n");
else
@@ -1907,57 +1895,6 @@ int genphy_soft_reset(struct phy_device *phydev)
}
EXPORT_SYMBOL(genphy_soft_reset);
-int genphy_config_init(struct phy_device *phydev)
-{
- int val;
- __ETHTOOL_DECLARE_LINK_MODE_MASK(features) = { 0, };
-
- linkmode_set_bit_array(phy_basic_ports_array,
- ARRAY_SIZE(phy_basic_ports_array),
- features);
- linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, features);
- linkmode_set_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, features);
-
- /* Do we support autonegotiation? */
- val = phy_read(phydev, MII_BMSR);
- if (val < 0)
- return val;
-
- if (val & BMSR_ANEGCAPABLE)
- linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, features);
-
- if (val & BMSR_100FULL)
- linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT, features);
- if (val & BMSR_100HALF)
- linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT, features);
- if (val & BMSR_10FULL)
- linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT, features);
- if (val & BMSR_10HALF)
- linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT, features);
-
- if (val & BMSR_ESTATEN) {
- val = phy_read(phydev, MII_ESTATUS);
- if (val < 0)
- return val;
-
- if (val & ESTATUS_1000_TFULL)
- linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
- features);
- if (val & ESTATUS_1000_THALF)
- linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Half_BIT,
- features);
- if (val & ESTATUS_1000_XFULL)
- linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseX_Full_BIT,
- features);
- }
-
- linkmode_and(phydev->supported, phydev->supported, features);
- linkmode_and(phydev->advertising, phydev->advertising, features);
-
- return 0;
-}
-EXPORT_SYMBOL(genphy_config_init);
-
/**
* genphy_read_abilities - read PHY abilities from Clause 22 registers
* @phydev: target phy_device struct
diff --git a/drivers/net/phy/realtek.c b/drivers/net/phy/realtek.c
index a669945eb829..677c45985338 100644
--- a/drivers/net/phy/realtek.c
+++ b/drivers/net/phy/realtek.c
@@ -39,6 +39,16 @@
#define RTL8366RB_POWER_SAVE 0x15
#define RTL8366RB_POWER_SAVE_ON BIT(12)
+#define RTL_SUPPORTS_5000FULL BIT(14)
+#define RTL_SUPPORTS_2500FULL BIT(13)
+#define RTL_SUPPORTS_10000FULL BIT(0)
+#define RTL_ADV_2500FULL BIT(7)
+#define RTL_LPADV_10000FULL BIT(11)
+#define RTL_LPADV_5000FULL BIT(6)
+#define RTL_LPADV_2500FULL BIT(5)
+
+#define RTL_GENERIC_PHYID 0x001cc800
+
MODULE_DESCRIPTION("Realtek PHY driver");
MODULE_AUTHOR("Johnson Leung");
MODULE_LICENSE("GPL");
@@ -256,6 +266,166 @@ static int rtl8366rb_config_init(struct phy_device *phydev)
return ret;
}
+static int rtlgen_read_mmd(struct phy_device *phydev, int devnum, u16 regnum)
+{
+ int ret;
+
+ if (devnum == MDIO_MMD_PCS && regnum == MDIO_PCS_EEE_ABLE) {
+ rtl821x_write_page(phydev, 0xa5c);
+ ret = __phy_read(phydev, 0x12);
+ rtl821x_write_page(phydev, 0);
+ } else if (devnum == MDIO_MMD_AN && regnum == MDIO_AN_EEE_ADV) {
+ rtl821x_write_page(phydev, 0xa5d);
+ ret = __phy_read(phydev, 0x10);
+ rtl821x_write_page(phydev, 0);
+ } else if (devnum == MDIO_MMD_AN && regnum == MDIO_AN_EEE_LPABLE) {
+ rtl821x_write_page(phydev, 0xa5d);
+ ret = __phy_read(phydev, 0x11);
+ rtl821x_write_page(phydev, 0);
+ } else {
+ ret = -EOPNOTSUPP;
+ }
+
+ return ret;
+}
+
+static int rtlgen_write_mmd(struct phy_device *phydev, int devnum, u16 regnum,
+ u16 val)
+{
+ int ret;
+
+ if (devnum == MDIO_MMD_AN && regnum == MDIO_AN_EEE_ADV) {
+ rtl821x_write_page(phydev, 0xa5d);
+ ret = __phy_write(phydev, 0x10, val);
+ rtl821x_write_page(phydev, 0);
+ } else {
+ ret = -EOPNOTSUPP;
+ }
+
+ return ret;
+}
+
+static int rtl8125_read_mmd(struct phy_device *phydev, int devnum, u16 regnum)
+{
+ int ret = rtlgen_read_mmd(phydev, devnum, regnum);
+
+ if (ret != -EOPNOTSUPP)
+ return ret;
+
+ if (devnum == MDIO_MMD_PCS && regnum == MDIO_PCS_EEE_ABLE2) {
+ rtl821x_write_page(phydev, 0xa6e);
+ ret = __phy_read(phydev, 0x16);
+ rtl821x_write_page(phydev, 0);
+ } else if (devnum == MDIO_MMD_AN && regnum == MDIO_AN_EEE_ADV2) {
+ rtl821x_write_page(phydev, 0xa6d);
+ ret = __phy_read(phydev, 0x12);
+ rtl821x_write_page(phydev, 0);
+ } else if (devnum == MDIO_MMD_AN && regnum == MDIO_AN_EEE_LPABLE2) {
+ rtl821x_write_page(phydev, 0xa6d);
+ ret = __phy_read(phydev, 0x10);
+ rtl821x_write_page(phydev, 0);
+ }
+
+ return ret;
+}
+
+static int rtl8125_write_mmd(struct phy_device *phydev, int devnum, u16 regnum,
+ u16 val)
+{
+ int ret = rtlgen_write_mmd(phydev, devnum, regnum, val);
+
+ if (ret != -EOPNOTSUPP)
+ return ret;
+
+ if (devnum == MDIO_MMD_AN && regnum == MDIO_AN_EEE_ADV2) {
+ rtl821x_write_page(phydev, 0xa6d);
+ ret = __phy_write(phydev, 0x12, val);
+ rtl821x_write_page(phydev, 0);
+ }
+
+ return ret;
+}
+
+static int rtl8125_get_features(struct phy_device *phydev)
+{
+ int val;
+
+ val = phy_read_paged(phydev, 0xa61, 0x13);
+ if (val < 0)
+ return val;
+
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_2500baseT_Full_BIT,
+ phydev->supported, val & RTL_SUPPORTS_2500FULL);
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_5000baseT_Full_BIT,
+ phydev->supported, val & RTL_SUPPORTS_5000FULL);
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_10000baseT_Full_BIT,
+ phydev->supported, val & RTL_SUPPORTS_10000FULL);
+
+ return genphy_read_abilities(phydev);
+}
+
+static int rtl8125_config_aneg(struct phy_device *phydev)
+{
+ int ret = 0;
+
+ if (phydev->autoneg == AUTONEG_ENABLE) {
+ u16 adv2500 = 0;
+
+ if (linkmode_test_bit(ETHTOOL_LINK_MODE_2500baseT_Full_BIT,
+ phydev->advertising))
+ adv2500 = RTL_ADV_2500FULL;
+
+ ret = phy_modify_paged_changed(phydev, 0xa5d, 0x12,
+ RTL_ADV_2500FULL, adv2500);
+ if (ret < 0)
+ return ret;
+ }
+
+ return __genphy_config_aneg(phydev, ret);
+}
+
+static int rtl8125_read_status(struct phy_device *phydev)
+{
+ if (phydev->autoneg == AUTONEG_ENABLE) {
+ int lpadv = phy_read_paged(phydev, 0xa5d, 0x13);
+
+ if (lpadv < 0)
+ return lpadv;
+
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_10000baseT_Full_BIT,
+ phydev->lp_advertising, lpadv & RTL_LPADV_10000FULL);
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_5000baseT_Full_BIT,
+ phydev->lp_advertising, lpadv & RTL_LPADV_5000FULL);
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_2500baseT_Full_BIT,
+ phydev->lp_advertising, lpadv & RTL_LPADV_2500FULL);
+ }
+
+ return genphy_read_status(phydev);
+}
+
+static bool rtlgen_supports_2_5gbps(struct phy_device *phydev)
+{
+ int val;
+
+ phy_write(phydev, RTL821x_PAGE_SELECT, 0xa61);
+ val = phy_read(phydev, 0x13);
+ phy_write(phydev, RTL821x_PAGE_SELECT, 0);
+
+ return val >= 0 && val & RTL_SUPPORTS_2500FULL;
+}
+
+static int rtlgen_match_phy_device(struct phy_device *phydev)
+{
+ return phydev->phy_id == RTL_GENERIC_PHYID &&
+ !rtlgen_supports_2_5gbps(phydev);
+}
+
+static int rtl8125_match_phy_device(struct phy_device *phydev)
+{
+ return phydev->phy_id == RTL_GENERIC_PHYID &&
+ rtlgen_supports_2_5gbps(phydev);
+}
+
static struct phy_driver realtek_drvs[] = {
{
PHY_ID_MATCH_EXACT(0x00008201),
@@ -326,12 +496,26 @@ static struct phy_driver realtek_drvs[] = {
.read_page = rtl821x_read_page,
.write_page = rtl821x_write_page,
}, {
- PHY_ID_MATCH_EXACT(0x001cc800),
- .name = "Generic Realtek PHY",
+ .name = "Generic FE-GE Realtek PHY",
+ .match_phy_device = rtlgen_match_phy_device,
+ .suspend = genphy_suspend,
+ .resume = genphy_resume,
+ .read_page = rtl821x_read_page,
+ .write_page = rtl821x_write_page,
+ .read_mmd = rtlgen_read_mmd,
+ .write_mmd = rtlgen_write_mmd,
+ }, {
+ .name = "RTL8125 2.5Gbps internal",
+ .match_phy_device = rtl8125_match_phy_device,
+ .get_features = rtl8125_get_features,
+ .config_aneg = rtl8125_config_aneg,
+ .read_status = rtl8125_read_status,
.suspend = genphy_suspend,
.resume = genphy_resume,
.read_page = rtl821x_read_page,
.write_page = rtl821x_write_page,
+ .read_mmd = rtl8125_read_mmd,
+ .write_mmd = rtl8125_write_mmd,
}, {
PHY_ID_MATCH_EXACT(0x001cc961),
.name = "RTL8366RB Gigabit Ethernet",
diff --git a/drivers/net/phy/sfp.c b/drivers/net/phy/sfp.c
index e36c04c26866..272d5773573e 100644
--- a/drivers/net/phy/sfp.c
+++ b/drivers/net/phy/sfp.c
@@ -429,6 +429,7 @@ static umode_t sfp_hwmon_is_visible(const void *data,
return 0;
/* fall through */
case hwmon_temp_input:
+ case hwmon_temp_label:
return 0444;
default:
return 0;
@@ -447,6 +448,7 @@ static umode_t sfp_hwmon_is_visible(const void *data,
return 0;
/* fall through */
case hwmon_in_input:
+ case hwmon_in_label:
return 0444;
default:
return 0;
@@ -465,6 +467,7 @@ static umode_t sfp_hwmon_is_visible(const void *data,
return 0;
/* fall through */
case hwmon_curr_input:
+ case hwmon_curr_label:
return 0444;
default:
return 0;
@@ -492,6 +495,7 @@ static umode_t sfp_hwmon_is_visible(const void *data,
return 0;
/* fall through */
case hwmon_power_input:
+ case hwmon_power_label:
return 0444;
default:
return 0;
@@ -987,9 +991,63 @@ static int sfp_hwmon_read(struct device *dev, enum hwmon_sensor_types type,
}
}
+static const char *const sfp_hwmon_power_labels[] = {
+ "TX_power",
+ "RX_power",
+};
+
+static int sfp_hwmon_read_string(struct device *dev,
+ enum hwmon_sensor_types type,
+ u32 attr, int channel, const char **str)
+{
+ switch (type) {
+ case hwmon_curr:
+ switch (attr) {
+ case hwmon_curr_label:
+ *str = "bias";
+ return 0;
+ default:
+ return -EOPNOTSUPP;
+ }
+ break;
+ case hwmon_temp:
+ switch (attr) {
+ case hwmon_temp_label:
+ *str = "temperature";
+ return 0;
+ default:
+ return -EOPNOTSUPP;
+ }
+ break;
+ case hwmon_in:
+ switch (attr) {
+ case hwmon_in_label:
+ *str = "VCC";
+ return 0;
+ default:
+ return -EOPNOTSUPP;
+ }
+ break;
+ case hwmon_power:
+ switch (attr) {
+ case hwmon_power_label:
+ *str = sfp_hwmon_power_labels[channel];
+ return 0;
+ default:
+ return -EOPNOTSUPP;
+ }
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return -EOPNOTSUPP;
+}
+
static const struct hwmon_ops sfp_hwmon_ops = {
.is_visible = sfp_hwmon_is_visible,
.read = sfp_hwmon_read,
+ .read_string = sfp_hwmon_read_string,
};
static u32 sfp_hwmon_chip_config[] = {
@@ -1007,7 +1065,8 @@ static u32 sfp_hwmon_temp_config[] = {
HWMON_T_MAX | HWMON_T_MIN |
HWMON_T_MAX_ALARM | HWMON_T_MIN_ALARM |
HWMON_T_CRIT | HWMON_T_LCRIT |
- HWMON_T_CRIT_ALARM | HWMON_T_LCRIT_ALARM,
+ HWMON_T_CRIT_ALARM | HWMON_T_LCRIT_ALARM |
+ HWMON_T_LABEL,
0,
};
@@ -1021,7 +1080,8 @@ static u32 sfp_hwmon_vcc_config[] = {
HWMON_I_MAX | HWMON_I_MIN |
HWMON_I_MAX_ALARM | HWMON_I_MIN_ALARM |
HWMON_I_CRIT | HWMON_I_LCRIT |
- HWMON_I_CRIT_ALARM | HWMON_I_LCRIT_ALARM,
+ HWMON_I_CRIT_ALARM | HWMON_I_LCRIT_ALARM |
+ HWMON_I_LABEL,
0,
};
@@ -1035,7 +1095,8 @@ static u32 sfp_hwmon_bias_config[] = {
HWMON_C_MAX | HWMON_C_MIN |
HWMON_C_MAX_ALARM | HWMON_C_MIN_ALARM |
HWMON_C_CRIT | HWMON_C_LCRIT |
- HWMON_C_CRIT_ALARM | HWMON_C_LCRIT_ALARM,
+ HWMON_C_CRIT_ALARM | HWMON_C_LCRIT_ALARM |
+ HWMON_C_LABEL,
0,
};
@@ -1050,13 +1111,15 @@ static u32 sfp_hwmon_power_config[] = {
HWMON_P_MAX | HWMON_P_MIN |
HWMON_P_MAX_ALARM | HWMON_P_MIN_ALARM |
HWMON_P_CRIT | HWMON_P_LCRIT |
- HWMON_P_CRIT_ALARM | HWMON_P_LCRIT_ALARM,
+ HWMON_P_CRIT_ALARM | HWMON_P_LCRIT_ALARM |
+ HWMON_P_LABEL,
/* Receive power */
HWMON_P_INPUT |
HWMON_P_MAX | HWMON_P_MIN |
HWMON_P_MAX_ALARM | HWMON_P_MIN_ALARM |
HWMON_P_CRIT | HWMON_P_LCRIT |
- HWMON_P_CRIT_ALARM | HWMON_P_LCRIT_ALARM,
+ HWMON_P_CRIT_ALARM | HWMON_P_LCRIT_ALARM |
+ HWMON_P_LABEL,
0,
};
diff --git a/drivers/net/phy/swphy.c b/drivers/net/phy/swphy.c
index dad22481d9c1..53c214a22b95 100644
--- a/drivers/net/phy/swphy.c
+++ b/drivers/net/phy/swphy.c
@@ -22,6 +22,7 @@ struct swmii_regs {
u16 bmsr;
u16 lpa;
u16 lpagb;
+ u16 estat;
};
enum {
@@ -48,6 +49,7 @@ static const struct swmii_regs speed[] = {
[SWMII_SPEED_1000] = {
.bmsr = BMSR_ESTATEN,
.lpagb = LPA_1000FULL | LPA_1000HALF,
+ .estat = ESTATUS_1000_TFULL | ESTATUS_1000_THALF,
},
};
@@ -56,11 +58,13 @@ static const struct swmii_regs duplex[] = {
.bmsr = BMSR_ESTATEN | BMSR_100HALF,
.lpa = LPA_10HALF | LPA_100HALF,
.lpagb = LPA_1000HALF,
+ .estat = ESTATUS_1000_THALF,
},
[SWMII_DUPLEX_FULL] = {
.bmsr = BMSR_ESTATEN | BMSR_100FULL,
.lpa = LPA_10FULL | LPA_100FULL,
.lpagb = LPA_1000FULL,
+ .estat = ESTATUS_1000_TFULL,
},
};
@@ -112,6 +116,7 @@ int swphy_read_reg(int reg, const struct fixed_phy_status *state)
{
int speed_index, duplex_index;
u16 bmsr = BMSR_ANEGCAPABLE;
+ u16 estat = 0;
u16 lpagb = 0;
u16 lpa = 0;
@@ -125,6 +130,7 @@ int swphy_read_reg(int reg, const struct fixed_phy_status *state)
duplex_index = state->duplex ? SWMII_DUPLEX_FULL : SWMII_DUPLEX_HALF;
bmsr |= speed[speed_index].bmsr & duplex[duplex_index].bmsr;
+ estat |= speed[speed_index].estat & duplex[duplex_index].estat;
if (state->link) {
bmsr |= BMSR_LSTATUS | BMSR_ANEGCOMPLETE;
@@ -151,6 +157,8 @@ int swphy_read_reg(int reg, const struct fixed_phy_status *state)
return lpa;
case MII_STAT1000:
return lpagb;
+ case MII_ESTATUS:
+ return estat;
/*
* We do not support emulating Clause 45 over Clause 22 register
diff --git a/drivers/net/phy/vitesse.c b/drivers/net/phy/vitesse.c
index 43691b1acfd9..bb680352708a 100644
--- a/drivers/net/phy/vitesse.c
+++ b/drivers/net/phy/vitesse.c
@@ -197,7 +197,7 @@ static int vsc738x_config_init(struct phy_device *phydev)
vsc73xx_config_init(phydev);
- return genphy_config_init(phydev);
+ return 0;
}
static int vsc739x_config_init(struct phy_device *phydev)
@@ -229,7 +229,7 @@ static int vsc739x_config_init(struct phy_device *phydev)
vsc73xx_config_init(phydev);
- return genphy_config_init(phydev);
+ return 0;
}
static int vsc73xx_config_aneg(struct phy_device *phydev)
@@ -267,7 +267,7 @@ static int vsc8601_config_init(struct phy_device *phydev)
if (ret < 0)
return ret;
- return genphy_config_init(phydev);
+ return 0;
}
static int vsc824x_ack_interrupt(struct phy_device *phydev)
diff --git a/drivers/net/slip/slhc.c b/drivers/net/slip/slhc.c
index ea90db3c7705..58a69f830d29 100644
--- a/drivers/net/slip/slhc.c
+++ b/drivers/net/slip/slhc.c
@@ -91,8 +91,8 @@ static unsigned short pull16(unsigned char **cpp);
struct slcompress *
slhc_init(int rslots, int tslots)
{
- register short i;
- register struct cstate *ts;
+ short i;
+ struct cstate *ts;
struct slcompress *comp;
if (rslots < 0 || rslots > 255 || tslots < 0 || tslots > 255)
@@ -206,7 +206,7 @@ pull16(unsigned char **cpp)
static long
decode(unsigned char **cpp)
{
- register int x;
+ int x;
x = *(*cpp)++;
if(x == 0){
@@ -227,14 +227,14 @@ int
slhc_compress(struct slcompress *comp, unsigned char *icp, int isize,
unsigned char *ocp, unsigned char **cpp, int compress_cid)
{
- register struct cstate *ocs = &(comp->tstate[comp->xmit_oldest]);
- register struct cstate *lcs = ocs;
- register struct cstate *cs = lcs->next;
- register unsigned long deltaS, deltaA;
- register short changes = 0;
+ struct cstate *ocs = &(comp->tstate[comp->xmit_oldest]);
+ struct cstate *lcs = ocs;
+ struct cstate *cs = lcs->next;
+ unsigned long deltaS, deltaA;
+ short changes = 0;
int hlen;
unsigned char new_seq[16];
- register unsigned char *cp = new_seq;
+ unsigned char *cp = new_seq;
struct iphdr *ip;
struct tcphdr *th, *oth;
__sum16 csum;
@@ -486,11 +486,11 @@ uncompressed:
int
slhc_uncompress(struct slcompress *comp, unsigned char *icp, int isize)
{
- register int changes;
+ int changes;
long x;
- register struct tcphdr *thp;
- register struct iphdr *ip;
- register struct cstate *cs;
+ struct tcphdr *thp;
+ struct iphdr *ip;
+ struct cstate *cs;
int len, hdrlen;
unsigned char *cp = icp;
@@ -543,7 +543,7 @@ slhc_uncompress(struct slcompress *comp, unsigned char *icp, int isize)
switch(changes & SPECIALS_MASK){
case SPECIAL_I: /* Echoed terminal traffic */
{
- register short i;
+ short i;
i = ntohs(ip->tot_len) - hdrlen;
thp->ack_seq = htonl( ntohl(thp->ack_seq) + i);
thp->seq = htonl( ntohl(thp->seq) + i);
@@ -637,7 +637,7 @@ bad:
int
slhc_remember(struct slcompress *comp, unsigned char *icp, int isize)
{
- register struct cstate *cs;
+ struct cstate *cs;
unsigned ihl;
unsigned char index;
diff --git a/drivers/net/thunderbolt.c b/drivers/net/thunderbolt.c
index fcf31335a8b6..dacb4f680fd4 100644
--- a/drivers/net/thunderbolt.c
+++ b/drivers/net/thunderbolt.c
@@ -1005,7 +1005,7 @@ static void *tbnet_kmap_frag(struct sk_buff *skb, unsigned int frag_num,
const skb_frag_t *frag = &skb_shinfo(skb)->frags[frag_num];
*len = skb_frag_size(frag);
- return kmap_atomic(skb_frag_page(frag)) + frag->page_offset;
+ return kmap_atomic(skb_frag_page(frag)) + skb_frag_off(frag);
}
static netdev_tx_t tbnet_start_xmit(struct sk_buff *skb,
diff --git a/drivers/net/usb/asix_common.c b/drivers/net/usb/asix_common.c
index b39ee714fb01..e39f41efda3e 100644
--- a/drivers/net/usb/asix_common.c
+++ b/drivers/net/usb/asix_common.c
@@ -221,6 +221,7 @@ struct sk_buff *asix_tx_fixup(struct usbnet *dev, struct sk_buff *skb,
int tailroom = skb_tailroom(skb);
u32 packet_len;
u32 padbytes = 0xffff0000;
+ void *ptr;
padlen = ((skb->len + 4) & (dev->maxpacket - 1)) ? 0 : 4;
@@ -256,13 +257,11 @@ struct sk_buff *asix_tx_fixup(struct usbnet *dev, struct sk_buff *skb,
}
packet_len = ((skb->len ^ 0x0000ffff) << 16) + skb->len;
- skb_push(skb, 4);
- cpu_to_le32s(&packet_len);
- skb_copy_to_linear_data(skb, &packet_len, sizeof(packet_len));
+ ptr = skb_push(skb, 4);
+ put_unaligned_le32(packet_len, ptr);
if (padlen) {
- cpu_to_le32s(&padbytes);
- memcpy(skb_tail_pointer(skb), &padbytes, sizeof(padbytes));
+ put_unaligned_le32(padbytes, skb_tail_pointer(skb));
skb_put(skb, sizeof(padbytes));
}
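
This hunk (and the lan78xx/ax88179 hunks below) replaces the three-step push/byteswap/copy pattern with a single unaligned-safe store. A minimal before/after sketch (demo_push_le32_header() is invented for illustration):

	#include <linux/skbuff.h>
	#include <asm/unaligned.h>

	static void demo_push_le32_header(struct sk_buff *skb, u32 packet_len)
	{
		void *ptr;

		/* old:
		 *   skb_push(skb, 4);
		 *   cpu_to_le32s(&packet_len);
		 *   skb_copy_to_linear_data(skb, &packet_len, 4);
		 */

		/* new: one call handles both endianness and alignment */
		ptr = skb_push(skb, 4);
		put_unaligned_le32(packet_len, ptr);
	}
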
diff --git a/drivers/net/usb/ax88179_178a.c b/drivers/net/usb/ax88179_178a.c
index 0bc457ba8574..daa54486ab09 100644
--- a/drivers/net/usb/ax88179_178a.c
+++ b/drivers/net/usb/ax88179_178a.c
@@ -1366,8 +1366,7 @@ static int ax88179_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
return 0;
skb_trim(skb, skb->len - 4);
- memcpy(&rx_hdr, skb_tail_pointer(skb), 4);
- le32_to_cpus(&rx_hdr);
+ rx_hdr = get_unaligned_le32(skb_tail_pointer(skb));
pkt_cnt = (u16)rx_hdr;
hdr_off = (u16)(rx_hdr >> 16);
@@ -1422,6 +1421,7 @@ ax88179_tx_fixup(struct usbnet *dev, struct sk_buff *skb, gfp_t flags)
int frame_size = dev->maxpacket;
int mss = skb_shinfo(skb)->gso_size;
int headroom;
+ void *ptr;
tx_hdr1 = skb->len;
tx_hdr2 = mss;
@@ -1436,13 +1436,9 @@ ax88179_tx_fixup(struct usbnet *dev, struct sk_buff *skb, gfp_t flags)
return NULL;
}
- skb_push(skb, 4);
- cpu_to_le32s(&tx_hdr2);
- skb_copy_to_linear_data(skb, &tx_hdr2, 4);
-
- skb_push(skb, 4);
- cpu_to_le32s(&tx_hdr1);
- skb_copy_to_linear_data(skb, &tx_hdr1, 4);
+ ptr = skb_push(skb, 8);
+ put_unaligned_le32(tx_hdr1, ptr);
+ put_unaligned_le32(tx_hdr2, ptr + 4);
return skb;
}
diff --git a/drivers/net/usb/lan78xx.c b/drivers/net/usb/lan78xx.c
index f033fee225a1..58f5a219fb65 100644
--- a/drivers/net/usb/lan78xx.c
+++ b/drivers/net/usb/lan78xx.c
@@ -1258,8 +1258,7 @@ static void lan78xx_status(struct lan78xx_net *dev, struct urb *urb)
return;
}
- memcpy(&intdata, urb->transfer_buffer, 4);
- le32_to_cpus(&intdata);
+ intdata = get_unaligned_le32(urb->transfer_buffer);
if (intdata & INT_ENP_PHY_INT) {
netif_dbg(dev, link, dev->net, "PHY INTR: 0x%08x\n", intdata);
@@ -2730,6 +2729,7 @@ static struct sk_buff *lan78xx_tx_prep(struct lan78xx_net *dev,
struct sk_buff *skb, gfp_t flags)
{
u32 tx_cmd_a, tx_cmd_b;
+ void *ptr;
if (skb_cow_head(skb, TX_OVERHEAD)) {
dev_kfree_skb_any(skb);
@@ -2758,13 +2758,9 @@ static struct sk_buff *lan78xx_tx_prep(struct lan78xx_net *dev,
tx_cmd_b |= skb_vlan_tag_get(skb) & TX_CMD_B_VTAG_MASK_;
}
- skb_push(skb, 4);
- cpu_to_le32s(&tx_cmd_b);
- memcpy(skb->data, &tx_cmd_b, 4);
-
- skb_push(skb, 4);
- cpu_to_le32s(&tx_cmd_a);
- memcpy(skb->data, &tx_cmd_a, 4);
+ ptr = skb_push(skb, 8);
+ put_unaligned_le32(tx_cmd_a, ptr);
+ put_unaligned_le32(tx_cmd_b, ptr + 4);
return skb;
}
@@ -3105,16 +3101,13 @@ static int lan78xx_rx(struct lan78xx_net *dev, struct sk_buff *skb)
struct sk_buff *skb2;
unsigned char *packet;
- memcpy(&rx_cmd_a, skb->data, sizeof(rx_cmd_a));
- le32_to_cpus(&rx_cmd_a);
+ rx_cmd_a = get_unaligned_le32(skb->data);
skb_pull(skb, sizeof(rx_cmd_a));
- memcpy(&rx_cmd_b, skb->data, sizeof(rx_cmd_b));
- le32_to_cpus(&rx_cmd_b);
+ rx_cmd_b = get_unaligned_le32(skb->data);
skb_pull(skb, sizeof(rx_cmd_b));
- memcpy(&rx_cmd_c, skb->data, sizeof(rx_cmd_c));
- le16_to_cpus(&rx_cmd_c);
+ rx_cmd_c = get_unaligned_le16(skb->data);
skb_pull(skb, sizeof(rx_cmd_c));
packet = skb->data;
diff --git a/drivers/net/usb/lg-vl600.c b/drivers/net/usb/lg-vl600.c
index 6c2b3e368efe..217a2d8fa47b 100644
--- a/drivers/net/usb/lg-vl600.c
+++ b/drivers/net/usb/lg-vl600.c
@@ -87,9 +87,7 @@ static void vl600_unbind(struct usbnet *dev, struct usb_interface *intf)
{
struct vl600_state *s = dev->driver_priv;
- if (s->current_rx_buf)
- dev_kfree_skb(s->current_rx_buf);
-
+ dev_kfree_skb(s->current_rx_buf);
kfree(s);
return usbnet_cdc_unbind(dev, intf);
diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
index eee0f5007ee3..c6fa0c17c13d 100644
--- a/drivers/net/usb/r8152.c
+++ b/drivers/net/usb/r8152.c
@@ -22,10 +22,11 @@
#include <linux/mdio.h>
#include <linux/usb/cdc.h>
#include <linux/suspend.h>
+#include <linux/atomic.h>
#include <linux/acpi.h>
/* Information for net-next */
-#define NETNEXT_VERSION "09"
+#define NETNEXT_VERSION "10"
/* Information for net */
#define NET_VERSION "10"
@@ -583,6 +584,9 @@ enum rtl_register_content {
#define TX_ALIGN 4
#define RX_ALIGN 8
+#define RTL8152_RX_MAX_PENDING 4096
+#define RTL8152_RXFG_HEADSZ 256
+
#define INTR_LINK 0x0004
#define RTL8152_REQT_READ 0xc0
@@ -615,7 +619,7 @@ enum rtl8152_flags {
RTL8152_LINK_CHG,
SELECTIVE_SUSPEND,
PHY_RESET,
- SCHEDULE_NAPI,
+ SCHEDULE_TASKLET,
GREEN_ETHERNET,
DELL_TB_RX_AGG_BUG,
};
@@ -694,11 +698,11 @@ struct tx_desc {
struct r8152;
struct rx_agg {
- struct list_head list;
+ struct list_head list, info_list;
struct urb *urb;
struct r8152 *context;
+ struct page *page;
void *buffer;
- void *head;
};
struct tx_agg {
@@ -719,7 +723,7 @@ struct r8152 {
struct net_device *netdev;
struct urb *intr_urb;
struct tx_agg tx_info[RTL8152_MAX_TX];
- struct rx_agg rx_info[RTL8152_MAX_RX];
+ struct list_head rx_info, rx_used;
struct list_head rx_done, tx_free;
struct sk_buff_head tx_queue, rx_queue;
spinlock_t rx_lock, tx_lock;
@@ -729,6 +733,7 @@ struct r8152 {
#ifdef CONFIG_PM_SLEEP
struct notifier_block pm_notifier;
#endif
+ struct tasklet_struct tx_tl;
struct rtl_ops {
void (*init)(struct r8152 *);
@@ -744,13 +749,21 @@ struct r8152 {
void (*autosuspend_en)(struct r8152 *tp, bool enable);
} rtl_ops;
+ atomic_t rx_count;
+
+ bool eee_en;
int intr_interval;
u32 saved_wolopts;
u32 msg_enable;
u32 tx_qlen;
u32 coalesce;
+ u32 rx_buf_sz;
+ u32 rx_copybreak;
+ u32 rx_pending;
+
u16 ocp_base;
u16 speed;
+ u16 eee_adv;
u8 *intr_buff;
u8 version;
u8 duplex;
@@ -1394,7 +1407,7 @@ static void write_bulk_callback(struct urb *urb)
return;
if (!skb_queue_empty(&tp->tx_queue))
- napi_schedule(&tp->napi);
+ tasklet_schedule(&tp->tx_tl);
}
static void intr_callback(struct urb *urb)
@@ -1470,18 +1483,72 @@ static inline void *tx_agg_align(void *data)
return (void *)ALIGN((uintptr_t)data, TX_ALIGN);
}
+static void free_rx_agg(struct r8152 *tp, struct rx_agg *agg)
+{
+ list_del(&agg->info_list);
+
+ usb_free_urb(agg->urb);
+ put_page(agg->page);
+ kfree(agg);
+
+ atomic_dec(&tp->rx_count);
+}
+
+static struct rx_agg *alloc_rx_agg(struct r8152 *tp, gfp_t mflags)
+{
+ struct net_device *netdev = tp->netdev;
+ int node = netdev->dev.parent ? dev_to_node(netdev->dev.parent) : -1;
+ unsigned int order = get_order(tp->rx_buf_sz);
+ struct rx_agg *rx_agg;
+ unsigned long flags;
+
+ rx_agg = kmalloc_node(sizeof(*rx_agg), mflags, node);
+ if (!rx_agg)
+ return NULL;
+
+ rx_agg->page = alloc_pages(mflags | __GFP_COMP, order);
+ if (!rx_agg->page)
+ goto free_rx;
+
+ rx_agg->buffer = page_address(rx_agg->page);
+
+ rx_agg->urb = usb_alloc_urb(0, mflags);
+ if (!rx_agg->urb)
+ goto free_buf;
+
+ rx_agg->context = tp;
+
+ INIT_LIST_HEAD(&rx_agg->list);
+ INIT_LIST_HEAD(&rx_agg->info_list);
+ spin_lock_irqsave(&tp->rx_lock, flags);
+ list_add_tail(&rx_agg->info_list, &tp->rx_info);
+ spin_unlock_irqrestore(&tp->rx_lock, flags);
+
+ atomic_inc(&tp->rx_count);
+
+ return rx_agg;
+
+free_buf:
+ __free_pages(rx_agg->page, order);
+free_rx:
+ kfree(rx_agg);
+ return NULL;
+}
+
static void free_all_mem(struct r8152 *tp)
{
+ struct rx_agg *agg, *agg_next;
+ unsigned long flags;
int i;
- for (i = 0; i < RTL8152_MAX_RX; i++) {
- usb_free_urb(tp->rx_info[i].urb);
- tp->rx_info[i].urb = NULL;
+ spin_lock_irqsave(&tp->rx_lock, flags);
- kfree(tp->rx_info[i].buffer);
- tp->rx_info[i].buffer = NULL;
- tp->rx_info[i].head = NULL;
- }
+ list_for_each_entry_safe(agg, agg_next, &tp->rx_info, info_list)
+ free_rx_agg(tp, agg);
+
+ spin_unlock_irqrestore(&tp->rx_lock, flags);
+
+ WARN_ON(atomic_read(&tp->rx_count));
for (i = 0; i < RTL8152_MAX_TX; i++) {
usb_free_urb(tp->tx_info[i].urb);
@@ -1505,46 +1572,28 @@ static int alloc_all_mem(struct r8152 *tp)
struct usb_interface *intf = tp->intf;
struct usb_host_interface *alt = intf->cur_altsetting;
struct usb_host_endpoint *ep_intr = alt->endpoint + 2;
- struct urb *urb;
int node, i;
- u8 *buf;
node = netdev->dev.parent ? dev_to_node(netdev->dev.parent) : -1;
spin_lock_init(&tp->rx_lock);
spin_lock_init(&tp->tx_lock);
+ INIT_LIST_HEAD(&tp->rx_info);
INIT_LIST_HEAD(&tp->tx_free);
INIT_LIST_HEAD(&tp->rx_done);
skb_queue_head_init(&tp->tx_queue);
skb_queue_head_init(&tp->rx_queue);
+ atomic_set(&tp->rx_count, 0);
for (i = 0; i < RTL8152_MAX_RX; i++) {
- buf = kmalloc_node(agg_buf_sz, GFP_KERNEL, node);
- if (!buf)
+ if (!alloc_rx_agg(tp, GFP_KERNEL))
goto err1;
-
- if (buf != rx_agg_align(buf)) {
- kfree(buf);
- buf = kmalloc_node(agg_buf_sz + RX_ALIGN, GFP_KERNEL,
- node);
- if (!buf)
- goto err1;
- }
-
- urb = usb_alloc_urb(0, GFP_KERNEL);
- if (!urb) {
- kfree(buf);
- goto err1;
- }
-
- INIT_LIST_HEAD(&tp->rx_info[i].list);
- tp->rx_info[i].context = tp;
- tp->rx_info[i].urb = urb;
- tp->rx_info[i].buffer = buf;
- tp->rx_info[i].head = rx_agg_align(buf);
}
for (i = 0; i < RTL8152_MAX_TX; i++) {
+ struct urb *urb;
+ u8 *buf;
+
buf = kmalloc_node(agg_buf_sz, GFP_KERNEL, node);
if (!buf)
goto err1;
@@ -1910,6 +1959,46 @@ return_result:
return checksum;
}
+static inline bool rx_count_exceed(struct r8152 *tp)
+{
+ return atomic_read(&tp->rx_count) > RTL8152_MAX_RX;
+}
+
+static inline int agg_offset(struct rx_agg *agg, void *addr)
+{
+ return (int)(addr - agg->buffer);
+}
+
+static struct rx_agg *rtl_get_free_rx(struct r8152 *tp, gfp_t mflags)
+{
+ struct rx_agg *agg, *agg_next, *agg_free = NULL;
+ unsigned long flags;
+
+ spin_lock_irqsave(&tp->rx_lock, flags);
+
+ list_for_each_entry_safe(agg, agg_next, &tp->rx_used, list) {
+ if (page_count(agg->page) == 1) {
+ if (!agg_free) {
+ list_del_init(&agg->list);
+ agg_free = agg;
+ continue;
+ }
+ if (rx_count_exceed(tp)) {
+ list_del_init(&agg->list);
+ free_rx_agg(tp, agg);
+ }
+ break;
+ }
+ }
+
+ spin_unlock_irqrestore(&tp->rx_lock, flags);
+
+ if (!agg_free && atomic_read(&tp->rx_count) < tp->rx_pending)
+ agg_free = alloc_rx_agg(tp, mflags);
+
+ return agg_free;
+}
+
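The reuse test in rtl_get_free_rx() above hinges on the page refcount: each fragment handed to the stack in rx_bottom() below is pinned with get_page(), so a buffer may only be refilled once page_count() has dropped back to 1. A minimal sketch of that invariant (helper name hypothetical):

/* Sketch: an rx page is recyclable only when the driver again holds
 * the sole reference, i.e. no skb fragment still points into it.
 */
static bool rx_page_reusable(const struct rx_agg *agg)
{
	return page_count(agg->page) == 1;
}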
static int rx_bottom(struct r8152 *tp, int budget)
{
unsigned long flags;
@@ -1945,7 +2034,7 @@ static int rx_bottom(struct r8152 *tp, int budget)
list_for_each_safe(cursor, next, &rx_queue) {
struct rx_desc *rx_desc;
- struct rx_agg *agg;
+ struct rx_agg *agg, *agg_free;
int len_used = 0;
struct urb *urb;
u8 *rx_data;
@@ -1957,14 +2046,16 @@ static int rx_bottom(struct r8152 *tp, int budget)
if (urb->actual_length < ETH_ZLEN)
goto submit;
- rx_desc = agg->head;
- rx_data = agg->head;
+ agg_free = rtl_get_free_rx(tp, GFP_ATOMIC);
+
+ rx_desc = agg->buffer;
+ rx_data = agg->buffer;
len_used += sizeof(struct rx_desc);
while (urb->actual_length > len_used) {
struct net_device *netdev = tp->netdev;
struct net_device_stats *stats = &netdev->stats;
- unsigned int pkt_len;
+ unsigned int pkt_len, rx_frag_head_sz;
struct sk_buff *skb;
/* limit the number of skbs for rx_queue */
@@ -1982,22 +2073,37 @@ static int rx_bottom(struct r8152 *tp, int budget)
pkt_len -= ETH_FCS_LEN;
rx_data += sizeof(struct rx_desc);
- skb = napi_alloc_skb(napi, pkt_len);
+ if (!agg_free || tp->rx_copybreak > pkt_len)
+ rx_frag_head_sz = pkt_len;
+ else
+ rx_frag_head_sz = tp->rx_copybreak;
+
+ skb = napi_alloc_skb(napi, rx_frag_head_sz);
if (!skb) {
stats->rx_dropped++;
goto find_next_rx;
}
skb->ip_summed = r8152_rx_csum(tp, rx_desc);
- memcpy(skb->data, rx_data, pkt_len);
- skb_put(skb, pkt_len);
+ memcpy(skb->data, rx_data, rx_frag_head_sz);
+ skb_put(skb, rx_frag_head_sz);
+ pkt_len -= rx_frag_head_sz;
+ rx_data += rx_frag_head_sz;
+ if (pkt_len) {
+ skb_add_rx_frag(skb, 0, agg->page,
+ agg_offset(agg, rx_data),
+ pkt_len,
+ SKB_DATA_ALIGN(pkt_len));
+ get_page(agg->page);
+ }
+
skb->protocol = eth_type_trans(skb, netdev);
rtl_rx_vlan_tag(rx_desc, skb);
if (work_done < budget) {
- napi_gro_receive(napi, skb);
work_done++;
stats->rx_packets++;
- stats->rx_bytes += pkt_len;
+ stats->rx_bytes += skb->len;
+ napi_gro_receive(napi, skb);
} else {
__skb_queue_tail(&tp->rx_queue, skb);
}
@@ -2005,10 +2111,24 @@ static int rx_bottom(struct r8152 *tp, int budget)
find_next_rx:
rx_data = rx_agg_align(rx_data + pkt_len + ETH_FCS_LEN);
rx_desc = (struct rx_desc *)rx_data;
- len_used = (int)(rx_data - (u8 *)agg->head);
+ len_used = agg_offset(agg, rx_data);
len_used += sizeof(struct rx_desc);
}
+ WARN_ON(!agg_free && page_count(agg->page) > 1);
+
+ if (agg_free) {
+ spin_lock_irqsave(&tp->rx_lock, flags);
+ if (page_count(agg->page) == 1) {
+ list_add(&agg_free->list, &tp->rx_used);
+ } else {
+ list_add_tail(&agg->list, &tp->rx_used);
+ agg = agg_free;
+ urb = agg->urb;
+ }
+ spin_unlock_irqrestore(&tp->rx_lock, flags);
+ }
+
submit:
if (!ret) {
ret = r8152_submit_rx(tp, agg, GFP_ATOMIC);
@@ -2065,8 +2185,12 @@ static void tx_bottom(struct r8152 *tp)
} while (res == 0);
}
-static void bottom_half(struct r8152 *tp)
+static void bottom_half(unsigned long data)
{
+ struct r8152 *tp;
+
+ tp = (struct r8152 *)data;
+
if (test_bit(RTL8152_UNPLUG, &tp->flags))
return;
@@ -2078,7 +2202,7 @@ static void bottom_half(struct r8152 *tp)
if (!netif_carrier_ok(tp->netdev))
return;
- clear_bit(SCHEDULE_NAPI, &tp->flags);
+ clear_bit(SCHEDULE_TASKLET, &tp->flags);
tx_bottom(tp);
}
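The tx path moves out of the shared NAPI handler into its own tasklet; with the classic tasklet API the context travels as an unsigned long, hence the cast in bottom_half() above. A sketch of the wiring, assuming the driver's struct r8152 (helper names hypothetical):

/* Sketch: old-style tasklet plumbing, matching the patch's pattern. */
static void my_tx_bottom_half(unsigned long data)
{
	struct r8152 *tp = (struct r8152 *)data;	/* recover context */

	if (test_bit(RTL8152_UNPLUG, &tp->flags))
		return;
	tx_bottom(tp);
}

static void my_probe_setup(struct r8152 *tp)
{
	tasklet_init(&tp->tx_tl, my_tx_bottom_half, (unsigned long)tp);
	tasklet_disable(&tp->tx_tl);	/* re-enabled in open() */
}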
@@ -2089,16 +2213,12 @@ static int r8152_poll(struct napi_struct *napi, int budget)
int work_done;
work_done = rx_bottom(tp, budget);
- bottom_half(tp);
if (work_done < budget) {
if (!napi_complete_done(napi, work_done))
goto out;
if (!list_empty(&tp->rx_done))
napi_schedule(napi);
- else if (!skb_queue_empty(&tp->tx_queue) &&
- !list_empty(&tp->tx_free))
- napi_schedule(napi);
}
out:
@@ -2116,7 +2236,7 @@ int r8152_submit_rx(struct r8152 *tp, struct rx_agg *agg, gfp_t mem_flags)
return 0;
usb_fill_bulk_urb(agg->urb, tp->udev, usb_rcvbulkpipe(tp->udev, 1),
- agg->head, agg_buf_sz,
+ agg->buffer, tp->rx_buf_sz,
(usb_complete_t)read_bulk_callback, agg);
ret = usb_submit_urb(agg->urb, mem_flags);
@@ -2252,11 +2372,11 @@ static netdev_tx_t rtl8152_start_xmit(struct sk_buff *skb,
if (!list_empty(&tp->tx_free)) {
if (test_bit(SELECTIVE_SUSPEND, &tp->flags)) {
- set_bit(SCHEDULE_NAPI, &tp->flags);
+ set_bit(SCHEDULE_TASKLET, &tp->flags);
schedule_delayed_work(&tp->schedule, 0);
} else {
usb_mark_last_busy(tp->udev);
- napi_schedule(&tp->napi);
+ tasklet_schedule(&tp->tx_tl);
}
} else if (skb_queue_len(&tp->tx_queue) > tp->tx_qlen) {
netif_stop_queue(netdev);
@@ -2333,44 +2453,80 @@ static void rxdy_gated_en(struct r8152 *tp, bool enable)
static int rtl_start_rx(struct r8152 *tp)
{
- int i, ret = 0;
+ struct rx_agg *agg, *agg_next;
+ struct list_head tmp_list;
+ unsigned long flags;
+ int ret = 0, i = 0;
- INIT_LIST_HEAD(&tp->rx_done);
- for (i = 0; i < RTL8152_MAX_RX; i++) {
- INIT_LIST_HEAD(&tp->rx_info[i].list);
- ret = r8152_submit_rx(tp, &tp->rx_info[i], GFP_KERNEL);
- if (ret)
- break;
- }
+ INIT_LIST_HEAD(&tmp_list);
- if (ret && ++i < RTL8152_MAX_RX) {
- struct list_head rx_queue;
- unsigned long flags;
+ spin_lock_irqsave(&tp->rx_lock, flags);
- INIT_LIST_HEAD(&rx_queue);
+ INIT_LIST_HEAD(&tp->rx_done);
+ INIT_LIST_HEAD(&tp->rx_used);
- do {
- struct rx_agg *agg = &tp->rx_info[i++];
- struct urb *urb = agg->urb;
+ list_splice_init(&tp->rx_info, &tmp_list);
- urb->actual_length = 0;
- list_add_tail(&agg->list, &rx_queue);
- } while (i < RTL8152_MAX_RX);
+ spin_unlock_irqrestore(&tp->rx_lock, flags);
- spin_lock_irqsave(&tp->rx_lock, flags);
- list_splice_tail(&rx_queue, &tp->rx_done);
- spin_unlock_irqrestore(&tp->rx_lock, flags);
+ list_for_each_entry_safe(agg, agg_next, &tmp_list, info_list) {
+ INIT_LIST_HEAD(&agg->list);
+
+ /* Only RTL8152_MAX_RX rx_agg buffers need to be submitted. */
+ if (++i > RTL8152_MAX_RX) {
+ spin_lock_irqsave(&tp->rx_lock, flags);
+ list_add_tail(&agg->list, &tp->rx_used);
+ spin_unlock_irqrestore(&tp->rx_lock, flags);
+ } else if (unlikely(ret < 0)) {
+ spin_lock_irqsave(&tp->rx_lock, flags);
+ list_add_tail(&agg->list, &tp->rx_done);
+ spin_unlock_irqrestore(&tp->rx_lock, flags);
+ } else {
+ ret = r8152_submit_rx(tp, agg, GFP_KERNEL);
+ }
}
+ spin_lock_irqsave(&tp->rx_lock, flags);
+ WARN_ON(!list_empty(&tp->rx_info));
+ list_splice(&tmp_list, &tp->rx_info);
+ spin_unlock_irqrestore(&tp->rx_lock, flags);
+
return ret;
}
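rtl_start_rx() above and rtl_stop_rx() below share a splice-out, walk, splice-back idiom so the rx_info list can be traversed, and even slept on, without holding rx_lock. A compact sketch of the idiom (function name hypothetical):

/* Sketch: drain a spinlock-protected list so it can be walked
 * without the lock held, e.g. to call sleeping functions.
 */
static void walk_rx_info_unlocked(struct r8152 *tp)
{
	struct rx_agg *agg, *next;
	unsigned long flags;
	LIST_HEAD(tmp);

	spin_lock_irqsave(&tp->rx_lock, flags);
	list_splice_init(&tp->rx_info, &tmp);	/* take ownership */
	spin_unlock_irqrestore(&tp->rx_lock, flags);

	list_for_each_entry_safe(agg, next, &tmp, info_list) {
		/* may sleep here, e.g. usb_kill_urb(agg->urb) */
	}

	spin_lock_irqsave(&tp->rx_lock, flags);
	list_splice(&tmp, &tp->rx_info);	/* hand the list back */
	spin_unlock_irqrestore(&tp->rx_lock, flags);
}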
static int rtl_stop_rx(struct r8152 *tp)
{
- int i;
+ struct rx_agg *agg, *agg_next;
+ struct list_head tmp_list;
+ unsigned long flags;
+
+ INIT_LIST_HEAD(&tmp_list);
+
+ /* usb_kill_urb() can't be called in atomic context.
+ * Therefore, splice the rx_info list onto a temporary one,
+ * so that list_for_each_entry_safe() can walk it without
+ * holding the spin lock.
+ */
+
+ spin_lock_irqsave(&tp->rx_lock, flags);
+ list_splice_init(&tp->rx_info, &tmp_list);
+ spin_unlock_irqrestore(&tp->rx_lock, flags);
+
+ list_for_each_entry_safe(agg, agg_next, &tmp_list, info_list) {
+ /* At least RTL8152_MAX_RX rx_agg buffers have a page_count of
+ * 1, so the remaining ones can be freed safely.
+ */
+ if (page_count(agg->page) > 1)
+ free_rx_agg(tp, agg);
+ else
+ usb_kill_urb(agg->urb);
+ }
- for (i = 0; i < RTL8152_MAX_RX; i++)
- usb_kill_urb(tp->rx_info[i].urb);
+ /* Move the temporary list back to rx_info */
+ spin_lock_irqsave(&tp->rx_lock, flags);
+ WARN_ON(!list_empty(&tp->rx_info));
+ list_splice(&tmp_list, &tp->rx_info);
+ spin_unlock_irqrestore(&tp->rx_lock, flags);
while (!skb_queue_empty(&tp->rx_queue))
dev_kfree_skb(__skb_dequeue(&tp->rx_queue));
@@ -2450,7 +2606,7 @@ static void r8153_set_rx_early_timeout(struct r8152 *tp)
static void r8153_set_rx_early_size(struct r8152 *tp)
{
- u32 ocp_data = agg_buf_sz - rx_reserved_size(tp->netdev->mtu);
+ u32 ocp_data = tp->rx_buf_sz - rx_reserved_size(tp->netdev->mtu);
switch (tp->version) {
case RTL_VER_03:
@@ -3049,10 +3205,76 @@ static void r8152_eee_en(struct r8152 *tp, bool enable)
ocp_reg_write(tp, OCP_EEE_CONFIG3, config3);
}
-static void r8152b_enable_eee(struct r8152 *tp)
+static void r8153_eee_en(struct r8152 *tp, bool enable)
+{
+ u32 ocp_data;
+ u16 config;
+
+ ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_EEE_CR);
+ config = ocp_reg_read(tp, OCP_EEE_CFG);
+
+ if (enable) {
+ ocp_data |= EEE_RX_EN | EEE_TX_EN;
+ config |= EEE10_EN;
+ } else {
+ ocp_data &= ~(EEE_RX_EN | EEE_TX_EN);
+ config &= ~EEE10_EN;
+ }
+
+ ocp_write_word(tp, MCU_TYPE_PLA, PLA_EEE_CR, ocp_data);
+ ocp_reg_write(tp, OCP_EEE_CFG, config);
+}
+
+static void r8153b_eee_en(struct r8152 *tp, bool enable)
{
- r8152_eee_en(tp, true);
- r8152_mmd_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, MDIO_EEE_100TX);
+ r8153_eee_en(tp, enable);
+
+ if (enable)
+ r8153b_ups_flags_w1w0(tp, UPS_FLAGS_EN_EEE, 0);
+ else
+ r8153b_ups_flags_w1w0(tp, 0, UPS_FLAGS_EN_EEE);
+}
+
+static void rtl_eee_enable(struct r8152 *tp, bool enable)
+{
+ switch (tp->version) {
+ case RTL_VER_01:
+ case RTL_VER_02:
+ case RTL_VER_07:
+ if (enable) {
+ r8152_eee_en(tp, true);
+ r8152_mmd_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV,
+ tp->eee_adv);
+ } else {
+ r8152_eee_en(tp, false);
+ r8152_mmd_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, 0);
+ }
+ break;
+ case RTL_VER_03:
+ case RTL_VER_04:
+ case RTL_VER_05:
+ case RTL_VER_06:
+ if (enable) {
+ r8153_eee_en(tp, true);
+ ocp_reg_write(tp, OCP_EEE_ADV, tp->eee_adv);
+ } else {
+ r8153_eee_en(tp, false);
+ ocp_reg_write(tp, OCP_EEE_ADV, 0);
+ }
+ break;
+ case RTL_VER_08:
+ case RTL_VER_09:
+ if (enable) {
+ r8153b_eee_en(tp, true);
+ ocp_reg_write(tp, OCP_EEE_ADV, tp->eee_adv);
+ } else {
+ r8153b_eee_en(tp, false);
+ ocp_reg_write(tp, OCP_EEE_ADV, 0);
+ }
+ break;
+ default:
+ break;
+ }
}
static void r8152b_enable_fc(struct r8152 *tp)
@@ -3073,7 +3295,7 @@ static void rtl8152_disable(struct r8152 *tp)
static void r8152b_hw_phy_cfg(struct r8152 *tp)
{
- r8152b_enable_eee(tp);
+ rtl_eee_enable(tp, tp->eee_en);
r8152_aldps_en(tp, true);
r8152b_enable_fc(tp);
@@ -3267,36 +3489,6 @@ static void r8153b_aldps_en(struct r8152 *tp, bool enable)
r8153b_ups_flags_w1w0(tp, 0, UPS_FLAGS_EN_ALDPS);
}
-static void r8153_eee_en(struct r8152 *tp, bool enable)
-{
- u32 ocp_data;
- u16 config;
-
- ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_EEE_CR);
- config = ocp_reg_read(tp, OCP_EEE_CFG);
-
- if (enable) {
- ocp_data |= EEE_RX_EN | EEE_TX_EN;
- config |= EEE10_EN;
- } else {
- ocp_data &= ~(EEE_RX_EN | EEE_TX_EN);
- config &= ~EEE10_EN;
- }
-
- ocp_write_word(tp, MCU_TYPE_PLA, PLA_EEE_CR, ocp_data);
- ocp_reg_write(tp, OCP_EEE_CFG, config);
-}
-
-static void r8153b_eee_en(struct r8152 *tp, bool enable)
-{
- r8153_eee_en(tp, enable);
-
- if (enable)
- r8153b_ups_flags_w1w0(tp, UPS_FLAGS_EN_EEE, 0);
- else
- r8153b_ups_flags_w1w0(tp, 0, UPS_FLAGS_EN_EEE);
-}
-
static void r8153b_enable_fc(struct r8152 *tp)
{
r8152b_enable_fc(tp);
@@ -3312,8 +3504,7 @@ static void r8153_hw_phy_cfg(struct r8152 *tp)
r8153_aldps_en(tp, false);
/* disable EEE before updating the PHY parameters */
- r8153_eee_en(tp, false);
- ocp_reg_write(tp, OCP_EEE_ADV, 0);
+ rtl_eee_enable(tp, false);
if (tp->version == RTL_VER_03) {
data = ocp_reg_read(tp, OCP_EEE_CFG);
@@ -3344,8 +3535,8 @@ static void r8153_hw_phy_cfg(struct r8152 *tp)
sram_write(tp, SRAM_10M_AMP1, 0x00af);
sram_write(tp, SRAM_10M_AMP2, 0x0208);
- r8153_eee_en(tp, true);
- ocp_reg_write(tp, OCP_EEE_ADV, MDIO_EEE_1000T | MDIO_EEE_100TX);
+ if (tp->eee_en)
+ rtl_eee_enable(tp, true);
r8153_aldps_en(tp, true);
r8152b_enable_fc(tp);
@@ -3385,8 +3576,7 @@ static void r8153b_hw_phy_cfg(struct r8152 *tp)
r8153b_aldps_en(tp, false);
/* disable EEE before updating the PHY parameters */
- r8153b_eee_en(tp, false);
- ocp_reg_write(tp, OCP_EEE_ADV, 0);
+ rtl_eee_enable(tp, false);
r8153b_green_en(tp, test_bit(GREEN_ETHERNET, &tp->flags));
@@ -3448,8 +3638,8 @@ static void r8153b_hw_phy_cfg(struct r8152 *tp)
r8153b_ups_flags_w1w0(tp, ups_flags, 0);
- r8153b_eee_en(tp, true);
- ocp_reg_write(tp, OCP_EEE_ADV, MDIO_EEE_1000T | MDIO_EEE_100TX);
+ if (tp->eee_en)
+ rtl_eee_enable(tp, true);
r8153b_aldps_en(tp, true);
r8153b_enable_fc(tp);
@@ -3870,9 +4060,11 @@ static void set_carrier(struct r8152 *tp)
} else {
if (netif_carrier_ok(netdev)) {
netif_carrier_off(netdev);
+ tasklet_disable(&tp->tx_tl);
napi_disable(napi);
tp->rtl_ops.disable(tp);
napi_enable(napi);
+ tasklet_enable(&tp->tx_tl);
netif_info(tp, link, netdev, "carrier off\n");
}
}
@@ -3905,10 +4097,10 @@ static void rtl_work_func_t(struct work_struct *work)
if (test_and_clear_bit(RTL8152_SET_RX_MODE, &tp->flags))
_rtl8152_set_rx_mode(tp->netdev);
- /* don't schedule napi before linking */
- if (test_and_clear_bit(SCHEDULE_NAPI, &tp->flags) &&
+ /* don't schedule the tasklet before linking */
+ if (test_and_clear_bit(SCHEDULE_TASKLET, &tp->flags) &&
netif_carrier_ok(tp->netdev))
- napi_schedule(&tp->napi);
+ tasklet_schedule(&tp->tx_tl);
mutex_unlock(&tp->control);
@@ -3994,6 +4186,7 @@ static int rtl8152_open(struct net_device *netdev)
goto out_unlock;
}
napi_enable(&tp->napi);
+ tasklet_enable(&tp->tx_tl);
mutex_unlock(&tp->control);
@@ -4021,6 +4214,7 @@ static int rtl8152_close(struct net_device *netdev)
#ifdef CONFIG_PM_SLEEP
unregister_pm_notifier(&tp->pm_notifier);
#endif
+ tasklet_disable(&tp->tx_tl);
if (!test_bit(RTL8152_UNPLUG, &tp->flags))
napi_disable(&tp->napi);
clear_bit(WORK_ENABLE, &tp->flags);
@@ -4290,6 +4484,7 @@ static int rtl8152_pre_reset(struct usb_interface *intf)
return 0;
netif_stop_queue(netdev);
+ tasklet_disable(&tp->tx_tl);
napi_disable(&tp->napi);
clear_bit(WORK_ENABLE, &tp->flags);
usb_kill_urb(tp->intr_urb);
@@ -4333,6 +4528,7 @@ static int rtl8152_post_reset(struct usb_interface *intf)
}
napi_enable(&tp->napi);
+ tasklet_enable(&tp->tx_tl);
netif_wake_queue(netdev);
usb_submit_urb(tp->intr_urb, GFP_KERNEL);
@@ -4486,10 +4682,12 @@ static int rtl8152_system_suspend(struct r8152 *tp)
clear_bit(WORK_ENABLE, &tp->flags);
usb_kill_urb(tp->intr_urb);
+ tasklet_disable(&tp->tx_tl);
napi_disable(napi);
cancel_delayed_work_sync(&tp->schedule);
tp->rtl_ops.down(tp);
napi_enable(napi);
+ tasklet_enable(&tp->tx_tl);
}
return 0;
@@ -4732,7 +4930,7 @@ static void rtl8152_get_strings(struct net_device *dev, u32 stringset, u8 *data)
static int r8152_get_eee(struct r8152 *tp, struct ethtool_eee *eee)
{
- u32 ocp_data, lp, adv, supported = 0;
+ u32 lp, adv, supported = 0;
u16 val;
val = r8152_mmd_read(tp, MDIO_MMD_PCS, MDIO_PCS_EEE_ABLE);
@@ -4744,13 +4942,10 @@ static int r8152_get_eee(struct r8152 *tp, struct ethtool_eee *eee)
val = r8152_mmd_read(tp, MDIO_MMD_AN, MDIO_AN_EEE_LPABLE);
lp = mmd_eee_adv_to_ethtool_adv_t(val);
- ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_EEE_CR);
- ocp_data &= EEE_RX_EN | EEE_TX_EN;
-
- eee->eee_enabled = !!ocp_data;
+ eee->eee_enabled = tp->eee_en;
eee->eee_active = !!(supported & adv & lp);
eee->supported = supported;
- eee->advertised = adv;
+ eee->advertised = tp->eee_adv;
eee->lp_advertised = lp;
return 0;
@@ -4760,19 +4955,17 @@ static int r8152_set_eee(struct r8152 *tp, struct ethtool_eee *eee)
{
u16 val = ethtool_adv_to_mmd_eee_adv_t(eee->advertised);
- r8152_eee_en(tp, eee->eee_enabled);
+ tp->eee_en = eee->eee_enabled;
+ tp->eee_adv = val;
- if (!eee->eee_enabled)
- val = 0;
-
- r8152_mmd_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, val);
+ rtl_eee_enable(tp, tp->eee_en);
return 0;
}
static int r8153_get_eee(struct r8152 *tp, struct ethtool_eee *eee)
{
- u32 ocp_data, lp, adv, supported = 0;
+ u32 lp, adv, supported = 0;
u16 val;
val = ocp_reg_read(tp, OCP_EEE_ABLE);
@@ -4784,46 +4977,15 @@ static int r8153_get_eee(struct r8152 *tp, struct ethtool_eee *eee)
val = ocp_reg_read(tp, OCP_EEE_LPABLE);
lp = mmd_eee_adv_to_ethtool_adv_t(val);
- ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_EEE_CR);
- ocp_data &= EEE_RX_EN | EEE_TX_EN;
-
- eee->eee_enabled = !!ocp_data;
+ eee->eee_enabled = tp->eee_en;
eee->eee_active = !!(supported & adv & lp);
eee->supported = supported;
- eee->advertised = adv;
+ eee->advertised = tp->eee_adv;
eee->lp_advertised = lp;
return 0;
}
-static int r8153_set_eee(struct r8152 *tp, struct ethtool_eee *eee)
-{
- u16 val = ethtool_adv_to_mmd_eee_adv_t(eee->advertised);
-
- r8153_eee_en(tp, eee->eee_enabled);
-
- if (!eee->eee_enabled)
- val = 0;
-
- ocp_reg_write(tp, OCP_EEE_ADV, val);
-
- return 0;
-}
-
-static int r8153b_set_eee(struct r8152 *tp, struct ethtool_eee *eee)
-{
- u16 val = ethtool_adv_to_mmd_eee_adv_t(eee->advertised);
-
- r8153b_eee_en(tp, eee->eee_enabled);
-
- if (!eee->eee_enabled)
- val = 0;
-
- ocp_reg_write(tp, OCP_EEE_ADV, val);
-
- return 0;
-}
-
static int
rtl_ethtool_get_eee(struct net_device *net, struct ethtool_eee *edata)
{
@@ -4957,6 +5119,77 @@ static int rtl8152_set_coalesce(struct net_device *netdev,
return ret;
}
+static int rtl8152_get_tunable(struct net_device *netdev,
+ const struct ethtool_tunable *tunable, void *d)
+{
+ struct r8152 *tp = netdev_priv(netdev);
+
+ switch (tunable->id) {
+ case ETHTOOL_RX_COPYBREAK:
+ *(u32 *)d = tp->rx_copybreak;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static int rtl8152_set_tunable(struct net_device *netdev,
+ const struct ethtool_tunable *tunable,
+ const void *d)
+{
+ struct r8152 *tp = netdev_priv(netdev);
+ u32 val;
+
+ switch (tunable->id) {
+ case ETHTOOL_RX_COPYBREAK:
+ val = *(u32 *)d;
+ if (val < ETH_ZLEN) {
+ netif_err(tp, rx_err, netdev,
+ "Invalid rx copy break value\n");
+ return -EINVAL;
+ }
+
+ if (tp->rx_copybreak != val) {
+ napi_disable(&tp->napi);
+ tp->rx_copybreak = val;
+ napi_enable(&tp->napi);
+ }
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static void rtl8152_get_ringparam(struct net_device *netdev,
+ struct ethtool_ringparam *ring)
+{
+ struct r8152 *tp = netdev_priv(netdev);
+
+ ring->rx_max_pending = RTL8152_RX_MAX_PENDING;
+ ring->rx_pending = tp->rx_pending;
+}
+
+static int rtl8152_set_ringparam(struct net_device *netdev,
+ struct ethtool_ringparam *ring)
+{
+ struct r8152 *tp = netdev_priv(netdev);
+
+ if (ring->rx_pending < (RTL8152_MAX_RX * 2))
+ return -EINVAL;
+
+ if (tp->rx_pending != ring->rx_pending) {
+ napi_disable(&tp->napi);
+ tp->rx_pending = ring->rx_pending;
+ napi_enable(&tp->napi);
+ }
+
+ return 0;
+}
+
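The new ethtool hooks expose two knobs: rx-copybreak, the threshold below which a packet is copied into the skb head instead of being attached as a page fragment, and the rx buffer count. Both should be reachable from stock ethtool; the device name and values below are placeholders:

/* Example invocations (illustrative only):
 *
 *   ethtool --set-tunable eth0 rx-copybreak 2048
 *   ethtool -G eth0 rx 40
 *
 * The handlers above reject rx-copybreak values below ETH_ZLEN and
 * ring sizes below RTL8152_MAX_RX * 2 with -EINVAL.
 */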
static const struct ethtool_ops ops = {
.get_drvinfo = rtl8152_get_drvinfo,
.get_link = ethtool_op_get_link,
@@ -4974,6 +5207,10 @@ static const struct ethtool_ops ops = {
.set_eee = rtl_ethtool_set_eee,
.get_link_ksettings = rtl8152_get_link_ksettings,
.set_link_ksettings = rtl8152_set_link_ksettings,
+ .get_tunable = rtl8152_get_tunable,
+ .set_tunable = rtl8152_set_tunable,
+ .get_ringparam = rtl8152_get_ringparam,
+ .set_ringparam = rtl8152_set_ringparam,
};
static int rtl8152_ioctl(struct net_device *netdev, struct ifreq *rq, int cmd)
@@ -5118,6 +5355,9 @@ static int rtl_ops_init(struct r8152 *tp)
ops->in_nway = rtl8152_in_nway;
ops->hw_phy_cfg = r8152b_hw_phy_cfg;
ops->autosuspend_en = rtl_runtime_suspend_enable;
+ tp->rx_buf_sz = 16 * 1024;
+ tp->eee_en = true;
+ tp->eee_adv = MDIO_EEE_100TX;
break;
case RTL_VER_03:
@@ -5131,10 +5371,13 @@ static int rtl_ops_init(struct r8152 *tp)
ops->down = rtl8153_down;
ops->unload = rtl8153_unload;
ops->eee_get = r8153_get_eee;
- ops->eee_set = r8153_set_eee;
+ ops->eee_set = r8152_set_eee;
ops->in_nway = rtl8153_in_nway;
ops->hw_phy_cfg = r8153_hw_phy_cfg;
ops->autosuspend_en = rtl8153_runtime_enable;
+ tp->rx_buf_sz = 32 * 1024;
+ tp->eee_en = true;
+ tp->eee_adv = MDIO_EEE_1000T | MDIO_EEE_100TX;
break;
case RTL_VER_08:
@@ -5146,10 +5389,13 @@ static int rtl_ops_init(struct r8152 *tp)
ops->down = rtl8153b_down;
ops->unload = rtl8153b_unload;
ops->eee_get = r8153_get_eee;
- ops->eee_set = r8153b_set_eee;
+ ops->eee_set = r8152_set_eee;
ops->in_nway = rtl8153_in_nway;
ops->hw_phy_cfg = r8153b_hw_phy_cfg;
ops->autosuspend_en = rtl8153b_runtime_enable;
+ tp->rx_buf_sz = 32 * 1024;
+ tp->eee_en = true;
+ tp->eee_adv = MDIO_EEE_1000T | MDIO_EEE_100TX;
break;
default:
@@ -5271,6 +5517,8 @@ static int rtl8152_probe(struct usb_interface *intf,
mutex_init(&tp->control);
INIT_DELAYED_WORK(&tp->schedule, rtl_work_func_t);
INIT_DELAYED_WORK(&tp->hw_phy_work, rtl_hw_phy_work_func_t);
+ tasklet_init(&tp->tx_tl, bottom_half, (unsigned long)tp);
+ tasklet_disable(&tp->tx_tl);
netdev->netdev_ops = &rtl8152_netdev_ops;
netdev->watchdog_timeo = RTL8152_TX_TIMEOUT;
@@ -5324,6 +5572,9 @@ static int rtl8152_probe(struct usb_interface *intf,
tp->speed = tp->mii.supports_gmii ? SPEED_1000 : SPEED_100;
tp->duplex = DUPLEX_FULL;
+ tp->rx_copybreak = RTL8152_RXFG_HEADSZ;
+ tp->rx_pending = 10 * RTL8152_MAX_RX;
+
intf->needs_remote_wakeup = 1;
tp->rtl_ops.init(tp);
@@ -5354,6 +5605,7 @@ static int rtl8152_probe(struct usb_interface *intf,
out1:
netif_napi_del(&tp->napi);
+ tasklet_kill(&tp->tx_tl);
usb_set_intfdata(intf, NULL);
out:
free_netdev(netdev);
@@ -5370,6 +5622,7 @@ static void rtl8152_disconnect(struct usb_interface *intf)
netif_napi_del(&tp->napi);
unregister_netdev(tp->netdev);
+ tasklet_kill(&tp->tx_tl);
cancel_delayed_work_sync(&tp->hw_phy_work);
tp->rtl_ops.unload(tp);
free_netdev(tp->netdev);
diff --git a/drivers/net/usb/rtl8150.c b/drivers/net/usb/rtl8150.c
index 98f33e270af1..13e51ccf0214 100644
--- a/drivers/net/usb/rtl8150.c
+++ b/drivers/net/usb/rtl8150.c
@@ -586,8 +586,7 @@ static void free_skb_pool(rtl8150_t *dev)
int i;
for (i = 0; i < RX_SKB_POOL_SIZE; i++)
- if (dev->rx_skb_pool[i])
- dev_kfree_skb(dev->rx_skb_pool[i]);
+ dev_kfree_skb(dev->rx_skb_pool[i]);
}
static void rx_fixup(unsigned long data)
@@ -946,8 +945,7 @@ static void rtl8150_disconnect(struct usb_interface *intf)
unlink_all_urbs(dev);
free_all_urbs(dev);
free_skb_pool(dev);
- if (dev->rx_skb)
- dev_kfree_skb(dev->rx_skb);
+ dev_kfree_skb(dev->rx_skb);
kfree(dev->intr_buff);
free_netdev(dev->netdev);
}
diff --git a/drivers/net/usb/smsc75xx.c b/drivers/net/usb/smsc75xx.c
index 1417a22962a1..9556d431885f 100644
--- a/drivers/net/usb/smsc75xx.c
+++ b/drivers/net/usb/smsc75xx.c
@@ -661,8 +661,7 @@ static void smsc75xx_status(struct usbnet *dev, struct urb *urb)
return;
}
- memcpy(&intdata, urb->transfer_buffer, 4);
- le32_to_cpus(&intdata);
+ intdata = get_unaligned_le32(urb->transfer_buffer);
netif_dbg(dev, link, dev->net, "intdata: 0x%08X\n", intdata);
@@ -2181,12 +2180,10 @@ static int smsc75xx_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
struct sk_buff *ax_skb;
unsigned char *packet;
- memcpy(&rx_cmd_a, skb->data, sizeof(rx_cmd_a));
- le32_to_cpus(&rx_cmd_a);
+ rx_cmd_a = get_unaligned_le32(skb->data);
skb_pull(skb, 4);
- memcpy(&rx_cmd_b, skb->data, sizeof(rx_cmd_b));
- le32_to_cpus(&rx_cmd_b);
+ rx_cmd_b = get_unaligned_le32(skb->data);
skb_pull(skb, 4 + RXW_PADDING);
packet = skb->data;
@@ -2258,6 +2255,7 @@ static struct sk_buff *smsc75xx_tx_fixup(struct usbnet *dev,
struct sk_buff *skb, gfp_t flags)
{
u32 tx_cmd_a, tx_cmd_b;
+ void *ptr;
if (skb_cow_head(skb, SMSC75XX_TX_OVERHEAD)) {
dev_kfree_skb_any(skb);
@@ -2278,13 +2276,9 @@ static struct sk_buff *smsc75xx_tx_fixup(struct usbnet *dev,
tx_cmd_b = 0;
}
- skb_push(skb, 4);
- cpu_to_le32s(&tx_cmd_b);
- memcpy(skb->data, &tx_cmd_b, 4);
-
- skb_push(skb, 4);
- cpu_to_le32s(&tx_cmd_a);
- memcpy(skb->data, &tx_cmd_a, 4);
+ ptr = skb_push(skb, 8);
+ put_unaligned_le32(tx_cmd_a, ptr);
+ put_unaligned_le32(tx_cmd_b, ptr + 4);
return skb;
}
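This hunk and the sr9800 one below replace open-coded memcpy plus cpu_to_le32s()/le32_to_cpus() sequences with the unaligned access helpers; the bytes on the wire are identical, the helpers just make byte order and alignment explicit. A small equivalence sketch:

#include <linux/types.h>
#include <linux/string.h>
#include <asm/unaligned.h>

/* Old idiom: byte-swap in place, then copy to a possibly
 * unaligned destination.
 */
static void put_le32_old(void *dst, u32 val)
{
	cpu_to_le32s(&val);
	memcpy(dst, &val, sizeof(val));
}

/* New idiom: one helper, same result, alignment-safe. */
static void put_le32_new(void *dst, u32 val)
{
	put_unaligned_le32(val, dst);
}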
diff --git a/drivers/net/usb/sr9800.c b/drivers/net/usb/sr9800.c
index 35f39f23d881..c5d4a0060124 100644
--- a/drivers/net/usb/sr9800.c
+++ b/drivers/net/usb/sr9800.c
@@ -115,6 +115,7 @@ static struct sk_buff *sr_tx_fixup(struct usbnet *dev, struct sk_buff *skb,
u32 padbytes = 0xffff0000;
u32 packet_len;
int padlen;
+ void *ptr;
padlen = ((skb->len + 4) % (dev->maxpacket - 1)) ? 0 : 4;
@@ -133,14 +134,12 @@ static struct sk_buff *sr_tx_fixup(struct usbnet *dev, struct sk_buff *skb,
return NULL;
}
- skb_push(skb, 4);
+ ptr = skb_push(skb, 4);
packet_len = (((skb->len - 4) ^ 0x0000ffff) << 16) + (skb->len - 4);
- cpu_to_le32s(&packet_len);
- skb_copy_to_linear_data(skb, &packet_len, sizeof(packet_len));
+ put_unaligned_le32(packet_len, ptr);
if (padlen) {
- cpu_to_le32s(&padbytes);
- memcpy(skb_tail_pointer(skb), &padbytes, sizeof(padbytes));
+ put_unaligned_le32(padbytes, skb_tail_pointer(skb));
skb_put(skb, sizeof(padbytes));
}
diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c
index 72514c46b478..58952a79b05f 100644
--- a/drivers/net/usb/usbnet.c
+++ b/drivers/net/usb/usbnet.c
@@ -1324,11 +1324,11 @@ static int build_dma_sg(const struct sk_buff *skb, struct urb *urb)
total_len += skb_headlen(skb);
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
- struct skb_frag_struct *f = &skb_shinfo(skb)->frags[i];
+ skb_frag_t *f = &skb_shinfo(skb)->frags[i];
total_len += skb_frag_size(f);
- sg_set_page(&urb->sg[i + s], f->page.p, f->size,
- f->page_offset);
+ sg_set_page(&urb->sg[i + s], skb_frag_page(f), skb_frag_size(f),
+ skb_frag_off(f));
}
urb->transfer_buffer_length = total_len;
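Several hunks in this series (usbnet above, vmxnet3 and wil6210 below) convert direct skb_frag_struct field access to the skb_frag_t accessor API, which hides the fragment layout. A sketch of the accessor-based idiom:

#include <linux/skbuff.h>

/* Sketch: walk skb fragments through accessors instead of poking
 * the struct fields directly.
 */
static unsigned int skb_frags_total(const struct sk_buff *skb)
{
	unsigned int total = 0;
	int i;

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		const skb_frag_t *f = &skb_shinfo(skb)->frags[i];

		total += skb_frag_size(f);	/* was f->size */
		(void)skb_frag_page(f);		/* was f->page.p */
		(void)skb_frag_off(f);		/* was f->page_offset */
	}
	return total;
}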
diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c
index 2a1918f25e47..216acf37ca7c 100644
--- a/drivers/net/vmxnet3/vmxnet3_drv.c
+++ b/drivers/net/vmxnet3/vmxnet3_drv.c
@@ -657,13 +657,12 @@ static void
vmxnet3_append_frag(struct sk_buff *skb, struct Vmxnet3_RxCompDesc *rcd,
struct vmxnet3_rx_buf_info *rbi)
{
- struct skb_frag_struct *frag = skb_shinfo(skb)->frags +
- skb_shinfo(skb)->nr_frags;
+ skb_frag_t *frag = skb_shinfo(skb)->frags + skb_shinfo(skb)->nr_frags;
BUG_ON(skb_shinfo(skb)->nr_frags >= MAX_SKB_FRAGS);
__skb_frag_set_page(frag, rbi->page);
- frag->page_offset = 0;
+ skb_frag_off_set(frag, 0);
skb_frag_size_set(frag, rcd->len);
skb->data_len += rcd->len;
skb->truesize += PAGE_SIZE;
@@ -755,7 +754,7 @@ vmxnet3_map_pkt(struct sk_buff *skb, struct vmxnet3_tx_ctx *ctx,
}
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
- const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
+ const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
u32 buf_size;
buf_offset = 0;
@@ -956,7 +955,7 @@ static int txd_estimate(const struct sk_buff *skb)
int i;
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
- const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
+ const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
count += VMXNET3_TXD_NEEDED(skb_frag_size(frag));
}
diff --git a/drivers/net/wimax/i2400m/debugfs.c b/drivers/net/wimax/i2400m/debugfs.c
index 6544ac9df047..73f5892ce6c1 100644
--- a/drivers/net/wimax/i2400m/debugfs.c
+++ b/drivers/net/wimax/i2400m/debugfs.c
@@ -30,15 +30,6 @@ DEFINE_SIMPLE_ATTRIBUTE(fops_netdev_queue_stopped,
debugfs_netdev_queue_stopped_get,
NULL, "%llu\n");
-
-static
-struct dentry *debugfs_create_netdev_queue_stopped(
- const char *name, struct dentry *parent, struct i2400m *i2400m)
-{
- return debugfs_create_file(name, 0400, parent, i2400m,
- &fops_netdev_queue_stopped);
-}
-
/*
* We don't allow partial reads of this file, as then the reader would
* get weirdly confused data as it is updated.
@@ -167,15 +158,6 @@ DEFINE_SIMPLE_ATTRIBUTE(fops_i2400m_suspend,
NULL, debugfs_i2400m_suspend_set,
"%llu\n");
-static
-struct dentry *debugfs_create_i2400m_suspend(
- const char *name, struct dentry *parent, struct i2400m *i2400m)
-{
- return debugfs_create_file(name, 0200, parent, i2400m,
- &fops_i2400m_suspend);
-}
-
-
/*
* Reset the device
*
@@ -205,73 +187,25 @@ DEFINE_SIMPLE_ATTRIBUTE(fops_i2400m_reset,
NULL, debugfs_i2400m_reset_set,
"%llu\n");
-static
-struct dentry *debugfs_create_i2400m_reset(
- const char *name, struct dentry *parent, struct i2400m *i2400m)
+void i2400m_debugfs_add(struct i2400m *i2400m)
{
- return debugfs_create_file(name, 0200, parent, i2400m,
- &fops_i2400m_reset);
-}
-
-
-#define __debugfs_register(prefix, name, parent) \
-do { \
- result = d_level_register_debugfs(prefix, name, parent); \
- if (result < 0) \
- goto error; \
-} while (0)
-
-
-int i2400m_debugfs_add(struct i2400m *i2400m)
-{
- int result;
- struct device *dev = i2400m_dev(i2400m);
struct dentry *dentry = i2400m->wimax_dev.debugfs_dentry;
- struct dentry *fd;
dentry = debugfs_create_dir("i2400m", dentry);
- result = PTR_ERR(dentry);
- if (IS_ERR(dentry)) {
- if (result == -ENODEV)
- result = 0; /* No debugfs support */
- goto error;
- }
i2400m->debugfs_dentry = dentry;
- __debugfs_register("dl_", control, dentry);
- __debugfs_register("dl_", driver, dentry);
- __debugfs_register("dl_", debugfs, dentry);
- __debugfs_register("dl_", fw, dentry);
- __debugfs_register("dl_", netdev, dentry);
- __debugfs_register("dl_", rfkill, dentry);
- __debugfs_register("dl_", rx, dentry);
- __debugfs_register("dl_", tx, dentry);
-
- fd = debugfs_create_size_t("tx_in", 0400, dentry,
- &i2400m->tx_in);
- result = PTR_ERR(fd);
- if (IS_ERR(fd) && result != -ENODEV) {
- dev_err(dev, "Can't create debugfs entry "
- "tx_in: %d\n", result);
- goto error;
- }
- fd = debugfs_create_size_t("tx_out", 0400, dentry,
- &i2400m->tx_out);
- result = PTR_ERR(fd);
- if (IS_ERR(fd) && result != -ENODEV) {
- dev_err(dev, "Can't create debugfs entry "
- "tx_out: %d\n", result);
- goto error;
- }
+ d_level_register_debugfs("dl_", control, dentry);
+ d_level_register_debugfs("dl_", driver, dentry);
+ d_level_register_debugfs("dl_", debugfs, dentry);
+ d_level_register_debugfs("dl_", fw, dentry);
+ d_level_register_debugfs("dl_", netdev, dentry);
+ d_level_register_debugfs("dl_", rfkill, dentry);
+ d_level_register_debugfs("dl_", rx, dentry);
+ d_level_register_debugfs("dl_", tx, dentry);
- fd = debugfs_create_u32("state", 0600, dentry,
- &i2400m->state);
- result = PTR_ERR(fd);
- if (IS_ERR(fd) && result != -ENODEV) {
- dev_err(dev, "Can't create debugfs entry "
- "state: %d\n", result);
- goto error;
- }
+ debugfs_create_size_t("tx_in", 0400, dentry, &i2400m->tx_in);
+ debugfs_create_size_t("tx_out", 0400, dentry, &i2400m->tx_out);
+ debugfs_create_u32("state", 0600, dentry, &i2400m->state);
/*
* Trace received messages from user space
@@ -295,60 +229,22 @@ int i2400m_debugfs_add(struct i2400m *i2400m)
* It is not really very atomic, but it is also not too
* critical.
*/
- fd = debugfs_create_u8("trace_msg_from_user", 0600, dentry,
- &i2400m->trace_msg_from_user);
- result = PTR_ERR(fd);
- if (IS_ERR(fd) && result != -ENODEV) {
- dev_err(dev, "Can't create debugfs entry "
- "trace_msg_from_user: %d\n", result);
- goto error;
- }
+ debugfs_create_u8("trace_msg_from_user", 0600, dentry,
+ &i2400m->trace_msg_from_user);
- fd = debugfs_create_netdev_queue_stopped("netdev_queue_stopped",
- dentry, i2400m);
- result = PTR_ERR(fd);
- if (IS_ERR(fd) && result != -ENODEV) {
- dev_err(dev, "Can't create debugfs entry "
- "netdev_queue_stopped: %d\n", result);
- goto error;
- }
+ debugfs_create_file("netdev_queue_stopped", 0400, dentry, i2400m,
+ &fops_netdev_queue_stopped);
- fd = debugfs_create_file("rx_stats", 0600, dentry, i2400m,
- &i2400m_rx_stats_fops);
- result = PTR_ERR(fd);
- if (IS_ERR(fd) && result != -ENODEV) {
- dev_err(dev, "Can't create debugfs entry "
- "rx_stats: %d\n", result);
- goto error;
- }
+ debugfs_create_file("rx_stats", 0600, dentry, i2400m,
+ &i2400m_rx_stats_fops);
- fd = debugfs_create_file("tx_stats", 0600, dentry, i2400m,
- &i2400m_tx_stats_fops);
- result = PTR_ERR(fd);
- if (IS_ERR(fd) && result != -ENODEV) {
- dev_err(dev, "Can't create debugfs entry "
- "tx_stats: %d\n", result);
- goto error;
- }
+ debugfs_create_file("tx_stats", 0600, dentry, i2400m,
+ &i2400m_tx_stats_fops);
- fd = debugfs_create_i2400m_suspend("suspend", dentry, i2400m);
- result = PTR_ERR(fd);
- if (IS_ERR(fd) && result != -ENODEV) {
- dev_err(dev, "Can't create debugfs entry suspend: %d\n",
- result);
- goto error;
- }
+ debugfs_create_file("suspend", 0200, dentry, i2400m,
+ &fops_i2400m_suspend);
- fd = debugfs_create_i2400m_reset("reset", dentry, i2400m);
- result = PTR_ERR(fd);
- if (IS_ERR(fd) && result != -ENODEV) {
- dev_err(dev, "Can't create debugfs entry reset: %d\n", result);
- goto error;
- }
-
- result = 0;
-error:
- return result;
+ debugfs_create_file("reset", 0200, dentry, i2400m, &fops_i2400m_reset);
}
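The conversion above follows the tree-wide debugfs convention: creation calls are fire-and-forget, because a failed or disabled debugfs yields an ERR_PTR parent that subsequent creation calls accept as a no-op. A minimal sketch of the resulting pattern (names hypothetical):

#include <linux/debugfs.h>

/* Sketch: no error plumbing needed for debugfs setup. */
static void my_debugfs_add(struct dentry *parent, u32 *state)
{
	struct dentry *dir = debugfs_create_dir("mydev", parent);

	/* Harmless even if dir is an ERR_PTR (debugfs disabled or
	 * creation failed): the call below then does nothing.
	 */
	debugfs_create_u32("state", 0600, dir, state);
}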
void i2400m_debugfs_rm(struct i2400m *i2400m)
diff --git a/drivers/net/wimax/i2400m/driver.c b/drivers/net/wimax/i2400m/driver.c
index 0a29222a1bf9..f66c0f8f6f4a 100644
--- a/drivers/net/wimax/i2400m/driver.c
+++ b/drivers/net/wimax/i2400m/driver.c
@@ -905,11 +905,7 @@ int i2400m_setup(struct i2400m *i2400m, enum i2400m_bri bm_flags)
goto error_sysfs_setup;
}
- result = i2400m_debugfs_add(i2400m);
- if (result < 0) {
- dev_err(dev, "cannot setup i2400m's debugfs: %d\n", result);
- goto error_debugfs_setup;
- }
+ i2400m_debugfs_add(i2400m);
result = i2400m_dev_start(i2400m, bm_flags);
if (result < 0)
@@ -919,7 +915,6 @@ int i2400m_setup(struct i2400m *i2400m, enum i2400m_bri bm_flags)
error_dev_start:
i2400m_debugfs_rm(i2400m);
-error_debugfs_setup:
sysfs_remove_group(&i2400m->wimax_dev.net_dev->dev.kobj,
&i2400m_dev_attr_group);
error_sysfs_setup:
diff --git a/drivers/net/wimax/i2400m/i2400m.h b/drivers/net/wimax/i2400m/i2400m.h
index 5a34e72bab9a..a3733a6d14f5 100644
--- a/drivers/net/wimax/i2400m/i2400m.h
+++ b/drivers/net/wimax/i2400m/i2400m.h
@@ -812,13 +812,10 @@ enum i2400m_pt;
int i2400m_tx(struct i2400m *, const void *, size_t, enum i2400m_pt);
#ifdef CONFIG_DEBUG_FS
-int i2400m_debugfs_add(struct i2400m *);
+void i2400m_debugfs_add(struct i2400m *);
void i2400m_debugfs_rm(struct i2400m *);
#else
-static inline int i2400m_debugfs_add(struct i2400m *i2400m)
-{
- return 0;
-}
+static inline void i2400m_debugfs_add(struct i2400m *i2400m) {}
static inline void i2400m_debugfs_rm(struct i2400m *i2400m) {}
#endif
diff --git a/drivers/net/wimax/i2400m/usb.c b/drivers/net/wimax/i2400m/usb.c
index 2075e7b1fff6..6953f904232f 100644
--- a/drivers/net/wimax/i2400m/usb.c
+++ b/drivers/net/wimax/i2400m/usb.c
@@ -366,61 +366,25 @@ struct d_level D_LEVEL[] = {
};
size_t D_LEVEL_SIZE = ARRAY_SIZE(D_LEVEL);
-
-#define __debugfs_register(prefix, name, parent) \
-do { \
- result = d_level_register_debugfs(prefix, name, parent); \
- if (result < 0) \
- goto error; \
-} while (0)
-
-
static
-int i2400mu_debugfs_add(struct i2400mu *i2400mu)
+void i2400mu_debugfs_add(struct i2400mu *i2400mu)
{
- int result;
- struct device *dev = &i2400mu->usb_iface->dev;
struct dentry *dentry = i2400mu->i2400m.wimax_dev.debugfs_dentry;
- struct dentry *fd;
dentry = debugfs_create_dir("i2400m-usb", dentry);
- result = PTR_ERR(dentry);
- if (IS_ERR(dentry)) {
- if (result == -ENODEV)
- result = 0; /* No debugfs support */
- goto error;
- }
i2400mu->debugfs_dentry = dentry;
- __debugfs_register("dl_", usb, dentry);
- __debugfs_register("dl_", fw, dentry);
- __debugfs_register("dl_", notif, dentry);
- __debugfs_register("dl_", rx, dentry);
- __debugfs_register("dl_", tx, dentry);
- /* Don't touch these if you don't know what you are doing */
- fd = debugfs_create_u8("rx_size_auto_shrink", 0600, dentry,
- &i2400mu->rx_size_auto_shrink);
- result = PTR_ERR(fd);
- if (IS_ERR(fd) && result != -ENODEV) {
- dev_err(dev, "Can't create debugfs entry "
- "rx_size_auto_shrink: %d\n", result);
- goto error;
- }
+ d_level_register_debugfs("dl_", usb, dentry);
+ d_level_register_debugfs("dl_", fw, dentry);
+ d_level_register_debugfs("dl_", notif, dentry);
+ d_level_register_debugfs("dl_", rx, dentry);
+ d_level_register_debugfs("dl_", tx, dentry);
- fd = debugfs_create_size_t("rx_size", 0600, dentry,
- &i2400mu->rx_size);
- result = PTR_ERR(fd);
- if (IS_ERR(fd) && result != -ENODEV) {
- dev_err(dev, "Can't create debugfs entry "
- "rx_size: %d\n", result);
- goto error;
- }
-
- return 0;
+ /* Don't touch these if you don't know what you are doing */
+ debugfs_create_u8("rx_size_auto_shrink", 0600, dentry,
+ &i2400mu->rx_size_auto_shrink);
-error:
- debugfs_remove_recursive(i2400mu->debugfs_dentry);
- return result;
+ debugfs_create_size_t("rx_size", 0600, dentry, &i2400mu->rx_size);
}
@@ -534,15 +498,9 @@ int i2400mu_probe(struct usb_interface *iface,
dev_err(dev, "cannot setup device: %d\n", result);
goto error_setup;
}
- result = i2400mu_debugfs_add(i2400mu);
- if (result < 0) {
- dev_err(dev, "Can't register i2400mu's debugfs: %d\n", result);
- goto error_debugfs_add;
- }
+ i2400mu_debugfs_add(i2400mu);
return 0;
-error_debugfs_add:
- i2400m_release(i2400m);
error_setup:
usb_set_intfdata(iface, NULL);
usb_put_dev(i2400mu->usb_dev);
diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c
index 0606416dc971..12dad659bf68 100644
--- a/drivers/net/wireless/ath/ath10k/mac.c
+++ b/drivers/net/wireless/ath/ath10k/mac.c
@@ -6970,7 +6970,8 @@ exit:
return ret;
}
-static int ath10k_cancel_remain_on_channel(struct ieee80211_hw *hw)
+static int ath10k_cancel_remain_on_channel(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
{
struct ath10k *ar = hw->priv;
diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
index f23cb2f3d296..34121fbf32e3 100644
--- a/drivers/net/wireless/ath/ath9k/main.c
+++ b/drivers/net/wireless/ath/ath9k/main.c
@@ -2392,7 +2392,8 @@ out:
return ret;
}
-static int ath9k_cancel_remain_on_channel(struct ieee80211_hw *hw)
+static int ath9k_cancel_remain_on_channel(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
{
struct ath_softc *sc = hw->priv;
struct ath_common *common = ath9k_hw_common(sc->sc_ah);
diff --git a/drivers/net/wireless/ath/wil6210/cfg80211.c b/drivers/net/wireless/ath/wil6210/cfg80211.c
index 2fb4258941a5..2414f574bf69 100644
--- a/drivers/net/wireless/ath/wil6210/cfg80211.c
+++ b/drivers/net/wireless/ath/wil6210/cfg80211.c
@@ -351,7 +351,7 @@ int wil_cid_fill_sinfo(struct wil6210_vif *vif, int cid,
BIT_ULL(NL80211_STA_INFO_RX_DROP_MISC) |
BIT_ULL(NL80211_STA_INFO_TX_FAILED);
- sinfo->txrate.flags = RATE_INFO_FLAGS_60G;
+ sinfo->txrate.flags = RATE_INFO_FLAGS_DMG;
sinfo->txrate.mcs = le16_to_cpu(reply.evt.bf_mcs);
sinfo->rxrate.mcs = stats->last_mcs_rx;
sinfo->rx_bytes = stats->rx_bytes;
diff --git a/drivers/net/wireless/ath/wil6210/debugfs.c b/drivers/net/wireless/ath/wil6210/debugfs.c
index 74834131cf7c..fd3b2b3d1b5c 100644
--- a/drivers/net/wireless/ath/wil6210/debugfs.c
+++ b/drivers/net/wireless/ath/wil6210/debugfs.c
@@ -1052,8 +1052,7 @@ static void wil_seq_print_skb(struct seq_file *s, struct sk_buff *skb)
if (nr_frags) {
seq_printf(s, " nr_frags = %d\n", nr_frags);
for (i = 0; i < nr_frags; i++) {
- const struct skb_frag_struct *frag =
- &skb_shinfo(skb)->frags[i];
+ const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
len = skb_frag_size(frag);
p = skb_frag_address_safe(frag);
diff --git a/drivers/net/wireless/ath/wil6210/txrx.c b/drivers/net/wireless/ath/wil6210/txrx.c
index eae00aafaa88..8b01ef8269da 100644
--- a/drivers/net/wireless/ath/wil6210/txrx.c
+++ b/drivers/net/wireless/ath/wil6210/txrx.c
@@ -1657,7 +1657,7 @@ static int __wil_tx_vring_tso(struct wil6210_priv *wil, struct wil6210_vif *vif,
len);
} else {
frag = &skb_shinfo(skb)->frags[f];
- len = frag->size;
+ len = skb_frag_size(frag);
wil_dbg_txrx(wil, "TSO: frag[%d]: len %u\n", f, len);
}
@@ -1678,8 +1678,8 @@ static int __wil_tx_vring_tso(struct wil6210_priv *wil, struct wil6210_vif *vif,
if (!headlen) {
pa = skb_frag_dma_map(dev, frag,
- frag->size - len, lenmss,
- DMA_TO_DEVICE);
+ skb_frag_size(frag) - len,
+ lenmss, DMA_TO_DEVICE);
vring->ctx[i].mapped_as = wil_mapped_as_page;
} else {
pa = dma_map_single(dev,
@@ -1900,8 +1900,7 @@ static int __wil_tx_ring(struct wil6210_priv *wil, struct wil6210_vif *vif,
/* middle segments */
for (; f < nr_frags; f++) {
- const struct skb_frag_struct *frag =
- &skb_shinfo(skb)->frags[f];
+ const skb_frag_t *frag = &skb_shinfo(skb)->frags[f];
int len = skb_frag_size(frag);
*_d = *d;
diff --git a/drivers/net/wireless/ath/wil6210/txrx_edma.c b/drivers/net/wireless/ath/wil6210/txrx_edma.c
index dc040cd4ab06..71b7ad4b6454 100644
--- a/drivers/net/wireless/ath/wil6210/txrx_edma.c
+++ b/drivers/net/wireless/ath/wil6210/txrx_edma.c
@@ -1471,7 +1471,7 @@ static int __wil_tx_ring_tso_edma(struct wil6210_priv *wil,
/* Rest of the descriptors are from the SKB fragments */
for (f = 0; f < nr_frags; f++) {
skb_frag_t *frag = &skb_shinfo(skb)->frags[f];
- int len = frag->size;
+ int len = skb_frag_size(frag);
wil_dbg_txrx(wil, "TSO: frag[%d]: len %u, descs_used %d\n", f,
len, descs_used);
diff --git a/drivers/net/wireless/broadcom/b43legacy/phy.c b/drivers/net/wireless/broadcom/b43legacy/phy.c
index add7a0ff75b8..a659259bc51a 100644
--- a/drivers/net/wireless/broadcom/b43legacy/phy.c
+++ b/drivers/net/wireless/broadcom/b43legacy/phy.c
@@ -69,17 +69,6 @@ static const s8 b43legacy_tssi2dbm_g_table[] = {
static void b43legacy_phy_initg(struct b43legacy_wldev *dev);
-
-static inline
-void b43legacy_voluntary_preempt(void)
-{
- B43legacy_BUG_ON(!(!in_atomic() && !in_irq() &&
- !in_interrupt() && !irqs_disabled()));
-#ifndef CONFIG_PREEMPT
- cond_resched();
-#endif /* CONFIG_PREEMPT */
-}
-
/* Lock the PHY registers against concurrent access from the microcode.
* This lock is nonrecursive. */
void b43legacy_phy_lock(struct b43legacy_wldev *dev)
@@ -1124,7 +1113,7 @@ static u16 b43legacy_phy_lo_b_r15_loop(struct b43legacy_wldev *dev)
ret += b43legacy_phy_read(dev, 0x002C);
}
local_irq_restore(flags);
- b43legacy_voluntary_preempt();
+ cond_resched();
return ret;
}
@@ -1253,7 +1242,7 @@ u16 b43legacy_phy_lo_g_deviation_subval(struct b43legacy_wldev *dev,
}
ret = b43legacy_phy_read(dev, 0x002D);
local_irq_restore(flags);
- b43legacy_voluntary_preempt();
+ cond_resched();
return ret;
}
@@ -1591,7 +1580,7 @@ void b43legacy_phy_lo_g_measure(struct b43legacy_wldev *dev)
b43legacy_radio_write16(dev, 0x43, i);
b43legacy_radio_write16(dev, 0x52, phy->txctl2);
udelay(10);
- b43legacy_voluntary_preempt();
+ cond_resched();
b43legacy_phy_set_baseband_attenuation(dev, j * 2);
@@ -1642,7 +1631,7 @@ void b43legacy_phy_lo_g_measure(struct b43legacy_wldev *dev)
phy->txctl2
| (3/*txctl1*/ << 4));
udelay(10);
- b43legacy_voluntary_preempt();
+ cond_resched();
b43legacy_phy_set_baseband_attenuation(dev, j * 2);
@@ -1665,7 +1654,7 @@ void b43legacy_phy_lo_g_measure(struct b43legacy_wldev *dev)
b43legacy_phy_write(dev, 0x0812, (r27 << 8) | 0xA2);
udelay(2);
b43legacy_phy_write(dev, 0x0812, (r27 << 8) | 0xA3);
- b43legacy_voluntary_preempt();
+ cond_resched();
} else
b43legacy_phy_write(dev, 0x0015, r27 | 0xEFA0);
b43legacy_phy_lo_adjust(dev, is_initializing);
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcdc.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcdc.c
index 322e913ca7aa..2c95a08a5871 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcdc.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcdc.c
@@ -479,18 +479,11 @@ fail:
return -ENOMEM;
}
-void brcmf_proto_bcdc_detach_pre_delif(struct brcmf_pub *drvr)
-{
- struct brcmf_bcdc *bcdc = drvr->proto->pd;
-
- brcmf_fws_detach_pre_delif(bcdc->fws);
-}
-
-void brcmf_proto_bcdc_detach_post_delif(struct brcmf_pub *drvr)
+void brcmf_proto_bcdc_detach(struct brcmf_pub *drvr)
{
struct brcmf_bcdc *bcdc = drvr->proto->pd;
drvr->proto->pd = NULL;
- brcmf_fws_detach_post_delif(bcdc->fws);
+ brcmf_fws_detach(bcdc->fws);
kfree(bcdc);
}
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcdc.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcdc.h
index 102e6938905c..b051d2860cd1 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcdc.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcdc.h
@@ -7,16 +7,14 @@
#ifdef CONFIG_BRCMFMAC_PROTO_BCDC
int brcmf_proto_bcdc_attach(struct brcmf_pub *drvr);
-void brcmf_proto_bcdc_detach_pre_delif(struct brcmf_pub *drvr);
-void brcmf_proto_bcdc_detach_post_delif(struct brcmf_pub *drvr);
+void brcmf_proto_bcdc_detach(struct brcmf_pub *drvr);
void brcmf_proto_bcdc_txflowblock(struct device *dev, bool state);
void brcmf_proto_bcdc_txcomplete(struct device *dev, struct sk_buff *txp,
bool success);
struct brcmf_fws_info *drvr_to_fws(struct brcmf_pub *drvr);
#else
static inline int brcmf_proto_bcdc_attach(struct brcmf_pub *drvr) { return 0; }
-static void brcmf_proto_bcdc_detach_pre_delif(struct brcmf_pub *drvr) {};
-static inline void brcmf_proto_bcdc_detach_post_delif(struct brcmf_pub *drvr) {}
+static inline void brcmf_proto_bcdc_detach(struct brcmf_pub *drvr) {}
#endif
#endif /* BRCMFMAC_BCDC_H */
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
index b6d0df354b36..581d0013f33e 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
@@ -189,9 +189,9 @@ static const struct ieee80211_regdomain brcmf_regdom = {
*/
REG_RULE(2484-10, 2484+10, 20, 6, 20, 0),
/* IEEE 802.11a, channel 36..64 */
- REG_RULE(5150-10, 5350+10, 80, 6, 20, 0),
+ REG_RULE(5150-10, 5350+10, 160, 6, 20, 0),
/* IEEE 802.11a, channel 100..165 */
- REG_RULE(5470-10, 5850+10, 80, 6, 20, 0), }
+ REG_RULE(5470-10, 5850+10, 160, 6, 20, 0), }
};
/* Note: brcmf_cipher_suites is an array of int defining which cipher suites
@@ -276,8 +276,26 @@ static u16 chandef_to_chanspec(struct brcmu_d11inf *d11inf,
else
ch_inf.sb = BRCMU_CHAN_SB_UU;
break;
- case NL80211_CHAN_WIDTH_80P80:
case NL80211_CHAN_WIDTH_160:
+ ch_inf.bw = BRCMU_CHAN_BW_160;
+ if (primary_offset == -70)
+ ch_inf.sb = BRCMU_CHAN_SB_LLL;
+ else if (primary_offset == -50)
+ ch_inf.sb = BRCMU_CHAN_SB_LLU;
+ else if (primary_offset == -30)
+ ch_inf.sb = BRCMU_CHAN_SB_LUL;
+ else if (primary_offset == -10)
+ ch_inf.sb = BRCMU_CHAN_SB_LUU;
+ else if (primary_offset == 10)
+ ch_inf.sb = BRCMU_CHAN_SB_ULL;
+ else if (primary_offset == 30)
+ ch_inf.sb = BRCMU_CHAN_SB_ULU;
+ else if (primary_offset == 50)
+ ch_inf.sb = BRCMU_CHAN_SB_UUL;
+ else
+ ch_inf.sb = BRCMU_CHAN_SB_UUU;
+ break;
+ case NL80211_CHAN_WIDTH_80P80:
case NL80211_CHAN_WIDTH_5:
case NL80211_CHAN_WIDTH_10:
default:
@@ -296,6 +314,7 @@ static u16 chandef_to_chanspec(struct brcmu_d11inf *d11inf,
}
d11inf->encchspec(&ch_inf);
+ brcmf_dbg(TRACE, "chanspec: 0x%x\n", ch_inf.chspec);
return ch_inf.chspec;
}
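In the 160 MHz case above, primary_offset is the primary channel's center frequency minus the 160 MHz block's center, in MHz, so the eight 20 MHz sub-bands land on -70, -50, ..., +70. A worked example using the standard 5 GHz channelization (illustrative only):

/* 160 MHz block centered on channel 50 (5250 MHz):
 *
 *   primary = channel 36 (5180 MHz): 5180 - 5250 = -70 -> BRCMU_CHAN_SB_LLL
 *   primary = channel 64 (5320 MHz): 5320 - 5250 = +70 -> BRCMU_CHAN_SB_UUU
 *                                    (the final else branch)
 */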
@@ -1267,17 +1286,21 @@ static void brcmf_link_down(struct brcmf_cfg80211_vif *vif, u16 reason)
{
struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(vif->wdev.wiphy);
struct brcmf_pub *drvr = cfg->pub;
+ bool bus_up = drvr->bus_if->state == BRCMF_BUS_UP;
s32 err = 0;
brcmf_dbg(TRACE, "Enter\n");
if (test_and_clear_bit(BRCMF_VIF_STATUS_CONNECTED, &vif->sme_state)) {
- brcmf_dbg(INFO, "Call WLC_DISASSOC to stop excess roaming\n");
- err = brcmf_fil_cmd_data_set(vif->ifp,
- BRCMF_C_DISASSOC, NULL, 0);
- if (err) {
- bphy_err(drvr, "WLC_DISASSOC failed (%d)\n", err);
+ if (bus_up) {
+ brcmf_dbg(INFO, "Call WLC_DISASSOC to stop excess roaming\n");
+ err = brcmf_fil_cmd_data_set(vif->ifp,
+ BRCMF_C_DISASSOC, NULL, 0);
+ if (err)
+ bphy_err(drvr, "WLC_DISASSOC failed (%d)\n",
+ err);
}
+
if ((vif->wdev.iftype == NL80211_IFTYPE_STATION) ||
(vif->wdev.iftype == NL80211_IFTYPE_P2P_CLIENT))
cfg80211_disconnected(vif->wdev.netdev, reason, NULL, 0,
@@ -1287,7 +1310,8 @@ static void brcmf_link_down(struct brcmf_cfg80211_vif *vif, u16 reason)
clear_bit(BRCMF_SCAN_STATUS_SUPPRESS, &cfg->scan_status);
brcmf_btcoex_set_mode(vif, BRCMF_BTCOEX_ENABLED, 0);
if (vif->profile.use_fwsup != BRCMF_PROFILE_FWSUP_NONE) {
- brcmf_set_pmk(vif->ifp, NULL, 0);
+ if (bus_up)
+ brcmf_set_pmk(vif->ifp, NULL, 0);
vif->profile.use_fwsup = BRCMF_PROFILE_FWSUP_NONE;
}
brcmf_dbg(TRACE, "Exit\n");
@@ -2958,8 +2982,6 @@ static s32 brcmf_update_bss_info(struct brcmf_cfg80211_info *cfg,
struct brcmf_pub *drvr = cfg->pub;
struct brcmf_bss_info_le *bi;
const struct brcmf_tlv *tim;
- u16 beacon_interval;
- u8 dtim_period;
size_t ie_len;
u8 *ie;
s32 err = 0;
@@ -2983,12 +3005,9 @@ static s32 brcmf_update_bss_info(struct brcmf_cfg80211_info *cfg,
ie = ((u8 *)bi) + le16_to_cpu(bi->ie_offset);
ie_len = le32_to_cpu(bi->ie_length);
- beacon_interval = le16_to_cpu(bi->beacon_period);
tim = brcmf_parse_tlvs(ie, ie_len, WLAN_EID_TIM);
- if (tim)
- dtim_period = tim->data[1];
- else {
+ if (!tim) {
/*
* active scan was done so we could not get dtim
* information out of probe response.
@@ -3000,7 +3019,6 @@ static s32 brcmf_update_bss_info(struct brcmf_cfg80211_info *cfg,
bphy_err(drvr, "wl dtim_assoc failed (%d)\n", err);
goto update_bss_info_out;
}
- dtim_period = (u8)var;
}
update_bss_info_out:
@@ -4985,18 +5003,16 @@ static int brcmf_cfg80211_get_channel(struct wiphy *wiphy,
struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
struct net_device *ndev = wdev->netdev;
struct brcmf_pub *drvr = cfg->pub;
- struct brcmf_if *ifp;
struct brcmu_chan ch;
enum nl80211_band band = 0;
enum nl80211_chan_width width = 0;
u32 chanspec;
int freq, err;
- if (!ndev)
+ if (!ndev || drvr->bus_if->state != BRCMF_BUS_UP)
return -ENODEV;
- ifp = netdev_priv(ndev);
- err = brcmf_fil_iovar_int_get(ifp, "chanspec", &chanspec);
+ err = brcmf_fil_iovar_int_get(netdev_priv(ndev), "chanspec", &chanspec);
if (err) {
bphy_err(drvr, "chanspec failed (%d)\n", err);
return err;
@@ -6714,6 +6730,11 @@ static int brcmf_setup_wiphy(struct wiphy *wiphy, struct brcmf_if *ifp)
}
}
+ if (wiphy->bands[NL80211_BAND_5GHZ] &&
+ brcmf_feat_is_enabled(ifp, BRCMF_FEAT_DOT11H))
+ wiphy_ext_feature_set(wiphy,
+ NL80211_EXT_FEATURE_DFS_OFFLOAD);
+
wiphy_read_of_freq_limits(wiphy);
return 0;
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c
index aa89d620ee5d..dec25e415619 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c
@@ -258,7 +258,6 @@ int brcmf_c_preinit_dcmds(struct brcmf_if *ifp)
/* query for 'ver' to get version info from firmware */
memset(buf, 0, sizeof(buf));
- strlcpy(buf, "ver", sizeof(buf));
err = brcmf_fil_iovar_data_get(ifp, "ver", buf, sizeof(buf));
if (err < 0) {
bphy_err(drvr, "Retrieving version information failed, %d\n",
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c
index bf18491a33a5..705b8cc53c3e 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c
@@ -579,7 +579,8 @@ static int brcmf_netdev_stop(struct net_device *ndev)
brcmf_cfg80211_down(ndev);
- brcmf_fil_iovar_data_set(ifp, "arp_hostip_clear", NULL, 0);
+ if (ifp->drvr->bus_if->state == BRCMF_BUS_UP)
+ brcmf_fil_iovar_data_set(ifp, "arp_hostip_clear", NULL, 0);
brcmf_net_setcarrier(ifp, false);
@@ -1307,27 +1308,26 @@ void brcmf_detach(struct device *dev)
unregister_inet6addr_notifier(&drvr->inet6addr_notifier);
#endif
- /* stop firmware event handling */
- brcmf_fweh_detach(drvr);
- if (drvr->config)
- brcmf_p2p_detach(&drvr->config->p2p);
-
brcmf_bus_change_state(bus_if, BRCMF_BUS_DOWN);
+ brcmf_bus_stop(drvr->bus_if);
- brcmf_proto_detach_pre_delif(drvr);
+ brcmf_fweh_detach(drvr);
+ brcmf_proto_detach(drvr);
/* make sure primary interface removed last */
- for (i = BRCMF_MAX_IFS-1; i > -1; i--)
- brcmf_remove_interface(drvr->iflist[i], false);
-
- brcmf_cfg80211_detach(drvr->config);
- drvr->config = NULL;
-
- brcmf_bus_stop(drvr->bus_if);
+ for (i = BRCMF_MAX_IFS - 1; i > -1; i--) {
+ if (drvr->iflist[i])
+ brcmf_del_if(drvr, drvr->iflist[i]->bsscfgidx, false);
+ }
- brcmf_proto_detach_post_delif(drvr);
+ if (drvr->config) {
+ brcmf_p2p_detach(&drvr->config->p2p);
+ brcmf_cfg80211_detach(drvr->config);
+ drvr->config = NULL;
+ }
bus_if->drvr = NULL;
+
wiphy_free(drvr->wiphy);
}
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c
index 73aff4e4039d..2c3526aeca6f 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c
@@ -39,6 +39,7 @@ static const struct brcmf_feat_fwcap brcmf_fwcap_map[] = {
{ BRCMF_FEAT_P2P, "p2p" },
{ BRCMF_FEAT_MONITOR, "monitor" },
{ BRCMF_FEAT_MONITOR_FMT_RADIOTAP, "rtap" },
+ { BRCMF_FEAT_DOT11H, "802.11h" }
};
#ifdef DEBUG
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.h
index f127eb2030a6..736a8179f62f 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.h
@@ -25,6 +25,7 @@
* MONITOR: firmware can pass monitor packets to host.
* MONITOR_FMT_RADIOTAP: firmware provides monitor packets with radiotap header
* MONITOR_FMT_HW_RX_HDR: firmware provides monitor packets with hw/ucode header
+ * DOT11H: firmware supports 802.11h
*/
#define BRCMF_FEAT_LIST \
BRCMF_FEAT_DEF(MBSS) \
@@ -43,7 +44,8 @@
BRCMF_FEAT_DEF(FWSUP) \
BRCMF_FEAT_DEF(MONITOR) \
BRCMF_FEAT_DEF(MONITOR_FMT_RADIOTAP) \
- BRCMF_FEAT_DEF(MONITOR_FMT_HW_RX_HDR)
+ BRCMF_FEAT_DEF(MONITOR_FMT_HW_RX_HDR) \
+ BRCMF_FEAT_DEF(DOT11H)
/*
* Quirks:
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.c
index adedd4fac10b..79c8a858b6d6 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.c
@@ -303,16 +303,7 @@ void brcmf_fweh_attach(struct brcmf_pub *drvr)
void brcmf_fweh_detach(struct brcmf_pub *drvr)
{
struct brcmf_fweh_info *fweh = &drvr->fweh;
- struct brcmf_if *ifp = brcmf_get_ifp(drvr, 0);
- s8 eventmask[BRCMF_EVENTING_MASK_LEN];
- if (ifp) {
- /* clear all events */
- memset(eventmask, 0, BRCMF_EVENTING_MASK_LEN);
- (void)brcmf_fil_iovar_data_set(ifp, "event_msgs",
- eventmask,
- BRCMF_EVENTING_MASK_LEN);
- }
/* cancel the worker */
cancel_work_sync(&fweh->event_work);
WARN_ON(!list_empty(&fweh->event_q));
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c
index b8452cb46297..2bd892df83cc 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c
@@ -2432,25 +2432,17 @@ struct brcmf_fws_info *brcmf_fws_attach(struct brcmf_pub *drvr)
return fws;
fail:
- brcmf_fws_detach_pre_delif(fws);
- brcmf_fws_detach_post_delif(fws);
+ brcmf_fws_detach(fws);
return ERR_PTR(rc);
}
-void brcmf_fws_detach_pre_delif(struct brcmf_fws_info *fws)
+void brcmf_fws_detach(struct brcmf_fws_info *fws)
{
if (!fws)
return;
- if (fws->fws_wq) {
- destroy_workqueue(fws->fws_wq);
- fws->fws_wq = NULL;
- }
-}
-void brcmf_fws_detach_post_delif(struct brcmf_fws_info *fws)
-{
- if (!fws)
- return;
+ if (fws->fws_wq)
+ destroy_workqueue(fws->fws_wq);
/* cleanup */
brcmf_fws_lock(fws);
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.h
index 10184eeaad94..b486d578ec96 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.h
@@ -7,8 +7,7 @@
#define FWSIGNAL_H_
struct brcmf_fws_info *brcmf_fws_attach(struct brcmf_pub *drvr);
-void brcmf_fws_detach_pre_delif(struct brcmf_fws_info *fws);
-void brcmf_fws_detach_post_delif(struct brcmf_fws_info *fws);
+void brcmf_fws_detach(struct brcmf_fws_info *fws);
void brcmf_fws_debugfs_create(struct brcmf_pub *drvr);
bool brcmf_fws_queue_skbs(struct brcmf_fws_info *fws);
bool brcmf_fws_fc_active(struct brcmf_fws_info *fws);
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c
index 241747bd5cb2..8428be8b8d43 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c
@@ -1398,6 +1398,13 @@ void brcmf_msgbuf_delete_flowring(struct brcmf_pub *drvr, u16 flowid)
u8 ifidx;
int err;
+ /* no need to submit if firmware cannot be reached */
+ if (drvr->bus_if->state != BRCMF_BUS_UP) {
+ brcmf_dbg(MSGBUF, "bus down, flowring will be removed\n");
+ brcmf_msgbuf_remove_flowring(msgbuf, flowid);
+ return;
+ }
+
commonring = msgbuf->commonrings[BRCMF_H2D_MSGRING_CONTROL_SUBMIT];
brcmf_commonring_lock(commonring);
ret_ptr = brcmf_commonring_reserve_for_write(commonring);
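Worth noting about the hunk above: once the bus is no longer up there is no firmware left to acknowledge a flowring delete, so the host-side state is reclaimed directly instead of queueing a command that can never complete. A minimal sketch of the guard-then-local-cleanup idiom, with hypothetical demo_* names standing in for the driver's helpers:

enum demo_bus_state { DEMO_BUS_DOWN, DEMO_BUS_UP };	/* hypothetical */
struct demo_dev { enum demo_bus_state bus_state; };

void demo_remove_ring_local(struct demo_dev *d, u16 id);
void demo_submit_delete_cmd(struct demo_dev *d, u16 id);

static void demo_delete_ring(struct demo_dev *d, u16 id)
{
	/* Transport gone: free host-side ring state, skip the command. */
	if (d->bus_state != DEMO_BUS_UP) {
		demo_remove_ring_local(d, id);
		return;
	}
	demo_submit_delete_cmd(d, id);	/* normal path, firmware replies */
}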
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c
index 4ea5401c4d6b..8d0e74416643 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c
@@ -794,7 +794,8 @@ static void brcmf_pcie_bus_console_read(struct brcmf_pciedev_info *devinfo,
if (ch == '\n') {
console->log_str[console->log_idx] = 0;
if (error)
- brcmf_err(bus, "CONSOLE: %s", console->log_str);
+ __brcmf_err(bus, __func__, "CONSOLE: %s",
+ console->log_str);
else
pr_debug("CONSOLE: %s", console->log_str);
console->log_idx = 0;
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/proto.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/proto.c
index e3d1b075044b..2e911d4874af 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/proto.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/proto.c
@@ -56,22 +56,16 @@ fail:
return -ENOMEM;
}
-void brcmf_proto_detach_post_delif(struct brcmf_pub *drvr)
+void brcmf_proto_detach(struct brcmf_pub *drvr)
{
brcmf_dbg(TRACE, "Enter\n");
if (drvr->proto) {
if (drvr->bus_if->proto_type == BRCMF_PROTO_BCDC)
- brcmf_proto_bcdc_detach_post_delif(drvr);
+ brcmf_proto_bcdc_detach(drvr);
else if (drvr->bus_if->proto_type == BRCMF_PROTO_MSGBUF)
brcmf_proto_msgbuf_detach(drvr);
kfree(drvr->proto);
drvr->proto = NULL;
}
}
-
-void brcmf_proto_detach_pre_delif(struct brcmf_pub *drvr)
-{
- if (drvr->proto && drvr->bus_if->proto_type == BRCMF_PROTO_BCDC)
- brcmf_proto_bcdc_detach_pre_delif(drvr);
-}
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/proto.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/proto.h
index 8d55fad531d0..bd08d3aaa8f4 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/proto.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/proto.h
@@ -43,8 +43,7 @@ struct brcmf_proto {
int brcmf_proto_attach(struct brcmf_pub *drvr);
-void brcmf_proto_detach_pre_delif(struct brcmf_pub *drvr);
-void brcmf_proto_detach_post_delif(struct brcmf_pub *drvr);
+void brcmf_proto_detach(struct brcmf_pub *drvr);
static inline int brcmf_proto_hdrpull(struct brcmf_pub *drvr, bool do_fws,
struct sk_buff *skb,
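The pre-/post-delif split existed only so teardown work could happen on either side of interface deletion; with both halves merged (fwsignal.c and proto.c hunks above), the caller collapses to plain sequential detach calls. A caller-side sketch, assuming drvr->fws holds the fwsignal handle and with demo_remove_interfaces() as a hypothetical stand-in for the interface cleanup in core.c:

void demo_remove_interfaces(struct brcmf_pub *drvr);	/* hypothetical */

static void demo_core_detach(struct brcmf_pub *drvr)
{
	demo_remove_interfaces(drvr);
	brcmf_fws_detach(drvr->fws);	/* NULL-tolerant, per fwsignal.c hunk */
	brcmf_proto_detach(drvr);	/* single detach, per proto.c hunk */
}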
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/main.c b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/main.c
index 7d4e8f589fdc..080e829da9b3 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/main.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/main.c
@@ -5248,15 +5248,7 @@ int brcms_c_set_gmode(struct brcms_c_info *wlc, u8 gmode, bool config)
/* Default to 54g Auto */
/* Advertise and use shortslot (-1/0/1 Auto/Off/On) */
s8 shortslot = BRCMS_SHORTSLOT_AUTO;
- bool shortslot_restrict = false; /* Restrict association to stations
- * that support shortslot
- */
bool ofdm_basic = false; /* Make 6, 12, and 24 basic rates */
- /* Advertise and use short preambles (-1/0/1 Auto/Off/On) */
- int preamble = BRCMS_PLCP_LONG;
- bool preamble_restrict = false; /* Restrict association to stations
- * that support short preambles
- */
struct brcms_band *band;
/* if N-support is enabled, allow Gmode set as long as requested
@@ -5297,16 +5289,11 @@ int brcms_c_set_gmode(struct brcms_c_info *wlc, u8 gmode, bool config)
case GMODE_ONLY:
ofdm_basic = true;
- preamble = BRCMS_PLCP_SHORT;
- preamble_restrict = true;
break;
case GMODE_PERFORMANCE:
shortslot = BRCMS_SHORTSLOT_ON;
- shortslot_restrict = true;
ofdm_basic = true;
- preamble = BRCMS_PLCP_SHORT;
- preamble_restrict = true;
break;
default:
diff --git a/drivers/net/wireless/intel/ipw2x00/ipw2100.c b/drivers/net/wireless/intel/ipw2x00/ipw2100.c
index 75c0c29d81f0..8dfbaff2d1fe 100644
--- a/drivers/net/wireless/intel/ipw2x00/ipw2100.c
+++ b/drivers/net/wireless/intel/ipw2x00/ipw2100.c
@@ -4413,7 +4413,7 @@ static void ipw2100_kill_works(struct ipw2100_priv *priv)
static int ipw2100_tx_allocate(struct ipw2100_priv *priv)
{
- int i, j, err = -EINVAL;
+ int i, j, err;
void *v;
dma_addr_t p;
diff --git a/drivers/net/wireless/intel/iwlegacy/common.c b/drivers/net/wireless/intel/iwlegacy/common.c
index 4a88e35d58d7..73f7bbf742bc 100644
--- a/drivers/net/wireless/intel/iwlegacy/common.c
+++ b/drivers/net/wireless/intel/iwlegacy/common.c
@@ -4942,8 +4942,7 @@ EXPORT_SYMBOL(il_add_beacon_time);
static int
il_pci_suspend(struct device *device)
{
- struct pci_dev *pdev = to_pci_dev(device);
- struct il_priv *il = pci_get_drvdata(pdev);
+ struct il_priv *il = dev_get_drvdata(device);
/*
* This function is called when system goes into suspend state
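This conversion (repeated below for mwifiex, qtnfmac and rtlwifi) is behavior-preserving: pci_set_drvdata() is a thin wrapper that stores through dev_set_drvdata(&pdev->dev, ...), so a dev_pm_ops callback can read the same pointer without the detour through struct pci_dev. A minimal illustration with a hypothetical callback:

#include <linux/pci.h>

static int demo_suspend(struct device *dev)
{
	/* Both lookups read the same driver_data slot. */
	void *via_pci = pci_get_drvdata(to_pci_dev(dev));
	void *direct = dev_get_drvdata(dev);

	WARN_ON(via_pci != direct);	/* never fires: same storage */
	return 0;
}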
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/rs.c b/drivers/net/wireless/intel/iwlwifi/dvm/rs.c
index b1e5d64ca60d..74229fcb63a9 100644
--- a/drivers/net/wireless/intel/iwlwifi/dvm/rs.c
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/rs.c
@@ -3256,28 +3256,16 @@ static void rs_add_debugfs(void *priv, void *priv_sta,
struct dentry *dir)
{
struct iwl_lq_sta *lq_sta = priv_sta;
- lq_sta->rs_sta_dbgfs_scale_table_file =
- debugfs_create_file("rate_scale_table", 0600, dir,
- lq_sta, &rs_sta_dbgfs_scale_table_ops);
- lq_sta->rs_sta_dbgfs_stats_table_file =
- debugfs_create_file("rate_stats_table", 0400, dir,
- lq_sta, &rs_sta_dbgfs_stats_table_ops);
- lq_sta->rs_sta_dbgfs_rate_scale_data_file =
- debugfs_create_file("rate_scale_data", 0400, dir,
- lq_sta, &rs_sta_dbgfs_rate_scale_data_ops);
- lq_sta->rs_sta_dbgfs_tx_agg_tid_en_file =
- debugfs_create_u8("tx_agg_tid_enable", 0600, dir,
- &lq_sta->tx_agg_tid_en);
-}
+ debugfs_create_file("rate_scale_table", 0600, dir, lq_sta,
+ &rs_sta_dbgfs_scale_table_ops);
+ debugfs_create_file("rate_stats_table", 0400, dir, lq_sta,
+ &rs_sta_dbgfs_stats_table_ops);
+ debugfs_create_file("rate_scale_data", 0400, dir, lq_sta,
+ &rs_sta_dbgfs_rate_scale_data_ops);
+ debugfs_create_u8("tx_agg_tid_enable", 0600, dir,
+ &lq_sta->tx_agg_tid_en);
-static void rs_remove_debugfs(void *priv, void *priv_sta)
-{
- struct iwl_lq_sta *lq_sta = priv_sta;
- debugfs_remove(lq_sta->rs_sta_dbgfs_scale_table_file);
- debugfs_remove(lq_sta->rs_sta_dbgfs_stats_table_file);
- debugfs_remove(lq_sta->rs_sta_dbgfs_rate_scale_data_file);
- debugfs_remove(lq_sta->rs_sta_dbgfs_tx_agg_tid_en_file);
}
#endif
@@ -3303,7 +3291,6 @@ static const struct rate_control_ops rs_ops = {
.free_sta = rs_free_sta,
#ifdef CONFIG_MAC80211_DEBUGFS
.add_sta_debugfs = rs_add_debugfs,
- .remove_sta_debugfs = rs_remove_debugfs,
#endif
};
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/rs.h b/drivers/net/wireless/intel/iwlwifi/dvm/rs.h
index b7a1854cd202..68a840d739e8 100644
--- a/drivers/net/wireless/intel/iwlwifi/dvm/rs.h
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/rs.h
@@ -356,10 +356,6 @@ struct iwl_lq_sta {
struct iwl_traffic_load load[IWL_MAX_TID_COUNT];
u8 tx_agg_tid_en;
#ifdef CONFIG_MAC80211_DEBUGFS
- struct dentry *rs_sta_dbgfs_scale_table_file;
- struct dentry *rs_sta_dbgfs_stats_table_file;
- struct dentry *rs_sta_dbgfs_rate_scale_data_file;
- struct dentry *rs_sta_dbgfs_tx_agg_tid_en_file;
u32 dbg_fixed_rate;
#endif
struct iwl_priv *drv;
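The dentry fields can go because nothing ever needs them individually: debugfs creation errors are ignored by design, and mac80211 removes the whole per-station debugfs directory recursively, which is also why the .remove_sta_debugfs hook above became dead code. A sketch of the create-and-forget pattern, with demo_sta as a hypothetical stand-in:

#include <linux/debugfs.h>

struct demo_sta { u8 tx_agg_tid_en; };	/* hypothetical */

static void demo_add_sta_debugfs(struct demo_sta *sta, struct dentry *dir)
{
	/* Return value intentionally unused; cleanup happens when the
	 * owner of "dir" removes it recursively. */
	debugfs_create_u8("tx_agg_tid_enable", 0600, dir,
			  &sta->tx_agg_tid_en);
}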
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
index a7bc00d1296f..d6499763f0dd 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
@@ -4045,7 +4045,8 @@ out_unlock:
return ret;
}
-static int iwl_mvm_cancel_roc(struct ieee80211_hw *hw)
+static int iwl_mvm_cancel_roc(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
{
struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rs.c b/drivers/net/wireless/intel/iwlwifi/mvm/rs.c
index d3f04acfbacb..e4415e58fa78 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/rs.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rs.c
@@ -4127,10 +4127,6 @@ static void rs_drv_add_sta_debugfs(void *mvm, void *priv_sta,
MVM_DEBUGFS_ADD_FILE_RS(ss_force, dir, 0600);
}
-
-void rs_remove_sta_debugfs(void *mvm, void *mvm_sta)
-{
-}
#endif
/*
@@ -4158,7 +4154,6 @@ static const struct rate_control_ops rs_mvm_ops_drv = {
.rate_update = rs_drv_rate_update,
#ifdef CONFIG_MAC80211_DEBUGFS
.add_sta_debugfs = rs_drv_add_sta_debugfs,
- .remove_sta_debugfs = rs_remove_sta_debugfs,
#endif
.capa = RATE_CTRL_CAPA_VHT_EXT_NSS_BW,
};
diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
index 772e54f0696f..f86c2891310a 100644
--- a/drivers/net/wireless/mac80211_hwsim.c
+++ b/drivers/net/wireless/mac80211_hwsim.c
@@ -2216,7 +2216,8 @@ static int mac80211_hwsim_roc(struct ieee80211_hw *hw,
return 0;
}
-static int mac80211_hwsim_croc(struct ieee80211_hw *hw)
+static int mac80211_hwsim_croc(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
{
struct mac80211_hwsim_data *hwsim = hw->priv;
@@ -2594,7 +2595,7 @@ static const struct ieee80211_sband_iftype_data he_capa_5ghz = {
},
};
-static void mac80211_hswim_he_capab(struct ieee80211_supported_band *sband)
+static void mac80211_hwsim_he_capab(struct ieee80211_supported_band *sband)
{
if (sband->band == NL80211_BAND_2GHZ)
sband->iftype_data =
@@ -2805,12 +2806,6 @@ static int mac80211_hwsim_new_radio(struct genl_info *info,
ieee80211_hw_set(hw, SIGNAL_DBM);
ieee80211_hw_set(hw, SUPPORTS_PS);
ieee80211_hw_set(hw, TDLS_WIDER_BW);
-
- /* We only have SW crypto and only implement the A-MPDU API
- * (but don't really build A-MPDUs) so can have extended key
- * support
- */
- ieee80211_hw_set(hw, EXT_KEY_ID_NATIVE);
if (rctbl)
ieee80211_hw_set(hw, SUPPORTS_RC_TABLE);
ieee80211_hw_set(hw, SUPPORTS_MULTI_BSSID);
@@ -2897,7 +2892,7 @@ static int mac80211_hwsim_new_radio(struct genl_info *info,
sband->ht_cap.mcs.rx_mask[1] = 0xff;
sband->ht_cap.mcs.tx_params = IEEE80211_HT_MCS_TX_DEFINED;
- mac80211_hswim_he_capab(sband);
+ mac80211_hwsim_he_capab(sband);
hw->wiphy->bands[band] = sband;
}
@@ -3233,6 +3228,7 @@ static int hwsim_cloned_frame_received_nl(struct sk_buff *skb_2,
{
struct mac80211_hwsim_data *data2;
struct ieee80211_rx_status rx_status;
+ struct ieee80211_hdr *hdr;
const u8 *dst;
int frame_data_len;
void *frame_data;
@@ -3299,6 +3295,12 @@ static int hwsim_cloned_frame_received_nl(struct sk_buff *skb_2,
rx_status.rate_idx = nla_get_u32(info->attrs[HWSIM_ATTR_RX_RATE]);
rx_status.signal = nla_get_u32(info->attrs[HWSIM_ATTR_SIGNAL]);
+ hdr = (void *)skb->data;
+
+ if (ieee80211_is_beacon(hdr->frame_control) ||
+ ieee80211_is_probe_resp(hdr->frame_control))
+ rx_status.boottime_ns = ktime_get_boottime_ns();
+
memcpy(IEEE80211_SKB_RXCB(skb), &rx_status, sizeof(rx_status));
data2->rx_pkts++;
data2->rx_bytes += skb->len;
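Stamping rx_status.boottime_ns on beacons and probe responses lets the stack report scan results with an age measured against CLOCK_BOOTTIME (exposed as NL80211_BSS_LAST_SEEN_BOOTTIME) rather than leaving hwsim BSS entries without a usable timestamp. A consumer-side sketch of what that enables, name hypothetical:

#include <linux/timekeeping.h>

/* How old is a scan entry whose frame carried boottime_ns? */
static u64 demo_bss_age_ns(u64 boottime_ns)
{
	return ktime_get_boottime_ns() - boottime_ns;
}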
diff --git a/drivers/net/wireless/marvell/libertas/if_spi.c b/drivers/net/wireless/marvell/libertas/if_spi.c
index 27067e79e83f..d07fe82c557e 100644
--- a/drivers/net/wireless/marvell/libertas/if_spi.c
+++ b/drivers/net/wireless/marvell/libertas/if_spi.c
@@ -766,19 +766,15 @@ static int if_spi_c2h_data(struct if_spi_card *card)
/* Read the data from the WLAN module into our skb... */
err = spu_read(card, IF_SPI_DATA_RDWRPORT_REG, data, ALIGN(len, 4));
- if (err)
- goto free_skb;
+ if (err) {
+ dev_kfree_skb(skb);
+ goto out;
+ }
/* pass the SKB to libertas */
err = lbs_process_rxed_packet(card->priv, skb);
- if (err)
- goto free_skb;
-
- /* success */
- goto out;
+ /* lbs_process_rxed_packet() consumes the skb */
-free_skb:
- dev_kfree_skb(skb);
out:
if (err)
netdev_err(priv->dev, "%s: err=%d\n", __func__, err);
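The rewritten error handling encodes an ownership rule: lbs_process_rxed_packet() consumes the skb on every return path, so the old shared free_skb label double-freed the buffer whenever that call failed. Only errors that occur before the handoff may free the skb. A minimal sketch with hypothetical demo_* helpers:

#include <linux/skbuff.h>

struct demo_card;					/* hypothetical */
int demo_fill(struct demo_card *c, struct sk_buff *skb);
int demo_consume(struct demo_card *c, struct sk_buff *skb); /* frees skb */

static int demo_rx(struct demo_card *card, struct sk_buff *skb)
{
	int err = demo_fill(card, skb);

	if (err) {
		dev_kfree_skb(skb);	/* still ours: safe to free */
		return err;
	}
	/* demo_consume() owns the skb from here, even on failure. */
	return demo_consume(card, skb);
}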
diff --git a/drivers/net/wireless/marvell/libertas/if_usb.c b/drivers/net/wireless/marvell/libertas/if_usb.c
index afac2481909b..20436a289d5c 100644
--- a/drivers/net/wireless/marvell/libertas/if_usb.c
+++ b/drivers/net/wireless/marvell/libertas/if_usb.c
@@ -50,7 +50,8 @@ static const struct lbs_fw_table fw_table[] = {
{ MODEL_8388, "libertas/usb8388_v5.bin", NULL },
{ MODEL_8388, "libertas/usb8388.bin", NULL },
{ MODEL_8388, "usb8388.bin", NULL },
- { MODEL_8682, "libertas/usb8682.bin", NULL }
+ { MODEL_8682, "libertas/usb8682.bin", NULL },
+ { 0, NULL, NULL }
};
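The added all-zero row terminates the table so lookups can stop on the sentinel instead of needing the array length. A sketch of such a walker over a simplified stand-in for struct lbs_fw_table:

struct demo_fw_row { int model; const char *fwname; };	/* simplified */

static const char *demo_find_fw(const struct demo_fw_row *row, int model)
{
	for (; row->fwname; row++)	/* stops at the { 0, NULL } sentinel */
		if (row->model == model)
			return row->fwname;
	return NULL;
}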
static const struct usb_device_id if_usb_table[] = {
diff --git a/drivers/net/wireless/marvell/libertas/main.c b/drivers/net/wireless/marvell/libertas/main.c
index 5968852b65a7..2233b59cdf44 100644
--- a/drivers/net/wireless/marvell/libertas/main.c
+++ b/drivers/net/wireless/marvell/libertas/main.c
@@ -1046,7 +1046,7 @@ int lbs_rtap_supported(struct lbs_private *priv)
int lbs_start_card(struct lbs_private *priv)
{
struct net_device *dev = priv->dev;
- int ret = -1;
+ int ret;
/* poke the firmware */
ret = lbs_setup_firmware(priv);
diff --git a/drivers/net/wireless/marvell/libertas_tf/cmd.c b/drivers/net/wireless/marvell/libertas_tf/cmd.c
index 1eacca0d079b..a0b4c9debc11 100644
--- a/drivers/net/wireless/marvell/libertas_tf/cmd.c
+++ b/drivers/net/wireless/marvell/libertas_tf/cmd.c
@@ -65,7 +65,7 @@ static void lbtf_geo_init(struct lbtf_private *priv)
break;
}
- for (ch = priv->range.start; ch < priv->range.end; ch++)
+ for (ch = range->start; ch < range->end; ch++)
priv->channels[CHAN_TO_IDX(ch)].flags = 0;
}
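This is a genuine bug fix: the code above the hunk selects a per-region `range` entry, but the loop then walked priv->range, clearing flags for a possibly different channel span. The select-then-iterate pattern, sketched with a hypothetical table type:

#include <linux/types.h>

struct demo_chan_range { u8 region, start, end; };	/* hypothetical */

static const struct demo_chan_range *
demo_pick_range(const struct demo_chan_range *tbl, size_t n, u8 region)
{
	size_t i;

	for (i = 0; i < n; i++)
		if (tbl[i].region == region)
			return &tbl[i];
	return &tbl[0];		/* fall back to the default entry */
}

/* Callers must then iterate the entry they matched, not a cached copy. */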
diff --git a/drivers/net/wireless/marvell/mwifiex/init.c b/drivers/net/wireless/marvell/mwifiex/init.c
index 6c0e52eb8794..1aa93e7e9835 100644
--- a/drivers/net/wireless/marvell/mwifiex/init.c
+++ b/drivers/net/wireless/marvell/mwifiex/init.c
@@ -59,7 +59,7 @@ static void wakeup_timer_fn(struct timer_list *t)
adapter->hw_status = MWIFIEX_HW_STATUS_RESET;
mwifiex_cancel_all_pending_cmd(adapter);
- if (adapter->if_ops.card_reset && !adapter->hs_activated)
+ if (adapter->if_ops.card_reset)
adapter->if_ops.card_reset(adapter);
}
diff --git a/drivers/net/wireless/marvell/mwifiex/pcie.c b/drivers/net/wireless/marvell/mwifiex/pcie.c
index b54f73e3d508..eff06d59e9df 100644
--- a/drivers/net/wireless/marvell/mwifiex/pcie.c
+++ b/drivers/net/wireless/marvell/mwifiex/pcie.c
@@ -150,10 +150,8 @@ static bool mwifiex_pcie_ok_to_access_hw(struct mwifiex_adapter *adapter)
static int mwifiex_pcie_suspend(struct device *dev)
{
struct mwifiex_adapter *adapter;
- struct pcie_service_card *card;
- struct pci_dev *pdev = to_pci_dev(dev);
+ struct pcie_service_card *card = dev_get_drvdata(dev);
- card = pci_get_drvdata(pdev);
/* Might still be loading firmware */
wait_for_completion(&card->fw_done);
@@ -195,10 +193,8 @@ static int mwifiex_pcie_suspend(struct device *dev)
static int mwifiex_pcie_resume(struct device *dev)
{
struct mwifiex_adapter *adapter;
- struct pcie_service_card *card;
- struct pci_dev *pdev = to_pci_dev(dev);
+ struct pcie_service_card *card = dev_get_drvdata(dev);
- card = pci_get_drvdata(pdev);
if (!card->adapter) {
dev_err(dev, "adapter structure is not valid\n");
diff --git a/drivers/net/wireless/marvell/mwifiex/scan.c b/drivers/net/wireless/marvell/mwifiex/scan.c
index 21dda385f6c6..593c594982cb 100644
--- a/drivers/net/wireless/marvell/mwifiex/scan.c
+++ b/drivers/net/wireless/marvell/mwifiex/scan.c
@@ -1244,7 +1244,7 @@ int mwifiex_update_bss_desc_with_ie(struct mwifiex_adapter *adapter,
mwifiex_dbg(adapter, ERROR,
"err: InterpretIE: in processing\t"
"IE, bytes left < IE length\n");
- return -1;
+ return -EINVAL;
}
switch (element_id) {
case WLAN_EID_SSID:
diff --git a/drivers/net/wireless/marvell/mwifiex/tdls.c b/drivers/net/wireless/marvell/mwifiex/tdls.c
index 18e654dc34c6..09313047beed 100644
--- a/drivers/net/wireless/marvell/mwifiex/tdls.c
+++ b/drivers/net/wireless/marvell/mwifiex/tdls.c
@@ -731,7 +731,6 @@ mwifiex_construct_tdls_action_frame(struct mwifiex_private *priv,
u16 status_code, struct sk_buff *skb)
{
struct ieee80211_mgmt *mgmt;
- u8 bc_addr[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
int ret;
u16 capab;
struct ieee80211_ht_cap *ht_cap;
@@ -765,7 +764,7 @@ mwifiex_construct_tdls_action_frame(struct mwifiex_private *priv,
memmove(pos + ETH_ALEN, &mgmt->u.action.category,
sizeof(mgmt->u.action.u.tdls_discover_resp));
/* init address 4 */
- memcpy(pos, bc_addr, ETH_ALEN);
+ eth_broadcast_addr(pos);
ret = mwifiex_tdls_append_rates_ie(priv, skb);
if (ret) {
diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/soc.c b/drivers/net/wireless/mediatek/mt76/mt7603/soc.c
index b920be1f5718..c6c1ce69bcbc 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7603/soc.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7603/soc.c
@@ -17,10 +17,8 @@ mt76_wmac_probe(struct platform_device *pdev)
int ret;
irq = platform_get_irq(pdev, 0);
- if (irq < 0) {
- dev_err(&pdev->dev, "Failed to get device IRQ\n");
+ if (irq < 0)
return irq;
- }
mem_base = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(mem_base)) {
diff --git a/drivers/net/wireless/mediatek/mt7601u/init.c b/drivers/net/wireless/mediatek/mt7601u/init.c
index 9bfac9f1d47f..cada48800928 100644
--- a/drivers/net/wireless/mediatek/mt7601u/init.c
+++ b/drivers/net/wireless/mediatek/mt7601u/init.c
@@ -557,6 +557,9 @@ mt76_init_sband_2g(struct mt7601u_dev *dev)
{
dev->sband_2g = devm_kzalloc(dev->dev, sizeof(*dev->sband_2g),
GFP_KERNEL);
+ if (!dev->sband_2g)
+ return -ENOMEM;
+
dev->hw->wiphy->bands[NL80211_BAND_2GHZ] = dev->sband_2g;
WARN_ON(dev->ee->reg.start - 1 + dev->ee->reg.num >
diff --git a/drivers/net/wireless/mediatek/mt7601u/main.c b/drivers/net/wireless/mediatek/mt7601u/main.c
index 89a7b1234ffb..72e608cc53af 100644
--- a/drivers/net/wireless/mediatek/mt7601u/main.c
+++ b/drivers/net/wireless/mediatek/mt7601u/main.c
@@ -351,7 +351,7 @@ mt76_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
struct ieee80211_sta *sta = params->sta;
enum ieee80211_ampdu_mlme_action action = params->action;
u16 tid = params->tid;
- u16 *ssn = &params->ssn;
+ u16 ssn = params->ssn;
struct mt76_sta *msta = (struct mt76_sta *) sta->drv_priv;
WARN_ON(msta->wcid.idx > GROUP_WCID(0));
@@ -371,7 +371,7 @@ mt76_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT:
break;
case IEEE80211_AMPDU_TX_START:
- msta->agg_ssn[tid] = *ssn << 4;
+ msta->agg_ssn[tid] = ssn << 4;
ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
break;
case IEEE80211_AMPDU_TX_STOP_CONT:
diff --git a/drivers/net/wireless/quantenna/qtnfmac/pcie/pcie.c b/drivers/net/wireless/quantenna/qtnfmac/pcie/pcie.c
index e4e9344b6982..8ae318b5fe54 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/pcie/pcie.c
+++ b/drivers/net/wireless/quantenna/qtnfmac/pcie/pcie.c
@@ -430,7 +430,7 @@ static int qtnf_pcie_suspend(struct device *dev)
struct qtnf_pcie_bus_priv *priv;
struct qtnf_bus *bus;
- bus = pci_get_drvdata(to_pci_dev(dev));
+ bus = dev_get_drvdata(dev);
if (!bus)
return -EFAULT;
@@ -443,7 +443,7 @@ static int qtnf_pcie_resume(struct device *dev)
struct qtnf_pcie_bus_priv *priv;
struct qtnf_bus *bus;
- bus = pci_get_drvdata(to_pci_dev(dev));
+ bus = dev_get_drvdata(dev);
if (!bus)
return -EFAULT;
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2800usb.c b/drivers/net/wireless/ralink/rt2x00/rt2800usb.c
index fdf0504b5f1d..0dfb55c69b73 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2800usb.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt2800usb.c
@@ -1086,6 +1086,7 @@ static const struct usb_device_id rt2800usb_device_table[] = {
{ USB_DEVICE(0x0846, 0x9013) },
{ USB_DEVICE(0x0846, 0x9019) },
/* Planex */
+ { USB_DEVICE(0x2019, 0xed14) },
{ USB_DEVICE(0x2019, 0xed19) },
/* Ralink */
{ USB_DEVICE(0x148f, 0x3573) },
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00debug.c b/drivers/net/wireless/ralink/rt2x00/rt2x00debug.c
index ef5f51512212..4d4e3888ef20 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2x00debug.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt2x00debug.c
@@ -65,26 +65,6 @@ struct rt2x00debug_intf {
* - crypto stats file
*/
struct dentry *driver_folder;
- struct dentry *driver_entry;
- struct dentry *chipset_entry;
- struct dentry *dev_flags;
- struct dentry *cap_flags;
- struct dentry *restart_hw;
- struct dentry *register_folder;
- struct dentry *csr_off_entry;
- struct dentry *csr_val_entry;
- struct dentry *eeprom_off_entry;
- struct dentry *eeprom_val_entry;
- struct dentry *bbp_off_entry;
- struct dentry *bbp_val_entry;
- struct dentry *rf_off_entry;
- struct dentry *rf_val_entry;
- struct dentry *rfcsr_off_entry;
- struct dentry *rfcsr_val_entry;
- struct dentry *queue_folder;
- struct dentry *queue_frame_dump_entry;
- struct dentry *queue_stats_entry;
- struct dentry *crypto_stats_entry;
/*
* The frame dump file only allows a single reader,
@@ -596,39 +576,34 @@ static const struct file_operations rt2x00debug_restart_hw = {
.llseek = generic_file_llseek,
};
-static struct dentry *rt2x00debug_create_file_driver(const char *name,
- struct rt2x00debug_intf
- *intf,
- struct debugfs_blob_wrapper
- *blob)
+static void rt2x00debug_create_file_driver(const char *name,
+ struct rt2x00debug_intf *intf,
+ struct debugfs_blob_wrapper *blob)
{
char *data;
data = kzalloc(3 * MAX_LINE_LENGTH, GFP_KERNEL);
if (!data)
- return NULL;
+ return;
blob->data = data;
data += sprintf(data, "driver:\t%s\n", intf->rt2x00dev->ops->name);
data += sprintf(data, "version:\t%s\n", DRV_VERSION);
blob->size = strlen(blob->data);
- return debugfs_create_blob(name, 0400, intf->driver_folder, blob);
+ debugfs_create_blob(name, 0400, intf->driver_folder, blob);
}
-static struct dentry *rt2x00debug_create_file_chipset(const char *name,
- struct rt2x00debug_intf
- *intf,
- struct
- debugfs_blob_wrapper
- *blob)
+static void rt2x00debug_create_file_chipset(const char *name,
+ struct rt2x00debug_intf *intf,
+ struct debugfs_blob_wrapper *blob)
{
const struct rt2x00debug *debug = intf->debug;
char *data;
data = kzalloc(9 * MAX_LINE_LENGTH, GFP_KERNEL);
if (!data)
- return NULL;
+ return;
blob->data = data;
data += sprintf(data, "rt chip:\t%04x\n", intf->rt2x00dev->chip.rt);
@@ -654,13 +629,15 @@ static struct dentry *rt2x00debug_create_file_chipset(const char *name,
blob->size = strlen(blob->data);
- return debugfs_create_blob(name, 0400, intf->driver_folder, blob);
+ debugfs_create_blob(name, 0400, intf->driver_folder, blob);
}
void rt2x00debug_register(struct rt2x00_dev *rt2x00dev)
{
const struct rt2x00debug *debug = rt2x00dev->ops->debugfs;
struct rt2x00debug_intf *intf;
+ struct dentry *queue_folder;
+ struct dentry *register_folder;
intf = kzalloc(sizeof(struct rt2x00debug_intf), GFP_KERNEL);
if (!intf) {
@@ -676,43 +653,27 @@ void rt2x00debug_register(struct rt2x00_dev *rt2x00dev)
debugfs_create_dir(intf->rt2x00dev->ops->name,
rt2x00dev->hw->wiphy->debugfsdir);
- intf->driver_entry =
- rt2x00debug_create_file_driver("driver", intf, &intf->driver_blob);
+ rt2x00debug_create_file_driver("driver", intf, &intf->driver_blob);
+ rt2x00debug_create_file_chipset("chipset", intf, &intf->chipset_blob);
+ debugfs_create_file("dev_flags", 0400, intf->driver_folder, intf,
+ &rt2x00debug_fop_dev_flags);
+ debugfs_create_file("cap_flags", 0400, intf->driver_folder, intf,
+ &rt2x00debug_fop_cap_flags);
+ debugfs_create_file("restart_hw", 0200, intf->driver_folder, intf,
+ &rt2x00debug_restart_hw);
- intf->chipset_entry =
- rt2x00debug_create_file_chipset("chipset",
- intf, &intf->chipset_blob);
-
- intf->dev_flags = debugfs_create_file("dev_flags", 0400,
- intf->driver_folder, intf,
- &rt2x00debug_fop_dev_flags);
-
- intf->cap_flags = debugfs_create_file("cap_flags", 0400,
- intf->driver_folder, intf,
- &rt2x00debug_fop_cap_flags);
-
- intf->restart_hw = debugfs_create_file("restart_hw", 0200,
- intf->driver_folder, intf,
- &rt2x00debug_restart_hw);
-
- intf->register_folder =
- debugfs_create_dir("register", intf->driver_folder);
+ register_folder = debugfs_create_dir("register", intf->driver_folder);
#define RT2X00DEBUGFS_CREATE_REGISTER_ENTRY(__intf, __name) \
({ \
if (debug->__name.read) { \
- (__intf)->__name##_off_entry = \
- debugfs_create_u32(__stringify(__name) "_offset", \
- 0600, \
- (__intf)->register_folder, \
- &(__intf)->offset_##__name); \
+ debugfs_create_u32(__stringify(__name) "_offset", 0600, \
+ register_folder, \
+ &(__intf)->offset_##__name); \
\
- (__intf)->__name##_val_entry = \
- debugfs_create_file(__stringify(__name) "_value", \
- 0600, \
- (__intf)->register_folder, \
- (__intf), \
- &rt2x00debug_fop_##__name); \
+ debugfs_create_file(__stringify(__name) "_value", 0600, \
+ register_folder, (__intf), \
+ &rt2x00debug_fop_##__name); \
} \
})
@@ -724,26 +685,21 @@ void rt2x00debug_register(struct rt2x00_dev *rt2x00dev)
#undef RT2X00DEBUGFS_CREATE_REGISTER_ENTRY
- intf->queue_folder =
- debugfs_create_dir("queue", intf->driver_folder);
+ queue_folder = debugfs_create_dir("queue", intf->driver_folder);
- intf->queue_frame_dump_entry =
- debugfs_create_file("dump", 0400, intf->queue_folder,
- intf, &rt2x00debug_fop_queue_dump);
+ debugfs_create_file("dump", 0400, queue_folder, intf,
+ &rt2x00debug_fop_queue_dump);
skb_queue_head_init(&intf->frame_dump_skbqueue);
init_waitqueue_head(&intf->frame_dump_waitqueue);
- intf->queue_stats_entry =
- debugfs_create_file("queue", 0400, intf->queue_folder,
- intf, &rt2x00debug_fop_queue_stats);
+ debugfs_create_file("queue", 0400, queue_folder, intf,
+ &rt2x00debug_fop_queue_stats);
#ifdef CONFIG_RT2X00_LIB_CRYPTO
if (rt2x00_has_cap_hw_crypto(rt2x00dev))
- intf->crypto_stats_entry =
- debugfs_create_file("crypto", 0444, intf->queue_folder,
- intf,
- &rt2x00debug_fop_crypto_stats);
+ debugfs_create_file("crypto", 0444, queue_folder, intf,
+ &rt2x00debug_fop_crypto_stats);
#endif
return;
@@ -758,29 +714,7 @@ void rt2x00debug_deregister(struct rt2x00_dev *rt2x00dev)
skb_queue_purge(&intf->frame_dump_skbqueue);
-#ifdef CONFIG_RT2X00_LIB_CRYPTO
- debugfs_remove(intf->crypto_stats_entry);
-#endif
- debugfs_remove(intf->queue_stats_entry);
- debugfs_remove(intf->queue_frame_dump_entry);
- debugfs_remove(intf->queue_folder);
- debugfs_remove(intf->rfcsr_val_entry);
- debugfs_remove(intf->rfcsr_off_entry);
- debugfs_remove(intf->rf_val_entry);
- debugfs_remove(intf->rf_off_entry);
- debugfs_remove(intf->bbp_val_entry);
- debugfs_remove(intf->bbp_off_entry);
- debugfs_remove(intf->eeprom_val_entry);
- debugfs_remove(intf->eeprom_off_entry);
- debugfs_remove(intf->csr_val_entry);
- debugfs_remove(intf->csr_off_entry);
- debugfs_remove(intf->register_folder);
- debugfs_remove(intf->dev_flags);
- debugfs_remove(intf->restart_hw);
- debugfs_remove(intf->cap_flags);
- debugfs_remove(intf->chipset_entry);
- debugfs_remove(intf->driver_entry);
- debugfs_remove(intf->driver_folder);
+ debugfs_remove_recursive(intf->driver_folder);
kfree(intf->chipset_blob.data);
kfree(intf->driver_blob.data);
kfree(intf);
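Since every entry is created underneath intf->driver_folder, the root dentry is the only one worth keeping: debugfs_remove_recursive() tears down the entire subtree, which is what makes the twenty-odd per-entry dentry fields and their individual debugfs_remove() calls removable. The idiom in miniature, names hypothetical:

#include <linux/debugfs.h>

struct demo_dbg { struct dentry *root; u32 value; };	/* hypothetical */

static void demo_register(struct demo_dbg *d, const char *name)
{
	d->root = debugfs_create_dir(name, NULL);
	/* Children need no bookkeeping; only the root is kept. */
	debugfs_create_u32("value", 0600, d->root, &d->value);
}

static void demo_deregister(struct demo_dbg *d)
{
	debugfs_remove_recursive(d->root);	/* removes children too */
}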
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00usb.c b/drivers/net/wireless/ralink/rt2x00/rt2x00usb.c
index 7e3a621b9c0d..bc2dfef0de22 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2x00usb.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt2x00usb.c
@@ -349,8 +349,7 @@ static void rt2x00usb_work_rxdone(struct work_struct *work)
while (!rt2x00queue_empty(rt2x00dev->rx)) {
entry = rt2x00queue_get_entry(rt2x00dev->rx, Q_INDEX_DONE);
- if (test_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags) ||
- !test_bit(ENTRY_DATA_STATUS_PENDING, &entry->flags))
+ if (test_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags))
break;
/*
@@ -389,8 +388,7 @@ static void rt2x00usb_interrupt_rxdone(struct urb *urb)
rt2x00lib_dmadone(entry);
/*
- * Schedule the delayed work for reading the RX status
- * from the device.
+ * Schedule the delayed work for processing RX data
*/
queue_work(rt2x00dev->workqueue, &rt2x00dev->rxdone_work);
}
@@ -402,8 +400,7 @@ static bool rt2x00usb_kick_rx_entry(struct queue_entry *entry, void *data)
struct queue_entry_priv_usb *entry_priv = entry->priv_data;
int status;
- if (test_and_set_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags) ||
- test_bit(ENTRY_DATA_STATUS_PENDING, &entry->flags))
+ if (test_and_set_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags))
return false;
rt2x00lib_dmastart(entry);
diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8723b.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8723b.c
index 3adb1d3d47ac..ceffe05bd65b 100644
--- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8723b.c
+++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8723b.c
@@ -1525,7 +1525,7 @@ static void rtl8723b_enable_rf(struct rtl8xxxu_priv *priv)
/*
* WLAN action by PTA
*/
- rtl8xxxu_write8(priv, REG_WLAN_ACT_CONTROL_8723B, 0x04);
+ rtl8xxxu_write8(priv, REG_WLAN_ACT_CONTROL_8723B, 0x0c);
/*
* BT select S0/S1 controlled by WiFi
@@ -1568,9 +1568,14 @@ static void rtl8723b_enable_rf(struct rtl8xxxu_priv *priv)
rtl8xxxu_gen2_h2c_cmd(priv, &h2c, sizeof(h2c.ant_sel_rsv));
/*
- * 0x280, 0x00, 0x200, 0x80 - not clear
+ * Different settings per different antenna position.
+ *      Antenna Position:   | Normal   Inverse
+ * --------------------------------------------------
+ * Antenna switch to BT:    | 0x280,   0x00
+ * Antenna switch to WiFi:  | 0x0,     0x280
+ * Antenna switch to PTA:   | 0x200,   0x80
*/
- rtl8xxxu_write32(priv, REG_S0S1_PATH_SWITCH, 0x00);
+ rtl8xxxu_write32(priv, REG_S0S1_PATH_SWITCH, 0x80);
/*
* Software control, antenna at WiFi side
diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c
index 8136e268b4e6..c6c41fb962ff 100644
--- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c
+++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c
@@ -3891,12 +3891,13 @@ static int rtl8xxxu_init_device(struct ieee80211_hw *hw)
/* Check if MAC is already powered on */
val8 = rtl8xxxu_read8(priv, REG_CR);
+ val16 = rtl8xxxu_read16(priv, REG_SYS_CLKR);
/*
* Fix 92DU-VC S3 hang with the reason is that secondary mac is not
* initialized. First MAC returns 0xea, second MAC returns 0x00
*/
- if (val8 == 0xea)
+ if (val8 == 0xea || !(val16 & SYS_CLK_MAC_CLK_ENABLE))
macpower = false;
else
macpower = true;
diff --git a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.c b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.c
index 152242ac0aa5..191dafd03189 100644
--- a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.c
+++ b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.c
@@ -509,13 +509,7 @@ static u32 halbtc_get_wifi_link_status(struct btc_coexist *btcoexist)
static s32 halbtc_get_wifi_rssi(struct rtl_priv *rtlpriv)
{
- int undec_sm_pwdb = 0;
-
- if (rtlpriv->mac80211.link_state >= MAC80211_LINKED)
- undec_sm_pwdb = rtlpriv->dm.undec_sm_pwdb;
- else /* associated entry pwdb */
- undec_sm_pwdb = rtlpriv->dm.undec_sm_pwdb;
- return undec_sm_pwdb;
+ return rtlpriv->dm.undec_sm_pwdb;
}
static bool halbtc_get(void *void_btcoexist, u8 get_type, void *out_buf)
diff --git a/drivers/net/wireless/realtek/rtlwifi/pci.c b/drivers/net/wireless/realtek/rtlwifi/pci.c
index 4055e0ab75ba..7d96fe5f1a44 100644
--- a/drivers/net/wireless/realtek/rtlwifi/pci.c
+++ b/drivers/net/wireless/realtek/rtlwifi/pci.c
@@ -2409,8 +2409,7 @@ EXPORT_SYMBOL(rtl_pci_disconnect);
****************************************/
int rtl_pci_suspend(struct device *dev)
{
- struct pci_dev *pdev = to_pci_dev(dev);
- struct ieee80211_hw *hw = pci_get_drvdata(pdev);
+ struct ieee80211_hw *hw = dev_get_drvdata(dev);
struct rtl_priv *rtlpriv = rtl_priv(hw);
rtlpriv->cfg->ops->hw_suspend(hw);
@@ -2422,8 +2421,7 @@ EXPORT_SYMBOL(rtl_pci_suspend);
int rtl_pci_resume(struct device *dev)
{
- struct pci_dev *pdev = to_pci_dev(dev);
- struct ieee80211_hw *hw = pci_get_drvdata(pdev);
+ struct ieee80211_hw *hw = dev_get_drvdata(dev);
struct rtl_priv *rtlpriv = rtl_priv(hw);
rtlpriv->cfg->ops->hw_resume(hw);
diff --git a/drivers/net/wireless/realtek/rtlwifi/regd.c b/drivers/net/wireless/realtek/rtlwifi/regd.c
index 6ccb5b93a595..c10432cd703e 100644
--- a/drivers/net/wireless/realtek/rtlwifi/regd.c
+++ b/drivers/net/wireless/realtek/rtlwifi/regd.c
@@ -276,22 +276,6 @@ static void _rtl_reg_apply_world_flags(struct wiphy *wiphy,
return;
}
-static void _rtl_dump_channel_map(struct wiphy *wiphy)
-{
- enum nl80211_band band;
- struct ieee80211_supported_band *sband;
- struct ieee80211_channel *ch;
- unsigned int i;
-
- for (band = 0; band < NUM_NL80211_BANDS; band++) {
- if (!wiphy->bands[band])
- continue;
- sband = wiphy->bands[band];
- for (i = 0; i < sband->n_channels; i++)
- ch = &sband->channels[i];
- }
-}
-
static int _rtl_reg_notifier_apply(struct wiphy *wiphy,
struct regulatory_request *request,
struct rtl_regulatory *reg)
@@ -309,8 +293,6 @@ static int _rtl_reg_notifier_apply(struct wiphy *wiphy,
break;
}
- _rtl_dump_channel_map(wiphy);
-
return 0;
}
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/dm.c b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/dm.c
index 85360353f557..333e355c9281 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/dm.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/dm.c
@@ -1411,12 +1411,13 @@ void rtl88e_dm_set_tx_ant_by_tx_info(struct ieee80211_hw *hw,
struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
struct rtl_dm *rtldm = rtl_dm(rtl_priv(hw));
struct fast_ant_training *pfat_table = &rtldm->fat_table;
+ __le32 *pdesc32 = (__le32 *)pdesc;
if ((rtlefuse->antenna_div_type == CG_TRX_HW_ANTDIV) ||
(rtlefuse->antenna_div_type == CG_TRX_SMART_ANTDIV)) {
- SET_TX_DESC_ANTSEL_A(pdesc, pfat_table->antsel_a[mac_id]);
- SET_TX_DESC_ANTSEL_B(pdesc, pfat_table->antsel_b[mac_id]);
- SET_TX_DESC_ANTSEL_C(pdesc, pfat_table->antsel_c[mac_id]);
+ set_tx_desc_antsel_a(pdesc32, pfat_table->antsel_a[mac_id]);
+ set_tx_desc_antsel_b(pdesc32, pfat_table->antsel_b[mac_id]);
+ set_tx_desc_antsel_c(pdesc32, pfat_table->antsel_c[mac_id]);
}
}
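The old SET_/GET_ descriptor macros did byte-offset arithmetic on a plain u8 * buffer, hiding the little-endian layout from the type system; the lower-case helpers take __le32 * so sparse can verify every access. The driver's definitions live in trx.h (partially visible at the end of this diff); a plausible shape for two of them, assuming they are built on le32p_replace_bits()/le32_get_bits() from <linux/bitfield.h>:

#include <linux/bitfield.h>

/* dword 2, bit 24: matches the old SET_TX_DESC_ANTSEL_A layout,
 * i.e. SET_BITS_TO_LE_4BYTE(__pdesc + 8, 24, 1, __val). */
static inline void set_tx_desc_antsel_a(__le32 *pdesc, u32 val)
{
	le32p_replace_bits(pdesc + 2, val, BIT(24));
}

/* dword 0, bit 31: matches the old GET_TX_DESC_OWN layout. */
static inline u32 get_tx_desc_own(__le32 *pdesc)
{
	return le32_get_bits(*pdesc, BIT(31));
}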
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/sw.c
index eab48fed61ed..a0eda51e833c 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/sw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/sw.c
@@ -115,10 +115,6 @@ int rtl88e_init_sw_vars(struct ieee80211_hw *hw)
rtlpriv->psc.swctrl_lps = rtlpriv->cfg->mod_params->swctrl_lps;
rtlpriv->psc.fwctrl_lps = rtlpriv->cfg->mod_params->fwctrl_lps;
rtlpci->msi_support = rtlpriv->cfg->mod_params->msi_support;
- rtlpriv->cfg->mod_params->sw_crypto =
- rtlpriv->cfg->mod_params->sw_crypto;
- rtlpriv->cfg->mod_params->disable_watchdog =
- rtlpriv->cfg->mod_params->disable_watchdog;
if (rtlpriv->cfg->mod_params->disable_watchdog)
pr_info("watchdog disabled\n");
if (!rtlpriv->psc.inactiveps)
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/trx.c b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/trx.c
index 483dc8bdc555..aa2e9e88be53 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/trx.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/trx.c
@@ -25,7 +25,7 @@ static u8 _rtl88ee_map_hwqueue_to_fwqueue(struct sk_buff *skb, u8 hw_queue)
}
static void _rtl88ee_query_rxphystatus(struct ieee80211_hw *hw,
- struct rtl_stats *pstatus, u8 *pdesc,
+ struct rtl_stats *pstatus, __le32 *pdesc,
struct rx_fwinfo_88e *p_drvinfo,
bool bpacket_match_bssid,
bool bpacket_toself, bool packet_beacon)
@@ -271,7 +271,7 @@ static void _rtl88ee_smart_antenna(struct ieee80211_hw *hw,
static void _rtl88ee_translate_rx_signal_stuff(struct ieee80211_hw *hw,
struct sk_buff *skb,
struct rtl_stats *pstatus,
- u8 *pdesc,
+ __le32 *pdesc,
struct rx_fwinfo_88e *p_drvinfo)
{
struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
@@ -313,13 +313,13 @@ static void _rtl88ee_translate_rx_signal_stuff(struct ieee80211_hw *hw,
rtl_process_phyinfo(hw, tmp_buf, pstatus);
}
-static void _rtl88ee_insert_emcontent(struct rtl_tcb_desc *ptcb_desc,
- u8 *virtualaddress)
+static void rtl88ee_insert_emcontent(struct rtl_tcb_desc *ptcb_desc,
+ __le32 *virtualaddress)
{
u32 dwtmp = 0;
memset(virtualaddress, 0, 8);
- SET_EARLYMODE_PKTNUM(virtualaddress, ptcb_desc->empkt_num);
+ set_earlymode_pktnum(virtualaddress, ptcb_desc->empkt_num);
if (ptcb_desc->empkt_num == 1) {
dwtmp = ptcb_desc->empkt_len[0];
} else {
@@ -327,7 +327,7 @@ static void _rtl88ee_insert_emcontent(struct rtl_tcb_desc *ptcb_desc,
dwtmp += ((dwtmp%4) ? (4-dwtmp%4) : 0)+4;
dwtmp += ptcb_desc->empkt_len[1];
}
- SET_EARLYMODE_LEN0(virtualaddress, dwtmp);
+ set_earlymode_len0(virtualaddress, dwtmp);
if (ptcb_desc->empkt_num <= 3) {
dwtmp = ptcb_desc->empkt_len[2];
@@ -336,7 +336,7 @@ static void _rtl88ee_insert_emcontent(struct rtl_tcb_desc *ptcb_desc,
dwtmp += ((dwtmp%4) ? (4-dwtmp%4) : 0)+4;
dwtmp += ptcb_desc->empkt_len[3];
}
- SET_EARLYMODE_LEN1(virtualaddress, dwtmp);
+ set_earlymode_len1(virtualaddress, dwtmp);
if (ptcb_desc->empkt_num <= 5) {
dwtmp = ptcb_desc->empkt_len[4];
} else {
@@ -344,8 +344,8 @@ static void _rtl88ee_insert_emcontent(struct rtl_tcb_desc *ptcb_desc,
dwtmp += ((dwtmp%4) ? (4-dwtmp%4) : 0)+4;
dwtmp += ptcb_desc->empkt_len[5];
}
- SET_EARLYMODE_LEN2_1(virtualaddress, dwtmp & 0xF);
- SET_EARLYMODE_LEN2_2(virtualaddress, dwtmp >> 4);
+ set_earlymode_len2_1(virtualaddress, dwtmp & 0xF);
+ set_earlymode_len2_2(virtualaddress, dwtmp >> 4);
if (ptcb_desc->empkt_num <= 7) {
dwtmp = ptcb_desc->empkt_len[6];
} else {
@@ -353,7 +353,7 @@ static void _rtl88ee_insert_emcontent(struct rtl_tcb_desc *ptcb_desc,
dwtmp += ((dwtmp%4) ? (4-dwtmp%4) : 0)+4;
dwtmp += ptcb_desc->empkt_len[7];
}
- SET_EARLYMODE_LEN3(virtualaddress, dwtmp);
+ set_earlymode_len3(virtualaddress, dwtmp);
if (ptcb_desc->empkt_num <= 9) {
dwtmp = ptcb_desc->empkt_len[8];
} else {
@@ -361,50 +361,51 @@ static void _rtl88ee_insert_emcontent(struct rtl_tcb_desc *ptcb_desc,
dwtmp += ((dwtmp%4) ? (4-dwtmp%4) : 0)+4;
dwtmp += ptcb_desc->empkt_len[9];
}
- SET_EARLYMODE_LEN4(virtualaddress, dwtmp);
+ set_earlymode_len4(virtualaddress, dwtmp);
}
bool rtl88ee_rx_query_desc(struct ieee80211_hw *hw,
struct rtl_stats *status,
struct ieee80211_rx_status *rx_status,
- u8 *pdesc, struct sk_buff *skb)
+ u8 *pdesc8, struct sk_buff *skb)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rx_fwinfo_88e *p_drvinfo;
struct ieee80211_hdr *hdr;
u8 wake_match;
- u32 phystatus = GET_RX_DESC_PHYST(pdesc);
+ __le32 *pdesc = (__le32 *)pdesc8;
+ u32 phystatus = get_rx_desc_physt(pdesc);
- status->packet_report_type = (u8)GET_RX_STATUS_DESC_RPT_SEL(pdesc);
+ status->packet_report_type = (u8)get_rx_status_desc_rpt_sel(pdesc);
if (status->packet_report_type == TX_REPORT2)
- status->length = (u16)GET_RX_RPT2_DESC_PKT_LEN(pdesc);
+ status->length = (u16)get_rx_rpt2_desc_pkt_len(pdesc);
else
- status->length = (u16)GET_RX_DESC_PKT_LEN(pdesc);
- status->rx_drvinfo_size = (u8)GET_RX_DESC_DRV_INFO_SIZE(pdesc) *
+ status->length = (u16)get_rx_desc_pkt_len(pdesc);
+ status->rx_drvinfo_size = (u8)get_rx_desc_drv_info_size(pdesc) *
RX_DRV_INFO_SIZE_UNIT;
- status->rx_bufshift = (u8)(GET_RX_DESC_SHIFT(pdesc) & 0x03);
- status->icv = (u16)GET_RX_DESC_ICV(pdesc);
- status->crc = (u16)GET_RX_DESC_CRC32(pdesc);
+ status->rx_bufshift = (u8)(get_rx_desc_shift(pdesc) & 0x03);
+ status->icv = (u16)get_rx_desc_icv(pdesc);
+ status->crc = (u16)get_rx_desc_crc32(pdesc);
status->hwerror = (status->crc | status->icv);
- status->decrypted = !GET_RX_DESC_SWDEC(pdesc);
- status->rate = (u8)GET_RX_DESC_RXMCS(pdesc);
- status->shortpreamble = (u16)GET_RX_DESC_SPLCP(pdesc);
- status->isampdu = (bool) (GET_RX_DESC_PAGGR(pdesc) == 1);
- status->isfirst_ampdu = (bool)((GET_RX_DESC_PAGGR(pdesc) == 1) &&
- (GET_RX_DESC_FAGGR(pdesc) == 1));
+ status->decrypted = !get_rx_desc_swdec(pdesc);
+ status->rate = (u8)get_rx_desc_rxmcs(pdesc);
+ status->shortpreamble = (u16)get_rx_desc_splcp(pdesc);
+ status->isampdu = (bool) (get_rx_desc_paggr(pdesc) == 1);
+ status->isfirst_ampdu = (bool)((get_rx_desc_paggr(pdesc) == 1) &&
+ (get_rx_desc_faggr(pdesc) == 1));
if (status->packet_report_type == NORMAL_RX)
- status->timestamp_low = GET_RX_DESC_TSFL(pdesc);
- status->rx_is40mhzpacket = (bool)GET_RX_DESC_BW(pdesc);
- status->is_ht = (bool)GET_RX_DESC_RXHT(pdesc);
+ status->timestamp_low = get_rx_desc_tsfl(pdesc);
+ status->rx_is40mhzpacket = (bool)get_rx_desc_bw(pdesc);
+ status->is_ht = (bool)get_rx_desc_rxht(pdesc);
status->is_cck = RTL8188_RX_HAL_IS_CCK_RATE(status->rate);
- status->macid = GET_RX_DESC_MACID(pdesc);
- if (GET_RX_STATUS_DESC_PATTERN_MATCH(pdesc))
+ status->macid = get_rx_desc_macid(pdesc);
+ if (get_rx_status_desc_pattern_match(pdesc))
wake_match = BIT(2);
- else if (GET_RX_STATUS_DESC_MAGIC_MATCH(pdesc))
+ else if (get_rx_status_desc_magic_match(pdesc))
wake_match = BIT(1);
- else if (GET_RX_STATUS_DESC_UNICAST_MATCH(pdesc))
+ else if (get_rx_status_desc_unicast_match(pdesc))
wake_match = BIT(0);
else
wake_match = 0;
@@ -465,15 +466,15 @@ bool rtl88ee_rx_query_desc(struct ieee80211_hw *hw,
rx_status->signal = status->recvsignalpower + 10;
if (status->packet_report_type == TX_REPORT2) {
status->macid_valid_entry[0] =
- GET_RX_RPT2_DESC_MACID_VALID_1(pdesc);
+ get_rx_rpt2_desc_macid_valid_1(pdesc);
status->macid_valid_entry[1] =
- GET_RX_RPT2_DESC_MACID_VALID_2(pdesc);
+ get_rx_rpt2_desc_macid_valid_2(pdesc);
}
return true;
}
void rtl88ee_tx_fill_desc(struct ieee80211_hw *hw,
- struct ieee80211_hdr *hdr, u8 *pdesc_tx,
+ struct ieee80211_hdr *hdr, u8 *pdesc8,
u8 *txbd, struct ieee80211_tx_info *info,
struct ieee80211_sta *sta,
struct sk_buff *skb,
@@ -484,7 +485,6 @@ void rtl88ee_tx_fill_desc(struct ieee80211_hw *hw,
struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
- u8 *pdesc = (u8 *)pdesc_tx;
u16 seq_number;
__le16 fc = hdr->frame_control;
unsigned int buf_len = 0;
@@ -497,6 +497,7 @@ void rtl88ee_tx_fill_desc(struct ieee80211_hw *hw,
dma_addr_t mapping;
u8 bw_40 = 0;
u8 short_gi = 0;
+ __le32 *pdesc = (__le32 *)pdesc8;
if (mac->opmode == NL80211_IFTYPE_STATION) {
bw_40 = mac->bw_40;
@@ -521,77 +522,77 @@ void rtl88ee_tx_fill_desc(struct ieee80211_hw *hw,
"DMA mapping error\n");
return;
}
- CLEAR_PCI_TX_DESC_CONTENT(pdesc, sizeof(struct tx_desc_88e));
+ clear_pci_tx_desc_content(pdesc, sizeof(struct tx_desc_88e));
if (ieee80211_is_nullfunc(fc) || ieee80211_is_ctl(fc)) {
firstseg = true;
lastseg = true;
}
if (firstseg) {
if (rtlhal->earlymode_enable) {
- SET_TX_DESC_PKT_OFFSET(pdesc, 1);
- SET_TX_DESC_OFFSET(pdesc, USB_HWDESC_HEADER_LEN +
+ set_tx_desc_pkt_offset(pdesc, 1);
+ set_tx_desc_offset(pdesc, USB_HWDESC_HEADER_LEN +
EM_HDR_LEN);
if (ptcb_desc->empkt_num) {
RT_TRACE(rtlpriv, COMP_SEND, DBG_TRACE,
"Insert 8 byte.pTcb->EMPktNum:%d\n",
ptcb_desc->empkt_num);
- _rtl88ee_insert_emcontent(ptcb_desc,
- (u8 *)(skb->data));
+ rtl88ee_insert_emcontent(ptcb_desc,
+ (__le32 *)(skb->data));
}
} else {
- SET_TX_DESC_OFFSET(pdesc, USB_HWDESC_HEADER_LEN);
+ set_tx_desc_offset(pdesc, USB_HWDESC_HEADER_LEN);
}
ptcb_desc->use_driver_rate = true;
- SET_TX_DESC_TX_RATE(pdesc, ptcb_desc->hw_rate);
+ set_tx_desc_tx_rate(pdesc, ptcb_desc->hw_rate);
if (ptcb_desc->hw_rate > DESC92C_RATEMCS0)
short_gi = (ptcb_desc->use_shortgi) ? 1 : 0;
else
short_gi = (ptcb_desc->use_shortpreamble) ? 1 : 0;
- SET_TX_DESC_DATA_SHORTGI(pdesc, short_gi);
+ set_tx_desc_data_shortgi(pdesc, short_gi);
if (info->flags & IEEE80211_TX_CTL_AMPDU) {
- SET_TX_DESC_AGG_ENABLE(pdesc, 1);
- SET_TX_DESC_MAX_AGG_NUM(pdesc, 0x14);
+ set_tx_desc_agg_enable(pdesc, 1);
+ set_tx_desc_max_agg_num(pdesc, 0x14);
}
- SET_TX_DESC_SEQ(pdesc, seq_number);
- SET_TX_DESC_RTS_ENABLE(pdesc, ((ptcb_desc->rts_enable &&
+ set_tx_desc_seq(pdesc, seq_number);
+ set_tx_desc_rts_enable(pdesc, ((ptcb_desc->rts_enable &&
!ptcb_desc->cts_enable) ? 1 : 0));
- SET_TX_DESC_HW_RTS_ENABLE(pdesc, 0);
- SET_TX_DESC_CTS2SELF(pdesc, ((ptcb_desc->cts_enable) ? 1 : 0));
- SET_TX_DESC_RTS_STBC(pdesc, ((ptcb_desc->rts_stbc) ? 1 : 0));
-
- SET_TX_DESC_RTS_RATE(pdesc, ptcb_desc->rts_rate);
- SET_TX_DESC_RTS_BW(pdesc, 0);
- SET_TX_DESC_RTS_SC(pdesc, ptcb_desc->rts_sc);
- SET_TX_DESC_RTS_SHORT(pdesc,
+ set_tx_desc_hw_rts_enable(pdesc, 0);
+ set_tx_desc_cts2self(pdesc, ((ptcb_desc->cts_enable) ? 1 : 0));
+ set_tx_desc_rts_stbc(pdesc, ((ptcb_desc->rts_stbc) ? 1 : 0));
+
+ set_tx_desc_rts_rate(pdesc, ptcb_desc->rts_rate);
+ set_tx_desc_rts_bw(pdesc, 0);
+ set_tx_desc_rts_sc(pdesc, ptcb_desc->rts_sc);
+ set_tx_desc_rts_short(pdesc,
((ptcb_desc->rts_rate <= DESC92C_RATE54M) ?
(ptcb_desc->rts_use_shortpreamble ? 1 : 0) :
(ptcb_desc->rts_use_shortgi ? 1 : 0)));
if (ptcb_desc->tx_enable_sw_calc_duration)
- SET_TX_DESC_NAV_USE_HDR(pdesc, 1);
+ set_tx_desc_nav_use_hdr(pdesc, 1);
if (bw_40) {
if (ptcb_desc->packet_bw == HT_CHANNEL_WIDTH_20_40) {
- SET_TX_DESC_DATA_BW(pdesc, 1);
- SET_TX_DESC_TX_SUB_CARRIER(pdesc, 3);
+ set_tx_desc_data_bw(pdesc, 1);
+ set_tx_desc_tx_sub_carrier(pdesc, 3);
} else {
- SET_TX_DESC_DATA_BW(pdesc, 0);
- SET_TX_DESC_TX_SUB_CARRIER(pdesc,
+ set_tx_desc_data_bw(pdesc, 0);
+ set_tx_desc_tx_sub_carrier(pdesc,
mac->cur_40_prime_sc);
}
} else {
- SET_TX_DESC_DATA_BW(pdesc, 0);
- SET_TX_DESC_TX_SUB_CARRIER(pdesc, 0);
+ set_tx_desc_data_bw(pdesc, 0);
+ set_tx_desc_tx_sub_carrier(pdesc, 0);
}
- SET_TX_DESC_LINIP(pdesc, 0);
- SET_TX_DESC_PKT_SIZE(pdesc, (u16)skb_len);
+ set_tx_desc_linip(pdesc, 0);
+ set_tx_desc_pkt_size(pdesc, (u16)skb_len);
if (sta) {
u8 ampdu_density = sta->ht_cap.ampdu_density;
- SET_TX_DESC_AMPDU_DENSITY(pdesc, ampdu_density);
+ set_tx_desc_ampdu_density(pdesc, ampdu_density);
}
if (info->control.hw_key) {
struct ieee80211_key_conf *keyconf;
@@ -601,76 +602,77 @@ void rtl88ee_tx_fill_desc(struct ieee80211_hw *hw,
case WLAN_CIPHER_SUITE_WEP40:
case WLAN_CIPHER_SUITE_WEP104:
case WLAN_CIPHER_SUITE_TKIP:
- SET_TX_DESC_SEC_TYPE(pdesc, 0x1);
+ set_tx_desc_sec_type(pdesc, 0x1);
break;
case WLAN_CIPHER_SUITE_CCMP:
- SET_TX_DESC_SEC_TYPE(pdesc, 0x3);
+ set_tx_desc_sec_type(pdesc, 0x3);
break;
default:
- SET_TX_DESC_SEC_TYPE(pdesc, 0x0);
+ set_tx_desc_sec_type(pdesc, 0x0);
break;
}
}
- SET_TX_DESC_QUEUE_SEL(pdesc, fw_qsel);
- SET_TX_DESC_DATA_RATE_FB_LIMIT(pdesc, 0x1F);
- SET_TX_DESC_RTS_RATE_FB_LIMIT(pdesc, 0xF);
- SET_TX_DESC_DISABLE_FB(pdesc, ptcb_desc->disable_ratefallback ?
+ set_tx_desc_queue_sel(pdesc, fw_qsel);
+ set_tx_desc_data_rate_fb_limit(pdesc, 0x1F);
+ set_tx_desc_rts_rate_fb_limit(pdesc, 0xF);
+ set_tx_desc_disable_fb(pdesc, ptcb_desc->disable_ratefallback ?
1 : 0);
- SET_TX_DESC_USE_RATE(pdesc, ptcb_desc->use_driver_rate ? 1 : 0);
+ set_tx_desc_use_rate(pdesc, ptcb_desc->use_driver_rate ? 1 : 0);
- /*SET_TX_DESC_PWR_STATUS(pdesc, pwr_status);*/
+ /*set_tx_desc_pwr_status(pdesc, pwr_status);*/
/* Set TxRate and RTSRate in TxDesc */
/* This prevent Tx initial rate of new-coming packets */
/* from being overwritten by retried packet rate.*/
if (!ptcb_desc->use_driver_rate) {
- /*SET_TX_DESC_RTS_RATE(pdesc, 0x08); */
- /* SET_TX_DESC_TX_RATE(pdesc, 0x0b); */
+ /*set_tx_desc_rts_rate(pdesc, 0x08); */
+ /* set_tx_desc_tx_rate(pdesc, 0x0b); */
}
if (ieee80211_is_data_qos(fc)) {
if (mac->rdg_en) {
RT_TRACE(rtlpriv, COMP_SEND, DBG_TRACE,
"Enable RDG function.\n");
- SET_TX_DESC_RDG_ENABLE(pdesc, 1);
- SET_TX_DESC_HTC(pdesc, 1);
+ set_tx_desc_rdg_enable(pdesc, 1);
+ set_tx_desc_htc(pdesc, 1);
}
}
}
- SET_TX_DESC_FIRST_SEG(pdesc, (firstseg ? 1 : 0));
- SET_TX_DESC_LAST_SEG(pdesc, (lastseg ? 1 : 0));
- SET_TX_DESC_TX_BUFFER_SIZE(pdesc, (u16)buf_len);
- SET_TX_DESC_TX_BUFFER_ADDRESS(pdesc, mapping);
+ set_tx_desc_first_seg(pdesc, (firstseg ? 1 : 0));
+ set_tx_desc_last_seg(pdesc, (lastseg ? 1 : 0));
+ set_tx_desc_tx_buffer_size(pdesc, (u16)buf_len);
+ set_tx_desc_tx_buffer_address(pdesc, mapping);
if (rtlpriv->dm.useramask) {
- SET_TX_DESC_RATE_ID(pdesc, ptcb_desc->ratr_index);
- SET_TX_DESC_MACID(pdesc, ptcb_desc->mac_id);
+ set_tx_desc_rate_id(pdesc, ptcb_desc->ratr_index);
+ set_tx_desc_macid(pdesc, ptcb_desc->mac_id);
} else {
- SET_TX_DESC_RATE_ID(pdesc, 0xC + ptcb_desc->ratr_index);
- SET_TX_DESC_MACID(pdesc, ptcb_desc->ratr_index);
+ set_tx_desc_rate_id(pdesc, 0xC + ptcb_desc->ratr_index);
+ set_tx_desc_macid(pdesc, ptcb_desc->ratr_index);
}
if (ieee80211_is_data_qos(fc))
- SET_TX_DESC_QOS(pdesc, 1);
+ set_tx_desc_qos(pdesc, 1);
if (!ieee80211_is_data_qos(fc))
- SET_TX_DESC_HWSEQ_EN(pdesc, 1);
- SET_TX_DESC_MORE_FRAG(pdesc, (lastseg ? 0 : 1));
+ set_tx_desc_hwseq_en(pdesc, 1);
+ set_tx_desc_more_frag(pdesc, (lastseg ? 0 : 1));
if (is_multicast_ether_addr(ieee80211_get_DA(hdr)) ||
is_broadcast_ether_addr(ieee80211_get_DA(hdr))) {
- SET_TX_DESC_BMC(pdesc, 1);
+ set_tx_desc_bmc(pdesc, 1);
}
- rtl88e_dm_set_tx_ant_by_tx_info(hw, pdesc, ptcb_desc->mac_id);
+ rtl88e_dm_set_tx_ant_by_tx_info(hw, pdesc8, ptcb_desc->mac_id);
RT_TRACE(rtlpriv, COMP_SEND, DBG_TRACE, "\n");
}
void rtl88ee_tx_fill_cmddesc(struct ieee80211_hw *hw,
- u8 *pdesc, bool firstseg,
+ u8 *pdesc8, bool firstseg,
bool lastseg, struct sk_buff *skb)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
u8 fw_queue = QSLT_BEACON;
+ __le32 *pdesc = (__le32 *)pdesc8;
dma_addr_t mapping = pci_map_single(rtlpci->pdev,
skb->data, skb->len,
@@ -684,58 +686,60 @@ void rtl88ee_tx_fill_cmddesc(struct ieee80211_hw *hw,
"DMA mapping error\n");
return;
}
- CLEAR_PCI_TX_DESC_CONTENT(pdesc, TX_DESC_SIZE);
+ clear_pci_tx_desc_content(pdesc, TX_DESC_SIZE);
if (firstseg)
- SET_TX_DESC_OFFSET(pdesc, USB_HWDESC_HEADER_LEN);
+ set_tx_desc_offset(pdesc, USB_HWDESC_HEADER_LEN);
- SET_TX_DESC_TX_RATE(pdesc, DESC92C_RATE1M);
+ set_tx_desc_tx_rate(pdesc, DESC92C_RATE1M);
- SET_TX_DESC_SEQ(pdesc, 0);
+ set_tx_desc_seq(pdesc, 0);
- SET_TX_DESC_LINIP(pdesc, 0);
+ set_tx_desc_linip(pdesc, 0);
- SET_TX_DESC_QUEUE_SEL(pdesc, fw_queue);
+ set_tx_desc_queue_sel(pdesc, fw_queue);
- SET_TX_DESC_FIRST_SEG(pdesc, 1);
- SET_TX_DESC_LAST_SEG(pdesc, 1);
+ set_tx_desc_first_seg(pdesc, 1);
+ set_tx_desc_last_seg(pdesc, 1);
- SET_TX_DESC_TX_BUFFER_SIZE(pdesc, (u16)(skb->len));
+ set_tx_desc_tx_buffer_size(pdesc, (u16)(skb->len));
- SET_TX_DESC_TX_BUFFER_ADDRESS(pdesc, mapping);
+ set_tx_desc_tx_buffer_address(pdesc, mapping);
- SET_TX_DESC_RATE_ID(pdesc, 7);
- SET_TX_DESC_MACID(pdesc, 0);
+ set_tx_desc_rate_id(pdesc, 7);
+ set_tx_desc_macid(pdesc, 0);
- SET_TX_DESC_OWN(pdesc, 1);
+ set_tx_desc_own(pdesc, 1);
- SET_TX_DESC_PKT_SIZE(pdesc, (u16)(skb->len));
+ set_tx_desc_pkt_size(pdesc, (u16)(skb->len));
- SET_TX_DESC_FIRST_SEG(pdesc, 1);
- SET_TX_DESC_LAST_SEG(pdesc, 1);
+ set_tx_desc_first_seg(pdesc, 1);
+ set_tx_desc_last_seg(pdesc, 1);
- SET_TX_DESC_OFFSET(pdesc, 0x20);
+ set_tx_desc_offset(pdesc, 0x20);
- SET_TX_DESC_USE_RATE(pdesc, 1);
+ set_tx_desc_use_rate(pdesc, 1);
if (!ieee80211_is_data_qos(fc))
- SET_TX_DESC_HWSEQ_EN(pdesc, 1);
+ set_tx_desc_hwseq_en(pdesc, 1);
RT_PRINT_DATA(rtlpriv, COMP_CMD, DBG_LOUD,
"H2C Tx Cmd Content\n",
pdesc, TX_DESC_SIZE);
}
-void rtl88ee_set_desc(struct ieee80211_hw *hw, u8 *pdesc,
+void rtl88ee_set_desc(struct ieee80211_hw *hw, u8 *pdesc8,
bool istx, u8 desc_name, u8 *val)
{
+ __le32 *pdesc = (__le32 *)pdesc8;
+
if (istx == true) {
switch (desc_name) {
case HW_DESC_OWN:
- SET_TX_DESC_OWN(pdesc, 1);
+ set_tx_desc_own(pdesc, 1);
break;
case HW_DESC_TX_NEXTDESC_ADDR:
- SET_TX_DESC_NEXT_DESC_ADDRESS(pdesc, *(u32 *)val);
+ set_tx_desc_next_desc_address(pdesc, *(u32 *)val);
break;
default:
WARN_ONCE(true, "rtl8188ee: ERR txdesc :%d not processed\n",
@@ -745,16 +749,16 @@ void rtl88ee_set_desc(struct ieee80211_hw *hw, u8 *pdesc,
} else {
switch (desc_name) {
case HW_DESC_RXOWN:
- SET_RX_DESC_OWN(pdesc, 1);
+ set_rx_desc_own(pdesc, 1);
break;
case HW_DESC_RXBUFF_ADDR:
- SET_RX_DESC_BUFF_ADDR(pdesc, *(u32 *)val);
+ set_rx_desc_buff_addr(pdesc, *(u32 *)val);
break;
case HW_DESC_RXPKT_LEN:
- SET_RX_DESC_PKT_LEN(pdesc, *(u32 *)val);
+ set_rx_desc_pkt_len(pdesc, *(u32 *)val);
break;
case HW_DESC_RXERO:
- SET_RX_DESC_EOR(pdesc, 1);
+ set_rx_desc_eor(pdesc, 1);
break;
default:
WARN_ONCE(true, "rtl8188ee: ERR rxdesc :%d not processed\n",
@@ -765,17 +769,18 @@ void rtl88ee_set_desc(struct ieee80211_hw *hw, u8 *pdesc,
}
u64 rtl88ee_get_desc(struct ieee80211_hw *hw,
- u8 *pdesc, bool istx, u8 desc_name)
+ u8 *pdesc8, bool istx, u8 desc_name)
{
u32 ret = 0;
+ __le32 *pdesc = (__le32 *)pdesc8;
if (istx == true) {
switch (desc_name) {
case HW_DESC_OWN:
- ret = GET_TX_DESC_OWN(pdesc);
+ ret = get_tx_desc_own(pdesc);
break;
case HW_DESC_TXBUFF_ADDR:
- ret = GET_TX_DESC_TX_BUFFER_ADDRESS(pdesc);
+ ret = get_tx_desc_tx_buffer_address(pdesc);
break;
default:
WARN_ONCE(true, "rtl8188ee: ERR txdesc :%d not processed\n",
@@ -785,13 +790,13 @@ u64 rtl88ee_get_desc(struct ieee80211_hw *hw,
} else {
switch (desc_name) {
case HW_DESC_OWN:
- ret = GET_RX_DESC_OWN(pdesc);
+ ret = get_rx_desc_own(pdesc);
break;
case HW_DESC_RXPKT_LEN:
- ret = GET_RX_DESC_PKT_LEN(pdesc);
+ ret = get_rx_desc_pkt_len(pdesc);
break;
case HW_DESC_RXBUFF_ADDR:
- ret = GET_RX_DESC_BUFF_ADDR(pdesc);
+ ret = get_rx_desc_buff_addr(pdesc);
break;
default:
WARN_ONCE(true, "rtl8188ee: ERR rxdesc :%d not processed\n",
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/trx.h b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/trx.h
index c29d9bfa5bd4..bd862732d6ae 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/trx.h
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/trx.h
@@ -14,505 +14,545 @@
#define USB_HWDESC_HEADER_LEN 32
#define CRCLENGTH 4
-#define SET_TX_DESC_PKT_SIZE(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE(__pdesc, 0, 16, __val)
-#define SET_TX_DESC_OFFSET(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE(__pdesc, 16, 8, __val)
-#define SET_TX_DESC_BMC(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE(__pdesc, 24, 1, __val)
-#define SET_TX_DESC_HTC(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE(__pdesc, 25, 1, __val)
-#define SET_TX_DESC_LAST_SEG(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE(__pdesc, 26, 1, __val)
-#define SET_TX_DESC_FIRST_SEG(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE(__pdesc, 27, 1, __val)
-#define SET_TX_DESC_LINIP(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE(__pdesc, 28, 1, __val)
-#define SET_TX_DESC_NO_ACM(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE(__pdesc, 29, 1, __val)
-#define SET_TX_DESC_GF(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE(__pdesc, 30, 1, __val)
-#define SET_TX_DESC_OWN(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE(__pdesc, 31, 1, __val)
-
-#define GET_TX_DESC_PKT_SIZE(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc, 0, 16)
-#define GET_TX_DESC_OFFSET(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc, 16, 8)
-#define GET_TX_DESC_BMC(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc, 24, 1)
-#define GET_TX_DESC_HTC(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc, 25, 1)
-#define GET_TX_DESC_LAST_SEG(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc, 26, 1)
-#define GET_TX_DESC_FIRST_SEG(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc, 27, 1)
-#define GET_TX_DESC_LINIP(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc, 28, 1)
-#define GET_TX_DESC_NO_ACM(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc, 29, 1)
-#define GET_TX_DESC_GF(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc, 30, 1)
-#define GET_TX_DESC_OWN(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc, 31, 1)
-
-#define SET_TX_DESC_MACID(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE(__pdesc+4, 0, 6, __val)
-#define SET_TX_DESC_QUEUE_SEL(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE(__pdesc+4, 8, 5, __val)
-#define SET_TX_DESC_RDG_NAV_EXT(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE(__pdesc+4, 13, 1, __val)
-#define SET_TX_DESC_LSIG_TXOP_EN(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE(__pdesc+4, 14, 1, __val)
-#define SET_TX_DESC_PIFS(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE(__pdesc+4, 15, 1, __val)
-#define SET_TX_DESC_RATE_ID(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE(__pdesc+4, 16, 4, __val)
-#define SET_TX_DESC_NAV_USE_HDR(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE(__pdesc+4, 20, 1, __val)
-#define SET_TX_DESC_EN_DESC_ID(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE(__pdesc+4, 21, 1, __val)
-#define SET_TX_DESC_SEC_TYPE(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE(__pdesc+4, 22, 2, __val)
-#define SET_TX_DESC_PKT_OFFSET(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE(__pdesc+4, 26, 5, __val)
-#define SET_TX_DESC_PADDING_LEN(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE(__pdesc+4, 24, 8, __val)
-
-#define GET_TX_DESC_MACID(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc+4, 0, 5)
-#define GET_TX_DESC_AGG_ENABLE(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc+4, 5, 1)
-#define GET_TX_DESC_AGG_BREAK(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc+4, 6, 1)
-#define GET_TX_DESC_RDG_ENABLE(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc+4, 7, 1)
-#define GET_TX_DESC_QUEUE_SEL(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc+4, 8, 5)
-#define GET_TX_DESC_RDG_NAV_EXT(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc+4, 13, 1)
-#define GET_TX_DESC_LSIG_TXOP_EN(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc+4, 14, 1)
-#define GET_TX_DESC_PIFS(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc+4, 15, 1)
-#define GET_TX_DESC_RATE_ID(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc+4, 16, 4)
-#define GET_TX_DESC_NAV_USE_HDR(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc+4, 20, 1)
-#define GET_TX_DESC_EN_DESC_ID(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc+4, 21, 1)
-#define GET_TX_DESC_SEC_TYPE(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc+4, 22, 2)
-#define GET_TX_DESC_PKT_OFFSET(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc+4, 24, 8)
-
-#define SET_TX_DESC_RTS_RC(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE(__pdesc+8, 0, 6, __val)
-#define SET_TX_DESC_DATA_RC(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE(__pdesc+8, 6, 6, __val)
-#define SET_TX_DESC_AGG_ENABLE(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE(__pdesc+8, 12, 1, __val)
-#define SET_TX_DESC_RDG_ENABLE(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE(__pdesc+8, 13, 1, __val)
-#define SET_TX_DESC_BAR_RTY_TH(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE(__pdesc+8, 14, 2, __val)
-#define SET_TX_DESC_AGG_BREAK(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE(__pdesc+8, 16, 1, __val)
-#define SET_TX_DESC_MORE_FRAG(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE(__pdesc+8, 17, 1, __val)
-#define SET_TX_DESC_RAW(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE(__pdesc+8, 18, 1, __val)
-#define SET_TX_DESC_CCX(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE(__pdesc+8, 19, 1, __val)
-#define SET_TX_DESC_AMPDU_DENSITY(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE(__pdesc+8, 20, 3, __val)
-#define SET_TX_DESC_BT_INT(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE(__pdesc+8, 23, 1, __val)
-#define SET_TX_DESC_ANTSEL_A(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE(__pdesc+8, 24, 1, __val)
-#define SET_TX_DESC_ANTSEL_B(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE(__pdesc+8, 25, 1, __val)
-#define SET_TX_DESC_TX_ANT_CCK(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE(__pdesc+8, 26, 2, __val)
-#define SET_TX_DESC_TX_ANTL(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE(__pdesc+8, 28, 2, __val)
-#define SET_TX_DESC_TX_ANT_HT(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE(__pdesc+8, 30, 2, __val)
-
-#define GET_TX_DESC_RTS_RC(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc+8, 0, 6)
-#define GET_TX_DESC_DATA_RC(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc+8, 6, 6)
-#define GET_TX_DESC_BAR_RTY_TH(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc+8, 14, 2)
-#define GET_TX_DESC_MORE_FRAG(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc+8, 17, 1)
-#define GET_TX_DESC_RAW(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc+8, 18, 1)
-#define GET_TX_DESC_CCX(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc+8, 19, 1)
-#define GET_TX_DESC_AMPDU_DENSITY(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc+8, 20, 3)
-#define GET_TX_DESC_ANTSEL_A(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc+8, 24, 1)
-#define GET_TX_DESC_ANTSEL_B(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc+8, 25, 1)
-#define GET_TX_DESC_TX_ANT_CCK(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc+8, 26, 2)
-#define GET_TX_DESC_TX_ANTL(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc+8, 28, 2)
-#define GET_TX_DESC_TX_ANT_HT(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc+8, 30, 2)
-
-#define SET_TX_DESC_NEXT_HEAP_PAGE(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE(__pdesc+12, 0, 8, __val)
-#define SET_TX_DESC_TAIL_PAGE(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE(__pdesc+12, 8, 8, __val)
-#define SET_TX_DESC_SEQ(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE(__pdesc+12, 16, 12, __val)
-#define SET_TX_DESC_CPU_HANDLE(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE(__pdesc+12, 28, 1, __val)
-#define SET_TX_DESC_TAG1(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE(__pdesc+12, 29, 1, __val)
-#define SET_TX_DESC_TRIGGER_INT(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE(__pdesc+12, 30, 1, __val)
-#define SET_TX_DESC_HWSEQ_EN(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE(__pdesc+12, 31, 1, __val)
-
-#define GET_TX_DESC_NEXT_HEAP_PAGE(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc+12, 0, 8)
-#define GET_TX_DESC_TAIL_PAGE(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc+12, 8, 8)
-#define GET_TX_DESC_SEQ(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc+12, 16, 12)
-
-#define SET_TX_DESC_RTS_RATE(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE(__pdesc+16, 0, 5, __val)
-#define SET_TX_DESC_AP_DCFE(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE(__pdesc+16, 5, 1, __val)
-#define SET_TX_DESC_QOS(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE(__pdesc+16, 6, 1, __val)
-#define SET_TX_DESC_HWSEQ_SSN(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE(__pdesc+16, 7, 1, __val)
-#define SET_TX_DESC_USE_RATE(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE(__pdesc+16, 8, 1, __val)
-#define SET_TX_DESC_DISABLE_RTS_FB(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE(__pdesc+16, 9, 1, __val)
-#define SET_TX_DESC_DISABLE_FB(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE(__pdesc+16, 10, 1, __val)
-#define SET_TX_DESC_CTS2SELF(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE(__pdesc+16, 11, 1, __val)
-#define SET_TX_DESC_RTS_ENABLE(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE(__pdesc+16, 12, 1, __val)
-#define SET_TX_DESC_HW_RTS_ENABLE(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE(__pdesc+16, 13, 1, __val)
-#define SET_TX_DESC_PORT_ID(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE(__pdesc+16, 14, 1, __val)
-#define SET_TX_DESC_PWR_STATUS(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE(__pdesc+16, 15, 3, __val)
-#define SET_TX_DESC_WAIT_DCTS(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE(__pdesc+16, 18, 1, __val)
-#define SET_TX_DESC_CTS2AP_EN(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE(__pdesc+16, 19, 1, __val)
-#define SET_TX_DESC_TX_SUB_CARRIER(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE(__pdesc+16, 20, 2, __val)
-#define SET_TX_DESC_TX_STBC(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE(__pdesc+16, 22, 2, __val)
-#define SET_TX_DESC_DATA_SHORT(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE(__pdesc+16, 24, 1, __val)
-#define SET_TX_DESC_DATA_BW(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE(__pdesc+16, 25, 1, __val)
-#define SET_TX_DESC_RTS_SHORT(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE(__pdesc+16, 26, 1, __val)
-#define SET_TX_DESC_RTS_BW(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE(__pdesc+16, 27, 1, __val)
-#define SET_TX_DESC_RTS_SC(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE(__pdesc+16, 28, 2, __val)
-#define SET_TX_DESC_RTS_STBC(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE(__pdesc+16, 30, 2, __val)
-
-#define GET_TX_DESC_RTS_RATE(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc+16, 0, 5)
-#define GET_TX_DESC_AP_DCFE(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc+16, 5, 1)
-#define GET_TX_DESC_QOS(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc+16, 6, 1)
-#define GET_TX_DESC_HWSEQ_EN(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc+16, 7, 1)
-#define GET_TX_DESC_USE_RATE(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc+16, 8, 1)
-#define GET_TX_DESC_DISABLE_RTS_FB(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc+16, 9, 1)
-#define GET_TX_DESC_DISABLE_FB(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc+16, 10, 1)
-#define GET_TX_DESC_CTS2SELF(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc+16, 11, 1)
-#define GET_TX_DESC_RTS_ENABLE(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc+16, 12, 1)
-#define GET_TX_DESC_HW_RTS_ENABLE(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc+16, 13, 1)
-#define GET_TX_DESC_PORT_ID(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc+16, 14, 1)
-#define GET_TX_DESC_WAIT_DCTS(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc+16, 18, 1)
-#define GET_TX_DESC_CTS2AP_EN(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc+16, 19, 1)
-#define GET_TX_DESC_TX_SUB_CARRIER(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc+16, 20, 2)
-#define GET_TX_DESC_TX_STBC(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc+16, 22, 2)
-#define GET_TX_DESC_DATA_SHORT(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc+16, 24, 1)
-#define GET_TX_DESC_DATA_BW(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc+16, 25, 1)
-#define GET_TX_DESC_RTS_SHORT(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc+16, 26, 1)
-#define GET_TX_DESC_RTS_BW(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc+16, 27, 1)
-#define GET_TX_DESC_RTS_SC(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc+16, 28, 2)
-#define GET_TX_DESC_RTS_STBC(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc+16, 30, 2)
-
-#define SET_TX_DESC_TX_RATE(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE(__pdesc+20, 0, 6, __val)
-#define SET_TX_DESC_DATA_SHORTGI(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE(__pdesc+20, 6, 1, __val)
-#define SET_TX_DESC_CCX_TAG(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE(__pdesc+20, 7, 1, __val)
-#define SET_TX_DESC_DATA_RATE_FB_LIMIT(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE(__pdesc+20, 8, 5, __val)
-#define SET_TX_DESC_RTS_RATE_FB_LIMIT(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE(__pdesc+20, 13, 4, __val)
-#define SET_TX_DESC_RETRY_LIMIT_ENABLE(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE(__pdesc+20, 17, 1, __val)
-#define SET_TX_DESC_DATA_RETRY_LIMIT(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE(__pdesc+20, 18, 6, __val)
-#define SET_TX_DESC_USB_TXAGG_NUM(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE(__pdesc+20, 24, 8, __val)
-
-#define GET_TX_DESC_TX_RATE(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc+20, 0, 6)
-#define GET_TX_DESC_DATA_SHORTGI(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc+20, 6, 1)
-#define GET_TX_DESC_CCX_TAG(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc+20, 7, 1)
-#define GET_TX_DESC_DATA_RATE_FB_LIMIT(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc+20, 8, 5)
-#define GET_TX_DESC_RTS_RATE_FB_LIMIT(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc+20, 13, 4)
-#define GET_TX_DESC_RETRY_LIMIT_ENABLE(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc+20, 17, 1)
-#define GET_TX_DESC_DATA_RETRY_LIMIT(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc+20, 18, 6)
-#define GET_TX_DESC_USB_TXAGG_NUM(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc+20, 24, 8)
-
-#define SET_TX_DESC_TXAGC_A(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE(__pdesc+24, 0, 5, __val)
-#define SET_TX_DESC_TXAGC_B(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE(__pdesc+24, 5, 5, __val)
-#define SET_TX_DESC_USE_MAX_LEN(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE(__pdesc+24, 10, 1, __val)
-#define SET_TX_DESC_MAX_AGG_NUM(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE(__pdesc+24, 11, 5, __val)
-#define SET_TX_DESC_MCSG1_MAX_LEN(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE(__pdesc+24, 16, 4, __val)
-#define SET_TX_DESC_MCSG2_MAX_LEN(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE(__pdesc+24, 20, 4, __val)
-#define SET_TX_DESC_MCSG3_MAX_LEN(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE(__pdesc+24, 24, 4, __val)
-#define SET_TX_DESC_MCS7_SGI_MAX_LEN(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE(__pdesc+24, 28, 4, __val)
-
-#define GET_TX_DESC_TXAGC_A(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc+24, 0, 5)
-#define GET_TX_DESC_TXAGC_B(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc+24, 5, 5)
-#define GET_TX_DESC_USE_MAX_LEN(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc+24, 10, 1)
-#define GET_TX_DESC_MAX_AGG_NUM(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc+24, 11, 5)
-#define GET_TX_DESC_MCSG1_MAX_LEN(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc+24, 16, 4)
-#define GET_TX_DESC_MCSG2_MAX_LEN(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc+24, 20, 4)
-#define GET_TX_DESC_MCSG3_MAX_LEN(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc+24, 24, 4)
-#define GET_TX_DESC_MCS7_SGI_MAX_LEN(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc+24, 28, 4)
-
-#define SET_TX_DESC_TX_BUFFER_SIZE(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE(__pdesc+28, 0, 16, __val)
-#define SET_TX_DESC_SW_OFFSET30(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE(__pdesc+28, 16, 8, __val)
-#define SET_TX_DESC_SW_OFFSET31(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE(__pdesc+28, 24, 4, __val)
-#define SET_TX_DESC_ANTSEL_C(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE(__pdesc+28, 29, 1, __val)
-#define SET_TX_DESC_NULL_0(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE(__pdesc+28, 30, 1, __val)
-#define SET_TX_DESC_NULL_1(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE(__pdesc+28, 30, 1, __val)
-
-#define GET_TX_DESC_TX_BUFFER_SIZE(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc+28, 0, 16)
-
-#define SET_TX_DESC_TX_BUFFER_ADDRESS(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE(__pdesc+32, 0, 32, __val)
-#define SET_TX_DESC_TX_BUFFER_ADDRESS64(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE(__pdesc+36, 0, 32, __val)
-
-#define GET_TX_DESC_TX_BUFFER_ADDRESS(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc+32, 0, 32)
-#define GET_TX_DESC_TX_BUFFER_ADDRESS64(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc+36, 0, 32)
-
-#define SET_TX_DESC_NEXT_DESC_ADDRESS(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE(__pdesc+40, 0, 32, __val)
-#define SET_TX_DESC_NEXT_DESC_ADDRESS64(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE(__pdesc+44, 0, 32, __val)
-
-#define GET_TX_DESC_NEXT_DESC_ADDRESS(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc+40, 0, 32)
-#define GET_TX_DESC_NEXT_DESC_ADDRESS64(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc+44, 0, 32)
-
-#define GET_RX_DESC_PKT_LEN(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc, 0, 14)
-#define GET_RX_DESC_CRC32(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc, 14, 1)
-#define GET_RX_DESC_ICV(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc, 15, 1)
-#define GET_RX_DESC_DRV_INFO_SIZE(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc, 16, 4)
-#define GET_RX_DESC_SECURITY(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc, 20, 3)
-#define GET_RX_DESC_QOS(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc, 23, 1)
-#define GET_RX_DESC_SHIFT(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc, 24, 2)
-#define GET_RX_DESC_PHYST(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc, 26, 1)
-#define GET_RX_DESC_SWDEC(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc, 27, 1)
-#define GET_RX_DESC_LS(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc, 28, 1)
-#define GET_RX_DESC_FS(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc, 29, 1)
-#define GET_RX_DESC_EOR(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc, 30, 1)
-#define GET_RX_DESC_OWN(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc, 31, 1)
-
-#define SET_RX_DESC_PKT_LEN(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE(__pdesc, 0, 14, __val)
-#define SET_RX_DESC_EOR(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE(__pdesc, 30, 1, __val)
-#define SET_RX_DESC_OWN(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE(__pdesc, 31, 1, __val)
-
-#define GET_RX_DESC_MACID(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc+4, 0, 6)
-#define GET_RX_DESC_PAGGR(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc+4, 14, 1)
-#define GET_RX_DESC_FAGGR(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc+4, 15, 1)
-#define GET_RX_DESC_A1_FIT(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc+4, 16, 4)
-#define GET_RX_DESC_A2_FIT(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc+4, 20, 4)
-#define GET_RX_DESC_PAM(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc+4, 24, 1)
-#define GET_RX_DESC_PWR(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc+4, 25, 1)
-#define GET_RX_DESC_MD(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc+4, 26, 1)
-#define GET_RX_DESC_MF(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc+4, 27, 1)
-#define GET_RX_DESC_TYPE(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc+4, 28, 2)
-#define GET_RX_DESC_MC(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc+4, 30, 1)
-#define GET_RX_DESC_BC(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc+4, 31, 1)
-#define GET_RX_DESC_SEQ(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc+8, 0, 12)
-#define GET_RX_DESC_FRAG(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc+8, 12, 4)
-
-#define GET_RX_DESC_RXMCS(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc+12, 0, 6)
-#define GET_RX_DESC_RXHT(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc+12, 6, 1)
-#define GET_RX_STATUS_DESC_RX_GF(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc+12, 7, 1)
-#define GET_RX_DESC_SPLCP(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc+12, 8, 1)
-#define GET_RX_DESC_BW(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc+12, 9, 1)
-#define GET_RX_DESC_HTC(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc+12, 10, 1)
-#define GET_RX_STATUS_DESC_EOSP(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc+12, 11, 1)
-#define GET_RX_STATUS_DESC_BSSID_FIT(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc+12, 12, 2)
-#define GET_RX_STATUS_DESC_RPT_SEL(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc+12, 14, 2)
-
-#define GET_RX_STATUS_DESC_PATTERN_MATCH(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc+12, 29, 1)
-#define GET_RX_STATUS_DESC_UNICAST_MATCH(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc+12, 30, 1)
-#define GET_RX_STATUS_DESC_MAGIC_MATCH(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc+12, 31, 1)
-
-#define GET_RX_DESC_IV1(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc+16, 0, 32)
-#define GET_RX_DESC_TSFL(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc+20, 0, 32)
-
-#define GET_RX_DESC_BUFF_ADDR(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc+24, 0, 32)
-#define GET_RX_DESC_BUFF_ADDR64(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc+28, 0, 32)
-
-#define SET_RX_DESC_BUFF_ADDR(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE(__pdesc+24, 0, 32, __val)
-#define SET_RX_DESC_BUFF_ADDR64(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE(__pdesc+28, 0, 32, __val)
+static inline void set_tx_desc_pkt_size(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits(__pdesc, __val, GENMASK(15, 0));
+}
+
+static inline void set_tx_desc_offset(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits(__pdesc, __val, GENMASK(23, 16));
+}
+
+static inline void set_tx_desc_bmc(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits(__pdesc, __val, BIT(24));
+}
+
+static inline void set_tx_desc_htc(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits(__pdesc, __val, BIT(25));
+}
+
+static inline void set_tx_desc_last_seg(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits(__pdesc, __val, BIT(26));
+}
+
+static inline void set_tx_desc_first_seg(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits(__pdesc, __val, BIT(27));
+}
+
+static inline void set_tx_desc_linip(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits(__pdesc, __val, BIT(28));
+}
+
+static inline void set_tx_desc_own(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits(__pdesc, __val, BIT(31));
+}
+
+static inline int get_tx_desc_own(__le32 *__pdesc)
+{
+ return le32_get_bits(*(__pdesc), BIT(31));
+}
+
+static inline void set_tx_desc_macid(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits(__pdesc + 1, __val, GENMASK(5, 0));
+}
+
+static inline void set_tx_desc_queue_sel(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits(__pdesc + 1, __val, GENMASK(12, 8));
+}
+
+static inline void set_tx_desc_rate_id(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits(__pdesc + 1, __val, GENMASK(19, 16));
+}
+
+static inline void set_tx_desc_nav_use_hdr(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits(__pdesc + 1, __val, BIT(20));
+}
+
+static inline void set_tx_desc_sec_type(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits(__pdesc + 1, __val, GENMASK(23, 22));
+}
+
+static inline void set_tx_desc_pkt_offset(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits(__pdesc + 1, __val, GENMASK(30, 26));
+}
+
+static inline void set_tx_desc_agg_enable(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits(__pdesc + 2, __val, BIT(12));
+}
+
+static inline void set_tx_desc_rdg_enable(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits(__pdesc + 2, __val, BIT(13));
+}
+
+static inline void set_tx_desc_more_frag(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits(__pdesc + 2, __val, BIT(17));
+}
+
+static inline void set_tx_desc_ampdu_density(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits(__pdesc + 2, __val, GENMASK(22, 20));
+}
+
+static inline void set_tx_desc_antsel_a(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits(__pdesc + 2, __val, BIT(24));
+}
+
+static inline void set_tx_desc_antsel_b(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits(__pdesc + 2, __val, BIT(25));
+}
+
+static inline void set_tx_desc_seq(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits(__pdesc + 3, __val, GENMASK(27, 16));
+}
+
+static inline void set_tx_desc_hwseq_en(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits(__pdesc + 3, __val, BIT(31));
+}
+
+static inline void set_tx_desc_rts_rate(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits(__pdesc + 4, __val, GENMASK(4, 0));
+}
+
+static inline void set_tx_desc_qos(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits(__pdesc + 4, __val, BIT(6));
+}
+
+static inline void set_tx_desc_use_rate(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits(__pdesc + 4, __val, BIT(8));
+}
+
+static inline void set_tx_desc_disable_fb(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits(__pdesc + 4, __val, BIT(10));
+}
+
+static inline void set_tx_desc_cts2self(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits(__pdesc + 4, __val, BIT(11));
+}
+
+static inline void set_tx_desc_rts_enable(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits(__pdesc + 4, __val, BIT(12));
+}
+
+static inline void set_tx_desc_hw_rts_enable(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits(__pdesc + 4, __val, BIT(13));
+}
+
+static inline void set_tx_desc_tx_sub_carrier(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits(__pdesc + 4, __val, GENMASK(21, 20));
+}
+
+static inline void set_tx_desc_tx_stbc(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits(__pdesc + 4, __val, GENMASK(23, 22));
+}
+
+static inline void set_tx_desc_data_bw(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits(__pdesc + 4, __val, BIT(25));
+}
+
+static inline void set_tx_desc_rts_short(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits(__pdesc + 4, __val, BIT(26));
+}
+
+static inline void set_tx_desc_rts_bw(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits(__pdesc + 4, __val, BIT(27));
+}
+
+static inline void set_tx_desc_rts_sc(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits(__pdesc + 4, __val, GENMASK(29, 28));
+}
+
+static inline void set_tx_desc_rts_stbc(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits(__pdesc + 4, __val, GENMASK(31, 30));
+}
+
+static inline void set_tx_desc_tx_rate(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits(__pdesc + 5, __val, GENMASK(5, 0));
+}
+
+static inline void set_tx_desc_data_shortgi(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits(__pdesc + 5, __val, BIT(6));
+}
+
+static inline void set_tx_desc_data_rate_fb_limit(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits(__pdesc + 5, __val, GENMASK(12, 8));
+}
+
+static inline void set_tx_desc_rts_rate_fb_limit(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits(__pdesc + 5, __val, GENMASK(16, 13));
+}
+
+static inline void set_tx_desc_max_agg_num(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits(__pdesc + 6, __val, GENMASK(15, 11));
+}
+
+static inline void set_tx_desc_antsel_c(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits(__pdesc + 7, __val, BIT(29));
+}
+
+static inline void set_tx_desc_tx_buffer_size(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits(__pdesc + 7, __val, GENMASK(15, 0));
+}
+
+static inline int get_tx_desc_tx_buffer_size(__le32 *__pdesc)
+{
+ return le32_get_bits(*(__pdesc + 7), GENMASK(15, 0));
+}
+
+static inline void set_tx_desc_tx_buffer_address(__le32 *__pdesc, u32 __val)
+{
+ *(__pdesc + 8) = cpu_to_le32(__val);
+}
+
+static inline int get_tx_desc_tx_buffer_address(__le32 *__pdesc)
+{
+ return le32_to_cpu(*(__pdesc + 8));
+}
+
+static inline void set_tx_desc_next_desc_address(__le32 *__pdesc, u32 __val)
+{
+ *(__pdesc + 10) = cpu_to_le32(__val);
+}
+
+static inline int get_rx_desc_pkt_len(__le32 *__pdesc)
+{
+ return le32_get_bits(*(__pdesc), GENMASK(13, 0));
+}
+
+static inline int get_rx_desc_crc32(__le32 *__pdesc)
+{
+ return le32_get_bits(*(__pdesc), BIT(14));
+}
+
+static inline int get_rx_desc_icv(__le32 *__pdesc)
+{
+ return le32_get_bits(*(__pdesc), BIT(15));
+}
+
+static inline int get_rx_desc_drv_info_size(__le32 *__pdesc)
+{
+ return le32_get_bits(*(__pdesc), GENMASK(19, 16));
+}
+
+static inline int get_rx_desc_security(__le32 *__pdesc)
+{
+ return le32_get_bits(*(__pdesc), GENMASK(22, 20));
+}
+
+static inline int get_rx_desc_qos(__le32 *__pdesc)
+{
+ return le32_get_bits(*(__pdesc), BIT(23));
+}
+
+static inline int get_rx_desc_shift(__le32 *__pdesc)
+{
+ return le32_get_bits(*(__pdesc), GENMASK(25, 24));
+}
+
+static inline int get_rx_desc_physt(__le32 *__pdesc)
+{
+ return le32_get_bits(*(__pdesc), BIT(26));
+}
+
+static inline int get_rx_desc_swdec(__le32 *__pdesc)
+{
+ return le32_get_bits(*(__pdesc), BIT(27));
+}
+
+static inline int get_rx_desc_ls(__le32 *__pdesc)
+{
+ return le32_get_bits(*(__pdesc), BIT(28));
+}
+
+static inline int get_rx_desc_fs(__le32 *__pdesc)
+{
+ return le32_get_bits(*(__pdesc), BIT(29));
+}
+
+static inline int get_rx_desc_eor(__le32 *__pdesc)
+{
+ return le32_get_bits(*(__pdesc), BIT(30));
+}
+
+static inline int get_rx_desc_own(__le32 *__pdesc)
+{
+ return le32_get_bits(*(__pdesc), BIT(31));
+}
+
+static inline void set_rx_desc_pkt_len(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits(__pdesc, __val, GENMASK(13, 0));
+}
+
+static inline void set_rx_desc_eor(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits(__pdesc, __val, BIT(30));
+}
+
+static inline void set_rx_desc_own(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits(__pdesc, __val, BIT(31));
+}
+
+static inline int get_rx_desc_macid(__le32 *__pdesc)
+{
+ return le32_get_bits(*(__pdesc + 1), GENMASK(5, 0));
+}
+
+static inline int get_rx_desc_paggr(__le32 *__pdesc)
+{
+ return le32_get_bits(*(__pdesc + 1), BIT(14));
+}
+
+static inline int get_rx_desc_faggr(__le32 *__pdesc)
+{
+ return le32_get_bits(*(__pdesc + 1), BIT(15));
+}
+
+static inline int get_rx_desc_a1_fit(__le32 *__pdesc)
+{
+ return le32_get_bits(*(__pdesc + 1), GENMASK(19, 16));
+}
+
+static inline int get_rx_desc_a2_fit(__le32 *__pdesc)
+{
+ return le32_get_bits(*(__pdesc + 1), GENMASK(23, 20));
+}
+
+static inline int get_rx_desc_pam(__le32 *__pdesc)
+{
+ return le32_get_bits(*(__pdesc + 1), BIT(24));
+}
+
+static inline int get_rx_desc_pwr(__le32 *__pdesc)
+{
+ return le32_get_bits(*(__pdesc + 1), BIT(25));
+}
+
+static inline int get_rx_desc_md(__le32 *__pdesc)
+{
+ return le32_get_bits(*(__pdesc + 1), BIT(26));
+}
+
+static inline int get_rx_desc_mf(__le32 *__pdesc)
+{
+ return le32_get_bits(*(__pdesc + 1), BIT(27));
+}
+
+static inline int get_rx_desc_type(__le32 *__pdesc)
+{
+ return le32_get_bits(*(__pdesc + 1), GENMASK(29, 28));
+}
+
+static inline int get_rx_desc_mc(__le32 *__pdesc)
+{
+ return le32_get_bits(*(__pdesc + 1), BIT(30));
+}
+
+static inline int get_rx_desc_bc(__le32 *__pdesc)
+{
+ return le32_get_bits(*(__pdesc + 1), BIT(31));
+}
+
+static inline int get_rx_desc_seq(__le32 *__pdesc)
+{
+ return le32_get_bits(*(__pdesc + 2), GENMASK(11, 0));
+}
+
+static inline int get_rx_desc_frag(__le32 *__pdesc)
+{
+ return le32_get_bits(*(__pdesc + 2), GENMASK(15, 12));
+}
+
+static inline int get_rx_desc_rxmcs(__le32 *__pdesc)
+{
+ return le32_get_bits(*(__pdesc + 3), GENMASK(5, 0));
+}
+
+static inline int get_rx_desc_rxht(__le32 *__pdesc)
+{
+ return le32_get_bits(*(__pdesc + 3), BIT(6));
+}
+
+static inline int get_rx_status_desc_rx_gf(__le32 *__pdesc)
+{
+ return le32_get_bits(*(__pdesc + 3), BIT(7));
+}
+
+static inline int get_rx_desc_splcp(__le32 *__pdesc)
+{
+ return le32_get_bits(*(__pdesc + 3), BIT(8));
+}
+
+static inline int get_rx_desc_bw(__le32 *__pdesc)
+{
+ return le32_get_bits(*(__pdesc + 3), BIT(9));
+}
+
+static inline int get_rx_desc_htc(__le32 *__pdesc)
+{
+ return le32_get_bits(*(__pdesc + 3), BIT(10));
+}
+
+static inline int get_rx_status_desc_eosp(__le32 *__pdesc)
+{
+ return le32_get_bits(*(__pdesc + 3), BIT(11));
+}
+
+static inline int get_rx_status_desc_bssid_fit(__le32 *__pdesc)
+{
+ return le32_get_bits(*(__pdesc + 3), GENMASK(13, 12));
+}
+
+static inline int get_rx_status_desc_rpt_sel(__le32 *__pdesc)
+{
+ return le32_get_bits(*(__pdesc + 3), GENMASK(15, 14));
+}
+
+static inline int get_rx_status_desc_pattern_match(__le32 *__pdesc)
+{
+ return le32_get_bits(*(__pdesc + 3), BIT(29));
+}
+
+static inline int get_rx_status_desc_unicast_match(__le32 *__pdesc)
+{
+ return le32_get_bits(*(__pdesc + 3), BIT(30));
+}
+
+static inline int get_rx_status_desc_magic_match(__le32 *__pdesc)
+{
+ return le32_get_bits(*(__pdesc + 3), BIT(31));
+}
+
+static inline int get_rx_desc_iv1(__le32 *__pdesc)
+{
+ return le32_to_cpu(*(__pdesc + 4));
+}
+
+static inline int get_rx_desc_tsfl(__le32 *__pdesc)
+{
+ return le32_to_cpu(*(__pdesc + 5));
+}
+
+static inline int get_rx_desc_buff_addr(__le32 *__pdesc)
+{
+ return le32_to_cpu(*(__pdesc + 6));
+}
+
+static inline int get_rx_desc_buff_addr64(__le32 *__pdesc)
+{
+ return le32_to_cpu(*(__pdesc + 7));
+}
+
+static inline void set_rx_desc_buff_addr(__le32 *__pdesc, u32 __val)
+{
+ *(__pdesc + 6) = cpu_to_le32(__val);
+}
+
+static inline void set_rx_desc_buff_addr64(__le32 *__pdesc, u32 __val)
+{
+ *(__pdesc + 7) = cpu_to_le32(__val);
+}
/* TX report 2 format in Rx desc */
-#define GET_RX_RPT2_DESC_PKT_LEN(__status) \
- LE_BITS_TO_4BYTE(__status, 0, 9)
-#define GET_RX_RPT2_DESC_MACID_VALID_1(__status) \
- LE_BITS_TO_4BYTE(__status+16, 0, 32)
-#define GET_RX_RPT2_DESC_MACID_VALID_2(__status) \
- LE_BITS_TO_4BYTE(__status+20, 0, 32)
-
-#define SET_EARLYMODE_PKTNUM(__paddr, __value) \
- SET_BITS_TO_LE_4BYTE(__paddr, 0, 4, __value)
-#define SET_EARLYMODE_LEN0(__paddr, __value) \
- SET_BITS_TO_LE_4BYTE(__paddr, 4, 12, __value)
-#define SET_EARLYMODE_LEN1(__paddr, __value) \
- SET_BITS_TO_LE_4BYTE(__paddr, 16, 12, __value)
-#define SET_EARLYMODE_LEN2_1(__paddr, __value) \
- SET_BITS_TO_LE_4BYTE(__paddr, 28, 4, __value)
-#define SET_EARLYMODE_LEN2_2(__paddr, __value) \
- SET_BITS_TO_LE_4BYTE(__paddr+4, 0, 8, __value)
-#define SET_EARLYMODE_LEN3(__paddr, __value) \
- SET_BITS_TO_LE_4BYTE(__paddr+4, 8, 12, __value)
-#define SET_EARLYMODE_LEN4(__paddr, __value) \
- SET_BITS_TO_LE_4BYTE(__paddr+4, 20, 12, __value)
-
-#define CLEAR_PCI_TX_DESC_CONTENT(__pdesc, _size) \
-do { \
- if (_size > TX_DESC_NEXT_DESC_OFFSET) \
- memset(__pdesc, 0, TX_DESC_NEXT_DESC_OFFSET); \
- else \
- memset(__pdesc, 0, _size); \
-} while (0)
+static inline int get_rx_rpt2_desc_pkt_len(__le32 *__status)
+{
+ return le32_get_bits(*(__status), GENMASK(8, 0));
+}
+
+static inline int get_rx_rpt2_desc_macid_valid_1(__le32 *__status)
+{
+ return le32_to_cpu(*(__status + 4));
+}
+
+static inline int get_rx_rpt2_desc_macid_valid_2(__le32 *__status)
+{
+ return le32_to_cpu(*(__status + 5));
+}
+
+static inline void set_earlymode_pktnum(__le32 *__paddr, u32 __value)
+{
+ le32p_replace_bits(__paddr, __value, GENMASK(3, 0));
+}
+
+static inline void set_earlymode_len0(__le32 *__paddr, u32 __value)
+{
+ le32p_replace_bits(__paddr, __value, GENMASK(15, 4));
+}
+
+static inline void set_earlymode_len1(__le32 *__paddr, u32 __value)
+{
+ le32p_replace_bits(__paddr, __value, GENMASK(27, 16));
+}
+
+static inline void set_earlymode_len2_1(__le32 *__paddr, u32 __value)
+{
+ le32p_replace_bits(__paddr, __value, GENMASK(31, 28));
+}
+
+static inline void set_earlymode_len2_2(__le32 *__paddr, u32 __value)
+{
+ le32p_replace_bits(__paddr + 1, __value, GENMASK(7, 0));
+}
+
+static inline void set_earlymode_len3(__le32 *__paddr, u32 __value)
+{
+ le32p_replace_bits(__paddr + 1, __value, GENMASK(19, 8));
+}
+
+static inline void set_earlymode_len4(__le32 *__paddr, u32 __value)
+{
+ le32p_replace_bits(__paddr + 1, __value, GENMASK(31, 20));
+}
+
+static inline void clear_pci_tx_desc_content(__le32 *__pdesc, int _size)
+{
+ if (_size > TX_DESC_NEXT_DESC_OFFSET)
+ memset(__pdesc, 0, TX_DESC_NEXT_DESC_OFFSET);
+ else
+ memset(__pdesc, 0, _size);
+}
#define RTL8188_RX_HAL_IS_CCK_RATE(rxmcs)\
(rxmcs == DESC92C_RATE1M ||\
@@ -520,17 +560,7 @@ do { \
rxmcs == DESC92C_RATE5_5M ||\
rxmcs == DESC92C_RATE11M)
-#define IS_LITTLE_ENDIAN 1
-
-struct phy_rx_agc_info_t {
- #if IS_LITTLE_ENDIAN
- u8 gain:7, trsw:1;
- #else
- u8 trsw:1, gain:7;
- #endif
-};
struct phy_status_rpt {
- struct phy_rx_agc_info_t path_agc[2];
u8 ch_corr[2];
u8 cck_sig_qual_ofdm_pwdb_all;
u8 cck_agc_rpt_ofdm_cfosho_a;
@@ -547,7 +577,7 @@ struct phy_status_rpt {
u8 stream_target_csi[2];
u8 sig_evm;
u8 rsvd_3;
-#if IS_LITTLE_ENDIAN
+#if defined(__LITTLE_ENDIAN)
u8 antsel_rx_keep_2:1; /*ex_intf_flg:1;*/
u8 sgi_en:1;
u8 rxsc:2;
@@ -555,7 +585,7 @@ struct phy_status_rpt {
u8 r_ant_train_en:1;
u8 ant_sel_b:1;
u8 ant_sel:1;
-#else /* _BIG_ENDIAN_ */
+#else /* __BIG_ENDIAN */
u8 ant_sel:1;
u8 ant_sel_b:1;
u8 r_ant_train_en:1;
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/sw.c
index a9c0111444bc..900788e4018c 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/sw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/sw.c
@@ -113,8 +113,6 @@ int rtl92c_init_sw_vars(struct ieee80211_hw *hw)
rtlpriv->psc.inactiveps = rtlpriv->cfg->mod_params->inactiveps;
rtlpriv->psc.swctrl_lps = rtlpriv->cfg->mod_params->swctrl_lps;
rtlpriv->psc.fwctrl_lps = rtlpriv->cfg->mod_params->fwctrl_lps;
- rtlpriv->cfg->mod_params->sw_crypto =
- rtlpriv->cfg->mod_params->sw_crypto;
if (!rtlpriv->psc.inactiveps)
pr_info("rtl8192ce: Power Save off (module option)\n");
if (!rtlpriv->psc.fwctrl_lps)
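The two lines dropped from rtl92c_init_sw_vars() assigned mod_params->sw_crypto to itself, which has no effect; modern compilers flag the pattern (clang via -Wself-assign-field, for example). A toy illustration of the dead statement, not driver code:

/* self_assign.c: the no-op removed above, in isolation.
 * Try: clang -Wself-assign-field -c self_assign.c
 */
struct mod_params {
	int sw_crypto;
};

void init_sw_vars(struct mod_params *p)
{
	p->sw_crypto = p->sw_crypto;	/* dead store: same lvalue on both sides */
}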
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/trx.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/trx.c
index 18a0ab59631a..123dbf0903a1 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/trx.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/trx.c
@@ -251,8 +251,8 @@ static void _rtl92ce_query_rxphystatus(struct ieee80211_hw *hw,
*/
if (i == 0)
pstats->signalquality =
- (u8) (evm & 0xff);
- pstats->rx_mimo_sig_qual[i] = (u8) (evm & 0xff);
+ (u8)(evm & 0xff);
+ pstats->rx_mimo_sig_qual[i] = (u8)(evm & 0xff);
}
}
}
@@ -262,10 +262,10 @@ static void _rtl92ce_query_rxphystatus(struct ieee80211_hw *hw,
*/
if (is_cck_rate)
pstats->signalstrength =
- (u8) (_rtl92ce_signal_scale_mapping(hw, pwdb_all));
+ (u8)(_rtl92ce_signal_scale_mapping(hw, pwdb_all));
else if (rf_rx_num != 0)
pstats->signalstrength =
- (u8) (_rtl92ce_signal_scale_mapping
+ (u8)(_rtl92ce_signal_scale_mapping
(hw, total_rssi /= rf_rx_num));
}
@@ -317,29 +317,30 @@ static void _rtl92ce_translate_rx_signal_stuff(struct ieee80211_hw *hw,
bool rtl92ce_rx_query_desc(struct ieee80211_hw *hw,
struct rtl_stats *stats,
struct ieee80211_rx_status *rx_status,
- u8 *p_desc, struct sk_buff *skb)
+ u8 *p_desc8, struct sk_buff *skb)
{
struct rx_fwinfo_92c *p_drvinfo;
- struct rx_desc_92c *pdesc = (struct rx_desc_92c *)p_desc;
+ struct rx_desc_92c *pdesc = (struct rx_desc_92c *)p_desc8;
struct ieee80211_hdr *hdr;
- u32 phystatus = GET_RX_DESC_PHYST(pdesc);
+ __le32 *p_desc = (__le32 *)p_desc8;
+ u32 phystatus = get_rx_desc_physt(p_desc);
- stats->length = (u16) GET_RX_DESC_PKT_LEN(pdesc);
- stats->rx_drvinfo_size = (u8) GET_RX_DESC_DRV_INFO_SIZE(pdesc) *
+ stats->length = (u16)get_rx_desc_pkt_len(p_desc);
+ stats->rx_drvinfo_size = (u8)get_rx_desc_drv_info_size(p_desc) *
RX_DRV_INFO_SIZE_UNIT;
- stats->rx_bufshift = (u8) (GET_RX_DESC_SHIFT(pdesc) & 0x03);
- stats->icv = (u16) GET_RX_DESC_ICV(pdesc);
- stats->crc = (u16) GET_RX_DESC_CRC32(pdesc);
+ stats->rx_bufshift = (u8)(get_rx_desc_shift(p_desc) & 0x03);
+ stats->icv = (u16)get_rx_desc_icv(p_desc);
+ stats->crc = (u16)get_rx_desc_crc32(p_desc);
stats->hwerror = (stats->crc | stats->icv);
- stats->decrypted = !GET_RX_DESC_SWDEC(pdesc);
- stats->rate = (u8) GET_RX_DESC_RXMCS(pdesc);
- stats->shortpreamble = (u16) GET_RX_DESC_SPLCP(pdesc);
- stats->isampdu = (bool) (GET_RX_DESC_PAGGR(pdesc) == 1);
- stats->isfirst_ampdu = (bool) ((GET_RX_DESC_PAGGR(pdesc) == 1)
- && (GET_RX_DESC_FAGGR(pdesc) == 1));
- stats->timestamp_low = GET_RX_DESC_TSFL(pdesc);
- stats->rx_is40mhzpacket = (bool)GET_RX_DESC_BW(pdesc);
- stats->is_ht = (bool)GET_RX_DESC_RXHT(pdesc);
+ stats->decrypted = !get_rx_desc_swdec(p_desc);
+ stats->rate = (u8)get_rx_desc_rxmcs(p_desc);
+ stats->shortpreamble = (u16)get_rx_desc_splcp(p_desc);
+ stats->isampdu = (bool)(get_rx_desc_paggr(p_desc) == 1);
+ stats->isfirst_ampdu = (bool)((get_rx_desc_paggr(p_desc) == 1) &&
+ (get_rx_desc_faggr(p_desc) == 1));
+ stats->timestamp_low = get_rx_desc_tsfl(p_desc);
+ stats->rx_is40mhzpacket = (bool)get_rx_desc_bw(p_desc);
+ stats->is_ht = (bool)get_rx_desc_rxht(p_desc);
stats->is_cck = RX_HAL_IS_CCK_RATE(pdesc->rxmcs);
@@ -400,7 +401,7 @@ bool rtl92ce_rx_query_desc(struct ieee80211_hw *hw,
}
void rtl92ce_tx_fill_desc(struct ieee80211_hw *hw,
- struct ieee80211_hdr *hdr, u8 *pdesc_tx,
+ struct ieee80211_hdr *hdr, u8 *pdesc8,
u8 *pbd_desc_tx, struct ieee80211_tx_info *info,
struct ieee80211_sta *sta,
struct sk_buff *skb,
@@ -411,7 +412,7 @@ void rtl92ce_tx_fill_desc(struct ieee80211_hw *hw,
struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
bool defaultadapter = true;
- u8 *pdesc = pdesc_tx;
+ __le32 *pdesc = (__le32 *)pdesc8;
u16 seq_number;
__le16 fc = hdr->frame_control;
u8 fw_qsel = _rtl92ce_map_hwqueue_to_fwqueue(skb, hw_queue);
@@ -447,64 +448,64 @@ void rtl92ce_tx_fill_desc(struct ieee80211_hw *hw,
rtl_get_tcb_desc(hw, info, sta, skb, tcb_desc);
- CLEAR_PCI_TX_DESC_CONTENT(pdesc, sizeof(struct tx_desc_92c));
+ clear_pci_tx_desc_content(pdesc, sizeof(struct tx_desc_92c));
if (ieee80211_is_nullfunc(fc) || ieee80211_is_ctl(fc)) {
firstseg = true;
lastseg = true;
}
if (firstseg) {
- SET_TX_DESC_OFFSET(pdesc, USB_HWDESC_HEADER_LEN);
+ set_tx_desc_offset(pdesc, USB_HWDESC_HEADER_LEN);
- SET_TX_DESC_TX_RATE(pdesc, tcb_desc->hw_rate);
+ set_tx_desc_tx_rate(pdesc, tcb_desc->hw_rate);
if (tcb_desc->use_shortgi || tcb_desc->use_shortpreamble)
- SET_TX_DESC_DATA_SHORTGI(pdesc, 1);
+ set_tx_desc_data_shortgi(pdesc, 1);
if (info->flags & IEEE80211_TX_CTL_AMPDU) {
- SET_TX_DESC_AGG_BREAK(pdesc, 1);
- SET_TX_DESC_MAX_AGG_NUM(pdesc, 0x14);
+ set_tx_desc_agg_break(pdesc, 1);
+ set_tx_desc_max_agg_num(pdesc, 0x14);
}
- SET_TX_DESC_SEQ(pdesc, seq_number);
+ set_tx_desc_seq(pdesc, seq_number);
- SET_TX_DESC_RTS_ENABLE(pdesc, ((tcb_desc->rts_enable &&
+ set_tx_desc_rts_enable(pdesc, ((tcb_desc->rts_enable &&
!tcb_desc->
cts_enable) ? 1 : 0));
- SET_TX_DESC_HW_RTS_ENABLE(pdesc,
+ set_tx_desc_hw_rts_enable(pdesc,
((tcb_desc->rts_enable
|| tcb_desc->cts_enable) ? 1 : 0));
- SET_TX_DESC_CTS2SELF(pdesc, ((tcb_desc->cts_enable) ? 1 : 0));
- SET_TX_DESC_RTS_STBC(pdesc, ((tcb_desc->rts_stbc) ? 1 : 0));
+ set_tx_desc_cts2self(pdesc, ((tcb_desc->cts_enable) ? 1 : 0));
+ set_tx_desc_rts_stbc(pdesc, ((tcb_desc->rts_stbc) ? 1 : 0));
- SET_TX_DESC_RTS_RATE(pdesc, tcb_desc->rts_rate);
- SET_TX_DESC_RTS_BW(pdesc, 0);
- SET_TX_DESC_RTS_SC(pdesc, tcb_desc->rts_sc);
- SET_TX_DESC_RTS_SHORT(pdesc,
+ set_tx_desc_rts_rate(pdesc, tcb_desc->rts_rate);
+ set_tx_desc_rts_bw(pdesc, 0);
+ set_tx_desc_rts_sc(pdesc, tcb_desc->rts_sc);
+ set_tx_desc_rts_short(pdesc,
((tcb_desc->rts_rate <= DESC_RATE54M) ?
(tcb_desc->rts_use_shortpreamble ? 1 : 0)
: (tcb_desc->rts_use_shortgi ? 1 : 0)));
if (bw_40) {
if (tcb_desc->packet_bw) {
- SET_TX_DESC_DATA_BW(pdesc, 1);
- SET_TX_DESC_TX_SUB_CARRIER(pdesc, 3);
+ set_tx_desc_data_bw(pdesc, 1);
+ set_tx_desc_tx_sub_carrier(pdesc, 3);
} else {
- SET_TX_DESC_DATA_BW(pdesc, 0);
- SET_TX_DESC_TX_SUB_CARRIER(pdesc,
+ set_tx_desc_data_bw(pdesc, 0);
+ set_tx_desc_tx_sub_carrier(pdesc,
mac->cur_40_prime_sc);
}
} else {
- SET_TX_DESC_DATA_BW(pdesc, 0);
- SET_TX_DESC_TX_SUB_CARRIER(pdesc, 0);
+ set_tx_desc_data_bw(pdesc, 0);
+ set_tx_desc_tx_sub_carrier(pdesc, 0);
}
- SET_TX_DESC_LINIP(pdesc, 0);
- SET_TX_DESC_PKT_SIZE(pdesc, (u16) skb->len);
+ set_tx_desc_linip(pdesc, 0);
+ set_tx_desc_pkt_size(pdesc, (u16)skb->len);
if (sta) {
u8 ampdu_density = sta->ht_cap.ampdu_density;
- SET_TX_DESC_AMPDU_DENSITY(pdesc, ampdu_density);
+ set_tx_desc_ampdu_density(pdesc, ampdu_density);
}
if (info->control.hw_key) {
@@ -515,77 +516,78 @@ void rtl92ce_tx_fill_desc(struct ieee80211_hw *hw,
case WLAN_CIPHER_SUITE_WEP40:
case WLAN_CIPHER_SUITE_WEP104:
case WLAN_CIPHER_SUITE_TKIP:
- SET_TX_DESC_SEC_TYPE(pdesc, 0x1);
+ set_tx_desc_sec_type(pdesc, 0x1);
break;
case WLAN_CIPHER_SUITE_CCMP:
- SET_TX_DESC_SEC_TYPE(pdesc, 0x3);
+ set_tx_desc_sec_type(pdesc, 0x3);
break;
default:
- SET_TX_DESC_SEC_TYPE(pdesc, 0x0);
+ set_tx_desc_sec_type(pdesc, 0x0);
break;
}
}
- SET_TX_DESC_PKT_ID(pdesc, 0);
- SET_TX_DESC_QUEUE_SEL(pdesc, fw_qsel);
+ set_tx_desc_pkt_id(pdesc, 0);
+ set_tx_desc_queue_sel(pdesc, fw_qsel);
- SET_TX_DESC_DATA_RATE_FB_LIMIT(pdesc, 0x1F);
- SET_TX_DESC_RTS_RATE_FB_LIMIT(pdesc, 0xF);
- SET_TX_DESC_DISABLE_FB(pdesc, 0);
- SET_TX_DESC_USE_RATE(pdesc, tcb_desc->use_driver_rate ? 1 : 0);
+ set_tx_desc_data_rate_fb_limit(pdesc, 0x1F);
+ set_tx_desc_rts_rate_fb_limit(pdesc, 0xF);
+ set_tx_desc_disable_fb(pdesc, 0);
+ set_tx_desc_use_rate(pdesc, tcb_desc->use_driver_rate ? 1 : 0);
if (ieee80211_is_data_qos(fc)) {
if (mac->rdg_en) {
RT_TRACE(rtlpriv, COMP_SEND, DBG_TRACE,
"Enable RDG function\n");
- SET_TX_DESC_RDG_ENABLE(pdesc, 1);
- SET_TX_DESC_HTC(pdesc, 1);
+ set_tx_desc_rdg_enable(pdesc, 1);
+ set_tx_desc_htc(pdesc, 1);
}
}
}
rcu_read_unlock();
- SET_TX_DESC_FIRST_SEG(pdesc, (firstseg ? 1 : 0));
- SET_TX_DESC_LAST_SEG(pdesc, (lastseg ? 1 : 0));
+ set_tx_desc_first_seg(pdesc, (firstseg ? 1 : 0));
+ set_tx_desc_last_seg(pdesc, (lastseg ? 1 : 0));
- SET_TX_DESC_TX_BUFFER_SIZE(pdesc, (u16) skb->len);
+ set_tx_desc_tx_buffer_size(pdesc, (u16)skb->len);
- SET_TX_DESC_TX_BUFFER_ADDRESS(pdesc, mapping);
+ set_tx_desc_tx_buffer_address(pdesc, mapping);
if (rtlpriv->dm.useramask) {
- SET_TX_DESC_RATE_ID(pdesc, tcb_desc->ratr_index);
- SET_TX_DESC_MACID(pdesc, tcb_desc->mac_id);
+ set_tx_desc_rate_id(pdesc, tcb_desc->ratr_index);
+ set_tx_desc_macid(pdesc, tcb_desc->mac_id);
} else {
- SET_TX_DESC_RATE_ID(pdesc, 0xC + tcb_desc->ratr_index);
- SET_TX_DESC_MACID(pdesc, tcb_desc->ratr_index);
+ set_tx_desc_rate_id(pdesc, 0xC + tcb_desc->ratr_index);
+ set_tx_desc_macid(pdesc, tcb_desc->ratr_index);
}
if ((!ieee80211_is_data_qos(fc)) && ppsc->fwctrl_lps) {
- SET_TX_DESC_HWSEQ_EN(pdesc, 1);
- SET_TX_DESC_PKT_ID(pdesc, 8);
+ set_tx_desc_hwseq_en(pdesc, 1);
+ set_tx_desc_pkt_id(pdesc, 8);
if (!defaultadapter)
- SET_TX_DESC_QOS(pdesc, 1);
+ set_tx_desc_qos(pdesc, 1);
}
- SET_TX_DESC_MORE_FRAG(pdesc, (lastseg ? 0 : 1));
+ set_tx_desc_more_frag(pdesc, (lastseg ? 0 : 1));
if (is_multicast_ether_addr(ieee80211_get_DA(hdr)) ||
is_broadcast_ether_addr(ieee80211_get_DA(hdr))) {
- SET_TX_DESC_BMC(pdesc, 1);
+ set_tx_desc_bmc(pdesc, 1);
}
RT_TRACE(rtlpriv, COMP_SEND, DBG_TRACE, "\n");
}
void rtl92ce_tx_fill_cmddesc(struct ieee80211_hw *hw,
- u8 *pdesc, bool firstseg,
+ u8 *pdesc8, bool firstseg,
bool lastseg, struct sk_buff *skb)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
u8 fw_queue = QSLT_BEACON;
+ __le32 *pdesc = (__le32 *)pdesc8;
dma_addr_t mapping = pci_map_single(rtlpci->pdev,
skb->data, skb->len,
@@ -599,60 +601,62 @@ void rtl92ce_tx_fill_cmddesc(struct ieee80211_hw *hw,
"DMA mapping error\n");
return;
}
- CLEAR_PCI_TX_DESC_CONTENT(pdesc, TX_DESC_SIZE);
+ clear_pci_tx_desc_content(pdesc, TX_DESC_SIZE);
if (firstseg)
- SET_TX_DESC_OFFSET(pdesc, USB_HWDESC_HEADER_LEN);
+ set_tx_desc_offset(pdesc, USB_HWDESC_HEADER_LEN);
- SET_TX_DESC_TX_RATE(pdesc, DESC_RATE1M);
+ set_tx_desc_tx_rate(pdesc, DESC_RATE1M);
- SET_TX_DESC_SEQ(pdesc, 0);
+ set_tx_desc_seq(pdesc, 0);
- SET_TX_DESC_LINIP(pdesc, 0);
+ set_tx_desc_linip(pdesc, 0);
- SET_TX_DESC_QUEUE_SEL(pdesc, fw_queue);
+ set_tx_desc_queue_sel(pdesc, fw_queue);
- SET_TX_DESC_FIRST_SEG(pdesc, 1);
- SET_TX_DESC_LAST_SEG(pdesc, 1);
+ set_tx_desc_first_seg(pdesc, 1);
+ set_tx_desc_last_seg(pdesc, 1);
- SET_TX_DESC_TX_BUFFER_SIZE(pdesc, (u16) (skb->len));
+ set_tx_desc_tx_buffer_size(pdesc, (u16)(skb->len));
- SET_TX_DESC_TX_BUFFER_ADDRESS(pdesc, mapping);
+ set_tx_desc_tx_buffer_address(pdesc, mapping);
- SET_TX_DESC_RATE_ID(pdesc, 7);
- SET_TX_DESC_MACID(pdesc, 0);
+ set_tx_desc_rate_id(pdesc, 7);
+ set_tx_desc_macid(pdesc, 0);
- SET_TX_DESC_OWN(pdesc, 1);
+ set_tx_desc_own(pdesc, 1);
- SET_TX_DESC_PKT_SIZE(pdesc, (u16) (skb->len));
+ set_tx_desc_pkt_size(pdesc, (u16)(skb->len));
- SET_TX_DESC_FIRST_SEG(pdesc, 1);
- SET_TX_DESC_LAST_SEG(pdesc, 1);
+ set_tx_desc_first_seg(pdesc, 1);
+ set_tx_desc_last_seg(pdesc, 1);
- SET_TX_DESC_OFFSET(pdesc, 0x20);
+ set_tx_desc_offset(pdesc, 0x20);
- SET_TX_DESC_USE_RATE(pdesc, 1);
+ set_tx_desc_use_rate(pdesc, 1);
if (!ieee80211_is_data_qos(fc)) {
- SET_TX_DESC_HWSEQ_EN(pdesc, 1);
- SET_TX_DESC_PKT_ID(pdesc, 8);
+ set_tx_desc_hwseq_en(pdesc, 1);
+ set_tx_desc_pkt_id(pdesc, 8);
}
RT_PRINT_DATA(rtlpriv, COMP_CMD, DBG_LOUD,
"H2C Tx Cmd Content", pdesc, TX_DESC_SIZE);
}
-void rtl92ce_set_desc(struct ieee80211_hw *hw, u8 *pdesc, bool istx,
+void rtl92ce_set_desc(struct ieee80211_hw *hw, u8 *pdesc8, bool istx,
u8 desc_name, u8 *val)
{
+ __le32 *pdesc = (__le32 *)pdesc8;
+
if (istx) {
switch (desc_name) {
case HW_DESC_OWN:
wmb();
- SET_TX_DESC_OWN(pdesc, 1);
+ set_tx_desc_own(pdesc, 1);
break;
case HW_DESC_TX_NEXTDESC_ADDR:
- SET_TX_DESC_NEXT_DESC_ADDRESS(pdesc, *(u32 *) val);
+ set_tx_desc_next_desc_address(pdesc, *(u32 *)val);
break;
default:
WARN_ONCE(true, "rtl8192ce: ERR txdesc :%d not processed\n",
@@ -663,16 +667,16 @@ void rtl92ce_set_desc(struct ieee80211_hw *hw, u8 *pdesc, bool istx,
switch (desc_name) {
case HW_DESC_RXOWN:
wmb();
- SET_RX_DESC_OWN(pdesc, 1);
+ set_rx_desc_own(pdesc, 1);
break;
case HW_DESC_RXBUFF_ADDR:
- SET_RX_DESC_BUFF_ADDR(pdesc, *(u32 *) val);
+ set_rx_desc_buff_addr(pdesc, *(u32 *)val);
break;
case HW_DESC_RXPKT_LEN:
- SET_RX_DESC_PKT_LEN(pdesc, *(u32 *) val);
+ set_rx_desc_pkt_len(pdesc, *(u32 *)val);
break;
case HW_DESC_RXERO:
- SET_RX_DESC_EOR(pdesc, 1);
+ set_rx_desc_eor(pdesc, 1);
break;
default:
WARN_ONCE(true, "rtl8192ce: ERR rxdesc :%d not processed\n",
@@ -682,18 +686,19 @@ void rtl92ce_set_desc(struct ieee80211_hw *hw, u8 *pdesc, bool istx,
}
}
-u64 rtl92ce_get_desc(struct ieee80211_hw *hw, u8 *p_desc,
+u64 rtl92ce_get_desc(struct ieee80211_hw *hw, u8 *p_desc8,
bool istx, u8 desc_name)
{
u32 ret = 0;
+ __le32 *p_desc = (__le32 *)p_desc8;
if (istx) {
switch (desc_name) {
case HW_DESC_OWN:
- ret = GET_TX_DESC_OWN(p_desc);
+ ret = get_tx_desc_own(p_desc);
break;
case HW_DESC_TXBUFF_ADDR:
- ret = GET_TX_DESC_TX_BUFFER_ADDRESS(p_desc);
+ ret = get_tx_desc_tx_buffer_address(p_desc);
break;
default:
WARN_ONCE(true, "rtl8192ce: ERR txdesc :%d not processed\n",
@@ -703,13 +708,13 @@ u64 rtl92ce_get_desc(struct ieee80211_hw *hw, u8 *p_desc,
} else {
switch (desc_name) {
case HW_DESC_OWN:
- ret = GET_RX_DESC_OWN(p_desc);
+ ret = get_rx_desc_own(p_desc);
break;
case HW_DESC_RXPKT_LEN:
- ret = GET_RX_DESC_PKT_LEN(p_desc);
+ ret = get_rx_desc_pkt_len(p_desc);
break;
case HW_DESC_RXBUFF_ADDR:
- ret = GET_RX_DESC_BUFF_ADDR(p_desc);
+ ret = get_rx_desc_buff_addr(p_desc);
break;
default:
WARN_ONCE(true, "rtl8192ce: ERR rxdesc :%d not processed\n",
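On the caller side, trx.c keeps u8 * in the driver interfaces (pdesc8, p_desc8) and casts once to __le32 *, so every field access goes through the typed helpers and the old byte offsets (__pdesc + 4) become word offsets (__pdesc + 1); sparse can then check the endianness annotations. A standalone sketch of that access pattern, with rx_pkt_len()/rx_own() as hypothetical stand-ins for the trx.h getters and the same LE-only helper stand-ins as above:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

typedef uint32_t le32;	/* stand-in for the kernel's __le32 on LE hosts */
#define GENMASK(h, l)  (((~0u) << (l)) & (~0u >> (31 - (h))))

static uint32_t le32_get_bits(le32 w, uint32_t mask)
{
	return (w & mask) >> __builtin_ctz(mask);
}

static int rx_pkt_len(const le32 *desc)	/* bits 13:0 of dword 0 */
{
	return le32_get_bits(desc[0], GENMASK(13, 0));
}

static int rx_own(const le32 *desc)		/* bit 31 of dword 0 */
{
	return (int)le32_get_bits(desc[0], 1u << 31);
}

int main(void)
{
	uint8_t raw[24];			/* descriptor as it arrives: raw bytes */
	le32 dword0 = (1u << 31) | 256;		/* OWN set, pkt_len = 256 */

	memset(raw, 0, sizeof(raw));
	memcpy(raw, &dword0, sizeof(dword0));

	const le32 *desc = (const le32 *)raw;	/* cast once, as in trx.c */
	printf("own=%d pkt_len=%d\n", rx_own(desc), rx_pkt_len(desc));
	return 0;
}

Routing the casts through a single local also explains why rtl92ce_set_desc()/rtl92ce_get_desc() rename their parameters to pdesc8/p_desc8: the __le32 * name stays free for the pointer the helpers actually consume.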
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/trx.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/trx.h
index fb1d4444a52f..709dcac9d84b 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/trx.h
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/trx.h
@@ -14,497 +14,322 @@
#define USB_HWDESC_HEADER_LEN 32
#define CRCLENGTH 4
-/* Define a macro that takes a le32 word, converts it to host ordering,
- * right shifts by a specified count, creates a mask of the specified
- * bit count, and extracts that number of bits.
- */
-
-#define SHIFT_AND_MASK_LE(__pdesc, __shift, __mask) \
- ((le32_to_cpu(*(((__le32 *)(__pdesc)))) >> (__shift)) & \
- BIT_LEN_MASK_32(__mask))
-
-/* Define a macro that clears a bit field in an le32 word and
- * sets the specified value into that bit field. The resulting
- * value remains in le32 ordering; however, it is properly converted
- * to host ordering for the clear and set operations before conversion
- * back to le32.
- */
-
-#define SET_BITS_OFFSET_LE(__pdesc, __shift, __len, __val) \
- (*(__le32 *)(__pdesc) = \
- (cpu_to_le32((le32_to_cpu(*((__le32 *)(__pdesc))) & \
- (~(BIT_OFFSET_LEN_MASK_32((__shift), __len)))) | \
- (((u32)(__val) & BIT_LEN_MASK_32(__len)) << (__shift)))));
-
/* macros to read/write various fields in RX or TX descriptors */
-#define SET_TX_DESC_PKT_SIZE(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc, 0, 16, __val)
-#define SET_TX_DESC_OFFSET(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc, 16, 8, __val)
-#define SET_TX_DESC_BMC(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc, 24, 1, __val)
-#define SET_TX_DESC_HTC(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc, 25, 1, __val)
-#define SET_TX_DESC_LAST_SEG(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc, 26, 1, __val)
-#define SET_TX_DESC_FIRST_SEG(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc, 27, 1, __val)
-#define SET_TX_DESC_LINIP(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc, 28, 1, __val)
-#define SET_TX_DESC_NO_ACM(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc, 29, 1, __val)
-#define SET_TX_DESC_GF(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc, 30, 1, __val)
-#define SET_TX_DESC_OWN(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc, 31, 1, __val)
-
-#define GET_TX_DESC_PKT_SIZE(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc, 0, 16)
-#define GET_TX_DESC_OFFSET(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc, 16, 8)
-#define GET_TX_DESC_BMC(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc, 24, 1)
-#define GET_TX_DESC_HTC(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc, 25, 1)
-#define GET_TX_DESC_LAST_SEG(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc, 26, 1)
-#define GET_TX_DESC_FIRST_SEG(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc, 27, 1)
-#define GET_TX_DESC_LINIP(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc, 28, 1)
-#define GET_TX_DESC_NO_ACM(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc, 29, 1)
-#define GET_TX_DESC_GF(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc, 30, 1)
-#define GET_TX_DESC_OWN(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc, 31, 1)
-
-#define SET_TX_DESC_MACID(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc+4, 0, 5, __val)
-#define SET_TX_DESC_AGG_BREAK(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc+4, 5, 1, __val)
-#define SET_TX_DESC_BK(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc+4, 6, 1, __val)
-#define SET_TX_DESC_RDG_ENABLE(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc+4, 7, 1, __val)
-#define SET_TX_DESC_QUEUE_SEL(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc+4, 8, 5, __val)
-#define SET_TX_DESC_RDG_NAV_EXT(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc+4, 13, 1, __val)
-#define SET_TX_DESC_LSIG_TXOP_EN(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc+4, 14, 1, __val)
-#define SET_TX_DESC_PIFS(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc+4, 15, 1, __val)
-#define SET_TX_DESC_RATE_ID(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc+4, 16, 4, __val)
-#define SET_TX_DESC_NAV_USE_HDR(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc+4, 20, 1, __val)
-#define SET_TX_DESC_EN_DESC_ID(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc+4, 21, 1, __val)
-#define SET_TX_DESC_SEC_TYPE(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc+4, 22, 2, __val)
-#define SET_TX_DESC_PKT_OFFSET(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc+4, 24, 8, __val)
-
-#define GET_TX_DESC_MACID(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+4, 0, 5)
-#define GET_TX_DESC_AGG_ENABLE(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+4, 5, 1)
-#define GET_TX_DESC_AGG_BREAK(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+4, 6, 1)
-#define GET_TX_DESC_RDG_ENABLE(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+4, 7, 1)
-#define GET_TX_DESC_QUEUE_SEL(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+4, 8, 5)
-#define GET_TX_DESC_RDG_NAV_EXT(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+4, 13, 1)
-#define GET_TX_DESC_LSIG_TXOP_EN(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+4, 14, 1)
-#define GET_TX_DESC_PIFS(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+4, 15, 1)
-#define GET_TX_DESC_RATE_ID(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+4, 16, 4)
-#define GET_TX_DESC_NAV_USE_HDR(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+4, 20, 1)
-#define GET_TX_DESC_EN_DESC_ID(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+4, 21, 1)
-#define GET_TX_DESC_SEC_TYPE(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+4, 22, 2)
-#define GET_TX_DESC_PKT_OFFSET(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+4, 24, 8)
-
-#define SET_TX_DESC_RTS_RC(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc+8, 0, 6, __val)
-#define SET_TX_DESC_DATA_RC(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc+8, 6, 6, __val)
-#define SET_TX_DESC_BAR_RTY_TH(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc+8, 14, 2, __val)
-#define SET_TX_DESC_MORE_FRAG(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc+8, 17, 1, __val)
-#define SET_TX_DESC_RAW(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc+8, 18, 1, __val)
-#define SET_TX_DESC_CCX(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc+8, 19, 1, __val)
-#define SET_TX_DESC_AMPDU_DENSITY(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc+8, 20, 3, __val)
-#define SET_TX_DESC_ANTSEL_A(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc+8, 24, 1, __val)
-#define SET_TX_DESC_ANTSEL_B(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc+8, 25, 1, __val)
-#define SET_TX_DESC_TX_ANT_CCK(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc+8, 26, 2, __val)
-#define SET_TX_DESC_TX_ANTL(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc+8, 28, 2, __val)
-#define SET_TX_DESC_TX_ANT_HT(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc+8, 30, 2, __val)
-
-#define GET_TX_DESC_RTS_RC(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+8, 0, 6)
-#define GET_TX_DESC_DATA_RC(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+8, 6, 6)
-#define GET_TX_DESC_BAR_RTY_TH(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+8, 14, 2)
-#define GET_TX_DESC_MORE_FRAG(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+8, 17, 1)
-#define GET_TX_DESC_RAW(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+8, 18, 1)
-#define GET_TX_DESC_CCX(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+8, 19, 1)
-#define GET_TX_DESC_AMPDU_DENSITY(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+8, 20, 3)
-#define GET_TX_DESC_ANTSEL_A(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+8, 24, 1)
-#define GET_TX_DESC_ANTSEL_B(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+8, 25, 1)
-#define GET_TX_DESC_TX_ANT_CCK(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+8, 26, 2)
-#define GET_TX_DESC_TX_ANTL(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+8, 28, 2)
-#define GET_TX_DESC_TX_ANT_HT(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+8, 30, 2)
-
-#define SET_TX_DESC_NEXT_HEAP_PAGE(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc+12, 0, 8, __val)
-#define SET_TX_DESC_TAIL_PAGE(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc+12, 8, 8, __val)
-#define SET_TX_DESC_SEQ(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc+12, 16, 12, __val)
-#define SET_TX_DESC_PKT_ID(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc+12, 28, 4, __val)
-
-#define GET_TX_DESC_NEXT_HEAP_PAGE(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+12, 0, 8)
-#define GET_TX_DESC_TAIL_PAGE(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+12, 8, 8)
-#define GET_TX_DESC_SEQ(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+12, 16, 12)
-#define GET_TX_DESC_PKT_ID(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+12, 28, 4)
-
-#define SET_TX_DESC_RTS_RATE(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc+16, 0, 5, __val)
-#define SET_TX_DESC_AP_DCFE(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc+16, 5, 1, __val)
-#define SET_TX_DESC_QOS(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc+16, 6, 1, __val)
-#define SET_TX_DESC_HWSEQ_EN(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc+16, 7, 1, __val)
-#define SET_TX_DESC_USE_RATE(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc+16, 8, 1, __val)
-#define SET_TX_DESC_DISABLE_RTS_FB(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc+16, 9, 1, __val)
-#define SET_TX_DESC_DISABLE_FB(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc+16, 10, 1, __val)
-#define SET_TX_DESC_CTS2SELF(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc+16, 11, 1, __val)
-#define SET_TX_DESC_RTS_ENABLE(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc+16, 12, 1, __val)
-#define SET_TX_DESC_HW_RTS_ENABLE(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc+16, 13, 1, __val)
-#define SET_TX_DESC_PORT_ID(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc+16, 14, 1, __val)
-#define SET_TX_DESC_WAIT_DCTS(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc+16, 18, 1, __val)
-#define SET_TX_DESC_CTS2AP_EN(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc+16, 19, 1, __val)
-#define SET_TX_DESC_TX_SUB_CARRIER(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc+16, 20, 2, __val)
-#define SET_TX_DESC_TX_STBC(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc+16, 22, 2, __val)
-#define SET_TX_DESC_DATA_SHORT(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc+16, 24, 1, __val)
-#define SET_TX_DESC_DATA_BW(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc+16, 25, 1, __val)
-#define SET_TX_DESC_RTS_SHORT(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc+16, 26, 1, __val)
-#define SET_TX_DESC_RTS_BW(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc+16, 27, 1, __val)
-#define SET_TX_DESC_RTS_SC(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc+16, 28, 2, __val)
-#define SET_TX_DESC_RTS_STBC(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc+16, 30, 2, __val)
-
-#define GET_TX_DESC_RTS_RATE(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+16, 0, 5)
-#define GET_TX_DESC_AP_DCFE(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+16, 5, 1)
-#define GET_TX_DESC_QOS(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+16, 6, 1)
-#define GET_TX_DESC_HWSEQ_EN(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+16, 7, 1)
-#define GET_TX_DESC_USE_RATE(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+16, 8, 1)
-#define GET_TX_DESC_DISABLE_RTS_FB(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+16, 9, 1)
-#define GET_TX_DESC_DISABLE_FB(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+16, 10, 1)
-#define GET_TX_DESC_CTS2SELF(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+16, 11, 1)
-#define GET_TX_DESC_RTS_ENABLE(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+16, 12, 1)
-#define GET_TX_DESC_HW_RTS_ENABLE(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+16, 13, 1)
-#define GET_TX_DESC_PORT_ID(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+16, 14, 1)
-#define GET_TX_DESC_WAIT_DCTS(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+16, 18, 1)
-#define GET_TX_DESC_CTS2AP_EN(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+16, 19, 1)
-#define GET_TX_DESC_TX_SUB_CARRIER(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+16, 20, 2)
-#define GET_TX_DESC_TX_STBC(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+16, 22, 2)
-#define GET_TX_DESC_DATA_SHORT(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+16, 24, 1)
-#define GET_TX_DESC_DATA_BW(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+16, 25, 1)
-#define GET_TX_DESC_RTS_SHORT(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+16, 26, 1)
-#define GET_TX_DESC_RTS_BW(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+16, 27, 1)
-#define GET_TX_DESC_RTS_SC(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+16, 28, 2)
-#define GET_TX_DESC_RTS_STBC(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+16, 30, 2)
-
-#define SET_TX_DESC_TX_RATE(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc+20, 0, 6, __val)
-#define SET_TX_DESC_DATA_SHORTGI(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc+20, 6, 1, __val)
-#define SET_TX_DESC_CCX_TAG(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc+20, 7, 1, __val)
-#define SET_TX_DESC_DATA_RATE_FB_LIMIT(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc+20, 8, 5, __val)
-#define SET_TX_DESC_RTS_RATE_FB_LIMIT(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc+20, 13, 4, __val)
-#define SET_TX_DESC_RETRY_LIMIT_ENABLE(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc+20, 17, 1, __val)
-#define SET_TX_DESC_DATA_RETRY_LIMIT(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc+20, 18, 6, __val)
-#define SET_TX_DESC_USB_TXAGG_NUM(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc+20, 24, 8, __val)
-
-#define GET_TX_DESC_TX_RATE(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+20, 0, 6)
-#define GET_TX_DESC_DATA_SHORTGI(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+20, 6, 1)
-#define GET_TX_DESC_CCX_TAG(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+20, 7, 1)
-#define GET_TX_DESC_DATA_RATE_FB_LIMIT(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+20, 8, 5)
-#define GET_TX_DESC_RTS_RATE_FB_LIMIT(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+20, 13, 4)
-#define GET_TX_DESC_RETRY_LIMIT_ENABLE(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+20, 17, 1)
-#define GET_TX_DESC_DATA_RETRY_LIMIT(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+20, 18, 6)
-#define GET_TX_DESC_USB_TXAGG_NUM(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+20, 24, 8)
-
-#define SET_TX_DESC_TXAGC_A(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc+24, 0, 5, __val)
-#define SET_TX_DESC_TXAGC_B(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc+24, 5, 5, __val)
-#define SET_TX_DESC_USE_MAX_LEN(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc+24, 10, 1, __val)
-#define SET_TX_DESC_MAX_AGG_NUM(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc+24, 11, 5, __val)
-#define SET_TX_DESC_MCSG1_MAX_LEN(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc+24, 16, 4, __val)
-#define SET_TX_DESC_MCSG2_MAX_LEN(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc+24, 20, 4, __val)
-#define SET_TX_DESC_MCSG3_MAX_LEN(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc+24, 24, 4, __val)
-#define SET_TX_DESC_MCS7_SGI_MAX_LEN(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc+24, 28, 4, __val)
-
-#define GET_TX_DESC_TXAGC_A(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+24, 0, 5)
-#define GET_TX_DESC_TXAGC_B(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+24, 5, 5)
-#define GET_TX_DESC_USE_MAX_LEN(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+24, 10, 1)
-#define GET_TX_DESC_MAX_AGG_NUM(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+24, 11, 5)
-#define GET_TX_DESC_MCSG1_MAX_LEN(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+24, 16, 4)
-#define GET_TX_DESC_MCSG2_MAX_LEN(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+24, 20, 4)
-#define GET_TX_DESC_MCSG3_MAX_LEN(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+24, 24, 4)
-#define GET_TX_DESC_MCS7_SGI_MAX_LEN(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+24, 28, 4)
-
-#define SET_TX_DESC_TX_BUFFER_SIZE(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc+28, 0, 16, __val)
-#define SET_TX_DESC_MCSG4_MAX_LEN(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc+28, 16, 4, __val)
-#define SET_TX_DESC_MCSG5_MAX_LEN(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc+28, 20, 4, __val)
-#define SET_TX_DESC_MCSG6_MAX_LEN(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc+28, 24, 4, __val)
-#define SET_TX_DESC_MCS15_SGI_MAX_LEN(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc+28, 28, 4, __val)
-
-#define GET_TX_DESC_TX_BUFFER_SIZE(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+28, 0, 16)
-#define GET_TX_DESC_MCSG4_MAX_LEN(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+28, 16, 4)
-#define GET_TX_DESC_MCSG5_MAX_LEN(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+28, 20, 4)
-#define GET_TX_DESC_MCSG6_MAX_LEN(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+28, 24, 4)
-#define GET_TX_DESC_MCS15_SGI_MAX_LEN(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+28, 28, 4)
-
-#define SET_TX_DESC_TX_BUFFER_ADDRESS(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc+32, 0, 32, __val)
-#define SET_TX_DESC_TX_BUFFER_ADDRESS64(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc+36, 0, 32, __val)
-
-#define GET_TX_DESC_TX_BUFFER_ADDRESS(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+32, 0, 32)
-#define GET_TX_DESC_TX_BUFFER_ADDRESS64(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+36, 0, 32)
-
-#define SET_TX_DESC_NEXT_DESC_ADDRESS(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc+40, 0, 32, __val)
-#define SET_TX_DESC_NEXT_DESC_ADDRESS64(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc+44, 0, 32, __val)
-
-#define GET_TX_DESC_NEXT_DESC_ADDRESS(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+40, 0, 32)
-#define GET_TX_DESC_NEXT_DESC_ADDRESS64(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+44, 0, 32)
-
-#define GET_RX_DESC_PKT_LEN(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc, 0, 14)
-#define GET_RX_DESC_CRC32(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc, 14, 1)
-#define GET_RX_DESC_ICV(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc, 15, 1)
-#define GET_RX_DESC_DRV_INFO_SIZE(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc, 16, 4)
-#define GET_RX_DESC_SECURITY(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc, 20, 3)
-#define GET_RX_DESC_QOS(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc, 23, 1)
-#define GET_RX_DESC_SHIFT(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc, 24, 2)
-#define GET_RX_DESC_PHYST(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc, 26, 1)
-#define GET_RX_DESC_SWDEC(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc, 27, 1)
-#define GET_RX_DESC_LS(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc, 28, 1)
-#define GET_RX_DESC_FS(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc, 29, 1)
-#define GET_RX_DESC_EOR(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc, 30, 1)
-#define GET_RX_DESC_OWN(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc, 31, 1)
-
-#define SET_RX_DESC_PKT_LEN(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc, 0, 14, __val)
-#define SET_RX_DESC_EOR(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc, 30, 1, __val)
-#define SET_RX_DESC_OWN(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc, 31, 1, __val)
-
-#define GET_RX_DESC_MACID(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+4, 0, 5)
-#define GET_RX_DESC_TID(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+4, 5, 4)
-#define GET_RX_DESC_HWRSVD(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+4, 9, 5)
-#define GET_RX_DESC_PAGGR(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+4, 14, 1)
-#define GET_RX_DESC_FAGGR(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+4, 15, 1)
-#define GET_RX_DESC_A1_FIT(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+4, 16, 4)
-#define GET_RX_DESC_A2_FIT(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+4, 20, 4)
-#define GET_RX_DESC_PAM(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+4, 24, 1)
-#define GET_RX_DESC_PWR(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+4, 25, 1)
-#define GET_RX_DESC_MD(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+4, 26, 1)
-#define GET_RX_DESC_MF(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+4, 27, 1)
-#define GET_RX_DESC_TYPE(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+4, 28, 2)
-#define GET_RX_DESC_MC(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+4, 30, 1)
-#define GET_RX_DESC_BC(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+4, 31, 1)
-#define GET_RX_DESC_SEQ(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+8, 0, 12)
-#define GET_RX_DESC_FRAG(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+8, 12, 4)
-#define GET_RX_DESC_NEXT_PKT_LEN(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+8, 16, 14)
-#define GET_RX_DESC_NEXT_IND(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+8, 30, 1)
-#define GET_RX_DESC_RSVD(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+8, 31, 1)
-
-#define GET_RX_DESC_RXMCS(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+12, 0, 6)
-#define GET_RX_DESC_RXHT(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+12, 6, 1)
-#define GET_RX_DESC_SPLCP(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+12, 8, 1)
-#define GET_RX_DESC_BW(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+12, 9, 1)
-#define GET_RX_DESC_HTC(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+12, 10, 1)
-#define GET_RX_DESC_HWPC_ERR(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+12, 14, 1)
-#define GET_RX_DESC_HWPC_IND(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+12, 15, 1)
-#define GET_RX_DESC_IV0(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+12, 16, 16)
-
-#define GET_RX_DESC_IV1(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+16, 0, 32)
-#define GET_RX_DESC_TSFL(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+20, 0, 32)
-
-#define GET_RX_DESC_BUFF_ADDR(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+24, 0, 32)
-#define GET_RX_DESC_BUFF_ADDR64(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+28, 0, 32)
-
-#define SET_RX_DESC_BUFF_ADDR(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc+24, 0, 32, __val)
-#define SET_RX_DESC_BUFF_ADDR64(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc+28, 0, 32, __val)
-
-#define CLEAR_PCI_TX_DESC_CONTENT(__pdesc, _size) \
- memset(__pdesc, 0, min_t(size_t, _size, TX_DESC_NEXT_DESC_OFFSET))
+static inline void set_tx_desc_pkt_size(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits(__pdesc, __val, GENMASK(15, 0));
+}
+
+static inline void set_tx_desc_offset(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits(__pdesc, __val, GENMASK(23, 16));
+}
+
+static inline void set_tx_desc_bmc(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits(__pdesc, __val, BIT(24));
+}
+
+static inline void set_tx_desc_htc(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits(__pdesc, __val, BIT(25));
+}
+
+static inline void set_tx_desc_last_seg(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits(__pdesc, __val, BIT(26));
+}
+
+static inline void set_tx_desc_first_seg(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits(__pdesc, __val, BIT(27));
+}
+
+static inline void set_tx_desc_linip(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits(__pdesc, __val, BIT(28));
+}
+
+static inline void set_tx_desc_own(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits(__pdesc, __val, BIT(31));
+}
+
+static inline int get_tx_desc_own(__le32 *__pdesc)
+{
+ return le32_get_bits(*(__pdesc), BIT(31));
+}
+
+static inline void set_tx_desc_macid(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits((__pdesc + 1), __val, GENMASK(4, 0));
+}
+
+static inline void set_tx_desc_agg_break(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits((__pdesc + 1), __val, BIT(5));
+}
+
+static inline void set_tx_desc_rdg_enable(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits((__pdesc + 1), __val, BIT(7));
+}
+
+static inline void set_tx_desc_queue_sel(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits((__pdesc + 1), __val, GENMASK(12, 8));
+}
+
+static inline void set_tx_desc_rate_id(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits((__pdesc + 1), __val, GENMASK(19, 16));
+}
+
+static inline void set_tx_desc_sec_type(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits((__pdesc + 1), __val, GENMASK(23, 22));
+}
+
+static inline void set_tx_desc_more_frag(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits((__pdesc + 2), __val, BIT(17));
+}
+
+static inline void set_tx_desc_ampdu_density(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits((__pdesc + 2), __val, GENMASK(22, 20));
+}
+
+static inline void set_tx_desc_seq(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits((__pdesc + 3), __val, GENMASK(27, 16));
+}
+
+static inline void set_tx_desc_pkt_id(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits((__pdesc + 3), __val, GENMASK(31, 28));
+}
+
+static inline void set_tx_desc_rts_rate(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits((__pdesc + 4), __val, GENMASK(4, 0));
+}
+
+static inline void set_tx_desc_qos(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits((__pdesc + 4), __val, BIT(6));
+}
+
+static inline void set_tx_desc_hwseq_en(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits((__pdesc + 4), __val, BIT(7));
+}
+
+static inline void set_tx_desc_use_rate(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits((__pdesc + 4), __val, BIT(8));
+}
+
+static inline void set_tx_desc_disable_fb(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits((__pdesc + 4), __val, BIT(10));
+}
+
+static inline void set_tx_desc_cts2self(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits((__pdesc + 4), __val, BIT(11));
+}
+
+static inline void set_tx_desc_rts_enable(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits((__pdesc + 4), __val, BIT(12));
+}
+
+static inline void set_tx_desc_hw_rts_enable(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits((__pdesc + 4), __val, BIT(13));
+}
+
+static inline void set_tx_desc_tx_sub_carrier(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits((__pdesc + 4), __val, GENMASK(21, 20));
+}
+
+static inline void set_tx_desc_data_bw(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits((__pdesc + 4), __val, BIT(25));
+}
+
+static inline void set_tx_desc_rts_short(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits((__pdesc + 4), __val, BIT(26));
+}
+
+static inline void set_tx_desc_rts_bw(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits((__pdesc + 4), __val, BIT(27));
+}
+
+static inline void set_tx_desc_rts_sc(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits((__pdesc + 4), __val, GENMASK(29, 28));
+}
+
+static inline void set_tx_desc_rts_stbc(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits((__pdesc + 4), __val, GENMASK(31, 30));
+}
+
+static inline void set_tx_desc_tx_rate(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits((__pdesc + 5), __val, GENMASK(5, 0));
+}
+
+static inline void set_tx_desc_data_shortgi(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits((__pdesc + 5), __val, BIT(6));
+}
+
+static inline void set_tx_desc_data_rate_fb_limit(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits((__pdesc + 5), __val, GENMASK(12, 8));
+}
+
+static inline void set_tx_desc_rts_rate_fb_limit(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits((__pdesc + 5), __val, GENMASK(16, 13));
+}
+
+static inline void set_tx_desc_max_agg_num(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits((__pdesc + 6), __val, GENMASK(15, 11));
+}
+
+static inline void set_tx_desc_tx_buffer_size(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits((__pdesc + 7), __val, GENMASK(15, 0));
+}
+
+static inline void set_tx_desc_tx_buffer_address(__le32 *__pdesc, u32 __val)
+{
+ *(__pdesc + 8) = cpu_to_le32(__val);
+}
+
+static inline u32 get_tx_desc_tx_buffer_address(__le32 *__pdesc)
+{
+ return le32_to_cpu(*(__pdesc + 8));
+}
+
+static inline void set_tx_desc_next_desc_address(__le32 *__pdesc, u32 __val)
+{
+ *(__pdesc + 10) = cpu_to_le32(__val);
+}
+
+static inline int get_rx_desc_pkt_len(__le32 *__pdesc)
+{
+ return le32_get_bits(*(__pdesc), GENMASK(13, 0));
+}
+
+static inline int get_rx_desc_crc32(__le32 *__pdesc)
+{
+ return le32_get_bits(*(__pdesc), BIT(14));
+}
+
+static inline int get_rx_desc_icv(__le32 *__pdesc)
+{
+ return le32_get_bits(*(__pdesc), BIT(15));
+}
+
+static inline int get_rx_desc_drv_info_size(__le32 *__pdesc)
+{
+ return le32_get_bits(*(__pdesc), GENMASK(19, 16));
+}
+
+static inline int get_rx_desc_shift(__le32 *__pdesc)
+{
+ return le32_get_bits(*(__pdesc), GENMASK(25, 24));
+}
+
+static inline int get_rx_desc_physt(__le32 *__pdesc)
+{
+ return le32_get_bits(*(__pdesc), BIT(26));
+}
+
+static inline int get_rx_desc_swdec(__le32 *__pdesc)
+{
+ return le32_get_bits(*(__pdesc), BIT(27));
+}
+
+static inline int get_rx_desc_own(__le32 *__pdesc)
+{
+ return le32_get_bits(*(__pdesc), BIT(31));
+}
+
+static inline void set_rx_desc_pkt_len(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits(__pdesc, __val, GENMASK(13, 0));
+}
+
+static inline void set_rx_desc_eor(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits(__pdesc, __val, BIT(30));
+}
+
+static inline void set_rx_desc_own(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits(__pdesc, __val, BIT(31));
+}
+
+static inline int get_rx_desc_paggr(__le32 *__pdesc)
+{
+ return le32_get_bits(*((__pdesc + 1)), BIT(14));
+}
+
+static inline int get_rx_desc_faggr(__le32 *__pdesc)
+{
+ return le32_get_bits(*((__pdesc + 1)), BIT(15));
+}
+
+static inline int get_rx_desc_rxmcs(__le32 *__pdesc)
+{
+ return le32_get_bits(*((__pdesc + 3)), GENMASK(5, 0));
+}
+
+static inline int get_rx_desc_rxht(__le32 *__pdesc)
+{
+ return le32_get_bits(*((__pdesc + 3)), BIT(6));
+}
+
+static inline int get_rx_desc_splcp(__le32 *__pdesc)
+{
+ return le32_get_bits(*((__pdesc + 3)), BIT(8));
+}
+
+static inline int get_rx_desc_bw(__le32 *__pdesc)
+{
+ return le32_get_bits(*((__pdesc + 3)), BIT(9));
+}
+
+static inline u32 get_rx_desc_tsfl(__le32 *__pdesc)
+{
+ return le32_to_cpu(*(__pdesc + 5));
+}
+
+static inline u32 get_rx_desc_buff_addr(__le32 *__pdesc)
+{
+ return le32_to_cpu(*(__pdesc + 6));
+}
+
+static inline void set_rx_desc_buff_addr(__le32 *__pdesc, u32 __val)
+{
+ *(__pdesc + 6) = cpu_to_le32(__val);
+}
+
+static inline void clear_pci_tx_desc_content(__le32 *__pdesc, int _size)
+{
+ memset(__pdesc, 0, min_t(size_t, _size, TX_DESC_NEXT_DESC_OFFSET));
+}
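
For reference, the le32p_replace_bits()/le32_get_bits() helpers from <linux/bitfield.h> used above do a masked read-modify-write on a little-endian dword, deriving the field's shift from the mask itself, which is what lets the old SHIFT_AND_MASK_LE/SET_BITS_OFFSET_LE pairs collapse into a single helper per direction. A minimal userspace sketch of the same idea (replace_bits() is a local stand-in, not the kernel API, and it skips the byte swap the kernel helper performs on big-endian hosts):

    #include <stdint.h>
    #include <stdio.h>

    #define GENMASK32(h, l) ((~0u << (l)) & (~0u >> (31 - (h))))

    /* stand-in for le32p_replace_bits(): insert val into the field
     * described by mask, preserving all other bits; the shift comes
     * from the mask's lowest set bit (gcc/clang builtin assumed)
     */
    static void replace_bits(uint32_t *p, uint32_t val, uint32_t mask)
    {
            unsigned int shift = __builtin_ctz(mask);

            *p = (*p & ~mask) | ((val << shift) & mask);
    }

    int main(void)
    {
            uint32_t desc0 = 0xdeadbeef;

            /* same field as set_tx_desc_pkt_size(): bits 15..0 */
            replace_bits(&desc0, 0x40, GENMASK32(15, 0));
            printf("0x%08x\n", desc0);      /* prints 0xdead0040 */
            return 0;
    }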
struct rx_fwinfo_92c {
u8 gain_trsw[4];
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/sw.c
index c1c34dca39d2..ab3e4aebad39 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/sw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/sw.c
@@ -39,8 +39,6 @@ static int rtl92cu_init_sw_vars(struct ieee80211_hw *hw)
rtlpriv->dm.dm_flag = 0;
rtlpriv->dm.disable_framebursting = false;
rtlpriv->dm.thermalvalue = 0;
- rtlpriv->cfg->mod_params->sw_crypto =
- rtlpriv->cfg->mod_params->sw_crypto;
/* for firmware buf */
rtlpriv->rtlhal.pfirmware = vzalloc(0x4000);
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/sw.c
index d1d84e7d47a4..1c7ee569f4bf 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/sw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/sw.c
@@ -161,8 +161,6 @@ static int rtl92s_init_sw_vars(struct ieee80211_hw *hw)
rtlpriv->psc.inactiveps = rtlpriv->cfg->mod_params->inactiveps;
rtlpriv->psc.swctrl_lps = rtlpriv->cfg->mod_params->swctrl_lps;
rtlpriv->psc.fwctrl_lps = rtlpriv->cfg->mod_params->fwctrl_lps;
- rtlpriv->cfg->mod_params->sw_crypto =
- rtlpriv->cfg->mod_params->sw_crypto;
if (!rtlpriv->psc.inactiveps)
pr_info("Power Save off (module option)\n");
if (!rtlpriv->psc.fwctrl_lps)
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/sw.c
index 4b370410c83c..5702ac6deebf 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/sw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/sw.c
@@ -129,10 +129,6 @@ int rtl8723e_init_sw_vars(struct ieee80211_hw *hw)
rtlpriv->psc.swctrl_lps = rtlpriv->cfg->mod_params->swctrl_lps;
rtlpriv->psc.fwctrl_lps = rtlpriv->cfg->mod_params->fwctrl_lps;
rtlpci->msi_support = rtlpriv->cfg->mod_params->msi_support;
- rtlpriv->cfg->mod_params->sw_crypto =
- rtlpriv->cfg->mod_params->sw_crypto;
- rtlpriv->cfg->mod_params->disable_watchdog =
- rtlpriv->cfg->mod_params->disable_watchdog;
if (rtlpriv->cfg->mod_params->disable_watchdog)
pr_info("watchdog disabled\n");
rtlpriv->psc.reg_fwctrl_lps = 3;
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/sw.c
index 00e6254bf82b..3c8528f0ecb3 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/sw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/sw.c
@@ -128,10 +128,6 @@ int rtl8723be_init_sw_vars(struct ieee80211_hw *hw)
rtlpriv->psc.swctrl_lps = rtlpriv->cfg->mod_params->swctrl_lps;
rtlpriv->psc.fwctrl_lps = rtlpriv->cfg->mod_params->fwctrl_lps;
rtlpci->msi_support = rtlpriv->cfg->mod_params->msi_support;
- rtlpriv->cfg->mod_params->sw_crypto =
- rtlpriv->cfg->mod_params->sw_crypto;
- rtlpriv->cfg->mod_params->disable_watchdog =
- rtlpriv->cfg->mod_params->disable_watchdog;
if (rtlpriv->cfg->mod_params->disable_watchdog)
pr_info("watchdog disabled\n");
rtlpriv->psc.reg_fwctrl_lps = 2;
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/sw.c
index eec7c4ecf3ad..3def6a2b3450 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/sw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/sw.c
@@ -145,10 +145,6 @@ int rtl8821ae_init_sw_vars(struct ieee80211_hw *hw)
rtlpriv->psc.fwctrl_lps = rtlpriv->cfg->mod_params->fwctrl_lps;
rtlpci->msi_support = rtlpriv->cfg->mod_params->msi_support;
rtlpci->int_clear = rtlpriv->cfg->mod_params->int_clear;
- rtlpriv->cfg->mod_params->sw_crypto =
- rtlpriv->cfg->mod_params->sw_crypto;
- rtlpriv->cfg->mod_params->disable_watchdog =
- rtlpriv->cfg->mod_params->disable_watchdog;
if (rtlpriv->cfg->mod_params->disable_watchdog)
pr_info("watchdog disabled\n");
rtlpriv->psc.reg_fwctrl_lps = 2;
diff --git a/drivers/net/wireless/realtek/rtw88/Makefile b/drivers/net/wireless/realtek/rtw88/Makefile
index e0bfefd154af..77edee2df8b8 100644
--- a/drivers/net/wireless/realtek/rtw88/Makefile
+++ b/drivers/net/wireless/realtek/rtw88/Makefile
@@ -9,6 +9,7 @@ rtw88-y += main.o \
rx.o \
mac.o \
phy.o \
+ coex.o \
efuse.o \
fw.o \
ps.o \
diff --git a/drivers/net/wireless/realtek/rtw88/coex.c b/drivers/net/wireless/realtek/rtw88/coex.c
new file mode 100644
index 000000000000..4577fceddc5e
--- /dev/null
+++ b/drivers/net/wireless/realtek/rtw88/coex.c
@@ -0,0 +1,2507 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/* Copyright(c) 2018-2019 Realtek Corporation
+ */
+
+#include "main.h"
+#include "coex.h"
+#include "fw.h"
+#include "ps.h"
+#include "debug.h"
+#include "reg.h"
+
+static u8 rtw_coex_next_rssi_state(struct rtw_dev *rtwdev, u8 pre_state,
+ u8 rssi, u8 rssi_thresh)
+{
+ struct rtw_chip_info *chip = rtwdev->chip;
+ u8 tol = chip->rssi_tolerance;
+ u8 next_state;
+
+ if (pre_state == COEX_RSSI_STATE_LOW ||
+ pre_state == COEX_RSSI_STATE_STAY_LOW) {
+ if (rssi >= (rssi_thresh + tol))
+ next_state = COEX_RSSI_STATE_HIGH;
+ else
+ next_state = COEX_RSSI_STATE_STAY_LOW;
+ } else {
+ if (rssi < rssi_thresh)
+ next_state = COEX_RSSI_STATE_LOW;
+ else
+ next_state = COEX_RSSI_STATE_STAY_HIGH;
+ }
+
+ return next_state;
+}
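
rtw_coex_next_rssi_state() implements two-threshold hysteresis: a LOW-side state only climbs once the RSSI clears the threshold plus the chip's tolerance, while a HIGH-side state only falls once the RSSI drops strictly below the threshold, so readings that hover around the threshold cannot make the state flap. A standalone trace of the rule (the enum names and the tolerance of 2 are local stand-ins for the COEX_RSSI_STATE_* values and chip->rssi_tolerance):

    #include <stdio.h>

    enum { LOW, STAY_LOW, HIGH, STAY_HIGH };

    /* same transition rule as rtw_coex_next_rssi_state() */
    static int next_state(int pre, int rssi, int thresh)
    {
            int tol = 2;    /* stand-in for chip->rssi_tolerance */

            if (pre == LOW || pre == STAY_LOW)
                    return rssi >= thresh + tol ? HIGH : STAY_LOW;
            return rssi < thresh ? LOW : STAY_HIGH;
    }

    int main(void)
    {
            int rssi[] = { 40, 41, 42, 41, 39 };
            int s = STAY_LOW;
            unsigned int i;

            /* threshold 40: only 42 (>= 40 + 2) lifts the state high,
             * and it stays high until a reading drops below 40
             */
            for (i = 0; i < 5; i++) {
                    s = next_state(s, rssi[i], 40);
                    printf("rssi=%d -> state=%d\n", rssi[i], s);
            }
            return 0;
    }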
+
+static void rtw_coex_limited_tx(struct rtw_dev *rtwdev,
+ bool tx_limit_en, bool ampdu_limit_en)
+{
+ struct rtw_chip_info *chip = rtwdev->chip;
+ struct rtw_coex *coex = &rtwdev->coex;
+ struct rtw_coex_stat *coex_stat = &coex->stat;
+ bool wifi_under_b_mode = false;
+
+ if (!chip->scbd_support)
+ return;
+
+ /* force max tx retry limit = 8 */
+ if (coex_stat->wl_tx_limit_en == tx_limit_en &&
+ coex_stat->wl_ampdu_limit_en == ampdu_limit_en)
+ return;
+
+ if (!coex_stat->wl_tx_limit_en) {
+ coex_stat->darfrc = rtw_read32(rtwdev, REG_DARFRC);
+ coex_stat->darfrch = rtw_read32(rtwdev, REG_DARFRCH);
+ coex_stat->retry_limit = rtw_read16(rtwdev, REG_RETRY_LIMIT);
+ }
+
+ if (!coex_stat->wl_ampdu_limit_en)
+ coex_stat->ampdu_max_time =
+ rtw_read8(rtwdev, REG_AMPDU_MAX_TIME_V1);
+
+ coex_stat->wl_tx_limit_en = tx_limit_en;
+ coex_stat->wl_ampdu_limit_en = ampdu_limit_en;
+
+ if (tx_limit_en) {
+ /* flag BT-polluted packets for tx rate adaptation, so tx
+ * retries broken by the PTA are not counted against the rate
+ */
+ rtw_write8_set(rtwdev, REG_TX_HANG_CTRL, BIT_EN_GNT_BT_AWAKE);
+
+ /* set a queue lifetime so the tx retry limit can still be
+ * reached even when tx keeps getting broken by GNT_BT
+ */
+ rtw_write8_set(rtwdev, REG_LIFETIME_EN, 0xf);
+ rtw_write16(rtwdev, REG_RETRY_LIMIT, 0x0808);
+
+ /* auto rate fallback step within 8 retries */
+ if (wifi_under_b_mode) {
+ rtw_write32(rtwdev, REG_DARFRC, 0x1000000);
+ rtw_write32(rtwdev, REG_DARFRCH, 0x1010101);
+ } else {
+ rtw_write32(rtwdev, REG_DARFRC, 0x1000000);
+ rtw_write32(rtwdev, REG_DARFRCH, 0x4030201);
+ }
+ } else {
+ rtw_write8_clr(rtwdev, REG_TX_HANG_CTRL, BIT_EN_GNT_BT_AWAKE);
+ rtw_write8_clr(rtwdev, REG_LIFETIME_EN, 0xf);
+
+ rtw_write16(rtwdev, REG_RETRY_LIMIT, coex_stat->retry_limit);
+ rtw_write32(rtwdev, REG_DARFRC, coex_stat->darfrc);
+ rtw_write32(rtwdev, REG_DARFRCH, coex_stat->darfrch);
+ }
+
+ if (ampdu_limit_en)
+ rtw_write8(rtwdev, REG_AMPDU_MAX_TIME_V1, 0x20);
+ else
+ rtw_write8(rtwdev, REG_AMPDU_MAX_TIME_V1,
+ coex_stat->ampdu_max_time);
+}
+
+static void rtw_coex_limited_wl(struct rtw_dev *rtwdev)
+{
+ struct rtw_coex *coex = &rtwdev->coex;
+ struct rtw_coex_dm *coex_dm = &coex->dm;
+ struct rtw_coex_stat *coex_stat = &coex->stat;
+ bool tx_limit = false;
+ bool tx_agg_ctrl = false;
+
+ if (coex->under_5g ||
+ coex_dm->bt_status == COEX_BTSTATUS_NCON_IDLE) {
+ /* no need to limit tx */
+ } else {
+ tx_limit = true;
+ if (coex_stat->bt_hid_exist || coex_stat->bt_hfp_exist ||
+ coex_stat->bt_hid_pair_num > 0)
+ tx_agg_ctrl = true;
+ }
+
+ rtw_coex_limited_tx(rtwdev, tx_limit, tx_agg_ctrl);
+}
+
+static void rtw_coex_wl_ccklock_action(struct rtw_dev *rtwdev)
+{
+ struct rtw_coex *coex = &rtwdev->coex;
+ struct rtw_coex_stat *coex_stat = &coex->stat;
+ u8 para[6] = {0};
+
+ if (coex->stop_dm)
+ return;
+
+ para[0] = COEX_H2C69_WL_LEAKAP;
+
+ if (coex_stat->tdma_timer_base == 3 && coex_stat->wl_slot_extend) {
+ para[1] = PARA1_H2C69_DIS_5MS; /* disable 5ms extend */
+ rtw_fw_bt_wifi_control(rtwdev, para[0], &para[1]);
+ coex_stat->wl_slot_extend = false;
+ coex_stat->cnt_wl[COEX_CNT_WL_5MS_NOEXTEND] = 0;
+ return;
+ }
+
+ if (coex_stat->wl_slot_extend && coex_stat->wl_force_lps_ctrl &&
+ !coex_stat->wl_cck_lock_ever) {
+ if (coex_stat->wl_fw_dbg_info[7] <= 5)
+ coex_stat->cnt_wl[COEX_CNT_WL_5MS_NOEXTEND]++;
+ else
+ coex_stat->cnt_wl[COEX_CNT_WL_5MS_NOEXTEND] = 0;
+
+ if (coex_stat->cnt_wl[COEX_CNT_WL_5MS_NOEXTEND] == 7) {
+ para[1] = 0x1; /* disable 5ms extend */
+ rtw_fw_bt_wifi_control(rtwdev, para[0], &para[1]);
+ coex_stat->wl_slot_extend = false;
+ coex_stat->cnt_wl[COEX_CNT_WL_5MS_NOEXTEND] = 0;
+ }
+ } else if (!coex_stat->wl_slot_extend && coex_stat->wl_cck_lock) {
+ para[1] = 0x0; /* enable 5ms extend */
+ rtw_fw_bt_wifi_control(rtwdev, para[0], &para[1]);
+ coex_stat->wl_slot_extend = true;
+ }
+}
+
+static void rtw_coex_wl_ccklock_detect(struct rtw_dev *rtwdev)
+{
+ struct rtw_coex *coex = &rtwdev->coex;
+ struct rtw_coex_stat *coex_stat = &coex->stat;
+
+ /* TODO: wait for rx_rate_change_notify implement */
+ coex_stat->wl_cck_lock = false;
+ coex_stat->wl_cck_lock_pre = false;
+ coex_stat->wl_cck_lock_ever = false;
+}
+
+static void rtw_coex_wl_noisy_detect(struct rtw_dev *rtwdev)
+{
+ struct rtw_coex *coex = &rtwdev->coex;
+ struct rtw_coex_stat *coex_stat = &coex->stat;
+ struct rtw_dm_info *dm_info = &rtwdev->dm_info;
+ u32 cnt_cck;
+
+ /* wifi noisy environment identification */
+ cnt_cck = dm_info->cck_ok_cnt + dm_info->cck_err_cnt;
+
+ if (!coex_stat->wl_gl_busy) {
+ if (cnt_cck > 250) {
+ if (coex_stat->cnt_wl[COEX_CNT_WL_NOISY2] < 5)
+ coex_stat->cnt_wl[COEX_CNT_WL_NOISY2]++;
+
+ if (coex_stat->cnt_wl[COEX_CNT_WL_NOISY2] == 5) {
+ coex_stat->cnt_wl[COEX_CNT_WL_NOISY0] = 0;
+ coex_stat->cnt_wl[COEX_CNT_WL_NOISY1] = 0;
+ }
+ } else if (cnt_cck < 100) {
+ if (coex_stat->cnt_wl[COEX_CNT_WL_NOISY0] < 5)
+ coex_stat->cnt_wl[COEX_CNT_WL_NOISY0]++;
+
+ if (coex_stat->cnt_wl[COEX_CNT_WL_NOISY0] == 5) {
+ coex_stat->cnt_wl[COEX_CNT_WL_NOISY1] = 0;
+ coex_stat->cnt_wl[COEX_CNT_WL_NOISY2] = 0;
+ }
+ } else {
+ if (coex_stat->cnt_wl[COEX_CNT_WL_NOISY1] < 5)
+ coex_stat->cnt_wl[COEX_CNT_WL_NOISY1]++;
+
+ if (coex_stat->cnt_wl[COEX_CNT_WL_NOISY1] == 5) {
+ coex_stat->cnt_wl[COEX_CNT_WL_NOISY0] = 0;
+ coex_stat->cnt_wl[COEX_CNT_WL_NOISY2] = 0;
+ }
+ }
+
+ if (coex_stat->cnt_wl[COEX_CNT_WL_NOISY2] == 5)
+ coex_stat->wl_noisy_level = 2;
+ else if (coex_stat->cnt_wl[COEX_CNT_WL_NOISY1] == 5)
+ coex_stat->wl_noisy_level = 1;
+ else
+ coex_stat->wl_noisy_level = 0;
+ }
+}
+
+static void rtw_coex_tdma_timer_base(struct rtw_dev *rtwdev, u8 type)
+{
+ struct rtw_coex *coex = &rtwdev->coex;
+ struct rtw_coex_stat *coex_stat = &coex->stat;
+ u8 para[2] = {0};
+
+ if (coex_stat->tdma_timer_base == type)
+ return;
+
+ coex_stat->tdma_timer_base = type;
+
+ para[0] = COEX_H2C69_TDMA_SLOT;
+
+ if (type == 3) /* 4-slot */
+ para[1] = PARA1_H2C69_TDMA_4SLOT; /* 4-slot */
+ else /* 2-slot */
+ para[1] = PARA1_H2C69_TDMA_2SLOT;
+
+ rtw_fw_bt_wifi_control(rtwdev, para[0], &para[1]);
+
+ /* no 5ms_wl_slot_extend for 4-slot mode */
+ if (coex_stat->tdma_timer_base == 3)
+ rtw_coex_wl_ccklock_action(rtwdev);
+}
+
+static void rtw_coex_set_wl_pri_mask(struct rtw_dev *rtwdev, u8 bitmap,
+ u8 data)
+{
+ u32 addr;
+
+ addr = REG_BT_COEX_TABLE_H + (bitmap / 8);
+ bitmap = bitmap % 8;
+
+ rtw_write8_mask(rtwdev, addr, BIT(bitmap), data);
+}
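
The bitmap argument is a flat bit index into the run of byte-wide priority-mask registers starting at REG_BT_COEX_TABLE_H, split into a byte offset (bitmap / 8) and a bit position (bitmap % 8). A hypothetical call, for illustration only:

    /* priority bit 10: byte offset 10 / 8 = 1, bit 10 % 8 = 2 */
    rtw_coex_set_wl_pri_mask(rtwdev, 10, 1);
    /* which resolves to:
     * rtw_write8_mask(rtwdev, REG_BT_COEX_TABLE_H + 1, BIT(2), 1);
     */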
+
+void rtw_coex_write_scbd(struct rtw_dev *rtwdev, u16 bitpos, bool set)
+{
+ struct rtw_chip_info *chip = rtwdev->chip;
+ struct rtw_coex *coex = &rtwdev->coex;
+ struct rtw_coex_stat *coex_stat = &coex->stat;
+ u16 val = 0x2;
+
+ if (!chip->scbd_support)
+ return;
+
+ val |= coex_stat->score_board;
+
+ /* for 8822b, scbd[10] is CQDDR on
+ * for 8822c, scbd[10] is no fix 2M
+ */
+ if (!chip->new_scbd10_def && (bitpos & COEX_SCBD_FIX2M)) {
+ if (set)
+ val &= ~COEX_SCBD_FIX2M;
+ else
+ val |= COEX_SCBD_FIX2M;
+ } else {
+ if (set)
+ val |= bitpos;
+ else
+ val &= ~bitpos;
+ }
+
+ if (val != coex_stat->score_board) {
+ coex_stat->score_board = val;
+ val |= BIT_BT_INT_EN;
+ rtw_write16(rtwdev, REG_WIFI_BT_INFO, val);
+ }
+}
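
Note the polarity flip for COEX_SCBD_FIX2M on chips where new_scbd10_def is false: because scbd[10] means "CQDDR on" there rather than "no fix 2M", a request to set the bit clears it and vice versa. A sketch of the two outcomes:

    /* 8822b-style chip (new_scbd10_def == false): */
    rtw_coex_write_scbd(rtwdev, COEX_SCBD_FIX2M, true);  /* scbd[10] -> 0 */
    rtw_coex_write_scbd(rtwdev, COEX_SCBD_FIX2M, false); /* scbd[10] -> 1 */
    /* 8822c-style chip (new_scbd10_def == true): the same calls set
     * and clear scbd[10] in the ordinary, non-inverted sense
     */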
+
+static u16 rtw_coex_read_scbd(struct rtw_dev *rtwdev)
+{
+ struct rtw_chip_info *chip = rtwdev->chip;
+
+ if (!chip->scbd_support)
+ return 0;
+
+ return rtw_read16(rtwdev, REG_WIFI_BT_INFO) & ~BIT_BT_INT_EN;
+}
+
+static void rtw_coex_check_rfk(struct rtw_dev *rtwdev)
+{
+ struct rtw_chip_info *chip = rtwdev->chip;
+ struct rtw_coex *coex = &rtwdev->coex;
+ struct rtw_coex_stat *coex_stat = &coex->stat;
+ struct rtw_coex_rfe *coex_rfe = &coex->rfe;
+ u8 cnt = 0;
+ u32 wait_cnt;
+ bool btk, wlk;
+
+ if (coex_rfe->wlg_at_btg && chip->scbd_support &&
+ coex_stat->bt_iqk_state != 0xff) {
+ wait_cnt = COEX_RFK_TIMEOUT / COEX_MIN_DELAY;
+ do {
+ /* BT RFK */
+ btk = !!(rtw_coex_read_scbd(rtwdev) & COEX_SCBD_BT_RFK);
+
+ /* WL RFK */
+ wlk = !!(rtw_read8(rtwdev, REG_ARFR4) & BIT_WL_RFK);
+
+ if (!btk && !wlk)
+ break;
+
+ mdelay(COEX_MIN_DELAY);
+ } while (++cnt < wait_cnt);
+
+ if (cnt >= wait_cnt)
+ coex_stat->bt_iqk_state = 0xff;
+ }
+}
+
+static void rtw_coex_query_bt_info(struct rtw_dev *rtwdev)
+{
+ struct rtw_coex *coex = &rtwdev->coex;
+ struct rtw_coex_stat *coex_stat = &coex->stat;
+
+ if (coex_stat->bt_disabled)
+ return;
+
+ rtw_fw_query_bt_info(rtwdev);
+}
+
+static void rtw_coex_monitor_bt_enable(struct rtw_dev *rtwdev)
+{
+ struct rtw_chip_info *chip = rtwdev->chip;
+ struct rtw_coex *coex = &rtwdev->coex;
+ struct rtw_coex_stat *coex_stat = &coex->stat;
+ struct rtw_coex_dm *coex_dm = &coex->dm;
+ bool bt_disabled = false;
+ u16 score_board;
+
+ if (chip->scbd_support) {
+ score_board = rtw_coex_read_scbd(rtwdev);
+ bt_disabled = !(score_board & COEX_SCBD_ONOFF);
+ }
+
+ if (coex_stat->bt_disabled != bt_disabled) {
+ rtw_dbg(rtwdev, RTW_DBG_COEX, "coex: BT state changed (%d) -> (%d)\n",
+ coex_stat->bt_disabled, bt_disabled);
+
+ coex_stat->bt_disabled = bt_disabled;
+ coex_stat->bt_ble_scan_type = 0;
+ coex_dm->cur_bt_lna_lvl = 0;
+ }
+
+ if (!coex_stat->bt_disabled) {
+ coex_stat->bt_reenable = true;
+ ieee80211_queue_delayed_work(rtwdev->hw,
+ &coex->bt_reenable_work, 15 * HZ);
+ } else {
+ coex_stat->bt_mailbox_reply = false;
+ coex_stat->bt_reenable = false;
+ }
+}
+
+static void rtw_coex_update_wl_link_info(struct rtw_dev *rtwdev, u8 reason)
+{
+ struct rtw_coex *coex = &rtwdev->coex;
+ struct rtw_coex_stat *coex_stat = &coex->stat;
+ struct rtw_coex_dm *coex_dm = &coex->dm;
+ struct rtw_chip_info *chip = rtwdev->chip;
+ struct rtw_traffic_stats *stats = &rtwdev->stats;
+ bool is_5G = false;
+ bool scan = false, link = false;
+ int i;
+ u8 rssi_state;
+ u8 rssi_step;
+ u8 rssi;
+
+ scan = rtw_flag_check(rtwdev, RTW_FLAG_SCANNING);
+ coex_stat->wl_connected = !!rtwdev->sta_cnt;
+ coex_stat->wl_gl_busy = rtw_flag_check(rtwdev, RTW_FLAG_BUSY_TRAFFIC);
+
+ if (stats->tx_throughput > stats->rx_throughput)
+ coex_stat->wl_tput_dir = COEX_WL_TPUT_TX;
+ else
+ coex_stat->wl_tput_dir = COEX_WL_TPUT_RX;
+
+ if (scan || link || reason == COEX_RSN_2GCONSTART ||
+ reason == COEX_RSN_2GSCANSTART || reason == COEX_RSN_2GSWITCHBAND)
+ coex_stat->wl_linkscan_proc = true;
+ else
+ coex_stat->wl_linkscan_proc = false;
+
+ rtw_coex_wl_noisy_detect(rtwdev);
+
+ for (i = 0; i < 4; i++) {
+ rssi_state = coex_dm->wl_rssi_state[i];
+ rssi_step = chip->wl_rssi_step[i];
+ rssi = rtwdev->dm_info.min_rssi;
+ rssi_state = rtw_coex_next_rssi_state(rtwdev, rssi_state,
+ rssi, rssi_step);
+ coex_dm->wl_rssi_state[i] = rssi_state;
+ }
+
+ switch (reason) {
+ case COEX_RSN_5GSCANSTART:
+ case COEX_RSN_5GSWITCHBAND:
+ case COEX_RSN_5GCONSTART:
+
+ is_5G = true;
+ break;
+ case COEX_RSN_2GSCANSTART:
+ case COEX_RSN_2GSWITCHBAND:
+ case COEX_RSN_2GCONSTART:
+
+ is_5G = false;
+ break;
+ default:
+ is_5G = rtwdev->hal.current_band_type == RTW_BAND_5G;
+ break;
+ }
+
+ coex->under_5g = is_5G;
+}
+
+static inline u8 *get_payload_from_coex_resp(struct sk_buff *resp)
+{
+ struct rtw_c2h_cmd *c2h;
+ u32 pkt_offset;
+
+ pkt_offset = *((u32 *)resp->cb);
+ c2h = (struct rtw_c2h_cmd *)(resp->data + pkt_offset);
+
+ return c2h->payload;
+}
+
+void rtw_coex_info_response(struct rtw_dev *rtwdev, struct sk_buff *skb)
+{
+ struct rtw_coex *coex = &rtwdev->coex;
+ u8 *payload = get_payload_from_coex_resp(skb);
+
+ if (payload[0] != COEX_RESP_ACK_BY_WL_FW)
+ return;
+
+ skb_queue_tail(&coex->queue, skb);
+ wake_up(&coex->wait);
+}
+
+static struct sk_buff *rtw_coex_info_request(struct rtw_dev *rtwdev,
+ struct rtw_coex_info_req *req)
+{
+ struct rtw_coex *coex = &rtwdev->coex;
+ struct sk_buff *skb_resp = NULL;
+
+ mutex_lock(&coex->mutex);
+
+ rtw_fw_query_bt_mp_info(rtwdev, req);
+
+ if (!wait_event_timeout(coex->wait, !skb_queue_empty(&coex->queue),
+ COEX_REQUEST_TIMEOUT)) {
+ rtw_err(rtwdev, "coex request time out\n");
+ goto out;
+ }
+
+ skb_resp = skb_dequeue(&coex->queue);
+ if (!skb_resp) {
+ rtw_err(rtwdev, "failed to get coex info response\n");
+ goto out;
+ }
+
+out:
+ mutex_unlock(&coex->mutex);
+ return skb_resp;
+}
+
+static bool rtw_coex_get_bt_scan_type(struct rtw_dev *rtwdev, u8 *scan_type)
+{
+ struct rtw_coex_info_req req = {0};
+ struct sk_buff *skb;
+ u8 *payload;
+ bool ret = false;
+
+ req.op_code = BT_MP_INFO_OP_SCAN_TYPE;
+ skb = rtw_coex_info_request(rtwdev, &req);
+ if (!skb)
+ goto out;
+
+ payload = get_payload_from_coex_resp(skb);
+ *scan_type = GET_COEX_RESP_BT_SCAN_TYPE(payload);
+ dev_kfree_skb_any(skb);
+ ret = true;
+
+out:
+ return ret;
+}
+
+static bool rtw_coex_set_lna_constrain_level(struct rtw_dev *rtwdev,
+ u8 lna_constrain_level)
+{
+ struct rtw_coex_info_req req = {0};
+ struct sk_buff *skb;
+ bool ret = false;
+
+ req.op_code = BT_MP_INFO_OP_LNA_CONSTRAINT;
+ req.para1 = lna_constrain_level;
+ skb = rtw_coex_info_request(rtwdev, &req);
+ if (!skb)
+ goto out;
+
+ dev_kfree_skb_any(skb);
+ ret = true;
+
+out:
+ return ret;
+}
+
+static void rtw_coex_update_bt_link_info(struct rtw_dev *rtwdev)
+{
+ struct rtw_coex *coex = &rtwdev->coex;
+ struct rtw_coex_stat *coex_stat = &coex->stat;
+ struct rtw_coex_dm *coex_dm = &coex->dm;
+ struct rtw_chip_info *chip = rtwdev->chip;
+ u8 i;
+ u8 rssi_state;
+ u8 rssi_step;
+ u8 rssi;
+
+ /* update wl/bt rssi by btinfo */
+ for (i = 0; i < COEX_RSSI_STEP; i++) {
+ rssi_state = coex_dm->bt_rssi_state[i];
+ rssi_step = chip->bt_rssi_step[i];
+ rssi = coex_stat->bt_rssi;
+ rssi_state = rtw_coex_next_rssi_state(rtwdev, rssi_state,
+ rssi, rssi_step);
+ coex_dm->bt_rssi_state[i] = rssi_state;
+ }
+
+ for (i = 0; i < COEX_RSSI_STEP; i++) {
+ rssi_state = coex_dm->wl_rssi_state[i];
+ rssi_step = chip->wl_rssi_step[i];
+ rssi = rtwdev->dm_info.min_rssi;
+ rssi_state = rtw_coex_next_rssi_state(rtwdev, rssi_state,
+ rssi, rssi_step);
+ coex_dm->wl_rssi_state[i] = rssi_state;
+ }
+
+ if (coex_stat->bt_ble_scan_en &&
+ coex_stat->cnt_bt[COEX_CNT_BT_INFOUPDATE] % 3 == 0) {
+ u8 scan_type;
+
+ if (rtw_coex_get_bt_scan_type(rtwdev, &scan_type)) {
+ coex_stat->bt_ble_scan_type = scan_type;
+ coex_stat->bt_init_scan =
+ !!(coex_stat->bt_ble_scan_type & 0x1);
+ }
+ }
+
+ coex_stat->bt_profile_num = 0;
+
+ /* set link exist status */
+ if (!(coex_stat->bt_info_lb2 & COEX_INFO_CONNECTION)) {
+ coex_stat->bt_link_exist = false;
+ coex_stat->bt_pan_exist = false;
+ coex_stat->bt_a2dp_exist = false;
+ coex_stat->bt_hid_exist = false;
+ coex_stat->bt_hfp_exist = false;
+ } else {
+ /* connection exists */
+ coex_stat->bt_link_exist = true;
+ if (coex_stat->bt_info_lb2 & COEX_INFO_FTP) {
+ coex_stat->bt_pan_exist = true;
+ coex_stat->bt_profile_num++;
+ } else {
+ coex_stat->bt_pan_exist = false;
+ }
+
+ if (coex_stat->bt_info_lb2 & COEX_INFO_A2DP) {
+ coex_stat->bt_a2dp_exist = true;
+ coex_stat->bt_profile_num++;
+ } else {
+ coex_stat->bt_a2dp_exist = false;
+ }
+
+ if (coex_stat->bt_info_lb2 & COEX_INFO_HID) {
+ coex_stat->bt_hid_exist = true;
+ coex_stat->bt_profile_num++;
+ } else {
+ coex_stat->bt_hid_exist = false;
+ }
+
+ if (coex_stat->bt_info_lb2 & COEX_INFO_SCO_ESCO) {
+ coex_stat->bt_hfp_exist = true;
+ coex_stat->bt_profile_num++;
+ } else {
+ coex_stat->bt_hfp_exist = false;
+ }
+ }
+
+ if (coex_stat->bt_info_lb2 & COEX_INFO_INQ_PAGE) {
+ coex_dm->bt_status = COEX_BTSTATUS_INQ_PAGE;
+ } else if (!(coex_stat->bt_info_lb2 & COEX_INFO_CONNECTION)) {
+ coex_dm->bt_status = COEX_BTSTATUS_NCON_IDLE;
+ } else if (coex_stat->bt_info_lb2 == COEX_INFO_CONNECTION) {
+ coex_dm->bt_status = COEX_BTSTATUS_CON_IDLE;
+ } else if ((coex_stat->bt_info_lb2 & COEX_INFO_SCO_ESCO) ||
+ (coex_stat->bt_info_lb2 & COEX_INFO_SCO_BUSY)) {
+ if (coex_stat->bt_info_lb2 & COEX_INFO_ACL_BUSY)
+ coex_dm->bt_status = COEX_BTSTATUS_ACL_SCO_BUSY;
+ else
+ coex_dm->bt_status = COEX_BTSTATUS_SCO_BUSY;
+ } else if (coex_stat->bt_info_lb2 & COEX_INFO_ACL_BUSY) {
+ coex_dm->bt_status = COEX_BTSTATUS_ACL_BUSY;
+ } else {
+ coex_dm->bt_status = COEX_BTSTATUS_MAX;
+ }
+
+ coex_stat->cnt_bt[COEX_CNT_BT_INFOUPDATE]++;
+
+ rtw_dbg(rtwdev, RTW_DBG_COEX, "coex: bt status(%d)\n", coex_dm->bt_status);
+}
+
+static void rtw_coex_update_wl_ch_info(struct rtw_dev *rtwdev, u8 type)
+{
+ struct rtw_chip_info *chip = rtwdev->chip;
+ struct rtw_coex_dm *coex_dm = &rtwdev->coex.dm;
+ struct rtw_efuse *efuse = &rtwdev->efuse;
+ u8 link = 0;
+ u8 center_chan = 0;
+ u8 bw;
+ int i;
+
+ bw = rtwdev->hal.current_band_width;
+
+ if (type != COEX_MEDIA_DISCONNECT)
+ center_chan = rtwdev->hal.current_channel;
+
+ if (center_chan == 0 || (efuse->share_ant && center_chan <= 14)) {
+ link = 0;
+ } else if (center_chan <= 14) {
+ link = 0x1;
+
+ if (bw == RTW_CHANNEL_WIDTH_40)
+ bw = chip->bt_afh_span_bw40;
+ else
+ bw = chip->bt_afh_span_bw20;
+ } else if (chip->afh_5g_num > 1) {
+ for (i = 0; i < chip->afh_5g_num; i++) {
+ if (center_chan == chip->afh_5g[i].wl_5g_ch) {
+ link = 0x3;
+ center_chan = chip->afh_5g[i].bt_skip_ch;
+ bw = chip->afh_5g[i].bt_skip_span;
+ break;
+ }
+ }
+ }
+
+ coex_dm->wl_ch_info[0] = link;
+ coex_dm->wl_ch_info[1] = center_chan;
+ coex_dm->wl_ch_info[2] = bw;
+
+ rtw_fw_wl_ch_info(rtwdev, link, center_chan, bw);
+}
+
+static void rtw_coex_set_bt_tx_power(struct rtw_dev *rtwdev, u8 bt_pwr_dec_lvl)
+{
+ struct rtw_coex *coex = &rtwdev->coex;
+ struct rtw_coex_dm *coex_dm = &coex->dm;
+
+ if (bt_pwr_dec_lvl == coex_dm->cur_bt_pwr_lvl)
+ return;
+
+ coex_dm->cur_bt_pwr_lvl = bt_pwr_dec_lvl;
+
+ rtw_fw_force_bt_tx_power(rtwdev, bt_pwr_dec_lvl);
+}
+
+static void rtw_coex_set_bt_rx_gain(struct rtw_dev *rtwdev, u8 bt_lna_lvl)
+{
+ struct rtw_coex *coex = &rtwdev->coex;
+ struct rtw_coex_dm *coex_dm = &coex->dm;
+
+ if (bt_lna_lvl == coex_dm->cur_bt_lna_lvl)
+ return;
+
+ coex_dm->cur_bt_lna_lvl = bt_lna_lvl;
+
+ /* notify BT rx gain table changed */
+ if (bt_lna_lvl < 7) {
+ rtw_coex_set_lna_constrain_level(rtwdev, bt_lna_lvl);
+ rtw_coex_write_scbd(rtwdev, COEX_SCBD_RXGAIN, true);
+ } else {
+ rtw_coex_write_scbd(rtwdev, COEX_SCBD_RXGAIN, false);
+ }
+}
+
+static void rtw_coex_set_rf_para(struct rtw_dev *rtwdev,
+ struct coex_rf_para para)
+{
+ struct rtw_coex *coex = &rtwdev->coex;
+ struct rtw_coex_stat *coex_stat = &coex->stat;
+ u8 offset = 0;
+
+ if (coex->freerun && coex_stat->wl_noisy_level <= 1)
+ offset = 3;
+
+ rtw_coex_set_wl_tx_power(rtwdev, para.wl_pwr_dec_lvl);
+ rtw_coex_set_bt_tx_power(rtwdev, para.bt_pwr_dec_lvl + offset);
+ rtw_coex_set_wl_rx_gain(rtwdev, para.wl_low_gain_en);
+ rtw_coex_set_bt_rx_gain(rtwdev, para.bt_lna_lvl);
+}
+
+static u32 rtw_coex_read_indirect_reg(struct rtw_dev *rtwdev, u16 addr)
+{
+ u32 val;
+
+ if (!ltecoex_read_reg(rtwdev, addr, &val)) {
+ rtw_err(rtwdev, "failed to read indirect register\n");
+ return 0;
+ }
+
+ return val;
+}
+
+void rtw_coex_write_indirect_reg(struct rtw_dev *rtwdev, u16 addr,
+ u32 mask, u32 val)
+{
+ u32 shift = __ffs(mask);
+ u32 tmp;
+
+ tmp = rtw_coex_read_indirect_reg(rtwdev, addr);
+ tmp = (tmp & (~mask)) | ((val << shift) & mask);
+
+ if (!ltecoex_reg_write(rtwdev, addr, tmp))
+ rtw_err(rtwdev, "failed to write indirect register\n");
+}
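
rtw_coex_write_indirect_reg() is a plain read-modify-write on the LTE-coex registers, with the field's shift recovered from the mask via __ffs(). A worked instance, using the GNT_BT write issued by rtw_coex_set_gnt_bt() below:

    /* rtw_coex_write_indirect_reg(rtwdev, 0x38, 0xc000, state)
     * with state = 3: __ffs(0xc000) = 14, so
     *
     *   tmp = (old & ~0xc000) | ((3 << 14) & 0xc000);
     *
     * i.e. bits [15:14] of indirect register 0x38 become 0b11 while
     * every other bit keeps its previous value
     */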
+
+static void rtw_coex_coex_ctrl_owner(struct rtw_dev *rtwdev, bool wifi_control)
+{
+ if (wifi_control)
+ rtw_write32_set(rtwdev, REG_SYS_SDIO_CTRL, BIT_LTE_MUX_CTRL_PATH);
+ else
+ rtw_write32_clr(rtwdev, REG_SYS_SDIO_CTRL, BIT_LTE_MUX_CTRL_PATH);
+}
+
+static void rtw_coex_set_gnt_bt(struct rtw_dev *rtwdev, u8 state)
+{
+ rtw_coex_write_indirect_reg(rtwdev, 0x38, 0xc000, state);
+ rtw_coex_write_indirect_reg(rtwdev, 0x38, 0x0c00, state);
+}
+
+static void rtw_coex_set_gnt_wl(struct rtw_dev *rtwdev, u8 state)
+{
+ rtw_coex_write_indirect_reg(rtwdev, 0x38, 0x3000, state);
+ rtw_coex_write_indirect_reg(rtwdev, 0x38, 0x0300, state);
+}
+
+static void rtw_coex_set_table(struct rtw_dev *rtwdev, u32 table0, u32 table1)
+{
+#define DEF_BRK_TABLE_VAL 0xf0ffffff
+ rtw_write32(rtwdev, REG_BT_COEX_TABLE0, table0);
+ rtw_write32(rtwdev, REG_BT_COEX_TABLE1, table1);
+ rtw_write32(rtwdev, REG_BT_COEX_BRK_TABLE, DEF_BRK_TABLE_VAL);
+}
+
+static void rtw_coex_table(struct rtw_dev *rtwdev, u8 type)
+{
+ struct rtw_coex *coex = &rtwdev->coex;
+ struct rtw_coex_dm *coex_dm = &coex->dm;
+ struct rtw_chip_info *chip = rtwdev->chip;
+ struct rtw_efuse *efuse = &rtwdev->efuse;
+
+ coex_dm->cur_table = type;
+
+ if (efuse->share_ant) {
+ if (type < chip->table_sant_num)
+ rtw_coex_set_table(rtwdev,
+ chip->table_sant[type].bt,
+ chip->table_sant[type].wl);
+ } else {
+ type = type - 100;
+ if (type < chip->table_nsant_num)
+ rtw_coex_set_table(rtwdev,
+ chip->table_nsant[type].bt,
+ chip->table_nsant[type].wl);
+ }
+}
+
+static void rtw_coex_ignore_wlan_act(struct rtw_dev *rtwdev, bool enable)
+{
+ struct rtw_coex *coex = &rtwdev->coex;
+
+ if (coex->stop_dm)
+ return;
+
+ rtw_fw_bt_ignore_wlan_action(rtwdev, enable);
+}
+
+static void rtw_coex_power_save_state(struct rtw_dev *rtwdev, u8 ps_type,
+ u8 lps_val, u8 rpwm_val)
+{
+ struct rtw_lps_conf *lps_conf = &rtwdev->lps_conf;
+ struct rtw_vif *rtwvif;
+ struct rtw_coex *coex = &rtwdev->coex;
+ struct rtw_coex_stat *coex_stat = &coex->stat;
+ u8 lps_mode = 0x0;
+
+ lps_mode = rtwdev->lps_conf.mode;
+
+ switch (ps_type) {
+ case COEX_PS_WIFI_NATIVE:
+ /* recover to original 32k low power setting */
+ coex_stat->wl_force_lps_ctrl = false;
+
+ rtwvif = lps_conf->rtwvif;
+ if (rtwvif && rtw_in_lps(rtwdev))
+ rtw_leave_lps(rtwdev, rtwvif);
+ break;
+ case COEX_PS_LPS_OFF:
+ coex_stat->wl_force_lps_ctrl = true;
+ if (lps_mode)
+ rtw_fw_coex_tdma_type(rtwdev, 0x8, 0, 0, 0, 0);
+
+ rtwvif = lps_conf->rtwvif;
+ if (rtwvif && rtw_in_lps(rtwdev))
+ rtw_leave_lps(rtwdev, rtwvif);
+ break;
+ default:
+ break;
+ }
+}
+
+static void rtw_coex_set_tdma(struct rtw_dev *rtwdev, u8 byte1, u8 byte2,
+ u8 byte3, u8 byte4, u8 byte5)
+{
+ struct rtw_coex *coex = &rtwdev->coex;
+ struct rtw_coex_dm *coex_dm = &coex->dm;
+ struct rtw_chip_info *chip = rtwdev->chip;
+ u8 ps_type = COEX_PS_WIFI_NATIVE;
+ bool ap_enable = false;
+
+ if (ap_enable && (byte1 & BIT(4) && !(byte1 & BIT(5)))) {
+ byte1 &= ~BIT(4);
+ byte1 |= BIT(5);
+
+ byte5 |= BIT(5);
+ byte5 &= ~BIT(6);
+
+ ps_type = COEX_PS_WIFI_NATIVE;
+ rtw_coex_power_save_state(rtwdev, ps_type, 0x0, 0x0);
+ } else if (byte1 & BIT(4) && !(byte1 & BIT(5))) {
+ if (chip->pstdma_type == COEX_PSTDMA_FORCE_LPSOFF)
+ ps_type = COEX_PS_LPS_OFF;
+ else
+ ps_type = COEX_PS_LPS_ON;
+ rtw_coex_power_save_state(rtwdev, ps_type, 0x50, 0x4);
+ } else {
+ ps_type = COEX_PS_WIFI_NATIVE;
+ rtw_coex_power_save_state(rtwdev, ps_type, 0x0, 0x0);
+ }
+
+ coex_dm->ps_tdma_para[0] = byte1;
+ coex_dm->ps_tdma_para[1] = byte2;
+ coex_dm->ps_tdma_para[2] = byte3;
+ coex_dm->ps_tdma_para[3] = byte4;
+ coex_dm->ps_tdma_para[4] = byte5;
+
+ rtw_fw_coex_tdma_type(rtwdev, byte1, byte2, byte3, byte4, byte5);
+}
+
+static void rtw_coex_tdma(struct rtw_dev *rtwdev, bool force, u32 tcase)
+{
+ struct rtw_coex *coex = &rtwdev->coex;
+ struct rtw_coex_dm *coex_dm = &coex->dm;
+ struct rtw_chip_info *chip = rtwdev->chip;
+ struct rtw_efuse *efuse = &rtwdev->efuse;
+ u8 n, type;
+ bool turn_on;
+
+ if (tcase & TDMA_4SLOT)/* 4-slot (50ms) mode */
+ rtw_coex_tdma_timer_base(rtwdev, 3);
+ else
+ rtw_coex_tdma_timer_base(rtwdev, 0);
+
+ type = (u8)(tcase & 0xff);
+
+ turn_on = (type != 0 && type != 100);
+
+ if (!force) {
+ if (turn_on == coex_dm->cur_ps_tdma_on &&
+ type == coex_dm->cur_ps_tdma) {
+ return;
+ }
+ }
+
+ if (turn_on) {
+ /* enable TBTT interrupt */
+ rtw_write8_set(rtwdev, REG_BCN_CTRL, BIT_EN_BCN_FUNCTION);
+ rtw_coex_write_scbd(rtwdev, COEX_SCBD_TDMA, true);
+ } else {
+ rtw_coex_write_scbd(rtwdev, COEX_SCBD_TDMA, false);
+ }
+
+ if (efuse->share_ant) {
+ if (type < chip->tdma_sant_num)
+ rtw_coex_set_tdma(rtwdev,
+ chip->tdma_sant[type].para[0],
+ chip->tdma_sant[type].para[1],
+ chip->tdma_sant[type].para[2],
+ chip->tdma_sant[type].para[3],
+ chip->tdma_sant[type].para[4]);
+ } else {
+ n = type - 100;
+ if (n < chip->tdma_nsant_num)
+ rtw_coex_set_tdma(rtwdev,
+ chip->tdma_nsant[n].para[0],
+ chip->tdma_nsant[n].para[1],
+ chip->tdma_nsant[n].para[2],
+ chip->tdma_nsant[n].para[3],
+ chip->tdma_nsant[n].para[4]);
+ }
+
+ /* update pre state */
+ coex_dm->cur_ps_tdma_on = turn_on;
+ coex_dm->cur_ps_tdma = type;
+
+ rtw_dbg(rtwdev, RTW_DBG_COEX, "coex: coex tdma type (%d)\n", type);
+}
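
The tcase argument packs the table index into its low byte and flag bits such as TDMA_4SLOT above it; index 0 (shared antenna) or 100 (non-shared) means "TDMA off". Hypothetical calls, for illustration:

    rtw_coex_tdma(rtwdev, false, TDMA_4SLOT | 13);
    /* 4-slot (50 ms) timer base; with a shared antenna this selects
     * chip->tdma_sant[13]
     */
    rtw_coex_tdma(rtwdev, false, 100);
    /* 2-slot timer base, type 100: turn TDMA off on a
     * non-shared-antenna design
     */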
+
+static void rtw_coex_set_ant_path(struct rtw_dev *rtwdev, bool force, u8 phase)
+{
+ struct rtw_coex *coex = &rtwdev->coex;
+ struct rtw_coex_stat *coex_stat = &coex->stat;
+ struct rtw_coex_dm *coex_dm = &coex->dm;
+ u8 ctrl_type = COEX_SWITCH_CTRL_MAX;
+ u8 pos_type = COEX_SWITCH_TO_MAX;
+
+ if (!force && coex_dm->cur_ant_pos_type == phase)
+ return;
+
+ coex_dm->cur_ant_pos_type = phase;
+
+ /* avoid switch coex_ctrl_owner during BT IQK */
+ rtw_coex_check_rfk(rtwdev);
+
+ switch (phase) {
+ case COEX_SET_ANT_POWERON:
+ /* set path control owner to BT at power-on */
+ if (coex_stat->bt_disabled)
+ rtw_coex_coex_ctrl_owner(rtwdev, true);
+ else
+ rtw_coex_coex_ctrl_owner(rtwdev, false);
+
+ ctrl_type = COEX_SWITCH_CTRL_BY_BBSW;
+ pos_type = COEX_SWITCH_TO_BT;
+ break;
+ case COEX_SET_ANT_INIT:
+ if (coex_stat->bt_disabled) {
+ /* set GNT_BT to SW low */
+ rtw_coex_set_gnt_bt(rtwdev, COEX_GNT_SET_SW_LOW);
+
+ /* set GNT_WL to SW high */
+ rtw_coex_set_gnt_wl(rtwdev, COEX_GNT_SET_SW_HIGH);
+ } else {
+ /* set GNT_BT to SW high */
+ rtw_coex_set_gnt_bt(rtwdev, COEX_GNT_SET_SW_HIGH);
+
+ /* set GNT_WL to SW low */
+ rtw_coex_set_gnt_wl(rtwdev, COEX_GNT_SET_SW_LOW);
+ }
+
+ /* set path control owner to wl at initial step */
+ rtw_coex_coex_ctrl_owner(rtwdev, true);
+
+ ctrl_type = COEX_SWITCH_CTRL_BY_BBSW;
+ pos_type = COEX_SWITCH_TO_BT;
+ break;
+ case COEX_SET_ANT_WONLY:
+ /* set GNT_BT to SW Low */
+ rtw_coex_set_gnt_bt(rtwdev, COEX_GNT_SET_SW_LOW);
+
+ /* Set GNT_WL to SW high */
+ rtw_coex_set_gnt_wl(rtwdev, COEX_GNT_SET_SW_HIGH);
+
+ /* set path control owner to wl at initial step */
+ rtw_coex_coex_ctrl_owner(rtwdev, true);
+
+ ctrl_type = COEX_SWITCH_CTRL_BY_BBSW;
+ pos_type = COEX_SWITCH_TO_WLG;
+ break;
+ case COEX_SET_ANT_WOFF:
+ /* set path control owner to BT */
+ rtw_coex_coex_ctrl_owner(rtwdev, false);
+
+ ctrl_type = COEX_SWITCH_CTRL_BY_BT;
+ pos_type = COEX_SWITCH_TO_NOCARE;
+ break;
+ case COEX_SET_ANT_2G:
+ /* set GNT_BT to PTA */
+ rtw_coex_set_gnt_bt(rtwdev, COEX_GNT_SET_HW_PTA);
+
+ /* set GNT_WL to PTA */
+ rtw_coex_set_gnt_wl(rtwdev, COEX_GNT_SET_HW_PTA);
+
+ /* set path control owner to wl at runtime step */
+ rtw_coex_coex_ctrl_owner(rtwdev, true);
+
+ ctrl_type = COEX_SWITCH_CTRL_BY_PTA;
+ pos_type = COEX_SWITCH_TO_NOCARE;
+ break;
+ case COEX_SET_ANT_5G:
+ /* set GNT_BT to PTA */
+ rtw_coex_set_gnt_bt(rtwdev, COEX_GNT_SET_SW_HIGH);
+
+ /* set GNT_WL to SW high */
+ rtw_coex_set_gnt_wl(rtwdev, COEX_GNT_SET_SW_HIGH);
+
+ /* set path control owner to wl at runtime step */
+ rtw_coex_coex_ctrl_owner(rtwdev, true);
+
+ ctrl_type = COEX_SWITCH_CTRL_BY_BBSW;
+ pos_type = COEX_SWITCH_TO_WLA;
+ break;
+ case COEX_SET_ANT_2G_FREERUN:
+ /* set GNT_BT to SW high */
+ rtw_coex_set_gnt_bt(rtwdev, COEX_GNT_SET_SW_HIGH);
+
+ /* Set GNT_WL to SW high */
+ rtw_coex_set_gnt_wl(rtwdev, COEX_GNT_SET_SW_HIGH);
+
+ /* set path control owner to wl at runtime step */
+ rtw_coex_coex_ctrl_owner(rtwdev, true);
+
+ ctrl_type = COEX_SWITCH_CTRL_BY_BBSW;
+ pos_type = COEX_SWITCH_TO_WLG_BT;
+ break;
+ case COEX_SET_ANT_2G_WLBT:
+ /* set GNT_BT to SW high */
+ rtw_coex_set_gnt_bt(rtwdev, COEX_GNT_SET_HW_PTA);
+
+ /* Set GNT_WL to SW high */
+ rtw_coex_set_gnt_wl(rtwdev, COEX_GNT_SET_HW_PTA);
+
+ /* set path control owner to wl at runtime step */
+ rtw_coex_coex_ctrl_owner(rtwdev, true);
+
+ ctrl_type = COEX_SWITCH_CTRL_BY_BBSW;
+ pos_type = COEX_SWITCH_TO_WLG_BT;
+ break;
+ default:
+ WARN_ON("unknown phase when setting antenna path\n");
+ return;
+ }
+
+ if (ctrl_type < COEX_SWITCH_CTRL_MAX && pos_type < COEX_SWITCH_TO_MAX)
+ rtw_coex_set_ant_switch(rtwdev, ctrl_type, pos_type);
+}
+
+static u8 rtw_coex_algorithm(struct rtw_dev *rtwdev)
+{
+ struct rtw_coex *coex = &rtwdev->coex;
+ struct rtw_coex_stat *coex_stat = &coex->stat;
+ u8 algorithm = COEX_ALGO_NOPROFILE;
+ u8 profile_map = 0;
+
+ if (coex_stat->bt_hfp_exist)
+ profile_map |= BPM_HFP;
+ if (coex_stat->bt_hid_exist)
+ profile_map |= BPM_HID;
+ if (coex_stat->bt_a2dp_exist)
+ profile_map |= BPM_A2DP;
+ if (coex_stat->bt_pan_exist)
+ profile_map |= BPM_PAN;
+
+ switch (profile_map) {
+ case BPM_HFP:
+ algorithm = COEX_ALGO_HFP;
+ break;
+ case BPM_HID:
+ case BPM_HFP + BPM_HID:
+ algorithm = COEX_ALGO_HID;
+ break;
+ case BPM_HFP + BPM_A2DP:
+ case BPM_HID + BPM_A2DP:
+ case BPM_HFP + BPM_HID + BPM_A2DP:
+ algorithm = COEX_ALGO_A2DP_HID;
+ break;
+ case BPM_HFP + BPM_PAN:
+ case BPM_HID + BPM_PAN:
+ case BPM_HFP + BPM_HID + BPM_PAN:
+ algorithm = COEX_ALGO_PAN_HID;
+ break;
+ case BPM_HFP + BPM_A2DP + BPM_PAN:
+ case BPM_HID + BPM_A2DP + BPM_PAN:
+ case BPM_HFP + BPM_HID + BPM_A2DP + BPM_PAN:
+ algorithm = COEX_ALGO_A2DP_PAN_HID;
+ break;
+ case BPM_PAN:
+ algorithm = COEX_ALGO_PAN;
+ break;
+ case BPM_A2DP + BPM_PAN:
+ algorithm = COEX_ALGO_A2DP_PAN;
+ break;
+ case BPM_A2DP:
+ if (coex_stat->bt_multi_link) {
+ if (coex_stat->bt_hid_pair_num > 0)
+ algorithm = COEX_ALGO_A2DP_HID;
+ else
+ algorithm = COEX_ALGO_A2DP_PAN;
+ } else {
+ algorithm = COEX_ALGO_A2DP;
+ }
+ break;
+ default:
+ algorithm = COEX_ALGO_NOPROFILE;
+ break;
+ }
+
+ return algorithm;
+}
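
The switch collapses the sixteen possible profile combinations into a handful of algorithms; a few instances of the mapping read straight off the case labels:

    /* BPM_HFP                           -> COEX_ALGO_HFP
     * BPM_HFP + BPM_HID                 -> COEX_ALGO_HID
     * BPM_A2DP with bt_multi_link set and a paired HID
     *                                   -> COEX_ALGO_A2DP_HID
     */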
+
+static void rtw_coex_action_coex_all_off(struct rtw_dev *rtwdev)
+{
+ struct rtw_efuse *efuse = &rtwdev->efuse;
+ struct rtw_chip_info *chip = rtwdev->chip;
+ u8 table_case, tdma_case;
+
+ if (efuse->share_ant) {
+ /* Shared-Ant */
+ table_case = 2;
+ tdma_case = 0;
+ } else {
+ /* Non-Shared-Ant */
+ table_case = 100;
+ tdma_case = 100;
+ }
+
+ rtw_coex_set_rf_para(rtwdev, chip->wl_rf_para_rx[0]);
+ rtw_coex_table(rtwdev, table_case);
+ rtw_coex_tdma(rtwdev, false, tdma_case);
+}
+
+static void rtw_coex_action_freerun(struct rtw_dev *rtwdev)
+{
+ struct rtw_coex *coex = &rtwdev->coex;
+ struct rtw_coex_stat *coex_stat = &coex->stat;
+ struct rtw_coex_dm *coex_dm = &coex->dm;
+ struct rtw_efuse *efuse = &rtwdev->efuse;
+ struct rtw_chip_info *chip = rtwdev->chip;
+ u8 level = 0;
+
+ if (efuse->share_ant)
+ return;
+
+ coex->freerun = true;
+
+ if (coex_stat->wl_connected)
+ rtw_coex_update_wl_ch_info(rtwdev, COEX_MEDIA_CONNECT);
+
+ rtw_coex_set_ant_path(rtwdev, false, COEX_SET_ANT_2G_FREERUN);
+
+ rtw_coex_write_scbd(rtwdev, COEX_SCBD_FIX2M, false);
+
+ if (COEX_RSSI_HIGH(coex_dm->wl_rssi_state[0]))
+ level = 2;
+ else if (COEX_RSSI_HIGH(coex_dm->wl_rssi_state[1]))
+ level = 3;
+ else if (COEX_RSSI_HIGH(coex_dm->wl_rssi_state[2]))
+ level = 4;
+ else
+ level = 5;
+
+ if (level > chip->wl_rf_para_num - 1)
+ level = chip->wl_rf_para_num - 1;
+
+ if (coex_stat->wl_tput_dir == COEX_WL_TPUT_TX)
+ rtw_coex_set_rf_para(rtwdev, chip->wl_rf_para_tx[level]);
+ else
+ rtw_coex_set_rf_para(rtwdev, chip->wl_rf_para_rx[level]);
+
+ rtw_coex_table(rtwdev, 100);
+ rtw_coex_tdma(rtwdev, false, 100);
+}
+
+static void rtw_coex_action_bt_whql_test(struct rtw_dev *rtwdev)
+{
+ struct rtw_efuse *efuse = &rtwdev->efuse;
+ struct rtw_chip_info *chip = rtwdev->chip;
+ u8 table_case, tdma_case;
+
+ if (efuse->share_ant) {
+ /* Shared-Ant */
+ table_case = 2;
+ tdma_case = 0;
+ } else {
+ /* Non-Shared-Ant */
+ table_case = 100;
+ tdma_case = 100;
+ }
+
+ rtw_coex_set_ant_path(rtwdev, false, COEX_SET_ANT_2G);
+ rtw_coex_set_rf_para(rtwdev, chip->wl_rf_para_rx[0]);
+ rtw_coex_table(rtwdev, table_case);
+ rtw_coex_tdma(rtwdev, false, tdma_case);
+}
+
+static void rtw_coex_action_bt_relink(struct rtw_dev *rtwdev)
+{
+ struct rtw_efuse *efuse = &rtwdev->efuse;
+ struct rtw_chip_info *chip = rtwdev->chip;
+ u8 table_case, tdma_case;
+
+ if (efuse->share_ant) {
+ /* Shared-Ant */
+ table_case = 1;
+ tdma_case = 0;
+ } else {
+ /* Non-Shared-Ant */
+ table_case = 100;
+ tdma_case = 100;
+ }
+
+ rtw_coex_set_ant_path(rtwdev, false, COEX_SET_ANT_2G);
+ rtw_coex_set_rf_para(rtwdev, chip->wl_rf_para_rx[0]);
+ rtw_coex_table(rtwdev, table_case);
+ rtw_coex_tdma(rtwdev, false, tdma_case);
+}
+
+static void rtw_coex_action_bt_idle(struct rtw_dev *rtwdev)
+{
+ struct rtw_coex *coex = &rtwdev->coex;
+ struct rtw_coex_stat *coex_stat = &coex->stat;
+ struct rtw_coex_dm *coex_dm = &coex->dm;
+ struct rtw_efuse *efuse = &rtwdev->efuse;
+ struct rtw_chip_info *chip = rtwdev->chip;
+ struct rtw_coex_rfe *coex_rfe = &coex->rfe;
+ u8 table_case = 0xff, tdma_case = 0xff;
+
+ if (coex_rfe->ant_switch_with_bt &&
+ coex_dm->bt_status == COEX_BTSTATUS_NCON_IDLE) {
+ if (efuse->share_ant &&
+ COEX_RSSI_HIGH(coex_dm->wl_rssi_state[1])) {
+ table_case = 0;
+ tdma_case = 0;
+ } else if (!efuse->share_ant) {
+ table_case = 100;
+ tdma_case = 100;
+ }
+ }
+
+ if (table_case != 0xff && tdma_case != 0xff) {
+ rtw_coex_set_ant_path(rtwdev, false, COEX_SET_ANT_2G_FREERUN);
+ rtw_coex_table(rtwdev, table_case);
+ rtw_coex_tdma(rtwdev, false, tdma_case);
+ return;
+ }
+
+ rtw_coex_set_ant_path(rtwdev, false, COEX_SET_ANT_2G);
+
+ if (efuse->share_ant) {
+ /* Shared-Ant */
+ if (!coex_stat->wl_gl_busy) {
+ table_case = 10;
+ tdma_case = 3;
+ } else if (coex_dm->bt_status == COEX_BTSTATUS_NCON_IDLE) {
+ table_case = 6;
+ tdma_case = 7;
+ } else {
+ table_case = 12;
+ tdma_case = 7;
+ }
+ } else {
+ /* Non-Shared-Ant */
+ if (!coex_stat->wl_gl_busy) {
+ table_case = 112;
+ tdma_case = 104;
+ } else if ((coex_stat->bt_ble_scan_type & 0x2) &&
+ coex_dm->bt_status == COEX_BTSTATUS_NCON_IDLE) {
+ table_case = 114;
+ tdma_case = 103;
+ } else {
+ table_case = 112;
+ tdma_case = 103;
+ }
+ }
+
+ rtw_coex_set_rf_para(rtwdev, chip->wl_rf_para_rx[0]);
+ rtw_coex_table(rtwdev, table_case);
+ rtw_coex_tdma(rtwdev, false, tdma_case);
+}
+
+static void rtw_coex_action_bt_inquiry(struct rtw_dev *rtwdev)
+{
+ struct rtw_coex *coex = &rtwdev->coex;
+ struct rtw_coex_stat *coex_stat = &coex->stat;
+ struct rtw_efuse *efuse = &rtwdev->efuse;
+ struct rtw_chip_info *chip = rtwdev->chip;
+ bool wl_hi_pri = false;
+ u8 table_case, tdma_case;
+
+ if (coex_stat->wl_linkscan_proc || coex_stat->wl_hi_pri_task1 ||
+ coex_stat->wl_hi_pri_task2)
+ wl_hi_pri = true;
+
+ if (efuse->share_ant) {
+ /* Shared-Ant */
+ if (wl_hi_pri) {
+ table_case = 15;
+ if (coex_stat->bt_a2dp_exist &&
+ !coex_stat->bt_pan_exist)
+ tdma_case = 11;
+ else if (coex_stat->wl_hi_pri_task1)
+ tdma_case = 6;
+ else if (!coex_stat->bt_page)
+ tdma_case = 8;
+ else
+ tdma_case = 9;
+ } else if (coex_stat->wl_connected) {
+ table_case = 10;
+ tdma_case = 10;
+ } else {
+ table_case = 1;
+ tdma_case = 0;
+ }
+ } else {
+ /* Non_Shared-Ant */
+ if (wl_hi_pri) {
+ table_case = 113;
+ if (coex_stat->bt_a2dp_exist &&
+ !coex_stat->bt_pan_exist)
+ tdma_case = 111;
+ else if (coex_stat->wl_hi_pri_task1)
+ tdma_case = 106;
+ else if (!coex_stat->bt_page)
+ tdma_case = 108;
+ else
+ tdma_case = 109;
+ } else if (coex_stat->wl_connected) {
+ table_case = 101;
+ tdma_case = 110;
+ } else {
+ table_case = 100;
+ tdma_case = 100;
+ }
+ }
+
+ rtw_dbg(rtwdev, RTW_DBG_COEX, "coex: wifi hi(%d), bt page(%d)\n",
+ wl_hi_pri, coex_stat->bt_page);
+
+ rtw_coex_set_ant_path(rtwdev, false, COEX_SET_ANT_2G);
+ rtw_coex_set_rf_para(rtwdev, chip->wl_rf_para_rx[0]);
+ rtw_coex_table(rtwdev, table_case);
+ rtw_coex_tdma(rtwdev, false, tdma_case);
+}
+
+static void rtw_coex_action_bt_hfp(struct rtw_dev *rtwdev)
+{
+ struct rtw_coex *coex = &rtwdev->coex;
+ struct rtw_coex_stat *coex_stat = &coex->stat;
+ struct rtw_efuse *efuse = &rtwdev->efuse;
+ struct rtw_chip_info *chip = rtwdev->chip;
+ u8 table_case, tdma_case;
+
+ if (efuse->share_ant) {
+ /* Shared-Ant */
+ if (coex_stat->bt_multi_link) {
+ table_case = 10;
+ tdma_case = 17;
+ } else {
+ table_case = 10;
+ tdma_case = 5;
+ }
+ } else {
+ /* Non-Shared-Ant */
+ if (coex_stat->bt_multi_link) {
+ table_case = 112;
+ tdma_case = 117;
+ } else {
+ table_case = 105;
+ tdma_case = 100;
+ }
+ }
+
+ rtw_coex_set_ant_path(rtwdev, false, COEX_SET_ANT_2G);
+ rtw_coex_set_rf_para(rtwdev, chip->wl_rf_para_rx[0]);
+ rtw_coex_table(rtwdev, table_case);
+ rtw_coex_tdma(rtwdev, false, tdma_case);
+}
+
+static void rtw_coex_action_bt_hid(struct rtw_dev *rtwdev)
+{
+ struct rtw_coex *coex = &rtwdev->coex;
+ struct rtw_coex_stat *coex_stat = &coex->stat;
+ struct rtw_efuse *efuse = &rtwdev->efuse;
+ struct rtw_chip_info *chip = rtwdev->chip;
+ u8 table_case, tdma_case;
+ u32 wl_bw;
+
+ wl_bw = rtwdev->hal.current_band_width;
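+ /* wl_bw == 0 is RTW_CHANNEL_WIDTH_20 */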
+
+ if (efuse->share_ant) {
+ /* Shared-Ant */
+ if (coex_stat->bt_ble_exist) {
+ /* RCU (BLE remote control unit) */
+ if (!coex_stat->wl_gl_busy)
+ table_case = 14;
+ else
+ table_case = 15;
+
+ if (coex_stat->bt_a2dp_active || wl_bw == 0)
+ tdma_case = 18;
+ else if (coex_stat->wl_gl_busy)
+ tdma_case = 8;
+ else
+ tdma_case = 4;
+ } else {
+ if (coex_stat->bt_a2dp_active || wl_bw == 0) {
+ table_case = 8;
+ tdma_case = 4;
+ } else {
+ /* for 4/18 HID */
+ if (coex_stat->bt_418_hid_exist &&
+ coex_stat->wl_gl_busy)
+ table_case = 12;
+ else
+ table_case = 10;
+ tdma_case = 4;
+ }
+ }
+ } else {
+ /* Non-Shared-Ant */
+ if (coex_stat->bt_a2dp_active) {
+ table_case = 113;
+ tdma_case = 118;
+ } else if (coex_stat->bt_ble_exist) {
+ /* BLE */
+ table_case = 113;
+
+ if (coex_stat->wl_gl_busy)
+ tdma_case = 106;
+ else
+ tdma_case = 104;
+ } else {
+ table_case = 113;
+ tdma_case = 104;
+ }
+ }
+
+ rtw_coex_set_ant_path(rtwdev, false, COEX_SET_ANT_2G);
+ rtw_coex_set_rf_para(rtwdev, chip->wl_rf_para_rx[0]);
+ rtw_coex_table(rtwdev, table_case);
+ rtw_coex_tdma(rtwdev, false, tdma_case);
+}
+
+static void rtw_coex_action_bt_a2dp(struct rtw_dev *rtwdev)
+{
+ struct rtw_coex *coex = &rtwdev->coex;
+ struct rtw_coex_stat *coex_stat = &coex->stat;
+ struct rtw_coex_dm *coex_dm = &coex->dm;
+ struct rtw_efuse *efuse = &rtwdev->efuse;
+ struct rtw_chip_info *chip = rtwdev->chip;
+ u8 table_case, tdma_case;
+ u32 slot_type = 0;
+
+ if (efuse->share_ant) {
+ /* Shared-Ant */
+ if (coex_stat->wl_gl_busy && coex_stat->wl_noisy_level == 0)
+ table_case = 10;
+ else
+ table_case = 9;
+
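+ /* TDMA_4SLOT is BIT(8); it rides above the 8-bit case number
+ * and is OR'ed into the rtw_coex_tdma() argument below
+ */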
+ slot_type = TDMA_4SLOT;
+
+ if (coex_stat->wl_gl_busy)
+ tdma_case = 13;
+ else
+ tdma_case = 14;
+ } else {
+ /* Non-Shared-Ant */
+ table_case = 112;
+
+ if (COEX_RSSI_HIGH(coex_dm->wl_rssi_state[1]))
+ tdma_case = 112;
+ else
+ tdma_case = 113;
+ }
+
+ rtw_coex_set_ant_path(rtwdev, false, COEX_SET_ANT_2G);
+ rtw_coex_set_rf_para(rtwdev, chip->wl_rf_para_rx[0]);
+ rtw_coex_table(rtwdev, table_case);
+ rtw_coex_tdma(rtwdev, false, tdma_case | slot_type);
+}
+
+static void rtw_coex_action_bt_a2dpsink(struct rtw_dev *rtwdev)
+{
+ struct rtw_coex *coex = &rtwdev->coex;
+ struct rtw_coex_stat *coex_stat = &coex->stat;
+ struct rtw_efuse *efuse = &rtwdev->efuse;
+ struct rtw_chip_info *chip = rtwdev->chip;
+ u8 table_case, tdma_case;
+ bool ap_enable = false;
+
+ if (efuse->share_ant) { /* Shared-Ant */
+ if (ap_enable) {
+ table_case = 2;
+ tdma_case = 0;
+ } else if (coex_stat->wl_gl_busy) {
+ table_case = 28;
+ tdma_case = 20;
+ } else {
+ table_case = 28;
+ tdma_case = 26;
+ }
+ } else { /* Non-Shared-Ant */
+ if (ap_enable) {
+ table_case = 100;
+ tdma_case = 100;
+ } else {
+ table_case = 119;
+ tdma_case = 120;
+ }
+ }
+
+ rtw_coex_set_ant_path(rtwdev, false, COEX_SET_ANT_2G);
+ rtw_coex_set_rf_para(rtwdev, chip->wl_rf_para_rx[0]);
+ rtw_coex_table(rtwdev, table_case);
+ rtw_coex_tdma(rtwdev, false, tdma_case);
+}
+
+static void rtw_coex_action_bt_pan(struct rtw_dev *rtwdev)
+{
+ struct rtw_coex *coex = &rtwdev->coex;
+ struct rtw_coex_stat *coex_stat = &coex->stat;
+ struct rtw_efuse *efuse = &rtwdev->efuse;
+ struct rtw_chip_info *chip = rtwdev->chip;
+ u8 table_case, tdma_case;
+
+ if (efuse->share_ant) {
+ /* Shared-Ant */
+ if (coex_stat->wl_gl_busy && coex_stat->wl_noisy_level == 0)
+ table_case = 14;
+ else
+ table_case = 10;
+
+ if (coex_stat->wl_gl_busy)
+ tdma_case = 17;
+ else
+ tdma_case = 19;
+ } else {
+ /* Non-Shared-Ant */
+ table_case = 112;
+
+ if (coex_stat->wl_gl_busy)
+ tdma_case = 117;
+ else
+ tdma_case = 119;
+ }
+
+ rtw_coex_set_ant_path(rtwdev, false, COEX_SET_ANT_2G);
+ rtw_coex_set_rf_para(rtwdev, chip->wl_rf_para_rx[0]);
+ rtw_coex_table(rtwdev, table_case);
+ rtw_coex_tdma(rtwdev, false, tdma_case);
+}
+
+static void rtw_coex_action_bt_a2dp_hid(struct rtw_dev *rtwdev)
+{
+ struct rtw_coex *coex = &rtwdev->coex;
+ struct rtw_coex_stat *coex_stat = &coex->stat;
+ struct rtw_coex_dm *coex_dm = &coex->dm;
+ struct rtw_efuse *efuse = &rtwdev->efuse;
+ struct rtw_chip_info *chip = rtwdev->chip;
+ u8 table_case, tdma_case;
+ u32 slot_type = 0;
+
+ if (efuse->share_ant) {
+ /* Shared-Ant */
+ if (coex_stat->bt_ble_exist)
+ table_case = 26;
+ else
+ table_case = 9;
+
+ if (coex_stat->wl_gl_busy) {
+ slot_type = TDMA_4SLOT;
+ tdma_case = 13;
+ } else {
+ tdma_case = 14;
+ }
+ } else {
+ /* Non-Shared-Ant */
+ if (coex_stat->bt_ble_exist)
+ table_case = 121;
+ else
+ table_case = 113;
+
+ if (COEX_RSSI_HIGH(coex_dm->wl_rssi_state[1]))
+ tdma_case = 112;
+ else
+ tdma_case = 113;
+ }
+
+ rtw_coex_set_ant_path(rtwdev, false, COEX_SET_ANT_2G);
+ rtw_coex_set_rf_para(rtwdev, chip->wl_rf_para_rx[0]);
+ rtw_coex_table(rtwdev, table_case);
+ rtw_coex_tdma(rtwdev, false, tdma_case | slot_type);
+}
+
+static void rtw_coex_action_bt_a2dp_pan(struct rtw_dev *rtwdev)
+{
+ struct rtw_coex *coex = &rtwdev->coex;
+ struct rtw_coex_stat *coex_stat = &coex->stat;
+ struct rtw_efuse *efuse = &rtwdev->efuse;
+ struct rtw_chip_info *chip = rtwdev->chip;
+ u8 table_case, tdma_case;
+
+ if (efuse->share_ant) {
+ /* Shared-Ant */
+ if (coex_stat->wl_gl_busy &&
+ coex_stat->wl_noisy_level == 0)
+ table_case = 14;
+ else
+ table_case = 10;
+
+ if (coex_stat->wl_gl_busy)
+ tdma_case = 15;
+ else
+ tdma_case = 20;
+ } else {
+ /* Non-Shared-Ant */
+ table_case = 112;
+
+ if (coex_stat->wl_gl_busy)
+ tdma_case = 115;
+ else
+ tdma_case = 120;
+ }
+
+ rtw_coex_set_ant_path(rtwdev, false, COEX_SET_ANT_2G);
+ rtw_coex_set_rf_para(rtwdev, chip->wl_rf_para_rx[0]);
+ rtw_coex_table(rtwdev, table_case);
+ rtw_coex_tdma(rtwdev, false, tdma_case);
+}
+
+static void rtw_coex_action_bt_pan_hid(struct rtw_dev *rtwdev)
+{
+ struct rtw_coex *coex = &rtwdev->coex;
+ struct rtw_coex_stat *coex_stat = &coex->stat;
+ struct rtw_efuse *efuse = &rtwdev->efuse;
+ struct rtw_chip_info *chip = rtwdev->chip;
+ u8 table_case, tdma_case;
+
+ if (efuse->share_ant) {
+ /* Shared-Ant */
+ table_case = 9;
+
+ if (coex_stat->wl_gl_busy)
+ tdma_case = 18;
+ else
+ tdma_case = 19;
+ } else {
+ /* Non-Shared-Ant */
+ table_case = 113;
+
+ if (coex_stat->wl_gl_busy)
+ tdma_case = 117;
+ else
+ tdma_case = 119;
+ }
+
+ rtw_coex_set_ant_path(rtwdev, false, COEX_SET_ANT_2G);
+ rtw_coex_set_rf_para(rtwdev, chip->wl_rf_para_rx[0]);
+ rtw_coex_table(rtwdev, table_case);
+ rtw_coex_tdma(rtwdev, false, tdma_case);
+}
+
+static void rtw_coex_action_bt_a2dp_pan_hid(struct rtw_dev *rtwdev)
+{
+ struct rtw_coex *coex = &rtwdev->coex;
+ struct rtw_coex_stat *coex_stat = &coex->stat;
+ struct rtw_efuse *efuse = &rtwdev->efuse;
+ struct rtw_chip_info *chip = rtwdev->chip;
+ u8 table_case, tdma_case;
+
+ if (efuse->share_ant) {
+ /* Shared-Ant */
+ table_case = 10;
+
+ if (coex_stat->wl_gl_busy)
+ tdma_case = 15;
+ else
+ tdma_case = 20;
+ } else {
+ /* Non-Shared-Ant */
+ table_case = 113;
+
+ if (coex_stat->wl_gl_busy)
+ tdma_case = 115;
+ else
+ tdma_case = 120;
+ }
+
+ rtw_coex_set_ant_path(rtwdev, false, COEX_SET_ANT_2G);
+ rtw_coex_set_rf_para(rtwdev, chip->wl_rf_para_rx[0]);
+ rtw_coex_table(rtwdev, table_case);
+ rtw_coex_tdma(rtwdev, false, tdma_case);
+}
+
+static void rtw_coex_action_wl_under5g(struct rtw_dev *rtwdev)
+{
+ struct rtw_efuse *efuse = &rtwdev->efuse;
+ struct rtw_chip_info *chip = rtwdev->chip;
+ u8 table_case, tdma_case;
+
+ rtw_coex_write_scbd(rtwdev, COEX_SCBD_FIX2M, false);
+
+ if (efuse->share_ant) {
+ /* Shared-Ant */
+ table_case = 0;
+ tdma_case = 0;
+ } else {
+ /* Non-Shared-Ant */
+ table_case = 100;
+ tdma_case = 100;
+ }
+
+ rtw_coex_set_ant_path(rtwdev, true, COEX_SET_ANT_5G);
+ rtw_coex_set_rf_para(rtwdev, chip->wl_rf_para_rx[0]);
+ rtw_coex_table(rtwdev, table_case);
+ rtw_coex_tdma(rtwdev, false, tdma_case);
+}
+
+static void rtw_coex_action_wl_only(struct rtw_dev *rtwdev)
+{
+ struct rtw_efuse *efuse = &rtwdev->efuse;
+ struct rtw_chip_info *chip = rtwdev->chip;
+ u8 table_case, tdma_case;
+
+ if (efuse->share_ant) {
+ /* Shared-Ant */
+ table_case = 2;
+ tdma_case = 0;
+ } else {
+ /* Non-Shared-Ant */
+ table_case = 100;
+ tdma_case = 100;
+ }
+
+ rtw_coex_set_ant_path(rtwdev, true, COEX_SET_ANT_2G);
+ rtw_coex_set_rf_para(rtwdev, chip->wl_rf_para_rx[0]);
+ rtw_coex_table(rtwdev, table_case);
+ rtw_coex_tdma(rtwdev, false, tdma_case);
+}
+
+static void rtw_coex_action_wl_native_lps(struct rtw_dev *rtwdev)
+{
+ struct rtw_coex *coex = &rtwdev->coex;
+ struct rtw_efuse *efuse = &rtwdev->efuse;
+ struct rtw_chip_info *chip = rtwdev->chip;
+ u8 table_case, tdma_case;
+
+ if (coex->under_5g)
+ return;
+
+ if (efuse->share_ant) {
+ /* Shared-Ant */
+ table_case = 28;
+ tdma_case = 0;
+ } else {
+ /* Non-Shared-Ant */
+ table_case = 100;
+ tdma_case = 100;
+ }
+
+ rtw_coex_set_ant_path(rtwdev, true, COEX_SET_ANT_2G);
+ rtw_coex_set_rf_para(rtwdev, chip->wl_rf_para_rx[0]);
+ rtw_coex_table(rtwdev, table_case);
+ rtw_coex_tdma(rtwdev, false, tdma_case);
+}
+
+static void rtw_coex_action_wl_linkscan(struct rtw_dev *rtwdev)
+{
+ struct rtw_coex *coex = &rtwdev->coex;
+ struct rtw_coex_stat *coex_stat = &coex->stat;
+ struct rtw_efuse *efuse = &rtwdev->efuse;
+ struct rtw_chip_info *chip = rtwdev->chip;
+ u8 table_case, tdma_case;
+
+ if (efuse->share_ant) {
+ /* Shared-Ant */
+ if (coex_stat->bt_a2dp_exist) {
+ table_case = 9;
+ tdma_case = 11;
+ } else {
+ table_case = 9;
+ tdma_case = 7;
+ }
+ } else {
+ /* Non-Shared-Ant */
+ if (coex_stat->bt_a2dp_exist) {
+ table_case = 112;
+ tdma_case = 111;
+ } else {
+ table_case = 112;
+ tdma_case = 107;
+ }
+ }
+
+ rtw_coex_set_ant_path(rtwdev, true, COEX_SET_ANT_2G);
+ rtw_coex_set_rf_para(rtwdev, chip->wl_rf_para_rx[0]);
+ rtw_coex_table(rtwdev, table_case);
+ rtw_coex_tdma(rtwdev, false, tdma_case);
+}
+
+static void rtw_coex_action_wl_not_connected(struct rtw_dev *rtwdev)
+{
+ struct rtw_efuse *efuse = &rtwdev->efuse;
+ struct rtw_chip_info *chip = rtwdev->chip;
+ u8 table_case, tdma_case;
+
+ if (efuse->share_ant) {
+ /* Shared-Ant */
+ table_case = 1;
+ tdma_case = 0;
+ } else {
+ /* Non-Shared-Ant */
+ table_case = 100;
+ tdma_case = 100;
+ }
+
+ rtw_coex_set_ant_path(rtwdev, true, COEX_SET_ANT_2G);
+ rtw_coex_set_rf_para(rtwdev, chip->wl_rf_para_rx[0]);
+ rtw_coex_table(rtwdev, table_case);
+ rtw_coex_tdma(rtwdev, false, tdma_case);
+}
+
+static void rtw_coex_action_wl_connected(struct rtw_dev *rtwdev)
+{
+ struct rtw_coex *coex = &rtwdev->coex;
+ struct rtw_coex_stat *coex_stat = &coex->stat;
+ struct rtw_coex_dm *coex_dm = &coex->dm;
+ struct rtw_efuse *efuse = &rtwdev->efuse;
+ u8 algorithm;
+
+ /* Non-Shared-Ant */
+ if (!efuse->share_ant && coex_stat->wl_gl_busy &&
+ COEX_RSSI_HIGH(coex_dm->wl_rssi_state[3]) &&
+ COEX_RSSI_HIGH(coex_dm->bt_rssi_state[0])) {
+ rtw_coex_action_freerun(rtwdev);
+ return;
+ }
+
+ algorithm = rtw_coex_algorithm(rtwdev);
+
+ switch (algorithm) {
+ case COEX_ALGO_HFP:
+ rtw_coex_action_bt_hfp(rtwdev);
+ break;
+ case COEX_ALGO_HID:
+ rtw_coex_action_bt_hid(rtwdev);
+ break;
+ case COEX_ALGO_A2DP:
+ if (coex_stat->bt_a2dp_sink)
+ rtw_coex_action_bt_a2dpsink(rtwdev);
+ else
+ rtw_coex_action_bt_a2dp(rtwdev);
+ break;
+ case COEX_ALGO_PAN:
+ rtw_coex_action_bt_pan(rtwdev);
+ break;
+ case COEX_ALGO_A2DP_HID:
+ rtw_coex_action_bt_a2dp_hid(rtwdev);
+ break;
+ case COEX_ALGO_A2DP_PAN:
+ rtw_coex_action_bt_a2dp_pan(rtwdev);
+ break;
+ case COEX_ALGO_PAN_HID:
+ rtw_coex_action_bt_pan_hid(rtwdev);
+ break;
+ case COEX_ALGO_A2DP_PAN_HID:
+ rtw_coex_action_bt_a2dp_pan_hid(rtwdev);
+ break;
+ default:
+ case COEX_ALGO_NOPROFILE:
+ rtw_coex_action_bt_idle(rtwdev);
+ break;
+ }
+}
+
+static void rtw_coex_run_coex(struct rtw_dev *rtwdev, u8 reason)
+{
+ struct rtw_coex *coex = &rtwdev->coex;
+ struct rtw_coex_dm *coex_dm = &coex->dm;
+ struct rtw_coex_stat *coex_stat = &coex->stat;
+
+ lockdep_assert_held(&rtwdev->mutex);
+
+ coex_dm->reason = reason;
+
+ /* update wifi_link_info_ext variable */
+ rtw_coex_update_wl_link_info(rtwdev, reason);
+
+ rtw_coex_monitor_bt_enable(rtwdev);
+
+ if (coex->stop_dm)
+ return;
+
+ if (coex_stat->wl_under_ips)
+ return;
+
+ if (coex->freeze && !coex_stat->bt_setup_link)
+ return;
+
+ coex_stat->cnt_wl[COEX_CNT_WL_COEXRUN]++;
+ coex->freerun = false;
+
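+ /* the checks below run in priority order; each action programs
+ * the antenna path, coex table and tdma, then jumps to exit
+ */
+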
+ /* Pure-5G Coex Process */
+ if (coex->under_5g) {
+ coex_stat->wl_coex_mode = COEX_WLINK_5G;
+ rtw_coex_action_wl_under5g(rtwdev);
+ goto exit;
+ }
+
+ coex_stat->wl_coex_mode = COEX_WLINK_2G1PORT;
+ rtw_coex_write_scbd(rtwdev, COEX_SCBD_FIX2M, false);
+ if (coex_stat->bt_disabled) {
+ rtw_coex_action_wl_only(rtwdev);
+ goto exit;
+ }
+
+ if (coex_stat->wl_under_lps && !coex_stat->wl_force_lps_ctrl) {
+ rtw_coex_action_wl_native_lps(rtwdev);
+ goto exit;
+ }
+
+ if (coex_stat->bt_whck_test) {
+ rtw_coex_action_bt_whql_test(rtwdev);
+ goto exit;
+ }
+
+ if (coex_stat->bt_setup_link) {
+ rtw_coex_action_bt_relink(rtwdev);
+ goto exit;
+ }
+
+ if (coex_stat->bt_inq_page) {
+ rtw_coex_action_bt_inquiry(rtwdev);
+ goto exit;
+ }
+
+ if ((coex_dm->bt_status == COEX_BTSTATUS_NCON_IDLE ||
+ coex_dm->bt_status == COEX_BTSTATUS_CON_IDLE) &&
+ coex_stat->wl_connected) {
+ rtw_coex_action_bt_idle(rtwdev);
+ goto exit;
+ }
+
+ if (coex_stat->wl_linkscan_proc) {
+ rtw_coex_action_wl_linkscan(rtwdev);
+ goto exit;
+ }
+
+ if (coex_stat->wl_connected)
+ rtw_coex_action_wl_connected(rtwdev);
+ else
+ rtw_coex_action_wl_not_connected(rtwdev);
+
+exit:
+ rtw_coex_set_gnt_fix(rtwdev);
+ rtw_coex_limited_wl(rtwdev);
+}
+
+static void rtw_coex_init_coex_var(struct rtw_dev *rtwdev)
+{
+ struct rtw_coex *coex = &rtwdev->coex;
+ struct rtw_coex_stat *coex_stat = &coex->stat;
+ struct rtw_coex_dm *coex_dm = &coex->dm;
+ u8 i;
+
+ memset(coex_dm, 0, sizeof(*coex_dm));
+ memset(coex_stat, 0, sizeof(*coex_stat));
+
+ for (i = 0; i < COEX_CNT_WL_MAX; i++)
+ coex_stat->cnt_wl[i] = 0;
+
+ for (i = 0; i < COEX_CNT_BT_MAX; i++)
+ coex_stat->cnt_bt[i] = 0;
+
+ for (i = 0; i < ARRAY_SIZE(coex_dm->bt_rssi_state); i++)
+ coex_dm->bt_rssi_state[i] = COEX_RSSI_STATE_LOW;
+
+ for (i = 0; i < ARRAY_SIZE(coex_dm->wl_rssi_state); i++)
+ coex_dm->wl_rssi_state[i] = COEX_RSSI_STATE_LOW;
+
+ coex_stat->wl_coex_mode = COEX_WLINK_MAX;
+}
+
+static void __rtw_coex_init_hw_config(struct rtw_dev *rtwdev, bool wifi_only)
+{
+ struct rtw_coex *coex = &rtwdev->coex;
+
+ rtw_coex_init_coex_var(rtwdev);
+ rtw_coex_monitor_bt_enable(rtwdev);
+ rtw_coex_set_rfe_type(rtwdev);
+ rtw_coex_set_init(rtwdev);
+
+ /* set Tx response = Hi-Pri (e.g. transmitting ACK, BA, CTS) */
+ rtw_coex_set_wl_pri_mask(rtwdev, COEX_WLPRI_TX_RSP, 1);
+
+ /* set Tx beacon = Hi-Pri */
+ rtw_coex_set_wl_pri_mask(rtwdev, COEX_WLPRI_TX_BEACON, 1);
+
+ /* set Tx beacon queue = Hi-Pri */
+ rtw_coex_set_wl_pri_mask(rtwdev, COEX_WLPRI_TX_BEACONQ, 1);
+
+ /* antenna config */
+ if (coex->wl_rf_off) {
+ rtw_coex_set_ant_path(rtwdev, true, COEX_SET_ANT_WOFF);
+ rtw_coex_write_scbd(rtwdev, COEX_SCBD_ALL, false);
+ coex->stop_dm = true;
+ } else if (wifi_only) {
+ rtw_coex_set_ant_path(rtwdev, true, COEX_SET_ANT_WONLY);
+ rtw_coex_write_scbd(rtwdev, COEX_SCBD_ACTIVE | COEX_SCBD_SCAN,
+ true);
+ coex->stop_dm = true;
+ } else {
+ rtw_coex_set_ant_path(rtwdev, true, COEX_SET_ANT_INIT);
+ rtw_coex_write_scbd(rtwdev, COEX_SCBD_ACTIVE | COEX_SCBD_SCAN,
+ true);
+ coex->stop_dm = false;
+ coex->freeze = true;
+ }
+
+ /* PTA parameter */
+ rtw_coex_table(rtwdev, 0);
+ rtw_coex_tdma(rtwdev, true, 0);
+ rtw_coex_query_bt_info(rtwdev);
+}
+
+void rtw_coex_power_on_setting(struct rtw_dev *rtwdev)
+{
+ struct rtw_coex *coex = &rtwdev->coex;
+
+ coex->stop_dm = true;
+ coex->wl_rf_off = false;
+
+ /* enable BB so that register 0x948 becomes writable */
+ rtw_write8_set(rtwdev, REG_SYS_FUNC_EN, BIT(0) | BIT(1));
+
+ rtw_coex_monitor_bt_enable(rtwdev);
+ rtw_coex_set_rfe_type(rtwdev);
+
+ /* set antenna path to BT */
+ rtw_coex_set_ant_path(rtwdev, true, COEX_SET_ANT_POWERON);
+
+ /* red x issue */
+ rtw_write8(rtwdev, 0xff1a, 0x0);
+}
+
+void rtw_coex_init_hw_config(struct rtw_dev *rtwdev, bool wifi_only)
+{
+ __rtw_coex_init_hw_config(rtwdev, wifi_only);
+}
+
+void rtw_coex_ips_notify(struct rtw_dev *rtwdev, u8 type)
+{
+ struct rtw_coex *coex = &rtwdev->coex;
+ struct rtw_coex_stat *coex_stat = &coex->stat;
+
+ if (coex->stop_dm)
+ return;
+
+ if (type == COEX_IPS_ENTER) {
+ coex_stat->wl_under_ips = true;
+
+ /* for lps off */
+ rtw_coex_write_scbd(rtwdev, COEX_SCBD_ALL, false);
+
+ rtw_coex_set_ant_path(rtwdev, true, COEX_SET_ANT_WOFF);
+ rtw_coex_action_coex_all_off(rtwdev);
+ } else if (type == COEX_IPS_LEAVE) {
+ rtw_coex_write_scbd(rtwdev, COEX_SCBD_ACTIVE | COEX_SCBD_ONOFF, true);
+
+ /* re-run the init hw config (not in wifi-only mode) */
+ __rtw_coex_init_hw_config(rtwdev, false);
+ /* sw all off */
+
+ coex_stat->wl_under_ips = false;
+ }
+}
+
+void rtw_coex_lps_notify(struct rtw_dev *rtwdev, u8 type)
+{
+ struct rtw_coex *coex = &rtwdev->coex;
+ struct rtw_coex_stat *coex_stat = &coex->stat;
+
+ if (coex->stop_dm)
+ return;
+
+ if (type == COEX_LPS_ENABLE) {
+ coex_stat->wl_under_lps = true;
+
+ if (coex_stat->wl_force_lps_ctrl) {
+ /* for ps-tdma */
+ rtw_coex_write_scbd(rtwdev, COEX_SCBD_ACTIVE, true);
+ } else {
+ /* for native ps */
+ rtw_coex_write_scbd(rtwdev, COEX_SCBD_ACTIVE, false);
+
+ rtw_coex_run_coex(rtwdev, COEX_RSN_LPS);
+ }
+ } else if (type == COEX_LPS_DISABLE) {
+ coex_stat->wl_under_lps = false;
+
+ /* for lps off */
+ rtw_coex_write_scbd(rtwdev, COEX_SCBD_ACTIVE, true);
+
+ if (!coex_stat->wl_force_lps_ctrl)
+ rtw_coex_query_bt_info(rtwdev);
+ }
+}
+
+void rtw_coex_scan_notify(struct rtw_dev *rtwdev, u8 type)
+{
+ struct rtw_coex *coex = &rtwdev->coex;
+ struct rtw_coex_stat *coex_stat = &coex->stat;
+
+ if (coex->stop_dm)
+ return;
+
+ coex->freeze = false;
+
+ if (type != COEX_SCAN_FINISH)
+ rtw_coex_write_scbd(rtwdev, COEX_SCBD_ACTIVE | COEX_SCBD_SCAN |
+ COEX_SCBD_ONOFF, true);
+
+ if (type == COEX_SCAN_START_5G) {
+ rtw_coex_set_ant_path(rtwdev, true, COEX_SET_ANT_5G);
+ rtw_coex_run_coex(rtwdev, COEX_RSN_5GSCANSTART);
+ } else if ((type == COEX_SCAN_START_2G) || (type == COEX_SCAN_START)) {
+ coex_stat->wl_hi_pri_task2 = true;
+
+ /* force the antenna setup to work around the no-scan-result issue */
+ rtw_coex_set_ant_path(rtwdev, true, COEX_SET_ANT_2G);
+ rtw_coex_run_coex(rtwdev, COEX_RSN_2GSCANSTART);
+ } else {
+ coex_stat->wl_hi_pri_task2 = false;
+ rtw_coex_run_coex(rtwdev, COEX_RSN_SCANFINISH);
+ }
+}
+
+void rtw_coex_switchband_notify(struct rtw_dev *rtwdev, u8 type)
+{
+ struct rtw_coex *coex = &rtwdev->coex;
+
+ if (coex->stop_dm)
+ return;
+
+ if (type == COEX_SWITCH_TO_5G)
+ rtw_coex_run_coex(rtwdev, COEX_RSN_5GSWITCHBAND);
+ else if (type == COEX_SWITCH_TO_24G_NOFORSCAN)
+ rtw_coex_run_coex(rtwdev, COEX_RSN_2GSWITCHBAND);
+ else
+ rtw_coex_scan_notify(rtwdev, COEX_SCAN_START_2G);
+}
+
+void rtw_coex_connect_notify(struct rtw_dev *rtwdev, u8 type)
+{
+ struct rtw_coex *coex = &rtwdev->coex;
+ struct rtw_coex_stat *coex_stat = &coex->stat;
+
+ if (coex->stop_dm)
+ return;
+
+ rtw_coex_write_scbd(rtwdev, COEX_SCBD_ACTIVE | COEX_SCBD_SCAN |
+ COEX_SCBD_ONOFF, true);
+
+ if (type == COEX_ASSOCIATE_5G_START) {
+ rtw_coex_set_ant_path(rtwdev, true, COEX_SET_ANT_5G);
+ rtw_coex_run_coex(rtwdev, COEX_RSN_5GCONSTART);
+ } else if (type == COEX_ASSOCIATE_5G_FINISH) {
+ rtw_coex_set_ant_path(rtwdev, true, COEX_SET_ANT_5G);
+ rtw_coex_run_coex(rtwdev, COEX_RSN_5GCONFINISH);
+ } else if (type == COEX_ASSOCIATE_START) {
+ coex_stat->wl_hi_pri_task1 = true;
+ coex_stat->cnt_wl[COEX_CNT_WL_CONNPKT] = 2;
+
+ /* force the antenna setup to work around the no-scan-result issue */
+ rtw_coex_set_ant_path(rtwdev, true, COEX_SET_ANT_2G);
+
+ rtw_coex_run_coex(rtwdev, COEX_RSN_2GCONSTART);
+
+ /* Keep the TDMA case fixed during the connect process,
+ * so it is not changed by BT info events or the run-coex
+ * mechanism.
+ */
+ coex->freeze = true;
+ ieee80211_queue_delayed_work(rtwdev->hw, &coex->defreeze_work,
+ 5 * HZ);
+ } else {
+ coex_stat->wl_hi_pri_task1 = false;
+ coex->freeze = false;
+
+ rtw_coex_run_coex(rtwdev, COEX_RSN_2GCONFINISH);
+ }
+}
+
+void rtw_coex_media_status_notify(struct rtw_dev *rtwdev, u8 type)
+{
+ struct rtw_coex *coex = &rtwdev->coex;
+ struct rtw_coex_stat *coex_stat = &coex->stat;
+ u8 para[6] = {0};
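+ /* para[0] is the op code, para[1..5] are the data bytes of the
+ * bt-wifi control H2C
+ */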
+
+ if (coex->stop_dm)
+ return;
+
+ if (type == COEX_MEDIA_CONNECT_5G) {
+ rtw_coex_write_scbd(rtwdev, COEX_SCBD_ACTIVE, true);
+
+ rtw_coex_set_ant_path(rtwdev, true, COEX_SET_ANT_5G);
+ rtw_coex_run_coex(rtwdev, COEX_RSN_5GMEDIA);
+ } else if (type == COEX_MEDIA_CONNECT) {
+ rtw_coex_write_scbd(rtwdev, COEX_SCBD_ACTIVE, true);
+
+ /* force the antenna setup to work around the no-scan-result issue */
+ rtw_coex_set_ant_path(rtwdev, true, COEX_SET_ANT_2G);
+
+ /* Set CCK Rx high Pri */
+ rtw_coex_set_wl_pri_mask(rtwdev, COEX_WLPRI_RX_CCK, 1);
+
+ /* always enable the 5 ms slot extension when connected */
+ para[0] = COEX_H2C69_WL_LEAKAP;
+ para[1] = PARA1_H2C69_EN_5MS; /* enable 5ms extend */
+ rtw_fw_bt_wifi_control(rtwdev, para[0], &para[1]);
+ coex_stat->wl_slot_extend = true;
+ rtw_coex_run_coex(rtwdev, COEX_RSN_2GMEDIA);
+ } else {
+ rtw_coex_write_scbd(rtwdev, COEX_SCBD_ACTIVE, false);
+
+ rtw_coex_set_wl_pri_mask(rtwdev, COEX_WLPRI_RX_CCK, 0);
+
+ rtw_coex_run_coex(rtwdev, COEX_RSN_MEDIADISCON);
+ }
+
+ rtw_coex_update_wl_ch_info(rtwdev, type);
+}
+
+void rtw_coex_bt_info_notify(struct rtw_dev *rtwdev, u8 *buf, u8 length)
+{
+ struct rtw_coex *coex = &rtwdev->coex;
+ struct rtw_coex_stat *coex_stat = &coex->stat;
+ struct rtw_chip_info *chip = rtwdev->chip;
+ unsigned long bt_relink_time;
+ u8 i, rsp_source = 0, type;
+
+ rsp_source = buf[0] & 0xf;
+ if (rsp_source >= COEX_BTINFO_SRC_MAX)
+ rsp_source = COEX_BTINFO_SRC_WL_FW;
+
+ if (rsp_source == COEX_BTINFO_SRC_BT_IQK) {
+ coex_stat->bt_iqk_state = buf[1];
+ if (coex_stat->bt_iqk_state == 1)
+ coex_stat->cnt_bt[COEX_CNT_BT_IQK]++;
+ else if (coex_stat->bt_iqk_state == 2)
+ coex_stat->cnt_bt[COEX_CNT_BT_IQKFAIL]++;
+
+ return;
+ }
+
+ if (rsp_source == COEX_BTINFO_SRC_BT_SCBD) {
+ rtw_coex_monitor_bt_enable(rtwdev);
+ if (coex_stat->bt_disabled != coex_stat->bt_disabled_pre) {
+ coex_stat->bt_disabled_pre = coex_stat->bt_disabled;
+ rtw_coex_run_coex(rtwdev, COEX_RSN_BTINFO);
+ }
+ return;
+ }
+
+ if (rsp_source == COEX_BTINFO_SRC_BT_RSP ||
+ rsp_source == COEX_BTINFO_SRC_BT_ACT) {
+ if (coex_stat->bt_disabled) {
+ coex_stat->bt_disabled = false;
+ coex_stat->bt_reenable = true;
+ ieee80211_queue_delayed_work(rtwdev->hw,
+ &coex->bt_reenable_work,
+ 15 * HZ);
+ }
+ }
+
+ for (i = 0; i < length; i++) {
+ if (i < COEX_BTINFO_LENGTH_MAX)
+ coex_stat->bt_info_c2h[rsp_source][i] = buf[i];
+ else
+ break;
+ }
+
+ if (rsp_source == COEX_BTINFO_SRC_WL_FW) {
+ rtw_coex_update_bt_link_info(rtwdev);
+ rtw_coex_run_coex(rtwdev, COEX_RSN_BTINFO);
+ return;
+ }
+
+ /* bt reported the same info as last time, skip it */
+ if (coex_stat->bt_info_c2h[rsp_source][1] == coex_stat->bt_info_lb2 &&
+ coex_stat->bt_info_c2h[rsp_source][2] == coex_stat->bt_info_lb3 &&
+ coex_stat->bt_info_c2h[rsp_source][3] == coex_stat->bt_info_hb0 &&
+ coex_stat->bt_info_c2h[rsp_source][4] == coex_stat->bt_info_hb1 &&
+ coex_stat->bt_info_c2h[rsp_source][5] == coex_stat->bt_info_hb2 &&
+ coex_stat->bt_info_c2h[rsp_source][6] == coex_stat->bt_info_hb3)
+ return;
+
+ coex_stat->bt_info_lb2 = coex_stat->bt_info_c2h[rsp_source][1];
+ coex_stat->bt_info_lb3 = coex_stat->bt_info_c2h[rsp_source][2];
+ coex_stat->bt_info_hb0 = coex_stat->bt_info_c2h[rsp_source][3];
+ coex_stat->bt_info_hb1 = coex_stat->bt_info_c2h[rsp_source][4];
+ coex_stat->bt_info_hb2 = coex_stat->bt_info_c2h[rsp_source][5];
+ coex_stat->bt_info_hb3 = coex_stat->bt_info_c2h[rsp_source][6];
+
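+ /* decode the cached bytes: lb2/lb3 carry link status and retry
+ * counts, hb0 the bt rssi, hb1/hb2 the profile and link-role
+ * bits, hb3 the a2dp bitpool and sink flag
+ */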
+ /* 0xff means BT is under WHCK test */
+ coex_stat->bt_whck_test = (coex_stat->bt_info_lb2 == 0xff);
+ coex_stat->bt_inq_page = ((coex_stat->bt_info_lb2 & BIT(2)) == BIT(2));
+ coex_stat->bt_acl_busy = ((coex_stat->bt_info_lb2 & BIT(3)) == BIT(3));
+ coex_stat->cnt_bt[COEX_CNT_BT_RETRY] = coex_stat->bt_info_lb3 & 0xf;
+ if (coex_stat->cnt_bt[COEX_CNT_BT_RETRY] >= 1)
+ coex_stat->cnt_bt[COEX_CNT_BT_POPEVENT]++;
+
+ coex_stat->bt_fix_2M = ((coex_stat->bt_info_lb3 & BIT(4)) == BIT(4));
+ coex_stat->bt_inq = ((coex_stat->bt_info_lb3 & BIT(5)) == BIT(5));
+ if (coex_stat->bt_inq)
+ coex_stat->cnt_bt[COEX_CNT_BT_INQ]++;
+
+ coex_stat->bt_page = ((coex_stat->bt_info_lb3 & BIT(7)) == BIT(7));
+ if (coex_stat->bt_page) {
+ coex_stat->cnt_bt[COEX_CNT_BT_PAGE]++;
+ if (coex_stat->wl_linkscan_proc ||
+ coex_stat->wl_hi_pri_task1 ||
+ coex_stat->wl_hi_pri_task2 || coex_stat->wl_gl_busy)
+ rtw_coex_write_scbd(rtwdev, COEX_SCBD_SCAN, true);
+ else
+ rtw_coex_write_scbd(rtwdev, COEX_SCBD_SCAN, false);
+ } else {
+ rtw_coex_write_scbd(rtwdev, COEX_SCBD_SCAN, false);
+ }
+
+ /* bt_rssi unit: % (subtract 100 to convert to dBm in the coex info) */
+ if (chip->bt_rssi_type == COEX_BTRSSI_RATIO) {
+ coex_stat->bt_rssi = coex_stat->bt_info_hb0 * 2 + 10;
+ } else { /* original unit: dBm -> unit: % -> value-100 in coex info */
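+ /* e.g. bt_info_hb0 = 200 (-56 dBm) -> bt_rssi = 100 - 56 = 44 */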
+ if (coex_stat->bt_info_hb0 <= 127)
+ coex_stat->bt_rssi = 100;
+ else if (256 - coex_stat->bt_info_hb0 <= 100)
+ coex_stat->bt_rssi = 100 - (256 - coex_stat->bt_info_hb0);
+ else
+ coex_stat->bt_rssi = 0;
+ }
+
+ coex_stat->bt_ble_exist = ((coex_stat->bt_info_hb1 & BIT(0)) == BIT(0));
+ if (coex_stat->bt_info_hb1 & BIT(1))
+ coex_stat->cnt_bt[COEX_CNT_BT_REINIT]++;
+
+ if (coex_stat->bt_info_hb1 & BIT(2)) {
+ coex_stat->cnt_bt[COEX_CNT_BT_SETUPLINK]++;
+ coex_stat->bt_setup_link = true;
+ if (coex_stat->bt_reenable)
+ bt_relink_time = 6 * HZ;
+ else
+ bt_relink_time = 2 * HZ;
+
+ ieee80211_queue_delayed_work(rtwdev->hw,
+ &coex->bt_relink_work,
+ bt_relink_time);
+ }
+
+ if (coex_stat->bt_info_hb1 & BIT(3))
+ coex_stat->cnt_bt[COEX_CNT_BT_IGNWLANACT]++;
+
+ coex_stat->bt_ble_voice = ((coex_stat->bt_info_hb1 & BIT(4)) == BIT(4));
+ coex_stat->bt_ble_scan_en = ((coex_stat->bt_info_hb1 & BIT(5)) == BIT(5));
+ if (coex_stat->bt_info_hb1 & BIT(6))
+ coex_stat->cnt_bt[COEX_CNT_BT_ROLESWITCH]++;
+
+ coex_stat->bt_multi_link = ((coex_stat->bt_info_hb1 & BIT(7)) == BIT(7));
+ /* resend wifi info to bt, since bt was reset and lost it */
+ if ((coex_stat->bt_info_hb1 & BIT(1))) {
+ if (coex_stat->wl_connected)
+ type = COEX_MEDIA_CONNECT;
+ else
+ type = COEX_MEDIA_DISCONNECT;
+ rtw_coex_update_wl_ch_info(rtwdev, type);
+ }
+
+ /* bt ignores wlan activity but no link setup is running: tell it to stop ignoring */
+ if ((coex_stat->bt_info_hb1 & BIT(3)) &&
+ (!(coex_stat->bt_info_hb1 & BIT(2))))
+ rtw_coex_ignore_wlan_act(rtwdev, false);
+
+ coex_stat->bt_opp_exist = ((coex_stat->bt_info_hb2 & BIT(0)) == BIT(0));
+ if (coex_stat->bt_info_hb2 & BIT(1))
+ coex_stat->cnt_bt[COEX_CNT_BT_AFHUPDATE]++;
+
+ coex_stat->bt_a2dp_active = (coex_stat->bt_info_hb2 & BIT(2)) == BIT(2);
+ coex_stat->bt_slave = ((coex_stat->bt_info_hb2 & BIT(3)) == BIT(3));
+ coex_stat->bt_hid_slot = (coex_stat->bt_info_hb2 & 0x30) >> 4;
+ coex_stat->bt_hid_pair_num = (coex_stat->bt_info_hb2 & 0xc0) >> 6;
+ if (coex_stat->bt_hid_pair_num > 0 && coex_stat->bt_hid_slot >= 2)
+ coex_stat->bt_418_hid_exist = true;
+ else if (coex_stat->bt_hid_pair_num == 0)
+ coex_stat->bt_418_hid_exist = false;
+
+ if ((coex_stat->bt_info_lb2 & 0x49) == 0x49)
+ coex_stat->bt_a2dp_bitpool = (coex_stat->bt_info_hb3 & 0x7f);
+ else
+ coex_stat->bt_a2dp_bitpool = 0;
+
+ coex_stat->bt_a2dp_sink = ((coex_stat->bt_info_hb3 & BIT(7)) == BIT(7));
+
+ rtw_coex_update_bt_link_info(rtwdev);
+ rtw_coex_run_coex(rtwdev, COEX_RSN_BTINFO);
+}
+
+void rtw_coex_wl_fwdbginfo_notify(struct rtw_dev *rtwdev, u8 *buf, u8 length)
+{
+ struct rtw_coex *coex = &rtwdev->coex;
+ struct rtw_coex_stat *coex_stat = &coex->stat;
+ u8 val;
+ int i;
+
+ if (WARN(length < 8, "invalid wl info c2h length\n"))
+ return;
+
+ if (buf[0] != 0x08)
+ return;
+
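+ /* record the absolute per-period change of each firmware counter */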
+ for (i = 1; i < 8; i++) {
+ val = coex_stat->wl_fw_dbg_info_pre[i];
+ if (buf[i] >= val)
+ coex_stat->wl_fw_dbg_info[i] = buf[i] - val;
+ else
+ coex_stat->wl_fw_dbg_info[i] = val - buf[i];
+
+ coex_stat->wl_fw_dbg_info_pre[i] = buf[i];
+ }
+
+ coex_stat->cnt_wl[COEX_CNT_WL_FW_NOTIFY]++;
+ rtw_coex_wl_ccklock_action(rtwdev);
+ rtw_coex_wl_ccklock_detect(rtwdev);
+}
+
+void rtw_coex_coex_dm_reset(struct rtw_dev *rtwdev)
+{
+ __rtw_coex_init_hw_config(rtwdev, false);
+}
+
+void rtw_coex_wl_status_change_notify(struct rtw_dev *rtwdev)
+{
+ struct rtw_coex *coex = &rtwdev->coex;
+
+ if (coex->stop_dm)
+ return;
+
+ rtw_coex_run_coex(rtwdev, COEX_RSN_WLSTATUS);
+}
+
+void rtw_coex_bt_relink_work(struct work_struct *work)
+{
+ struct rtw_dev *rtwdev = container_of(work, struct rtw_dev,
+ coex.bt_relink_work.work);
+ struct rtw_coex_stat *coex_stat = &rtwdev->coex.stat;
+
+ mutex_lock(&rtwdev->mutex);
+ coex_stat->bt_setup_link = false;
+ rtw_coex_run_coex(rtwdev, COEX_RSN_WLSTATUS);
+ mutex_unlock(&rtwdev->mutex);
+}
+
+void rtw_coex_bt_reenable_work(struct work_struct *work)
+{
+ struct rtw_dev *rtwdev = container_of(work, struct rtw_dev,
+ coex.bt_reenable_work.work);
+ struct rtw_coex_stat *coex_stat = &rtwdev->coex.stat;
+
+ mutex_lock(&rtwdev->mutex);
+ coex_stat->bt_reenable = false;
+ mutex_unlock(&rtwdev->mutex);
+}
+
+void rtw_coex_defreeze_work(struct work_struct *work)
+{
+ struct rtw_dev *rtwdev = container_of(work, struct rtw_dev,
+ coex.defreeze_work.work);
+ struct rtw_coex *coex = &rtwdev->coex;
+ struct rtw_coex_stat *coex_stat = &rtwdev->coex.stat;
+
+ mutex_lock(&rtwdev->mutex);
+ coex->freeze = false;
+ coex_stat->wl_hi_pri_task1 = false;
+ rtw_coex_run_coex(rtwdev, COEX_RSN_WLSTATUS);
+ mutex_unlock(&rtwdev->mutex);
+}
diff --git a/drivers/net/wireless/realtek/rtw88/coex.h b/drivers/net/wireless/realtek/rtw88/coex.h
new file mode 100644
index 000000000000..56e871b2d6c2
--- /dev/null
+++ b/drivers/net/wireless/realtek/rtw88/coex.h
@@ -0,0 +1,369 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/* Copyright(c) 2018-2019 Realtek Corporation
+ */
+
+#ifndef __RTW_COEX_H__
+#define __RTW_COEX_H__
+
+/* BT profile map bit definition */
+#define BPM_HFP BIT(0)
+#define BPM_HID BIT(1)
+#define BPM_A2DP BIT(2)
+#define BPM_PAN BIT(3)
+
+#define COEX_RESP_ACK_BY_WL_FW 0x1
+#define COEX_REQUEST_TIMEOUT msecs_to_jiffies(10)
+
+#define COEX_MIN_DELAY 10 /* delay unit in ms */
+#define COEX_RFK_TIMEOUT 600 /* RFK timeout in ms */
+
+#define COEX_RF_OFF 0x0
+#define COEX_RF_ON 0x1
+
+#define COEX_H2C69_WL_LEAKAP 0xc
+#define PARA1_H2C69_DIS_5MS 0x1
+#define PARA1_H2C69_EN_5MS 0x0
+
+#define COEX_H2C69_TDMA_SLOT 0xb
+#define PARA1_H2C69_TDMA_4SLOT 0xc1
+#define PARA1_H2C69_TDMA_2SLOT 0x1
+
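+/* bit 8 flags a 4-slot TDMA schedule on top of the 8-bit case number */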
+#define TDMA_4SLOT BIT(8)
+
+#define COEX_RSSI_STEP 4
+#define COEX_RSSI_HIGH(rssi) \
+ ({ typeof(rssi) __rssi__ = rssi; \
+ (__rssi__ == COEX_RSSI_STATE_HIGH || \
+ __rssi__ == COEX_RSSI_STATE_STAY_HIGH ? true : false); })
+
+#define COEX_RSSI_MEDIUM(rssi) \
+ ({ typeof(rssi) __rssi__ = rssi; \
+ (__rssi__ == COEX_RSSI_STATE_MEDIUM || \
+ __rssi__ == COEX_RSSI_STATE_STAY_MEDIUM ? true : false); })
+
+#define COEX_RSSI_LOW(rssi) \
+ ({ typeof(rssi) __rssi__ = rssi; \
+ (__rssi__ == COEX_RSSI_STATE_LOW || \
+ __rssi__ == COEX_RSSI_STATE_STAY_LOW ? true : false); })
+
+#define GET_COEX_RESP_BT_SCAN_TYPE(payload) \
+ le64_get_bits(*((__le64 *)(payload)), GENMASK(31, 24))
+
+enum coex_mp_info_op {
+ BT_MP_INFO_OP_PATCH_VER = 0x00,
+ BT_MP_INFO_OP_READ_REG = 0x11,
+ BT_MP_INFO_OP_SUPP_FEAT = 0x2a,
+ BT_MP_INFO_OP_SUPP_VER = 0x2b,
+ BT_MP_INFO_OP_SCAN_TYPE = 0x2d,
+ BT_MP_INFO_OP_LNA_CONSTRAINT = 0x32,
+};
+
+enum coex_set_ant_phase {
+ COEX_SET_ANT_INIT,
+ COEX_SET_ANT_WONLY,
+ COEX_SET_ANT_WOFF,
+ COEX_SET_ANT_2G,
+ COEX_SET_ANT_5G,
+ COEX_SET_ANT_POWERON,
+ COEX_SET_ANT_2G_WLBT,
+ COEX_SET_ANT_2G_FREERUN,
+
+ COEX_SET_ANT_MAX
+};
+
+enum coex_runreason {
+ COEX_RSN_2GSCANSTART = 0,
+ COEX_RSN_5GSCANSTART = 1,
+ COEX_RSN_SCANFINISH = 2,
+ COEX_RSN_2GSWITCHBAND = 3,
+ COEX_RSN_5GSWITCHBAND = 4,
+ COEX_RSN_2GCONSTART = 5,
+ COEX_RSN_5GCONSTART = 6,
+ COEX_RSN_2GCONFINISH = 7,
+ COEX_RSN_5GCONFINISH = 8,
+ COEX_RSN_2GMEDIA = 9,
+ COEX_RSN_5GMEDIA = 10,
+ COEX_RSN_MEDIADISCON = 11,
+ COEX_RSN_BTINFO = 12,
+ COEX_RSN_LPS = 13,
+ COEX_RSN_WLSTATUS = 14,
+
+ COEX_RSN_MAX
+};
+
+enum coex_lte_coex_table_type {
+ COEX_CTT_WL_VS_LTE,
+ COEX_CTT_BT_VS_LTE,
+};
+
+enum coex_gnt_setup_state {
+ COEX_GNT_SET_HW_PTA = 0x0,
+ COEX_GNT_SET_SW_LOW = 0x1,
+ COEX_GNT_SET_SW_HIGH = 0x3,
+};
+
+enum coex_ext_ant_switch_pos_type {
+ COEX_SWITCH_TO_BT,
+ COEX_SWITCH_TO_WLG,
+ COEX_SWITCH_TO_WLA,
+ COEX_SWITCH_TO_NOCARE,
+ COEX_SWITCH_TO_WLG_BT,
+
+ COEX_SWITCH_TO_MAX
+};
+
+enum coex_ext_ant_switch_ctrl_type {
+ COEX_SWITCH_CTRL_BY_BBSW,
+ COEX_SWITCH_CTRL_BY_PTA,
+ COEX_SWITCH_CTRL_BY_ANTDIV,
+ COEX_SWITCH_CTRL_BY_MAC,
+ COEX_SWITCH_CTRL_BY_BT,
+ COEX_SWITCH_CTRL_BY_FW,
+
+ COEX_SWITCH_CTRL_MAX
+};
+
+enum coex_algorithm {
+ COEX_ALGO_NOPROFILE = 0,
+ COEX_ALGO_HFP = 1,
+ COEX_ALGO_HID = 2,
+ COEX_ALGO_A2DP = 3,
+ COEX_ALGO_PAN = 4,
+ COEX_ALGO_A2DP_HID = 5,
+ COEX_ALGO_A2DP_PAN = 6,
+ COEX_ALGO_PAN_HID = 7,
+ COEX_ALGO_A2DP_PAN_HID = 8,
+
+ COEX_ALGO_MAX
+};
+
+enum coex_wl_link_mode {
+ COEX_WLINK_2G1PORT = 0x0,
+ COEX_WLINK_5G = 0x3,
+ COEX_WLINK_MAX
+};
+
+enum coex_wl2bt_scoreboard {
+ COEX_SCBD_ACTIVE = BIT(0),
+ COEX_SCBD_ONOFF = BIT(1),
+ COEX_SCBD_SCAN = BIT(2),
+ COEX_SCBD_UNDERTEST = BIT(3),
+ COEX_SCBD_RXGAIN = BIT(4),
+ COEX_SCBD_BT_RFK = BIT(5),
+ COEX_SCBD_WLBUSY = BIT(6),
+ COEX_SCBD_EXTFEM = BIT(8),
+ COEX_SCBD_TDMA = BIT(9),
+ COEX_SCBD_FIX2M = BIT(10),
+ COEX_SCBD_ALL = GENMASK(15, 0),
+};
+
+enum coex_power_save_type {
+ COEX_PS_WIFI_NATIVE = 0,
+ COEX_PS_LPS_ON = 1,
+ COEX_PS_LPS_OFF = 2,
+};
+
+enum coex_rssi_state {
+ COEX_RSSI_STATE_HIGH,
+ COEX_RSSI_STATE_MEDIUM,
+ COEX_RSSI_STATE_LOW,
+ COEX_RSSI_STATE_STAY_HIGH,
+ COEX_RSSI_STATE_STAY_MEDIUM,
+ COEX_RSSI_STATE_STAY_LOW,
+};
+
+enum coex_notify_type_ips {
+ COEX_IPS_LEAVE = 0x0,
+ COEX_IPS_ENTER = 0x1,
+};
+
+enum coex_notify_type_lps {
+ COEX_LPS_DISABLE = 0x0,
+ COEX_LPS_ENABLE = 0x1,
+};
+
+enum coex_notify_type_scan {
+ COEX_SCAN_FINISH,
+ COEX_SCAN_START,
+ COEX_SCAN_START_2G,
+ COEX_SCAN_START_5G,
+};
+
+enum coex_notify_type_switchband {
+ COEX_NOT_SWITCH,
+ COEX_SWITCH_TO_24G,
+ COEX_SWITCH_TO_5G,
+ COEX_SWITCH_TO_24G_NOFORSCAN,
+};
+
+enum coex_notify_type_associate {
+ COEX_ASSOCIATE_FINISH,
+ COEX_ASSOCIATE_START,
+ COEX_ASSOCIATE_5G_FINISH,
+ COEX_ASSOCIATE_5G_START,
+};
+
+enum coex_notify_type_media_status {
+ COEX_MEDIA_DISCONNECT,
+ COEX_MEDIA_CONNECT,
+ COEX_MEDIA_CONNECT_5G,
+};
+
+enum coex_bt_status {
+ COEX_BTSTATUS_NCON_IDLE = 0,
+ COEX_BTSTATUS_CON_IDLE = 1,
+ COEX_BTSTATUS_INQ_PAGE = 2,
+ COEX_BTSTATUS_ACL_BUSY = 3,
+ COEX_BTSTATUS_SCO_BUSY = 4,
+ COEX_BTSTATUS_ACL_SCO_BUSY = 5,
+
+ COEX_BTSTATUS_MAX
+};
+
+enum coex_wl_tput_dir {
+ COEX_WL_TPUT_TX = 0x0,
+ COEX_WL_TPUT_RX = 0x1,
+ COEX_WL_TPUT_MAX
+};
+
+enum coex_wl_priority_mask {
+ COEX_WLPRI_RX_RSP = 2,
+ COEX_WLPRI_TX_RSP = 3,
+ COEX_WLPRI_TX_BEACON = 4,
+ COEX_WLPRI_TX_OFDM = 11,
+ COEX_WLPRI_TX_CCK = 12,
+ COEX_WLPRI_TX_BEACONQ = 27,
+ COEX_WLPRI_RX_CCK = 28,
+ COEX_WLPRI_RX_OFDM = 29,
+ COEX_WLPRI_MAX
+};
+
+enum coex_commom_chip_setup {
+ COEX_CSETUP_INIT_HW = 0x0,
+ COEX_CSETUP_ANT_SWITCH = 0x1,
+ COEX_CSETUP_GNT_FIX = 0x2,
+ COEX_CSETUP_GNT_DEBUG = 0x3,
+ COEX_CSETUP_RFE_TYPE = 0x4,
+ COEX_CSETUP_COEXINFO_HW = 0x5,
+ COEX_CSETUP_WL_TX_POWER = 0x6,
+ COEX_CSETUP_WL_RX_GAIN = 0x7,
+ COEX_CSETUP_WLAN_ACT_IPS = 0x8,
+ COEX_CSETUP_MAX
+};
+
+enum coex_indirect_reg_type {
+ COEX_INDIRECT_1700 = 0x0,
+ COEX_INDIRECT_7C0 = 0x1,
+ COEX_INDIRECT_MAX
+};
+
+enum coex_pstdma_type {
+ COEX_PSTDMA_FORCE_LPSOFF = 0x0,
+ COEX_PSTDMA_FORCE_LPSON = 0x1,
+ COEX_PSTDMA_MAX
+};
+
+enum coex_btrssi_type {
+ COEX_BTRSSI_RATIO = 0x0,
+ COEX_BTRSSI_DBM = 0x1,
+ COEX_BTRSSI_MAX
+};
+
+struct coex_table_para {
+ u32 bt;
+ u32 wl;
+};
+
+struct coex_tdma_para {
+ u8 para[5];
+};
+
+struct coex_5g_afh_map {
+ u32 wl_5g_ch;
+ u8 bt_skip_ch;
+ u8 bt_skip_span;
+};
+
+struct coex_rf_para {
+ u8 wl_pwr_dec_lvl;
+ u8 bt_pwr_dec_lvl;
+ bool wl_low_gain_en;
+ u8 bt_lna_lvl;
+};
+
+static inline void rtw_coex_set_init(struct rtw_dev *rtwdev)
+{
+ struct rtw_chip_info *chip = rtwdev->chip;
+
+ chip->ops->coex_set_init(rtwdev);
+}
+
+static inline
+void rtw_coex_set_ant_switch(struct rtw_dev *rtwdev, u8 ctrl_type, u8 pos_type)
+{
+ struct rtw_chip_info *chip = rtwdev->chip;
+
+ if (!chip->ops->coex_set_ant_switch)
+ return;
+
+ chip->ops->coex_set_ant_switch(rtwdev, ctrl_type, pos_type);
+}
+
+static inline void rtw_coex_set_gnt_fix(struct rtw_dev *rtwdev)
+{
+ struct rtw_chip_info *chip = rtwdev->chip;
+
+ chip->ops->coex_set_gnt_fix(rtwdev);
+}
+
+static inline void rtw_coex_set_gnt_debug(struct rtw_dev *rtwdev)
+{
+ struct rtw_chip_info *chip = rtwdev->chip;
+
+ chip->ops->coex_set_gnt_debug(rtwdev);
+}
+
+static inline void rtw_coex_set_rfe_type(struct rtw_dev *rtwdev)
+{
+ struct rtw_chip_info *chip = rtwdev->chip;
+
+ chip->ops->coex_set_rfe_type(rtwdev);
+}
+
+static inline void rtw_coex_set_wl_tx_power(struct rtw_dev *rtwdev, u8 wl_pwr)
+{
+ struct rtw_chip_info *chip = rtwdev->chip;
+
+ chip->ops->coex_set_wl_tx_power(rtwdev, wl_pwr);
+}
+
+static inline
+void rtw_coex_set_wl_rx_gain(struct rtw_dev *rtwdev, bool low_gain)
+{
+ struct rtw_chip_info *chip = rtwdev->chip;
+
+ chip->ops->coex_set_wl_rx_gain(rtwdev, low_gain);
+}
+
+void rtw_coex_info_response(struct rtw_dev *rtwdev, struct sk_buff *skb);
+void rtw_coex_write_indirect_reg(struct rtw_dev *rtwdev, u16 addr,
+ u32 mask, u32 val);
+void rtw_coex_write_scbd(struct rtw_dev *rtwdev, u16 bitpos, bool set);
+
+void rtw_coex_bt_relink_work(struct work_struct *work);
+void rtw_coex_bt_reenable_work(struct work_struct *work);
+void rtw_coex_defreeze_work(struct work_struct *work);
+
+void rtw_coex_power_on_setting(struct rtw_dev *rtwdev);
+void rtw_coex_init_hw_config(struct rtw_dev *rtwdev, bool wifi_only);
+void rtw_coex_ips_notify(struct rtw_dev *rtwdev, u8 type);
+void rtw_coex_lps_notify(struct rtw_dev *rtwdev, u8 type);
+void rtw_coex_scan_notify(struct rtw_dev *rtwdev, u8 type);
+void rtw_coex_connect_notify(struct rtw_dev *rtwdev, u8 action);
+void rtw_coex_media_status_notify(struct rtw_dev *rtwdev, u8 status);
+void rtw_coex_bt_info_notify(struct rtw_dev *rtwdev, u8 *buf, u8 len);
+void rtw_coex_wl_fwdbginfo_notify(struct rtw_dev *rtwdev, u8 *buf, u8 length);
+void rtw_coex_switchband_notify(struct rtw_dev *rtwdev, u8 type);
+void rtw_coex_wl_status_change_notify(struct rtw_dev *rtwdev);
+
+#endif
diff --git a/drivers/net/wireless/realtek/rtw88/debug.c b/drivers/net/wireless/realtek/rtw88/debug.c
index f0ae26018f97..383b04c16703 100644
--- a/drivers/net/wireless/realtek/rtw88/debug.c
+++ b/drivers/net/wireless/realtek/rtw88/debug.c
@@ -8,6 +8,7 @@
#include "sec.h"
#include "fw.h"
#include "debug.h"
+#include "phy.h"
#ifdef CONFIG_RTW88_DEBUGFS
@@ -460,6 +461,112 @@ static int rtw_debug_get_rf_dump(struct seq_file *m, void *v)
return 0;
}
+static void rtw_print_cck_rate_txt(struct seq_file *m, u8 rate)
+{
+ static const char * const
+ cck_rate[] = {"1M", "2M", "5.5M", "11M"};
+ u8 idx = rate - DESC_RATE1M;
+
+ seq_printf(m, " CCK_%-5s", cck_rate[idx]);
+}
+
+static void rtw_print_ofdm_rate_txt(struct seq_file *m, u8 rate)
+{
+ static const char * const
+ ofdm_rate[] = {"6M", "9M", "12M", "18M", "24M", "36M", "48M", "54M"};
+ u8 idx = rate - DESC_RATE6M;
+
+ seq_printf(m, " OFDM_%-4s", ofdm_rate[idx]);
+}
+
+static void rtw_print_ht_rate_txt(struct seq_file *m, u8 rate)
+{
+ u8 mcs_n = rate - DESC_RATEMCS0;
+
+ seq_printf(m, " MCS%-6u", mcs_n);
+}
+
+static void rtw_print_vht_rate_txt(struct seq_file *m, u8 rate)
+{
+ u8 idx = rate - DESC_RATEVHT1SS_MCS0;
+ u8 n_ss, mcs_n;
+
+ /* n spatial stream */
+ n_ss = 1 + idx / 10;
+ /* MCS n */
+ mcs_n = idx % 10;
+ seq_printf(m, " VHT%uSMCS%u", n_ss, mcs_n);
+}
+
+static int rtw_debugfs_get_tx_pwr_tbl(struct seq_file *m, void *v)
+{
+ struct rtw_debugfs_priv *debugfs_priv = m->private;
+ struct rtw_dev *rtwdev = debugfs_priv->rtwdev;
+ struct rtw_hal *hal = &rtwdev->hal;
+ void (*print_rate)(struct seq_file *, u8) = NULL;
+ u8 path, rate;
+ struct rtw_power_params pwr_param = {0};
+ u8 bw = hal->current_band_width;
+ u8 ch = hal->current_channel;
+ u8 regd = rtwdev->regd.txpwr_regd;
+
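+ /* each row prints the final power index, the base index, and the
+ * applied delta = min(by-rate offset, regulatory limit)
+ */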
+ seq_printf(m, "%-4s %-10s %-3s%6s %-4s %4s (%-4s %-4s)\n",
+ "path", "rate", "pwr", "", "base", "", "byr", "lmt");
+
+ mutex_lock(&hal->tx_power_mutex);
+ for (path = RF_PATH_A; path <= RF_PATH_B; path++) {
+ /* there are no CCK rates in 5G */
+ if (hal->current_band_type == RTW_BAND_5G)
+ rate = DESC_RATE6M;
+ else
+ rate = DESC_RATE1M;
+
+ /* VHT 3SS and 4SS are not supported yet */
+ for (; rate <= DESC_RATEVHT2SS_MCS9; rate++) {
+ /* HT 3SS and 4SS are not supported yet */
+ if (rate > DESC_RATEMCS15 &&
+ rate < DESC_RATEVHT1SS_MCS0)
+ continue;
+
+ switch (rate) {
+ case DESC_RATE1M...DESC_RATE11M:
+ print_rate = rtw_print_cck_rate_txt;
+ break;
+ case DESC_RATE6M...DESC_RATE54M:
+ print_rate = rtw_print_ofdm_rate_txt;
+ break;
+ case DESC_RATEMCS0...DESC_RATEMCS15:
+ print_rate = rtw_print_ht_rate_txt;
+ break;
+ case DESC_RATEVHT1SS_MCS0...DESC_RATEVHT2SS_MCS9:
+ print_rate = rtw_print_vht_rate_txt;
+ break;
+ default:
+ print_rate = NULL;
+ break;
+ }
+
+ rtw_get_tx_power_params(rtwdev, path, rate, bw,
+ ch, regd, &pwr_param);
+
+ seq_printf(m, "%4c ", path + 'A');
+ if (print_rate)
+ print_rate(m, rate);
+ seq_printf(m, " %3u(0x%02x) %4u %4d (%4d %4d)\n",
+ hal->tx_pwr_tbl[path][rate],
+ hal->tx_pwr_tbl[path][rate],
+ pwr_param.pwr_base,
+ min_t(s8, pwr_param.pwr_offset,
+ pwr_param.pwr_limit),
+ pwr_param.pwr_offset, pwr_param.pwr_limit);
+ }
+ }
+
+ mutex_unlock(&hal->tx_power_mutex);
+
+ return 0;
+}
+
#define rtw_debug_impl_mac(page, addr) \
static struct rtw_debugfs_priv rtw_debug_priv_mac_ ##page = { \
.cb_read = rtw_debug_get_mac_page, \
@@ -514,6 +621,10 @@ static struct rtw_debugfs_priv rtw_debug_priv_rf_dump = {
.cb_read = rtw_debug_get_rf_dump,
};
+static struct rtw_debugfs_priv rtw_debug_priv_tx_pwr_tbl = {
+ .cb_read = rtw_debugfs_get_tx_pwr_tbl,
+};
+
static struct rtw_debugfs_priv rtw_debug_priv_write_reg = {
.cb_write = rtw_debugfs_set_write_reg,
};
@@ -610,6 +721,7 @@ void rtw_debugfs_init(struct rtw_dev *rtwdev)
rtw_debugfs_add_r(bb_41);
}
rtw_debugfs_add_r(rf_dump);
+ rtw_debugfs_add_r(tx_pwr_tbl);
}
#endif /* CONFIG_RTW88_DEBUGFS */
diff --git a/drivers/net/wireless/realtek/rtw88/fw.c b/drivers/net/wireless/realtek/rtw88/fw.c
index 628477971213..b082e2cc95f5 100644
--- a/drivers/net/wireless/realtek/rtw88/fw.c
+++ b/drivers/net/wireless/realtek/rtw88/fw.c
@@ -3,6 +3,7 @@
*/
#include "main.h"
+#include "coex.h"
#include "fw.h"
#include "tx.h"
#include "reg.h"
@@ -36,17 +37,51 @@ void rtw_fw_c2h_cmd_handle(struct rtw_dev *rtwdev, struct sk_buff *skb)
c2h = (struct rtw_c2h_cmd *)(skb->data + pkt_offset);
len = skb->len - pkt_offset - 2;
- rtw_dbg(rtwdev, RTW_DBG_FW, "recv C2H, id=0x%02x, seq=0x%02x, len=%d\n",
- c2h->id, c2h->seq, len);
+ mutex_lock(&rtwdev->mutex);
switch (c2h->id) {
+ case C2H_BT_INFO:
+ rtw_coex_bt_info_notify(rtwdev, c2h->payload, len);
+ break;
+ case C2H_WLAN_INFO:
+ rtw_coex_wl_fwdbginfo_notify(rtwdev, c2h->payload, len);
+ break;
case C2H_HALMAC:
rtw_fw_c2h_cmd_handle_ext(rtwdev, skb);
break;
default:
break;
}
+
+ mutex_unlock(&rtwdev->mutex);
+}
+
+void rtw_fw_c2h_cmd_rx_irqsafe(struct rtw_dev *rtwdev, u32 pkt_offset,
+ struct sk_buff *skb)
+{
+ struct rtw_c2h_cmd *c2h;
+ u8 len;
+
+ c2h = (struct rtw_c2h_cmd *)(skb->data + pkt_offset);
+ len = skb->len - pkt_offset - 2;
+ *((u32 *)skb->cb) = pkt_offset;
+
+ rtw_dbg(rtwdev, RTW_DBG_FW, "recv C2H, id=0x%02x, seq=0x%02x, len=%d\n",
+ c2h->id, c2h->seq, len);
+
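+ /* BT MP info must be answered in place, a requester may be
+ * blocked waiting on coex.wait; everything else is deferred to
+ * c2h_work
+ */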
+ switch (c2h->id) {
+ case C2H_BT_MP_INFO:
+ rtw_coex_info_response(rtwdev, skb);
+ break;
+ default:
+ /* pass offset for further operation */
+ *((u32 *)skb->cb) = pkt_offset;
+ skb_queue_tail(&rtwdev->c2h_queue, skb);
+ ieee80211_queue_work(rtwdev->hw, &rtwdev->c2h_work);
+ break;
+ }
}
+EXPORT_SYMBOL(rtw_fw_c2h_cmd_rx_irqsafe);
static void rtw_fw_send_h2c_command(struct rtw_dev *rtwdev,
u8 *h2c)
@@ -181,6 +216,102 @@ void rtw_fw_do_iqk(struct rtw_dev *rtwdev, struct rtw_iqk_para *para)
rtw_fw_send_h2c_packet(rtwdev, h2c_pkt);
}
+void rtw_fw_query_bt_info(struct rtw_dev *rtwdev)
+{
+ u8 h2c_pkt[H2C_PKT_SIZE] = {0};
+
+ SET_H2C_CMD_ID_CLASS(h2c_pkt, H2C_CMD_QUERY_BT_INFO);
+
+ SET_QUERY_BT_INFO(h2c_pkt, true);
+
+ rtw_fw_send_h2c_command(rtwdev, h2c_pkt);
+}
+
+void rtw_fw_wl_ch_info(struct rtw_dev *rtwdev, u8 link, u8 ch, u8 bw)
+{
+ u8 h2c_pkt[H2C_PKT_SIZE] = {0};
+
+ SET_H2C_CMD_ID_CLASS(h2c_pkt, H2C_CMD_WL_CH_INFO);
+
+ SET_WL_CH_INFO_LINK(h2c_pkt, link);
+ SET_WL_CH_INFO_CHNL(h2c_pkt, ch);
+ SET_WL_CH_INFO_BW(h2c_pkt, bw);
+
+ rtw_fw_send_h2c_command(rtwdev, h2c_pkt);
+}
+
+void rtw_fw_query_bt_mp_info(struct rtw_dev *rtwdev,
+ struct rtw_coex_info_req *req)
+{
+ u8 h2c_pkt[H2C_PKT_SIZE] = {0};
+
+ SET_H2C_CMD_ID_CLASS(h2c_pkt, H2C_CMD_QUERY_BT_MP_INFO);
+
+ SET_BT_MP_INFO_SEQ(h2c_pkt, req->seq);
+ SET_BT_MP_INFO_OP_CODE(h2c_pkt, req->op_code);
+ SET_BT_MP_INFO_PARA1(h2c_pkt, req->para1);
+ SET_BT_MP_INFO_PARA2(h2c_pkt, req->para2);
+ SET_BT_MP_INFO_PARA3(h2c_pkt, req->para3);
+
+ rtw_fw_send_h2c_command(rtwdev, h2c_pkt);
+}
+
+void rtw_fw_force_bt_tx_power(struct rtw_dev *rtwdev, u8 bt_pwr_dec_lvl)
+{
+ u8 h2c_pkt[H2C_PKT_SIZE] = {0};
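+ /* the firmware expects the decrement level as a negative
+ * (two's-complement) index
+ */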
+ u8 index = 0 - bt_pwr_dec_lvl;
+
+ SET_H2C_CMD_ID_CLASS(h2c_pkt, H2C_CMD_FORCE_BT_TX_POWER);
+
+ SET_BT_TX_POWER_INDEX(h2c_pkt, index);
+
+ rtw_fw_send_h2c_command(rtwdev, h2c_pkt);
+}
+
+void rtw_fw_bt_ignore_wlan_action(struct rtw_dev *rtwdev, bool enable)
+{
+ u8 h2c_pkt[H2C_PKT_SIZE] = {0};
+
+ SET_H2C_CMD_ID_CLASS(h2c_pkt, H2C_CMD_IGNORE_WLAN_ACTION);
+
+ SET_IGNORE_WLAN_ACTION_EN(h2c_pkt, enable);
+
+ rtw_fw_send_h2c_command(rtwdev, h2c_pkt);
+}
+
+void rtw_fw_coex_tdma_type(struct rtw_dev *rtwdev,
+ u8 para1, u8 para2, u8 para3, u8 para4, u8 para5)
+{
+ u8 h2c_pkt[H2C_PKT_SIZE] = {0};
+
+ SET_H2C_CMD_ID_CLASS(h2c_pkt, H2C_CMD_COEX_TDMA_TYPE);
+
+ SET_COEX_TDMA_TYPE_PARA1(h2c_pkt, para1);
+ SET_COEX_TDMA_TYPE_PARA2(h2c_pkt, para2);
+ SET_COEX_TDMA_TYPE_PARA3(h2c_pkt, para3);
+ SET_COEX_TDMA_TYPE_PARA4(h2c_pkt, para4);
+ SET_COEX_TDMA_TYPE_PARA5(h2c_pkt, para5);
+
+ rtw_fw_send_h2c_command(rtwdev, h2c_pkt);
+}
+
+void rtw_fw_bt_wifi_control(struct rtw_dev *rtwdev, u8 op_code, u8 *data)
+{
+ u8 h2c_pkt[H2C_PKT_SIZE] = {0};
+
+ SET_H2C_CMD_ID_CLASS(h2c_pkt, H2C_CMD_BT_WIFI_CONTROL);
+
+ SET_BT_WIFI_CONTROL_OP_CODE(h2c_pkt, op_code);
+
+ SET_BT_WIFI_CONTROL_DATA1(h2c_pkt, *data);
+ SET_BT_WIFI_CONTROL_DATA2(h2c_pkt, *(data + 1));
+ SET_BT_WIFI_CONTROL_DATA3(h2c_pkt, *(data + 2));
+ SET_BT_WIFI_CONTROL_DATA4(h2c_pkt, *(data + 3));
+ SET_BT_WIFI_CONTROL_DATA5(h2c_pkt, *(data + 4));
+
+ rtw_fw_send_h2c_command(rtwdev, h2c_pkt);
+}
+
void rtw_fw_send_rssi_info(struct rtw_dev *rtwdev, struct rtw_sta_info *si)
{
u8 h2c_pkt[H2C_PKT_SIZE] = {0};
diff --git a/drivers/net/wireless/realtek/rtw88/fw.h b/drivers/net/wireless/realtek/rtw88/fw.h
index 703466393ecb..e95d85bd097f 100644
--- a/drivers/net/wireless/realtek/rtw88/fw.h
+++ b/drivers/net/wireless/realtek/rtw88/fw.h
@@ -35,7 +35,9 @@
enum rtw_c2h_cmd_id {
C2H_BT_INFO = 0x09,
+ C2H_BT_MP_INFO = 0x0b,
C2H_HW_FEATURE_REPORT = 0x19,
+ C2H_WLAN_INFO = 0x27,
C2H_HW_FEATURE_DUMP = 0xfd,
C2H_HALMAC = 0xff,
};
@@ -71,6 +73,14 @@ enum rtw_fw_rf_type {
FW_RF_MAX_TYPE = 0xF,
};
+struct rtw_coex_info_req {
+ u8 seq;
+ u8 op_code;
+ u8 para1;
+ u8 para2;
+ u8 para3;
+};
+
struct rtw_iqk_para {
u8 clear;
u8 segment_iqk;
@@ -139,6 +149,14 @@ static inline void rtw_h2c_pkt_set_header(u8 *h2c_pkt, u8 sub_id)
#define H2C_CMD_RA_INFO 0x40
#define H2C_CMD_RSSI_MONITOR 0x42
+#define H2C_CMD_COEX_TDMA_TYPE 0x60
+#define H2C_CMD_QUERY_BT_INFO 0x61
+#define H2C_CMD_FORCE_BT_TX_POWER 0x62
+#define H2C_CMD_IGNORE_WLAN_ACTION 0x63
+#define H2C_CMD_WL_CH_INFO 0x66
+#define H2C_CMD_QUERY_BT_MP_INFO 0x67
+#define H2C_CMD_BT_WIFI_CONTROL 0x69
+
#define SET_H2C_CMD_ID_CLASS(h2c_pkt, value) \
le32p_replace_bits((__le32 *)(h2c_pkt) + 0x00, value, GENMASK(7, 0))
@@ -191,6 +209,50 @@ static inline void rtw_h2c_pkt_set_header(u8 *h2c_pkt, u8 sub_id)
le32p_replace_bits((__le32 *)(h2c_pkt) + 0x01, value, GENMASK(23, 16))
#define SET_RA_INFO_RA_MASK3(h2c_pkt, value) \
le32p_replace_bits((__le32 *)(h2c_pkt) + 0x01, value, GENMASK(31, 24))
+#define SET_QUERY_BT_INFO(h2c_pkt, value) \
+ le32p_replace_bits((__le32 *)(h2c_pkt) + 0x00, value, BIT(8))
+#define SET_WL_CH_INFO_LINK(h2c_pkt, value) \
+ le32p_replace_bits((__le32 *)(h2c_pkt) + 0x00, value, GENMASK(15, 8))
+#define SET_WL_CH_INFO_CHNL(h2c_pkt, value) \
+ le32p_replace_bits((__le32 *)(h2c_pkt) + 0x00, value, GENMASK(23, 16))
+#define SET_WL_CH_INFO_BW(h2c_pkt, value) \
+ le32p_replace_bits((__le32 *)(h2c_pkt) + 0x00, value, GENMASK(31, 24))
+#define SET_BT_MP_INFO_SEQ(h2c_pkt, value) \
+ le32p_replace_bits((__le32 *)(h2c_pkt) + 0x00, value, GENMASK(15, 12))
+#define SET_BT_MP_INFO_OP_CODE(h2c_pkt, value) \
+ le32p_replace_bits((__le32 *)(h2c_pkt) + 0x00, value, GENMASK(23, 16))
+#define SET_BT_MP_INFO_PARA1(h2c_pkt, value) \
+ le32p_replace_bits((__le32 *)(h2c_pkt) + 0x00, value, GENMASK(31, 24))
+#define SET_BT_MP_INFO_PARA2(h2c_pkt, value) \
+ le32p_replace_bits((__le32 *)(h2c_pkt) + 0x01, value, GENMASK(7, 0))
+#define SET_BT_MP_INFO_PARA3(h2c_pkt, value) \
+ le32p_replace_bits((__le32 *)(h2c_pkt) + 0x01, value, GENMASK(15, 8))
+#define SET_BT_TX_POWER_INDEX(h2c_pkt, value) \
+ le32p_replace_bits((__le32 *)(h2c_pkt) + 0x00, value, GENMASK(15, 8))
+#define SET_IGNORE_WLAN_ACTION_EN(h2c_pkt, value) \
+ le32p_replace_bits((__le32 *)(h2c_pkt) + 0x00, value, BIT(8))
+#define SET_COEX_TDMA_TYPE_PARA1(h2c_pkt, value) \
+ le32p_replace_bits((__le32 *)(h2c_pkt) + 0x00, value, GENMASK(15, 8))
+#define SET_COEX_TDMA_TYPE_PARA2(h2c_pkt, value) \
+ le32p_replace_bits((__le32 *)(h2c_pkt) + 0x00, value, GENMASK(23, 16))
+#define SET_COEX_TDMA_TYPE_PARA3(h2c_pkt, value) \
+ le32p_replace_bits((__le32 *)(h2c_pkt) + 0x00, value, GENMASK(31, 24))
+#define SET_COEX_TDMA_TYPE_PARA4(h2c_pkt, value) \
+ le32p_replace_bits((__le32 *)(h2c_pkt) + 0x01, value, GENMASK(7, 0))
+#define SET_COEX_TDMA_TYPE_PARA5(h2c_pkt, value) \
+ le32p_replace_bits((__le32 *)(h2c_pkt) + 0x01, value, GENMASK(15, 8))
+#define SET_BT_WIFI_CONTROL_OP_CODE(h2c_pkt, value) \
+ le32p_replace_bits((__le32 *)(h2c_pkt) + 0x00, value, GENMASK(15, 8))
+#define SET_BT_WIFI_CONTROL_DATA1(h2c_pkt, value) \
+ le32p_replace_bits((__le32 *)(h2c_pkt) + 0x00, value, GENMASK(23, 16))
+#define SET_BT_WIFI_CONTROL_DATA2(h2c_pkt, value) \
+ le32p_replace_bits((__le32 *)(h2c_pkt) + 0x00, value, GENMASK(31, 24))
+#define SET_BT_WIFI_CONTROL_DATA3(h2c_pkt, value) \
+ le32p_replace_bits((__le32 *)(h2c_pkt) + 0x01, value, GENMASK(7, 0))
+#define SET_BT_WIFI_CONTROL_DATA4(h2c_pkt, value) \
+ le32p_replace_bits((__le32 *)(h2c_pkt) + 0x01, value, GENMASK(15, 8))
+#define SET_BT_WIFI_CONTROL_DATA5(h2c_pkt, value) \
+ le32p_replace_bits((__le32 *)(h2c_pkt) + 0x01, value, GENMASK(23, 16))
static inline struct rtw_c2h_cmd *get_c2h_from_skb(struct sk_buff *skb)
{
@@ -200,12 +262,23 @@ static inline struct rtw_c2h_cmd *get_c2h_from_skb(struct sk_buff *skb)
return (struct rtw_c2h_cmd *)(skb->data + pkt_offset);
}
+void rtw_fw_c2h_cmd_rx_irqsafe(struct rtw_dev *rtwdev, u32 pkt_offset,
+ struct sk_buff *skb);
void rtw_fw_c2h_cmd_handle(struct rtw_dev *rtwdev, struct sk_buff *skb);
void rtw_fw_send_general_info(struct rtw_dev *rtwdev);
void rtw_fw_send_phydm_info(struct rtw_dev *rtwdev);
void rtw_fw_do_iqk(struct rtw_dev *rtwdev, struct rtw_iqk_para *para);
void rtw_fw_set_pwr_mode(struct rtw_dev *rtwdev);
+void rtw_fw_query_bt_info(struct rtw_dev *rtwdev);
+void rtw_fw_wl_ch_info(struct rtw_dev *rtwdev, u8 link, u8 ch, u8 bw);
+void rtw_fw_query_bt_mp_info(struct rtw_dev *rtwdev,
+ struct rtw_coex_info_req *req);
+void rtw_fw_force_bt_tx_power(struct rtw_dev *rtwdev, u8 bt_pwr_dec_lvl);
+void rtw_fw_bt_ignore_wlan_action(struct rtw_dev *rtwdev, bool enable);
+void rtw_fw_coex_tdma_type(struct rtw_dev *rtwdev,
+ u8 para1, u8 para2, u8 para3, u8 para4, u8 para5);
+void rtw_fw_bt_wifi_control(struct rtw_dev *rtwdev, u8 op_code, u8 *data);
void rtw_fw_send_rssi_info(struct rtw_dev *rtwdev, struct rtw_sta_info *si);
void rtw_fw_send_ra_info(struct rtw_dev *rtwdev, struct rtw_sta_info *si);
void rtw_fw_media_status_report(struct rtw_dev *rtwdev, u8 mac_id, bool conn);
diff --git a/drivers/net/wireless/realtek/rtw88/mac80211.c b/drivers/net/wireless/realtek/rtw88/mac80211.c
index abe6a148673b..fedea28c7a97 100644
--- a/drivers/net/wireless/realtek/rtw88/mac80211.c
+++ b/drivers/net/wireless/realtek/rtw88/mac80211.c
@@ -7,6 +7,7 @@
#include "tx.h"
#include "fw.h"
#include "mac.h"
+#include "coex.h"
#include "ps.h"
#include "reg.h"
#include "debug.h"
@@ -253,6 +254,7 @@ static void rtw_ops_bss_info_changed(struct ieee80211_hw *hw,
enum rtw_net_type net_type;
if (conf->assoc) {
+ rtw_coex_connect_notify(rtwdev, COEX_ASSOCIATE_FINISH);
net_type = RTW_NET_MGD_LINKED;
chip->ops->do_iqk(rtwdev);
@@ -262,6 +264,7 @@ static void rtw_ops_bss_info_changed(struct ieee80211_hw *hw,
rtw_add_rsvd_page(rtwdev, RSVD_NULL, true);
rtw_fw_download_rsvd_page(rtwdev, vif);
rtw_send_rsvd_page_h2c(rtwdev);
+ rtw_coex_media_status_notify(rtwdev, conf->assoc);
} else {
net_type = RTW_NET_NO_LINK;
rtwvif->aid = 0;
@@ -469,6 +472,8 @@ static void rtw_ops_sw_scan_start(struct ieee80211_hw *hw,
config |= PORT_SET_MAC_ADDR;
rtw_vif_port_config(rtwdev, rtwvif, config);
+ rtw_coex_scan_notify(rtwdev, COEX_SCAN_START);
+
rtw_flag_set(rtwdev, RTW_FLAG_DIG_DISABLE);
rtw_flag_set(rtwdev, RTW_FLAG_SCANNING);
@@ -491,6 +496,19 @@ static void rtw_ops_sw_scan_complete(struct ieee80211_hw *hw,
config |= PORT_SET_MAC_ADDR;
rtw_vif_port_config(rtwdev, rtwvif, config);
+ rtw_coex_scan_notify(rtwdev, COEX_SCAN_FINISH);
+
+ mutex_unlock(&rtwdev->mutex);
+}
+
+static void rtw_ops_mgd_prepare_tx(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ u16 duration)
+{
+ struct rtw_dev *rtwdev = hw->priv;
+
+ mutex_lock(&rtwdev->mutex);
+ rtw_coex_connect_notify(rtwdev, COEX_ASSOCIATE_START);
mutex_unlock(&rtwdev->mutex);
}
@@ -509,5 +527,6 @@ const struct ieee80211_ops rtw_ops = {
.ampdu_action = rtw_ops_ampdu_action,
.sw_scan_start = rtw_ops_sw_scan_start,
.sw_scan_complete = rtw_ops_sw_scan_complete,
+ .mgd_prepare_tx = rtw_ops_mgd_prepare_tx,
};
EXPORT_SYMBOL(rtw_ops);
diff --git a/drivers/net/wireless/realtek/rtw88/main.c b/drivers/net/wireless/realtek/rtw88/main.c
index 5a2c06267d07..e5a6bc094808 100644
--- a/drivers/net/wireless/realtek/rtw88/main.c
+++ b/drivers/net/wireless/realtek/rtw88/main.c
@@ -8,6 +8,7 @@
#include "ps.h"
#include "sec.h"
#include "mac.h"
+#include "coex.h"
#include "phy.h"
#include "reg.h"
#include "efuse.h"
@@ -149,6 +150,7 @@ static void rtw_watch_dog_work(struct work_struct *work)
struct rtw_dev *rtwdev = container_of(work, struct rtw_dev,
watch_dog_work.work);
struct rtw_watch_dog_iter_data data = {};
+ bool busy_traffic = rtw_flag_check(rtwdev, RTW_FLAG_BUSY_TRAFFIC);
if (!rtw_flag_check(rtwdev, RTW_FLAG_RUNNING))
return;
@@ -156,6 +158,14 @@ static void rtw_watch_dog_work(struct work_struct *work)
ieee80211_queue_delayed_work(rtwdev->hw, &rtwdev->watch_dog_work,
RTW_WATCH_DOG_DELAY_TIME);
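+ /* over 100 tx or rx frames per watchdog period counts as busy traffic */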
+ if (rtwdev->stats.tx_cnt > 100 || rtwdev->stats.rx_cnt > 100)
+ rtw_flag_set(rtwdev, RTW_FLAG_BUSY_TRAFFIC);
+ else
+ rtw_flag_clear(rtwdev, RTW_FLAG_BUSY_TRAFFIC);
+
+ if (busy_traffic != rtw_flag_check(rtwdev, RTW_FLAG_BUSY_TRAFFIC))
+ rtw_coex_wl_status_change_notify(rtwdev);
+
/* reset tx/rx statictics */
rtwdev->stats.tx_unicast = 0;
rtwdev->stats.rx_unicast = 0;
@@ -298,6 +308,15 @@ void rtw_set_channel(struct rtw_dev *rtwdev)
chip->ops->set_channel(rtwdev, center_chan, bandwidth, primary_chan_idx);
+ if (hal->current_band_type == RTW_BAND_5G) {
+ rtw_coex_switchband_notify(rtwdev, COEX_SWITCH_TO_5G);
+ } else {
+ if (rtw_flag_check(rtwdev, RTW_FLAG_SCANNING))
+ rtw_coex_switchband_notify(rtwdev, COEX_SWITCH_TO_24G);
+ else
+ rtw_coex_switchband_notify(rtwdev, COEX_SWITCH_TO_24G_NOFORSCAN);
+ }
+
rtw_phy_set_tx_power_level(rtwdev, center_chan);
}
@@ -641,6 +660,7 @@ static int rtw_power_on(struct rtw_dev *rtwdev)
{
struct rtw_chip_info *chip = rtwdev->chip;
struct rtw_fw_state *fw = &rtwdev->fw;
+ bool wifi_only;
int ret;
ret = rtw_hci_setup(rtwdev);
@@ -684,6 +704,10 @@ static int rtw_power_on(struct rtw_dev *rtwdev)
goto err_off;
}
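+ /* the efuse reports whether the chip carries an on-board BT controller to coexist with */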
+ wifi_only = !rtwdev->efuse.btcoex;
+ rtw_coex_power_on_setting(rtwdev);
+ rtw_coex_init_hw_config(rtwdev, wifi_only);
+
return 0;
err_off:
@@ -722,10 +746,15 @@ static void rtw_power_off(struct rtw_dev *rtwdev)
void rtw_core_stop(struct rtw_dev *rtwdev)
{
+ struct rtw_coex *coex = &rtwdev->coex;
+
rtw_flag_clear(rtwdev, RTW_FLAG_RUNNING);
rtw_flag_clear(rtwdev, RTW_FLAG_FW_RUNNING);
cancel_delayed_work_sync(&rtwdev->watch_dog_work);
+ cancel_delayed_work_sync(&coex->bt_relink_work);
+ cancel_delayed_work_sync(&coex->bt_reenable_work);
+ cancel_delayed_work_sync(&coex->defreeze_work);
rtw_power_off(rtwdev);
}
@@ -876,7 +905,6 @@ static int rtw_chip_parameter_setup(struct rtw_dev *rtwdev)
struct rtw_chip_info *chip = rtwdev->chip;
struct rtw_hal *hal = &rtwdev->hal;
struct rtw_efuse *efuse = &rtwdev->efuse;
- u32 wl_bt_pwr_ctrl;
int ret = 0;
switch (rtw_hci_type(rtwdev)) {
@@ -888,9 +916,6 @@ static int rtw_chip_parameter_setup(struct rtw_dev *rtwdev)
return -EINVAL;
}
- wl_bt_pwr_ctrl = rtw_read32(rtwdev, REG_WL_BT_PWR_CTRL);
- if (wl_bt_pwr_ctrl & BIT_BT_FUNC_EN)
- rtwdev->efuse.btcoex = true;
hal->chip_version = rtw_read32(rtwdev, REG_SYS_CFG1);
hal->fab_version = BIT_GET_VENDOR_ID(hal->chip_version) >> 2;
hal->cut_version = BIT_GET_CHIP_VER(hal->chip_version);
@@ -1044,11 +1069,14 @@ static int rtw_chip_efuse_info_setup(struct rtw_dev *rtwdev)
efuse->lna_type_5g = 0;
if (efuse->channel_plan == 0xff)
efuse->channel_plan = 0x7f;
+ if (efuse->rf_board_option == 0xff)
+ efuse->rf_board_option = 0;
if (efuse->bt_setting & BIT(0))
efuse->share_ant = true;
if (efuse->regd == 0xff)
efuse->regd = 0;
+ efuse->btcoex = (efuse->rf_board_option & 0xe0) == 0x20;
efuse->ext_pa_2g = efuse->pa_type_2g & BIT(4) ? 1 : 0;
efuse->ext_lna_2g = efuse->lna_type_2g & BIT(3) ? 1 : 0;
efuse->ext_pa_5g = efuse->pa_type_5g & BIT(0) ? 1 : 0;
@@ -1111,6 +1139,7 @@ EXPORT_SYMBOL(rtw_chip_info_setup);
int rtw_core_init(struct rtw_dev *rtwdev)
{
+ struct rtw_coex *coex = &rtwdev->coex;
int ret;
INIT_LIST_HEAD(&rtwdev->rsvd_page_list);
@@ -1120,8 +1149,12 @@ int rtw_core_init(struct rtw_dev *rtwdev)
INIT_DELAYED_WORK(&rtwdev->watch_dog_work, rtw_watch_dog_work);
INIT_DELAYED_WORK(&rtwdev->lps_work, rtw_lps_work);
+ INIT_DELAYED_WORK(&coex->bt_relink_work, rtw_coex_bt_relink_work);
+ INIT_DELAYED_WORK(&coex->bt_reenable_work, rtw_coex_bt_reenable_work);
+ INIT_DELAYED_WORK(&coex->defreeze_work, rtw_coex_defreeze_work);
INIT_WORK(&rtwdev->c2h_work, rtw_c2h_work);
skb_queue_head_init(&rtwdev->c2h_queue);
+ skb_queue_head_init(&rtwdev->coex.queue);
skb_queue_head_init(&rtwdev->tx_report.queue);
spin_lock_init(&rtwdev->dm_lock);
@@ -1130,8 +1163,11 @@ int rtw_core_init(struct rtw_dev *rtwdev)
spin_lock_init(&rtwdev->tx_report.q_lock);
mutex_init(&rtwdev->mutex);
+ mutex_init(&rtwdev->coex.mutex);
mutex_init(&rtwdev->hal.tx_power_mutex);
+ init_waitqueue_head(&rtwdev->coex.wait);
+
rtwdev->sec.total_cam_num = 32;
rtwdev->hal.current_channel = 1;
set_bit(RTW_BC_MC_MACID, rtwdev->mac_id_map);
@@ -1174,6 +1210,7 @@ void rtw_core_deinit(struct rtw_dev *rtwdev)
}
mutex_destroy(&rtwdev->mutex);
+ mutex_destroy(&rtwdev->coex.mutex);
mutex_destroy(&rtwdev->hal.tx_power_mutex);
}
EXPORT_SYMBOL(rtw_core_deinit);
diff --git a/drivers/net/wireless/realtek/rtw88/main.h b/drivers/net/wireless/realtek/rtw88/main.h
index 8fa05751836b..9208b9ce5513 100644
--- a/drivers/net/wireless/realtek/rtw88/main.h
+++ b/drivers/net/wireless/realtek/rtw88/main.h
@@ -310,6 +310,7 @@ enum rtw_flags {
RTW_FLAG_INACTIVE_PS,
RTW_FLAG_LEISURE_PS,
RTW_FLAG_DIG_DISABLE,
+ RTW_FLAG_BUSY_TRAFFIC,
NUM_OF_RTW_FLAGS,
};
@@ -640,6 +641,16 @@ struct rtw_chip_ops {
void (*cfg_ldo25)(struct rtw_dev *rtwdev, bool enable);
void (*false_alarm_statistics)(struct rtw_dev *rtwdev);
void (*do_iqk)(struct rtw_dev *rtwdev);
+
+ /* for coex */
+ void (*coex_set_init)(struct rtw_dev *rtwdev);
+ void (*coex_set_ant_switch)(struct rtw_dev *rtwdev,
+ u8 ctrl_type, u8 pos_type);
+ void (*coex_set_gnt_fix)(struct rtw_dev *rtwdev);
+ void (*coex_set_gnt_debug)(struct rtw_dev *rtwdev);
+ void (*coex_set_rfe_type)(struct rtw_dev *rtwdev);
+ void (*coex_set_wl_tx_power)(struct rtw_dev *rtwdev, u8 wl_pwr);
+ void (*coex_set_wl_rx_gain)(struct rtw_dev *rtwdev, bool low_gain);
};
#define RTW_PWR_POLLING_CNT 20000
@@ -852,6 +863,216 @@ struct rtw_chip_info {
const struct rtw_rfe_def *rfe_defs;
u32 rfe_defs_size;
+
+ /* coex parameters */
+ u32 coex_para_ver;
+ u8 bt_desired_ver;
+ bool scbd_support;
+ bool new_scbd10_def; /* true: fix 2M(8822c) */
+ u8 pstdma_type; /* 0: LPS off, 1: LPS on */
+ u8 bt_rssi_type;
+ u8 ant_isolation;
+ u8 rssi_tolerance;
+ u8 table_sant_num;
+ u8 table_nsant_num;
+ u8 tdma_sant_num;
+ u8 tdma_nsant_num;
+ u8 bt_afh_span_bw20;
+ u8 bt_afh_span_bw40;
+ u8 afh_5g_num;
+ u8 wl_rf_para_num;
+ const u8 *bt_rssi_step;
+ const u8 *wl_rssi_step;
+ const struct coex_table_para *table_nsant;
+ const struct coex_table_para *table_sant;
+ const struct coex_tdma_para *tdma_sant;
+ const struct coex_tdma_para *tdma_nsant;
+ const struct coex_rf_para *wl_rf_para_tx;
+ const struct coex_rf_para *wl_rf_para_rx;
+ const struct coex_5g_afh_map *afh_5g;
+};
+
+enum rtw_coex_bt_state_cnt {
+ COEX_CNT_BT_RETRY,
+ COEX_CNT_BT_REINIT,
+ COEX_CNT_BT_REENABLE,
+ COEX_CNT_BT_POPEVENT,
+ COEX_CNT_BT_SETUPLINK,
+ COEX_CNT_BT_IGNWLANACT,
+ COEX_CNT_BT_INQ,
+ COEX_CNT_BT_PAGE,
+ COEX_CNT_BT_ROLESWITCH,
+ COEX_CNT_BT_AFHUPDATE,
+ COEX_CNT_BT_INFOUPDATE,
+ COEX_CNT_BT_IQK,
+ COEX_CNT_BT_IQKFAIL,
+
+ COEX_CNT_BT_MAX
+};
+
+enum rtw_coex_wl_state_cnt {
+ COEX_CNT_WL_CONNPKT,
+ COEX_CNT_WL_COEXRUN,
+ COEX_CNT_WL_NOISY0,
+ COEX_CNT_WL_NOISY1,
+ COEX_CNT_WL_NOISY2,
+ COEX_CNT_WL_5MS_NOEXTEND,
+ COEX_CNT_WL_FW_NOTIFY,
+
+ COEX_CNT_WL_MAX
+};
+
+struct rtw_coex_rfe {
+ bool ant_switch_exist;
+ bool ant_switch_diversity;
+ bool ant_switch_with_bt;
+ u8 rfe_module_type;
+ u8 ant_switch_polarity;
+
+ /* true if WLG at BTG, else at WLAG */
+ bool wlg_at_btg;
+};
+
+struct rtw_coex_dm {
+ bool cur_ps_tdma_on;
+ bool cur_wl_rx_low_gain_en;
+
+ u8 reason;
+ u8 bt_rssi_state[4];
+ u8 wl_rssi_state[4];
+ u8 wl_ch_info[3];
+ u8 cur_ps_tdma;
+ u8 cur_table;
+ u8 ps_tdma_para[5];
+ u8 cur_bt_pwr_lvl;
+ u8 cur_bt_lna_lvl;
+ u8 cur_wl_pwr_lvl;
+ u8 bt_status;
+ u32 cur_ant_pos_type;
+ u32 cur_switch_status;
+ u32 setting_tdma;
+};
+
+#define COEX_BTINFO_SRC_WL_FW 0x0
+#define COEX_BTINFO_SRC_BT_RSP 0x1
+#define COEX_BTINFO_SRC_BT_ACT 0x2
+#define COEX_BTINFO_SRC_BT_IQK 0x3
+#define COEX_BTINFO_SRC_BT_SCBD 0x4
+#define COEX_BTINFO_SRC_MAX 0x5
+
+#define COEX_INFO_FTP BIT(7)
+#define COEX_INFO_A2DP BIT(6)
+#define COEX_INFO_HID BIT(5)
+#define COEX_INFO_SCO_BUSY BIT(4)
+#define COEX_INFO_ACL_BUSY BIT(3)
+#define COEX_INFO_INQ_PAGE BIT(2)
+#define COEX_INFO_SCO_ESCO BIT(1)
+#define COEX_INFO_CONNECTION BIT(0)
+#define COEX_BTINFO_LENGTH_MAX 10
+
+struct rtw_coex_stat {
+ bool bt_disabled;
+ bool bt_disabled_pre;
+ bool bt_link_exist;
+ bool bt_whck_test;
+ bool bt_inq_page;
+ bool bt_inq;
+ bool bt_page;
+ bool bt_ble_voice;
+ bool bt_ble_exist;
+ bool bt_hfp_exist;
+ bool bt_a2dp_exist;
+ bool bt_hid_exist;
+ bool bt_pan_exist; /* PAN or OPP */
+ bool bt_opp_exist; /* OPP only */
+ bool bt_acl_busy;
+ bool bt_fix_2M;
+ bool bt_setup_link;
+ bool bt_multi_link;
+ bool bt_a2dp_sink;
+ bool bt_a2dp_active;
+ bool bt_reenable;
+ bool bt_ble_scan_en;
+ bool bt_init_scan;
+ bool bt_slave;
+ bool bt_418_hid_exist;
+ bool bt_mailbox_reply;
+
+ bool wl_under_lps;
+ bool wl_under_ips;
+ bool wl_hi_pri_task1;
+ bool wl_hi_pri_task2;
+ bool wl_force_lps_ctrl;
+ bool wl_gl_busy;
+ bool wl_linkscan_proc;
+ bool wl_ps_state_fail;
+ bool wl_tx_limit_en;
+ bool wl_ampdu_limit_en;
+ bool wl_connected;
+ bool wl_slot_extend;
+ bool wl_cck_lock;
+ bool wl_cck_lock_pre;
+ bool wl_cck_lock_ever;
+
+ u32 bt_supported_version;
+ u32 bt_supported_feature;
+ s8 bt_rssi;
+ u8 kt_ver;
+ u8 gnt_workaround_state;
+ u8 tdma_timer_base;
+ u8 bt_profile_num;
+ u8 bt_info_c2h[COEX_BTINFO_SRC_MAX][COEX_BTINFO_LENGTH_MAX];
+ u8 bt_info_lb2;
+ u8 bt_info_lb3;
+ u8 bt_info_hb0;
+ u8 bt_info_hb1;
+ u8 bt_info_hb2;
+ u8 bt_info_hb3;
+ u8 bt_ble_scan_type;
+ u8 bt_hid_pair_num;
+ u8 bt_hid_slot;
+ u8 bt_a2dp_bitpool;
+ u8 bt_iqk_state;
+
+ u8 wl_noisy_level;
+ u8 wl_fw_dbg_info[10];
+ u8 wl_fw_dbg_info_pre[10];
+ u8 wl_coex_mode;
+ u8 ampdu_max_time;
+ u8 wl_tput_dir;
+
+ u16 score_board;
+ u16 retry_limit;
+
+ /* counters to record bt states */
+ u32 cnt_bt[COEX_CNT_BT_MAX];
+
+ /* counters to record wifi states */
+ u32 cnt_wl[COEX_CNT_WL_MAX];
+
+ u32 darfrc;
+ u32 darfrch;
+};
+
+struct rtw_coex {
+ /* protects coex info request section */
+ struct mutex mutex;
+ struct sk_buff_head queue;
+ wait_queue_head_t wait;
+
+ bool under_5g;
+ bool stop_dm;
+ bool freeze;
+ bool freerun;
+ bool wl_rf_off;
+
+ struct rtw_coex_stat stat;
+ struct rtw_coex_dm dm;
+ struct rtw_coex_rfe rfe;
+
+ struct delayed_work bt_relink_work;
+ struct delayed_work bt_reenable_work;
+ struct delayed_work defreeze_work;
};
#define DACK_MSBK_BACKUP_NUM 0xf
@@ -861,6 +1082,16 @@ struct rtw_dm_info {
u32 cck_fa_cnt;
u32 ofdm_fa_cnt;
u32 total_fa_cnt;
+
+ u32 cck_ok_cnt;
+ u32 cck_err_cnt;
+ u32 ofdm_ok_cnt;
+ u32 ofdm_err_cnt;
+ u32 ht_ok_cnt;
+ u32 ht_err_cnt;
+ u32 vht_ok_cnt;
+ u32 vht_err_cnt;
+
u8 min_rssi;
u8 pre_min_rssi;
u16 fa_history[4];
@@ -888,6 +1119,7 @@ struct rtw_efuse {
u8 addr[ETH_ALEN];
u8 channel_plan;
u8 country_code[2];
+ u8 rf_board_option;
u8 rfe_option;
u8 thermal_meter;
u8 crystal_cap;
@@ -1047,6 +1279,7 @@ struct rtw_dev {
struct rtw_regulatory regd;
struct rtw_dm_info dm_info;
+ struct rtw_coex coex;
/* ensures exclusive access from mac80211 callbacks */
struct mutex mutex;
diff --git a/drivers/net/wireless/realtek/rtw88/pci.c b/drivers/net/wireless/realtek/rtw88/pci.c
index 353871c27779..00ef229552d5 100644
--- a/drivers/net/wireless/realtek/rtw88/pci.c
+++ b/drivers/net/wireless/realtek/rtw88/pci.c
@@ -8,6 +8,7 @@
#include "pci.h"
#include "tx.h"
#include "rx.h"
+#include "fw.h"
#include "debug.h"
static u32 rtw_pci_tx_queue_idx_addr[] = {
@@ -206,6 +207,23 @@ static int rtw_pci_reset_rx_desc(struct rtw_dev *rtwdev, struct sk_buff *skb,
return 0;
}
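+/* Hand an RX buffer back to the device: sync the DMA region for device access and rebuild its buffer descriptor. */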
+static void rtw_pci_sync_rx_desc_device(struct rtw_dev *rtwdev, dma_addr_t dma,
+ struct rtw_pci_rx_ring *rx_ring,
+ u32 idx, u32 desc_sz)
+{
+ struct device *dev = rtwdev->dev;
+ struct rtw_pci_rx_buffer_desc *buf_desc;
+ int buf_sz = RTK_PCI_RX_BUF_SIZE;
+
+ dma_sync_single_for_device(dev, dma, buf_sz, DMA_FROM_DEVICE);
+
+ buf_desc = (struct rtw_pci_rx_buffer_desc *)(rx_ring->r.head +
+ idx * desc_sz);
+ memset(buf_desc, 0, sizeof(*buf_desc));
+ buf_desc->buf_size = cpu_to_le16(RTK_PCI_RX_BUF_SIZE);
+ buf_desc->dma = cpu_to_le32(dma);
+}
+
static int rtw_pci_init_rx_ring(struct rtw_dev *rtwdev,
struct rtw_pci_rx_ring *rx_ring,
u8 desc_size, u32 len)
@@ -765,6 +783,7 @@ static void rtw_pci_rx_isr(struct rtw_dev *rtwdev, struct rtw_pci *rtwpci,
u32 pkt_offset;
u32 pkt_desc_sz = chip->rx_pkt_desc_sz;
u32 buf_desc_sz = chip->rx_buf_desc_sz;
+ u32 new_len;
u8 *rx_desc;
dma_addr_t dma;
@@ -783,8 +802,8 @@ static void rtw_pci_rx_isr(struct rtw_dev *rtwdev, struct rtw_pci *rtwpci,
rtw_pci_dma_check(rtwdev, ring, cur_rp);
skb = ring->buf[cur_rp];
dma = *((dma_addr_t *)skb->cb);
- pci_unmap_single(rtwpci->pdev, dma, RTK_PCI_RX_BUF_SIZE,
- PCI_DMA_FROMDEVICE);
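+ /* the RX buffer now stays mapped for its whole lifetime; only sync ownership back to the CPU */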
+ dma_sync_single_for_cpu(rtwdev->dev, dma, RTK_PCI_RX_BUF_SIZE,
+ DMA_FROM_DEVICE);
rx_desc = skb->data;
chip->ops->query_rx_desc(rtwdev, rx_desc, &pkt_stat, &rx_status);
@@ -792,40 +811,32 @@ static void rtw_pci_rx_isr(struct rtw_dev *rtwdev, struct rtw_pci *rtwpci,
pkt_offset = pkt_desc_sz + pkt_stat.drv_info_sz +
pkt_stat.shift;
- if (pkt_stat.is_c2h) {
- /* keep rx_desc, halmac needs it */
- skb_put(skb, pkt_stat.pkt_len + pkt_offset);
+ /* allocate a new skb for this frame,
+ * discard the frame if none available
+ */
+ new_len = pkt_stat.pkt_len + pkt_offset;
+ new = dev_alloc_skb(new_len);
+ if (WARN_ONCE(!new, "rx routine starvation\n"))
+ goto next_rp;
- /* pass offset for further operation */
- *((u32 *)skb->cb) = pkt_offset;
- skb_queue_tail(&rtwdev->c2h_queue, skb);
- ieee80211_queue_work(rtwdev->hw, &rtwdev->c2h_work);
+ /* put the DMA data including rx_desc from phy to new skb */
+ skb_put_data(new, skb->data, new_len);
+
+ if (pkt_stat.is_c2h) {
+ rtw_fw_c2h_cmd_rx_irqsafe(rtwdev, pkt_offset, new);
} else {
- /* remove rx_desc, maybe use skb_pull? */
- skb_put(skb, pkt_stat.pkt_len);
- skb_reserve(skb, pkt_offset);
-
- /* alloc a smaller skb to mac80211 */
- new = dev_alloc_skb(pkt_stat.pkt_len);
- if (!new) {
- new = skb;
- } else {
- skb_put_data(new, skb->data, skb->len);
- dev_kfree_skb_any(skb);
- }
- /* TODO: merge into rx.c */
- rtw_rx_stats(rtwdev, pkt_stat.vif, skb);
+ /* remove rx_desc */
+ skb_pull(new, pkt_offset);
+
+ rtw_rx_stats(rtwdev, pkt_stat.vif, new);
memcpy(new->cb, &rx_status, sizeof(rx_status));
ieee80211_rx_irqsafe(rtwdev->hw, new);
}
- /* skb delivered to mac80211, alloc a new one in rx ring */
- new = dev_alloc_skb(RTK_PCI_RX_BUF_SIZE);
- if (WARN(!new, "rx routine starvation\n"))
- return;
-
- ring->buf[cur_rp] = new;
- rtw_pci_reset_rx_desc(rtwdev, new, ring, cur_rp, buf_desc_sz);
+next_rp:
+ /* new skb delivered to mac80211, re-enable original skb DMA */
+ rtw_pci_sync_rx_desc_device(rtwdev, dma, ring, cur_rp,
+ buf_desc_sz);
/* host read next element in ring */
if (++cur_rp >= ring->r.len)
@@ -977,7 +988,6 @@ static void rtw_pci_phy_cfg(struct rtw_dev *rtwdev)
u16 cut;
u16 value;
u16 offset;
- u16 ip_sel;
int i;
cut = BIT(0) << rtwdev->hal.cut_version;
@@ -990,7 +1000,6 @@ static void rtw_pci_phy_cfg(struct rtw_dev *rtwdev)
break;
offset = para->offset;
value = para->value;
- ip_sel = para->ip_sel;
if (para->ip_sel == RTW_IP_SEL_PHY)
rtw_mdio_write(rtwdev, offset, value, true);
else
@@ -1005,7 +1014,6 @@ static void rtw_pci_phy_cfg(struct rtw_dev *rtwdev)
break;
offset = para->offset;
value = para->value;
- ip_sel = para->ip_sel;
if (para->ip_sel == RTW_IP_SEL_PHY)
rtw_mdio_write(rtwdev, offset, value, false);
else
diff --git a/drivers/net/wireless/realtek/rtw88/phy.c b/drivers/net/wireless/realtek/rtw88/phy.c
index 4ec8dcf17361..528ee1ee2fd2 100644
--- a/drivers/net/wireless/realtek/rtw88/phy.c
+++ b/drivers/net/wireless/realtek/rtw88/phy.c
@@ -29,15 +29,6 @@ struct phy_pg_cfg_pair {
u32 data;
};
-struct txpwr_lmt_cfg_pair {
- u8 regd;
- u8 band;
- u8 bw;
- u8 rs;
- u8 ch;
- s8 txpwr_lmt;
-};
-
static const u32 db_invert_table[12][8] = {
{10, 13, 16, 20,
25, 32, 40, 50},
@@ -1267,10 +1258,8 @@ static void rtw_xref_txpwr_lmt(struct rtw_dev *rtwdev)
void rtw_parse_tbl_txpwr_lmt(struct rtw_dev *rtwdev,
const struct rtw_table *tbl)
{
- const struct txpwr_lmt_cfg_pair *p = tbl->data;
- const struct txpwr_lmt_cfg_pair *end = p + tbl->size / 6;
-
- BUILD_BUG_ON(sizeof(struct txpwr_lmt_cfg_pair) != sizeof(u8) * 6);
+ const struct rtw_txpwr_lmt_cfg_pair *p = tbl->data;
+ const struct rtw_txpwr_lmt_cfg_pair *end = p + tbl->size;
for (; p < end; p++) {
rtw_phy_set_tx_power_limit(rtwdev, p->regd, p->band,
diff --git a/drivers/net/wireless/realtek/rtw88/phy.h b/drivers/net/wireless/realtek/rtw88/phy.h
index 7c8eb732b13c..cc87b157f23e 100644
--- a/drivers/net/wireless/realtek/rtw88/phy.h
+++ b/drivers/net/wireless/realtek/rtw88/phy.h
@@ -45,6 +45,15 @@ void rtw_phy_set_tx_power_level(struct rtw_dev *rtwdev, u8 channel);
void rtw_phy_tx_power_by_rate_config(struct rtw_hal *hal);
void rtw_phy_tx_power_limit_config(struct rtw_hal *hal);
+struct rtw_txpwr_lmt_cfg_pair {
+ u8 regd;
+ u8 band;
+ u8 bw;
+ u8 rs;
+ u8 ch;
+ s8 txpwr_lmt;
+};
+
#define RTW_DECL_TABLE_PHY_COND_CORE(name, cfg, path) \
const struct rtw_table name ## _tbl = { \
.data = name, \
diff --git a/drivers/net/wireless/realtek/rtw88/ps.c b/drivers/net/wireless/realtek/rtw88/ps.c
index 607bfa4317d9..9ecd14feb76b 100644
--- a/drivers/net/wireless/realtek/rtw88/ps.c
+++ b/drivers/net/wireless/realtek/rtw88/ps.c
@@ -6,6 +6,7 @@
#include "fw.h"
#include "ps.h"
#include "mac.h"
+#include "coex.h"
#include "debug.h"
static int rtw_ips_pwr_up(struct rtw_dev *rtwdev)
@@ -26,6 +27,8 @@ int rtw_enter_ips(struct rtw_dev *rtwdev)
{
rtw_flag_set(rtwdev, RTW_FLAG_INACTIVE_PS);
+ rtw_coex_ips_notify(rtwdev, COEX_IPS_ENTER);
+
rtw_core_stop(rtwdev);
return 0;
@@ -53,6 +56,8 @@ int rtw_leave_ips(struct rtw_dev *rtwdev)
rtw_iterate_vifs_atomic(rtwdev, rtw_restore_port_cfg_iter, rtwdev);
+ rtw_coex_ips_notify(rtwdev, COEX_IPS_LEAVE);
+
return 0;
}
@@ -67,6 +72,8 @@ static void rtw_leave_lps_core(struct rtw_dev *rtwdev)
rtw_fw_set_pwr_mode(rtwdev);
rtw_flag_clear(rtwdev, RTW_FLAG_LEISURE_PS);
+
+ rtw_coex_lps_notify(rtwdev, COEX_LPS_DISABLE);
}
static void rtw_enter_lps_core(struct rtw_dev *rtwdev)
@@ -78,6 +85,8 @@ static void rtw_enter_lps_core(struct rtw_dev *rtwdev)
conf->rlbm = 1;
conf->smart_ps = 2;
+ rtw_coex_lps_notify(rtwdev, COEX_LPS_ENABLE);
+
rtw_fw_set_pwr_mode(rtwdev);
rtw_flag_set(rtwdev, RTW_FLAG_LEISURE_PS);
}
diff --git a/drivers/net/wireless/realtek/rtw88/reg.h b/drivers/net/wireless/realtek/rtw88/reg.h
index e2628f05812c..0bd0717baa8b 100644
--- a/drivers/net/wireless/realtek/rtw88/reg.h
+++ b/drivers/net/wireless/realtek/rtw88/reg.h
@@ -37,17 +37,28 @@
#define REG_GPIO_MUXCFG 0x0040
#define BIT_FSPI_EN BIT(19)
+#define BIT_BT_AOD_GPIO3 BIT(9)
+#define BIT_BT_PTA_EN BIT(5)
#define BIT_WLRFE_4_5_EN BIT(2)
#define REG_LED_CFG 0x004C
#define BIT_LNAON_SEL_EN BIT(26)
#define BIT_PAPE_SEL_EN BIT(25)
+#define BIT_DPDT_WL_SEL BIT(24)
+#define BIT_DPDT_SEL_EN BIT(23)
#define REG_PAD_CTRL1 0x0064
#define BIT_PAPE_WLBT_SEL BIT(29)
#define BIT_LNAON_WLBT_SEL BIT(28)
+#define BIT_BTGP_JTAG_EN BIT(24)
+#define BIT_BTGP_SPI_EN BIT(20)
+#define BIT_LED1DIS BIT(15)
+#define BIT_SW_DPDT_SEL_DATA BIT(0)
#define REG_WL_BT_PWR_CTRL 0x0068
#define BIT_BT_FUNC_EN BIT(18)
#define BIT_BT_DIG_CLK_EN BIT(8)
+#define REG_SYS_SDIO_CTRL 0x0070
+#define BIT_DBG_GNT_WL_BT BIT(27)
+#define BIT_LTE_MUX_CTRL_PATH BIT(26)
#define REG_HCI_OPT_CTRL 0x0074
#define REG_MCUFW_CTRL 0x0080
@@ -70,6 +81,8 @@
#define FW_READY_MASK 0xffff
#define REG_WLRF1 0x00EC
+#define REG_WIFI_BT_INFO 0x00AA
+#define BIT_BT_INT_EN BIT(15)
#define REG_SYS_CFG1 0x00F0
#define BIT_RTL_ID BIT(23)
#define BIT_RF_TYPE_ID BIT(27)
@@ -187,6 +200,7 @@
#define REG_LIFETIME_EN 0x0426
#define BIT_BA_PARSER_EN BIT(5)
#define REG_SPEC_SIFS 0x0428
+#define REG_RETRY_LIMIT 0x042a
#define REG_DARFRC 0x0430
#define REG_DARFRCH 0x0434
#define REG_RARFRCH 0x043C
@@ -199,18 +213,25 @@
#define REG_AMPDU_MAX_TIME_V1 0x0455
#define REG_BCNQ1_BDNY_V1 0x0456
#define REG_TX_HANG_CTRL 0x045E
+#define BIT_EN_GNT_BT_AWAKE BIT(3)
#define BIT_EN_EOF_V1 BIT(2)
#define REG_DATA_SC 0x0483
#define REG_ARFR4 0x049C
+#define BIT_WL_RFK BIT(0)
#define REG_ARFRH4 0x04A0
#define REG_ARFR5 0x04A4
#define REG_ARFRH5 0x04A8
#define REG_SW_AMPDU_BURST_MODE_CTRL 0x04BC
#define BIT_PRE_TX_CMD BIT(6)
+#define REG_QUEUE_CTRL 0x04C6
+#define BIT_PTA_WL_TX_EN BIT(4)
+#define BIT_PTA_EDCCA_EN BIT(5)
#define REG_PROT_MODE_CTRL 0x04C8
#define REG_BAR_MODE_CTRL 0x04CC
#define REG_PRECNT_CTRL 0x04E5
+#define BIT_BTCCA_CTRL (BIT(0) | BIT(1))
#define BIT_EN_PRECNT BIT(11)
+#define REG_DUMMY_PAGE4_V1 0x04FC
#define REG_EDCA_VO_PARAM 0x0500
#define REG_EDCA_VI_PARAM 0x0504
@@ -297,11 +318,34 @@
#define REG_RXFLTMAP0 0x06A0
#define REG_RXFLTMAP1 0x06A2
#define REG_RXFLTMAP2 0x06A4
+#define REG_BT_COEX_TABLE0 0x06C0
+#define REG_BT_COEX_TABLE1 0x06C4
+#define REG_BT_COEX_BRK_TABLE 0x06C8
+#define REG_BT_COEX_TABLE_H 0x06CC
+#define REG_BT_COEX_TABLE_H1 0x06CD
+#define REG_BT_COEX_TABLE_H2 0x06CE
+#define REG_BT_COEX_TABLE_H3 0x06CF
#define REG_BBPSF_CTRL 0x06DC
+#define REG_BT_COEX_V2 0x0763
+#define BIT_GNT_BT_POLARITY BIT(4)
+#define BIT_LTE_COEX_EN BIT(7)
+#define REG_BT_STAT_CTRL 0x0778
+#define REG_BT_TDMA_TIME 0x0790
#define REG_WMAC_OPTION_FUNCTION 0x07D0
#define REG_WMAC_OPTION_FUNCTION_1 0x07D4
+#define REG_RX_GAIN_EN 0x081c
+
+#define REG_RFE_CTRL_E 0x0974
+
+#define REG_RFE_CTRL8 0x0cb4
+#define BIT_MASK_RFE_SEL89 GENMASK(7, 0)
+#define REG_RFE_INV8 0x0cbd
+#define BIT_MASK_RFE_INV89 GENMASK(1, 0)
+#define REG_RFE_INV16 0x0cbe
+#define BIT_RFE_BUF_EN BIT(3)
+
#define REG_ANAPAR_XTAL_0 0x1040
#define REG_CPU_DMEM_CON 0x1080
#define BIT_WL_PLATFORM_RST BIT(16)
@@ -407,15 +451,33 @@
#define LTECOEX_WRITE_DATA REG_WL2LTECOEX_INDIRECT_ACCESS_WRITE_DATA_V1
#define LTECOEX_READ_DATA REG_WL2LTECOEX_INDIRECT_ACCESS_READ_DATA_V1
+#define REG_IGN_GNT_BT1 0x1860
+
+#define REG_RFESEL_CTRL 0x1990
+
+#define REG_NOMASK_TXBT 0x1ca7
+#define REG_ANAPAR 0x1c30
+#define BIT_ANAPAR_BTPS BIT(22)
+#define REG_RSTB_SEL 0x1c38
+
+#define REG_IGN_GNTBT4 0x4160
+
+#define RF_MODOPT 0x01
#define RF_DTXLOK 0x08
#define RF_CFGCH 0x18
+#define RF_RCK 0x1d
#define RF_LUTWA 0x33
#define RF_LUTWD1 0x3e
#define RF_LUTWD0 0x3f
#define RF_XTALX2 0xb8
#define RF_MALSEL 0xbe
+#define RF_RCKD 0xde
#define RF_LUTDBG 0xdf
#define RF_LUTWE2 0xee
#define RF_LUTWE 0xef
+#define LTE_COEX_CTRL 0x38
+#define LTE_WL_TRX_CTRL 0xa0
+#define LTE_BT_TRX_CTRL 0xa4
+
#endif
diff --git a/drivers/net/wireless/realtek/rtw88/rtw8822b.c b/drivers/net/wireless/realtek/rtw88/rtw8822b.c
index 1172f6c0605b..568033afb024 100644
--- a/drivers/net/wireless/realtek/rtw88/rtw8822b.c
+++ b/drivers/net/wireless/realtek/rtw88/rtw8822b.c
@@ -3,6 +3,7 @@
*/
#include "main.h"
+#include "coex.h"
#include "fw.h"
#include "tx.h"
#include "rx.h"
@@ -31,6 +32,7 @@ static int rtw8822b_read_efuse(struct rtw_dev *rtwdev, u8 *log_map)
map = (struct rtw8822b_efuse *)log_map;
efuse->rfe_option = map->rfe_option;
+ efuse->rf_board_option = map->rf_board_option;
efuse->crystal_cap = map->xtal_k;
efuse->pa_type_2g = map->pa_type;
efuse->pa_type_5g = map->pa_type;
@@ -104,24 +106,6 @@ static void rtw8822b_phy_set_param(struct rtw_dev *rtwdev)
rtw_phy_init(rtwdev);
rtw8822b_phy_rfe_init(rtwdev);
-
- /* wifi path controller */
- rtw_write32_mask(rtwdev, 0x70, 0x4000000, 1);
- /* BB control */
- rtw_write32_mask(rtwdev, 0x4c, 0x01800000, 0x2);
- /* antenna mux switch */
- rtw_write8(rtwdev, 0x974, 0xff);
- rtw_write32_mask(rtwdev, 0x1990, 0x300, 0);
- rtw_write32_mask(rtwdev, 0xcbc, 0x80000, 0x0);
- /* SW control */
- rtw_write8(rtwdev, 0xcb4, 0x77);
- /* switch to WL side controller and gnt_wl gnt_bt debug signal */
- rtw_write32_mask(rtwdev, 0x70, 0xff000000, 0x0e);
- /* gnt_wl = 1, gnt_bt = 0 */
- rtw_write32(rtwdev, 0x1704, 0x7700);
- rtw_write32(rtwdev, 0x1700, 0xc00f0038);
- /* switch for WL 2G */
- rtw_write8(rtwdev, 0xcbd, 0x2);
}
#define WLAN_SLOT_TIME 0x09
@@ -960,6 +944,7 @@ static void rtw8822b_false_alarm_statistics(struct rtw_dev *rtwdev)
u32 cck_enable;
u32 cck_fa_cnt;
u32 ofdm_fa_cnt;
+ u32 crc32_cnt;
cck_enable = rtw_read32(rtwdev, 0x808) & BIT(28);
cck_fa_cnt = rtw_read16(rtwdev, 0xa5c);
@@ -970,6 +955,19 @@ static void rtw8822b_false_alarm_statistics(struct rtw_dev *rtwdev)
dm_info->total_fa_cnt = ofdm_fa_cnt;
dm_info->total_fa_cnt += cck_enable ? cck_fa_cnt : 0;
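+ /* each CRC32 report register packs the OK count in the low word and the error count in the high word */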
+ crc32_cnt = rtw_read32(rtwdev, 0xf04);
+ dm_info->cck_ok_cnt = crc32_cnt & 0xffff;
+ dm_info->cck_err_cnt = (crc32_cnt & 0xffff0000) >> 16;
+ crc32_cnt = rtw_read32(rtwdev, 0xf14);
+ dm_info->ofdm_ok_cnt = crc32_cnt & 0xffff;
+ dm_info->ofdm_err_cnt = (crc32_cnt & 0xffff0000) >> 16;
+ crc32_cnt = rtw_read32(rtwdev, 0xf10);
+ dm_info->ht_ok_cnt = crc32_cnt & 0xffff;
+ dm_info->ht_err_cnt = (crc32_cnt & 0xffff0000) >> 16;
+ crc32_cnt = rtw_read32(rtwdev, 0xf0c);
+ dm_info->vht_ok_cnt = crc32_cnt & 0xffff;
+ dm_info->vht_err_cnt = (crc32_cnt & 0xffff0000) >> 16;
+
rtw_write32_set(rtwdev, 0x9a4, BIT(17));
rtw_write32_clr(rtwdev, 0x9a4, BIT(17));
rtw_write32_clr(rtwdev, 0xa2c, BIT(15));
@@ -997,12 +995,260 @@ static void rtw8822b_do_iqk(struct rtw_dev *rtwdev)
rtw_write_rf(rtwdev, RF_PATH_A, RF_DTXLOK, RFREG_MASK, 0x0);
reload = !!rtw_read32_mask(rtwdev, REG_IQKFAILMSK, BIT(16));
- iqk_fail_mask = rtw_read32_mask(rtwdev, REG_IQKFAILMSK, GENMASK(0, 7));
+ iqk_fail_mask = rtw_read32_mask(rtwdev, REG_IQKFAILMSK, GENMASK(7, 0));
rtw_dbg(rtwdev, RTW_DBG_PHY,
"iqk counter=%d reload=%d do_iqk_cnt=%d n_iqk_fail(mask)=0x%02x\n",
counter, reload, ++do_iqk_cnt, iqk_fail_mask);
}
+static void rtw8822b_coex_cfg_init(struct rtw_dev *rtwdev)
+{
+ /* enable TBTT nterrupt */
+ rtw_write8_set(rtwdev, REG_BCN_CTRL, BIT_EN_BCN_FUNCTION);
+
+ /* BT report packet sample rate */
+ /* 0x790[5:0]=0x5 */
+ rtw_write8_set(rtwdev, REG_BT_TDMA_TIME, 0x05);
+
+ /* enable BT counter statistics */
+ rtw_write8(rtwdev, REG_BT_STAT_CTRL, 0x1);
+
+ /* enable PTA (3-wire function from the BT side) */
+ rtw_write32_set(rtwdev, REG_GPIO_MUXCFG, BIT_BT_PTA_EN);
+ rtw_write32_set(rtwdev, REG_GPIO_MUXCFG, BIT_BT_AOD_GPIO3);
+
+ /* enable PTA (tx/rx signal from the WiFi side) */
+ rtw_write8_set(rtwdev, REG_QUEUE_CTRL, BIT_PTA_WL_TX_EN);
+ /* wl tx signal to PTA not case EDCCA */
+ rtw_write8_clr(rtwdev, REG_QUEUE_CTRL, BIT_PTA_EDCCA_EN);
+ /* GNT_BT=1 while select both */
+ rtw_write8_set(rtwdev, REG_BT_COEX_V2, BIT_GNT_BT_POLARITY);
+}
+
+static void rtw8822b_coex_cfg_ant_switch(struct rtw_dev *rtwdev,
+ u8 ctrl_type, u8 pos_type)
+{
+ struct rtw_coex *coex = &rtwdev->coex;
+ struct rtw_coex_dm *coex_dm = &coex->dm;
+ struct rtw_coex_rfe *coex_rfe = &coex->rfe;
+ bool polarity_inverse;
+ u8 regval = 0;
+
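+ /* ctrl_type and pos_type are cached as one combined word so redundant switch programming is skipped */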
+ if (((ctrl_type << 8) + pos_type) == coex_dm->cur_switch_status)
+ return;
+
+ coex_dm->cur_switch_status = (ctrl_type << 8) + pos_type;
+
+ if (coex_rfe->ant_switch_diversity &&
+ ctrl_type == COEX_SWITCH_CTRL_BY_BBSW)
+ ctrl_type = COEX_SWITCH_CTRL_BY_ANTDIV;
+
+ polarity_inverse = (coex_rfe->ant_switch_polarity == 1);
+
+ switch (ctrl_type) {
+ default:
+ case COEX_SWITCH_CTRL_BY_BBSW:
+ /* 0x4c[23] = 0 */
+ rtw_write8_mask(rtwdev, REG_LED_CFG + 2, BIT_DPDT_SEL_EN >> 16, 0x0);
+ /* 0x4c[24] = 1 */
+ rtw_write8_mask(rtwdev, REG_LED_CFG + 3, BIT_DPDT_WL_SEL >> 24, 0x1);
+ /* BB SW, DPDT use RFE_ctrl8 and RFE_ctrl9 as ctrl pin */
+ rtw_write8_mask(rtwdev, REG_RFE_CTRL8, BIT_MASK_RFE_SEL89, 0x77);
+
+ if (pos_type == COEX_SWITCH_TO_WLG_BT) {
+ if (coex_rfe->rfe_module_type != 0x4 &&
+ coex_rfe->rfe_module_type != 0x2)
+ regval = 0x3;
+ else
+ regval = (!polarity_inverse ? 0x2 : 0x1);
+ } else if (pos_type == COEX_SWITCH_TO_WLG) {
+ regval = (!polarity_inverse ? 0x2 : 0x1);
+ } else {
+ regval = (!polarity_inverse ? 0x1 : 0x2);
+ }
+
+ rtw_write8_mask(rtwdev, REG_RFE_INV8, BIT_MASK_RFE_INV89, regval);
+ break;
+ case COEX_SWITCH_CTRL_BY_PTA:
+ /* 0x4c[23] = 0 */
+ rtw_write8_mask(rtwdev, REG_LED_CFG + 2, BIT_DPDT_SEL_EN >> 16, 0x0);
+ /* 0x4c[24] = 1 */
+ rtw_write8_mask(rtwdev, REG_LED_CFG + 3, BIT_DPDT_WL_SEL >> 24, 0x1);
+ /* PTA, DPDT use RFE_ctrl8 and RFE_ctrl9 as ctrl pin */
+ rtw_write8_mask(rtwdev, REG_RFE_CTRL8, BIT_MASK_RFE_SEL89, 0x66);
+
+ regval = (!polarity_inverse ? 0x2 : 0x1);
+ rtw_write8_mask(rtwdev, REG_RFE_INV8, BIT_MASK_RFE_INV89, regval);
+ break;
+ case COEX_SWITCH_CTRL_BY_ANTDIV:
+ /* 0x4c[23] = 0 */
+ rtw_write8_mask(rtwdev, REG_LED_CFG + 2, BIT_DPDT_SEL_EN >> 16, 0x0);
+ /* 0x4c[24] = 1 */
+ rtw_write8_mask(rtwdev, REG_LED_CFG + 3, BIT_DPDT_WL_SEL >> 24, 0x1);
+ rtw_write8_mask(rtwdev, REG_RFE_CTRL8, BIT_MASK_RFE_SEL89, 0x88);
+ break;
+ case COEX_SWITCH_CTRL_BY_MAC:
+ /* 0x4c[23] = 1 */
+ rtw_write8_mask(rtwdev, REG_LED_CFG + 2, BIT_DPDT_SEL_EN >> 16, 0x1);
+
+ regval = (!polarity_inverse ? 0x0 : 0x1);
+ rtw_write8_mask(rtwdev, REG_PAD_CTRL1, BIT_SW_DPDT_SEL_DATA, regval);
+ break;
+ case COEX_SWITCH_CTRL_BY_FW:
+ /* 0x4c[23] = 0 */
+ rtw_write8_mask(rtwdev, REG_LED_CFG + 2, BIT_DPDT_SEL_EN >> 16, 0x0);
+ /* 0x4c[24] = 1 */
+ rtw_write8_mask(rtwdev, REG_LED_CFG + 3, BIT_DPDT_WL_SEL >> 24, 0x1);
+ break;
+ case COEX_SWITCH_CTRL_BY_BT:
+ /* 0x4c[23] = 0 */
+ rtw_write8_mask(rtwdev, REG_LED_CFG + 2, BIT_DPDT_SEL_EN >> 16, 0x0);
+ /* 0x4c[24] = 0 */
+ rtw_write8_mask(rtwdev, REG_LED_CFG + 3, BIT_DPDT_WL_SEL >> 24, 0x0);
+ break;
+ }
+}
+
+static void rtw8822b_coex_cfg_gnt_fix(struct rtw_dev *rtwdev)
+{
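+ /* no GNT fix is needed on 8822B; the empty callback keeps the coex ops table uniform */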
+}
+
+static void rtw8822b_coex_cfg_gnt_debug(struct rtw_dev *rtwdev)
+{
+ rtw_write8_mask(rtwdev, REG_PAD_CTRL1 + 2, BIT_BTGP_SPI_EN >> 16, 0);
+ rtw_write8_mask(rtwdev, REG_PAD_CTRL1 + 3, BIT_BTGP_JTAG_EN >> 24, 0);
+ rtw_write8_mask(rtwdev, REG_GPIO_MUXCFG + 2, BIT_FSPI_EN >> 16, 0);
+ rtw_write8_mask(rtwdev, REG_PAD_CTRL1 + 1, BIT_LED1DIS >> 8, 0);
+ rtw_write8_mask(rtwdev, REG_SYS_SDIO_CTRL + 3, BIT_DBG_GNT_WL_BT >> 24, 0);
+}
+
+static void rtw8822b_coex_cfg_rfe_type(struct rtw_dev *rtwdev)
+{
+ struct rtw_coex *coex = &rtwdev->coex;
+ struct rtw_coex_rfe *coex_rfe = &coex->rfe;
+ struct rtw_efuse *efuse = &rtwdev->efuse;
+ bool is_ext_fem = false;
+
+ coex_rfe->rfe_module_type = rtwdev->efuse.rfe_option;
+ coex_rfe->ant_switch_polarity = 0;
+ coex_rfe->ant_switch_diversity = false;
+ if (coex_rfe->rfe_module_type == 0x12 ||
+ coex_rfe->rfe_module_type == 0x15 ||
+ coex_rfe->rfe_module_type == 0x16)
+ coex_rfe->ant_switch_exist = false;
+ else
+ coex_rfe->ant_switch_exist = true;
+
+ if (coex_rfe->rfe_module_type == 2 ||
+ coex_rfe->rfe_module_type == 4) {
+ rtw_coex_write_scbd(rtwdev, COEX_SCBD_EXTFEM, true);
+ is_ext_fem = true;
+ } else {
+ rtw_coex_write_scbd(rtwdev, COEX_SCBD_EXTFEM, false);
+ }
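+ /* mac80211 is about to send authentication/association frames; let coex prioritize the link setup */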
+
+ coex_rfe->wlg_at_btg = false;
+
+ if (efuse->share_ant &&
+ coex_rfe->ant_switch_exist && !is_ext_fem)
+ coex_rfe->ant_switch_with_bt = true;
+ else
+ coex_rfe->ant_switch_with_bt = false;
+
+ /* Ext switch buffer mux */
+ rtw_write8(rtwdev, REG_RFE_CTRL_E, 0xff);
+ rtw_write8_mask(rtwdev, REG_RFESEL_CTRL + 1, 0x3, 0x0);
+ rtw_write8_mask(rtwdev, REG_RFE_INV16, BIT_RFE_BUF_EN, 0x0);
+
+ /* Disable LTE coex function on the WiFi side */
+ rtw_coex_write_indirect_reg(rtwdev, LTE_COEX_CTRL, BIT_LTE_COEX_EN, 0);
+
+ /* BTC_CTT_WL_VS_LTE */
+ rtw_coex_write_indirect_reg(rtwdev, LTE_WL_TRX_CTRL, MASKLWORD, 0xffff);
+
+ /* BTC_CTT_BT_VS_LTE */
+ rtw_coex_write_indirect_reg(rtwdev, LTE_BT_TRX_CTRL, MASKLWORD, 0xffff);
+}
+
+static void rtw8822b_coex_cfg_wl_tx_power(struct rtw_dev *rtwdev, u8 wl_pwr)
+{
+ struct rtw_coex *coex = &rtwdev->coex;
+ struct rtw_coex_dm *coex_dm = &coex->dm;
+ static const u16 reg_addr[] = {0xc58, 0xe58};
+ static const u8 wl_tx_power[] = {0xd8, 0xd4, 0xd0, 0xcc, 0xc8};
+ u8 i, pwr;
+
+ if (wl_pwr == coex_dm->cur_wl_pwr_lvl)
+ return;
+
+ coex_dm->cur_wl_pwr_lvl = wl_pwr;
+
+ if (coex_dm->cur_wl_pwr_lvl >= ARRAY_SIZE(wl_tx_power))
+ coex_dm->cur_wl_pwr_lvl = ARRAY_SIZE(wl_tx_power) - 1;
+
+ pwr = wl_tx_power[coex_dm->cur_wl_pwr_lvl];
+
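+ /* apply the power index to both paths; 0xc58/0xe58 are assumed to be the per-path TX AGC registers */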
+ for (i = 0; i < ARRAY_SIZE(reg_addr); i++)
+ rtw_write8_mask(rtwdev, reg_addr[i], 0xff, pwr);
+}
+
+static void rtw8822b_coex_cfg_wl_rx_gain(struct rtw_dev *rtwdev, bool low_gain)
+{
+ struct rtw_coex *coex = &rtwdev->coex;
+ struct rtw_coex_dm *coex_dm = &coex->dm;
+ /* WL Rx Low gain on */
+ static const u32 wl_rx_low_gain_on[] = {
+ 0xff000003, 0xbd120003, 0xbe100003, 0xbf080003, 0xbf060003,
+ 0xbf050003, 0xbc140003, 0xbb160003, 0xba180003, 0xb91a0003,
+ 0xb81c0003, 0xb71e0003, 0xb4200003, 0xb5220003, 0xb4240003,
+ 0xb3260003, 0xb2280003, 0xb12a0003, 0xb02c0003, 0xaf2e0003,
+ 0xae300003, 0xad320003, 0xac340003, 0xab360003, 0x8d380003,
+ 0x8c3a0003, 0x8b3c0003, 0x8a3e0003, 0x6e400003, 0x6d420003,
+ 0x6c440003, 0x6b460003, 0x6a480003, 0x694a0003, 0x684c0003,
+ 0x674e0003, 0x66500003, 0x65520003, 0x64540003, 0x64560003,
+ 0x007e0403
+ };
+
+ /* WL Rx Low gain off */
+ static const u32 wl_rx_low_gain_off[] = {
+ 0xff000003, 0xf4120003, 0xf5100003, 0xf60e0003, 0xf70c0003,
+ 0xf80a0003, 0xf3140003, 0xf2160003, 0xf1180003, 0xf01a0003,
+ 0xef1c0003, 0xee1e0003, 0xed200003, 0xec220003, 0xeb240003,
+ 0xea260003, 0xe9280003, 0xe82a0003, 0xe72c0003, 0xe62e0003,
+ 0xe5300003, 0xc8320003, 0xc7340003, 0xc6360003, 0xc5380003,
+ 0xc43a0003, 0xc33c0003, 0xc23e0003, 0xc1400003, 0xc0420003,
+ 0xa5440003, 0xa4460003, 0xa3480003, 0xa24a0003, 0xa14c0003,
+ 0x834e0003, 0x82500003, 0x81520003, 0x80540003, 0x65560003,
+ 0x007e0403
+ };
+ u8 i;
+
+ if (low_gain == coex_dm->cur_wl_rx_low_gain_en)
+ return;
+
+ coex_dm->cur_wl_rx_low_gain_en = low_gain;
+
+ if (coex_dm->cur_wl_rx_low_gain_en) {
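+ /* stream the low-gain AGC table into REG_RX_GAIN_EN one word at a time */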
+ for (i = 0; i < ARRAY_SIZE(wl_rx_low_gain_on); i++)
+ rtw_write32(rtwdev, REG_RX_GAIN_EN, wl_rx_low_gain_on[i]);
+
+ /* set Rx filter corner RCK offset */
+ rtw_write_rf(rtwdev, RF_PATH_A, RF_RCKD, 0x2, 0x1);
+ rtw_write_rf(rtwdev, RF_PATH_A, RF_RCK, 0x3f, 0x3f);
+ rtw_write_rf(rtwdev, RF_PATH_B, RF_RCKD, 0x2, 0x1);
+ rtw_write_rf(rtwdev, RF_PATH_B, RF_RCK, 0x3f, 0x3f);
+ } else {
+ for (i = 0; i < ARRAY_SIZE(wl_rx_low_gain_off); i++)
+ rtw_write32(rtwdev, REG_RX_GAIN_EN, wl_rx_low_gain_off[i]);
+
+ /* set Rx filter corner RCK offset */
+ rtw_write_rf(rtwdev, RF_PATH_A, RF_RCK, 0x3f, 0x4);
+ rtw_write_rf(rtwdev, RF_PATH_A, RF_RCKD, 0x2, 0x0);
+ rtw_write_rf(rtwdev, RF_PATH_B, RF_RCK, 0x3f, 0x4);
+ rtw_write_rf(rtwdev, RF_PATH_B, RF_RCKD, 0x2, 0x0);
+ }
+}
+
static struct rtw_pwr_seq_cmd trans_carddis_to_cardemu_8822b[] = {
{0x0086,
RTW_PWR_CUT_ALL_MSK,
@@ -1549,8 +1795,160 @@ static struct rtw_chip_ops rtw8822b_ops = {
.cfg_ldo25 = rtw8822b_cfg_ldo25,
.false_alarm_statistics = rtw8822b_false_alarm_statistics,
.do_iqk = rtw8822b_do_iqk,
+
+ .coex_set_init = rtw8822b_coex_cfg_init,
+ .coex_set_ant_switch = rtw8822b_coex_cfg_ant_switch,
+ .coex_set_gnt_fix = rtw8822b_coex_cfg_gnt_fix,
+ .coex_set_gnt_debug = rtw8822b_coex_cfg_gnt_debug,
+ .coex_set_rfe_type = rtw8822b_coex_cfg_rfe_type,
+ .coex_set_wl_tx_power = rtw8822b_coex_cfg_wl_tx_power,
+ .coex_set_wl_rx_gain = rtw8822b_coex_cfg_wl_rx_gain,
+};
+
+/* Shared-Antenna Coex Table */
+static const struct coex_table_para table_sant_8822b[] = {
+ {0xffffffff, 0xffffffff}, /* case-0 */
+ {0x55555555, 0x55555555},
+ {0x66555555, 0x66555555},
+ {0xaaaaaaaa, 0xaaaaaaaa},
+ {0x5a5a5a5a, 0x5a5a5a5a},
+ {0xfafafafa, 0xfafafafa}, /* case-5 */
+ {0x6a5a6a5a, 0xaaaaaaaa},
+ {0x6a5a56aa, 0x6a5a56aa},
+ {0x6a5a5a5a, 0x6a5a5a5a},
+ {0x66555555, 0x5a5a5a5a},
+ {0x66555555, 0x6a5a5a5a}, /* case-10 */
+ {0x66555555, 0xfafafafa},
+ {0x66555555, 0x6a5a5aaa},
+ {0x66555555, 0x5aaa5aaa},
+ {0x66555555, 0xaaaa5aaa},
+ {0x66555555, 0xaaaaaaaa}, /* case-15 */
+ {0xffff55ff, 0xfafafafa},
+ {0xffff55ff, 0x6afa5afa},
+ {0xaaffffaa, 0xfafafafa},
+ {0xaa5555aa, 0x5a5a5a5a},
+ {0xaa5555aa, 0x6a5a5a5a}, /* case-20 */
+ {0xaa5555aa, 0xaaaaaaaa},
+ {0xffffffff, 0x5a5a5a5a},
+ {0xffffffff, 0x6a5a5a5a},
+ {0xffffffff, 0x55555555},
+ {0xffffffff, 0x6a5a5aaa}, /* case-25 */
+ {0x55555555, 0x5a5a5a5a},
+ {0x55555555, 0xaaaaaaaa},
+ {0x55555555, 0x6a5a6a5a},
+ {0x66556655, 0x66556655}
+};
+
+/* Non-Shared-Antenna Coex Table */
+static const struct coex_table_para table_nsant_8822b[] = {
+ {0xffffffff, 0xffffffff}, /* case-100 */
+ {0x55555555, 0x55555555},
+ {0x66555555, 0x66555555},
+ {0xaaaaaaaa, 0xaaaaaaaa},
+ {0x5a5a5a5a, 0x5a5a5a5a},
+ {0xfafafafa, 0xfafafafa}, /* case-105 */
+ {0x5afa5afa, 0x5afa5afa},
+ {0x55555555, 0xfafafafa},
+ {0x66555555, 0xfafafafa},
+ {0x66555555, 0x5a5a5a5a},
+ {0x66555555, 0x6a5a5a5a}, /* case-110 */
+ {0x66555555, 0xaaaaaaaa},
+ {0xffff55ff, 0xfafafafa},
+ {0xffff55ff, 0x5afa5afa},
+ {0xffff55ff, 0xaaaaaaaa},
+ {0xaaffffaa, 0xfafafafa}, /* case-115 */
+ {0xaaffffaa, 0x5afa5afa},
+ {0xaaffffaa, 0xaaaaaaaa},
+ {0xffffffff, 0xfafafafa},
+ {0xffffffff, 0x5afa5afa},
+ {0xffffffff, 0xaaaaaaaa}, /* case-120 */
+ {0x55ff55ff, 0x5afa5afa},
+ {0x55ff55ff, 0xaaaaaaaa},
+ {0x55ff55ff, 0x55ff55ff}
};
+/* Shared-Antenna TDMA */
+static const struct coex_tdma_para tdma_sant_8822b[] = {
+ { {0x00, 0x00, 0x00, 0x00, 0x00} }, /* case-0 */
+ { {0x61, 0x45, 0x03, 0x11, 0x11} },
+ { {0x61, 0x3a, 0x03, 0x11, 0x11} },
+ { {0x61, 0x30, 0x03, 0x11, 0x11} },
+ { {0x61, 0x20, 0x03, 0x11, 0x11} },
+ { {0x61, 0x10, 0x03, 0x11, 0x11} }, /* case-5 */
+ { {0x61, 0x45, 0x03, 0x11, 0x10} },
+ { {0x61, 0x3a, 0x03, 0x11, 0x10} },
+ { {0x61, 0x30, 0x03, 0x11, 0x10} },
+ { {0x61, 0x20, 0x03, 0x11, 0x10} },
+ { {0x61, 0x10, 0x03, 0x11, 0x10} }, /* case-10 */
+ { {0x61, 0x08, 0x03, 0x11, 0x14} },
+ { {0x61, 0x08, 0x03, 0x10, 0x14} },
+ { {0x51, 0x08, 0x03, 0x10, 0x54} },
+ { {0x51, 0x08, 0x03, 0x10, 0x55} },
+ { {0x51, 0x08, 0x07, 0x10, 0x54} }, /* case-15 */
+ { {0x51, 0x45, 0x03, 0x10, 0x10} },
+ { {0x51, 0x3a, 0x03, 0x10, 0x50} },
+ { {0x51, 0x30, 0x03, 0x10, 0x50} },
+ { {0x51, 0x20, 0x03, 0x10, 0x50} },
+ { {0x51, 0x10, 0x03, 0x10, 0x50} }, /* case-20 */
+ { {0x51, 0x4a, 0x03, 0x10, 0x50} },
+ { {0x51, 0x0c, 0x03, 0x10, 0x54} },
+ { {0x55, 0x08, 0x03, 0x10, 0x54} },
+ { {0x65, 0x10, 0x03, 0x11, 0x11} },
+ { {0x51, 0x10, 0x03, 0x10, 0x51} }, /* case-25 */
+ { {0x51, 0x08, 0x03, 0x10, 0x50} }
+};
+
+/* Non-Shared-Antenna TDMA */
+static const struct coex_tdma_para tdma_nsant_8822b[] = {
+ { {0x00, 0x00, 0x00, 0x00, 0x00} }, /* case-100 */
+ { {0x61, 0x45, 0x03, 0x11, 0x11} },
+ { {0x61, 0x3a, 0x03, 0x11, 0x11} },
+ { {0x61, 0x30, 0x03, 0x11, 0x11} },
+ { {0x61, 0x20, 0x03, 0x11, 0x11} },
+ { {0x61, 0x10, 0x03, 0x11, 0x11} }, /* case-105 */
+ { {0x61, 0x45, 0x03, 0x11, 0x10} },
+ { {0x61, 0x3a, 0x03, 0x11, 0x10} },
+ { {0x61, 0x30, 0x03, 0x11, 0x10} },
+ { {0x61, 0x20, 0x03, 0x11, 0x10} },
+ { {0x61, 0x10, 0x03, 0x11, 0x10} }, /* case-110 */
+ { {0x61, 0x08, 0x03, 0x11, 0x14} },
+ { {0x61, 0x08, 0x03, 0x10, 0x14} },
+ { {0x51, 0x08, 0x03, 0x10, 0x54} },
+ { {0x51, 0x08, 0x03, 0x10, 0x55} },
+ { {0x51, 0x08, 0x07, 0x10, 0x54} }, /* case-115 */
+ { {0x51, 0x45, 0x03, 0x10, 0x50} },
+ { {0x51, 0x3a, 0x03, 0x10, 0x50} },
+ { {0x51, 0x30, 0x03, 0x10, 0x50} },
+ { {0x51, 0x20, 0x03, 0x10, 0x50} },
+ { {0x51, 0x10, 0x03, 0x10, 0x50} } /* case-120 */
+};
+
+/* RSSI in percent (dBm = percent - 100) */
+static const u8 wl_rssi_step_8822b[] = {60, 50, 44, 30};
+static const u8 bt_rssi_step_8822b[] = {30, 30, 30, 30};
+static const struct coex_5g_afh_map afh_5g_8822b[] = { {0, 0, 0} };
+
+/* wl_tx_dec_power, bt_tx_dec_power, wl_rx_gain, bt_rx_lna_constrain */
+static const struct coex_rf_para rf_para_tx_8822b[] = {
+ {0, 0, false, 7}, /* for normal */
+ {0, 16, false, 7}, /* for WL-CPT */
+ {4, 0, true, 1},
+ {3, 6, true, 1},
+ {2, 9, true, 1},
+ {1, 13, true, 1}
+};
+
+static const struct coex_rf_para rf_para_rx_8822b[] = {
+ {0, 0, false, 7}, /* for normal */
+ {0, 16, false, 7}, /* for WL-CPT */
+ {4, 0, true, 1},
+ {3, 6, true, 1},
+ {2, 9, true, 1},
+ {1, 13, true, 1}
+};
+
+static_assert(ARRAY_SIZE(rf_para_tx_8822b) == ARRAY_SIZE(rf_para_rx_8822b));
+
struct rtw_chip_info rtw8822b_hw_spec = {
.ops = &rtw8822b_ops,
.id = RTW_CHIP_TYPE_8822B,
@@ -1588,6 +1986,32 @@ struct rtw_chip_info rtw8822b_hw_spec = {
.rf_tbl = {&rtw8822b_rf_a_tbl, &rtw8822b_rf_b_tbl},
.rfe_defs = rtw8822b_rfe_defs,
.rfe_defs_size = ARRAY_SIZE(rtw8822b_rfe_defs),
+
+ .coex_para_ver = 0x19062706,
+ .bt_desired_ver = 0x6,
+ .scbd_support = true,
+ .new_scbd10_def = false,
+ .pstdma_type = COEX_PSTDMA_FORCE_LPSOFF,
+ .bt_rssi_type = COEX_BTRSSI_RATIO,
+ .ant_isolation = 15,
+ .rssi_tolerance = 2,
+ .wl_rssi_step = wl_rssi_step_8822b,
+ .bt_rssi_step = bt_rssi_step_8822b,
+ .table_sant_num = ARRAY_SIZE(table_sant_8822b),
+ .table_sant = table_sant_8822b,
+ .table_nsant_num = ARRAY_SIZE(table_nsant_8822b),
+ .table_nsant = table_nsant_8822b,
+ .tdma_sant_num = ARRAY_SIZE(tdma_sant_8822b),
+ .tdma_sant = tdma_sant_8822b,
+ .tdma_nsant_num = ARRAY_SIZE(tdma_nsant_8822b),
+ .tdma_nsant = tdma_nsant_8822b,
+ .wl_rf_para_num = ARRAY_SIZE(rf_para_tx_8822b),
+ .wl_rf_para_tx = rf_para_tx_8822b,
+ .wl_rf_para_rx = rf_para_rx_8822b,
+ .bt_afh_span_bw20 = 0x24,
+ .bt_afh_span_bw40 = 0x36,
+ .afh_5g_num = ARRAY_SIZE(afh_5g_8822b),
+ .afh_5g = afh_5g_8822b,
};
EXPORT_SYMBOL(rtw8822b_hw_spec);
diff --git a/drivers/net/wireless/realtek/rtw88/rtw8822b_table.c b/drivers/net/wireless/realtek/rtw88/rtw8822b_table.c
index 2d2dfb495ce1..465f58411cab 100644
--- a/drivers/net/wireless/realtek/rtw88/rtw8822b_table.c
+++ b/drivers/net/wireless/realtek/rtw88/rtw8822b_table.c
@@ -20382,402 +20382,1182 @@ static const u32 rtw8822b_rf_b[] = {
RTW_DECL_TABLE_RF_RADIO(rtw8822b_rf_b, B);
-static const u8 rtw8822b_txpwr_lmt_type2[] = {
- 0, 0, 0, 0, 1, 32, 2, 0, 0, 0, 1, 28, 1, 0, 0, 0, 1, 30,
- 0, 0, 0, 0, 2, 32, 2, 0, 0, 0, 2, 28, 1, 0, 0, 0, 2, 30,
- 0, 0, 0, 0, 3, 32, 2, 0, 0, 0, 3, 28, 1, 0, 0, 0, 3, 30,
- 0, 0, 0, 0, 4, 32, 2, 0, 0, 0, 4, 28, 1, 0, 0, 0, 4, 30,
- 0, 0, 0, 0, 5, 32, 2, 0, 0, 0, 5, 28, 1, 0, 0, 0, 5, 30,
- 0, 0, 0, 0, 6, 32, 2, 0, 0, 0, 6, 28, 1, 0, 0, 0, 6, 30,
- 0, 0, 0, 0, 7, 32, 2, 0, 0, 0, 7, 28, 1, 0, 0, 0, 7, 30,
- 0, 0, 0, 0, 8, 32, 2, 0, 0, 0, 8, 28, 1, 0, 0, 0, 8, 30,
- 0, 0, 0, 0, 9, 32, 2, 0, 0, 0, 9, 28, 1, 0, 0, 0, 9, 30,
- 0, 0, 0, 0, 10, 32, 2, 0, 0, 0, 10, 28, 1, 0, 0, 0, 10, 30,
- 0, 0, 0, 0, 11, 32, 2, 0, 0, 0, 11, 28, 1, 0, 0, 0, 11, 30,
- 0, 0, 0, 0, 12, 26, 2, 0, 0, 0, 12, 28, 1, 0, 0, 0, 12, 30,
- 0, 0, 0, 0, 13, 20, 2, 0, 0, 0, 13, 28, 1, 0, 0, 0, 13, 28,
- 0, 0, 0, 0, 14, 63, 2, 0, 0, 0, 14, 63, 1, 0, 0, 0, 14, 32,
- 0, 0, 0, 1, 1, 26, 2, 0, 0, 1, 1, 30, 1, 0, 0, 1, 1, 34,
- 0, 0, 0, 1, 2, 30, 2, 0, 0, 1, 2, 30, 1, 0, 0, 1, 2, 34,
- 0, 0, 0, 1, 3, 32, 2, 0, 0, 1, 3, 30, 1, 0, 0, 1, 3, 34,
- 0, 0, 0, 1, 4, 34, 2, 0, 0, 1, 4, 30, 1, 0, 0, 1, 4, 34,
- 0, 0, 0, 1, 5, 34, 2, 0, 0, 1, 5, 30, 1, 0, 0, 1, 5, 34,
- 0, 0, 0, 1, 6, 34, 2, 0, 0, 1, 6, 30, 1, 0, 0, 1, 6, 34,
- 0, 0, 0, 1, 7, 34, 2, 0, 0, 1, 7, 30, 1, 0, 0, 1, 7, 34,
- 0, 0, 0, 1, 8, 34, 2, 0, 0, 1, 8, 30, 1, 0, 0, 1, 8, 34,
- 0, 0, 0, 1, 9, 32, 2, 0, 0, 1, 9, 30, 1, 0, 0, 1, 9, 34,
- 0, 0, 0, 1, 10, 30, 2, 0, 0, 1, 10, 30, 1, 0, 0, 1, 10, 34,
- 0, 0, 0, 1, 11, 28, 2, 0, 0, 1, 11, 30, 1, 0, 0, 1, 11, 34,
- 0, 0, 0, 1, 12, 22, 2, 0, 0, 1, 12, 30, 1, 0, 0, 1, 12, 34,
- 0, 0, 0, 1, 13, 14, 2, 0, 0, 1, 13, 30, 1, 0, 0, 1, 13, 34,
- 0, 0, 0, 1, 14, 63, 2, 0, 0, 1, 14, 63, 1, 0, 0, 1, 14, 63,
- 0, 0, 0, 2, 1, 26, 2, 0, 0, 2, 1, 30, 1, 0, 0, 2, 1, 34,
- 0, 0, 0, 2, 2, 30, 2, 0, 0, 2, 2, 30, 1, 0, 0, 2, 2, 34,
- 0, 0, 0, 2, 3, 32, 2, 0, 0, 2, 3, 30, 1, 0, 0, 2, 3, 34,
- 0, 0, 0, 2, 4, 34, 2, 0, 0, 2, 4, 30, 1, 0, 0, 2, 4, 34,
- 0, 0, 0, 2, 5, 34, 2, 0, 0, 2, 5, 30, 1, 0, 0, 2, 5, 34,
- 0, 0, 0, 2, 6, 34, 2, 0, 0, 2, 6, 30, 1, 0, 0, 2, 6, 34,
- 0, 0, 0, 2, 7, 34, 2, 0, 0, 2, 7, 30, 1, 0, 0, 2, 7, 34,
- 0, 0, 0, 2, 8, 34, 2, 0, 0, 2, 8, 30, 1, 0, 0, 2, 8, 34,
- 0, 0, 0, 2, 9, 32, 2, 0, 0, 2, 9, 30, 1, 0, 0, 2, 9, 34,
- 0, 0, 0, 2, 10, 30, 2, 0, 0, 2, 10, 30, 1, 0, 0, 2, 10, 34,
- 0, 0, 0, 2, 11, 26, 2, 0, 0, 2, 11, 30, 1, 0, 0, 2, 11, 34,
- 0, 0, 0, 2, 12, 20, 2, 0, 0, 2, 12, 30, 1, 0, 0, 2, 12, 34,
- 0, 0, 0, 2, 13, 14, 2, 0, 0, 2, 13, 30, 1, 0, 0, 2, 13, 34,
- 0, 0, 0, 2, 14, 63, 2, 0, 0, 2, 14, 63, 1, 0, 0, 2, 14, 63,
- 0, 0, 0, 3, 1, 26, 2, 0, 0, 3, 1, 18, 1, 0, 0, 3, 1, 30,
- 0, 0, 0, 3, 2, 28, 2, 0, 0, 3, 2, 18, 1, 0, 0, 3, 2, 30,
- 0, 0, 0, 3, 3, 30, 2, 0, 0, 3, 3, 18, 1, 0, 0, 3, 3, 30,
- 0, 0, 0, 3, 4, 30, 2, 0, 0, 3, 4, 18, 1, 0, 0, 3, 4, 30,
- 0, 0, 0, 3, 5, 32, 2, 0, 0, 3, 5, 18, 1, 0, 0, 3, 5, 30,
- 0, 0, 0, 3, 6, 32, 2, 0, 0, 3, 6, 18, 1, 0, 0, 3, 6, 30,
- 0, 0, 0, 3, 7, 32, 2, 0, 0, 3, 7, 18, 1, 0, 0, 3, 7, 30,
- 0, 0, 0, 3, 8, 30, 2, 0, 0, 3, 8, 18, 1, 0, 0, 3, 8, 30,
- 0, 0, 0, 3, 9, 30, 2, 0, 0, 3, 9, 18, 1, 0, 0, 3, 9, 30,
- 0, 0, 0, 3, 10, 28, 2, 0, 0, 3, 10, 18, 1, 0, 0, 3, 10, 30,
- 0, 0, 0, 3, 11, 26, 2, 0, 0, 3, 11, 18, 1, 0, 0, 3, 11, 30,
- 0, 0, 0, 3, 12, 20, 2, 0, 0, 3, 12, 18, 1, 0, 0, 3, 12, 30,
- 0, 0, 0, 3, 13, 14, 2, 0, 0, 3, 13, 18, 1, 0, 0, 3, 13, 30,
- 0, 0, 0, 3, 14, 63, 2, 0, 0, 3, 14, 63, 1, 0, 0, 3, 14, 63,
- 0, 0, 1, 2, 1, 63, 2, 0, 1, 2, 1, 63, 1, 0, 1, 2, 1, 63,
- 0, 0, 1, 2, 2, 63, 2, 0, 1, 2, 2, 63, 1, 0, 1, 2, 2, 63,
- 0, 0, 1, 2, 3, 26, 2, 0, 1, 2, 3, 30, 1, 0, 1, 2, 3, 34,
- 0, 0, 1, 2, 4, 26, 2, 0, 1, 2, 4, 30, 1, 0, 1, 2, 4, 34,
- 0, 0, 1, 2, 5, 30, 2, 0, 1, 2, 5, 30, 1, 0, 1, 2, 5, 34,
- 0, 0, 1, 2, 6, 32, 2, 0, 1, 2, 6, 30, 1, 0, 1, 2, 6, 34,
- 0, 0, 1, 2, 7, 30, 2, 0, 1, 2, 7, 30, 1, 0, 1, 2, 7, 34,
- 0, 0, 1, 2, 8, 26, 2, 0, 1, 2, 8, 30, 1, 0, 1, 2, 8, 34,
- 0, 0, 1, 2, 9, 26, 2, 0, 1, 2, 9, 30, 1, 0, 1, 2, 9, 34,
- 0, 0, 1, 2, 10, 20, 2, 0, 1, 2, 10, 30, 1, 0, 1, 2, 10, 34,
- 0, 0, 1, 2, 11, 14, 2, 0, 1, 2, 11, 30, 1, 0, 1, 2, 11, 34,
- 0, 0, 1, 2, 12, 63, 2, 0, 1, 2, 12, 63, 1, 0, 1, 2, 12, 63,
- 0, 0, 1, 2, 13, 63, 2, 0, 1, 2, 13, 63, 1, 0, 1, 2, 13, 63,
- 0, 0, 1, 2, 14, 63, 2, 0, 1, 2, 14, 63, 1, 0, 1, 2, 14, 63,
- 0, 0, 1, 3, 1, 63, 2, 0, 1, 3, 1, 63, 1, 0, 1, 3, 1, 63,
- 0, 0, 1, 3, 2, 63, 2, 0, 1, 3, 2, 63, 1, 0, 1, 3, 2, 63,
- 0, 0, 1, 3, 3, 24, 2, 0, 1, 3, 3, 18, 1, 0, 1, 3, 3, 30,
- 0, 0, 1, 3, 4, 24, 2, 0, 1, 3, 4, 18, 1, 0, 1, 3, 4, 30,
- 0, 0, 1, 3, 5, 26, 2, 0, 1, 3, 5, 18, 1, 0, 1, 3, 5, 30,
- 0, 0, 1, 3, 6, 28, 2, 0, 1, 3, 6, 18, 1, 0, 1, 3, 6, 30,
- 0, 0, 1, 3, 7, 26, 2, 0, 1, 3, 7, 18, 1, 0, 1, 3, 7, 30,
- 0, 0, 1, 3, 8, 26, 2, 0, 1, 3, 8, 18, 1, 0, 1, 3, 8, 30,
- 0, 0, 1, 3, 9, 26, 2, 0, 1, 3, 9, 18, 1, 0, 1, 3, 9, 30,
- 0, 0, 1, 3, 10, 20, 2, 0, 1, 3, 10, 18, 1, 0, 1, 3, 10, 30,
- 0, 0, 1, 3, 11, 14, 2, 0, 1, 3, 11, 18, 1, 0, 1, 3, 11, 30,
- 0, 0, 1, 3, 12, 63, 2, 0, 1, 3, 12, 63, 1, 0, 1, 3, 12, 63,
- 0, 0, 1, 3, 13, 63, 2, 0, 1, 3, 13, 63, 1, 0, 1, 3, 13, 63,
- 0, 0, 1, 3, 14, 63, 2, 0, 1, 3, 14, 63, 1, 0, 1, 3, 14, 63,
- 0, 1, 0, 1, 36, 36, 2, 1, 0, 1, 36, 32, 1, 1, 0, 1, 36, 30,
- 0, 1, 0, 1, 40, 38, 2, 1, 0, 1, 40, 32, 1, 1, 0, 1, 40, 30,
- 0, 1, 0, 1, 44, 38, 2, 1, 0, 1, 44, 32, 1, 1, 0, 1, 44, 30,
- 0, 1, 0, 1, 48, 38, 2, 1, 0, 1, 48, 32, 1, 1, 0, 1, 48, 30,
- 0, 1, 0, 1, 52, 38, 2, 1, 0, 1, 52, 32, 1, 1, 0, 1, 52, 28,
- 0, 1, 0, 1, 56, 38, 2, 1, 0, 1, 56, 32, 1, 1, 0, 1, 56, 28,
- 0, 1, 0, 1, 60, 38, 2, 1, 0, 1, 60, 32, 1, 1, 0, 1, 60, 28,
- 0, 1, 0, 1, 64, 34, 2, 1, 0, 1, 64, 32, 1, 1, 0, 1, 64, 28,
- 0, 1, 0, 1, 100, 32, 2, 1, 0, 1, 100, 32, 1, 1, 0, 1, 100, 32,
- 0, 1, 0, 1, 104, 38, 2, 1, 0, 1, 104, 32, 1, 1, 0, 1, 104, 32,
- 0, 1, 0, 1, 108, 38, 2, 1, 0, 1, 108, 32, 1, 1, 0, 1, 108, 32,
- 0, 1, 0, 1, 112, 38, 2, 1, 0, 1, 112, 32, 1, 1, 0, 1, 112, 32,
- 0, 1, 0, 1, 116, 38, 2, 1, 0, 1, 116, 32, 1, 1, 0, 1, 116, 32,
- 0, 1, 0, 1, 120, 38, 2, 1, 0, 1, 120, 32, 1, 1, 0, 1, 120, 32,
- 0, 1, 0, 1, 124, 38, 2, 1, 0, 1, 124, 32, 1, 1, 0, 1, 124, 32,
- 0, 1, 0, 1, 128, 38, 2, 1, 0, 1, 128, 32, 1, 1, 0, 1, 128, 32,
- 0, 1, 0, 1, 132, 38, 2, 1, 0, 1, 132, 32, 1, 1, 0, 1, 132, 32,
- 0, 1, 0, 1, 136, 38, 2, 1, 0, 1, 136, 32, 1, 1, 0, 1, 136, 32,
- 0, 1, 0, 1, 140, 34, 2, 1, 0, 1, 140, 32, 1, 1, 0, 1, 140, 32,
- 0, 1, 0, 1, 144, 34, 2, 1, 0, 1, 144, 32, 1, 1, 0, 1, 144, 63,
- 0, 1, 0, 1, 149, 38, 2, 1, 0, 1, 149, 63, 1, 1, 0, 1, 149, 63,
- 0, 1, 0, 1, 153, 38, 2, 1, 0, 1, 153, 63, 1, 1, 0, 1, 153, 63,
- 0, 1, 0, 1, 157, 38, 2, 1, 0, 1, 157, 63, 1, 1, 0, 1, 157, 63,
- 0, 1, 0, 1, 161, 38, 2, 1, 0, 1, 161, 63, 1, 1, 0, 1, 161, 63,
- 0, 1, 0, 1, 165, 38, 2, 1, 0, 1, 165, 63, 1, 1, 0, 1, 165, 63,
- 0, 1, 0, 2, 36, 36, 2, 1, 0, 2, 36, 32, 1, 1, 0, 2, 36, 28,
- 0, 1, 0, 2, 40, 38, 2, 1, 0, 2, 40, 32, 1, 1, 0, 2, 40, 28,
- 0, 1, 0, 2, 44, 38, 2, 1, 0, 2, 44, 32, 1, 1, 0, 2, 44, 28,
- 0, 1, 0, 2, 48, 38, 2, 1, 0, 2, 48, 32, 1, 1, 0, 2, 48, 28,
- 0, 1, 0, 2, 52, 38, 2, 1, 0, 2, 52, 32, 1, 1, 0, 2, 52, 28,
- 0, 1, 0, 2, 56, 38, 2, 1, 0, 2, 56, 32, 1, 1, 0, 2, 56, 28,
- 0, 1, 0, 2, 60, 38, 2, 1, 0, 2, 60, 32, 1, 1, 0, 2, 60, 28,
- 0, 1, 0, 2, 64, 34, 2, 1, 0, 2, 64, 32, 1, 1, 0, 2, 64, 28,
- 0, 1, 0, 2, 100, 32, 2, 1, 0, 2, 100, 32, 1, 1, 0, 2, 100, 32,
- 0, 1, 0, 2, 104, 38, 2, 1, 0, 2, 104, 32, 1, 1, 0, 2, 104, 32,
- 0, 1, 0, 2, 108, 38, 2, 1, 0, 2, 108, 32, 1, 1, 0, 2, 108, 32,
- 0, 1, 0, 2, 112, 38, 2, 1, 0, 2, 112, 32, 1, 1, 0, 2, 112, 32,
- 0, 1, 0, 2, 116, 38, 2, 1, 0, 2, 116, 32, 1, 1, 0, 2, 116, 32,
- 0, 1, 0, 2, 120, 38, 2, 1, 0, 2, 120, 32, 1, 1, 0, 2, 120, 32,
- 0, 1, 0, 2, 124, 38, 2, 1, 0, 2, 124, 32, 1, 1, 0, 2, 124, 32,
- 0, 1, 0, 2, 128, 38, 2, 1, 0, 2, 128, 32, 1, 1, 0, 2, 128, 32,
- 0, 1, 0, 2, 132, 38, 2, 1, 0, 2, 132, 32, 1, 1, 0, 2, 132, 32,
- 0, 1, 0, 2, 136, 38, 2, 1, 0, 2, 136, 32, 1, 1, 0, 2, 136, 32,
- 0, 1, 0, 2, 140, 32, 2, 1, 0, 2, 140, 32, 1, 1, 0, 2, 140, 32,
- 0, 1, 0, 2, 144, 26, 2, 1, 0, 2, 144, 63, 1, 1, 0, 2, 144, 63,
- 0, 1, 0, 2, 149, 38, 2, 1, 0, 2, 149, 63, 1, 1, 0, 2, 149, 63,
- 0, 1, 0, 2, 153, 38, 2, 1, 0, 2, 153, 63, 1, 1, 0, 2, 153, 63,
- 0, 1, 0, 2, 157, 38, 2, 1, 0, 2, 157, 63, 1, 1, 0, 2, 157, 63,
- 0, 1, 0, 2, 161, 38, 2, 1, 0, 2, 161, 63, 1, 1, 0, 2, 161, 63,
- 0, 1, 0, 2, 165, 38, 2, 1, 0, 2, 165, 63, 1, 1, 0, 2, 165, 63,
- 0, 1, 0, 3, 36, 34, 2, 1, 0, 3, 36, 20, 1, 1, 0, 3, 36, 22,
- 0, 1, 0, 3, 40, 36, 2, 1, 0, 3, 40, 20, 1, 1, 0, 3, 40, 22,
- 0, 1, 0, 3, 44, 36, 2, 1, 0, 3, 44, 20, 1, 1, 0, 3, 44, 22,
- 0, 1, 0, 3, 48, 36, 2, 1, 0, 3, 48, 20, 1, 1, 0, 3, 48, 22,
- 0, 1, 0, 3, 52, 36, 2, 1, 0, 3, 52, 20, 1, 1, 0, 3, 52, 22,
- 0, 1, 0, 3, 56, 36, 2, 1, 0, 3, 56, 20, 1, 1, 0, 3, 56, 22,
- 0, 1, 0, 3, 60, 36, 2, 1, 0, 3, 60, 20, 1, 1, 0, 3, 60, 22,
- 0, 1, 0, 3, 64, 34, 2, 1, 0, 3, 64, 20, 1, 1, 0, 3, 64, 22,
- 0, 1, 0, 3, 100, 32, 2, 1, 0, 3, 100, 20, 1, 1, 0, 3, 100, 30,
- 0, 1, 0, 3, 104, 36, 2, 1, 0, 3, 104, 20, 1, 1, 0, 3, 104, 30,
- 0, 1, 0, 3, 108, 38, 2, 1, 0, 3, 108, 20, 1, 1, 0, 3, 108, 30,
- 0, 1, 0, 3, 112, 38, 2, 1, 0, 3, 112, 20, 1, 1, 0, 3, 112, 30,
- 0, 1, 0, 3, 116, 38, 2, 1, 0, 3, 116, 20, 1, 1, 0, 3, 116, 30,
- 0, 1, 0, 3, 120, 38, 2, 1, 0, 3, 120, 20, 1, 1, 0, 3, 120, 30,
- 0, 1, 0, 3, 124, 38, 2, 1, 0, 3, 124, 20, 1, 1, 0, 3, 124, 30,
- 0, 1, 0, 3, 128, 38, 2, 1, 0, 3, 128, 20, 1, 1, 0, 3, 128, 30,
- 0, 1, 0, 3, 132, 38, 2, 1, 0, 3, 132, 20, 1, 1, 0, 3, 132, 30,
- 0, 1, 0, 3, 136, 36, 2, 1, 0, 3, 136, 20, 1, 1, 0, 3, 136, 30,
- 0, 1, 0, 3, 140, 32, 2, 1, 0, 3, 140, 20, 1, 1, 0, 3, 140, 30,
- 0, 1, 0, 3, 144, 26, 2, 1, 0, 3, 144, 63, 1, 1, 0, 3, 144, 63,
- 0, 1, 0, 3, 149, 38, 2, 1, 0, 3, 149, 63, 1, 1, 0, 3, 149, 63,
- 0, 1, 0, 3, 153, 38, 2, 1, 0, 3, 153, 63, 1, 1, 0, 3, 153, 63,
- 0, 1, 0, 3, 157, 38, 2, 1, 0, 3, 157, 63, 1, 1, 0, 3, 157, 63,
- 0, 1, 0, 3, 161, 38, 2, 1, 0, 3, 161, 63, 1, 1, 0, 3, 161, 63,
- 0, 1, 0, 3, 165, 38, 2, 1, 0, 3, 165, 63, 1, 1, 0, 3, 165, 63,
- 0, 1, 1, 2, 38, 28, 2, 1, 1, 2, 38, 30, 1, 1, 1, 2, 38, 30,
- 0, 1, 1, 2, 46, 36, 2, 1, 1, 2, 46, 30, 1, 1, 1, 2, 46, 30,
- 0, 1, 1, 2, 54, 36, 2, 1, 1, 2, 54, 30, 1, 1, 1, 2, 54, 30,
- 0, 1, 1, 2, 62, 30, 2, 1, 1, 2, 62, 30, 1, 1, 1, 2, 62, 30,
- 0, 1, 1, 2, 102, 30, 2, 1, 1, 2, 102, 30, 1, 1, 1, 2, 102, 30,
- 0, 1, 1, 2, 110, 36, 2, 1, 1, 2, 110, 30, 1, 1, 1, 2, 110, 30,
- 0, 1, 1, 2, 118, 36, 2, 1, 1, 2, 118, 30, 1, 1, 1, 2, 118, 30,
- 0, 1, 1, 2, 126, 36, 2, 1, 1, 2, 126, 30, 1, 1, 1, 2, 126, 30,
- 0, 1, 1, 2, 134, 36, 2, 1, 1, 2, 134, 30, 1, 1, 1, 2, 134, 30,
- 0, 1, 1, 2, 142, 30, 2, 1, 1, 2, 142, 63, 1, 1, 1, 2, 142, 63,
- 0, 1, 1, 2, 151, 36, 2, 1, 1, 2, 151, 63, 1, 1, 1, 2, 151, 63,
- 0, 1, 1, 2, 159, 36, 2, 1, 1, 2, 159, 63, 1, 1, 1, 2, 159, 63,
- 0, 1, 1, 3, 38, 26, 2, 1, 1, 3, 38, 20, 1, 1, 1, 3, 38, 22,
- 0, 1, 1, 3, 46, 36, 2, 1, 1, 3, 46, 20, 1, 1, 1, 3, 46, 22,
- 0, 1, 1, 3, 54, 36, 2, 1, 1, 3, 54, 20, 1, 1, 1, 3, 54, 22,
- 0, 1, 1, 3, 62, 28, 2, 1, 1, 3, 62, 20, 1, 1, 1, 3, 62, 22,
- 0, 1, 1, 3, 102, 28, 2, 1, 1, 3, 102, 20, 1, 1, 1, 3, 102, 30,
- 0, 1, 1, 3, 110, 36, 2, 1, 1, 3, 110, 20, 1, 1, 1, 3, 110, 30,
- 0, 1, 1, 3, 118, 36, 2, 1, 1, 3, 118, 20, 1, 1, 1, 3, 118, 30,
- 0, 1, 1, 3, 126, 36, 2, 1, 1, 3, 126, 20, 1, 1, 1, 3, 126, 30,
- 0, 1, 1, 3, 134, 36, 2, 1, 1, 3, 134, 20, 1, 1, 1, 3, 134, 30,
- 0, 1, 1, 3, 142, 30, 2, 1, 1, 3, 142, 63, 1, 1, 1, 3, 142, 63,
- 0, 1, 1, 3, 151, 36, 2, 1, 1, 3, 151, 63, 1, 1, 1, 3, 151, 63,
- 0, 1, 1, 3, 159, 36, 2, 1, 1, 3, 159, 63, 1, 1, 1, 3, 159, 63,
- 0, 1, 2, 4, 42, 26, 2, 1, 2, 4, 42, 30, 1, 1, 2, 4, 42, 28,
- 0, 1, 2, 4, 58, 26, 2, 1, 2, 4, 58, 30, 1, 1, 2, 4, 58, 28,
- 0, 1, 2, 4, 106, 26, 2, 1, 2, 4, 106, 30, 1, 1, 2, 4, 106, 30,
- 0, 1, 2, 4, 122, 36, 2, 1, 2, 4, 122, 30, 1, 1, 2, 4, 122, 30,
- 0, 1, 2, 4, 138, 36, 2, 1, 2, 4, 138, 63, 1, 1, 2, 4, 138, 63,
- 0, 1, 2, 4, 155, 36, 2, 1, 2, 4, 155, 63, 1, 1, 2, 4, 155, 63,
- 0, 1, 2, 5, 42, 24, 2, 1, 2, 5, 42, 20, 1, 1, 2, 5, 42, 22,
- 0, 1, 2, 5, 58, 24, 2, 1, 2, 5, 58, 20, 1, 1, 2, 5, 58, 22,
- 0, 1, 2, 5, 106, 26, 2, 1, 2, 5, 106, 20, 1, 1, 2, 5, 106, 30,
- 0, 1, 2, 5, 122, 36, 2, 1, 2, 5, 122, 20, 1, 1, 2, 5, 122, 30,
- 0, 1, 2, 5, 138, 36, 2, 1, 2, 5, 138, 63, 1, 1, 2, 5, 138, 63,
- 0, 1, 2, 5, 155, 36, 2, 1, 2, 5, 155, 63, 1, 1, 2, 5, 155, 63
+static const struct rtw_txpwr_lmt_cfg_pair rtw8822b_txpwr_lmt_type2[] = {
+ { 0, 0, 0, 0, 1, 32, },
+ { 2, 0, 0, 0, 1, 28, },
+ { 1, 0, 0, 0, 1, 30, },
+ { 0, 0, 0, 0, 2, 32, },
+ { 2, 0, 0, 0, 2, 28, },
+ { 1, 0, 0, 0, 2, 30, },
+ { 0, 0, 0, 0, 3, 32, },
+ { 2, 0, 0, 0, 3, 28, },
+ { 1, 0, 0, 0, 3, 30, },
+ { 0, 0, 0, 0, 4, 32, },
+ { 2, 0, 0, 0, 4, 28, },
+ { 1, 0, 0, 0, 4, 30, },
+ { 0, 0, 0, 0, 5, 32, },
+ { 2, 0, 0, 0, 5, 28, },
+ { 1, 0, 0, 0, 5, 30, },
+ { 0, 0, 0, 0, 6, 32, },
+ { 2, 0, 0, 0, 6, 28, },
+ { 1, 0, 0, 0, 6, 30, },
+ { 0, 0, 0, 0, 7, 32, },
+ { 2, 0, 0, 0, 7, 28, },
+ { 1, 0, 0, 0, 7, 30, },
+ { 0, 0, 0, 0, 8, 32, },
+ { 2, 0, 0, 0, 8, 28, },
+ { 1, 0, 0, 0, 8, 30, },
+ { 0, 0, 0, 0, 9, 32, },
+ { 2, 0, 0, 0, 9, 28, },
+ { 1, 0, 0, 0, 9, 30, },
+ { 0, 0, 0, 0, 10, 32, },
+ { 2, 0, 0, 0, 10, 28, },
+ { 1, 0, 0, 0, 10, 30, },
+ { 0, 0, 0, 0, 11, 32, },
+ { 2, 0, 0, 0, 11, 28, },
+ { 1, 0, 0, 0, 11, 30, },
+ { 0, 0, 0, 0, 12, 26, },
+ { 2, 0, 0, 0, 12, 28, },
+ { 1, 0, 0, 0, 12, 30, },
+ { 0, 0, 0, 0, 13, 20, },
+ { 2, 0, 0, 0, 13, 28, },
+ { 1, 0, 0, 0, 13, 28, },
+ { 0, 0, 0, 0, 14, 63, },
+ { 2, 0, 0, 0, 14, 63, },
+ { 1, 0, 0, 0, 14, 32, },
+ { 0, 0, 0, 1, 1, 26, },
+ { 2, 0, 0, 1, 1, 30, },
+ { 1, 0, 0, 1, 1, 34, },
+ { 0, 0, 0, 1, 2, 30, },
+ { 2, 0, 0, 1, 2, 30, },
+ { 1, 0, 0, 1, 2, 34, },
+ { 0, 0, 0, 1, 3, 32, },
+ { 2, 0, 0, 1, 3, 30, },
+ { 1, 0, 0, 1, 3, 34, },
+ { 0, 0, 0, 1, 4, 34, },
+ { 2, 0, 0, 1, 4, 30, },
+ { 1, 0, 0, 1, 4, 34, },
+ { 0, 0, 0, 1, 5, 34, },
+ { 2, 0, 0, 1, 5, 30, },
+ { 1, 0, 0, 1, 5, 34, },
+ { 0, 0, 0, 1, 6, 34, },
+ { 2, 0, 0, 1, 6, 30, },
+ { 1, 0, 0, 1, 6, 34, },
+ { 0, 0, 0, 1, 7, 34, },
+ { 2, 0, 0, 1, 7, 30, },
+ { 1, 0, 0, 1, 7, 34, },
+ { 0, 0, 0, 1, 8, 34, },
+ { 2, 0, 0, 1, 8, 30, },
+ { 1, 0, 0, 1, 8, 34, },
+ { 0, 0, 0, 1, 9, 32, },
+ { 2, 0, 0, 1, 9, 30, },
+ { 1, 0, 0, 1, 9, 34, },
+ { 0, 0, 0, 1, 10, 30, },
+ { 2, 0, 0, 1, 10, 30, },
+ { 1, 0, 0, 1, 10, 34, },
+ { 0, 0, 0, 1, 11, 28, },
+ { 2, 0, 0, 1, 11, 30, },
+ { 1, 0, 0, 1, 11, 34, },
+ { 0, 0, 0, 1, 12, 22, },
+ { 2, 0, 0, 1, 12, 30, },
+ { 1, 0, 0, 1, 12, 34, },
+ { 0, 0, 0, 1, 13, 14, },
+ { 2, 0, 0, 1, 13, 30, },
+ { 1, 0, 0, 1, 13, 34, },
+ { 0, 0, 0, 1, 14, 63, },
+ { 2, 0, 0, 1, 14, 63, },
+ { 1, 0, 0, 1, 14, 63, },
+ { 0, 0, 0, 2, 1, 26, },
+ { 2, 0, 0, 2, 1, 30, },
+ { 1, 0, 0, 2, 1, 34, },
+ { 0, 0, 0, 2, 2, 30, },
+ { 2, 0, 0, 2, 2, 30, },
+ { 1, 0, 0, 2, 2, 34, },
+ { 0, 0, 0, 2, 3, 32, },
+ { 2, 0, 0, 2, 3, 30, },
+ { 1, 0, 0, 2, 3, 34, },
+ { 0, 0, 0, 2, 4, 34, },
+ { 2, 0, 0, 2, 4, 30, },
+ { 1, 0, 0, 2, 4, 34, },
+ { 0, 0, 0, 2, 5, 34, },
+ { 2, 0, 0, 2, 5, 30, },
+ { 1, 0, 0, 2, 5, 34, },
+ { 0, 0, 0, 2, 6, 34, },
+ { 2, 0, 0, 2, 6, 30, },
+ { 1, 0, 0, 2, 6, 34, },
+ { 0, 0, 0, 2, 7, 34, },
+ { 2, 0, 0, 2, 7, 30, },
+ { 1, 0, 0, 2, 7, 34, },
+ { 0, 0, 0, 2, 8, 34, },
+ { 2, 0, 0, 2, 8, 30, },
+ { 1, 0, 0, 2, 8, 34, },
+ { 0, 0, 0, 2, 9, 32, },
+ { 2, 0, 0, 2, 9, 30, },
+ { 1, 0, 0, 2, 9, 34, },
+ { 0, 0, 0, 2, 10, 30, },
+ { 2, 0, 0, 2, 10, 30, },
+ { 1, 0, 0, 2, 10, 34, },
+ { 0, 0, 0, 2, 11, 26, },
+ { 2, 0, 0, 2, 11, 30, },
+ { 1, 0, 0, 2, 11, 34, },
+ { 0, 0, 0, 2, 12, 20, },
+ { 2, 0, 0, 2, 12, 30, },
+ { 1, 0, 0, 2, 12, 34, },
+ { 0, 0, 0, 2, 13, 14, },
+ { 2, 0, 0, 2, 13, 30, },
+ { 1, 0, 0, 2, 13, 34, },
+ { 0, 0, 0, 2, 14, 63, },
+ { 2, 0, 0, 2, 14, 63, },
+ { 1, 0, 0, 2, 14, 63, },
+ { 0, 0, 0, 3, 1, 26, },
+ { 2, 0, 0, 3, 1, 18, },
+ { 1, 0, 0, 3, 1, 30, },
+ { 0, 0, 0, 3, 2, 28, },
+ { 2, 0, 0, 3, 2, 18, },
+ { 1, 0, 0, 3, 2, 30, },
+ { 0, 0, 0, 3, 3, 30, },
+ { 2, 0, 0, 3, 3, 18, },
+ { 1, 0, 0, 3, 3, 30, },
+ { 0, 0, 0, 3, 4, 30, },
+ { 2, 0, 0, 3, 4, 18, },
+ { 1, 0, 0, 3, 4, 30, },
+ { 0, 0, 0, 3, 5, 32, },
+ { 2, 0, 0, 3, 5, 18, },
+ { 1, 0, 0, 3, 5, 30, },
+ { 0, 0, 0, 3, 6, 32, },
+ { 2, 0, 0, 3, 6, 18, },
+ { 1, 0, 0, 3, 6, 30, },
+ { 0, 0, 0, 3, 7, 32, },
+ { 2, 0, 0, 3, 7, 18, },
+ { 1, 0, 0, 3, 7, 30, },
+ { 0, 0, 0, 3, 8, 30, },
+ { 2, 0, 0, 3, 8, 18, },
+ { 1, 0, 0, 3, 8, 30, },
+ { 0, 0, 0, 3, 9, 30, },
+ { 2, 0, 0, 3, 9, 18, },
+ { 1, 0, 0, 3, 9, 30, },
+ { 0, 0, 0, 3, 10, 28, },
+ { 2, 0, 0, 3, 10, 18, },
+ { 1, 0, 0, 3, 10, 30, },
+ { 0, 0, 0, 3, 11, 26, },
+ { 2, 0, 0, 3, 11, 18, },
+ { 1, 0, 0, 3, 11, 30, },
+ { 0, 0, 0, 3, 12, 20, },
+ { 2, 0, 0, 3, 12, 18, },
+ { 1, 0, 0, 3, 12, 30, },
+ { 0, 0, 0, 3, 13, 14, },
+ { 2, 0, 0, 3, 13, 18, },
+ { 1, 0, 0, 3, 13, 30, },
+ { 0, 0, 0, 3, 14, 63, },
+ { 2, 0, 0, 3, 14, 63, },
+ { 1, 0, 0, 3, 14, 63, },
+ { 0, 0, 1, 2, 1, 63, },
+ { 2, 0, 1, 2, 1, 63, },
+ { 1, 0, 1, 2, 1, 63, },
+ { 0, 0, 1, 2, 2, 63, },
+ { 2, 0, 1, 2, 2, 63, },
+ { 1, 0, 1, 2, 2, 63, },
+ { 0, 0, 1, 2, 3, 26, },
+ { 2, 0, 1, 2, 3, 30, },
+ { 1, 0, 1, 2, 3, 34, },
+ { 0, 0, 1, 2, 4, 26, },
+ { 2, 0, 1, 2, 4, 30, },
+ { 1, 0, 1, 2, 4, 34, },
+ { 0, 0, 1, 2, 5, 30, },
+ { 2, 0, 1, 2, 5, 30, },
+ { 1, 0, 1, 2, 5, 34, },
+ { 0, 0, 1, 2, 6, 32, },
+ { 2, 0, 1, 2, 6, 30, },
+ { 1, 0, 1, 2, 6, 34, },
+ { 0, 0, 1, 2, 7, 30, },
+ { 2, 0, 1, 2, 7, 30, },
+ { 1, 0, 1, 2, 7, 34, },
+ { 0, 0, 1, 2, 8, 26, },
+ { 2, 0, 1, 2, 8, 30, },
+ { 1, 0, 1, 2, 8, 34, },
+ { 0, 0, 1, 2, 9, 26, },
+ { 2, 0, 1, 2, 9, 30, },
+ { 1, 0, 1, 2, 9, 34, },
+ { 0, 0, 1, 2, 10, 20, },
+ { 2, 0, 1, 2, 10, 30, },
+ { 1, 0, 1, 2, 10, 34, },
+ { 0, 0, 1, 2, 11, 14, },
+ { 2, 0, 1, 2, 11, 30, },
+ { 1, 0, 1, 2, 11, 34, },
+ { 0, 0, 1, 2, 12, 63, },
+ { 2, 0, 1, 2, 12, 63, },
+ { 1, 0, 1, 2, 12, 63, },
+ { 0, 0, 1, 2, 13, 63, },
+ { 2, 0, 1, 2, 13, 63, },
+ { 1, 0, 1, 2, 13, 63, },
+ { 0, 0, 1, 2, 14, 63, },
+ { 2, 0, 1, 2, 14, 63, },
+ { 1, 0, 1, 2, 14, 63, },
+ { 0, 0, 1, 3, 1, 63, },
+ { 2, 0, 1, 3, 1, 63, },
+ { 1, 0, 1, 3, 1, 63, },
+ { 0, 0, 1, 3, 2, 63, },
+ { 2, 0, 1, 3, 2, 63, },
+ { 1, 0, 1, 3, 2, 63, },
+ { 0, 0, 1, 3, 3, 24, },
+ { 2, 0, 1, 3, 3, 18, },
+ { 1, 0, 1, 3, 3, 30, },
+ { 0, 0, 1, 3, 4, 24, },
+ { 2, 0, 1, 3, 4, 18, },
+ { 1, 0, 1, 3, 4, 30, },
+ { 0, 0, 1, 3, 5, 26, },
+ { 2, 0, 1, 3, 5, 18, },
+ { 1, 0, 1, 3, 5, 30, },
+ { 0, 0, 1, 3, 6, 28, },
+ { 2, 0, 1, 3, 6, 18, },
+ { 1, 0, 1, 3, 6, 30, },
+ { 0, 0, 1, 3, 7, 26, },
+ { 2, 0, 1, 3, 7, 18, },
+ { 1, 0, 1, 3, 7, 30, },
+ { 0, 0, 1, 3, 8, 26, },
+ { 2, 0, 1, 3, 8, 18, },
+ { 1, 0, 1, 3, 8, 30, },
+ { 0, 0, 1, 3, 9, 26, },
+ { 2, 0, 1, 3, 9, 18, },
+ { 1, 0, 1, 3, 9, 30, },
+ { 0, 0, 1, 3, 10, 20, },
+ { 2, 0, 1, 3, 10, 18, },
+ { 1, 0, 1, 3, 10, 30, },
+ { 0, 0, 1, 3, 11, 14, },
+ { 2, 0, 1, 3, 11, 18, },
+ { 1, 0, 1, 3, 11, 30, },
+ { 0, 0, 1, 3, 12, 63, },
+ { 2, 0, 1, 3, 12, 63, },
+ { 1, 0, 1, 3, 12, 63, },
+ { 0, 0, 1, 3, 13, 63, },
+ { 2, 0, 1, 3, 13, 63, },
+ { 1, 0, 1, 3, 13, 63, },
+ { 0, 0, 1, 3, 14, 63, },
+ { 2, 0, 1, 3, 14, 63, },
+ { 1, 0, 1, 3, 14, 63, },
+ { 0, 1, 0, 1, 36, 36, },
+ { 2, 1, 0, 1, 36, 32, },
+ { 1, 1, 0, 1, 36, 30, },
+ { 0, 1, 0, 1, 40, 38, },
+ { 2, 1, 0, 1, 40, 32, },
+ { 1, 1, 0, 1, 40, 30, },
+ { 0, 1, 0, 1, 44, 38, },
+ { 2, 1, 0, 1, 44, 32, },
+ { 1, 1, 0, 1, 44, 30, },
+ { 0, 1, 0, 1, 48, 38, },
+ { 2, 1, 0, 1, 48, 32, },
+ { 1, 1, 0, 1, 48, 30, },
+ { 0, 1, 0, 1, 52, 38, },
+ { 2, 1, 0, 1, 52, 32, },
+ { 1, 1, 0, 1, 52, 28, },
+ { 0, 1, 0, 1, 56, 38, },
+ { 2, 1, 0, 1, 56, 32, },
+ { 1, 1, 0, 1, 56, 28, },
+ { 0, 1, 0, 1, 60, 38, },
+ { 2, 1, 0, 1, 60, 32, },
+ { 1, 1, 0, 1, 60, 28, },
+ { 0, 1, 0, 1, 64, 34, },
+ { 2, 1, 0, 1, 64, 32, },
+ { 1, 1, 0, 1, 64, 28, },
+ { 0, 1, 0, 1, 100, 32, },
+ { 2, 1, 0, 1, 100, 32, },
+ { 1, 1, 0, 1, 100, 32, },
+ { 0, 1, 0, 1, 104, 38, },
+ { 2, 1, 0, 1, 104, 32, },
+ { 1, 1, 0, 1, 104, 32, },
+ { 0, 1, 0, 1, 108, 38, },
+ { 2, 1, 0, 1, 108, 32, },
+ { 1, 1, 0, 1, 108, 32, },
+ { 0, 1, 0, 1, 112, 38, },
+ { 2, 1, 0, 1, 112, 32, },
+ { 1, 1, 0, 1, 112, 32, },
+ { 0, 1, 0, 1, 116, 38, },
+ { 2, 1, 0, 1, 116, 32, },
+ { 1, 1, 0, 1, 116, 32, },
+ { 0, 1, 0, 1, 120, 38, },
+ { 2, 1, 0, 1, 120, 32, },
+ { 1, 1, 0, 1, 120, 32, },
+ { 0, 1, 0, 1, 124, 38, },
+ { 2, 1, 0, 1, 124, 32, },
+ { 1, 1, 0, 1, 124, 32, },
+ { 0, 1, 0, 1, 128, 38, },
+ { 2, 1, 0, 1, 128, 32, },
+ { 1, 1, 0, 1, 128, 32, },
+ { 0, 1, 0, 1, 132, 38, },
+ { 2, 1, 0, 1, 132, 32, },
+ { 1, 1, 0, 1, 132, 32, },
+ { 0, 1, 0, 1, 136, 38, },
+ { 2, 1, 0, 1, 136, 32, },
+ { 1, 1, 0, 1, 136, 32, },
+ { 0, 1, 0, 1, 140, 34, },
+ { 2, 1, 0, 1, 140, 32, },
+ { 1, 1, 0, 1, 140, 32, },
+ { 0, 1, 0, 1, 144, 34, },
+ { 2, 1, 0, 1, 144, 32, },
+ { 1, 1, 0, 1, 144, 63, },
+ { 0, 1, 0, 1, 149, 38, },
+ { 2, 1, 0, 1, 149, 63, },
+ { 1, 1, 0, 1, 149, 63, },
+ { 0, 1, 0, 1, 153, 38, },
+ { 2, 1, 0, 1, 153, 63, },
+ { 1, 1, 0, 1, 153, 63, },
+ { 0, 1, 0, 1, 157, 38, },
+ { 2, 1, 0, 1, 157, 63, },
+ { 1, 1, 0, 1, 157, 63, },
+ { 0, 1, 0, 1, 161, 38, },
+ { 2, 1, 0, 1, 161, 63, },
+ { 1, 1, 0, 1, 161, 63, },
+ { 0, 1, 0, 1, 165, 38, },
+ { 2, 1, 0, 1, 165, 63, },
+ { 1, 1, 0, 1, 165, 63, },
+ { 0, 1, 0, 2, 36, 36, },
+ { 2, 1, 0, 2, 36, 32, },
+ { 1, 1, 0, 2, 36, 28, },
+ { 0, 1, 0, 2, 40, 38, },
+ { 2, 1, 0, 2, 40, 32, },
+ { 1, 1, 0, 2, 40, 28, },
+ { 0, 1, 0, 2, 44, 38, },
+ { 2, 1, 0, 2, 44, 32, },
+ { 1, 1, 0, 2, 44, 28, },
+ { 0, 1, 0, 2, 48, 38, },
+ { 2, 1, 0, 2, 48, 32, },
+ { 1, 1, 0, 2, 48, 28, },
+ { 0, 1, 0, 2, 52, 38, },
+ { 2, 1, 0, 2, 52, 32, },
+ { 1, 1, 0, 2, 52, 28, },
+ { 0, 1, 0, 2, 56, 38, },
+ { 2, 1, 0, 2, 56, 32, },
+ { 1, 1, 0, 2, 56, 28, },
+ { 0, 1, 0, 2, 60, 38, },
+ { 2, 1, 0, 2, 60, 32, },
+ { 1, 1, 0, 2, 60, 28, },
+ { 0, 1, 0, 2, 64, 34, },
+ { 2, 1, 0, 2, 64, 32, },
+ { 1, 1, 0, 2, 64, 28, },
+ { 0, 1, 0, 2, 100, 32, },
+ { 2, 1, 0, 2, 100, 32, },
+ { 1, 1, 0, 2, 100, 32, },
+ { 0, 1, 0, 2, 104, 38, },
+ { 2, 1, 0, 2, 104, 32, },
+ { 1, 1, 0, 2, 104, 32, },
+ { 0, 1, 0, 2, 108, 38, },
+ { 2, 1, 0, 2, 108, 32, },
+ { 1, 1, 0, 2, 108, 32, },
+ { 0, 1, 0, 2, 112, 38, },
+ { 2, 1, 0, 2, 112, 32, },
+ { 1, 1, 0, 2, 112, 32, },
+ { 0, 1, 0, 2, 116, 38, },
+ { 2, 1, 0, 2, 116, 32, },
+ { 1, 1, 0, 2, 116, 32, },
+ { 0, 1, 0, 2, 120, 38, },
+ { 2, 1, 0, 2, 120, 32, },
+ { 1, 1, 0, 2, 120, 32, },
+ { 0, 1, 0, 2, 124, 38, },
+ { 2, 1, 0, 2, 124, 32, },
+ { 1, 1, 0, 2, 124, 32, },
+ { 0, 1, 0, 2, 128, 38, },
+ { 2, 1, 0, 2, 128, 32, },
+ { 1, 1, 0, 2, 128, 32, },
+ { 0, 1, 0, 2, 132, 38, },
+ { 2, 1, 0, 2, 132, 32, },
+ { 1, 1, 0, 2, 132, 32, },
+ { 0, 1, 0, 2, 136, 38, },
+ { 2, 1, 0, 2, 136, 32, },
+ { 1, 1, 0, 2, 136, 32, },
+ { 0, 1, 0, 2, 140, 32, },
+ { 2, 1, 0, 2, 140, 32, },
+ { 1, 1, 0, 2, 140, 32, },
+ { 0, 1, 0, 2, 144, 26, },
+ { 2, 1, 0, 2, 144, 63, },
+ { 1, 1, 0, 2, 144, 63, },
+ { 0, 1, 0, 2, 149, 38, },
+ { 2, 1, 0, 2, 149, 63, },
+ { 1, 1, 0, 2, 149, 63, },
+ { 0, 1, 0, 2, 153, 38, },
+ { 2, 1, 0, 2, 153, 63, },
+ { 1, 1, 0, 2, 153, 63, },
+ { 0, 1, 0, 2, 157, 38, },
+ { 2, 1, 0, 2, 157, 63, },
+ { 1, 1, 0, 2, 157, 63, },
+ { 0, 1, 0, 2, 161, 38, },
+ { 2, 1, 0, 2, 161, 63, },
+ { 1, 1, 0, 2, 161, 63, },
+ { 0, 1, 0, 2, 165, 38, },
+ { 2, 1, 0, 2, 165, 63, },
+ { 1, 1, 0, 2, 165, 63, },
+ { 0, 1, 0, 3, 36, 34, },
+ { 2, 1, 0, 3, 36, 20, },
+ { 1, 1, 0, 3, 36, 22, },
+ { 0, 1, 0, 3, 40, 36, },
+ { 2, 1, 0, 3, 40, 20, },
+ { 1, 1, 0, 3, 40, 22, },
+ { 0, 1, 0, 3, 44, 36, },
+ { 2, 1, 0, 3, 44, 20, },
+ { 1, 1, 0, 3, 44, 22, },
+ { 0, 1, 0, 3, 48, 36, },
+ { 2, 1, 0, 3, 48, 20, },
+ { 1, 1, 0, 3, 48, 22, },
+ { 0, 1, 0, 3, 52, 36, },
+ { 2, 1, 0, 3, 52, 20, },
+ { 1, 1, 0, 3, 52, 22, },
+ { 0, 1, 0, 3, 56, 36, },
+ { 2, 1, 0, 3, 56, 20, },
+ { 1, 1, 0, 3, 56, 22, },
+ { 0, 1, 0, 3, 60, 36, },
+ { 2, 1, 0, 3, 60, 20, },
+ { 1, 1, 0, 3, 60, 22, },
+ { 0, 1, 0, 3, 64, 34, },
+ { 2, 1, 0, 3, 64, 20, },
+ { 1, 1, 0, 3, 64, 22, },
+ { 0, 1, 0, 3, 100, 32, },
+ { 2, 1, 0, 3, 100, 20, },
+ { 1, 1, 0, 3, 100, 30, },
+ { 0, 1, 0, 3, 104, 36, },
+ { 2, 1, 0, 3, 104, 20, },
+ { 1, 1, 0, 3, 104, 30, },
+ { 0, 1, 0, 3, 108, 38, },
+ { 2, 1, 0, 3, 108, 20, },
+ { 1, 1, 0, 3, 108, 30, },
+ { 0, 1, 0, 3, 112, 38, },
+ { 2, 1, 0, 3, 112, 20, },
+ { 1, 1, 0, 3, 112, 30, },
+ { 0, 1, 0, 3, 116, 38, },
+ { 2, 1, 0, 3, 116, 20, },
+ { 1, 1, 0, 3, 116, 30, },
+ { 0, 1, 0, 3, 120, 38, },
+ { 2, 1, 0, 3, 120, 20, },
+ { 1, 1, 0, 3, 120, 30, },
+ { 0, 1, 0, 3, 124, 38, },
+ { 2, 1, 0, 3, 124, 20, },
+ { 1, 1, 0, 3, 124, 30, },
+ { 0, 1, 0, 3, 128, 38, },
+ { 2, 1, 0, 3, 128, 20, },
+ { 1, 1, 0, 3, 128, 30, },
+ { 0, 1, 0, 3, 132, 38, },
+ { 2, 1, 0, 3, 132, 20, },
+ { 1, 1, 0, 3, 132, 30, },
+ { 0, 1, 0, 3, 136, 36, },
+ { 2, 1, 0, 3, 136, 20, },
+ { 1, 1, 0, 3, 136, 30, },
+ { 0, 1, 0, 3, 140, 32, },
+ { 2, 1, 0, 3, 140, 20, },
+ { 1, 1, 0, 3, 140, 30, },
+ { 0, 1, 0, 3, 144, 26, },
+ { 2, 1, 0, 3, 144, 63, },
+ { 1, 1, 0, 3, 144, 63, },
+ { 0, 1, 0, 3, 149, 38, },
+ { 2, 1, 0, 3, 149, 63, },
+ { 1, 1, 0, 3, 149, 63, },
+ { 0, 1, 0, 3, 153, 38, },
+ { 2, 1, 0, 3, 153, 63, },
+ { 1, 1, 0, 3, 153, 63, },
+ { 0, 1, 0, 3, 157, 38, },
+ { 2, 1, 0, 3, 157, 63, },
+ { 1, 1, 0, 3, 157, 63, },
+ { 0, 1, 0, 3, 161, 38, },
+ { 2, 1, 0, 3, 161, 63, },
+ { 1, 1, 0, 3, 161, 63, },
+ { 0, 1, 0, 3, 165, 38, },
+ { 2, 1, 0, 3, 165, 63, },
+ { 1, 1, 0, 3, 165, 63, },
+ { 0, 1, 1, 2, 38, 28, },
+ { 2, 1, 1, 2, 38, 30, },
+ { 1, 1, 1, 2, 38, 30, },
+ { 0, 1, 1, 2, 46, 36, },
+ { 2, 1, 1, 2, 46, 30, },
+ { 1, 1, 1, 2, 46, 30, },
+ { 0, 1, 1, 2, 54, 36, },
+ { 2, 1, 1, 2, 54, 30, },
+ { 1, 1, 1, 2, 54, 30, },
+ { 0, 1, 1, 2, 62, 30, },
+ { 2, 1, 1, 2, 62, 30, },
+ { 1, 1, 1, 2, 62, 30, },
+ { 0, 1, 1, 2, 102, 30, },
+ { 2, 1, 1, 2, 102, 30, },
+ { 1, 1, 1, 2, 102, 30, },
+ { 0, 1, 1, 2, 110, 36, },
+ { 2, 1, 1, 2, 110, 30, },
+ { 1, 1, 1, 2, 110, 30, },
+ { 0, 1, 1, 2, 118, 36, },
+ { 2, 1, 1, 2, 118, 30, },
+ { 1, 1, 1, 2, 118, 30, },
+ { 0, 1, 1, 2, 126, 36, },
+ { 2, 1, 1, 2, 126, 30, },
+ { 1, 1, 1, 2, 126, 30, },
+ { 0, 1, 1, 2, 134, 36, },
+ { 2, 1, 1, 2, 134, 30, },
+ { 1, 1, 1, 2, 134, 30, },
+ { 0, 1, 1, 2, 142, 30, },
+ { 2, 1, 1, 2, 142, 63, },
+ { 1, 1, 1, 2, 142, 63, },
+ { 0, 1, 1, 2, 151, 36, },
+ { 2, 1, 1, 2, 151, 63, },
+ { 1, 1, 1, 2, 151, 63, },
+ { 0, 1, 1, 2, 159, 36, },
+ { 2, 1, 1, 2, 159, 63, },
+ { 1, 1, 1, 2, 159, 63, },
+ { 0, 1, 1, 3, 38, 26, },
+ { 2, 1, 1, 3, 38, 20, },
+ { 1, 1, 1, 3, 38, 22, },
+ { 0, 1, 1, 3, 46, 36, },
+ { 2, 1, 1, 3, 46, 20, },
+ { 1, 1, 1, 3, 46, 22, },
+ { 0, 1, 1, 3, 54, 36, },
+ { 2, 1, 1, 3, 54, 20, },
+ { 1, 1, 1, 3, 54, 22, },
+ { 0, 1, 1, 3, 62, 28, },
+ { 2, 1, 1, 3, 62, 20, },
+ { 1, 1, 1, 3, 62, 22, },
+ { 0, 1, 1, 3, 102, 28, },
+ { 2, 1, 1, 3, 102, 20, },
+ { 1, 1, 1, 3, 102, 30, },
+ { 0, 1, 1, 3, 110, 36, },
+ { 2, 1, 1, 3, 110, 20, },
+ { 1, 1, 1, 3, 110, 30, },
+ { 0, 1, 1, 3, 118, 36, },
+ { 2, 1, 1, 3, 118, 20, },
+ { 1, 1, 1, 3, 118, 30, },
+ { 0, 1, 1, 3, 126, 36, },
+ { 2, 1, 1, 3, 126, 20, },
+ { 1, 1, 1, 3, 126, 30, },
+ { 0, 1, 1, 3, 134, 36, },
+ { 2, 1, 1, 3, 134, 20, },
+ { 1, 1, 1, 3, 134, 30, },
+ { 0, 1, 1, 3, 142, 30, },
+ { 2, 1, 1, 3, 142, 63, },
+ { 1, 1, 1, 3, 142, 63, },
+ { 0, 1, 1, 3, 151, 36, },
+ { 2, 1, 1, 3, 151, 63, },
+ { 1, 1, 1, 3, 151, 63, },
+ { 0, 1, 1, 3, 159, 36, },
+ { 2, 1, 1, 3, 159, 63, },
+ { 1, 1, 1, 3, 159, 63, },
+ { 0, 1, 2, 4, 42, 26, },
+ { 2, 1, 2, 4, 42, 30, },
+ { 1, 1, 2, 4, 42, 28, },
+ { 0, 1, 2, 4, 58, 26, },
+ { 2, 1, 2, 4, 58, 30, },
+ { 1, 1, 2, 4, 58, 28, },
+ { 0, 1, 2, 4, 106, 26, },
+ { 2, 1, 2, 4, 106, 30, },
+ { 1, 1, 2, 4, 106, 30, },
+ { 0, 1, 2, 4, 122, 36, },
+ { 2, 1, 2, 4, 122, 30, },
+ { 1, 1, 2, 4, 122, 30, },
+ { 0, 1, 2, 4, 138, 36, },
+ { 2, 1, 2, 4, 138, 63, },
+ { 1, 1, 2, 4, 138, 63, },
+ { 0, 1, 2, 4, 155, 36, },
+ { 2, 1, 2, 4, 155, 63, },
+ { 1, 1, 2, 4, 155, 63, },
+ { 0, 1, 2, 5, 42, 24, },
+ { 2, 1, 2, 5, 42, 20, },
+ { 1, 1, 2, 5, 42, 22, },
+ { 0, 1, 2, 5, 58, 24, },
+ { 2, 1, 2, 5, 58, 20, },
+ { 1, 1, 2, 5, 58, 22, },
+ { 0, 1, 2, 5, 106, 26, },
+ { 2, 1, 2, 5, 106, 20, },
+ { 1, 1, 2, 5, 106, 30, },
+ { 0, 1, 2, 5, 122, 36, },
+ { 2, 1, 2, 5, 122, 20, },
+ { 1, 1, 2, 5, 122, 30, },
+ { 0, 1, 2, 5, 138, 36, },
+ { 2, 1, 2, 5, 138, 63, },
+ { 1, 1, 2, 5, 138, 63, },
+ { 0, 1, 2, 5, 155, 36, },
+ { 2, 1, 2, 5, 155, 63, },
+ { 1, 1, 2, 5, 155, 63 },
};
RTW_DECL_TABLE_TXPWR_LMT(rtw8822b_txpwr_lmt_type2);
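The struct-per-entry form above makes each limit self-describing instead of six anonymous bytes in a flat stream. A rough sketch of the typed layout and a consumer follows; the field names, the reading of 63 as an "unsupported" sentinel, and the setter called in the loop are assumptions inferred from the visible entry order (regulatory domain, band, bandwidth, rate section, channel, limit), not confirmed driver definitions.

struct rtw_txpwr_lmt_cfg_pair {
	u8 regd;	/* regulatory domain: 0/1/2 in the entries above */
	u8 band;	/* 0 = 2.4 GHz, 1 = 5 GHz */
	u8 bw;		/* bandwidth index */
	u8 rs;		/* rate section */
	u8 ch;		/* channel number */
	s8 txpwr_lmt;	/* limit value; 63 appears to mark unsupported rows */
};

/* Hypothetical parser: walk typed entries instead of consuming the old
 * flat array six bytes at a time.
 */
static void parse_txpwr_lmt(struct rtw_dev *rtwdev, const struct rtw_table *tbl)
{
	const struct rtw_txpwr_lmt_cfg_pair *p = tbl->data;
	const struct rtw_txpwr_lmt_cfg_pair *end = p + tbl->size;

	for (; p < end; p++)
		rtw_phy_set_tx_power_limit(rtwdev, p->regd, p->band, p->bw,
					   p->rs, p->ch, p->txpwr_lmt);
}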
-static const u8 rtw8822b_txpwr_lmt_type5[] = {
- 0, 0, 0, 0, 1, 32, 2, 0, 0, 0, 1, 28, 1, 0, 0, 0, 1, 30,
- 0, 0, 0, 0, 2, 32, 2, 0, 0, 0, 2, 28, 1, 0, 0, 0, 2, 30,
- 0, 0, 0, 0, 3, 32, 2, 0, 0, 0, 3, 28, 1, 0, 0, 0, 3, 30,
- 0, 0, 0, 0, 4, 32, 2, 0, 0, 0, 4, 28, 1, 0, 0, 0, 4, 30,
- 0, 0, 0, 0, 5, 32, 2, 0, 0, 0, 5, 28, 1, 0, 0, 0, 5, 30,
- 0, 0, 0, 0, 6, 32, 2, 0, 0, 0, 6, 28, 1, 0, 0, 0, 6, 30,
- 0, 0, 0, 0, 7, 32, 2, 0, 0, 0, 7, 28, 1, 0, 0, 0, 7, 30,
- 0, 0, 0, 0, 8, 32, 2, 0, 0, 0, 8, 28, 1, 0, 0, 0, 8, 30,
- 0, 0, 0, 0, 9, 32, 2, 0, 0, 0, 9, 28, 1, 0, 0, 0, 9, 30,
- 0, 0, 0, 0, 10, 32, 2, 0, 0, 0, 10, 28, 1, 0, 0, 0, 10, 30,
- 0, 0, 0, 0, 11, 32, 2, 0, 0, 0, 11, 28, 1, 0, 0, 0, 11, 30,
- 0, 0, 0, 0, 12, 26, 2, 0, 0, 0, 12, 28, 1, 0, 0, 0, 12, 30,
- 0, 0, 0, 0, 13, 20, 2, 0, 0, 0, 13, 28, 1, 0, 0, 0, 13, 28,
- 0, 0, 0, 0, 14, 63, 2, 0, 0, 0, 14, 63, 1, 0, 0, 0, 14, 32,
- 0, 0, 0, 1, 1, 26, 2, 0, 0, 1, 1, 30, 1, 0, 0, 1, 1, 34,
- 0, 0, 0, 1, 2, 30, 2, 0, 0, 1, 2, 30, 1, 0, 0, 1, 2, 34,
- 0, 0, 0, 1, 3, 32, 2, 0, 0, 1, 3, 30, 1, 0, 0, 1, 3, 34,
- 0, 0, 0, 1, 4, 34, 2, 0, 0, 1, 4, 30, 1, 0, 0, 1, 4, 34,
- 0, 0, 0, 1, 5, 34, 2, 0, 0, 1, 5, 30, 1, 0, 0, 1, 5, 34,
- 0, 0, 0, 1, 6, 34, 2, 0, 0, 1, 6, 30, 1, 0, 0, 1, 6, 34,
- 0, 0, 0, 1, 7, 34, 2, 0, 0, 1, 7, 30, 1, 0, 0, 1, 7, 34,
- 0, 0, 0, 1, 8, 34, 2, 0, 0, 1, 8, 30, 1, 0, 0, 1, 8, 34,
- 0, 0, 0, 1, 9, 32, 2, 0, 0, 1, 9, 30, 1, 0, 0, 1, 9, 34,
- 0, 0, 0, 1, 10, 30, 2, 0, 0, 1, 10, 30, 1, 0, 0, 1, 10, 34,
- 0, 0, 0, 1, 11, 28, 2, 0, 0, 1, 11, 30, 1, 0, 0, 1, 11, 34,
- 0, 0, 0, 1, 12, 22, 2, 0, 0, 1, 12, 30, 1, 0, 0, 1, 12, 34,
- 0, 0, 0, 1, 13, 14, 2, 0, 0, 1, 13, 30, 1, 0, 0, 1, 13, 34,
- 0, 0, 0, 1, 14, 63, 2, 0, 0, 1, 14, 63, 1, 0, 0, 1, 14, 63,
- 0, 0, 0, 2, 1, 26, 2, 0, 0, 2, 1, 30, 1, 0, 0, 2, 1, 34,
- 0, 0, 0, 2, 2, 30, 2, 0, 0, 2, 2, 30, 1, 0, 0, 2, 2, 34,
- 0, 0, 0, 2, 3, 32, 2, 0, 0, 2, 3, 30, 1, 0, 0, 2, 3, 34,
- 0, 0, 0, 2, 4, 34, 2, 0, 0, 2, 4, 30, 1, 0, 0, 2, 4, 34,
- 0, 0, 0, 2, 5, 34, 2, 0, 0, 2, 5, 30, 1, 0, 0, 2, 5, 34,
- 0, 0, 0, 2, 6, 34, 2, 0, 0, 2, 6, 30, 1, 0, 0, 2, 6, 34,
- 0, 0, 0, 2, 7, 34, 2, 0, 0, 2, 7, 30, 1, 0, 0, 2, 7, 34,
- 0, 0, 0, 2, 8, 34, 2, 0, 0, 2, 8, 30, 1, 0, 0, 2, 8, 34,
- 0, 0, 0, 2, 9, 32, 2, 0, 0, 2, 9, 30, 1, 0, 0, 2, 9, 34,
- 0, 0, 0, 2, 10, 30, 2, 0, 0, 2, 10, 30, 1, 0, 0, 2, 10, 34,
- 0, 0, 0, 2, 11, 26, 2, 0, 0, 2, 11, 30, 1, 0, 0, 2, 11, 34,
- 0, 0, 0, 2, 12, 20, 2, 0, 0, 2, 12, 30, 1, 0, 0, 2, 12, 34,
- 0, 0, 0, 2, 13, 14, 2, 0, 0, 2, 13, 30, 1, 0, 0, 2, 13, 34,
- 0, 0, 0, 2, 14, 63, 2, 0, 0, 2, 14, 63, 1, 0, 0, 2, 14, 63,
- 0, 0, 0, 3, 1, 26, 2, 0, 0, 3, 1, 18, 1, 0, 0, 3, 1, 30,
- 0, 0, 0, 3, 2, 28, 2, 0, 0, 3, 2, 18, 1, 0, 0, 3, 2, 30,
- 0, 0, 0, 3, 3, 30, 2, 0, 0, 3, 3, 18, 1, 0, 0, 3, 3, 30,
- 0, 0, 0, 3, 4, 30, 2, 0, 0, 3, 4, 18, 1, 0, 0, 3, 4, 30,
- 0, 0, 0, 3, 5, 32, 2, 0, 0, 3, 5, 18, 1, 0, 0, 3, 5, 30,
- 0, 0, 0, 3, 6, 32, 2, 0, 0, 3, 6, 18, 1, 0, 0, 3, 6, 30,
- 0, 0, 0, 3, 7, 32, 2, 0, 0, 3, 7, 18, 1, 0, 0, 3, 7, 30,
- 0, 0, 0, 3, 8, 30, 2, 0, 0, 3, 8, 18, 1, 0, 0, 3, 8, 30,
- 0, 0, 0, 3, 9, 30, 2, 0, 0, 3, 9, 18, 1, 0, 0, 3, 9, 30,
- 0, 0, 0, 3, 10, 28, 2, 0, 0, 3, 10, 18, 1, 0, 0, 3, 10, 30,
- 0, 0, 0, 3, 11, 26, 2, 0, 0, 3, 11, 18, 1, 0, 0, 3, 11, 30,
- 0, 0, 0, 3, 12, 20, 2, 0, 0, 3, 12, 18, 1, 0, 0, 3, 12, 30,
- 0, 0, 0, 3, 13, 14, 2, 0, 0, 3, 13, 18, 1, 0, 0, 3, 13, 30,
- 0, 0, 0, 3, 14, 63, 2, 0, 0, 3, 14, 63, 1, 0, 0, 3, 14, 63,
- 0, 0, 1, 2, 1, 63, 2, 0, 1, 2, 1, 63, 1, 0, 1, 2, 1, 63,
- 0, 0, 1, 2, 2, 63, 2, 0, 1, 2, 2, 63, 1, 0, 1, 2, 2, 63,
- 0, 0, 1, 2, 3, 26, 2, 0, 1, 2, 3, 30, 1, 0, 1, 2, 3, 34,
- 0, 0, 1, 2, 4, 26, 2, 0, 1, 2, 4, 30, 1, 0, 1, 2, 4, 34,
- 0, 0, 1, 2, 5, 30, 2, 0, 1, 2, 5, 30, 1, 0, 1, 2, 5, 34,
- 0, 0, 1, 2, 6, 32, 2, 0, 1, 2, 6, 30, 1, 0, 1, 2, 6, 34,
- 0, 0, 1, 2, 7, 30, 2, 0, 1, 2, 7, 30, 1, 0, 1, 2, 7, 34,
- 0, 0, 1, 2, 8, 26, 2, 0, 1, 2, 8, 30, 1, 0, 1, 2, 8, 34,
- 0, 0, 1, 2, 9, 26, 2, 0, 1, 2, 9, 30, 1, 0, 1, 2, 9, 34,
- 0, 0, 1, 2, 10, 20, 2, 0, 1, 2, 10, 30, 1, 0, 1, 2, 10, 34,
- 0, 0, 1, 2, 11, 14, 2, 0, 1, 2, 11, 30, 1, 0, 1, 2, 11, 34,
- 0, 0, 1, 2, 12, 63, 2, 0, 1, 2, 12, 63, 1, 0, 1, 2, 12, 63,
- 0, 0, 1, 2, 13, 63, 2, 0, 1, 2, 13, 63, 1, 0, 1, 2, 13, 63,
- 0, 0, 1, 2, 14, 63, 2, 0, 1, 2, 14, 63, 1, 0, 1, 2, 14, 63,
- 0, 0, 1, 3, 1, 63, 2, 0, 1, 3, 1, 63, 1, 0, 1, 3, 1, 63,
- 0, 0, 1, 3, 2, 63, 2, 0, 1, 3, 2, 63, 1, 0, 1, 3, 2, 63,
- 0, 0, 1, 3, 3, 24, 2, 0, 1, 3, 3, 18, 1, 0, 1, 3, 3, 30,
- 0, 0, 1, 3, 4, 24, 2, 0, 1, 3, 4, 18, 1, 0, 1, 3, 4, 30,
- 0, 0, 1, 3, 5, 26, 2, 0, 1, 3, 5, 18, 1, 0, 1, 3, 5, 30,
- 0, 0, 1, 3, 6, 28, 2, 0, 1, 3, 6, 18, 1, 0, 1, 3, 6, 30,
- 0, 0, 1, 3, 7, 26, 2, 0, 1, 3, 7, 18, 1, 0, 1, 3, 7, 30,
- 0, 0, 1, 3, 8, 26, 2, 0, 1, 3, 8, 18, 1, 0, 1, 3, 8, 30,
- 0, 0, 1, 3, 9, 26, 2, 0, 1, 3, 9, 18, 1, 0, 1, 3, 9, 30,
- 0, 0, 1, 3, 10, 20, 2, 0, 1, 3, 10, 18, 1, 0, 1, 3, 10, 30,
- 0, 0, 1, 3, 11, 14, 2, 0, 1, 3, 11, 18, 1, 0, 1, 3, 11, 30,
- 0, 0, 1, 3, 12, 63, 2, 0, 1, 3, 12, 63, 1, 0, 1, 3, 12, 63,
- 0, 0, 1, 3, 13, 63, 2, 0, 1, 3, 13, 63, 1, 0, 1, 3, 13, 63,
- 0, 0, 1, 3, 14, 63, 2, 0, 1, 3, 14, 63, 1, 0, 1, 3, 14, 63,
- 0, 1, 0, 1, 36, 30, 2, 1, 0, 1, 36, 32, 1, 1, 0, 1, 36, 30,
- 0, 1, 0, 1, 40, 32, 2, 1, 0, 1, 40, 32, 1, 1, 0, 1, 40, 30,
- 0, 1, 0, 1, 44, 32, 2, 1, 0, 1, 44, 32, 1, 1, 0, 1, 44, 30,
- 0, 1, 0, 1, 48, 32, 2, 1, 0, 1, 48, 32, 1, 1, 0, 1, 48, 30,
- 0, 1, 0, 1, 52, 32, 2, 1, 0, 1, 52, 32, 1, 1, 0, 1, 52, 28,
- 0, 1, 0, 1, 56, 32, 2, 1, 0, 1, 56, 32, 1, 1, 0, 1, 56, 28,
- 0, 1, 0, 1, 60, 32, 2, 1, 0, 1, 60, 32, 1, 1, 0, 1, 60, 28,
- 0, 1, 0, 1, 64, 28, 2, 1, 0, 1, 64, 32, 1, 1, 0, 1, 64, 28,
- 0, 1, 0, 1, 100, 26, 2, 1, 0, 1, 100, 32, 1, 1, 0, 1, 100, 32,
- 0, 1, 0, 1, 104, 32, 2, 1, 0, 1, 104, 32, 1, 1, 0, 1, 104, 32,
- 0, 1, 0, 1, 108, 32, 2, 1, 0, 1, 108, 32, 1, 1, 0, 1, 108, 32,
- 0, 1, 0, 1, 112, 32, 2, 1, 0, 1, 112, 32, 1, 1, 0, 1, 112, 32,
- 0, 1, 0, 1, 116, 32, 2, 1, 0, 1, 116, 32, 1, 1, 0, 1, 116, 32,
- 0, 1, 0, 1, 120, 32, 2, 1, 0, 1, 120, 32, 1, 1, 0, 1, 120, 32,
- 0, 1, 0, 1, 124, 32, 2, 1, 0, 1, 124, 32, 1, 1, 0, 1, 124, 32,
- 0, 1, 0, 1, 128, 32, 2, 1, 0, 1, 128, 32, 1, 1, 0, 1, 128, 32,
- 0, 1, 0, 1, 132, 32, 2, 1, 0, 1, 132, 32, 1, 1, 0, 1, 132, 32,
- 0, 1, 0, 1, 136, 32, 2, 1, 0, 1, 136, 32, 1, 1, 0, 1, 136, 32,
- 0, 1, 0, 1, 140, 28, 2, 1, 0, 1, 140, 32, 1, 1, 0, 1, 140, 32,
- 0, 1, 0, 1, 144, 28, 2, 1, 0, 1, 144, 63, 1, 1, 0, 1, 144, 63,
- 0, 1, 0, 1, 149, 32, 2, 1, 0, 1, 149, 63, 1, 1, 0, 1, 149, 63,
- 0, 1, 0, 1, 153, 32, 2, 1, 0, 1, 153, 63, 1, 1, 0, 1, 153, 63,
- 0, 1, 0, 1, 157, 32, 2, 1, 0, 1, 157, 63, 1, 1, 0, 1, 157, 63,
- 0, 1, 0, 1, 161, 32, 2, 1, 0, 1, 161, 63, 1, 1, 0, 1, 161, 63,
- 0, 1, 0, 1, 165, 32, 2, 1, 0, 1, 165, 63, 1, 1, 0, 1, 165, 63,
- 0, 1, 0, 2, 36, 30, 2, 1, 0, 2, 36, 32, 1, 1, 0, 2, 36, 28,
- 0, 1, 0, 2, 40, 32, 2, 1, 0, 2, 40, 32, 1, 1, 0, 2, 40, 28,
- 0, 1, 0, 2, 44, 32, 2, 1, 0, 2, 44, 32, 1, 1, 0, 2, 44, 28,
- 0, 1, 0, 2, 48, 32, 2, 1, 0, 2, 48, 32, 1, 1, 0, 2, 48, 28,
- 0, 1, 0, 2, 52, 32, 2, 1, 0, 2, 52, 32, 1, 1, 0, 2, 52, 28,
- 0, 1, 0, 2, 56, 32, 2, 1, 0, 2, 56, 32, 1, 1, 0, 2, 56, 28,
- 0, 1, 0, 2, 60, 32, 2, 1, 0, 2, 60, 32, 1, 1, 0, 2, 60, 28,
- 0, 1, 0, 2, 64, 28, 2, 1, 0, 2, 64, 32, 1, 1, 0, 2, 64, 28,
- 0, 1, 0, 2, 100, 26, 2, 1, 0, 2, 100, 32, 1, 1, 0, 2, 100, 32,
- 0, 1, 0, 2, 104, 32, 2, 1, 0, 2, 104, 32, 1, 1, 0, 2, 104, 32,
- 0, 1, 0, 2, 108, 32, 2, 1, 0, 2, 108, 32, 1, 1, 0, 2, 108, 32,
- 0, 1, 0, 2, 112, 32, 2, 1, 0, 2, 112, 32, 1, 1, 0, 2, 112, 32,
- 0, 1, 0, 2, 116, 32, 2, 1, 0, 2, 116, 32, 1, 1, 0, 2, 116, 32,
- 0, 1, 0, 2, 120, 32, 2, 1, 0, 2, 120, 32, 1, 1, 0, 2, 120, 32,
- 0, 1, 0, 2, 124, 32, 2, 1, 0, 2, 124, 32, 1, 1, 0, 2, 124, 32,
- 0, 1, 0, 2, 128, 32, 2, 1, 0, 2, 128, 32, 1, 1, 0, 2, 128, 32,
- 0, 1, 0, 2, 132, 32, 2, 1, 0, 2, 132, 32, 1, 1, 0, 2, 132, 32,
- 0, 1, 0, 2, 136, 32, 2, 1, 0, 2, 136, 32, 1, 1, 0, 2, 136, 32,
- 0, 1, 0, 2, 140, 26, 2, 1, 0, 2, 140, 32, 1, 1, 0, 2, 140, 32,
- 0, 1, 0, 2, 144, 26, 2, 1, 0, 2, 144, 63, 1, 1, 0, 2, 144, 63,
- 0, 1, 0, 2, 149, 32, 2, 1, 0, 2, 149, 63, 1, 1, 0, 2, 149, 63,
- 0, 1, 0, 2, 153, 32, 2, 1, 0, 2, 153, 63, 1, 1, 0, 2, 153, 63,
- 0, 1, 0, 2, 157, 32, 2, 1, 0, 2, 157, 63, 1, 1, 0, 2, 157, 63,
- 0, 1, 0, 2, 161, 32, 2, 1, 0, 2, 161, 63, 1, 1, 0, 2, 161, 63,
- 0, 1, 0, 2, 165, 32, 2, 1, 0, 2, 165, 63, 1, 1, 0, 2, 165, 63,
- 0, 1, 0, 3, 36, 28, 2, 1, 0, 3, 36, 20, 1, 1, 0, 3, 36, 22,
- 0, 1, 0, 3, 40, 30, 2, 1, 0, 3, 40, 20, 1, 1, 0, 3, 40, 22,
- 0, 1, 0, 3, 44, 30, 2, 1, 0, 3, 44, 20, 1, 1, 0, 3, 44, 22,
- 0, 1, 0, 3, 48, 30, 2, 1, 0, 3, 48, 20, 1, 1, 0, 3, 48, 22,
- 0, 1, 0, 3, 52, 30, 2, 1, 0, 3, 52, 20, 1, 1, 0, 3, 52, 22,
- 0, 1, 0, 3, 56, 30, 2, 1, 0, 3, 56, 20, 1, 1, 0, 3, 56, 22,
- 0, 1, 0, 3, 60, 30, 2, 1, 0, 3, 60, 20, 1, 1, 0, 3, 60, 22,
- 0, 1, 0, 3, 64, 28, 2, 1, 0, 3, 64, 20, 1, 1, 0, 3, 64, 22,
- 0, 1, 0, 3, 100, 26, 2, 1, 0, 3, 100, 20, 1, 1, 0, 3, 100, 30,
- 0, 1, 0, 3, 104, 30, 2, 1, 0, 3, 104, 20, 1, 1, 0, 3, 104, 30,
- 0, 1, 0, 3, 108, 32, 2, 1, 0, 3, 108, 20, 1, 1, 0, 3, 108, 30,
- 0, 1, 0, 3, 112, 32, 2, 1, 0, 3, 112, 20, 1, 1, 0, 3, 112, 30,
- 0, 1, 0, 3, 116, 32, 2, 1, 0, 3, 116, 20, 1, 1, 0, 3, 116, 30,
- 0, 1, 0, 3, 120, 32, 2, 1, 0, 3, 120, 20, 1, 1, 0, 3, 120, 30,
- 0, 1, 0, 3, 124, 32, 2, 1, 0, 3, 124, 20, 1, 1, 0, 3, 124, 30,
- 0, 1, 0, 3, 128, 32, 2, 1, 0, 3, 128, 20, 1, 1, 0, 3, 128, 30,
- 0, 1, 0, 3, 132, 32, 2, 1, 0, 3, 132, 20, 1, 1, 0, 3, 132, 30,
- 0, 1, 0, 3, 136, 30, 2, 1, 0, 3, 136, 20, 1, 1, 0, 3, 136, 30,
- 0, 1, 0, 3, 140, 26, 2, 1, 0, 3, 140, 20, 1, 1, 0, 3, 140, 30,
- 0, 1, 0, 3, 144, 26, 2, 1, 0, 3, 144, 63, 1, 1, 0, 3, 144, 63,
- 0, 1, 0, 3, 149, 32, 2, 1, 0, 3, 149, 63, 1, 1, 0, 3, 149, 63,
- 0, 1, 0, 3, 153, 32, 2, 1, 0, 3, 153, 63, 1, 1, 0, 3, 153, 63,
- 0, 1, 0, 3, 157, 32, 2, 1, 0, 3, 157, 63, 1, 1, 0, 3, 157, 63,
- 0, 1, 0, 3, 161, 32, 2, 1, 0, 3, 161, 63, 1, 1, 0, 3, 161, 63,
- 0, 1, 0, 3, 165, 32, 2, 1, 0, 3, 165, 63, 1, 1, 0, 3, 165, 63,
- 0, 1, 1, 2, 38, 22, 2, 1, 1, 2, 38, 30, 1, 1, 1, 2, 38, 30,
- 0, 1, 1, 2, 46, 30, 2, 1, 1, 2, 46, 30, 1, 1, 1, 2, 46, 30,
- 0, 1, 1, 2, 54, 30, 2, 1, 1, 2, 54, 30, 1, 1, 1, 2, 54, 30,
- 0, 1, 1, 2, 62, 24, 2, 1, 1, 2, 62, 30, 1, 1, 1, 2, 62, 30,
- 0, 1, 1, 2, 102, 24, 2, 1, 1, 2, 102, 30, 1, 1, 1, 2, 102, 30,
- 0, 1, 1, 2, 110, 30, 2, 1, 1, 2, 110, 30, 1, 1, 1, 2, 110, 30,
- 0, 1, 1, 2, 118, 30, 2, 1, 1, 2, 118, 30, 1, 1, 1, 2, 118, 30,
- 0, 1, 1, 2, 126, 30, 2, 1, 1, 2, 126, 30, 1, 1, 1, 2, 126, 30,
- 0, 1, 1, 2, 134, 30, 2, 1, 1, 2, 134, 30, 1, 1, 1, 2, 134, 30,
- 0, 1, 1, 2, 142, 30, 2, 1, 1, 2, 142, 63, 1, 1, 1, 2, 142, 63,
- 0, 1, 1, 2, 151, 30, 2, 1, 1, 2, 151, 63, 1, 1, 1, 2, 151, 63,
- 0, 1, 1, 2, 159, 30, 2, 1, 1, 2, 159, 63, 1, 1, 1, 2, 159, 63,
- 0, 1, 1, 3, 38, 20, 2, 1, 1, 3, 38, 20, 1, 1, 1, 3, 38, 22,
- 0, 1, 1, 3, 46, 30, 2, 1, 1, 3, 46, 20, 1, 1, 1, 3, 46, 22,
- 0, 1, 1, 3, 54, 30, 2, 1, 1, 3, 54, 20, 1, 1, 1, 3, 54, 22,
- 0, 1, 1, 3, 62, 22, 2, 1, 1, 3, 62, 20, 1, 1, 1, 3, 62, 22,
- 0, 1, 1, 3, 102, 22, 2, 1, 1, 3, 102, 20, 1, 1, 1, 3, 102, 30,
- 0, 1, 1, 3, 110, 30, 2, 1, 1, 3, 110, 20, 1, 1, 1, 3, 110, 30,
- 0, 1, 1, 3, 118, 30, 2, 1, 1, 3, 118, 20, 1, 1, 1, 3, 118, 30,
- 0, 1, 1, 3, 126, 30, 2, 1, 1, 3, 126, 20, 1, 1, 1, 3, 126, 30,
- 0, 1, 1, 3, 134, 30, 2, 1, 1, 3, 134, 20, 1, 1, 1, 3, 134, 30,
- 0, 1, 1, 3, 142, 30, 2, 1, 1, 3, 142, 63, 1, 1, 1, 3, 142, 63,
- 0, 1, 1, 3, 151, 30, 2, 1, 1, 3, 151, 63, 1, 1, 1, 3, 151, 63,
- 0, 1, 1, 3, 159, 30, 2, 1, 1, 3, 159, 63, 1, 1, 1, 3, 159, 63,
- 0, 1, 2, 4, 42, 20, 2, 1, 2, 4, 42, 30, 1, 1, 2, 4, 42, 28,
- 0, 1, 2, 4, 58, 20, 2, 1, 2, 4, 58, 30, 1, 1, 2, 4, 58, 28,
- 0, 1, 2, 4, 106, 20, 2, 1, 2, 4, 106, 30, 1, 1, 2, 4, 106, 30,
- 0, 1, 2, 4, 122, 30, 2, 1, 2, 4, 122, 30, 1, 1, 2, 4, 122, 30,
- 0, 1, 2, 4, 138, 30, 2, 1, 2, 4, 138, 63, 1, 1, 2, 4, 138, 63,
- 0, 1, 2, 4, 155, 30, 2, 1, 2, 4, 155, 63, 1, 1, 2, 4, 155, 63,
- 0, 1, 2, 5, 42, 18, 2, 1, 2, 5, 42, 20, 1, 1, 2, 5, 42, 22,
- 0, 1, 2, 5, 58, 18, 2, 1, 2, 5, 58, 20, 1, 1, 2, 5, 58, 22,
- 0, 1, 2, 5, 106, 20, 2, 1, 2, 5, 106, 20, 1, 1, 2, 5, 106, 30,
- 0, 1, 2, 5, 122, 30, 2, 1, 2, 5, 122, 20, 1, 1, 2, 5, 122, 30,
- 0, 1, 2, 5, 138, 30, 2, 1, 2, 5, 138, 63, 1, 1, 2, 5, 138, 63,
- 0, 1, 2, 5, 155, 30, 2, 1, 2, 5, 155, 63, 1, 1, 2, 5, 155, 63,
+static const struct rtw_txpwr_lmt_cfg_pair rtw8822b_txpwr_lmt_type5[] = {
+ { 0, 0, 0, 0, 1, 32, },
+ { 2, 0, 0, 0, 1, 28, },
+ { 1, 0, 0, 0, 1, 30, },
+ { 0, 0, 0, 0, 2, 32, },
+ { 2, 0, 0, 0, 2, 28, },
+ { 1, 0, 0, 0, 2, 30, },
+ { 0, 0, 0, 0, 3, 32, },
+ { 2, 0, 0, 0, 3, 28, },
+ { 1, 0, 0, 0, 3, 30, },
+ { 0, 0, 0, 0, 4, 32, },
+ { 2, 0, 0, 0, 4, 28, },
+ { 1, 0, 0, 0, 4, 30, },
+ { 0, 0, 0, 0, 5, 32, },
+ { 2, 0, 0, 0, 5, 28, },
+ { 1, 0, 0, 0, 5, 30, },
+ { 0, 0, 0, 0, 6, 32, },
+ { 2, 0, 0, 0, 6, 28, },
+ { 1, 0, 0, 0, 6, 30, },
+ { 0, 0, 0, 0, 7, 32, },
+ { 2, 0, 0, 0, 7, 28, },
+ { 1, 0, 0, 0, 7, 30, },
+ { 0, 0, 0, 0, 8, 32, },
+ { 2, 0, 0, 0, 8, 28, },
+ { 1, 0, 0, 0, 8, 30, },
+ { 0, 0, 0, 0, 9, 32, },
+ { 2, 0, 0, 0, 9, 28, },
+ { 1, 0, 0, 0, 9, 30, },
+ { 0, 0, 0, 0, 10, 32, },
+ { 2, 0, 0, 0, 10, 28, },
+ { 1, 0, 0, 0, 10, 30, },
+ { 0, 0, 0, 0, 11, 32, },
+ { 2, 0, 0, 0, 11, 28, },
+ { 1, 0, 0, 0, 11, 30, },
+ { 0, 0, 0, 0, 12, 26, },
+ { 2, 0, 0, 0, 12, 28, },
+ { 1, 0, 0, 0, 12, 30, },
+ { 0, 0, 0, 0, 13, 20, },
+ { 2, 0, 0, 0, 13, 28, },
+ { 1, 0, 0, 0, 13, 28, },
+ { 0, 0, 0, 0, 14, 63, },
+ { 2, 0, 0, 0, 14, 63, },
+ { 1, 0, 0, 0, 14, 32, },
+ { 0, 0, 0, 1, 1, 26, },
+ { 2, 0, 0, 1, 1, 30, },
+ { 1, 0, 0, 1, 1, 34, },
+ { 0, 0, 0, 1, 2, 30, },
+ { 2, 0, 0, 1, 2, 30, },
+ { 1, 0, 0, 1, 2, 34, },
+ { 0, 0, 0, 1, 3, 32, },
+ { 2, 0, 0, 1, 3, 30, },
+ { 1, 0, 0, 1, 3, 34, },
+ { 0, 0, 0, 1, 4, 34, },
+ { 2, 0, 0, 1, 4, 30, },
+ { 1, 0, 0, 1, 4, 34, },
+ { 0, 0, 0, 1, 5, 34, },
+ { 2, 0, 0, 1, 5, 30, },
+ { 1, 0, 0, 1, 5, 34, },
+ { 0, 0, 0, 1, 6, 34, },
+ { 2, 0, 0, 1, 6, 30, },
+ { 1, 0, 0, 1, 6, 34, },
+ { 0, 0, 0, 1, 7, 34, },
+ { 2, 0, 0, 1, 7, 30, },
+ { 1, 0, 0, 1, 7, 34, },
+ { 0, 0, 0, 1, 8, 34, },
+ { 2, 0, 0, 1, 8, 30, },
+ { 1, 0, 0, 1, 8, 34, },
+ { 0, 0, 0, 1, 9, 32, },
+ { 2, 0, 0, 1, 9, 30, },
+ { 1, 0, 0, 1, 9, 34, },
+ { 0, 0, 0, 1, 10, 30, },
+ { 2, 0, 0, 1, 10, 30, },
+ { 1, 0, 0, 1, 10, 34, },
+ { 0, 0, 0, 1, 11, 28, },
+ { 2, 0, 0, 1, 11, 30, },
+ { 1, 0, 0, 1, 11, 34, },
+ { 0, 0, 0, 1, 12, 22, },
+ { 2, 0, 0, 1, 12, 30, },
+ { 1, 0, 0, 1, 12, 34, },
+ { 0, 0, 0, 1, 13, 14, },
+ { 2, 0, 0, 1, 13, 30, },
+ { 1, 0, 0, 1, 13, 34, },
+ { 0, 0, 0, 1, 14, 63, },
+ { 2, 0, 0, 1, 14, 63, },
+ { 1, 0, 0, 1, 14, 63, },
+ { 0, 0, 0, 2, 1, 26, },
+ { 2, 0, 0, 2, 1, 30, },
+ { 1, 0, 0, 2, 1, 34, },
+ { 0, 0, 0, 2, 2, 30, },
+ { 2, 0, 0, 2, 2, 30, },
+ { 1, 0, 0, 2, 2, 34, },
+ { 0, 0, 0, 2, 3, 32, },
+ { 2, 0, 0, 2, 3, 30, },
+ { 1, 0, 0, 2, 3, 34, },
+ { 0, 0, 0, 2, 4, 34, },
+ { 2, 0, 0, 2, 4, 30, },
+ { 1, 0, 0, 2, 4, 34, },
+ { 0, 0, 0, 2, 5, 34, },
+ { 2, 0, 0, 2, 5, 30, },
+ { 1, 0, 0, 2, 5, 34, },
+ { 0, 0, 0, 2, 6, 34, },
+ { 2, 0, 0, 2, 6, 30, },
+ { 1, 0, 0, 2, 6, 34, },
+ { 0, 0, 0, 2, 7, 34, },
+ { 2, 0, 0, 2, 7, 30, },
+ { 1, 0, 0, 2, 7, 34, },
+ { 0, 0, 0, 2, 8, 34, },
+ { 2, 0, 0, 2, 8, 30, },
+ { 1, 0, 0, 2, 8, 34, },
+ { 0, 0, 0, 2, 9, 32, },
+ { 2, 0, 0, 2, 9, 30, },
+ { 1, 0, 0, 2, 9, 34, },
+ { 0, 0, 0, 2, 10, 30, },
+ { 2, 0, 0, 2, 10, 30, },
+ { 1, 0, 0, 2, 10, 34, },
+ { 0, 0, 0, 2, 11, 26, },
+ { 2, 0, 0, 2, 11, 30, },
+ { 1, 0, 0, 2, 11, 34, },
+ { 0, 0, 0, 2, 12, 20, },
+ { 2, 0, 0, 2, 12, 30, },
+ { 1, 0, 0, 2, 12, 34, },
+ { 0, 0, 0, 2, 13, 14, },
+ { 2, 0, 0, 2, 13, 30, },
+ { 1, 0, 0, 2, 13, 34, },
+ { 0, 0, 0, 2, 14, 63, },
+ { 2, 0, 0, 2, 14, 63, },
+ { 1, 0, 0, 2, 14, 63, },
+ { 0, 0, 0, 3, 1, 26, },
+ { 2, 0, 0, 3, 1, 18, },
+ { 1, 0, 0, 3, 1, 30, },
+ { 0, 0, 0, 3, 2, 28, },
+ { 2, 0, 0, 3, 2, 18, },
+ { 1, 0, 0, 3, 2, 30, },
+ { 0, 0, 0, 3, 3, 30, },
+ { 2, 0, 0, 3, 3, 18, },
+ { 1, 0, 0, 3, 3, 30, },
+ { 0, 0, 0, 3, 4, 30, },
+ { 2, 0, 0, 3, 4, 18, },
+ { 1, 0, 0, 3, 4, 30, },
+ { 0, 0, 0, 3, 5, 32, },
+ { 2, 0, 0, 3, 5, 18, },
+ { 1, 0, 0, 3, 5, 30, },
+ { 0, 0, 0, 3, 6, 32, },
+ { 2, 0, 0, 3, 6, 18, },
+ { 1, 0, 0, 3, 6, 30, },
+ { 0, 0, 0, 3, 7, 32, },
+ { 2, 0, 0, 3, 7, 18, },
+ { 1, 0, 0, 3, 7, 30, },
+ { 0, 0, 0, 3, 8, 30, },
+ { 2, 0, 0, 3, 8, 18, },
+ { 1, 0, 0, 3, 8, 30, },
+ { 0, 0, 0, 3, 9, 30, },
+ { 2, 0, 0, 3, 9, 18, },
+ { 1, 0, 0, 3, 9, 30, },
+ { 0, 0, 0, 3, 10, 28, },
+ { 2, 0, 0, 3, 10, 18, },
+ { 1, 0, 0, 3, 10, 30, },
+ { 0, 0, 0, 3, 11, 26, },
+ { 2, 0, 0, 3, 11, 18, },
+ { 1, 0, 0, 3, 11, 30, },
+ { 0, 0, 0, 3, 12, 20, },
+ { 2, 0, 0, 3, 12, 18, },
+ { 1, 0, 0, 3, 12, 30, },
+ { 0, 0, 0, 3, 13, 14, },
+ { 2, 0, 0, 3, 13, 18, },
+ { 1, 0, 0, 3, 13, 30, },
+ { 0, 0, 0, 3, 14, 63, },
+ { 2, 0, 0, 3, 14, 63, },
+ { 1, 0, 0, 3, 14, 63, },
+ { 0, 0, 1, 2, 1, 63, },
+ { 2, 0, 1, 2, 1, 63, },
+ { 1, 0, 1, 2, 1, 63, },
+ { 0, 0, 1, 2, 2, 63, },
+ { 2, 0, 1, 2, 2, 63, },
+ { 1, 0, 1, 2, 2, 63, },
+ { 0, 0, 1, 2, 3, 26, },
+ { 2, 0, 1, 2, 3, 30, },
+ { 1, 0, 1, 2, 3, 34, },
+ { 0, 0, 1, 2, 4, 26, },
+ { 2, 0, 1, 2, 4, 30, },
+ { 1, 0, 1, 2, 4, 34, },
+ { 0, 0, 1, 2, 5, 30, },
+ { 2, 0, 1, 2, 5, 30, },
+ { 1, 0, 1, 2, 5, 34, },
+ { 0, 0, 1, 2, 6, 32, },
+ { 2, 0, 1, 2, 6, 30, },
+ { 1, 0, 1, 2, 6, 34, },
+ { 0, 0, 1, 2, 7, 30, },
+ { 2, 0, 1, 2, 7, 30, },
+ { 1, 0, 1, 2, 7, 34, },
+ { 0, 0, 1, 2, 8, 26, },
+ { 2, 0, 1, 2, 8, 30, },
+ { 1, 0, 1, 2, 8, 34, },
+ { 0, 0, 1, 2, 9, 26, },
+ { 2, 0, 1, 2, 9, 30, },
+ { 1, 0, 1, 2, 9, 34, },
+ { 0, 0, 1, 2, 10, 20, },
+ { 2, 0, 1, 2, 10, 30, },
+ { 1, 0, 1, 2, 10, 34, },
+ { 0, 0, 1, 2, 11, 14, },
+ { 2, 0, 1, 2, 11, 30, },
+ { 1, 0, 1, 2, 11, 34, },
+ { 0, 0, 1, 2, 12, 63, },
+ { 2, 0, 1, 2, 12, 63, },
+ { 1, 0, 1, 2, 12, 63, },
+ { 0, 0, 1, 2, 13, 63, },
+ { 2, 0, 1, 2, 13, 63, },
+ { 1, 0, 1, 2, 13, 63, },
+ { 0, 0, 1, 2, 14, 63, },
+ { 2, 0, 1, 2, 14, 63, },
+ { 1, 0, 1, 2, 14, 63, },
+ { 0, 0, 1, 3, 1, 63, },
+ { 2, 0, 1, 3, 1, 63, },
+ { 1, 0, 1, 3, 1, 63, },
+ { 0, 0, 1, 3, 2, 63, },
+ { 2, 0, 1, 3, 2, 63, },
+ { 1, 0, 1, 3, 2, 63, },
+ { 0, 0, 1, 3, 3, 24, },
+ { 2, 0, 1, 3, 3, 18, },
+ { 1, 0, 1, 3, 3, 30, },
+ { 0, 0, 1, 3, 4, 24, },
+ { 2, 0, 1, 3, 4, 18, },
+ { 1, 0, 1, 3, 4, 30, },
+ { 0, 0, 1, 3, 5, 26, },
+ { 2, 0, 1, 3, 5, 18, },
+ { 1, 0, 1, 3, 5, 30, },
+ { 0, 0, 1, 3, 6, 28, },
+ { 2, 0, 1, 3, 6, 18, },
+ { 1, 0, 1, 3, 6, 30, },
+ { 0, 0, 1, 3, 7, 26, },
+ { 2, 0, 1, 3, 7, 18, },
+ { 1, 0, 1, 3, 7, 30, },
+ { 0, 0, 1, 3, 8, 26, },
+ { 2, 0, 1, 3, 8, 18, },
+ { 1, 0, 1, 3, 8, 30, },
+ { 0, 0, 1, 3, 9, 26, },
+ { 2, 0, 1, 3, 9, 18, },
+ { 1, 0, 1, 3, 9, 30, },
+ { 0, 0, 1, 3, 10, 20, },
+ { 2, 0, 1, 3, 10, 18, },
+ { 1, 0, 1, 3, 10, 30, },
+ { 0, 0, 1, 3, 11, 14, },
+ { 2, 0, 1, 3, 11, 18, },
+ { 1, 0, 1, 3, 11, 30, },
+ { 0, 0, 1, 3, 12, 63, },
+ { 2, 0, 1, 3, 12, 63, },
+ { 1, 0, 1, 3, 12, 63, },
+ { 0, 0, 1, 3, 13, 63, },
+ { 2, 0, 1, 3, 13, 63, },
+ { 1, 0, 1, 3, 13, 63, },
+ { 0, 0, 1, 3, 14, 63, },
+ { 2, 0, 1, 3, 14, 63, },
+ { 1, 0, 1, 3, 14, 63, },
+ { 0, 1, 0, 1, 36, 30, },
+ { 2, 1, 0, 1, 36, 32, },
+ { 1, 1, 0, 1, 36, 30, },
+ { 0, 1, 0, 1, 40, 32, },
+ { 2, 1, 0, 1, 40, 32, },
+ { 1, 1, 0, 1, 40, 30, },
+ { 0, 1, 0, 1, 44, 32, },
+ { 2, 1, 0, 1, 44, 32, },
+ { 1, 1, 0, 1, 44, 30, },
+ { 0, 1, 0, 1, 48, 32, },
+ { 2, 1, 0, 1, 48, 32, },
+ { 1, 1, 0, 1, 48, 30, },
+ { 0, 1, 0, 1, 52, 32, },
+ { 2, 1, 0, 1, 52, 32, },
+ { 1, 1, 0, 1, 52, 28, },
+ { 0, 1, 0, 1, 56, 32, },
+ { 2, 1, 0, 1, 56, 32, },
+ { 1, 1, 0, 1, 56, 28, },
+ { 0, 1, 0, 1, 60, 32, },
+ { 2, 1, 0, 1, 60, 32, },
+ { 1, 1, 0, 1, 60, 28, },
+ { 0, 1, 0, 1, 64, 28, },
+ { 2, 1, 0, 1, 64, 32, },
+ { 1, 1, 0, 1, 64, 28, },
+ { 0, 1, 0, 1, 100, 26, },
+ { 2, 1, 0, 1, 100, 32, },
+ { 1, 1, 0, 1, 100, 32, },
+ { 0, 1, 0, 1, 104, 32, },
+ { 2, 1, 0, 1, 104, 32, },
+ { 1, 1, 0, 1, 104, 32, },
+ { 0, 1, 0, 1, 108, 32, },
+ { 2, 1, 0, 1, 108, 32, },
+ { 1, 1, 0, 1, 108, 32, },
+ { 0, 1, 0, 1, 112, 32, },
+ { 2, 1, 0, 1, 112, 32, },
+ { 1, 1, 0, 1, 112, 32, },
+ { 0, 1, 0, 1, 116, 32, },
+ { 2, 1, 0, 1, 116, 32, },
+ { 1, 1, 0, 1, 116, 32, },
+ { 0, 1, 0, 1, 120, 32, },
+ { 2, 1, 0, 1, 120, 32, },
+ { 1, 1, 0, 1, 120, 32, },
+ { 0, 1, 0, 1, 124, 32, },
+ { 2, 1, 0, 1, 124, 32, },
+ { 1, 1, 0, 1, 124, 32, },
+ { 0, 1, 0, 1, 128, 32, },
+ { 2, 1, 0, 1, 128, 32, },
+ { 1, 1, 0, 1, 128, 32, },
+ { 0, 1, 0, 1, 132, 32, },
+ { 2, 1, 0, 1, 132, 32, },
+ { 1, 1, 0, 1, 132, 32, },
+ { 0, 1, 0, 1, 136, 32, },
+ { 2, 1, 0, 1, 136, 32, },
+ { 1, 1, 0, 1, 136, 32, },
+ { 0, 1, 0, 1, 140, 28, },
+ { 2, 1, 0, 1, 140, 32, },
+ { 1, 1, 0, 1, 140, 32, },
+ { 0, 1, 0, 1, 144, 28, },
+ { 2, 1, 0, 1, 144, 63, },
+ { 1, 1, 0, 1, 144, 63, },
+ { 0, 1, 0, 1, 149, 32, },
+ { 2, 1, 0, 1, 149, 63, },
+ { 1, 1, 0, 1, 149, 63, },
+ { 0, 1, 0, 1, 153, 32, },
+ { 2, 1, 0, 1, 153, 63, },
+ { 1, 1, 0, 1, 153, 63, },
+ { 0, 1, 0, 1, 157, 32, },
+ { 2, 1, 0, 1, 157, 63, },
+ { 1, 1, 0, 1, 157, 63, },
+ { 0, 1, 0, 1, 161, 32, },
+ { 2, 1, 0, 1, 161, 63, },
+ { 1, 1, 0, 1, 161, 63, },
+ { 0, 1, 0, 1, 165, 32, },
+ { 2, 1, 0, 1, 165, 63, },
+ { 1, 1, 0, 1, 165, 63, },
+ { 0, 1, 0, 2, 36, 30, },
+ { 2, 1, 0, 2, 36, 32, },
+ { 1, 1, 0, 2, 36, 28, },
+ { 0, 1, 0, 2, 40, 32, },
+ { 2, 1, 0, 2, 40, 32, },
+ { 1, 1, 0, 2, 40, 28, },
+ { 0, 1, 0, 2, 44, 32, },
+ { 2, 1, 0, 2, 44, 32, },
+ { 1, 1, 0, 2, 44, 28, },
+ { 0, 1, 0, 2, 48, 32, },
+ { 2, 1, 0, 2, 48, 32, },
+ { 1, 1, 0, 2, 48, 28, },
+ { 0, 1, 0, 2, 52, 32, },
+ { 2, 1, 0, 2, 52, 32, },
+ { 1, 1, 0, 2, 52, 28, },
+ { 0, 1, 0, 2, 56, 32, },
+ { 2, 1, 0, 2, 56, 32, },
+ { 1, 1, 0, 2, 56, 28, },
+ { 0, 1, 0, 2, 60, 32, },
+ { 2, 1, 0, 2, 60, 32, },
+ { 1, 1, 0, 2, 60, 28, },
+ { 0, 1, 0, 2, 64, 28, },
+ { 2, 1, 0, 2, 64, 32, },
+ { 1, 1, 0, 2, 64, 28, },
+ { 0, 1, 0, 2, 100, 26, },
+ { 2, 1, 0, 2, 100, 32, },
+ { 1, 1, 0, 2, 100, 32, },
+ { 0, 1, 0, 2, 104, 32, },
+ { 2, 1, 0, 2, 104, 32, },
+ { 1, 1, 0, 2, 104, 32, },
+ { 0, 1, 0, 2, 108, 32, },
+ { 2, 1, 0, 2, 108, 32, },
+ { 1, 1, 0, 2, 108, 32, },
+ { 0, 1, 0, 2, 112, 32, },
+ { 2, 1, 0, 2, 112, 32, },
+ { 1, 1, 0, 2, 112, 32, },
+ { 0, 1, 0, 2, 116, 32, },
+ { 2, 1, 0, 2, 116, 32, },
+ { 1, 1, 0, 2, 116, 32, },
+ { 0, 1, 0, 2, 120, 32, },
+ { 2, 1, 0, 2, 120, 32, },
+ { 1, 1, 0, 2, 120, 32, },
+ { 0, 1, 0, 2, 124, 32, },
+ { 2, 1, 0, 2, 124, 32, },
+ { 1, 1, 0, 2, 124, 32, },
+ { 0, 1, 0, 2, 128, 32, },
+ { 2, 1, 0, 2, 128, 32, },
+ { 1, 1, 0, 2, 128, 32, },
+ { 0, 1, 0, 2, 132, 32, },
+ { 2, 1, 0, 2, 132, 32, },
+ { 1, 1, 0, 2, 132, 32, },
+ { 0, 1, 0, 2, 136, 32, },
+ { 2, 1, 0, 2, 136, 32, },
+ { 1, 1, 0, 2, 136, 32, },
+ { 0, 1, 0, 2, 140, 26, },
+ { 2, 1, 0, 2, 140, 32, },
+ { 1, 1, 0, 2, 140, 32, },
+ { 0, 1, 0, 2, 144, 26, },
+ { 2, 1, 0, 2, 144, 63, },
+ { 1, 1, 0, 2, 144, 63, },
+ { 0, 1, 0, 2, 149, 32, },
+ { 2, 1, 0, 2, 149, 63, },
+ { 1, 1, 0, 2, 149, 63, },
+ { 0, 1, 0, 2, 153, 32, },
+ { 2, 1, 0, 2, 153, 63, },
+ { 1, 1, 0, 2, 153, 63, },
+ { 0, 1, 0, 2, 157, 32, },
+ { 2, 1, 0, 2, 157, 63, },
+ { 1, 1, 0, 2, 157, 63, },
+ { 0, 1, 0, 2, 161, 32, },
+ { 2, 1, 0, 2, 161, 63, },
+ { 1, 1, 0, 2, 161, 63, },
+ { 0, 1, 0, 2, 165, 32, },
+ { 2, 1, 0, 2, 165, 63, },
+ { 1, 1, 0, 2, 165, 63, },
+ { 0, 1, 0, 3, 36, 28, },
+ { 2, 1, 0, 3, 36, 20, },
+ { 1, 1, 0, 3, 36, 22, },
+ { 0, 1, 0, 3, 40, 30, },
+ { 2, 1, 0, 3, 40, 20, },
+ { 1, 1, 0, 3, 40, 22, },
+ { 0, 1, 0, 3, 44, 30, },
+ { 2, 1, 0, 3, 44, 20, },
+ { 1, 1, 0, 3, 44, 22, },
+ { 0, 1, 0, 3, 48, 30, },
+ { 2, 1, 0, 3, 48, 20, },
+ { 1, 1, 0, 3, 48, 22, },
+ { 0, 1, 0, 3, 52, 30, },
+ { 2, 1, 0, 3, 52, 20, },
+ { 1, 1, 0, 3, 52, 22, },
+ { 0, 1, 0, 3, 56, 30, },
+ { 2, 1, 0, 3, 56, 20, },
+ { 1, 1, 0, 3, 56, 22, },
+ { 0, 1, 0, 3, 60, 30, },
+ { 2, 1, 0, 3, 60, 20, },
+ { 1, 1, 0, 3, 60, 22, },
+ { 0, 1, 0, 3, 64, 28, },
+ { 2, 1, 0, 3, 64, 20, },
+ { 1, 1, 0, 3, 64, 22, },
+ { 0, 1, 0, 3, 100, 26, },
+ { 2, 1, 0, 3, 100, 20, },
+ { 1, 1, 0, 3, 100, 30, },
+ { 0, 1, 0, 3, 104, 30, },
+ { 2, 1, 0, 3, 104, 20, },
+ { 1, 1, 0, 3, 104, 30, },
+ { 0, 1, 0, 3, 108, 32, },
+ { 2, 1, 0, 3, 108, 20, },
+ { 1, 1, 0, 3, 108, 30, },
+ { 0, 1, 0, 3, 112, 32, },
+ { 2, 1, 0, 3, 112, 20, },
+ { 1, 1, 0, 3, 112, 30, },
+ { 0, 1, 0, 3, 116, 32, },
+ { 2, 1, 0, 3, 116, 20, },
+ { 1, 1, 0, 3, 116, 30, },
+ { 0, 1, 0, 3, 120, 32, },
+ { 2, 1, 0, 3, 120, 20, },
+ { 1, 1, 0, 3, 120, 30, },
+ { 0, 1, 0, 3, 124, 32, },
+ { 2, 1, 0, 3, 124, 20, },
+ { 1, 1, 0, 3, 124, 30, },
+ { 0, 1, 0, 3, 128, 32, },
+ { 2, 1, 0, 3, 128, 20, },
+ { 1, 1, 0, 3, 128, 30, },
+ { 0, 1, 0, 3, 132, 32, },
+ { 2, 1, 0, 3, 132, 20, },
+ { 1, 1, 0, 3, 132, 30, },
+ { 0, 1, 0, 3, 136, 30, },
+ { 2, 1, 0, 3, 136, 20, },
+ { 1, 1, 0, 3, 136, 30, },
+ { 0, 1, 0, 3, 140, 26, },
+ { 2, 1, 0, 3, 140, 20, },
+ { 1, 1, 0, 3, 140, 30, },
+ { 0, 1, 0, 3, 144, 26, },
+ { 2, 1, 0, 3, 144, 63, },
+ { 1, 1, 0, 3, 144, 63, },
+ { 0, 1, 0, 3, 149, 32, },
+ { 2, 1, 0, 3, 149, 63, },
+ { 1, 1, 0, 3, 149, 63, },
+ { 0, 1, 0, 3, 153, 32, },
+ { 2, 1, 0, 3, 153, 63, },
+ { 1, 1, 0, 3, 153, 63, },
+ { 0, 1, 0, 3, 157, 32, },
+ { 2, 1, 0, 3, 157, 63, },
+ { 1, 1, 0, 3, 157, 63, },
+ { 0, 1, 0, 3, 161, 32, },
+ { 2, 1, 0, 3, 161, 63, },
+ { 1, 1, 0, 3, 161, 63, },
+ { 0, 1, 0, 3, 165, 32, },
+ { 2, 1, 0, 3, 165, 63, },
+ { 1, 1, 0, 3, 165, 63, },
+ { 0, 1, 1, 2, 38, 22, },
+ { 2, 1, 1, 2, 38, 30, },
+ { 1, 1, 1, 2, 38, 30, },
+ { 0, 1, 1, 2, 46, 30, },
+ { 2, 1, 1, 2, 46, 30, },
+ { 1, 1, 1, 2, 46, 30, },
+ { 0, 1, 1, 2, 54, 30, },
+ { 2, 1, 1, 2, 54, 30, },
+ { 1, 1, 1, 2, 54, 30, },
+ { 0, 1, 1, 2, 62, 24, },
+ { 2, 1, 1, 2, 62, 30, },
+ { 1, 1, 1, 2, 62, 30, },
+ { 0, 1, 1, 2, 102, 24, },
+ { 2, 1, 1, 2, 102, 30, },
+ { 1, 1, 1, 2, 102, 30, },
+ { 0, 1, 1, 2, 110, 30, },
+ { 2, 1, 1, 2, 110, 30, },
+ { 1, 1, 1, 2, 110, 30, },
+ { 0, 1, 1, 2, 118, 30, },
+ { 2, 1, 1, 2, 118, 30, },
+ { 1, 1, 1, 2, 118, 30, },
+ { 0, 1, 1, 2, 126, 30, },
+ { 2, 1, 1, 2, 126, 30, },
+ { 1, 1, 1, 2, 126, 30, },
+ { 0, 1, 1, 2, 134, 30, },
+ { 2, 1, 1, 2, 134, 30, },
+ { 1, 1, 1, 2, 134, 30, },
+ { 0, 1, 1, 2, 142, 30, },
+ { 2, 1, 1, 2, 142, 63, },
+ { 1, 1, 1, 2, 142, 63, },
+ { 0, 1, 1, 2, 151, 30, },
+ { 2, 1, 1, 2, 151, 63, },
+ { 1, 1, 1, 2, 151, 63, },
+ { 0, 1, 1, 2, 159, 30, },
+ { 2, 1, 1, 2, 159, 63, },
+ { 1, 1, 1, 2, 159, 63, },
+ { 0, 1, 1, 3, 38, 20, },
+ { 2, 1, 1, 3, 38, 20, },
+ { 1, 1, 1, 3, 38, 22, },
+ { 0, 1, 1, 3, 46, 30, },
+ { 2, 1, 1, 3, 46, 20, },
+ { 1, 1, 1, 3, 46, 22, },
+ { 0, 1, 1, 3, 54, 30, },
+ { 2, 1, 1, 3, 54, 20, },
+ { 1, 1, 1, 3, 54, 22, },
+ { 0, 1, 1, 3, 62, 22, },
+ { 2, 1, 1, 3, 62, 20, },
+ { 1, 1, 1, 3, 62, 22, },
+ { 0, 1, 1, 3, 102, 22, },
+ { 2, 1, 1, 3, 102, 20, },
+ { 1, 1, 1, 3, 102, 30, },
+ { 0, 1, 1, 3, 110, 30, },
+ { 2, 1, 1, 3, 110, 20, },
+ { 1, 1, 1, 3, 110, 30, },
+ { 0, 1, 1, 3, 118, 30, },
+ { 2, 1, 1, 3, 118, 20, },
+ { 1, 1, 1, 3, 118, 30, },
+ { 0, 1, 1, 3, 126, 30, },
+ { 2, 1, 1, 3, 126, 20, },
+ { 1, 1, 1, 3, 126, 30, },
+ { 0, 1, 1, 3, 134, 30, },
+ { 2, 1, 1, 3, 134, 20, },
+ { 1, 1, 1, 3, 134, 30, },
+ { 0, 1, 1, 3, 142, 30, },
+ { 2, 1, 1, 3, 142, 63, },
+ { 1, 1, 1, 3, 142, 63, },
+ { 0, 1, 1, 3, 151, 30, },
+ { 2, 1, 1, 3, 151, 63, },
+ { 1, 1, 1, 3, 151, 63, },
+ { 0, 1, 1, 3, 159, 30, },
+ { 2, 1, 1, 3, 159, 63, },
+ { 1, 1, 1, 3, 159, 63, },
+ { 0, 1, 2, 4, 42, 20, },
+ { 2, 1, 2, 4, 42, 30, },
+ { 1, 1, 2, 4, 42, 28, },
+ { 0, 1, 2, 4, 58, 20, },
+ { 2, 1, 2, 4, 58, 30, },
+ { 1, 1, 2, 4, 58, 28, },
+ { 0, 1, 2, 4, 106, 20, },
+ { 2, 1, 2, 4, 106, 30, },
+ { 1, 1, 2, 4, 106, 30, },
+ { 0, 1, 2, 4, 122, 30, },
+ { 2, 1, 2, 4, 122, 30, },
+ { 1, 1, 2, 4, 122, 30, },
+ { 0, 1, 2, 4, 138, 30, },
+ { 2, 1, 2, 4, 138, 63, },
+ { 1, 1, 2, 4, 138, 63, },
+ { 0, 1, 2, 4, 155, 30, },
+ { 2, 1, 2, 4, 155, 63, },
+ { 1, 1, 2, 4, 155, 63, },
+ { 0, 1, 2, 5, 42, 18, },
+ { 2, 1, 2, 5, 42, 20, },
+ { 1, 1, 2, 5, 42, 22, },
+ { 0, 1, 2, 5, 58, 18, },
+ { 2, 1, 2, 5, 58, 20, },
+ { 1, 1, 2, 5, 58, 22, },
+ { 0, 1, 2, 5, 106, 20, },
+ { 2, 1, 2, 5, 106, 20, },
+ { 1, 1, 2, 5, 106, 30, },
+ { 0, 1, 2, 5, 122, 30, },
+ { 2, 1, 2, 5, 122, 20, },
+ { 1, 1, 2, 5, 122, 30, },
+ { 0, 1, 2, 5, 138, 30, },
+ { 2, 1, 2, 5, 138, 63, },
+ { 1, 1, 2, 5, 138, 63, },
+ { 0, 1, 2, 5, 155, 30, },
+ { 2, 1, 2, 5, 155, 63, },
+ { 1, 1, 2, 5, 155, 63, },
};
RTW_DECL_TABLE_TXPWR_LMT(rtw8822b_txpwr_lmt_type5);
diff --git a/drivers/net/wireless/realtek/rtw88/rtw8822c.c b/drivers/net/wireless/realtek/rtw88/rtw8822c.c
index f6214ff20337..207f64cc3e55 100644
--- a/drivers/net/wireless/realtek/rtw88/rtw8822c.c
+++ b/drivers/net/wireless/realtek/rtw88/rtw8822c.c
@@ -3,6 +3,7 @@
*/
#include "main.h"
+#include "coex.h"
#include "fw.h"
#include "tx.h"
#include "rx.h"
@@ -31,6 +32,7 @@ static int rtw8822c_read_efuse(struct rtw_dev *rtwdev, u8 *log_map)
map = (struct rtw8822c_efuse *)log_map;
efuse->rfe_option = map->rfe_option;
+ efuse->rf_board_option = map->rf_board_option;
efuse->crystal_cap = map->xtal_k;
efuse->channel_plan = map->channel_plan;
efuse->country_code[0] = map->country_code[0];
@@ -1041,12 +1043,6 @@ static void rtw8822c_phy_set_param(struct rtw_dev *rtwdev)
dm_info->cck_gi_l_bnd = ((cck_gi_l_bnd_msb << 4) | (cck_gi_l_bnd_lsb));
rtw8822c_rf_init(rtwdev);
- /* wifi path controller */
- rtw_write32_mask(rtwdev, 0x70, 0xff000000, 0x0e);
- rtw_write32_mask(rtwdev, 0x1704, 0xffffffff, 0x7700);
- rtw_write32_mask(rtwdev, 0x1700, 0xffffffff, 0xc00f0038);
- rtw_write32_mask(rtwdev, 0x6c0, 0xffffffff, 0xaaaaaaaa);
- rtw_write32_mask(rtwdev, 0x6c4, 0xffffffff, 0xaaaaaaaa);
}
#define WLAN_TXQ_RPT_EN 0x1F
@@ -1817,6 +1813,7 @@ static void rtw8822c_false_alarm_statistics(struct rtw_dev *rtwdev)
struct rtw_dm_info *dm_info = &rtwdev->dm_info;
u32 cck_enable;
u32 cck_fa_cnt;
+ u32 crc32_cnt;
u32 ofdm_fa_cnt;
u32 ofdm_fa_cnt1, ofdm_fa_cnt2, ofdm_fa_cnt3, ofdm_fa_cnt4, ofdm_fa_cnt5;
u16 parity_fail, rate_illegal, crc8_fail, mcs_fail, sb_search_fail,
@@ -1848,6 +1845,19 @@ static void rtw8822c_false_alarm_statistics(struct rtw_dev *rtwdev)
dm_info->total_fa_cnt = ofdm_fa_cnt;
dm_info->total_fa_cnt += cck_enable ? cck_fa_cnt : 0;
+ crc32_cnt = rtw_read32(rtwdev, 0x2c04);
+ dm_info->cck_ok_cnt = crc32_cnt & 0xffff;
+ dm_info->cck_err_cnt = (crc32_cnt & 0xffff0000) >> 16;
+ crc32_cnt = rtw_read32(rtwdev, 0x2c14);
+ dm_info->ofdm_ok_cnt = crc32_cnt & 0xffff;
+ dm_info->ofdm_err_cnt = (crc32_cnt & 0xffff0000) >> 16;
+ crc32_cnt = rtw_read32(rtwdev, 0x2c10);
+ dm_info->ht_ok_cnt = crc32_cnt & 0xffff;
+ dm_info->ht_err_cnt = (crc32_cnt & 0xffff0000) >> 16;
+ crc32_cnt = rtw_read32(rtwdev, 0x2c0c);
+ dm_info->vht_ok_cnt = crc32_cnt & 0xffff;
+ dm_info->vht_err_cnt = (crc32_cnt & 0xffff0000) >> 16;
+
rtw_write32_mask(rtwdev, REG_CCANRX, BIT_CCK_FA_RST, 0);
rtw_write32_mask(rtwdev, REG_CCANRX, BIT_CCK_FA_RST, 2);
rtw_write32_mask(rtwdev, REG_CCANRX, BIT_OFDM_FA_RST, 0);
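Each of the four CRC32 registers sampled above packs a per-class OK count in the low 16 bits and an error count in the high 16 bits. A hedged sketch of the usual next step, turning one counter pair into a packet error rate, is below; the helper is hypothetical and not part of this patch.

/* Hypothetical helper: derive a packet error rate (percent) from one
 * ok/err counter pair as sampled above.
 */
static u8 rtw_crc32_per(u16 ok_cnt, u16 err_cnt)
{
	u32 total = (u32)ok_cnt + err_cnt;	/* max 0x1fffe, no overflow */

	if (total == 0)
		return 0;	/* nothing received in this window */

	return (u8)((err_cnt * 100) / total);
}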
@@ -1864,6 +1874,161 @@ static void rtw8822c_do_iqk(struct rtw_dev *rtwdev)
{
}
+/* for coex */
+static void rtw8822c_coex_cfg_init(struct rtw_dev *rtwdev)
+{
+ /* enable TBTT interrupt */
+ rtw_write8_set(rtwdev, REG_BCN_CTRL, BIT_EN_BCN_FUNCTION);
+
+ /* BT report packet sample rate */
+ /* 0x790[5:0]=0x5 */
+ rtw_write8_set(rtwdev, REG_BT_TDMA_TIME, 0x05);
+
+ /* enable BT counter statistics */
+ rtw_write8(rtwdev, REG_BT_STAT_CTRL, 0x1);
+
+ /* enable PTA (3-wire function from BT side) */
+ rtw_write32_set(rtwdev, REG_GPIO_MUXCFG, BIT_BT_PTA_EN);
+ rtw_write32_set(rtwdev, REG_GPIO_MUXCFG, BIT_BT_AOD_GPIO3);
+
+ /* enable PTA (tx/rx signal from WiFi side) */
+ rtw_write8_set(rtwdev, REG_QUEUE_CTRL, BIT_PTA_WL_TX_EN);
+ /* wl tx signal to PTA is not gated by EDCCA */
+ rtw_write8_clr(rtwdev, REG_QUEUE_CTRL, BIT_PTA_EDCCA_EN);
+ /* GNT_BT = 1 when both are selected */
+ rtw_write8_set(rtwdev, REG_BT_COEX_V2, BIT_GNT_BT_POLARITY);
+ /* BT_CCA = ~GNT_WL_BB, not OR'ed with GNT_BT_BB or LTE_Rx */
+ rtw_write8_clr(rtwdev, REG_DUMMY_PAGE4_V1, BIT_BTCCA_CTRL);
+
+ /* to avoid RF parameter error */
+ rtw_write_rf(rtwdev, RF_PATH_B, 0x1, 0xfffff, 0x40000);
+}
+
+static void rtw8822c_coex_cfg_gnt_fix(struct rtw_dev *rtwdev)
+{
+ struct rtw_coex *coex = &rtwdev->coex;
+ struct rtw_coex_stat *coex_stat = &coex->stat;
+ struct rtw_efuse *efuse = &rtwdev->efuse;
+ u32 rf_0x1;
+
+ if (coex_stat->gnt_workaround_state == coex_stat->wl_coex_mode)
+ return;
+
+ coex_stat->gnt_workaround_state = coex_stat->wl_coex_mode;
+
+ if ((coex_stat->kt_ver == 0 && coex->under_5g) || coex->freerun)
+ rf_0x1 = 0x40021;
+ else
+ rf_0x1 = 0x40000;
+
+ /* BT at S1 for Shared-Ant */
+ if (efuse->share_ant)
+ rf_0x1 |= BIT(13);
+
+ rtw_write_rf(rtwdev, RF_PATH_B, 0x1, 0xfffff, rf_0x1);
+
+ /* WL-S0 2G RF TRX cannot be masked by GNT_BT
+ * enable "WL-S0 BB change RF mode if GNT_BT = 1" for shared-antenna type
+ * disable: 0x1860[3] = 1, enable: 0x1860[3] = 0
+ *
+ * enable "DAC off if GNT_WL = 0" for non-shared-antenna
+ * disable: 0x1c30[22] = 0,
+ * enable: 0x1c30[22] = 1, 0x1c38[12] = 0, 0x1c38[28] = 1
+ *
+ * disable "WL-S1 BB change RF mode if GNT_BT"
+ * since the RF TRx mask can do it
+ */
+ rtw_write8_mask(rtwdev, 0x1c32, BIT(6), 1);
+ rtw_write8_mask(rtwdev, 0x1c39, BIT(4), 0);
+ rtw_write8_mask(rtwdev, 0x1c3b, BIT(4), 1);
+ rtw_write8_mask(rtwdev, 0x4160, BIT(3), 1);
+
+ /* disable WL-S0 BB change RF mode if wifi is at 5G,
+ * or antenna path is separated
+ */
+ if (coex_stat->wl_coex_mode == COEX_WLINK_5G ||
+ coex->under_5g || !efuse->share_ant) {
+ if (coex_stat->kt_ver >= 3) {
+ rtw_write8_mask(rtwdev, 0x1860, BIT(3), 0);
+ rtw_write8_mask(rtwdev, 0x1ca7, BIT(3), 1);
+ } else {
+ rtw_write8_mask(rtwdev, 0x1860, BIT(3), 1);
+ }
+ } else {
+ /* shared-antenna */
+ rtw_write8_mask(rtwdev, 0x1860, BIT(3), 0);
+ if (coex_stat->kt_ver >= 3)
+ rtw_write8_mask(rtwdev, 0x1ca7, BIT(3), 0);
+ }
+}
+
+static void rtw8822c_coex_cfg_gnt_debug(struct rtw_dev *rtwdev)
+{
+ rtw_write8_mask(rtwdev, 0x66, BIT(4), 0);
+ rtw_write8_mask(rtwdev, 0x67, BIT(0), 0);
+ rtw_write8_mask(rtwdev, 0x42, BIT(3), 0);
+ rtw_write8_mask(rtwdev, 0x65, BIT(7), 0);
+ rtw_write8_mask(rtwdev, 0x73, BIT(3), 0);
+}
+
+static void rtw8822c_coex_cfg_rfe_type(struct rtw_dev *rtwdev)
+{
+ struct rtw_coex *coex = &rtwdev->coex;
+ struct rtw_coex_rfe *coex_rfe = &coex->rfe;
+ struct rtw_efuse *efuse = &rtwdev->efuse;
+
+ coex_rfe->rfe_module_type = rtwdev->efuse.rfe_option;
+ coex_rfe->ant_switch_polarity = 0;
+ coex_rfe->ant_switch_exist = false;
+ coex_rfe->ant_switch_with_bt = false;
+ coex_rfe->ant_switch_diversity = false;
+
+ if (efuse->share_ant)
+ coex_rfe->wlg_at_btg = true;
+ else
+ coex_rfe->wlg_at_btg = false;
+
+ /* disable LTE coex in wifi side */
+ rtw_coex_write_indirect_reg(rtwdev, 0x38, BIT_LTE_COEX_EN, 0x0);
+ rtw_coex_write_indirect_reg(rtwdev, 0xa0, MASKLWORD, 0xffff);
+ rtw_coex_write_indirect_reg(rtwdev, 0xa4, MASKLWORD, 0xffff);
+}
+
+static void rtw8822c_coex_cfg_wl_tx_power(struct rtw_dev *rtwdev, u8 wl_pwr)
+{
+ struct rtw_coex *coex = &rtwdev->coex;
+ struct rtw_coex_dm *coex_dm = &coex->dm;
+
+ if (wl_pwr == coex_dm->cur_wl_pwr_lvl)
+ return;
+
+ coex_dm->cur_wl_pwr_lvl = wl_pwr;
+}
+
+static void rtw8822c_coex_cfg_wl_rx_gain(struct rtw_dev *rtwdev, bool low_gain)
+{
+ struct rtw_coex *coex = &rtwdev->coex;
+ struct rtw_coex_dm *coex_dm = &coex->dm;
+
+ if (low_gain == coex_dm->cur_wl_rx_low_gain_en)
+ return;
+
+ coex_dm->cur_wl_rx_low_gain_en = low_gain;
+
+ if (coex_dm->cur_wl_rx_low_gain_en) {
+ /* set Rx filter corner RCK offset */
+ rtw_write_rf(rtwdev, RF_PATH_A, 0xde, 0xfffff, 0x22);
+ rtw_write_rf(rtwdev, RF_PATH_A, 0x1d, 0xfffff, 0x36);
+ rtw_write_rf(rtwdev, RF_PATH_B, 0xde, 0xfffff, 0x22);
+ rtw_write_rf(rtwdev, RF_PATH_B, 0x1d, 0xfffff, 0x36);
+ } else {
+ /* set Rx filter corner RCK offset */
+ rtw_write_rf(rtwdev, RF_PATH_A, 0xde, 0xfffff, 0x20);
+ rtw_write_rf(rtwdev, RF_PATH_A, 0x1d, 0xfffff, 0x0);
+ rtw_write_rf(rtwdev, RF_PATH_B, 0x1d, 0xfffff, 0x0);
+ }
+}
+
static struct rtw_pwr_seq_cmd trans_carddis_to_cardemu_8822c[] = {
{0x0086,
RTW_PWR_CUT_ALL_MSK,
@@ -2232,8 +2397,160 @@ static struct rtw_chip_ops rtw8822c_ops = {
.cfg_ldo25 = rtw8822c_cfg_ldo25,
.false_alarm_statistics = rtw8822c_false_alarm_statistics,
.do_iqk = rtw8822c_do_iqk,
+
+ .coex_set_init = rtw8822c_coex_cfg_init,
+ .coex_set_ant_switch = NULL,
+ .coex_set_gnt_fix = rtw8822c_coex_cfg_gnt_fix,
+ .coex_set_gnt_debug = rtw8822c_coex_cfg_gnt_debug,
+ .coex_set_rfe_type = rtw8822c_coex_cfg_rfe_type,
+ .coex_set_wl_tx_power = rtw8822c_coex_cfg_wl_tx_power,
+ .coex_set_wl_rx_gain = rtw8822c_coex_cfg_wl_rx_gain,
+};
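Note that .coex_set_ant_switch is wired to NULL, which suggests the coex core treats these chip ops as optional hooks. A minimal sketch of a NULL-tolerant call site follows, assuming a wrapper of this shape in the core; the name and signature are illustrative.

static void rtw_coex_set_ant_switch(struct rtw_dev *rtwdev, u8 ctrl_type,
				    u8 pos_type)
{
	/* skip the hook when a chip (like 8822c here) does not
	 * implement antenna-switch control
	 */
	if (!rtwdev->chip->ops->coex_set_ant_switch)
		return;

	rtwdev->chip->ops->coex_set_ant_switch(rtwdev, ctrl_type, pos_type);
}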
+
+/* Shared-Antenna Coex Table */
+static const struct coex_table_para table_sant_8822c[] = {
+ {0xffffffff, 0xffffffff}, /* case-0 */
+ {0x55555555, 0x55555555},
+ {0x66555555, 0x66555555},
+ {0xaaaaaaaa, 0xaaaaaaaa},
+ {0x5a5a5a5a, 0x5a5a5a5a},
+ {0xfafafafa, 0xfafafafa}, /* case-5 */
+ {0x6a5a6a5a, 0xaaaaaaaa},
+ {0x6a5a56aa, 0x6a5a56aa},
+ {0x6a5a5a5a, 0x6a5a5a5a},
+ {0x66555555, 0x5a5a5a5a},
+ {0x66555555, 0x6a5a5a5a}, /* case-10 */
+ {0x66555555, 0xfafafafa},
+ {0x66555555, 0x6a5a5aaa},
+ {0x66555555, 0x5aaa5aaa},
+ {0x66555555, 0xaaaa5aaa},
+ {0x66555555, 0xaaaaaaaa}, /* case-15 */
+ {0xffff55ff, 0xfafafafa},
+ {0xffff55ff, 0x6afa5afa},
+ {0xaaffffaa, 0xfafafafa},
+ {0xaa5555aa, 0x5a5a5a5a},
+ {0xaa5555aa, 0x6a5a5a5a}, /* case-20 */
+ {0xaa5555aa, 0xaaaaaaaa},
+ {0xffffffff, 0x5a5a5a5a},
+ {0xffffffff, 0x6a5a5a5a},
+ {0xffffffff, 0x55555555},
+ {0xffffffff, 0x6a5a5aaa}, /* case-25 */
+ {0x55555555, 0x5a5a5a5a},
+ {0x55555555, 0xaaaaaaaa},
+ {0x55555555, 0x6a5a6a5a},
+ {0x66556655, 0x66556655}
+};
+
+/* Non-Shared-Antenna Coex Table */
+static const struct coex_table_para table_nsant_8822c[] = {
+ {0xffffffff, 0xffffffff}, /* case-100 */
+ {0x55555555, 0x55555555},
+ {0x66555555, 0x66555555},
+ {0xaaaaaaaa, 0xaaaaaaaa},
+ {0x5a5a5a5a, 0x5a5a5a5a},
+ {0xfafafafa, 0xfafafafa}, /* case-105 */
+ {0x5afa5afa, 0x5afa5afa},
+ {0x55555555, 0xfafafafa},
+ {0x66555555, 0xfafafafa},
+ {0x66555555, 0x5a5a5a5a},
+ {0x66555555, 0x6a5a5a5a}, /* case-110 */
+ {0x66555555, 0xaaaaaaaa},
+ {0xffff55ff, 0xfafafafa},
+ {0xffff55ff, 0x5afa5afa},
+ {0xffff55ff, 0xaaaaaaaa},
+ {0xaaffffaa, 0xfafafafa}, /* case-115 */
+ {0xaaffffaa, 0x5afa5afa},
+ {0xaaffffaa, 0xaaaaaaaa},
+ {0xffffffff, 0xfafafafa},
+ {0xffffffff, 0x5afa5afa},
+ {0xffffffff, 0xaaaaaaaa}, /* case-120 */
+ {0x55ff55ff, 0x5afa5afa},
+ {0x55ff55ff, 0xaaaaaaaa},
+ {0x55ff55ff, 0x55ff55ff}
+};
+
+/* Shared-Antenna TDMA */
+static const struct coex_tdma_para tdma_sant_8822c[] = {
+ { {0x00, 0x00, 0x00, 0x00, 0x00} }, /* case-0 */
+ { {0x61, 0x45, 0x03, 0x11, 0x11} },
+ { {0x61, 0x3a, 0x03, 0x11, 0x11} },
+ { {0x61, 0x30, 0x03, 0x11, 0x11} },
+ { {0x61, 0x20, 0x03, 0x11, 0x11} },
+ { {0x61, 0x10, 0x03, 0x11, 0x11} }, /* case-5 */
+ { {0x61, 0x45, 0x03, 0x11, 0x10} },
+ { {0x61, 0x3a, 0x03, 0x11, 0x10} },
+ { {0x61, 0x30, 0x03, 0x11, 0x10} },
+ { {0x61, 0x20, 0x03, 0x11, 0x10} },
+ { {0x61, 0x10, 0x03, 0x11, 0x10} }, /* case-10 */
+ { {0x61, 0x08, 0x03, 0x11, 0x14} },
+ { {0x61, 0x08, 0x03, 0x10, 0x14} },
+ { {0x51, 0x08, 0x03, 0x10, 0x54} },
+ { {0x51, 0x08, 0x03, 0x10, 0x55} },
+ { {0x51, 0x08, 0x07, 0x10, 0x54} }, /* case-15 */
+ { {0x51, 0x45, 0x03, 0x10, 0x10} },
+ { {0x51, 0x3a, 0x03, 0x10, 0x50} },
+ { {0x51, 0x30, 0x03, 0x10, 0x50} },
+ { {0x51, 0x20, 0x03, 0x10, 0x50} },
+ { {0x51, 0x10, 0x03, 0x10, 0x50} }, /* case-20 */
+ { {0x51, 0x4a, 0x03, 0x10, 0x50} },
+ { {0x51, 0x0c, 0x03, 0x10, 0x54} },
+ { {0x55, 0x08, 0x03, 0x10, 0x54} },
+ { {0x65, 0x10, 0x03, 0x11, 0x11} },
+ { {0x51, 0x10, 0x03, 0x10, 0x51} }, /* case-25 */
+ { {0x51, 0x08, 0x03, 0x10, 0x50} }
};
+/* Non-Shared-Antenna TDMA */
+static const struct coex_tdma_para tdma_nsant_8822c[] = {
+ { {0x00, 0x00, 0x00, 0x00, 0x00} }, /* case-100 */
+ { {0x61, 0x45, 0x03, 0x11, 0x11} },
+ { {0x61, 0x3a, 0x03, 0x11, 0x11} },
+ { {0x61, 0x30, 0x03, 0x11, 0x11} },
+ { {0x61, 0x20, 0x03, 0x11, 0x11} },
+ { {0x61, 0x10, 0x03, 0x11, 0x11} }, /* case-105 */
+ { {0x61, 0x45, 0x03, 0x11, 0x10} },
+ { {0x61, 0x3a, 0x03, 0x11, 0x10} },
+ { {0x61, 0x30, 0x03, 0x11, 0x10} },
+ { {0x61, 0x20, 0x03, 0x11, 0x10} },
+ { {0x61, 0x10, 0x03, 0x11, 0x10} }, /* case-110 */
+ { {0x61, 0x08, 0x03, 0x11, 0x14} },
+ { {0x61, 0x08, 0x03, 0x10, 0x14} },
+ { {0x51, 0x08, 0x03, 0x10, 0x54} },
+ { {0x51, 0x08, 0x03, 0x10, 0x55} },
+ { {0x51, 0x08, 0x07, 0x10, 0x54} }, /* case-115 */
+ { {0x51, 0x45, 0x03, 0x10, 0x50} },
+ { {0x51, 0x3a, 0x03, 0x10, 0x50} },
+ { {0x51, 0x30, 0x03, 0x10, 0x50} },
+ { {0x51, 0x20, 0x03, 0x10, 0x50} },
+ { {0x51, 0x10, 0x03, 0x10, 0x50} } /* case-120 */
+};
+
+/* rssi in percentage % (dbm = % - 100) */
+static const u8 wl_rssi_step_8822c[] = {60, 50, 44, 30};
+static const u8 bt_rssi_step_8822c[] = {8, 15, 20, 25};
+static const struct coex_5g_afh_map afh_5g_8822c[] = { {0, 0, 0} };
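Per the comment above wl_rssi_step_8822c, percentage RSSI converts to dBm by subtracting 100, so the steps {60, 50, 44, 30} correspond to -40, -50, -56 and -70 dBm. A one-line helper capturing that rule (name is illustrative):

static inline int rtw_rssi_pct_to_dbm(u8 rssi_pct)
{
	/* e.g. 60 -> -40 dBm, 30 -> -70 dBm */
	return (int)rssi_pct - 100;
}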
+
+/* wl_tx_dec_power, bt_tx_dec_power, wl_rx_gain, bt_rx_lna_constrain */
+static const struct coex_rf_para rf_para_tx_8822c[] = {
+ {0, 0, false, 7}, /* for normal */
+ {0, 16, false, 7}, /* for WL-CPT */
+ {8, 17, true, 4},
+ {7, 18, true, 4},
+ {6, 19, true, 4},
+ {5, 20, true, 4}
+};
+
+static const struct coex_rf_para rf_para_rx_8822c[] = {
+ {0, 0, false, 7}, /* for normal */
+ {0, 16, false, 7}, /* for WL-CPT */
+ {3, 24, true, 5},
+ {2, 26, true, 5},
+ {1, 27, true, 5},
+ {0, 28, true, 5}
+};
+
+static_assert(ARRAY_SIZE(rf_para_tx_8822c) == ARRAY_SIZE(rf_para_rx_8822c));
+
struct rtw_chip_info rtw8822c_hw_spec = {
.ops = &rtw8822c_ops,
.id = RTW_CHIP_TYPE_8822C,
@@ -2272,6 +2589,32 @@ struct rtw_chip_info rtw8822c_hw_spec = {
.rf_tbl = {&rtw8822c_rf_a_tbl, &rtw8822c_rf_b_tbl},
.rfe_defs = rtw8822c_rfe_defs,
.rfe_defs_size = ARRAY_SIZE(rtw8822c_rfe_defs),
+
+ .coex_para_ver = 0x19062706,
+ .bt_desired_ver = 0x6,
+ .scbd_support = true,
+ .new_scbd10_def = true,
+ .pstdma_type = COEX_PSTDMA_FORCE_LPSOFF,
+ .bt_rssi_type = COEX_BTRSSI_DBM,
+ .ant_isolation = 15,
+ .rssi_tolerance = 2,
+ .wl_rssi_step = wl_rssi_step_8822c,
+ .bt_rssi_step = bt_rssi_step_8822c,
+ .table_sant_num = ARRAY_SIZE(table_sant_8822c),
+ .table_sant = table_sant_8822c,
+ .table_nsant_num = ARRAY_SIZE(table_nsant_8822c),
+ .table_nsant = table_nsant_8822c,
+ .tdma_sant_num = ARRAY_SIZE(tdma_sant_8822c),
+ .tdma_sant = tdma_sant_8822c,
+ .tdma_nsant_num = ARRAY_SIZE(tdma_nsant_8822c),
+ .tdma_nsant = tdma_nsant_8822c,
+ .wl_rf_para_num = ARRAY_SIZE(rf_para_tx_8822c),
+ .wl_rf_para_tx = rf_para_tx_8822c,
+ .wl_rf_para_rx = rf_para_rx_8822c,
+ .bt_afh_span_bw20 = 0x24,
+ .bt_afh_span_bw40 = 0x36,
+ .afh_5g_num = ARRAY_SIZE(afh_5g_8822c),
+ .afh_5g = afh_5g_8822c,
};
EXPORT_SYMBOL(rtw8822c_hw_spec);
diff --git a/drivers/net/wireless/realtek/rtw88/rtw8822c_table.c b/drivers/net/wireless/realtek/rtw88/rtw8822c_table.c
index 18e609a69829..6c7eaa75b98b 100644
--- a/drivers/net/wireless/realtek/rtw88/rtw8822c_table.c
+++ b/drivers/net/wireless/realtek/rtw88/rtw8822c_table.c
@@ -9403,885 +9403,1762 @@ static const u32 rtw8822c_rf_b[] = {
RTW_DECL_TABLE_RF_RADIO(rtw8822c_rf_b, B);
-static const u8 rtw8822c_txpwr_lmt_type0[] = {
- 0, 0, 0, 0, 1, 72, 2, 0, 0, 0, 1, 60,
- 0, 0, 0, 0, 2, 72, 2, 0, 0, 0, 2, 60,
- 0, 0, 0, 0, 3, 76, 2, 0, 0, 0, 3, 60,
- 0, 0, 0, 0, 4, 76, 2, 0, 0, 0, 4, 60,
- 0, 0, 0, 0, 5, 76, 2, 0, 0, 0, 5, 60,
- 0, 0, 0, 0, 6, 76, 2, 0, 0, 0, 6, 60,
- 0, 0, 0, 0, 7, 76, 2, 0, 0, 0, 7, 60,
- 0, 0, 0, 0, 8, 76, 2, 0, 0, 0, 8, 60,
- 0, 0, 0, 0, 9, 76, 2, 0, 0, 0, 9, 60,
- 0, 0, 0, 0, 10, 72, 2, 0, 0, 0, 10, 60,
- 0, 0, 0, 0, 11, 72, 2, 0, 0, 0, 11, 60,
- 0, 0, 0, 0, 12, 52, 2, 0, 0, 0, 12, 60,
- 0, 0, 0, 0, 13, 48, 2, 0, 0, 0, 13, 60,
- 0, 0, 0, 0, 14, 127, 2, 0, 0, 0, 14, 127,
- 0, 0, 0, 1, 1, 52, 2, 0, 0, 1, 1, 60,
- 0, 0, 0, 1, 2, 60, 2, 0, 0, 1, 2, 60,
- 0, 0, 0, 1, 3, 64, 2, 0, 0, 1, 3, 60,
- 0, 0, 0, 1, 4, 68, 2, 0, 0, 1, 4, 60,
- 0, 0, 0, 1, 5, 76, 2, 0, 0, 1, 5, 60,
- 0, 0, 0, 1, 6, 76, 2, 0, 0, 1, 6, 60,
- 0, 0, 0, 1, 7, 76, 2, 0, 0, 1, 7, 60,
- 0, 0, 0, 1, 8, 68, 2, 0, 0, 1, 8, 60,
- 0, 0, 0, 1, 9, 64, 2, 0, 0, 1, 9, 60,
- 0, 0, 0, 1, 10, 60, 2, 0, 0, 1, 10, 60,
- 0, 0, 0, 1, 11, 52, 2, 0, 0, 1, 11, 60,
- 0, 0, 0, 1, 12, 40, 2, 0, 0, 1, 12, 60,
- 0, 0, 0, 1, 13, 28, 2, 0, 0, 1, 13, 60,
- 0, 0, 0, 1, 14, 127, 2, 0, 0, 1, 14, 127,
- 0, 0, 0, 2, 1, 52, 2, 0, 0, 2, 1, 60,
- 0, 0, 0, 2, 2, 60, 2, 0, 0, 2, 2, 60,
- 0, 0, 0, 2, 3, 64, 2, 0, 0, 2, 3, 60,
- 0, 0, 0, 2, 4, 68, 2, 0, 0, 2, 4, 60,
- 0, 0, 0, 2, 5, 76, 2, 0, 0, 2, 5, 60,
- 0, 0, 0, 2, 6, 76, 2, 0, 0, 2, 6, 60,
- 0, 0, 0, 2, 7, 76, 2, 0, 0, 2, 7, 60,
- 0, 0, 0, 2, 8, 68, 2, 0, 0, 2, 8, 60,
- 0, 0, 0, 2, 9, 64, 2, 0, 0, 2, 9, 60,
- 0, 0, 0, 2, 10, 60, 2, 0, 0, 2, 10, 60,
- 0, 0, 0, 2, 11, 52, 2, 0, 0, 2, 11, 60,
- 0, 0, 0, 2, 12, 40, 2, 0, 0, 2, 12, 60,
- 0, 0, 0, 2, 13, 28, 2, 0, 0, 2, 13, 60,
- 0, 0, 0, 2, 14, 127, 2, 0, 0, 2, 14, 127,
- 0, 0, 0, 3, 1, 52, 2, 0, 0, 3, 1, 36,
- 0, 0, 0, 3, 2, 60, 2, 0, 0, 3, 2, 36,
- 0, 0, 0, 3, 3, 64, 2, 0, 0, 3, 3, 36,
- 0, 0, 0, 3, 4, 68, 2, 0, 0, 3, 4, 36,
- 0, 0, 0, 3, 5, 76, 2, 0, 0, 3, 5, 36,
- 0, 0, 0, 3, 6, 76, 2, 0, 0, 3, 6, 36,
- 0, 0, 0, 3, 7, 76, 2, 0, 0, 3, 7, 36,
- 0, 0, 0, 3, 8, 68, 2, 0, 0, 3, 8, 36,
- 0, 0, 0, 3, 9, 64, 2, 0, 0, 3, 9, 36,
- 0, 0, 0, 3, 10, 60, 2, 0, 0, 3, 10, 36,
- 0, 0, 0, 3, 11, 52, 2, 0, 0, 3, 11, 36,
- 0, 0, 0, 3, 12, 40, 2, 0, 0, 3, 12, 36,
- 0, 0, 0, 3, 13, 28, 2, 0, 0, 3, 13, 36,
- 0, 0, 0, 3, 14, 127, 2, 0, 0, 3, 14, 127,
- 0, 0, 1, 2, 1, 127, 2, 0, 1, 2, 1, 127,
- 0, 0, 1, 2, 2, 127, 2, 0, 1, 2, 2, 127,
- 0, 0, 1, 2, 3, 52, 2, 0, 1, 2, 3, 60,
- 0, 0, 1, 2, 4, 52, 2, 0, 1, 2, 4, 60,
- 0, 0, 1, 2, 5, 60, 2, 0, 1, 2, 5, 60,
- 0, 0, 1, 2, 6, 64, 2, 0, 1, 2, 6, 60,
- 0, 0, 1, 2, 7, 60, 2, 0, 1, 2, 7, 60,
- 0, 0, 1, 2, 8, 52, 2, 0, 1, 2, 8, 60,
- 0, 0, 1, 2, 9, 52, 2, 0, 1, 2, 9, 60,
- 0, 0, 1, 2, 10, 40, 2, 0, 1, 2, 10, 60,
- 0, 0, 1, 2, 11, 28, 2, 0, 1, 2, 11, 60,
- 0, 0, 1, 2, 12, 127, 2, 0, 1, 2, 12, 127,
- 0, 0, 1, 2, 13, 127, 2, 0, 1, 2, 13, 127,
- 0, 0, 1, 2, 14, 127, 2, 0, 1, 2, 14, 127,
- 0, 0, 1, 3, 1, 127, 2, 0, 1, 3, 1, 127,
- 0, 0, 1, 3, 2, 127, 2, 0, 1, 3, 2, 127,
- 0, 0, 1, 3, 3, 48, 2, 0, 1, 3, 3, 36,
- 0, 0, 1, 3, 4, 48, 2, 0, 1, 3, 4, 36,
- 0, 0, 1, 3, 5, 60, 2, 0, 1, 3, 5, 36,
- 0, 0, 1, 3, 6, 64, 2, 0, 1, 3, 6, 36,
- 0, 0, 1, 3, 7, 60, 2, 0, 1, 3, 7, 36,
- 0, 0, 1, 3, 8, 52, 2, 0, 1, 3, 8, 36,
- 0, 0, 1, 3, 9, 52, 2, 0, 1, 3, 9, 36,
- 0, 0, 1, 3, 10, 40, 2, 0, 1, 3, 10, 36,
- 0, 0, 1, 3, 11, 26, 2, 0, 1, 3, 11, 36,
- 0, 0, 1, 3, 12, 127, 2, 0, 1, 3, 12, 127,
- 0, 0, 1, 3, 13, 127, 2, 0, 1, 3, 13, 127,
- 0, 0, 1, 3, 14, 127, 2, 0, 1, 3, 14, 127,
- 0, 1, 0, 1, 36, 74, 2, 1, 0, 1, 36, 62,
- 0, 1, 0, 1, 40, 76, 2, 1, 0, 1, 40, 62,
- 0, 1, 0, 1, 44, 76, 2, 1, 0, 1, 44, 62,
- 0, 1, 0, 1, 48, 76, 2, 1, 0, 1, 48, 62,
- 0, 1, 0, 1, 52, 76, 2, 1, 0, 1, 52, 62,
- 0, 1, 0, 1, 56, 76, 2, 1, 0, 1, 56, 62,
- 0, 1, 0, 1, 60, 76, 2, 1, 0, 1, 60, 62,
- 0, 1, 0, 1, 64, 74, 2, 1, 0, 1, 64, 62,
- 0, 1, 0, 1, 100, 72, 2, 1, 0, 1, 100, 62,
- 0, 1, 0, 1, 104, 76, 2, 1, 0, 1, 104, 62,
- 0, 1, 0, 1, 108, 76, 2, 1, 0, 1, 108, 62,
- 0, 1, 0, 1, 112, 76, 2, 1, 0, 1, 112, 62,
- 0, 1, 0, 1, 116, 76, 2, 1, 0, 1, 116, 62,
- 0, 1, 0, 1, 120, 76, 2, 1, 0, 1, 120, 62,
- 0, 1, 0, 1, 124, 76, 2, 1, 0, 1, 124, 62,
- 0, 1, 0, 1, 128, 76, 2, 1, 0, 1, 128, 62,
- 0, 1, 0, 1, 132, 76, 2, 1, 0, 1, 132, 62,
- 0, 1, 0, 1, 136, 76, 2, 1, 0, 1, 136, 62,
- 0, 1, 0, 1, 140, 72, 2, 1, 0, 1, 140, 62,
- 0, 1, 0, 1, 144, 76, 2, 1, 0, 1, 144, 127,
- 0, 1, 0, 1, 149, 76, 2, 1, 0, 1, 149, -128,
- 0, 1, 0, 1, 153, 76, 2, 1, 0, 1, 153, -128,
- 0, 1, 0, 1, 157, 76, 2, 1, 0, 1, 157, -128,
- 0, 1, 0, 1, 161, 76, 2, 1, 0, 1, 161, -128,
- 0, 1, 0, 1, 165, 76, 2, 1, 0, 1, 165, -128,
- 0, 1, 0, 2, 36, 72, 2, 1, 0, 2, 36, 62,
- 0, 1, 0, 2, 40, 76, 2, 1, 0, 2, 40, 62,
- 0, 1, 0, 2, 44, 76, 2, 1, 0, 2, 44, 62,
- 0, 1, 0, 2, 48, 76, 2, 1, 0, 2, 48, 62,
- 0, 1, 0, 2, 52, 76, 2, 1, 0, 2, 52, 62,
- 0, 1, 0, 2, 56, 76, 2, 1, 0, 2, 56, 62,
- 0, 1, 0, 2, 60, 76, 2, 1, 0, 2, 60, 62,
- 0, 1, 0, 2, 64, 74, 2, 1, 0, 2, 64, 62,
- 0, 1, 0, 2, 100, 70, 2, 1, 0, 2, 100, 62,
- 0, 1, 0, 2, 104, 76, 2, 1, 0, 2, 104, 62,
- 0, 1, 0, 2, 108, 76, 2, 1, 0, 2, 108, 62,
- 0, 1, 0, 2, 112, 76, 2, 1, 0, 2, 112, 62,
- 0, 1, 0, 2, 116, 76, 2, 1, 0, 2, 116, 62,
- 0, 1, 0, 2, 120, 76, 2, 1, 0, 2, 120, 62,
- 0, 1, 0, 2, 124, 76, 2, 1, 0, 2, 124, 62,
- 0, 1, 0, 2, 128, 76, 2, 1, 0, 2, 128, 62,
- 0, 1, 0, 2, 132, 76, 2, 1, 0, 2, 132, 62,
- 0, 1, 0, 2, 136, 76, 2, 1, 0, 2, 136, 62,
- 0, 1, 0, 2, 140, 70, 2, 1, 0, 2, 140, 62,
- 0, 1, 0, 2, 144, 76, 2, 1, 0, 2, 144, 127,
- 0, 1, 0, 2, 149, 76, 2, 1, 0, 2, 149, -128,
- 0, 1, 0, 2, 153, 76, 2, 1, 0, 2, 153, -128,
- 0, 1, 0, 2, 157, 76, 2, 1, 0, 2, 157, -128,
- 0, 1, 0, 2, 161, 76, 2, 1, 0, 2, 161, -128,
- 0, 1, 0, 2, 165, 76, 2, 1, 0, 2, 165, -128,
- 0, 1, 0, 3, 36, 68, 2, 1, 0, 3, 36, 38,
- 0, 1, 0, 3, 40, 68, 2, 1, 0, 3, 40, 38,
- 0, 1, 0, 3, 44, 68, 2, 1, 0, 3, 44, 38,
- 0, 1, 0, 3, 48, 68, 2, 1, 0, 3, 48, 38,
- 0, 1, 0, 3, 52, 68, 2, 1, 0, 3, 52, 38,
- 0, 1, 0, 3, 56, 68, 2, 1, 0, 3, 56, 38,
- 0, 1, 0, 3, 60, 66, 2, 1, 0, 3, 60, 38,
- 0, 1, 0, 3, 64, 68, 2, 1, 0, 3, 64, 38,
- 0, 1, 0, 3, 100, 60, 2, 1, 0, 3, 100, 38,
- 0, 1, 0, 3, 104, 68, 2, 1, 0, 3, 104, 38,
- 0, 1, 0, 3, 108, 68, 2, 1, 0, 3, 108, 38,
- 0, 1, 0, 3, 112, 68, 2, 1, 0, 3, 112, 38,
- 0, 1, 0, 3, 116, 68, 2, 1, 0, 3, 116, 38,
- 0, 1, 0, 3, 120, 68, 2, 1, 0, 3, 120, 38,
- 0, 1, 0, 3, 124, 68, 2, 1, 0, 3, 124, 38,
- 0, 1, 0, 3, 128, 68, 2, 1, 0, 3, 128, 38,
- 0, 1, 0, 3, 132, 68, 2, 1, 0, 3, 132, 38,
- 0, 1, 0, 3, 136, 68, 2, 1, 0, 3, 136, 38,
- 0, 1, 0, 3, 140, 60, 2, 1, 0, 3, 140, 38,
- 0, 1, 0, 3, 144, 68, 2, 1, 0, 3, 144, 127,
- 0, 1, 0, 3, 149, 76, 2, 1, 0, 3, 149, -128,
- 0, 1, 0, 3, 153, 76, 2, 1, 0, 3, 153, -128,
- 0, 1, 0, 3, 157, 76, 2, 1, 0, 3, 157, -128,
- 0, 1, 0, 3, 161, 76, 2, 1, 0, 3, 161, -128,
- 0, 1, 0, 3, 165, 76, 2, 1, 0, 3, 165, -128,
- 0, 1, 1, 2, 38, 66, 2, 1, 1, 2, 38, 64,
- 0, 1, 1, 2, 46, 72, 2, 1, 1, 2, 46, 64,
- 0, 1, 1, 2, 54, 72, 2, 1, 1, 2, 54, 64,
- 0, 1, 1, 2, 62, 64, 2, 1, 1, 2, 62, 64,
- 0, 1, 1, 2, 102, 58, 2, 1, 1, 2, 102, 64,
- 0, 1, 1, 2, 110, 72, 2, 1, 1, 2, 110, 64,
- 0, 1, 1, 2, 118, 72, 2, 1, 1, 2, 118, 64,
- 0, 1, 1, 2, 126, 72, 2, 1, 1, 2, 126, 64,
- 0, 1, 1, 2, 134, 72, 2, 1, 1, 2, 134, 64,
- 0, 1, 1, 2, 142, 72, 2, 1, 1, 2, 142, 127,
- 0, 1, 1, 2, 151, 72, 2, 1, 1, 2, 151, -128,
- 0, 1, 1, 2, 159, 72, 2, 1, 1, 2, 159, -128,
- 0, 1, 1, 3, 38, 60, 2, 1, 1, 3, 38, 40,
- 0, 1, 1, 3, 46, 68, 2, 1, 1, 3, 46, 40,
- 0, 1, 1, 3, 54, 68, 2, 1, 1, 3, 54, 40,
- 0, 1, 1, 3, 62, 58, 2, 1, 1, 3, 62, 40,
- 0, 1, 1, 3, 102, 54, 2, 1, 1, 3, 102, 40,
- 0, 1, 1, 3, 110, 68, 2, 1, 1, 3, 110, 40,
- 0, 1, 1, 3, 118, 68, 2, 1, 1, 3, 118, 40,
- 0, 1, 1, 3, 126, 68, 2, 1, 1, 3, 126, 40,
- 0, 1, 1, 3, 134, 68, 2, 1, 1, 3, 134, 40,
- 0, 1, 1, 3, 142, 68, 2, 1, 1, 3, 142, 127,
- 0, 1, 1, 3, 151, 72, 2, 1, 1, 3, 151, -128,
- 0, 1, 1, 3, 159, 72, 2, 1, 1, 3, 159, -128,
- 0, 1, 2, 4, 42, 64, 2, 1, 2, 4, 42, 64,
- 0, 1, 2, 4, 58, 62, 2, 1, 2, 4, 58, 64,
- 0, 1, 2, 4, 106, 58, 2, 1, 2, 4, 106, 64,
- 0, 1, 2, 4, 122, 72, 2, 1, 2, 4, 122, 64,
- 0, 1, 2, 4, 138, 72, 2, 1, 2, 4, 138, 127,
- 0, 1, 2, 4, 155, 72, 2, 1, 2, 4, 155, -128,
- 0, 1, 2, 5, 42, 54, 2, 1, 2, 5, 42, 40,
- 0, 1, 2, 5, 58, 52, 2, 1, 2, 5, 58, 40,
- 0, 1, 2, 5, 106, 50, 2, 1, 2, 5, 106, 40,
- 0, 1, 2, 5, 122, 66, 2, 1, 2, 5, 122, 40,
- 0, 1, 2, 5, 138, 66, 2, 1, 2, 5, 138, 127,
- 0, 1, 2, 5, 155, 62, 2, 1, 2, 5, 155, -128,
- 1, 0, 0, 0, 1, 68, 3, 0, 0, 0, 1, 72,
- 4, 0, 0, 0, 1, 76, 5, 0, 0, 0, 1, 60,
- 6, 0, 0, 0, 1, 72, 7, 0, 0, 0, 1, 60,
- 8, 0, 0, 0, 1, 72, 1, 0, 0, 0, 2, 68,
- 3, 0, 0, 0, 2, 72, 4, 0, 0, 0, 2, 76,
- 5, 0, 0, 0, 2, 60, 6, 0, 0, 0, 2, 72,
- 7, 0, 0, 0, 2, 60, 8, 0, 0, 0, 2, 72,
- 1, 0, 0, 0, 3, 68, 3, 0, 0, 0, 3, 76,
- 4, 0, 0, 0, 3, 76, 5, 0, 0, 0, 3, 60,
- 6, 0, 0, 0, 3, 76, 7, 0, 0, 0, 3, 60,
- 8, 0, 0, 0, 3, 76, 1, 0, 0, 0, 4, 68,
- 3, 0, 0, 0, 4, 76, 4, 0, 0, 0, 4, 76,
- 5, 0, 0, 0, 4, 60, 6, 0, 0, 0, 4, 76,
- 7, 0, 0, 0, 4, 60, 8, 0, 0, 0, 4, 76,
- 1, 0, 0, 0, 5, 68, 3, 0, 0, 0, 5, 76,
- 4, 0, 0, 0, 5, 76, 5, 0, 0, 0, 5, 60,
- 6, 0, 0, 0, 5, 76, 7, 0, 0, 0, 5, 60,
- 8, 0, 0, 0, 5, 76, 1, 0, 0, 0, 6, 68,
- 3, 0, 0, 0, 6, 76, 4, 0, 0, 0, 6, 76,
- 5, 0, 0, 0, 6, 60, 6, 0, 0, 0, 6, 76,
- 7, 0, 0, 0, 6, 60, 8, 0, 0, 0, 6, 76,
- 1, 0, 0, 0, 7, 68, 3, 0, 0, 0, 7, 76,
- 4, 0, 0, 0, 7, 76, 5, 0, 0, 0, 7, 60,
- 6, 0, 0, 0, 7, 76, 7, 0, 0, 0, 7, 60,
- 8, 0, 0, 0, 7, 76, 1, 0, 0, 0, 8, 68,
- 3, 0, 0, 0, 8, 76, 4, 0, 0, 0, 8, 76,
- 5, 0, 0, 0, 8, 60, 6, 0, 0, 0, 8, 76,
- 7, 0, 0, 0, 8, 60, 8, 0, 0, 0, 8, 76,
- 1, 0, 0, 0, 9, 68, 3, 0, 0, 0, 9, 76,
- 4, 0, 0, 0, 9, 76, 5, 0, 0, 0, 9, 60,
- 6, 0, 0, 0, 9, 76, 7, 0, 0, 0, 9, 60,
- 8, 0, 0, 0, 9, 76, 1, 0, 0, 0, 10, 68,
- 3, 0, 0, 0, 10, 72, 4, 0, 0, 0, 10, 76,
- 5, 0, 0, 0, 10, 60, 6, 0, 0, 0, 10, 72,
- 7, 0, 0, 0, 10, 60, 8, 0, 0, 0, 10, 72,
- 1, 0, 0, 0, 11, 68, 3, 0, 0, 0, 11, 72,
- 4, 0, 0, 0, 11, 76, 5, 0, 0, 0, 11, 60,
- 6, 0, 0, 0, 11, 72, 7, 0, 0, 0, 11, 60,
- 8, 0, 0, 0, 11, 72, 1, 0, 0, 0, 12, 68,
- 3, 0, 0, 0, 12, 52, 4, 0, 0, 0, 12, 76,
- 5, 0, 0, 0, 12, 60, 6, 0, 0, 0, 12, 52,
- 7, 0, 0, 0, 12, 60, 8, 0, 0, 0, 12, 52,
- 1, 0, 0, 0, 13, 68, 3, 0, 0, 0, 13, 48,
- 4, 0, 0, 0, 13, 76, 5, 0, 0, 0, 13, 60,
- 6, 0, 0, 0, 13, 48, 7, 0, 0, 0, 13, 60,
- 8, 0, 0, 0, 13, 48, 1, 0, 0, 0, 14, 68,
- 3, 0, 0, 0, 14, 127, 4, 0, 0, 0, 14, 127,
- 5, 0, 0, 0, 14, 127, 6, 0, 0, 0, 14, 127,
- 7, 0, 0, 0, 14, 127, 8, 0, 0, 0, 14, 127,
- 1, 0, 0, 1, 1, 76, 3, 0, 0, 1, 1, 52,
- 4, 0, 0, 1, 1, 76, 5, 0, 0, 1, 1, 60,
- 6, 0, 0, 1, 1, 52, 7, 0, 0, 1, 1, 60,
- 8, 0, 0, 1, 1, 52, 1, 0, 0, 1, 2, 76,
- 3, 0, 0, 1, 2, 60, 4, 0, 0, 1, 2, 76,
- 5, 0, 0, 1, 2, 60, 6, 0, 0, 1, 2, 60,
- 7, 0, 0, 1, 2, 60, 8, 0, 0, 1, 2, 60,
- 1, 0, 0, 1, 3, 76, 3, 0, 0, 1, 3, 64,
- 4, 0, 0, 1, 3, 76, 5, 0, 0, 1, 3, 60,
- 6, 0, 0, 1, 3, 64, 7, 0, 0, 1, 3, 60,
- 8, 0, 0, 1, 3, 64, 1, 0, 0, 1, 4, 76,
- 3, 0, 0, 1, 4, 68, 4, 0, 0, 1, 4, 76,
- 5, 0, 0, 1, 4, 60, 6, 0, 0, 1, 4, 68,
- 7, 0, 0, 1, 4, 60, 8, 0, 0, 1, 4, 68,
- 1, 0, 0, 1, 5, 76, 3, 0, 0, 1, 5, 76,
- 4, 0, 0, 1, 5, 76, 5, 0, 0, 1, 5, 60,
- 6, 0, 0, 1, 5, 76, 7, 0, 0, 1, 5, 60,
- 8, 0, 0, 1, 5, 76, 1, 0, 0, 1, 6, 76,
- 3, 0, 0, 1, 6, 76, 4, 0, 0, 1, 6, 76,
- 5, 0, 0, 1, 6, 60, 6, 0, 0, 1, 6, 76,
- 7, 0, 0, 1, 6, 60, 8, 0, 0, 1, 6, 76,
- 1, 0, 0, 1, 7, 76, 3, 0, 0, 1, 7, 76,
- 4, 0, 0, 1, 7, 76, 5, 0, 0, 1, 7, 60,
- 6, 0, 0, 1, 7, 76, 7, 0, 0, 1, 7, 60,
- 8, 0, 0, 1, 7, 76, 1, 0, 0, 1, 8, 76,
- 3, 0, 0, 1, 8, 68, 4, 0, 0, 1, 8, 76,
- 5, 0, 0, 1, 8, 60, 6, 0, 0, 1, 8, 68,
- 7, 0, 0, 1, 8, 60, 8, 0, 0, 1, 8, 68,
- 1, 0, 0, 1, 9, 76, 3, 0, 0, 1, 9, 64,
- 4, 0, 0, 1, 9, 76, 5, 0, 0, 1, 9, 60,
- 6, 0, 0, 1, 9, 64, 7, 0, 0, 1, 9, 60,
- 8, 0, 0, 1, 9, 64, 1, 0, 0, 1, 10, 76,
- 3, 0, 0, 1, 10, 60, 4, 0, 0, 1, 10, 76,
- 5, 0, 0, 1, 10, 60, 6, 0, 0, 1, 10, 60,
- 7, 0, 0, 1, 10, 60, 8, 0, 0, 1, 10, 60,
- 1, 0, 0, 1, 11, 76, 3, 0, 0, 1, 11, 52,
- 4, 0, 0, 1, 11, 76, 5, 0, 0, 1, 11, 60,
- 6, 0, 0, 1, 11, 52, 7, 0, 0, 1, 11, 60,
- 8, 0, 0, 1, 11, 52, 1, 0, 0, 1, 12, 76,
- 3, 0, 0, 1, 12, 40, 4, 0, 0, 1, 12, 76,
- 5, 0, 0, 1, 12, 60, 6, 0, 0, 1, 12, 40,
- 7, 0, 0, 1, 12, 60, 8, 0, 0, 1, 12, 40,
- 1, 0, 0, 1, 13, 76, 3, 0, 0, 1, 13, 28,
- 4, 0, 0, 1, 13, 70, 5, 0, 0, 1, 13, 60,
- 6, 0, 0, 1, 13, 28, 7, 0, 0, 1, 13, 60,
- 8, 0, 0, 1, 13, 28, 1, 0, 0, 1, 14, 127,
- 3, 0, 0, 1, 14, 127, 4, 0, 0, 1, 14, 127,
- 5, 0, 0, 1, 14, 127, 6, 0, 0, 1, 14, 127,
- 7, 0, 0, 1, 14, 127, 8, 0, 0, 1, 14, 127,
- 1, 0, 0, 2, 1, 76, 3, 0, 0, 2, 1, 52,
- 4, 0, 0, 2, 1, 76, 5, 0, 0, 2, 1, 60,
- 6, 0, 0, 2, 1, 52, 7, 0, 0, 2, 1, 60,
- 8, 0, 0, 2, 1, 52, 1, 0, 0, 2, 2, 76,
- 3, 0, 0, 2, 2, 60, 4, 0, 0, 2, 2, 76,
- 5, 0, 0, 2, 2, 60, 6, 0, 0, 2, 2, 60,
- 7, 0, 0, 2, 2, 60, 8, 0, 0, 2, 2, 60,
- 1, 0, 0, 2, 3, 76, 3, 0, 0, 2, 3, 64,
- 4, 0, 0, 2, 3, 76, 5, 0, 0, 2, 3, 60,
- 6, 0, 0, 2, 3, 64, 7, 0, 0, 2, 3, 60,
- 8, 0, 0, 2, 3, 64, 1, 0, 0, 2, 4, 76,
- 3, 0, 0, 2, 4, 68, 4, 0, 0, 2, 4, 76,
- 5, 0, 0, 2, 4, 60, 6, 0, 0, 2, 4, 68,
- 7, 0, 0, 2, 4, 60, 8, 0, 0, 2, 4, 68,
- 1, 0, 0, 2, 5, 76, 3, 0, 0, 2, 5, 76,
- 4, 0, 0, 2, 5, 76, 5, 0, 0, 2, 5, 60,
- 6, 0, 0, 2, 5, 76, 7, 0, 0, 2, 5, 60,
- 8, 0, 0, 2, 5, 76, 1, 0, 0, 2, 6, 76,
- 3, 0, 0, 2, 6, 76, 4, 0, 0, 2, 6, 76,
- 5, 0, 0, 2, 6, 60, 6, 0, 0, 2, 6, 76,
- 7, 0, 0, 2, 6, 60, 8, 0, 0, 2, 6, 76,
- 1, 0, 0, 2, 7, 76, 3, 0, 0, 2, 7, 76,
- 4, 0, 0, 2, 7, 76, 5, 0, 0, 2, 7, 60,
- 6, 0, 0, 2, 7, 76, 7, 0, 0, 2, 7, 60,
- 8, 0, 0, 2, 7, 76, 1, 0, 0, 2, 8, 76,
- 3, 0, 0, 2, 8, 68, 4, 0, 0, 2, 8, 76,
- 5, 0, 0, 2, 8, 60, 6, 0, 0, 2, 8, 68,
- 7, 0, 0, 2, 8, 60, 8, 0, 0, 2, 8, 68,
- 1, 0, 0, 2, 9, 76, 3, 0, 0, 2, 9, 64,
- 4, 0, 0, 2, 9, 76, 5, 0, 0, 2, 9, 60,
- 6, 0, 0, 2, 9, 64, 7, 0, 0, 2, 9, 60,
- 8, 0, 0, 2, 9, 64, 1, 0, 0, 2, 10, 76,
- 3, 0, 0, 2, 10, 60, 4, 0, 0, 2, 10, 76,
- 5, 0, 0, 2, 10, 60, 6, 0, 0, 2, 10, 60,
- 7, 0, 0, 2, 10, 60, 8, 0, 0, 2, 10, 60,
- 1, 0, 0, 2, 11, 76, 3, 0, 0, 2, 11, 52,
- 4, 0, 0, 2, 11, 76, 5, 0, 0, 2, 11, 60,
- 6, 0, 0, 2, 11, 52, 7, 0, 0, 2, 11, 60,
- 8, 0, 0, 2, 11, 52, 1, 0, 0, 2, 12, 76,
- 3, 0, 0, 2, 12, 40, 4, 0, 0, 2, 12, 76,
- 5, 0, 0, 2, 12, 60, 6, 0, 0, 2, 12, 40,
- 7, 0, 0, 2, 12, 60, 8, 0, 0, 2, 12, 40,
- 1, 0, 0, 2, 13, 76, 3, 0, 0, 2, 13, 28,
- 4, 0, 0, 2, 13, 72, 5, 0, 0, 2, 13, 60,
- 6, 0, 0, 2, 13, 28, 7, 0, 0, 2, 13, 60,
- 8, 0, 0, 2, 13, 28, 1, 0, 0, 2, 14, 127,
- 3, 0, 0, 2, 14, 127, 4, 0, 0, 2, 14, 127,
- 5, 0, 0, 2, 14, 127, 6, 0, 0, 2, 14, 127,
- 7, 0, 0, 2, 14, 127, 8, 0, 0, 2, 14, 127,
- 1, 0, 0, 3, 1, 66, 3, 0, 0, 3, 1, 52,
- 4, 0, 0, 3, 1, 68, 5, 0, 0, 3, 1, 36,
- 6, 0, 0, 3, 1, 52, 7, 0, 0, 3, 1, 36,
- 8, 0, 0, 3, 1, 52, 1, 0, 0, 3, 2, 66,
- 3, 0, 0, 3, 2, 60, 4, 0, 0, 3, 2, 70,
- 5, 0, 0, 3, 2, 36, 6, 0, 0, 3, 2, 60,
- 7, 0, 0, 3, 2, 36, 8, 0, 0, 3, 2, 60,
- 1, 0, 0, 3, 3, 66, 3, 0, 0, 3, 3, 64,
- 4, 0, 0, 3, 3, 70, 5, 0, 0, 3, 3, 36,
- 6, 0, 0, 3, 3, 64, 7, 0, 0, 3, 3, 36,
- 8, 0, 0, 3, 3, 64, 1, 0, 0, 3, 4, 66,
- 3, 0, 0, 3, 4, 68, 4, 0, 0, 3, 4, 70,
- 5, 0, 0, 3, 4, 36, 6, 0, 0, 3, 4, 68,
- 7, 0, 0, 3, 4, 36, 8, 0, 0, 3, 4, 68,
- 1, 0, 0, 3, 5, 66, 3, 0, 0, 3, 5, 76,
- 4, 0, 0, 3, 5, 70, 5, 0, 0, 3, 5, 36,
- 6, 0, 0, 3, 5, 76, 7, 0, 0, 3, 5, 36,
- 8, 0, 0, 3, 5, 76, 1, 0, 0, 3, 6, 66,
- 3, 0, 0, 3, 6, 76, 4, 0, 0, 3, 6, 70,
- 5, 0, 0, 3, 6, 36, 6, 0, 0, 3, 6, 76,
- 7, 0, 0, 3, 6, 36, 8, 0, 0, 3, 6, 76,
- 1, 0, 0, 3, 7, 66, 3, 0, 0, 3, 7, 76,
- 4, 0, 0, 3, 7, 70, 5, 0, 0, 3, 7, 36,
- 6, 0, 0, 3, 7, 76, 7, 0, 0, 3, 7, 36,
- 8, 0, 0, 3, 7, 76, 1, 0, 0, 3, 8, 66,
- 3, 0, 0, 3, 8, 68, 4, 0, 0, 3, 8, 70,
- 5, 0, 0, 3, 8, 36, 6, 0, 0, 3, 8, 68,
- 7, 0, 0, 3, 8, 36, 8, 0, 0, 3, 8, 68,
- 1, 0, 0, 3, 9, 66, 3, 0, 0, 3, 9, 64,
- 4, 0, 0, 3, 9, 70, 5, 0, 0, 3, 9, 36,
- 6, 0, 0, 3, 9, 64, 7, 0, 0, 3, 9, 36,
- 8, 0, 0, 3, 9, 64, 1, 0, 0, 3, 10, 66,
- 3, 0, 0, 3, 10, 60, 4, 0, 0, 3, 10, 70,
- 5, 0, 0, 3, 10, 36, 6, 0, 0, 3, 10, 60,
- 7, 0, 0, 3, 10, 36, 8, 0, 0, 3, 10, 60,
- 1, 0, 0, 3, 11, 66, 3, 0, 0, 3, 11, 52,
- 4, 0, 0, 3, 11, 70, 5, 0, 0, 3, 11, 36,
- 6, 0, 0, 3, 11, 52, 7, 0, 0, 3, 11, 36,
- 8, 0, 0, 3, 11, 52, 1, 0, 0, 3, 12, 66,
- 3, 0, 0, 3, 12, 40, 4, 0, 0, 3, 12, 70,
- 5, 0, 0, 3, 12, 36, 6, 0, 0, 3, 12, 40,
- 7, 0, 0, 3, 12, 36, 8, 0, 0, 3, 12, 40,
- 1, 0, 0, 3, 13, 66, 3, 0, 0, 3, 13, 28,
- 4, 0, 0, 3, 13, 62, 5, 0, 0, 3, 13, 36,
- 6, 0, 0, 3, 13, 28, 7, 0, 0, 3, 13, 36,
- 8, 0, 0, 3, 13, 28, 1, 0, 0, 3, 14, 127,
- 3, 0, 0, 3, 14, 127, 4, 0, 0, 3, 14, 127,
- 5, 0, 0, 3, 14, 127, 6, 0, 0, 3, 14, 127,
- 7, 0, 0, 3, 14, 127, 8, 0, 0, 3, 14, 127,
- 1, 0, 1, 2, 1, 127, 3, 0, 1, 2, 1, 127,
- 4, 0, 1, 2, 1, 127, 5, 0, 1, 2, 1, 127,
- 6, 0, 1, 2, 1, 127, 7, 0, 1, 2, 1, 127,
- 8, 0, 1, 2, 1, 127, 1, 0, 1, 2, 2, 127,
- 3, 0, 1, 2, 2, 127, 4, 0, 1, 2, 2, 127,
- 5, 0, 1, 2, 2, 127, 6, 0, 1, 2, 2, 127,
- 7, 0, 1, 2, 2, 127, 8, 0, 1, 2, 2, 127,
- 1, 0, 1, 2, 3, 72, 3, 0, 1, 2, 3, 52,
- 4, 0, 1, 2, 3, 72, 5, 0, 1, 2, 3, 60,
- 6, 0, 1, 2, 3, 52, 7, 0, 1, 2, 3, 60,
- 8, 0, 1, 2, 3, 52, 1, 0, 1, 2, 4, 72,
- 3, 0, 1, 2, 4, 52, 4, 0, 1, 2, 4, 72,
- 5, 0, 1, 2, 4, 60, 6, 0, 1, 2, 4, 52,
- 7, 0, 1, 2, 4, 60, 8, 0, 1, 2, 4, 52,
- 1, 0, 1, 2, 5, 72, 3, 0, 1, 2, 5, 60,
- 4, 0, 1, 2, 5, 72, 5, 0, 1, 2, 5, 60,
- 6, 0, 1, 2, 5, 60, 7, 0, 1, 2, 5, 60,
- 8, 0, 1, 2, 5, 60, 1, 0, 1, 2, 6, 72,
- 3, 0, 1, 2, 6, 64, 4, 0, 1, 2, 6, 72,
- 5, 0, 1, 2, 6, 60, 6, 0, 1, 2, 6, 64,
- 7, 0, 1, 2, 6, 60, 8, 0, 1, 2, 6, 64,
- 1, 0, 1, 2, 7, 72, 3, 0, 1, 2, 7, 60,
- 4, 0, 1, 2, 7, 72, 5, 0, 1, 2, 7, 60,
- 6, 0, 1, 2, 7, 60, 7, 0, 1, 2, 7, 60,
- 8, 0, 1, 2, 7, 60, 1, 0, 1, 2, 8, 72,
- 3, 0, 1, 2, 8, 52, 4, 0, 1, 2, 8, 72,
- 5, 0, 1, 2, 8, 60, 6, 0, 1, 2, 8, 52,
- 7, 0, 1, 2, 8, 60, 8, 0, 1, 2, 8, 52,
- 1, 0, 1, 2, 9, 72, 3, 0, 1, 2, 9, 52,
- 4, 0, 1, 2, 9, 72, 5, 0, 1, 2, 9, 60,
- 6, 0, 1, 2, 9, 52, 7, 0, 1, 2, 9, 60,
- 8, 0, 1, 2, 9, 52, 1, 0, 1, 2, 10, 72,
- 3, 0, 1, 2, 10, 40, 4, 0, 1, 2, 10, 72,
- 5, 0, 1, 2, 10, 60, 6, 0, 1, 2, 10, 40,
- 7, 0, 1, 2, 10, 60, 8, 0, 1, 2, 10, 40,
- 1, 0, 1, 2, 11, 72, 3, 0, 1, 2, 11, 28,
- 4, 0, 1, 2, 11, 70, 5, 0, 1, 2, 11, 60,
- 6, 0, 1, 2, 11, 28, 7, 0, 1, 2, 11, 60,
- 8, 0, 1, 2, 11, 28, 1, 0, 1, 2, 12, 127,
- 3, 0, 1, 2, 12, 127, 4, 0, 1, 2, 12, 127,
- 5, 0, 1, 2, 12, 127, 6, 0, 1, 2, 12, 127,
- 7, 0, 1, 2, 12, 127, 8, 0, 1, 2, 12, 127,
- 1, 0, 1, 2, 13, 127, 3, 0, 1, 2, 13, 127,
- 4, 0, 1, 2, 13, 127, 5, 0, 1, 2, 13, 127,
- 6, 0, 1, 2, 13, 127, 7, 0, 1, 2, 13, 127,
- 8, 0, 1, 2, 13, 127, 1, 0, 1, 2, 14, 127,
- 3, 0, 1, 2, 14, 127, 4, 0, 1, 2, 14, 127,
- 5, 0, 1, 2, 14, 127, 6, 0, 1, 2, 14, 127,
- 7, 0, 1, 2, 14, 127, 8, 0, 1, 2, 14, 127,
- 1, 0, 1, 3, 1, 127, 3, 0, 1, 3, 1, 127,
- 4, 0, 1, 3, 1, 127, 5, 0, 1, 3, 1, 127,
- 6, 0, 1, 3, 1, 127, 7, 0, 1, 3, 1, 127,
- 8, 0, 1, 3, 1, 127, 1, 0, 1, 3, 2, 127,
- 3, 0, 1, 3, 2, 127, 4, 0, 1, 3, 2, 127,
- 5, 0, 1, 3, 2, 127, 6, 0, 1, 3, 2, 127,
- 7, 0, 1, 3, 2, 127, 8, 0, 1, 3, 2, 127,
- 1, 0, 1, 3, 3, 66, 3, 0, 1, 3, 3, 48,
- 4, 0, 1, 3, 3, 66, 5, 0, 1, 3, 3, 36,
- 6, 0, 1, 3, 3, 48, 7, 0, 1, 3, 3, 36,
- 8, 0, 1, 3, 3, 48, 1, 0, 1, 3, 4, 66,
- 3, 0, 1, 3, 4, 48, 4, 0, 1, 3, 4, 70,
- 5, 0, 1, 3, 4, 36, 6, 0, 1, 3, 4, 48,
- 7, 0, 1, 3, 4, 36, 8, 0, 1, 3, 4, 48,
- 1, 0, 1, 3, 5, 66, 3, 0, 1, 3, 5, 60,
- 4, 0, 1, 3, 5, 70, 5, 0, 1, 3, 5, 36,
- 6, 0, 1, 3, 5, 60, 7, 0, 1, 3, 5, 36,
- 8, 0, 1, 3, 5, 60, 1, 0, 1, 3, 6, 66,
- 3, 0, 1, 3, 6, 64, 4, 0, 1, 3, 6, 70,
- 5, 0, 1, 3, 6, 36, 6, 0, 1, 3, 6, 64,
- 7, 0, 1, 3, 6, 36, 8, 0, 1, 3, 6, 64,
- 1, 0, 1, 3, 7, 66, 3, 0, 1, 3, 7, 60,
- 4, 0, 1, 3, 7, 70, 5, 0, 1, 3, 7, 36,
- 6, 0, 1, 3, 7, 60, 7, 0, 1, 3, 7, 36,
- 8, 0, 1, 3, 7, 60, 1, 0, 1, 3, 8, 66,
- 3, 0, 1, 3, 8, 52, 4, 0, 1, 3, 8, 70,
- 5, 0, 1, 3, 8, 36, 6, 0, 1, 3, 8, 52,
- 7, 0, 1, 3, 8, 36, 8, 0, 1, 3, 8, 52,
- 1, 0, 1, 3, 9, 66, 3, 0, 1, 3, 9, 52,
- 4, 0, 1, 3, 9, 70, 5, 0, 1, 3, 9, 36,
- 6, 0, 1, 3, 9, 52, 7, 0, 1, 3, 9, 36,
- 8, 0, 1, 3, 9, 52, 1, 0, 1, 3, 10, 66,
- 3, 0, 1, 3, 10, 40, 4, 0, 1, 3, 10, 70,
- 5, 0, 1, 3, 10, 36, 6, 0, 1, 3, 10, 40,
- 7, 0, 1, 3, 10, 36, 8, 0, 1, 3, 10, 40,
- 1, 0, 1, 3, 11, 66, 3, 0, 1, 3, 11, 26,
- 4, 0, 1, 3, 11, 66, 5, 0, 1, 3, 11, 36,
- 6, 0, 1, 3, 11, 26, 7, 0, 1, 3, 11, 36,
- 8, 0, 1, 3, 11, 26, 1, 0, 1, 3, 12, 127,
- 3, 0, 1, 3, 12, 127, 4, 0, 1, 3, 12, 127,
- 5, 0, 1, 3, 12, 127, 6, 0, 1, 3, 12, 127,
- 7, 0, 1, 3, 12, 127, 8, 0, 1, 3, 12, 127,
- 1, 0, 1, 3, 13, 127, 3, 0, 1, 3, 13, 127,
- 4, 0, 1, 3, 13, 127, 5, 0, 1, 3, 13, 127,
- 6, 0, 1, 3, 13, 127, 7, 0, 1, 3, 13, 127,
- 8, 0, 1, 3, 13, 127, 1, 0, 1, 3, 14, 127,
- 3, 0, 1, 3, 14, 127, 4, 0, 1, 3, 14, 127,
- 5, 0, 1, 3, 14, 127, 6, 0, 1, 3, 14, 127,
- 7, 0, 1, 3, 14, 127, 8, 0, 1, 3, 14, 127,
- 1, 1, 0, 1, 36, 60, 3, 1, 0, 1, 36, 62,
- 4, 1, 0, 1, 36, 76, 5, 1, 0, 1, 36, 62,
- 6, 1, 0, 1, 36, 64, 7, 1, 0, 1, 36, 54,
- 8, 1, 0, 1, 36, 62, 1, 1, 0, 1, 40, 62,
- 3, 1, 0, 1, 40, 62, 4, 1, 0, 1, 40, 76,
- 5, 1, 0, 1, 40, 62, 6, 1, 0, 1, 40, 64,
- 7, 1, 0, 1, 40, 54, 8, 1, 0, 1, 40, 62,
- 1, 1, 0, 1, 44, 62, 3, 1, 0, 1, 44, 62,
- 4, 1, 0, 1, 44, 76, 5, 1, 0, 1, 44, 62,
- 6, 1, 0, 1, 44, 64, 7, 1, 0, 1, 44, 54,
- 8, 1, 0, 1, 44, 62, 1, 1, 0, 1, 48, 62,
- 3, 1, 0, 1, 48, 62, 4, 1, 0, 1, 48, 76,
- 5, 1, 0, 1, 48, 62, 6, 1, 0, 1, 48, 64,
- 7, 1, 0, 1, 48, 54, 8, 1, 0, 1, 48, 62,
- 1, 1, 0, 1, 52, 62, 3, 1, 0, 1, 52, 64,
- 4, 1, 0, 1, 52, 76, 5, 1, 0, 1, 52, 62,
- 6, 1, 0, 1, 52, 76, 7, 1, 0, 1, 52, 54,
- 8, 1, 0, 1, 52, 76, 1, 1, 0, 1, 56, 62,
- 3, 1, 0, 1, 56, 64, 4, 1, 0, 1, 56, 76,
- 5, 1, 0, 1, 56, 62, 6, 1, 0, 1, 56, 76,
- 7, 1, 0, 1, 56, 54, 8, 1, 0, 1, 56, 76,
- 1, 1, 0, 1, 60, 62, 3, 1, 0, 1, 60, 64,
- 4, 1, 0, 1, 60, 76, 5, 1, 0, 1, 60, 62,
- 6, 1, 0, 1, 60, 76, 7, 1, 0, 1, 60, 54,
- 8, 1, 0, 1, 60, 76, 1, 1, 0, 1, 64, 60,
- 3, 1, 0, 1, 64, 64, 4, 1, 0, 1, 64, 76,
- 5, 1, 0, 1, 64, 62, 6, 1, 0, 1, 64, 74,
- 7, 1, 0, 1, 64, 54, 8, 1, 0, 1, 64, 74,
- 1, 1, 0, 1, 100, 76, 3, 1, 0, 1, 100, 72,
- 4, 1, 0, 1, 100, 76, 5, 1, 0, 1, 100, 62,
- 6, 1, 0, 1, 100, 72, 7, 1, 0, 1, 100, 54,
- 8, 1, 0, 1, 100, 72, 1, 1, 0, 1, 104, 76,
- 3, 1, 0, 1, 104, 76, 4, 1, 0, 1, 104, 76,
- 5, 1, 0, 1, 104, 62, 6, 1, 0, 1, 104, 76,
- 7, 1, 0, 1, 104, 54, 8, 1, 0, 1, 104, 76,
- 1, 1, 0, 1, 108, 76, 3, 1, 0, 1, 108, 76,
- 4, 1, 0, 1, 108, 76, 5, 1, 0, 1, 108, 62,
- 6, 1, 0, 1, 108, 76, 7, 1, 0, 1, 108, 54,
- 8, 1, 0, 1, 108, 76, 1, 1, 0, 1, 112, 76,
- 3, 1, 0, 1, 112, 76, 4, 1, 0, 1, 112, 76,
- 5, 1, 0, 1, 112, 62, 6, 1, 0, 1, 112, 76,
- 7, 1, 0, 1, 112, 54, 8, 1, 0, 1, 112, 76,
- 1, 1, 0, 1, 116, 76, 3, 1, 0, 1, 116, 76,
- 4, 1, 0, 1, 116, 76, 5, 1, 0, 1, 116, 62,
- 6, 1, 0, 1, 116, 76, 7, 1, 0, 1, 116, 54,
- 8, 1, 0, 1, 116, 76, 1, 1, 0, 1, 120, 76,
- 3, 1, 0, 1, 120, 127, 4, 1, 0, 1, 120, 76,
- 5, 1, 0, 1, 120, 127, 6, 1, 0, 1, 120, 76,
- 7, 1, 0, 1, 120, 54, 8, 1, 0, 1, 120, 76,
- 1, 1, 0, 1, 124, 76, 3, 1, 0, 1, 124, 127,
- 4, 1, 0, 1, 124, 76, 5, 1, 0, 1, 124, 127,
- 6, 1, 0, 1, 124, 76, 7, 1, 0, 1, 124, 54,
- 8, 1, 0, 1, 124, 76, 1, 1, 0, 1, 128, 76,
- 3, 1, 0, 1, 128, 127, 4, 1, 0, 1, 128, 76,
- 5, 1, 0, 1, 128, 127, 6, 1, 0, 1, 128, 76,
- 7, 1, 0, 1, 128, 54, 8, 1, 0, 1, 128, 76,
- 1, 1, 0, 1, 132, 76, 3, 1, 0, 1, 132, 76,
- 4, 1, 0, 1, 132, 76, 5, 1, 0, 1, 132, 62,
- 6, 1, 0, 1, 132, 76, 7, 1, 0, 1, 132, 54,
- 8, 1, 0, 1, 132, 76, 1, 1, 0, 1, 136, 76,
- 3, 1, 0, 1, 136, 76, 4, 1, 0, 1, 136, 76,
- 5, 1, 0, 1, 136, 62, 6, 1, 0, 1, 136, 76,
- 7, 1, 0, 1, 136, 127, 8, 1, 0, 1, 136, 76,
- 1, 1, 0, 1, 140, 76, 3, 1, 0, 1, 140, 72,
- 4, 1, 0, 1, 140, 76, 5, 1, 0, 1, 140, 62,
- 6, 1, 0, 1, 140, 72, 7, 1, 0, 1, 140, 127,
- 8, 1, 0, 1, 140, 72, 1, 1, 0, 1, 144, 127,
- 3, 1, 0, 1, 144, 76, 4, 1, 0, 1, 144, 76,
- 5, 1, 0, 1, 144, 127, 6, 1, 0, 1, 144, 76,
- 7, 1, 0, 1, 144, 127, 8, 1, 0, 1, 144, 76,
- 1, 1, 0, 1, 149, 127, 3, 1, 0, 1, 149, 76,
- 4, 1, 0, 1, 149, 74, 5, 1, 0, 1, 149, 76,
- 6, 1, 0, 1, 149, 76, 7, 1, 0, 1, 149, 54,
- 8, 1, 0, 1, 149, 76, 1, 1, 0, 1, 153, 127,
- 3, 1, 0, 1, 153, 76, 4, 1, 0, 1, 153, 74,
- 5, 1, 0, 1, 153, 76, 6, 1, 0, 1, 153, 76,
- 7, 1, 0, 1, 153, 54, 8, 1, 0, 1, 153, 76,
- 1, 1, 0, 1, 157, 127, 3, 1, 0, 1, 157, 76,
- 4, 1, 0, 1, 157, 74, 5, 1, 0, 1, 157, 76,
- 6, 1, 0, 1, 157, 76, 7, 1, 0, 1, 157, 54,
- 8, 1, 0, 1, 157, 76, 1, 1, 0, 1, 161, 127,
- 3, 1, 0, 1, 161, 76, 4, 1, 0, 1, 161, 74,
- 5, 1, 0, 1, 161, 76, 6, 1, 0, 1, 161, 76,
- 7, 1, 0, 1, 161, 54, 8, 1, 0, 1, 161, 76,
- 1, 1, 0, 1, 165, 127, 3, 1, 0, 1, 165, 76,
- 4, 1, 0, 1, 165, 74, 5, 1, 0, 1, 165, 76,
- 6, 1, 0, 1, 165, 76, 7, 1, 0, 1, 165, 54,
- 8, 1, 0, 1, 165, 76, 1, 1, 0, 2, 36, 62,
- 3, 1, 0, 2, 36, 62, 4, 1, 0, 2, 36, 76,
- 5, 1, 0, 2, 36, 62, 6, 1, 0, 2, 36, 64,
- 7, 1, 0, 2, 36, 54, 8, 1, 0, 2, 36, 62,
- 1, 1, 0, 2, 40, 62, 3, 1, 0, 2, 40, 62,
- 4, 1, 0, 2, 40, 76, 5, 1, 0, 2, 40, 62,
- 6, 1, 0, 2, 40, 64, 7, 1, 0, 2, 40, 54,
- 8, 1, 0, 2, 40, 62, 1, 1, 0, 2, 44, 62,
- 3, 1, 0, 2, 44, 62, 4, 1, 0, 2, 44, 76,
- 5, 1, 0, 2, 44, 62, 6, 1, 0, 2, 44, 64,
- 7, 1, 0, 2, 44, 54, 8, 1, 0, 2, 44, 62,
- 1, 1, 0, 2, 48, 62, 3, 1, 0, 2, 48, 62,
- 4, 1, 0, 2, 48, 76, 5, 1, 0, 2, 48, 62,
- 6, 1, 0, 2, 48, 64, 7, 1, 0, 2, 48, 54,
- 8, 1, 0, 2, 48, 62, 1, 1, 0, 2, 52, 62,
- 3, 1, 0, 2, 52, 64, 4, 1, 0, 2, 52, 76,
- 5, 1, 0, 2, 52, 62, 6, 1, 0, 2, 52, 76,
- 7, 1, 0, 2, 52, 54, 8, 1, 0, 2, 52, 76,
- 1, 1, 0, 2, 56, 62, 3, 1, 0, 2, 56, 64,
- 4, 1, 0, 2, 56, 76, 5, 1, 0, 2, 56, 62,
- 6, 1, 0, 2, 56, 76, 7, 1, 0, 2, 56, 54,
- 8, 1, 0, 2, 56, 76, 1, 1, 0, 2, 60, 62,
- 3, 1, 0, 2, 60, 64, 4, 1, 0, 2, 60, 76,
- 5, 1, 0, 2, 60, 62, 6, 1, 0, 2, 60, 76,
- 7, 1, 0, 2, 60, 54, 8, 1, 0, 2, 60, 76,
- 1, 1, 0, 2, 64, 60, 3, 1, 0, 2, 64, 64,
- 4, 1, 0, 2, 64, 74, 5, 1, 0, 2, 64, 62,
- 6, 1, 0, 2, 64, 74, 7, 1, 0, 2, 64, 54,
- 8, 1, 0, 2, 64, 74, 1, 1, 0, 2, 100, 76,
- 3, 1, 0, 2, 100, 70, 4, 1, 0, 2, 100, 76,
- 5, 1, 0, 2, 100, 62, 6, 1, 0, 2, 100, 70,
- 7, 1, 0, 2, 100, 54, 8, 1, 0, 2, 100, 70,
- 1, 1, 0, 2, 104, 76, 3, 1, 0, 2, 104, 76,
- 4, 1, 0, 2, 104, 76, 5, 1, 0, 2, 104, 62,
- 6, 1, 0, 2, 104, 76, 7, 1, 0, 2, 104, 54,
- 8, 1, 0, 2, 104, 76, 1, 1, 0, 2, 108, 76,
- 3, 1, 0, 2, 108, 76, 4, 1, 0, 2, 108, 76,
- 5, 1, 0, 2, 108, 62, 6, 1, 0, 2, 108, 76,
- 7, 1, 0, 2, 108, 54, 8, 1, 0, 2, 108, 76,
- 1, 1, 0, 2, 112, 76, 3, 1, 0, 2, 112, 76,
- 4, 1, 0, 2, 112, 76, 5, 1, 0, 2, 112, 62,
- 6, 1, 0, 2, 112, 76, 7, 1, 0, 2, 112, 54,
- 8, 1, 0, 2, 112, 76, 1, 1, 0, 2, 116, 76,
- 3, 1, 0, 2, 116, 76, 4, 1, 0, 2, 116, 76,
- 5, 1, 0, 2, 116, 62, 6, 1, 0, 2, 116, 76,
- 7, 1, 0, 2, 116, 54, 8, 1, 0, 2, 116, 76,
- 1, 1, 0, 2, 120, 76, 3, 1, 0, 2, 120, 127,
- 4, 1, 0, 2, 120, 76, 5, 1, 0, 2, 120, 127,
- 6, 1, 0, 2, 120, 76, 7, 1, 0, 2, 120, 54,
- 8, 1, 0, 2, 120, 76, 1, 1, 0, 2, 124, 76,
- 3, 1, 0, 2, 124, 127, 4, 1, 0, 2, 124, 76,
- 5, 1, 0, 2, 124, 127, 6, 1, 0, 2, 124, 76,
- 7, 1, 0, 2, 124, 54, 8, 1, 0, 2, 124, 76,
- 1, 1, 0, 2, 128, 76, 3, 1, 0, 2, 128, 127,
- 4, 1, 0, 2, 128, 76, 5, 1, 0, 2, 128, 127,
- 6, 1, 0, 2, 128, 76, 7, 1, 0, 2, 128, 54,
- 8, 1, 0, 2, 128, 76, 1, 1, 0, 2, 132, 76,
- 3, 1, 0, 2, 132, 76, 4, 1, 0, 2, 132, 76,
- 5, 1, 0, 2, 132, 62, 6, 1, 0, 2, 132, 76,
- 7, 1, 0, 2, 132, 54, 8, 1, 0, 2, 132, 76,
- 1, 1, 0, 2, 136, 76, 3, 1, 0, 2, 136, 76,
- 4, 1, 0, 2, 136, 76, 5, 1, 0, 2, 136, 62,
- 6, 1, 0, 2, 136, 76, 7, 1, 0, 2, 136, 127,
- 8, 1, 0, 2, 136, 76, 1, 1, 0, 2, 140, 76,
- 3, 1, 0, 2, 140, 70, 4, 1, 0, 2, 140, 76,
- 5, 1, 0, 2, 140, 62, 6, 1, 0, 2, 140, 70,
- 7, 1, 0, 2, 140, 127, 8, 1, 0, 2, 140, 70,
- 1, 1, 0, 2, 144, 127, 3, 1, 0, 2, 144, 76,
- 4, 1, 0, 2, 144, 76, 5, 1, 0, 2, 144, 127,
- 6, 1, 0, 2, 144, 76, 7, 1, 0, 2, 144, 127,
- 8, 1, 0, 2, 144, 76, 1, 1, 0, 2, 149, 127,
- 3, 1, 0, 2, 149, 76, 4, 1, 0, 2, 149, 74,
- 5, 1, 0, 2, 149, 76, 6, 1, 0, 2, 149, 76,
- 7, 1, 0, 2, 149, 54, 8, 1, 0, 2, 149, 76,
- 1, 1, 0, 2, 153, 127, 3, 1, 0, 2, 153, 76,
- 4, 1, 0, 2, 153, 74, 5, 1, 0, 2, 153, 76,
- 6, 1, 0, 2, 153, 76, 7, 1, 0, 2, 153, 54,
- 8, 1, 0, 2, 153, 76, 1, 1, 0, 2, 157, 127,
- 3, 1, 0, 2, 157, 76, 4, 1, 0, 2, 157, 74,
- 5, 1, 0, 2, 157, 76, 6, 1, 0, 2, 157, 76,
- 7, 1, 0, 2, 157, 54, 8, 1, 0, 2, 157, 76,
- 1, 1, 0, 2, 161, 127, 3, 1, 0, 2, 161, 76,
- 4, 1, 0, 2, 161, 74, 5, 1, 0, 2, 161, 76,
- 6, 1, 0, 2, 161, 76, 7, 1, 0, 2, 161, 54,
- 8, 1, 0, 2, 161, 76, 1, 1, 0, 2, 165, 127,
- 3, 1, 0, 2, 165, 76, 4, 1, 0, 2, 165, 74,
- 5, 1, 0, 2, 165, 76, 6, 1, 0, 2, 165, 76,
- 7, 1, 0, 2, 165, 54, 8, 1, 0, 2, 165, 76,
- 1, 1, 0, 3, 36, 50, 3, 1, 0, 3, 36, 38,
- 4, 1, 0, 3, 36, 66, 5, 1, 0, 3, 36, 38,
- 6, 1, 0, 3, 36, 52, 7, 1, 0, 3, 36, 30,
- 8, 1, 0, 3, 36, 50, 1, 1, 0, 3, 40, 50,
- 3, 1, 0, 3, 40, 38, 4, 1, 0, 3, 40, 66,
- 5, 1, 0, 3, 40, 38, 6, 1, 0, 3, 40, 52,
- 7, 1, 0, 3, 40, 30, 8, 1, 0, 3, 40, 50,
- 1, 1, 0, 3, 44, 50, 3, 1, 0, 3, 44, 38,
- 4, 1, 0, 3, 44, 66, 5, 1, 0, 3, 44, 38,
- 6, 1, 0, 3, 44, 52, 7, 1, 0, 3, 44, 30,
- 8, 1, 0, 3, 44, 50, 1, 1, 0, 3, 48, 50,
- 3, 1, 0, 3, 48, 38, 4, 1, 0, 3, 48, 66,
- 5, 1, 0, 3, 48, 38, 6, 1, 0, 3, 48, 52,
- 7, 1, 0, 3, 48, 30, 8, 1, 0, 3, 48, 50,
- 1, 1, 0, 3, 52, 50, 3, 1, 0, 3, 52, 40,
- 4, 1, 0, 3, 52, 66, 5, 1, 0, 3, 52, 38,
- 6, 1, 0, 3, 52, 68, 7, 1, 0, 3, 52, 30,
- 8, 1, 0, 3, 52, 68, 1, 1, 0, 3, 56, 50,
- 3, 1, 0, 3, 56, 40, 4, 1, 0, 3, 56, 66,
- 5, 1, 0, 3, 56, 38, 6, 1, 0, 3, 56, 68,
- 7, 1, 0, 3, 56, 30, 8, 1, 0, 3, 56, 68,
- 1, 1, 0, 3, 60, 50, 3, 1, 0, 3, 60, 40,
- 4, 1, 0, 3, 60, 66, 5, 1, 0, 3, 60, 38,
- 6, 1, 0, 3, 60, 66, 7, 1, 0, 3, 60, 30,
- 8, 1, 0, 3, 60, 66, 1, 1, 0, 3, 64, 50,
- 3, 1, 0, 3, 64, 40, 4, 1, 0, 3, 64, 66,
- 5, 1, 0, 3, 64, 38, 6, 1, 0, 3, 64, 68,
- 7, 1, 0, 3, 64, 30, 8, 1, 0, 3, 64, 68,
- 1, 1, 0, 3, 100, 70, 3, 1, 0, 3, 100, 60,
- 4, 1, 0, 3, 100, 64, 5, 1, 0, 3, 100, 38,
- 6, 1, 0, 3, 100, 60, 7, 1, 0, 3, 100, 30,
- 8, 1, 0, 3, 100, 60, 1, 1, 0, 3, 104, 70,
- 3, 1, 0, 3, 104, 68, 4, 1, 0, 3, 104, 64,
- 5, 1, 0, 3, 104, 38, 6, 1, 0, 3, 104, 68,
- 7, 1, 0, 3, 104, 30, 8, 1, 0, 3, 104, 68,
- 1, 1, 0, 3, 108, 70, 3, 1, 0, 3, 108, 68,
- 4, 1, 0, 3, 108, 64, 5, 1, 0, 3, 108, 38,
- 6, 1, 0, 3, 108, 68, 7, 1, 0, 3, 108, 30,
- 8, 1, 0, 3, 108, 68, 1, 1, 0, 3, 112, 70,
- 3, 1, 0, 3, 112, 68, 4, 1, 0, 3, 112, 64,
- 5, 1, 0, 3, 112, 38, 6, 1, 0, 3, 112, 68,
- 7, 1, 0, 3, 112, 30, 8, 1, 0, 3, 112, 68,
- 1, 1, 0, 3, 116, 70, 3, 1, 0, 3, 116, 68,
- 4, 1, 0, 3, 116, 64, 5, 1, 0, 3, 116, 38,
- 6, 1, 0, 3, 116, 68, 7, 1, 0, 3, 116, 30,
- 8, 1, 0, 3, 116, 68, 1, 1, 0, 3, 120, 70,
- 3, 1, 0, 3, 120, 127, 4, 1, 0, 3, 120, 64,
- 5, 1, 0, 3, 120, 127, 6, 1, 0, 3, 120, 68,
- 7, 1, 0, 3, 120, 30, 8, 1, 0, 3, 120, 68,
- 1, 1, 0, 3, 124, 70, 3, 1, 0, 3, 124, 127,
- 4, 1, 0, 3, 124, 64, 5, 1, 0, 3, 124, 127,
- 6, 1, 0, 3, 124, 68, 7, 1, 0, 3, 124, 30,
- 8, 1, 0, 3, 124, 68, 1, 1, 0, 3, 128, 70,
- 3, 1, 0, 3, 128, 127, 4, 1, 0, 3, 128, 64,
- 5, 1, 0, 3, 128, 127, 6, 1, 0, 3, 128, 68,
- 7, 1, 0, 3, 128, 30, 8, 1, 0, 3, 128, 68,
- 1, 1, 0, 3, 132, 70, 3, 1, 0, 3, 132, 68,
- 4, 1, 0, 3, 132, 64, 5, 1, 0, 3, 132, 38,
- 6, 1, 0, 3, 132, 68, 7, 1, 0, 3, 132, 30,
- 8, 1, 0, 3, 132, 68, 1, 1, 0, 3, 136, 70,
- 3, 1, 0, 3, 136, 68, 4, 1, 0, 3, 136, 64,
- 5, 1, 0, 3, 136, 38, 6, 1, 0, 3, 136, 68,
- 7, 1, 0, 3, 136, 127, 8, 1, 0, 3, 136, 68,
- 1, 1, 0, 3, 140, 70, 3, 1, 0, 3, 140, 60,
- 4, 1, 0, 3, 140, 64, 5, 1, 0, 3, 140, 38,
- 6, 1, 0, 3, 140, 60, 7, 1, 0, 3, 140, 127,
- 8, 1, 0, 3, 140, 60, 1, 1, 0, 3, 144, 127,
- 3, 1, 0, 3, 144, 68, 4, 1, 0, 3, 144, 64,
- 5, 1, 0, 3, 144, 127, 6, 1, 0, 3, 144, 68,
- 7, 1, 0, 3, 144, 127, 8, 1, 0, 3, 144, 68,
- 1, 1, 0, 3, 149, 127, 3, 1, 0, 3, 149, 76,
- 4, 1, 0, 3, 149, 60, 5, 1, 0, 3, 149, 76,
- 6, 1, 0, 3, 149, 76, 7, 1, 0, 3, 149, 30,
- 8, 1, 0, 3, 149, 72, 1, 1, 0, 3, 153, 127,
- 3, 1, 0, 3, 153, 76, 4, 1, 0, 3, 153, 60,
- 5, 1, 0, 3, 153, 76, 6, 1, 0, 3, 153, 76,
- 7, 1, 0, 3, 153, 30, 8, 1, 0, 3, 153, 76,
- 1, 1, 0, 3, 157, 127, 3, 1, 0, 3, 157, 76,
- 4, 1, 0, 3, 157, 60, 5, 1, 0, 3, 157, 76,
- 6, 1, 0, 3, 157, 76, 7, 1, 0, 3, 157, 30,
- 8, 1, 0, 3, 157, 76, 1, 1, 0, 3, 161, 127,
- 3, 1, 0, 3, 161, 76, 4, 1, 0, 3, 161, 60,
- 5, 1, 0, 3, 161, 76, 6, 1, 0, 3, 161, 76,
- 7, 1, 0, 3, 161, 30, 8, 1, 0, 3, 161, 76,
- 1, 1, 0, 3, 165, 127, 3, 1, 0, 3, 165, 76,
- 4, 1, 0, 3, 165, 60, 5, 1, 0, 3, 165, 76,
- 6, 1, 0, 3, 165, 76, 7, 1, 0, 3, 165, 30,
- 8, 1, 0, 3, 165, 76, 1, 1, 1, 2, 38, 62,
- 3, 1, 1, 2, 38, 64, 4, 1, 1, 2, 38, 72,
- 5, 1, 1, 2, 38, 64, 6, 1, 1, 2, 38, 64,
- 7, 1, 1, 2, 38, 54, 8, 1, 1, 2, 38, 62,
- 1, 1, 1, 2, 46, 62, 3, 1, 1, 2, 46, 64,
- 4, 1, 1, 2, 46, 72, 5, 1, 1, 2, 46, 64,
- 6, 1, 1, 2, 46, 64, 7, 1, 1, 2, 46, 54,
- 8, 1, 1, 2, 46, 62, 1, 1, 1, 2, 54, 62,
- 3, 1, 1, 2, 54, 64, 4, 1, 1, 2, 54, 72,
- 5, 1, 1, 2, 54, 64, 6, 1, 1, 2, 54, 72,
- 7, 1, 1, 2, 54, 54, 8, 1, 1, 2, 54, 72,
- 1, 1, 1, 2, 62, 62, 3, 1, 1, 2, 62, 64,
- 4, 1, 1, 2, 62, 70, 5, 1, 1, 2, 62, 64,
- 6, 1, 1, 2, 62, 64, 7, 1, 1, 2, 62, 54,
- 8, 1, 1, 2, 62, 64, 1, 1, 1, 2, 102, 72,
- 3, 1, 1, 2, 102, 58, 4, 1, 1, 2, 102, 72,
- 5, 1, 1, 2, 102, 64, 6, 1, 1, 2, 102, 58,
- 7, 1, 1, 2, 102, 54, 8, 1, 1, 2, 102, 58,
- 1, 1, 1, 2, 110, 72, 3, 1, 1, 2, 110, 72,
- 4, 1, 1, 2, 110, 72, 5, 1, 1, 2, 110, 64,
- 6, 1, 1, 2, 110, 72, 7, 1, 1, 2, 110, 54,
- 8, 1, 1, 2, 110, 72, 1, 1, 1, 2, 118, 72,
- 3, 1, 1, 2, 118, 127, 4, 1, 1, 2, 118, 72,
- 5, 1, 1, 2, 118, 127, 6, 1, 1, 2, 118, 72,
- 7, 1, 1, 2, 118, 54, 8, 1, 1, 2, 118, 72,
- 1, 1, 1, 2, 126, 72, 3, 1, 1, 2, 126, 127,
- 4, 1, 1, 2, 126, 72, 5, 1, 1, 2, 126, 127,
- 6, 1, 1, 2, 126, 72, 7, 1, 1, 2, 126, 54,
- 8, 1, 1, 2, 126, 72, 1, 1, 1, 2, 134, 72,
- 3, 1, 1, 2, 134, 72, 4, 1, 1, 2, 134, 72,
- 5, 1, 1, 2, 134, 64, 6, 1, 1, 2, 134, 72,
- 7, 1, 1, 2, 134, 127, 8, 1, 1, 2, 134, 72,
- 1, 1, 1, 2, 142, 127, 3, 1, 1, 2, 142, 72,
- 4, 1, 1, 2, 142, 72, 5, 1, 1, 2, 142, 127,
- 6, 1, 1, 2, 142, 72, 7, 1, 1, 2, 142, 127,
- 8, 1, 1, 2, 142, 72, 1, 1, 1, 2, 151, 127,
- 3, 1, 1, 2, 151, 72, 4, 1, 1, 2, 151, 72,
- 5, 1, 1, 2, 151, 72, 6, 1, 1, 2, 151, 72,
- 7, 1, 1, 2, 151, 54, 8, 1, 1, 2, 151, 72,
- 1, 1, 1, 2, 159, 127, 3, 1, 1, 2, 159, 72,
- 4, 1, 1, 2, 159, 72, 5, 1, 1, 2, 159, 72,
- 6, 1, 1, 2, 159, 72, 7, 1, 1, 2, 159, 54,
- 8, 1, 1, 2, 159, 72, 1, 1, 1, 3, 38, 50,
- 3, 1, 1, 3, 38, 40, 4, 1, 1, 3, 38, 62,
- 5, 1, 1, 3, 38, 40, 6, 1, 1, 3, 38, 52,
- 7, 1, 1, 3, 38, 30, 8, 1, 1, 3, 38, 50,
- 1, 1, 1, 3, 46, 50, 3, 1, 1, 3, 46, 40,
- 4, 1, 1, 3, 46, 62, 5, 1, 1, 3, 46, 40,
- 6, 1, 1, 3, 46, 52, 7, 1, 1, 3, 46, 30,
- 8, 1, 1, 3, 46, 50, 1, 1, 1, 3, 54, 50,
- 3, 1, 1, 3, 54, 40, 4, 1, 1, 3, 54, 62,
- 5, 1, 1, 3, 54, 40, 6, 1, 1, 3, 54, 68,
- 7, 1, 1, 3, 54, 30, 8, 1, 1, 3, 54, 68,
- 1, 1, 1, 3, 62, 48, 3, 1, 1, 3, 62, 40,
- 4, 1, 1, 3, 62, 58, 5, 1, 1, 3, 62, 40,
- 6, 1, 1, 3, 62, 58, 7, 1, 1, 3, 62, 30,
- 8, 1, 1, 3, 62, 58, 1, 1, 1, 3, 102, 70,
- 3, 1, 1, 3, 102, 54, 4, 1, 1, 3, 102, 64,
- 5, 1, 1, 3, 102, 40, 6, 1, 1, 3, 102, 54,
- 7, 1, 1, 3, 102, 30, 8, 1, 1, 3, 102, 54,
- 1, 1, 1, 3, 110, 70, 3, 1, 1, 3, 110, 68,
- 4, 1, 1, 3, 110, 64, 5, 1, 1, 3, 110, 40,
- 6, 1, 1, 3, 110, 68, 7, 1, 1, 3, 110, 30,
- 8, 1, 1, 3, 110, 68, 1, 1, 1, 3, 118, 70,
- 3, 1, 1, 3, 118, 127, 4, 1, 1, 3, 118, 64,
- 5, 1, 1, 3, 118, 127, 6, 1, 1, 3, 118, 68,
- 7, 1, 1, 3, 118, 30, 8, 1, 1, 3, 118, 68,
- 1, 1, 1, 3, 126, 70, 3, 1, 1, 3, 126, 127,
- 4, 1, 1, 3, 126, 64, 5, 1, 1, 3, 126, 127,
- 6, 1, 1, 3, 126, 68, 7, 1, 1, 3, 126, 30,
- 8, 1, 1, 3, 126, 68, 1, 1, 1, 3, 134, 70,
- 3, 1, 1, 3, 134, 68, 4, 1, 1, 3, 134, 64,
- 5, 1, 1, 3, 134, 40, 6, 1, 1, 3, 134, 68,
- 7, 1, 1, 3, 134, 127, 8, 1, 1, 3, 134, 68,
- 1, 1, 1, 3, 142, 127, 3, 1, 1, 3, 142, 68,
- 4, 1, 1, 3, 142, 64, 5, 1, 1, 3, 142, 127,
- 6, 1, 1, 3, 142, 68, 7, 1, 1, 3, 142, 127,
- 8, 1, 1, 3, 142, 68, 1, 1, 1, 3, 151, 127,
- 3, 1, 1, 3, 151, 72, 4, 1, 1, 3, 151, 66,
- 5, 1, 1, 3, 151, 72, 6, 1, 1, 3, 151, 72,
- 7, 1, 1, 3, 151, 30, 8, 1, 1, 3, 151, 68,
- 1, 1, 1, 3, 159, 127, 3, 1, 1, 3, 159, 72,
- 4, 1, 1, 3, 159, 66, 5, 1, 1, 3, 159, 72,
- 6, 1, 1, 3, 159, 72, 7, 1, 1, 3, 159, 30,
- 8, 1, 1, 3, 159, 72, 1, 1, 2, 4, 42, 64,
- 3, 1, 2, 4, 42, 64, 4, 1, 2, 4, 42, 68,
- 5, 1, 2, 4, 42, 64, 6, 1, 2, 4, 42, 64,
- 7, 1, 2, 4, 42, 54, 8, 1, 2, 4, 42, 62,
- 1, 1, 2, 4, 58, 64, 3, 1, 2, 4, 58, 62,
- 4, 1, 2, 4, 58, 64, 5, 1, 2, 4, 58, 64,
- 6, 1, 2, 4, 58, 62, 7, 1, 2, 4, 58, 54,
- 8, 1, 2, 4, 58, 62, 1, 1, 2, 4, 106, 72,
- 3, 1, 2, 4, 106, 58, 4, 1, 2, 4, 106, 66,
- 5, 1, 2, 4, 106, 64, 6, 1, 2, 4, 106, 58,
- 7, 1, 2, 4, 106, 54, 8, 1, 2, 4, 106, 58,
- 1, 1, 2, 4, 122, 72, 3, 1, 2, 4, 122, 127,
- 4, 1, 2, 4, 122, 68, 5, 1, 2, 4, 122, 127,
- 6, 1, 2, 4, 122, 72, 7, 1, 2, 4, 122, 54,
- 8, 1, 2, 4, 122, 72, 1, 1, 2, 4, 138, 127,
- 3, 1, 2, 4, 138, 72, 4, 1, 2, 4, 138, 68,
- 5, 1, 2, 4, 138, 127, 6, 1, 2, 4, 138, 72,
- 7, 1, 2, 4, 138, 127, 8, 1, 2, 4, 138, 72,
- 1, 1, 2, 4, 155, 127, 3, 1, 2, 4, 155, 72,
- 4, 1, 2, 4, 155, 68, 5, 1, 2, 4, 155, 72,
- 6, 1, 2, 4, 155, 72, 7, 1, 2, 4, 155, 54,
- 8, 1, 2, 4, 155, 68, 1, 1, 2, 5, 42, 50,
- 3, 1, 2, 5, 42, 40, 4, 1, 2, 5, 42, 58,
- 5, 1, 2, 5, 42, 40, 6, 1, 2, 5, 42, 52,
- 7, 1, 2, 5, 42, 30, 8, 1, 2, 5, 42, 50,
- 1, 1, 2, 5, 58, 50, 3, 1, 2, 5, 58, 40,
- 4, 1, 2, 5, 58, 56, 5, 1, 2, 5, 58, 40,
- 6, 1, 2, 5, 58, 52, 7, 1, 2, 5, 58, 30,
- 8, 1, 2, 5, 58, 52, 1, 1, 2, 5, 106, 72,
- 3, 1, 2, 5, 106, 50, 4, 1, 2, 5, 106, 56,
- 5, 1, 2, 5, 106, 40, 6, 1, 2, 5, 106, 50,
- 7, 1, 2, 5, 106, 30, 8, 1, 2, 5, 106, 50,
- 1, 1, 2, 5, 122, 72, 3, 1, 2, 5, 122, 127,
- 4, 1, 2, 5, 122, 56, 5, 1, 2, 5, 122, 127,
- 6, 1, 2, 5, 122, 66, 7, 1, 2, 5, 122, 30,
- 8, 1, 2, 5, 122, 66, 1, 1, 2, 5, 138, 127,
- 3, 1, 2, 5, 138, 66, 4, 1, 2, 5, 138, 58,
- 5, 1, 2, 5, 138, 127, 6, 1, 2, 5, 138, 66,
- 7, 1, 2, 5, 138, 127, 8, 1, 2, 5, 138, 66,
- 1, 1, 2, 5, 155, 127, 3, 1, 2, 5, 155, 62,
- 4, 1, 2, 5, 155, 58, 5, 1, 2, 5, 155, 72,
- 6, 1, 2, 5, 155, 62, 7, 1, 2, 5, 155, 30,
- 8, 1, 2, 5, 155, 62
+static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
+ { 0, 0, 0, 0, 1, 72, },
+ { 2, 0, 0, 0, 1, 60, },
+ { 0, 0, 0, 0, 2, 72, },
+ { 2, 0, 0, 0, 2, 60, },
+ { 0, 0, 0, 0, 3, 76, },
+ { 2, 0, 0, 0, 3, 60, },
+ { 0, 0, 0, 0, 4, 76, },
+ { 2, 0, 0, 0, 4, 60, },
+ { 0, 0, 0, 0, 5, 76, },
+ { 2, 0, 0, 0, 5, 60, },
+ { 0, 0, 0, 0, 6, 76, },
+ { 2, 0, 0, 0, 6, 60, },
+ { 0, 0, 0, 0, 7, 76, },
+ { 2, 0, 0, 0, 7, 60, },
+ { 0, 0, 0, 0, 8, 76, },
+ { 2, 0, 0, 0, 8, 60, },
+ { 0, 0, 0, 0, 9, 76, },
+ { 2, 0, 0, 0, 9, 60, },
+ { 0, 0, 0, 0, 10, 72, },
+ { 2, 0, 0, 0, 10, 60, },
+ { 0, 0, 0, 0, 11, 72, },
+ { 2, 0, 0, 0, 11, 60, },
+ { 0, 0, 0, 0, 12, 52, },
+ { 2, 0, 0, 0, 12, 60, },
+ { 0, 0, 0, 0, 13, 48, },
+ { 2, 0, 0, 0, 13, 60, },
+ { 0, 0, 0, 0, 14, 127, },
+ { 2, 0, 0, 0, 14, 127, },
+ { 0, 0, 0, 1, 1, 52, },
+ { 2, 0, 0, 1, 1, 60, },
+ { 0, 0, 0, 1, 2, 60, },
+ { 2, 0, 0, 1, 2, 60, },
+ { 0, 0, 0, 1, 3, 64, },
+ { 2, 0, 0, 1, 3, 60, },
+ { 0, 0, 0, 1, 4, 68, },
+ { 2, 0, 0, 1, 4, 60, },
+ { 0, 0, 0, 1, 5, 76, },
+ { 2, 0, 0, 1, 5, 60, },
+ { 0, 0, 0, 1, 6, 76, },
+ { 2, 0, 0, 1, 6, 60, },
+ { 0, 0, 0, 1, 7, 76, },
+ { 2, 0, 0, 1, 7, 60, },
+ { 0, 0, 0, 1, 8, 68, },
+ { 2, 0, 0, 1, 8, 60, },
+ { 0, 0, 0, 1, 9, 64, },
+ { 2, 0, 0, 1, 9, 60, },
+ { 0, 0, 0, 1, 10, 60, },
+ { 2, 0, 0, 1, 10, 60, },
+ { 0, 0, 0, 1, 11, 52, },
+ { 2, 0, 0, 1, 11, 60, },
+ { 0, 0, 0, 1, 12, 40, },
+ { 2, 0, 0, 1, 12, 60, },
+ { 0, 0, 0, 1, 13, 28, },
+ { 2, 0, 0, 1, 13, 60, },
+ { 0, 0, 0, 1, 14, 127, },
+ { 2, 0, 0, 1, 14, 127, },
+ { 0, 0, 0, 2, 1, 52, },
+ { 2, 0, 0, 2, 1, 60, },
+ { 0, 0, 0, 2, 2, 60, },
+ { 2, 0, 0, 2, 2, 60, },
+ { 0, 0, 0, 2, 3, 64, },
+ { 2, 0, 0, 2, 3, 60, },
+ { 0, 0, 0, 2, 4, 68, },
+ { 2, 0, 0, 2, 4, 60, },
+ { 0, 0, 0, 2, 5, 76, },
+ { 2, 0, 0, 2, 5, 60, },
+ { 0, 0, 0, 2, 6, 76, },
+ { 2, 0, 0, 2, 6, 60, },
+ { 0, 0, 0, 2, 7, 76, },
+ { 2, 0, 0, 2, 7, 60, },
+ { 0, 0, 0, 2, 8, 68, },
+ { 2, 0, 0, 2, 8, 60, },
+ { 0, 0, 0, 2, 9, 64, },
+ { 2, 0, 0, 2, 9, 60, },
+ { 0, 0, 0, 2, 10, 60, },
+ { 2, 0, 0, 2, 10, 60, },
+ { 0, 0, 0, 2, 11, 52, },
+ { 2, 0, 0, 2, 11, 60, },
+ { 0, 0, 0, 2, 12, 40, },
+ { 2, 0, 0, 2, 12, 60, },
+ { 0, 0, 0, 2, 13, 28, },
+ { 2, 0, 0, 2, 13, 60, },
+ { 0, 0, 0, 2, 14, 127, },
+ { 2, 0, 0, 2, 14, 127, },
+ { 0, 0, 0, 3, 1, 52, },
+ { 2, 0, 0, 3, 1, 36, },
+ { 0, 0, 0, 3, 2, 60, },
+ { 2, 0, 0, 3, 2, 36, },
+ { 0, 0, 0, 3, 3, 64, },
+ { 2, 0, 0, 3, 3, 36, },
+ { 0, 0, 0, 3, 4, 68, },
+ { 2, 0, 0, 3, 4, 36, },
+ { 0, 0, 0, 3, 5, 76, },
+ { 2, 0, 0, 3, 5, 36, },
+ { 0, 0, 0, 3, 6, 76, },
+ { 2, 0, 0, 3, 6, 36, },
+ { 0, 0, 0, 3, 7, 76, },
+ { 2, 0, 0, 3, 7, 36, },
+ { 0, 0, 0, 3, 8, 68, },
+ { 2, 0, 0, 3, 8, 36, },
+ { 0, 0, 0, 3, 9, 64, },
+ { 2, 0, 0, 3, 9, 36, },
+ { 0, 0, 0, 3, 10, 60, },
+ { 2, 0, 0, 3, 10, 36, },
+ { 0, 0, 0, 3, 11, 52, },
+ { 2, 0, 0, 3, 11, 36, },
+ { 0, 0, 0, 3, 12, 40, },
+ { 2, 0, 0, 3, 12, 36, },
+ { 0, 0, 0, 3, 13, 28, },
+ { 2, 0, 0, 3, 13, 36, },
+ { 0, 0, 0, 3, 14, 127, },
+ { 2, 0, 0, 3, 14, 127, },
+ { 0, 0, 1, 2, 1, 127, },
+ { 2, 0, 1, 2, 1, 127, },
+ { 0, 0, 1, 2, 2, 127, },
+ { 2, 0, 1, 2, 2, 127, },
+ { 0, 0, 1, 2, 3, 52, },
+ { 2, 0, 1, 2, 3, 60, },
+ { 0, 0, 1, 2, 4, 52, },
+ { 2, 0, 1, 2, 4, 60, },
+ { 0, 0, 1, 2, 5, 60, },
+ { 2, 0, 1, 2, 5, 60, },
+ { 0, 0, 1, 2, 6, 64, },
+ { 2, 0, 1, 2, 6, 60, },
+ { 0, 0, 1, 2, 7, 60, },
+ { 2, 0, 1, 2, 7, 60, },
+ { 0, 0, 1, 2, 8, 52, },
+ { 2, 0, 1, 2, 8, 60, },
+ { 0, 0, 1, 2, 9, 52, },
+ { 2, 0, 1, 2, 9, 60, },
+ { 0, 0, 1, 2, 10, 40, },
+ { 2, 0, 1, 2, 10, 60, },
+ { 0, 0, 1, 2, 11, 28, },
+ { 2, 0, 1, 2, 11, 60, },
+ { 0, 0, 1, 2, 12, 127, },
+ { 2, 0, 1, 2, 12, 127, },
+ { 0, 0, 1, 2, 13, 127, },
+ { 2, 0, 1, 2, 13, 127, },
+ { 0, 0, 1, 2, 14, 127, },
+ { 2, 0, 1, 2, 14, 127, },
+ { 0, 0, 1, 3, 1, 127, },
+ { 2, 0, 1, 3, 1, 127, },
+ { 0, 0, 1, 3, 2, 127, },
+ { 2, 0, 1, 3, 2, 127, },
+ { 0, 0, 1, 3, 3, 48, },
+ { 2, 0, 1, 3, 3, 36, },
+ { 0, 0, 1, 3, 4, 48, },
+ { 2, 0, 1, 3, 4, 36, },
+ { 0, 0, 1, 3, 5, 60, },
+ { 2, 0, 1, 3, 5, 36, },
+ { 0, 0, 1, 3, 6, 64, },
+ { 2, 0, 1, 3, 6, 36, },
+ { 0, 0, 1, 3, 7, 60, },
+ { 2, 0, 1, 3, 7, 36, },
+ { 0, 0, 1, 3, 8, 52, },
+ { 2, 0, 1, 3, 8, 36, },
+ { 0, 0, 1, 3, 9, 52, },
+ { 2, 0, 1, 3, 9, 36, },
+ { 0, 0, 1, 3, 10, 40, },
+ { 2, 0, 1, 3, 10, 36, },
+ { 0, 0, 1, 3, 11, 26, },
+ { 2, 0, 1, 3, 11, 36, },
+ { 0, 0, 1, 3, 12, 127, },
+ { 2, 0, 1, 3, 12, 127, },
+ { 0, 0, 1, 3, 13, 127, },
+ { 2, 0, 1, 3, 13, 127, },
+ { 0, 0, 1, 3, 14, 127, },
+ { 2, 0, 1, 3, 14, 127, },
+ { 0, 1, 0, 1, 36, 74, },
+ { 2, 1, 0, 1, 36, 62, },
+ { 0, 1, 0, 1, 40, 76, },
+ { 2, 1, 0, 1, 40, 62, },
+ { 0, 1, 0, 1, 44, 76, },
+ { 2, 1, 0, 1, 44, 62, },
+ { 0, 1, 0, 1, 48, 76, },
+ { 2, 1, 0, 1, 48, 62, },
+ { 0, 1, 0, 1, 52, 76, },
+ { 2, 1, 0, 1, 52, 62, },
+ { 0, 1, 0, 1, 56, 76, },
+ { 2, 1, 0, 1, 56, 62, },
+ { 0, 1, 0, 1, 60, 76, },
+ { 2, 1, 0, 1, 60, 62, },
+ { 0, 1, 0, 1, 64, 74, },
+ { 2, 1, 0, 1, 64, 62, },
+ { 0, 1, 0, 1, 100, 72, },
+ { 2, 1, 0, 1, 100, 62, },
+ { 0, 1, 0, 1, 104, 76, },
+ { 2, 1, 0, 1, 104, 62, },
+ { 0, 1, 0, 1, 108, 76, },
+ { 2, 1, 0, 1, 108, 62, },
+ { 0, 1, 0, 1, 112, 76, },
+ { 2, 1, 0, 1, 112, 62, },
+ { 0, 1, 0, 1, 116, 76, },
+ { 2, 1, 0, 1, 116, 62, },
+ { 0, 1, 0, 1, 120, 76, },
+ { 2, 1, 0, 1, 120, 62, },
+ { 0, 1, 0, 1, 124, 76, },
+ { 2, 1, 0, 1, 124, 62, },
+ { 0, 1, 0, 1, 128, 76, },
+ { 2, 1, 0, 1, 128, 62, },
+ { 0, 1, 0, 1, 132, 76, },
+ { 2, 1, 0, 1, 132, 62, },
+ { 0, 1, 0, 1, 136, 76, },
+ { 2, 1, 0, 1, 136, 62, },
+ { 0, 1, 0, 1, 140, 72, },
+ { 2, 1, 0, 1, 140, 62, },
+ { 0, 1, 0, 1, 144, 76, },
+ { 2, 1, 0, 1, 144, 127, },
+ { 0, 1, 0, 1, 149, 76, },
+ { 2, 1, 0, 1, 149, -128, },
+ { 0, 1, 0, 1, 153, 76, },
+ { 2, 1, 0, 1, 153, -128, },
+ { 0, 1, 0, 1, 157, 76, },
+ { 2, 1, 0, 1, 157, -128, },
+ { 0, 1, 0, 1, 161, 76, },
+ { 2, 1, 0, 1, 161, -128, },
+ { 0, 1, 0, 1, 165, 76, },
+ { 2, 1, 0, 1, 165, -128, },
+ { 0, 1, 0, 2, 36, 72, },
+ { 2, 1, 0, 2, 36, 62, },
+ { 0, 1, 0, 2, 40, 76, },
+ { 2, 1, 0, 2, 40, 62, },
+ { 0, 1, 0, 2, 44, 76, },
+ { 2, 1, 0, 2, 44, 62, },
+ { 0, 1, 0, 2, 48, 76, },
+ { 2, 1, 0, 2, 48, 62, },
+ { 0, 1, 0, 2, 52, 76, },
+ { 2, 1, 0, 2, 52, 62, },
+ { 0, 1, 0, 2, 56, 76, },
+ { 2, 1, 0, 2, 56, 62, },
+ { 0, 1, 0, 2, 60, 76, },
+ { 2, 1, 0, 2, 60, 62, },
+ { 0, 1, 0, 2, 64, 74, },
+ { 2, 1, 0, 2, 64, 62, },
+ { 0, 1, 0, 2, 100, 70, },
+ { 2, 1, 0, 2, 100, 62, },
+ { 0, 1, 0, 2, 104, 76, },
+ { 2, 1, 0, 2, 104, 62, },
+ { 0, 1, 0, 2, 108, 76, },
+ { 2, 1, 0, 2, 108, 62, },
+ { 0, 1, 0, 2, 112, 76, },
+ { 2, 1, 0, 2, 112, 62, },
+ { 0, 1, 0, 2, 116, 76, },
+ { 2, 1, 0, 2, 116, 62, },
+ { 0, 1, 0, 2, 120, 76, },
+ { 2, 1, 0, 2, 120, 62, },
+ { 0, 1, 0, 2, 124, 76, },
+ { 2, 1, 0, 2, 124, 62, },
+ { 0, 1, 0, 2, 128, 76, },
+ { 2, 1, 0, 2, 128, 62, },
+ { 0, 1, 0, 2, 132, 76, },
+ { 2, 1, 0, 2, 132, 62, },
+ { 0, 1, 0, 2, 136, 76, },
+ { 2, 1, 0, 2, 136, 62, },
+ { 0, 1, 0, 2, 140, 70, },
+ { 2, 1, 0, 2, 140, 62, },
+ { 0, 1, 0, 2, 144, 76, },
+ { 2, 1, 0, 2, 144, 127, },
+ { 0, 1, 0, 2, 149, 76, },
+ { 2, 1, 0, 2, 149, -128, },
+ { 0, 1, 0, 2, 153, 76, },
+ { 2, 1, 0, 2, 153, -128, },
+ { 0, 1, 0, 2, 157, 76, },
+ { 2, 1, 0, 2, 157, -128, },
+ { 0, 1, 0, 2, 161, 76, },
+ { 2, 1, 0, 2, 161, -128, },
+ { 0, 1, 0, 2, 165, 76, },
+ { 2, 1, 0, 2, 165, -128, },
+ { 0, 1, 0, 3, 36, 68, },
+ { 2, 1, 0, 3, 36, 38, },
+ { 0, 1, 0, 3, 40, 68, },
+ { 2, 1, 0, 3, 40, 38, },
+ { 0, 1, 0, 3, 44, 68, },
+ { 2, 1, 0, 3, 44, 38, },
+ { 0, 1, 0, 3, 48, 68, },
+ { 2, 1, 0, 3, 48, 38, },
+ { 0, 1, 0, 3, 52, 68, },
+ { 2, 1, 0, 3, 52, 38, },
+ { 0, 1, 0, 3, 56, 68, },
+ { 2, 1, 0, 3, 56, 38, },
+ { 0, 1, 0, 3, 60, 66, },
+ { 2, 1, 0, 3, 60, 38, },
+ { 0, 1, 0, 3, 64, 68, },
+ { 2, 1, 0, 3, 64, 38, },
+ { 0, 1, 0, 3, 100, 60, },
+ { 2, 1, 0, 3, 100, 38, },
+ { 0, 1, 0, 3, 104, 68, },
+ { 2, 1, 0, 3, 104, 38, },
+ { 0, 1, 0, 3, 108, 68, },
+ { 2, 1, 0, 3, 108, 38, },
+ { 0, 1, 0, 3, 112, 68, },
+ { 2, 1, 0, 3, 112, 38, },
+ { 0, 1, 0, 3, 116, 68, },
+ { 2, 1, 0, 3, 116, 38, },
+ { 0, 1, 0, 3, 120, 68, },
+ { 2, 1, 0, 3, 120, 38, },
+ { 0, 1, 0, 3, 124, 68, },
+ { 2, 1, 0, 3, 124, 38, },
+ { 0, 1, 0, 3, 128, 68, },
+ { 2, 1, 0, 3, 128, 38, },
+ { 0, 1, 0, 3, 132, 68, },
+ { 2, 1, 0, 3, 132, 38, },
+ { 0, 1, 0, 3, 136, 68, },
+ { 2, 1, 0, 3, 136, 38, },
+ { 0, 1, 0, 3, 140, 60, },
+ { 2, 1, 0, 3, 140, 38, },
+ { 0, 1, 0, 3, 144, 68, },
+ { 2, 1, 0, 3, 144, 127, },
+ { 0, 1, 0, 3, 149, 76, },
+ { 2, 1, 0, 3, 149, -128, },
+ { 0, 1, 0, 3, 153, 76, },
+ { 2, 1, 0, 3, 153, -128, },
+ { 0, 1, 0, 3, 157, 76, },
+ { 2, 1, 0, 3, 157, -128, },
+ { 0, 1, 0, 3, 161, 76, },
+ { 2, 1, 0, 3, 161, -128, },
+ { 0, 1, 0, 3, 165, 76, },
+ { 2, 1, 0, 3, 165, -128, },
+ { 0, 1, 1, 2, 38, 66, },
+ { 2, 1, 1, 2, 38, 64, },
+ { 0, 1, 1, 2, 46, 72, },
+ { 2, 1, 1, 2, 46, 64, },
+ { 0, 1, 1, 2, 54, 72, },
+ { 2, 1, 1, 2, 54, 64, },
+ { 0, 1, 1, 2, 62, 64, },
+ { 2, 1, 1, 2, 62, 64, },
+ { 0, 1, 1, 2, 102, 58, },
+ { 2, 1, 1, 2, 102, 64, },
+ { 0, 1, 1, 2, 110, 72, },
+ { 2, 1, 1, 2, 110, 64, },
+ { 0, 1, 1, 2, 118, 72, },
+ { 2, 1, 1, 2, 118, 64, },
+ { 0, 1, 1, 2, 126, 72, },
+ { 2, 1, 1, 2, 126, 64, },
+ { 0, 1, 1, 2, 134, 72, },
+ { 2, 1, 1, 2, 134, 64, },
+ { 0, 1, 1, 2, 142, 72, },
+ { 2, 1, 1, 2, 142, 127, },
+ { 0, 1, 1, 2, 151, 72, },
+ { 2, 1, 1, 2, 151, -128, },
+ { 0, 1, 1, 2, 159, 72, },
+ { 2, 1, 1, 2, 159, -128, },
+ { 0, 1, 1, 3, 38, 60, },
+ { 2, 1, 1, 3, 38, 40, },
+ { 0, 1, 1, 3, 46, 68, },
+ { 2, 1, 1, 3, 46, 40, },
+ { 0, 1, 1, 3, 54, 68, },
+ { 2, 1, 1, 3, 54, 40, },
+ { 0, 1, 1, 3, 62, 58, },
+ { 2, 1, 1, 3, 62, 40, },
+ { 0, 1, 1, 3, 102, 54, },
+ { 2, 1, 1, 3, 102, 40, },
+ { 0, 1, 1, 3, 110, 68, },
+ { 2, 1, 1, 3, 110, 40, },
+ { 0, 1, 1, 3, 118, 68, },
+ { 2, 1, 1, 3, 118, 40, },
+ { 0, 1, 1, 3, 126, 68, },
+ { 2, 1, 1, 3, 126, 40, },
+ { 0, 1, 1, 3, 134, 68, },
+ { 2, 1, 1, 3, 134, 40, },
+ { 0, 1, 1, 3, 142, 68, },
+ { 2, 1, 1, 3, 142, 127, },
+ { 0, 1, 1, 3, 151, 72, },
+ { 2, 1, 1, 3, 151, -128, },
+ { 0, 1, 1, 3, 159, 72, },
+ { 2, 1, 1, 3, 159, -128, },
+ { 0, 1, 2, 4, 42, 64, },
+ { 2, 1, 2, 4, 42, 64, },
+ { 0, 1, 2, 4, 58, 62, },
+ { 2, 1, 2, 4, 58, 64, },
+ { 0, 1, 2, 4, 106, 58, },
+ { 2, 1, 2, 4, 106, 64, },
+ { 0, 1, 2, 4, 122, 72, },
+ { 2, 1, 2, 4, 122, 64, },
+ { 0, 1, 2, 4, 138, 72, },
+ { 2, 1, 2, 4, 138, 127, },
+ { 0, 1, 2, 4, 155, 72, },
+ { 2, 1, 2, 4, 155, -128, },
+ { 0, 1, 2, 5, 42, 54, },
+ { 2, 1, 2, 5, 42, 40, },
+ { 0, 1, 2, 5, 58, 52, },
+ { 2, 1, 2, 5, 58, 40, },
+ { 0, 1, 2, 5, 106, 50, },
+ { 2, 1, 2, 5, 106, 40, },
+ { 0, 1, 2, 5, 122, 66, },
+ { 2, 1, 2, 5, 122, 40, },
+ { 0, 1, 2, 5, 138, 66, },
+ { 2, 1, 2, 5, 138, 127, },
+ { 0, 1, 2, 5, 155, 62, },
+ { 2, 1, 2, 5, 155, -128, },
+ { 1, 0, 0, 0, 1, 68, },
+ { 3, 0, 0, 0, 1, 72, },
+ { 4, 0, 0, 0, 1, 76, },
+ { 5, 0, 0, 0, 1, 60, },
+ { 6, 0, 0, 0, 1, 72, },
+ { 7, 0, 0, 0, 1, 60, },
+ { 8, 0, 0, 0, 1, 72, },
+ { 1, 0, 0, 0, 2, 68, },
+ { 3, 0, 0, 0, 2, 72, },
+ { 4, 0, 0, 0, 2, 76, },
+ { 5, 0, 0, 0, 2, 60, },
+ { 6, 0, 0, 0, 2, 72, },
+ { 7, 0, 0, 0, 2, 60, },
+ { 8, 0, 0, 0, 2, 72, },
+ { 1, 0, 0, 0, 3, 68, },
+ { 3, 0, 0, 0, 3, 76, },
+ { 4, 0, 0, 0, 3, 76, },
+ { 5, 0, 0, 0, 3, 60, },
+ { 6, 0, 0, 0, 3, 76, },
+ { 7, 0, 0, 0, 3, 60, },
+ { 8, 0, 0, 0, 3, 76, },
+ { 1, 0, 0, 0, 4, 68, },
+ { 3, 0, 0, 0, 4, 76, },
+ { 4, 0, 0, 0, 4, 76, },
+ { 5, 0, 0, 0, 4, 60, },
+ { 6, 0, 0, 0, 4, 76, },
+ { 7, 0, 0, 0, 4, 60, },
+ { 8, 0, 0, 0, 4, 76, },
+ { 1, 0, 0, 0, 5, 68, },
+ { 3, 0, 0, 0, 5, 76, },
+ { 4, 0, 0, 0, 5, 76, },
+ { 5, 0, 0, 0, 5, 60, },
+ { 6, 0, 0, 0, 5, 76, },
+ { 7, 0, 0, 0, 5, 60, },
+ { 8, 0, 0, 0, 5, 76, },
+ { 1, 0, 0, 0, 6, 68, },
+ { 3, 0, 0, 0, 6, 76, },
+ { 4, 0, 0, 0, 6, 76, },
+ { 5, 0, 0, 0, 6, 60, },
+ { 6, 0, 0, 0, 6, 76, },
+ { 7, 0, 0, 0, 6, 60, },
+ { 8, 0, 0, 0, 6, 76, },
+ { 1, 0, 0, 0, 7, 68, },
+ { 3, 0, 0, 0, 7, 76, },
+ { 4, 0, 0, 0, 7, 76, },
+ { 5, 0, 0, 0, 7, 60, },
+ { 6, 0, 0, 0, 7, 76, },
+ { 7, 0, 0, 0, 7, 60, },
+ { 8, 0, 0, 0, 7, 76, },
+ { 1, 0, 0, 0, 8, 68, },
+ { 3, 0, 0, 0, 8, 76, },
+ { 4, 0, 0, 0, 8, 76, },
+ { 5, 0, 0, 0, 8, 60, },
+ { 6, 0, 0, 0, 8, 76, },
+ { 7, 0, 0, 0, 8, 60, },
+ { 8, 0, 0, 0, 8, 76, },
+ { 1, 0, 0, 0, 9, 68, },
+ { 3, 0, 0, 0, 9, 76, },
+ { 4, 0, 0, 0, 9, 76, },
+ { 5, 0, 0, 0, 9, 60, },
+ { 6, 0, 0, 0, 9, 76, },
+ { 7, 0, 0, 0, 9, 60, },
+ { 8, 0, 0, 0, 9, 76, },
+ { 1, 0, 0, 0, 10, 68, },
+ { 3, 0, 0, 0, 10, 72, },
+ { 4, 0, 0, 0, 10, 76, },
+ { 5, 0, 0, 0, 10, 60, },
+ { 6, 0, 0, 0, 10, 72, },
+ { 7, 0, 0, 0, 10, 60, },
+ { 8, 0, 0, 0, 10, 72, },
+ { 1, 0, 0, 0, 11, 68, },
+ { 3, 0, 0, 0, 11, 72, },
+ { 4, 0, 0, 0, 11, 76, },
+ { 5, 0, 0, 0, 11, 60, },
+ { 6, 0, 0, 0, 11, 72, },
+ { 7, 0, 0, 0, 11, 60, },
+ { 8, 0, 0, 0, 11, 72, },
+ { 1, 0, 0, 0, 12, 68, },
+ { 3, 0, 0, 0, 12, 52, },
+ { 4, 0, 0, 0, 12, 76, },
+ { 5, 0, 0, 0, 12, 60, },
+ { 6, 0, 0, 0, 12, 52, },
+ { 7, 0, 0, 0, 12, 60, },
+ { 8, 0, 0, 0, 12, 52, },
+ { 1, 0, 0, 0, 13, 68, },
+ { 3, 0, 0, 0, 13, 48, },
+ { 4, 0, 0, 0, 13, 76, },
+ { 5, 0, 0, 0, 13, 60, },
+ { 6, 0, 0, 0, 13, 48, },
+ { 7, 0, 0, 0, 13, 60, },
+ { 8, 0, 0, 0, 13, 48, },
+ { 1, 0, 0, 0, 14, 68, },
+ { 3, 0, 0, 0, 14, 127, },
+ { 4, 0, 0, 0, 14, 127, },
+ { 5, 0, 0, 0, 14, 127, },
+ { 6, 0, 0, 0, 14, 127, },
+ { 7, 0, 0, 0, 14, 127, },
+ { 8, 0, 0, 0, 14, 127, },
+ { 1, 0, 0, 1, 1, 76, },
+ { 3, 0, 0, 1, 1, 52, },
+ { 4, 0, 0, 1, 1, 76, },
+ { 5, 0, 0, 1, 1, 60, },
+ { 6, 0, 0, 1, 1, 52, },
+ { 7, 0, 0, 1, 1, 60, },
+ { 8, 0, 0, 1, 1, 52, },
+ { 1, 0, 0, 1, 2, 76, },
+ { 3, 0, 0, 1, 2, 60, },
+ { 4, 0, 0, 1, 2, 76, },
+ { 5, 0, 0, 1, 2, 60, },
+ { 6, 0, 0, 1, 2, 60, },
+ { 7, 0, 0, 1, 2, 60, },
+ { 8, 0, 0, 1, 2, 60, },
+ { 1, 0, 0, 1, 3, 76, },
+ { 3, 0, 0, 1, 3, 64, },
+ { 4, 0, 0, 1, 3, 76, },
+ { 5, 0, 0, 1, 3, 60, },
+ { 6, 0, 0, 1, 3, 64, },
+ { 7, 0, 0, 1, 3, 60, },
+ { 8, 0, 0, 1, 3, 64, },
+ { 1, 0, 0, 1, 4, 76, },
+ { 3, 0, 0, 1, 4, 68, },
+ { 4, 0, 0, 1, 4, 76, },
+ { 5, 0, 0, 1, 4, 60, },
+ { 6, 0, 0, 1, 4, 68, },
+ { 7, 0, 0, 1, 4, 60, },
+ { 8, 0, 0, 1, 4, 68, },
+ { 1, 0, 0, 1, 5, 76, },
+ { 3, 0, 0, 1, 5, 76, },
+ { 4, 0, 0, 1, 5, 76, },
+ { 5, 0, 0, 1, 5, 60, },
+ { 6, 0, 0, 1, 5, 76, },
+ { 7, 0, 0, 1, 5, 60, },
+ { 8, 0, 0, 1, 5, 76, },
+ { 1, 0, 0, 1, 6, 76, },
+ { 3, 0, 0, 1, 6, 76, },
+ { 4, 0, 0, 1, 6, 76, },
+ { 5, 0, 0, 1, 6, 60, },
+ { 6, 0, 0, 1, 6, 76, },
+ { 7, 0, 0, 1, 6, 60, },
+ { 8, 0, 0, 1, 6, 76, },
+ { 1, 0, 0, 1, 7, 76, },
+ { 3, 0, 0, 1, 7, 76, },
+ { 4, 0, 0, 1, 7, 76, },
+ { 5, 0, 0, 1, 7, 60, },
+ { 6, 0, 0, 1, 7, 76, },
+ { 7, 0, 0, 1, 7, 60, },
+ { 8, 0, 0, 1, 7, 76, },
+ { 1, 0, 0, 1, 8, 76, },
+ { 3, 0, 0, 1, 8, 68, },
+ { 4, 0, 0, 1, 8, 76, },
+ { 5, 0, 0, 1, 8, 60, },
+ { 6, 0, 0, 1, 8, 68, },
+ { 7, 0, 0, 1, 8, 60, },
+ { 8, 0, 0, 1, 8, 68, },
+ { 1, 0, 0, 1, 9, 76, },
+ { 3, 0, 0, 1, 9, 64, },
+ { 4, 0, 0, 1, 9, 76, },
+ { 5, 0, 0, 1, 9, 60, },
+ { 6, 0, 0, 1, 9, 64, },
+ { 7, 0, 0, 1, 9, 60, },
+ { 8, 0, 0, 1, 9, 64, },
+ { 1, 0, 0, 1, 10, 76, },
+ { 3, 0, 0, 1, 10, 60, },
+ { 4, 0, 0, 1, 10, 76, },
+ { 5, 0, 0, 1, 10, 60, },
+ { 6, 0, 0, 1, 10, 60, },
+ { 7, 0, 0, 1, 10, 60, },
+ { 8, 0, 0, 1, 10, 60, },
+ { 1, 0, 0, 1, 11, 76, },
+ { 3, 0, 0, 1, 11, 52, },
+ { 4, 0, 0, 1, 11, 76, },
+ { 5, 0, 0, 1, 11, 60, },
+ { 6, 0, 0, 1, 11, 52, },
+ { 7, 0, 0, 1, 11, 60, },
+ { 8, 0, 0, 1, 11, 52, },
+ { 1, 0, 0, 1, 12, 76, },
+ { 3, 0, 0, 1, 12, 40, },
+ { 4, 0, 0, 1, 12, 76, },
+ { 5, 0, 0, 1, 12, 60, },
+ { 6, 0, 0, 1, 12, 40, },
+ { 7, 0, 0, 1, 12, 60, },
+ { 8, 0, 0, 1, 12, 40, },
+ { 1, 0, 0, 1, 13, 76, },
+ { 3, 0, 0, 1, 13, 28, },
+ { 4, 0, 0, 1, 13, 70, },
+ { 5, 0, 0, 1, 13, 60, },
+ { 6, 0, 0, 1, 13, 28, },
+ { 7, 0, 0, 1, 13, 60, },
+ { 8, 0, 0, 1, 13, 28, },
+ { 1, 0, 0, 1, 14, 127, },
+ { 3, 0, 0, 1, 14, 127, },
+ { 4, 0, 0, 1, 14, 127, },
+ { 5, 0, 0, 1, 14, 127, },
+ { 6, 0, 0, 1, 14, 127, },
+ { 7, 0, 0, 1, 14, 127, },
+ { 8, 0, 0, 1, 14, 127, },
+ { 1, 0, 0, 2, 1, 76, },
+ { 3, 0, 0, 2, 1, 52, },
+ { 4, 0, 0, 2, 1, 76, },
+ { 5, 0, 0, 2, 1, 60, },
+ { 6, 0, 0, 2, 1, 52, },
+ { 7, 0, 0, 2, 1, 60, },
+ { 8, 0, 0, 2, 1, 52, },
+ { 1, 0, 0, 2, 2, 76, },
+ { 3, 0, 0, 2, 2, 60, },
+ { 4, 0, 0, 2, 2, 76, },
+ { 5, 0, 0, 2, 2, 60, },
+ { 6, 0, 0, 2, 2, 60, },
+ { 7, 0, 0, 2, 2, 60, },
+ { 8, 0, 0, 2, 2, 60, },
+ { 1, 0, 0, 2, 3, 76, },
+ { 3, 0, 0, 2, 3, 64, },
+ { 4, 0, 0, 2, 3, 76, },
+ { 5, 0, 0, 2, 3, 60, },
+ { 6, 0, 0, 2, 3, 64, },
+ { 7, 0, 0, 2, 3, 60, },
+ { 8, 0, 0, 2, 3, 64, },
+ { 1, 0, 0, 2, 4, 76, },
+ { 3, 0, 0, 2, 4, 68, },
+ { 4, 0, 0, 2, 4, 76, },
+ { 5, 0, 0, 2, 4, 60, },
+ { 6, 0, 0, 2, 4, 68, },
+ { 7, 0, 0, 2, 4, 60, },
+ { 8, 0, 0, 2, 4, 68, },
+ { 1, 0, 0, 2, 5, 76, },
+ { 3, 0, 0, 2, 5, 76, },
+ { 4, 0, 0, 2, 5, 76, },
+ { 5, 0, 0, 2, 5, 60, },
+ { 6, 0, 0, 2, 5, 76, },
+ { 7, 0, 0, 2, 5, 60, },
+ { 8, 0, 0, 2, 5, 76, },
+ { 1, 0, 0, 2, 6, 76, },
+ { 3, 0, 0, 2, 6, 76, },
+ { 4, 0, 0, 2, 6, 76, },
+ { 5, 0, 0, 2, 6, 60, },
+ { 6, 0, 0, 2, 6, 76, },
+ { 7, 0, 0, 2, 6, 60, },
+ { 8, 0, 0, 2, 6, 76, },
+ { 1, 0, 0, 2, 7, 76, },
+ { 3, 0, 0, 2, 7, 76, },
+ { 4, 0, 0, 2, 7, 76, },
+ { 5, 0, 0, 2, 7, 60, },
+ { 6, 0, 0, 2, 7, 76, },
+ { 7, 0, 0, 2, 7, 60, },
+ { 8, 0, 0, 2, 7, 76, },
+ { 1, 0, 0, 2, 8, 76, },
+ { 3, 0, 0, 2, 8, 68, },
+ { 4, 0, 0, 2, 8, 76, },
+ { 5, 0, 0, 2, 8, 60, },
+ { 6, 0, 0, 2, 8, 68, },
+ { 7, 0, 0, 2, 8, 60, },
+ { 8, 0, 0, 2, 8, 68, },
+ { 1, 0, 0, 2, 9, 76, },
+ { 3, 0, 0, 2, 9, 64, },
+ { 4, 0, 0, 2, 9, 76, },
+ { 5, 0, 0, 2, 9, 60, },
+ { 6, 0, 0, 2, 9, 64, },
+ { 7, 0, 0, 2, 9, 60, },
+ { 8, 0, 0, 2, 9, 64, },
+ { 1, 0, 0, 2, 10, 76, },
+ { 3, 0, 0, 2, 10, 60, },
+ { 4, 0, 0, 2, 10, 76, },
+ { 5, 0, 0, 2, 10, 60, },
+ { 6, 0, 0, 2, 10, 60, },
+ { 7, 0, 0, 2, 10, 60, },
+ { 8, 0, 0, 2, 10, 60, },
+ { 1, 0, 0, 2, 11, 76, },
+ { 3, 0, 0, 2, 11, 52, },
+ { 4, 0, 0, 2, 11, 76, },
+ { 5, 0, 0, 2, 11, 60, },
+ { 6, 0, 0, 2, 11, 52, },
+ { 7, 0, 0, 2, 11, 60, },
+ { 8, 0, 0, 2, 11, 52, },
+ { 1, 0, 0, 2, 12, 76, },
+ { 3, 0, 0, 2, 12, 40, },
+ { 4, 0, 0, 2, 12, 76, },
+ { 5, 0, 0, 2, 12, 60, },
+ { 6, 0, 0, 2, 12, 40, },
+ { 7, 0, 0, 2, 12, 60, },
+ { 8, 0, 0, 2, 12, 40, },
+ { 1, 0, 0, 2, 13, 76, },
+ { 3, 0, 0, 2, 13, 28, },
+ { 4, 0, 0, 2, 13, 72, },
+ { 5, 0, 0, 2, 13, 60, },
+ { 6, 0, 0, 2, 13, 28, },
+ { 7, 0, 0, 2, 13, 60, },
+ { 8, 0, 0, 2, 13, 28, },
+ { 1, 0, 0, 2, 14, 127, },
+ { 3, 0, 0, 2, 14, 127, },
+ { 4, 0, 0, 2, 14, 127, },
+ { 5, 0, 0, 2, 14, 127, },
+ { 6, 0, 0, 2, 14, 127, },
+ { 7, 0, 0, 2, 14, 127, },
+ { 8, 0, 0, 2, 14, 127, },
+ { 1, 0, 0, 3, 1, 66, },
+ { 3, 0, 0, 3, 1, 52, },
+ { 4, 0, 0, 3, 1, 68, },
+ { 5, 0, 0, 3, 1, 36, },
+ { 6, 0, 0, 3, 1, 52, },
+ { 7, 0, 0, 3, 1, 36, },
+ { 8, 0, 0, 3, 1, 52, },
+ { 1, 0, 0, 3, 2, 66, },
+ { 3, 0, 0, 3, 2, 60, },
+ { 4, 0, 0, 3, 2, 70, },
+ { 5, 0, 0, 3, 2, 36, },
+ { 6, 0, 0, 3, 2, 60, },
+ { 7, 0, 0, 3, 2, 36, },
+ { 8, 0, 0, 3, 2, 60, },
+ { 1, 0, 0, 3, 3, 66, },
+ { 3, 0, 0, 3, 3, 64, },
+ { 4, 0, 0, 3, 3, 70, },
+ { 5, 0, 0, 3, 3, 36, },
+ { 6, 0, 0, 3, 3, 64, },
+ { 7, 0, 0, 3, 3, 36, },
+ { 8, 0, 0, 3, 3, 64, },
+ { 1, 0, 0, 3, 4, 66, },
+ { 3, 0, 0, 3, 4, 68, },
+ { 4, 0, 0, 3, 4, 70, },
+ { 5, 0, 0, 3, 4, 36, },
+ { 6, 0, 0, 3, 4, 68, },
+ { 7, 0, 0, 3, 4, 36, },
+ { 8, 0, 0, 3, 4, 68, },
+ { 1, 0, 0, 3, 5, 66, },
+ { 3, 0, 0, 3, 5, 76, },
+ { 4, 0, 0, 3, 5, 70, },
+ { 5, 0, 0, 3, 5, 36, },
+ { 6, 0, 0, 3, 5, 76, },
+ { 7, 0, 0, 3, 5, 36, },
+ { 8, 0, 0, 3, 5, 76, },
+ { 1, 0, 0, 3, 6, 66, },
+ { 3, 0, 0, 3, 6, 76, },
+ { 4, 0, 0, 3, 6, 70, },
+ { 5, 0, 0, 3, 6, 36, },
+ { 6, 0, 0, 3, 6, 76, },
+ { 7, 0, 0, 3, 6, 36, },
+ { 8, 0, 0, 3, 6, 76, },
+ { 1, 0, 0, 3, 7, 66, },
+ { 3, 0, 0, 3, 7, 76, },
+ { 4, 0, 0, 3, 7, 70, },
+ { 5, 0, 0, 3, 7, 36, },
+ { 6, 0, 0, 3, 7, 76, },
+ { 7, 0, 0, 3, 7, 36, },
+ { 8, 0, 0, 3, 7, 76, },
+ { 1, 0, 0, 3, 8, 66, },
+ { 3, 0, 0, 3, 8, 68, },
+ { 4, 0, 0, 3, 8, 70, },
+ { 5, 0, 0, 3, 8, 36, },
+ { 6, 0, 0, 3, 8, 68, },
+ { 7, 0, 0, 3, 8, 36, },
+ { 8, 0, 0, 3, 8, 68, },
+ { 1, 0, 0, 3, 9, 66, },
+ { 3, 0, 0, 3, 9, 64, },
+ { 4, 0, 0, 3, 9, 70, },
+ { 5, 0, 0, 3, 9, 36, },
+ { 6, 0, 0, 3, 9, 64, },
+ { 7, 0, 0, 3, 9, 36, },
+ { 8, 0, 0, 3, 9, 64, },
+ { 1, 0, 0, 3, 10, 66, },
+ { 3, 0, 0, 3, 10, 60, },
+ { 4, 0, 0, 3, 10, 70, },
+ { 5, 0, 0, 3, 10, 36, },
+ { 6, 0, 0, 3, 10, 60, },
+ { 7, 0, 0, 3, 10, 36, },
+ { 8, 0, 0, 3, 10, 60, },
+ { 1, 0, 0, 3, 11, 66, },
+ { 3, 0, 0, 3, 11, 52, },
+ { 4, 0, 0, 3, 11, 70, },
+ { 5, 0, 0, 3, 11, 36, },
+ { 6, 0, 0, 3, 11, 52, },
+ { 7, 0, 0, 3, 11, 36, },
+ { 8, 0, 0, 3, 11, 52, },
+ { 1, 0, 0, 3, 12, 66, },
+ { 3, 0, 0, 3, 12, 40, },
+ { 4, 0, 0, 3, 12, 70, },
+ { 5, 0, 0, 3, 12, 36, },
+ { 6, 0, 0, 3, 12, 40, },
+ { 7, 0, 0, 3, 12, 36, },
+ { 8, 0, 0, 3, 12, 40, },
+ { 1, 0, 0, 3, 13, 66, },
+ { 3, 0, 0, 3, 13, 28, },
+ { 4, 0, 0, 3, 13, 62, },
+ { 5, 0, 0, 3, 13, 36, },
+ { 6, 0, 0, 3, 13, 28, },
+ { 7, 0, 0, 3, 13, 36, },
+ { 8, 0, 0, 3, 13, 28, },
+ { 1, 0, 0, 3, 14, 127, },
+ { 3, 0, 0, 3, 14, 127, },
+ { 4, 0, 0, 3, 14, 127, },
+ { 5, 0, 0, 3, 14, 127, },
+ { 6, 0, 0, 3, 14, 127, },
+ { 7, 0, 0, 3, 14, 127, },
+ { 8, 0, 0, 3, 14, 127, },
+ { 1, 0, 1, 2, 1, 127, },
+ { 3, 0, 1, 2, 1, 127, },
+ { 4, 0, 1, 2, 1, 127, },
+ { 5, 0, 1, 2, 1, 127, },
+ { 6, 0, 1, 2, 1, 127, },
+ { 7, 0, 1, 2, 1, 127, },
+ { 8, 0, 1, 2, 1, 127, },
+ { 1, 0, 1, 2, 2, 127, },
+ { 3, 0, 1, 2, 2, 127, },
+ { 4, 0, 1, 2, 2, 127, },
+ { 5, 0, 1, 2, 2, 127, },
+ { 6, 0, 1, 2, 2, 127, },
+ { 7, 0, 1, 2, 2, 127, },
+ { 8, 0, 1, 2, 2, 127, },
+ { 1, 0, 1, 2, 3, 72, },
+ { 3, 0, 1, 2, 3, 52, },
+ { 4, 0, 1, 2, 3, 72, },
+ { 5, 0, 1, 2, 3, 60, },
+ { 6, 0, 1, 2, 3, 52, },
+ { 7, 0, 1, 2, 3, 60, },
+ { 8, 0, 1, 2, 3, 52, },
+ { 1, 0, 1, 2, 4, 72, },
+ { 3, 0, 1, 2, 4, 52, },
+ { 4, 0, 1, 2, 4, 72, },
+ { 5, 0, 1, 2, 4, 60, },
+ { 6, 0, 1, 2, 4, 52, },
+ { 7, 0, 1, 2, 4, 60, },
+ { 8, 0, 1, 2, 4, 52, },
+ { 1, 0, 1, 2, 5, 72, },
+ { 3, 0, 1, 2, 5, 60, },
+ { 4, 0, 1, 2, 5, 72, },
+ { 5, 0, 1, 2, 5, 60, },
+ { 6, 0, 1, 2, 5, 60, },
+ { 7, 0, 1, 2, 5, 60, },
+ { 8, 0, 1, 2, 5, 60, },
+ { 1, 0, 1, 2, 6, 72, },
+ { 3, 0, 1, 2, 6, 64, },
+ { 4, 0, 1, 2, 6, 72, },
+ { 5, 0, 1, 2, 6, 60, },
+ { 6, 0, 1, 2, 6, 64, },
+ { 7, 0, 1, 2, 6, 60, },
+ { 8, 0, 1, 2, 6, 64, },
+ { 1, 0, 1, 2, 7, 72, },
+ { 3, 0, 1, 2, 7, 60, },
+ { 4, 0, 1, 2, 7, 72, },
+ { 5, 0, 1, 2, 7, 60, },
+ { 6, 0, 1, 2, 7, 60, },
+ { 7, 0, 1, 2, 7, 60, },
+ { 8, 0, 1, 2, 7, 60, },
+ { 1, 0, 1, 2, 8, 72, },
+ { 3, 0, 1, 2, 8, 52, },
+ { 4, 0, 1, 2, 8, 72, },
+ { 5, 0, 1, 2, 8, 60, },
+ { 6, 0, 1, 2, 8, 52, },
+ { 7, 0, 1, 2, 8, 60, },
+ { 8, 0, 1, 2, 8, 52, },
+ { 1, 0, 1, 2, 9, 72, },
+ { 3, 0, 1, 2, 9, 52, },
+ { 4, 0, 1, 2, 9, 72, },
+ { 5, 0, 1, 2, 9, 60, },
+ { 6, 0, 1, 2, 9, 52, },
+ { 7, 0, 1, 2, 9, 60, },
+ { 8, 0, 1, 2, 9, 52, },
+ { 1, 0, 1, 2, 10, 72, },
+ { 3, 0, 1, 2, 10, 40, },
+ { 4, 0, 1, 2, 10, 72, },
+ { 5, 0, 1, 2, 10, 60, },
+ { 6, 0, 1, 2, 10, 40, },
+ { 7, 0, 1, 2, 10, 60, },
+ { 8, 0, 1, 2, 10, 40, },
+ { 1, 0, 1, 2, 11, 72, },
+ { 3, 0, 1, 2, 11, 28, },
+ { 4, 0, 1, 2, 11, 70, },
+ { 5, 0, 1, 2, 11, 60, },
+ { 6, 0, 1, 2, 11, 28, },
+ { 7, 0, 1, 2, 11, 60, },
+ { 8, 0, 1, 2, 11, 28, },
+ { 1, 0, 1, 2, 12, 127, },
+ { 3, 0, 1, 2, 12, 127, },
+ { 4, 0, 1, 2, 12, 127, },
+ { 5, 0, 1, 2, 12, 127, },
+ { 6, 0, 1, 2, 12, 127, },
+ { 7, 0, 1, 2, 12, 127, },
+ { 8, 0, 1, 2, 12, 127, },
+ { 1, 0, 1, 2, 13, 127, },
+ { 3, 0, 1, 2, 13, 127, },
+ { 4, 0, 1, 2, 13, 127, },
+ { 5, 0, 1, 2, 13, 127, },
+ { 6, 0, 1, 2, 13, 127, },
+ { 7, 0, 1, 2, 13, 127, },
+ { 8, 0, 1, 2, 13, 127, },
+ { 1, 0, 1, 2, 14, 127, },
+ { 3, 0, 1, 2, 14, 127, },
+ { 4, 0, 1, 2, 14, 127, },
+ { 5, 0, 1, 2, 14, 127, },
+ { 6, 0, 1, 2, 14, 127, },
+ { 7, 0, 1, 2, 14, 127, },
+ { 8, 0, 1, 2, 14, 127, },
+ { 1, 0, 1, 3, 1, 127, },
+ { 3, 0, 1, 3, 1, 127, },
+ { 4, 0, 1, 3, 1, 127, },
+ { 5, 0, 1, 3, 1, 127, },
+ { 6, 0, 1, 3, 1, 127, },
+ { 7, 0, 1, 3, 1, 127, },
+ { 8, 0, 1, 3, 1, 127, },
+ { 1, 0, 1, 3, 2, 127, },
+ { 3, 0, 1, 3, 2, 127, },
+ { 4, 0, 1, 3, 2, 127, },
+ { 5, 0, 1, 3, 2, 127, },
+ { 6, 0, 1, 3, 2, 127, },
+ { 7, 0, 1, 3, 2, 127, },
+ { 8, 0, 1, 3, 2, 127, },
+ { 1, 0, 1, 3, 3, 66, },
+ { 3, 0, 1, 3, 3, 48, },
+ { 4, 0, 1, 3, 3, 66, },
+ { 5, 0, 1, 3, 3, 36, },
+ { 6, 0, 1, 3, 3, 48, },
+ { 7, 0, 1, 3, 3, 36, },
+ { 8, 0, 1, 3, 3, 48, },
+ { 1, 0, 1, 3, 4, 66, },
+ { 3, 0, 1, 3, 4, 48, },
+ { 4, 0, 1, 3, 4, 70, },
+ { 5, 0, 1, 3, 4, 36, },
+ { 6, 0, 1, 3, 4, 48, },
+ { 7, 0, 1, 3, 4, 36, },
+ { 8, 0, 1, 3, 4, 48, },
+ { 1, 0, 1, 3, 5, 66, },
+ { 3, 0, 1, 3, 5, 60, },
+ { 4, 0, 1, 3, 5, 70, },
+ { 5, 0, 1, 3, 5, 36, },
+ { 6, 0, 1, 3, 5, 60, },
+ { 7, 0, 1, 3, 5, 36, },
+ { 8, 0, 1, 3, 5, 60, },
+ { 1, 0, 1, 3, 6, 66, },
+ { 3, 0, 1, 3, 6, 64, },
+ { 4, 0, 1, 3, 6, 70, },
+ { 5, 0, 1, 3, 6, 36, },
+ { 6, 0, 1, 3, 6, 64, },
+ { 7, 0, 1, 3, 6, 36, },
+ { 8, 0, 1, 3, 6, 64, },
+ { 1, 0, 1, 3, 7, 66, },
+ { 3, 0, 1, 3, 7, 60, },
+ { 4, 0, 1, 3, 7, 70, },
+ { 5, 0, 1, 3, 7, 36, },
+ { 6, 0, 1, 3, 7, 60, },
+ { 7, 0, 1, 3, 7, 36, },
+ { 8, 0, 1, 3, 7, 60, },
+ { 1, 0, 1, 3, 8, 66, },
+ { 3, 0, 1, 3, 8, 52, },
+ { 4, 0, 1, 3, 8, 70, },
+ { 5, 0, 1, 3, 8, 36, },
+ { 6, 0, 1, 3, 8, 52, },
+ { 7, 0, 1, 3, 8, 36, },
+ { 8, 0, 1, 3, 8, 52, },
+ { 1, 0, 1, 3, 9, 66, },
+ { 3, 0, 1, 3, 9, 52, },
+ { 4, 0, 1, 3, 9, 70, },
+ { 5, 0, 1, 3, 9, 36, },
+ { 6, 0, 1, 3, 9, 52, },
+ { 7, 0, 1, 3, 9, 36, },
+ { 8, 0, 1, 3, 9, 52, },
+ { 1, 0, 1, 3, 10, 66, },
+ { 3, 0, 1, 3, 10, 40, },
+ { 4, 0, 1, 3, 10, 70, },
+ { 5, 0, 1, 3, 10, 36, },
+ { 6, 0, 1, 3, 10, 40, },
+ { 7, 0, 1, 3, 10, 36, },
+ { 8, 0, 1, 3, 10, 40, },
+ { 1, 0, 1, 3, 11, 66, },
+ { 3, 0, 1, 3, 11, 26, },
+ { 4, 0, 1, 3, 11, 66, },
+ { 5, 0, 1, 3, 11, 36, },
+ { 6, 0, 1, 3, 11, 26, },
+ { 7, 0, 1, 3, 11, 36, },
+ { 8, 0, 1, 3, 11, 26, },
+ { 1, 0, 1, 3, 12, 127, },
+ { 3, 0, 1, 3, 12, 127, },
+ { 4, 0, 1, 3, 12, 127, },
+ { 5, 0, 1, 3, 12, 127, },
+ { 6, 0, 1, 3, 12, 127, },
+ { 7, 0, 1, 3, 12, 127, },
+ { 8, 0, 1, 3, 12, 127, },
+ { 1, 0, 1, 3, 13, 127, },
+ { 3, 0, 1, 3, 13, 127, },
+ { 4, 0, 1, 3, 13, 127, },
+ { 5, 0, 1, 3, 13, 127, },
+ { 6, 0, 1, 3, 13, 127, },
+ { 7, 0, 1, 3, 13, 127, },
+ { 8, 0, 1, 3, 13, 127, },
+ { 1, 0, 1, 3, 14, 127, },
+ { 3, 0, 1, 3, 14, 127, },
+ { 4, 0, 1, 3, 14, 127, },
+ { 5, 0, 1, 3, 14, 127, },
+ { 6, 0, 1, 3, 14, 127, },
+ { 7, 0, 1, 3, 14, 127, },
+ { 8, 0, 1, 3, 14, 127, },
+ { 1, 1, 0, 1, 36, 60, },
+ { 3, 1, 0, 1, 36, 62, },
+ { 4, 1, 0, 1, 36, 76, },
+ { 5, 1, 0, 1, 36, 62, },
+ { 6, 1, 0, 1, 36, 64, },
+ { 7, 1, 0, 1, 36, 54, },
+ { 8, 1, 0, 1, 36, 62, },
+ { 1, 1, 0, 1, 40, 62, },
+ { 3, 1, 0, 1, 40, 62, },
+ { 4, 1, 0, 1, 40, 76, },
+ { 5, 1, 0, 1, 40, 62, },
+ { 6, 1, 0, 1, 40, 64, },
+ { 7, 1, 0, 1, 40, 54, },
+ { 8, 1, 0, 1, 40, 62, },
+ { 1, 1, 0, 1, 44, 62, },
+ { 3, 1, 0, 1, 44, 62, },
+ { 4, 1, 0, 1, 44, 76, },
+ { 5, 1, 0, 1, 44, 62, },
+ { 6, 1, 0, 1, 44, 64, },
+ { 7, 1, 0, 1, 44, 54, },
+ { 8, 1, 0, 1, 44, 62, },
+ { 1, 1, 0, 1, 48, 62, },
+ { 3, 1, 0, 1, 48, 62, },
+ { 4, 1, 0, 1, 48, 76, },
+ { 5, 1, 0, 1, 48, 62, },
+ { 6, 1, 0, 1, 48, 64, },
+ { 7, 1, 0, 1, 48, 54, },
+ { 8, 1, 0, 1, 48, 62, },
+ { 1, 1, 0, 1, 52, 62, },
+ { 3, 1, 0, 1, 52, 64, },
+ { 4, 1, 0, 1, 52, 76, },
+ { 5, 1, 0, 1, 52, 62, },
+ { 6, 1, 0, 1, 52, 76, },
+ { 7, 1, 0, 1, 52, 54, },
+ { 8, 1, 0, 1, 52, 76, },
+ { 1, 1, 0, 1, 56, 62, },
+ { 3, 1, 0, 1, 56, 64, },
+ { 4, 1, 0, 1, 56, 76, },
+ { 5, 1, 0, 1, 56, 62, },
+ { 6, 1, 0, 1, 56, 76, },
+ { 7, 1, 0, 1, 56, 54, },
+ { 8, 1, 0, 1, 56, 76, },
+ { 1, 1, 0, 1, 60, 62, },
+ { 3, 1, 0, 1, 60, 64, },
+ { 4, 1, 0, 1, 60, 76, },
+ { 5, 1, 0, 1, 60, 62, },
+ { 6, 1, 0, 1, 60, 76, },
+ { 7, 1, 0, 1, 60, 54, },
+ { 8, 1, 0, 1, 60, 76, },
+ { 1, 1, 0, 1, 64, 60, },
+ { 3, 1, 0, 1, 64, 64, },
+ { 4, 1, 0, 1, 64, 76, },
+ { 5, 1, 0, 1, 64, 62, },
+ { 6, 1, 0, 1, 64, 74, },
+ { 7, 1, 0, 1, 64, 54, },
+ { 8, 1, 0, 1, 64, 74, },
+ { 1, 1, 0, 1, 100, 76, },
+ { 3, 1, 0, 1, 100, 72, },
+ { 4, 1, 0, 1, 100, 76, },
+ { 5, 1, 0, 1, 100, 62, },
+ { 6, 1, 0, 1, 100, 72, },
+ { 7, 1, 0, 1, 100, 54, },
+ { 8, 1, 0, 1, 100, 72, },
+ { 1, 1, 0, 1, 104, 76, },
+ { 3, 1, 0, 1, 104, 76, },
+ { 4, 1, 0, 1, 104, 76, },
+ { 5, 1, 0, 1, 104, 62, },
+ { 6, 1, 0, 1, 104, 76, },
+ { 7, 1, 0, 1, 104, 54, },
+ { 8, 1, 0, 1, 104, 76, },
+ { 1, 1, 0, 1, 108, 76, },
+ { 3, 1, 0, 1, 108, 76, },
+ { 4, 1, 0, 1, 108, 76, },
+ { 5, 1, 0, 1, 108, 62, },
+ { 6, 1, 0, 1, 108, 76, },
+ { 7, 1, 0, 1, 108, 54, },
+ { 8, 1, 0, 1, 108, 76, },
+ { 1, 1, 0, 1, 112, 76, },
+ { 3, 1, 0, 1, 112, 76, },
+ { 4, 1, 0, 1, 112, 76, },
+ { 5, 1, 0, 1, 112, 62, },
+ { 6, 1, 0, 1, 112, 76, },
+ { 7, 1, 0, 1, 112, 54, },
+ { 8, 1, 0, 1, 112, 76, },
+ { 1, 1, 0, 1, 116, 76, },
+ { 3, 1, 0, 1, 116, 76, },
+ { 4, 1, 0, 1, 116, 76, },
+ { 5, 1, 0, 1, 116, 62, },
+ { 6, 1, 0, 1, 116, 76, },
+ { 7, 1, 0, 1, 116, 54, },
+ { 8, 1, 0, 1, 116, 76, },
+ { 1, 1, 0, 1, 120, 76, },
+ { 3, 1, 0, 1, 120, 127, },
+ { 4, 1, 0, 1, 120, 76, },
+ { 5, 1, 0, 1, 120, 127, },
+ { 6, 1, 0, 1, 120, 76, },
+ { 7, 1, 0, 1, 120, 54, },
+ { 8, 1, 0, 1, 120, 76, },
+ { 1, 1, 0, 1, 124, 76, },
+ { 3, 1, 0, 1, 124, 127, },
+ { 4, 1, 0, 1, 124, 76, },
+ { 5, 1, 0, 1, 124, 127, },
+ { 6, 1, 0, 1, 124, 76, },
+ { 7, 1, 0, 1, 124, 54, },
+ { 8, 1, 0, 1, 124, 76, },
+ { 1, 1, 0, 1, 128, 76, },
+ { 3, 1, 0, 1, 128, 127, },
+ { 4, 1, 0, 1, 128, 76, },
+ { 5, 1, 0, 1, 128, 127, },
+ { 6, 1, 0, 1, 128, 76, },
+ { 7, 1, 0, 1, 128, 54, },
+ { 8, 1, 0, 1, 128, 76, },
+ { 1, 1, 0, 1, 132, 76, },
+ { 3, 1, 0, 1, 132, 76, },
+ { 4, 1, 0, 1, 132, 76, },
+ { 5, 1, 0, 1, 132, 62, },
+ { 6, 1, 0, 1, 132, 76, },
+ { 7, 1, 0, 1, 132, 54, },
+ { 8, 1, 0, 1, 132, 76, },
+ { 1, 1, 0, 1, 136, 76, },
+ { 3, 1, 0, 1, 136, 76, },
+ { 4, 1, 0, 1, 136, 76, },
+ { 5, 1, 0, 1, 136, 62, },
+ { 6, 1, 0, 1, 136, 76, },
+ { 7, 1, 0, 1, 136, 127, },
+ { 8, 1, 0, 1, 136, 76, },
+ { 1, 1, 0, 1, 140, 76, },
+ { 3, 1, 0, 1, 140, 72, },
+ { 4, 1, 0, 1, 140, 76, },
+ { 5, 1, 0, 1, 140, 62, },
+ { 6, 1, 0, 1, 140, 72, },
+ { 7, 1, 0, 1, 140, 127, },
+ { 8, 1, 0, 1, 140, 72, },
+ { 1, 1, 0, 1, 144, 127, },
+ { 3, 1, 0, 1, 144, 76, },
+ { 4, 1, 0, 1, 144, 76, },
+ { 5, 1, 0, 1, 144, 127, },
+ { 6, 1, 0, 1, 144, 76, },
+ { 7, 1, 0, 1, 144, 127, },
+ { 8, 1, 0, 1, 144, 76, },
+ { 1, 1, 0, 1, 149, 127, },
+ { 3, 1, 0, 1, 149, 76, },
+ { 4, 1, 0, 1, 149, 74, },
+ { 5, 1, 0, 1, 149, 76, },
+ { 6, 1, 0, 1, 149, 76, },
+ { 7, 1, 0, 1, 149, 54, },
+ { 8, 1, 0, 1, 149, 76, },
+ { 1, 1, 0, 1, 153, 127, },
+ { 3, 1, 0, 1, 153, 76, },
+ { 4, 1, 0, 1, 153, 74, },
+ { 5, 1, 0, 1, 153, 76, },
+ { 6, 1, 0, 1, 153, 76, },
+ { 7, 1, 0, 1, 153, 54, },
+ { 8, 1, 0, 1, 153, 76, },
+ { 1, 1, 0, 1, 157, 127, },
+ { 3, 1, 0, 1, 157, 76, },
+ { 4, 1, 0, 1, 157, 74, },
+ { 5, 1, 0, 1, 157, 76, },
+ { 6, 1, 0, 1, 157, 76, },
+ { 7, 1, 0, 1, 157, 54, },
+ { 8, 1, 0, 1, 157, 76, },
+ { 1, 1, 0, 1, 161, 127, },
+ { 3, 1, 0, 1, 161, 76, },
+ { 4, 1, 0, 1, 161, 74, },
+ { 5, 1, 0, 1, 161, 76, },
+ { 6, 1, 0, 1, 161, 76, },
+ { 7, 1, 0, 1, 161, 54, },
+ { 8, 1, 0, 1, 161, 76, },
+ { 1, 1, 0, 1, 165, 127, },
+ { 3, 1, 0, 1, 165, 76, },
+ { 4, 1, 0, 1, 165, 74, },
+ { 5, 1, 0, 1, 165, 76, },
+ { 6, 1, 0, 1, 165, 76, },
+ { 7, 1, 0, 1, 165, 54, },
+ { 8, 1, 0, 1, 165, 76, },
+ { 1, 1, 0, 2, 36, 62, },
+ { 3, 1, 0, 2, 36, 62, },
+ { 4, 1, 0, 2, 36, 76, },
+ { 5, 1, 0, 2, 36, 62, },
+ { 6, 1, 0, 2, 36, 64, },
+ { 7, 1, 0, 2, 36, 54, },
+ { 8, 1, 0, 2, 36, 62, },
+ { 1, 1, 0, 2, 40, 62, },
+ { 3, 1, 0, 2, 40, 62, },
+ { 4, 1, 0, 2, 40, 76, },
+ { 5, 1, 0, 2, 40, 62, },
+ { 6, 1, 0, 2, 40, 64, },
+ { 7, 1, 0, 2, 40, 54, },
+ { 8, 1, 0, 2, 40, 62, },
+ { 1, 1, 0, 2, 44, 62, },
+ { 3, 1, 0, 2, 44, 62, },
+ { 4, 1, 0, 2, 44, 76, },
+ { 5, 1, 0, 2, 44, 62, },
+ { 6, 1, 0, 2, 44, 64, },
+ { 7, 1, 0, 2, 44, 54, },
+ { 8, 1, 0, 2, 44, 62, },
+ { 1, 1, 0, 2, 48, 62, },
+ { 3, 1, 0, 2, 48, 62, },
+ { 4, 1, 0, 2, 48, 76, },
+ { 5, 1, 0, 2, 48, 62, },
+ { 6, 1, 0, 2, 48, 64, },
+ { 7, 1, 0, 2, 48, 54, },
+ { 8, 1, 0, 2, 48, 62, },
+ { 1, 1, 0, 2, 52, 62, },
+ { 3, 1, 0, 2, 52, 64, },
+ { 4, 1, 0, 2, 52, 76, },
+ { 5, 1, 0, 2, 52, 62, },
+ { 6, 1, 0, 2, 52, 76, },
+ { 7, 1, 0, 2, 52, 54, },
+ { 8, 1, 0, 2, 52, 76, },
+ { 1, 1, 0, 2, 56, 62, },
+ { 3, 1, 0, 2, 56, 64, },
+ { 4, 1, 0, 2, 56, 76, },
+ { 5, 1, 0, 2, 56, 62, },
+ { 6, 1, 0, 2, 56, 76, },
+ { 7, 1, 0, 2, 56, 54, },
+ { 8, 1, 0, 2, 56, 76, },
+ { 1, 1, 0, 2, 60, 62, },
+ { 3, 1, 0, 2, 60, 64, },
+ { 4, 1, 0, 2, 60, 76, },
+ { 5, 1, 0, 2, 60, 62, },
+ { 6, 1, 0, 2, 60, 76, },
+ { 7, 1, 0, 2, 60, 54, },
+ { 8, 1, 0, 2, 60, 76, },
+ { 1, 1, 0, 2, 64, 60, },
+ { 3, 1, 0, 2, 64, 64, },
+ { 4, 1, 0, 2, 64, 74, },
+ { 5, 1, 0, 2, 64, 62, },
+ { 6, 1, 0, 2, 64, 74, },
+ { 7, 1, 0, 2, 64, 54, },
+ { 8, 1, 0, 2, 64, 74, },
+ { 1, 1, 0, 2, 100, 76, },
+ { 3, 1, 0, 2, 100, 70, },
+ { 4, 1, 0, 2, 100, 76, },
+ { 5, 1, 0, 2, 100, 62, },
+ { 6, 1, 0, 2, 100, 70, },
+ { 7, 1, 0, 2, 100, 54, },
+ { 8, 1, 0, 2, 100, 70, },
+ { 1, 1, 0, 2, 104, 76, },
+ { 3, 1, 0, 2, 104, 76, },
+ { 4, 1, 0, 2, 104, 76, },
+ { 5, 1, 0, 2, 104, 62, },
+ { 6, 1, 0, 2, 104, 76, },
+ { 7, 1, 0, 2, 104, 54, },
+ { 8, 1, 0, 2, 104, 76, },
+ { 1, 1, 0, 2, 108, 76, },
+ { 3, 1, 0, 2, 108, 76, },
+ { 4, 1, 0, 2, 108, 76, },
+ { 5, 1, 0, 2, 108, 62, },
+ { 6, 1, 0, 2, 108, 76, },
+ { 7, 1, 0, 2, 108, 54, },
+ { 8, 1, 0, 2, 108, 76, },
+ { 1, 1, 0, 2, 112, 76, },
+ { 3, 1, 0, 2, 112, 76, },
+ { 4, 1, 0, 2, 112, 76, },
+ { 5, 1, 0, 2, 112, 62, },
+ { 6, 1, 0, 2, 112, 76, },
+ { 7, 1, 0, 2, 112, 54, },
+ { 8, 1, 0, 2, 112, 76, },
+ { 1, 1, 0, 2, 116, 76, },
+ { 3, 1, 0, 2, 116, 76, },
+ { 4, 1, 0, 2, 116, 76, },
+ { 5, 1, 0, 2, 116, 62, },
+ { 6, 1, 0, 2, 116, 76, },
+ { 7, 1, 0, 2, 116, 54, },
+ { 8, 1, 0, 2, 116, 76, },
+ { 1, 1, 0, 2, 120, 76, },
+ { 3, 1, 0, 2, 120, 127, },
+ { 4, 1, 0, 2, 120, 76, },
+ { 5, 1, 0, 2, 120, 127, },
+ { 6, 1, 0, 2, 120, 76, },
+ { 7, 1, 0, 2, 120, 54, },
+ { 8, 1, 0, 2, 120, 76, },
+ { 1, 1, 0, 2, 124, 76, },
+ { 3, 1, 0, 2, 124, 127, },
+ { 4, 1, 0, 2, 124, 76, },
+ { 5, 1, 0, 2, 124, 127, },
+ { 6, 1, 0, 2, 124, 76, },
+ { 7, 1, 0, 2, 124, 54, },
+ { 8, 1, 0, 2, 124, 76, },
+ { 1, 1, 0, 2, 128, 76, },
+ { 3, 1, 0, 2, 128, 127, },
+ { 4, 1, 0, 2, 128, 76, },
+ { 5, 1, 0, 2, 128, 127, },
+ { 6, 1, 0, 2, 128, 76, },
+ { 7, 1, 0, 2, 128, 54, },
+ { 8, 1, 0, 2, 128, 76, },
+ { 1, 1, 0, 2, 132, 76, },
+ { 3, 1, 0, 2, 132, 76, },
+ { 4, 1, 0, 2, 132, 76, },
+ { 5, 1, 0, 2, 132, 62, },
+ { 6, 1, 0, 2, 132, 76, },
+ { 7, 1, 0, 2, 132, 54, },
+ { 8, 1, 0, 2, 132, 76, },
+ { 1, 1, 0, 2, 136, 76, },
+ { 3, 1, 0, 2, 136, 76, },
+ { 4, 1, 0, 2, 136, 76, },
+ { 5, 1, 0, 2, 136, 62, },
+ { 6, 1, 0, 2, 136, 76, },
+ { 7, 1, 0, 2, 136, 127, },
+ { 8, 1, 0, 2, 136, 76, },
+ { 1, 1, 0, 2, 140, 76, },
+ { 3, 1, 0, 2, 140, 70, },
+ { 4, 1, 0, 2, 140, 76, },
+ { 5, 1, 0, 2, 140, 62, },
+ { 6, 1, 0, 2, 140, 70, },
+ { 7, 1, 0, 2, 140, 127, },
+ { 8, 1, 0, 2, 140, 70, },
+ { 1, 1, 0, 2, 144, 127, },
+ { 3, 1, 0, 2, 144, 76, },
+ { 4, 1, 0, 2, 144, 76, },
+ { 5, 1, 0, 2, 144, 127, },
+ { 6, 1, 0, 2, 144, 76, },
+ { 7, 1, 0, 2, 144, 127, },
+ { 8, 1, 0, 2, 144, 76, },
+ { 1, 1, 0, 2, 149, 127, },
+ { 3, 1, 0, 2, 149, 76, },
+ { 4, 1, 0, 2, 149, 74, },
+ { 5, 1, 0, 2, 149, 76, },
+ { 6, 1, 0, 2, 149, 76, },
+ { 7, 1, 0, 2, 149, 54, },
+ { 8, 1, 0, 2, 149, 76, },
+ { 1, 1, 0, 2, 153, 127, },
+ { 3, 1, 0, 2, 153, 76, },
+ { 4, 1, 0, 2, 153, 74, },
+ { 5, 1, 0, 2, 153, 76, },
+ { 6, 1, 0, 2, 153, 76, },
+ { 7, 1, 0, 2, 153, 54, },
+ { 8, 1, 0, 2, 153, 76, },
+ { 1, 1, 0, 2, 157, 127, },
+ { 3, 1, 0, 2, 157, 76, },
+ { 4, 1, 0, 2, 157, 74, },
+ { 5, 1, 0, 2, 157, 76, },
+ { 6, 1, 0, 2, 157, 76, },
+ { 7, 1, 0, 2, 157, 54, },
+ { 8, 1, 0, 2, 157, 76, },
+ { 1, 1, 0, 2, 161, 127, },
+ { 3, 1, 0, 2, 161, 76, },
+ { 4, 1, 0, 2, 161, 74, },
+ { 5, 1, 0, 2, 161, 76, },
+ { 6, 1, 0, 2, 161, 76, },
+ { 7, 1, 0, 2, 161, 54, },
+ { 8, 1, 0, 2, 161, 76, },
+ { 1, 1, 0, 2, 165, 127, },
+ { 3, 1, 0, 2, 165, 76, },
+ { 4, 1, 0, 2, 165, 74, },
+ { 5, 1, 0, 2, 165, 76, },
+ { 6, 1, 0, 2, 165, 76, },
+ { 7, 1, 0, 2, 165, 54, },
+ { 8, 1, 0, 2, 165, 76, },
+ { 1, 1, 0, 3, 36, 50, },
+ { 3, 1, 0, 3, 36, 38, },
+ { 4, 1, 0, 3, 36, 66, },
+ { 5, 1, 0, 3, 36, 38, },
+ { 6, 1, 0, 3, 36, 52, },
+ { 7, 1, 0, 3, 36, 30, },
+ { 8, 1, 0, 3, 36, 50, },
+ { 1, 1, 0, 3, 40, 50, },
+ { 3, 1, 0, 3, 40, 38, },
+ { 4, 1, 0, 3, 40, 66, },
+ { 5, 1, 0, 3, 40, 38, },
+ { 6, 1, 0, 3, 40, 52, },
+ { 7, 1, 0, 3, 40, 30, },
+ { 8, 1, 0, 3, 40, 50, },
+ { 1, 1, 0, 3, 44, 50, },
+ { 3, 1, 0, 3, 44, 38, },
+ { 4, 1, 0, 3, 44, 66, },
+ { 5, 1, 0, 3, 44, 38, },
+ { 6, 1, 0, 3, 44, 52, },
+ { 7, 1, 0, 3, 44, 30, },
+ { 8, 1, 0, 3, 44, 50, },
+ { 1, 1, 0, 3, 48, 50, },
+ { 3, 1, 0, 3, 48, 38, },
+ { 4, 1, 0, 3, 48, 66, },
+ { 5, 1, 0, 3, 48, 38, },
+ { 6, 1, 0, 3, 48, 52, },
+ { 7, 1, 0, 3, 48, 30, },
+ { 8, 1, 0, 3, 48, 50, },
+ { 1, 1, 0, 3, 52, 50, },
+ { 3, 1, 0, 3, 52, 40, },
+ { 4, 1, 0, 3, 52, 66, },
+ { 5, 1, 0, 3, 52, 38, },
+ { 6, 1, 0, 3, 52, 68, },
+ { 7, 1, 0, 3, 52, 30, },
+ { 8, 1, 0, 3, 52, 68, },
+ { 1, 1, 0, 3, 56, 50, },
+ { 3, 1, 0, 3, 56, 40, },
+ { 4, 1, 0, 3, 56, 66, },
+ { 5, 1, 0, 3, 56, 38, },
+ { 6, 1, 0, 3, 56, 68, },
+ { 7, 1, 0, 3, 56, 30, },
+ { 8, 1, 0, 3, 56, 68, },
+ { 1, 1, 0, 3, 60, 50, },
+ { 3, 1, 0, 3, 60, 40, },
+ { 4, 1, 0, 3, 60, 66, },
+ { 5, 1, 0, 3, 60, 38, },
+ { 6, 1, 0, 3, 60, 66, },
+ { 7, 1, 0, 3, 60, 30, },
+ { 8, 1, 0, 3, 60, 66, },
+ { 1, 1, 0, 3, 64, 50, },
+ { 3, 1, 0, 3, 64, 40, },
+ { 4, 1, 0, 3, 64, 66, },
+ { 5, 1, 0, 3, 64, 38, },
+ { 6, 1, 0, 3, 64, 68, },
+ { 7, 1, 0, 3, 64, 30, },
+ { 8, 1, 0, 3, 64, 68, },
+ { 1, 1, 0, 3, 100, 70, },
+ { 3, 1, 0, 3, 100, 60, },
+ { 4, 1, 0, 3, 100, 64, },
+ { 5, 1, 0, 3, 100, 38, },
+ { 6, 1, 0, 3, 100, 60, },
+ { 7, 1, 0, 3, 100, 30, },
+ { 8, 1, 0, 3, 100, 60, },
+ { 1, 1, 0, 3, 104, 70, },
+ { 3, 1, 0, 3, 104, 68, },
+ { 4, 1, 0, 3, 104, 64, },
+ { 5, 1, 0, 3, 104, 38, },
+ { 6, 1, 0, 3, 104, 68, },
+ { 7, 1, 0, 3, 104, 30, },
+ { 8, 1, 0, 3, 104, 68, },
+ { 1, 1, 0, 3, 108, 70, },
+ { 3, 1, 0, 3, 108, 68, },
+ { 4, 1, 0, 3, 108, 64, },
+ { 5, 1, 0, 3, 108, 38, },
+ { 6, 1, 0, 3, 108, 68, },
+ { 7, 1, 0, 3, 108, 30, },
+ { 8, 1, 0, 3, 108, 68, },
+ { 1, 1, 0, 3, 112, 70, },
+ { 3, 1, 0, 3, 112, 68, },
+ { 4, 1, 0, 3, 112, 64, },
+ { 5, 1, 0, 3, 112, 38, },
+ { 6, 1, 0, 3, 112, 68, },
+ { 7, 1, 0, 3, 112, 30, },
+ { 8, 1, 0, 3, 112, 68, },
+ { 1, 1, 0, 3, 116, 70, },
+ { 3, 1, 0, 3, 116, 68, },
+ { 4, 1, 0, 3, 116, 64, },
+ { 5, 1, 0, 3, 116, 38, },
+ { 6, 1, 0, 3, 116, 68, },
+ { 7, 1, 0, 3, 116, 30, },
+ { 8, 1, 0, 3, 116, 68, },
+ { 1, 1, 0, 3, 120, 70, },
+ { 3, 1, 0, 3, 120, 127, },
+ { 4, 1, 0, 3, 120, 64, },
+ { 5, 1, 0, 3, 120, 127, },
+ { 6, 1, 0, 3, 120, 68, },
+ { 7, 1, 0, 3, 120, 30, },
+ { 8, 1, 0, 3, 120, 68, },
+ { 1, 1, 0, 3, 124, 70, },
+ { 3, 1, 0, 3, 124, 127, },
+ { 4, 1, 0, 3, 124, 64, },
+ { 5, 1, 0, 3, 124, 127, },
+ { 6, 1, 0, 3, 124, 68, },
+ { 7, 1, 0, 3, 124, 30, },
+ { 8, 1, 0, 3, 124, 68, },
+ { 1, 1, 0, 3, 128, 70, },
+ { 3, 1, 0, 3, 128, 127, },
+ { 4, 1, 0, 3, 128, 64, },
+ { 5, 1, 0, 3, 128, 127, },
+ { 6, 1, 0, 3, 128, 68, },
+ { 7, 1, 0, 3, 128, 30, },
+ { 8, 1, 0, 3, 128, 68, },
+ { 1, 1, 0, 3, 132, 70, },
+ { 3, 1, 0, 3, 132, 68, },
+ { 4, 1, 0, 3, 132, 64, },
+ { 5, 1, 0, 3, 132, 38, },
+ { 6, 1, 0, 3, 132, 68, },
+ { 7, 1, 0, 3, 132, 30, },
+ { 8, 1, 0, 3, 132, 68, },
+ { 1, 1, 0, 3, 136, 70, },
+ { 3, 1, 0, 3, 136, 68, },
+ { 4, 1, 0, 3, 136, 64, },
+ { 5, 1, 0, 3, 136, 38, },
+ { 6, 1, 0, 3, 136, 68, },
+ { 7, 1, 0, 3, 136, 127, },
+ { 8, 1, 0, 3, 136, 68, },
+ { 1, 1, 0, 3, 140, 70, },
+ { 3, 1, 0, 3, 140, 60, },
+ { 4, 1, 0, 3, 140, 64, },
+ { 5, 1, 0, 3, 140, 38, },
+ { 6, 1, 0, 3, 140, 60, },
+ { 7, 1, 0, 3, 140, 127, },
+ { 8, 1, 0, 3, 140, 60, },
+ { 1, 1, 0, 3, 144, 127, },
+ { 3, 1, 0, 3, 144, 68, },
+ { 4, 1, 0, 3, 144, 64, },
+ { 5, 1, 0, 3, 144, 127, },
+ { 6, 1, 0, 3, 144, 68, },
+ { 7, 1, 0, 3, 144, 127, },
+ { 8, 1, 0, 3, 144, 68, },
+ { 1, 1, 0, 3, 149, 127, },
+ { 3, 1, 0, 3, 149, 76, },
+ { 4, 1, 0, 3, 149, 60, },
+ { 5, 1, 0, 3, 149, 76, },
+ { 6, 1, 0, 3, 149, 76, },
+ { 7, 1, 0, 3, 149, 30, },
+ { 8, 1, 0, 3, 149, 72, },
+ { 1, 1, 0, 3, 153, 127, },
+ { 3, 1, 0, 3, 153, 76, },
+ { 4, 1, 0, 3, 153, 60, },
+ { 5, 1, 0, 3, 153, 76, },
+ { 6, 1, 0, 3, 153, 76, },
+ { 7, 1, 0, 3, 153, 30, },
+ { 8, 1, 0, 3, 153, 76, },
+ { 1, 1, 0, 3, 157, 127, },
+ { 3, 1, 0, 3, 157, 76, },
+ { 4, 1, 0, 3, 157, 60, },
+ { 5, 1, 0, 3, 157, 76, },
+ { 6, 1, 0, 3, 157, 76, },
+ { 7, 1, 0, 3, 157, 30, },
+ { 8, 1, 0, 3, 157, 76, },
+ { 1, 1, 0, 3, 161, 127, },
+ { 3, 1, 0, 3, 161, 76, },
+ { 4, 1, 0, 3, 161, 60, },
+ { 5, 1, 0, 3, 161, 76, },
+ { 6, 1, 0, 3, 161, 76, },
+ { 7, 1, 0, 3, 161, 30, },
+ { 8, 1, 0, 3, 161, 76, },
+ { 1, 1, 0, 3, 165, 127, },
+ { 3, 1, 0, 3, 165, 76, },
+ { 4, 1, 0, 3, 165, 60, },
+ { 5, 1, 0, 3, 165, 76, },
+ { 6, 1, 0, 3, 165, 76, },
+ { 7, 1, 0, 3, 165, 30, },
+ { 8, 1, 0, 3, 165, 76, },
+ { 1, 1, 1, 2, 38, 62, },
+ { 3, 1, 1, 2, 38, 64, },
+ { 4, 1, 1, 2, 38, 72, },
+ { 5, 1, 1, 2, 38, 64, },
+ { 6, 1, 1, 2, 38, 64, },
+ { 7, 1, 1, 2, 38, 54, },
+ { 8, 1, 1, 2, 38, 62, },
+ { 1, 1, 1, 2, 46, 62, },
+ { 3, 1, 1, 2, 46, 64, },
+ { 4, 1, 1, 2, 46, 72, },
+ { 5, 1, 1, 2, 46, 64, },
+ { 6, 1, 1, 2, 46, 64, },
+ { 7, 1, 1, 2, 46, 54, },
+ { 8, 1, 1, 2, 46, 62, },
+ { 1, 1, 1, 2, 54, 62, },
+ { 3, 1, 1, 2, 54, 64, },
+ { 4, 1, 1, 2, 54, 72, },
+ { 5, 1, 1, 2, 54, 64, },
+ { 6, 1, 1, 2, 54, 72, },
+ { 7, 1, 1, 2, 54, 54, },
+ { 8, 1, 1, 2, 54, 72, },
+ { 1, 1, 1, 2, 62, 62, },
+ { 3, 1, 1, 2, 62, 64, },
+ { 4, 1, 1, 2, 62, 70, },
+ { 5, 1, 1, 2, 62, 64, },
+ { 6, 1, 1, 2, 62, 64, },
+ { 7, 1, 1, 2, 62, 54, },
+ { 8, 1, 1, 2, 62, 64, },
+ { 1, 1, 1, 2, 102, 72, },
+ { 3, 1, 1, 2, 102, 58, },
+ { 4, 1, 1, 2, 102, 72, },
+ { 5, 1, 1, 2, 102, 64, },
+ { 6, 1, 1, 2, 102, 58, },
+ { 7, 1, 1, 2, 102, 54, },
+ { 8, 1, 1, 2, 102, 58, },
+ { 1, 1, 1, 2, 110, 72, },
+ { 3, 1, 1, 2, 110, 72, },
+ { 4, 1, 1, 2, 110, 72, },
+ { 5, 1, 1, 2, 110, 64, },
+ { 6, 1, 1, 2, 110, 72, },
+ { 7, 1, 1, 2, 110, 54, },
+ { 8, 1, 1, 2, 110, 72, },
+ { 1, 1, 1, 2, 118, 72, },
+ { 3, 1, 1, 2, 118, 127, },
+ { 4, 1, 1, 2, 118, 72, },
+ { 5, 1, 1, 2, 118, 127, },
+ { 6, 1, 1, 2, 118, 72, },
+ { 7, 1, 1, 2, 118, 54, },
+ { 8, 1, 1, 2, 118, 72, },
+ { 1, 1, 1, 2, 126, 72, },
+ { 3, 1, 1, 2, 126, 127, },
+ { 4, 1, 1, 2, 126, 72, },
+ { 5, 1, 1, 2, 126, 127, },
+ { 6, 1, 1, 2, 126, 72, },
+ { 7, 1, 1, 2, 126, 54, },
+ { 8, 1, 1, 2, 126, 72, },
+ { 1, 1, 1, 2, 134, 72, },
+ { 3, 1, 1, 2, 134, 72, },
+ { 4, 1, 1, 2, 134, 72, },
+ { 5, 1, 1, 2, 134, 64, },
+ { 6, 1, 1, 2, 134, 72, },
+ { 7, 1, 1, 2, 134, 127, },
+ { 8, 1, 1, 2, 134, 72, },
+ { 1, 1, 1, 2, 142, 127, },
+ { 3, 1, 1, 2, 142, 72, },
+ { 4, 1, 1, 2, 142, 72, },
+ { 5, 1, 1, 2, 142, 127, },
+ { 6, 1, 1, 2, 142, 72, },
+ { 7, 1, 1, 2, 142, 127, },
+ { 8, 1, 1, 2, 142, 72, },
+ { 1, 1, 1, 2, 151, 127, },
+ { 3, 1, 1, 2, 151, 72, },
+ { 4, 1, 1, 2, 151, 72, },
+ { 5, 1, 1, 2, 151, 72, },
+ { 6, 1, 1, 2, 151, 72, },
+ { 7, 1, 1, 2, 151, 54, },
+ { 8, 1, 1, 2, 151, 72, },
+ { 1, 1, 1, 2, 159, 127, },
+ { 3, 1, 1, 2, 159, 72, },
+ { 4, 1, 1, 2, 159, 72, },
+ { 5, 1, 1, 2, 159, 72, },
+ { 6, 1, 1, 2, 159, 72, },
+ { 7, 1, 1, 2, 159, 54, },
+ { 8, 1, 1, 2, 159, 72, },
+ { 1, 1, 1, 3, 38, 50, },
+ { 3, 1, 1, 3, 38, 40, },
+ { 4, 1, 1, 3, 38, 62, },
+ { 5, 1, 1, 3, 38, 40, },
+ { 6, 1, 1, 3, 38, 52, },
+ { 7, 1, 1, 3, 38, 30, },
+ { 8, 1, 1, 3, 38, 50, },
+ { 1, 1, 1, 3, 46, 50, },
+ { 3, 1, 1, 3, 46, 40, },
+ { 4, 1, 1, 3, 46, 62, },
+ { 5, 1, 1, 3, 46, 40, },
+ { 6, 1, 1, 3, 46, 52, },
+ { 7, 1, 1, 3, 46, 30, },
+ { 8, 1, 1, 3, 46, 50, },
+ { 1, 1, 1, 3, 54, 50, },
+ { 3, 1, 1, 3, 54, 40, },
+ { 4, 1, 1, 3, 54, 62, },
+ { 5, 1, 1, 3, 54, 40, },
+ { 6, 1, 1, 3, 54, 68, },
+ { 7, 1, 1, 3, 54, 30, },
+ { 8, 1, 1, 3, 54, 68, },
+ { 1, 1, 1, 3, 62, 48, },
+ { 3, 1, 1, 3, 62, 40, },
+ { 4, 1, 1, 3, 62, 58, },
+ { 5, 1, 1, 3, 62, 40, },
+ { 6, 1, 1, 3, 62, 58, },
+ { 7, 1, 1, 3, 62, 30, },
+ { 8, 1, 1, 3, 62, 58, },
+ { 1, 1, 1, 3, 102, 70, },
+ { 3, 1, 1, 3, 102, 54, },
+ { 4, 1, 1, 3, 102, 64, },
+ { 5, 1, 1, 3, 102, 40, },
+ { 6, 1, 1, 3, 102, 54, },
+ { 7, 1, 1, 3, 102, 30, },
+ { 8, 1, 1, 3, 102, 54, },
+ { 1, 1, 1, 3, 110, 70, },
+ { 3, 1, 1, 3, 110, 68, },
+ { 4, 1, 1, 3, 110, 64, },
+ { 5, 1, 1, 3, 110, 40, },
+ { 6, 1, 1, 3, 110, 68, },
+ { 7, 1, 1, 3, 110, 30, },
+ { 8, 1, 1, 3, 110, 68, },
+ { 1, 1, 1, 3, 118, 70, },
+ { 3, 1, 1, 3, 118, 127, },
+ { 4, 1, 1, 3, 118, 64, },
+ { 5, 1, 1, 3, 118, 127, },
+ { 6, 1, 1, 3, 118, 68, },
+ { 7, 1, 1, 3, 118, 30, },
+ { 8, 1, 1, 3, 118, 68, },
+ { 1, 1, 1, 3, 126, 70, },
+ { 3, 1, 1, 3, 126, 127, },
+ { 4, 1, 1, 3, 126, 64, },
+ { 5, 1, 1, 3, 126, 127, },
+ { 6, 1, 1, 3, 126, 68, },
+ { 7, 1, 1, 3, 126, 30, },
+ { 8, 1, 1, 3, 126, 68, },
+ { 1, 1, 1, 3, 134, 70, },
+ { 3, 1, 1, 3, 134, 68, },
+ { 4, 1, 1, 3, 134, 64, },
+ { 5, 1, 1, 3, 134, 40, },
+ { 6, 1, 1, 3, 134, 68, },
+ { 7, 1, 1, 3, 134, 127, },
+ { 8, 1, 1, 3, 134, 68, },
+ { 1, 1, 1, 3, 142, 127, },
+ { 3, 1, 1, 3, 142, 68, },
+ { 4, 1, 1, 3, 142, 64, },
+ { 5, 1, 1, 3, 142, 127, },
+ { 6, 1, 1, 3, 142, 68, },
+ { 7, 1, 1, 3, 142, 127, },
+ { 8, 1, 1, 3, 142, 68, },
+ { 1, 1, 1, 3, 151, 127, },
+ { 3, 1, 1, 3, 151, 72, },
+ { 4, 1, 1, 3, 151, 66, },
+ { 5, 1, 1, 3, 151, 72, },
+ { 6, 1, 1, 3, 151, 72, },
+ { 7, 1, 1, 3, 151, 30, },
+ { 8, 1, 1, 3, 151, 68, },
+ { 1, 1, 1, 3, 159, 127, },
+ { 3, 1, 1, 3, 159, 72, },
+ { 4, 1, 1, 3, 159, 66, },
+ { 5, 1, 1, 3, 159, 72, },
+ { 6, 1, 1, 3, 159, 72, },
+ { 7, 1, 1, 3, 159, 30, },
+ { 8, 1, 1, 3, 159, 72, },
+ { 1, 1, 2, 4, 42, 64, },
+ { 3, 1, 2, 4, 42, 64, },
+ { 4, 1, 2, 4, 42, 68, },
+ { 5, 1, 2, 4, 42, 64, },
+ { 6, 1, 2, 4, 42, 64, },
+ { 7, 1, 2, 4, 42, 54, },
+ { 8, 1, 2, 4, 42, 62, },
+ { 1, 1, 2, 4, 58, 64, },
+ { 3, 1, 2, 4, 58, 62, },
+ { 4, 1, 2, 4, 58, 64, },
+ { 5, 1, 2, 4, 58, 64, },
+ { 6, 1, 2, 4, 58, 62, },
+ { 7, 1, 2, 4, 58, 54, },
+ { 8, 1, 2, 4, 58, 62, },
+ { 1, 1, 2, 4, 106, 72, },
+ { 3, 1, 2, 4, 106, 58, },
+ { 4, 1, 2, 4, 106, 66, },
+ { 5, 1, 2, 4, 106, 64, },
+ { 6, 1, 2, 4, 106, 58, },
+ { 7, 1, 2, 4, 106, 54, },
+ { 8, 1, 2, 4, 106, 58, },
+ { 1, 1, 2, 4, 122, 72, },
+ { 3, 1, 2, 4, 122, 127, },
+ { 4, 1, 2, 4, 122, 68, },
+ { 5, 1, 2, 4, 122, 127, },
+ { 6, 1, 2, 4, 122, 72, },
+ { 7, 1, 2, 4, 122, 54, },
+ { 8, 1, 2, 4, 122, 72, },
+ { 1, 1, 2, 4, 138, 127, },
+ { 3, 1, 2, 4, 138, 72, },
+ { 4, 1, 2, 4, 138, 68, },
+ { 5, 1, 2, 4, 138, 127, },
+ { 6, 1, 2, 4, 138, 72, },
+ { 7, 1, 2, 4, 138, 127, },
+ { 8, 1, 2, 4, 138, 72, },
+ { 1, 1, 2, 4, 155, 127, },
+ { 3, 1, 2, 4, 155, 72, },
+ { 4, 1, 2, 4, 155, 68, },
+ { 5, 1, 2, 4, 155, 72, },
+ { 6, 1, 2, 4, 155, 72, },
+ { 7, 1, 2, 4, 155, 54, },
+ { 8, 1, 2, 4, 155, 68, },
+ { 1, 1, 2, 5, 42, 50, },
+ { 3, 1, 2, 5, 42, 40, },
+ { 4, 1, 2, 5, 42, 58, },
+ { 5, 1, 2, 5, 42, 40, },
+ { 6, 1, 2, 5, 42, 52, },
+ { 7, 1, 2, 5, 42, 30, },
+ { 8, 1, 2, 5, 42, 50, },
+ { 1, 1, 2, 5, 58, 50, },
+ { 3, 1, 2, 5, 58, 40, },
+ { 4, 1, 2, 5, 58, 56, },
+ { 5, 1, 2, 5, 58, 40, },
+ { 6, 1, 2, 5, 58, 52, },
+ { 7, 1, 2, 5, 58, 30, },
+ { 8, 1, 2, 5, 58, 52, },
+ { 1, 1, 2, 5, 106, 72, },
+ { 3, 1, 2, 5, 106, 50, },
+ { 4, 1, 2, 5, 106, 56, },
+ { 5, 1, 2, 5, 106, 40, },
+ { 6, 1, 2, 5, 106, 50, },
+ { 7, 1, 2, 5, 106, 30, },
+ { 8, 1, 2, 5, 106, 50, },
+ { 1, 1, 2, 5, 122, 72, },
+ { 3, 1, 2, 5, 122, 127, },
+ { 4, 1, 2, 5, 122, 56, },
+ { 5, 1, 2, 5, 122, 127, },
+ { 6, 1, 2, 5, 122, 66, },
+ { 7, 1, 2, 5, 122, 30, },
+ { 8, 1, 2, 5, 122, 66, },
+ { 1, 1, 2, 5, 138, 127, },
+ { 3, 1, 2, 5, 138, 66, },
+ { 4, 1, 2, 5, 138, 58, },
+ { 5, 1, 2, 5, 138, 127, },
+ { 6, 1, 2, 5, 138, 66, },
+ { 7, 1, 2, 5, 138, 127, },
+ { 8, 1, 2, 5, 138, 66, },
+ { 1, 1, 2, 5, 155, 127, },
+ { 3, 1, 2, 5, 155, 62, },
+ { 4, 1, 2, 5, 155, 58, },
+ { 5, 1, 2, 5, 155, 72, },
+ { 6, 1, 2, 5, 155, 62, },
+ { 7, 1, 2, 5, 155, 30, },
+ { 8, 1, 2, 5, 155, 62, },
};
RTW_DECL_TABLE_TXPWR_LMT(rtw8822c_txpwr_lmt_type0);
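Each row in the rtw8822c_txpwr_lmt_type0 table above encodes one power-limit entry. Reading the columns against the rtw88 driver's txpwr-limit convention suggests the layout sketched below; the struct and field names here are an illustrative assumption, not a quote of the driver header. A last-column value of 127 appears to serve as a "limit not defined" sentinel (for example, channels 120-128 under several regulatory domains).

struct txpwr_lmt_row_sketch {		/* assumed layout, for illustration only */
	u8 regd;	/* regulatory domain index: first column, 1..8 */
	u8 band;	/* 0 = 2.4 GHz (channels 1-14), 1 = 5 GHz (36-165) */
	u8 bw;		/* bandwidth index 0/1/2, presumably 20/40/80 MHz */
	u8 rs;		/* rate section: CCK/OFDM/HT/VHT grouping */
	u8 ch;		/* channel number */
	s8 txpwr_lmt;	/* power limit; 127 appears to mean "unspecified" */
};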
diff --git a/drivers/net/wireless/rsi/rsi_91x_mac80211.c b/drivers/net/wireless/rsi/rsi_91x_mac80211.c
index 49df3bb08d41..ce5e92d82efc 100644
--- a/drivers/net/wireless/rsi/rsi_91x_mac80211.c
+++ b/drivers/net/wireless/rsi/rsi_91x_mac80211.c
@@ -1818,7 +1818,8 @@ out:
return status;
}
-static int rsi_mac80211_cancel_roc(struct ieee80211_hw *hw)
+static int rsi_mac80211_cancel_roc(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
{
struct rsi_hw *adapter = hw->priv;
struct rsi_common *common = adapter->priv;
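This rsi hunk, like the wlcore hunk further below, tracks a mac80211 ops change: cancel_remain_on_channel now receives the vif the remain-on-channel request was issued on, so drivers managing several interfaces can cancel the right request. A minimal sketch of wiring the post-change callback; the ops-table pattern is standard mac80211, and only rsi_mac80211_cancel_roc comes from this diff:

static int rsi_mac80211_cancel_roc(struct ieee80211_hw *hw,
				   struct ieee80211_vif *vif);

static const struct ieee80211_ops sketch_ops = {
	/* ... other callbacks elided ... */
	.cancel_remain_on_channel = rsi_mac80211_cancel_roc,
};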
diff --git a/drivers/net/wireless/rsi/rsi_91x_sdio.c b/drivers/net/wireless/rsi/rsi_91x_sdio.c
index b42cd50b837e..1bebba4e8527 100644
--- a/drivers/net/wireless/rsi/rsi_91x_sdio.c
+++ b/drivers/net/wireless/rsi/rsi_91x_sdio.c
@@ -230,19 +230,16 @@ static void rsi_reset_card(struct sdio_func *pfunction)
rsi_dbg(ERR_ZONE, "%s: CMD0 failed : %d\n", __func__, err);
/* Issue CMD5, arg = 0 */
- if (!host->ocr_avail) {
- err = rsi_issue_sdiocommand(pfunction, SD_IO_SEND_OP_COND, 0,
- (MMC_RSP_R4 | MMC_CMD_BCR), &resp);
- if (err)
- rsi_dbg(ERR_ZONE, "%s: CMD5 failed : %d\n",
- __func__, err);
-
- host->ocr_avail = resp;
- }
+ err = rsi_issue_sdiocommand(pfunction, SD_IO_SEND_OP_COND, 0,
+ (MMC_RSP_R4 | MMC_CMD_BCR), &resp);
+ if (err)
+ rsi_dbg(ERR_ZONE, "%s: CMD5 failed : %d\n",
+ __func__, err);
+ card->ocr = resp;
/* Issue CMD5, arg = ocr. Wait till card is ready */
for (i = 0; i < 100; i++) {
err = rsi_issue_sdiocommand(pfunction, SD_IO_SEND_OP_COND,
- host->ocr_avail,
+ card->ocr,
(MMC_RSP_R4 | MMC_CMD_BCR), &resp);
if (err) {
rsi_dbg(ERR_ZONE, "%s: CMD5 failed : %d\n",
@@ -844,11 +841,11 @@ static int rsi_init_sdio_interface(struct rsi_hw *adapter,
struct sdio_func *pfunction)
{
struct rsi_91x_sdiodev *rsi_91x_dev;
- int status = -ENOMEM;
+ int status;
rsi_91x_dev = kzalloc(sizeof(*rsi_91x_dev), GFP_KERNEL);
if (!rsi_91x_dev)
- return status;
+ return -ENOMEM;
adapter->rsi_dev = rsi_91x_dev;
@@ -890,7 +887,7 @@ static int rsi_init_sdio_interface(struct rsi_hw *adapter,
#ifdef CONFIG_RSI_DEBUGFS
adapter->num_debugfs_entries = MAX_DEBUGFS_ENTRIES;
#endif
- return status;
+ return 0;
fail:
sdio_disable_func(pfunction);
sdio_release_host(pfunction);
@@ -944,7 +941,7 @@ static int rsi_sdio_ta_reset(struct rsi_hw *adapter)
put_unaligned_le32(TA_HOLD_THREAD_VALUE, data);
addr = TA_HOLD_THREAD_REG | RSI_SD_REQUEST_MASTER;
status = rsi_sdio_write_register_multiple(adapter, addr,
- (u8 *)&data,
+ (u8 *)data,
RSI_9116_REG_SIZE);
if (status < 0) {
rsi_dbg(ERR_ZONE, "Unable to hold TA threads\n");
@@ -954,7 +951,7 @@ static int rsi_sdio_ta_reset(struct rsi_hw *adapter)
put_unaligned_le32(TA_SOFT_RST_CLR, data);
addr = TA_SOFT_RESET_REG | RSI_SD_REQUEST_MASTER;
status = rsi_sdio_write_register_multiple(adapter, addr,
- (u8 *)&data,
+ (u8 *)data,
RSI_9116_REG_SIZE);
if (status < 0) {
rsi_dbg(ERR_ZONE, "Unable to get TA out of reset\n");
@@ -964,7 +961,7 @@ static int rsi_sdio_ta_reset(struct rsi_hw *adapter)
put_unaligned_le32(TA_PC_ZERO, data);
addr = TA_TH0_PC_REG | RSI_SD_REQUEST_MASTER;
status = rsi_sdio_write_register_multiple(adapter, addr,
- (u8 *)&data,
+ (u8 *)data,
RSI_9116_REG_SIZE);
if (status < 0) {
rsi_dbg(ERR_ZONE, "Unable to Reset TA PC value\n");
@@ -975,7 +972,7 @@ static int rsi_sdio_ta_reset(struct rsi_hw *adapter)
put_unaligned_le32(TA_RELEASE_THREAD_VALUE, data);
addr = TA_RELEASE_THREAD_REG | RSI_SD_REQUEST_MASTER;
status = rsi_sdio_write_register_multiple(adapter, addr,
- (u8 *)&data,
+ (u8 *)data,
RSI_9116_REG_SIZE);
if (status < 0) {
rsi_dbg(ERR_ZONE, "Unable to release TA threads\n");
diff --git a/drivers/net/wireless/ti/wlcore/main.c b/drivers/net/wireless/ti/wlcore/main.c
index b74dc8bc9755..547ad538d8b6 100644
--- a/drivers/net/wireless/ti/wlcore/main.c
+++ b/drivers/net/wireless/ti/wlcore/main.c
@@ -5749,7 +5749,8 @@ static void wlcore_roc_complete_work(struct work_struct *work)
ieee80211_remain_on_channel_expired(wl->hw);
}
-static int wlcore_op_cancel_remain_on_channel(struct ieee80211_hw *hw)
+static int wlcore_op_cancel_remain_on_channel(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
{
struct wl1271 *wl = hw->priv;
diff --git a/drivers/net/wireless/wl3501_cs.c b/drivers/net/wireless/wl3501_cs.c
index a25b17932edb..007bf6803293 100644
--- a/drivers/net/wireless/wl3501_cs.c
+++ b/drivers/net/wireless/wl3501_cs.c
@@ -1226,7 +1226,6 @@ fail:
static int wl3501_close(struct net_device *dev)
{
struct wl3501_card *this = netdev_priv(dev);
- int rc = -ENODEV;
unsigned long flags;
struct pcmcia_device *link;
link = this->p_dev;
@@ -1241,10 +1240,9 @@ static int wl3501_close(struct net_device *dev)
/* Mask interrupts from the SUTRO */
wl3501_block_interrupt(this);
- rc = 0;
printk(KERN_INFO "%s: WL3501 closed\n", dev->name);
spin_unlock_irqrestore(&this->lock, flags);
- return rc;
+ return 0;
}
/**
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
index c9262ffeefe4..0020b2e8c279 100644
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@ -136,12 +136,12 @@ static inline struct xenvif_queue *ubuf_to_queue(const struct ubuf_info *ubuf)
static u16 frag_get_pending_idx(skb_frag_t *frag)
{
- return (u16)frag->page_offset;
+ return (u16)skb_frag_off(frag);
}
static void frag_set_pending_idx(skb_frag_t *frag, u16 pending_idx)
{
- frag->page_offset = pending_idx;
+ skb_frag_off_set(frag, pending_idx);
}
static inline pending_ring_idx_t pending_index(unsigned i)
@@ -1057,7 +1057,7 @@ static int xenvif_handle_frag_list(struct xenvif_queue *queue, struct sk_buff *s
int j;
skb->truesize += skb->data_len;
for (j = 0; j < i; j++)
- put_page(frags[j].page.p);
+ put_page(skb_frag_page(&frags[j]));
return -ENOMEM;
}
@@ -1069,8 +1069,8 @@ static int xenvif_handle_frag_list(struct xenvif_queue *queue, struct sk_buff *s
BUG();
offset += len;
- frags[i].page.p = page;
- frags[i].page_offset = 0;
+ __skb_frag_set_page(&frags[i], page);
+ skb_frag_off_set(&frags[i], 0);
skb_frag_size_set(&frags[i], len);
}
@@ -1655,9 +1655,6 @@ static int __init netback_init(void)
#ifdef CONFIG_DEBUG_FS
xen_netback_dbg_root = debugfs_create_dir("xen-netback", NULL);
- if (IS_ERR_OR_NULL(xen_netback_dbg_root))
- pr_warn("Init of debugfs returned %ld!\n",
- PTR_ERR(xen_netback_dbg_root));
#endif /* CONFIG_DEBUG_FS */
return 0;
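The netback hunks above (and the xen-netfront hunks at the end of this diff) convert direct pokes at skb_frag_t fields into accessor helpers, insulating callers from the fragment struct's layout; the netback_init() hunk additionally drops the warning on debugfs_create_dir() failure, since debugfs callers are expected to ignore creation errors. A compact sketch of the accessor mapping, using only the four helpers that appear in the hunks themselves:

#include <linux/skbuff.h>

/* Sketch: each helper call replaces the direct field access noted in
 * its comment, exactly as the hunks above do. */
static void frag_accessor_sketch(skb_frag_t *frag)
{
	unsigned int off = skb_frag_off(frag);		/* was: frag->page_offset */
	struct page *page = skb_frag_page(frag);	/* was: frag->page.p */

	skb_frag_off_set(frag, off);			/* was: frag->page_offset = off */
	__skb_frag_set_page(frag, page);		/* was: frag->page.p = page */
}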
diff --git a/drivers/net/xen-netback/xenbus.c b/drivers/net/xen-netback/xenbus.c
index 41034264bd34..f533b7372d59 100644
--- a/drivers/net/xen-netback/xenbus.c
+++ b/drivers/net/xen-netback/xenbus.c
@@ -170,50 +170,26 @@ DEFINE_SHOW_ATTRIBUTE(xenvif_ctrl);
static void xenvif_debugfs_addif(struct xenvif *vif)
{
- struct dentry *pfile;
int i;
- if (IS_ERR_OR_NULL(xen_netback_dbg_root))
- return;
-
vif->xenvif_dbg_root = debugfs_create_dir(vif->dev->name,
xen_netback_dbg_root);
- if (!IS_ERR_OR_NULL(vif->xenvif_dbg_root)) {
- for (i = 0; i < vif->num_queues; ++i) {
- char filename[sizeof("io_ring_q") + 4];
-
- snprintf(filename, sizeof(filename), "io_ring_q%d", i);
- pfile = debugfs_create_file(filename,
- 0600,
- vif->xenvif_dbg_root,
- &vif->queues[i],
- &xenvif_dbg_io_ring_ops_fops);
- if (IS_ERR_OR_NULL(pfile))
- pr_warn("Creation of io_ring file returned %ld!\n",
- PTR_ERR(pfile));
- }
+ for (i = 0; i < vif->num_queues; ++i) {
+ char filename[sizeof("io_ring_q") + 4];
- if (vif->ctrl_irq) {
- pfile = debugfs_create_file("ctrl",
- 0400,
- vif->xenvif_dbg_root,
- vif,
- &xenvif_ctrl_fops);
- if (IS_ERR_OR_NULL(pfile))
- pr_warn("Creation of ctrl file returned %ld!\n",
- PTR_ERR(pfile));
- }
- } else
- netdev_warn(vif->dev,
- "Creation of vif debugfs dir returned %ld!\n",
- PTR_ERR(vif->xenvif_dbg_root));
+ snprintf(filename, sizeof(filename), "io_ring_q%d", i);
+ debugfs_create_file(filename, 0600, vif->xenvif_dbg_root,
+ &vif->queues[i],
+ &xenvif_dbg_io_ring_ops_fops);
+ }
+
+ if (vif->ctrl_irq)
+ debugfs_create_file("ctrl", 0400, vif->xenvif_dbg_root, vif,
+ &xenvif_ctrl_fops);
}
static void xenvif_debugfs_delif(struct xenvif *vif)
{
- if (IS_ERR_OR_NULL(xen_netback_dbg_root))
- return;
-
debugfs_remove_recursive(vif->xenvif_dbg_root);
vif->xenvif_dbg_root = NULL;
}
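The xenbus.c rewrite adopts the tree-wide debugfs convention: do not check creation results. debugfs_create_file() copes with an error-pointer parent, and debugfs_remove_recursive() is a no-op on a NULL or error-pointer dentry, so the guard code and warning printks can simply go away. A minimal sketch of the idiom with hypothetical names (my_dir, my_fops):

#include <linux/debugfs.h>

static struct dentry *my_dir;
static const struct file_operations my_fops;	/* hypothetical attribute ops */

static void my_debugfs_add(void)
{
	my_dir = debugfs_create_dir("my-driver", NULL);
	/* No IS_ERR_OR_NULL() dance: if my_dir is an error pointer,
	 * debugfs_create_file() quietly backs off and the driver keeps
	 * working without the attribute. */
	debugfs_create_file("state", 0400, my_dir, NULL, &my_fops);
}

static void my_debugfs_del(void)
{
	debugfs_remove_recursive(my_dir);	/* NULL/ERR_PTR safe */
	my_dir = NULL;
}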
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index 8d33970a2950..b930d5f95222 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -531,7 +531,7 @@ static int xennet_count_skb_slots(struct sk_buff *skb)
for (i = 0; i < frags; i++) {
skb_frag_t *frag = skb_shinfo(skb)->frags + i;
unsigned long size = skb_frag_size(frag);
- unsigned long offset = frag->page_offset;
+ unsigned long offset = skb_frag_off(frag);
/* Skip unused frames from start of page */
offset &= ~PAGE_MASK;
@@ -674,8 +674,8 @@ static netdev_tx_t xennet_start_xmit(struct sk_buff *skb, struct net_device *dev
/* Requests for all the frags. */
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
- tx = xennet_make_txreqs(queue, tx, skb,
- skb_frag_page(frag), frag->page_offset,
+ tx = xennet_make_txreqs(queue, tx, skb, skb_frag_page(frag),
+ skb_frag_off(frag),
skb_frag_size(frag));
}
@@ -1040,7 +1040,7 @@ err:
if (NETFRONT_SKB_CB(skb)->pull_to > RX_COPY_THRESHOLD)
NETFRONT_SKB_CB(skb)->pull_to = RX_COPY_THRESHOLD;
- skb_shinfo(skb)->frags[0].page_offset = rx->offset;
+ skb_frag_off_set(&skb_shinfo(skb)->frags[0], rx->offset);
skb_frag_size_set(&skb_shinfo(skb)->frags[0], rx->status);
skb->data_len = rx->status;
skb->len += rx->status;