Diffstat (limited to 'drivers')
 drivers/atm/eni.c | 2
 drivers/atm/he.c | 7
 drivers/atm/solos-pci.c | 9
 drivers/bcma/host_pci.c | 6
 drivers/crypto/chelsio/chtls/chtls_io.c | 6
 drivers/hsi/clients/ssi_protocol.c | 3
 drivers/infiniband/hw/hfi1/vnic_sdma.c | 4
 drivers/infiniband/hw/mlx5/devx.c | 1
 drivers/infiniband/hw/mlx5/srq_cmd.c | 6
 drivers/infiniband/ulp/ipoib/ipoib_ib.c | 3
 drivers/isdn/hardware/mISDN/avmfritz.c | 3
 drivers/isdn/hardware/mISDN/hfcpci.c | 6
 drivers/isdn/hardware/mISDN/mISDNipac.c | 12
 drivers/isdn/hardware/mISDN/mISDNisar.c | 3
 drivers/isdn/hardware/mISDN/netjet.c | 3
 drivers/isdn/hardware/mISDN/w6692.c | 9
 drivers/isdn/mISDN/l1oip_core.c | 3
 drivers/isdn/mISDN/layer2.c | 9
 drivers/isdn/mISDN/stack.c | 6
 drivers/isdn/mISDN/tei.c | 6
 drivers/net/arcnet/capmode.c | 2
 drivers/net/bonding/bond_debugfs.c | 5
 drivers/net/caif/caif_serial.c | 26
 drivers/net/caif/caif_virtio.c | 6
 drivers/net/can/Kconfig | 13
 drivers/net/can/Makefile | 1
 drivers/net/can/flexcan.c | 138
 drivers/net/can/janz-ican3.c | 1
 drivers/net/can/kvaser_pciefd.c | 1907
 drivers/net/can/m_can/Kconfig | 22
 drivers/net/can/m_can/Makefile | 2
 drivers/net/can/m_can/m_can.c | 1079
 drivers/net/can/m_can/m_can.h | 110
 drivers/net/can/m_can/m_can_platform.c | 201
 drivers/net/can/m_can/tcan4x5x.c | 527
 drivers/net/can/rcar/rcar_can.c | 23
 drivers/net/can/rcar/rcar_canfd.c | 2
 drivers/net/can/sja1000/Kconfig | 79
 drivers/net/can/sja1000/Makefile | 11
 drivers/net/can/sja1000/f81601.c | 211
 drivers/net/can/sja1000/peak_pci.c | 2
 drivers/net/can/spi/hi311x.c | 62
 drivers/net/can/spi/mcp251x.c | 121
 drivers/net/can/sun4i_can.c | 1
 drivers/net/can/ti_hecc.c | 268
 drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c | 3
 drivers/net/can/vcan.c | 19
 drivers/net/can/xilinx_can.c | 292
 drivers/net/dsa/b53/b53_common.c | 7
 drivers/net/dsa/b53/b53_srab.c | 8
 drivers/net/dsa/bcm_sf2.c | 7
 drivers/net/dsa/lan9303-core.c | 6
 drivers/net/dsa/lantiq_gswip.c | 16
 drivers/net/dsa/microchip/Kconfig | 18
 drivers/net/dsa/microchip/Makefile | 2
 drivers/net/dsa/microchip/ksz8795.c | 1310
 drivers/net/dsa/microchip/ksz8795_reg.h | 1004
 drivers/net/dsa/microchip/ksz8795_spi.c | 103
 drivers/net/dsa/microchip/ksz9477.c | 1
 drivers/net/dsa/microchip/ksz9477_spi.c | 1
 drivers/net/dsa/microchip/ksz_common.c | 21
 drivers/net/dsa/microchip/ksz_common.h | 173
 drivers/net/dsa/microchip/ksz_priv.h | 155
 drivers/net/dsa/mt7530.c | 6
 drivers/net/dsa/mv88e6xxx/Makefile | 1
 drivers/net/dsa/mv88e6xxx/chip.c | 576
 drivers/net/dsa/mv88e6xxx/chip.h | 41
 drivers/net/dsa/mv88e6xxx/global1.c | 95
 drivers/net/dsa/mv88e6xxx/global1.h | 5
 drivers/net/dsa/mv88e6xxx/global1_atu.c | 7
 drivers/net/dsa/mv88e6xxx/global1_vtu.c | 6
 drivers/net/dsa/mv88e6xxx/global2.c | 72
 drivers/net/dsa/mv88e6xxx/global2.h | 12
 drivers/net/dsa/mv88e6xxx/global2_avb.c | 29
 drivers/net/dsa/mv88e6xxx/global2_scratch.c | 3
 drivers/net/dsa/mv88e6xxx/port.c | 135
 drivers/net/dsa/mv88e6xxx/port.h | 32
 drivers/net/dsa/mv88e6xxx/port_hidden.c | 70
 drivers/net/dsa/mv88e6xxx/ptp.c | 95
 drivers/net/dsa/mv88e6xxx/ptp.h | 6
 drivers/net/dsa/mv88e6xxx/serdes.c | 473
 drivers/net/dsa/mv88e6xxx/serdes.h | 100
 drivers/net/dsa/mv88e6xxx/smi.c | 4
 drivers/net/dsa/sja1105/sja1105_main.c | 16
 drivers/net/ethernet/3com/3c59x.c | 8
 drivers/net/ethernet/aeroflex/greth.c | 2
 drivers/net/ethernet/agere/et131x.c | 6
 drivers/net/ethernet/amd/au1000_eth.c | 1
 drivers/net/ethernet/amd/ni65.c | 6
 drivers/net/ethernet/amd/xgbe/xgbe-debugfs.c | 107
 drivers/net/ethernet/amd/xgbe/xgbe-desc.c | 2
 drivers/net/ethernet/amd/xgbe/xgbe-drv.c | 2
 drivers/net/ethernet/amd/xgbe/xgbe-platform.c | 33
 drivers/net/ethernet/apm/xgene-v2/main.c | 4
 drivers/net/ethernet/apm/xgene/xgene_enet_hw.c | 10
 drivers/net/ethernet/apm/xgene/xgene_enet_main.c | 8
 drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c | 10
 drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.c | 10
 drivers/net/ethernet/apple/bmac.c | 4
 drivers/net/ethernet/atheros/ag71xx.c | 4
 drivers/net/ethernet/atheros/alx/main.c | 10
 drivers/net/ethernet/atheros/atl1c/atl1c_main.c | 12
 drivers/net/ethernet/atheros/atl1e/atl1e_main.c | 3
 drivers/net/ethernet/atheros/atlx/atl1.c | 11
 drivers/net/ethernet/aurora/nb8800.c | 4
 drivers/net/ethernet/broadcom/bcm63xx_enet.c | 9
 drivers/net/ethernet/broadcom/bcmsysport.c | 7
 drivers/net/ethernet/broadcom/bgmac-platform.c | 4
 drivers/net/ethernet/broadcom/bgmac.c | 2
 drivers/net/ethernet/broadcom/bnx2.c | 6
 drivers/net/ethernet/broadcom/bnxt/bnxt.c | 1457
 drivers/net/ethernet/broadcom/bnxt/bnxt.h | 187
 drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c | 6
 drivers/net/ethernet/broadcom/bnxt/bnxt_debugfs.c | 39
 drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c | 197
 drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.h | 1
 drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c | 195
 drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h | 109
 drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c | 181
 drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.h | 1
 drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c | 17
 drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c | 3
 drivers/net/ethernet/broadcom/cnic.c | 5
 drivers/net/ethernet/broadcom/genet/bcmgenet.c | 15
 drivers/net/ethernet/broadcom/tg3.c | 6
 drivers/net/ethernet/brocade/bna/bnad.c | 2
 drivers/net/ethernet/calxeda/xgmac.c | 2
 drivers/net/ethernet/cavium/liquidio/lio_main.c | 23
 drivers/net/ethernet/cavium/liquidio/lio_vf_main.c | 23
 drivers/net/ethernet/cavium/liquidio/octeon_mailbox.c | 4
 drivers/net/ethernet/cavium/thunder/nicvf_queues.c | 6
 drivers/net/ethernet/chelsio/cxgb3/sge.c | 4
 drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c | 5
 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c | 3
 drivers/net/ethernet/chelsio/cxgb4/smt.c | 18
 drivers/net/ethernet/chelsio/cxgb4/smt.h | 2
 drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c | 21
 drivers/net/ethernet/cirrus/cs89x0.c | 7
 drivers/net/ethernet/cortina/gemini.c | 9
 drivers/net/ethernet/davicom/dm9000.c | 2
 drivers/net/ethernet/emulex/benet/be.h | 2
 drivers/net/ethernet/emulex/benet/be_ethtool.c | 7
 drivers/net/ethernet/emulex/benet/be_main.c | 21
 drivers/net/ethernet/ezchip/nps_enet.c | 4
 drivers/net/ethernet/faraday/Kconfig | 1
 drivers/net/ethernet/faraday/ftgmac100.c | 39
 drivers/net/ethernet/freescale/dpaa/dpaa_eth.c | 8
 drivers/net/ethernet/freescale/dpaa/dpaa_eth.h | 3
 drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-debugfs.c | 54
 drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-debugfs.h | 3
 drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c | 86
 drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h | 7
 drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c | 97
 drivers/net/ethernet/freescale/dpaa2/dpni-cmd.h | 3
 drivers/net/ethernet/freescale/dpaa2/dpni.c | 40
 drivers/net/ethernet/freescale/dpaa2/dpni.h | 5
 drivers/net/ethernet/freescale/enetc/Kconfig | 9
 drivers/net/ethernet/freescale/enetc/Makefile | 19
 drivers/net/ethernet/freescale/enetc/enetc.c | 2
 drivers/net/ethernet/freescale/enetc/enetc_mdio.c | 97
 drivers/net/ethernet/freescale/enetc/enetc_mdio.h | 12
 drivers/net/ethernet/freescale/enetc/enetc_pci_mdio.c | 101
 drivers/net/ethernet/freescale/enetc/enetc_pf.c | 5
 drivers/net/ethernet/freescale/fec_main.c | 78
 drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c | 2
 drivers/net/ethernet/freescale/gianfar.c | 3
 drivers/net/ethernet/hisilicon/hisi_femac.c | 1
 drivers/net/ethernet/hisilicon/hix5hd2_gmac.c | 2
 drivers/net/ethernet/hisilicon/hns/hns_enet.c | 6
 drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h | 2
 drivers/net/ethernet/hisilicon/hns3/hnae3.c | 9
 drivers/net/ethernet/hisilicon/hns3/hnae3.h | 45
 drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c | 46
 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c | 363
 drivers/net/ethernet/hisilicon/hns3/hns3_enet.h | 38
 drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c | 148
 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c | 43
 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h | 60
 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c | 63
 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c | 414
 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.h | 19
 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c | 151
 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.h | 1
 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c | 1052
 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h | 71
 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c | 47
 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c | 2
 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c | 20
 drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c | 16
 drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.h | 7
 drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c | 100
 drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h | 7
 drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c | 10
 drivers/net/ethernet/huawei/hinic/hinic_tx.c | 2
 drivers/net/ethernet/ibm/ehea/ehea_main.c | 12
 drivers/net/ethernet/ibm/emac/core.c | 2
 drivers/net/ethernet/ibm/ibmvnic.c | 2
 drivers/net/ethernet/intel/e1000/e1000_ethtool.c | 3
 drivers/net/ethernet/intel/e1000/e1000_main.c | 6
 drivers/net/ethernet/intel/e1000e/ethtool.c | 6
 drivers/net/ethernet/intel/e1000e/ich8lan.c | 10
 drivers/net/ethernet/intel/e1000e/ich8lan.h | 2
 drivers/net/ethernet/intel/e1000e/netdev.c | 10
 drivers/net/ethernet/intel/fm10k/fm10k.h | 10
 drivers/net/ethernet/intel/fm10k/fm10k_dcbnl.c | 6
 drivers/net/ethernet/intel/fm10k/fm10k_debugfs.c | 2
 drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c | 15
 drivers/net/ethernet/intel/fm10k/fm10k_iov.c | 5
 drivers/net/ethernet/intel/fm10k/fm10k_main.c | 20
 drivers/net/ethernet/intel/fm10k/fm10k_mbx.c | 11
 drivers/net/ethernet/intel/fm10k/fm10k_netdev.c | 23
 drivers/net/ethernet/intel/fm10k/fm10k_pci.c | 24
 drivers/net/ethernet/intel/fm10k/fm10k_pf.c | 15
 drivers/net/ethernet/intel/fm10k/fm10k_tlv.c | 9
 drivers/net/ethernet/intel/fm10k/fm10k_type.h | 2
 drivers/net/ethernet/intel/fm10k/fm10k_vf.c | 25
 drivers/net/ethernet/intel/i40e/i40e.h | 1
 drivers/net/ethernet/intel/i40e/i40e_adminq.c | 4
 drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h | 33
 drivers/net/ethernet/intel/i40e/i40e_common.c | 110
 drivers/net/ethernet/intel/i40e/i40e_dcb.c | 18
 drivers/net/ethernet/intel/i40e/i40e_dcb.h | 2
 drivers/net/ethernet/intel/i40e/i40e_debugfs.c | 22
 drivers/net/ethernet/intel/i40e/i40e_ethtool.c | 80
 drivers/net/ethernet/intel/i40e/i40e_main.c | 155
 drivers/net/ethernet/intel/i40e/i40e_nvm.c | 101
 drivers/net/ethernet/intel/i40e/i40e_prototype.h | 8
 drivers/net/ethernet/intel/i40e/i40e_register.h | 30
 drivers/net/ethernet/intel/i40e/i40e_txrx.c | 6
 drivers/net/ethernet/intel/i40e/i40e_txrx.h | 2
 drivers/net/ethernet/intel/i40e/i40e_type.h | 3
 drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c | 61
 drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h | 1
 drivers/net/ethernet/intel/iavf/iavf_txrx.c | 6
 drivers/net/ethernet/intel/iavf/iavf_txrx.h | 2
 drivers/net/ethernet/intel/ice/ice.h | 32
 drivers/net/ethernet/intel/ice/ice_adminq_cmd.h | 1
 drivers/net/ethernet/intel/ice/ice_common.c | 146
 drivers/net/ethernet/intel/ice/ice_common.h | 6
 drivers/net/ethernet/intel/ice/ice_controlq.c | 112
 drivers/net/ethernet/intel/ice/ice_dcb.c | 3
 drivers/net/ethernet/intel/ice/ice_dcb_lib.c | 200
 drivers/net/ethernet/intel/ice/ice_ethtool.c | 144
 drivers/net/ethernet/intel/ice/ice_hw_autogen.h | 34
 drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h | 1
 drivers/net/ethernet/intel/ice/ice_lib.c | 565
 drivers/net/ethernet/intel/ice/ice_lib.h | 32
 drivers/net/ethernet/intel/ice/ice_main.c | 655
 drivers/net/ethernet/intel/ice/ice_sched.c | 57
 drivers/net/ethernet/intel/ice/ice_switch.c | 56
 drivers/net/ethernet/intel/ice/ice_txrx.c | 161
 drivers/net/ethernet/intel/ice/ice_txrx.h | 13
 drivers/net/ethernet/intel/ice/ice_type.h | 6
 drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c | 543
 drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h | 39
 drivers/net/ethernet/intel/igb/igb_main.c | 11
 drivers/net/ethernet/intel/igbvf/netdev.c | 2
 drivers/net/ethernet/intel/igc/igc_base.c | 5
 drivers/net/ethernet/intel/igc/igc_defines.h | 2
 drivers/net/ethernet/intel/igc/igc_hw.h | 14
 drivers/net/ethernet/intel/igc/igc_main.c | 11
 drivers/net/ethernet/intel/ixgb/ixgb_main.c | 4
 drivers/net/ethernet/intel/ixgbe/ixgbe_debugfs.c | 22
 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 13
 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c | 9
 drivers/net/ethernet/jme.c | 15
 drivers/net/ethernet/lantiq_xrx200.c | 10
 drivers/net/ethernet/marvell/mv643xx_eth.c | 2
 drivers/net/ethernet/marvell/mvneta.c | 8
 drivers/net/ethernet/marvell/mvneta_bm.c | 4
 drivers/net/ethernet/marvell/mvpp2/mvpp2.h | 3
 drivers/net/ethernet/marvell/mvpp2/mvpp2_debugfs.c | 19
 drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c | 75
 drivers/net/ethernet/marvell/pxa168_eth.c | 3
 drivers/net/ethernet/marvell/skge.c | 45
 drivers/net/ethernet/marvell/sky2.c | 3
 drivers/net/ethernet/mediatek/Kconfig | 4
 drivers/net/ethernet/mediatek/mtk_eth_path.c | 71
 drivers/net/ethernet/mediatek/mtk_eth_soc.c | 999
 drivers/net/ethernet/mediatek/mtk_eth_soc.h | 123
 drivers/net/ethernet/mediatek/mtk_sgmii.c | 65
 drivers/net/ethernet/mellanox/mlx4/crdump.c | 7
 drivers/net/ethernet/mellanox/mlx4/en_netdev.c | 43
 drivers/net/ethernet/mellanox/mlx4/en_tx.c | 4
 drivers/net/ethernet/mellanox/mlx4/main.c | 66
 drivers/net/ethernet/mellanox/mlx5/core/Makefile | 9
 drivers/net/ethernet/mellanox/mlx5/core/cmd.c | 55
 drivers/net/ethernet/mellanox/mlx5/core/debugfs.c | 102
 drivers/net/ethernet/mellanox/mlx5/core/diag/en_rep_tracepoint.h | 54
 drivers/net/ethernet/mellanox/mlx5/core/diag/en_tc_tracepoint.c | 58
 drivers/net/ethernet/mellanox/mlx5/core/diag/en_tc_tracepoint.h | 114
 drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c | 2
 drivers/net/ethernet/mellanox/mlx5/core/en.h | 67
 drivers/net/ethernet/mellanox/mlx5/core/en/fs.h | 25
 drivers/net/ethernet/mellanox/mlx5/core/en/health.c | 205
 drivers/net/ethernet/mellanox/mlx5/core/en/health.h | 53
 drivers/net/ethernet/mellanox/mlx5/core/en/hv_vhca_stats.c | 162
 drivers/net/ethernet/mellanox/mlx5/core/en/hv_vhca_stats.h | 25
 drivers/net/ethernet/mellanox/mlx5/core/en/reporter.h | 15
 drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c | 404
 drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c | 256
 drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c | 33
 drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h | 24
 drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c | 36
 drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h | 66
 drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c | 29
 drivers/net/ethernet/mellanox/mlx5/core/en/xsk/tx.c | 7
 drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c | 2
 drivers/net/ethernet/mellanox/mlx5/core/en_common.c | 12
 drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c | 2
 drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c | 36
 drivers/net/ethernet/mellanox/mlx5/core/en_fs.c | 50
 drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c | 11
 drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 187
 drivers/net/ethernet/mellanox/mlx5/core/en_rep.c | 316
 drivers/net/ethernet/mellanox/mlx5/core/en_rep.h | 17
 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c | 88
 drivers/net/ethernet/mellanox/mlx5/core/en_stats.c | 47
 drivers/net/ethernet/mellanox/mlx5/core/en_stats.h | 5
 drivers/net/ethernet/mellanox/mlx5/core/en_tc.c | 1068
 drivers/net/ethernet/mellanox/mlx5/core/en_tc.h | 44
 drivers/net/ethernet/mellanox/mlx5/core/en_tx.c | 9
 drivers/net/ethernet/mellanox/mlx5/core/eq.c | 45
 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c | 222
 drivers/net/ethernet/mellanox/mlx5/core/eswitch.h | 35
 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c | 89
 drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c | 75
 drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.h | 16
 drivers/net/ethernet/mellanox/mlx5/core/fs_core.c | 120
 drivers/net/ethernet/mellanox/mlx5/core/fs_core.h | 3
 drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c | 446
 drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c | 4
 drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c | 6
 drivers/net/ethernet/mellanox/mlx5/core/lib/crypto.c | 1
 drivers/net/ethernet/mellanox/mlx5/core/lib/eq.h | 2
 drivers/net/ethernet/mellanox/mlx5/core/lib/hv.c | 64
 drivers/net/ethernet/mellanox/mlx5/core/lib/hv.h | 22
 drivers/net/ethernet/mellanox/mlx5/core/lib/hv_vhca.c | 371
 drivers/net/ethernet/mellanox/mlx5/core/lib/hv_vhca.h | 104
 drivers/net/ethernet/mellanox/mlx5/core/lib/vxlan.c | 9
 drivers/net/ethernet/mellanox/mlx5/core/main.c | 25
 drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h | 2
 drivers/net/ethernet/mellanox/mlx5/core/qp.c | 8
 drivers/net/ethernet/mellanox/mlx5/core/rdma.c | 2
 drivers/net/ethernet/mellanox/mlx5/core/vport.c | 7
 drivers/net/ethernet/mellanox/mlx5/core/wq.c | 5
 drivers/net/ethernet/mellanox/mlx5/core/wq.h | 1
 drivers/net/ethernet/mellanox/mlxsw/Kconfig | 6
 drivers/net/ethernet/mellanox/mlxsw/Makefile | 2
 drivers/net/ethernet/mellanox/mlxsw/core.c | 64
 drivers/net/ethernet/mellanox/mlxsw/core.h | 12
 drivers/net/ethernet/mellanox/mlxsw/pci.h | 1
 drivers/net/ethernet/mellanox/mlxsw/reg.h | 13
 drivers/net/ethernet/mellanox/mlxsw/spectrum.c | 240
 drivers/net/ethernet/mellanox/mlxsw/spectrum.h | 37
 drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c | 17
 drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c | 22
 drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c | 67
 drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.h | 32
 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c | 4
 drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.c | 267
 drivers/net/ethernet/mellanox/mlxsw/switchx2.c | 6
 drivers/net/ethernet/mellanox/mlxsw/trap.h | 7
 drivers/net/ethernet/micrel/ks8842.c | 4
 drivers/net/ethernet/micrel/ks8851_mll.c | 7
 drivers/net/ethernet/microchip/lan743x_main.c | 5
 drivers/net/ethernet/microchip/lan743x_ptp.c | 3
 drivers/net/ethernet/mscc/ocelot.c | 401
 drivers/net/ethernet/mscc/ocelot.h | 49
 drivers/net/ethernet/mscc/ocelot_board.c | 145
 drivers/net/ethernet/mscc/ocelot_ptp.h | 41
 drivers/net/ethernet/mscc/ocelot_regs.c | 11
 drivers/net/ethernet/myricom/myri10ge/myri10ge.c | 19
 drivers/net/ethernet/netronome/nfp/flower/action.c | 160
 drivers/net/ethernet/netronome/nfp/flower/cmsg.h | 25
 drivers/net/ethernet/netronome/nfp/flower/main.c | 1
 drivers/net/ethernet/netronome/nfp/flower/main.h | 19
 drivers/net/ethernet/netronome/nfp/flower/offload.c | 197
 drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c | 200
 drivers/net/ethernet/netronome/nfp/nfp_main.c | 2
 drivers/net/ethernet/netronome/nfp/nfp_net_common.c | 6
 drivers/net/ethernet/netronome/nfp/nfp_net_debugfs.c | 17
 drivers/net/ethernet/nuvoton/w90p910_ether.c | 2
 drivers/net/ethernet/nvidia/forcedeth.c | 3
 drivers/net/ethernet/packetengines/yellowfin.c | 3
 drivers/net/ethernet/qlogic/Kconfig | 9
 drivers/net/ethernet/qlogic/Makefile | 1
 drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c | 4
 drivers/net/ethernet/qlogic/qed/qed_debug.c | 82
 drivers/net/ethernet/qlogic/qed/qed_hsi.h | 32
 drivers/net/ethernet/qlogic/qed/qed_l2.c | 34
 drivers/net/ethernet/qlogic/qed/qed_main.c | 122
 drivers/net/ethernet/qlogic/qed/qed_mcp.c | 61
 drivers/net/ethernet/qlogic/qed/qed_mcp.h | 35
 drivers/net/ethernet/qlogic/qede/qede.h | 15
 drivers/net/ethernet/qlogic/qede/qede_ethtool.c | 117
 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c | 2
 drivers/net/ethernet/qualcomm/emac/emac-mac.c | 12
 drivers/net/ethernet/qualcomm/emac/emac.c | 12
 drivers/net/ethernet/qualcomm/qca_debug.c | 13
 drivers/net/ethernet/qualcomm/qca_spi.c | 3
 drivers/net/ethernet/qualcomm/qca_uart.c | 3
 drivers/net/ethernet/realtek/Kconfig | 9
 drivers/net/ethernet/realtek/r8169_main.c | 1087
 drivers/net/ethernet/samsung/sxgbe/sxgbe_platform.c | 4
 drivers/net/ethernet/sfc/ef10.c | 4
 drivers/net/ethernet/sfc/efx.c | 10
 drivers/net/ethernet/sfc/falcon/efx.c | 6
 drivers/net/ethernet/sfc/falcon/falcon_boards.c | 18
 drivers/net/ethernet/sfc/falcon/rx.c | 5
 drivers/net/ethernet/sfc/rx.c | 5
 drivers/net/ethernet/sfc/tx.c | 2
 drivers/net/ethernet/sgi/ioc3-eth.c | 1038
 drivers/net/ethernet/sgi/meth.c | 3
 drivers/net/ethernet/sis/sis900.c | 68
 drivers/net/ethernet/smsc/smc91x.c | 3
 drivers/net/ethernet/socionext/sni_ave.c | 8
 drivers/net/ethernet/stmicro/stmmac/Kconfig | 1
 drivers/net/ethernet/stmicro/stmmac/common.h | 16
 drivers/net/ethernet/stmicro/stmmac/dwmac-anarion.c | 4
 drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c | 14
 drivers/net/ethernet/stmicro/stmmac/dwmac-meson.c | 4
 drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c | 4
 drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h | 132
 drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c | 787
 drivers/net/ethernet/stmicro/stmmac/dwxgmac2_descs.c | 114
 drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c | 41
 drivers/net/ethernet/stmicro/stmmac/hwif.c | 4
 drivers/net/ethernet/stmicro/stmmac/hwif.h | 47
 drivers/net/ethernet/stmicro/stmmac/mmc.h | 9
 drivers/net/ethernet/stmicro/stmmac/mmc_core.c | 192
 drivers/net/ethernet/stmicro/stmmac/stmmac.h | 23
 drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c | 105
 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | 435
 drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c | 4
 drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c | 173
 drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c | 29
 drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c | 3
 drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c | 516
 drivers/net/ethernet/sun/cassini.c | 8
 drivers/net/ethernet/sun/niu.c | 2
 drivers/net/ethernet/sun/sunvnet_common.c | 7
 drivers/net/ethernet/synopsys/dwc-xlgmac-desc.c | 2
 drivers/net/ethernet/synopsys/dwc-xlgmac-net.c | 2
 drivers/net/ethernet/tehuti/tehuti.c | 2
 drivers/net/ethernet/ti/cpsw.c | 5
 drivers/net/ethernet/ti/netcp_core.c | 2
 drivers/net/ethernet/toshiba/spider_net.c | 6
 drivers/net/ethernet/via/via-rhine.c | 4
 drivers/net/hamradio/baycom_epp.c | 3
 drivers/net/hamradio/hdlcdrv.c | 3
 drivers/net/hamradio/mkiss.c | 11
 drivers/net/hyperv/netvsc_drv.c | 4
 drivers/net/ieee802154/adf7242.c | 13
 drivers/net/ieee802154/at86rf230.c | 20
 drivers/net/ieee802154/ca8210.c | 9
 drivers/net/ipvlan/ipvlan_main.c | 1
 drivers/net/netdevsim/dev.c | 414
 drivers/net/netdevsim/netdevsim.h | 4
 drivers/net/phy/Kconfig | 26
 drivers/net/phy/Makefile | 2
 drivers/net/phy/adin.c | 720
 drivers/net/phy/at803x.c | 4
 drivers/net/phy/dp83822.c | 5
 drivers/net/phy/dp83848.c | 11
 drivers/net/phy/dp83tc811.c | 4
 drivers/net/phy/mdio-aspeed.c | 157
 drivers/net/phy/mdio-bcm-iproc.c | 4
 drivers/net/phy/mdio-cavium.h | 2
 drivers/net/phy/mdio-hisi-femac.c | 4
 drivers/net/phy/mdio-moxart.c | 4
 drivers/net/phy/mdio-mux-meson-g12a.c | 4
 drivers/net/phy/mdio-sun4i.c | 4
 drivers/net/phy/mdio-xgene.c | 4
 drivers/net/phy/meson-gxl.c | 2
 drivers/net/phy/microchip.c | 1
 drivers/net/phy/microchip_t1.c | 1
 drivers/net/phy/mscc.c | 4
 drivers/net/phy/phy-core.c | 66
 drivers/net/phy/phy.c | 62
 drivers/net/phy/phy_device.c | 111
 drivers/net/phy/realtek.c | 188
 drivers/net/phy/sfp.c | 73
 drivers/net/phy/swphy.c | 8
 drivers/net/phy/vitesse.c | 6
 drivers/net/slip/slhc.c | 30
 drivers/net/thunderbolt.c | 2
 drivers/net/usb/asix_common.c | 9
 drivers/net/usb/ax88179_178a.c | 14
 drivers/net/usb/lan78xx.c | 23
 drivers/net/usb/lg-vl600.c | 4
 drivers/net/usb/r8152.c | 599
 drivers/net/usb/rtl8150.c | 6
 drivers/net/usb/smsc75xx.c | 20
 drivers/net/usb/sr9800.c | 9
 drivers/net/usb/usbnet.c | 6
 drivers/net/vmxnet3/vmxnet3_drv.c | 9
 drivers/net/wimax/i2400m/debugfs.c | 150
 drivers/net/wimax/i2400m/driver.c | 7
 drivers/net/wimax/i2400m/fw.c | 9
 drivers/net/wimax/i2400m/i2400m.h | 7
 drivers/net/wimax/i2400m/rx.c | 1
 drivers/net/wimax/i2400m/usb.c | 64
 drivers/net/wireless/ath/ath10k/mac.c | 3
 drivers/net/wireless/ath/ath9k/main.c | 3
 drivers/net/wireless/ath/wil6210/cfg80211.c | 2
 drivers/net/wireless/ath/wil6210/debugfs.c | 3
 drivers/net/wireless/ath/wil6210/txrx.c | 9
 drivers/net/wireless/ath/wil6210/txrx_edma.c | 2
 drivers/net/wireless/broadcom/b43legacy/phy.c | 21
 drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcdc.c | 11
 drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcdc.h | 6
 drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c | 61
 drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c | 1
 drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c | 30
 drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c | 1
 drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.h | 4
 drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.c | 9
 drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c | 16
 drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.h | 3
 drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c | 7
 drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c | 3
 drivers/net/wireless/broadcom/brcm80211/brcmfmac/proto.c | 10
 drivers/net/wireless/broadcom/brcm80211/brcmfmac/proto.h | 3
 drivers/net/wireless/broadcom/brcm80211/brcmsmac/main.c | 13
 drivers/net/wireless/intel/ipw2x00/ipw2100.c | 2
 drivers/net/wireless/intel/iwlegacy/common.c | 3
 drivers/net/wireless/intel/iwlwifi/dvm/rs.c | 29
 drivers/net/wireless/intel/iwlwifi/dvm/rs.h | 4
 drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c | 3
 drivers/net/wireless/intel/iwlwifi/mvm/rs.c | 5
 drivers/net/wireless/mac80211_hwsim.c | 20
 drivers/net/wireless/marvell/libertas/if_spi.c | 14
 drivers/net/wireless/marvell/libertas/if_usb.c | 3
 drivers/net/wireless/marvell/libertas/main.c | 2
 drivers/net/wireless/marvell/libertas_tf/cmd.c | 2
 drivers/net/wireless/marvell/mwifiex/init.c | 2
 drivers/net/wireless/marvell/mwifiex/pcie.c | 8
 drivers/net/wireless/marvell/mwifiex/scan.c | 2
 drivers/net/wireless/marvell/mwifiex/tdls.c | 3
 drivers/net/wireless/mediatek/mt76/mt7603/soc.c | 4
 drivers/net/wireless/mediatek/mt7601u/init.c | 3
 drivers/net/wireless/mediatek/mt7601u/main.c | 4
 drivers/net/wireless/quantenna/qtnfmac/pcie/pcie.c | 4
 drivers/net/wireless/ralink/rt2x00/rt2800usb.c | 1
 drivers/net/wireless/ralink/rt2x00/rt2x00debug.c | 136
 drivers/net/wireless/ralink/rt2x00/rt2x00usb.c | 9
 drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8723b.c | 11
 drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c | 3
 drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.c | 8
 drivers/net/wireless/realtek/rtlwifi/pci.c | 6
 drivers/net/wireless/realtek/rtlwifi/regd.c | 18
 drivers/net/wireless/realtek/rtlwifi/rtl8188ee/dm.c | 7
 drivers/net/wireless/realtek/rtlwifi/rtl8188ee/sw.c | 4
 drivers/net/wireless/realtek/rtlwifi/rtl8188ee/trx.c | 257
 drivers/net/wireless/realtek/rtlwifi/rtl8188ee/trx.h | 1046
 drivers/net/wireless/realtek/rtlwifi/rtl8192ce/sw.c | 2
 drivers/net/wireless/realtek/rtlwifi/rtl8192ce/trx.c | 215
 drivers/net/wireless/realtek/rtlwifi/rtl8192ce/trx.h | 803
 drivers/net/wireless/realtek/rtlwifi/rtl8192cu/sw.c | 2
 drivers/net/wireless/realtek/rtlwifi/rtl8192se/sw.c | 2
 drivers/net/wireless/realtek/rtlwifi/rtl8723ae/sw.c | 4
 drivers/net/wireless/realtek/rtlwifi/rtl8723be/sw.c | 4
 drivers/net/wireless/realtek/rtlwifi/rtl8821ae/sw.c | 4
 drivers/net/wireless/realtek/rtw88/Makefile | 1
 drivers/net/wireless/realtek/rtw88/coex.c | 2507
 drivers/net/wireless/realtek/rtw88/coex.h | 369
 drivers/net/wireless/realtek/rtw88/debug.c | 112
 drivers/net/wireless/realtek/rtw88/fw.c | 135
 drivers/net/wireless/realtek/rtw88/fw.h | 73
 drivers/net/wireless/realtek/rtw88/mac80211.c | 19
 drivers/net/wireless/realtek/rtw88/main.c | 45
 drivers/net/wireless/realtek/rtw88/main.h | 233
 drivers/net/wireless/realtek/rtw88/pci.c | 74
 drivers/net/wireless/realtek/rtw88/phy.c | 15
 drivers/net/wireless/realtek/rtw88/phy.h | 9
 drivers/net/wireless/realtek/rtw88/ps.c | 9
 drivers/net/wireless/realtek/rtw88/reg.h | 62
 drivers/net/wireless/realtek/rtw88/rtw8822b.c | 462
 drivers/net/wireless/realtek/rtw88/rtw8822b_table.c | 1564
 drivers/net/wireless/realtek/rtw88/rtw8822c.c | 355
 drivers/net/wireless/realtek/rtw88/rtw8822c_table.c | 2635
 drivers/net/wireless/rsi/rsi_91x_mac80211.c | 3
 drivers/net/wireless/rsi/rsi_91x_sdio.c | 31
 drivers/net/wireless/ti/wlcore/main.c | 3
 drivers/net/wireless/wl3501_cs.c | 4
 drivers/net/xen-netback/netback.c | 13
 drivers/net/xen-netback/xenbus.c | 46
 drivers/net/xen-netfront.c | 8
 drivers/nfc/nxp-nci/Kconfig | 7
 drivers/nfc/nxp-nci/core.c | 2
 drivers/nfc/nxp-nci/i2c.c | 134
 drivers/nfc/nxp-nci/nxp-nci.h | 1
 drivers/pci/Kconfig | 1
 drivers/pci/controller/Kconfig | 7
 drivers/pci/controller/Makefile | 1
 drivers/pci/controller/pci-hyperv-intf.c | 67
 drivers/pci/controller/pci-hyperv.c | 308
 drivers/ptp/ptp_dte.c | 5
 drivers/s390/cio/qdio.h | 3
 drivers/s390/cio/qdio_main.c | 75
 drivers/s390/cio/qdio_setup.c | 2
 drivers/s390/net/ctcm_fsms.c | 42
 drivers/s390/net/ctcm_main.c | 6
 drivers/s390/net/ctcm_mpc.c | 6
 drivers/s390/net/lcs.c | 6
 drivers/s390/net/qeth_core.h | 88
 drivers/s390/net/qeth_core_main.c | 673
 drivers/s390/net/qeth_core_mpc.h | 1
 drivers/s390/net/qeth_ethtool.c | 2
 drivers/s390/net/qeth_l2_main.c | 74
 drivers/s390/net/qeth_l3_main.c | 33
 drivers/scsi/bnx2fc/bnx2fc_fcoe.c | 2
 drivers/scsi/fcoe/fcoe.c | 3
 drivers/scsi/fcoe/fcoe_transport.c | 4
 drivers/scsi/qedf/qedf_main.c | 2
 drivers/staging/Kconfig | 2
 drivers/staging/Makefile | 1
 drivers/staging/octeon/Kconfig | 3
 drivers/staging/octeon/ethernet-defines.h | 2
 drivers/staging/octeon/ethernet-mdio.c | 6
 drivers/staging/octeon/ethernet-mem.c | 5
 drivers/staging/octeon/ethernet-rgmii.c | 10
 drivers/staging/octeon/ethernet-rx.c | 13
 drivers/staging/octeon/ethernet-rx.h | 2
 drivers/staging/octeon/ethernet-sgmii.c | 8
 drivers/staging/octeon/ethernet-spi.c | 10
 drivers/staging/octeon/ethernet-tx.c | 19
 drivers/staging/octeon/ethernet-util.h | 4
 drivers/staging/octeon/ethernet.c | 12
 drivers/staging/octeon/octeon-ethernet.h | 29
 drivers/staging/octeon/octeon-stubs.h | 1429
 drivers/staging/qlge/Kconfig | 10
 drivers/staging/qlge/Makefile (renamed from drivers/net/ethernet/qlogic/qlge/Makefile) | 0
 drivers/staging/qlge/TODO | 46
 drivers/staging/qlge/qlge.h (renamed from drivers/net/ethernet/qlogic/qlge/qlge.h) | 0
 drivers/staging/qlge/qlge_dbg.c (renamed from drivers/net/ethernet/qlogic/qlge/qlge_dbg.c) | 0
 drivers/staging/qlge/qlge_ethtool.c (renamed from drivers/net/ethernet/qlogic/qlge/qlge_ethtool.c) | 0
 drivers/staging/qlge/qlge_main.c (renamed from drivers/net/ethernet/qlogic/qlge/qlge_main.c) | 0
 drivers/staging/qlge/qlge_mpi.c (renamed from drivers/net/ethernet/qlogic/qlge/qlge_mpi.c) | 0
 drivers/staging/unisys/visornic/visornic_main.c | 6
 drivers/target/iscsi/cxgbit/cxgbit_target.c | 13
 drivers/vhost/vsock.c | 68
 643 files changed, 39380 insertions(+), 12966 deletions(-)
diff --git a/drivers/atm/eni.c b/drivers/atm/eni.c
index 79b718430cd1..b23d1e4bad33 100644
--- a/drivers/atm/eni.c
+++ b/drivers/atm/eni.c
@@ -1136,7 +1136,7 @@ DPRINTK("doing direct send\n"); /* @@@ well, this doesn't work anyway */
else
put_dma(tx->index,eni_dev->dma,&j,(unsigned long)
skb_frag_page(&skb_shinfo(skb)->frags[i]) +
- skb_shinfo(skb)->frags[i].page_offset,
+ skb_frag_off(&skb_shinfo(skb)->frags[i]),
skb_frag_size(&skb_shinfo(skb)->frags[i]));
}
if (skb->len & 3) {
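This eni.c hunk is the first of several in this pull that convert direct skb_frag_t field access (frag->page_offset, frag->size) to the accessor helpers; the he.c, chtls, ssi_protocol, hfi1 and ipoib hunks below follow the same pattern, with skb_frag_size_add() covering the read-modify-write case in chtls_io.c. A minimal sketch of the accessor style, where queue_desc() is a hypothetical driver hook, not anything from the patch:

#include <linux/skbuff.h>
#include <linux/dma-mapping.h>

static void queue_desc(dma_addr_t addr, unsigned int len); /* hypothetical */

static void tx_map_frags(struct device *dma_dev, struct sk_buff *skb)
{
	unsigned int i;

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
		/* skb_frag_dma_map() already folds in the frag's page offset,
		 * so no open-coded page_address() + offset arithmetic
		 */
		dma_addr_t addr = skb_frag_dma_map(dma_dev, frag, 0,
						   skb_frag_size(frag),
						   DMA_TO_DEVICE);

		queue_desc(addr, skb_frag_size(frag));
	}
}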
diff --git a/drivers/atm/he.c b/drivers/atm/he.c
index 211607986134..70b00ae4ec38 100644
--- a/drivers/atm/he.c
+++ b/drivers/atm/he.c
@@ -2580,10 +2580,9 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
slot = 0;
}
- tpd->iovec[slot].addr = dma_map_single(&he_dev->pci_dev->dev,
- (void *) page_address(frag->page) + frag->page_offset,
- frag->size, DMA_TO_DEVICE);
- tpd->iovec[slot].len = frag->size;
+ tpd->iovec[slot].addr = skb_frag_dma_map(&he_dev->pci_dev->dev,
+ frag, 0, skb_frag_size(frag), DMA_TO_DEVICE);
+ tpd->iovec[slot].len = skb_frag_size(frag);
++slot;
}
diff --git a/drivers/atm/solos-pci.c b/drivers/atm/solos-pci.c
index 5c4c6eeb505c..c32f7dd9879a 100644
--- a/drivers/atm/solos-pci.c
+++ b/drivers/atm/solos-pci.c
@@ -516,9 +516,8 @@ struct geos_gpio_attr {
static ssize_t geos_gpio_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
- struct pci_dev *pdev = to_pci_dev(dev);
struct geos_gpio_attr *gattr = container_of(attr, struct geos_gpio_attr, attr);
- struct solos_card *card = pci_get_drvdata(pdev);
+ struct solos_card *card = dev_get_drvdata(dev);
uint32_t data32;
if (count != 1 && (count != 2 || buf[1] != '\n'))
@@ -542,9 +541,8 @@ static ssize_t geos_gpio_store(struct device *dev, struct device_attribute *attr
static ssize_t geos_gpio_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
- struct pci_dev *pdev = to_pci_dev(dev);
struct geos_gpio_attr *gattr = container_of(attr, struct geos_gpio_attr, attr);
- struct solos_card *card = pci_get_drvdata(pdev);
+ struct solos_card *card = dev_get_drvdata(dev);
uint32_t data32;
data32 = ioread32(card->config_regs + GPIO_STATUS);
@@ -556,9 +554,8 @@ static ssize_t geos_gpio_show(struct device *dev, struct device_attribute *attr,
static ssize_t hardware_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
- struct pci_dev *pdev = to_pci_dev(dev);
struct geos_gpio_attr *gattr = container_of(attr, struct geos_gpio_attr, attr);
- struct solos_card *card = pci_get_drvdata(pdev);
+ struct solos_card *card = dev_get_drvdata(dev);
uint32_t data32;
data32 = ioread32(card->config_regs + GPIO_STATUS);
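The three solos-pci attribute callbacks above, and the bcma suspend/resume pair below, apply one mechanical cleanup: pci_set_drvdata() stores through the embedded struct device, so pci_get_drvdata(to_pci_dev(dev)) is just dev_get_drvdata(dev) with an extra hop. A sketch of the shortened form, with foo_card standing in for a hypothetical driver context:

#include <linux/device.h>

struct foo_card; /* hypothetical driver state */

static int foo_suspend(struct device *dev)
{
	/* same pointer pci_get_drvdata(to_pci_dev(dev)) would return */
	struct foo_card *card = dev_get_drvdata(dev);

	return card ? 0 : -ENODEV;
}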
diff --git a/drivers/bcma/host_pci.c b/drivers/bcma/host_pci.c
index f52239feb4cb..69c10a7b7c61 100644
--- a/drivers/bcma/host_pci.c
+++ b/drivers/bcma/host_pci.c
@@ -260,8 +260,7 @@ static void bcma_host_pci_remove(struct pci_dev *dev)
#ifdef CONFIG_PM_SLEEP
static int bcma_host_pci_suspend(struct device *dev)
{
- struct pci_dev *pdev = to_pci_dev(dev);
- struct bcma_bus *bus = pci_get_drvdata(pdev);
+ struct bcma_bus *bus = dev_get_drvdata(dev);
bus->mapped_core = NULL;
@@ -270,8 +269,7 @@ static int bcma_host_pci_suspend(struct device *dev)
static int bcma_host_pci_resume(struct device *dev)
{
- struct pci_dev *pdev = to_pci_dev(dev);
- struct bcma_bus *bus = pci_get_drvdata(pdev);
+ struct bcma_bus *bus = dev_get_drvdata(dev);
return bcma_bus_resume(bus);
}
diff --git a/drivers/crypto/chelsio/chtls/chtls_io.c b/drivers/crypto/chelsio/chtls/chtls_io.c
index 551bca6fef24..c70cb5f272cf 100644
--- a/drivers/crypto/chelsio/chtls/chtls_io.c
+++ b/drivers/crypto/chelsio/chtls/chtls_io.c
@@ -1134,7 +1134,9 @@ copy:
}
/* Update the skb. */
if (merge) {
- skb_shinfo(skb)->frags[i - 1].size += copy;
+ skb_frag_size_add(
+ &skb_shinfo(skb)->frags[i - 1],
+ copy);
} else {
skb_fill_page_desc(skb, i, page, off, copy);
if (off + copy < pg_size) {
@@ -1247,7 +1249,7 @@ new_buf:
i = skb_shinfo(skb)->nr_frags;
if (skb_can_coalesce(skb, i, page, offset)) {
- skb_shinfo(skb)->frags[i - 1].size += copy;
+ skb_frag_size_add(&skb_shinfo(skb)->frags[i - 1], copy);
} else if (i < MAX_SKB_FRAGS) {
get_page(page);
skb_fill_page_desc(skb, i, page, offset, copy);
diff --git a/drivers/hsi/clients/ssi_protocol.c b/drivers/hsi/clients/ssi_protocol.c
index 9aeed98b87a1..0253e76f1df2 100644
--- a/drivers/hsi/clients/ssi_protocol.c
+++ b/drivers/hsi/clients/ssi_protocol.c
@@ -181,7 +181,8 @@ static void ssip_skb_to_msg(struct sk_buff *skb, struct hsi_msg *msg)
sg = sg_next(sg);
BUG_ON(!sg);
frag = &skb_shinfo(skb)->frags[i];
- sg_set_page(sg, frag->page.p, frag->size, frag->page_offset);
+ sg_set_page(sg, skb_frag_page(frag), skb_frag_size(frag),
+ skb_frag_off(frag));
}
}
diff --git a/drivers/infiniband/hw/hfi1/vnic_sdma.c b/drivers/infiniband/hw/hfi1/vnic_sdma.c
index af1b1ffcb38e..7d90b900131b 100644
--- a/drivers/infiniband/hw/hfi1/vnic_sdma.c
+++ b/drivers/infiniband/hw/hfi1/vnic_sdma.c
@@ -102,13 +102,13 @@ static noinline int build_vnic_ulp_payload(struct sdma_engine *sde,
goto bail_txadd;
for (i = 0; i < skb_shinfo(tx->skb)->nr_frags; i++) {
- struct skb_frag_struct *frag = &skb_shinfo(tx->skb)->frags[i];
+ skb_frag_t *frag = &skb_shinfo(tx->skb)->frags[i];
/* combine physically continuous fragments later? */
ret = sdma_txadd_page(sde->dd,
&tx->txreq,
skb_frag_page(frag),
- frag->page_offset,
+ skb_frag_off(frag),
skb_frag_size(frag));
if (unlikely(ret))
goto bail_txadd;
diff --git a/drivers/infiniband/hw/mlx5/devx.c b/drivers/infiniband/hw/mlx5/devx.c
index af5bbb35c058..25b6482c5368 100644
--- a/drivers/infiniband/hw/mlx5/devx.c
+++ b/drivers/infiniband/hw/mlx5/devx.c
@@ -922,6 +922,7 @@ static bool devx_is_general_cmd(void *in, struct mlx5_ib_dev *dev)
case MLX5_CMD_OP_QUERY_CONG_STATUS:
case MLX5_CMD_OP_QUERY_CONG_PARAMS:
case MLX5_CMD_OP_QUERY_CONG_STATISTICS:
+ case MLX5_CMD_OP_QUERY_LAG:
return true;
default:
return false;
diff --git a/drivers/infiniband/hw/mlx5/srq_cmd.c b/drivers/infiniband/hw/mlx5/srq_cmd.c
index b0d0687c7a68..8fc3630a9d4c 100644
--- a/drivers/infiniband/hw/mlx5/srq_cmd.c
+++ b/drivers/infiniband/hw/mlx5/srq_cmd.c
@@ -86,7 +86,7 @@ struct mlx5_core_srq *mlx5_cmd_get_srq(struct mlx5_ib_dev *dev, u32 srqn)
xa_lock(&table->array);
srq = xa_load(&table->array, srqn);
if (srq)
- atomic_inc(&srq->common.refcount);
+ refcount_inc(&srq->common.refcount);
xa_unlock(&table->array);
return srq;
@@ -592,7 +592,7 @@ int mlx5_cmd_create_srq(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq,
if (err)
return err;
- atomic_set(&srq->common.refcount, 1);
+ refcount_set(&srq->common.refcount, 1);
init_completion(&srq->common.free);
err = xa_err(xa_store_irq(&table->array, srq->srqn, srq, GFP_KERNEL));
@@ -675,7 +675,7 @@ static int srq_event_notifier(struct notifier_block *nb,
xa_lock(&table->array);
srq = xa_load(&table->array, srqn);
if (srq)
- atomic_inc(&srq->common.refcount);
+ refcount_inc(&srq->common.refcount);
xa_unlock(&table->array);
if (!srq)
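The srq_cmd.c hunks swap the SRQ reference count from atomic_t operations to the refcount_t API. The happy-path semantics are unchanged, but refcount_inc()/refcount_dec_and_test() saturate and WARN on overflow or increment-from-zero instead of silently wrapping. A minimal sketch of the init/hold/put lifecycle, using a hypothetical struct obj:

#include <linux/refcount.h>
#include <linux/slab.h>

struct obj {
	refcount_t refcount;
};

static void obj_init(struct obj *o)
{
	refcount_set(&o->refcount, 1);	/* creation holds the first reference */
}

static void obj_hold(struct obj *o)
{
	refcount_inc(&o->refcount);	/* WARNs if the count was already 0 */
}

static void obj_put(struct obj *o)
{
	if (refcount_dec_and_test(&o->refcount))
		kfree(o);
}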
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ib.c b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
index 78fa777c87b1..c332b4761816 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_ib.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
@@ -293,7 +293,8 @@ int ipoib_dma_map_tx(struct ib_device *ca, struct ipoib_tx_buf *tx_req)
const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
mapping[i + off] = ib_dma_map_page(ca,
skb_frag_page(frag),
- frag->page_offset, skb_frag_size(frag),
+ skb_frag_off(frag),
+ skb_frag_size(frag),
DMA_TO_DEVICE);
if (unlikely(ib_dma_mapping_error(ca, mapping[i + off])))
goto partial_error;
diff --git a/drivers/isdn/hardware/mISDN/avmfritz.c b/drivers/isdn/hardware/mISDN/avmfritz.c
index 81f2b183acc8..1137dd152b5c 100644
--- a/drivers/isdn/hardware/mISDN/avmfritz.c
+++ b/drivers/isdn/hardware/mISDN/avmfritz.c
@@ -509,8 +509,7 @@ HDLC_irq_xpr(struct bchannel *bch)
if (bch->tx_skb && bch->tx_idx < bch->tx_skb->len) {
hdlc_fill_fifo(bch);
} else {
- if (bch->tx_skb)
- dev_kfree_skb(bch->tx_skb);
+ dev_kfree_skb(bch->tx_skb);
if (get_next_bframe(bch)) {
hdlc_fill_fifo(bch);
test_and_clear_bit(FLG_TX_EMPTY, &bch->Flags);
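This avmfritz.c hunk and the rest of the mISDN changes below all drop the same redundant guard: dev_kfree_skb() already returns quietly when handed a NULL skb, so the surrounding if adds nothing. The before/after, as a sketch:

	/* before */
	if (bch->tx_skb)
		dev_kfree_skb(bch->tx_skb);

	/* after: dev_kfree_skb(NULL) is a no-op */
	dev_kfree_skb(bch->tx_skb);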
diff --git a/drivers/isdn/hardware/mISDN/hfcpci.c b/drivers/isdn/hardware/mISDN/hfcpci.c
index 4a069582fc6b..2330a7d24267 100644
--- a/drivers/isdn/hardware/mISDN/hfcpci.c
+++ b/drivers/isdn/hardware/mISDN/hfcpci.c
@@ -1119,8 +1119,7 @@ tx_birq(struct bchannel *bch)
if (bch->tx_skb && bch->tx_idx < bch->tx_skb->len)
hfcpci_fill_fifo(bch);
else {
- if (bch->tx_skb)
- dev_kfree_skb(bch->tx_skb);
+ dev_kfree_skb(bch->tx_skb);
if (get_next_bframe(bch))
hfcpci_fill_fifo(bch);
}
@@ -1132,8 +1131,7 @@ tx_dirq(struct dchannel *dch)
if (dch->tx_skb && dch->tx_idx < dch->tx_skb->len)
hfcpci_fill_dfifo(dch->hw);
else {
- if (dch->tx_skb)
- dev_kfree_skb(dch->tx_skb);
+ dev_kfree_skb(dch->tx_skb);
if (get_next_dframe(dch))
hfcpci_fill_dfifo(dch->hw);
}
diff --git a/drivers/isdn/hardware/mISDN/mISDNipac.c b/drivers/isdn/hardware/mISDN/mISDNipac.c
index f915399d75ca..bca880213e91 100644
--- a/drivers/isdn/hardware/mISDN/mISDNipac.c
+++ b/drivers/isdn/hardware/mISDN/mISDNipac.c
@@ -190,8 +190,7 @@ isac_rme_irq(struct isac_hw *isac)
#endif
}
WriteISAC(isac, ISAC_CMDR, 0x80);
- if (isac->dch.rx_skb)
- dev_kfree_skb(isac->dch.rx_skb);
+ dev_kfree_skb(isac->dch.rx_skb);
isac->dch.rx_skb = NULL;
} else {
count = ReadISAC(isac, ISAC_RBCL) & 0x1f;
@@ -210,8 +209,7 @@ isac_xpr_irq(struct isac_hw *isac)
if (isac->dch.tx_skb && isac->dch.tx_idx < isac->dch.tx_skb->len) {
isac_fill_fifo(isac);
} else {
- if (isac->dch.tx_skb)
- dev_kfree_skb(isac->dch.tx_skb);
+ dev_kfree_skb(isac->dch.tx_skb);
if (get_next_dframe(&isac->dch))
isac_fill_fifo(isac);
}
@@ -464,8 +462,7 @@ isacsx_rme_irq(struct isac_hw *isac)
isac->dch.err_crc++;
#endif
WriteISAC(isac, ISACX_CMDRD, ISACX_CMDRD_RMC);
- if (isac->dch.rx_skb)
- dev_kfree_skb(isac->dch.rx_skb);
+ dev_kfree_skb(isac->dch.rx_skb);
isac->dch.rx_skb = NULL;
} else {
count = ReadISAC(isac, ISACX_RBCLD) & 0x1f;
@@ -1012,8 +1009,7 @@ hscx_xpr(struct hscx_hw *hx)
if (hx->bch.tx_skb && hx->bch.tx_idx < hx->bch.tx_skb->len) {
hscx_fill_fifo(hx);
} else {
- if (hx->bch.tx_skb)
- dev_kfree_skb(hx->bch.tx_skb);
+ dev_kfree_skb(hx->bch.tx_skb);
if (get_next_bframe(&hx->bch)) {
hscx_fill_fifo(hx);
test_and_clear_bit(FLG_TX_EMPTY, &hx->bch.Flags);
diff --git a/drivers/isdn/hardware/mISDN/mISDNisar.c b/drivers/isdn/hardware/mISDN/mISDNisar.c
index fd5c52f37802..4a3e748a1c26 100644
--- a/drivers/isdn/hardware/mISDN/mISDNisar.c
+++ b/drivers/isdn/hardware/mISDN/mISDNisar.c
@@ -690,8 +690,7 @@ send_next(struct isar_ch *ch)
}
}
}
- if (ch->bch.tx_skb)
- dev_kfree_skb(ch->bch.tx_skb);
+ dev_kfree_skb(ch->bch.tx_skb);
if (get_next_bframe(&ch->bch)) {
isar_fill_fifo(ch);
test_and_clear_bit(FLG_TX_EMPTY, &ch->bch.Flags);
diff --git a/drivers/isdn/hardware/mISDN/netjet.c b/drivers/isdn/hardware/mISDN/netjet.c
index 4e30affd1a7c..61caa7e50b9a 100644
--- a/drivers/isdn/hardware/mISDN/netjet.c
+++ b/drivers/isdn/hardware/mISDN/netjet.c
@@ -605,8 +605,7 @@ bc_next_frame(struct tiger_ch *bc)
if (bc->bch.tx_skb && bc->bch.tx_idx < bc->bch.tx_skb->len) {
fill_dma(bc);
} else {
- if (bc->bch.tx_skb)
- dev_kfree_skb(bc->bch.tx_skb);
+ dev_kfree_skb(bc->bch.tx_skb);
if (get_next_bframe(&bc->bch)) {
fill_dma(bc);
test_and_clear_bit(FLG_TX_EMPTY, &bc->bch.Flags);
diff --git a/drivers/isdn/hardware/mISDN/w6692.c b/drivers/isdn/hardware/mISDN/w6692.c
index 2402608dc98d..bad55fdacd36 100644
--- a/drivers/isdn/hardware/mISDN/w6692.c
+++ b/drivers/isdn/hardware/mISDN/w6692.c
@@ -356,8 +356,7 @@ handle_rxD(struct w6692_hw *card) {
card->dch.err_rx++;
#endif
}
- if (card->dch.rx_skb)
- dev_kfree_skb(card->dch.rx_skb);
+ dev_kfree_skb(card->dch.rx_skb);
card->dch.rx_skb = NULL;
WriteW6692(card, W_D_CMDR, W_D_CMDR_RACK | W_D_CMDR_RRST);
} else {
@@ -376,8 +375,7 @@ handle_txD(struct w6692_hw *card) {
if (card->dch.tx_skb && card->dch.tx_idx < card->dch.tx_skb->len) {
W6692_fill_Dfifo(card);
} else {
- if (card->dch.tx_skb)
- dev_kfree_skb(card->dch.tx_skb);
+ dev_kfree_skb(card->dch.tx_skb);
if (get_next_dframe(&card->dch))
W6692_fill_Dfifo(card);
}
@@ -636,8 +634,7 @@ send_next(struct w6692_ch *wch)
if (wch->bch.tx_skb && wch->bch.tx_idx < wch->bch.tx_skb->len) {
W6692_fill_Bfifo(wch);
} else {
- if (wch->bch.tx_skb)
- dev_kfree_skb(wch->bch.tx_skb);
+ dev_kfree_skb(wch->bch.tx_skb);
if (get_next_bframe(&wch->bch)) {
W6692_fill_Bfifo(wch);
test_and_clear_bit(FLG_TX_EMPTY, &wch->bch.Flags);
diff --git a/drivers/isdn/mISDN/l1oip_core.c b/drivers/isdn/mISDN/l1oip_core.c
index 447f241467bd..b57dcb834594 100644
--- a/drivers/isdn/mISDN/l1oip_core.c
+++ b/drivers/isdn/mISDN/l1oip_core.c
@@ -1254,8 +1254,7 @@ release_card(struct l1oip *hc)
mISDN_freebchannel(hc->chan[ch].bch);
kfree(hc->chan[ch].bch);
#ifdef REORDER_DEBUG
- if (hc->chan[ch].disorder_skb)
- dev_kfree_skb(hc->chan[ch].disorder_skb);
+ dev_kfree_skb(hc->chan[ch].disorder_skb);
#endif
}
}
diff --git a/drivers/isdn/mISDN/layer2.c b/drivers/isdn/mISDN/layer2.c
index 68a481516729..5bf7fcb282c4 100644
--- a/drivers/isdn/mISDN/layer2.c
+++ b/drivers/isdn/mISDN/layer2.c
@@ -900,8 +900,7 @@ l2_disconnect(struct FsmInst *fi, int event, void *arg)
send_uframe(l2, NULL, DISC | 0x10, CMD);
mISDN_FsmDelTimer(&l2->t203, 1);
restart_t200(l2, 2);
- if (skb)
- dev_kfree_skb(skb);
+ dev_kfree_skb(skb);
}
static void
@@ -1722,8 +1721,7 @@ l2_set_own_busy(struct FsmInst *fi, int event, void *arg)
enquiry_cr(l2, RNR, RSP, 0);
test_and_clear_bit(FLG_ACK_PEND, &l2->flag);
}
- if (skb)
- dev_kfree_skb(skb);
+ dev_kfree_skb(skb);
}
static void
@@ -1736,8 +1734,7 @@ l2_clear_own_busy(struct FsmInst *fi, int event, void *arg)
enquiry_cr(l2, RR, RSP, 0);
test_and_clear_bit(FLG_ACK_PEND, &l2->flag);
}
- if (skb)
- dev_kfree_skb(skb);
+ dev_kfree_skb(skb);
}
static void
diff --git a/drivers/isdn/mISDN/stack.c b/drivers/isdn/mISDN/stack.c
index fa2237e7bcf8..27aa32914425 100644
--- a/drivers/isdn/mISDN/stack.c
+++ b/drivers/isdn/mISDN/stack.c
@@ -75,8 +75,7 @@ send_socklist(struct mISDN_sock_list *sl, struct sk_buff *skb)
cskb = NULL;
}
read_unlock(&sl->lock);
- if (cskb)
- dev_kfree_skb(cskb);
+ dev_kfree_skb(cskb);
}
static void
@@ -134,8 +133,7 @@ send_layer2(struct mISDNstack *st, struct sk_buff *skb)
}
out:
mutex_unlock(&st->lmutex);
- if (skb)
- dev_kfree_skb(skb);
+ dev_kfree_skb(skb);
}
static inline int
diff --git a/drivers/isdn/mISDN/tei.c b/drivers/isdn/mISDN/tei.c
index a4fa594e1caf..59d28cb19738 100644
--- a/drivers/isdn/mISDN/tei.c
+++ b/drivers/isdn/mISDN/tei.c
@@ -1328,10 +1328,8 @@ mgr_bcast(struct mISDNchannel *ch, struct sk_buff *skb)
}
out:
read_unlock_irqrestore(&mgr->lock, flags);
- if (cskb)
- dev_kfree_skb(cskb);
- if (skb)
- dev_kfree_skb(skb);
+ dev_kfree_skb(cskb);
+ dev_kfree_skb(skb);
return 0;
}
diff --git a/drivers/net/arcnet/capmode.c b/drivers/net/arcnet/capmode.c
index b780be6f41ff..c09b567845e1 100644
--- a/drivers/net/arcnet/capmode.c
+++ b/drivers/net/arcnet/capmode.c
@@ -44,7 +44,7 @@ static void rx(struct net_device *dev, int bufnum,
{
struct arcnet_local *lp = netdev_priv(dev);
struct sk_buff *skb;
- struct archdr *pkt = pkthdr;
+ struct archdr *pkt;
char *pktbuf, *pkthdrbuf;
int ofs;
diff --git a/drivers/net/bonding/bond_debugfs.c b/drivers/net/bonding/bond_debugfs.c
index 1360f1ffe070..f3f86ef68ae0 100644
--- a/drivers/net/bonding/bond_debugfs.c
+++ b/drivers/net/bonding/bond_debugfs.c
@@ -55,11 +55,6 @@ void bond_debug_register(struct bonding *bond)
bond->debug_dir =
debugfs_create_dir(bond->dev->name, bonding_debug_root);
- if (!bond->debug_dir) {
- netdev_warn(bond->dev, "failed to register to debugfs\n");
- return;
- }
-
debugfs_create_file("rlb_hash_table", 0400, bond->debug_dir,
bond, &bond_debug_rlb_hash_fops);
}
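The bonding hunk above and the caif hunks below share a rationale: current debugfs convention is to ignore the return values of debugfs_create_*(). The creation helpers tolerate an ERR_PTR or NULL parent, and a driver should not change behaviour just because debugfs is unavailable. A sketch of the resulting style, with foo_priv as a hypothetical device context:

#include <linux/debugfs.h>

struct foo_priv {
	struct dentry *dbg_dir;
	u32 rx_drops;
};

static void foo_debugfs_init(struct foo_priv *priv)
{
	priv->dbg_dir = debugfs_create_dir("foo", NULL);

	/* no error check: the core copes if dbg_dir is an ERR_PTR */
	debugfs_create_u32("rx_drops", 0400, priv->dbg_dir, &priv->rx_drops);
}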
diff --git a/drivers/net/caif/caif_serial.c b/drivers/net/caif/caif_serial.c
index ed3c437063dc..40b079162804 100644
--- a/drivers/net/caif/caif_serial.c
+++ b/drivers/net/caif/caif_serial.c
@@ -94,26 +94,20 @@ static inline void update_tty_status(struct ser_device *ser)
}
static inline void debugfs_init(struct ser_device *ser, struct tty_struct *tty)
{
- ser->debugfs_tty_dir =
- debugfs_create_dir(tty->name, debugfsdir);
- if (!IS_ERR(ser->debugfs_tty_dir)) {
- debugfs_create_blob("last_tx_msg", 0400,
- ser->debugfs_tty_dir,
- &ser->tx_blob);
+ ser->debugfs_tty_dir = debugfs_create_dir(tty->name, debugfsdir);
- debugfs_create_blob("last_rx_msg", 0400,
- ser->debugfs_tty_dir,
- &ser->rx_blob);
+ debugfs_create_blob("last_tx_msg", 0400, ser->debugfs_tty_dir,
+ &ser->tx_blob);
- debugfs_create_x32("ser_state", 0400,
- ser->debugfs_tty_dir,
- (u32 *)&ser->state);
+ debugfs_create_blob("last_rx_msg", 0400, ser->debugfs_tty_dir,
+ &ser->rx_blob);
- debugfs_create_x8("tty_status", 0400,
- ser->debugfs_tty_dir,
- &ser->tty_status);
+ debugfs_create_x32("ser_state", 0400, ser->debugfs_tty_dir,
+ (u32 *)&ser->state);
+
+ debugfs_create_x8("tty_status", 0400, ser->debugfs_tty_dir,
+ &ser->tty_status);
- }
ser->tx_blob.data = ser->tx_data;
ser->tx_blob.size = 0;
ser->rx_blob.data = ser->rx_data;
diff --git a/drivers/net/caif/caif_virtio.c b/drivers/net/caif/caif_virtio.c
index 27e93a438dd9..eb426822ad06 100644
--- a/drivers/net/caif/caif_virtio.c
+++ b/drivers/net/caif/caif_virtio.c
@@ -623,11 +623,7 @@ static void cfv_netdev_setup(struct net_device *netdev)
/* Create debugfs counters for the device */
static inline void debugfs_init(struct cfv_info *cfv)
{
- cfv->debugfs =
- debugfs_create_dir(netdev_name(cfv->ndev), NULL);
-
- if (IS_ERR(cfv->debugfs))
- return;
+ cfv->debugfs = debugfs_create_dir(netdev_name(cfv->ndev), NULL);
debugfs_create_u32("rx-napi-complete", 0400, cfv->debugfs,
&cfv->stats.rx_napi_complete);
diff --git a/drivers/net/can/Kconfig b/drivers/net/can/Kconfig
index ab585900a057..17c166cc8482 100644
--- a/drivers/net/can/Kconfig
+++ b/drivers/net/can/Kconfig
@@ -120,6 +120,19 @@ config CAN_JANZ_ICAN3
This driver can also be built as a module. If so, the module will be
called janz-ican3.ko.
+config CAN_KVASER_PCIEFD
+ depends on PCI
+ tristate "Kvaser PCIe FD cards"
+ help
+ This is a driver for the Kvaser PCI Express CAN FD family.
+
+ Supported devices:
+ Kvaser PCIEcan 4xHS
+ Kvaser PCIEcan 2xHS v2
+ Kvaser PCIEcan HS v2
+ Kvaser Mini PCI Express HS v2
+ Kvaser Mini PCI Express 2xHS v2
+
config CAN_SUN4I
tristate "Allwinner A10 CAN controller"
depends on MACH_SUN4I || MACH_SUN7I || COMPILE_TEST
diff --git a/drivers/net/can/Makefile b/drivers/net/can/Makefile
index 44922bf29b6a..22164300122d 100644
--- a/drivers/net/can/Makefile
+++ b/drivers/net/can/Makefile
@@ -25,6 +25,7 @@ obj-$(CONFIG_CAN_FLEXCAN) += flexcan.o
obj-$(CONFIG_CAN_GRCAN) += grcan.o
obj-$(CONFIG_CAN_IFI_CANFD) += ifi_canfd/
obj-$(CONFIG_CAN_JANZ_ICAN3) += janz-ican3.o
+obj-$(CONFIG_CAN_KVASER_PCIEFD) += kvaser_pciefd.o
obj-$(CONFIG_CAN_MSCAN) += mscan/
obj-$(CONFIG_CAN_M_CAN) += m_can/
obj-$(CONFIG_CAN_PEAK_PCIEFD) += peak_canfd/
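These two hunks wire the new kvaser_pciefd driver into kbuild: a tristate symbol gated on PCI plus an obj-$(CONFIG_CAN_KVASER_PCIEFD) rule, so the driver can build either into the kernel or as a module. On the C side, a tristate PCI driver pairs this with the usual registration boilerplate; a minimal sketch with hypothetical names and made-up vendor/device IDs:

#include <linux/module.h>
#include <linux/pci.h>

static int foo_probe(struct pci_dev *pdev, const struct pci_device_id *id);
static void foo_remove(struct pci_dev *pdev);

static const struct pci_device_id foo_id_table[] = {
	{ PCI_DEVICE(0x1234, 0x5678) },	/* hypothetical IDs */
	{ 0, },
};
MODULE_DEVICE_TABLE(pci, foo_id_table);

static struct pci_driver foo_pci_driver = {
	.name = "foo_can",
	.id_table = foo_id_table,
	.probe = foo_probe,
	.remove = foo_remove,
};
module_pci_driver(foo_pci_driver);
MODULE_LICENSE("GPL");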
diff --git a/drivers/net/can/flexcan.c b/drivers/net/can/flexcan.c
index fcec8bcb53d6..dc5695dffc2e 100644
--- a/drivers/net/can/flexcan.c
+++ b/drivers/net/can/flexcan.c
@@ -24,6 +24,7 @@
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
#include <linux/regulator/consumer.h>
#include <linux/regmap.h>
@@ -266,6 +267,7 @@ struct flexcan_stop_mode {
struct flexcan_priv {
struct can_priv can;
struct can_rx_offload offload;
+ struct device *dev;
struct flexcan_regs __iomem *regs;
struct flexcan_mb __iomem *tx_mb;
@@ -273,6 +275,8 @@ struct flexcan_priv {
u8 tx_mb_idx;
u8 mb_count;
u8 mb_size;
+ u8 clk_src; /* clock source of CAN Protocol Engine */
+
u32 reg_ctrl_default;
u32 reg_imask1_default;
u32 reg_imask2_default;
@@ -462,6 +466,27 @@ static inline void flexcan_error_irq_disable(const struct flexcan_priv *priv)
priv->write(reg_ctrl, &regs->ctrl);
}
+static int flexcan_clks_enable(const struct flexcan_priv *priv)
+{
+ int err;
+
+ err = clk_prepare_enable(priv->clk_ipg);
+ if (err)
+ return err;
+
+ err = clk_prepare_enable(priv->clk_per);
+ if (err)
+ clk_disable_unprepare(priv->clk_ipg);
+
+ return err;
+}
+
+static void flexcan_clks_disable(const struct flexcan_priv *priv)
+{
+ clk_disable_unprepare(priv->clk_per);
+ clk_disable_unprepare(priv->clk_ipg);
+}
+
static inline int flexcan_transceiver_enable(const struct flexcan_priv *priv)
{
if (!priv->reg_xceiver)
@@ -588,19 +613,13 @@ static int flexcan_get_berr_counter(const struct net_device *dev,
const struct flexcan_priv *priv = netdev_priv(dev);
int err;
- err = clk_prepare_enable(priv->clk_ipg);
- if (err)
+ err = pm_runtime_get_sync(priv->dev);
+ if (err < 0)
return err;
- err = clk_prepare_enable(priv->clk_per);
- if (err)
- goto out_disable_ipg;
-
err = __flexcan_get_berr_counter(dev, bec);
- clk_disable_unprepare(priv->clk_per);
- out_disable_ipg:
- clk_disable_unprepare(priv->clk_ipg);
+ pm_runtime_put(priv->dev);
return err;
}
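From here on, the flexcan conversion moves clock management behind runtime PM: flexcan_clks_enable()/flexcan_clks_disable() become the runtime resume/suspend workhorses, and paths like flexcan_get_berr_counter() above just take and drop a usage reference. A sketch of the resulting shape, assuming foo_clks_enable() mirrors the helper added earlier and the dev_pm_ops wiring lives elsewhere in the driver:

#include <linux/pm_runtime.h>

struct foo_priv { struct device *dev; };
static int foo_clks_enable(struct foo_priv *priv);   /* hypothetical */
static int foo_read_counters(struct foo_priv *priv); /* hypothetical */

static int foo_runtime_resume(struct device *dev)
{
	struct foo_priv *priv = dev_get_drvdata(dev);

	return foo_clks_enable(priv);	/* the only place clocks turn on */
}

static int foo_touch_hw(struct foo_priv *priv)
{
	int err;

	err = pm_runtime_get_sync(priv->dev);	/* resumes device if needed */
	if (err < 0) {
		pm_runtime_put_noidle(priv->dev); /* balance the usage count */
		return err;
	}

	err = foo_read_counters(priv);

	pm_runtime_put(priv->dev);
	return err;
}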
@@ -1233,17 +1252,13 @@ static int flexcan_open(struct net_device *dev)
struct flexcan_priv *priv = netdev_priv(dev);
int err;
- err = clk_prepare_enable(priv->clk_ipg);
- if (err)
+ err = pm_runtime_get_sync(priv->dev);
+ if (err < 0)
return err;
- err = clk_prepare_enable(priv->clk_per);
- if (err)
- goto out_disable_ipg;
-
err = open_candev(dev);
if (err)
- goto out_disable_per;
+ goto out_runtime_put;
err = request_irq(dev->irq, flexcan_irq, IRQF_SHARED, dev->name, dev);
if (err)
@@ -1306,10 +1321,8 @@ static int flexcan_open(struct net_device *dev)
free_irq(dev->irq, dev);
out_close:
close_candev(dev);
- out_disable_per:
- clk_disable_unprepare(priv->clk_per);
- out_disable_ipg:
- clk_disable_unprepare(priv->clk_ipg);
+ out_runtime_put:
+ pm_runtime_put(priv->dev);
return err;
}
@@ -1324,10 +1337,9 @@ static int flexcan_close(struct net_device *dev)
can_rx_offload_del(&priv->offload);
free_irq(dev->irq, dev);
- clk_disable_unprepare(priv->clk_per);
- clk_disable_unprepare(priv->clk_ipg);
close_candev(dev);
+ pm_runtime_put(priv->dev);
can_led_event(dev, CAN_LED_EVENT_STOP);
@@ -1367,20 +1379,20 @@ static int register_flexcandev(struct net_device *dev)
struct flexcan_regs __iomem *regs = priv->regs;
u32 reg, err;
- err = clk_prepare_enable(priv->clk_ipg);
+ err = flexcan_clks_enable(priv);
if (err)
return err;
- err = clk_prepare_enable(priv->clk_per);
- if (err)
- goto out_disable_ipg;
-
/* select "bus clock", chip must be disabled */
err = flexcan_chip_disable(priv);
if (err)
- goto out_disable_per;
+ goto out_clks_disable;
+
reg = priv->read(&regs->ctrl);
- reg |= FLEXCAN_CTRL_CLK_SRC;
+ if (priv->clk_src)
+ reg |= FLEXCAN_CTRL_CLK_SRC;
+ else
+ reg &= ~FLEXCAN_CTRL_CLK_SRC;
priv->write(reg, &regs->ctrl);
err = flexcan_chip_enable(priv);
@@ -1406,15 +1418,21 @@ static int register_flexcandev(struct net_device *dev)
}
err = register_candev(dev);
+ if (err)
+ goto out_chip_disable;
- /* disable core and turn off clocks */
- out_chip_disable:
+ /* Disable core and let pm_runtime_put() disable the clocks.
+ * If CONFIG_PM is not enabled, the clocks will stay powered.
+ */
flexcan_chip_disable(priv);
- out_disable_per:
- clk_disable_unprepare(priv->clk_per);
- out_disable_ipg:
- clk_disable_unprepare(priv->clk_ipg);
+ pm_runtime_put(priv->dev);
+ return 0;
+
+ out_chip_disable:
+ flexcan_chip_disable(priv);
+ out_clks_disable:
+ flexcan_clks_disable(priv);
return err;
}
@@ -1473,6 +1491,11 @@ static int flexcan_setup_stop_mode(struct platform_device *pdev)
device_set_wakeup_capable(&pdev->dev, true);
+ if (of_property_read_bool(np, "wakeup-source"))
+ device_set_wakeup_enable(&pdev->dev, true);
+
+ return 0;
+
out_put_node:
of_node_put(gpr_np);
return ret;
@@ -1508,6 +1531,7 @@ static int flexcan_probe(struct platform_device *pdev)
struct clk *clk_ipg = NULL, *clk_per = NULL;
struct flexcan_regs __iomem *regs;
int err, irq;
+ u8 clk_src = 1;
u32 clock_freq = 0;
reg_xceiver = devm_regulator_get(&pdev->dev, "xceiver");
@@ -1516,9 +1540,12 @@ static int flexcan_probe(struct platform_device *pdev)
else if (IS_ERR(reg_xceiver))
reg_xceiver = NULL;
- if (pdev->dev.of_node)
+ if (pdev->dev.of_node) {
of_property_read_u32(pdev->dev.of_node,
"clock-frequency", &clock_freq);
+ of_property_read_u8(pdev->dev.of_node,
+ "fsl,clk-source", &clk_src);
+ }
if (!clock_freq) {
clk_ipg = devm_clk_get(&pdev->dev, "ipg");
@@ -1576,6 +1603,7 @@ static int flexcan_probe(struct platform_device *pdev)
priv->write = flexcan_write_le;
}
+ priv->dev = &pdev->dev;
priv->can.clock.freq = clock_freq;
priv->can.bittiming_const = &flexcan_bittiming_const;
priv->can.do_set_mode = flexcan_set_mode;
@@ -1586,9 +1614,14 @@ static int flexcan_probe(struct platform_device *pdev)
priv->regs = regs;
priv->clk_ipg = clk_ipg;
priv->clk_per = clk_per;
+ priv->clk_src = clk_src;
priv->devtype_data = devtype_data;
priv->reg_xceiver = reg_xceiver;
+ pm_runtime_get_noresume(&pdev->dev);
+ pm_runtime_set_active(&pdev->dev);
+ pm_runtime_enable(&pdev->dev);
+
err = register_flexcandev(dev);
if (err) {
dev_err(&pdev->dev, "registering netdev failed\n");
@@ -1615,6 +1648,7 @@ static int flexcan_remove(struct platform_device *pdev)
struct net_device *dev = platform_get_drvdata(pdev);
unregister_flexcandev(dev);
+ pm_runtime_disable(&pdev->dev);
free_candev(dev);
return 0;
@@ -1624,7 +1658,7 @@ static int __maybe_unused flexcan_suspend(struct device *device)
{
struct net_device *dev = dev_get_drvdata(device);
struct flexcan_priv *priv = netdev_priv(dev);
- int err;
+ int err = 0;
if (netif_running(dev)) {
/* if wakeup is enabled, enter stop mode
@@ -1639,20 +1673,22 @@ static int __maybe_unused flexcan_suspend(struct device *device)
err = flexcan_chip_disable(priv);
if (err)
return err;
+
+ err = pm_runtime_force_suspend(device);
}
netif_stop_queue(dev);
netif_device_detach(dev);
}
priv->can.state = CAN_STATE_SLEEPING;
- return 0;
+ return err;
}
static int __maybe_unused flexcan_resume(struct device *device)
{
struct net_device *dev = dev_get_drvdata(device);
struct flexcan_priv *priv = netdev_priv(dev);
- int err;
+ int err = 0;
priv->can.state = CAN_STATE_ERROR_ACTIVE;
if (netif_running(dev)) {
@@ -1661,14 +1697,35 @@ static int __maybe_unused flexcan_resume(struct device *device)
if (device_may_wakeup(device)) {
disable_irq_wake(dev->irq);
} else {
- err = flexcan_chip_enable(priv);
+ err = pm_runtime_force_resume(device);
if (err)
return err;
+
+ err = flexcan_chip_enable(priv);
}
}
+
+ return err;
+}
+
+static int __maybe_unused flexcan_runtime_suspend(struct device *device)
+{
+ struct net_device *dev = dev_get_drvdata(device);
+ struct flexcan_priv *priv = netdev_priv(dev);
+
+ flexcan_clks_disable(priv);
+
return 0;
}
+static int __maybe_unused flexcan_runtime_resume(struct device *device)
+{
+ struct net_device *dev = dev_get_drvdata(device);
+ struct flexcan_priv *priv = netdev_priv(dev);
+
+ return flexcan_clks_enable(priv);
+}
+
static int __maybe_unused flexcan_noirq_suspend(struct device *device)
{
struct net_device *dev = dev_get_drvdata(device);
@@ -1698,6 +1755,7 @@ static int __maybe_unused flexcan_noirq_resume(struct device *device)
static const struct dev_pm_ops flexcan_pm_ops = {
SET_SYSTEM_SLEEP_PM_OPS(flexcan_suspend, flexcan_resume)
+ SET_RUNTIME_PM_OPS(flexcan_runtime_suspend, flexcan_runtime_resume, NULL)
SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(flexcan_noirq_suspend, flexcan_noirq_resume)
};
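The conversion above follows the standard runtime PM idiom: hardware access is bracketed by pm_runtime_get_sync()/pm_runtime_put(), and the PM core invokes the runtime callbacks to gate and ungate the clocks. A minimal sketch of that idiom, with hypothetical foo_* names and a single clock (not flexcan code):

#include <linux/clk.h>
#include <linux/pm_runtime.h>

struct foo_priv {
	struct clk *clk;
};

static int __maybe_unused foo_runtime_suspend(struct device *dev)
{
	struct foo_priv *priv = dev_get_drvdata(dev);

	/* Last usage reference dropped: gate the clock */
	clk_disable_unprepare(priv->clk);
	return 0;
}

static int __maybe_unused foo_runtime_resume(struct device *dev)
{
	struct foo_priv *priv = dev_get_drvdata(dev);

	/* A user is about to touch the hardware: ungate the clock */
	return clk_prepare_enable(priv->clk);
}

static const struct dev_pm_ops foo_pm_ops = {
	SET_RUNTIME_PM_OPS(foo_runtime_suspend, foo_runtime_resume, NULL)
};

At probe time, pm_runtime_get_noresume() + pm_runtime_set_active() + pm_runtime_enable() tell the PM core that the clocks are already running, which is the sequence flexcan_probe() gains above.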
diff --git a/drivers/net/can/janz-ican3.c b/drivers/net/can/janz-ican3.c
index 19d4f52a8f90..a761092e6ac9 100644
--- a/drivers/net/can/janz-ican3.c
+++ b/drivers/net/can/janz-ican3.c
@@ -1936,7 +1936,6 @@ static int ican3_probe(struct platform_device *pdev)
/* find our IRQ number */
mod->irq = platform_get_irq(pdev, 0);
if (mod->irq < 0) {
- dev_err(dev, "IRQ line not found\n");
ret = -ENODEV;
goto out_free_ndev;
}
diff --git a/drivers/net/can/kvaser_pciefd.c b/drivers/net/can/kvaser_pciefd.c
new file mode 100644
index 000000000000..f9815fda8840
--- /dev/null
+++ b/drivers/net/can/kvaser_pciefd.c
@@ -0,0 +1,1907 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause
+/* Copyright (C) 2018 KVASER AB, Sweden. All rights reserved.
+ * Parts of this driver are based on the following:
+ * - Kvaser Linux pciefd driver (version 5.25)
+ * - PEAK Linux canfd driver
+ * - Altera Avalon EPCS flash controller driver
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/pci.h>
+#include <linux/can/dev.h>
+#include <linux/timer.h>
+#include <linux/netdevice.h>
+#include <linux/crc32.h>
+#include <linux/iopoll.h>
+
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_AUTHOR("Kvaser AB <support@kvaser.com>");
+MODULE_DESCRIPTION("CAN driver for Kvaser CAN/PCIe devices");
+
+#define KVASER_PCIEFD_DRV_NAME "kvaser_pciefd"
+
+#define KVASER_PCIEFD_WAIT_TIMEOUT msecs_to_jiffies(1000)
+#define KVASER_PCIEFD_BEC_POLL_FREQ (jiffies + msecs_to_jiffies(200))
+#define KVASER_PCIEFD_MAX_ERR_REP 256
+#define KVASER_PCIEFD_CAN_TX_MAX_COUNT 17
+#define KVASER_PCIEFD_MAX_CAN_CHANNELS 4
+#define KVASER_PCIEFD_DMA_COUNT 2
+
+#define KVASER_PCIEFD_DMA_SIZE (4 * 1024)
+#define KVASER_PCIEFD_64BIT_DMA_BIT BIT(0)
+
+#define KVASER_PCIEFD_VENDOR 0x1a07
+#define KVASER_PCIEFD_4HS_ID 0x0d
+#define KVASER_PCIEFD_2HS_ID 0x0e
+#define KVASER_PCIEFD_HS_ID 0x0f
+#define KVASER_PCIEFD_MINIPCIE_HS_ID 0x10
+#define KVASER_PCIEFD_MINIPCIE_2HS_ID 0x11
+
+/* PCIe IRQ registers */
+#define KVASER_PCIEFD_IRQ_REG 0x40
+#define KVASER_PCIEFD_IEN_REG 0x50
+/* DMA map */
+#define KVASER_PCIEFD_DMA_MAP_BASE 0x1000
+/* Kvaser KCAN CAN controller registers */
+#define KVASER_PCIEFD_KCAN0_BASE 0x10000
+#define KVASER_PCIEFD_KCAN_BASE_OFFSET 0x1000
+#define KVASER_PCIEFD_KCAN_FIFO_REG 0x100
+#define KVASER_PCIEFD_KCAN_FIFO_LAST_REG 0x180
+#define KVASER_PCIEFD_KCAN_CTRL_REG 0x2c0
+#define KVASER_PCIEFD_KCAN_CMD_REG 0x400
+#define KVASER_PCIEFD_KCAN_IEN_REG 0x408
+#define KVASER_PCIEFD_KCAN_IRQ_REG 0x410
+#define KVASER_PCIEFD_KCAN_TX_NPACKETS_REG 0x414
+#define KVASER_PCIEFD_KCAN_STAT_REG 0x418
+#define KVASER_PCIEFD_KCAN_MODE_REG 0x41c
+#define KVASER_PCIEFD_KCAN_BTRN_REG 0x420
+#define KVASER_PCIEFD_KCAN_BTRD_REG 0x428
+#define KVASER_PCIEFD_KCAN_PWM_REG 0x430
+/* Loopback control register */
+#define KVASER_PCIEFD_LOOP_REG 0x1f000
+/* System identification and information registers */
+#define KVASER_PCIEFD_SYSID_BASE 0x1f020
+#define KVASER_PCIEFD_SYSID_VERSION_REG (KVASER_PCIEFD_SYSID_BASE + 0x8)
+#define KVASER_PCIEFD_SYSID_CANFREQ_REG (KVASER_PCIEFD_SYSID_BASE + 0xc)
+#define KVASER_PCIEFD_SYSID_BUILD_REG (KVASER_PCIEFD_SYSID_BASE + 0x14)
+/* Shared receive buffer registers */
+#define KVASER_PCIEFD_SRB_BASE 0x1f200
+#define KVASER_PCIEFD_SRB_CMD_REG (KVASER_PCIEFD_SRB_BASE + 0x200)
+#define KVASER_PCIEFD_SRB_IEN_REG (KVASER_PCIEFD_SRB_BASE + 0x204)
+#define KVASER_PCIEFD_SRB_IRQ_REG (KVASER_PCIEFD_SRB_BASE + 0x20c)
+#define KVASER_PCIEFD_SRB_STAT_REG (KVASER_PCIEFD_SRB_BASE + 0x210)
+#define KVASER_PCIEFD_SRB_CTRL_REG (KVASER_PCIEFD_SRB_BASE + 0x218)
+/* EPCS flash controller registers */
+#define KVASER_PCIEFD_SPI_BASE 0x1fc00
+#define KVASER_PCIEFD_SPI_RX_REG KVASER_PCIEFD_SPI_BASE
+#define KVASER_PCIEFD_SPI_TX_REG (KVASER_PCIEFD_SPI_BASE + 0x4)
+#define KVASER_PCIEFD_SPI_STATUS_REG (KVASER_PCIEFD_SPI_BASE + 0x8)
+#define KVASER_PCIEFD_SPI_CTRL_REG (KVASER_PCIEFD_SPI_BASE + 0xc)
+#define KVASER_PCIEFD_SPI_SSEL_REG (KVASER_PCIEFD_SPI_BASE + 0x14)
+
+#define KVASER_PCIEFD_IRQ_ALL_MSK 0x1f
+#define KVASER_PCIEFD_IRQ_SRB BIT(4)
+
+#define KVASER_PCIEFD_SYSID_NRCHAN_SHIFT 24
+#define KVASER_PCIEFD_SYSID_MAJOR_VER_SHIFT 16
+#define KVASER_PCIEFD_SYSID_BUILD_VER_SHIFT 1
+
+/* Reset DMA buffer 0, 1 and FIFO offset */
+#define KVASER_PCIEFD_SRB_CMD_RDB0 BIT(4)
+#define KVASER_PCIEFD_SRB_CMD_RDB1 BIT(5)
+#define KVASER_PCIEFD_SRB_CMD_FOR BIT(0)
+
+/* DMA packet done, buffer 0 and 1 */
+#define KVASER_PCIEFD_SRB_IRQ_DPD0 BIT(8)
+#define KVASER_PCIEFD_SRB_IRQ_DPD1 BIT(9)
+/* DMA overflow, buffer 0 and 1 */
+#define KVASER_PCIEFD_SRB_IRQ_DOF0 BIT(10)
+#define KVASER_PCIEFD_SRB_IRQ_DOF1 BIT(11)
+/* DMA underflow, buffer 0 and 1 */
+#define KVASER_PCIEFD_SRB_IRQ_DUF0 BIT(12)
+#define KVASER_PCIEFD_SRB_IRQ_DUF1 BIT(13)
+
+/* DMA idle */
+#define KVASER_PCIEFD_SRB_STAT_DI BIT(15)
+/* DMA support */
+#define KVASER_PCIEFD_SRB_STAT_DMA BIT(24)
+
+/* DMA Enable */
+#define KVASER_PCIEFD_SRB_CTRL_DMA_ENABLE BIT(0)
+
+/* EPCS flash controller definitions */
+#define KVASER_PCIEFD_CFG_IMG_SZ (64 * 1024)
+#define KVASER_PCIEFD_CFG_IMG_OFFSET (31 * 65536L)
+#define KVASER_PCIEFD_CFG_MAX_PARAMS 256
+#define KVASER_PCIEFD_CFG_MAGIC 0xcafef00d
+#define KVASER_PCIEFD_CFG_PARAM_MAX_SZ 24
+#define KVASER_PCIEFD_CFG_SYS_VER 1
+#define KVASER_PCIEFD_CFG_PARAM_NR_CHAN 130
+#define KVASER_PCIEFD_SPI_TMT BIT(5)
+#define KVASER_PCIEFD_SPI_TRDY BIT(6)
+#define KVASER_PCIEFD_SPI_RRDY BIT(7)
+#define KVASER_PCIEFD_FLASH_ID_EPCS16 0x14
+/* Commands for controlling the onboard flash */
+#define KVASER_PCIEFD_FLASH_RES_CMD 0xab
+#define KVASER_PCIEFD_FLASH_READ_CMD 0x3
+#define KVASER_PCIEFD_FLASH_STATUS_CMD 0x5
+
+/* Kvaser KCAN definitions */
+#define KVASER_PCIEFD_KCAN_CTRL_EFLUSH (4 << 29)
+#define KVASER_PCIEFD_KCAN_CTRL_EFRAME (5 << 29)
+
+#define KVASER_PCIEFD_KCAN_CMD_SEQ_SHIFT 16
+/* Request status packet */
+#define KVASER_PCIEFD_KCAN_CMD_SRQ BIT(0)
+/* Abort, flush and reset */
+#define KVASER_PCIEFD_KCAN_CMD_AT BIT(1)
+
+/* Tx FIFO unaligned read */
+#define KVASER_PCIEFD_KCAN_IRQ_TAR BIT(0)
+/* Tx FIFO unaligned end */
+#define KVASER_PCIEFD_KCAN_IRQ_TAE BIT(1)
+/* Bus parameter protection error */
+#define KVASER_PCIEFD_KCAN_IRQ_BPP BIT(2)
+/* FDF bit when controller is in classic mode */
+#define KVASER_PCIEFD_KCAN_IRQ_FDIC BIT(3)
+/* Rx FIFO overflow */
+#define KVASER_PCIEFD_KCAN_IRQ_ROF BIT(5)
+/* Abort done */
+#define KVASER_PCIEFD_KCAN_IRQ_ABD BIT(13)
+/* Tx buffer flush done */
+#define KVASER_PCIEFD_KCAN_IRQ_TFD BIT(14)
+/* Tx FIFO overflow */
+#define KVASER_PCIEFD_KCAN_IRQ_TOF BIT(15)
+/* Tx FIFO empty */
+#define KVASER_PCIEFD_KCAN_IRQ_TE BIT(16)
+/* Transmitter unaligned */
+#define KVASER_PCIEFD_KCAN_IRQ_TAL BIT(17)
+
+#define KVASER_PCIEFD_KCAN_TX_NPACKETS_MAX_SHIFT 16
+
+#define KVASER_PCIEFD_KCAN_STAT_SEQNO_SHIFT 24
+/* Abort request */
+#define KVASER_PCIEFD_KCAN_STAT_AR BIT(7)
+/* Idle state. Controller in reset mode and no abort or flush pending */
+#define KVASER_PCIEFD_KCAN_STAT_IDLE BIT(10)
+/* Bus off */
+#define KVASER_PCIEFD_KCAN_STAT_BOFF BIT(11)
+/* Reset mode request */
+#define KVASER_PCIEFD_KCAN_STAT_RMR BIT(14)
+/* Controller in reset mode */
+#define KVASER_PCIEFD_KCAN_STAT_IRM BIT(15)
+/* Controller has one-shot capability */
+#define KVASER_PCIEFD_KCAN_STAT_CAP BIT(16)
+/* Controller has CAN FD capability */
+#define KVASER_PCIEFD_KCAN_STAT_FD BIT(19)
+#define KVASER_PCIEFD_KCAN_STAT_BUS_OFF_MSK (KVASER_PCIEFD_KCAN_STAT_AR | \
+ KVASER_PCIEFD_KCAN_STAT_BOFF | KVASER_PCIEFD_KCAN_STAT_RMR | \
+ KVASER_PCIEFD_KCAN_STAT_IRM)
+
+/* Reset mode */
+#define KVASER_PCIEFD_KCAN_MODE_RM BIT(8)
+/* Listen only mode */
+#define KVASER_PCIEFD_KCAN_MODE_LOM BIT(9)
+/* Error packet enable */
+#define KVASER_PCIEFD_KCAN_MODE_EPEN BIT(12)
+/* CAN FD non-ISO */
+#define KVASER_PCIEFD_KCAN_MODE_NIFDEN BIT(15)
+/* Acknowledgment packet type */
+#define KVASER_PCIEFD_KCAN_MODE_APT BIT(20)
+/* Active error flag enable. Clear to force error passive */
+#define KVASER_PCIEFD_KCAN_MODE_EEN BIT(23)
+/* Classic CAN mode */
+#define KVASER_PCIEFD_KCAN_MODE_CCM BIT(31)
+
+#define KVASER_PCIEFD_KCAN_BTRN_SJW_SHIFT 13
+#define KVASER_PCIEFD_KCAN_BTRN_TSEG1_SHIFT 17
+#define KVASER_PCIEFD_KCAN_BTRN_TSEG2_SHIFT 26
+
+#define KVASER_PCIEFD_KCAN_PWM_TOP_SHIFT 16
+
+/* Kvaser KCAN packet types */
+#define KVASER_PCIEFD_PACK_TYPE_DATA 0
+#define KVASER_PCIEFD_PACK_TYPE_ACK 1
+#define KVASER_PCIEFD_PACK_TYPE_TXRQ 2
+#define KVASER_PCIEFD_PACK_TYPE_ERROR 3
+#define KVASER_PCIEFD_PACK_TYPE_EFLUSH_ACK 4
+#define KVASER_PCIEFD_PACK_TYPE_EFRAME_ACK 5
+#define KVASER_PCIEFD_PACK_TYPE_ACK_DATA 6
+#define KVASER_PCIEFD_PACK_TYPE_STATUS 8
+#define KVASER_PCIEFD_PACK_TYPE_BUS_LOAD 9
+
+/* Kvaser KCAN packet common definitions */
+#define KVASER_PCIEFD_PACKET_SEQ_MSK 0xff
+#define KVASER_PCIEFD_PACKET_CHID_SHIFT 25
+#define KVASER_PCIEFD_PACKET_TYPE_SHIFT 28
+
+/* Kvaser KCAN TDATA and RDATA first word */
+#define KVASER_PCIEFD_RPACKET_IDE BIT(30)
+#define KVASER_PCIEFD_RPACKET_RTR BIT(29)
+/* Kvaser KCAN TDATA and RDATA second word */
+#define KVASER_PCIEFD_RPACKET_ESI BIT(13)
+#define KVASER_PCIEFD_RPACKET_BRS BIT(14)
+#define KVASER_PCIEFD_RPACKET_FDF BIT(15)
+#define KVASER_PCIEFD_RPACKET_DLC_SHIFT 8
+/* Kvaser KCAN TDATA second word */
+#define KVASER_PCIEFD_TPACKET_SMS BIT(16)
+#define KVASER_PCIEFD_TPACKET_AREQ BIT(31)
+
+/* Kvaser KCAN APACKET */
+#define KVASER_PCIEFD_APACKET_FLU BIT(8)
+#define KVASER_PCIEFD_APACKET_CT BIT(9)
+#define KVASER_PCIEFD_APACKET_ABL BIT(10)
+#define KVASER_PCIEFD_APACKET_NACK BIT(11)
+
+/* Kvaser KCAN SPACK first word */
+#define KVASER_PCIEFD_SPACK_RXERR_SHIFT 8
+#define KVASER_PCIEFD_SPACK_BOFF BIT(16)
+#define KVASER_PCIEFD_SPACK_IDET BIT(20)
+#define KVASER_PCIEFD_SPACK_IRM BIT(21)
+#define KVASER_PCIEFD_SPACK_RMCD BIT(22)
+/* Kvaser KCAN SPACK second word */
+#define KVASER_PCIEFD_SPACK_AUTO BIT(21)
+#define KVASER_PCIEFD_SPACK_EWLR BIT(23)
+#define KVASER_PCIEFD_SPACK_EPLR BIT(24)
+
+struct kvaser_pciefd;
+
+struct kvaser_pciefd_can {
+ struct can_priv can;
+ struct kvaser_pciefd *kv_pcie;
+ void __iomem *reg_base;
+ struct can_berr_counter bec;
+ u8 cmd_seq;
+ int err_rep_cnt;
+ int echo_idx;
+ spinlock_t lock; /* Locks sensitive registers (e.g. MODE) */
+ spinlock_t echo_lock; /* Locks the message echo buffer */
+ struct timer_list bec_poll_timer;
+ struct completion start_comp, flush_comp;
+};
+
+struct kvaser_pciefd {
+ struct pci_dev *pci;
+ void __iomem *reg_base;
+ struct kvaser_pciefd_can *can[KVASER_PCIEFD_MAX_CAN_CHANNELS];
+ void *dma_data[KVASER_PCIEFD_DMA_COUNT];
+ u8 nr_channels;
+ u32 freq;
+ u32 freq_to_ticks_div;
+};
+
+struct kvaser_pciefd_rx_packet {
+ u32 header[2];
+ u64 timestamp;
+};
+
+struct kvaser_pciefd_tx_packet {
+ u32 header[2];
+ u8 data[64];
+};
+
+static const struct can_bittiming_const kvaser_pciefd_bittiming_const = {
+ .name = KVASER_PCIEFD_DRV_NAME,
+ .tseg1_min = 1,
+ .tseg1_max = 255,
+ .tseg2_min = 1,
+ .tseg2_max = 32,
+ .sjw_max = 16,
+ .brp_min = 1,
+ .brp_max = 4096,
+ .brp_inc = 1,
+};
+
+struct kvaser_pciefd_cfg_param {
+ __le32 magic;
+ __le32 nr;
+ __le32 len;
+ u8 data[KVASER_PCIEFD_CFG_PARAM_MAX_SZ];
+};
+
+struct kvaser_pciefd_cfg_img {
+ __le32 version;
+ __le32 magic;
+ __le32 crc;
+ struct kvaser_pciefd_cfg_param params[KVASER_PCIEFD_CFG_MAX_PARAMS];
+};
+
+static const struct pci_device_id kvaser_pciefd_id_table[] = {
+ { PCI_DEVICE(KVASER_PCIEFD_VENDOR, KVASER_PCIEFD_4HS_ID), },
+ { PCI_DEVICE(KVASER_PCIEFD_VENDOR, KVASER_PCIEFD_2HS_ID), },
+ { PCI_DEVICE(KVASER_PCIEFD_VENDOR, KVASER_PCIEFD_HS_ID), },
+ { PCI_DEVICE(KVASER_PCIEFD_VENDOR, KVASER_PCIEFD_MINIPCIE_HS_ID), },
+ { PCI_DEVICE(KVASER_PCIEFD_VENDOR, KVASER_PCIEFD_MINIPCIE_2HS_ID), },
+ { 0,},
+};
+MODULE_DEVICE_TABLE(pci, kvaser_pciefd_id_table);
+
+/* Onboard flash memory functions */
+static int kvaser_pciefd_spi_wait_loop(struct kvaser_pciefd *pcie, int msk)
+{
+ u32 res;
+ int ret;
+
+ ret = readl_poll_timeout(pcie->reg_base + KVASER_PCIEFD_SPI_STATUS_REG,
+ res, res & msk, 0, 10);
+
+ return ret;
+}
+
+static int kvaser_pciefd_spi_cmd(struct kvaser_pciefd *pcie, const u8 *tx,
+ u32 tx_len, u8 *rx, u32 rx_len)
+{
+ int c;
+
+ iowrite32(BIT(0), pcie->reg_base + KVASER_PCIEFD_SPI_SSEL_REG);
+ iowrite32(BIT(10), pcie->reg_base + KVASER_PCIEFD_SPI_CTRL_REG);
+ ioread32(pcie->reg_base + KVASER_PCIEFD_SPI_RX_REG);
+
+ c = tx_len;
+ while (c--) {
+ if (kvaser_pciefd_spi_wait_loop(pcie, KVASER_PCIEFD_SPI_TRDY))
+ return -EIO;
+
+ iowrite32(*tx++, pcie->reg_base + KVASER_PCIEFD_SPI_TX_REG);
+
+ if (kvaser_pciefd_spi_wait_loop(pcie, KVASER_PCIEFD_SPI_RRDY))
+ return -EIO;
+
+ ioread32(pcie->reg_base + KVASER_PCIEFD_SPI_RX_REG);
+ }
+
+ c = rx_len;
+ while (c-- > 0) {
+ if (kvaser_pciefd_spi_wait_loop(pcie, KVASER_PCIEFD_SPI_TRDY))
+ return -EIO;
+
+ iowrite32(0, pcie->reg_base + KVASER_PCIEFD_SPI_TX_REG);
+
+ if (kvaser_pciefd_spi_wait_loop(pcie, KVASER_PCIEFD_SPI_RRDY))
+ return -EIO;
+
+ *rx++ = ioread32(pcie->reg_base + KVASER_PCIEFD_SPI_RX_REG);
+ }
+
+ if (kvaser_pciefd_spi_wait_loop(pcie, KVASER_PCIEFD_SPI_TMT))
+ return -EIO;
+
+ iowrite32(0, pcie->reg_base + KVASER_PCIEFD_SPI_CTRL_REG);
+
+ if (c != -1) {
+ dev_err(&pcie->pci->dev, "Flash SPI transfer failed\n");
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static int kvaser_pciefd_cfg_read_and_verify(struct kvaser_pciefd *pcie,
+ struct kvaser_pciefd_cfg_img *img)
+{
+ int offset = KVASER_PCIEFD_CFG_IMG_OFFSET;
+ int res, crc;
+ u8 *crc_buff;
+
+ u8 cmd[] = {
+ KVASER_PCIEFD_FLASH_READ_CMD,
+ (u8)((offset >> 16) & 0xff),
+ (u8)((offset >> 8) & 0xff),
+ (u8)(offset & 0xff)
+ };
+
+ res = kvaser_pciefd_spi_cmd(pcie, cmd, ARRAY_SIZE(cmd), (u8 *)img,
+ KVASER_PCIEFD_CFG_IMG_SZ);
+ if (res)
+ return res;
+
+ crc_buff = (u8 *)img->params;
+
+ if (le32_to_cpu(img->version) != KVASER_PCIEFD_CFG_SYS_VER) {
+ dev_err(&pcie->pci->dev,
+ "Config flash corrupted, version number is wrong\n");
+ return -ENODEV;
+ }
+
+ if (le32_to_cpu(img->magic) != KVASER_PCIEFD_CFG_MAGIC) {
+ dev_err(&pcie->pci->dev,
+ "Config flash corrupted, magic number is wrong\n");
+ return -ENODEV;
+ }
+
+ crc = ~crc32_be(0xffffffff, crc_buff, sizeof(img->params));
+ if (le32_to_cpu(img->crc) != crc) {
+ dev_err(&pcie->pci->dev,
+ "Stored CRC does not match flash image contents\n");
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static void kvaser_pciefd_cfg_read_params(struct kvaser_pciefd *pcie,
+ struct kvaser_pciefd_cfg_img *img)
+{
+ struct kvaser_pciefd_cfg_param *param;
+
+ param = &img->params[KVASER_PCIEFD_CFG_PARAM_NR_CHAN];
+ memcpy(&pcie->nr_channels, param->data, le32_to_cpu(param->len));
+}
+
+static int kvaser_pciefd_read_cfg(struct kvaser_pciefd *pcie)
+{
+ int res;
+ struct kvaser_pciefd_cfg_img *img;
+
+ /* Read electronic signature */
+ u8 cmd[] = {KVASER_PCIEFD_FLASH_RES_CMD, 0, 0, 0};
+
+ res = kvaser_pciefd_spi_cmd(pcie, cmd, ARRAY_SIZE(cmd), cmd, 1);
+ if (res)
+ return -EIO;
+
+ img = kmalloc(KVASER_PCIEFD_CFG_IMG_SZ, GFP_KERNEL);
+ if (!img)
+ return -ENOMEM;
+
+ if (cmd[0] != KVASER_PCIEFD_FLASH_ID_EPCS16) {
+ dev_err(&pcie->pci->dev,
+ "Flash id is 0x%x instead of expected EPCS16 (0x%x)\n",
+ cmd[0], KVASER_PCIEFD_FLASH_ID_EPCS16);
+
+ res = -ENODEV;
+ goto image_free;
+ }
+
+ cmd[0] = KVASER_PCIEFD_FLASH_STATUS_CMD;
+ res = kvaser_pciefd_spi_cmd(pcie, cmd, 1, cmd, 1);
+ if (res) {
+ goto image_free;
+ } else if (cmd[0] & 1) {
+ res = -EIO;
+ /* No write is ever done, so the WIP bit should never be set */
+ dev_err(&pcie->pci->dev, "Unexpected WIP bit set in flash\n");
+ goto image_free;
+ }
+
+ res = kvaser_pciefd_cfg_read_and_verify(pcie, img);
+ if (res) {
+ res = -EIO;
+ goto image_free;
+ }
+
+ kvaser_pciefd_cfg_read_params(pcie, img);
+
+image_free:
+ kfree(img);
+ return res;
+}
+
+static void kvaser_pciefd_request_status(struct kvaser_pciefd_can *can)
+{
+ u32 cmd;
+
+ cmd = KVASER_PCIEFD_KCAN_CMD_SRQ;
+ cmd |= ++can->cmd_seq << KVASER_PCIEFD_KCAN_CMD_SEQ_SHIFT;
+ iowrite32(cmd, can->reg_base + KVASER_PCIEFD_KCAN_CMD_REG);
+}
+
+static void kvaser_pciefd_enable_err_gen(struct kvaser_pciefd_can *can)
+{
+ u32 mode;
+ unsigned long irq;
+
+ spin_lock_irqsave(&can->lock, irq);
+ mode = ioread32(can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG);
+ if (!(mode & KVASER_PCIEFD_KCAN_MODE_EPEN)) {
+ mode |= KVASER_PCIEFD_KCAN_MODE_EPEN;
+ iowrite32(mode, can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG);
+ }
+ spin_unlock_irqrestore(&can->lock, irq);
+}
+
+static void kvaser_pciefd_disable_err_gen(struct kvaser_pciefd_can *can)
+{
+ u32 mode;
+ unsigned long irq;
+
+ spin_lock_irqsave(&can->lock, irq);
+ mode = ioread32(can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG);
+ mode &= ~KVASER_PCIEFD_KCAN_MODE_EPEN;
+ iowrite32(mode, can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG);
+ spin_unlock_irqrestore(&can->lock, irq);
+}
+
+static int kvaser_pciefd_set_tx_irq(struct kvaser_pciefd_can *can)
+{
+ u32 msk;
+
+ msk = KVASER_PCIEFD_KCAN_IRQ_TE | KVASER_PCIEFD_KCAN_IRQ_ROF |
+ KVASER_PCIEFD_KCAN_IRQ_TOF | KVASER_PCIEFD_KCAN_IRQ_ABD |
+ KVASER_PCIEFD_KCAN_IRQ_TAE | KVASER_PCIEFD_KCAN_IRQ_TAL |
+ KVASER_PCIEFD_KCAN_IRQ_FDIC | KVASER_PCIEFD_KCAN_IRQ_BPP |
+ KVASER_PCIEFD_KCAN_IRQ_TAR | KVASER_PCIEFD_KCAN_IRQ_TFD;
+
+ iowrite32(msk, can->reg_base + KVASER_PCIEFD_KCAN_IEN_REG);
+
+ return 0;
+}
+
+static void kvaser_pciefd_setup_controller(struct kvaser_pciefd_can *can)
+{
+ u32 mode;
+ unsigned long irq;
+
+ spin_lock_irqsave(&can->lock, irq);
+
+ mode = ioread32(can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG);
+ if (can->can.ctrlmode & CAN_CTRLMODE_FD) {
+ mode &= ~KVASER_PCIEFD_KCAN_MODE_CCM;
+ if (can->can.ctrlmode & CAN_CTRLMODE_FD_NON_ISO)
+ mode |= KVASER_PCIEFD_KCAN_MODE_NIFDEN;
+ else
+ mode &= ~KVASER_PCIEFD_KCAN_MODE_NIFDEN;
+ } else {
+ mode |= KVASER_PCIEFD_KCAN_MODE_CCM;
+ mode &= ~KVASER_PCIEFD_KCAN_MODE_NIFDEN;
+ }
+
+ if (can->can.ctrlmode & CAN_CTRLMODE_LISTENONLY)
+ mode |= KVASER_PCIEFD_KCAN_MODE_LOM;
+
+ mode |= KVASER_PCIEFD_KCAN_MODE_EEN;
+ mode |= KVASER_PCIEFD_KCAN_MODE_EPEN;
+ /* Use ACK packet type */
+ mode &= ~KVASER_PCIEFD_KCAN_MODE_APT;
+ mode &= ~KVASER_PCIEFD_KCAN_MODE_RM;
+ iowrite32(mode, can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG);
+
+ spin_unlock_irqrestore(&can->lock, irq);
+}
+
+static void kvaser_pciefd_start_controller_flush(struct kvaser_pciefd_can *can)
+{
+ u32 status;
+ unsigned long irq;
+
+ spin_lock_irqsave(&can->lock, irq);
+ iowrite32(-1, can->reg_base + KVASER_PCIEFD_KCAN_IRQ_REG);
+ iowrite32(KVASER_PCIEFD_KCAN_IRQ_ABD | KVASER_PCIEFD_KCAN_IRQ_TFD,
+ can->reg_base + KVASER_PCIEFD_KCAN_IEN_REG);
+
+ status = ioread32(can->reg_base + KVASER_PCIEFD_KCAN_STAT_REG);
+ if (status & KVASER_PCIEFD_KCAN_STAT_IDLE) {
+ u32 cmd;
+
+ /* If controller is already idle, run abort, flush and reset */
+ cmd = KVASER_PCIEFD_KCAN_CMD_AT;
+ cmd |= ++can->cmd_seq << KVASER_PCIEFD_KCAN_CMD_SEQ_SHIFT;
+ iowrite32(cmd, can->reg_base + KVASER_PCIEFD_KCAN_CMD_REG);
+ } else if (!(status & KVASER_PCIEFD_KCAN_STAT_RMR)) {
+ u32 mode;
+
+ /* Put controller in reset mode */
+ mode = ioread32(can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG);
+ mode |= KVASER_PCIEFD_KCAN_MODE_RM;
+ iowrite32(mode, can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG);
+ }
+
+ spin_unlock_irqrestore(&can->lock, irq);
+}
+
+static int kvaser_pciefd_bus_on(struct kvaser_pciefd_can *can)
+{
+ u32 mode;
+ unsigned long irq;
+
+ del_timer(&can->bec_poll_timer);
+
+ if (!completion_done(&can->flush_comp))
+ kvaser_pciefd_start_controller_flush(can);
+
+ if (!wait_for_completion_timeout(&can->flush_comp,
+ KVASER_PCIEFD_WAIT_TIMEOUT)) {
+ netdev_err(can->can.dev, "Timeout during bus on flush\n");
+ return -ETIMEDOUT;
+ }
+
+ spin_lock_irqsave(&can->lock, irq);
+ iowrite32(0, can->reg_base + KVASER_PCIEFD_KCAN_IEN_REG);
+ iowrite32(-1, can->reg_base + KVASER_PCIEFD_KCAN_IRQ_REG);
+
+ iowrite32(KVASER_PCIEFD_KCAN_IRQ_ABD | KVASER_PCIEFD_KCAN_IRQ_TFD,
+ can->reg_base + KVASER_PCIEFD_KCAN_IEN_REG);
+
+ mode = ioread32(can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG);
+ mode &= ~KVASER_PCIEFD_KCAN_MODE_RM;
+ iowrite32(mode, can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG);
+ spin_unlock_irqrestore(&can->lock, irq);
+
+ if (!wait_for_completion_timeout(&can->start_comp,
+ KVASER_PCIEFD_WAIT_TIMEOUT)) {
+ netdev_err(can->can.dev, "Timeout during bus on reset\n");
+ return -ETIMEDOUT;
+ }
+ /* Reset interrupt handling */
+ iowrite32(0, can->reg_base + KVASER_PCIEFD_KCAN_IEN_REG);
+ iowrite32(-1, can->reg_base + KVASER_PCIEFD_KCAN_IRQ_REG);
+
+ kvaser_pciefd_set_tx_irq(can);
+ kvaser_pciefd_setup_controller(can);
+
+ can->can.state = CAN_STATE_ERROR_ACTIVE;
+ netif_wake_queue(can->can.dev);
+ can->bec.txerr = 0;
+ can->bec.rxerr = 0;
+ can->err_rep_cnt = 0;
+
+ return 0;
+}
+
+static void kvaser_pciefd_pwm_stop(struct kvaser_pciefd_can *can)
+{
+ u8 top;
+ u32 pwm_ctrl;
+ unsigned long irq;
+
+ spin_lock_irqsave(&can->lock, irq);
+ pwm_ctrl = ioread32(can->reg_base + KVASER_PCIEFD_KCAN_PWM_REG);
+ top = (pwm_ctrl >> KVASER_PCIEFD_KCAN_PWM_TOP_SHIFT) & 0xff;
+
+ /* Set duty cycle to zero */
+ pwm_ctrl |= top;
+ iowrite32(pwm_ctrl, can->reg_base + KVASER_PCIEFD_KCAN_PWM_REG);
+ spin_unlock_irqrestore(&can->lock, irq);
+}
+
+static void kvaser_pciefd_pwm_start(struct kvaser_pciefd_can *can)
+{
+ int top, trigger;
+ u32 pwm_ctrl;
+ unsigned long irq;
+
+ kvaser_pciefd_pwm_stop(can);
+ spin_lock_irqsave(&can->lock, irq);
+
+ /* Set PWM frequency to 500 kHz */
+ top = can->can.clock.freq / (2 * 500000) - 1;
+
+ pwm_ctrl = top & 0xff;
+ pwm_ctrl |= (top & 0xff) << KVASER_PCIEFD_KCAN_PWM_TOP_SHIFT;
+ iowrite32(pwm_ctrl, can->reg_base + KVASER_PCIEFD_KCAN_PWM_REG);
+
+ /* Set duty cycle to 95% */
+ trigger = (100 * top - 95 * (top + 1) + 50) / 100;
+ pwm_ctrl = trigger & 0xff;
+ pwm_ctrl |= (top & 0xff) << KVASER_PCIEFD_KCAN_PWM_TOP_SHIFT;
+ iowrite32(pwm_ctrl, can->reg_base + KVASER_PCIEFD_KCAN_PWM_REG);
+ spin_unlock_irqrestore(&can->lock, irq);
+}
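To make the arithmetic above concrete, assume (for illustration only) an 80 MHz KCAN clock; the real value is read from KVASER_PCIEFD_SYSID_CANFREQ_REG at probe time:

	top     = 80000000 / (2 * 500000) - 1           = 79
	trigger = (100 * 79 - 95 * (79 + 1) + 50) / 100 = 350 / 100 = 3

so the PWM counter wraps at 79, and the 95% duty cycle rounds, after integer division, to a trigger value of 3.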
+
+static int kvaser_pciefd_open(struct net_device *netdev)
+{
+ int err;
+ struct kvaser_pciefd_can *can = netdev_priv(netdev);
+
+ err = open_candev(netdev);
+ if (err)
+ return err;
+
+ err = kvaser_pciefd_bus_on(can);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+static int kvaser_pciefd_stop(struct net_device *netdev)
+{
+ struct kvaser_pciefd_can *can = netdev_priv(netdev);
+ int ret = 0;
+
+ /* Don't interrupt ongoing flush */
+ if (!completion_done(&can->flush_comp))
+ kvaser_pciefd_start_controller_flush(can);
+
+ if (!wait_for_completion_timeout(&can->flush_comp,
+ KVASER_PCIEFD_WAIT_TIMEOUT)) {
+ netdev_err(can->can.dev, "Timeout during stop\n");
+ ret = -ETIMEDOUT;
+ } else {
+ iowrite32(0, can->reg_base + KVASER_PCIEFD_KCAN_IEN_REG);
+ del_timer(&can->bec_poll_timer);
+ }
+ close_candev(netdev);
+
+ return ret;
+}
+
+static int kvaser_pciefd_prepare_tx_packet(struct kvaser_pciefd_tx_packet *p,
+ struct kvaser_pciefd_can *can,
+ struct sk_buff *skb)
+{
+ struct canfd_frame *cf = (struct canfd_frame *)skb->data;
+ int packet_size;
+ int seq = can->echo_idx;
+
+ memset(p, 0, sizeof(*p));
+
+ if (can->can.ctrlmode & CAN_CTRLMODE_ONE_SHOT)
+ p->header[1] |= KVASER_PCIEFD_TPACKET_SMS;
+
+ if (cf->can_id & CAN_RTR_FLAG)
+ p->header[0] |= KVASER_PCIEFD_RPACKET_RTR;
+
+ if (cf->can_id & CAN_EFF_FLAG)
+ p->header[0] |= KVASER_PCIEFD_RPACKET_IDE;
+
+ p->header[0] |= cf->can_id & CAN_EFF_MASK;
+ p->header[1] |= can_len2dlc(cf->len) << KVASER_PCIEFD_RPACKET_DLC_SHIFT;
+ p->header[1] |= KVASER_PCIEFD_TPACKET_AREQ;
+
+ if (can_is_canfd_skb(skb)) {
+ p->header[1] |= KVASER_PCIEFD_RPACKET_FDF;
+ if (cf->flags & CANFD_BRS)
+ p->header[1] |= KVASER_PCIEFD_RPACKET_BRS;
+ if (cf->flags & CANFD_ESI)
+ p->header[1] |= KVASER_PCIEFD_RPACKET_ESI;
+ }
+
+ p->header[1] |= seq & KVASER_PCIEFD_PACKET_SEQ_MSK;
+
+ packet_size = cf->len;
+ memcpy(p->data, cf->data, packet_size);
+
+ return DIV_ROUND_UP(packet_size, 4);
+}
+
+static netdev_tx_t kvaser_pciefd_start_xmit(struct sk_buff *skb,
+ struct net_device *netdev)
+{
+ struct kvaser_pciefd_can *can = netdev_priv(netdev);
+ unsigned long irq_flags;
+ struct kvaser_pciefd_tx_packet packet;
+ int nwords;
+ u8 count;
+
+ if (can_dropped_invalid_skb(netdev, skb))
+ return NETDEV_TX_OK;
+
+ nwords = kvaser_pciefd_prepare_tx_packet(&packet, can, skb);
+
+ spin_lock_irqsave(&can->echo_lock, irq_flags);
+
+ /* Prepare and save echo skb in internal slot */
+ can_put_echo_skb(skb, netdev, can->echo_idx);
+
+ /* Move echo index to the next slot */
+ can->echo_idx = (can->echo_idx + 1) % can->can.echo_skb_max;
+
+ /* Write header to fifo */
+ iowrite32(packet.header[0],
+ can->reg_base + KVASER_PCIEFD_KCAN_FIFO_REG);
+ iowrite32(packet.header[1],
+ can->reg_base + KVASER_PCIEFD_KCAN_FIFO_REG);
+
+ if (nwords) {
+ u32 data_last = ((u32 *)packet.data)[nwords - 1];
+
+ /* Write data to fifo, except last word */
+ iowrite32_rep(can->reg_base +
+ KVASER_PCIEFD_KCAN_FIFO_REG, packet.data,
+ nwords - 1);
+ /* Write last word to end of fifo */
+ __raw_writel(data_last, can->reg_base +
+ KVASER_PCIEFD_KCAN_FIFO_LAST_REG);
+ } else {
+ /* Complete write to fifo */
+ __raw_writel(0, can->reg_base +
+ KVASER_PCIEFD_KCAN_FIFO_LAST_REG);
+ }
+
+ count = ioread32(can->reg_base + KVASER_PCIEFD_KCAN_TX_NPACKETS_REG);
+ /* No room for a new message, stop the queue until at least one
+ * successful transmit
+ */
+ if (count >= KVASER_PCIEFD_CAN_TX_MAX_COUNT ||
+ can->can.echo_skb[can->echo_idx])
+ netif_stop_queue(netdev);
+
+ spin_unlock_irqrestore(&can->echo_lock, irq_flags);
+
+ return NETDEV_TX_OK;
+}
+
+static int kvaser_pciefd_set_bittiming(struct kvaser_pciefd_can *can, bool data)
+{
+ u32 mode, test, btrn;
+ unsigned long irq_flags;
+ int ret;
+ struct can_bittiming *bt;
+
+ if (data)
+ bt = &can->can.data_bittiming;
+ else
+ bt = &can->can.bittiming;
+
+ btrn = ((bt->phase_seg2 - 1) & 0x1f) <<
+ KVASER_PCIEFD_KCAN_BTRN_TSEG2_SHIFT |
+ (((bt->prop_seg + bt->phase_seg1) - 1) & 0x1ff) <<
+ KVASER_PCIEFD_KCAN_BTRN_TSEG1_SHIFT |
+ ((bt->sjw - 1) & 0xf) << KVASER_PCIEFD_KCAN_BTRN_SJW_SHIFT |
+ ((bt->brp - 1) & 0x1fff);
+
+ spin_lock_irqsave(&can->lock, irq_flags);
+ mode = ioread32(can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG);
+
+ /* Put the circuit in reset mode */
+ iowrite32(mode | KVASER_PCIEFD_KCAN_MODE_RM,
+ can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG);
+
+ /* Can only set bittiming if in reset mode */
+ ret = readl_poll_timeout(can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG,
+ test, test & KVASER_PCIEFD_KCAN_MODE_RM,
+ 0, 10);
+
+ if (ret) {
+ spin_unlock_irqrestore(&can->lock, irq_flags);
+ return -EBUSY;
+ }
+
+ if (data)
+ iowrite32(btrn, can->reg_base + KVASER_PCIEFD_KCAN_BTRD_REG);
+ else
+ iowrite32(btrn, can->reg_base + KVASER_PCIEFD_KCAN_BTRN_REG);
+
+ /* Restore previous reset mode status */
+ iowrite32(mode, can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG);
+
+ spin_unlock_irqrestore(&can->lock, irq_flags);
+ return 0;
+}
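As a worked example of the BTRN packing above (values purely illustrative): for brp = 1, sjw = 16, prop_seg + phase_seg1 = 63 and phase_seg2 = 16, the register value is assembled as

	(16 - 1) << 26 = 0x3c000000   /* TSEG2 */
	(63 - 1) << 17 = 0x007c0000   /* TSEG1 */
	(16 - 1) << 13 = 0x0001e000   /* SJW   */
	(1 - 1)        = 0x00000000   /* BRP   */
	btrn           = 0x3c7de000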
+
+static int kvaser_pciefd_set_nominal_bittiming(struct net_device *ndev)
+{
+ return kvaser_pciefd_set_bittiming(netdev_priv(ndev), false);
+}
+
+static int kvaser_pciefd_set_data_bittiming(struct net_device *ndev)
+{
+ return kvaser_pciefd_set_bittiming(netdev_priv(ndev), true);
+}
+
+static int kvaser_pciefd_set_mode(struct net_device *ndev, enum can_mode mode)
+{
+ struct kvaser_pciefd_can *can = netdev_priv(ndev);
+ int ret = 0;
+
+ switch (mode) {
+ case CAN_MODE_START:
+ if (!can->can.restart_ms)
+ ret = kvaser_pciefd_bus_on(can);
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return ret;
+}
+
+static int kvaser_pciefd_get_berr_counter(const struct net_device *ndev,
+ struct can_berr_counter *bec)
+{
+ struct kvaser_pciefd_can *can = netdev_priv(ndev);
+
+ bec->rxerr = can->bec.rxerr;
+ bec->txerr = can->bec.txerr;
+ return 0;
+}
+
+static void kvaser_pciefd_bec_poll_timer(struct timer_list *data)
+{
+ struct kvaser_pciefd_can *can = from_timer(can, data, bec_poll_timer);
+
+ kvaser_pciefd_enable_err_gen(can);
+ kvaser_pciefd_request_status(can);
+ can->err_rep_cnt = 0;
+}
+
+static const struct net_device_ops kvaser_pciefd_netdev_ops = {
+ .ndo_open = kvaser_pciefd_open,
+ .ndo_stop = kvaser_pciefd_stop,
+ .ndo_start_xmit = kvaser_pciefd_start_xmit,
+ .ndo_change_mtu = can_change_mtu,
+};
+
+static int kvaser_pciefd_setup_can_ctrls(struct kvaser_pciefd *pcie)
+{
+ int i;
+
+ for (i = 0; i < pcie->nr_channels; i++) {
+ struct net_device *netdev;
+ struct kvaser_pciefd_can *can;
+ u32 status, tx_npackets;
+
+ netdev = alloc_candev(sizeof(struct kvaser_pciefd_can),
+ KVASER_PCIEFD_CAN_TX_MAX_COUNT);
+ if (!netdev)
+ return -ENOMEM;
+
+ can = netdev_priv(netdev);
+ netdev->netdev_ops = &kvaser_pciefd_netdev_ops;
+ can->reg_base = pcie->reg_base + KVASER_PCIEFD_KCAN0_BASE +
+ i * KVASER_PCIEFD_KCAN_BASE_OFFSET;
+
+ can->kv_pcie = pcie;
+ can->cmd_seq = 0;
+ can->err_rep_cnt = 0;
+ can->bec.txerr = 0;
+ can->bec.rxerr = 0;
+
+ init_completion(&can->start_comp);
+ init_completion(&can->flush_comp);
+ timer_setup(&can->bec_poll_timer, kvaser_pciefd_bec_poll_timer,
+ 0);
+
+ tx_npackets = ioread32(can->reg_base +
+ KVASER_PCIEFD_KCAN_TX_NPACKETS_REG);
+ if (((tx_npackets >> KVASER_PCIEFD_KCAN_TX_NPACKETS_MAX_SHIFT) &
+ 0xff) < KVASER_PCIEFD_CAN_TX_MAX_COUNT) {
+ dev_err(&pcie->pci->dev,
+ "Max Tx count is smaller than expected\n");
+
+ free_candev(netdev);
+ return -ENODEV;
+ }
+
+ can->can.clock.freq = pcie->freq;
+ can->can.echo_skb_max = KVASER_PCIEFD_CAN_TX_MAX_COUNT;
+ can->echo_idx = 0;
+ spin_lock_init(&can->echo_lock);
+ spin_lock_init(&can->lock);
+ can->can.bittiming_const = &kvaser_pciefd_bittiming_const;
+ can->can.data_bittiming_const = &kvaser_pciefd_bittiming_const;
+
+ can->can.do_set_bittiming = kvaser_pciefd_set_nominal_bittiming;
+ can->can.do_set_data_bittiming =
+ kvaser_pciefd_set_data_bittiming;
+
+ can->can.do_set_mode = kvaser_pciefd_set_mode;
+ can->can.do_get_berr_counter = kvaser_pciefd_get_berr_counter;
+
+ can->can.ctrlmode_supported = CAN_CTRLMODE_LISTENONLY |
+ CAN_CTRLMODE_FD |
+ CAN_CTRLMODE_FD_NON_ISO;
+
+ status = ioread32(can->reg_base + KVASER_PCIEFD_KCAN_STAT_REG);
+ if (!(status & KVASER_PCIEFD_KCAN_STAT_FD)) {
+ dev_err(&pcie->pci->dev,
+ "CAN FD not supported as expected %d\n", i);
+
+ free_candev(netdev);
+ return -ENODEV;
+ }
+
+ if (status & KVASER_PCIEFD_KCAN_STAT_CAP)
+ can->can.ctrlmode_supported |= CAN_CTRLMODE_ONE_SHOT;
+
+ netdev->flags |= IFF_ECHO;
+
+ SET_NETDEV_DEV(netdev, &pcie->pci->dev);
+
+ iowrite32(-1, can->reg_base + KVASER_PCIEFD_KCAN_IRQ_REG);
+ iowrite32(KVASER_PCIEFD_KCAN_IRQ_ABD |
+ KVASER_PCIEFD_KCAN_IRQ_TFD,
+ can->reg_base + KVASER_PCIEFD_KCAN_IEN_REG);
+
+ pcie->can[i] = can;
+ kvaser_pciefd_pwm_start(can);
+ }
+
+ return 0;
+}
+
+static int kvaser_pciefd_reg_candev(struct kvaser_pciefd *pcie)
+{
+ int i;
+
+ for (i = 0; i < pcie->nr_channels; i++) {
+ int err = register_candev(pcie->can[i]->can.dev);
+
+ if (err) {
+ int j;
+
+ /* Unregister all successfully registered devices. */
+ for (j = 0; j < i; j++)
+ unregister_candev(pcie->can[j]->can.dev);
+ return err;
+ }
+ }
+
+ return 0;
+}
+
+static void kvaser_pciefd_write_dma_map(struct kvaser_pciefd *pcie,
+ dma_addr_t addr, int offset)
+{
+ u32 word1, word2;
+
+#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
+ word1 = addr | KVASER_PCIEFD_64BIT_DMA_BIT;
+ word2 = addr >> 32;
+#else
+ word1 = addr;
+ word2 = 0;
+#endif
+ iowrite32(word1, pcie->reg_base + offset);
+ iowrite32(word2, pcie->reg_base + offset + 4);
+}
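For illustration, with CONFIG_ARCH_DMA_ADDR_T_64BIT set and a hypothetical buffer address of 0x123450000, the two map words written above become

	word1 = 0x23450001   /* low 32 bits, bit 0 marks a 64-bit address */
	word2 = 0x00000001   /* high 32 bits */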
+
+static int kvaser_pciefd_setup_dma(struct kvaser_pciefd *pcie)
+{
+ int i;
+ u32 srb_status;
+ dma_addr_t dma_addr[KVASER_PCIEFD_DMA_COUNT];
+
+ /* Disable the DMA */
+ iowrite32(0, pcie->reg_base + KVASER_PCIEFD_SRB_CTRL_REG);
+ for (i = 0; i < KVASER_PCIEFD_DMA_COUNT; i++) {
+ unsigned int offset = KVASER_PCIEFD_DMA_MAP_BASE + 8 * i;
+
+ pcie->dma_data[i] =
+ dmam_alloc_coherent(&pcie->pci->dev,
+ KVASER_PCIEFD_DMA_SIZE,
+ &dma_addr[i],
+ GFP_KERNEL);
+
+ if (!pcie->dma_data[i] || !dma_addr[i]) {
+ dev_err(&pcie->pci->dev, "Rx dma_alloc(%u) failure\n",
+ KVASER_PCIEFD_DMA_SIZE);
+ return -ENOMEM;
+ }
+
+ kvaser_pciefd_write_dma_map(pcie, dma_addr[i], offset);
+ }
+
+ /* Reset Rx FIFO, and both DMA buffers */
+ iowrite32(KVASER_PCIEFD_SRB_CMD_FOR | KVASER_PCIEFD_SRB_CMD_RDB0 |
+ KVASER_PCIEFD_SRB_CMD_RDB1,
+ pcie->reg_base + KVASER_PCIEFD_SRB_CMD_REG);
+
+ srb_status = ioread32(pcie->reg_base + KVASER_PCIEFD_SRB_STAT_REG);
+ if (!(srb_status & KVASER_PCIEFD_SRB_STAT_DI)) {
+ dev_err(&pcie->pci->dev, "DMA not idle before enabling\n");
+ return -EIO;
+ }
+
+ /* Enable the DMA */
+ iowrite32(KVASER_PCIEFD_SRB_CTRL_DMA_ENABLE,
+ pcie->reg_base + KVASER_PCIEFD_SRB_CTRL_REG);
+
+ return 0;
+}
+
+static int kvaser_pciefd_setup_board(struct kvaser_pciefd *pcie)
+{
+ u32 sysid, srb_status, build;
+ u8 sysid_nr_chan;
+ int ret;
+
+ ret = kvaser_pciefd_read_cfg(pcie);
+ if (ret)
+ return ret;
+
+ sysid = ioread32(pcie->reg_base + KVASER_PCIEFD_SYSID_VERSION_REG);
+ sysid_nr_chan = (sysid >> KVASER_PCIEFD_SYSID_NRCHAN_SHIFT) & 0xff;
+ if (pcie->nr_channels != sysid_nr_chan) {
+ dev_err(&pcie->pci->dev,
+ "Number of channels does not match: %u vs %u\n",
+ pcie->nr_channels,
+ sysid_nr_chan);
+ return -ENODEV;
+ }
+
+ if (pcie->nr_channels > KVASER_PCIEFD_MAX_CAN_CHANNELS)
+ pcie->nr_channels = KVASER_PCIEFD_MAX_CAN_CHANNELS;
+
+ build = ioread32(pcie->reg_base + KVASER_PCIEFD_SYSID_BUILD_REG);
+ dev_dbg(&pcie->pci->dev, "Version %u.%u.%u\n",
+ (sysid >> KVASER_PCIEFD_SYSID_MAJOR_VER_SHIFT) & 0xff,
+ sysid & 0xff,
+ (build >> KVASER_PCIEFD_SYSID_BUILD_VER_SHIFT) & 0x7fff);
+
+ srb_status = ioread32(pcie->reg_base + KVASER_PCIEFD_SRB_STAT_REG);
+ if (!(srb_status & KVASER_PCIEFD_SRB_STAT_DMA)) {
+ dev_err(&pcie->pci->dev,
+ "Hardware without DMA is not supported\n");
+ return -ENODEV;
+ }
+
+ pcie->freq = ioread32(pcie->reg_base + KVASER_PCIEFD_SYSID_CANFREQ_REG);
+ pcie->freq_to_ticks_div = pcie->freq / 1000000;
+ if (pcie->freq_to_ticks_div == 0)
+ pcie->freq_to_ticks_div = 1;
+
+ /* Turn off all loopback functionality */
+ iowrite32(0, pcie->reg_base + KVASER_PCIEFD_LOOP_REG);
+ return ret;
+}
+
+static int kvaser_pciefd_handle_data_packet(struct kvaser_pciefd *pcie,
+ struct kvaser_pciefd_rx_packet *p,
+ __le32 *data)
+{
+ struct sk_buff *skb;
+ struct canfd_frame *cf;
+ struct can_priv *priv;
+ struct net_device_stats *stats;
+ struct skb_shared_hwtstamps *shhwtstamps;
+ u8 ch_id = (p->header[1] >> KVASER_PCIEFD_PACKET_CHID_SHIFT) & 0x7;
+
+ if (ch_id >= pcie->nr_channels)
+ return -EIO;
+
+ priv = &pcie->can[ch_id]->can;
+ stats = &priv->dev->stats;
+
+ if (p->header[1] & KVASER_PCIEFD_RPACKET_FDF) {
+ skb = alloc_canfd_skb(priv->dev, &cf);
+ if (!skb) {
+ stats->rx_dropped++;
+ return -ENOMEM;
+ }
+
+ if (p->header[1] & KVASER_PCIEFD_RPACKET_BRS)
+ cf->flags |= CANFD_BRS;
+
+ if (p->header[1] & KVASER_PCIEFD_RPACKET_ESI)
+ cf->flags |= CANFD_ESI;
+ } else {
+ skb = alloc_can_skb(priv->dev, (struct can_frame **)&cf);
+ if (!skb) {
+ stats->rx_dropped++;
+ return -ENOMEM;
+ }
+ }
+
+ cf->can_id = p->header[0] & CAN_EFF_MASK;
+ if (p->header[0] & KVASER_PCIEFD_RPACKET_IDE)
+ cf->can_id |= CAN_EFF_FLAG;
+
+ cf->len = can_dlc2len(p->header[1] >> KVASER_PCIEFD_RPACKET_DLC_SHIFT);
+
+ if (p->header[0] & KVASER_PCIEFD_RPACKET_RTR)
+ cf->can_id |= CAN_RTR_FLAG;
+ else
+ memcpy(cf->data, data, cf->len);
+
+ shhwtstamps = skb_hwtstamps(skb);
+
+ shhwtstamps->hwtstamp =
+ ns_to_ktime(div_u64(p->timestamp * 1000,
+ pcie->freq_to_ticks_div));
+
+ stats->rx_bytes += cf->len;
+ stats->rx_packets++;
+
+ return netif_rx(skb);
+}
+
+static void kvaser_pciefd_change_state(struct kvaser_pciefd_can *can,
+ struct can_frame *cf,
+ enum can_state new_state,
+ enum can_state tx_state,
+ enum can_state rx_state)
+{
+ can_change_state(can->can.dev, cf, tx_state, rx_state);
+
+ if (new_state == CAN_STATE_BUS_OFF) {
+ struct net_device *ndev = can->can.dev;
+ unsigned long irq_flags;
+
+ spin_lock_irqsave(&can->lock, irq_flags);
+ netif_stop_queue(can->can.dev);
+ spin_unlock_irqrestore(&can->lock, irq_flags);
+
+ /* Prevent the CAN controller from automatically recovering from bus off */
+ if (!can->can.restart_ms) {
+ kvaser_pciefd_start_controller_flush(can);
+ can_bus_off(ndev);
+ }
+ }
+}
+
+static void kvaser_pciefd_packet_to_state(struct kvaser_pciefd_rx_packet *p,
+ struct can_berr_counter *bec,
+ enum can_state *new_state,
+ enum can_state *tx_state,
+ enum can_state *rx_state)
+{
+ if (p->header[0] & KVASER_PCIEFD_SPACK_BOFF ||
+ p->header[0] & KVASER_PCIEFD_SPACK_IRM)
+ *new_state = CAN_STATE_BUS_OFF;
+ else if (bec->txerr >= 255 || bec->rxerr >= 255)
+ *new_state = CAN_STATE_BUS_OFF;
+ else if (p->header[1] & KVASER_PCIEFD_SPACK_EPLR)
+ *new_state = CAN_STATE_ERROR_PASSIVE;
+ else if (bec->txerr >= 128 || bec->rxerr >= 128)
+ *new_state = CAN_STATE_ERROR_PASSIVE;
+ else if (p->header[1] & KVASER_PCIEFD_SPACK_EWLR)
+ *new_state = CAN_STATE_ERROR_WARNING;
+ else if (bec->txerr >= 96 || bec->rxerr >= 96)
+ *new_state = CAN_STATE_ERROR_WARNING;
+ else
+ *new_state = CAN_STATE_ERROR_ACTIVE;
+
+ *tx_state = bec->txerr >= bec->rxerr ? *new_state : 0;
+ *rx_state = bec->txerr <= bec->rxerr ? *new_state : 0;
+}
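A worked example of the mapping above (illustrative counters): a status packet with txerr = 130, rxerr = 20 and none of the BOFF/IRM/EPLR/EWLR flags set yields

	new_state = CAN_STATE_ERROR_PASSIVE   /* txerr >= 128 */
	tx_state  = CAN_STATE_ERROR_PASSIVE   /* txerr >= rxerr */
	rx_state  = 0                         /* txerr > rxerr */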
+
+static int kvaser_pciefd_rx_error_frame(struct kvaser_pciefd_can *can,
+ struct kvaser_pciefd_rx_packet *p)
+{
+ struct can_berr_counter bec;
+ enum can_state old_state, new_state, tx_state, rx_state;
+ struct net_device *ndev = can->can.dev;
+ struct sk_buff *skb;
+ struct can_frame *cf = NULL;
+ struct skb_shared_hwtstamps *shhwtstamps;
+ struct net_device_stats *stats = &ndev->stats;
+
+ old_state = can->can.state;
+
+ bec.txerr = p->header[0] & 0xff;
+ bec.rxerr = (p->header[0] >> KVASER_PCIEFD_SPACK_RXERR_SHIFT) & 0xff;
+
+ kvaser_pciefd_packet_to_state(p, &bec, &new_state, &tx_state,
+ &rx_state);
+
+ skb = alloc_can_err_skb(ndev, &cf);
+
+ if (new_state != old_state) {
+ kvaser_pciefd_change_state(can, cf, new_state, tx_state,
+ rx_state);
+
+ if (old_state == CAN_STATE_BUS_OFF &&
+ new_state == CAN_STATE_ERROR_ACTIVE &&
+ can->can.restart_ms) {
+ can->can.can_stats.restarts++;
+ if (skb)
+ cf->can_id |= CAN_ERR_RESTARTED;
+ }
+ }
+
+ can->err_rep_cnt++;
+ can->can.can_stats.bus_error++;
+ stats->rx_errors++;
+
+ can->bec.txerr = bec.txerr;
+ can->bec.rxerr = bec.rxerr;
+
+ if (!skb) {
+ stats->rx_dropped++;
+ return -ENOMEM;
+ }
+
+ shhwtstamps = skb_hwtstamps(skb);
+ shhwtstamps->hwtstamp =
+ ns_to_ktime(div_u64(p->timestamp * 1000,
+ can->kv_pcie->freq_to_ticks_div));
+ cf->can_id |= CAN_ERR_BUSERROR;
+
+ cf->data[6] = bec.txerr;
+ cf->data[7] = bec.rxerr;
+
+ stats->rx_packets++;
+ stats->rx_bytes += cf->can_dlc;
+
+ netif_rx(skb);
+ return 0;
+}
+
+static int kvaser_pciefd_handle_error_packet(struct kvaser_pciefd *pcie,
+ struct kvaser_pciefd_rx_packet *p)
+{
+ struct kvaser_pciefd_can *can;
+ u8 ch_id = (p->header[1] >> KVASER_PCIEFD_PACKET_CHID_SHIFT) & 0x7;
+
+ if (ch_id >= pcie->nr_channels)
+ return -EIO;
+
+ can = pcie->can[ch_id];
+
+ kvaser_pciefd_rx_error_frame(can, p);
+ if (can->err_rep_cnt >= KVASER_PCIEFD_MAX_ERR_REP)
+ /* Do not report more errors until the bec_poll_timer expires */
+ kvaser_pciefd_disable_err_gen(can);
+ /* Start polling the error counters */
+ mod_timer(&can->bec_poll_timer, KVASER_PCIEFD_BEC_POLL_FREQ);
+ return 0;
+}
+
+static int kvaser_pciefd_handle_status_resp(struct kvaser_pciefd_can *can,
+ struct kvaser_pciefd_rx_packet *p)
+{
+ struct can_berr_counter bec;
+ enum can_state old_state, new_state, tx_state, rx_state;
+
+ old_state = can->can.state;
+
+ bec.txerr = p->header[0] & 0xff;
+ bec.rxerr = (p->header[0] >> KVASER_PCIEFD_SPACK_RXERR_SHIFT) & 0xff;
+
+ kvaser_pciefd_packet_to_state(p, &bec, &new_state, &tx_state,
+ &rx_state);
+
+ if (new_state != old_state) {
+ struct net_device *ndev = can->can.dev;
+ struct sk_buff *skb;
+ struct can_frame *cf;
+ struct skb_shared_hwtstamps *shhwtstamps;
+
+ skb = alloc_can_err_skb(ndev, &cf);
+ if (!skb) {
+ struct net_device_stats *stats = &ndev->stats;
+
+ stats->rx_dropped++;
+ return -ENOMEM;
+ }
+
+ kvaser_pciefd_change_state(can, cf, new_state, tx_state,
+ rx_state);
+
+ if (old_state == CAN_STATE_BUS_OFF &&
+ new_state == CAN_STATE_ERROR_ACTIVE &&
+ can->can.restart_ms) {
+ can->can.can_stats.restarts++;
+ cf->can_id |= CAN_ERR_RESTARTED;
+ }
+
+ shhwtstamps = skb_hwtstamps(skb);
+ shhwtstamps->hwtstamp =
+ ns_to_ktime(div_u64(p->timestamp * 1000,
+ can->kv_pcie->freq_to_ticks_div));
+
+ cf->data[6] = bec.txerr;
+ cf->data[7] = bec.rxerr;
+
+ netif_rx(skb);
+ }
+ can->bec.txerr = bec.txerr;
+ can->bec.rxerr = bec.rxerr;
+ /* Check if we need to poll the error counters */
+ if (bec.txerr || bec.rxerr)
+ mod_timer(&can->bec_poll_timer, KVASER_PCIEFD_BEC_POLL_FREQ);
+
+ return 0;
+}
+
+static int kvaser_pciefd_handle_status_packet(struct kvaser_pciefd *pcie,
+ struct kvaser_pciefd_rx_packet *p)
+{
+ struct kvaser_pciefd_can *can;
+ u8 cmdseq;
+ u32 status;
+ u8 ch_id = (p->header[1] >> KVASER_PCIEFD_PACKET_CHID_SHIFT) & 0x7;
+
+ if (ch_id >= pcie->nr_channels)
+ return -EIO;
+
+ can = pcie->can[ch_id];
+
+ status = ioread32(can->reg_base + KVASER_PCIEFD_KCAN_STAT_REG);
+ cmdseq = (status >> KVASER_PCIEFD_KCAN_STAT_SEQNO_SHIFT) & 0xff;
+
+ /* Reset done, start abort and flush */
+ if (p->header[0] & KVASER_PCIEFD_SPACK_IRM &&
+ p->header[0] & KVASER_PCIEFD_SPACK_RMCD &&
+ p->header[1] & KVASER_PCIEFD_SPACK_AUTO &&
+ cmdseq == (p->header[1] & KVASER_PCIEFD_PACKET_SEQ_MSK) &&
+ status & KVASER_PCIEFD_KCAN_STAT_IDLE) {
+ u32 cmd;
+
+ iowrite32(KVASER_PCIEFD_KCAN_IRQ_ABD,
+ can->reg_base + KVASER_PCIEFD_KCAN_IRQ_REG);
+ cmd = KVASER_PCIEFD_KCAN_CMD_AT;
+ cmd |= ++can->cmd_seq << KVASER_PCIEFD_KCAN_CMD_SEQ_SHIFT;
+ iowrite32(cmd, can->reg_base + KVASER_PCIEFD_KCAN_CMD_REG);
+
+ iowrite32(KVASER_PCIEFD_KCAN_IRQ_TFD,
+ can->reg_base + KVASER_PCIEFD_KCAN_IEN_REG);
+ } else if (p->header[0] & KVASER_PCIEFD_SPACK_IDET &&
+ p->header[0] & KVASER_PCIEFD_SPACK_IRM &&
+ cmdseq == (p->header[1] & KVASER_PCIEFD_PACKET_SEQ_MSK) &&
+ status & KVASER_PCIEFD_KCAN_STAT_IDLE) {
+ /* Reset detected, send end of flush if no packets are in the FIFO */
+ u8 count = ioread32(can->reg_base +
+ KVASER_PCIEFD_KCAN_TX_NPACKETS_REG) & 0xff;
+
+ if (!count)
+ iowrite32(KVASER_PCIEFD_KCAN_CTRL_EFLUSH,
+ can->reg_base + KVASER_PCIEFD_KCAN_CTRL_REG);
+ } else if (!(p->header[1] & KVASER_PCIEFD_SPACK_AUTO) &&
+ cmdseq == (p->header[1] & KVASER_PCIEFD_PACKET_SEQ_MSK)) {
+ /* Response to status request received */
+ kvaser_pciefd_handle_status_resp(can, p);
+ if (can->can.state != CAN_STATE_BUS_OFF &&
+ can->can.state != CAN_STATE_ERROR_ACTIVE) {
+ mod_timer(&can->bec_poll_timer,
+ KVASER_PCIEFD_BEC_POLL_FREQ);
+ }
+ } else if (p->header[0] & KVASER_PCIEFD_SPACK_RMCD &&
+ !(status & KVASER_PCIEFD_KCAN_STAT_BUS_OFF_MSK)) {
+ /* Transition from reset mode to bus on detected */
+ if (!completion_done(&can->start_comp))
+ complete(&can->start_comp);
+ }
+
+ return 0;
+}
+
+static int kvaser_pciefd_handle_eack_packet(struct kvaser_pciefd *pcie,
+ struct kvaser_pciefd_rx_packet *p)
+{
+ struct kvaser_pciefd_can *can;
+ u8 ch_id = (p->header[1] >> KVASER_PCIEFD_PACKET_CHID_SHIFT) & 0x7;
+
+ if (ch_id >= pcie->nr_channels)
+ return -EIO;
+
+ can = pcie->can[ch_id];
+
+ /* If this is the last flushed packet, send end of flush */
+ if (p->header[0] & KVASER_PCIEFD_APACKET_FLU) {
+ u8 count = ioread32(can->reg_base +
+ KVASER_PCIEFD_KCAN_TX_NPACKETS_REG) & 0xff;
+
+ if (count == 0)
+ iowrite32(KVASER_PCIEFD_KCAN_CTRL_EFLUSH,
+ can->reg_base + KVASER_PCIEFD_KCAN_CTRL_REG);
+ } else {
+ int echo_idx = p->header[0] & KVASER_PCIEFD_PACKET_SEQ_MSK;
+ int dlc = can_get_echo_skb(can->can.dev, echo_idx);
+ struct net_device_stats *stats = &can->can.dev->stats;
+
+ stats->tx_bytes += dlc;
+ stats->tx_packets++;
+
+ if (netif_queue_stopped(can->can.dev))
+ netif_wake_queue(can->can.dev);
+ }
+
+ return 0;
+}
+
+static void kvaser_pciefd_handle_nack_packet(struct kvaser_pciefd_can *can,
+ struct kvaser_pciefd_rx_packet *p)
+{
+ struct sk_buff *skb;
+ struct net_device_stats *stats = &can->can.dev->stats;
+ struct can_frame *cf;
+
+ skb = alloc_can_err_skb(can->can.dev, &cf);
+
+ stats->tx_errors++;
+ if (p->header[0] & KVASER_PCIEFD_APACKET_ABL) {
+ if (skb)
+ cf->can_id |= CAN_ERR_LOSTARB;
+ can->can.can_stats.arbitration_lost++;
+ } else if (skb) {
+ cf->can_id |= CAN_ERR_ACK;
+ }
+
+ if (skb) {
+ cf->can_id |= CAN_ERR_BUSERROR;
+ stats->rx_bytes += cf->can_dlc;
+ stats->rx_packets++;
+ netif_rx(skb);
+ } else {
+ stats->rx_dropped++;
+ netdev_warn(can->can.dev, "No memory left for err_skb\n");
+ }
+}
+
+static int kvaser_pciefd_handle_ack_packet(struct kvaser_pciefd *pcie,
+ struct kvaser_pciefd_rx_packet *p)
+{
+ struct kvaser_pciefd_can *can;
+ bool one_shot_fail = false;
+ u8 ch_id = (p->header[1] >> KVASER_PCIEFD_PACKET_CHID_SHIFT) & 0x7;
+
+ if (ch_id >= pcie->nr_channels)
+ return -EIO;
+
+ can = pcie->can[ch_id];
+ /* Ignore control packet ACK */
+ if (p->header[0] & KVASER_PCIEFD_APACKET_CT)
+ return 0;
+
+ if (p->header[0] & KVASER_PCIEFD_APACKET_NACK) {
+ kvaser_pciefd_handle_nack_packet(can, p);
+ one_shot_fail = true;
+ }
+
+ if (p->header[0] & KVASER_PCIEFD_APACKET_FLU) {
+ netdev_dbg(can->can.dev, "Packet was flushed\n");
+ } else {
+ int echo_idx = p->header[0] & KVASER_PCIEFD_PACKET_SEQ_MSK;
+ int dlc = can_get_echo_skb(can->can.dev, echo_idx);
+ u8 count = ioread32(can->reg_base +
+ KVASER_PCIEFD_KCAN_TX_NPACKETS_REG) & 0xff;
+
+ if (count < KVASER_PCIEFD_CAN_TX_MAX_COUNT &&
+ netif_queue_stopped(can->can.dev))
+ netif_wake_queue(can->can.dev);
+
+ if (!one_shot_fail) {
+ struct net_device_stats *stats = &can->can.dev->stats;
+
+ stats->tx_bytes += dlc;
+ stats->tx_packets++;
+ }
+ }
+
+ return 0;
+}
+
+static int kvaser_pciefd_handle_eflush_packet(struct kvaser_pciefd *pcie,
+ struct kvaser_pciefd_rx_packet *p)
+{
+ struct kvaser_pciefd_can *can;
+ u8 ch_id = (p->header[1] >> KVASER_PCIEFD_PACKET_CHID_SHIFT) & 0x7;
+
+ if (ch_id >= pcie->nr_channels)
+ return -EIO;
+
+ can = pcie->can[ch_id];
+
+ if (!completion_done(&can->flush_comp))
+ complete(&can->flush_comp);
+
+ return 0;
+}
+
+static int kvaser_pciefd_read_packet(struct kvaser_pciefd *pcie, int *start_pos,
+ int dma_buf)
+{
+ __le32 *buffer = pcie->dma_data[dma_buf];
+ __le64 timestamp;
+ struct kvaser_pciefd_rx_packet packet;
+ struct kvaser_pciefd_rx_packet *p = &packet;
+ u8 type;
+ int pos = *start_pos;
+ int size;
+ int ret = 0;
+
+ size = le32_to_cpu(buffer[pos++]);
+ if (!size) {
+ *start_pos = 0;
+ return 0;
+ }
+
+ p->header[0] = le32_to_cpu(buffer[pos++]);
+ p->header[1] = le32_to_cpu(buffer[pos++]);
+
+ /* Read 64-bit timestamp */
+ memcpy(&timestamp, &buffer[pos], sizeof(__le64));
+ pos += 2;
+ p->timestamp = le64_to_cpu(timestamp);
+
+ type = (p->header[1] >> KVASER_PCIEFD_PACKET_TYPE_SHIFT) & 0xf;
+ switch (type) {
+ case KVASER_PCIEFD_PACK_TYPE_DATA:
+ ret = kvaser_pciefd_handle_data_packet(pcie, p, &buffer[pos]);
+ if (!(p->header[0] & KVASER_PCIEFD_RPACKET_RTR)) {
+ u8 data_len;
+
+ data_len = can_dlc2len(p->header[1] >>
+ KVASER_PCIEFD_RPACKET_DLC_SHIFT);
+ pos += DIV_ROUND_UP(data_len, 4);
+ }
+ break;
+
+ case KVASER_PCIEFD_PACK_TYPE_ACK:
+ ret = kvaser_pciefd_handle_ack_packet(pcie, p);
+ break;
+
+ case KVASER_PCIEFD_PACK_TYPE_STATUS:
+ ret = kvaser_pciefd_handle_status_packet(pcie, p);
+ break;
+
+ case KVASER_PCIEFD_PACK_TYPE_ERROR:
+ ret = kvaser_pciefd_handle_error_packet(pcie, p);
+ break;
+
+ case KVASER_PCIEFD_PACK_TYPE_EFRAME_ACK:
+ ret = kvaser_pciefd_handle_eack_packet(pcie, p);
+ break;
+
+ case KVASER_PCIEFD_PACK_TYPE_EFLUSH_ACK:
+ ret = kvaser_pciefd_handle_eflush_packet(pcie, p);
+ break;
+
+ case KVASER_PCIEFD_PACK_TYPE_ACK_DATA:
+ case KVASER_PCIEFD_PACK_TYPE_BUS_LOAD:
+ case KVASER_PCIEFD_PACK_TYPE_TXRQ:
+ dev_info(&pcie->pci->dev,
+ "Received unexpected packet type 0x%08X\n", type);
+ break;
+
+ default:
+ dev_err(&pcie->pci->dev, "Unknown packet type 0x%08X\n", type);
+ ret = -EIO;
+ break;
+ }
+
+ if (ret)
+ return ret;
+
+ /* Position does not point to the end of the packet,
+ * corrupted packet size?
+ */
+ if ((*start_pos + size) != pos)
+ return -EIO;
+
+ /* Point to the next packet header, if any */
+ *start_pos = pos;
+
+ return ret;
+}
+
+static int kvaser_pciefd_read_buffer(struct kvaser_pciefd *pcie, int dma_buf)
+{
+ int pos = 0;
+ int res = 0;
+
+ do {
+ res = kvaser_pciefd_read_packet(pcie, &pos, dma_buf);
+ } while (!res && pos > 0 && pos < KVASER_PCIEFD_DMA_SIZE);
+
+ return res;
+}
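The on-buffer packet layout consumed by the two functions above can be sketched as follows (inferred from the parsing code; all words little endian):

	word 0    total packet length in 32-bit words, including this
	          word; a length of 0 terminates the buffer
	word 1-2  header[0], header[1]
	word 3-4  64-bit timestamp
	word 5-   payload, present only for non-RTR data packets:
	          DIV_ROUND_UP(can_dlc2len(dlc), 4) words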
+
+static int kvaser_pciefd_receive_irq(struct kvaser_pciefd *pcie)
+{
+ u32 irq;
+
+ irq = ioread32(pcie->reg_base + KVASER_PCIEFD_SRB_IRQ_REG);
+ if (irq & KVASER_PCIEFD_SRB_IRQ_DPD0) {
+ kvaser_pciefd_read_buffer(pcie, 0);
+ /* Reset DMA buffer 0 */
+ iowrite32(KVASER_PCIEFD_SRB_CMD_RDB0,
+ pcie->reg_base + KVASER_PCIEFD_SRB_CMD_REG);
+ }
+
+ if (irq & KVASER_PCIEFD_SRB_IRQ_DPD1) {
+ kvaser_pciefd_read_buffer(pcie, 1);
+ /* Reset DMA buffer 1 */
+ iowrite32(KVASER_PCIEFD_SRB_CMD_RDB1,
+ pcie->reg_base + KVASER_PCIEFD_SRB_CMD_REG);
+ }
+
+ if (irq & KVASER_PCIEFD_SRB_IRQ_DOF0 ||
+ irq & KVASER_PCIEFD_SRB_IRQ_DOF1 ||
+ irq & KVASER_PCIEFD_SRB_IRQ_DUF0 ||
+ irq & KVASER_PCIEFD_SRB_IRQ_DUF1)
+ dev_err(&pcie->pci->dev, "DMA IRQ error 0x%08X\n", irq);
+
+ iowrite32(irq, pcie->reg_base + KVASER_PCIEFD_SRB_IRQ_REG);
+ return 0;
+}
+
+static int kvaser_pciefd_transmit_irq(struct kvaser_pciefd_can *can)
+{
+ u32 irq = ioread32(can->reg_base + KVASER_PCIEFD_KCAN_IRQ_REG);
+
+ if (irq & KVASER_PCIEFD_KCAN_IRQ_TOF)
+ netdev_err(can->can.dev, "Tx FIFO overflow\n");
+
+ if (irq & KVASER_PCIEFD_KCAN_IRQ_TFD) {
+ u8 count = ioread32(can->reg_base +
+ KVASER_PCIEFD_KCAN_TX_NPACKETS_REG) & 0xff;
+
+ if (count == 0)
+ iowrite32(KVASER_PCIEFD_KCAN_CTRL_EFLUSH,
+ can->reg_base + KVASER_PCIEFD_KCAN_CTRL_REG);
+ }
+
+ if (irq & KVASER_PCIEFD_KCAN_IRQ_BPP)
+ netdev_err(can->can.dev,
+ "Fail to change bittiming, when not in reset mode\n");
+
+ if (irq & KVASER_PCIEFD_KCAN_IRQ_FDIC)
+ netdev_err(can->can.dev, "CAN FD frame in CAN mode\n");
+
+ if (irq & KVASER_PCIEFD_KCAN_IRQ_ROF)
+ netdev_err(can->can.dev, "Rx FIFO overflow\n");
+
+ iowrite32(irq, can->reg_base + KVASER_PCIEFD_KCAN_IRQ_REG);
+ return 0;
+}
+
+static irqreturn_t kvaser_pciefd_irq_handler(int irq, void *dev)
+{
+ struct kvaser_pciefd *pcie = (struct kvaser_pciefd *)dev;
+ u32 board_irq;
+ int i;
+
+ board_irq = ioread32(pcie->reg_base + KVASER_PCIEFD_IRQ_REG);
+
+ if (!(board_irq & KVASER_PCIEFD_IRQ_ALL_MSK))
+ return IRQ_NONE;
+
+ if (board_irq & KVASER_PCIEFD_IRQ_SRB)
+ kvaser_pciefd_receive_irq(pcie);
+
+ for (i = 0; i < pcie->nr_channels; i++) {
+ if (!pcie->can[i]) {
+ dev_err(&pcie->pci->dev,
+ "IRQ mask points to unallocated controller\n");
+ break;
+ }
+
+ /* Check whether the IRQ bit for channel i is set */
+ if (board_irq & (1 << i))
+ kvaser_pciefd_transmit_irq(pcie->can[i]);
+ }
+
+ iowrite32(board_irq, pcie->reg_base + KVASER_PCIEFD_IRQ_REG);
+ return IRQ_HANDLED;
+}
+
+static void kvaser_pciefd_teardown_can_ctrls(struct kvaser_pciefd *pcie)
+{
+ int i;
+ struct kvaser_pciefd_can *can;
+
+ for (i = 0; i < pcie->nr_channels; i++) {
+ can = pcie->can[i];
+ if (can) {
+ iowrite32(0,
+ can->reg_base + KVASER_PCIEFD_KCAN_IEN_REG);
+ kvaser_pciefd_pwm_stop(can);
+ free_candev(can->can.dev);
+ }
+ }
+}
+
+static int kvaser_pciefd_probe(struct pci_dev *pdev,
+ const struct pci_device_id *id)
+{
+ int err;
+ struct kvaser_pciefd *pcie;
+
+ pcie = devm_kzalloc(&pdev->dev, sizeof(*pcie), GFP_KERNEL);
+ if (!pcie)
+ return -ENOMEM;
+
+ pci_set_drvdata(pdev, pcie);
+ pcie->pci = pdev;
+
+ err = pci_enable_device(pdev);
+ if (err)
+ return err;
+
+ err = pci_request_regions(pdev, KVASER_PCIEFD_DRV_NAME);
+ if (err)
+ goto err_disable_pci;
+
+ pcie->reg_base = pci_iomap(pdev, 0, 0);
+ if (!pcie->reg_base) {
+ err = -ENOMEM;
+ goto err_release_regions;
+ }
+
+ err = kvaser_pciefd_setup_board(pcie);
+ if (err)
+ goto err_pci_iounmap;
+
+ err = kvaser_pciefd_setup_dma(pcie);
+ if (err)
+ goto err_pci_iounmap;
+
+ pci_set_master(pdev);
+
+ err = kvaser_pciefd_setup_can_ctrls(pcie);
+ if (err)
+ goto err_teardown_can_ctrls;
+
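+ /* Acknowledge any stale DMA-done interrupts, then enable the SRB interrupt sources */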
+ iowrite32(KVASER_PCIEFD_SRB_IRQ_DPD0 | KVASER_PCIEFD_SRB_IRQ_DPD1,
+ pcie->reg_base + KVASER_PCIEFD_SRB_IRQ_REG);
+
+ iowrite32(KVASER_PCIEFD_SRB_IRQ_DPD0 | KVASER_PCIEFD_SRB_IRQ_DPD1 |
+ KVASER_PCIEFD_SRB_IRQ_DOF0 | KVASER_PCIEFD_SRB_IRQ_DOF1 |
+ KVASER_PCIEFD_SRB_IRQ_DUF0 | KVASER_PCIEFD_SRB_IRQ_DUF1,
+ pcie->reg_base + KVASER_PCIEFD_SRB_IEN_REG);
+
+ /* Reset IRQ handling; IRQs are expected to be disabled at this point */
+ iowrite32(KVASER_PCIEFD_IRQ_ALL_MSK,
+ pcie->reg_base + KVASER_PCIEFD_IRQ_REG);
+ iowrite32(KVASER_PCIEFD_IRQ_ALL_MSK,
+ pcie->reg_base + KVASER_PCIEFD_IEN_REG);
+
+ /* Ready the DMA buffers */
+ iowrite32(KVASER_PCIEFD_SRB_CMD_RDB0,
+ pcie->reg_base + KVASER_PCIEFD_SRB_CMD_REG);
+ iowrite32(KVASER_PCIEFD_SRB_CMD_RDB1,
+ pcie->reg_base + KVASER_PCIEFD_SRB_CMD_REG);
+
+ err = request_irq(pcie->pci->irq, kvaser_pciefd_irq_handler,
+ IRQF_SHARED, KVASER_PCIEFD_DRV_NAME, pcie);
+ if (err)
+ goto err_teardown_can_ctrls;
+
+ err = kvaser_pciefd_reg_candev(pcie);
+ if (err)
+ goto err_free_irq;
+
+ return 0;
+
+err_free_irq:
+ free_irq(pcie->pci->irq, pcie);
+
+err_teardown_can_ctrls:
+ kvaser_pciefd_teardown_can_ctrls(pcie);
+ iowrite32(0, pcie->reg_base + KVASER_PCIEFD_SRB_CTRL_REG);
+ pci_clear_master(pdev);
+
+err_pci_iounmap:
+ pci_iounmap(pdev, pcie->reg_base);
+
+err_release_regions:
+ pci_release_regions(pdev);
+
+err_disable_pci:
+ pci_disable_device(pdev);
+
+ return err;
+}
+
+static void kvaser_pciefd_remove_all_ctrls(struct kvaser_pciefd *pcie)
+{
+ struct kvaser_pciefd_can *can;
+ int i;
+
+ for (i = 0; i < pcie->nr_channels; i++) {
+ can = pcie->can[i];
+ if (can) {
+ iowrite32(0,
+ can->reg_base + KVASER_PCIEFD_KCAN_IEN_REG);
+ unregister_candev(can->can.dev);
+ del_timer(&can->bec_poll_timer);
+ kvaser_pciefd_pwm_stop(can);
+ free_candev(can->can.dev);
+ }
+ }
+}
+
+static void kvaser_pciefd_remove(struct pci_dev *pdev)
+{
+ struct kvaser_pciefd *pcie = pci_get_drvdata(pdev);
+
+ kvaser_pciefd_remove_all_ctrls(pcie);
+
+ /* Turn off IRQ generation */
+ iowrite32(0, pcie->reg_base + KVASER_PCIEFD_SRB_CTRL_REG);
+ iowrite32(KVASER_PCIEFD_IRQ_ALL_MSK,
+ pcie->reg_base + KVASER_PCIEFD_IRQ_REG);
+ iowrite32(0, pcie->reg_base + KVASER_PCIEFD_IEN_REG);
+
+ free_irq(pcie->pci->irq, pcie);
+
+ pci_clear_master(pdev);
+ pci_iounmap(pdev, pcie->reg_base);
+ pci_release_regions(pdev);
+ pci_disable_device(pdev);
+}
+
+static struct pci_driver kvaser_pciefd = {
+ .name = KVASER_PCIEFD_DRV_NAME,
+ .id_table = kvaser_pciefd_id_table,
+ .probe = kvaser_pciefd_probe,
+ .remove = kvaser_pciefd_remove,
+};
+
+module_pci_driver(kvaser_pciefd);
diff --git a/drivers/net/can/m_can/Kconfig b/drivers/net/can/m_can/Kconfig
index ec4b2e117f66..1ff0b7fe81d6 100644
--- a/drivers/net/can/m_can/Kconfig
+++ b/drivers/net/can/m_can/Kconfig
@@ -1,6 +1,24 @@
# SPDX-License-Identifier: GPL-2.0-only
config CAN_M_CAN
+ tristate "Bosch M_CAN support"
+ ---help---
+ Say Y here if you want support for the Bosch M_CAN controller framework.
+ This is common support for devices that embed the Bosch M_CAN IP.
+
+config CAN_M_CAN_PLATFORM
+ tristate "Bosch M_CAN support for io-mapped devices"
depends on HAS_IOMEM
- tristate "Bosch M_CAN devices"
+ depends on CAN_M_CAN
+ ---help---
+ Say Y here if you want support for an I/O-mapped Bosch M_CAN controller.
+ This support is for devices that embed the Bosch M_CAN controller
+ IP with its registers I/O-mapped into the processor's address space.
+
+config CAN_M_CAN_TCAN4X5X
+ depends on CAN_M_CAN
+ depends on REGMAP_SPI
+ tristate "TCAN4X5X M_CAN device"
---help---
- Say Y here if you want to support for Bosch M_CAN controller.
+ Say Y here if you want support for the Texas Instruments TCAN4x5x
+ M_CAN controller. This device is a peripheral that uses the
+ SPI bus for communication.
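
For illustration, a .config fragment selecting this whole stack for an
SPI-attached TCAN4x5x part could look as follows (option names taken from
the entries above; REGMAP_SPI is the dependency named in the last entry):

	CONFIG_CAN_M_CAN=m
	CONFIG_CAN_M_CAN_PLATFORM=m
	CONFIG_CAN_M_CAN_TCAN4X5X=m
	CONFIG_REGMAP_SPI=y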
diff --git a/drivers/net/can/m_can/Makefile b/drivers/net/can/m_can/Makefile
index 599ae69cb4a1..52a4a6fbe527 100644
--- a/drivers/net/can/m_can/Makefile
+++ b/drivers/net/can/m_can/Makefile
@@ -4,3 +4,5 @@
#
obj-$(CONFIG_CAN_M_CAN) += m_can.o
+obj-$(CONFIG_CAN_M_CAN_PLATFORM) += m_can_platform.o
+obj-$(CONFIG_CAN_M_CAN_TCAN4X5X) += tcan4x5x.o
diff --git a/drivers/net/can/m_can/m_can.c b/drivers/net/can/m_can/m_can.c
index deb274a19ba0..562c8317e3aa 100644
--- a/drivers/net/can/m_can/m_can.c
+++ b/drivers/net/can/m_can/m_can.c
@@ -1,20 +1,14 @@
-/*
- * CAN bus driver for Bosch M_CAN controller
- *
- * Copyright (C) 2014 Freescale Semiconductor, Inc.
- * Dong Aisheng <b29396@freescale.com>
- *
- * Bosch M_CAN user manual can be obtained from:
+// SPDX-License-Identifier: GPL-2.0
+// CAN bus driver for Bosch M_CAN controller
+// Copyright (C) 2014 Freescale Semiconductor, Inc.
+// Dong Aisheng <b29396@freescale.com>
+// Copyright (C) 2018-19 Texas Instruments Incorporated - http://www.ti.com/
+
+/* Bosch M_CAN user manual can be obtained from:
* http://www.bosch-semiconductors.de/media/pdf_1/ipmodules_1/m_can/
* mcan_users_manual_v302.pdf
- *
- * This file is licensed under the terms of the GNU General Public
- * License version 2. This program is licensed "as is" without any
- * warranty of any kind, whether express or implied.
*/
-#include <linux/clk.h>
-#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kernel.h>
@@ -28,11 +22,7 @@
#include <linux/can/dev.h>
#include <linux/pinctrl/consumer.h>
-/* napi related */
-#define M_CAN_NAPI_WEIGHT 64
-
-/* message ram configuration data length */
-#define MRAM_CFG_LEN 8
+#include "m_can.h"
/* registers definition */
enum m_can_reg {
@@ -86,28 +76,11 @@ enum m_can_reg {
M_CAN_TXEFA = 0xf8,
};
-/* m_can lec values */
-enum m_can_lec_type {
- LEC_NO_ERROR = 0,
- LEC_STUFF_ERROR,
- LEC_FORM_ERROR,
- LEC_ACK_ERROR,
- LEC_BIT1_ERROR,
- LEC_BIT0_ERROR,
- LEC_CRC_ERROR,
- LEC_UNUSED,
-};
+/* napi related */
+#define M_CAN_NAPI_WEIGHT 64
-enum m_can_mram_cfg {
- MRAM_SIDF = 0,
- MRAM_XIDF,
- MRAM_RXF0,
- MRAM_RXF1,
- MRAM_RXB,
- MRAM_TXE,
- MRAM_TXB,
- MRAM_CFG_NUM,
-};
+/* message ram configuration data length */
+#define MRAM_CFG_LEN 8
/* Core Release Register (CREL) */
#define CREL_REL_SHIFT 28
@@ -347,90 +320,85 @@ enum m_can_mram_cfg {
#define TX_EVENT_MM_SHIFT TX_BUF_MM_SHIFT
#define TX_EVENT_MM_MASK (0xff << TX_EVENT_MM_SHIFT)
-/* address offset and element number for each FIFO/Buffer in the Message RAM */
-struct mram_cfg {
- u16 off;
- u8 num;
-};
-
-/* m_can private data structure */
-struct m_can_priv {
- struct can_priv can; /* must be the first member */
- struct napi_struct napi;
- struct net_device *dev;
- struct device *device;
- struct clk *hclk;
- struct clk *cclk;
- void __iomem *base;
- u32 irqstatus;
- int version;
-
- /* message ram configuration */
- void __iomem *mram_base;
- struct mram_cfg mcfg[MRAM_CFG_NUM];
-};
+static inline u32 m_can_read(struct m_can_classdev *cdev, enum m_can_reg reg)
+{
+ return cdev->ops->read_reg(cdev, reg);
+}
-static inline u32 m_can_read(const struct m_can_priv *priv, enum m_can_reg reg)
+static inline void m_can_write(struct m_can_classdev *cdev, enum m_can_reg reg,
+ u32 val)
{
- return readl(priv->base + reg);
+ cdev->ops->write_reg(cdev, reg, val);
}
-static inline void m_can_write(const struct m_can_priv *priv,
- enum m_can_reg reg, u32 val)
+static u32 m_can_fifo_read(struct m_can_classdev *cdev,
+ u32 fgi, unsigned int offset)
{
- writel(val, priv->base + reg);
+ u32 addr_offset = cdev->mcfg[MRAM_RXF0].off + fgi * RXF0_ELEMENT_SIZE +
+ offset;
+
+ return cdev->ops->read_fifo(cdev, addr_offset);
}
-static inline u32 m_can_fifo_read(const struct m_can_priv *priv,
- u32 fgi, unsigned int offset)
+static void m_can_fifo_write(struct m_can_classdev *cdev,
+ u32 fpi, unsigned int offset, u32 val)
{
- return readl(priv->mram_base + priv->mcfg[MRAM_RXF0].off +
- fgi * RXF0_ELEMENT_SIZE + offset);
+ u32 addr_offset = cdev->mcfg[MRAM_TXB].off + fpi * TXB_ELEMENT_SIZE +
+ offset;
+
+ cdev->ops->write_fifo(cdev, addr_offset, val);
}
-static inline void m_can_fifo_write(const struct m_can_priv *priv,
- u32 fpi, unsigned int offset, u32 val)
+static inline void m_can_fifo_write_no_off(struct m_can_classdev *cdev,
+ u32 fpi, u32 val)
{
- writel(val, priv->mram_base + priv->mcfg[MRAM_TXB].off +
- fpi * TXB_ELEMENT_SIZE + offset);
+ cdev->ops->write_fifo(cdev, fpi, val);
}
-static inline u32 m_can_txe_fifo_read(const struct m_can_priv *priv,
- u32 fgi,
- u32 offset) {
- return readl(priv->mram_base + priv->mcfg[MRAM_TXE].off +
- fgi * TXE_ELEMENT_SIZE + offset);
+static u32 m_can_txe_fifo_read(struct m_can_classdev *cdev, u32 fgi, u32 offset)
+{
+ u32 addr_offset = cdev->mcfg[MRAM_TXE].off + fgi * TXE_ELEMENT_SIZE +
+ offset;
+
+ return cdev->ops->read_fifo(cdev, addr_offset);
}
-static inline bool m_can_tx_fifo_full(const struct m_can_priv *priv)
+static inline bool m_can_tx_fifo_full(struct m_can_classdev *cdev)
{
- return !!(m_can_read(priv, M_CAN_TXFQS) & TXFQS_TFQF);
+ return !!(m_can_read(cdev, M_CAN_TXFQS) & TXFQS_TFQF);
}
-static inline void m_can_config_endisable(const struct m_can_priv *priv,
- bool enable)
+void m_can_config_endisable(struct m_can_classdev *cdev, bool enable)
{
- u32 cccr = m_can_read(priv, M_CAN_CCCR);
+ u32 cccr = m_can_read(cdev, M_CAN_CCCR);
u32 timeout = 10;
u32 val = 0;
+ /* Clear the Clock stop request if it was set */
+ if (cccr & CCCR_CSR)
+ cccr &= ~CCCR_CSR;
+
if (enable) {
/* enable m_can configuration */
- m_can_write(priv, M_CAN_CCCR, cccr | CCCR_INIT);
+ m_can_write(cdev, M_CAN_CCCR, cccr | CCCR_INIT);
udelay(5);
/* CCCR.CCE can only be set/reset while CCCR.INIT = '1' */
- m_can_write(priv, M_CAN_CCCR, cccr | CCCR_INIT | CCCR_CCE);
+ m_can_write(cdev, M_CAN_CCCR, cccr | CCCR_INIT | CCCR_CCE);
} else {
- m_can_write(priv, M_CAN_CCCR, cccr & ~(CCCR_INIT | CCCR_CCE));
+ m_can_write(cdev, M_CAN_CCCR, cccr & ~(CCCR_INIT | CCCR_CCE));
}
/* there's a delay for module initialization */
if (enable)
val = CCCR_INIT | CCCR_CCE;
- while ((m_can_read(priv, M_CAN_CCCR) & (CCCR_INIT | CCCR_CCE)) != val) {
+ while ((m_can_read(cdev, M_CAN_CCCR) & (CCCR_INIT | CCCR_CCE)) != val) {
if (timeout == 0) {
- netdev_warn(priv->dev, "Failed to init module\n");
+ netdev_warn(cdev->net, "Failed to init module\n");
return;
}
timeout--;
@@ -438,21 +406,38 @@ static inline void m_can_config_endisable(const struct m_can_priv *priv,
}
}
-static inline void m_can_enable_all_interrupts(const struct m_can_priv *priv)
+static inline void m_can_enable_all_interrupts(struct m_can_classdev *cdev)
{
/* Only interrupt line 0 is used in this driver */
- m_can_write(priv, M_CAN_ILE, ILE_EINT0);
+ m_can_write(cdev, M_CAN_ILE, ILE_EINT0);
}
-static inline void m_can_disable_all_interrupts(const struct m_can_priv *priv)
+static inline void m_can_disable_all_interrupts(struct m_can_classdev *cdev)
{
- m_can_write(priv, M_CAN_ILE, 0x0);
+ m_can_write(cdev, M_CAN_ILE, 0x0);
+}
+
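+/* Drop the skb currently staged for transmission, e.g. when restarting after bus-off */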
+static void m_can_clean(struct net_device *net)
+{
+ struct m_can_classdev *cdev = netdev_priv(net);
+
+ if (cdev->tx_skb) {
+ int putidx = 0;
+
+ net->stats.tx_errors++;
+ if (cdev->version > 30)
+ putidx = ((m_can_read(cdev, M_CAN_TXFQS) &
+ TXFQS_TFQPI_MASK) >> TXFQS_TFQPI_SHIFT);
+
+ can_free_echo_skb(cdev->net, putidx);
+ cdev->tx_skb = NULL;
+ }
}
static void m_can_read_fifo(struct net_device *dev, u32 rxfs)
{
struct net_device_stats *stats = &dev->stats;
- struct m_can_priv *priv = netdev_priv(dev);
+ struct m_can_classdev *cdev = netdev_priv(dev);
struct canfd_frame *cf;
struct sk_buff *skb;
u32 id, fgi, dlc;
@@ -460,7 +445,7 @@ static void m_can_read_fifo(struct net_device *dev, u32 rxfs)
/* calculate the fifo get index for where to read data */
fgi = (rxfs & RXFS_FGI_MASK) >> RXFS_FGI_SHIFT;
- dlc = m_can_fifo_read(priv, fgi, M_CAN_FIFO_DLC);
+ dlc = m_can_fifo_read(cdev, fgi, M_CAN_FIFO_DLC);
if (dlc & RX_BUF_FDF)
skb = alloc_canfd_skb(dev, &cf);
else
@@ -475,7 +460,7 @@ static void m_can_read_fifo(struct net_device *dev, u32 rxfs)
else
cf->len = get_can_dlc((dlc >> 16) & 0x0F);
- id = m_can_fifo_read(priv, fgi, M_CAN_FIFO_ID);
+ id = m_can_fifo_read(cdev, fgi, M_CAN_FIFO_ID);
if (id & RX_BUF_XTD)
cf->can_id = (id & CAN_EFF_MASK) | CAN_EFF_FLAG;
else
@@ -494,12 +479,12 @@ static void m_can_read_fifo(struct net_device *dev, u32 rxfs)
for (i = 0; i < cf->len; i += 4)
*(u32 *)(cf->data + i) =
- m_can_fifo_read(priv, fgi,
+ m_can_fifo_read(cdev, fgi,
M_CAN_FIFO_DATA(i / 4));
}
/* acknowledge rx fifo 0 */
- m_can_write(priv, M_CAN_RXF0A, fgi);
+ m_can_write(cdev, M_CAN_RXF0A, fgi);
stats->rx_packets++;
stats->rx_bytes += cf->len;
@@ -509,11 +494,11 @@ static void m_can_read_fifo(struct net_device *dev, u32 rxfs)
static int m_can_do_rx_poll(struct net_device *dev, int quota)
{
- struct m_can_priv *priv = netdev_priv(dev);
+ struct m_can_classdev *cdev = netdev_priv(dev);
u32 pkts = 0;
u32 rxfs;
- rxfs = m_can_read(priv, M_CAN_RXF0S);
+ rxfs = m_can_read(cdev, M_CAN_RXF0S);
if (!(rxfs & RXFS_FFL_MASK)) {
netdev_dbg(dev, "no messages in fifo0\n");
return 0;
@@ -527,7 +512,7 @@ static int m_can_do_rx_poll(struct net_device *dev, int quota)
quota--;
pkts++;
- rxfs = m_can_read(priv, M_CAN_RXF0S);
+ rxfs = m_can_read(cdev, M_CAN_RXF0S);
}
if (pkts)
@@ -562,12 +547,12 @@ static int m_can_handle_lost_msg(struct net_device *dev)
static int m_can_handle_lec_err(struct net_device *dev,
enum m_can_lec_type lec_type)
{
- struct m_can_priv *priv = netdev_priv(dev);
+ struct m_can_classdev *cdev = netdev_priv(dev);
struct net_device_stats *stats = &dev->stats;
struct can_frame *cf;
struct sk_buff *skb;
- priv->can.can_stats.bus_error++;
+ cdev->can.can_stats.bus_error++;
stats->rx_errors++;
/* propagate the error condition to the CAN stack */
@@ -619,47 +604,51 @@ static int m_can_handle_lec_err(struct net_device *dev,
static int __m_can_get_berr_counter(const struct net_device *dev,
struct can_berr_counter *bec)
{
- struct m_can_priv *priv = netdev_priv(dev);
+ struct m_can_classdev *cdev = netdev_priv(dev);
unsigned int ecr;
- ecr = m_can_read(priv, M_CAN_ECR);
+ ecr = m_can_read(cdev, M_CAN_ECR);
bec->rxerr = (ecr & ECR_REC_MASK) >> ECR_REC_SHIFT;
bec->txerr = (ecr & ECR_TEC_MASK) >> ECR_TEC_SHIFT;
return 0;
}
-static int m_can_clk_start(struct m_can_priv *priv)
+static int m_can_clk_start(struct m_can_classdev *cdev)
{
int err;
- err = pm_runtime_get_sync(priv->device);
+ if (cdev->pm_clock_support == 0)
+ return 0;
+
+ err = pm_runtime_get_sync(cdev->dev);
if (err < 0) {
- pm_runtime_put_noidle(priv->device);
+ pm_runtime_put_noidle(cdev->dev);
return err;
}
return 0;
}
-static void m_can_clk_stop(struct m_can_priv *priv)
+static void m_can_clk_stop(struct m_can_classdev *cdev)
{
- pm_runtime_put_sync(priv->device);
+ if (cdev->pm_clock_support)
+ pm_runtime_put_sync(cdev->dev);
}
static int m_can_get_berr_counter(const struct net_device *dev,
struct can_berr_counter *bec)
{
- struct m_can_priv *priv = netdev_priv(dev);
+ struct m_can_classdev *cdev = netdev_priv(dev);
int err;
- err = m_can_clk_start(priv);
+ err = m_can_clk_start(cdev);
if (err)
return err;
__m_can_get_berr_counter(dev, bec);
- m_can_clk_stop(priv);
+ m_can_clk_stop(cdev);
return 0;
}
@@ -667,7 +656,7 @@ static int m_can_get_berr_counter(const struct net_device *dev,
static int m_can_handle_state_change(struct net_device *dev,
enum can_state new_state)
{
- struct m_can_priv *priv = netdev_priv(dev);
+ struct m_can_classdev *cdev = netdev_priv(dev);
struct net_device_stats *stats = &dev->stats;
struct can_frame *cf;
struct sk_buff *skb;
@@ -677,19 +666,19 @@ static int m_can_handle_state_change(struct net_device *dev,
switch (new_state) {
case CAN_STATE_ERROR_ACTIVE:
/* error warning state */
- priv->can.can_stats.error_warning++;
- priv->can.state = CAN_STATE_ERROR_WARNING;
+ cdev->can.can_stats.error_warning++;
+ cdev->can.state = CAN_STATE_ERROR_WARNING;
break;
case CAN_STATE_ERROR_PASSIVE:
/* error passive state */
- priv->can.can_stats.error_passive++;
- priv->can.state = CAN_STATE_ERROR_PASSIVE;
+ cdev->can.can_stats.error_passive++;
+ cdev->can.state = CAN_STATE_ERROR_PASSIVE;
break;
case CAN_STATE_BUS_OFF:
/* bus-off state */
- priv->can.state = CAN_STATE_BUS_OFF;
- m_can_disable_all_interrupts(priv);
- priv->can.can_stats.bus_off++;
+ cdev->can.state = CAN_STATE_BUS_OFF;
+ m_can_disable_all_interrupts(cdev);
+ cdev->can.can_stats.bus_off++;
can_bus_off(dev);
break;
default:
@@ -716,7 +705,7 @@ static int m_can_handle_state_change(struct net_device *dev,
case CAN_STATE_ERROR_PASSIVE:
/* error passive state */
cf->can_id |= CAN_ERR_CRTL;
- ecr = m_can_read(priv, M_CAN_ECR);
+ ecr = m_can_read(cdev, M_CAN_ECR);
if (ecr & ECR_RP)
cf->data[1] |= CAN_ERR_CRTL_RX_PASSIVE;
if (bec.txerr > 127)
@@ -741,25 +730,22 @@ static int m_can_handle_state_change(struct net_device *dev,
static int m_can_handle_state_errors(struct net_device *dev, u32 psr)
{
- struct m_can_priv *priv = netdev_priv(dev);
+ struct m_can_classdev *cdev = netdev_priv(dev);
int work_done = 0;
- if ((psr & PSR_EW) &&
- (priv->can.state != CAN_STATE_ERROR_WARNING)) {
+ if (psr & PSR_EW && cdev->can.state != CAN_STATE_ERROR_WARNING) {
netdev_dbg(dev, "entered error warning state\n");
work_done += m_can_handle_state_change(dev,
CAN_STATE_ERROR_WARNING);
}
- if ((psr & PSR_EP) &&
- (priv->can.state != CAN_STATE_ERROR_PASSIVE)) {
+ if (psr & PSR_EP && cdev->can.state != CAN_STATE_ERROR_PASSIVE) {
netdev_dbg(dev, "entered error passive state\n");
work_done += m_can_handle_state_change(dev,
CAN_STATE_ERROR_PASSIVE);
}
- if ((psr & PSR_BO) &&
- (priv->can.state != CAN_STATE_BUS_OFF)) {
+ if (psr & PSR_BO && cdev->can.state != CAN_STATE_BUS_OFF) {
netdev_dbg(dev, "entered error bus off state\n");
work_done += m_can_handle_state_change(dev,
CAN_STATE_BUS_OFF);
@@ -794,14 +780,14 @@ static inline bool is_lec_err(u32 psr)
static int m_can_handle_bus_errors(struct net_device *dev, u32 irqstatus,
u32 psr)
{
- struct m_can_priv *priv = netdev_priv(dev);
+ struct m_can_classdev *cdev = netdev_priv(dev);
int work_done = 0;
if (irqstatus & IR_RF0L)
work_done += m_can_handle_lost_msg(dev);
/* handle lec errors on the bus */
- if ((priv->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING) &&
+ if ((cdev->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING) &&
is_lec_err(psr))
work_done += m_can_handle_lec_err(dev, psr & LEC_UNUSED);
@@ -811,14 +797,13 @@ static int m_can_handle_bus_errors(struct net_device *dev, u32 irqstatus,
return work_done;
}
-static int m_can_poll(struct napi_struct *napi, int quota)
+static int m_can_rx_handler(struct net_device *dev, int quota)
{
- struct net_device *dev = napi->dev;
- struct m_can_priv *priv = netdev_priv(dev);
+ struct m_can_classdev *cdev = netdev_priv(dev);
int work_done = 0;
u32 irqstatus, psr;
- irqstatus = priv->irqstatus | m_can_read(priv, M_CAN_IR);
+ irqstatus = cdev->irqstatus | m_can_read(cdev, M_CAN_IR);
if (!irqstatus)
goto end;
@@ -832,18 +817,19 @@ static int m_can_poll(struct napi_struct *napi, int quota)
* whether MCAN_ECR.RP = ’1’ and MCAN_ECR.REC = 127.
* In this case, reset MCAN_IR.MRAF. No further action is required.
*/
- if ((priv->version <= 31) && (irqstatus & IR_MRAF) &&
- (m_can_read(priv, M_CAN_ECR) & ECR_RP)) {
+ if (cdev->version <= 31 && irqstatus & IR_MRAF &&
+ m_can_read(cdev, M_CAN_ECR) & ECR_RP) {
struct can_berr_counter bec;
__m_can_get_berr_counter(dev, &bec);
if (bec.rxerr == 127) {
- m_can_write(priv, M_CAN_IR, IR_MRAF);
+ m_can_write(cdev, M_CAN_IR, IR_MRAF);
irqstatus &= ~IR_MRAF;
}
}
- psr = m_can_read(priv, M_CAN_PSR);
+ psr = m_can_read(cdev, M_CAN_PSR);
+
if (irqstatus & IR_ERR_STATE)
work_done += m_can_handle_state_errors(dev, psr);
@@ -852,13 +838,33 @@ static int m_can_poll(struct napi_struct *napi, int quota)
if (irqstatus & IR_RF0N)
work_done += m_can_do_rx_poll(dev, (quota - work_done));
+end:
+ return work_done;
+}
+
+static int m_can_rx_peripheral(struct net_device *dev)
+{
+ struct m_can_classdev *cdev = netdev_priv(dev);
+
+ m_can_rx_handler(dev, 1);
+
+ m_can_enable_all_interrupts(cdev);
+
+ return 0;
+}
+
+static int m_can_poll(struct napi_struct *napi, int quota)
+{
+ struct net_device *dev = napi->dev;
+ struct m_can_classdev *cdev = netdev_priv(dev);
+ int work_done;
+
+ work_done = m_can_rx_handler(dev, quota);
if (work_done < quota) {
napi_complete_done(napi, work_done);
- m_can_enable_all_interrupts(priv);
+ m_can_enable_all_interrupts(cdev);
}
-end:
return work_done;
}
@@ -870,11 +876,11 @@ static void m_can_echo_tx_event(struct net_device *dev)
int i = 0;
unsigned int msg_mark;
- struct m_can_priv *priv = netdev_priv(dev);
+ struct m_can_classdev *cdev = netdev_priv(dev);
struct net_device_stats *stats = &dev->stats;
/* read tx event fifo status */
- m_can_txefs = m_can_read(priv, M_CAN_TXEFS);
+ m_can_txefs = m_can_read(cdev, M_CAN_TXEFS);
/* Get Tx Event fifo element count */
txe_count = (m_can_txefs & TXEFS_EFFL_MASK)
@@ -883,15 +889,15 @@ static void m_can_echo_tx_event(struct net_device *dev)
/* Get and process all sent elements */
for (i = 0; i < txe_count; i++) {
/* retrieve get index */
- fgi = (m_can_read(priv, M_CAN_TXEFS) & TXEFS_EFGI_MASK)
+ fgi = (m_can_read(cdev, M_CAN_TXEFS) & TXEFS_EFGI_MASK)
>> TXEFS_EFGI_SHIFT;
/* get message marker */
- msg_mark = (m_can_txe_fifo_read(priv, fgi, 4) &
+ msg_mark = (m_can_txe_fifo_read(cdev, fgi, 4) &
TX_EVENT_MM_MASK) >> TX_EVENT_MM_SHIFT;
/* ack txe element */
- m_can_write(priv, M_CAN_TXEFA, (TXEFA_EFAI_MASK &
+ m_can_write(cdev, M_CAN_TXEFA, (TXEFA_EFAI_MASK &
(fgi << TXEFA_EFAI_SHIFT)));
/* update stats */
@@ -903,17 +909,20 @@ static void m_can_echo_tx_event(struct net_device *dev)
static irqreturn_t m_can_isr(int irq, void *dev_id)
{
struct net_device *dev = (struct net_device *)dev_id;
- struct m_can_priv *priv = netdev_priv(dev);
+ struct m_can_classdev *cdev = netdev_priv(dev);
struct net_device_stats *stats = &dev->stats;
u32 ir;
- ir = m_can_read(priv, M_CAN_IR);
+ ir = m_can_read(cdev, M_CAN_IR);
if (!ir)
return IRQ_NONE;
/* ACK all irqs */
if (ir & IR_ALL_INT)
- m_can_write(priv, M_CAN_IR, ir);
+ m_can_write(cdev, M_CAN_IR, ir);
+
+ if (cdev->ops->clear_interrupts)
+ cdev->ops->clear_interrupts(cdev);
/* schedule NAPI in case of
* - rx IRQ
@@ -921,12 +930,15 @@ static irqreturn_t m_can_isr(int irq, void *dev_id)
* - bus error IRQ and bus error reporting
*/
if ((ir & IR_RF0N) || (ir & IR_ERR_ALL_30X)) {
- priv->irqstatus = ir;
- m_can_disable_all_interrupts(priv);
- napi_schedule(&priv->napi);
+ cdev->irqstatus = ir;
+ m_can_disable_all_interrupts(cdev);
+ if (!cdev->is_peripheral)
+ napi_schedule(&cdev->napi);
+ else
+ m_can_rx_peripheral(dev);
}
- if (priv->version == 30) {
+ if (cdev->version == 30) {
if (ir & IR_TC) {
/* Transmission Complete Interrupt*/
stats->tx_bytes += can_get_echo_skb(dev, 0);
@@ -940,7 +952,7 @@ static irqreturn_t m_can_isr(int irq, void *dev_id)
m_can_echo_tx_event(dev);
can_led_event(dev, CAN_LED_EVENT_TX);
if (netif_queue_stopped(dev) &&
- !m_can_tx_fifo_full(priv))
+ !m_can_tx_fifo_full(cdev))
netif_wake_queue(dev);
}
}
@@ -998,9 +1010,9 @@ static const struct can_bittiming_const m_can_data_bittiming_const_31X = {
static int m_can_set_bittiming(struct net_device *dev)
{
- struct m_can_priv *priv = netdev_priv(dev);
- const struct can_bittiming *bt = &priv->can.bittiming;
- const struct can_bittiming *dbt = &priv->can.data_bittiming;
+ struct m_can_classdev *cdev = netdev_priv(dev);
+ const struct can_bittiming *bt = &cdev->can.bittiming;
+ const struct can_bittiming *dbt = &cdev->can.data_bittiming;
u16 brp, sjw, tseg1, tseg2;
u32 reg_btp;
@@ -1010,9 +1022,9 @@ static int m_can_set_bittiming(struct net_device *dev)
tseg2 = bt->phase_seg2 - 1;
reg_btp = (brp << NBTP_NBRP_SHIFT) | (sjw << NBTP_NSJW_SHIFT) |
(tseg1 << NBTP_NTSEG1_SHIFT) | (tseg2 << NBTP_NTSEG2_SHIFT);
- m_can_write(priv, M_CAN_NBTP, reg_btp);
+ m_can_write(cdev, M_CAN_NBTP, reg_btp);
- if (priv->can.ctrlmode & CAN_CTRLMODE_FD) {
+ if (cdev->can.ctrlmode & CAN_CTRLMODE_FD) {
reg_btp = 0;
brp = dbt->brp - 1;
sjw = dbt->sjw - 1;
@@ -1034,7 +1046,7 @@ static int m_can_set_bittiming(struct net_device *dev)
/* Equation based on Bosch's M_CAN User Manual's
* Transmitter Delay Compensation Section
*/
- tdco = (priv->can.clock.freq / 1000) *
+ tdco = (cdev->can.clock.freq / 1000) *
ssp / dbt->bitrate;
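+ /* Worked example with illustrative values (not from this patch):
+  * cclk = 40 MHz and ssp = 875 (a secondary sample point of 87.5 %,
+  * expressed in tenths of a percent) at a 4 Mbit/s data bitrate give
+  * tdco = 40000 * 875 / 4000000 = 8 clock periods.
+  */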
/* Max valid TDCO value is 127 */
@@ -1045,7 +1057,7 @@ static int m_can_set_bittiming(struct net_device *dev)
}
reg_btp |= DBTP_TDC;
- m_can_write(priv, M_CAN_TDCR,
+ m_can_write(cdev, M_CAN_TDCR,
tdco << TDCR_TDCO_SHIFT);
}
@@ -1054,7 +1066,7 @@ static int m_can_set_bittiming(struct net_device *dev)
(tseg1 << DBTP_DTSEG1_SHIFT) |
(tseg2 << DBTP_DTSEG2_SHIFT);
- m_can_write(priv, M_CAN_DBTP, reg_btp);
+ m_can_write(cdev, M_CAN_DBTP, reg_btp);
}
return 0;
@@ -1071,63 +1083,63 @@ static int m_can_set_bittiming(struct net_device *dev)
*/
static void m_can_chip_config(struct net_device *dev)
{
- struct m_can_priv *priv = netdev_priv(dev);
+ struct m_can_classdev *cdev = netdev_priv(dev);
u32 cccr, test;
- m_can_config_endisable(priv, true);
+ m_can_config_endisable(cdev, true);
/* RX Buffer/FIFO Element Size 64 bytes data field */
- m_can_write(priv, M_CAN_RXESC, M_CAN_RXESC_64BYTES);
+ m_can_write(cdev, M_CAN_RXESC, M_CAN_RXESC_64BYTES);
/* Accept Non-matching Frames Into FIFO 0 */
- m_can_write(priv, M_CAN_GFC, 0x0);
+ m_can_write(cdev, M_CAN_GFC, 0x0);
- if (priv->version == 30) {
+ if (cdev->version == 30) {
/* only support one Tx Buffer currently */
- m_can_write(priv, M_CAN_TXBC, (1 << TXBC_NDTB_SHIFT) |
- priv->mcfg[MRAM_TXB].off);
+ m_can_write(cdev, M_CAN_TXBC, (1 << TXBC_NDTB_SHIFT) |
+ cdev->mcfg[MRAM_TXB].off);
} else {
/* TX FIFO is used for newer IP Core versions */
- m_can_write(priv, M_CAN_TXBC,
- (priv->mcfg[MRAM_TXB].num << TXBC_TFQS_SHIFT) |
- (priv->mcfg[MRAM_TXB].off));
+ m_can_write(cdev, M_CAN_TXBC,
+ (cdev->mcfg[MRAM_TXB].num << TXBC_TFQS_SHIFT) |
+ (cdev->mcfg[MRAM_TXB].off));
}
/* support 64 bytes payload */
- m_can_write(priv, M_CAN_TXESC, TXESC_TBDS_64BYTES);
+ m_can_write(cdev, M_CAN_TXESC, TXESC_TBDS_64BYTES);
/* TX Event FIFO */
- if (priv->version == 30) {
- m_can_write(priv, M_CAN_TXEFC, (1 << TXEFC_EFS_SHIFT) |
- priv->mcfg[MRAM_TXE].off);
+ if (cdev->version == 30) {
+ m_can_write(cdev, M_CAN_TXEFC, (1 << TXEFC_EFS_SHIFT) |
+ cdev->mcfg[MRAM_TXE].off);
} else {
/* Full TX Event FIFO is used */
- m_can_write(priv, M_CAN_TXEFC,
- ((priv->mcfg[MRAM_TXE].num << TXEFC_EFS_SHIFT)
+ m_can_write(cdev, M_CAN_TXEFC,
+ ((cdev->mcfg[MRAM_TXE].num << TXEFC_EFS_SHIFT)
& TXEFC_EFS_MASK) |
- priv->mcfg[MRAM_TXE].off);
+ cdev->mcfg[MRAM_TXE].off);
}
/* rx fifo configuration, blocking mode, fifo size 1 */
- m_can_write(priv, M_CAN_RXF0C,
- (priv->mcfg[MRAM_RXF0].num << RXFC_FS_SHIFT) |
- priv->mcfg[MRAM_RXF0].off);
+ m_can_write(cdev, M_CAN_RXF0C,
+ (cdev->mcfg[MRAM_RXF0].num << RXFC_FS_SHIFT) |
+ cdev->mcfg[MRAM_RXF0].off);
- m_can_write(priv, M_CAN_RXF1C,
- (priv->mcfg[MRAM_RXF1].num << RXFC_FS_SHIFT) |
- priv->mcfg[MRAM_RXF1].off);
+ m_can_write(cdev, M_CAN_RXF1C,
+ (cdev->mcfg[MRAM_RXF1].num << RXFC_FS_SHIFT) |
+ cdev->mcfg[MRAM_RXF1].off);
- cccr = m_can_read(priv, M_CAN_CCCR);
- test = m_can_read(priv, M_CAN_TEST);
+ cccr = m_can_read(cdev, M_CAN_CCCR);
+ test = m_can_read(cdev, M_CAN_TEST);
test &= ~TEST_LBCK;
- if (priv->version == 30) {
+ if (cdev->version == 30) {
/* Version 3.0.x */
cccr &= ~(CCCR_TEST | CCCR_MON |
(CCCR_CMR_MASK << CCCR_CMR_SHIFT) |
(CCCR_CME_MASK << CCCR_CME_SHIFT));
- if (priv->can.ctrlmode & CAN_CTRLMODE_FD)
+ if (cdev->can.ctrlmode & CAN_CTRLMODE_FD)
cccr |= CCCR_CME_CANFD_BRS << CCCR_CME_SHIFT;
} else {
@@ -1136,64 +1148,68 @@ static void m_can_chip_config(struct net_device *dev)
CCCR_NISO);
/* Only 3.2.x has NISO Bit implemented */
- if (priv->can.ctrlmode & CAN_CTRLMODE_FD_NON_ISO)
+ if (cdev->can.ctrlmode & CAN_CTRLMODE_FD_NON_ISO)
cccr |= CCCR_NISO;
- if (priv->can.ctrlmode & CAN_CTRLMODE_FD)
+ if (cdev->can.ctrlmode & CAN_CTRLMODE_FD)
cccr |= (CCCR_BRSE | CCCR_FDOE);
}
/* Loopback Mode */
- if (priv->can.ctrlmode & CAN_CTRLMODE_LOOPBACK) {
+ if (cdev->can.ctrlmode & CAN_CTRLMODE_LOOPBACK) {
cccr |= CCCR_TEST | CCCR_MON;
test |= TEST_LBCK;
}
/* Enable Monitoring (all versions) */
- if (priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY)
+ if (cdev->can.ctrlmode & CAN_CTRLMODE_LISTENONLY)
cccr |= CCCR_MON;
/* Write config */
- m_can_write(priv, M_CAN_CCCR, cccr);
- m_can_write(priv, M_CAN_TEST, test);
+ m_can_write(cdev, M_CAN_CCCR, cccr);
+ m_can_write(cdev, M_CAN_TEST, test);
/* Enable interrupts */
- m_can_write(priv, M_CAN_IR, IR_ALL_INT);
- if (!(priv->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING))
- if (priv->version == 30)
- m_can_write(priv, M_CAN_IE, IR_ALL_INT &
+ m_can_write(cdev, M_CAN_IR, IR_ALL_INT);
+ if (!(cdev->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING))
+ if (cdev->version == 30)
+ m_can_write(cdev, M_CAN_IE, IR_ALL_INT &
~(IR_ERR_LEC_30X));
else
- m_can_write(priv, M_CAN_IE, IR_ALL_INT &
+ m_can_write(cdev, M_CAN_IE, IR_ALL_INT &
~(IR_ERR_LEC_31X));
else
- m_can_write(priv, M_CAN_IE, IR_ALL_INT);
+ m_can_write(cdev, M_CAN_IE, IR_ALL_INT);
/* route all interrupts to INT0 */
- m_can_write(priv, M_CAN_ILS, ILS_ALL_INT0);
+ m_can_write(cdev, M_CAN_ILS, ILS_ALL_INT0);
/* set bittiming params */
m_can_set_bittiming(dev);
- m_can_config_endisable(priv, false);
+ m_can_config_endisable(cdev, false);
+
+ if (cdev->ops->init)
+ cdev->ops->init(cdev);
}
static void m_can_start(struct net_device *dev)
{
- struct m_can_priv *priv = netdev_priv(dev);
+ struct m_can_classdev *cdev = netdev_priv(dev);
/* basic m_can configuration */
m_can_chip_config(dev);
- priv->can.state = CAN_STATE_ERROR_ACTIVE;
+ cdev->can.state = CAN_STATE_ERROR_ACTIVE;
- m_can_enable_all_interrupts(priv);
+ m_can_enable_all_interrupts(cdev);
}
static int m_can_set_mode(struct net_device *dev, enum can_mode mode)
{
switch (mode) {
case CAN_MODE_START:
+ m_can_clean(dev);
m_can_start(dev);
netif_wake_queue(dev);
break;
@@ -1209,20 +1225,17 @@ static int m_can_set_mode(struct net_device *dev, enum can_mode mode)
* else it returns the release and step coded as:
* return value = 10 * <release> + 1 * <step>
*/
-static int m_can_check_core_release(void __iomem *m_can_base)
+static int m_can_check_core_release(struct m_can_classdev *cdev)
{
u32 crel_reg;
u8 rel;
u8 step;
int res;
- struct m_can_priv temp_priv = {
- .base = m_can_base
- };
/* Read Core Release Version and split into version number
* Example: Version 3.2.1 => rel = 3; step = 2; substep = 1;
*/
- crel_reg = m_can_read(&temp_priv, M_CAN_CREL);
+ crel_reg = m_can_read(cdev, M_CAN_CREL);
rel = (u8)((crel_reg & CREL_REL_MASK) >> CREL_REL_SHIFT);
step = (u8)((crel_reg & CREL_STEP_MASK) >> CREL_STEP_SHIFT);
@@ -1240,152 +1253,142 @@ static int m_can_check_core_release(void __iomem *m_can_base)
/* Selectable Non ISO support only in version 3.2.x
* This function checks if the bit is writable.
*/
-static bool m_can_niso_supported(const struct m_can_priv *priv)
+static bool m_can_niso_supported(struct m_can_classdev *cdev)
{
- u32 cccr_reg, cccr_poll;
- int niso_timeout;
+ u32 cccr_reg, cccr_poll = 0;
+ int niso_timeout = -ETIMEDOUT;
+ int i;
- m_can_config_endisable(priv, true);
- cccr_reg = m_can_read(priv, M_CAN_CCCR);
+ m_can_config_endisable(cdev, true);
+ cccr_reg = m_can_read(cdev, M_CAN_CCCR);
cccr_reg |= CCCR_NISO;
- m_can_write(priv, M_CAN_CCCR, cccr_reg);
+ m_can_write(cdev, M_CAN_CCCR, cccr_reg);
- niso_timeout = readl_poll_timeout((priv->base + M_CAN_CCCR), cccr_poll,
- (cccr_poll == cccr_reg), 0, 10);
+ for (i = 0; i <= 10; i++) {
+ cccr_poll = m_can_read(cdev, M_CAN_CCCR);
+ if (cccr_poll == cccr_reg) {
+ niso_timeout = 0;
+ break;
+ }
+
+ usleep_range(1, 5);
+ }
/* Clear NISO */
cccr_reg &= ~(CCCR_NISO);
- m_can_write(priv, M_CAN_CCCR, cccr_reg);
+ m_can_write(cdev, M_CAN_CCCR, cccr_reg);
- m_can_config_endisable(priv, false);
+ m_can_config_endisable(cdev, false);
/* return false if time out (-ETIMEDOUT), else return true */
return !niso_timeout;
}
-static int m_can_dev_setup(struct platform_device *pdev, struct net_device *dev,
- void __iomem *addr)
+static int m_can_dev_setup(struct m_can_classdev *m_can_dev)
{
- struct m_can_priv *priv;
+ struct net_device *dev = m_can_dev->net;
int m_can_version;
- m_can_version = m_can_check_core_release(addr);
+ m_can_version = m_can_check_core_release(m_can_dev);
/* return if unsupported version */
if (!m_can_version) {
- dev_err(&pdev->dev, "Unsupported version number: %2d",
+ dev_err(m_can_dev->dev, "Unsupported version number: %2d",
m_can_version);
return -EINVAL;
}
- priv = netdev_priv(dev);
- netif_napi_add(dev, &priv->napi, m_can_poll, M_CAN_NAPI_WEIGHT);
+ if (!m_can_dev->is_peripheral)
+ netif_napi_add(dev, &m_can_dev->napi,
+ m_can_poll, M_CAN_NAPI_WEIGHT);
/* Shared properties of all M_CAN versions */
- priv->version = m_can_version;
- priv->dev = dev;
- priv->base = addr;
- priv->can.do_set_mode = m_can_set_mode;
- priv->can.do_get_berr_counter = m_can_get_berr_counter;
+ m_can_dev->version = m_can_version;
+ m_can_dev->can.do_set_mode = m_can_set_mode;
+ m_can_dev->can.do_get_berr_counter = m_can_get_berr_counter;
/* Set M_CAN supported operations */
- priv->can.ctrlmode_supported = CAN_CTRLMODE_LOOPBACK |
+ m_can_dev->can.ctrlmode_supported = CAN_CTRLMODE_LOOPBACK |
CAN_CTRLMODE_LISTENONLY |
CAN_CTRLMODE_BERR_REPORTING |
CAN_CTRLMODE_FD;
/* Set properties depending on M_CAN version */
- switch (priv->version) {
+ switch (m_can_dev->version) {
case 30:
/* CAN_CTRLMODE_FD_NON_ISO is fixed with M_CAN IP v3.0.x */
can_set_static_ctrlmode(dev, CAN_CTRLMODE_FD_NON_ISO);
- priv->can.bittiming_const = &m_can_bittiming_const_30X;
- priv->can.data_bittiming_const =
- &m_can_data_bittiming_const_30X;
+ m_can_dev->can.bittiming_const = m_can_dev->bit_timing ?
+ m_can_dev->bit_timing : &m_can_bittiming_const_30X;
+
+ m_can_dev->can.data_bittiming_const = m_can_dev->data_timing ?
+ m_can_dev->data_timing :
+ &m_can_data_bittiming_const_30X;
break;
case 31:
/* CAN_CTRLMODE_FD_NON_ISO is fixed with M_CAN IP v3.1.x */
can_set_static_ctrlmode(dev, CAN_CTRLMODE_FD_NON_ISO);
- priv->can.bittiming_const = &m_can_bittiming_const_31X;
- priv->can.data_bittiming_const =
- &m_can_data_bittiming_const_31X;
+ m_can_dev->can.bittiming_const = m_can_dev->bit_timing ?
+ m_can_dev->bit_timing : &m_can_bittiming_const_31X;
+
+ m_can_dev->can.data_bittiming_const = m_can_dev->data_timing ?
+ m_can_dev->data_timing :
+ &m_can_data_bittiming_const_31X;
break;
case 32:
- priv->can.bittiming_const = &m_can_bittiming_const_31X;
- priv->can.data_bittiming_const =
- &m_can_data_bittiming_const_31X;
- priv->can.ctrlmode_supported |= (m_can_niso_supported(priv)
+ m_can_dev->can.bittiming_const = m_can_dev->bit_timing ?
+ m_can_dev->bit_timing : &m_can_bittiming_const_31X;
+
+ m_can_dev->can.data_bittiming_const = m_can_dev->data_timing ?
+ m_can_dev->data_timing :
+ &m_can_data_bittiming_const_31X;
+
+ m_can_dev->can.ctrlmode_supported |=
+ (m_can_niso_supported(m_can_dev)
? CAN_CTRLMODE_FD_NON_ISO
: 0);
break;
default:
- dev_err(&pdev->dev, "Unsupported version number: %2d",
- priv->version);
+ dev_err(m_can_dev->dev, "Unsupported version number: %2d",
+ m_can_dev->version);
return -EINVAL;
}
- return 0;
-}
-
-static int m_can_open(struct net_device *dev)
-{
- struct m_can_priv *priv = netdev_priv(dev);
- int err;
-
- err = m_can_clk_start(priv);
- if (err)
- return err;
-
- /* open the can device */
- err = open_candev(dev);
- if (err) {
- netdev_err(dev, "failed to open can device\n");
- goto exit_disable_clks;
- }
-
- /* register interrupt handler */
- err = request_irq(dev->irq, m_can_isr, IRQF_SHARED, dev->name,
- dev);
- if (err < 0) {
- netdev_err(dev, "failed to request interrupt\n");
- goto exit_irq_fail;
- }
-
- /* start the m_can controller */
- m_can_start(dev);
-
- can_led_event(dev, CAN_LED_EVENT_OPEN);
- napi_enable(&priv->napi);
- netif_start_queue(dev);
+ if (m_can_dev->ops->init)
+ m_can_dev->ops->init(m_can_dev);
return 0;
-
-exit_irq_fail:
- close_candev(dev);
-exit_disable_clks:
- m_can_clk_stop(priv);
- return err;
}
static void m_can_stop(struct net_device *dev)
{
- struct m_can_priv *priv = netdev_priv(dev);
+ struct m_can_classdev *cdev = netdev_priv(dev);
/* disable all interrupts */
- m_can_disable_all_interrupts(priv);
+ m_can_disable_all_interrupts(cdev);
/* set the state as STOPPED */
- priv->can.state = CAN_STATE_STOPPED;
+ cdev->can.state = CAN_STATE_STOPPED;
}
static int m_can_close(struct net_device *dev)
{
- struct m_can_priv *priv = netdev_priv(dev);
+ struct m_can_classdev *cdev = netdev_priv(dev);
netif_stop_queue(dev);
- napi_disable(&priv->napi);
+
+ if (!cdev->is_peripheral)
+ napi_disable(&cdev->napi);
+
m_can_stop(dev);
- m_can_clk_stop(priv);
+ m_can_clk_stop(cdev);
free_irq(dev->irq, dev);
+
+ if (cdev->is_peripheral) {
+ cdev->tx_skb = NULL;
+ destroy_workqueue(cdev->tx_wq);
+ cdev->tx_wq = NULL;
+ }
+
close_candev(dev);
can_led_event(dev, CAN_LED_EVENT_STOP);
@@ -1394,30 +1397,27 @@ static int m_can_close(struct net_device *dev)
static int m_can_next_echo_skb_occupied(struct net_device *dev, int putidx)
{
- struct m_can_priv *priv = netdev_priv(dev);
+ struct m_can_classdev *cdev = netdev_priv(dev);
/*get wrap around for loopback skb index */
- unsigned int wrap = priv->can.echo_skb_max;
+ unsigned int wrap = cdev->can.echo_skb_max;
int next_idx;
/* calculate next index */
next_idx = (++putidx >= wrap ? 0 : putidx);
/* check if occupied */
- return !!priv->can.echo_skb[next_idx];
+ return !!cdev->can.echo_skb[next_idx];
}
-static netdev_tx_t m_can_start_xmit(struct sk_buff *skb,
- struct net_device *dev)
+static netdev_tx_t m_can_tx_handler(struct m_can_classdev *cdev)
{
- struct m_can_priv *priv = netdev_priv(dev);
- struct canfd_frame *cf = (struct canfd_frame *)skb->data;
+ struct canfd_frame *cf = (struct canfd_frame *)cdev->tx_skb->data;
+ struct net_device *dev = cdev->net;
+ struct sk_buff *skb = cdev->tx_skb;
u32 id, cccr, fdflags;
int i;
int putidx;
- if (can_dropped_invalid_skb(dev, skb))
- return NETDEV_TX_OK;
-
/* Generate ID field for TX buffer Element */
/* Common to all supported M_CAN versions */
if (cf->can_id & CAN_EFF_FLAG) {
@@ -1430,23 +1430,23 @@ static netdev_tx_t m_can_start_xmit(struct sk_buff *skb,
if (cf->can_id & CAN_RTR_FLAG)
id |= TX_BUF_RTR;
- if (priv->version == 30) {
+ if (cdev->version == 30) {
netif_stop_queue(dev);
/* message ram configuration */
- m_can_fifo_write(priv, 0, M_CAN_FIFO_ID, id);
- m_can_fifo_write(priv, 0, M_CAN_FIFO_DLC,
+ m_can_fifo_write(cdev, 0, M_CAN_FIFO_ID, id);
+ m_can_fifo_write(cdev, 0, M_CAN_FIFO_DLC,
can_len2dlc(cf->len) << 16);
for (i = 0; i < cf->len; i += 4)
- m_can_fifo_write(priv, 0,
+ m_can_fifo_write(cdev, 0,
M_CAN_FIFO_DATA(i / 4),
*(u32 *)(cf->data + i));
can_put_echo_skb(skb, dev, 0);
- if (priv->can.ctrlmode & CAN_CTRLMODE_FD) {
- cccr = m_can_read(priv, M_CAN_CCCR);
+ if (cdev->can.ctrlmode & CAN_CTRLMODE_FD) {
+ cccr = m_can_read(cdev, M_CAN_CCCR);
cccr &= ~(CCCR_CMR_MASK << CCCR_CMR_SHIFT);
if (can_is_canfd_skb(skb)) {
if (cf->flags & CANFD_BRS)
@@ -1458,28 +1458,35 @@ static netdev_tx_t m_can_start_xmit(struct sk_buff *skb,
} else {
cccr |= CCCR_CMR_CAN << CCCR_CMR_SHIFT;
}
- m_can_write(priv, M_CAN_CCCR, cccr);
+ m_can_write(cdev, M_CAN_CCCR, cccr);
}
- m_can_write(priv, M_CAN_TXBTIE, 0x1);
- m_can_write(priv, M_CAN_TXBAR, 0x1);
+ m_can_write(cdev, M_CAN_TXBTIE, 0x1);
+ m_can_write(cdev, M_CAN_TXBAR, 0x1);
/* End of xmit function for version 3.0.x */
} else {
/* Transmit routine for version >= v3.1.x */
/* Check if FIFO full */
- if (m_can_tx_fifo_full(priv)) {
+ if (m_can_tx_fifo_full(cdev)) {
/* This shouldn't happen */
netif_stop_queue(dev);
netdev_warn(dev,
"TX queue active although FIFO is full.");
- return NETDEV_TX_BUSY;
+
+ if (cdev->is_peripheral) {
+ kfree_skb(skb);
+ dev->stats.tx_dropped++;
+ return NETDEV_TX_OK;
+ } else {
+ return NETDEV_TX_BUSY;
+ }
}
/* get put index for frame */
- putidx = ((m_can_read(priv, M_CAN_TXFQS) & TXFQS_TFQPI_MASK)
+ putidx = ((m_can_read(cdev, M_CAN_TXFQS) & TXFQS_TFQPI_MASK)
>> TXFQS_TFQPI_SHIFT);
/* Write ID Field to FIFO Element */
- m_can_fifo_write(priv, putidx, M_CAN_FIFO_ID, id);
+ m_can_fifo_write(cdev, putidx, M_CAN_FIFO_ID, id);
/* get CAN FD configuration of frame */
fdflags = 0;
@@ -1494,14 +1501,14 @@ static netdev_tx_t m_can_start_xmit(struct sk_buff *skb,
* it is used in TX interrupt for
* sending the correct echo frame
*/
- m_can_fifo_write(priv, putidx, M_CAN_FIFO_DLC,
+ m_can_fifo_write(cdev, putidx, M_CAN_FIFO_DLC,
((putidx << TX_BUF_MM_SHIFT) &
TX_BUF_MM_MASK) |
(can_len2dlc(cf->len) << 16) |
fdflags | TX_BUF_EFC);
for (i = 0; i < cf->len; i += 4)
- m_can_fifo_write(priv, putidx, M_CAN_FIFO_DATA(i / 4),
+ m_can_fifo_write(cdev, putidx, M_CAN_FIFO_DATA(i / 4),
*(u32 *)(cf->data + i));
/* Push loopback echo.
@@ -1510,17 +1517,123 @@ static netdev_tx_t m_can_start_xmit(struct sk_buff *skb,
can_put_echo_skb(skb, dev, putidx);
/* Enable TX FIFO element to start transfer */
- m_can_write(priv, M_CAN_TXBAR, (1 << putidx));
+ m_can_write(cdev, M_CAN_TXBAR, (1 << putidx));
/* stop network queue if fifo full */
- if (m_can_tx_fifo_full(priv) ||
- m_can_next_echo_skb_occupied(dev, putidx))
- netif_stop_queue(dev);
+ if (m_can_tx_fifo_full(cdev) ||
+ m_can_next_echo_skb_occupied(dev, putidx))
+ netif_stop_queue(dev);
}
return NETDEV_TX_OK;
}
+static void m_can_tx_work_queue(struct work_struct *ws)
+{
+ struct m_can_classdev *cdev = container_of(ws, struct m_can_classdev,
+ tx_work);
+
+ m_can_tx_handler(cdev);
+ cdev->tx_skb = NULL;
+}
+
+static netdev_tx_t m_can_start_xmit(struct sk_buff *skb,
+ struct net_device *dev)
+{
+ struct m_can_classdev *cdev = netdev_priv(dev);
+
+ if (can_dropped_invalid_skb(dev, skb))
+ return NETDEV_TX_OK;
+
+ if (cdev->is_peripheral) {
+ if (cdev->tx_skb) {
+ netdev_err(dev, "hard_xmit called while tx busy\n");
+ return NETDEV_TX_BUSY;
+ }
+
+ if (cdev->can.state == CAN_STATE_BUS_OFF) {
+ m_can_clean(dev);
+ } else {
+ /* Need to stop the queue to avoid numerous requests
+ * from being sent. Suggested improvement is to create
+ * a queueing mechanism that will queue the skbs and
+ * process them in order (a sketch of that
+ * approach follows this function).
+ */
+ cdev->tx_skb = skb;
+ netif_stop_queue(cdev->net);
+ queue_work(cdev->tx_wq, &cdev->tx_work);
+ }
+ } else {
+ cdev->tx_skb = skb;
+ return m_can_tx_handler(cdev);
+ }
+
+ return NETDEV_TX_OK;
+}
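
A minimal sketch of the queueing mechanism suggested above; the tx_queue
field and the helper are hypothetical, not part of this patch:

	static void m_can_tx_queue_work(struct work_struct *ws)
	{
		struct m_can_classdev *cdev =
			container_of(ws, struct m_can_classdev, tx_work);
		struct sk_buff *skb;

		/* Drain staged skbs in submission order; tx_queue would be
		 * an sk_buff_head initialized in m_can_open().
		 */
		while ((skb = skb_dequeue(&cdev->tx_queue))) {
			cdev->tx_skb = skb;
			m_can_tx_handler(cdev);
			cdev->tx_skb = NULL;
		}
	}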
+
+static int m_can_open(struct net_device *dev)
+{
+ struct m_can_classdev *cdev = netdev_priv(dev);
+ int err;
+
+ err = m_can_clk_start(cdev);
+ if (err)
+ return err;
+
+ /* open the can device */
+ err = open_candev(dev);
+ if (err) {
+ netdev_err(dev, "failed to open can device\n");
+ goto exit_disable_clks;
+ }
+
+ /* register interrupt handler */
+ if (cdev->is_peripheral) {
+ cdev->tx_skb = NULL;
+ cdev->tx_wq = alloc_workqueue("mcan_wq",
+ WQ_FREEZABLE | WQ_MEM_RECLAIM, 0);
+ if (!cdev->tx_wq) {
+ err = -ENOMEM;
+ goto out_wq_fail;
+ }
+
+ INIT_WORK(&cdev->tx_work, m_can_tx_work_queue);
+
+ err = request_threaded_irq(dev->irq, NULL, m_can_isr,
+ IRQF_ONESHOT | IRQF_TRIGGER_FALLING,
+ dev->name, dev);
+ } else {
+ err = request_irq(dev->irq, m_can_isr, IRQF_SHARED, dev->name,
+ dev);
+ }
+
+ if (err < 0) {
+ netdev_err(dev, "failed to request interrupt\n");
+ goto exit_irq_fail;
+ }
+
+ /* start the m_can controller */
+ m_can_start(dev);
+
+ can_led_event(dev, CAN_LED_EVENT_OPEN);
+
+ if (!cdev->is_peripheral)
+ napi_enable(&cdev->napi);
+
+ netif_start_queue(dev);
+
+ return 0;
+
+exit_irq_fail:
+ if (cdev->is_peripheral)
+ destroy_workqueue(cdev->tx_wq);
+out_wq_fail:
+ close_candev(dev);
+exit_disable_clks:
+ m_can_clk_stop(cdev);
+ return err;
+}
+
static const struct net_device_ops m_can_netdev_ops = {
.ndo_open = m_can_open,
.ndo_stop = m_can_close,
@@ -1536,114 +1649,91 @@ static int register_m_can_dev(struct net_device *dev)
return register_candev(dev);
}
-static void m_can_init_ram(struct m_can_priv *priv)
-{
- int end, i, start;
-
- /* initialize the entire Message RAM in use to avoid possible
- * ECC/parity checksum errors when reading an uninitialized buffer
- */
- start = priv->mcfg[MRAM_SIDF].off;
- end = priv->mcfg[MRAM_TXB].off +
- priv->mcfg[MRAM_TXB].num * TXB_ELEMENT_SIZE;
- for (i = start; i < end; i += 4)
- writel(0x0, priv->mram_base + i);
-}
-
-static void m_can_of_parse_mram(struct m_can_priv *priv,
+static void m_can_of_parse_mram(struct m_can_classdev *cdev,
const u32 *mram_config_vals)
{
- priv->mcfg[MRAM_SIDF].off = mram_config_vals[0];
- priv->mcfg[MRAM_SIDF].num = mram_config_vals[1];
- priv->mcfg[MRAM_XIDF].off = priv->mcfg[MRAM_SIDF].off +
- priv->mcfg[MRAM_SIDF].num * SIDF_ELEMENT_SIZE;
- priv->mcfg[MRAM_XIDF].num = mram_config_vals[2];
- priv->mcfg[MRAM_RXF0].off = priv->mcfg[MRAM_XIDF].off +
- priv->mcfg[MRAM_XIDF].num * XIDF_ELEMENT_SIZE;
- priv->mcfg[MRAM_RXF0].num = mram_config_vals[3] &
+ cdev->mcfg[MRAM_SIDF].off = mram_config_vals[0];
+ cdev->mcfg[MRAM_SIDF].num = mram_config_vals[1];
+ cdev->mcfg[MRAM_XIDF].off = cdev->mcfg[MRAM_SIDF].off +
+ cdev->mcfg[MRAM_SIDF].num * SIDF_ELEMENT_SIZE;
+ cdev->mcfg[MRAM_XIDF].num = mram_config_vals[2];
+ cdev->mcfg[MRAM_RXF0].off = cdev->mcfg[MRAM_XIDF].off +
+ cdev->mcfg[MRAM_XIDF].num * XIDF_ELEMENT_SIZE;
+ cdev->mcfg[MRAM_RXF0].num = mram_config_vals[3] &
(RXFC_FS_MASK >> RXFC_FS_SHIFT);
- priv->mcfg[MRAM_RXF1].off = priv->mcfg[MRAM_RXF0].off +
- priv->mcfg[MRAM_RXF0].num * RXF0_ELEMENT_SIZE;
- priv->mcfg[MRAM_RXF1].num = mram_config_vals[4] &
+ cdev->mcfg[MRAM_RXF1].off = cdev->mcfg[MRAM_RXF0].off +
+ cdev->mcfg[MRAM_RXF0].num * RXF0_ELEMENT_SIZE;
+ cdev->mcfg[MRAM_RXF1].num = mram_config_vals[4] &
(RXFC_FS_MASK >> RXFC_FS_SHIFT);
- priv->mcfg[MRAM_RXB].off = priv->mcfg[MRAM_RXF1].off +
- priv->mcfg[MRAM_RXF1].num * RXF1_ELEMENT_SIZE;
- priv->mcfg[MRAM_RXB].num = mram_config_vals[5];
- priv->mcfg[MRAM_TXE].off = priv->mcfg[MRAM_RXB].off +
- priv->mcfg[MRAM_RXB].num * RXB_ELEMENT_SIZE;
- priv->mcfg[MRAM_TXE].num = mram_config_vals[6];
- priv->mcfg[MRAM_TXB].off = priv->mcfg[MRAM_TXE].off +
- priv->mcfg[MRAM_TXE].num * TXE_ELEMENT_SIZE;
- priv->mcfg[MRAM_TXB].num = mram_config_vals[7] &
+ cdev->mcfg[MRAM_RXB].off = cdev->mcfg[MRAM_RXF1].off +
+ cdev->mcfg[MRAM_RXF1].num * RXF1_ELEMENT_SIZE;
+ cdev->mcfg[MRAM_RXB].num = mram_config_vals[5];
+ cdev->mcfg[MRAM_TXE].off = cdev->mcfg[MRAM_RXB].off +
+ cdev->mcfg[MRAM_RXB].num * RXB_ELEMENT_SIZE;
+ cdev->mcfg[MRAM_TXE].num = mram_config_vals[6];
+ cdev->mcfg[MRAM_TXB].off = cdev->mcfg[MRAM_TXE].off +
+ cdev->mcfg[MRAM_TXE].num * TXE_ELEMENT_SIZE;
+ cdev->mcfg[MRAM_TXB].num = mram_config_vals[7] &
(TXBC_NDTB_MASK >> TXBC_NDTB_SHIFT);
- dev_dbg(priv->device,
- "mram_base %p sidf 0x%x %d xidf 0x%x %d rxf0 0x%x %d rxf1 0x%x %d rxb 0x%x %d txe 0x%x %d txb 0x%x %d\n",
- priv->mram_base,
- priv->mcfg[MRAM_SIDF].off, priv->mcfg[MRAM_SIDF].num,
- priv->mcfg[MRAM_XIDF].off, priv->mcfg[MRAM_XIDF].num,
- priv->mcfg[MRAM_RXF0].off, priv->mcfg[MRAM_RXF0].num,
- priv->mcfg[MRAM_RXF1].off, priv->mcfg[MRAM_RXF1].num,
- priv->mcfg[MRAM_RXB].off, priv->mcfg[MRAM_RXB].num,
- priv->mcfg[MRAM_TXE].off, priv->mcfg[MRAM_TXE].num,
- priv->mcfg[MRAM_TXB].off, priv->mcfg[MRAM_TXB].num);
-
- m_can_init_ram(priv);
+ dev_dbg(cdev->dev,
+ "sidf 0x%x %d xidf 0x%x %d rxf0 0x%x %d rxf1 0x%x %d rxb 0x%x %d txe 0x%x %d txb 0x%x %d\n",
+ cdev->mcfg[MRAM_SIDF].off, cdev->mcfg[MRAM_SIDF].num,
+ cdev->mcfg[MRAM_XIDF].off, cdev->mcfg[MRAM_XIDF].num,
+ cdev->mcfg[MRAM_RXF0].off, cdev->mcfg[MRAM_RXF0].num,
+ cdev->mcfg[MRAM_RXF1].off, cdev->mcfg[MRAM_RXF1].num,
+ cdev->mcfg[MRAM_RXB].off, cdev->mcfg[MRAM_RXB].num,
+ cdev->mcfg[MRAM_TXE].off, cdev->mcfg[MRAM_TXE].num,
+ cdev->mcfg[MRAM_TXB].off, cdev->mcfg[MRAM_TXB].num);
}
-static int m_can_plat_probe(struct platform_device *pdev)
+void m_can_init_ram(struct m_can_classdev *cdev)
{
- struct net_device *dev;
- struct m_can_priv *priv;
- struct resource *res;
- void __iomem *addr;
- void __iomem *mram_addr;
- struct clk *hclk, *cclk;
- int irq, ret;
- struct device_node *np;
- u32 mram_config_vals[MRAM_CFG_LEN];
- u32 tx_fifo_size;
-
- np = pdev->dev.of_node;
+ int end, i, start;
- hclk = devm_clk_get(&pdev->dev, "hclk");
- cclk = devm_clk_get(&pdev->dev, "cclk");
+ /* initialize the entire Message RAM in use to avoid possible
+ * ECC/parity checksum errors when reading an uninitialized buffer
+ */
+ start = cdev->mcfg[MRAM_SIDF].off;
+ end = cdev->mcfg[MRAM_TXB].off +
+ cdev->mcfg[MRAM_TXB].num * TXB_ELEMENT_SIZE;
- if (IS_ERR(hclk) || IS_ERR(cclk)) {
- dev_err(&pdev->dev, "no clock found\n");
- ret = -ENODEV;
- goto failed_ret;
- }
+ for (i = start; i < end; i += 4)
+ m_can_fifo_write_no_off(cdev, i, 0x0);
+}
+EXPORT_SYMBOL_GPL(m_can_init_ram);
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "m_can");
- addr = devm_ioremap_resource(&pdev->dev, res);
- irq = platform_get_irq_byname(pdev, "int0");
+int m_can_class_get_clocks(struct m_can_classdev *m_can_dev)
+{
+ int ret = 0;
- if (IS_ERR(addr) || irq < 0) {
- ret = -EINVAL;
- goto failed_ret;
- }
+ m_can_dev->hclk = devm_clk_get(m_can_dev->dev, "hclk");
+ m_can_dev->cclk = devm_clk_get(m_can_dev->dev, "cclk");
- /* message ram could be shared */
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "message_ram");
- if (!res) {
+ if (IS_ERR(m_can_dev->cclk)) {
+ dev_err(m_can_dev->dev, "no clock found\n");
ret = -ENODEV;
- goto failed_ret;
}
- mram_addr = devm_ioremap(&pdev->dev, res->start, resource_size(res));
- if (!mram_addr) {
- ret = -ENOMEM;
- goto failed_ret;
- }
+ return ret;
+}
+EXPORT_SYMBOL_GPL(m_can_class_get_clocks);
- /* get message ram configuration */
- ret = of_property_read_u32_array(np, "bosch,mram-cfg",
- mram_config_vals,
- sizeof(mram_config_vals) / 4);
+struct m_can_classdev *m_can_class_allocate_dev(struct device *dev)
+{
+ struct m_can_classdev *class_dev = NULL;
+ u32 mram_config_vals[MRAM_CFG_LEN];
+ struct net_device *net_dev;
+ u32 tx_fifo_size;
+ int ret;
+
+ ret = fwnode_property_read_u32_array(dev_fwnode(dev),
+ "bosch,mram-cfg",
+ mram_config_vals,
+ sizeof(mram_config_vals) / 4);
if (ret) {
- dev_err(&pdev->dev, "Could not get Message RAM configuration.");
- goto failed_ret;
+ dev_err(dev, "Could not get Message RAM configuration.");
+ goto out;
}
/* Get TX FIFO size
@@ -1652,101 +1742,110 @@ static int m_can_plat_probe(struct platform_device *pdev)
tx_fifo_size = mram_config_vals[7];
/* allocate the m_can device */
- dev = alloc_candev(sizeof(*priv), tx_fifo_size);
- if (!dev) {
- ret = -ENOMEM;
- goto failed_ret;
+ net_dev = alloc_candev(sizeof(*class_dev), tx_fifo_size);
+ if (!net_dev) {
+ dev_err(dev, "Failed to allocate CAN device");
+ goto out;
}
- priv = netdev_priv(dev);
- dev->irq = irq;
- priv->device = &pdev->dev;
- priv->hclk = hclk;
- priv->cclk = cclk;
- priv->can.clock.freq = clk_get_rate(cclk);
- priv->mram_base = mram_addr;
+ class_dev = netdev_priv(net_dev);
+ if (!class_dev) {
+ dev_err(dev, "Failed to init netdev cdevate");
+ goto out;
+ }
- platform_set_drvdata(pdev, dev);
- SET_NETDEV_DEV(dev, &pdev->dev);
+ class_dev->net = net_dev;
+ class_dev->dev = dev;
+ SET_NETDEV_DEV(net_dev, dev);
- /* Enable clocks. Necessary to read Core Release in order to determine
- * M_CAN version
- */
- pm_runtime_enable(&pdev->dev);
- ret = m_can_clk_start(priv);
- if (ret)
- goto pm_runtime_fail;
+ m_can_of_parse_mram(class_dev, mram_config_vals);
+out:
+ return class_dev;
+}
+EXPORT_SYMBOL_GPL(m_can_class_allocate_dev);
+
+int m_can_class_register(struct m_can_classdev *m_can_dev)
+{
+ int ret;
- ret = m_can_dev_setup(pdev, dev, addr);
+ if (m_can_dev->pm_clock_support) {
+ pm_runtime_enable(m_can_dev->dev);
+ ret = m_can_clk_start(m_can_dev);
+ if (ret)
+ goto pm_runtime_fail;
+ }
+
+ ret = m_can_dev_setup(m_can_dev);
if (ret)
goto clk_disable;
- ret = register_m_can_dev(dev);
+ ret = register_m_can_dev(m_can_dev->net);
if (ret) {
- dev_err(&pdev->dev, "registering %s failed (err=%d)\n",
- KBUILD_MODNAME, ret);
+ dev_err(m_can_dev->dev, "registering %s failed (err=%d)\n",
+ m_can_dev->net->name, ret);
goto clk_disable;
}
- m_can_of_parse_mram(priv, mram_config_vals);
-
- devm_can_led_init(dev);
+ devm_can_led_init(m_can_dev->net);
- of_can_transceiver(dev);
+ of_can_transceiver(m_can_dev->net);
- dev_info(&pdev->dev, "%s device registered (irq=%d, version=%d)\n",
- KBUILD_MODNAME, dev->irq, priv->version);
+ dev_info(m_can_dev->dev, "%s device registered (irq=%d, version=%d)\n",
+ KBUILD_MODNAME, m_can_dev->net->irq, m_can_dev->version);
/* Probe finished
* Stop clocks. They will be reactivated once the M_CAN device is opened
*/
clk_disable:
- m_can_clk_stop(priv);
+ m_can_clk_stop(m_can_dev);
pm_runtime_fail:
if (ret) {
- pm_runtime_disable(&pdev->dev);
- free_candev(dev);
+ if (m_can_dev->pm_clock_support)
+ pm_runtime_disable(m_can_dev->dev);
+ free_candev(m_can_dev->net);
}
-failed_ret:
+
return ret;
}
+EXPORT_SYMBOL_GPL(m_can_class_register);
-static __maybe_unused int m_can_suspend(struct device *dev)
+int m_can_class_suspend(struct device *dev)
{
struct net_device *ndev = dev_get_drvdata(dev);
- struct m_can_priv *priv = netdev_priv(ndev);
+ struct m_can_classdev *cdev = netdev_priv(ndev);
if (netif_running(ndev)) {
netif_stop_queue(ndev);
netif_device_detach(ndev);
m_can_stop(ndev);
- m_can_clk_stop(priv);
+ m_can_clk_stop(cdev);
}
pinctrl_pm_select_sleep_state(dev);
- priv->can.state = CAN_STATE_SLEEPING;
+ cdev->can.state = CAN_STATE_SLEEPING;
return 0;
}
+EXPORT_SYMBOL_GPL(m_can_class_suspend);
-static __maybe_unused int m_can_resume(struct device *dev)
+int m_can_class_resume(struct device *dev)
{
struct net_device *ndev = dev_get_drvdata(dev);
- struct m_can_priv *priv = netdev_priv(ndev);
+ struct m_can_classdev *cdev = netdev_priv(ndev);
pinctrl_pm_select_default_state(dev);
- priv->can.state = CAN_STATE_ERROR_ACTIVE;
+ cdev->can.state = CAN_STATE_ERROR_ACTIVE;
if (netif_running(ndev)) {
int ret;
- ret = m_can_clk_start(priv);
+ ret = m_can_clk_start(cdev);
if (ret)
return ret;
- m_can_init_ram(priv);
+ m_can_init_ram(cdev);
m_can_start(ndev);
netif_device_attach(ndev);
netif_start_queue(ndev);
@@ -1754,79 +1853,19 @@ static __maybe_unused int m_can_resume(struct device *dev)
return 0;
}
+EXPORT_SYMBOL_GPL(m_can_class_resume);
-static void unregister_m_can_dev(struct net_device *dev)
+void m_can_class_unregister(struct m_can_classdev *m_can_dev)
{
- unregister_candev(dev);
-}
+ unregister_candev(m_can_dev->net);
-static int m_can_plat_remove(struct platform_device *pdev)
-{
- struct net_device *dev = platform_get_drvdata(pdev);
+ m_can_clk_stop(m_can_dev);
- unregister_m_can_dev(dev);
-
- pm_runtime_disable(&pdev->dev);
-
- platform_set_drvdata(pdev, NULL);
-
- free_candev(dev);
-
- return 0;
-}
-
-static int __maybe_unused m_can_runtime_suspend(struct device *dev)
-{
- struct net_device *ndev = dev_get_drvdata(dev);
- struct m_can_priv *priv = netdev_priv(ndev);
-
- clk_disable_unprepare(priv->cclk);
- clk_disable_unprepare(priv->hclk);
-
- return 0;
-}
-
-static int __maybe_unused m_can_runtime_resume(struct device *dev)
-{
- struct net_device *ndev = dev_get_drvdata(dev);
- struct m_can_priv *priv = netdev_priv(ndev);
- int err;
-
- err = clk_prepare_enable(priv->hclk);
- if (err)
- return err;
-
- err = clk_prepare_enable(priv->cclk);
- if (err)
- clk_disable_unprepare(priv->hclk);
-
- return err;
+ free_candev(m_can_dev->net);
}
-
-static const struct dev_pm_ops m_can_pmops = {
- SET_RUNTIME_PM_OPS(m_can_runtime_suspend,
- m_can_runtime_resume, NULL)
- SET_SYSTEM_SLEEP_PM_OPS(m_can_suspend, m_can_resume)
-};
-
-static const struct of_device_id m_can_of_table[] = {
- { .compatible = "bosch,m_can", .data = NULL },
- { /* sentinel */ },
-};
-MODULE_DEVICE_TABLE(of, m_can_of_table);
-
-static struct platform_driver m_can_plat_driver = {
- .driver = {
- .name = KBUILD_MODNAME,
- .of_match_table = m_can_of_table,
- .pm = &m_can_pmops,
- },
- .probe = m_can_plat_probe,
- .remove = m_can_plat_remove,
-};
-
-module_platform_driver(m_can_plat_driver);
+EXPORT_SYMBOL_GPL(m_can_class_unregister);
MODULE_AUTHOR("Dong Aisheng <b29396@freescale.com>");
+MODULE_AUTHOR("Dan Murphy <dmurphy@ti.com>");
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("CAN bus driver for Bosch M_CAN controller");
diff --git a/drivers/net/can/m_can/m_can.h b/drivers/net/can/m_can/m_can.h
new file mode 100644
index 000000000000..49f42b50627a
--- /dev/null
+++ b/drivers/net/can/m_can/m_can.h
@@ -0,0 +1,110 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* CAN bus driver for Bosch M_CAN controller
+ * Copyright (C) 2018 Texas Instruments Incorporated - http://www.ti.com/
+ */
+
+#ifndef _CAN_M_CAN_H_
+#define _CAN_M_CAN_H_
+
+#include <linux/can/core.h>
+#include <linux/can/led.h>
+#include <linux/completion.h>
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/freezer.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/iopoll.h>
+#include <linux/can/dev.h>
+#include <linux/pinctrl/consumer.h>
+
+/* m_can lec values */
+enum m_can_lec_type {
+ LEC_NO_ERROR = 0,
+ LEC_STUFF_ERROR,
+ LEC_FORM_ERROR,
+ LEC_ACK_ERROR,
+ LEC_BIT1_ERROR,
+ LEC_BIT0_ERROR,
+ LEC_CRC_ERROR,
+ LEC_UNUSED,
+};
+
+enum m_can_mram_cfg {
+ MRAM_SIDF = 0,
+ MRAM_XIDF,
+ MRAM_RXF0,
+ MRAM_RXF1,
+ MRAM_RXB,
+ MRAM_TXE,
+ MRAM_TXB,
+ MRAM_CFG_NUM,
+};
+
+/* address offset and element number for each FIFO/Buffer in the Message RAM */
+struct mram_cfg {
+ u16 off;
+ u8 num;
+};
+
+struct m_can_classdev;
+struct m_can_ops {
+ /* Device specific call backs */
+ int (*clear_interrupts)(struct m_can_classdev *cdev);
+ u32 (*read_reg)(struct m_can_classdev *cdev, int reg);
+ int (*write_reg)(struct m_can_classdev *cdev, int reg, int val);
+ u32 (*read_fifo)(struct m_can_classdev *cdev, int addr_offset);
+ int (*write_fifo)(struct m_can_classdev *cdev, int addr_offset,
+ int val);
+ int (*init)(struct m_can_classdev *cdev);
+};
+
+struct m_can_classdev {
+ struct can_priv can;
+ struct napi_struct napi;
+ struct net_device *net;
+ struct device *dev;
+ struct clk *hclk;
+ struct clk *cclk;
+
+ struct workqueue_struct *tx_wq;
+ struct work_struct tx_work;
+ struct sk_buff *tx_skb;
+
+ struct can_bittiming_const *bit_timing;
+ struct can_bittiming_const *data_timing;
+
+ struct m_can_ops *ops;
+
+ void *device_data;
+
+ int version;
+ int freq;
+ u32 irqstatus;
+
+ int pm_clock_support;
+ int is_peripheral;
+
+ struct mram_cfg mcfg[MRAM_CFG_NUM];
+};
+
+struct m_can_classdev *m_can_class_allocate_dev(struct device *dev);
+int m_can_class_register(struct m_can_classdev *cdev);
+void m_can_class_unregister(struct m_can_classdev *cdev);
+int m_can_class_get_clocks(struct m_can_classdev *cdev);
+void m_can_init_ram(struct m_can_classdev *priv);
+void m_can_config_endisable(struct m_can_classdev *priv, bool enable);
+
+int m_can_class_suspend(struct device *dev);
+int m_can_class_resume(struct device *dev);
+#endif /* _CAN_M_CAN_H_ */
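
For context, a bus-specific back end built on this header only has to supply
register accessors through struct m_can_ops and then register the class
device. A minimal sketch, assuming a hypothetical memory-mapped device (the
my_* names are illustrative and not part of this patch; a real back end also
implements .read_fifo/.write_fifo and sets net->irq and the clock rate):

#include "m_can.h"

static u32 my_read_reg(struct m_can_classdev *cdev, int reg)
{
	/* device_data carries whatever the back end stored there */
	void __iomem *base = cdev->device_data;

	return readl(base + reg);
}

static int my_write_reg(struct m_can_classdev *cdev, int reg, int val)
{
	void __iomem *base = cdev->device_data;

	writel(val, base + reg);
	return 0;
}

static struct m_can_ops my_ops = {
	.read_reg = my_read_reg,
	.write_reg = my_write_reg,
};

static int my_probe(struct device *dev, void __iomem *base)
{
	struct m_can_classdev *cdev;

	cdev = m_can_class_allocate_dev(dev);
	if (!cdev)
		return -ENOMEM;

	cdev->dev = dev;
	cdev->device_data = base;
	cdev->ops = &my_ops;

	return m_can_class_register(cdev);
}
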
diff --git a/drivers/net/can/m_can/m_can_platform.c b/drivers/net/can/m_can/m_can_platform.c
new file mode 100644
index 000000000000..6ac4c35f247a
--- /dev/null
+++ b/drivers/net/can/m_can/m_can_platform.c
@@ -0,0 +1,201 @@
+// SPDX-License-Identifier: GPL-2.0
+// IOMapped CAN bus driver for Bosch M_CAN controller
+// Copyright (C) 2014 Freescale Semiconductor, Inc.
+// Dong Aisheng <b29396@freescale.com>
+//
+// Copyright (C) 2018-19 Texas Instruments Incorporated - http://www.ti.com/
+
+#include <linux/platform_device.h>
+
+#include "m_can.h"
+
+struct m_can_plat_priv {
+ void __iomem *base;
+ void __iomem *mram_base;
+};
+
+static u32 iomap_read_reg(struct m_can_classdev *cdev, int reg)
+{
+ struct m_can_plat_priv *priv = cdev->device_data;
+
+ return readl(priv->base + reg);
+}
+
+static u32 iomap_read_fifo(struct m_can_classdev *cdev, int offset)
+{
+ struct m_can_plat_priv *priv = cdev->device_data;
+
+ return readl(priv->mram_base + offset);
+}
+
+static int iomap_write_reg(struct m_can_classdev *cdev, int reg, int val)
+{
+ struct m_can_plat_priv *priv = cdev->device_data;
+
+ writel(val, priv->base + reg);
+
+ return 0;
+}
+
+static int iomap_write_fifo(struct m_can_classdev *cdev, int offset, int val)
+{
+ struct m_can_plat_priv *priv = cdev->device_data;
+
+ writel(val, priv->mram_base + offset);
+
+ return 0;
+}
+
+static struct m_can_ops m_can_plat_ops = {
+ .read_reg = iomap_read_reg,
+ .write_reg = iomap_write_reg,
+ .write_fifo = iomap_write_fifo,
+ .read_fifo = iomap_read_fifo,
+};
+
+static int m_can_plat_probe(struct platform_device *pdev)
+{
+ struct m_can_classdev *mcan_class;
+ struct m_can_plat_priv *priv;
+ struct resource *res;
+ void __iomem *addr;
+ void __iomem *mram_addr;
+ int irq, ret = 0;
+
+ mcan_class = m_can_class_allocate_dev(&pdev->dev);
+ if (!mcan_class)
+ return -ENOMEM;
+
+ priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ mcan_class->device_data = priv;
+
+ m_can_class_get_clocks(mcan_class);
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "m_can");
+ addr = devm_ioremap_resource(&pdev->dev, res);
+ irq = platform_get_irq_byname(pdev, "int0");
+ if (IS_ERR(addr) || irq < 0) {
+ ret = -EINVAL;
+ goto failed_ret;
+ }
+
+ /* message ram could be shared */
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "message_ram");
+ if (!res) {
+ ret = -ENODEV;
+ goto failed_ret;
+ }
+
+ mram_addr = devm_ioremap(&pdev->dev, res->start, resource_size(res));
+ if (!mram_addr) {
+ ret = -ENOMEM;
+ goto failed_ret;
+ }
+
+ priv->base = addr;
+ priv->mram_base = mram_addr;
+
+ mcan_class->net->irq = irq;
+ mcan_class->pm_clock_support = 1;
+ mcan_class->can.clock.freq = clk_get_rate(mcan_class->cclk);
+ mcan_class->dev = &pdev->dev;
+
+ mcan_class->ops = &m_can_plat_ops;
+
+ mcan_class->is_peripheral = false;
+
+	/* store the net_device; remove and the PM handlers read it back */
+	platform_set_drvdata(pdev, mcan_class->net);
+
+ m_can_init_ram(mcan_class);
+
+ ret = m_can_class_register(mcan_class);
+
+failed_ret:
+ return ret;
+}
+
+static __maybe_unused int m_can_suspend(struct device *dev)
+{
+ return m_can_class_suspend(dev);
+}
+
+static __maybe_unused int m_can_resume(struct device *dev)
+{
+ return m_can_class_resume(dev);
+}
+
+static int m_can_plat_remove(struct platform_device *pdev)
+{
+ struct net_device *dev = platform_get_drvdata(pdev);
+ struct m_can_classdev *mcan_class = netdev_priv(dev);
+
+ m_can_class_unregister(mcan_class);
+
+ platform_set_drvdata(pdev, NULL);
+
+ return 0;
+}
+
+static int __maybe_unused m_can_runtime_suspend(struct device *dev)
+{
+ struct net_device *ndev = dev_get_drvdata(dev);
+ struct m_can_classdev *mcan_class = netdev_priv(ndev);
+
+ m_can_class_suspend(dev);
+
+ clk_disable_unprepare(mcan_class->cclk);
+ clk_disable_unprepare(mcan_class->hclk);
+
+ return 0;
+}
+
+static int __maybe_unused m_can_runtime_resume(struct device *dev)
+{
+ struct net_device *ndev = dev_get_drvdata(dev);
+ struct m_can_classdev *mcan_class = netdev_priv(ndev);
+ int err;
+
+ err = clk_prepare_enable(mcan_class->hclk);
+ if (err)
+ return err;
+
+ err = clk_prepare_enable(mcan_class->cclk);
+ if (err)
+ clk_disable_unprepare(mcan_class->hclk);
+
+ m_can_class_resume(dev);
+
+ return err;
+}
+
+static const struct dev_pm_ops m_can_pmops = {
+ SET_RUNTIME_PM_OPS(m_can_runtime_suspend,
+ m_can_runtime_resume, NULL)
+ SET_SYSTEM_SLEEP_PM_OPS(m_can_suspend, m_can_resume)
+};
+
+static const struct of_device_id m_can_of_table[] = {
+ { .compatible = "bosch,m_can", .data = NULL },
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, m_can_of_table);
+
+static struct platform_driver m_can_plat_driver = {
+ .driver = {
+ .name = KBUILD_MODNAME,
+ .of_match_table = m_can_of_table,
+ .pm = &m_can_pmops,
+ },
+ .probe = m_can_plat_probe,
+ .remove = m_can_plat_remove,
+};
+
+module_platform_driver(m_can_plat_driver);
+
+MODULE_AUTHOR("Dong Aisheng <b29396@freescale.com>");
+MODULE_AUTHOR("Dan Murphy <dmurphy@ti.com>");
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("M_CAN driver for IO Mapped Bosch controllers");
diff --git a/drivers/net/can/m_can/tcan4x5x.c b/drivers/net/can/m_can/tcan4x5x.c
new file mode 100644
index 000000000000..a697996d81b4
--- /dev/null
+++ b/drivers/net/can/m_can/tcan4x5x.c
@@ -0,0 +1,527 @@
+// SPDX-License-Identifier: GPL-2.0
+// SPI to CAN driver for the Texas Instruments TCAN4x5x
+// Copyright (C) 2018-19 Texas Instruments Incorporated - http://www.ti.com/
+
+#include <linux/regmap.h>
+#include <linux/spi/spi.h>
+
+#include <linux/regulator/consumer.h>
+#include <linux/gpio/consumer.h>
+
+#include "m_can.h"
+
+#define DEVICE_NAME "tcan4x5x"
+#define TCAN4X5X_EXT_CLK_DEF 40000000
+
+#define TCAN4X5X_DEV_ID0 0x00
+#define TCAN4X5X_DEV_ID1 0x04
+#define TCAN4X5X_REV 0x08
+#define TCAN4X5X_STATUS 0x0C
+#define TCAN4X5X_ERROR_STATUS 0x10
+#define TCAN4X5X_CONTROL 0x14
+
+#define TCAN4X5X_CONFIG 0x800
+#define TCAN4X5X_TS_PRESCALE 0x804
+#define TCAN4X5X_TEST_REG 0x808
+#define TCAN4X5X_INT_FLAGS 0x820
+#define TCAN4X5X_MCAN_INT_REG 0x824
+#define TCAN4X5X_INT_EN 0x830
+
+/* Interrupt bits */
+#define TCAN4X5X_CANBUSTERMOPEN_INT_EN BIT(30)
+#define TCAN4X5X_CANHCANL_INT_EN BIT(29)
+#define TCAN4X5X_CANHBAT_INT_EN BIT(28)
+#define TCAN4X5X_CANLGND_INT_EN BIT(27)
+#define TCAN4X5X_CANBUSOPEN_INT_EN BIT(26)
+#define TCAN4X5X_CANBUSGND_INT_EN BIT(25)
+#define TCAN4X5X_CANBUSBAT_INT_EN BIT(24)
+#define TCAN4X5X_UVSUP_INT_EN BIT(22)
+#define TCAN4X5X_UVIO_INT_EN BIT(21)
+#define TCAN4X5X_TSD_INT_EN BIT(19)
+#define TCAN4X5X_ECCERR_INT_EN BIT(16)
+#define TCAN4X5X_CANINT_INT_EN BIT(15)
+#define TCAN4X5X_LWU_INT_EN BIT(14)
+#define TCAN4X5X_CANSLNT_INT_EN BIT(10)
+#define TCAN4X5X_CANDOM_INT_EN BIT(8)
+#define TCAN4X5X_CANBUS_ERR_INT_EN BIT(5)
+#define TCAN4X5X_BUS_FAULT BIT(4)
+#define TCAN4X5X_MCAN_INT BIT(1)
+#define TCAN4X5X_ENABLE_TCAN_INT \
+ (TCAN4X5X_MCAN_INT | TCAN4X5X_BUS_FAULT | \
+ TCAN4X5X_CANBUS_ERR_INT_EN | TCAN4X5X_CANINT_INT_EN)
+
+/* MCAN Interrupt bits */
+#define TCAN4X5X_MCAN_IR_ARA BIT(29)
+#define TCAN4X5X_MCAN_IR_PED BIT(28)
+#define TCAN4X5X_MCAN_IR_PEA BIT(27)
+#define TCAN4X5X_MCAN_IR_WD BIT(26)
+#define TCAN4X5X_MCAN_IR_BO BIT(25)
+#define TCAN4X5X_MCAN_IR_EW BIT(24)
+#define TCAN4X5X_MCAN_IR_EP BIT(23)
+#define TCAN4X5X_MCAN_IR_ELO BIT(22)
+#define TCAN4X5X_MCAN_IR_BEU BIT(21)
+#define TCAN4X5X_MCAN_IR_BEC BIT(20)
+#define TCAN4X5X_MCAN_IR_DRX BIT(19)
+#define TCAN4X5X_MCAN_IR_TOO BIT(18)
+#define TCAN4X5X_MCAN_IR_MRAF BIT(17)
+#define TCAN4X5X_MCAN_IR_TSW BIT(16)
+#define TCAN4X5X_MCAN_IR_TEFL BIT(15)
+#define TCAN4X5X_MCAN_IR_TEFF BIT(14)
+#define TCAN4X5X_MCAN_IR_TEFW BIT(13)
+#define TCAN4X5X_MCAN_IR_TEFN BIT(12)
+#define TCAN4X5X_MCAN_IR_TFE BIT(11)
+#define TCAN4X5X_MCAN_IR_TCF BIT(10)
+#define TCAN4X5X_MCAN_IR_TC BIT(9)
+#define TCAN4X5X_MCAN_IR_HPM BIT(8)
+#define TCAN4X5X_MCAN_IR_RF1L BIT(7)
+#define TCAN4X5X_MCAN_IR_RF1F BIT(6)
+#define TCAN4X5X_MCAN_IR_RF1W BIT(5)
+#define TCAN4X5X_MCAN_IR_RF1N BIT(4)
+#define TCAN4X5X_MCAN_IR_RF0L BIT(3)
+#define TCAN4X5X_MCAN_IR_RF0F BIT(2)
+#define TCAN4X5X_MCAN_IR_RF0W BIT(1)
+#define TCAN4X5X_MCAN_IR_RF0N BIT(0)
+#define TCAN4X5X_ENABLE_MCAN_INT \
+ (TCAN4X5X_MCAN_IR_TC | TCAN4X5X_MCAN_IR_RF0N | \
+ TCAN4X5X_MCAN_IR_RF1N | TCAN4X5X_MCAN_IR_RF0F | \
+ TCAN4X5X_MCAN_IR_RF1F)
+
+#define TCAN4X5X_MRAM_START 0x8000
+#define TCAN4X5X_MCAN_OFFSET 0x1000
+#define TCAN4X5X_MAX_REGISTER 0x8fff
+
+#define TCAN4X5X_CLEAR_ALL_INT 0xffffffff
+#define TCAN4X5X_SET_ALL_INT 0xffffffff
+
+#define TCAN4X5X_WRITE_CMD (0x61 << 24)
+#define TCAN4X5X_READ_CMD (0x41 << 24)
+
+#define TCAN4X5X_MODE_SEL_MASK (BIT(7) | BIT(6))
+#define TCAN4X5X_MODE_SLEEP 0x00
+#define TCAN4X5X_MODE_STANDBY BIT(6)
+#define TCAN4X5X_MODE_NORMAL BIT(7)
+
+#define TCAN4X5X_SW_RESET BIT(2)
+
+#define TCAN4X5X_MCAN_CONFIGURED BIT(5)
+#define TCAN4X5X_WATCHDOG_EN BIT(3)
+#define TCAN4X5X_WD_60_MS_TIMER 0
+#define TCAN4X5X_WD_600_MS_TIMER BIT(28)
+#define TCAN4X5X_WD_3_S_TIMER BIT(29)
+#define TCAN4X5X_WD_6_S_TIMER (BIT(28) | BIT(29))
+
+struct tcan4x5x_priv {
+ struct regmap *regmap;
+ struct spi_device *spi;
+
+ struct m_can_classdev *mcan_dev;
+
+ struct gpio_desc *reset_gpio;
+ struct gpio_desc *interrupt_gpio;
+ struct gpio_desc *device_wake_gpio;
+ struct gpio_desc *device_state_gpio;
+ struct regulator *power;
+
+	/* Register-based IP: MCAN register block and Message RAM offsets
+	 * within the TCAN4x5x address map
+	 */
+ int mram_start;
+ int reg_offset;
+};
+
+static struct can_bittiming_const tcan4x5x_bittiming_const = {
+ .name = DEVICE_NAME,
+ .tseg1_min = 2,
+ .tseg1_max = 31,
+ .tseg2_min = 2,
+ .tseg2_max = 16,
+ .sjw_max = 16,
+ .brp_min = 1,
+ .brp_max = 32,
+ .brp_inc = 1,
+};
+
+static struct can_bittiming_const tcan4x5x_data_bittiming_const = {
+ .name = DEVICE_NAME,
+ .tseg1_min = 1,
+ .tseg1_max = 32,
+ .tseg2_min = 1,
+ .tseg2_max = 16,
+ .sjw_max = 16,
+ .brp_min = 1,
+ .brp_max = 32,
+ .brp_inc = 1,
+};
+
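+/* If the device-state pin reports that the part needs waking, pulse
+ * the device-wake pin (low, then high) before touching any registers.
+ */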
+static void tcan4x5x_check_wake(struct tcan4x5x_priv *priv)
+{
+ int wake_state = 0;
+
+ if (priv->device_state_gpio)
+ wake_state = gpiod_get_value(priv->device_state_gpio);
+
+ if (priv->device_wake_gpio && wake_state) {
+ gpiod_set_value(priv->device_wake_gpio, 0);
+ usleep_range(5, 50);
+ gpiod_set_value(priv->device_wake_gpio, 1);
+ }
+}
+
+static int regmap_spi_gather_write(void *context, const void *reg,
+ size_t reg_len, const void *val,
+ size_t val_len)
+{
+ struct device *dev = context;
+ struct spi_device *spi = to_spi_device(dev);
+ struct spi_message m;
+ u32 addr;
+ struct spi_transfer t[2] = {
+ { .tx_buf = &addr, .len = reg_len, .cs_change = 0,},
+ { .tx_buf = val, .len = val_len, },
+ };
+
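+	/* 32-bit command word: write opcode in bits 31:24, 16-bit register
+	 * address in bits 23:8, transfer length in 32-bit words in bits 7:0
+	 */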
+ addr = TCAN4X5X_WRITE_CMD | (*((u16 *)reg) << 8) | val_len >> 2;
+
+ spi_message_init(&m);
+ spi_message_add_tail(&t[0], &m);
+ spi_message_add_tail(&t[1], &m);
+
+ return spi_sync(spi, &m);
+}
+
+static int tcan4x5x_regmap_write(void *context, const void *data, size_t count)
+{
+ u16 *reg = (u16 *)(data);
+ const u32 *val = data + 4;
+
+ return regmap_spi_gather_write(context, reg, 4, val, count - 4);
+}
+
+static int regmap_spi_async_write(void *context,
+ const void *reg, size_t reg_len,
+ const void *val, size_t val_len,
+ struct regmap_async *a)
+{
+ return -ENOTSUPP;
+}
+
+static struct regmap_async *regmap_spi_async_alloc(void)
+{
+ return NULL;
+}
+
+static int tcan4x5x_regmap_read(void *context,
+ const void *reg, size_t reg_size,
+ void *val, size_t val_size)
+{
+ struct device *dev = context;
+ struct spi_device *spi = to_spi_device(dev);
+
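+	/* same command-word layout as the write path, with the read opcode */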
+ u32 addr = TCAN4X5X_READ_CMD | (*((u16 *)reg) << 8) | val_size >> 2;
+
+ return spi_write_then_read(spi, &addr, reg_size, (u32 *)val, val_size);
+}
+
+static struct regmap_bus tcan4x5x_bus = {
+ .write = tcan4x5x_regmap_write,
+ .gather_write = regmap_spi_gather_write,
+ .async_write = regmap_spi_async_write,
+ .async_alloc = regmap_spi_async_alloc,
+ .read = tcan4x5x_regmap_read,
+ .read_flag_mask = 0x00,
+ .reg_format_endian_default = REGMAP_ENDIAN_NATIVE,
+ .val_format_endian_default = REGMAP_ENDIAN_NATIVE,
+};
+
+static u32 tcan4x5x_read_reg(struct m_can_classdev *cdev, int reg)
+{
+ struct tcan4x5x_priv *priv = cdev->device_data;
+ u32 val;
+
+ tcan4x5x_check_wake(priv);
+
+ regmap_read(priv->regmap, priv->reg_offset + reg, &val);
+
+ return val;
+}
+
+static u32 tcan4x5x_read_fifo(struct m_can_classdev *cdev, int addr_offset)
+{
+ struct tcan4x5x_priv *priv = cdev->device_data;
+ u32 val;
+
+ tcan4x5x_check_wake(priv);
+
+ regmap_read(priv->regmap, priv->mram_start + addr_offset, &val);
+
+ return val;
+}
+
+static int tcan4x5x_write_reg(struct m_can_classdev *cdev, int reg, int val)
+{
+ struct tcan4x5x_priv *priv = cdev->device_data;
+
+ tcan4x5x_check_wake(priv);
+
+ return regmap_write(priv->regmap, priv->reg_offset + reg, val);
+}
+
+static int tcan4x5x_write_fifo(struct m_can_classdev *cdev,
+ int addr_offset, int val)
+{
+ struct tcan4x5x_priv *priv = cdev->device_data;
+
+ tcan4x5x_check_wake(priv);
+
+ return regmap_write(priv->regmap, priv->mram_start + addr_offset, val);
+}
+
+static int tcan4x5x_power_enable(struct regulator *reg, int enable)
+{
+ if (IS_ERR_OR_NULL(reg))
+ return 0;
+
+ if (enable)
+ return regulator_enable(reg);
+ else
+ return regulator_disable(reg);
+}
+
+static int tcan4x5x_write_tcan_reg(struct m_can_classdev *cdev,
+ int reg, int val)
+{
+ struct tcan4x5x_priv *priv = cdev->device_data;
+
+ tcan4x5x_check_wake(priv);
+
+ return regmap_write(priv->regmap, reg, val);
+}
+
+static int tcan4x5x_clear_interrupts(struct m_can_classdev *cdev)
+{
+ struct tcan4x5x_priv *tcan4x5x = cdev->device_data;
+ int ret;
+
+ tcan4x5x_check_wake(tcan4x5x);
+
+ ret = tcan4x5x_write_tcan_reg(cdev, TCAN4X5X_STATUS,
+ TCAN4X5X_CLEAR_ALL_INT);
+ if (ret)
+ return ret;
+
+ ret = tcan4x5x_write_tcan_reg(cdev, TCAN4X5X_MCAN_INT_REG,
+ TCAN4X5X_ENABLE_MCAN_INT);
+ if (ret)
+ return ret;
+
+ ret = tcan4x5x_write_tcan_reg(cdev, TCAN4X5X_INT_FLAGS,
+ TCAN4X5X_CLEAR_ALL_INT);
+ if (ret)
+ return ret;
+
+ ret = tcan4x5x_write_tcan_reg(cdev, TCAN4X5X_ERROR_STATUS,
+ TCAN4X5X_CLEAR_ALL_INT);
+ if (ret)
+ return ret;
+
+ return ret;
+}
+
+static int tcan4x5x_init(struct m_can_classdev *cdev)
+{
+ struct tcan4x5x_priv *tcan4x5x = cdev->device_data;
+ int ret;
+
+ tcan4x5x_check_wake(tcan4x5x);
+
+ ret = tcan4x5x_clear_interrupts(cdev);
+ if (ret)
+ return ret;
+
+ ret = tcan4x5x_write_tcan_reg(cdev, TCAN4X5X_INT_EN,
+ TCAN4X5X_ENABLE_TCAN_INT);
+ if (ret)
+ return ret;
+
+ ret = regmap_update_bits(tcan4x5x->regmap, TCAN4X5X_CONFIG,
+ TCAN4X5X_MODE_SEL_MASK, TCAN4X5X_MODE_NORMAL);
+ if (ret)
+ return ret;
+
+ /* Zero out the MCAN buffers */
+ m_can_init_ram(cdev);
+
+ return ret;
+}
+
+static int tcan4x5x_parse_config(struct m_can_classdev *cdev)
+{
+ struct tcan4x5x_priv *tcan4x5x = cdev->device_data;
+
+ tcan4x5x->interrupt_gpio = devm_gpiod_get(cdev->dev, "data-ready",
+ GPIOD_IN);
+ if (IS_ERR(tcan4x5x->interrupt_gpio)) {
+ dev_err(cdev->dev, "data-ready gpio not defined\n");
+ return -EINVAL;
+ }
+
+ tcan4x5x->device_wake_gpio = devm_gpiod_get(cdev->dev, "device-wake",
+ GPIOD_OUT_HIGH);
+ if (IS_ERR(tcan4x5x->device_wake_gpio)) {
+ dev_err(cdev->dev, "device-wake gpio not defined\n");
+ return -EINVAL;
+ }
+
+ tcan4x5x->reset_gpio = devm_gpiod_get_optional(cdev->dev, "reset",
+ GPIOD_OUT_LOW);
+ if (IS_ERR(tcan4x5x->reset_gpio))
+ tcan4x5x->reset_gpio = NULL;
+
+ tcan4x5x->device_state_gpio = devm_gpiod_get_optional(cdev->dev,
+ "device-state",
+ GPIOD_IN);
+ if (IS_ERR(tcan4x5x->device_state_gpio))
+ tcan4x5x->device_state_gpio = NULL;
+
+ cdev->net->irq = gpiod_to_irq(tcan4x5x->interrupt_gpio);
+
+ tcan4x5x->power = devm_regulator_get_optional(cdev->dev,
+ "vsup");
+ if (PTR_ERR(tcan4x5x->power) == -EPROBE_DEFER)
+ return -EPROBE_DEFER;
+
+ return 0;
+}
+
+static const struct regmap_config tcan4x5x_regmap = {
+ .reg_bits = 32,
+ .val_bits = 32,
+ .cache_type = REGCACHE_NONE,
+ .max_register = TCAN4X5X_MAX_REGISTER,
+};
+
+static struct m_can_ops tcan4x5x_ops = {
+ .init = tcan4x5x_init,
+ .read_reg = tcan4x5x_read_reg,
+ .write_reg = tcan4x5x_write_reg,
+ .write_fifo = tcan4x5x_write_fifo,
+ .read_fifo = tcan4x5x_read_fifo,
+ .clear_interrupts = tcan4x5x_clear_interrupts,
+};
+
+static int tcan4x5x_can_probe(struct spi_device *spi)
+{
+ struct tcan4x5x_priv *priv;
+ struct m_can_classdev *mcan_class;
+ int freq, ret;
+
+ mcan_class = m_can_class_allocate_dev(&spi->dev);
+ if (!mcan_class)
+ return -ENOMEM;
+
+ priv = devm_kzalloc(&spi->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ mcan_class->device_data = priv;
+
+ m_can_class_get_clocks(mcan_class);
+ if (IS_ERR(mcan_class->cclk)) {
+ dev_err(&spi->dev, "no CAN clock source defined\n");
+ freq = TCAN4X5X_EXT_CLK_DEF;
+ } else {
+ freq = clk_get_rate(mcan_class->cclk);
+ }
+
+ /* Sanity check */
+ if (freq < 20000000 || freq > TCAN4X5X_EXT_CLK_DEF)
+ return -ERANGE;
+
+ priv->reg_offset = TCAN4X5X_MCAN_OFFSET;
+ priv->mram_start = TCAN4X5X_MRAM_START;
+ priv->spi = spi;
+ priv->mcan_dev = mcan_class;
+
+ mcan_class->pm_clock_support = 0;
+ mcan_class->can.clock.freq = freq;
+ mcan_class->dev = &spi->dev;
+ mcan_class->ops = &tcan4x5x_ops;
+ mcan_class->is_peripheral = true;
+ mcan_class->bit_timing = &tcan4x5x_bittiming_const;
+ mcan_class->data_timing = &tcan4x5x_data_bittiming_const;
+
+ spi_set_drvdata(spi, priv);
+
+ ret = tcan4x5x_parse_config(mcan_class);
+ if (ret)
+ goto out_clk;
+
+ /* Configure the SPI bus */
+ spi->bits_per_word = 32;
+ ret = spi_setup(spi);
+ if (ret)
+ goto out_clk;
+
+	priv->regmap = devm_regmap_init(&spi->dev, &tcan4x5x_bus,
+					&spi->dev, &tcan4x5x_regmap);
+	if (IS_ERR(priv->regmap)) {
+		ret = PTR_ERR(priv->regmap);
+		goto out_clk;
+	}
+
+ tcan4x5x_power_enable(priv->power, 1);
+
+ ret = m_can_class_register(mcan_class);
+ if (ret)
+ goto out_power;
+
+ netdev_info(mcan_class->net, "TCAN4X5X successfully initialized.\n");
+ return 0;
+
+out_power:
+ tcan4x5x_power_enable(priv->power, 0);
+out_clk:
+ if (!IS_ERR(mcan_class->cclk)) {
+ clk_disable_unprepare(mcan_class->cclk);
+ clk_disable_unprepare(mcan_class->hclk);
+ }
+
+ dev_err(&spi->dev, "Probe failed, err=%d\n", ret);
+ return ret;
+}
+
+static int tcan4x5x_can_remove(struct spi_device *spi)
+{
+ struct tcan4x5x_priv *priv = spi_get_drvdata(spi);
+
+ tcan4x5x_power_enable(priv->power, 0);
+
+ m_can_class_unregister(priv->mcan_dev);
+
+ return 0;
+}
+
+static const struct of_device_id tcan4x5x_of_match[] = {
+ { .compatible = "ti,tcan4x5x", },
+ { }
+};
+MODULE_DEVICE_TABLE(of, tcan4x5x_of_match);
+
+static const struct spi_device_id tcan4x5x_id_table[] = {
+ {
+ .name = "tcan4x5x",
+ .driver_data = 0,
+ },
+ { }
+};
+MODULE_DEVICE_TABLE(spi, tcan4x5x_id_table);
+
+static struct spi_driver tcan4x5x_can_driver = {
+ .driver = {
+ .name = DEVICE_NAME,
+ .of_match_table = tcan4x5x_of_match,
+ .pm = NULL,
+ },
+ .id_table = tcan4x5x_id_table,
+ .probe = tcan4x5x_can_probe,
+ .remove = tcan4x5x_can_remove,
+};
+module_spi_driver(tcan4x5x_can_driver);
+
+MODULE_AUTHOR("Dan Murphy <dmurphy@ti.com>");
+MODULE_DESCRIPTION("Texas Instruments TCAN4x5x CAN driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/can/rcar/rcar_can.c b/drivers/net/can/rcar/rcar_can.c
index 13e66297b65f..bf5adea9c0a3 100644
--- a/drivers/net/can/rcar/rcar_can.c
+++ b/drivers/net/can/rcar/rcar_can.c
@@ -15,11 +15,17 @@
#include <linux/can/led.h>
#include <linux/can/dev.h>
#include <linux/clk.h>
-#include <linux/can/platform/rcar_can.h>
#include <linux/of.h>
#define RCAR_CAN_DRV_NAME "rcar_can"
+/* Clock Select Register settings */
+enum CLKR {
+ CLKR_CLKP1 = 0, /* Peripheral clock (clkp1) */
+ CLKR_CLKP2 = 1, /* Peripheral clock (clkp2) */
+ CLKR_CLKEXT = 3, /* Externally input clock */
+};
+
#define RCAR_SUPPORTED_CLOCKS (BIT(CLKR_CLKP1) | BIT(CLKR_CLKP2) | \
BIT(CLKR_CLKEXT))
@@ -736,7 +742,6 @@ static const char * const clock_names[] = {
static int rcar_can_probe(struct platform_device *pdev)
{
- struct rcar_can_platform_data *pdata;
struct rcar_can_priv *priv;
struct net_device *ndev;
struct resource *mem;
@@ -745,21 +750,11 @@ static int rcar_can_probe(struct platform_device *pdev)
int err = -ENODEV;
int irq;
- if (pdev->dev.of_node) {
- of_property_read_u32(pdev->dev.of_node,
- "renesas,can-clock-select", &clock_select);
- } else {
- pdata = dev_get_platdata(&pdev->dev);
- if (!pdata) {
- dev_err(&pdev->dev, "No platform data provided!\n");
- goto fail;
- }
- clock_select = pdata->clock_select;
- }
+ of_property_read_u32(pdev->dev.of_node, "renesas,can-clock-select",
+ &clock_select);
irq = platform_get_irq(pdev, 0);
if (irq < 0) {
- dev_err(&pdev->dev, "No IRQ resource\n");
err = irq;
goto fail;
}
diff --git a/drivers/net/can/rcar/rcar_canfd.c b/drivers/net/can/rcar/rcar_canfd.c
index de34a4b82d4a..edaa1ca972c1 100644
--- a/drivers/net/can/rcar/rcar_canfd.c
+++ b/drivers/net/can/rcar/rcar_canfd.c
@@ -1652,14 +1652,12 @@ static int rcar_canfd_probe(struct platform_device *pdev)
ch_irq = platform_get_irq(pdev, 0);
if (ch_irq < 0) {
- dev_err(&pdev->dev, "no Channel IRQ resource\n");
err = ch_irq;
goto fail_dev;
}
g_irq = platform_get_irq(pdev, 1);
if (g_irq < 0) {
- dev_err(&pdev->dev, "no Global IRQ resource\n");
err = g_irq;
goto fail_dev;
}
diff --git a/drivers/net/can/sja1000/Kconfig b/drivers/net/can/sja1000/Kconfig
index 6b72da2f18a6..32d242dc0d9f 100644
--- a/drivers/net/can/sja1000/Kconfig
+++ b/drivers/net/can/sja1000/Kconfig
@@ -1,26 +1,18 @@
# SPDX-License-Identifier: GPL-2.0-only
+
menuconfig CAN_SJA1000
tristate "Philips/NXP SJA1000 devices"
depends on HAS_IOMEM
if CAN_SJA1000
-config CAN_SJA1000_ISA
- tristate "ISA Bus based legacy SJA1000 driver"
- ---help---
- This driver adds legacy support for SJA1000 chips connected to
- the ISA bus using I/O port, memory mapped or indirect access.
-
-config CAN_SJA1000_PLATFORM
- tristate "Generic Platform Bus based SJA1000 driver"
+config CAN_EMS_PCI
+ tristate "EMS CPC-PCI, CPC-PCIe and CPC-104P Card"
+ depends on PCI
---help---
- This driver adds support for the SJA1000 chips connected to
- the "platform bus" (Linux abstraction for directly to the
- processor attached devices). Which can be found on various
- boards from Phytec (http://www.phytec.de) like the PCM027,
- PCM038. It also provides the OpenFirmware "platform bus" found
- on embedded systems with OpenFirmware bindings, e.g. if you
- have a PowerPC based system you may want to enable this option.
+ This driver is for the one, two or four channel CPC-PCI,
+ CPC-PCIe and CPC-104P cards from EMS Dr. Thomas Wuensche
+ (http://www.ems-wuensche.de).
config CAN_EMS_PCMCIA
tristate "EMS CPC-CARD Card"
@@ -29,23 +21,22 @@ config CAN_EMS_PCMCIA
This driver is for the one or two channel CPC-CARD cards from
EMS Dr. Thomas Wuensche (http://www.ems-wuensche.de).
-config CAN_EMS_PCI
- tristate "EMS CPC-PCI, CPC-PCIe and CPC-104P Card"
+config CAN_F81601
+ tristate "Fintek F81601 PCIE to 2 CAN Controller"
depends on PCI
- ---help---
- This driver is for the one, two or four channel CPC-PCI,
- CPC-PCIe and CPC-104P cards from EMS Dr. Thomas Wuensche
- (http://www.ems-wuensche.de).
+ help
+	  This driver adds support for the Fintek F81601 PCIE to 2 CAN
+	  Controller. It has an internal 24MHz clock source, but the
+	  manufacturer may fit an external one instead. Use modinfo to
+	  get usage of the module parameters. Visit
+	  http://www.fintek.com.tw for more information.
-config CAN_PEAK_PCMCIA
- tristate "PEAK PCAN-PC Card"
- depends on PCMCIA
- depends on HAS_IOPORT_MAP
+config CAN_KVASER_PCI
+ tristate "Kvaser PCIcanx and Kvaser PCIcan PCI Cards"
+ depends on PCI
---help---
- This driver is for the PCAN-PC Card PCMCIA adapter (1 or 2 channels)
- from PEAK-System (http://www.peak-system.com). To compile this
- driver as a module, choose M here: the module will be called
- peak_pcmcia.
+ This driver is for the PCIcanx and PCIcan cards (1, 2 or
+ 4 channel) from Kvaser (http://www.kvaser.com).
config CAN_PEAK_PCI
tristate "PEAK PCAN-PCI/PCIe/miniPCI Cards"
@@ -66,12 +57,15 @@ config CAN_PEAK_PCIEC
Technik. This will also automatically select I2C and I2C_ALGO
configuration options.
-config CAN_KVASER_PCI
- tristate "Kvaser PCIcanx and Kvaser PCIcan PCI Cards"
- depends on PCI
+config CAN_PEAK_PCMCIA
+ tristate "PEAK PCAN-PC Card"
+ depends on PCMCIA
+ depends on HAS_IOPORT_MAP
---help---
- This driver is for the PCIcanx and PCIcan cards (1, 2 or
- 4 channel) from Kvaser (http://www.kvaser.com).
+ This driver is for the PCAN-PC Card PCMCIA adapter (1 or 2 channels)
+ from PEAK-System (http://www.peak-system.com). To compile this
+ driver as a module, choose M here: the module will be called
+ peak_pcmcia.
config CAN_PLX_PCI
tristate "PLX90xx PCI-bridge based Cards"
@@ -91,6 +85,23 @@ config CAN_PLX_PCI
- Connect Tech Inc. CANpro/104-Plus Opto (CRG001) card (http://www.connecttech.com)
- ASEM CAN raw - 2 isolated CAN channels (www.asem.it)
+config CAN_SJA1000_ISA
+ tristate "ISA Bus based legacy SJA1000 driver"
+ ---help---
+ This driver adds legacy support for SJA1000 chips connected to
+ the ISA bus using I/O port, memory mapped or indirect access.
+
+config CAN_SJA1000_PLATFORM
+ tristate "Generic Platform Bus based SJA1000 driver"
+ ---help---
+	  This driver adds support for the SJA1000 chips connected to
+	  the "platform bus" (the Linux abstraction for devices attached
+	  directly to the processor), which can be found on various
+	  boards from Phytec (http://www.phytec.de) like the PCM027 and
+	  PCM038. It also supports the OpenFirmware "platform bus" found
+	  on embedded systems with OpenFirmware bindings, e.g. if you
+	  have a PowerPC based system you may want to enable this option.
+
config CAN_TSCAN1
tristate "TS-CAN1 PC104 boards"
depends on ISA
diff --git a/drivers/net/can/sja1000/Makefile b/drivers/net/can/sja1000/Makefile
index 9253aaf9e739..500ce1dddaec 100644
--- a/drivers/net/can/sja1000/Makefile
+++ b/drivers/net/can/sja1000/Makefile
@@ -3,13 +3,14 @@
# Makefile for the SJA1000 CAN controller drivers.
#
-obj-$(CONFIG_CAN_SJA1000) += sja1000.o
-obj-$(CONFIG_CAN_SJA1000_ISA) += sja1000_isa.o
-obj-$(CONFIG_CAN_SJA1000_PLATFORM) += sja1000_platform.o
-obj-$(CONFIG_CAN_EMS_PCMCIA) += ems_pcmcia.o
obj-$(CONFIG_CAN_EMS_PCI) += ems_pci.o
+obj-$(CONFIG_CAN_EMS_PCMCIA) += ems_pcmcia.o
+obj-$(CONFIG_CAN_F81601) += f81601.o
obj-$(CONFIG_CAN_KVASER_PCI) += kvaser_pci.o
-obj-$(CONFIG_CAN_PEAK_PCMCIA) += peak_pcmcia.o
obj-$(CONFIG_CAN_PEAK_PCI) += peak_pci.o
+obj-$(CONFIG_CAN_PEAK_PCMCIA) += peak_pcmcia.o
obj-$(CONFIG_CAN_PLX_PCI) += plx_pci.o
+obj-$(CONFIG_CAN_SJA1000) += sja1000.o
+obj-$(CONFIG_CAN_SJA1000_ISA) += sja1000_isa.o
+obj-$(CONFIG_CAN_SJA1000_PLATFORM) += sja1000_platform.o
obj-$(CONFIG_CAN_TSCAN1) += tscan1.o
diff --git a/drivers/net/can/sja1000/f81601.c b/drivers/net/can/sja1000/f81601.c
new file mode 100644
index 000000000000..8f25e95814ef
--- /dev/null
+++ b/drivers/net/can/sja1000/f81601.c
@@ -0,0 +1,211 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Fintek F81601 PCIE to 2 CAN controller driver
+ *
+ * Copyright (C) 2019 Peter Hong <peter_hong@fintek.com.tw>
+ * Copyright (C) 2019 Linux Foundation
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/netdevice.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/pci.h>
+#include <linux/can/dev.h>
+#include <linux/io.h>
+
+#include "sja1000.h"
+
+#define F81601_PCI_MAX_CHAN 2
+
+#define F81601_DECODE_REG 0x209
+#define F81601_IO_MODE BIT(7)
+#define F81601_MEM_MODE BIT(6)
+#define F81601_CFG_MODE BIT(5)
+#define F81601_CAN2_INTERNAL_CLK BIT(3)
+#define F81601_CAN1_INTERNAL_CLK BIT(2)
+#define F81601_CAN2_EN BIT(1)
+#define F81601_CAN1_EN BIT(0)
+
+#define F81601_TRAP_REG 0x20a
+#define F81601_CAN2_HAS_EN BIT(4)
+
+struct f81601_pci_card {
+ void __iomem *addr;
+ spinlock_t lock; /* use this spin lock only for write access */
+ struct pci_dev *dev;
+ struct net_device *net_dev[F81601_PCI_MAX_CHAN];
+};
+
+static const struct pci_device_id f81601_pci_tbl[] = {
+ { PCI_DEVICE(0x1c29, 0x1703) },
+ { /* sentinel */ },
+};
+
+MODULE_DEVICE_TABLE(pci, f81601_pci_tbl);
+
+static bool internal_clk = true;
+module_param(internal_clk, bool, 0444);
+MODULE_PARM_DESC(internal_clk, "Use internal clock, default true (24MHz)");
+
+static unsigned int external_clk;
+module_param(external_clk, uint, 0444);
+MODULE_PARM_DESC(external_clk, "External clock when internal_clk disabled");
+
+static u8 f81601_pci_read_reg(const struct sja1000_priv *priv, int port)
+{
+ return readb(priv->reg_base + port);
+}
+
+static void f81601_pci_write_reg(const struct sja1000_priv *priv, int port,
+ u8 val)
+{
+ struct f81601_pci_card *card = priv->priv;
+ unsigned long flags;
+
+ spin_lock_irqsave(&card->lock, flags);
+ writeb(val, priv->reg_base + port);
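+	/* dummy read back flushes the posted write before the lock is released */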
+ readb(priv->reg_base);
+ spin_unlock_irqrestore(&card->lock, flags);
+}
+
+static void f81601_pci_remove(struct pci_dev *pdev)
+{
+ struct f81601_pci_card *card = pci_get_drvdata(pdev);
+ struct net_device *dev;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(card->net_dev); i++) {
+ dev = card->net_dev[i];
+ if (!dev)
+ continue;
+
+ dev_info(&pdev->dev, "%s: Removing %s\n", __func__, dev->name);
+
+ unregister_sja1000dev(dev);
+ free_sja1000dev(dev);
+ }
+}
+
+/* Probe an F81601-based device for the SJA1000 chips and register each
+ * available CAN channel with the SJA1000 Socket-CAN subsystem.
+ */
+static int f81601_pci_probe(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ struct sja1000_priv *priv;
+ struct net_device *dev;
+ struct f81601_pci_card *card;
+ int err, i, count;
+ u8 tmp;
+
+ if (pcim_enable_device(pdev) < 0) {
+ dev_err(&pdev->dev, "Failed to enable PCI device\n");
+ return -ENODEV;
+ }
+
+ dev_info(&pdev->dev, "Detected card at slot #%i\n",
+ PCI_SLOT(pdev->devfn));
+
+ card = devm_kzalloc(&pdev->dev, sizeof(*card), GFP_KERNEL);
+ if (!card)
+ return -ENOMEM;
+
+ card->dev = pdev;
+ spin_lock_init(&card->lock);
+
+ pci_set_drvdata(pdev, card);
+
+ tmp = F81601_IO_MODE | F81601_MEM_MODE | F81601_CFG_MODE |
+ F81601_CAN2_EN | F81601_CAN1_EN;
+
+ if (internal_clk) {
+ tmp |= F81601_CAN2_INTERNAL_CLK | F81601_CAN1_INTERNAL_CLK;
+
+ dev_info(&pdev->dev,
+ "F81601 running with internal clock: 24Mhz\n");
+ } else {
+ dev_info(&pdev->dev,
+ "F81601 running with external clock: %dMhz\n",
+ external_clk / 1000000);
+ }
+
+ pci_write_config_byte(pdev, F81601_DECODE_REG, tmp);
+
+ card->addr = pcim_iomap(pdev, 0, pci_resource_len(pdev, 0));
+
+ if (!card->addr) {
+ err = -ENOMEM;
+ dev_err(&pdev->dev, "%s: Failed to remap BAR\n", __func__);
+ goto failure_cleanup;
+ }
+
+	/* read the CAN2_HW_EN strap pin to detect how many CAN buses we have */
+ count = ARRAY_SIZE(card->net_dev);
+ pci_read_config_byte(pdev, F81601_TRAP_REG, &tmp);
+ if (!(tmp & F81601_CAN2_HAS_EN))
+ count = 1;
+
+ for (i = 0; i < count; i++) {
+ dev = alloc_sja1000dev(0);
+ if (!dev) {
+ err = -ENOMEM;
+ goto failure_cleanup;
+ }
+
+ priv = netdev_priv(dev);
+ priv->priv = card;
+ priv->irq_flags = IRQF_SHARED;
+ priv->reg_base = card->addr + 0x80 * i;
+ priv->read_reg = f81601_pci_read_reg;
+ priv->write_reg = f81601_pci_write_reg;
+
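+		/* the SJA1000 core runs at half the oscillator frequency */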
+ if (internal_clk)
+ priv->can.clock.freq = 24000000 / 2;
+ else
+ priv->can.clock.freq = external_clk / 2;
+
+ priv->ocr = OCR_TX0_PUSHPULL | OCR_TX1_PUSHPULL;
+ priv->cdr = CDR_CBP;
+
+ SET_NETDEV_DEV(dev, &pdev->dev);
+ dev->dev_id = i;
+ dev->irq = pdev->irq;
+
+ /* Register SJA1000 device */
+ err = register_sja1000dev(dev);
+ if (err) {
+ dev_err(&pdev->dev,
+ "%s: Registering device failed: %x\n", __func__,
+ err);
+ free_sja1000dev(dev);
+ goto failure_cleanup;
+ }
+
+ card->net_dev[i] = dev;
+ dev_info(&pdev->dev, "Channel #%d, %s at 0x%p, irq %d\n", i,
+ dev->name, priv->reg_base, dev->irq);
+ }
+
+ return 0;
+
+ failure_cleanup:
+ dev_err(&pdev->dev, "%s: failed: %d. Cleaning Up.\n", __func__, err);
+ f81601_pci_remove(pdev);
+
+ return err;
+}
+
+static struct pci_driver f81601_pci_driver = {
+ .name = "f81601",
+ .id_table = f81601_pci_tbl,
+ .probe = f81601_pci_probe,
+ .remove = f81601_pci_remove,
+};
+
+MODULE_DESCRIPTION("Fintek F81601 PCIE to 2 CANBUS adaptor driver");
+MODULE_AUTHOR("Peter Hong <peter_hong@fintek.com.tw>");
+MODULE_LICENSE("GPL v2");
+
+module_pci_driver(f81601_pci_driver);
diff --git a/drivers/net/can/sja1000/peak_pci.c b/drivers/net/can/sja1000/peak_pci.c
index 68366d57916c..8c0244f51059 100644
--- a/drivers/net/can/sja1000/peak_pci.c
+++ b/drivers/net/can/sja1000/peak_pci.c
@@ -417,7 +417,7 @@ static void peak_pciec_write_reg(const struct sja1000_priv *priv,
peak_pci_write_reg(priv, port, val);
}
-static struct i2c_algo_bit_data peak_pciec_i2c_bit_ops = {
+static const struct i2c_algo_bit_data peak_pciec_i2c_bit_ops = {
.setsda = pita_setsda,
.setscl = pita_setscl,
.getsda = pita_getsda,
diff --git a/drivers/net/can/spi/hi311x.c b/drivers/net/can/spi/hi311x.c
index 03a711c3221b..73d48c3b8ded 100644
--- a/drivers/net/can/spi/hi311x.c
+++ b/drivers/net/can/spi/hi311x.c
@@ -21,7 +21,6 @@
#include <linux/completion.h>
#include <linux/delay.h>
#include <linux/device.h>
-#include <linux/dma-mapping.h>
#include <linux/freezer.h>
#include <linux/interrupt.h>
#include <linux/io.h>
@@ -126,10 +125,6 @@
#define DEVICE_NAME "hi3110"
-static int hi3110_enable_dma = 1; /* Enable SPI DMA. Default: 1 (On) */
-module_param(hi3110_enable_dma, int, 0444);
-MODULE_PARM_DESC(hi3110_enable_dma, "Enable SPI DMA. Default: 1 (On)");
-
static const struct can_bittiming_const hi3110_bittiming_const = {
.name = DEVICE_NAME,
.tseg1_min = 2,
@@ -156,8 +151,6 @@ struct hi3110_priv {
u8 *spi_tx_buf;
u8 *spi_rx_buf;
- dma_addr_t spi_tx_dma;
- dma_addr_t spi_rx_dma;
struct sk_buff *tx_skb;
int tx_len;
@@ -184,8 +177,7 @@ static void hi3110_clean(struct net_device *net)
if (priv->tx_skb || priv->tx_len)
net->stats.tx_errors++;
- if (priv->tx_skb)
- dev_kfree_skb(priv->tx_skb);
+ dev_kfree_skb(priv->tx_skb);
if (priv->tx_len)
can_free_echo_skb(priv->net, 0);
priv->tx_skb = NULL;
@@ -217,13 +209,6 @@ static int hi3110_spi_trans(struct spi_device *spi, int len)
int ret;
spi_message_init(&m);
-
- if (hi3110_enable_dma) {
- t.tx_dma = priv->spi_tx_dma;
- t.rx_dma = priv->spi_rx_dma;
- m.is_dma_mapped = 1;
- }
-
spi_message_add_tail(&t, &m);
ret = spi_sync(spi, &m);
@@ -915,43 +900,18 @@ static int hi3110_can_probe(struct spi_device *spi)
priv->spi = spi;
mutex_init(&priv->hi3110_lock);
- /* If requested, allocate DMA buffers */
- if (hi3110_enable_dma) {
- spi->dev.coherent_dma_mask = ~0;
-
- /* Minimum coherent DMA allocation is PAGE_SIZE, so allocate
- * that much and share it between Tx and Rx DMA buffers.
- */
- priv->spi_tx_buf = dmam_alloc_coherent(&spi->dev,
- PAGE_SIZE,
- &priv->spi_tx_dma,
- GFP_DMA);
-
- if (priv->spi_tx_buf) {
- priv->spi_rx_buf = (priv->spi_tx_buf + (PAGE_SIZE / 2));
- priv->spi_rx_dma = (dma_addr_t)(priv->spi_tx_dma +
- (PAGE_SIZE / 2));
- } else {
- /* Fall back to non-DMA */
- hi3110_enable_dma = 0;
- }
+ priv->spi_tx_buf = devm_kzalloc(&spi->dev, HI3110_RX_BUF_LEN,
+ GFP_KERNEL);
+ if (!priv->spi_tx_buf) {
+ ret = -ENOMEM;
+ goto error_probe;
}
+ priv->spi_rx_buf = devm_kzalloc(&spi->dev, HI3110_RX_BUF_LEN,
+ GFP_KERNEL);
- /* Allocate non-DMA buffers */
- if (!hi3110_enable_dma) {
- priv->spi_tx_buf = devm_kzalloc(&spi->dev, HI3110_RX_BUF_LEN,
- GFP_KERNEL);
- if (!priv->spi_tx_buf) {
- ret = -ENOMEM;
- goto error_probe;
- }
- priv->spi_rx_buf = devm_kzalloc(&spi->dev, HI3110_RX_BUF_LEN,
- GFP_KERNEL);
-
- if (!priv->spi_rx_buf) {
- ret = -ENOMEM;
- goto error_probe;
- }
+ if (!priv->spi_rx_buf) {
+ ret = -ENOMEM;
+ goto error_probe;
}
SET_NETDEV_DEV(net, &spi->dev);
diff --git a/drivers/net/can/spi/mcp251x.c b/drivers/net/can/spi/mcp251x.c
index 12358f06d194..58992fd61cb9 100644
--- a/drivers/net/can/spi/mcp251x.c
+++ b/drivers/net/can/spi/mcp251x.c
@@ -1,6 +1,5 @@
// SPDX-License-Identifier: GPL-2.0-only
-/*
- * CAN bus driver for Microchip 251x/25625 CAN Controller with SPI Interface
+/* CAN bus driver for Microchip 251x/25625 CAN Controller with SPI Interface
*
* MCP2510 support and bug fixes by Christian Pellegrin
* <chripell@evolware.org>
@@ -48,7 +47,6 @@
#include <linux/completion.h>
#include <linux/delay.h>
#include <linux/device.h>
-#include <linux/dma-mapping.h>
#include <linux/freezer.h>
#include <linux/interrupt.h>
#include <linux/io.h>
@@ -75,7 +73,6 @@
#define RTS_TXB2 0x04
#define INSTRUCTION_RTS(n) (0x80 | ((n) & 0x07))
-
/* MPC251x registers */
#define CANSTAT 0x0e
#define CANCTRL 0x0f
@@ -191,8 +188,7 @@
#define SET_BYTE(val, byte) \
(((val) & 0xff) << ((byte) * 8))
-/*
- * Buffer size required for the largest SPI transfer (i.e., reading a
+/* Buffer size required for the largest SPI transfer (i.e., reading a
* frame)
*/
#define CAN_FRAME_MAX_DATA_LEN 8
@@ -205,10 +201,6 @@
#define DEVICE_NAME "mcp251x"
-static int mcp251x_enable_dma; /* Enable SPI DMA. Default: 0 (Off) */
-module_param(mcp251x_enable_dma, int, 0444);
-MODULE_PARM_DESC(mcp251x_enable_dma, "Enable SPI DMA. Default: 0 (Off)");
-
static const struct can_bittiming_const mcp251x_bittiming_const = {
.name = DEVICE_NAME,
.tseg1_min = 3,
@@ -237,8 +229,6 @@ struct mcp251x_priv {
u8 *spi_tx_buf;
u8 *spi_rx_buf;
- dma_addr_t spi_tx_dma;
- dma_addr_t spi_rx_dma;
struct sk_buff *tx_skb;
int tx_len;
@@ -274,16 +264,14 @@ static void mcp251x_clean(struct net_device *net)
if (priv->tx_skb || priv->tx_len)
net->stats.tx_errors++;
- if (priv->tx_skb)
- dev_kfree_skb(priv->tx_skb);
+ dev_kfree_skb(priv->tx_skb);
if (priv->tx_len)
can_free_echo_skb(priv->net, 0);
priv->tx_skb = NULL;
priv->tx_len = 0;
}
-/*
- * Note about handling of error return of mcp251x_spi_trans: accessing
+/* Note about handling of error return of mcp251x_spi_trans: accessing
* registers via SPI is not really different conceptually than using
* normal I/O assembler instructions, although it's much more
* complicated from a practical POV. So it's not advisable to always
@@ -308,13 +296,6 @@ static int mcp251x_spi_trans(struct spi_device *spi, int len)
int ret;
spi_message_init(&m);
-
- if (mcp251x_enable_dma) {
- t.tx_dma = priv->spi_tx_dma;
- t.rx_dma = priv->spi_rx_dma;
- m.is_dma_mapped = 1;
- }
-
spi_message_add_tail(&t, &m);
ret = spi_sync(spi, &m);
@@ -323,7 +304,7 @@ static int mcp251x_spi_trans(struct spi_device *spi, int len)
return ret;
}
-static u8 mcp251x_read_reg(struct spi_device *spi, uint8_t reg)
+static u8 mcp251x_read_reg(struct spi_device *spi, u8 reg)
{
struct mcp251x_priv *priv = spi_get_drvdata(spi);
u8 val = 0;
@@ -337,8 +318,7 @@ static u8 mcp251x_read_reg(struct spi_device *spi, uint8_t reg)
return val;
}
-static void mcp251x_read_2regs(struct spi_device *spi, uint8_t reg,
- uint8_t *v1, uint8_t *v2)
+static void mcp251x_read_2regs(struct spi_device *spi, u8 reg, u8 *v1, u8 *v2)
{
struct mcp251x_priv *priv = spi_get_drvdata(spi);
@@ -351,7 +331,7 @@ static void mcp251x_read_2regs(struct spi_device *spi, uint8_t reg,
*v2 = priv->spi_rx_buf[3];
}
-static void mcp251x_write_reg(struct spi_device *spi, u8 reg, uint8_t val)
+static void mcp251x_write_reg(struct spi_device *spi, u8 reg, u8 val)
{
struct mcp251x_priv *priv = spi_get_drvdata(spi);
@@ -363,7 +343,7 @@ static void mcp251x_write_reg(struct spi_device *spi, u8 reg, uint8_t val)
}
static void mcp251x_write_bits(struct spi_device *spi, u8 reg,
- u8 mask, uint8_t val)
+ u8 mask, u8 val)
{
struct mcp251x_priv *priv = spi_get_drvdata(spi);
@@ -565,8 +545,7 @@ static int mcp251x_set_normal_mode(struct spi_device *spi)
while (mcp251x_read_reg(spi, CANSTAT) & CANCTRL_REQOP_MASK) {
schedule();
if (time_after(jiffies, timeout)) {
- dev_err(&spi->dev, "MCP251x didn't"
- " enter in normal mode\n");
+ dev_err(&spi->dev, "MCP251x didn't enter in normal mode\n");
return -EBUSY;
}
}
@@ -612,7 +591,7 @@ static int mcp251x_setup(struct net_device *net, struct spi_device *spi)
static int mcp251x_hw_reset(struct spi_device *spi)
{
struct mcp251x_priv *priv = spi_get_drvdata(spi);
- u8 reg;
+ unsigned long timeout;
int ret;
/* Wait for oscillator startup timer after power up */
@@ -626,10 +605,19 @@ static int mcp251x_hw_reset(struct spi_device *spi)
/* Wait for oscillator startup timer after reset */
mdelay(MCP251X_OST_DELAY_MS);
- reg = mcp251x_read_reg(spi, CANSTAT);
- if ((reg & CANCTRL_REQOP_MASK) != CANCTRL_REQOP_CONF)
- return -ENODEV;
-
+ /* Wait for reset to finish */
+ timeout = jiffies + HZ;
+ while ((mcp251x_read_reg(spi, CANSTAT) & CANCTRL_REQOP_MASK) !=
+ CANCTRL_REQOP_CONF) {
+ usleep_range(MCP251X_OST_DELAY_MS * 1000,
+ MCP251X_OST_DELAY_MS * 1000 * 2);
+
+ if (time_after(jiffies, timeout)) {
+ dev_err(&spi->dev,
+ "MCP251x didn't enter in conf mode after reset\n");
+ return -EBUSY;
+ }
+ }
return 0;
}
@@ -799,7 +787,8 @@ static irqreturn_t mcp251x_can_ist(int irq, void *dev_id)
* (The MCP2515/25625 does this automatically.)
*/
if (mcp251x_is_2510(spi))
- mcp251x_write_bits(spi, CANINTF, CANINTF_RX0IF, 0x00);
+ mcp251x_write_bits(spi, CANINTF,
+ CANINTF_RX0IF, 0x00);
}
/* receive buffer 1 */
@@ -900,7 +889,6 @@ static irqreturn_t mcp251x_can_ist(int irq, void *dev_id)
}
netif_wake_queue(net);
}
-
}
mutex_unlock(&priv->mcp_lock);
return IRQ_HANDLED;
@@ -910,7 +898,7 @@ static int mcp251x_open(struct net_device *net)
{
struct mcp251x_priv *priv = netdev_priv(net);
struct spi_device *spi = priv->spi;
- unsigned long flags = IRQF_ONESHOT | IRQF_TRIGGER_FALLING;
+ unsigned long flags = 0;
int ret;
ret = open_candev(net);
@@ -926,8 +914,12 @@ static int mcp251x_open(struct net_device *net)
priv->tx_skb = NULL;
priv->tx_len = 0;
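+	/* DT setups take the IRQ trigger type from the interrupts property;
+	 * only legacy non-DT boards need an explicit falling-edge trigger.
+	 */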
+ if (!spi->dev.of_node)
+ flags = IRQF_TRIGGER_FALLING;
+
ret = request_threaded_irq(spi->irq, NULL, mcp251x_can_ist,
- flags | IRQF_ONESHOT, DEVICE_NAME, priv);
+ flags | IRQF_ONESHOT, dev_name(&spi->dev),
+ priv);
if (ret) {
dev_err(&spi->dev, "failed to acquire irq %d\n", spi->irq);
goto out_close;
@@ -1090,43 +1082,18 @@ static int mcp251x_can_probe(struct spi_device *spi)
priv->spi = spi;
mutex_init(&priv->mcp_lock);
- /* If requested, allocate DMA buffers */
- if (mcp251x_enable_dma) {
- spi->dev.coherent_dma_mask = ~0;
-
- /*
- * Minimum coherent DMA allocation is PAGE_SIZE, so allocate
- * that much and share it between Tx and Rx DMA buffers.
- */
- priv->spi_tx_buf = dmam_alloc_coherent(&spi->dev,
- PAGE_SIZE,
- &priv->spi_tx_dma,
- GFP_DMA);
-
- if (priv->spi_tx_buf) {
- priv->spi_rx_buf = (priv->spi_tx_buf + (PAGE_SIZE / 2));
- priv->spi_rx_dma = (dma_addr_t)(priv->spi_tx_dma +
- (PAGE_SIZE / 2));
- } else {
- /* Fall back to non-DMA */
- mcp251x_enable_dma = 0;
- }
+ priv->spi_tx_buf = devm_kzalloc(&spi->dev, SPI_TRANSFER_BUF_LEN,
+ GFP_KERNEL);
+ if (!priv->spi_tx_buf) {
+ ret = -ENOMEM;
+ goto error_probe;
}
- /* Allocate non-DMA buffers */
- if (!mcp251x_enable_dma) {
- priv->spi_tx_buf = devm_kzalloc(&spi->dev, SPI_TRANSFER_BUF_LEN,
- GFP_KERNEL);
- if (!priv->spi_tx_buf) {
- ret = -ENOMEM;
- goto error_probe;
- }
- priv->spi_rx_buf = devm_kzalloc(&spi->dev, SPI_TRANSFER_BUF_LEN,
- GFP_KERNEL);
- if (!priv->spi_rx_buf) {
- ret = -ENOMEM;
- goto error_probe;
- }
+ priv->spi_rx_buf = devm_kzalloc(&spi->dev, SPI_TRANSFER_BUF_LEN,
+ GFP_KERNEL);
+ if (!priv->spi_rx_buf) {
+ ret = -ENOMEM;
+ goto error_probe;
}
SET_NETDEV_DEV(net, &spi->dev);
@@ -1135,7 +1102,8 @@ static int mcp251x_can_probe(struct spi_device *spi)
ret = mcp251x_hw_probe(spi);
if (ret) {
if (ret == -ENODEV)
- dev_err(&spi->dev, "Cannot initialize MCP%x. Wrong wiring?\n", priv->model);
+ dev_err(&spi->dev, "Cannot initialize MCP%x. Wrong wiring?\n",
+ priv->model);
goto error_probe;
}
@@ -1189,8 +1157,7 @@ static int __maybe_unused mcp251x_can_suspend(struct device *dev)
priv->force_quit = 1;
disable_irq(spi->irq);
- /*
- * Note: at this point neither IST nor workqueues are running.
+ /* Note: at this point neither IST nor workqueues are running.
* open/stop cannot be called anyway so locking is not needed
*/
if (netif_running(net)) {
diff --git a/drivers/net/can/sun4i_can.c b/drivers/net/can/sun4i_can.c
index 093fc9a529f0..f4cd88196404 100644
--- a/drivers/net/can/sun4i_can.c
+++ b/drivers/net/can/sun4i_can.c
@@ -787,7 +787,6 @@ static int sun4ican_probe(struct platform_device *pdev)
irq = platform_get_irq(pdev, 0);
if (irq < 0) {
- dev_err(&pdev->dev, "could not get a valid irq\n");
err = -ENODEV;
goto exit;
}
diff --git a/drivers/net/can/ti_hecc.c b/drivers/net/can/ti_hecc.c
index db6ea936dc3f..f8b19eef5d26 100644
--- a/drivers/net/can/ti_hecc.c
+++ b/drivers/net/can/ti_hecc.c
@@ -5,6 +5,7 @@
* specs for the same is available at <http://www.ti.com>
*
* Copyright (C) 2009 Texas Instruments Incorporated - http://www.ti.com/
+ * Copyright (C) 2019 Jeroen Hofstee <jhofstee@victronenergy.com>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
@@ -34,6 +35,7 @@
#include <linux/can/dev.h>
#include <linux/can/error.h>
#include <linux/can/led.h>
+#include <linux/can/rx-offload.h>
#define DRV_NAME "ti_hecc"
#define HECC_MODULE_VERSION "0.7"
@@ -44,8 +46,7 @@ MODULE_VERSION(HECC_MODULE_VERSION);
#define HECC_MAX_MAILBOXES 32 /* hardware mailboxes - do not change */
#define MAX_TX_PRIO 0x3F /* hardware value - do not change */
-/*
- * Important Note: TX mailbox configuration
+/* Important Note: TX mailbox configuration
* TX mailboxes should be restricted to the number of SKB buffers to avoid
* maintaining SKB buffers separately. TX mailboxes should be a power of 2
* for the mailbox logic to work. Top mailbox numbers are reserved for RX
@@ -63,29 +64,15 @@ MODULE_VERSION(HECC_MODULE_VERSION);
#define HECC_TX_PRIO_MASK (MAX_TX_PRIO << HECC_MB_TX_SHIFT)
#define HECC_TX_MB_MASK (HECC_MAX_TX_MBOX - 1)
#define HECC_TX_MASK ((HECC_MAX_TX_MBOX - 1) | HECC_TX_PRIO_MASK)
-#define HECC_TX_MBOX_MASK (~(BIT(HECC_MAX_TX_MBOX) - 1))
-#define HECC_DEF_NAPI_WEIGHT HECC_MAX_RX_MBOX
-/*
- * Important Note: RX mailbox configuration
- * RX mailboxes are further logically split into two - main and buffer
- * mailboxes. The goal is to get all packets into main mailboxes as
- * driven by mailbox number and receive priority (higher to lower) and
- * buffer mailboxes are used to receive pkts while main mailboxes are being
- * processed. This ensures in-order packet reception.
+/* RX mailbox configuration
*
- * Here are the recommended values for buffer mailbox. Note that RX mailboxes
- * start after TX mailboxes:
- *
- * HECC_MAX_RX_MBOX HECC_RX_BUFFER_MBOX No of buffer mailboxes
- * 28 12 8
- * 16 20 4
+ * The remaining mailboxes are used for reception and are delivered
+ * based on their timestamp, to avoid a hardware race when CANME is
+ * changed while CAN-bus traffic is being received.
*/
-
#define HECC_MAX_RX_MBOX (HECC_MAX_MAILBOXES - HECC_MAX_TX_MBOX)
-#define HECC_RX_BUFFER_MBOX 12 /* as per table above */
#define HECC_RX_FIRST_MBOX (HECC_MAX_MAILBOXES - 1)
-#define HECC_RX_HIGH_MBOX_MASK (~(BIT(HECC_RX_BUFFER_MBOX) - 1))
/* TI HECC module registers */
#define HECC_CANME 0x0 /* Mailbox enable */
@@ -117,6 +104,9 @@ MODULE_VERSION(HECC_MODULE_VERSION);
#define HECC_CANTIOCE 0x68 /* SCC only:Enhanced TX I/O control */
#define HECC_CANRIOCE 0x6C /* SCC only:Enhanced RX I/O control */
+/* TI HECC RAM registers */
+#define HECC_CANMOTS 0x80 /* Message object time stamp */
+
/* Mailbox registers */
#define HECC_CANMID 0x0
#define HECC_CANMCF 0x4
@@ -193,7 +183,7 @@ static const struct can_bittiming_const ti_hecc_bittiming_const = {
struct ti_hecc_priv {
struct can_priv can; /* MUST be first member/field */
- struct napi_struct napi;
+ struct can_rx_offload offload;
struct net_device *ndev;
struct clk *clk;
void __iomem *base;
@@ -203,7 +193,6 @@ struct ti_hecc_priv {
spinlock_t mbx_lock; /* CANME register needs protection */
u32 tx_head;
u32 tx_tail;
- u32 rx_next;
struct regulator *reg_xceiver;
};
@@ -227,8 +216,13 @@ static inline void hecc_write_lam(struct ti_hecc_priv *priv, u32 mbxno, u32 val)
__raw_writel(val, priv->hecc_ram + mbxno * 4);
}
+static inline u32 hecc_read_stamp(struct ti_hecc_priv *priv, u32 mbxno)
+{
+ return __raw_readl(priv->hecc_ram + HECC_CANMOTS + mbxno * 4);
+}
+
static inline void hecc_write_mbx(struct ti_hecc_priv *priv, u32 mbxno,
- u32 reg, u32 val)
+ u32 reg, u32 val)
{
__raw_writel(val, priv->mbx + mbxno * 0x10 + reg);
}
@@ -249,13 +243,13 @@ static inline u32 hecc_read(struct ti_hecc_priv *priv, int reg)
}
static inline void hecc_set_bit(struct ti_hecc_priv *priv, int reg,
- u32 bit_mask)
+ u32 bit_mask)
{
hecc_write(priv, reg, hecc_read(priv, reg) | bit_mask);
}
static inline void hecc_clear_bit(struct ti_hecc_priv *priv, int reg,
- u32 bit_mask)
+ u32 bit_mask)
{
hecc_write(priv, reg, hecc_read(priv, reg) & ~bit_mask);
}
@@ -277,8 +271,8 @@ static int ti_hecc_set_btc(struct ti_hecc_priv *priv)
if (bit_timing->brp > 4)
can_btc |= HECC_CANBTC_SAM;
else
- netdev_warn(priv->ndev, "WARN: Triple"
- "sampling not set due to h/w limitations");
+ netdev_warn(priv->ndev,
+ "WARN: Triple sampling not set due to h/w limitations");
}
can_btc |= ((bit_timing->sjw - 1) & 0x3) << 8;
can_btc |= ((bit_timing->brp - 1) & 0xFF) << 16;
@@ -314,8 +308,7 @@ static void ti_hecc_reset(struct net_device *ndev)
/* Set change control request and wait till enabled */
hecc_set_bit(priv, HECC_CANMC, HECC_CANMC_CCR);
- /*
- * INFO: It has been observed that at times CCE bit may not be
+ /* INFO: It has been observed that at times CCE bit may not be
* set and hw seems to be ok even if this bit is not set so
* timing out with a timing of 1ms to respect the specs
*/
@@ -325,8 +318,7 @@ static void ti_hecc_reset(struct net_device *ndev)
udelay(10);
}
- /*
- * Note: On HECC, BTC can be programmed only in initialization mode, so
+ /* Note: On HECC, BTC can be programmed only in initialization mode, so
* it is expected that the can bittiming parameters are set via ip
* utility before the device is opened
*/
@@ -335,13 +327,11 @@ static void ti_hecc_reset(struct net_device *ndev)
/* Clear CCR (and CANMC register) and wait for CCE = 0 enable */
hecc_write(priv, HECC_CANMC, 0);
- /*
- * INFO: CAN net stack handles bus off and hence disabling auto-bus-on
+ /* INFO: CAN net stack handles bus off and hence disabling auto-bus-on
* hecc_set_bit(priv, HECC_CANMC, HECC_CANMC_ABO);
*/
- /*
- * INFO: It has been observed that at times CCE bit may not be
+ /* INFO: It has been observed that at times CCE bit may not be
* set and hw seems to be ok even if this bit is not set so
*/
cnt = HECC_CCE_WAIT_COUNT;
@@ -374,8 +364,8 @@ static void ti_hecc_start(struct net_device *ndev)
/* put HECC in initialization mode and set btc */
ti_hecc_reset(ndev);
- priv->tx_head = priv->tx_tail = HECC_TX_MASK;
- priv->rx_next = HECC_RX_FIRST_MBOX;
+ priv->tx_head = HECC_TX_MASK;
+ priv->tx_tail = HECC_TX_MASK;
/* Enable local and global acceptance mask registers */
hecc_write(priv, HECC_CANGAM, HECC_SET_REG);
@@ -401,7 +391,7 @@ static void ti_hecc_start(struct net_device *ndev)
} else {
hecc_write(priv, HECC_CANMIL, 0);
hecc_write(priv, HECC_CANGIM,
- HECC_CANGIM_DEF_MASK | HECC_CANGIM_I0EN);
+ HECC_CANGIM_DEF_MASK | HECC_CANGIM_I0EN);
}
priv->can.state = CAN_STATE_ERROR_ACTIVE;
}
@@ -435,7 +425,7 @@ static int ti_hecc_do_set_mode(struct net_device *ndev, enum can_mode mode)
}
static int ti_hecc_get_berr_counter(const struct net_device *ndev,
- struct can_berr_counter *bec)
+ struct can_berr_counter *bec)
{
struct ti_hecc_priv *priv = netdev_priv(ndev);
@@ -445,8 +435,7 @@ static int ti_hecc_get_berr_counter(const struct net_device *ndev,
return 0;
}
-/*
- * ti_hecc_xmit: HECC Transmit
+/* ti_hecc_xmit: HECC Transmit
*
* The transmit mailboxes start from 0 to HECC_MAX_TX_MBOX. In HECC the
* priority of the mailbox for transmission is dependent upon priority setting
@@ -484,8 +473,8 @@ static netdev_tx_t ti_hecc_xmit(struct sk_buff *skb, struct net_device *ndev)
spin_unlock_irqrestore(&priv->mbx_lock, flags);
netif_stop_queue(ndev);
netdev_err(priv->ndev,
- "BUG: TX mbx not ready tx_head=%08X, tx_tail=%08X\n",
- priv->tx_head, priv->tx_tail);
+ "BUG: TX mbx not ready tx_head=%08X, tx_tail=%08X\n",
+ priv->tx_head, priv->tx_tail);
return NETDEV_TX_BUSY;
}
spin_unlock_irqrestore(&priv->mbx_lock, flags);
@@ -502,10 +491,10 @@ static netdev_tx_t ti_hecc_xmit(struct sk_buff *skb, struct net_device *ndev)
data = (cf->can_id & CAN_SFF_MASK) << 18;
hecc_write_mbx(priv, mbxno, HECC_CANMID, data);
hecc_write_mbx(priv, mbxno, HECC_CANMDL,
- be32_to_cpu(*(__be32 *)(cf->data)));
+ be32_to_cpu(*(__be32 *)(cf->data)));
if (cf->can_dlc > 4)
hecc_write_mbx(priv, mbxno, HECC_CANMDH,
- be32_to_cpu(*(__be32 *)(cf->data + 4)));
+ be32_to_cpu(*(__be32 *)(cf->data + 4)));
else
*(u32 *)(cf->data + 4) = 0;
can_put_echo_skb(skb, ndev, mbxno);
@@ -513,7 +502,7 @@ static netdev_tx_t ti_hecc_xmit(struct sk_buff *skb, struct net_device *ndev)
spin_lock_irqsave(&priv->mbx_lock, flags);
--priv->tx_head;
if ((hecc_read(priv, HECC_CANME) & BIT(get_tx_head_mb(priv))) ||
- (priv->tx_head & HECC_TX_MASK) == HECC_TX_MASK) {
+ (priv->tx_head & HECC_TX_MASK) == HECC_TX_MASK) {
netif_stop_queue(ndev);
}
hecc_set_bit(priv, HECC_CANME, mbx_mask);
@@ -526,139 +515,57 @@ static netdev_tx_t ti_hecc_xmit(struct sk_buff *skb, struct net_device *ndev)
return NETDEV_TX_OK;
}
-static int ti_hecc_rx_pkt(struct ti_hecc_priv *priv, int mbxno)
+static inline
+struct ti_hecc_priv *rx_offload_to_priv(struct can_rx_offload *offload)
{
- struct net_device_stats *stats = &priv->ndev->stats;
- struct can_frame *cf;
- struct sk_buff *skb;
- u32 data, mbx_mask;
- unsigned long flags;
+ return container_of(offload, struct ti_hecc_priv, offload);
+}
- skb = alloc_can_skb(priv->ndev, &cf);
- if (!skb) {
- if (printk_ratelimit())
- netdev_err(priv->ndev,
- "ti_hecc_rx_pkt: alloc_can_skb() failed\n");
- return -ENOMEM;
- }
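+/* rx-offload callback: fill @cf and @timestamp from mailbox @mbxno and
+ * return 1 so the core queues the frame, sorted by timestamp.
+ */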
+static unsigned int ti_hecc_mailbox_read(struct can_rx_offload *offload,
+ struct can_frame *cf,
+ u32 *timestamp, unsigned int mbxno)
+{
+ struct ti_hecc_priv *priv = rx_offload_to_priv(offload);
+ u32 data;
- mbx_mask = BIT(mbxno);
data = hecc_read_mbx(priv, mbxno, HECC_CANMID);
if (data & HECC_CANMID_IDE)
cf->can_id = (data & CAN_EFF_MASK) | CAN_EFF_FLAG;
else
cf->can_id = (data >> 18) & CAN_SFF_MASK;
+
data = hecc_read_mbx(priv, mbxno, HECC_CANMCF);
if (data & HECC_CANMCF_RTR)
cf->can_id |= CAN_RTR_FLAG;
cf->can_dlc = get_can_dlc(data & 0xF);
+
data = hecc_read_mbx(priv, mbxno, HECC_CANMDL);
*(__be32 *)(cf->data) = cpu_to_be32(data);
if (cf->can_dlc > 4) {
data = hecc_read_mbx(priv, mbxno, HECC_CANMDH);
*(__be32 *)(cf->data + 4) = cpu_to_be32(data);
}
- spin_lock_irqsave(&priv->mbx_lock, flags);
- hecc_clear_bit(priv, HECC_CANME, mbx_mask);
- hecc_write(priv, HECC_CANRMP, mbx_mask);
- /* enable mailbox only if it is part of rx buffer mailboxes */
- if (priv->rx_next < HECC_RX_BUFFER_MBOX)
- hecc_set_bit(priv, HECC_CANME, mbx_mask);
- spin_unlock_irqrestore(&priv->mbx_lock, flags);
- stats->rx_bytes += cf->can_dlc;
- can_led_event(priv->ndev, CAN_LED_EVENT_RX);
- netif_receive_skb(skb);
- stats->rx_packets++;
+ *timestamp = hecc_read_stamp(priv, mbxno);
- return 0;
-}
-
-/*
- * ti_hecc_rx_poll - HECC receive pkts
- *
- * The receive mailboxes start from highest numbered mailbox till last xmit
- * mailbox. On CAN frame reception the hardware places the data into highest
- * numbered mailbox that matches the CAN ID filter. Since all receive mailboxes
- * have same filtering (ALL CAN frames) packets will arrive in the highest
- * available RX mailbox and we need to ensure in-order packet reception.
- *
- * To ensure the packets are received in the right order we logically divide
- * the RX mailboxes into main and buffer mailboxes. Packets are received as per
- * mailbox priotity (higher to lower) in the main bank and once it is full we
- * disable further reception into main mailboxes. While the main mailboxes are
- * processed in NAPI, further packets are received in buffer mailboxes.
- *
- * We maintain a RX next mailbox counter to process packets and once all main
- * mailboxe packets are passed to the upper stack we enable all of them but
- * continue to process packets received in buffer mailboxes. With each packet
- * received from buffer mailbox we enable it immediately so as to handle the
- * overflow from higher mailboxes.
- */
-static int ti_hecc_rx_poll(struct napi_struct *napi, int quota)
-{
- struct net_device *ndev = napi->dev;
- struct ti_hecc_priv *priv = netdev_priv(ndev);
- u32 num_pkts = 0;
- u32 mbx_mask;
- unsigned long pending_pkts, flags;
-
- if (!netif_running(ndev))
- return 0;
-
- while ((pending_pkts = hecc_read(priv, HECC_CANRMP)) &&
- num_pkts < quota) {
- mbx_mask = BIT(priv->rx_next); /* next rx mailbox to process */
- if (mbx_mask & pending_pkts) {
- if (ti_hecc_rx_pkt(priv, priv->rx_next) < 0)
- return num_pkts;
- ++num_pkts;
- } else if (priv->rx_next > HECC_RX_BUFFER_MBOX) {
- break; /* pkt not received yet */
- }
- --priv->rx_next;
- if (priv->rx_next == HECC_RX_BUFFER_MBOX) {
- /* enable high bank mailboxes */
- spin_lock_irqsave(&priv->mbx_lock, flags);
- mbx_mask = hecc_read(priv, HECC_CANME);
- mbx_mask |= HECC_RX_HIGH_MBOX_MASK;
- hecc_write(priv, HECC_CANME, mbx_mask);
- spin_unlock_irqrestore(&priv->mbx_lock, flags);
- } else if (priv->rx_next == HECC_MAX_TX_MBOX - 1) {
- priv->rx_next = HECC_RX_FIRST_MBOX;
- break;
- }
- }
-
- /* Enable packet interrupt if all pkts are handled */
- if (hecc_read(priv, HECC_CANRMP) == 0) {
- napi_complete(napi);
- /* Re-enable RX mailbox interrupts */
- mbx_mask = hecc_read(priv, HECC_CANMIM);
- mbx_mask |= HECC_TX_MBOX_MASK;
- hecc_write(priv, HECC_CANMIM, mbx_mask);
- } else {
- /* repoll is done only if whole budget is used */
- num_pkts = quota;
- }
-
- return num_pkts;
+ return 1;
}
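
The new rx_offload_to_priv() helper is the usual container_of() idiom: the can_rx_offload state is embedded in the driver's private structure, so a pointer to the member is enough to recover the whole structure. A minimal, compilable userspace sketch of that idiom (names illustrative, not from the driver):

/* container_of(): recover the enclosing struct from a pointer to an
 * embedded member, exactly as rx_offload_to_priv() does above. */
#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct offload { int mb_first, mb_last; };

struct priv {
	int base;
	struct offload offload;	/* embedded, as in ti_hecc_priv */
};

int main(void)
{
	struct priv p = { .base = 0x1000, .offload = { 12, 28 } };
	struct offload *o = &p.offload;
	/* recover the enclosing priv from the embedded member */
	struct priv *back = container_of(o, struct priv, offload);

	printf("base=0x%x first=%d\n", back->base, o->mb_first);
	return 0;
}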
static int ti_hecc_error(struct net_device *ndev, int int_status,
- int err_status)
+ int err_status)
{
struct ti_hecc_priv *priv = netdev_priv(ndev);
- struct net_device_stats *stats = &ndev->stats;
struct can_frame *cf;
struct sk_buff *skb;
+ u32 timestamp;
/* propagate the error condition to the can stack */
skb = alloc_can_err_skb(ndev, &cf);
if (!skb) {
if (printk_ratelimit())
netdev_err(priv->ndev,
- "ti_hecc_error: alloc_can_err_skb() failed\n");
+ "%s: alloc_can_err_skb() failed\n",
+ __func__);
return -ENOMEM;
}
@@ -692,8 +599,7 @@ static int ti_hecc_error(struct net_device *ndev, int int_status,
hecc_clear_bit(priv, HECC_CANMC, HECC_CANMC_CCR);
}
- /*
- * Need to check busoff condition in error status register too to
+ /* Need to check busoff condition in error status register too to
* ensure warning interrupts don't hog the system
*/
if ((int_status & HECC_CANGIF_BOIF) || (err_status & HECC_CANES_BO)) {
@@ -732,9 +638,8 @@ static int ti_hecc_error(struct net_device *ndev, int int_status,
}
}
- stats->rx_packets++;
- stats->rx_bytes += cf->can_dlc;
- netif_rx(skb);
+ timestamp = hecc_read(priv, HECC_CANLNT);
+ can_rx_offload_queue_sorted(&priv->offload, skb, timestamp);
return 0;
}
@@ -744,19 +649,20 @@ static irqreturn_t ti_hecc_interrupt(int irq, void *dev_id)
struct net_device *ndev = (struct net_device *)dev_id;
struct ti_hecc_priv *priv = netdev_priv(ndev);
struct net_device_stats *stats = &ndev->stats;
- u32 mbxno, mbx_mask, int_status, err_status;
- unsigned long ack, flags;
+ u32 mbxno, mbx_mask, int_status, err_status, stamp;
+ unsigned long flags, rx_pending;
int_status = hecc_read(priv,
- (priv->use_hecc1int) ? HECC_CANGIF1 : HECC_CANGIF0);
+ priv->use_hecc1int ?
+ HECC_CANGIF1 : HECC_CANGIF0);
if (!int_status)
return IRQ_NONE;
err_status = hecc_read(priv, HECC_CANES);
if (err_status & (HECC_BUS_ERROR | HECC_CANES_BO |
- HECC_CANES_EP | HECC_CANES_EW))
- ti_hecc_error(ndev, int_status, err_status);
+ HECC_CANES_EP | HECC_CANES_EW))
+ ti_hecc_error(ndev, int_status, err_status);
if (int_status & HECC_CANGIF_GMIF) {
while (priv->tx_tail - priv->tx_head > 0) {
@@ -769,27 +675,27 @@ static irqreturn_t ti_hecc_interrupt(int irq, void *dev_id)
spin_lock_irqsave(&priv->mbx_lock, flags);
hecc_clear_bit(priv, HECC_CANME, mbx_mask);
spin_unlock_irqrestore(&priv->mbx_lock, flags);
- stats->tx_bytes += hecc_read_mbx(priv, mbxno,
- HECC_CANMCF) & 0xF;
+ stamp = hecc_read_stamp(priv, mbxno);
+ stats->tx_bytes +=
+ can_rx_offload_get_echo_skb(&priv->offload,
+ mbxno, stamp);
stats->tx_packets++;
can_led_event(ndev, CAN_LED_EVENT_TX);
- can_get_echo_skb(ndev, mbxno);
--priv->tx_tail;
}
/* restart queue if wrap-up or if queue stalled on last pkt */
- if (((priv->tx_head == priv->tx_tail) &&
- ((priv->tx_head & HECC_TX_MASK) != HECC_TX_MASK)) ||
- (((priv->tx_tail & HECC_TX_MASK) == HECC_TX_MASK) &&
- ((priv->tx_head & HECC_TX_MASK) == HECC_TX_MASK)))
+ if ((priv->tx_head == priv->tx_tail &&
+ ((priv->tx_head & HECC_TX_MASK) != HECC_TX_MASK)) ||
+ (((priv->tx_tail & HECC_TX_MASK) == HECC_TX_MASK) &&
+ ((priv->tx_head & HECC_TX_MASK) == HECC_TX_MASK)))
netif_wake_queue(ndev);
- /* Disable RX mailbox interrupts and let NAPI reenable them */
- if (hecc_read(priv, HECC_CANRMP)) {
- ack = hecc_read(priv, HECC_CANMIM);
- ack &= BIT(HECC_MAX_TX_MBOX) - 1;
- hecc_write(priv, HECC_CANMIM, ack);
- napi_schedule(&priv->napi);
+ /* offload RX mailboxes and let NAPI deliver them */
+ while ((rx_pending = hecc_read(priv, HECC_CANRMP))) {
+ can_rx_offload_irq_offload_timestamp(&priv->offload,
+ rx_pending);
+ hecc_write(priv, HECC_CANRMP, rx_pending);
}
}
@@ -811,7 +717,7 @@ static int ti_hecc_open(struct net_device *ndev)
int err;
err = request_irq(ndev->irq, ti_hecc_interrupt, IRQF_SHARED,
- ndev->name, ndev);
+ ndev->name, ndev);
if (err) {
netdev_err(ndev, "error requesting interrupt\n");
return err;
@@ -831,7 +737,7 @@ static int ti_hecc_open(struct net_device *ndev)
can_led_event(ndev, CAN_LED_EVENT_OPEN);
ti_hecc_start(ndev);
- napi_enable(&priv->napi);
+ can_rx_offload_enable(&priv->offload);
netif_start_queue(ndev);
return 0;
@@ -842,7 +748,7 @@ static int ti_hecc_close(struct net_device *ndev)
struct ti_hecc_priv *priv = netdev_priv(ndev);
netif_stop_queue(ndev);
- napi_disable(&priv->napi);
+ can_rx_offload_disable(&priv->offload);
ti_hecc_stop(ndev);
free_irq(ndev->irq, ndev);
close_candev(ndev);
@@ -962,8 +868,6 @@ static int ti_hecc_probe(struct platform_device *pdev)
goto probe_exit_candev;
}
priv->can.clock.freq = clk_get_rate(priv->clk);
- netif_napi_add(ndev, &priv->napi, ti_hecc_rx_poll,
- HECC_DEF_NAPI_WEIGHT);
err = clk_prepare_enable(priv->clk);
if (err) {
@@ -971,19 +875,30 @@ static int ti_hecc_probe(struct platform_device *pdev)
goto probe_exit_clk;
}
+ priv->offload.mailbox_read = ti_hecc_mailbox_read;
+ priv->offload.mb_first = HECC_RX_FIRST_MBOX;
+ priv->offload.mb_last = HECC_MAX_TX_MBOX;
+ err = can_rx_offload_add_timestamp(ndev, &priv->offload);
+ if (err) {
+ dev_err(&pdev->dev, "can_rx_offload_add_timestamp() failed\n");
+ goto probe_exit_clk;
+ }
+
err = register_candev(ndev);
if (err) {
dev_err(&pdev->dev, "register_candev() failed\n");
- goto probe_exit_clk;
+ goto probe_exit_offload;
}
devm_can_led_init(ndev);
dev_info(&pdev->dev, "device registered (reg_base=%p, irq=%u)\n",
- priv->base, (u32) ndev->irq);
+ priv->base, (u32)ndev->irq);
return 0;
+probe_exit_offload:
+ can_rx_offload_del(&priv->offload);
probe_exit_clk:
clk_put(priv->clk);
probe_exit_candev:
@@ -1000,6 +915,7 @@ static int ti_hecc_remove(struct platform_device *pdev)
unregister_candev(ndev);
clk_disable_unprepare(priv->clk);
clk_put(priv->clk);
+ can_rx_offload_del(&priv->offload);
free_candev(ndev);
return 0;
diff --git a/drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c b/drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c
index c89c7d4900d7..0f1d3e807d63 100644
--- a/drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c
+++ b/drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c
@@ -643,8 +643,7 @@ static int kvaser_usb_init_one(struct kvaser_usb *dev,
return err;
}
- netdev = alloc_candev(sizeof(*priv) +
- dev->max_tx_urbs * sizeof(*priv->tx_contexts),
+ netdev = alloc_candev(struct_size(priv, tx_contexts, dev->max_tx_urbs),
dev->max_tx_urbs);
if (!netdev) {
dev_err(&dev->intf->dev, "Cannot alloc candev\n");
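
struct_size() from <linux/overflow.h> replaces the open-coded `sizeof(*priv) + n * sizeof(elem)` for structures ending in a flexible array and, unlike the open-coded form, saturates instead of wrapping on overflow. A userspace sketch of the arithmetic it stands in for (struct names hypothetical; the real macro also performs the overflow check):

/* What struct_size(priv, tx_contexts, n) computes for a flexible-array
 * allocation; the kernel macro additionally saturates to SIZE_MAX on
 * overflow instead of wrapping. */
#include <stdio.h>
#include <stdlib.h>

struct tx_urb_context { int echo_index; int dlc; };

struct kvaser_priv_like {		/* illustrative stand-in */
	int state;
	struct tx_urb_context tx_contexts[];	/* flexible array */
};

static size_t struct_size_like(size_t base, size_t elem, size_t n)
{
	return base + elem * n;		/* kernel version checks overflow */
}

int main(void)
{
	size_t n = 16;	/* e.g. dev->max_tx_urbs */
	size_t sz = struct_size_like(sizeof(struct kvaser_priv_like),
				     sizeof(struct tx_urb_context), n);
	struct kvaser_priv_like *p = malloc(sz);

	printf("allocated %zu bytes for %zu contexts\n", sz, n);
	free(p);
	return 0;
}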
diff --git a/drivers/net/can/vcan.c b/drivers/net/can/vcan.c
index d200a5b0651c..daf27133887b 100644
--- a/drivers/net/can/vcan.c
+++ b/drivers/net/can/vcan.c
@@ -1,5 +1,4 @@
-/*
- * vcan.c - Virtual CAN interface
+/* vcan.c - Virtual CAN interface
*
* Copyright (c) 2002-2017 Volkswagen Group Electronic Research
* All rights reserved.
@@ -39,6 +38,8 @@
*
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/module.h>
#include <linux/init.h>
#include <linux/netdevice.h>
@@ -57,9 +58,7 @@ MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Urs Thuermann <urs.thuermann@volkswagen.de>");
MODULE_ALIAS_RTNL_LINK(DRV_NAME);
-
-/*
- * CAN test feature:
+/* CAN test feature:
* Enable the echo on driver level for testing the CAN core echo modes.
* See Documentation/networking/can.rst for details.
*/
@@ -68,7 +67,6 @@ static bool echo; /* echo testing. Default: 0 (Off) */
module_param(echo, bool, 0444);
MODULE_PARM_DESC(echo, "Echo sent frames (for testing). Default: 0 (Off)");
-
static void vcan_rx(struct sk_buff *skb, struct net_device *dev)
{
struct canfd_frame *cfd = (struct canfd_frame *)skb->data;
@@ -101,10 +99,8 @@ static netdev_tx_t vcan_tx(struct sk_buff *skb, struct net_device *dev)
if (!echo) {
/* no echo handling available inside this driver */
-
if (loop) {
- /*
- * only count the packets here, because the
+ /* only count the packets here, because the
* CAN core already did the echo for us
*/
stats->rx_packets++;
@@ -117,7 +113,6 @@ static netdev_tx_t vcan_tx(struct sk_buff *skb, struct net_device *dev)
/* perform standard echo handling for CAN network interfaces */
if (loop) {
-
skb = can_create_echo_skb(skb);
if (!skb)
return NETDEV_TX_OK;
@@ -173,10 +168,10 @@ static struct rtnl_link_ops vcan_link_ops __read_mostly = {
static __init int vcan_init_module(void)
{
- pr_info("vcan: Virtual CAN interface driver\n");
+ pr_info("Virtual CAN interface driver\n");
if (echo)
- printk(KERN_INFO "vcan: enabled echo on driver level.\n");
+ pr_info("enabled echo on driver level.\n");
return rtnl_link_register(&vcan_link_ops);
}
diff --git a/drivers/net/can/xilinx_can.c b/drivers/net/can/xilinx_can.c
index 63203ff452b5..911b34316c9d 100644
--- a/drivers/net/can/xilinx_can.c
+++ b/drivers/net/can/xilinx_can.c
@@ -50,6 +50,10 @@ enum xcan_reg {
XCAN_AFR_OFFSET = 0x60, /* Acceptance Filter */
/* only on CAN FD cores */
+ XCAN_F_BRPR_OFFSET = 0x088, /* Data Phase Baud Rate
+ * Prescaler
+ */
+ XCAN_F_BTR_OFFSET = 0x08C, /* Data Phase Bit Timing */
XCAN_TRR_OFFSET = 0x0090, /* TX Buffer Ready Request */
XCAN_AFR_EXT_OFFSET = 0x00E0, /* Acceptance Filter */
XCAN_FSR_OFFSET = 0x00E8, /* RX FIFO Status */
@@ -62,6 +66,7 @@ enum xcan_reg {
#define XCAN_FRAME_DLC_OFFSET(frame_base) ((frame_base) + 0x04)
#define XCAN_FRAME_DW1_OFFSET(frame_base) ((frame_base) + 0x08)
#define XCAN_FRAME_DW2_OFFSET(frame_base) ((frame_base) + 0x0C)
+#define XCANFD_FRAME_DW_OFFSET(frame_base) ((frame_base) + 0x08)
#define XCAN_CANFD_FRAME_SIZE 0x48
#define XCAN_TXMSG_FRAME_OFFSET(n) (XCAN_TXMSG_BASE_OFFSET + \
@@ -118,8 +123,12 @@ enum xcan_reg {
#define XCAN_IDR_RTR_MASK 0x00000001 /* Remote TX request */
#define XCAN_DLCR_DLC_MASK 0xF0000000 /* Data length code */
#define XCAN_FSR_FL_MASK 0x00003F00 /* RX Fill Level */
+#define XCAN_2_FSR_FL_MASK 0x00007F00 /* RX Fill Level */
#define XCAN_FSR_IRI_MASK 0x00000080 /* RX Increment Read Index */
#define XCAN_FSR_RI_MASK 0x0000001F /* RX Read Index */
+#define XCAN_2_FSR_RI_MASK 0x0000003F /* RX Read Index */
+#define XCAN_DLCR_EDL_MASK 0x08000000 /* EDL Mask in DLC */
+#define XCAN_DLCR_BRS_MASK 0x04000000 /* BRS Mask in DLC */
/* CAN register bit shift - XCAN_<REG>_<BIT>_SHIFT */
#define XCAN_BTR_SJW_SHIFT 7 /* Synchronous jump width */
@@ -133,6 +142,7 @@ enum xcan_reg {
/* CAN frame length constants */
#define XCAN_FRAME_MAX_DATA_LEN 8
+#define XCANFD_DW_BYTES 4
#define XCAN_TIMEOUT (1 * HZ)
/* TX-FIFO-empty interrupt available */
@@ -149,7 +159,15 @@ enum xcan_reg {
#define XCAN_FLAG_RX_FIFO_MULTI 0x0010
#define XCAN_FLAG_CANFD_2 0x0020
+enum xcan_ip_type {
+ XAXI_CAN = 0,
+ XZYNQ_CANPS,
+ XAXI_CANFD,
+ XAXI_CANFD_2_0,
+};
+
struct xcan_devtype_data {
+ enum xcan_ip_type cantype;
unsigned int flags;
const struct can_bittiming_const *bittiming_const;
const char *bus_clk_name;
@@ -183,7 +201,7 @@ struct xcan_priv {
struct napi_struct napi;
u32 (*read_reg)(const struct xcan_priv *priv, enum xcan_reg reg);
void (*write_reg)(const struct xcan_priv *priv, enum xcan_reg reg,
- u32 val);
+ u32 val);
struct device *dev;
void __iomem *reg_base;
unsigned long irq_flags;
@@ -205,6 +223,7 @@ static const struct can_bittiming_const xcan_bittiming_const = {
.brp_inc = 1,
};
+/* AXI CANFD Arbitration Bittiming constants as per AXI CANFD 1.0 spec */
static const struct can_bittiming_const xcan_bittiming_const_canfd = {
.name = DRIVER_NAME,
.tseg1_min = 1,
@@ -217,6 +236,20 @@ static const struct can_bittiming_const xcan_bittiming_const_canfd = {
.brp_inc = 1,
};
+/* AXI CANFD Data Bittiming constants as per AXI CANFD 1.0 specs */
+static struct can_bittiming_const xcan_data_bittiming_const_canfd = {
+ .name = DRIVER_NAME,
+ .tseg1_min = 1,
+ .tseg1_max = 16,
+ .tseg2_min = 1,
+ .tseg2_max = 8,
+ .sjw_max = 8,
+ .brp_min = 1,
+ .brp_max = 256,
+ .brp_inc = 1,
+};
+
+/* AXI CANFD 2.0 Arbitration Bittiming constants as per AXI CANFD 2.0 spec */
static const struct can_bittiming_const xcan_bittiming_const_canfd2 = {
.name = DRIVER_NAME,
.tseg1_min = 1,
@@ -229,6 +262,19 @@ static const struct can_bittiming_const xcan_bittiming_const_canfd2 = {
.brp_inc = 1,
};
+/* AXI CANFD 2.0 Data Bittiming constants as per AXI CANFD 2.0 spec */
+static struct can_bittiming_const xcan_data_bittiming_const_canfd2 = {
+ .name = DRIVER_NAME,
+ .tseg1_min = 1,
+ .tseg1_max = 32,
+ .tseg2_min = 1,
+ .tseg2_max = 16,
+ .sjw_max = 16,
+ .brp_min = 1,
+ .brp_max = 256,
+ .brp_inc = 1,
+};
+
/**
* xcan_write_reg_le - Write a value to the device register little endian
* @priv: Driver private data structure
@@ -238,7 +284,7 @@ static const struct can_bittiming_const xcan_bittiming_const_canfd2 = {
* Write data to the particular CAN register
*/
static void xcan_write_reg_le(const struct xcan_priv *priv, enum xcan_reg reg,
- u32 val)
+ u32 val)
{
iowrite32(val, priv->reg_base + reg);
}
@@ -265,7 +311,7 @@ static u32 xcan_read_reg_le(const struct xcan_priv *priv, enum xcan_reg reg)
* Write data to the particular CAN register
*/
static void xcan_write_reg_be(const struct xcan_priv *priv, enum xcan_reg reg,
- u32 val)
+ u32 val)
{
iowrite32be(val, priv->reg_base + reg);
}
@@ -343,6 +389,7 @@ static int xcan_set_bittiming(struct net_device *ndev)
{
struct xcan_priv *priv = netdev_priv(ndev);
struct can_bittiming *bt = &priv->can.bittiming;
+ struct can_bittiming *dbt = &priv->can.data_bittiming;
u32 btr0, btr1;
u32 is_config_mode;
@@ -372,9 +419,27 @@ static int xcan_set_bittiming(struct net_device *ndev)
priv->write_reg(priv, XCAN_BRPR_OFFSET, btr0);
priv->write_reg(priv, XCAN_BTR_OFFSET, btr1);
+ if (priv->devtype.cantype == XAXI_CANFD ||
+ priv->devtype.cantype == XAXI_CANFD_2_0) {
+ /* Setting Baud Rate prescaler value in F_BRPR Register */
+ btr0 = dbt->brp - 1;
+
+ /* Setting Time Segment 1 in BTR Register */
+ btr1 = dbt->prop_seg + dbt->phase_seg1 - 1;
+
+ /* Setting Time Segment 2 in BTR Register */
+ btr1 |= (dbt->phase_seg2 - 1) << priv->devtype.btr_ts2_shift;
+
+ /* Setting Synchronous jump width in BTR Register */
+ btr1 |= (dbt->sjw - 1) << priv->devtype.btr_sjw_shift;
+
+ priv->write_reg(priv, XCAN_F_BRPR_OFFSET, btr0);
+ priv->write_reg(priv, XCAN_F_BTR_OFFSET, btr1);
+ }
+
netdev_dbg(ndev, "BRPR=0x%08x, BTR=0x%08x\n",
- priv->read_reg(priv, XCAN_BRPR_OFFSET),
- priv->read_reg(priv, XCAN_BTR_OFFSET));
+ priv->read_reg(priv, XCAN_BRPR_OFFSET),
+ priv->read_reg(priv, XCAN_BTR_OFFSET));
return 0;
}
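
The data-phase values are packed the same way as the arbitration phase: BRP-1 goes into F_BRPR, and TSEG1-1, TSEG2-1 and SJW-1 are OR'd into F_BTR at the device-specific shifts. A worked userspace example of that packing, with illustrative timing values and assumed shift positions (the driver takes the real shifts from priv->devtype):

/* Worked example of the F_BRPR/F_BTR packing above; timing values and
 * shift positions are illustrative, not from the datasheet. */
#include <stdio.h>

int main(void)
{
	unsigned int brp = 2, prop_seg = 3, phase_seg1 = 4,
		     phase_seg2 = 4, sjw = 2;
	unsigned int ts2_shift = 16, sjw_shift = 24;	/* assumed */
	unsigned int btr0, btr1;

	btr0 = brp - 1;					/* F_BRPR */
	btr1 = prop_seg + phase_seg1 - 1;		/* TSEG1 */
	btr1 |= (phase_seg2 - 1) << ts2_shift;		/* TSEG2 */
	btr1 |= (sjw - 1) << sjw_shift;			/* SJW */

	printf("F_BRPR=0x%08x F_BTR=0x%08x\n", btr0, btr1);
	return 0;
}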
@@ -392,9 +457,8 @@ static int xcan_set_bittiming(struct net_device *ndev)
static int xcan_chip_start(struct net_device *ndev)
{
struct xcan_priv *priv = netdev_priv(ndev);
- u32 reg_msr, reg_sr_mask;
+ u32 reg_msr;
int err;
- unsigned long timeout;
u32 ier;
/* Check if it is in reset mode */
@@ -420,10 +484,8 @@ static int xcan_chip_start(struct net_device *ndev)
/* Check whether it is loopback mode or normal mode */
if (priv->can.ctrlmode & CAN_CTRLMODE_LOOPBACK) {
reg_msr = XCAN_MSR_LBACK_MASK;
- reg_sr_mask = XCAN_SR_LBACK_MASK;
} else {
reg_msr = 0x0;
- reg_sr_mask = XCAN_SR_NORMAL_MASK;
}
/* enable the first extended filter, if any, as cores with extended
@@ -435,16 +497,8 @@ static int xcan_chip_start(struct net_device *ndev)
priv->write_reg(priv, XCAN_MSR_OFFSET, reg_msr);
priv->write_reg(priv, XCAN_SRR_OFFSET, XCAN_SRR_CEN_MASK);
- timeout = jiffies + XCAN_TIMEOUT;
- while (!(priv->read_reg(priv, XCAN_SR_OFFSET) & reg_sr_mask)) {
- if (time_after(jiffies, timeout)) {
- netdev_warn(ndev,
- "timed out for correct mode\n");
- return -ETIMEDOUT;
- }
- }
netdev_dbg(ndev, "status:#x%08x\n",
- priv->read_reg(priv, XCAN_SR_OFFSET));
+ priv->read_reg(priv, XCAN_SR_OFFSET));
priv->can.state = CAN_STATE_ERROR_ACTIVE;
return 0;
@@ -483,6 +537,7 @@ static int xcan_do_set_mode(struct net_device *ndev, enum can_mode mode)
/**
* xcan_write_frame - Write a frame to HW
+ * @priv: Driver private data structure
* @skb: sk_buff pointer that contains data to be Txed
* @frame_offset: Register offset to write the frame to
*/
@@ -490,7 +545,8 @@ static void xcan_write_frame(struct xcan_priv *priv, struct sk_buff *skb,
int frame_offset)
{
u32 id, dlc, data[2] = {0, 0};
- struct can_frame *cf = (struct can_frame *)skb->data;
+ struct canfd_frame *cf = (struct canfd_frame *)skb->data;
+ u32 ramoff, dwindex = 0, i;
/* Watch carefully on the bit sequence */
if (cf->can_id & CAN_EFF_FLAG) {
@@ -498,7 +554,7 @@ static void xcan_write_frame(struct xcan_priv *priv, struct sk_buff *skb,
id = ((cf->can_id & CAN_EFF_MASK) << XCAN_IDR_ID2_SHIFT) &
XCAN_IDR_ID2_MASK;
id |= (((cf->can_id & CAN_EFF_MASK) >>
- (CAN_EFF_ID_BITS-CAN_SFF_ID_BITS)) <<
+ (CAN_EFF_ID_BITS - CAN_SFF_ID_BITS)) <<
XCAN_IDR_ID1_SHIFT) & XCAN_IDR_ID1_MASK;
/* The substitute remote TX request bit should be "1"
@@ -519,31 +575,51 @@ static void xcan_write_frame(struct xcan_priv *priv, struct sk_buff *skb,
id |= XCAN_IDR_SRR_MASK;
}
- dlc = cf->can_dlc << XCAN_DLCR_DLC_SHIFT;
-
- if (cf->can_dlc > 0)
- data[0] = be32_to_cpup((__be32 *)(cf->data + 0));
- if (cf->can_dlc > 4)
- data[1] = be32_to_cpup((__be32 *)(cf->data + 4));
+ dlc = can_len2dlc(cf->len) << XCAN_DLCR_DLC_SHIFT;
+ if (can_is_canfd_skb(skb)) {
+ if (cf->flags & CANFD_BRS)
+ dlc |= XCAN_DLCR_BRS_MASK;
+ dlc |= XCAN_DLCR_EDL_MASK;
+ }
priv->write_reg(priv, XCAN_FRAME_ID_OFFSET(frame_offset), id);
/* If the CAN frame is RTR frame this write triggers transmission
* (not on CAN FD)
*/
priv->write_reg(priv, XCAN_FRAME_DLC_OFFSET(frame_offset), dlc);
- if (!(cf->can_id & CAN_RTR_FLAG)) {
- priv->write_reg(priv, XCAN_FRAME_DW1_OFFSET(frame_offset),
- data[0]);
- /* If the CAN frame is Standard/Extended frame this
- * write triggers transmission (not on CAN FD)
- */
- priv->write_reg(priv, XCAN_FRAME_DW2_OFFSET(frame_offset),
- data[1]);
+ if (priv->devtype.cantype == XAXI_CANFD ||
+ priv->devtype.cantype == XAXI_CANFD_2_0) {
+ for (i = 0; i < cf->len; i += 4) {
+ ramoff = XCANFD_FRAME_DW_OFFSET(frame_offset) +
+ (dwindex * XCANFD_DW_BYTES);
+ priv->write_reg(priv, ramoff,
+ be32_to_cpup((__be32 *)(cf->data + i)));
+ dwindex++;
+ }
+ } else {
+ if (cf->len > 0)
+ data[0] = be32_to_cpup((__be32 *)(cf->data + 0));
+ if (cf->len > 4)
+ data[1] = be32_to_cpup((__be32 *)(cf->data + 4));
+
+ if (!(cf->can_id & CAN_RTR_FLAG)) {
+ priv->write_reg(priv,
+ XCAN_FRAME_DW1_OFFSET(frame_offset),
+ data[0]);
+ /* If the CAN frame is Standard/Extended frame this
+ * write triggers transmission (not on CAN FD)
+ */
+ priv->write_reg(priv,
+ XCAN_FRAME_DW2_OFFSET(frame_offset),
+ data[1]);
+ }
}
}
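
Switching from can_dlc to can_len2dlc(cf->len) matters because CAN FD payloads above 8 bytes are encoded in coarse steps (12, 16, 20, 24, 32, 48, 64), so length and DLC are no longer the same number. A compilable sketch of the standard CAN FD mapping that the kernel's can_len2dlc()/can_dlc2len() helpers implement (the table is from the CAN FD spec; only the helper names are the kernel's):

/* Standard CAN FD length <-> DLC mapping. */
#include <stdio.h>

static const unsigned int dlc2len[16] = {
	0, 1, 2, 3, 4, 5, 6, 7, 8, 12, 16, 20, 24, 32, 48, 64
};

static unsigned int len2dlc(unsigned int len)
{
	unsigned int dlc;

	for (dlc = 0; dlc < 15; dlc++)
		if (dlc2len[dlc] >= len)
			return dlc;
	return 15;	/* 64 bytes */
}

int main(void)
{
	/* len 24 maps to DLC 12; DLC 13 maps back to len 32 */
	printf("len 24 -> dlc %u, dlc 13 -> len %u\n",
	       len2dlc(24), dlc2len[13]);
	return 0;
}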
/**
* xcan_start_xmit_fifo - Starts the transmission (FIFO mode)
+ * @skb: sk_buff pointer that contains data to be Txed
+ * @ndev: Pointer to net_device structure
*
* Return: 0 on success, -ENOSPC if FIFO is full.
*/
@@ -580,6 +656,8 @@ static int xcan_start_xmit_fifo(struct sk_buff *skb, struct net_device *ndev)
/**
* xcan_start_xmit_mailbox - Starts the transmission (mailbox mode)
+ * @skb: sk_buff pointer that contains data to be Txed
+ * @ndev: Pointer to net_device structure
*
* Return: 0 on success, -ENOSPC if there is no space
*/
@@ -712,6 +790,88 @@ static int xcan_rx(struct net_device *ndev, int frame_base)
}
/**
+ * xcanfd_rx - Is called from CAN isr to complete the received
+ * frame processing
+ * @ndev: Pointer to net_device structure
+ * @frame_base: Register offset to the frame to be read
+ *
+ * This function is invoked from the CAN ISR (poll) to process the Rx frames. It
+ * does minimal processing and invokes "netif_receive_skb" to complete further
+ * processing.
+ * Return: 1 on success and 0 on failure.
+ */
+static int xcanfd_rx(struct net_device *ndev, int frame_base)
+{
+ struct xcan_priv *priv = netdev_priv(ndev);
+ struct net_device_stats *stats = &ndev->stats;
+ struct canfd_frame *cf;
+ struct sk_buff *skb;
+ u32 id_xcan, dlc, data[2] = {0, 0}, dwindex = 0, i, dw_offset;
+
+ id_xcan = priv->read_reg(priv, XCAN_FRAME_ID_OFFSET(frame_base));
+ dlc = priv->read_reg(priv, XCAN_FRAME_DLC_OFFSET(frame_base));
+ if (dlc & XCAN_DLCR_EDL_MASK)
+ skb = alloc_canfd_skb(ndev, &cf);
+ else
+ skb = alloc_can_skb(ndev, (struct can_frame **)&cf);
+
+ if (unlikely(!skb)) {
+ stats->rx_dropped++;
+ return 0;
+ }
+
+ /* Change Xilinx CANFD data length format to socketCAN data
+ * format
+ */
+ if (dlc & XCAN_DLCR_EDL_MASK)
+ cf->len = can_dlc2len((dlc & XCAN_DLCR_DLC_MASK) >>
+ XCAN_DLCR_DLC_SHIFT);
+ else
+ cf->len = get_can_dlc((dlc & XCAN_DLCR_DLC_MASK) >>
+ XCAN_DLCR_DLC_SHIFT);
+
+ /* Change Xilinx CAN ID format to socketCAN ID format */
+ if (id_xcan & XCAN_IDR_IDE_MASK) {
+ /* The received frame is an Extended format frame */
+ cf->can_id = (id_xcan & XCAN_IDR_ID1_MASK) >> 3;
+ cf->can_id |= (id_xcan & XCAN_IDR_ID2_MASK) >>
+ XCAN_IDR_ID2_SHIFT;
+ cf->can_id |= CAN_EFF_FLAG;
+ if (id_xcan & XCAN_IDR_RTR_MASK)
+ cf->can_id |= CAN_RTR_FLAG;
+ } else {
+ /* The received frame is a standard format frame */
+ cf->can_id = (id_xcan & XCAN_IDR_ID1_MASK) >>
+ XCAN_IDR_ID1_SHIFT;
+ if (!(dlc & XCAN_DLCR_EDL_MASK) && (id_xcan &
+ XCAN_IDR_SRR_MASK))
+ cf->can_id |= CAN_RTR_FLAG;
+ }
+
+ /* Check whether the received frame is FD or not */
+ if (dlc & XCAN_DLCR_EDL_MASK) {
+ for (i = 0; i < cf->len; i += 4) {
+ dw_offset = XCANFD_FRAME_DW_OFFSET(frame_base) +
+ (dwindex * XCANFD_DW_BYTES);
+ data[0] = priv->read_reg(priv, dw_offset);
+ *(__be32 *)(cf->data + i) = cpu_to_be32(data[0]);
+ dwindex++;
+ }
+ } else {
+ for (i = 0; i < cf->len; i += 4) {
+ dw_offset = XCANFD_FRAME_DW_OFFSET(frame_base);
+ data[0] = priv->read_reg(priv, dw_offset + i);
+ *(__be32 *)(cf->data + i) = cpu_to_be32(data[0]);
+ }
+ }
+ stats->rx_bytes += cf->len;
+ stats->rx_packets++;
+ netif_receive_skb(skb);
+
+ return 1;
+}
+
+/**
* xcan_current_error_state - Get current error state from HW
* @ndev: Pointer to net_device structure
*
@@ -924,7 +1084,7 @@ static void xcan_err_interrupt(struct net_device *ndev, u32 isr)
cf->data[3] = CAN_ERR_PROT_LOC_CRC_SEQ;
}
}
- priv->can.can_stats.bus_error++;
+ priv->can.can_stats.bus_error++;
}
if (skb) {
@@ -934,7 +1094,7 @@ static void xcan_err_interrupt(struct net_device *ndev, u32 isr)
}
netdev_dbg(ndev, "%s: error status register:0x%x\n",
- __func__, priv->read_reg(priv, XCAN_ESR_OFFSET));
+ __func__, priv->read_reg(priv, XCAN_ESR_OFFSET));
}
/**
@@ -960,6 +1120,7 @@ static void xcan_state_interrupt(struct net_device *ndev, u32 isr)
/**
* xcan_rx_fifo_get_next_frame - Get register offset of next RX frame
+ * @priv: Driver private data structure
*
* Return: Register offset of the next frame in RX FIFO.
*/
@@ -968,7 +1129,7 @@ static int xcan_rx_fifo_get_next_frame(struct xcan_priv *priv)
int offset;
if (priv->devtype.flags & XCAN_FLAG_RX_FIFO_MULTI) {
- u32 fsr;
+ u32 fsr, mask;
/* clear RXOK before the is-empty check so that any newly
* received frame will reassert it without a race
@@ -978,13 +1139,20 @@ static int xcan_rx_fifo_get_next_frame(struct xcan_priv *priv)
fsr = priv->read_reg(priv, XCAN_FSR_OFFSET);
/* check if RX FIFO is empty */
- if (!(fsr & XCAN_FSR_FL_MASK))
+ if (priv->devtype.flags & XCAN_FLAG_CANFD_2)
+ mask = XCAN_2_FSR_FL_MASK;
+ else
+ mask = XCAN_FSR_FL_MASK;
+
+ if (!(fsr & mask))
return -ENOENT;
if (priv->devtype.flags & XCAN_FLAG_CANFD_2)
- offset = XCAN_RXMSG_2_FRAME_OFFSET(fsr & XCAN_FSR_RI_MASK);
+ offset =
+ XCAN_RXMSG_2_FRAME_OFFSET(fsr & XCAN_2_FSR_RI_MASK);
else
- offset = XCAN_RXMSG_FRAME_OFFSET(fsr & XCAN_FSR_RI_MASK);
+ offset =
+ XCAN_RXMSG_FRAME_OFFSET(fsr & XCAN_FSR_RI_MASK);
} else {
/* check if RX FIFO is empty */
@@ -1019,7 +1187,10 @@ static int xcan_rx_poll(struct napi_struct *napi, int quota)
while ((frame_offset = xcan_rx_fifo_get_next_frame(priv)) >= 0 &&
(work_done < quota)) {
- work_done += xcan_rx(ndev, frame_offset);
+ if (xcan_rx_int_mask(priv) & XCAN_IXR_RXOK_MASK)
+ work_done += xcanfd_rx(ndev, frame_offset);
+ else
+ work_done += xcan_rx(ndev, frame_offset);
if (priv->devtype.flags & XCAN_FLAG_RX_FIFO_MULTI)
/* increment read index */
@@ -1094,8 +1265,10 @@ static void xcan_tx_interrupt(struct net_device *ndev, u32 isr)
* via TXFEMP handling as we read TXFEMP *after* TXOK
* clear to satisfy (1).
*/
- while ((isr & XCAN_IXR_TXOK_MASK) && !WARN_ON(++retries == 100)) {
- priv->write_reg(priv, XCAN_ICR_OFFSET, XCAN_IXR_TXOK_MASK);
+ while ((isr & XCAN_IXR_TXOK_MASK) &&
+ !WARN_ON(++retries == 100)) {
+ priv->write_reg(priv, XCAN_ICR_OFFSET,
+ XCAN_IXR_TXOK_MASK);
isr = priv->read_reg(priv, XCAN_ISR_OFFSET);
}
@@ -1208,12 +1381,12 @@ static int xcan_open(struct net_device *ndev)
ret = pm_runtime_get_sync(priv->dev);
if (ret < 0) {
netdev_err(ndev, "%s: pm_runtime_get failed(%d)\n",
- __func__, ret);
+ __func__, ret);
return ret;
}
ret = request_irq(ndev->irq, xcan_interrupt, priv->irq_flags,
- ndev->name, ndev);
+ ndev->name, ndev);
if (ret < 0) {
netdev_err(ndev, "irq allocation for CAN failed\n");
goto err;
@@ -1284,7 +1457,7 @@ static int xcan_close(struct net_device *ndev)
* Return: 0 on success and failure value on error
*/
static int xcan_get_berr_counter(const struct net_device *ndev,
- struct can_berr_counter *bec)
+ struct can_berr_counter *bec)
{
struct xcan_priv *priv = netdev_priv(ndev);
int ret;
@@ -1292,7 +1465,7 @@ static int xcan_get_berr_counter(const struct net_device *ndev,
ret = pm_runtime_get_sync(priv->dev);
if (ret < 0) {
netdev_err(ndev, "%s: pm_runtime_get failed(%d)\n",
- __func__, ret);
+ __func__, ret);
return ret;
}
@@ -1305,7 +1478,6 @@ static int xcan_get_berr_counter(const struct net_device *ndev,
return 0;
}
-
static const struct net_device_ops xcan_netdev_ops = {
.ndo_open = xcan_open,
.ndo_stop = xcan_close,
@@ -1417,6 +1589,8 @@ static const struct dev_pm_ops xcan_dev_pm_ops = {
};
static const struct xcan_devtype_data xcan_zynq_data = {
+ .cantype = XZYNQ_CANPS,
+ .flags = XCAN_FLAG_TXFEMP,
.bittiming_const = &xcan_bittiming_const,
.btr_ts2_shift = XCAN_BTR_TS2_SHIFT,
.btr_sjw_shift = XCAN_BTR_SJW_SHIFT,
@@ -1424,6 +1598,8 @@ static const struct xcan_devtype_data xcan_zynq_data = {
};
static const struct xcan_devtype_data xcan_axi_data = {
+ .cantype = XAXI_CAN,
+ .flags = XCAN_FLAG_TXFEMP,
.bittiming_const = &xcan_bittiming_const,
.btr_ts2_shift = XCAN_BTR_TS2_SHIFT,
.btr_sjw_shift = XCAN_BTR_SJW_SHIFT,
@@ -1431,6 +1607,7 @@ static const struct xcan_devtype_data xcan_axi_data = {
};
static const struct xcan_devtype_data xcan_canfd_data = {
+ .cantype = XAXI_CANFD,
.flags = XCAN_FLAG_EXT_FILTERS |
XCAN_FLAG_RXMNF |
XCAN_FLAG_TX_MAILBOXES |
@@ -1442,6 +1619,7 @@ static const struct xcan_devtype_data xcan_canfd_data = {
};
static const struct xcan_devtype_data xcan_canfd2_data = {
+ .cantype = XAXI_CANFD_2_0,
.flags = XCAN_FLAG_EXT_FILTERS |
XCAN_FLAG_RXMNF |
XCAN_FLAG_TX_MAILBOXES |
@@ -1554,6 +1732,19 @@ static int xcan_probe(struct platform_device *pdev)
priv->can.do_get_berr_counter = xcan_get_berr_counter;
priv->can.ctrlmode_supported = CAN_CTRLMODE_LOOPBACK |
CAN_CTRLMODE_BERR_REPORTING;
+
+ if (devtype->cantype == XAXI_CANFD)
+ priv->can.data_bittiming_const =
+ &xcan_data_bittiming_const_canfd;
+
+ if (devtype->cantype == XAXI_CANFD_2_0)
+ priv->can.data_bittiming_const =
+ &xcan_data_bittiming_const_canfd2;
+
+ if (devtype->cantype == XAXI_CANFD ||
+ devtype->cantype == XAXI_CANFD_2_0)
+ priv->can.ctrlmode_supported |= CAN_CTRLMODE_FD;
+
priv->reg_base = addr;
priv->tx_max = tx_max;
priv->devtype = *devtype;
@@ -1570,7 +1761,8 @@ static int xcan_probe(struct platform_device *pdev)
/* Getting the CAN can_clk info */
priv->can_clk = devm_clk_get(&pdev->dev, "can_clk");
if (IS_ERR(priv->can_clk)) {
- dev_err(&pdev->dev, "Device clock not found.\n");
+ if (PTR_ERR(priv->can_clk) != -EPROBE_DEFER)
+ dev_err(&pdev->dev, "Device clock not found.\n");
ret = PTR_ERR(priv->can_clk);
goto err_free;
}
@@ -1589,7 +1781,7 @@ static int xcan_probe(struct platform_device *pdev)
ret = pm_runtime_get_sync(&pdev->dev);
if (ret < 0) {
netdev_err(ndev, "%s: pm_runtime_get failed(%d)\n",
- __func__, ret);
+ __func__, ret);
goto err_pmdisable;
}
diff --git a/drivers/net/dsa/b53/b53_common.c b/drivers/net/dsa/b53/b53_common.c
index 907af62846ba..7d328a5f0161 100644
--- a/drivers/net/dsa/b53/b53_common.c
+++ b/drivers/net/dsa/b53/b53_common.c
@@ -510,10 +510,15 @@ EXPORT_SYMBOL(b53_imp_vlan_setup);
int b53_enable_port(struct dsa_switch *ds, int port, struct phy_device *phy)
{
struct b53_device *dev = ds->priv;
- unsigned int cpu_port = ds->ports[port].cpu_dp->index;
+ unsigned int cpu_port;
int ret = 0;
u16 pvlan;
+ if (!dsa_is_user_port(ds, port))
+ return 0;
+
+ cpu_port = ds->ports[port].cpu_dp->index;
+
if (dev->ops->irq_enable)
ret = dev->ops->irq_enable(dev, port);
if (ret)
diff --git a/drivers/net/dsa/b53/b53_srab.c b/drivers/net/dsa/b53/b53_srab.c
index d9c56a779c08..0a1be5259be0 100644
--- a/drivers/net/dsa/b53/b53_srab.c
+++ b/drivers/net/dsa/b53/b53_srab.c
@@ -536,7 +536,6 @@ static void b53_srab_mux_init(struct platform_device *pdev)
struct b53_device *dev = platform_get_drvdata(pdev);
struct b53_srab_priv *priv = dev->priv;
struct b53_srab_port_priv *p;
- struct resource *r;
unsigned int port;
u32 reg, off = 0;
int ret;
@@ -544,8 +543,7 @@ static void b53_srab_mux_init(struct platform_device *pdev)
if (dev->pdata && dev->pdata->chip_id != BCM58XX_DEVICE_ID)
return;
- r = platform_get_resource(pdev, IORESOURCE_MEM, 1);
- priv->mux_config = devm_ioremap_resource(&pdev->dev, r);
+ priv->mux_config = devm_platform_ioremap_resource(pdev, 1);
if (IS_ERR(priv->mux_config))
return;
@@ -593,7 +591,6 @@ static int b53_srab_probe(struct platform_device *pdev)
const struct of_device_id *of_id = NULL;
struct b53_srab_priv *priv;
struct b53_device *dev;
- struct resource *r;
if (dn)
of_id = of_match_node(b53_srab_of_match, dn);
@@ -610,8 +607,7 @@ static int b53_srab_probe(struct platform_device *pdev)
if (!priv)
return -ENOMEM;
- r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- priv->regs = devm_ioremap_resource(&pdev->dev, r);
+ priv->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(priv->regs))
return -ENOMEM;
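
devm_platform_ioremap_resource() collapses the platform_get_resource() + devm_ioremap_resource() pair used throughout these drivers. Roughly what the helper does (paraphrased from drivers/base/platform.c, not a definitive listing):

void __iomem *devm_platform_ioremap_resource(struct platform_device *pdev,
					     unsigned int index)
{
	struct resource *res;

	res = platform_get_resource(pdev, IORESOURCE_MEM, index);
	return devm_ioremap_resource(&pdev->dev, res);
}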
diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c
index 28c963a21dac..26509fa37a50 100644
--- a/drivers/net/dsa/bcm_sf2.c
+++ b/drivers/net/dsa/bcm_sf2.c
@@ -157,6 +157,9 @@ static int bcm_sf2_port_setup(struct dsa_switch *ds, int port,
unsigned int i;
u32 reg;
+ if (!dsa_is_user_port(ds, port))
+ return 0;
+
/* Clear the memory power down */
reg = core_readl(priv, CORE_MEM_PSM_VDD_CTRL);
reg &= ~P_TXQ_PSM_VDD(port);
@@ -1047,7 +1050,6 @@ static int bcm_sf2_sw_probe(struct platform_device *pdev)
struct b53_device *dev;
struct dsa_switch *ds;
void __iomem **base;
- struct resource *r;
unsigned int i;
u32 reg, rev;
int ret;
@@ -1113,8 +1115,7 @@ static int bcm_sf2_sw_probe(struct platform_device *pdev)
base = &priv->core;
for (i = 0; i < BCM_SF2_REGS_NUM; i++) {
- r = platform_get_resource(pdev, IORESOURCE_MEM, i);
- *base = devm_ioremap_resource(&pdev->dev, r);
+ *base = devm_platform_ioremap_resource(pdev, i);
if (IS_ERR(*base)) {
pr_err("unable to find register: %s\n", reg_names[i]);
return PTR_ERR(*base);
diff --git a/drivers/net/dsa/lan9303-core.c b/drivers/net/dsa/lan9303-core.c
index 7a2063e7737a..bbec86b9418e 100644
--- a/drivers/net/dsa/lan9303-core.c
+++ b/drivers/net/dsa/lan9303-core.c
@@ -1079,6 +1079,9 @@ static int lan9303_port_enable(struct dsa_switch *ds, int port,
{
struct lan9303 *chip = ds->priv;
+ if (!dsa_is_user_port(ds, port))
+ return 0;
+
return lan9303_enable_processing_port(chip, port);
}
@@ -1086,6 +1089,9 @@ static void lan9303_port_disable(struct dsa_switch *ds, int port)
{
struct lan9303 *chip = ds->priv;
+ if (!dsa_is_user_port(ds, port))
+ return;
+
lan9303_disable_processing_port(chip, port);
lan9303_phy_write(ds, chip->phy_addr_base + port, MII_BMCR, BMCR_PDOWN);
}
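
The same dsa_is_user_port() guard is added across several drivers in this series: the port enable/disable callbacks may be invoked for CPU and DSA ports as well, and should only act on user-facing ports. A sketch of the pattern (the hw_enable_port() helper is hypothetical; the early return is the point):

static int example_port_enable(struct dsa_switch *ds, int port,
			       struct phy_device *phy)
{
	if (!dsa_is_user_port(ds, port))
		return 0;	/* CPU/DSA ports: nothing to configure */

	return hw_enable_port(ds->priv, port);
}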
diff --git a/drivers/net/dsa/lantiq_gswip.c b/drivers/net/dsa/lantiq_gswip.c
index 4e64835deac2..a69c9b9878b7 100644
--- a/drivers/net/dsa/lantiq_gswip.c
+++ b/drivers/net/dsa/lantiq_gswip.c
@@ -642,6 +642,9 @@ static int gswip_port_enable(struct dsa_switch *ds, int port,
struct gswip_priv *priv = ds->priv;
int err;
+ if (!dsa_is_user_port(ds, port))
+ return 0;
+
if (!dsa_is_cpu_port(ds, port)) {
err = gswip_add_single_port_br(priv, port, true);
if (err)
@@ -678,6 +681,9 @@ static void gswip_port_disable(struct dsa_switch *ds, int port)
{
struct gswip_priv *priv = ds->priv;
+ if (!dsa_is_user_port(ds, port))
+ return;
+
if (!dsa_is_cpu_port(ds, port)) {
gswip_mdio_mask(priv, GSWIP_MDIO_PHY_LINK_DOWN,
GSWIP_MDIO_PHY_LINK_MASK,
@@ -1822,7 +1828,6 @@ remove_gphy:
static int gswip_probe(struct platform_device *pdev)
{
struct gswip_priv *priv;
- struct resource *gswip_res, *mdio_res, *mii_res;
struct device_node *mdio_np, *gphy_fw_np;
struct device *dev = &pdev->dev;
int err;
@@ -1833,18 +1838,15 @@ static int gswip_probe(struct platform_device *pdev)
if (!priv)
return -ENOMEM;
- gswip_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- priv->gswip = devm_ioremap_resource(dev, gswip_res);
+ priv->gswip = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(priv->gswip))
return PTR_ERR(priv->gswip);
- mdio_res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
- priv->mdio = devm_ioremap_resource(dev, mdio_res);
+ priv->mdio = devm_platform_ioremap_resource(pdev, 1);
if (IS_ERR(priv->mdio))
return PTR_ERR(priv->mdio);
- mii_res = platform_get_resource(pdev, IORESOURCE_MEM, 2);
- priv->mii = devm_ioremap_resource(dev, mii_res);
+ priv->mii = devm_platform_ioremap_resource(pdev, 2);
if (IS_ERR(priv->mii))
return PTR_ERR(priv->mii);
diff --git a/drivers/net/dsa/microchip/Kconfig b/drivers/net/dsa/microchip/Kconfig
index fe0a13b79c4b..e1c23d1e91e6 100644
--- a/drivers/net/dsa/microchip/Kconfig
+++ b/drivers/net/dsa/microchip/Kconfig
@@ -5,7 +5,6 @@ config NET_DSA_MICROCHIP_KSZ_COMMON
menuconfig NET_DSA_MICROCHIP_KSZ9477
tristate "Microchip KSZ9477 series switch support"
depends on NET_DSA
- select NET_DSA_TAG_KSZ9477
select NET_DSA_MICROCHIP_KSZ_COMMON
help
This driver adds support for Microchip KSZ9477 switch chips.
@@ -16,3 +15,20 @@ config NET_DSA_MICROCHIP_KSZ9477_SPI
select REGMAP_SPI
help
Select to enable support for registering switches configured through SPI.
+
+menuconfig NET_DSA_MICROCHIP_KSZ8795
+ tristate "Microchip KSZ8795 series switch support"
+ depends on NET_DSA
+ select NET_DSA_MICROCHIP_KSZ_COMMON
+ help
+ This driver adds support for Microchip KSZ8795 switch chips.
+
+config NET_DSA_MICROCHIP_KSZ8795_SPI
+ tristate "KSZ8795 series SPI connected switch driver"
+ depends on NET_DSA_MICROCHIP_KSZ8795 && SPI
+ select REGMAP_SPI
+ help
+ This driver accesses KSZ8795 chip through SPI.
+
+ It is required to use the KSZ8795 switch driver, as SPI is the
+ only way to access the chip.
diff --git a/drivers/net/dsa/microchip/Makefile b/drivers/net/dsa/microchip/Makefile
index 68451b02f775..e3d799b95d7d 100644
--- a/drivers/net/dsa/microchip/Makefile
+++ b/drivers/net/dsa/microchip/Makefile
@@ -2,3 +2,5 @@
obj-$(CONFIG_NET_DSA_MICROCHIP_KSZ_COMMON) += ksz_common.o
obj-$(CONFIG_NET_DSA_MICROCHIP_KSZ9477) += ksz9477.o
obj-$(CONFIG_NET_DSA_MICROCHIP_KSZ9477_SPI) += ksz9477_spi.o
+obj-$(CONFIG_NET_DSA_MICROCHIP_KSZ8795) += ksz8795.o
+obj-$(CONFIG_NET_DSA_MICROCHIP_KSZ8795_SPI) += ksz8795_spi.o
diff --git a/drivers/net/dsa/microchip/ksz8795.c b/drivers/net/dsa/microchip/ksz8795.c
new file mode 100644
index 000000000000..a23d3ffdf0c4
--- /dev/null
+++ b/drivers/net/dsa/microchip/ksz8795.c
@@ -0,0 +1,1310 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Microchip KSZ8795 switch driver
+ *
+ * Copyright (C) 2017 Microchip Technology Inc.
+ * Tristram Ha <Tristram.Ha@microchip.com>
+ */
+
+#include <linux/delay.h>
+#include <linux/export.h>
+#include <linux/gpio.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_data/microchip-ksz.h>
+#include <linux/phy.h>
+#include <linux/etherdevice.h>
+#include <linux/if_bridge.h>
+#include <net/dsa.h>
+#include <net/switchdev.h>
+
+#include "ksz_common.h"
+#include "ksz8795_reg.h"
+
+static const struct {
+ char string[ETH_GSTRING_LEN];
+} mib_names[TOTAL_SWITCH_COUNTER_NUM] = {
+ { "rx_hi" },
+ { "rx_undersize" },
+ { "rx_fragments" },
+ { "rx_oversize" },
+ { "rx_jabbers" },
+ { "rx_symbol_err" },
+ { "rx_crc_err" },
+ { "rx_align_err" },
+ { "rx_mac_ctrl" },
+ { "rx_pause" },
+ { "rx_bcast" },
+ { "rx_mcast" },
+ { "rx_ucast" },
+ { "rx_64_or_less" },
+ { "rx_65_127" },
+ { "rx_128_255" },
+ { "rx_256_511" },
+ { "rx_512_1023" },
+ { "rx_1024_1522" },
+ { "rx_1523_2000" },
+ { "rx_2001" },
+ { "tx_hi" },
+ { "tx_late_col" },
+ { "tx_pause" },
+ { "tx_bcast" },
+ { "tx_mcast" },
+ { "tx_ucast" },
+ { "tx_deferred" },
+ { "tx_total_col" },
+ { "tx_exc_col" },
+ { "tx_single_col" },
+ { "tx_mult_col" },
+ { "rx_total" },
+ { "tx_total" },
+ { "rx_discards" },
+ { "tx_discards" },
+};
+
+static void ksz_cfg(struct ksz_device *dev, u32 addr, u8 bits, bool set)
+{
+ regmap_update_bits(dev->regmap[0], addr, bits, set ? bits : 0);
+}
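+
The ksz_cfg() and ksz_port_cfg() helpers are thin wrappers around regmap_update_bits(), which performs a read-modify-write of `(old & ~mask) | (val & mask)`; passing the same bits as mask and either the bits or 0 as value turns it into a set/clear primitive. A compilable sketch of that update rule:

/* The read-modify-write rule regmap_update_bits() applies. */
#include <stdio.h>
#include <stdint.h>

static uint8_t update_bits(uint8_t old, uint8_t mask, uint8_t val)
{
	return (old & ~mask) | (val & mask);
}

int main(void)
{
	uint8_t reg = 0x81;

	reg = update_bits(reg, 0x06, 0x06);	/* set bits 1-2 */
	reg = update_bits(reg, 0x80, 0x00);	/* clear bit 7 */
	printf("reg=0x%02x\n", reg);		/* prints 0x07 */
	return 0;
}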
+
+static void ksz_port_cfg(struct ksz_device *dev, int port, int offset, u8 bits,
+ bool set)
+{
+ regmap_update_bits(dev->regmap[0], PORT_CTRL_ADDR(port, offset),
+ bits, set ? bits : 0);
+}
+
+static int ksz8795_reset_switch(struct ksz_device *dev)
+{
+ /* reset switch */
+ ksz_write8(dev, REG_POWER_MANAGEMENT_1,
+ SW_SOFTWARE_POWER_DOWN << SW_POWER_MANAGEMENT_MODE_S);
+ ksz_write8(dev, REG_POWER_MANAGEMENT_1, 0);
+
+ return 0;
+}
+
+static void ksz8795_set_prio_queue(struct ksz_device *dev, int port, int queue)
+{
+ u8 hi, lo;
+
+ /* Number of queues can only be 1, 2, or 4. */
+ switch (queue) {
+ case 4:
+ case 3:
+ queue = PORT_QUEUE_SPLIT_4;
+ break;
+ case 2:
+ queue = PORT_QUEUE_SPLIT_2;
+ break;
+ default:
+ queue = PORT_QUEUE_SPLIT_1;
+ }
+ ksz_pread8(dev, port, REG_PORT_CTRL_0, &lo);
+ ksz_pread8(dev, port, P_DROP_TAG_CTRL, &hi);
+ lo &= ~PORT_QUEUE_SPLIT_L;
+ if (queue & PORT_QUEUE_SPLIT_2)
+ lo |= PORT_QUEUE_SPLIT_L;
+ hi &= ~PORT_QUEUE_SPLIT_H;
+ if (queue & PORT_QUEUE_SPLIT_4)
+ hi |= PORT_QUEUE_SPLIT_H;
+ ksz_pwrite8(dev, port, REG_PORT_CTRL_0, lo);
+ ksz_pwrite8(dev, port, P_DROP_TAG_CTRL, hi);
+
+ /* Default is port based for egress rate limit. */
+ if (queue != PORT_QUEUE_SPLIT_1)
+ ksz_cfg(dev, REG_SW_CTRL_19, SW_OUT_RATE_LIMIT_QUEUE_BASED,
+ true);
+}
+
+static void ksz8795_r_mib_cnt(struct ksz_device *dev, int port, u16 addr,
+ u64 *cnt)
+{
+ u16 ctrl_addr;
+ u32 data;
+ u8 check;
+ int loop;
+
+ ctrl_addr = addr + SWITCH_COUNTER_NUM * port;
+ ctrl_addr |= IND_ACC_TABLE(TABLE_MIB | TABLE_READ);
+
+ mutex_lock(&dev->alu_mutex);
+ ksz_write16(dev, REG_IND_CTRL_0, ctrl_addr);
+
+ /* The valid bit is almost always set on the first read because of
+ * the slow SPI speed.
+ */
+ for (loop = 2; loop > 0; loop--) {
+ ksz_read8(dev, REG_IND_MIB_CHECK, &check);
+
+ if (check & MIB_COUNTER_VALID) {
+ ksz_read32(dev, REG_IND_DATA_LO, &data);
+ if (check & MIB_COUNTER_OVERFLOW)
+ *cnt += MIB_COUNTER_VALUE + 1;
+ *cnt += data & MIB_COUNTER_VALUE;
+ break;
+ }
+ }
+ mutex_unlock(&dev->alu_mutex);
+}
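+
The overflow handling folds a narrow hardware counter into the 64-bit software counter: when the overflow flag is set, one full wrap (mask + 1) is credited before the current raw value is added, keeping the accumulated counter monotonic. A worked userspace example (the counter width is assumed, not taken from the datasheet):

/* Folding a narrow hardware counter into a 64-bit software counter. */
#include <stdio.h>
#include <stdint.h>

#define COUNTER_MASK 0x3fffffffu	/* assumed 30-bit counter */

static void fold(uint64_t *cnt, uint32_t raw, int overflow)
{
	if (overflow)
		*cnt += (uint64_t)COUNTER_MASK + 1;	/* one full wrap */
	*cnt += raw & COUNTER_MASK;
}

int main(void)
{
	uint64_t cnt = 0;

	fold(&cnt, 0x10, 1);	/* wrapped once, now at 0x10 */
	printf("cnt=%llu\n", (unsigned long long)cnt);
	return 0;
}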
+
+static void ksz8795_r_mib_pkt(struct ksz_device *dev, int port, u16 addr,
+ u64 *dropped, u64 *cnt)
+{
+ u16 ctrl_addr;
+ u32 data;
+ u8 check;
+ int loop;
+
+ addr -= SWITCH_COUNTER_NUM;
+ ctrl_addr = (KS_MIB_TOTAL_RX_1 - KS_MIB_TOTAL_RX_0) * port;
+ ctrl_addr += addr + KS_MIB_TOTAL_RX_0;
+ ctrl_addr |= IND_ACC_TABLE(TABLE_MIB | TABLE_READ);
+
+ mutex_lock(&dev->alu_mutex);
+ ksz_write16(dev, REG_IND_CTRL_0, ctrl_addr);
+
+ /* The valid bit is almost always set on the first read because of
+ * the slow SPI speed.
+ */
+ for (loop = 2; loop > 0; loop--) {
+ ksz_read8(dev, REG_IND_MIB_CHECK, &check);
+
+ if (check & MIB_COUNTER_VALID) {
+ ksz_read32(dev, REG_IND_DATA_LO, &data);
+ if (addr < 2) {
+ u64 total;
+
+ total = check & MIB_TOTAL_BYTES_H;
+ total <<= 32;
+ *cnt += total;
+ *cnt += data;
+ if (check & MIB_COUNTER_OVERFLOW) {
+ total = MIB_TOTAL_BYTES_H + 1;
+ total <<= 32;
+ *cnt += total;
+ }
+ } else {
+ if (check & MIB_COUNTER_OVERFLOW)
+ *cnt += MIB_PACKET_DROPPED + 1;
+ *cnt += data & MIB_PACKET_DROPPED;
+ }
+ break;
+ }
+ }
+ mutex_unlock(&dev->alu_mutex);
+}
+
+static void ksz8795_freeze_mib(struct ksz_device *dev, int port, bool freeze)
+{
+ /* enable the port for flush/freeze function */
+ if (freeze)
+ ksz_cfg(dev, REG_SW_CTRL_6, BIT(port), true);
+ ksz_cfg(dev, REG_SW_CTRL_6, SW_MIB_COUNTER_FREEZE, freeze);
+
+ /* disable the port after freeze is done */
+ if (!freeze)
+ ksz_cfg(dev, REG_SW_CTRL_6, BIT(port), false);
+}
+
+static void ksz8795_port_init_cnt(struct ksz_device *dev, int port)
+{
+ struct ksz_port_mib *mib = &dev->ports[port].mib;
+
+ /* flush all enabled port MIB counters */
+ ksz_cfg(dev, REG_SW_CTRL_6, BIT(port), true);
+ ksz_cfg(dev, REG_SW_CTRL_6, SW_MIB_COUNTER_FLUSH, true);
+ ksz_cfg(dev, REG_SW_CTRL_6, BIT(port), false);
+
+ mib->cnt_ptr = 0;
+
+ /* Some ports may not have MIB counters before SWITCH_COUNTER_NUM. */
+ while (mib->cnt_ptr < dev->reg_mib_cnt) {
+ dev->dev_ops->r_mib_cnt(dev, port, mib->cnt_ptr,
+ &mib->counters[mib->cnt_ptr]);
+ ++mib->cnt_ptr;
+ }
+
+ /* Some ports may not have MIB counters after SWITCH_COUNTER_NUM. */
+ while (mib->cnt_ptr < dev->mib_cnt) {
+ dev->dev_ops->r_mib_pkt(dev, port, mib->cnt_ptr,
+ NULL, &mib->counters[mib->cnt_ptr]);
+ ++mib->cnt_ptr;
+ }
+ mib->cnt_ptr = 0;
+ memset(mib->counters, 0, dev->mib_cnt * sizeof(u64));
+}
+
+static void ksz8795_r_table(struct ksz_device *dev, int table, u16 addr,
+ u64 *data)
+{
+ u16 ctrl_addr;
+
+ ctrl_addr = IND_ACC_TABLE(table | TABLE_READ) | addr;
+
+ mutex_lock(&dev->alu_mutex);
+ ksz_write16(dev, REG_IND_CTRL_0, ctrl_addr);
+ ksz_read64(dev, REG_IND_DATA_HI, data);
+ mutex_unlock(&dev->alu_mutex);
+}
+
+static void ksz8795_w_table(struct ksz_device *dev, int table, u16 addr,
+ u64 data)
+{
+ u16 ctrl_addr;
+
+ ctrl_addr = IND_ACC_TABLE(table) | addr;
+
+ mutex_lock(&dev->alu_mutex);
+ ksz_write64(dev, REG_IND_DATA_HI, data);
+ ksz_write16(dev, REG_IND_CTRL_0, ctrl_addr);
+ mutex_unlock(&dev->alu_mutex);
+}
+
+static int ksz8795_valid_dyn_entry(struct ksz_device *dev, u8 *data)
+{
+ int timeout = 100;
+
+ do {
+ ksz_read8(dev, REG_IND_DATA_CHECK, data);
+ timeout--;
+ } while ((*data & DYNAMIC_MAC_TABLE_NOT_READY) && timeout);
+
+ /* Entry is not ready for accessing. */
+ if (*data & DYNAMIC_MAC_TABLE_NOT_READY) {
+ return -EAGAIN;
+ /* Entry is ready for accessing. */
+ } else {
+ ksz_read8(dev, REG_IND_DATA_8, data);
+
+ /* There is no valid entry in the table. */
+ if (*data & DYNAMIC_MAC_TABLE_MAC_EMPTY)
+ return -ENXIO;
+ }
+ return 0;
+}
+
+static int ksz8795_r_dyn_mac_table(struct ksz_device *dev, u16 addr,
+ u8 *mac_addr, u8 *fid, u8 *src_port,
+ u8 *timestamp, u16 *entries)
+{
+ u32 data_hi, data_lo;
+ u16 ctrl_addr;
+ u8 data;
+ int rc;
+
+ ctrl_addr = IND_ACC_TABLE(TABLE_DYNAMIC_MAC | TABLE_READ) | addr;
+
+ mutex_lock(&dev->alu_mutex);
+ ksz_write16(dev, REG_IND_CTRL_0, ctrl_addr);
+
+ rc = ksz8795_valid_dyn_entry(dev, &data);
+ if (rc == -EAGAIN) {
+ if (addr == 0)
+ *entries = 0;
+ } else if (rc == -ENXIO) {
+ *entries = 0;
+ /* At least one valid entry in the table. */
+ } else {
+ u64 buf = 0;
+ int cnt;
+
+ ksz_read64(dev, REG_IND_DATA_HI, &buf);
+ data_hi = (u32)(buf >> 32);
+ data_lo = (u32)buf;
+
+ /* Check how many valid entries are in the table. */
+ cnt = data & DYNAMIC_MAC_TABLE_ENTRIES_H;
+ cnt <<= DYNAMIC_MAC_ENTRIES_H_S;
+ cnt |= (data_hi & DYNAMIC_MAC_TABLE_ENTRIES) >>
+ DYNAMIC_MAC_ENTRIES_S;
+ *entries = cnt + 1;
+
+ *fid = (data_hi & DYNAMIC_MAC_TABLE_FID) >>
+ DYNAMIC_MAC_FID_S;
+ *src_port = (data_hi & DYNAMIC_MAC_TABLE_SRC_PORT) >>
+ DYNAMIC_MAC_SRC_PORT_S;
+ *timestamp = (data_hi & DYNAMIC_MAC_TABLE_TIMESTAMP) >>
+ DYNAMIC_MAC_TIMESTAMP_S;
+
+ mac_addr[5] = (u8)data_lo;
+ mac_addr[4] = (u8)(data_lo >> 8);
+ mac_addr[3] = (u8)(data_lo >> 16);
+ mac_addr[2] = (u8)(data_lo >> 24);
+
+ mac_addr[1] = (u8)data_hi;
+ mac_addr[0] = (u8)(data_hi >> 8);
+ rc = 0;
+ }
+ mutex_unlock(&dev->alu_mutex);
+
+ return rc;
+}
+
+static int ksz8795_r_sta_mac_table(struct ksz_device *dev, u16 addr,
+ struct alu_struct *alu)
+{
+ u32 data_hi, data_lo;
+ u64 data;
+
+ ksz8795_r_table(dev, TABLE_STATIC_MAC, addr, &data);
+ data_hi = data >> 32;
+ data_lo = (u32)data;
+ if (data_hi & (STATIC_MAC_TABLE_VALID | STATIC_MAC_TABLE_OVERRIDE)) {
+ alu->mac[5] = (u8)data_lo;
+ alu->mac[4] = (u8)(data_lo >> 8);
+ alu->mac[3] = (u8)(data_lo >> 16);
+ alu->mac[2] = (u8)(data_lo >> 24);
+ alu->mac[1] = (u8)data_hi;
+ alu->mac[0] = (u8)(data_hi >> 8);
+ alu->port_forward = (data_hi & STATIC_MAC_TABLE_FWD_PORTS) >>
+ STATIC_MAC_FWD_PORTS_S;
+ alu->is_override =
+ (data_hi & STATIC_MAC_TABLE_OVERRIDE) ? 1 : 0;
+ data_hi >>= 1;
+ alu->is_use_fid = (data_hi & STATIC_MAC_TABLE_USE_FID) ? 1 : 0;
+ alu->fid = (data_hi & STATIC_MAC_TABLE_FID) >>
+ STATIC_MAC_FID_S;
+ return 0;
+ }
+ return -ENXIO;
+}
+
+static void ksz8795_w_sta_mac_table(struct ksz_device *dev, u16 addr,
+ struct alu_struct *alu)
+{
+ u32 data_hi, data_lo;
+ u64 data;
+
+ data_lo = ((u32)alu->mac[2] << 24) |
+ ((u32)alu->mac[3] << 16) |
+ ((u32)alu->mac[4] << 8) | alu->mac[5];
+ data_hi = ((u32)alu->mac[0] << 8) | alu->mac[1];
+ data_hi |= (u32)alu->port_forward << STATIC_MAC_FWD_PORTS_S;
+
+ if (alu->is_override)
+ data_hi |= STATIC_MAC_TABLE_OVERRIDE;
+ if (alu->is_use_fid) {
+ data_hi |= STATIC_MAC_TABLE_USE_FID;
+ data_hi |= (u32)alu->fid << STATIC_MAC_FID_S;
+ }
+ if (alu->is_static)
+ data_hi |= STATIC_MAC_TABLE_VALID;
+ else
+ data_hi &= ~STATIC_MAC_TABLE_OVERRIDE;
+
+ data = (u64)data_hi << 32 | data_lo;
+ ksz8795_w_table(dev, TABLE_STATIC_MAC, addr, data);
+}
+
+static void ksz8795_from_vlan(u16 vlan, u8 *fid, u8 *member, u8 *valid)
+{
+ *fid = vlan & VLAN_TABLE_FID;
+ *member = (vlan & VLAN_TABLE_MEMBERSHIP) >> VLAN_TABLE_MEMBERSHIP_S;
+ *valid = !!(vlan & VLAN_TABLE_VALID);
+}
+
+static void ksz8795_to_vlan(u8 fid, u8 member, u8 valid, u16 *vlan)
+{
+ *vlan = fid;
+ *vlan |= (u16)member << VLAN_TABLE_MEMBERSHIP_S;
+ if (valid)
+ *vlan |= VLAN_TABLE_VALID;
+}
+
+static void ksz8795_r_vlan_entries(struct ksz_device *dev, u16 addr)
+{
+ u64 data;
+ int i;
+
+ ksz8795_r_table(dev, TABLE_VLAN, addr, &data);
+ addr *= 4;
+ for (i = 0; i < 4; i++) {
+ dev->vlan_cache[addr + i].table[0] = (u16)data;
+ data >>= VLAN_TABLE_S;
+ }
+}
+
+static void ksz8795_r_vlan_table(struct ksz_device *dev, u16 vid, u16 *vlan)
+{
+ int index;
+ u16 *data;
+ u16 addr;
+ u64 buf;
+
+ data = (u16 *)&buf;
+ addr = vid / 4;
+ index = vid & 3;
+ ksz8795_r_table(dev, TABLE_VLAN, addr, &buf);
+ *vlan = data[index];
+}
+
+static void ksz8795_w_vlan_table(struct ksz_device *dev, u16 vid, u16 vlan)
+{
+ int index;
+ u16 *data;
+ u16 addr;
+ u64 buf;
+
+ data = (u16 *)&buf;
+ addr = vid / 4;
+ index = vid & 3;
+ ksz8795_r_table(dev, TABLE_VLAN, addr, &buf);
+ data[index] = vlan;
+ dev->vlan_cache[vid].table[0] = vlan;
+ ksz8795_w_table(dev, TABLE_VLAN, addr, buf);
+}
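+
The indirect VLAN table packs four 16-bit VLAN entries per 64-bit word, so vid / 4 selects the word and vid & 3 the slot; the read-modify-write above preserves the three sibling entries. A worked example of the addressing (assuming VLAN_TABLE_S == 16, consistent with the u16 view of the 64-bit buffer in the code above):

/* VLAN table addressing: four 16-bit entries per 64-bit word. */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint16_t vid = 7;
	uint16_t addr = vid / 4;	/* table word 1 */
	int index = vid & 3;		/* slot 3 within the word */

	uint64_t word = 0;
	uint16_t entry = 0x1234;
	word |= (uint64_t)entry << (16 * index);	/* write the slot */

	printf("vid %u -> addr %u slot %d, word=0x%016llx\n",
	       vid, addr, index, (unsigned long long)word);
	return 0;
}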
+
+static void ksz8795_r_phy(struct ksz_device *dev, u16 phy, u16 reg, u16 *val)
+{
+ u8 restart, speed, ctrl, link;
+ int processed = true;
+ u16 data = 0;
+ u8 p = phy;
+
+ switch (reg) {
+ case PHY_REG_CTRL:
+ ksz_pread8(dev, p, P_NEG_RESTART_CTRL, &restart);
+ ksz_pread8(dev, p, P_SPEED_STATUS, &speed);
+ ksz_pread8(dev, p, P_FORCE_CTRL, &ctrl);
+ if (restart & PORT_PHY_LOOPBACK)
+ data |= PHY_LOOPBACK;
+ if (ctrl & PORT_FORCE_100_MBIT)
+ data |= PHY_SPEED_100MBIT;
+ if (!(ctrl & PORT_AUTO_NEG_DISABLE))
+ data |= PHY_AUTO_NEG_ENABLE;
+ if (restart & PORT_POWER_DOWN)
+ data |= PHY_POWER_DOWN;
+ if (restart & PORT_AUTO_NEG_RESTART)
+ data |= PHY_AUTO_NEG_RESTART;
+ if (ctrl & PORT_FORCE_FULL_DUPLEX)
+ data |= PHY_FULL_DUPLEX;
+ if (speed & PORT_HP_MDIX)
+ data |= PHY_HP_MDIX;
+ if (restart & PORT_FORCE_MDIX)
+ data |= PHY_FORCE_MDIX;
+ if (restart & PORT_AUTO_MDIX_DISABLE)
+ data |= PHY_AUTO_MDIX_DISABLE;
+ if (restart & PORT_TX_DISABLE)
+ data |= PHY_TRANSMIT_DISABLE;
+ if (restart & PORT_LED_OFF)
+ data |= PHY_LED_DISABLE;
+ break;
+ case PHY_REG_STATUS:
+ ksz_pread8(dev, p, P_LINK_STATUS, &link);
+ data = PHY_100BTX_FD_CAPABLE |
+ PHY_100BTX_CAPABLE |
+ PHY_10BT_FD_CAPABLE |
+ PHY_10BT_CAPABLE |
+ PHY_AUTO_NEG_CAPABLE;
+ if (link & PORT_AUTO_NEG_COMPLETE)
+ data |= PHY_AUTO_NEG_ACKNOWLEDGE;
+ if (link & PORT_STAT_LINK_GOOD)
+ data |= PHY_LINK_STATUS;
+ break;
+ case PHY_REG_ID_1:
+ data = KSZ8795_ID_HI;
+ break;
+ case PHY_REG_ID_2:
+ data = KSZ8795_ID_LO;
+ break;
+ case PHY_REG_AUTO_NEGOTIATION:
+ ksz_pread8(dev, p, P_LOCAL_CTRL, &ctrl);
+ data = PHY_AUTO_NEG_802_3;
+ if (ctrl & PORT_AUTO_NEG_SYM_PAUSE)
+ data |= PHY_AUTO_NEG_SYM_PAUSE;
+ if (ctrl & PORT_AUTO_NEG_100BTX_FD)
+ data |= PHY_AUTO_NEG_100BTX_FD;
+ if (ctrl & PORT_AUTO_NEG_100BTX)
+ data |= PHY_AUTO_NEG_100BTX;
+ if (ctrl & PORT_AUTO_NEG_10BT_FD)
+ data |= PHY_AUTO_NEG_10BT_FD;
+ if (ctrl & PORT_AUTO_NEG_10BT)
+ data |= PHY_AUTO_NEG_10BT;
+ break;
+ case PHY_REG_REMOTE_CAPABILITY:
+ ksz_pread8(dev, p, P_REMOTE_STATUS, &link);
+ data = PHY_AUTO_NEG_802_3;
+ if (link & PORT_REMOTE_SYM_PAUSE)
+ data |= PHY_AUTO_NEG_SYM_PAUSE;
+ if (link & PORT_REMOTE_100BTX_FD)
+ data |= PHY_AUTO_NEG_100BTX_FD;
+ if (link & PORT_REMOTE_100BTX)
+ data |= PHY_AUTO_NEG_100BTX;
+ if (link & PORT_REMOTE_10BT_FD)
+ data |= PHY_AUTO_NEG_10BT_FD;
+ if (link & PORT_REMOTE_10BT)
+ data |= PHY_AUTO_NEG_10BT;
+ if (data & ~PHY_AUTO_NEG_802_3)
+ data |= PHY_REMOTE_ACKNOWLEDGE_NOT;
+ break;
+ default:
+ processed = false;
+ break;
+ }
+ if (processed)
+ *val = data;
+}
+
+static void ksz8795_w_phy(struct ksz_device *dev, u16 phy, u16 reg, u16 val)
+{
+ u8 p = phy;
+ u8 restart, speed, ctrl, data;
+
+ switch (reg) {
+ case PHY_REG_CTRL:
+
+ /* Do not support PHY reset function. */
+ if (val & PHY_RESET)
+ break;
+ ksz_pread8(dev, p, P_SPEED_STATUS, &speed);
+ data = speed;
+ if (val & PHY_HP_MDIX)
+ data |= PORT_HP_MDIX;
+ else
+ data &= ~PORT_HP_MDIX;
+ if (data != speed)
+ ksz_pwrite8(dev, p, P_SPEED_STATUS, data);
+ ksz_pread8(dev, p, P_FORCE_CTRL, &ctrl);
+ data = ctrl;
+ if (!(val & PHY_AUTO_NEG_ENABLE))
+ data |= PORT_AUTO_NEG_DISABLE;
+ else
+ data &= ~PORT_AUTO_NEG_DISABLE;
+
+ /* Fiber port does not support auto-negotiation. */
+ if (dev->ports[p].fiber)
+ data |= PORT_AUTO_NEG_DISABLE;
+ if (val & PHY_SPEED_100MBIT)
+ data |= PORT_FORCE_100_MBIT;
+ else
+ data &= ~PORT_FORCE_100_MBIT;
+ if (val & PHY_FULL_DUPLEX)
+ data |= PORT_FORCE_FULL_DUPLEX;
+ else
+ data &= ~PORT_FORCE_FULL_DUPLEX;
+ if (data != ctrl)
+ ksz_pwrite8(dev, p, P_FORCE_CTRL, data);
+ ksz_pread8(dev, p, P_NEG_RESTART_CTRL, &restart);
+ data = restart;
+ if (val & PHY_LED_DISABLE)
+ data |= PORT_LED_OFF;
+ else
+ data &= ~PORT_LED_OFF;
+ if (val & PHY_TRANSMIT_DISABLE)
+ data |= PORT_TX_DISABLE;
+ else
+ data &= ~PORT_TX_DISABLE;
+ if (val & PHY_AUTO_NEG_RESTART)
+ data |= PORT_AUTO_NEG_RESTART;
+ else
+ data &= ~(PORT_AUTO_NEG_RESTART);
+ if (val & PHY_POWER_DOWN)
+ data |= PORT_POWER_DOWN;
+ else
+ data &= ~PORT_POWER_DOWN;
+ if (val & PHY_AUTO_MDIX_DISABLE)
+ data |= PORT_AUTO_MDIX_DISABLE;
+ else
+ data &= ~PORT_AUTO_MDIX_DISABLE;
+ if (val & PHY_FORCE_MDIX)
+ data |= PORT_FORCE_MDIX;
+ else
+ data &= ~PORT_FORCE_MDIX;
+ if (val & PHY_LOOPBACK)
+ data |= PORT_PHY_LOOPBACK;
+ else
+ data &= ~PORT_PHY_LOOPBACK;
+ if (data != restart)
+ ksz_pwrite8(dev, p, P_NEG_RESTART_CTRL, data);
+ break;
+ case PHY_REG_AUTO_NEGOTIATION:
+ ksz_pread8(dev, p, P_LOCAL_CTRL, &ctrl);
+ data = ctrl;
+ data &= ~(PORT_AUTO_NEG_SYM_PAUSE |
+ PORT_AUTO_NEG_100BTX_FD |
+ PORT_AUTO_NEG_100BTX |
+ PORT_AUTO_NEG_10BT_FD |
+ PORT_AUTO_NEG_10BT);
+ if (val & PHY_AUTO_NEG_SYM_PAUSE)
+ data |= PORT_AUTO_NEG_SYM_PAUSE;
+ if (val & PHY_AUTO_NEG_100BTX_FD)
+ data |= PORT_AUTO_NEG_100BTX_FD;
+ if (val & PHY_AUTO_NEG_100BTX)
+ data |= PORT_AUTO_NEG_100BTX;
+ if (val & PHY_AUTO_NEG_10BT_FD)
+ data |= PORT_AUTO_NEG_10BT_FD;
+ if (val & PHY_AUTO_NEG_10BT)
+ data |= PORT_AUTO_NEG_10BT;
+ if (data != ctrl)
+ ksz_pwrite8(dev, p, P_LOCAL_CTRL, data);
+ break;
+ default:
+ break;
+ }
+}
+
+static enum dsa_tag_protocol ksz8795_get_tag_protocol(struct dsa_switch *ds,
+ int port)
+{
+ return DSA_TAG_PROTO_KSZ8795;
+}
+
+static void ksz8795_get_strings(struct dsa_switch *ds, int port,
+ u32 stringset, uint8_t *buf)
+{
+ int i;
+
+ for (i = 0; i < TOTAL_SWITCH_COUNTER_NUM; i++) {
+ memcpy(buf + i * ETH_GSTRING_LEN, mib_names[i].string,
+ ETH_GSTRING_LEN);
+ }
+}
+
+static void ksz8795_cfg_port_member(struct ksz_device *dev, int port,
+ u8 member)
+{
+ u8 data;
+
+ ksz_pread8(dev, port, P_MIRROR_CTRL, &data);
+ data &= ~PORT_VLAN_MEMBERSHIP;
+ data |= (member & dev->port_mask);
+ ksz_pwrite8(dev, port, P_MIRROR_CTRL, data);
+ dev->ports[port].member = member;
+}
+
+static void ksz8795_port_stp_state_set(struct dsa_switch *ds, int port,
+ u8 state)
+{
+ struct ksz_device *dev = ds->priv;
+ int forward = dev->member;
+ struct ksz_port *p;
+ int member = -1;
+ u8 data;
+
+ p = &dev->ports[port];
+
+ ksz_pread8(dev, port, P_STP_CTRL, &data);
+ data &= ~(PORT_TX_ENABLE | PORT_RX_ENABLE | PORT_LEARN_DISABLE);
+
+ switch (state) {
+ case BR_STATE_DISABLED:
+ data |= PORT_LEARN_DISABLE;
+ if (port < SWITCH_PORT_NUM)
+ member = 0;
+ break;
+ case BR_STATE_LISTENING:
+ data |= (PORT_RX_ENABLE | PORT_LEARN_DISABLE);
+ if (port < SWITCH_PORT_NUM &&
+ p->stp_state == BR_STATE_DISABLED)
+ member = dev->host_mask | p->vid_member;
+ break;
+ case BR_STATE_LEARNING:
+ data |= PORT_RX_ENABLE;
+ break;
+ case BR_STATE_FORWARDING:
+ data |= (PORT_TX_ENABLE | PORT_RX_ENABLE);
+
+ /* This function is also used internally. */
+ if (port == dev->cpu_port)
+ break;
+
+ /* Port is a member of a bridge. */
+ if (dev->br_member & BIT(port)) {
+ dev->member |= BIT(port);
+ member = dev->member;
+ } else {
+ member = dev->host_mask | p->vid_member;
+ }
+ break;
+ case BR_STATE_BLOCKING:
+ data |= PORT_LEARN_DISABLE;
+ if (port < SWITCH_PORT_NUM &&
+ p->stp_state == BR_STATE_DISABLED)
+ member = dev->host_mask | p->vid_member;
+ break;
+ default:
+ dev_err(ds->dev, "invalid STP state: %d\n", state);
+ return;
+ }
+
+ ksz_pwrite8(dev, port, P_STP_CTRL, data);
+ p->stp_state = state;
+ if (data & PORT_RX_ENABLE)
+ dev->rx_ports |= BIT(port);
+ else
+ dev->rx_ports &= ~BIT(port);
+ if (data & PORT_TX_ENABLE)
+ dev->tx_ports |= BIT(port);
+ else
+ dev->tx_ports &= ~BIT(port);
+
+ /* Port membership may share register with STP state. */
+ if (member >= 0 && member != p->member)
+ ksz8795_cfg_port_member(dev, port, (u8)member);
+
+ /* Check if forwarding needs to be updated. */
+ if (state != BR_STATE_FORWARDING) {
+ if (dev->br_member & BIT(port))
+ dev->member &= ~BIT(port);
+ }
+
+ /* When topology has changed the function ksz_update_port_member
+ * should be called to modify port forwarding behavior.
+ */
+ if (forward != dev->member)
+ ksz_update_port_member(dev, port);
+}
+
+static void ksz8795_flush_dyn_mac_table(struct ksz_device *dev, int port)
+{
+ u8 learn[TOTAL_PORT_NUM];
+ int first, index, cnt;
+ struct ksz_port *p;
+
+ if ((unsigned int)port < TOTAL_PORT_NUM) {
+ first = port;
+ cnt = port + 1;
+ } else {
+ /* Flush all ports. */
+ first = 0;
+ cnt = dev->mib_port_cnt;
+ }
+ for (index = first; index < cnt; index++) {
+ p = &dev->ports[index];
+ if (!p->on)
+ continue;
+ ksz_pread8(dev, index, P_STP_CTRL, &learn[index]);
+ if (!(learn[index] & PORT_LEARN_DISABLE))
+ ksz_pwrite8(dev, index, P_STP_CTRL,
+ learn[index] | PORT_LEARN_DISABLE);
+ }
+ ksz_cfg(dev, S_FLUSH_TABLE_CTRL, SW_FLUSH_DYN_MAC_TABLE, true);
+ for (index = first; index < cnt; index++) {
+ p = &dev->ports[index];
+ if (!p->on)
+ continue;
+ if (!(learn[index] & PORT_LEARN_DISABLE))
+ ksz_pwrite8(dev, index, P_STP_CTRL, learn[index]);
+ }
+}
+
+static int ksz8795_port_vlan_filtering(struct dsa_switch *ds, int port,
+ bool flag)
+{
+ struct ksz_device *dev = ds->priv;
+
+ ksz_cfg(dev, S_MIRROR_CTRL, SW_VLAN_ENABLE, flag);
+
+ return 0;
+}
+
+static void ksz8795_port_vlan_add(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_vlan *vlan)
+{
+ bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
+ struct ksz_device *dev = ds->priv;
+ u16 data, vid, new_pvid = 0;
+ u8 fid, member, valid;
+
+ ksz_port_cfg(dev, port, P_TAG_CTRL, PORT_REMOVE_TAG, untagged);
+
+ for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) {
+ ksz8795_r_vlan_table(dev, vid, &data);
+ ksz8795_from_vlan(data, &fid, &member, &valid);
+
+ /* First time to setup the VLAN entry. */
+ if (!valid) {
+ /* Need to find a way to map VID to FID. */
+ fid = 1;
+ valid = 1;
+ }
+ member |= BIT(port);
+
+ ksz8795_to_vlan(fid, member, valid, &data);
+ ksz8795_w_vlan_table(dev, vid, data);
+
+ /* change PVID */
+ if (vlan->flags & BRIDGE_VLAN_INFO_PVID)
+ new_pvid = vid;
+ }
+
+ if (new_pvid) {
+ ksz_pread16(dev, port, REG_PORT_CTRL_VID, &vid);
+ vid &= 0xfff;
+ vid |= new_pvid;
+ ksz_pwrite16(dev, port, REG_PORT_CTRL_VID, vid);
+ }
+}
+
+static int ksz8795_port_vlan_del(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_vlan *vlan)
+{
+ bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
+ struct ksz_device *dev = ds->priv;
+ u16 data, vid, pvid, new_pvid = 0;
+ u8 fid, member, valid;
+
+ ksz_pread16(dev, port, REG_PORT_CTRL_VID, &pvid);
+ pvid = pvid & 0xFFF;
+
+ ksz_port_cfg(dev, port, P_TAG_CTRL, PORT_REMOVE_TAG, untagged);
+
+ for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) {
+ ksz8795_r_vlan_table(dev, vid, &data);
+ ksz8795_from_vlan(data, &fid, &member, &valid);
+
+ member &= ~BIT(port);
+
+ /* Invalidate the entry if no more member. */
+ if (!member) {
+ fid = 0;
+ valid = 0;
+ }
+
+ if (pvid == vid)
+ new_pvid = 1;
+
+ ksz8795_to_vlan(fid, member, valid, &data);
+ ksz8795_w_vlan_table(dev, vid, data);
+ }
+
+ if (new_pvid != pvid)
+ ksz_pwrite16(dev, port, REG_PORT_CTRL_VID, pvid);
+
+ return 0;
+}
+
+static int ksz8795_port_mirror_add(struct dsa_switch *ds, int port,
+ struct dsa_mall_mirror_tc_entry *mirror,
+ bool ingress)
+{
+ struct ksz_device *dev = ds->priv;
+
+ if (ingress) {
+ ksz_port_cfg(dev, port, P_MIRROR_CTRL, PORT_MIRROR_RX, true);
+ dev->mirror_rx |= BIT(port);
+ } else {
+ ksz_port_cfg(dev, port, P_MIRROR_CTRL, PORT_MIRROR_TX, true);
+ dev->mirror_tx |= BIT(port);
+ }
+
+ ksz_port_cfg(dev, port, P_MIRROR_CTRL, PORT_MIRROR_SNIFFER, false);
+
+ /* configure mirror port */
+ if (dev->mirror_rx || dev->mirror_tx)
+ ksz_port_cfg(dev, mirror->to_local_port, P_MIRROR_CTRL,
+ PORT_MIRROR_SNIFFER, true);
+
+ return 0;
+}
+
+static void ksz8795_port_mirror_del(struct dsa_switch *ds, int port,
+ struct dsa_mall_mirror_tc_entry *mirror)
+{
+ struct ksz_device *dev = ds->priv;
+ u8 data;
+
+ if (mirror->ingress) {
+ ksz_port_cfg(dev, port, P_MIRROR_CTRL, PORT_MIRROR_RX, false);
+ dev->mirror_rx &= ~BIT(port);
+ } else {
+ ksz_port_cfg(dev, port, P_MIRROR_CTRL, PORT_MIRROR_TX, false);
+ dev->mirror_tx &= ~BIT(port);
+ }
+
+ ksz_pread8(dev, port, P_MIRROR_CTRL, &data);
+
+ if (!dev->mirror_rx && !dev->mirror_tx)
+ ksz_port_cfg(dev, mirror->to_local_port, P_MIRROR_CTRL,
+ PORT_MIRROR_SNIFFER, false);
+}
+
+static void ksz8795_port_setup(struct ksz_device *dev, int port, bool cpu_port)
+{
+ struct ksz_port *p = &dev->ports[port];
+ u8 data8, member;
+
+ /* enable broadcast storm limit */
+ ksz_port_cfg(dev, port, P_BCAST_STORM_CTRL, PORT_BROADCAST_STORM, true);
+
+ ksz8795_set_prio_queue(dev, port, 4);
+
+ /* disable DiffServ priority */
+ ksz_port_cfg(dev, port, P_PRIO_CTRL, PORT_DIFFSERV_ENABLE, false);
+
+ /* replace priority */
+ ksz_port_cfg(dev, port, P_802_1P_CTRL, PORT_802_1P_REMAPPING, false);
+
+ /* enable 802.1p priority */
+ ksz_port_cfg(dev, port, P_PRIO_CTRL, PORT_802_1P_ENABLE, true);
+
+ if (cpu_port) {
+ /* Configure MII interface for proper network communication. */
+ ksz_read8(dev, REG_PORT_5_CTRL_6, &data8);
+ data8 &= ~PORT_INTERFACE_TYPE;
+ data8 &= ~PORT_GMII_1GPS_MODE;
+ switch (dev->interface) {
+ case PHY_INTERFACE_MODE_MII:
+ p->phydev.speed = SPEED_100;
+ break;
+ case PHY_INTERFACE_MODE_RMII:
+ data8 |= PORT_INTERFACE_RMII;
+ p->phydev.speed = SPEED_100;
+ break;
+ case PHY_INTERFACE_MODE_GMII:
+ data8 |= PORT_GMII_1GPS_MODE;
+ data8 |= PORT_INTERFACE_GMII;
+ p->phydev.speed = SPEED_1000;
+ break;
+ default:
+ data8 &= ~PORT_RGMII_ID_IN_ENABLE;
+ data8 &= ~PORT_RGMII_ID_OUT_ENABLE;
+ if (dev->interface == PHY_INTERFACE_MODE_RGMII_ID ||
+ dev->interface == PHY_INTERFACE_MODE_RGMII_RXID)
+ data8 |= PORT_RGMII_ID_IN_ENABLE;
+ if (dev->interface == PHY_INTERFACE_MODE_RGMII_ID ||
+ dev->interface == PHY_INTERFACE_MODE_RGMII_TXID)
+ data8 |= PORT_RGMII_ID_OUT_ENABLE;
+ data8 |= PORT_GMII_1GPS_MODE;
+ data8 |= PORT_INTERFACE_RGMII;
+ p->phydev.speed = SPEED_1000;
+ break;
+ }
+ ksz_write8(dev, REG_PORT_5_CTRL_6, data8);
+ p->phydev.duplex = DUPLEX_FULL;
+
+ member = dev->port_mask;
+ dev->on_ports = dev->host_mask;
+ dev->live_ports = dev->host_mask;
+ } else {
+ member = dev->host_mask | p->vid_member;
+ dev->on_ports |= BIT(port);
+
+ /* The link may have been detected before the port is enabled. */
+ if (p->phydev.link)
+ dev->live_ports |= BIT(port);
+ }
+ ksz8795_cfg_port_member(dev, port, member);
+}
+
+static void ksz8795_config_cpu_port(struct dsa_switch *ds)
+{
+ struct ksz_device *dev = ds->priv;
+ struct ksz_port *p;
+ u8 remote;
+ int i;
+
+ ds->num_ports = dev->port_cnt + 1;
+
+ /* A maximum-size frame plus the tail-tag byte would be flagged as
+ * oversized, so disable the legal packet size check.
+ */
+ ksz_cfg(dev, REG_SW_CTRL_2, SW_LEGAL_PACKET_DISABLE, true);
+ ksz_cfg(dev, S_TAIL_TAG_CTRL, SW_TAIL_TAG_ENABLE, true);
+
+ p = &dev->ports[dev->cpu_port];
+ p->vid_member = dev->port_mask;
+ p->on = 1;
+
+ ksz8795_port_setup(dev, dev->cpu_port, true);
+ dev->member = dev->host_mask;
+
+ for (i = 0; i < SWITCH_PORT_NUM; i++) {
+ p = &dev->ports[i];
+
+ /* Initialize to non-zero so that ksz8795_cfg_port_member() will
+ * be called.
+ */
+ p->vid_member = BIT(i);
+ p->member = dev->port_mask;
+ ksz8795_port_stp_state_set(ds, i, BR_STATE_DISABLED);
+
+ /* Last port may be disabled. */
+ if (i == dev->port_cnt)
+ break;
+ p->on = 1;
+ p->phy = 1;
+ }
+ for (i = 0; i < dev->phy_port_cnt; i++) {
+ p = &dev->ports[i];
+ if (!p->on)
+ continue;
+ ksz_pread8(dev, i, P_REMOTE_STATUS, &remote);
+ if (remote & PORT_FIBER_MODE)
+ p->fiber = 1;
+ if (p->fiber)
+ ksz_port_cfg(dev, i, P_STP_CTRL, PORT_FORCE_FLOW_CTRL,
+ true);
+ else
+ ksz_port_cfg(dev, i, P_STP_CTRL, PORT_FORCE_FLOW_CTRL,
+ false);
+ }
+}
+
+static int ksz8795_setup(struct dsa_switch *ds)
+{
+ struct ksz_device *dev = ds->priv;
+ struct alu_struct alu;
+ int i, ret = 0;
+
+ dev->vlan_cache = devm_kcalloc(dev->dev, dev->num_vlans,
+ sizeof(struct vlan_table), GFP_KERNEL);
+ if (!dev->vlan_cache)
+ return -ENOMEM;
+
+ ret = ksz8795_reset_switch(dev);
+ if (ret) {
+ dev_err(ds->dev, "failed to reset switch\n");
+ return ret;
+ }
+
+ ksz_cfg(dev, S_REPLACE_VID_CTRL, SW_FLOW_CTRL, true);
+
+ /* Enable automatic fast aging when a link change is detected. */
+ ksz_cfg(dev, S_LINK_AGING_CTRL, SW_LINK_AUTO_AGING, true);
+
+ /* Enable aggressive back off algorithm in half duplex mode. */
+ regmap_update_bits(dev->regmap[0], REG_SW_CTRL_1,
+ SW_AGGR_BACKOFF, SW_AGGR_BACKOFF);
+
+ /* Make sure unicast VLAN boundary is set as default and
+ * enable no-excessive-collision packet drop.
+ */
+ regmap_update_bits(dev->regmap[0], REG_SW_CTRL_2,
+ UNICAST_VLAN_BOUNDARY | NO_EXC_COLLISION_DROP,
+ UNICAST_VLAN_BOUNDARY | NO_EXC_COLLISION_DROP);
+
+ ksz8795_config_cpu_port(ds);
+
+ ksz_cfg(dev, REG_SW_CTRL_2, MULTICAST_STORM_DISABLE, true);
+
+ ksz_cfg(dev, S_REPLACE_VID_CTRL, SW_REPLACE_VID, false);
+
+ ksz_cfg(dev, S_MIRROR_CTRL, SW_MIRROR_RX_TX, false);
+
+ /* set broadcast storm protection 10% rate */
+ regmap_update_bits(dev->regmap[1], S_REPLACE_VID_CTRL,
+ BROADCAST_STORM_RATE,
+ (BROADCAST_STORM_VALUE *
+ BROADCAST_STORM_PROT_RATE) / 100);
+
+ for (i = 0; i < VLAN_TABLE_ENTRIES; i++)
+ ksz8795_r_vlan_entries(dev, i);
+
+ /* Setup STP address for STP operation. */
+ memset(&alu, 0, sizeof(alu));
+ ether_addr_copy(alu.mac, eth_stp_addr);
+ alu.is_static = true;
+ alu.is_override = true;
+ alu.port_forward = dev->host_mask;
+
+ ksz8795_w_sta_mac_table(dev, 0, &alu);
+
+ ksz_init_mib_timer(dev);
+
+ return 0;
+}
+
+static const struct dsa_switch_ops ksz8795_switch_ops = {
+ .get_tag_protocol = ksz8795_get_tag_protocol,
+ .setup = ksz8795_setup,
+ .phy_read = ksz_phy_read16,
+ .phy_write = ksz_phy_write16,
+ .adjust_link = ksz_adjust_link,
+ .port_enable = ksz_enable_port,
+ .port_disable = ksz_disable_port,
+ .get_strings = ksz8795_get_strings,
+ .get_ethtool_stats = ksz_get_ethtool_stats,
+ .get_sset_count = ksz_sset_count,
+ .port_bridge_join = ksz_port_bridge_join,
+ .port_bridge_leave = ksz_port_bridge_leave,
+ .port_stp_state_set = ksz8795_port_stp_state_set,
+ .port_fast_age = ksz_port_fast_age,
+ .port_vlan_filtering = ksz8795_port_vlan_filtering,
+ .port_vlan_prepare = ksz_port_vlan_prepare,
+ .port_vlan_add = ksz8795_port_vlan_add,
+ .port_vlan_del = ksz8795_port_vlan_del,
+ .port_fdb_dump = ksz_port_fdb_dump,
+ .port_mdb_prepare = ksz_port_mdb_prepare,
+ .port_mdb_add = ksz_port_mdb_add,
+ .port_mdb_del = ksz_port_mdb_del,
+ .port_mirror_add = ksz8795_port_mirror_add,
+ .port_mirror_del = ksz8795_port_mirror_del,
+};
+
+static u32 ksz8795_get_port_addr(int port, int offset)
+{
+ return PORT_CTRL_ADDR(port, offset);
+}
+
+static int ksz8795_switch_detect(struct ksz_device *dev)
+{
+ u8 id1, id2;
+ u16 id16;
+ int ret;
+
+ /* read chip id */
+ ret = ksz_read16(dev, REG_CHIP_ID0, &id16);
+ if (ret)
+ return ret;
+
+ id1 = id16 >> 8;
+ id2 = id16 & SW_CHIP_ID_M;
+ if (id1 != FAMILY_ID ||
+ (id2 != CHIP_ID_94 && id2 != CHIP_ID_95))
+ return -ENODEV;
+
+ dev->mib_port_cnt = TOTAL_PORT_NUM;
+ dev->phy_port_cnt = SWITCH_PORT_NUM;
+ dev->port_cnt = SWITCH_PORT_NUM;
+
+ if (id2 == CHIP_ID_95) {
+ u8 val;
+
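+ /* The KSZ8795 and KSZ8765 report the same chip ID; the
+ * fiber-mode status bit on port 1 is assumed to tell the
+ * two apart (see the PORT_FIBER_MODE check below).
+ */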
+ id2 = 0x95;
+ ksz_read8(dev, REG_PORT_1_STATUS_0, &val);
+ if (val & PORT_FIBER_MODE)
+ id2 = 0x65;
+ } else if (id2 == CHIP_ID_94) {
+ dev->port_cnt--;
+ dev->last_port = dev->port_cnt;
+ id2 = 0x94;
+ }
+ id16 &= ~0xff;
+ id16 |= id2;
+ dev->chip_id = id16;
+
+ dev->cpu_port = dev->mib_port_cnt - 1;
+ dev->host_mask = BIT(dev->cpu_port);
+
+ return 0;
+}
+
+struct ksz_chip_data {
+ u16 chip_id;
+ const char *dev_name;
+ int num_vlans;
+ int num_alus;
+ int num_statics;
+ int cpu_ports;
+ int port_cnt;
+};
+
+static const struct ksz_chip_data ksz8795_switch_chips[] = {
+ {
+ .chip_id = 0x8795,
+ .dev_name = "KSZ8795",
+ .num_vlans = 4096,
+ .num_alus = 0,
+ .num_statics = 8,
+ .cpu_ports = 0x10, /* can be configured as cpu port */
+ .port_cnt = 4, /* total physical port count */
+ },
+ {
+ .chip_id = 0x8794,
+ .dev_name = "KSZ8794",
+ .num_vlans = 4096,
+ .num_alus = 0,
+ .num_statics = 8,
+ .cpu_ports = 0x10, /* can be configured as cpu port */
+ .port_cnt = 3, /* total physical port count */
+ },
+ {
+ .chip_id = 0x8765,
+ .dev_name = "KSZ8765",
+ .num_vlans = 4096,
+ .num_alus = 0,
+ .num_statics = 8,
+ .cpu_ports = 0x10, /* can be configured as cpu port */
+ .port_cnt = 4, /* total physical port count */
+ },
+};
+
+static int ksz8795_switch_init(struct ksz_device *dev)
+{
+ int i;
+
+ mutex_init(&dev->stats_mutex);
+ mutex_init(&dev->alu_mutex);
+ mutex_init(&dev->vlan_mutex);
+
+ dev->ds->ops = &ksz8795_switch_ops;
+
+ for (i = 0; i < ARRAY_SIZE(ksz8795_switch_chips); i++) {
+ const struct ksz_chip_data *chip = &ksz8795_switch_chips[i];
+
+ if (dev->chip_id == chip->chip_id) {
+ dev->name = chip->dev_name;
+ dev->num_vlans = chip->num_vlans;
+ dev->num_alus = chip->num_alus;
+ dev->num_statics = chip->num_statics;
+ dev->port_cnt = chip->port_cnt;
+ dev->cpu_ports = chip->cpu_ports;
+
+ break;
+ }
+ }
+
+ /* no switch found */
+ if (!dev->cpu_ports)
+ return -ENODEV;
+
+ dev->port_mask = BIT(dev->port_cnt) - 1;
+ dev->port_mask |= dev->host_mask;
+
+ dev->reg_mib_cnt = SWITCH_COUNTER_NUM;
+ dev->mib_cnt = TOTAL_SWITCH_COUNTER_NUM;
+
+ i = dev->mib_port_cnt;
+ dev->ports = devm_kcalloc(dev->dev, i, sizeof(struct ksz_port),
+ GFP_KERNEL);
+ if (!dev->ports)
+ return -ENOMEM;
+ for (i = 0; i < dev->mib_port_cnt; i++) {
+ mutex_init(&dev->ports[i].mib.cnt_mutex);
+ dev->ports[i].mib.counters =
+ devm_kzalloc(dev->dev,
+ sizeof(u64) *
+ (TOTAL_SWITCH_COUNTER_NUM + 1),
+ GFP_KERNEL);
+ if (!dev->ports[i].mib.counters)
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static void ksz8795_switch_exit(struct ksz_device *dev)
+{
+ ksz8795_reset_switch(dev);
+}
+
+static const struct ksz_dev_ops ksz8795_dev_ops = {
+ .get_port_addr = ksz8795_get_port_addr,
+ .cfg_port_member = ksz8795_cfg_port_member,
+ .flush_dyn_mac_table = ksz8795_flush_dyn_mac_table,
+ .port_setup = ksz8795_port_setup,
+ .r_phy = ksz8795_r_phy,
+ .w_phy = ksz8795_w_phy,
+ .r_dyn_mac_table = ksz8795_r_dyn_mac_table,
+ .r_sta_mac_table = ksz8795_r_sta_mac_table,
+ .w_sta_mac_table = ksz8795_w_sta_mac_table,
+ .r_mib_cnt = ksz8795_r_mib_cnt,
+ .r_mib_pkt = ksz8795_r_mib_pkt,
+ .freeze_mib = ksz8795_freeze_mib,
+ .port_init_cnt = ksz8795_port_init_cnt,
+ .shutdown = ksz8795_reset_switch,
+ .detect = ksz8795_switch_detect,
+ .init = ksz8795_switch_init,
+ .exit = ksz8795_switch_exit,
+};
+
+int ksz8795_switch_register(struct ksz_device *dev)
+{
+ return ksz_switch_register(dev, &ksz8795_dev_ops);
+}
+EXPORT_SYMBOL(ksz8795_switch_register);
+
+MODULE_AUTHOR("Tristram Ha <Tristram.Ha@microchip.com>");
+MODULE_DESCRIPTION("Microchip KSZ8795 Series Switch DSA Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/dsa/microchip/ksz8795_reg.h b/drivers/net/dsa/microchip/ksz8795_reg.h
new file mode 100644
index 000000000000..3a50462df8fa
--- /dev/null
+++ b/drivers/net/dsa/microchip/ksz8795_reg.h
@@ -0,0 +1,1004 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Microchip KSZ8795 register definitions
+ *
+ * Copyright (c) 2017 Microchip Technology Inc.
+ * Tristram Ha <Tristram.Ha@microchip.com>
+ */
+
+#ifndef __KSZ8795_REG_H
+#define __KSZ8795_REG_H
+
+#define KS_PORT_M 0x1F
+
+#define KS_PRIO_M 0x3
+#define KS_PRIO_S 2
+
+#define REG_CHIP_ID0 0x00
+
+#define FAMILY_ID 0x87
+
+#define REG_CHIP_ID1 0x01
+
+#define SW_CHIP_ID_M 0xF0
+#define SW_CHIP_ID_S 4
+#define SW_REVISION_M 0x0E
+#define SW_REVISION_S 1
+#define SW_START 0x01
+
+#define CHIP_ID_94 0x60
+#define CHIP_ID_95 0x90
+
+#define REG_SW_CTRL_0 0x02
+
+#define SW_NEW_BACKOFF BIT(7)
+#define SW_GLOBAL_RESET BIT(6)
+#define SW_FLUSH_DYN_MAC_TABLE BIT(5)
+#define SW_FLUSH_STA_MAC_TABLE BIT(4)
+#define SW_LINK_AUTO_AGING BIT(0)
+
+#define REG_SW_CTRL_1 0x03
+
+#define SW_HUGE_PACKET BIT(6)
+#define SW_TX_FLOW_CTRL_DISABLE BIT(5)
+#define SW_RX_FLOW_CTRL_DISABLE BIT(4)
+#define SW_CHECK_LENGTH BIT(3)
+#define SW_AGING_ENABLE BIT(2)
+#define SW_FAST_AGING BIT(1)
+#define SW_AGGR_BACKOFF BIT(0)
+
+#define REG_SW_CTRL_2 0x04
+
+#define UNICAST_VLAN_BOUNDARY BIT(7)
+#define MULTICAST_STORM_DISABLE BIT(6)
+#define SW_BACK_PRESSURE BIT(5)
+#define FAIR_FLOW_CTRL BIT(4)
+#define NO_EXC_COLLISION_DROP BIT(3)
+#define SW_LEGAL_PACKET_DISABLE BIT(1)
+
+#define REG_SW_CTRL_3 0x05
+#define WEIGHTED_FAIR_QUEUE_ENABLE BIT(3)
+
+#define SW_VLAN_ENABLE BIT(7)
+#define SW_IGMP_SNOOP BIT(6)
+#define SW_MIRROR_RX_TX BIT(0)
+
+#define REG_SW_CTRL_4 0x06
+
+#define SW_HALF_DUPLEX_FLOW_CTRL BIT(7)
+#define SW_HALF_DUPLEX BIT(6)
+#define SW_FLOW_CTRL BIT(5)
+#define SW_10_MBIT BIT(4)
+#define SW_REPLACE_VID BIT(3)
+#define BROADCAST_STORM_RATE_HI 0x07
+
+#define REG_SW_CTRL_5 0x07
+
+#define BROADCAST_STORM_RATE_LO 0xFF
+#define BROADCAST_STORM_RATE 0x07FF
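+
+/* The 11-bit rate value spans two registers: REG_SW_CTRL_4 holds the
+ * high three bits (BROADCAST_STORM_RATE_HI) and REG_SW_CTRL_5 the low
+ * byte, which is why ksz8795_setup() programs it with a single 16-bit
+ * regmap access at S_REPLACE_VID_CTRL (a sketch of the layout implied
+ * by the masks above).
+ */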
+
+#define REG_SW_CTRL_6 0x08
+
+#define SW_MIB_COUNTER_FLUSH BIT(7)
+#define SW_MIB_COUNTER_FREEZE BIT(6)
+#define SW_MIB_COUNTER_CTRL_ENABLE KS_PORT_M
+
+#define REG_SW_CTRL_9 0x0B
+
+#define SPI_CLK_125_MHZ 0x80
+#define SPI_CLK_62_5_MHZ 0x40
+#define SPI_CLK_31_25_MHZ 0x00
+
+#define SW_LED_MODE_M 0x3
+#define SW_LED_MODE_S 4
+#define SW_LED_LINK_ACT_SPEED 0
+#define SW_LED_LINK_ACT 1
+#define SW_LED_LINK_ACT_DUPLEX 2
+#define SW_LED_LINK_DUPLEX 3
+
+#define REG_SW_CTRL_10 0x0C
+
+#define SW_TAIL_TAG_ENABLE BIT(1)
+#define SW_PASS_PAUSE BIT(0)
+
+#define REG_SW_CTRL_11 0x0D
+
+#define REG_POWER_MANAGEMENT_1 0x0E
+
+#define SW_PLL_POWER_DOWN BIT(5)
+#define SW_POWER_MANAGEMENT_MODE_M 0x3
+#define SW_POWER_MANAGEMENT_MODE_S 3
+#define SW_POWER_NORMAL 0
+#define SW_ENERGY_DETECTION 1
+#define SW_SOFTWARE_POWER_DOWN 2
+
+#define REG_POWER_MANAGEMENT_2 0x0F
+
+#define REG_PORT_1_CTRL_0 0x10
+#define REG_PORT_2_CTRL_0 0x20
+#define REG_PORT_3_CTRL_0 0x30
+#define REG_PORT_4_CTRL_0 0x40
+#define REG_PORT_5_CTRL_0 0x50
+
+#define PORT_BROADCAST_STORM BIT(7)
+#define PORT_DIFFSERV_ENABLE BIT(6)
+#define PORT_802_1P_ENABLE BIT(5)
+#define PORT_BASED_PRIO_S 3
+#define PORT_BASED_PRIO_M KS_PRIO_M
+#define PORT_BASED_PRIO_0 0
+#define PORT_BASED_PRIO_1 1
+#define PORT_BASED_PRIO_2 2
+#define PORT_BASED_PRIO_3 3
+#define PORT_INSERT_TAG BIT(2)
+#define PORT_REMOVE_TAG BIT(1)
+#define PORT_QUEUE_SPLIT_L BIT(0)
+
+#define REG_PORT_1_CTRL_1 0x11
+#define REG_PORT_2_CTRL_1 0x21
+#define REG_PORT_3_CTRL_1 0x31
+#define REG_PORT_4_CTRL_1 0x41
+#define REG_PORT_5_CTRL_1 0x51
+
+#define PORT_MIRROR_SNIFFER BIT(7)
+#define PORT_MIRROR_RX BIT(6)
+#define PORT_MIRROR_TX BIT(5)
+#define PORT_VLAN_MEMBERSHIP KS_PORT_M
+
+#define REG_PORT_1_CTRL_2 0x12
+#define REG_PORT_2_CTRL_2 0x22
+#define REG_PORT_3_CTRL_2 0x32
+#define REG_PORT_4_CTRL_2 0x42
+#define REG_PORT_5_CTRL_2 0x52
+
+#define PORT_802_1P_REMAPPING BIT(7)
+#define PORT_INGRESS_FILTER BIT(6)
+#define PORT_DISCARD_NON_VID BIT(5)
+#define PORT_FORCE_FLOW_CTRL BIT(4)
+#define PORT_BACK_PRESSURE BIT(3)
+#define PORT_TX_ENABLE BIT(2)
+#define PORT_RX_ENABLE BIT(1)
+#define PORT_LEARN_DISABLE BIT(0)
+
+#define REG_PORT_1_CTRL_3 0x13
+#define REG_PORT_2_CTRL_3 0x23
+#define REG_PORT_3_CTRL_3 0x33
+#define REG_PORT_4_CTRL_3 0x43
+#define REG_PORT_5_CTRL_3 0x53
+#define REG_PORT_1_CTRL_4 0x14
+#define REG_PORT_2_CTRL_4 0x24
+#define REG_PORT_3_CTRL_4 0x34
+#define REG_PORT_4_CTRL_4 0x44
+#define REG_PORT_5_CTRL_4 0x54
+
+#define PORT_DEFAULT_VID 0x0001
+
+#define REG_PORT_1_CTRL_5 0x15
+#define REG_PORT_2_CTRL_5 0x25
+#define REG_PORT_3_CTRL_5 0x35
+#define REG_PORT_4_CTRL_5 0x45
+#define REG_PORT_5_CTRL_5 0x55
+
+#define PORT_ACL_ENABLE BIT(2)
+#define PORT_AUTHEN_MODE 0x3
+#define PORT_AUTHEN_PASS 0
+#define PORT_AUTHEN_BLOCK 1
+#define PORT_AUTHEN_TRAP 2
+
+#define REG_PORT_5_CTRL_6 0x56
+
+#define PORT_MII_INTERNAL_CLOCK BIT(7)
+#define PORT_GMII_1GPS_MODE BIT(6)
+#define PORT_RGMII_ID_IN_ENABLE BIT(4)
+#define PORT_RGMII_ID_OUT_ENABLE BIT(3)
+#define PORT_GMII_MAC_MODE BIT(2)
+#define PORT_INTERFACE_TYPE 0x3
+#define PORT_INTERFACE_MII 0
+#define PORT_INTERFACE_RMII 1
+#define PORT_INTERFACE_GMII 2
+#define PORT_INTERFACE_RGMII 3
+
+#define REG_PORT_1_CTRL_7 0x17
+#define REG_PORT_2_CTRL_7 0x27
+#define REG_PORT_3_CTRL_7 0x37
+#define REG_PORT_4_CTRL_7 0x47
+
+#define PORT_AUTO_NEG_ASYM_PAUSE BIT(5)
+#define PORT_AUTO_NEG_SYM_PAUSE BIT(4)
+#define PORT_AUTO_NEG_100BTX_FD BIT(3)
+#define PORT_AUTO_NEG_100BTX BIT(2)
+#define PORT_AUTO_NEG_10BT_FD BIT(1)
+#define PORT_AUTO_NEG_10BT BIT(0)
+
+#define REG_PORT_1_STATUS_0 0x18
+#define REG_PORT_2_STATUS_0 0x28
+#define REG_PORT_3_STATUS_0 0x38
+#define REG_PORT_4_STATUS_0 0x48
+
+/* For KSZ8765. */
+#define PORT_FIBER_MODE BIT(7)
+
+#define PORT_REMOTE_ASYM_PAUSE BIT(5)
+#define PORT_REMOTE_SYM_PAUSE BIT(4)
+#define PORT_REMOTE_100BTX_FD BIT(3)
+#define PORT_REMOTE_100BTX BIT(2)
+#define PORT_REMOTE_10BT_FD BIT(1)
+#define PORT_REMOTE_10BT BIT(0)
+
+#define REG_PORT_1_STATUS_1 0x19
+#define REG_PORT_2_STATUS_1 0x29
+#define REG_PORT_3_STATUS_1 0x39
+#define REG_PORT_4_STATUS_1 0x49
+
+#define PORT_HP_MDIX BIT(7)
+#define PORT_REVERSED_POLARITY BIT(5)
+#define PORT_TX_FLOW_CTRL BIT(4)
+#define PORT_RX_FLOW_CTRL BIT(3)
+#define PORT_STAT_SPEED_100MBIT BIT(2)
+#define PORT_STAT_FULL_DUPLEX BIT(1)
+
+#define PORT_REMOTE_FAULT BIT(0)
+
+#define REG_PORT_1_LINK_MD_CTRL 0x1A
+#define REG_PORT_2_LINK_MD_CTRL 0x2A
+#define REG_PORT_3_LINK_MD_CTRL 0x3A
+#define REG_PORT_4_LINK_MD_CTRL 0x4A
+
+#define PORT_CABLE_10M_SHORT BIT(7)
+#define PORT_CABLE_DIAG_RESULT_M 0x3
+#define PORT_CABLE_DIAG_RESULT_S 5
+#define PORT_CABLE_STAT_NORMAL 0
+#define PORT_CABLE_STAT_OPEN 1
+#define PORT_CABLE_STAT_SHORT 2
+#define PORT_CABLE_STAT_FAILED 3
+#define PORT_START_CABLE_DIAG BIT(4)
+#define PORT_FORCE_LINK BIT(3)
+#define PORT_POWER_SAVING BIT(2)
+#define PORT_PHY_REMOTE_LOOPBACK BIT(1)
+#define PORT_CABLE_FAULT_COUNTER_H 0x01
+
+#define REG_PORT_1_LINK_MD_RESULT 0x1B
+#define REG_PORT_2_LINK_MD_RESULT 0x2B
+#define REG_PORT_3_LINK_MD_RESULT 0x3B
+#define REG_PORT_4_LINK_MD_RESULT 0x4B
+
+#define PORT_CABLE_FAULT_COUNTER_L 0xFF
+#define PORT_CABLE_FAULT_COUNTER 0x1FF
+
+#define REG_PORT_1_CTRL_9 0x1C
+#define REG_PORT_2_CTRL_9 0x2C
+#define REG_PORT_3_CTRL_9 0x3C
+#define REG_PORT_4_CTRL_9 0x4C
+
+#define PORT_AUTO_NEG_DISABLE BIT(7)
+#define PORT_FORCE_100_MBIT BIT(6)
+#define PORT_FORCE_FULL_DUPLEX BIT(5)
+
+#define REG_PORT_1_CTRL_10 0x1D
+#define REG_PORT_2_CTRL_10 0x2D
+#define REG_PORT_3_CTRL_10 0x3D
+#define REG_PORT_4_CTRL_10 0x4D
+
+#define PORT_LED_OFF BIT(7)
+#define PORT_TX_DISABLE BIT(6)
+#define PORT_AUTO_NEG_RESTART BIT(5)
+#define PORT_POWER_DOWN BIT(3)
+#define PORT_AUTO_MDIX_DISABLE BIT(2)
+#define PORT_FORCE_MDIX BIT(1)
+#define PORT_MAC_LOOPBACK BIT(0)
+
+#define REG_PORT_1_STATUS_2 0x1E
+#define REG_PORT_2_STATUS_2 0x2E
+#define REG_PORT_3_STATUS_2 0x3E
+#define REG_PORT_4_STATUS_2 0x4E
+
+#define PORT_MDIX_STATUS BIT(7)
+#define PORT_AUTO_NEG_COMPLETE BIT(6)
+#define PORT_STAT_LINK_GOOD BIT(5)
+
+#define REG_PORT_1_STATUS_3 0x1F
+#define REG_PORT_2_STATUS_3 0x2F
+#define REG_PORT_3_STATUS_3 0x3F
+#define REG_PORT_4_STATUS_3 0x4F
+
+#define PORT_PHY_LOOPBACK BIT(7)
+#define PORT_PHY_ISOLATE BIT(5)
+#define PORT_PHY_SOFT_RESET BIT(4)
+#define PORT_PHY_FORCE_LINK BIT(3)
+#define PORT_PHY_MODE_M 0x7
+#define PHY_MODE_IN_AUTO_NEG 1
+#define PHY_MODE_10BT_HALF 2
+#define PHY_MODE_100BT_HALF 3
+#define PHY_MODE_10BT_FULL 5
+#define PHY_MODE_100BT_FULL 6
+#define PHY_MODE_ISOLATE 7
+
+#define REG_PORT_CTRL_0 0x00
+#define REG_PORT_CTRL_1 0x01
+#define REG_PORT_CTRL_2 0x02
+#define REG_PORT_CTRL_VID 0x03
+
+#define REG_PORT_CTRL_5 0x05
+
+#define REG_PORT_CTRL_7 0x07
+#define REG_PORT_STATUS_0 0x08
+#define REG_PORT_STATUS_1 0x09
+#define REG_PORT_LINK_MD_CTRL 0x0A
+#define REG_PORT_LINK_MD_RESULT 0x0B
+#define REG_PORT_CTRL_9 0x0C
+#define REG_PORT_CTRL_10 0x0D
+#define REG_PORT_STATUS_2 0x0E
+#define REG_PORT_STATUS_3 0x0F
+
+#define REG_PORT_CTRL_12 0xA0
+#define REG_PORT_CTRL_13 0xA1
+#define REG_PORT_RATE_CTRL_3 0xA2
+#define REG_PORT_RATE_CTRL_2 0xA3
+#define REG_PORT_RATE_CTRL_1 0xA4
+#define REG_PORT_RATE_CTRL_0 0xA5
+#define REG_PORT_RATE_LIMIT 0xA6
+#define REG_PORT_IN_RATE_0 0xA7
+#define REG_PORT_IN_RATE_1 0xA8
+#define REG_PORT_IN_RATE_2 0xA9
+#define REG_PORT_IN_RATE_3 0xAA
+#define REG_PORT_OUT_RATE_0 0xAB
+#define REG_PORT_OUT_RATE_1 0xAC
+#define REG_PORT_OUT_RATE_2 0xAD
+#define REG_PORT_OUT_RATE_3 0xAE
+
+#define PORT_CTRL_ADDR(port, addr) \
+ ((addr) + REG_PORT_1_CTRL_0 + (port) * \
+ (REG_PORT_2_CTRL_0 - REG_PORT_1_CTRL_0))
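+
+/* For example, with the 0x10-wide per-port register banks above:
+ * PORT_CTRL_ADDR(0, REG_PORT_CTRL_2) = 0x02 + 0x10 + 0 * 0x10 = 0x12
+ * PORT_CTRL_ADDR(4, REG_PORT_CTRL_VID) = 0x03 + 0x10 + 4 * 0x10 = 0x53
+ * i.e. port index 0 maps to the port 1 register bank.
+ */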
+
+#define REG_SW_MAC_ADDR_0 0x68
+#define REG_SW_MAC_ADDR_1 0x69
+#define REG_SW_MAC_ADDR_2 0x6A
+#define REG_SW_MAC_ADDR_3 0x6B
+#define REG_SW_MAC_ADDR_4 0x6C
+#define REG_SW_MAC_ADDR_5 0x6D
+
+#define REG_IND_CTRL_0 0x6E
+
+#define TABLE_EXT_SELECT_S 5
+#define TABLE_EEE_V 1
+#define TABLE_ACL_V 2
+#define TABLE_PME_V 4
+#define TABLE_LINK_MD_V 5
+#define TABLE_EEE (TABLE_EEE_V << TABLE_EXT_SELECT_S)
+#define TABLE_ACL (TABLE_ACL_V << TABLE_EXT_SELECT_S)
+#define TABLE_PME (TABLE_PME_V << TABLE_EXT_SELECT_S)
+#define TABLE_LINK_MD (TABLE_LINK_MD_V << TABLE_EXT_SELECT_S)
+#define TABLE_READ BIT(4)
+#define TABLE_SELECT_S 2
+#define TABLE_STATIC_MAC_V 0
+#define TABLE_VLAN_V 1
+#define TABLE_DYNAMIC_MAC_V 2
+#define TABLE_MIB_V 3
+#define TABLE_STATIC_MAC (TABLE_STATIC_MAC_V << TABLE_SELECT_S)
+#define TABLE_VLAN (TABLE_VLAN_V << TABLE_SELECT_S)
+#define TABLE_DYNAMIC_MAC (TABLE_DYNAMIC_MAC_V << TABLE_SELECT_S)
+#define TABLE_MIB (TABLE_MIB_V << TABLE_SELECT_S)
+
+#define REG_IND_CTRL_1 0x6F
+
+#define TABLE_ENTRY_MASK 0x03FF
+#define TABLE_EXT_ENTRY_MASK 0x0FFF
+
+#define REG_IND_DATA_8 0x70
+#define REG_IND_DATA_7 0x71
+#define REG_IND_DATA_6 0x72
+#define REG_IND_DATA_5 0x73
+#define REG_IND_DATA_4 0x74
+#define REG_IND_DATA_3 0x75
+#define REG_IND_DATA_2 0x76
+#define REG_IND_DATA_1 0x77
+#define REG_IND_DATA_0 0x78
+
+#define REG_IND_DATA_PME_EEE_ACL 0xA0
+
+#define REG_IND_DATA_CHECK REG_IND_DATA_6
+#define REG_IND_MIB_CHECK REG_IND_DATA_4
+#define REG_IND_DATA_HI REG_IND_DATA_7
+#define REG_IND_DATA_LO REG_IND_DATA_3
+
+#define REG_INT_STATUS 0x7C
+#define REG_INT_ENABLE 0x7D
+
+#define INT_PME BIT(4)
+
+#define REG_ACL_INT_STATUS 0x7E
+#define REG_ACL_INT_ENABLE 0x7F
+
+#define INT_PORT_5 BIT(4)
+#define INT_PORT_4 BIT(3)
+#define INT_PORT_3 BIT(2)
+#define INT_PORT_2 BIT(1)
+#define INT_PORT_1 BIT(0)
+
+#define INT_PORT_ALL \
+ (INT_PORT_5 | INT_PORT_4 | INT_PORT_3 | INT_PORT_2 | INT_PORT_1)
+
+#define REG_SW_CTRL_12 0x80
+#define REG_SW_CTRL_13 0x81
+
+#define SWITCH_802_1P_MASK 3
+#define SWITCH_802_1P_BASE 3
+#define SWITCH_802_1P_SHIFT 2
+
+#define SW_802_1P_MAP_M KS_PRIO_M
+#define SW_802_1P_MAP_S KS_PRIO_S
+
+#define REG_SWITCH_CTRL_14 0x82
+
+#define SW_PRIO_MAPPING_M KS_PRIO_M
+#define SW_PRIO_MAPPING_S 6
+#define SW_PRIO_MAP_3_HI 0
+#define SW_PRIO_MAP_2_HI 2
+#define SW_PRIO_MAP_0_LO 3
+
+#define REG_SW_CTRL_15 0x83
+#define REG_SW_CTRL_16 0x84
+#define REG_SW_CTRL_17 0x85
+#define REG_SW_CTRL_18 0x86
+
+#define SW_SELF_ADDR_FILTER_ENABLE BIT(6)
+
+#define REG_SW_UNK_UCAST_CTRL 0x83
+#define REG_SW_UNK_MCAST_CTRL 0x84
+#define REG_SW_UNK_VID_CTRL 0x85
+#define REG_SW_UNK_IP_MCAST_CTRL 0x86
+
+#define SW_UNK_FWD_ENABLE BIT(5)
+#define SW_UNK_FWD_MAP KS_PORT_M
+
+#define REG_SW_CTRL_19 0x87
+
+#define SW_IN_RATE_LIMIT_PERIOD_M 0x3
+#define SW_IN_RATE_LIMIT_PERIOD_S 4
+#define SW_IN_RATE_LIMIT_16_MS 0
+#define SW_IN_RATE_LIMIT_64_MS 1
+#define SW_IN_RATE_LIMIT_256_MS 2
+#define SW_OUT_RATE_LIMIT_QUEUE_BASED BIT(3)
+#define SW_INS_TAG_ENABLE BIT(2)
+
+#define REG_TOS_PRIO_CTRL_0 0x90
+#define REG_TOS_PRIO_CTRL_1 0x91
+#define REG_TOS_PRIO_CTRL_2 0x92
+#define REG_TOS_PRIO_CTRL_3 0x93
+#define REG_TOS_PRIO_CTRL_4 0x94
+#define REG_TOS_PRIO_CTRL_5 0x95
+#define REG_TOS_PRIO_CTRL_6 0x96
+#define REG_TOS_PRIO_CTRL_7 0x97
+#define REG_TOS_PRIO_CTRL_8 0x98
+#define REG_TOS_PRIO_CTRL_9 0x99
+#define REG_TOS_PRIO_CTRL_10 0x9A
+#define REG_TOS_PRIO_CTRL_11 0x9B
+#define REG_TOS_PRIO_CTRL_12 0x9C
+#define REG_TOS_PRIO_CTRL_13 0x9D
+#define REG_TOS_PRIO_CTRL_14 0x9E
+#define REG_TOS_PRIO_CTRL_15 0x9F
+
+#define TOS_PRIO_M KS_PRIO_M
+#define TOS_PRIO_S KS_PRIO_S
+
+#define REG_SW_CTRL_20 0xA3
+
+#define SW_GMII_DRIVE_STRENGTH_S 4
+#define SW_DRIVE_STRENGTH_M 0x7
+#define SW_DRIVE_STRENGTH_2MA 0
+#define SW_DRIVE_STRENGTH_4MA 1
+#define SW_DRIVE_STRENGTH_8MA 2
+#define SW_DRIVE_STRENGTH_12MA 3
+#define SW_DRIVE_STRENGTH_16MA 4
+#define SW_DRIVE_STRENGTH_20MA 5
+#define SW_DRIVE_STRENGTH_24MA 6
+#define SW_DRIVE_STRENGTH_28MA 7
+#define SW_MII_DRIVE_STRENGTH_S 0
+
+#define REG_SW_CTRL_21 0xA4
+
+#define SW_IPV6_MLD_OPTION BIT(3)
+#define SW_IPV6_MLD_SNOOP BIT(2)
+
+#define REG_PORT_1_CTRL_12 0xB0
+#define REG_PORT_2_CTRL_12 0xC0
+#define REG_PORT_3_CTRL_12 0xD0
+#define REG_PORT_4_CTRL_12 0xE0
+#define REG_PORT_5_CTRL_12 0xF0
+
+#define PORT_PASS_ALL BIT(6)
+#define PORT_INS_TAG_FOR_PORT_5_S 3
+#define PORT_INS_TAG_FOR_PORT_5 BIT(3)
+#define PORT_INS_TAG_FOR_PORT_4 BIT(2)
+#define PORT_INS_TAG_FOR_PORT_3 BIT(1)
+#define PORT_INS_TAG_FOR_PORT_2 BIT(0)
+
+#define REG_PORT_1_CTRL_13 0xB1
+#define REG_PORT_2_CTRL_13 0xC1
+#define REG_PORT_3_CTRL_13 0xD1
+#define REG_PORT_4_CTRL_13 0xE1
+#define REG_PORT_5_CTRL_13 0xF1
+
+#define PORT_QUEUE_SPLIT_H BIT(1)
+#define PORT_QUEUE_SPLIT_1 0
+#define PORT_QUEUE_SPLIT_2 1
+#define PORT_QUEUE_SPLIT_4 2
+#define PORT_DROP_TAG BIT(0)
+
+#define REG_PORT_1_CTRL_14 0xB2
+#define REG_PORT_2_CTRL_14 0xC2
+#define REG_PORT_3_CTRL_14 0xD2
+#define REG_PORT_4_CTRL_14 0xE2
+#define REG_PORT_5_CTRL_14 0xF2
+#define REG_PORT_1_CTRL_15 0xB3
+#define REG_PORT_2_CTRL_15 0xC3
+#define REG_PORT_3_CTRL_15 0xD3
+#define REG_PORT_4_CTRL_15 0xE3
+#define REG_PORT_5_CTRL_15 0xF3
+#define REG_PORT_1_CTRL_16 0xB4
+#define REG_PORT_2_CTRL_16 0xC4
+#define REG_PORT_3_CTRL_16 0xD4
+#define REG_PORT_4_CTRL_16 0xE4
+#define REG_PORT_5_CTRL_16 0xF4
+#define REG_PORT_1_CTRL_17 0xB5
+#define REG_PORT_2_CTRL_17 0xC5
+#define REG_PORT_3_CTRL_17 0xD5
+#define REG_PORT_4_CTRL_17 0xE5
+#define REG_PORT_5_CTRL_17 0xF5
+
+#define REG_PORT_1_RATE_CTRL_3 0xB2
+#define REG_PORT_1_RATE_CTRL_2 0xB3
+#define REG_PORT_1_RATE_CTRL_1 0xB4
+#define REG_PORT_1_RATE_CTRL_0 0xB5
+#define REG_PORT_2_RATE_CTRL_3 0xC2
+#define REG_PORT_2_RATE_CTRL_2 0xC3
+#define REG_PORT_2_RATE_CTRL_1 0xC4
+#define REG_PORT_2_RATE_CTRL_0 0xC5
+#define REG_PORT_3_RATE_CTRL_3 0xD2
+#define REG_PORT_3_RATE_CTRL_2 0xD3
+#define REG_PORT_3_RATE_CTRL_1 0xD4
+#define REG_PORT_3_RATE_CTRL_0 0xD5
+#define REG_PORT_4_RATE_CTRL_3 0xE2
+#define REG_PORT_4_RATE_CTRL_2 0xE3
+#define REG_PORT_4_RATE_CTRL_1 0xE4
+#define REG_PORT_4_RATE_CTRL_0 0xE5
+#define REG_PORT_5_RATE_CTRL_3 0xF2
+#define REG_PORT_5_RATE_CTRL_2 0xF3
+#define REG_PORT_5_RATE_CTRL_1 0xF4
+#define REG_PORT_5_RATE_CTRL_0 0xF5
+
+#define RATE_CTRL_ENABLE BIT(7)
+#define RATE_RATIO_M (BIT(7) - 1)
+
+#define PORT_OUT_RATE_ENABLE BIT(7)
+
+#define REG_PORT_1_RATE_LIMIT 0xB6
+#define REG_PORT_2_RATE_LIMIT 0xC6
+#define REG_PORT_3_RATE_LIMIT 0xD6
+#define REG_PORT_4_RATE_LIMIT 0xE6
+#define REG_PORT_5_RATE_LIMIT 0xF6
+
+#define PORT_IN_PORT_BASED_S 6
+#define PORT_RATE_PACKET_BASED_S 5
+#define PORT_IN_FLOW_CTRL_S 4
+#define PORT_IN_LIMIT_MODE_M 0x3
+#define PORT_IN_LIMIT_MODE_S 2
+#define PORT_COUNT_IFG_S 1
+#define PORT_COUNT_PREAMBLE_S 0
+#define PORT_IN_PORT_BASED BIT(PORT_IN_PORT_BASED_S)
+#define PORT_RATE_PACKET_BASED BIT(PORT_RATE_PACKET_BASED_S)
+#define PORT_IN_FLOW_CTRL BIT(PORT_IN_FLOW_CTRL_S)
+#define PORT_IN_ALL 0
+#define PORT_IN_UNICAST 1
+#define PORT_IN_MULTICAST 2
+#define PORT_IN_BROADCAST 3
+#define PORT_COUNT_IFG BIT(PORT_COUNT_IFG_S)
+#define PORT_COUNT_PREAMBLE BIT(PORT_COUNT_PREAMBLE_S)
+
+#define REG_PORT_1_IN_RATE_0 0xB7
+#define REG_PORT_2_IN_RATE_0 0xC7
+#define REG_PORT_3_IN_RATE_0 0xD7
+#define REG_PORT_4_IN_RATE_0 0xE7
+#define REG_PORT_5_IN_RATE_0 0xF7
+#define REG_PORT_1_IN_RATE_1 0xB8
+#define REG_PORT_2_IN_RATE_1 0xC8
+#define REG_PORT_3_IN_RATE_1 0xD8
+#define REG_PORT_4_IN_RATE_1 0xE8
+#define REG_PORT_5_IN_RATE_1 0xF8
+#define REG_PORT_1_IN_RATE_2 0xB9
+#define REG_PORT_2_IN_RATE_2 0xC9
+#define REG_PORT_3_IN_RATE_2 0xD9
+#define REG_PORT_4_IN_RATE_2 0xE9
+#define REG_PORT_5_IN_RATE_2 0xF9
+#define REG_PORT_1_IN_RATE_3 0xBA
+#define REG_PORT_2_IN_RATE_3 0xCA
+#define REG_PORT_3_IN_RATE_3 0xDA
+#define REG_PORT_4_IN_RATE_3 0xEA
+#define REG_PORT_5_IN_RATE_3 0xFA
+
+#define PORT_IN_RATE_ENABLE BIT(7)
+#define PORT_RATE_LIMIT_M (BIT(7) - 1)
+
+#define REG_PORT_1_OUT_RATE_0 0xBB
+#define REG_PORT_2_OUT_RATE_0 0xCB
+#define REG_PORT_3_OUT_RATE_0 0xDB
+#define REG_PORT_4_OUT_RATE_0 0xEB
+#define REG_PORT_5_OUT_RATE_0 0xFB
+#define REG_PORT_1_OUT_RATE_1 0xBC
+#define REG_PORT_2_OUT_RATE_1 0xCC
+#define REG_PORT_3_OUT_RATE_1 0xDC
+#define REG_PORT_4_OUT_RATE_1 0xEC
+#define REG_PORT_5_OUT_RATE_1 0xFC
+#define REG_PORT_1_OUT_RATE_2 0xBD
+#define REG_PORT_2_OUT_RATE_2 0xCD
+#define REG_PORT_3_OUT_RATE_2 0xDD
+#define REG_PORT_4_OUT_RATE_2 0xED
+#define REG_PORT_5_OUT_RATE_2 0xFD
+#define REG_PORT_1_OUT_RATE_3 0xBE
+#define REG_PORT_2_OUT_RATE_3 0xCE
+#define REG_PORT_3_OUT_RATE_3 0xDE
+#define REG_PORT_4_OUT_RATE_3 0xEE
+#define REG_PORT_5_OUT_RATE_3 0xFE
+
+/* PME */
+
+#define SW_PME_OUTPUT_ENABLE BIT(1)
+#define SW_PME_ACTIVE_HIGH BIT(0)
+
+#define PORT_MAGIC_PACKET_DETECT BIT(2)
+#define PORT_LINK_UP_DETECT BIT(1)
+#define PORT_ENERGY_DETECT BIT(0)
+
+/* ACL */
+
+#define ACL_FIRST_RULE_M 0xF
+
+#define ACL_MODE_M 0x3
+#define ACL_MODE_S 4
+#define ACL_MODE_DISABLE 0
+#define ACL_MODE_LAYER_2 1
+#define ACL_MODE_LAYER_3 2
+#define ACL_MODE_LAYER_4 3
+#define ACL_ENABLE_M 0x3
+#define ACL_ENABLE_S 2
+#define ACL_ENABLE_2_COUNT 0
+#define ACL_ENABLE_2_TYPE 1
+#define ACL_ENABLE_2_MAC 2
+#define ACL_ENABLE_2_BOTH 3
+#define ACL_ENABLE_3_IP 1
+#define ACL_ENABLE_3_SRC_DST_COMP 2
+#define ACL_ENABLE_4_PROTOCOL 0
+#define ACL_ENABLE_4_TCP_PORT_COMP 1
+#define ACL_ENABLE_4_UDP_PORT_COMP 2
+#define ACL_ENABLE_4_TCP_SEQN_COMP 3
+#define ACL_SRC BIT(1)
+#define ACL_EQUAL BIT(0)
+
+#define ACL_MAX_PORT 0xFFFF
+
+#define ACL_MIN_PORT 0xFFFF
+#define ACL_IP_ADDR 0xFFFFFFFF
+#define ACL_TCP_SEQNUM 0xFFFFFFFF
+
+#define ACL_RESERVED 0xF8
+#define ACL_PORT_MODE_M 0x3
+#define ACL_PORT_MODE_S 1
+#define ACL_PORT_MODE_DISABLE 0
+#define ACL_PORT_MODE_EITHER 1
+#define ACL_PORT_MODE_IN_RANGE 2
+#define ACL_PORT_MODE_OUT_OF_RANGE 3
+
+#define ACL_TCP_FLAG_ENABLE BIT(0)
+
+#define ACL_TCP_FLAG_M 0xFF
+
+#define ACL_TCP_FLAG 0xFF
+#define ACL_ETH_TYPE 0xFFFF
+#define ACL_IP_M 0xFFFFFFFF
+
+#define ACL_PRIO_MODE_M 0x3
+#define ACL_PRIO_MODE_S 6
+#define ACL_PRIO_MODE_DISABLE 0
+#define ACL_PRIO_MODE_HIGHER 1
+#define ACL_PRIO_MODE_LOWER 2
+#define ACL_PRIO_MODE_REPLACE 3
+#define ACL_PRIO_M 0x7
+#define ACL_PRIO_S 3
+#define ACL_VLAN_PRIO_REPLACE BIT(2)
+#define ACL_VLAN_PRIO_M 0x7
+#define ACL_VLAN_PRIO_HI_M 0x3
+
+#define ACL_VLAN_PRIO_LO_M 0x8
+#define ACL_VLAN_PRIO_S 7
+#define ACL_MAP_MODE_M 0x3
+#define ACL_MAP_MODE_S 5
+#define ACL_MAP_MODE_DISABLE 0
+#define ACL_MAP_MODE_OR 1
+#define ACL_MAP_MODE_AND 2
+#define ACL_MAP_MODE_REPLACE 3
+#define ACL_MAP_PORT_M 0x1F
+
+#define ACL_CNT_M (BIT(11) - 1)
+#define ACL_CNT_S 5
+#define ACL_MSEC_UNIT BIT(4)
+#define ACL_INTR_MODE BIT(3)
+
+#define REG_PORT_ACL_BYTE_EN_MSB 0x10
+
+#define ACL_BYTE_EN_MSB_M 0x3F
+
+#define REG_PORT_ACL_BYTE_EN_LSB 0x11
+
+#define ACL_ACTION_START 0xA
+#define ACL_ACTION_LEN 2
+#define ACL_INTR_CNT_START 0xB
+#define ACL_RULESET_START 0xC
+#define ACL_RULESET_LEN 2
+#define ACL_TABLE_LEN 14
+
+#define ACL_ACTION_ENABLE 0x000C
+#define ACL_MATCH_ENABLE 0x1FF0
+#define ACL_RULESET_ENABLE 0x2003
+#define ACL_BYTE_ENABLE ((ACL_BYTE_EN_MSB_M << 8) | 0xFF)
+#define ACL_MODE_ENABLE (0x10 << 8)
+
+#define REG_PORT_ACL_CTRL_0 0x12
+
+#define PORT_ACL_WRITE_DONE BIT(6)
+#define PORT_ACL_READ_DONE BIT(5)
+#define PORT_ACL_WRITE BIT(4)
+#define PORT_ACL_INDEX_M 0xF
+
+#define REG_PORT_ACL_CTRL_1 0x13
+
+#define PORT_ACL_FORCE_DLR_MISS BIT(0)
+
+#ifndef PHY_REG_CTRL
+#define PHY_REG_CTRL 0
+
+#define PHY_RESET BIT(15)
+#define PHY_LOOPBACK BIT(14)
+#define PHY_SPEED_100MBIT BIT(13)
+#define PHY_AUTO_NEG_ENABLE BIT(12)
+#define PHY_POWER_DOWN BIT(11)
+#define PHY_MII_DISABLE BIT(10)
+#define PHY_AUTO_NEG_RESTART BIT(9)
+#define PHY_FULL_DUPLEX BIT(8)
+#define PHY_COLLISION_TEST_NOT BIT(7)
+#define PHY_HP_MDIX BIT(5)
+#define PHY_FORCE_MDIX BIT(4)
+#define PHY_AUTO_MDIX_DISABLE BIT(3)
+#define PHY_REMOTE_FAULT_DISABLE BIT(2)
+#define PHY_TRANSMIT_DISABLE BIT(1)
+#define PHY_LED_DISABLE BIT(0)
+
+#define PHY_REG_STATUS 1
+
+#define PHY_100BT4_CAPABLE BIT(15)
+#define PHY_100BTX_FD_CAPABLE BIT(14)
+#define PHY_100BTX_CAPABLE BIT(13)
+#define PHY_10BT_FD_CAPABLE BIT(12)
+#define PHY_10BT_CAPABLE BIT(11)
+#define PHY_MII_SUPPRESS_CAPABLE_NOT BIT(6)
+#define PHY_AUTO_NEG_ACKNOWLEDGE BIT(5)
+#define PHY_REMOTE_FAULT BIT(4)
+#define PHY_AUTO_NEG_CAPABLE BIT(3)
+#define PHY_LINK_STATUS BIT(2)
+#define PHY_JABBER_DETECT_NOT BIT(1)
+#define PHY_EXTENDED_CAPABILITY BIT(0)
+
+#define PHY_REG_ID_1 2
+#define PHY_REG_ID_2 3
+
+#define PHY_REG_AUTO_NEGOTIATION 4
+
+#define PHY_AUTO_NEG_NEXT_PAGE_NOT BIT(15)
+#define PHY_AUTO_NEG_REMOTE_FAULT_NOT BIT(13)
+#define PHY_AUTO_NEG_SYM_PAUSE BIT(10)
+#define PHY_AUTO_NEG_100BT4 BIT(9)
+#define PHY_AUTO_NEG_100BTX_FD BIT(8)
+#define PHY_AUTO_NEG_100BTX BIT(7)
+#define PHY_AUTO_NEG_10BT_FD BIT(6)
+#define PHY_AUTO_NEG_10BT BIT(5)
+#define PHY_AUTO_NEG_SELECTOR 0x001F
+#define PHY_AUTO_NEG_802_3 0x0001
+
+#define PHY_REG_REMOTE_CAPABILITY 5
+
+#define PHY_REMOTE_NEXT_PAGE_NOT BIT(15)
+#define PHY_REMOTE_ACKNOWLEDGE_NOT BIT(14)
+#define PHY_REMOTE_REMOTE_FAULT_NOT BIT(13)
+#define PHY_REMOTE_SYM_PAUSE BIT(10)
+#define PHY_REMOTE_100BTX_FD BIT(8)
+#define PHY_REMOTE_100BTX BIT(7)
+#define PHY_REMOTE_10BT_FD BIT(6)
+#define PHY_REMOTE_10BT BIT(5)
+#endif
+
+#define KSZ8795_ID_HI 0x0022
+#define KSZ8795_ID_LO 0x1550
+
+#define KSZ8795_SW_ID 0x8795
+
+#define PHY_REG_LINK_MD 0x1D
+
+#define PHY_START_CABLE_DIAG BIT(15)
+#define PHY_CABLE_DIAG_RESULT 0x6000
+#define PHY_CABLE_STAT_NORMAL 0x0000
+#define PHY_CABLE_STAT_OPEN 0x2000
+#define PHY_CABLE_STAT_SHORT 0x4000
+#define PHY_CABLE_STAT_FAILED 0x6000
+#define PHY_CABLE_10M_SHORT BIT(12)
+#define PHY_CABLE_FAULT_COUNTER 0x01FF
+
+#define PHY_REG_PHY_CTRL 0x1F
+
+#define PHY_MODE_M 0x7
+#define PHY_MODE_S 8
+#define PHY_STAT_REVERSED_POLARITY BIT(5)
+#define PHY_STAT_MDIX BIT(4)
+#define PHY_FORCE_LINK BIT(3)
+#define PHY_POWER_SAVING_ENABLE BIT(2)
+#define PHY_REMOTE_LOOPBACK BIT(1)
+
+/* Chip resource */
+
+#define PRIO_QUEUES 4
+
+#define KS_PRIO_IN_REG 4
+
+#define TOTAL_PORT_NUM 5
+
+/* The host port can only be the last port. */
+#define SWITCH_PORT_NUM (TOTAL_PORT_NUM - 1)
+
+#define KSZ8795_COUNTER_NUM 0x20
+#define TOTAL_KSZ8795_COUNTER_NUM (KSZ8795_COUNTER_NUM + 4)
+
+#define SWITCH_COUNTER_NUM KSZ8795_COUNTER_NUM
+#define TOTAL_SWITCH_COUNTER_NUM TOTAL_KSZ8795_COUNTER_NUM
+
+/* Common names used by other drivers */
+
+#define P_BCAST_STORM_CTRL REG_PORT_CTRL_0
+#define P_PRIO_CTRL REG_PORT_CTRL_0
+#define P_TAG_CTRL REG_PORT_CTRL_0
+#define P_MIRROR_CTRL REG_PORT_CTRL_1
+#define P_802_1P_CTRL REG_PORT_CTRL_2
+#define P_STP_CTRL REG_PORT_CTRL_2
+#define P_LOCAL_CTRL REG_PORT_CTRL_7
+#define P_REMOTE_STATUS REG_PORT_STATUS_0
+#define P_FORCE_CTRL REG_PORT_CTRL_9
+#define P_NEG_RESTART_CTRL REG_PORT_CTRL_10
+#define P_SPEED_STATUS REG_PORT_STATUS_1
+#define P_LINK_STATUS REG_PORT_STATUS_2
+#define P_PASS_ALL_CTRL REG_PORT_CTRL_12
+#define P_INS_SRC_PVID_CTRL REG_PORT_CTRL_12
+#define P_DROP_TAG_CTRL REG_PORT_CTRL_13
+#define P_RATE_LIMIT_CTRL REG_PORT_RATE_LIMIT
+
+#define S_UNKNOWN_DA_CTRL REG_SWITCH_CTRL_12
+#define S_FORWARD_INVALID_VID_CTRL REG_FORWARD_INVALID_VID
+
+#define S_FLUSH_TABLE_CTRL REG_SW_CTRL_0
+#define S_LINK_AGING_CTRL REG_SW_CTRL_0
+#define S_HUGE_PACKET_CTRL REG_SW_CTRL_1
+#define S_MIRROR_CTRL REG_SW_CTRL_3
+#define S_REPLACE_VID_CTRL REG_SW_CTRL_4
+#define S_PASS_PAUSE_CTRL REG_SW_CTRL_10
+#define S_TAIL_TAG_CTRL REG_SW_CTRL_10
+#define S_802_1P_PRIO_CTRL REG_SW_CTRL_12
+#define S_TOS_PRIO_CTRL REG_TOS_PRIO_CTRL_0
+#define S_IPV6_MLD_CTRL REG_SW_CTRL_21
+
+#define IND_ACC_TABLE(table) ((table) << 8)
+
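+/* An indirect VLAN table read of entry @addr is then started by a 16-bit
+ * write of IND_ACC_TABLE(TABLE_VLAN | TABLE_READ) | (addr & TABLE_ENTRY_MASK)
+ * to REG_IND_CTRL_0 (a sketch of the encoding used by the table accessors
+ * in ksz8795.c).
+ */
+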
+/* Driver set switch broadcast storm protection at 10% rate. */
+#define BROADCAST_STORM_PROT_RATE 10
+
+/* 148,800 frames * 67 ms / 1000 */
+#define BROADCAST_STORM_VALUE 9969
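+
+/* That is, roughly 148,800 minimum-size frames per second at 100 Mbit/s,
+ * so one 67 ms interval holds about 148800 * 67 / 1000 = 9969 frames;
+ * ksz8795_setup() then programs 9969 * 10 / 100 = 996 as the 10%
+ * broadcast storm threshold.
+ */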
+
+/*
+ * STATIC_MAC_TABLE_ADDR 00-0000FFFF-FFFFFFFF
+ * STATIC_MAC_TABLE_FWD_PORTS 00-001F0000-00000000
+ * STATIC_MAC_TABLE_VALID 00-00200000-00000000
+ * STATIC_MAC_TABLE_OVERRIDE 00-00400000-00000000
+ * STATIC_MAC_TABLE_USE_FID 00-00800000-00000000
+ * STATIC_MAC_TABLE_FID 00-7F000000-00000000
+ */
+
+#define STATIC_MAC_TABLE_ADDR 0x0000FFFF
+#define STATIC_MAC_TABLE_FWD_PORTS 0x001F0000
+#define STATIC_MAC_TABLE_VALID 0x00200000
+#define STATIC_MAC_TABLE_OVERRIDE 0x00400000
+#define STATIC_MAC_TABLE_USE_FID 0x00800000
+#define STATIC_MAC_TABLE_FID 0x7F000000
+
+#define STATIC_MAC_FWD_PORTS_S 16
+#define STATIC_MAC_FID_S 24
+
+/*
+ * VLAN_TABLE_FID 00-007F007F-007F007F
+ * VLAN_TABLE_MEMBERSHIP 00-0F800F80-0F800F80
+ * VLAN_TABLE_VALID 00-10001000-10001000
+ */
+
+#define VLAN_TABLE_FID 0x007F
+#define VLAN_TABLE_MEMBERSHIP 0x0F80
+#define VLAN_TABLE_VALID 0x1000
+
+#define VLAN_TABLE_MEMBERSHIP_S 7
+#define VLAN_TABLE_S 16
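+
+/* Each 16-bit VLAN table word is therefore packed roughly as
+ * data = fid | (member << VLAN_TABLE_MEMBERSHIP_S) |
+ * (valid ? VLAN_TABLE_VALID : 0)
+ * which is what the ksz8795_to_vlan()/ksz8795_from_vlan() helpers used
+ * by the VLAN ops encode and decode (a sketch; the helpers are not
+ * shown in this hunk).
+ */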
+
+/*
+ * DYNAMIC_MAC_TABLE_ADDR 00-0000FFFF-FFFFFFFF
+ * DYNAMIC_MAC_TABLE_FID 00-007F0000-00000000
+ * DYNAMIC_MAC_TABLE_NOT_READY 00-00800000-00000000
+ * DYNAMIC_MAC_TABLE_SRC_PORT 00-07000000-00000000
+ * DYNAMIC_MAC_TABLE_TIMESTAMP 00-18000000-00000000
+ * DYNAMIC_MAC_TABLE_ENTRIES 7F-E0000000-00000000
+ * DYNAMIC_MAC_TABLE_MAC_EMPTY 80-00000000-00000000
+ */
+
+#define DYNAMIC_MAC_TABLE_ADDR 0x0000FFFF
+#define DYNAMIC_MAC_TABLE_FID 0x007F0000
+#define DYNAMIC_MAC_TABLE_SRC_PORT 0x07000000
+#define DYNAMIC_MAC_TABLE_TIMESTAMP 0x18000000
+#define DYNAMIC_MAC_TABLE_ENTRIES 0xE0000000
+
+#define DYNAMIC_MAC_TABLE_NOT_READY 0x80
+
+#define DYNAMIC_MAC_TABLE_ENTRIES_H 0x7F
+#define DYNAMIC_MAC_TABLE_MAC_EMPTY 0x80
+
+#define DYNAMIC_MAC_FID_S 16
+#define DYNAMIC_MAC_SRC_PORT_S 24
+#define DYNAMIC_MAC_TIMESTAMP_S 27
+#define DYNAMIC_MAC_ENTRIES_S 29
+#define DYNAMIC_MAC_ENTRIES_H_S 3
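+
+/* Decoding one dynamic MAC table word then looks roughly like
+ * fid = (data_hi & DYNAMIC_MAC_TABLE_FID) >> DYNAMIC_MAC_FID_S;
+ * src_port = (data_hi & DYNAMIC_MAC_TABLE_SRC_PORT) >>
+ * DYNAMIC_MAC_SRC_PORT_S;
+ * (a sketch; the actual decode lives in ksz8795_r_dyn_mac_table()).
+ */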
+
+/*
+ * MIB_COUNTER_VALUE 00-00000000-3FFFFFFF
+ * MIB_TOTAL_BYTES 00-0000000F-FFFFFFFF
+ * MIB_PACKET_DROPPED 00-00000000-0000FFFF
+ * MIB_COUNTER_VALID 00-00000020-00000000
+ * MIB_COUNTER_OVERFLOW 00-00000040-00000000
+ */
+
+#define MIB_COUNTER_OVERFLOW BIT(6)
+#define MIB_COUNTER_VALID BIT(5)
+
+#define MIB_COUNTER_VALUE 0x3FFFFFFF
+
+#define KS_MIB_TOTAL_RX_0 0x100
+#define KS_MIB_TOTAL_TX_0 0x101
+#define KS_MIB_PACKET_DROPPED_RX_0 0x102
+#define KS_MIB_PACKET_DROPPED_TX_0 0x103
+#define KS_MIB_TOTAL_RX_1 0x104
+#define KS_MIB_TOTAL_TX_1 0x105
+#define KS_MIB_PACKET_DROPPED_TX_1 0x106
+#define KS_MIB_PACKET_DROPPED_RX_1 0x107
+#define KS_MIB_TOTAL_RX_2 0x108
+#define KS_MIB_TOTAL_TX_2 0x109
+#define KS_MIB_PACKET_DROPPED_TX_2 0x10A
+#define KS_MIB_PACKET_DROPPED_RX_2 0x10B
+#define KS_MIB_TOTAL_RX_3 0x10C
+#define KS_MIB_TOTAL_TX_3 0x10D
+#define KS_MIB_PACKET_DROPPED_TX_3 0x10E
+#define KS_MIB_PACKET_DROPPED_RX_3 0x10F
+#define KS_MIB_TOTAL_RX_4 0x110
+#define KS_MIB_TOTAL_TX_4 0x111
+#define KS_MIB_PACKET_DROPPED_TX_4 0x112
+#define KS_MIB_PACKET_DROPPED_RX_4 0x113
+
+#define MIB_PACKET_DROPPED 0x0000FFFF
+
+#define MIB_TOTAL_BYTES_H 0x0000000F
+
+#define TAIL_TAG_OVERRIDE BIT(6)
+#define TAIL_TAG_LOOKUP BIT(7)
+
+#define VLAN_TABLE_ENTRIES (4096 / 4)
+#define FID_ENTRIES 128
+
+#endif
diff --git a/drivers/net/dsa/microchip/ksz8795_spi.c b/drivers/net/dsa/microchip/ksz8795_spi.c
new file mode 100644
index 000000000000..d0f8153e86b7
--- /dev/null
+++ b/drivers/net/dsa/microchip/ksz8795_spi.c
@@ -0,0 +1,103 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Microchip KSZ8795 series register access through SPI
+ *
+ * Copyright (C) 2017 Microchip Technology Inc.
+ * Tristram Ha <Tristram.Ha@microchip.com>
+ */
+
+#include <asm/unaligned.h>
+
+#include <linux/delay.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/regmap.h>
+#include <linux/spi/spi.h>
+
+#include "ksz_common.h"
+
+#define SPI_ADDR_SHIFT 12
+#define SPI_ADDR_ALIGN 3
+#define SPI_TURNAROUND_SHIFT 1
+
+KSZ_REGMAP_TABLE(ksz8795, 16, SPI_ADDR_SHIFT,
+ SPI_TURNAROUND_SHIFT, SPI_ADDR_ALIGN);
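+
+/* With these parameters each SPI command word is 16 bits: a 3-bit
+ * read/write opcode, a 12-bit register address and one turnaround bit
+ * before the data phase (a sketch of the framing; KSZ_REGMAP_TABLE in
+ * ksz_common.h expands to the 8/16/32-bit regmap configurations used
+ * below).
+ */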
+
+static int ksz8795_spi_probe(struct spi_device *spi)
+{
+ struct ksz_device *dev;
+ int i, ret;
+
+ dev = ksz_switch_alloc(&spi->dev, spi);
+ if (!dev)
+ return -ENOMEM;
+
+ for (i = 0; i < ARRAY_SIZE(ksz8795_regmap_config); i++) {
+ dev->regmap[i] = devm_regmap_init_spi(spi,
+ &ksz8795_regmap_config[i]);
+ if (IS_ERR(dev->regmap[i])) {
+ ret = PTR_ERR(dev->regmap[i]);
+ dev_err(&spi->dev,
+ "Failed to initialize regmap%i: %d\n",
+ ksz8795_regmap_config[i].val_bits, ret);
+ return ret;
+ }
+ }
+
+ if (spi->dev.platform_data)
+ dev->pdata = spi->dev.platform_data;
+
+ ret = ksz8795_switch_register(dev);
+
+ /* Main DSA driver may not be started yet. */
+ if (ret)
+ return ret;
+
+ spi_set_drvdata(spi, dev);
+
+ return 0;
+}
+
+static int ksz8795_spi_remove(struct spi_device *spi)
+{
+ struct ksz_device *dev = spi_get_drvdata(spi);
+
+ if (dev)
+ ksz_switch_remove(dev);
+
+ return 0;
+}
+
+static void ksz8795_spi_shutdown(struct spi_device *spi)
+{
+ struct ksz_device *dev = spi_get_drvdata(spi);
+
+ if (dev && dev->dev_ops->shutdown)
+ dev->dev_ops->shutdown(dev);
+}
+
+static const struct of_device_id ksz8795_dt_ids[] = {
+ { .compatible = "microchip,ksz8765" },
+ { .compatible = "microchip,ksz8794" },
+ { .compatible = "microchip,ksz8795" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, ksz8795_dt_ids);
+
+static struct spi_driver ksz8795_spi_driver = {
+ .driver = {
+ .name = "ksz8795-switch",
+ .owner = THIS_MODULE,
+ .of_match_table = of_match_ptr(ksz8795_dt_ids),
+ },
+ .probe = ksz8795_spi_probe,
+ .remove = ksz8795_spi_remove,
+ .shutdown = ksz8795_spi_shutdown,
+};
+
+module_spi_driver(ksz8795_spi_driver);
+
+MODULE_AUTHOR("Tristram Ha <Tristram.Ha@microchip.com>");
+MODULE_DESCRIPTION("Microchip KSZ8795 Series Switch SPI Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/dsa/microchip/ksz9477.c b/drivers/net/dsa/microchip/ksz9477.c
index a8c97f7a79b7..187be42de5f1 100644
--- a/drivers/net/dsa/microchip/ksz9477.c
+++ b/drivers/net/dsa/microchip/ksz9477.c
@@ -14,7 +14,6 @@
#include <net/dsa.h>
#include <net/switchdev.h>
-#include "ksz_priv.h"
#include "ksz9477_reg.h"
#include "ksz_common.h"
diff --git a/drivers/net/dsa/microchip/ksz9477_spi.c b/drivers/net/dsa/microchip/ksz9477_spi.c
index 098b01e4ed1a..2e402e4d866f 100644
--- a/drivers/net/dsa/microchip/ksz9477_spi.c
+++ b/drivers/net/dsa/microchip/ksz9477_spi.c
@@ -13,7 +13,6 @@
#include <linux/regmap.h>
#include <linux/spi/spi.h>
-#include "ksz_priv.h"
#include "ksz_common.h"
#define SPI_ADDR_SHIFT 24
diff --git a/drivers/net/dsa/microchip/ksz_common.c b/drivers/net/dsa/microchip/ksz_common.c
index a3d2d67894bd..b0b870f0c252 100644
--- a/drivers/net/dsa/microchip/ksz_common.c
+++ b/drivers/net/dsa/microchip/ksz_common.c
@@ -18,17 +18,7 @@
#include <net/dsa.h>
#include <net/switchdev.h>
-#include "ksz_priv.h"
-
-void ksz_port_cleanup(struct ksz_device *dev, int port)
-{
- /* Common code for port cleanup. */
- mutex_lock(&dev->dev_mutex);
- dev->on_ports &= ~(1 << port);
- dev->live_ports &= ~(1 << port);
- mutex_unlock(&dev->dev_mutex);
-}
-EXPORT_SYMBOL_GPL(ksz_port_cleanup);
+#include "ksz_common.h"
void ksz_update_port_member(struct ksz_device *dev, int port)
{
@@ -371,9 +361,13 @@ int ksz_enable_port(struct dsa_switch *ds, int port, struct phy_device *phy)
{
struct ksz_device *dev = ds->priv;
+ if (!dsa_is_user_port(ds, port))
+ return 0;
+
/* setup slave port */
dev->dev_ops->port_setup(dev, port, false);
- dev->dev_ops->phy_setup(dev, port, phy);
+ if (dev->dev_ops->phy_setup)
+ dev->dev_ops->phy_setup(dev, port, phy);
/* port_stp_state_set() will be called after to enable the port so
* there is no need to do anything.
@@ -387,6 +381,9 @@ void ksz_disable_port(struct dsa_switch *ds, int port)
{
struct ksz_device *dev = ds->priv;
+ if (!dsa_is_user_port(ds, port))
+ return;
+
dev->on_ports &= ~(1 << port);
dev->live_ports &= ~(1 << port);
diff --git a/drivers/net/dsa/microchip/ksz_common.h b/drivers/net/dsa/microchip/ksz_common.h
index 72ec250b9540..13d027baaa8b 100644
--- a/drivers/net/dsa/microchip/ksz_common.h
+++ b/drivers/net/dsa/microchip/ksz_common.h
@@ -7,9 +7,152 @@
#ifndef __KSZ_COMMON_H
#define __KSZ_COMMON_H
+#include <linux/etherdevice.h>
+#include <linux/kernel.h>
+#include <linux/mutex.h>
+#include <linux/phy.h>
#include <linux/regmap.h>
+#include <net/dsa.h>
+
+struct vlan_table {
+ u32 table[3];
+};
+
+struct ksz_port_mib {
+ struct mutex cnt_mutex; /* structure access */
+ u8 cnt_ptr;
+ u64 *counters;
+};
+
+struct ksz_port {
+ u16 member;
+ u16 vid_member;
+ int stp_state;
+ struct phy_device phydev;
+
+ u32 on:1; /* port is not disabled by hardware */
+ u32 phy:1; /* port has a PHY */
+ u32 fiber:1; /* port is fiber */
+ u32 sgmii:1; /* port is SGMII */
+ u32 force:1;
+ u32 read:1; /* read MIB counters in background */
+ u32 freeze:1; /* MIB counter freeze is enabled */
+
+ struct ksz_port_mib mib;
+};
+
+struct ksz_device {
+ struct dsa_switch *ds;
+ struct ksz_platform_data *pdata;
+ const char *name;
+
+ struct mutex dev_mutex; /* device access */
+ struct mutex stats_mutex; /* status access */
+ struct mutex alu_mutex; /* ALU access */
+ struct mutex vlan_mutex; /* vlan access */
+ const struct ksz_dev_ops *dev_ops;
+
+ struct device *dev;
+ struct regmap *regmap[3];
+
+ void *priv;
+
+ struct gpio_desc *reset_gpio; /* Optional reset GPIO */
+
+ /* chip specific data */
+ u32 chip_id;
+ int num_vlans;
+ int num_alus;
+ int num_statics;
+ int cpu_port; /* port connected to CPU */
+ int cpu_ports; /* port bitmap can be cpu port */
+ int phy_port_cnt;
+ int port_cnt;
+ int reg_mib_cnt;
+ int mib_cnt;
+ int mib_port_cnt;
+ int last_port; /* ports after this one are not used */
+ phy_interface_t interface;
+ u32 regs_size;
+ bool phy_errata_9477;
+ bool synclko_125;
+
+ struct vlan_table *vlan_cache;
+
+ struct ksz_port *ports;
+ struct timer_list mib_read_timer;
+ struct work_struct mib_read;
+ unsigned long mib_read_interval;
+ u16 br_member;
+ u16 member;
+ u16 live_ports;
+ u16 on_ports; /* ports enabled by DSA */
+ u16 rx_ports;
+ u16 tx_ports;
+ u16 mirror_rx;
+ u16 mirror_tx;
+ u32 features; /* chip specific features */
+ u32 overrides; /* chip functions set by user */
+ u16 host_mask;
+ u16 port_mask;
+};
+
+struct alu_struct {
+ /* entry 1 */
+ u8 is_static:1;
+ u8 is_src_filter:1;
+ u8 is_dst_filter:1;
+ u8 prio_age:3;
+ u32 _reserv_0_1:23;
+ u8 mstp:3;
+ /* entry 2 */
+ u8 is_override:1;
+ u8 is_use_fid:1;
+ u32 _reserv_1_1:23;
+ u8 port_forward:7;
+ /* entry 3 & 4*/
+ u32 _reserv_2_1:9;
+ u8 fid:7;
+ u8 mac[ETH_ALEN];
+};
+
+struct ksz_dev_ops {
+ u32 (*get_port_addr)(int port, int offset);
+ void (*cfg_port_member)(struct ksz_device *dev, int port, u8 member);
+ void (*flush_dyn_mac_table)(struct ksz_device *dev, int port);
+ void (*phy_setup)(struct ksz_device *dev, int port,
+ struct phy_device *phy);
+ void (*port_cleanup)(struct ksz_device *dev, int port);
+ void (*port_setup)(struct ksz_device *dev, int port, bool cpu_port);
+ void (*r_phy)(struct ksz_device *dev, u16 phy, u16 reg, u16 *val);
+ void (*w_phy)(struct ksz_device *dev, u16 phy, u16 reg, u16 val);
+ int (*r_dyn_mac_table)(struct ksz_device *dev, u16 addr, u8 *mac_addr,
+ u8 *fid, u8 *src_port, u8 *timestamp,
+ u16 *entries);
+ int (*r_sta_mac_table)(struct ksz_device *dev, u16 addr,
+ struct alu_struct *alu);
+ void (*w_sta_mac_table)(struct ksz_device *dev, u16 addr,
+ struct alu_struct *alu);
+ void (*r_mib_cnt)(struct ksz_device *dev, int port, u16 addr,
+ u64 *cnt);
+ void (*r_mib_pkt)(struct ksz_device *dev, int port, u16 addr,
+ u64 *dropped, u64 *cnt);
+ void (*freeze_mib)(struct ksz_device *dev, int port, bool freeze);
+ void (*port_init_cnt)(struct ksz_device *dev, int port);
+ int (*shutdown)(struct ksz_device *dev);
+ int (*detect)(struct ksz_device *dev);
+ int (*init)(struct ksz_device *dev);
+ void (*exit)(struct ksz_device *dev);
+};
+
+struct ksz_device *ksz_switch_alloc(struct device *base, void *priv);
+int ksz_switch_register(struct ksz_device *dev,
+ const struct ksz_dev_ops *ops);
+void ksz_switch_remove(struct ksz_device *dev);
+
+int ksz8795_switch_register(struct ksz_device *dev);
+int ksz9477_switch_register(struct ksz_device *dev);
-void ksz_port_cleanup(struct ksz_device *dev, int port);
void ksz_update_port_member(struct ksz_device *dev, int port);
void ksz_init_mib_timer(struct ksz_device *dev);
@@ -68,6 +211,22 @@ static inline int ksz_read32(struct ksz_device *dev, u32 reg, u32 *val)
return ret;
}
+static inline int ksz_read64(struct ksz_device *dev, u32 reg, u64 *val)
+{
+ u32 value[2];
+ int ret;
+
+ ret = regmap_bulk_read(dev->regmap[2], reg, value, 2);
+ if (!ret) {
+ /* Ick! ToDo: Add 64bit R/W to regmap on 32bit systems */
+ *val = (u64)value[0] << 32 | value[1];
+ }
+
+ return ret;
+}
+
static inline int ksz_write8(struct ksz_device *dev, u32 reg, u8 value)
{
return regmap_write(dev->regmap[0], reg, value);
@@ -83,6 +242,18 @@ static inline int ksz_write32(struct ksz_device *dev, u32 reg, u32 value)
return regmap_write(dev->regmap[2], reg, value);
}
+static inline int ksz_write64(struct ksz_device *dev, u32 reg, u64 value)
+{
+ u32 val[2];
+
+ /* Ick! ToDo: Add 64bit R/W to regmap on 32bit systems */
+ val[0] = (u32)(value >> 32);
+ val[1] = (u32)value;
+
+ return regmap_bulk_write(dev->regmap[2], reg, val, 2);
+}
+
static inline void ksz_pread8(struct ksz_device *dev, int port, int offset,
u8 *data)
{
diff --git a/drivers/net/dsa/microchip/ksz_priv.h b/drivers/net/dsa/microchip/ksz_priv.h
deleted file mode 100644
index beacf0e40f42..000000000000
--- a/drivers/net/dsa/microchip/ksz_priv.h
+++ /dev/null
@@ -1,155 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0
- *
- * Microchip KSZ series switch common definitions
- *
- * Copyright (C) 2017-2019 Microchip Technology Inc.
- */
-
-#ifndef __KSZ_PRIV_H
-#define __KSZ_PRIV_H
-
-#include <linux/kernel.h>
-#include <linux/mutex.h>
-#include <linux/phy.h>
-#include <linux/etherdevice.h>
-#include <net/dsa.h>
-
-struct vlan_table {
- u32 table[3];
-};
-
-struct ksz_port_mib {
- struct mutex cnt_mutex; /* structure access */
- u8 cnt_ptr;
- u64 *counters;
-};
-
-struct ksz_port {
- u16 member;
- u16 vid_member;
- int stp_state;
- struct phy_device phydev;
-
- u32 on:1; /* port is not disabled by hardware */
- u32 phy:1; /* port has a PHY */
- u32 fiber:1; /* port is fiber */
- u32 sgmii:1; /* port is SGMII */
- u32 force:1;
- u32 read:1; /* read MIB counters in background */
- u32 freeze:1; /* MIB counter freeze is enabled */
-
- struct ksz_port_mib mib;
-};
-
-struct ksz_device {
- struct dsa_switch *ds;
- struct ksz_platform_data *pdata;
- const char *name;
-
- struct mutex dev_mutex; /* device access */
- struct mutex stats_mutex; /* status access */
- struct mutex alu_mutex; /* ALU access */
- struct mutex vlan_mutex; /* vlan access */
- const struct ksz_dev_ops *dev_ops;
-
- struct device *dev;
- struct regmap *regmap[3];
-
- void *priv;
-
- struct gpio_desc *reset_gpio; /* Optional reset GPIO */
-
- /* chip specific data */
- u32 chip_id;
- int num_vlans;
- int num_alus;
- int num_statics;
- int cpu_port; /* port connected to CPU */
- int cpu_ports; /* port bitmap can be cpu port */
- int phy_port_cnt;
- int port_cnt;
- int reg_mib_cnt;
- int mib_cnt;
- int mib_port_cnt;
- int last_port; /* ports after that not used */
- phy_interface_t interface;
- u32 regs_size;
- bool phy_errata_9477;
- bool synclko_125;
-
- struct vlan_table *vlan_cache;
-
- struct ksz_port *ports;
- struct timer_list mib_read_timer;
- struct work_struct mib_read;
- unsigned long mib_read_interval;
- u16 br_member;
- u16 member;
- u16 live_ports;
- u16 on_ports; /* ports enabled by DSA */
- u16 rx_ports;
- u16 tx_ports;
- u16 mirror_rx;
- u16 mirror_tx;
- u32 features; /* chip specific features */
- u32 overrides; /* chip functions set by user */
- u16 host_mask;
- u16 port_mask;
-};
-
-struct alu_struct {
- /* entry 1 */
- u8 is_static:1;
- u8 is_src_filter:1;
- u8 is_dst_filter:1;
- u8 prio_age:3;
- u32 _reserv_0_1:23;
- u8 mstp:3;
- /* entry 2 */
- u8 is_override:1;
- u8 is_use_fid:1;
- u32 _reserv_1_1:23;
- u8 port_forward:7;
- /* entry 3 & 4*/
- u32 _reserv_2_1:9;
- u8 fid:7;
- u8 mac[ETH_ALEN];
-};
-
-struct ksz_dev_ops {
- u32 (*get_port_addr)(int port, int offset);
- void (*cfg_port_member)(struct ksz_device *dev, int port, u8 member);
- void (*flush_dyn_mac_table)(struct ksz_device *dev, int port);
- void (*phy_setup)(struct ksz_device *dev, int port,
- struct phy_device *phy);
- void (*port_cleanup)(struct ksz_device *dev, int port);
- void (*port_setup)(struct ksz_device *dev, int port, bool cpu_port);
- void (*r_phy)(struct ksz_device *dev, u16 phy, u16 reg, u16 *val);
- void (*w_phy)(struct ksz_device *dev, u16 phy, u16 reg, u16 val);
- int (*r_dyn_mac_table)(struct ksz_device *dev, u16 addr, u8 *mac_addr,
- u8 *fid, u8 *src_port, u8 *timestamp,
- u16 *entries);
- int (*r_sta_mac_table)(struct ksz_device *dev, u16 addr,
- struct alu_struct *alu);
- void (*w_sta_mac_table)(struct ksz_device *dev, u16 addr,
- struct alu_struct *alu);
- void (*r_mib_cnt)(struct ksz_device *dev, int port, u16 addr,
- u64 *cnt);
- void (*r_mib_pkt)(struct ksz_device *dev, int port, u16 addr,
- u64 *dropped, u64 *cnt);
- void (*freeze_mib)(struct ksz_device *dev, int port, bool freeze);
- void (*port_init_cnt)(struct ksz_device *dev, int port);
- int (*shutdown)(struct ksz_device *dev);
- int (*detect)(struct ksz_device *dev);
- int (*init)(struct ksz_device *dev);
- void (*exit)(struct ksz_device *dev);
-};
-
-struct ksz_device *ksz_switch_alloc(struct device *base, void *priv);
-int ksz_switch_register(struct ksz_device *dev,
- const struct ksz_dev_ops *ops);
-void ksz_switch_remove(struct ksz_device *dev);
-
-int ksz9477_switch_register(struct ksz_device *dev);
-
-#endif
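
The definitions removed here move into ksz_common.h (see the diffstat); the ksz_dev_ops table is a per-chip dispatch structure. A hedged sketch of how the common layer calls through it (the wrapper name is illustrative, the fields match the struct above):

static void ksz_example_port_setup(struct ksz_device *dev, int port,
                                   bool cpu_port)
{
        /* Dispatch to the chip-specific implementation, if provided */
        if (dev->dev_ops->port_setup)
                dev->dev_ops->port_setup(dev, port, cpu_port);
}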
diff --git a/drivers/net/dsa/mt7530.c b/drivers/net/dsa/mt7530.c
index 3181e95586d6..c48e29486b10 100644
--- a/drivers/net/dsa/mt7530.c
+++ b/drivers/net/dsa/mt7530.c
@@ -726,6 +726,9 @@ mt7530_port_enable(struct dsa_switch *ds, int port,
{
struct mt7530_priv *priv = ds->priv;
+ if (!dsa_is_user_port(ds, port))
+ return 0;
+
mutex_lock(&priv->reg_mutex);
/* Setup the MAC for the user port */
@@ -751,6 +754,9 @@ mt7530_port_disable(struct dsa_switch *ds, int port)
{
struct mt7530_priv *priv = ds->priv;
+ if (!dsa_is_user_port(ds, port))
+ return;
+
mutex_lock(&priv->reg_mutex);
/* Clear up all port matrix which could be restored in the next
diff --git a/drivers/net/dsa/mv88e6xxx/Makefile b/drivers/net/dsa/mv88e6xxx/Makefile
index e85755dde90b..aa645ff86f64 100644
--- a/drivers/net/dsa/mv88e6xxx/Makefile
+++ b/drivers/net/dsa/mv88e6xxx/Makefile
@@ -10,6 +10,7 @@ mv88e6xxx-$(CONFIG_NET_DSA_MV88E6XXX_GLOBAL2) += global2_scratch.o
mv88e6xxx-$(CONFIG_NET_DSA_MV88E6XXX_PTP) += hwtstamp.o
mv88e6xxx-objs += phy.o
mv88e6xxx-objs += port.o
+mv88e6xxx-objs += port_hidden.o
mv88e6xxx-$(CONFIG_NET_DSA_MV88E6XXX_PTP) += ptp.o
mv88e6xxx-objs += serdes.o
mv88e6xxx-objs += smi.o
diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c
index d0a97eb73a37..30365a54c31b 100644
--- a/drivers/net/dsa/mv88e6xxx/chip.c
+++ b/drivers/net/dsa/mv88e6xxx/chip.c
@@ -10,6 +10,7 @@
* Vivien Didelot <vivien.didelot@savoirfairelinux.com>
*/
+#include <linux/bitfield.h>
#include <linux/delay.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
@@ -80,6 +81,36 @@ int mv88e6xxx_write(struct mv88e6xxx_chip *chip, int addr, int reg, u16 val)
return 0;
}
+int mv88e6xxx_wait_mask(struct mv88e6xxx_chip *chip, int addr, int reg,
+ u16 mask, u16 val)
+{
+ u16 data;
+ int err;
+ int i;
+
+ /* There's no bus specific operation to wait for a mask */
+ for (i = 0; i < 16; i++) {
+ err = mv88e6xxx_read(chip, addr, reg, &data);
+ if (err)
+ return err;
+
+ if ((data & mask) == val)
+ return 0;
+
+ usleep_range(1000, 2000);
+ }
+
+ dev_err(chip->dev, "Timeout while waiting for switch\n");
+ return -ETIMEDOUT;
+}
+
+int mv88e6xxx_wait_bit(struct mv88e6xxx_chip *chip, int addr, int reg,
+ int bit, int val)
+{
+ return mv88e6xxx_wait_mask(chip, addr, reg, BIT(bit),
+ val ? BIT(bit) : 0x0000);
+}
+
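
A hedged usage sketch for the new helper (illustrative function name): polling bit 11 (InitReady) of the Global 1 status register until it reads as one, which is how the init-ready wait later in this patch uses it.

static int example_wait_init_ready(struct mv88e6xxx_chip *chip)
{
        /* Poll until bit 11 of the status register reads back as 1 */
        return mv88e6xxx_wait_bit(chip, chip->info->global1_addr,
                                  MV88E6XXX_G1_STS, 11, 1);
}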
struct mii_bus *mv88e6xxx_default_mdio_bus(struct mv88e6xxx_chip *chip)
{
struct mv88e6xxx_mdio_bus *mdio_bus;
@@ -363,45 +394,6 @@ static void mv88e6xxx_irq_poll_free(struct mv88e6xxx_chip *chip)
mv88e6xxx_reg_unlock(chip);
}
-int mv88e6xxx_wait(struct mv88e6xxx_chip *chip, int addr, int reg, u16 mask)
-{
- int i;
-
- for (i = 0; i < 16; i++) {
- u16 val;
- int err;
-
- err = mv88e6xxx_read(chip, addr, reg, &val);
- if (err)
- return err;
-
- if (!(val & mask))
- return 0;
-
- usleep_range(1000, 2000);
- }
-
- dev_err(chip->dev, "Timeout while waiting for switch\n");
- return -ETIMEDOUT;
-}
-
-/* Indirect write to single pointer-data register with an Update bit */
-int mv88e6xxx_update(struct mv88e6xxx_chip *chip, int addr, int reg, u16 update)
-{
- u16 val;
- int err;
-
- /* Wait until the previous operation is completed */
- err = mv88e6xxx_wait(chip, addr, reg, BIT(15));
- if (err)
- return err;
-
- /* Set the Update bit to trigger a write operation */
- val = BIT(15) | update;
-
- return mv88e6xxx_write(chip, addr, reg, val);
-}
-
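
With mv88e6xxx_update() removed, callers open-code the pointer-data idiom against the new wait helper. A sketch of the equivalent sequence (illustrative function name; the Update/Busy flag is bit 15 for these registers):

static int example_pointer_data_write(struct mv88e6xxx_chip *chip,
                                      int addr, int reg, u16 update)
{
        int err;

        /* Wait until the previous operation has completed (bit 15 clear) */
        err = mv88e6xxx_wait_bit(chip, addr, reg, 15, 0);
        if (err)
                return err;

        /* Writing with the Update bit set triggers the actual operation */
        return mv88e6xxx_write(chip, addr, reg, BIT(15) | update);
}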
int mv88e6xxx_port_setup_mac(struct mv88e6xxx_chip *chip, int port, int link,
int speed, int duplex, int pause,
phy_interface_t mode)
@@ -425,7 +417,9 @@ int mv88e6xxx_port_setup_mac(struct mv88e6xxx_chip *chip, int port, int link,
*/
if (state.link == link &&
state.speed == speed &&
- state.duplex == duplex)
+ state.duplex == duplex &&
+ (state.interface == mode ||
+ state.interface == PHY_INTERFACE_MODE_NA))
return 0;
/* Port's MAC control must not be changed unless the link is down */
@@ -1336,9 +1330,7 @@ static int mv88e6xxx_vtu_loadpurge(struct mv88e6xxx_chip *chip,
static int mv88e6xxx_atu_new(struct mv88e6xxx_chip *chip, u16 *fid)
{
DECLARE_BITMAP(fid_bitmap, MV88E6XXX_N_FID);
- struct mv88e6xxx_vtu_entry vlan = {
- .vid = chip->info->max_vid,
- };
+ struct mv88e6xxx_vtu_entry vlan;
int i, err;
bitmap_zero(fid_bitmap, MV88E6XXX_N_FID);
@@ -1353,6 +1345,9 @@ static int mv88e6xxx_atu_new(struct mv88e6xxx_chip *chip, u16 *fid)
}
/* Set every FID bit used by the VLAN entries */
+ vlan.vid = chip->info->max_vid;
+ vlan.valid = false;
+
do {
err = mv88e6xxx_vtu_getnext(chip, &vlan);
if (err)
@@ -1375,51 +1370,11 @@ static int mv88e6xxx_atu_new(struct mv88e6xxx_chip *chip, u16 *fid)
return mv88e6xxx_g1_atu_flush(chip, *fid, true);
}
-static int mv88e6xxx_vtu_get(struct mv88e6xxx_chip *chip, u16 vid,
- struct mv88e6xxx_vtu_entry *entry, bool new)
-{
- int err;
-
- if (!vid)
- return -EOPNOTSUPP;
-
- entry->vid = vid - 1;
- entry->valid = false;
-
- err = mv88e6xxx_vtu_getnext(chip, entry);
- if (err)
- return err;
-
- if (entry->vid == vid && entry->valid)
- return 0;
-
- if (new) {
- int i;
-
- /* Initialize a fresh VLAN entry */
- memset(entry, 0, sizeof(*entry));
- entry->valid = true;
- entry->vid = vid;
-
- /* Exclude all ports */
- for (i = 0; i < mv88e6xxx_num_ports(chip); ++i)
- entry->member[i] =
- MV88E6XXX_G1_VTU_DATA_MEMBER_TAG_NON_MEMBER;
-
- return mv88e6xxx_atu_new(chip, &entry->fid);
- }
-
- /* switchdev expects -EOPNOTSUPP to honor software VLANs */
- return -EOPNOTSUPP;
-}
-
static int mv88e6xxx_port_check_hw_vlan(struct dsa_switch *ds, int port,
u16 vid_begin, u16 vid_end)
{
struct mv88e6xxx_chip *chip = ds->priv;
- struct mv88e6xxx_vtu_entry vlan = {
- .vid = vid_begin - 1,
- };
+ struct mv88e6xxx_vtu_entry vlan;
int i, err;
/* DSA and CPU ports have to be members of multiple vlans */
@@ -1429,12 +1384,13 @@ static int mv88e6xxx_port_check_hw_vlan(struct dsa_switch *ds, int port,
if (!vid_begin)
return -EOPNOTSUPP;
- mv88e6xxx_reg_lock(chip);
+ vlan.vid = vid_begin - 1;
+ vlan.valid = false;
do {
err = mv88e6xxx_vtu_getnext(chip, &vlan);
if (err)
- goto unlock;
+ return err;
if (!vlan.valid)
break;
@@ -1463,15 +1419,11 @@ static int mv88e6xxx_port_check_hw_vlan(struct dsa_switch *ds, int port,
dev_err(ds->dev, "p%d: hw VLAN %d already used by port %d in %s\n",
port, vlan.vid, i,
netdev_name(dsa_to_port(ds, i)->bridge_dev));
- err = -EOPNOTSUPP;
- goto unlock;
+ return -EOPNOTSUPP;
}
} while (vlan.vid < vid_end);
-unlock:
- mv88e6xxx_reg_unlock(chip);
-
- return err;
+ return 0;
}
static int mv88e6xxx_port_vlan_filtering(struct dsa_switch *ds, int port,
@@ -1505,38 +1457,51 @@ mv88e6xxx_port_vlan_prepare(struct dsa_switch *ds, int port,
/* If the requested port doesn't belong to the same bridge as the VLAN
* members, do not support it (yet) and fallback to software VLAN.
*/
+ mv88e6xxx_reg_lock(chip);
err = mv88e6xxx_port_check_hw_vlan(ds, port, vlan->vid_begin,
vlan->vid_end);
- if (err)
- return err;
+ mv88e6xxx_reg_unlock(chip);
/* We don't need any dynamic resource from the kernel (yet),
* so skip the prepare phase.
*/
- return 0;
+ return err;
}
static int mv88e6xxx_port_db_load_purge(struct mv88e6xxx_chip *chip, int port,
const unsigned char *addr, u16 vid,
u8 state)
{
- struct mv88e6xxx_vtu_entry vlan;
struct mv88e6xxx_atu_entry entry;
+ struct mv88e6xxx_vtu_entry vlan;
+ u16 fid;
int err;
/* Null VLAN ID corresponds to the port private database */
- if (vid == 0)
- err = mv88e6xxx_port_get_fid(chip, port, &vlan.fid);
- else
- err = mv88e6xxx_vtu_get(chip, vid, &vlan, false);
- if (err)
- return err;
+ if (vid == 0) {
+ err = mv88e6xxx_port_get_fid(chip, port, &fid);
+ if (err)
+ return err;
+ } else {
+ vlan.vid = vid - 1;
+ vlan.valid = false;
+
+ err = mv88e6xxx_vtu_getnext(chip, &vlan);
+ if (err)
+ return err;
+
+ /* switchdev expects -EOPNOTSUPP to honor software VLANs */
+ if (vlan.vid != vid || !vlan.valid)
+ return -EOPNOTSUPP;
+
+ fid = vlan.fid;
+ }
entry.state = MV88E6XXX_G1_ATU_DATA_STATE_UNUSED;
ether_addr_copy(entry.mac, addr);
eth_addr_dec(entry.mac);
- err = mv88e6xxx_g1_atu_getnext(chip, vlan.fid, &entry);
+ err = mv88e6xxx_g1_atu_getnext(chip, fid, &entry);
if (err)
return err;
@@ -1557,7 +1522,7 @@ static int mv88e6xxx_port_db_load_purge(struct mv88e6xxx_chip *chip, int port,
entry.state = state;
}
- return mv88e6xxx_g1_atu_loadpurge(chip, vlan.fid, &entry);
+ return mv88e6xxx_g1_atu_loadpurge(chip, fid, &entry);
}
static int mv88e6xxx_port_add_broadcast(struct mv88e6xxx_chip *chip, int port,
@@ -1583,23 +1548,58 @@ static int mv88e6xxx_broadcast_setup(struct mv88e6xxx_chip *chip, u16 vid)
return 0;
}
-static int _mv88e6xxx_port_vlan_add(struct mv88e6xxx_chip *chip, int port,
+static int mv88e6xxx_port_vlan_join(struct mv88e6xxx_chip *chip, int port,
u16 vid, u8 member)
{
+ const u8 non_member = MV88E6XXX_G1_VTU_DATA_MEMBER_TAG_NON_MEMBER;
struct mv88e6xxx_vtu_entry vlan;
- int err;
+ int i, err;
- err = mv88e6xxx_vtu_get(chip, vid, &vlan, true);
- if (err)
- return err;
+ if (!vid)
+ return -EOPNOTSUPP;
- vlan.member[port] = member;
+ vlan.vid = vid - 1;
+ vlan.valid = false;
- err = mv88e6xxx_vtu_loadpurge(chip, &vlan);
+ err = mv88e6xxx_vtu_getnext(chip, &vlan);
if (err)
return err;
- return mv88e6xxx_broadcast_setup(chip, vid);
+ if (vlan.vid != vid || !vlan.valid) {
+ memset(&vlan, 0, sizeof(vlan));
+
+ err = mv88e6xxx_atu_new(chip, &vlan.fid);
+ if (err)
+ return err;
+
+ for (i = 0; i < mv88e6xxx_num_ports(chip); ++i)
+ if (i == port)
+ vlan.member[i] = member;
+ else
+ vlan.member[i] = non_member;
+
+ vlan.vid = vid;
+ vlan.valid = true;
+
+ err = mv88e6xxx_vtu_loadpurge(chip, &vlan);
+ if (err)
+ return err;
+
+ err = mv88e6xxx_broadcast_setup(chip, vlan.vid);
+ if (err)
+ return err;
+ } else if (vlan.member[port] != member) {
+ vlan.member[port] = member;
+
+ err = mv88e6xxx_vtu_loadpurge(chip, &vlan);
+ if (err)
+ return err;
+ } else {
+ dev_info(chip->dev, "p%d: already a member of VLAN %d\n",
+ port, vid);
+ }
+
+ return 0;
}
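
The VTU lookup idiom used throughout this patch, in isolation: seed the iterator one VID below the target and step once, since getnext returns the first valid entry with a VID greater than the seed. A sketch (assuming -ENOENT as the miss code; the callers above map a miss to -EOPNOTSUPP for switchdev instead):

static int example_vtu_find(struct mv88e6xxx_chip *chip, u16 vid,
                            struct mv88e6xxx_vtu_entry *entry)
{
        int err;

        entry->vid = vid - 1;
        entry->valid = false;

        err = mv88e6xxx_vtu_getnext(chip, entry);
        if (err)
                return err;

        /* A hit lands exactly on the requested VID and is valid */
        return (entry->vid == vid && entry->valid) ? 0 : -ENOENT;
}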
static void mv88e6xxx_port_vlan_add(struct dsa_switch *ds, int port,
@@ -1624,7 +1624,7 @@ static void mv88e6xxx_port_vlan_add(struct dsa_switch *ds, int port,
mv88e6xxx_reg_lock(chip);
for (vid = vlan->vid_begin; vid <= vlan->vid_end; ++vid)
- if (_mv88e6xxx_port_vlan_add(chip, port, vid, member))
+ if (mv88e6xxx_port_vlan_join(chip, port, vid, member))
dev_err(ds->dev, "p%d: failed to add VLAN %d%c\n", port,
vid, untagged ? 'u' : 't');
@@ -1635,18 +1635,27 @@ static void mv88e6xxx_port_vlan_add(struct dsa_switch *ds, int port,
mv88e6xxx_reg_unlock(chip);
}
-static int _mv88e6xxx_port_vlan_del(struct mv88e6xxx_chip *chip,
- int port, u16 vid)
+static int mv88e6xxx_port_vlan_leave(struct mv88e6xxx_chip *chip,
+ int port, u16 vid)
{
struct mv88e6xxx_vtu_entry vlan;
int i, err;
- err = mv88e6xxx_vtu_get(chip, vid, &vlan, false);
+ if (!vid)
+ return -EOPNOTSUPP;
+
+ vlan.vid = vid - 1;
+ vlan.valid = false;
+
+ err = mv88e6xxx_vtu_getnext(chip, &vlan);
if (err)
return err;
- /* Tell switchdev if this VLAN is handled in software */
- if (vlan.member[port] == MV88E6XXX_G1_VTU_DATA_MEMBER_TAG_NON_MEMBER)
+ /* If the VLAN doesn't exist in hardware or the port isn't a member,
+ * tell switchdev that this VLAN is likely handled in software.
+ */
+ if (vlan.vid != vid || !vlan.valid ||
+ vlan.member[port] == MV88E6XXX_G1_VTU_DATA_MEMBER_TAG_NON_MEMBER)
return -EOPNOTSUPP;
vlan.member[port] = MV88E6XXX_G1_VTU_DATA_MEMBER_TAG_NON_MEMBER;
@@ -1685,7 +1694,7 @@ static int mv88e6xxx_port_vlan_del(struct dsa_switch *ds, int port,
goto unlock;
for (vid = vlan->vid_begin; vid <= vlan->vid_end; ++vid) {
- err = _mv88e6xxx_port_vlan_del(chip, port, vid);
+ err = mv88e6xxx_port_vlan_leave(chip, port, vid);
if (err)
goto unlock;
@@ -1768,9 +1777,7 @@ static int mv88e6xxx_port_db_dump_fid(struct mv88e6xxx_chip *chip,
static int mv88e6xxx_port_db_dump(struct mv88e6xxx_chip *chip, int port,
dsa_fdb_dump_cb_t *cb, void *data)
{
- struct mv88e6xxx_vtu_entry vlan = {
- .vid = chip->info->max_vid,
- };
+ struct mv88e6xxx_vtu_entry vlan;
u16 fid;
int err;
@@ -1784,6 +1791,9 @@ static int mv88e6xxx_port_db_dump(struct mv88e6xxx_chip *chip, int port,
return err;
/* Dump VLANs' Filtering Information Databases */
+ vlan.vid = chip->info->max_vid;
+ vlan.valid = false;
+
do {
err = mv88e6xxx_vtu_getnext(chip, &vlan);
if (err)
@@ -2044,13 +2054,96 @@ static int mv88e6xxx_setup_egress_floods(struct mv88e6xxx_chip *chip, int port)
return 0;
}
+static irqreturn_t mv88e6xxx_serdes_irq_thread_fn(int irq, void *dev_id)
+{
+ struct mv88e6xxx_port *mvp = dev_id;
+ struct mv88e6xxx_chip *chip = mvp->chip;
+ irqreturn_t ret = IRQ_NONE;
+ int port = mvp->port;
+ u8 lane;
+
+ mv88e6xxx_reg_lock(chip);
+ lane = mv88e6xxx_serdes_get_lane(chip, port);
+ if (lane)
+ ret = mv88e6xxx_serdes_irq_status(chip, port, lane);
+ mv88e6xxx_reg_unlock(chip);
+
+ return ret;
+}
+
+static int mv88e6xxx_serdes_irq_request(struct mv88e6xxx_chip *chip, int port,
+ u8 lane)
+{
+ struct mv88e6xxx_port *dev_id = &chip->ports[port];
+ unsigned int irq;
+ int err;
+
+ /* Nothing to request if this SERDES port has no IRQ */
+ irq = mv88e6xxx_serdes_irq_mapping(chip, port);
+ if (!irq)
+ return 0;
+
+ /* Requesting the IRQ will trigger IRQ callbacks, so release the lock */
+ mv88e6xxx_reg_unlock(chip);
+ err = request_threaded_irq(irq, NULL, mv88e6xxx_serdes_irq_thread_fn,
+ IRQF_ONESHOT, "mv88e6xxx-serdes", dev_id);
+ mv88e6xxx_reg_lock(chip);
+ if (err)
+ return err;
+
+ dev_id->serdes_irq = irq;
+
+ return mv88e6xxx_serdes_irq_enable(chip, port, lane);
+}
+
+static int mv88e6xxx_serdes_irq_free(struct mv88e6xxx_chip *chip, int port,
+ u8 lane)
+{
+ struct mv88e6xxx_port *dev_id = &chip->ports[port];
+ unsigned int irq = dev_id->serdes_irq;
+ int err;
+
+ /* Nothing to free if no IRQ has been requested */
+ if (!irq)
+ return 0;
+
+ err = mv88e6xxx_serdes_irq_disable(chip, port, lane);
+
+ /* Freeing the IRQ will trigger IRQ callbacks, so release the lock */
+ mv88e6xxx_reg_unlock(chip);
+ free_irq(irq, dev_id);
+ mv88e6xxx_reg_lock(chip);
+
+ dev_id->serdes_irq = 0;
+
+ return err;
+}
+
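
Both helpers assume chip's register lock is held and drop it only around the IRQ core calls: request_threaded_irq() and free_irq() can run the threaded handler synchronously, and that handler takes the same lock, so holding it across those calls could deadlock. A sketch of the expected caller context (illustrative wrapper name):

static int example_serdes_irq_request(struct mv88e6xxx_chip *chip,
                                      int port, u8 lane)
{
        int err;

        mv88e6xxx_reg_lock(chip);
        /* The helper drops and retakes the lock internally */
        err = mv88e6xxx_serdes_irq_request(chip, port, lane);
        mv88e6xxx_reg_unlock(chip);

        return err;
}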
static int mv88e6xxx_serdes_power(struct mv88e6xxx_chip *chip, int port,
bool on)
{
- if (chip->info->ops->serdes_power)
- return chip->info->ops->serdes_power(chip, port, on);
+ u8 lane;
+ int err;
- return 0;
+ lane = mv88e6xxx_serdes_get_lane(chip, port);
+ if (!lane)
+ return 0;
+
+ if (on) {
+ err = mv88e6xxx_serdes_power_up(chip, port, lane);
+ if (err)
+ return err;
+
+ err = mv88e6xxx_serdes_irq_request(chip, port, lane);
+ } else {
+ err = mv88e6xxx_serdes_irq_free(chip, port, lane);
+ if (err)
+ return err;
+
+ err = mv88e6xxx_serdes_power_down(chip, port, lane);
+ }
+
+ return err;
}
static int mv88e6xxx_setup_upstream_port(struct mv88e6xxx_chip *chip, int port)
@@ -2141,16 +2234,6 @@ static int mv88e6xxx_setup_port(struct mv88e6xxx_chip *chip, int port)
if (err)
return err;
- /* Enable the SERDES interface for DSA and CPU ports. Normal
- * ports SERDES are enabled when the port is enabled, thus
- * saving a bit of power.
- */
- if ((dsa_is_cpu_port(ds, port) || dsa_is_dsa_port(ds, port))) {
- err = mv88e6xxx_serdes_power(chip, port, true);
- if (err)
- return err;
- }
-
/* Port Control 2: don't force a good FCS, set the maximum frame size to
* 10240 bytes, disable 802.1q tags checking, don't discard tagged or
* untagged frames on this port, do a destination address lookup on all
@@ -2227,9 +2310,11 @@ static int mv88e6xxx_setup_port(struct mv88e6xxx_chip *chip, int port)
return err;
}
- err = mv88e6xxx_setup_message_port(chip, port);
- if (err)
- return err;
+ if (chip->info->ops->port_setup_message_port) {
+ err = chip->info->ops->port_setup_message_port(chip, port);
+ if (err)
+ return err;
+ }
/* Port based VLAN map: give each port the same default address
* database, and allow bidirectional communication between the
@@ -2256,12 +2341,7 @@ static int mv88e6xxx_port_enable(struct dsa_switch *ds, int port,
int err;
mv88e6xxx_reg_lock(chip);
-
err = mv88e6xxx_serdes_power(chip, port, true);
-
- if (!err && chip->info->ops->serdes_irq_setup)
- err = chip->info->ops->serdes_irq_setup(chip, port);
-
mv88e6xxx_reg_unlock(chip);
return err;
@@ -2272,16 +2352,8 @@ static void mv88e6xxx_port_disable(struct dsa_switch *ds, int port)
struct mv88e6xxx_chip *chip = ds->priv;
mv88e6xxx_reg_lock(chip);
-
- if (mv88e6xxx_port_set_state(chip, port, BR_STATE_DISABLED))
- dev_err(chip->dev, "failed to disable port\n");
-
- if (chip->info->ops->serdes_irq_free)
- chip->info->ops->serdes_irq_free(chip, port);
-
if (mv88e6xxx_serdes_power(chip, port, false))
dev_err(chip->dev, "failed to power off SERDES\n");
-
mv88e6xxx_reg_unlock(chip);
}
@@ -2312,58 +2384,6 @@ static int mv88e6xxx_stats_setup(struct mv88e6xxx_chip *chip)
return mv88e6xxx_g1_stats_clear(chip);
}
-/* The mv88e6390 has some hidden registers used for debug and
- * development. The errata also makes use of them.
- */
-static int mv88e6390_hidden_write(struct mv88e6xxx_chip *chip, int port,
- int reg, u16 val)
-{
- u16 ctrl;
- int err;
-
- err = mv88e6xxx_port_write(chip, PORT_RESERVED_1A_DATA_PORT,
- PORT_RESERVED_1A, val);
- if (err)
- return err;
-
- ctrl = PORT_RESERVED_1A_BUSY | PORT_RESERVED_1A_WRITE |
- PORT_RESERVED_1A_BLOCK | port << PORT_RESERVED_1A_PORT_SHIFT |
- reg;
-
- return mv88e6xxx_port_write(chip, PORT_RESERVED_1A_CTRL_PORT,
- PORT_RESERVED_1A, ctrl);
-}
-
-static int mv88e6390_hidden_wait(struct mv88e6xxx_chip *chip)
-{
- return mv88e6xxx_wait(chip, PORT_RESERVED_1A_CTRL_PORT,
- PORT_RESERVED_1A, PORT_RESERVED_1A_BUSY);
-}
-
-
-static int mv88e6390_hidden_read(struct mv88e6xxx_chip *chip, int port,
- int reg, u16 *val)
-{
- u16 ctrl;
- int err;
-
- ctrl = PORT_RESERVED_1A_BUSY | PORT_RESERVED_1A_READ |
- PORT_RESERVED_1A_BLOCK | port << PORT_RESERVED_1A_PORT_SHIFT |
- reg;
-
- err = mv88e6xxx_port_write(chip, PORT_RESERVED_1A_CTRL_PORT,
- PORT_RESERVED_1A, ctrl);
- if (err)
- return err;
-
- err = mv88e6390_hidden_wait(chip);
- if (err)
- return err;
-
- return mv88e6xxx_port_read(chip, PORT_RESERVED_1A_DATA_PORT,
- PORT_RESERVED_1A, val);
-}
-
/* Check if the errata has already been applied. */
static bool mv88e6390_setup_errata_applied(struct mv88e6xxx_chip *chip)
{
@@ -2372,7 +2392,7 @@ static bool mv88e6390_setup_errata_applied(struct mv88e6xxx_chip *chip)
u16 val;
for (port = 0; port < mv88e6xxx_num_ports(chip); port++) {
- err = mv88e6390_hidden_read(chip, port, 0, &val);
+ err = mv88e6xxx_port_hidden_read(chip, 0xf, port, 0, &val);
if (err) {
dev_err(chip->dev,
"Error reading hidden register: %d\n", err);
@@ -2405,7 +2425,7 @@ static int mv88e6390_setup_errata(struct mv88e6xxx_chip *chip)
}
for (port = 0; port < mv88e6xxx_num_ports(chip); port++) {
- err = mv88e6390_hidden_write(chip, port, 0, 0x01c0);
+ err = mv88e6xxx_port_hidden_write(chip, 0xf, port, 0, 0x01c0);
if (err)
return err;
}
@@ -2444,17 +2464,14 @@ static int mv88e6xxx_setup(struct dsa_switch *ds)
/* Setup Switch Port Registers */
for (i = 0; i < mv88e6xxx_num_ports(chip); i++) {
- if (dsa_is_unused_port(ds, i)) {
- err = mv88e6xxx_port_set_state(chip, i,
- BR_STATE_DISABLED);
- if (err)
- goto unlock;
-
- err = mv88e6xxx_serdes_power(chip, i, false);
- if (err)
- goto unlock;
-
+ if (dsa_is_unused_port(ds, i))
continue;
+
+ /* Prevent the use of an invalid port. */
+ if (mv88e6xxx_is_invalid_port(chip, i)) {
+ dev_err(chip->dev, "port %d is invalid\n", i);
+ err = -EINVAL;
+ goto unlock;
}
err = mv88e6xxx_setup_port(chip, i);
@@ -2773,6 +2790,7 @@ static const struct mv88e6xxx_ops mv88e6085_ops = {
.port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
.port_link_state = mv88e6352_port_link_state,
.port_get_cmode = mv88e6185_port_get_cmode,
+ .port_setup_message_port = mv88e6xxx_setup_message_port,
.stats_snapshot = mv88e6xxx_g1_stats_snapshot,
.stats_set_histogram = mv88e6095_g1_stats_set_histogram,
.stats_get_sset_count = mv88e6095_stats_get_sset_count,
@@ -2807,6 +2825,7 @@ static const struct mv88e6xxx_ops mv88e6095_ops = {
.port_set_upstream_port = mv88e6095_port_set_upstream_port,
.port_link_state = mv88e6185_port_link_state,
.port_get_cmode = mv88e6185_port_get_cmode,
+ .port_setup_message_port = mv88e6xxx_setup_message_port,
.stats_snapshot = mv88e6xxx_g1_stats_snapshot,
.stats_set_histogram = mv88e6095_g1_stats_set_histogram,
.stats_get_sset_count = mv88e6095_stats_get_sset_count,
@@ -2843,6 +2862,7 @@ static const struct mv88e6xxx_ops mv88e6097_ops = {
.port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
.port_link_state = mv88e6352_port_link_state,
.port_get_cmode = mv88e6185_port_get_cmode,
+ .port_setup_message_port = mv88e6xxx_setup_message_port,
.stats_snapshot = mv88e6xxx_g1_stats_snapshot,
.stats_set_histogram = mv88e6095_g1_stats_set_histogram,
.stats_get_sset_count = mv88e6095_stats_get_sset_count,
@@ -2877,6 +2897,7 @@ static const struct mv88e6xxx_ops mv88e6123_ops = {
.port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
.port_link_state = mv88e6352_port_link_state,
.port_get_cmode = mv88e6185_port_get_cmode,
+ .port_setup_message_port = mv88e6xxx_setup_message_port,
.stats_snapshot = mv88e6320_g1_stats_snapshot,
.stats_set_histogram = mv88e6095_g1_stats_set_histogram,
.stats_get_sset_count = mv88e6095_stats_get_sset_count,
@@ -2914,6 +2935,7 @@ static const struct mv88e6xxx_ops mv88e6131_ops = {
.port_set_pause = mv88e6185_port_set_pause,
.port_link_state = mv88e6352_port_link_state,
.port_get_cmode = mv88e6185_port_get_cmode,
+ .port_setup_message_port = mv88e6xxx_setup_message_port,
.stats_snapshot = mv88e6xxx_g1_stats_snapshot,
.stats_set_histogram = mv88e6095_g1_stats_set_histogram,
.stats_get_sset_count = mv88e6095_stats_get_sset_count,
@@ -2958,6 +2980,8 @@ static const struct mv88e6xxx_ops mv88e6141_ops = {
.port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
.port_link_state = mv88e6352_port_link_state,
.port_get_cmode = mv88e6352_port_get_cmode,
+ .port_set_cmode = mv88e6341_port_set_cmode,
+ .port_setup_message_port = mv88e6xxx_setup_message_port,
.stats_snapshot = mv88e6390_g1_stats_snapshot,
.stats_set_histogram = mv88e6095_g1_stats_set_histogram,
.stats_get_sset_count = mv88e6320_stats_get_sset_count,
@@ -2971,7 +2995,11 @@ static const struct mv88e6xxx_ops mv88e6141_ops = {
.reset = mv88e6352_g1_reset,
.vtu_getnext = mv88e6352_g1_vtu_getnext,
.vtu_loadpurge = mv88e6352_g1_vtu_loadpurge,
- .serdes_power = mv88e6341_serdes_power,
+ .serdes_power = mv88e6390_serdes_power,
+ .serdes_get_lane = mv88e6341_serdes_get_lane,
+ .serdes_irq_mapping = mv88e6390_serdes_irq_mapping,
+ .serdes_irq_enable = mv88e6390_serdes_irq_enable,
+ .serdes_irq_status = mv88e6390_serdes_irq_status,
.gpio_ops = &mv88e6352_gpio_ops,
.phylink_validate = mv88e6341_phylink_validate,
};
@@ -2998,6 +3026,7 @@ static const struct mv88e6xxx_ops mv88e6161_ops = {
.port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
.port_link_state = mv88e6352_port_link_state,
.port_get_cmode = mv88e6185_port_get_cmode,
+ .port_setup_message_port = mv88e6xxx_setup_message_port,
.stats_snapshot = mv88e6xxx_g1_stats_snapshot,
.stats_set_histogram = mv88e6095_g1_stats_set_histogram,
.stats_get_sset_count = mv88e6095_stats_get_sset_count,
@@ -3031,6 +3060,7 @@ static const struct mv88e6xxx_ops mv88e6165_ops = {
.port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
.port_link_state = mv88e6352_port_link_state,
.port_get_cmode = mv88e6185_port_get_cmode,
+ .port_setup_message_port = mv88e6xxx_setup_message_port,
.stats_snapshot = mv88e6xxx_g1_stats_snapshot,
.stats_set_histogram = mv88e6095_g1_stats_set_histogram,
.stats_get_sset_count = mv88e6095_stats_get_sset_count,
@@ -3072,6 +3102,7 @@ static const struct mv88e6xxx_ops mv88e6171_ops = {
.port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
.port_link_state = mv88e6352_port_link_state,
.port_get_cmode = mv88e6352_port_get_cmode,
+ .port_setup_message_port = mv88e6xxx_setup_message_port,
.stats_snapshot = mv88e6320_g1_stats_snapshot,
.stats_set_histogram = mv88e6095_g1_stats_set_histogram,
.stats_get_sset_count = mv88e6095_stats_get_sset_count,
@@ -3113,6 +3144,7 @@ static const struct mv88e6xxx_ops mv88e6172_ops = {
.port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
.port_link_state = mv88e6352_port_link_state,
.port_get_cmode = mv88e6352_port_get_cmode,
+ .port_setup_message_port = mv88e6xxx_setup_message_port,
.stats_snapshot = mv88e6320_g1_stats_snapshot,
.stats_set_histogram = mv88e6095_g1_stats_set_histogram,
.stats_get_sset_count = mv88e6095_stats_get_sset_count,
@@ -3127,6 +3159,7 @@ static const struct mv88e6xxx_ops mv88e6172_ops = {
.rmu_disable = mv88e6352_g1_rmu_disable,
.vtu_getnext = mv88e6352_g1_vtu_getnext,
.vtu_loadpurge = mv88e6352_g1_vtu_loadpurge,
+ .serdes_get_lane = mv88e6352_serdes_get_lane,
.serdes_power = mv88e6352_serdes_power,
.gpio_ops = &mv88e6352_gpio_ops,
.phylink_validate = mv88e6352_phylink_validate,
@@ -3155,6 +3188,7 @@ static const struct mv88e6xxx_ops mv88e6175_ops = {
.port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
.port_link_state = mv88e6352_port_link_state,
.port_get_cmode = mv88e6352_port_get_cmode,
+ .port_setup_message_port = mv88e6xxx_setup_message_port,
.stats_snapshot = mv88e6320_g1_stats_snapshot,
.stats_set_histogram = mv88e6095_g1_stats_set_histogram,
.stats_get_sset_count = mv88e6095_stats_get_sset_count,
@@ -3196,6 +3230,7 @@ static const struct mv88e6xxx_ops mv88e6176_ops = {
.port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
.port_link_state = mv88e6352_port_link_state,
.port_get_cmode = mv88e6352_port_get_cmode,
+ .port_setup_message_port = mv88e6xxx_setup_message_port,
.stats_snapshot = mv88e6320_g1_stats_snapshot,
.stats_set_histogram = mv88e6095_g1_stats_set_histogram,
.stats_get_sset_count = mv88e6095_stats_get_sset_count,
@@ -3210,9 +3245,11 @@ static const struct mv88e6xxx_ops mv88e6176_ops = {
.rmu_disable = mv88e6352_g1_rmu_disable,
.vtu_getnext = mv88e6352_g1_vtu_getnext,
.vtu_loadpurge = mv88e6352_g1_vtu_loadpurge,
+ .serdes_get_lane = mv88e6352_serdes_get_lane,
.serdes_power = mv88e6352_serdes_power,
- .serdes_irq_setup = mv88e6352_serdes_irq_setup,
- .serdes_irq_free = mv88e6352_serdes_irq_free,
+ .serdes_irq_mapping = mv88e6352_serdes_irq_mapping,
+ .serdes_irq_enable = mv88e6352_serdes_irq_enable,
+ .serdes_irq_status = mv88e6352_serdes_irq_status,
.gpio_ops = &mv88e6352_gpio_ops,
.phylink_validate = mv88e6352_phylink_validate,
};
@@ -3234,6 +3271,7 @@ static const struct mv88e6xxx_ops mv88e6185_ops = {
.port_set_pause = mv88e6185_port_set_pause,
.port_link_state = mv88e6185_port_link_state,
.port_get_cmode = mv88e6185_port_get_cmode,
+ .port_setup_message_port = mv88e6xxx_setup_message_port,
.stats_snapshot = mv88e6xxx_g1_stats_snapshot,
.stats_set_histogram = mv88e6095_g1_stats_set_histogram,
.stats_get_sset_count = mv88e6095_stats_get_sset_count,
@@ -3276,6 +3314,7 @@ static const struct mv88e6xxx_ops mv88e6190_ops = {
.port_link_state = mv88e6352_port_link_state,
.port_get_cmode = mv88e6352_port_get_cmode,
.port_set_cmode = mv88e6390_port_set_cmode,
+ .port_setup_message_port = mv88e6xxx_setup_message_port,
.stats_snapshot = mv88e6390_g1_stats_snapshot,
.stats_set_histogram = mv88e6390_g1_stats_set_histogram,
.stats_get_sset_count = mv88e6320_stats_get_sset_count,
@@ -3291,8 +3330,10 @@ static const struct mv88e6xxx_ops mv88e6190_ops = {
.vtu_getnext = mv88e6390_g1_vtu_getnext,
.vtu_loadpurge = mv88e6390_g1_vtu_loadpurge,
.serdes_power = mv88e6390_serdes_power,
- .serdes_irq_setup = mv88e6390_serdes_irq_setup,
- .serdes_irq_free = mv88e6390_serdes_irq_free,
+ .serdes_get_lane = mv88e6390_serdes_get_lane,
+ .serdes_irq_mapping = mv88e6390_serdes_irq_mapping,
+ .serdes_irq_enable = mv88e6390_serdes_irq_enable,
+ .serdes_irq_status = mv88e6390_serdes_irq_status,
.gpio_ops = &mv88e6352_gpio_ops,
.phylink_validate = mv88e6390_phylink_validate,
};
@@ -3321,6 +3362,7 @@ static const struct mv88e6xxx_ops mv88e6190x_ops = {
.port_link_state = mv88e6352_port_link_state,
.port_get_cmode = mv88e6352_port_get_cmode,
.port_set_cmode = mv88e6390x_port_set_cmode,
+ .port_setup_message_port = mv88e6xxx_setup_message_port,
.stats_snapshot = mv88e6390_g1_stats_snapshot,
.stats_set_histogram = mv88e6390_g1_stats_set_histogram,
.stats_get_sset_count = mv88e6320_stats_get_sset_count,
@@ -3335,9 +3377,11 @@ static const struct mv88e6xxx_ops mv88e6190x_ops = {
.rmu_disable = mv88e6390_g1_rmu_disable,
.vtu_getnext = mv88e6390_g1_vtu_getnext,
.vtu_loadpurge = mv88e6390_g1_vtu_loadpurge,
- .serdes_power = mv88e6390x_serdes_power,
- .serdes_irq_setup = mv88e6390x_serdes_irq_setup,
- .serdes_irq_free = mv88e6390x_serdes_irq_free,
+ .serdes_power = mv88e6390_serdes_power,
+ .serdes_get_lane = mv88e6390x_serdes_get_lane,
+ .serdes_irq_mapping = mv88e6390_serdes_irq_mapping,
+ .serdes_irq_enable = mv88e6390_serdes_irq_enable,
+ .serdes_irq_status = mv88e6390_serdes_irq_status,
.gpio_ops = &mv88e6352_gpio_ops,
.phylink_validate = mv88e6390x_phylink_validate,
};
@@ -3366,6 +3410,7 @@ static const struct mv88e6xxx_ops mv88e6191_ops = {
.port_link_state = mv88e6352_port_link_state,
.port_get_cmode = mv88e6352_port_get_cmode,
.port_set_cmode = mv88e6390_port_set_cmode,
+ .port_setup_message_port = mv88e6xxx_setup_message_port,
.stats_snapshot = mv88e6390_g1_stats_snapshot,
.stats_set_histogram = mv88e6390_g1_stats_set_histogram,
.stats_get_sset_count = mv88e6320_stats_get_sset_count,
@@ -3381,8 +3426,10 @@ static const struct mv88e6xxx_ops mv88e6191_ops = {
.vtu_getnext = mv88e6390_g1_vtu_getnext,
.vtu_loadpurge = mv88e6390_g1_vtu_loadpurge,
.serdes_power = mv88e6390_serdes_power,
- .serdes_irq_setup = mv88e6390_serdes_irq_setup,
- .serdes_irq_free = mv88e6390_serdes_irq_free,
+ .serdes_get_lane = mv88e6390_serdes_get_lane,
+ .serdes_irq_mapping = mv88e6390_serdes_irq_mapping,
+ .serdes_irq_enable = mv88e6390_serdes_irq_enable,
+ .serdes_irq_status = mv88e6390_serdes_irq_status,
.avb_ops = &mv88e6390_avb_ops,
.ptp_ops = &mv88e6352_ptp_ops,
.phylink_validate = mv88e6390_phylink_validate,
@@ -3413,6 +3460,7 @@ static const struct mv88e6xxx_ops mv88e6240_ops = {
.port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
.port_link_state = mv88e6352_port_link_state,
.port_get_cmode = mv88e6352_port_get_cmode,
+ .port_setup_message_port = mv88e6xxx_setup_message_port,
.stats_snapshot = mv88e6320_g1_stats_snapshot,
.stats_set_histogram = mv88e6095_g1_stats_set_histogram,
.stats_get_sset_count = mv88e6095_stats_get_sset_count,
@@ -3427,9 +3475,11 @@ static const struct mv88e6xxx_ops mv88e6240_ops = {
.rmu_disable = mv88e6352_g1_rmu_disable,
.vtu_getnext = mv88e6352_g1_vtu_getnext,
.vtu_loadpurge = mv88e6352_g1_vtu_loadpurge,
+ .serdes_get_lane = mv88e6352_serdes_get_lane,
.serdes_power = mv88e6352_serdes_power,
- .serdes_irq_setup = mv88e6352_serdes_irq_setup,
- .serdes_irq_free = mv88e6352_serdes_irq_free,
+ .serdes_irq_mapping = mv88e6352_serdes_irq_mapping,
+ .serdes_irq_enable = mv88e6352_serdes_irq_enable,
+ .serdes_irq_status = mv88e6352_serdes_irq_status,
.gpio_ops = &mv88e6352_gpio_ops,
.avb_ops = &mv88e6352_avb_ops,
.ptp_ops = &mv88e6352_ptp_ops,
@@ -3471,6 +3521,8 @@ static const struct mv88e6xxx_ops mv88e6250_ops = {
.reset = mv88e6250_g1_reset,
.vtu_getnext = mv88e6250_g1_vtu_getnext,
.vtu_loadpurge = mv88e6250_g1_vtu_loadpurge,
+ .avb_ops = &mv88e6352_avb_ops,
+ .ptp_ops = &mv88e6250_ptp_ops,
.phylink_validate = mv88e6065_phylink_validate,
};
@@ -3498,6 +3550,7 @@ static const struct mv88e6xxx_ops mv88e6290_ops = {
.port_link_state = mv88e6352_port_link_state,
.port_get_cmode = mv88e6352_port_get_cmode,
.port_set_cmode = mv88e6390_port_set_cmode,
+ .port_setup_message_port = mv88e6xxx_setup_message_port,
.stats_snapshot = mv88e6390_g1_stats_snapshot,
.stats_set_histogram = mv88e6390_g1_stats_set_histogram,
.stats_get_sset_count = mv88e6320_stats_get_sset_count,
@@ -3513,8 +3566,10 @@ static const struct mv88e6xxx_ops mv88e6290_ops = {
.vtu_getnext = mv88e6390_g1_vtu_getnext,
.vtu_loadpurge = mv88e6390_g1_vtu_loadpurge,
.serdes_power = mv88e6390_serdes_power,
- .serdes_irq_setup = mv88e6390_serdes_irq_setup,
- .serdes_irq_free = mv88e6390_serdes_irq_free,
+ .serdes_get_lane = mv88e6390_serdes_get_lane,
+ .serdes_irq_mapping = mv88e6390_serdes_irq_mapping,
+ .serdes_irq_enable = mv88e6390_serdes_irq_enable,
+ .serdes_irq_status = mv88e6390_serdes_irq_status,
.gpio_ops = &mv88e6352_gpio_ops,
.avb_ops = &mv88e6390_avb_ops,
.ptp_ops = &mv88e6352_ptp_ops,
@@ -3545,6 +3600,7 @@ static const struct mv88e6xxx_ops mv88e6320_ops = {
.port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
.port_link_state = mv88e6352_port_link_state,
.port_get_cmode = mv88e6352_port_get_cmode,
+ .port_setup_message_port = mv88e6xxx_setup_message_port,
.stats_snapshot = mv88e6320_g1_stats_snapshot,
.stats_set_histogram = mv88e6095_g1_stats_set_histogram,
.stats_get_sset_count = mv88e6320_stats_get_sset_count,
@@ -3588,6 +3644,7 @@ static const struct mv88e6xxx_ops mv88e6321_ops = {
.port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
.port_link_state = mv88e6352_port_link_state,
.port_get_cmode = mv88e6352_port_get_cmode,
+ .port_setup_message_port = mv88e6xxx_setup_message_port,
.stats_snapshot = mv88e6320_g1_stats_snapshot,
.stats_set_histogram = mv88e6095_g1_stats_set_histogram,
.stats_get_sset_count = mv88e6320_stats_get_sset_count,
@@ -3631,6 +3688,8 @@ static const struct mv88e6xxx_ops mv88e6341_ops = {
.port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
.port_link_state = mv88e6352_port_link_state,
.port_get_cmode = mv88e6352_port_get_cmode,
+ .port_set_cmode = mv88e6341_port_set_cmode,
+ .port_setup_message_port = mv88e6xxx_setup_message_port,
.stats_snapshot = mv88e6390_g1_stats_snapshot,
.stats_set_histogram = mv88e6095_g1_stats_set_histogram,
.stats_get_sset_count = mv88e6320_stats_get_sset_count,
@@ -3644,7 +3703,11 @@ static const struct mv88e6xxx_ops mv88e6341_ops = {
.reset = mv88e6352_g1_reset,
.vtu_getnext = mv88e6352_g1_vtu_getnext,
.vtu_loadpurge = mv88e6352_g1_vtu_loadpurge,
- .serdes_power = mv88e6341_serdes_power,
+ .serdes_power = mv88e6390_serdes_power,
+ .serdes_get_lane = mv88e6341_serdes_get_lane,
+ .serdes_irq_mapping = mv88e6390_serdes_irq_mapping,
+ .serdes_irq_enable = mv88e6390_serdes_irq_enable,
+ .serdes_irq_status = mv88e6390_serdes_irq_status,
.gpio_ops = &mv88e6352_gpio_ops,
.avb_ops = &mv88e6390_avb_ops,
.ptp_ops = &mv88e6352_ptp_ops,
@@ -3674,6 +3737,7 @@ static const struct mv88e6xxx_ops mv88e6350_ops = {
.port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
.port_link_state = mv88e6352_port_link_state,
.port_get_cmode = mv88e6352_port_get_cmode,
+ .port_setup_message_port = mv88e6xxx_setup_message_port,
.stats_snapshot = mv88e6320_g1_stats_snapshot,
.stats_set_histogram = mv88e6095_g1_stats_set_histogram,
.stats_get_sset_count = mv88e6095_stats_get_sset_count,
@@ -3713,6 +3777,7 @@ static const struct mv88e6xxx_ops mv88e6351_ops = {
.port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
.port_link_state = mv88e6352_port_link_state,
.port_get_cmode = mv88e6352_port_get_cmode,
+ .port_setup_message_port = mv88e6xxx_setup_message_port,
.stats_snapshot = mv88e6320_g1_stats_snapshot,
.stats_set_histogram = mv88e6095_g1_stats_set_histogram,
.stats_get_sset_count = mv88e6095_stats_get_sset_count,
@@ -3756,6 +3821,7 @@ static const struct mv88e6xxx_ops mv88e6352_ops = {
.port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
.port_link_state = mv88e6352_port_link_state,
.port_get_cmode = mv88e6352_port_get_cmode,
+ .port_setup_message_port = mv88e6xxx_setup_message_port,
.stats_snapshot = mv88e6320_g1_stats_snapshot,
.stats_set_histogram = mv88e6095_g1_stats_set_histogram,
.stats_get_sset_count = mv88e6095_stats_get_sset_count,
@@ -3770,9 +3836,11 @@ static const struct mv88e6xxx_ops mv88e6352_ops = {
.rmu_disable = mv88e6352_g1_rmu_disable,
.vtu_getnext = mv88e6352_g1_vtu_getnext,
.vtu_loadpurge = mv88e6352_g1_vtu_loadpurge,
+ .serdes_get_lane = mv88e6352_serdes_get_lane,
.serdes_power = mv88e6352_serdes_power,
- .serdes_irq_setup = mv88e6352_serdes_irq_setup,
- .serdes_irq_free = mv88e6352_serdes_irq_free,
+ .serdes_irq_mapping = mv88e6352_serdes_irq_mapping,
+ .serdes_irq_enable = mv88e6352_serdes_irq_enable,
+ .serdes_irq_status = mv88e6352_serdes_irq_status,
.gpio_ops = &mv88e6352_gpio_ops,
.avb_ops = &mv88e6352_avb_ops,
.ptp_ops = &mv88e6352_ptp_ops,
@@ -3808,6 +3876,7 @@ static const struct mv88e6xxx_ops mv88e6390_ops = {
.port_link_state = mv88e6352_port_link_state,
.port_get_cmode = mv88e6352_port_get_cmode,
.port_set_cmode = mv88e6390_port_set_cmode,
+ .port_setup_message_port = mv88e6xxx_setup_message_port,
.stats_snapshot = mv88e6390_g1_stats_snapshot,
.stats_set_histogram = mv88e6390_g1_stats_set_histogram,
.stats_get_sset_count = mv88e6320_stats_get_sset_count,
@@ -3823,8 +3892,10 @@ static const struct mv88e6xxx_ops mv88e6390_ops = {
.vtu_getnext = mv88e6390_g1_vtu_getnext,
.vtu_loadpurge = mv88e6390_g1_vtu_loadpurge,
.serdes_power = mv88e6390_serdes_power,
- .serdes_irq_setup = mv88e6390_serdes_irq_setup,
- .serdes_irq_free = mv88e6390_serdes_irq_free,
+ .serdes_get_lane = mv88e6390_serdes_get_lane,
+ .serdes_irq_mapping = mv88e6390_serdes_irq_mapping,
+ .serdes_irq_enable = mv88e6390_serdes_irq_enable,
+ .serdes_irq_status = mv88e6390_serdes_irq_status,
.gpio_ops = &mv88e6352_gpio_ops,
.avb_ops = &mv88e6390_avb_ops,
.ptp_ops = &mv88e6352_ptp_ops,
@@ -3857,6 +3928,7 @@ static const struct mv88e6xxx_ops mv88e6390x_ops = {
.port_link_state = mv88e6352_port_link_state,
.port_get_cmode = mv88e6352_port_get_cmode,
.port_set_cmode = mv88e6390x_port_set_cmode,
+ .port_setup_message_port = mv88e6xxx_setup_message_port,
.stats_snapshot = mv88e6390_g1_stats_snapshot,
.stats_set_histogram = mv88e6390_g1_stats_set_histogram,
.stats_get_sset_count = mv88e6320_stats_get_sset_count,
@@ -3871,9 +3943,11 @@ static const struct mv88e6xxx_ops mv88e6390x_ops = {
.rmu_disable = mv88e6390_g1_rmu_disable,
.vtu_getnext = mv88e6390_g1_vtu_getnext,
.vtu_loadpurge = mv88e6390_g1_vtu_loadpurge,
- .serdes_power = mv88e6390x_serdes_power,
- .serdes_irq_setup = mv88e6390x_serdes_irq_setup,
- .serdes_irq_free = mv88e6390x_serdes_irq_free,
+ .serdes_power = mv88e6390_serdes_power,
+ .serdes_get_lane = mv88e6390x_serdes_get_lane,
+ .serdes_irq_mapping = mv88e6390_serdes_irq_mapping,
+ .serdes_irq_enable = mv88e6390_serdes_irq_enable,
+ .serdes_irq_status = mv88e6390_serdes_irq_status,
.gpio_ops = &mv88e6352_gpio_ops,
.avb_ops = &mv88e6390_avb_ops,
.ptp_ops = &mv88e6352_ptp_ops,
@@ -4235,6 +4309,33 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.ops = &mv88e6191_ops,
},
+ [MV88E6220] = {
+ .prod_num = MV88E6XXX_PORT_SWITCH_ID_PROD_6220,
+ .family = MV88E6XXX_FAMILY_6250,
+ .name = "Marvell 88E6220",
+ .num_databases = 64,
+
+ /* Ports 2-4 are not routed to pins
+ * => usable ports 0, 1, 5, 6
+ */
+ .num_ports = 7,
+ .num_internal_phys = 2,
+ .invalid_port_mask = BIT(2) | BIT(3) | BIT(4),
+ .max_vid = 4095,
+ .port_base_addr = 0x08,
+ .phy_base_addr = 0x00,
+ .global1_addr = 0x0f,
+ .global2_addr = 0x07,
+ .age_time_coeff = 15000,
+ .g1_irqs = 9,
+ .g2_irqs = 10,
+ .atu_move_port_mask = 0xf,
+ .dual_chip = true,
+ .tag_protocol = DSA_TAG_PROTO_DSA,
+ .ptp_support = true,
+ .ops = &mv88e6250_ops,
+ },
+
[MV88E6240] = {
.prod_num = MV88E6XXX_PORT_SWITCH_ID_PROD_6240,
.family = MV88E6XXX_FAMILY_6352,
@@ -4277,6 +4378,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.atu_move_port_mask = 0xf,
.dual_chip = true,
.tag_protocol = DSA_TAG_PROTO_DSA,
+ .ptp_support = true,
.ops = &mv88e6250_ops,
},
diff --git a/drivers/net/dsa/mv88e6xxx/chip.h b/drivers/net/dsa/mv88e6xxx/chip.h
index 4646e46d47f2..6bc0a4e4fe7b 100644
--- a/drivers/net/dsa/mv88e6xxx/chip.h
+++ b/drivers/net/dsa/mv88e6xxx/chip.h
@@ -57,6 +57,7 @@ enum mv88e6xxx_model {
MV88E6190,
MV88E6190X,
MV88E6191,
+ MV88E6220,
MV88E6240,
MV88E6250,
MV88E6290,
@@ -77,7 +78,7 @@ enum mv88e6xxx_family {
MV88E6XXX_FAMILY_6097, /* 6046 6085 6096 6097 */
MV88E6XXX_FAMILY_6165, /* 6123 6161 6165 */
MV88E6XXX_FAMILY_6185, /* 6108 6121 6122 6131 6152 6155 6182 6185 */
- MV88E6XXX_FAMILY_6250, /* 6250 */
+ MV88E6XXX_FAMILY_6250, /* 6220 6250 */
MV88E6XXX_FAMILY_6320, /* 6320 6321 */
MV88E6XXX_FAMILY_6341, /* 6141 6341 */
MV88E6XXX_FAMILY_6351, /* 6171 6175 6350 6351 */
@@ -105,6 +106,11 @@ struct mv88e6xxx_info {
unsigned int g2_irqs;
bool pvt;
+	/* Mark certain ports as invalid. This is required, for example, for
+	 * the MV88E6220 (in general a MV88E6250 with 7 ports) whose ports
+	 * 2-4 are not routed to pins.
+	 */
+ unsigned int invalid_port_mask;
/* Multi-chip Addressing Mode.
* Some chips respond to only 2 registers of its own SMI device address
* when it is non-zero, and use indirect access to internal registers.
@@ -193,7 +199,7 @@ struct mv88e6xxx_port {
u64 vtu_member_violation;
u64 vtu_miss_violation;
u8 cmode;
- int serdes_irq;
+ unsigned int serdes_irq;
};
struct mv88e6xxx_chip {
@@ -389,6 +395,7 @@ struct mv88e6xxx_ops {
u8 out);
int (*port_disable_learn_limit)(struct mv88e6xxx_chip *chip, int port);
int (*port_disable_pri_override)(struct mv88e6xxx_chip *chip, int port);
+ int (*port_setup_message_port)(struct mv88e6xxx_chip *chip, int port);
/* CMODE control what PHY mode the MAC will use, eg. SGMII, RGMII, etc.
* Some chips allow this to be configured on specific ports.
@@ -434,11 +441,19 @@ struct mv88e6xxx_ops {
int (*mgmt_rsvd2cpu)(struct mv88e6xxx_chip *chip);
/* Power on/off a SERDES interface */
- int (*serdes_power)(struct mv88e6xxx_chip *chip, int port, bool on);
+ int (*serdes_power)(struct mv88e6xxx_chip *chip, int port, u8 lane,
+ bool up);
+
+ /* SERDES lane mapping */
+ u8 (*serdes_get_lane)(struct mv88e6xxx_chip *chip, int port);
/* SERDES interrupt handling */
- int (*serdes_irq_setup)(struct mv88e6xxx_chip *chip, int port);
- void (*serdes_irq_free)(struct mv88e6xxx_chip *chip, int port);
+ unsigned int (*serdes_irq_mapping)(struct mv88e6xxx_chip *chip,
+ int port);
+ int (*serdes_irq_enable)(struct mv88e6xxx_chip *chip, int port, u8 lane,
+ bool enable);
+ irqreturn_t (*serdes_irq_status)(struct mv88e6xxx_chip *chip, int port,
+ u8 lane);
/* Statistics from the SERDES interface */
int (*serdes_get_sset_count)(struct mv88e6xxx_chip *chip, int port);
@@ -532,6 +547,10 @@ struct mv88e6xxx_ptp_ops {
int arr1_sts_reg;
int dep_sts_reg;
u32 rx_filters;
+ u32 cc_shift;
+ u32 cc_mult;
+ u32 cc_mult_num;
+ u32 cc_mult_dem;
};
#define STATS_TYPE_PORT BIT(0)
@@ -570,11 +589,17 @@ static inline unsigned int mv88e6xxx_num_gpio(struct mv88e6xxx_chip *chip)
return chip->info->num_gpio;
}
+static inline bool mv88e6xxx_is_invalid_port(struct mv88e6xxx_chip *chip,
+					     int port)
+{
+ return (chip->info->invalid_port_mask & BIT(port)) != 0;
+}
+
int mv88e6xxx_read(struct mv88e6xxx_chip *chip, int addr, int reg, u16 *val);
int mv88e6xxx_write(struct mv88e6xxx_chip *chip, int addr, int reg, u16 val);
-int mv88e6xxx_update(struct mv88e6xxx_chip *chip, int addr, int reg,
- u16 update);
-int mv88e6xxx_wait(struct mv88e6xxx_chip *chip, int addr, int reg, u16 mask);
+int mv88e6xxx_wait_mask(struct mv88e6xxx_chip *chip, int addr, int reg,
+ u16 mask, u16 val);
+int mv88e6xxx_wait_bit(struct mv88e6xxx_chip *chip, int addr, int reg,
+ int bit, int val);
int mv88e6xxx_port_setup_mac(struct mv88e6xxx_chip *chip, int port, int link,
int speed, int duplex, int pause,
phy_interface_t mode);
diff --git a/drivers/net/dsa/mv88e6xxx/global1.c b/drivers/net/dsa/mv88e6xxx/global1.c
index 1323ef30a5e9..25ec4c0ac589 100644
--- a/drivers/net/dsa/mv88e6xxx/global1.c
+++ b/drivers/net/dsa/mv88e6xxx/global1.c
@@ -27,100 +27,52 @@ int mv88e6xxx_g1_write(struct mv88e6xxx_chip *chip, int reg, u16 val)
return mv88e6xxx_write(chip, addr, reg, val);
}
-int mv88e6xxx_g1_wait(struct mv88e6xxx_chip *chip, int reg, u16 mask)
+int mv88e6xxx_g1_wait_bit(struct mv88e6xxx_chip *chip, int reg,
+			  int bit, int val)
{
- return mv88e6xxx_wait(chip, chip->info->global1_addr, reg, mask);
+ return mv88e6xxx_wait_bit(chip, chip->info->global1_addr, reg,
+ bit, val);
+}
+
+int mv88e6xxx_g1_wait_mask(struct mv88e6xxx_chip *chip, int reg,
+ u16 mask, u16 val)
+{
+ return mv88e6xxx_wait_mask(chip, chip->info->global1_addr, reg,
+ mask, val);
}
/* Offset 0x00: Switch Global Status Register */
static int mv88e6185_g1_wait_ppu_disabled(struct mv88e6xxx_chip *chip)
{
- u16 state;
- int i, err;
-
- for (i = 0; i < 16; i++) {
- err = mv88e6xxx_g1_read(chip, MV88E6XXX_G1_STS, &state);
- if (err)
- return err;
-
- /* Check the value of the PPUState bits 15:14 */
- state &= MV88E6185_G1_STS_PPU_STATE_MASK;
- if (state != MV88E6185_G1_STS_PPU_STATE_POLLING)
- return 0;
-
- usleep_range(1000, 2000);
- }
-
- return -ETIMEDOUT;
+ return mv88e6xxx_g1_wait_mask(chip, MV88E6XXX_G1_STS,
+ MV88E6185_G1_STS_PPU_STATE_MASK,
+ MV88E6185_G1_STS_PPU_STATE_DISABLED);
}
static int mv88e6185_g1_wait_ppu_polling(struct mv88e6xxx_chip *chip)
{
- u16 state;
- int i, err;
-
- for (i = 0; i < 16; ++i) {
- err = mv88e6xxx_g1_read(chip, MV88E6XXX_G1_STS, &state);
- if (err)
- return err;
-
- /* Check the value of the PPUState bits 15:14 */
- state &= MV88E6185_G1_STS_PPU_STATE_MASK;
- if (state == MV88E6185_G1_STS_PPU_STATE_POLLING)
- return 0;
-
- usleep_range(1000, 2000);
- }
-
- return -ETIMEDOUT;
+ return mv88e6xxx_g1_wait_mask(chip, MV88E6XXX_G1_STS,
+ MV88E6185_G1_STS_PPU_STATE_MASK,
+ MV88E6185_G1_STS_PPU_STATE_POLLING);
}
static int mv88e6352_g1_wait_ppu_polling(struct mv88e6xxx_chip *chip)
{
- u16 state;
- int i, err;
+ int bit = __bf_shf(MV88E6352_G1_STS_PPU_STATE);
- for (i = 0; i < 16; ++i) {
- err = mv88e6xxx_g1_read(chip, MV88E6XXX_G1_STS, &state);
- if (err)
- return err;
-
- /* Check the value of the PPUState (or InitState) bit 15 */
- if (state & MV88E6352_G1_STS_PPU_STATE)
- return 0;
-
- usleep_range(1000, 2000);
- }
-
- return -ETIMEDOUT;
+ return mv88e6xxx_g1_wait_bit(chip, MV88E6XXX_G1_STS, bit, 1);
}
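
__bf_shf() (from <linux/bitfield.h>) resolves to the shift of a mask's lowest set bit, which is what lets single-bit masks feed the new bit-based wait helpers. A sketch of the equivalence for the PPU state bit (illustrative function name):

static int example_ppu_wait(struct mv88e6xxx_chip *chip)
{
	/* __bf_shf(MV88E6352_G1_STS_PPU_STATE) == 15 for a BIT(15) mask */
	return mv88e6xxx_g1_wait_bit(chip, MV88E6XXX_G1_STS, 15, 1);
}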
static int mv88e6xxx_g1_wait_init_ready(struct mv88e6xxx_chip *chip)
{
- const unsigned long timeout = jiffies + 1 * HZ;
- u16 val;
- int err;
+ int bit = __bf_shf(MV88E6XXX_G1_STS_INIT_READY);
 	/* Wait for the switch to be ready. The InitReady bit 11 is set to a
 	 * one when all units inside the device (ATU, VTU, etc.) have finished
 	 * their initialization and are ready to accept frames.
 	 */
- while (time_before(jiffies, timeout)) {
- err = mv88e6xxx_g1_read(chip, MV88E6XXX_G1_STS, &val);
- if (err)
- return err;
-
- if (val & MV88E6XXX_G1_STS_INIT_READY)
- break;
-
- usleep_range(1000, 2000);
- }
-
- if (time_after(jiffies, timeout))
- return -ETIMEDOUT;
-
- return 0;
+ return mv88e6xxx_g1_wait_bit(chip, MV88E6XXX_G1_STS, bit, 1);
}
/* Offset 0x01: Switch MAC Address Register Bytes 0 & 1
@@ -476,8 +428,9 @@ int mv88e6xxx_g1_set_device_number(struct mv88e6xxx_chip *chip, int index)
static int mv88e6xxx_g1_stats_wait(struct mv88e6xxx_chip *chip)
{
- return mv88e6xxx_g1_wait(chip, MV88E6XXX_G1_STATS_OP,
- MV88E6XXX_G1_STATS_OP_BUSY);
+ int bit = __bf_shf(MV88E6XXX_G1_STATS_OP_BUSY);
+
+ return mv88e6xxx_g1_wait_bit(chip, MV88E6XXX_G1_STATS_OP, bit, 0);
}
int mv88e6095_g1_stats_set_histogram(struct mv88e6xxx_chip *chip)
diff --git a/drivers/net/dsa/mv88e6xxx/global1.h b/drivers/net/dsa/mv88e6xxx/global1.h
index d444266f7d78..78b9ae22d18c 100644
--- a/drivers/net/dsa/mv88e6xxx/global1.h
+++ b/drivers/net/dsa/mv88e6xxx/global1.h
@@ -249,7 +249,10 @@
int mv88e6xxx_g1_read(struct mv88e6xxx_chip *chip, int reg, u16 *val);
int mv88e6xxx_g1_write(struct mv88e6xxx_chip *chip, int reg, u16 val);
-int mv88e6xxx_g1_wait(struct mv88e6xxx_chip *chip, int reg, u16 mask);
+int mv88e6xxx_g1_wait_bit(struct mv88e6xxx_chip *chip, int reg,
+			  int bit, int val);
+int mv88e6xxx_g1_wait_mask(struct mv88e6xxx_chip *chip, int reg,
+ u16 mask, u16 val);
int mv88e6xxx_g1_set_switch_mac(struct mv88e6xxx_chip *chip, u8 *addr);
diff --git a/drivers/net/dsa/mv88e6xxx/global1_atu.c b/drivers/net/dsa/mv88e6xxx/global1_atu.c
index 1cf388e9bd94..18b86515b6bc 100644
--- a/drivers/net/dsa/mv88e6xxx/global1_atu.c
+++ b/drivers/net/dsa/mv88e6xxx/global1_atu.c
@@ -5,6 +5,8 @@
* Copyright (c) 2008 Marvell Semiconductor
* Copyright (c) 2017 Savoir-faire Linux, Inc.
*/
+
+#include <linux/bitfield.h>
#include <linux/interrupt.h>
#include <linux/irqdomain.h>
@@ -75,8 +77,9 @@ int mv88e6xxx_g1_atu_set_age_time(struct mv88e6xxx_chip *chip,
static int mv88e6xxx_g1_atu_op_wait(struct mv88e6xxx_chip *chip)
{
- return mv88e6xxx_g1_wait(chip, MV88E6XXX_G1_ATU_OP,
- MV88E6XXX_G1_ATU_OP_BUSY);
+ int bit = __bf_shf(MV88E6XXX_G1_ATU_OP_BUSY);
+
+ return mv88e6xxx_g1_wait_bit(chip, MV88E6XXX_G1_ATU_OP, bit, 0);
}
static int mv88e6xxx_g1_atu_op(struct mv88e6xxx_chip *chip, u16 fid, u16 op)
diff --git a/drivers/net/dsa/mv88e6xxx/global1_vtu.c b/drivers/net/dsa/mv88e6xxx/global1_vtu.c
index 6cac997360e8..33056a609e96 100644
--- a/drivers/net/dsa/mv88e6xxx/global1_vtu.c
+++ b/drivers/net/dsa/mv88e6xxx/global1_vtu.c
@@ -7,6 +7,7 @@
* Copyright (c) 2017 Savoir-faire Linux, Inc.
*/
+#include <linux/bitfield.h>
#include <linux/interrupt.h>
#include <linux/irqdomain.h>
@@ -67,8 +68,9 @@ static int mv88e6xxx_g1_vtu_sid_write(struct mv88e6xxx_chip *chip,
static int mv88e6xxx_g1_vtu_op_wait(struct mv88e6xxx_chip *chip)
{
- return mv88e6xxx_g1_wait(chip, MV88E6XXX_G1_VTU_OP,
- MV88E6XXX_G1_VTU_OP_BUSY);
+ int bit = __bf_shf(MV88E6XXX_G1_VTU_OP_BUSY);
+
+ return mv88e6xxx_g1_wait_bit(chip, MV88E6XXX_G1_VTU_OP, bit, 0);
}
static int mv88e6xxx_g1_vtu_op(struct mv88e6xxx_chip *chip, u16 op)
diff --git a/drivers/net/dsa/mv88e6xxx/global2.c b/drivers/net/dsa/mv88e6xxx/global2.c
index 2305b94b3051..bdbb72fc20ed 100644
--- a/drivers/net/dsa/mv88e6xxx/global2.c
+++ b/drivers/net/dsa/mv88e6xxx/global2.c
@@ -26,14 +26,11 @@ int mv88e6xxx_g2_write(struct mv88e6xxx_chip *chip, int reg, u16 val)
return mv88e6xxx_write(chip, chip->info->global2_addr, reg, val);
}
-int mv88e6xxx_g2_update(struct mv88e6xxx_chip *chip, int reg, u16 update)
+int mv88e6xxx_g2_wait_bit(struct mv88e6xxx_chip *chip, int reg,
+			  int bit, int val)
{
- return mv88e6xxx_update(chip, chip->info->global2_addr, reg, update);
-}
-
-int mv88e6xxx_g2_wait(struct mv88e6xxx_chip *chip, int reg, u16 mask)
-{
- return mv88e6xxx_wait(chip, chip->info->global2_addr, reg, mask);
+ return mv88e6xxx_wait_bit(chip, chip->info->global2_addr, reg,
+ bit, val);
}
/* Offset 0x00: Interrupt Source Register */
@@ -123,7 +120,8 @@ int mv88e6xxx_g2_device_mapping_write(struct mv88e6xxx_chip *chip, int target,
* but bit 4 is reserved on older chips, so it is safe to use.
*/
- return mv88e6xxx_g2_update(chip, MV88E6XXX_G2_DEVICE_MAPPING, val);
+ return mv88e6xxx_g2_write(chip, MV88E6XXX_G2_DEVICE_MAPPING,
+ MV88E6XXX_G2_DEVICE_MAPPING_UPDATE | val);
}
/* Offset 0x07: Trunk Mask Table register */
@@ -136,7 +134,8 @@ static int mv88e6xxx_g2_trunk_mask_write(struct mv88e6xxx_chip *chip, int num,
if (hash)
val |= MV88E6XXX_G2_TRUNK_MASK_HASH;
- return mv88e6xxx_g2_update(chip, MV88E6XXX_G2_TRUNK_MASK, val);
+ return mv88e6xxx_g2_write(chip, MV88E6XXX_G2_TRUNK_MASK,
+ MV88E6XXX_G2_TRUNK_MASK_UPDATE | val);
}
/* Offset 0x08: Trunk Mapping Table register */
@@ -147,7 +146,8 @@ static int mv88e6xxx_g2_trunk_mapping_write(struct mv88e6xxx_chip *chip, int id,
const u16 port_mask = BIT(mv88e6xxx_num_ports(chip)) - 1;
u16 val = (id << 11) | (map & port_mask);
- return mv88e6xxx_g2_update(chip, MV88E6XXX_G2_TRUNK_MAPPING, val);
+ return mv88e6xxx_g2_write(chip, MV88E6XXX_G2_TRUNK_MAPPING,
+ MV88E6XXX_G2_TRUNK_MAPPING_UPDATE | val);
}
int mv88e6xxx_g2_trunk_clear(struct mv88e6xxx_chip *chip)
@@ -178,8 +178,9 @@ int mv88e6xxx_g2_trunk_clear(struct mv88e6xxx_chip *chip)
static int mv88e6xxx_g2_irl_wait(struct mv88e6xxx_chip *chip)
{
- return mv88e6xxx_g2_wait(chip, MV88E6XXX_G2_IRL_CMD,
- MV88E6XXX_G2_IRL_CMD_BUSY);
+ int bit = __bf_shf(MV88E6XXX_G2_IRL_CMD_BUSY);
+
+ return mv88e6xxx_g2_wait_bit(chip, MV88E6XXX_G2_IRL_CMD, bit, 0);
}
static int mv88e6xxx_g2_irl_op(struct mv88e6xxx_chip *chip, u16 op, int port,
@@ -214,8 +215,9 @@ int mv88e6390_g2_irl_init_all(struct mv88e6xxx_chip *chip, int port)
static int mv88e6xxx_g2_pvt_op_wait(struct mv88e6xxx_chip *chip)
{
- return mv88e6xxx_g2_wait(chip, MV88E6XXX_G2_PVT_ADDR,
- MV88E6XXX_G2_PVT_ADDR_BUSY);
+ int bit = __bf_shf(MV88E6XXX_G2_PVT_ADDR_BUSY);
+
+ return mv88e6xxx_g2_wait_bit(chip, MV88E6XXX_G2_PVT_ADDR, bit, 0);
}
static int mv88e6xxx_g2_pvt_op(struct mv88e6xxx_chip *chip, int src_dev,
@@ -261,7 +263,8 @@ static int mv88e6xxx_g2_switch_mac_write(struct mv88e6xxx_chip *chip,
{
u16 val = (pointer << 8) | data;
- return mv88e6xxx_g2_update(chip, MV88E6XXX_G2_SWITCH_MAC, val);
+ return mv88e6xxx_g2_write(chip, MV88E6XXX_G2_SWITCH_MAC,
+ MV88E6XXX_G2_SWITCH_MAC_UPDATE | val);
}
int mv88e6xxx_g2_set_switch_mac(struct mv88e6xxx_chip *chip, u8 *addr)
@@ -284,7 +287,8 @@ static int mv88e6xxx_g2_pot_write(struct mv88e6xxx_chip *chip, int pointer,
{
u16 val = (pointer << 8) | (data & 0x7);
- return mv88e6xxx_g2_update(chip, MV88E6XXX_G2_PRIO_OVERRIDE, val);
+ return mv88e6xxx_g2_write(chip, MV88E6XXX_G2_PRIO_OVERRIDE,
+ MV88E6XXX_G2_PRIO_OVERRIDE_UPDATE | val);
}
int mv88e6xxx_g2_pot_clear(struct mv88e6xxx_chip *chip)
@@ -308,9 +312,16 @@ int mv88e6xxx_g2_pot_clear(struct mv88e6xxx_chip *chip)
static int mv88e6xxx_g2_eeprom_wait(struct mv88e6xxx_chip *chip)
{
- return mv88e6xxx_g2_wait(chip, MV88E6XXX_G2_EEPROM_CMD,
- MV88E6XXX_G2_EEPROM_CMD_BUSY |
- MV88E6XXX_G2_EEPROM_CMD_RUNNING);
+ int bit = __bf_shf(MV88E6XXX_G2_EEPROM_CMD_BUSY);
+ int err;
+
+ err = mv88e6xxx_g2_wait_bit(chip, MV88E6XXX_G2_EEPROM_CMD, bit, 0);
+ if (err)
+ return err;
+
+ bit = __bf_shf(MV88E6XXX_G2_EEPROM_CMD_RUNNING);
+
+ return mv88e6xxx_g2_wait_bit(chip, MV88E6XXX_G2_EEPROM_CMD, bit, 0);
}
static int mv88e6xxx_g2_eeprom_cmd(struct mv88e6xxx_chip *chip, u16 cmd)
@@ -572,8 +583,9 @@ int mv88e6xxx_g2_set_eeprom16(struct mv88e6xxx_chip *chip,
static int mv88e6xxx_g2_smi_phy_wait(struct mv88e6xxx_chip *chip)
{
- return mv88e6xxx_g2_wait(chip, MV88E6XXX_G2_SMI_PHY_CMD,
- MV88E6XXX_G2_SMI_PHY_CMD_BUSY);
+ int bit = __bf_shf(MV88E6XXX_G2_SMI_PHY_CMD_BUSY);
+
+ return mv88e6xxx_g2_wait_bit(chip, MV88E6XXX_G2_SMI_PHY_CMD, bit, 0);
}
static int mv88e6xxx_g2_smi_phy_cmd(struct mv88e6xxx_chip *chip, u16 cmd)
@@ -840,12 +852,13 @@ const struct mv88e6xxx_irq_ops mv88e6250_watchdog_ops = {
static int mv88e6390_watchdog_setup(struct mv88e6xxx_chip *chip)
{
- return mv88e6xxx_g2_update(chip, MV88E6390_G2_WDOG_CTL,
- MV88E6390_G2_WDOG_CTL_PTR_INT_ENABLE |
- MV88E6390_G2_WDOG_CTL_CUT_THROUGH |
- MV88E6390_G2_WDOG_CTL_QUEUE_CONTROLLER |
- MV88E6390_G2_WDOG_CTL_EGRESS |
- MV88E6390_G2_WDOG_CTL_FORCE_IRQ);
+ return mv88e6xxx_g2_write(chip, MV88E6390_G2_WDOG_CTL,
+ MV88E6390_G2_WDOG_CTL_UPDATE |
+ MV88E6390_G2_WDOG_CTL_PTR_INT_ENABLE |
+ MV88E6390_G2_WDOG_CTL_CUT_THROUGH |
+ MV88E6390_G2_WDOG_CTL_QUEUE_CONTROLLER |
+ MV88E6390_G2_WDOG_CTL_EGRESS |
+ MV88E6390_G2_WDOG_CTL_FORCE_IRQ);
}
static int mv88e6390_watchdog_action(struct mv88e6xxx_chip *chip, int irq)
@@ -878,8 +891,9 @@ static int mv88e6390_watchdog_action(struct mv88e6xxx_chip *chip, int irq)
static void mv88e6390_watchdog_free(struct mv88e6xxx_chip *chip)
{
- mv88e6xxx_g2_update(chip, MV88E6390_G2_WDOG_CTL,
- MV88E6390_G2_WDOG_CTL_PTR_INT_ENABLE);
+ mv88e6xxx_g2_write(chip, MV88E6390_G2_WDOG_CTL,
+ MV88E6390_G2_WDOG_CTL_UPDATE |
+ MV88E6390_G2_WDOG_CTL_PTR_INT_ENABLE);
}
const struct mv88e6xxx_irq_ops mv88e6390_watchdog_ops = {
diff --git a/drivers/net/dsa/mv88e6xxx/global2.h b/drivers/net/dsa/mv88e6xxx/global2.h
index a664fc25f132..42da4bca73e8 100644
--- a/drivers/net/dsa/mv88e6xxx/global2.h
+++ b/drivers/net/dsa/mv88e6xxx/global2.h
@@ -295,8 +295,8 @@ static inline int mv88e6xxx_g2_require(struct mv88e6xxx_chip *chip)
int mv88e6xxx_g2_read(struct mv88e6xxx_chip *chip, int reg, u16 *val);
int mv88e6xxx_g2_write(struct mv88e6xxx_chip *chip, int reg, u16 val);
-int mv88e6xxx_g2_update(struct mv88e6xxx_chip *chip, int reg, u16 update);
-int mv88e6xxx_g2_wait(struct mv88e6xxx_chip *chip, int reg, u16 mask);
+int mv88e6xxx_g2_wait_bit(struct mv88e6xxx_chip *chip, int reg,
+ int bit, int val);
int mv88e6352_g2_irl_init_all(struct mv88e6xxx_chip *chip, int port);
int mv88e6390_g2_irl_init_all(struct mv88e6xxx_chip *chip, int port);
@@ -376,12 +376,8 @@ static inline int mv88e6xxx_g2_write(struct mv88e6xxx_chip *chip, int reg, u16 v
return -EOPNOTSUPP;
}
-static inline int mv88e6xxx_g2_update(struct mv88e6xxx_chip *chip, int reg, u16 update)
-{
- return -EOPNOTSUPP;
-}
-
-static inline int mv88e6xxx_g2_wait(struct mv88e6xxx_chip *chip, int reg, u16 mask)
+static inline int mv88e6xxx_g2_wait_bit(struct mv88e6xxx_chip *chip,
+ int reg, int bit, int val)
{
return -EOPNOTSUPP;
}
diff --git a/drivers/net/dsa/mv88e6xxx/global2_avb.c b/drivers/net/dsa/mv88e6xxx/global2_avb.c
index 116b8cf5a6e3..657783e043ff 100644
--- a/drivers/net/dsa/mv88e6xxx/global2_avb.c
+++ b/drivers/net/dsa/mv88e6xxx/global2_avb.c
@@ -11,6 +11,8 @@
* Brandon Streiff <brandon.streiff@ni.com>
*/
+#include <linux/bitfield.h>
+
#include "global2.h"
/* Offset 0x16: AVB Command Register
@@ -27,17 +29,33 @@
/* mv88e6xxx_g2_avb_read -- Read one or multiple 16-bit words.
* The hardware supports snapshotting up to four contiguous registers.
*/
+static int mv88e6xxx_g2_avb_wait(struct mv88e6xxx_chip *chip)
+{
+ int bit = __bf_shf(MV88E6352_G2_AVB_CMD_BUSY);
+
+ return mv88e6xxx_g2_wait_bit(chip, MV88E6352_G2_AVB_CMD, bit, 0);
+}
+
static int mv88e6xxx_g2_avb_read(struct mv88e6xxx_chip *chip, u16 readop,
u16 *data, int len)
{
int err;
int i;
+ err = mv88e6xxx_g2_avb_wait(chip);
+ if (err)
+ return err;
+
/* Hardware can only snapshot four words. */
if (len > 4)
return -E2BIG;
- err = mv88e6xxx_g2_update(chip, MV88E6352_G2_AVB_CMD, readop);
+ err = mv88e6xxx_g2_write(chip, MV88E6352_G2_AVB_CMD,
+ MV88E6352_G2_AVB_CMD_BUSY | readop);
+ if (err)
+ return err;
+
+ err = mv88e6xxx_g2_avb_wait(chip);
if (err)
return err;
@@ -57,11 +75,20 @@ static int mv88e6xxx_g2_avb_write(struct mv88e6xxx_chip *chip, u16 writeop,
{
int err;
+ err = mv88e6xxx_g2_avb_wait(chip);
+ if (err)
+ return err;
+
err = mv88e6xxx_g2_write(chip, MV88E6352_G2_AVB_DATA, data);
if (err)
return err;
- return mv88e6xxx_g2_update(chip, MV88E6352_G2_AVB_CMD, writeop);
+ err = mv88e6xxx_g2_write(chip, MV88E6352_G2_AVB_CMD,
+ MV88E6352_G2_AVB_CMD_BUSY | writeop);
+ if (err)
+ return err;
+
+ return mv88e6xxx_g2_avb_wait(chip);
}
static int mv88e6352_g2_avb_port_ptp_read(struct mv88e6xxx_chip *chip,
diff --git a/drivers/net/dsa/mv88e6xxx/global2_scratch.c b/drivers/net/dsa/mv88e6xxx/global2_scratch.c
index baddecadd8be..33b7b9570d29 100644
--- a/drivers/net/dsa/mv88e6xxx/global2_scratch.c
+++ b/drivers/net/dsa/mv88e6xxx/global2_scratch.c
@@ -37,7 +37,8 @@ static int mv88e6xxx_g2_scratch_write(struct mv88e6xxx_chip *chip, int reg,
{
u16 value = (reg << 8) | data;
- return mv88e6xxx_g2_update(chip, MV88E6XXX_G2_SCRATCH_MISC_MISC, value);
+ return mv88e6xxx_g2_write(chip, MV88E6XXX_G2_SCRATCH_MISC_MISC,
+ MV88E6XXX_G2_SCRATCH_MISC_UPDATE | value);
}
/**
diff --git a/drivers/net/dsa/mv88e6xxx/port.c b/drivers/net/dsa/mv88e6xxx/port.c
index 04309ef0a1cc..04006344adb2 100644
--- a/drivers/net/dsa/mv88e6xxx/port.c
+++ b/drivers/net/dsa/mv88e6xxx/port.c
@@ -392,17 +392,14 @@ phy_interface_t mv88e6390x_port_max_speed_mode(int port)
return PHY_INTERFACE_MODE_NA;
}
-int mv88e6390x_port_set_cmode(struct mv88e6xxx_chip *chip, int port,
- phy_interface_t mode)
+static int mv88e6xxx_port_set_cmode(struct mv88e6xxx_chip *chip, int port,
+ phy_interface_t mode)
{
- int lane;
+ u8 lane;
u16 cmode;
u16 reg;
int err;
- if (port != 9 && port != 10)
- return -EOPNOTSUPP;
-
/* Default to a slow mode, to free up SERDES interfaces for
* other ports which might use them for SFPs.
*/
@@ -411,7 +408,7 @@ int mv88e6390x_port_set_cmode(struct mv88e6xxx_chip *chip, int port,
switch (mode) {
case PHY_INTERFACE_MODE_1000BASEX:
- cmode = MV88E6XXX_PORT_STS_CMODE_1000BASE_X;
+ cmode = MV88E6XXX_PORT_STS_CMODE_1000BASEX;
break;
case PHY_INTERFACE_MODE_SGMII:
cmode = MV88E6XXX_PORT_STS_CMODE_SGMII;
@@ -434,18 +431,15 @@ int mv88e6390x_port_set_cmode(struct mv88e6xxx_chip *chip, int port,
if (cmode == chip->ports[port].cmode)
return 0;
- lane = mv88e6390x_serdes_get_lane(chip, port);
- if (lane < 0 && lane != -ENODEV)
- return lane;
-
- if (lane >= 0) {
+ lane = mv88e6xxx_serdes_get_lane(chip, port);
+ if (lane) {
if (chip->ports[port].serdes_irq) {
- err = mv88e6390_serdes_irq_disable(chip, port, lane);
+ err = mv88e6xxx_serdes_irq_disable(chip, port, lane);
if (err)
return err;
}
- err = mv88e6390x_serdes_power(chip, port, false);
+ err = mv88e6xxx_serdes_power_down(chip, port, lane);
if (err)
return err;
}
@@ -466,16 +460,16 @@ int mv88e6390x_port_set_cmode(struct mv88e6xxx_chip *chip, int port,
chip->ports[port].cmode = cmode;
- lane = mv88e6390x_serdes_get_lane(chip, port);
- if (lane < 0)
- return lane;
+ lane = mv88e6xxx_serdes_get_lane(chip, port);
+ if (!lane)
+ return -ENODEV;
- err = mv88e6390x_serdes_power(chip, port, true);
+ err = mv88e6xxx_serdes_power_up(chip, port, lane);
if (err)
return err;
if (chip->ports[port].serdes_irq) {
- err = mv88e6390_serdes_irq_enable(chip, port, lane);
+ err = mv88e6xxx_serdes_irq_enable(chip, port, lane);
if (err)
return err;
}
@@ -484,9 +478,68 @@ int mv88e6390x_port_set_cmode(struct mv88e6xxx_chip *chip, int port,
return 0;
}
+int mv88e6390x_port_set_cmode(struct mv88e6xxx_chip *chip, int port,
+ phy_interface_t mode)
+{
+ if (port != 9 && port != 10)
+ return -EOPNOTSUPP;
+
+ return mv88e6xxx_port_set_cmode(chip, port, mode);
+}
+
int mv88e6390_port_set_cmode(struct mv88e6xxx_chip *chip, int port,
phy_interface_t mode)
{
+ if (port != 9 && port != 10)
+ return -EOPNOTSUPP;
+
+ switch (mode) {
+ case PHY_INTERFACE_MODE_NA:
+ return 0;
+ case PHY_INTERFACE_MODE_XGMII:
+ case PHY_INTERFACE_MODE_XAUI:
+ case PHY_INTERFACE_MODE_RXAUI:
+ return -EINVAL;
+ default:
+ break;
+ }
+
+ return mv88e6xxx_port_set_cmode(chip, port, mode);
+}
+
+static int mv88e6341_port_set_cmode_writable(struct mv88e6xxx_chip *chip,
+ int port)
+{
+ int err, addr;
+ u16 reg, bits;
+
+ if (port != 5)
+ return -EOPNOTSUPP;
+
+ addr = chip->info->port_base_addr + port;
+
+ err = mv88e6xxx_port_hidden_read(chip, 0x7, addr, 0, &reg);
+ if (err)
+ return err;
+
+ bits = MV88E6341_PORT_RESERVED_1A_FORCE_CMODE |
+ MV88E6341_PORT_RESERVED_1A_SGMII_AN;
+
+ if ((reg & bits) == bits)
+ return 0;
+
+ reg |= bits;
+ return mv88e6xxx_port_hidden_write(chip, 0x7, addr, 0, reg);
+}
+
+int mv88e6341_port_set_cmode(struct mv88e6xxx_chip *chip, int port,
+ phy_interface_t mode)
+{
+ int err;
+
+ if (port != 5)
+ return -EOPNOTSUPP;
+
switch (mode) {
case PHY_INTERFACE_MODE_NA:
return 0;
@@ -498,7 +551,11 @@ int mv88e6390_port_set_cmode(struct mv88e6xxx_chip *chip, int port,
break;
}
- return mv88e6390x_port_set_cmode(chip, port, mode);
+ err = mv88e6341_port_set_cmode_writable(chip, port);
+ if (err)
+ return err;
+
+ return mv88e6xxx_port_set_cmode(chip, port, mode);
}
int mv88e6185_port_get_cmode(struct mv88e6xxx_chip *chip, int port, u8 *cmode)
@@ -590,6 +647,7 @@ int mv88e6250_port_link_state(struct mv88e6xxx_chip *chip, int port,
state->link = !!(reg & MV88E6250_PORT_STS_LINK);
state->an_enabled = 1;
state->an_complete = state->link;
+ state->interface = PHY_INTERFACE_MODE_NA;
return 0;
}
@@ -600,6 +658,43 @@ int mv88e6352_port_link_state(struct mv88e6xxx_chip *chip, int port,
int err;
u16 reg;
+ switch (chip->ports[port].cmode) {
+ case MV88E6XXX_PORT_STS_CMODE_RGMII:
+ err = mv88e6xxx_port_read(chip, port, MV88E6XXX_PORT_MAC_CTL,
+ &reg);
+ if (err)
+ return err;
+
+ if ((reg & MV88E6XXX_PORT_MAC_CTL_RGMII_DELAY_RXCLK) &&
+ (reg & MV88E6XXX_PORT_MAC_CTL_RGMII_DELAY_TXCLK))
+ state->interface = PHY_INTERFACE_MODE_RGMII_ID;
+ else if (reg & MV88E6XXX_PORT_MAC_CTL_RGMII_DELAY_RXCLK)
+ state->interface = PHY_INTERFACE_MODE_RGMII_RXID;
+ else if (reg & MV88E6XXX_PORT_MAC_CTL_RGMII_DELAY_TXCLK)
+ state->interface = PHY_INTERFACE_MODE_RGMII_TXID;
+ else
+ state->interface = PHY_INTERFACE_MODE_RGMII;
+ break;
+ case MV88E6XXX_PORT_STS_CMODE_1000BASEX:
+ state->interface = PHY_INTERFACE_MODE_1000BASEX;
+ break;
+ case MV88E6XXX_PORT_STS_CMODE_SGMII:
+ state->interface = PHY_INTERFACE_MODE_SGMII;
+ break;
+ case MV88E6XXX_PORT_STS_CMODE_2500BASEX:
+ state->interface = PHY_INTERFACE_MODE_2500BASEX;
+ break;
+ case MV88E6XXX_PORT_STS_CMODE_XAUI:
+ state->interface = PHY_INTERFACE_MODE_XAUI;
+ break;
+ case MV88E6XXX_PORT_STS_CMODE_RXAUI:
+ state->interface = PHY_INTERFACE_MODE_RXAUI;
+ break;
+ default:
+ /* we do not support other cmode values here */
+ state->interface = PHY_INTERFACE_MODE_NA;
+ }
+
err = mv88e6xxx_port_read(chip, port, MV88E6XXX_PORT_STS, &reg);
if (err)
return err;
diff --git a/drivers/net/dsa/mv88e6xxx/port.h b/drivers/net/dsa/mv88e6xxx/port.h
index 8d5a6cd6fb19..d4e9bea6e82f 100644
--- a/drivers/net/dsa/mv88e6xxx/port.h
+++ b/drivers/net/dsa/mv88e6xxx/port.h
@@ -42,8 +42,9 @@
#define MV88E6XXX_PORT_STS_TX_PAUSED 0x0020
#define MV88E6XXX_PORT_STS_FLOW_CTL 0x0010
#define MV88E6XXX_PORT_STS_CMODE_MASK 0x000f
-#define MV88E6XXX_PORT_STS_CMODE_100BASE_X 0x0008
-#define MV88E6XXX_PORT_STS_CMODE_1000BASE_X 0x0009
+#define MV88E6XXX_PORT_STS_CMODE_RGMII 0x0007
+#define MV88E6XXX_PORT_STS_CMODE_100BASEX 0x0008
+#define MV88E6XXX_PORT_STS_CMODE_1000BASEX 0x0009
#define MV88E6XXX_PORT_STS_CMODE_SGMII 0x000a
#define MV88E6XXX_PORT_STS_CMODE_2500BASEX 0x000b
#define MV88E6XXX_PORT_STS_CMODE_XAUI 0x000c
@@ -117,6 +118,7 @@
#define MV88E6XXX_PORT_SWITCH_ID_PROD_6190 0x1900
#define MV88E6XXX_PORT_SWITCH_ID_PROD_6191 0x1910
#define MV88E6XXX_PORT_SWITCH_ID_PROD_6185 0x1a70
+#define MV88E6XXX_PORT_SWITCH_ID_PROD_6220 0x2200
#define MV88E6XXX_PORT_SWITCH_ID_PROD_6240 0x2400
#define MV88E6XXX_PORT_SWITCH_ID_PROD_6250 0x2500
#define MV88E6XXX_PORT_SWITCH_ID_PROD_6290 0x2900
@@ -259,14 +261,16 @@
#define MV88E6095_PORT_IEEE_PRIO_REMAP_4567 0x19
/* Offset 0x1a: Magic undocumented errata register */
-#define PORT_RESERVED_1A 0x1a
-#define PORT_RESERVED_1A_BUSY BIT(15)
-#define PORT_RESERVED_1A_WRITE BIT(14)
-#define PORT_RESERVED_1A_READ 0
-#define PORT_RESERVED_1A_PORT_SHIFT 5
-#define PORT_RESERVED_1A_BLOCK (0xf << 10)
-#define PORT_RESERVED_1A_CTRL_PORT 4
-#define PORT_RESERVED_1A_DATA_PORT 5
+#define MV88E6XXX_PORT_RESERVED_1A 0x1a
+#define MV88E6XXX_PORT_RESERVED_1A_BUSY 0x8000
+#define MV88E6XXX_PORT_RESERVED_1A_WRITE 0x4000
+#define MV88E6XXX_PORT_RESERVED_1A_READ 0x0000
+#define MV88E6XXX_PORT_RESERVED_1A_PORT_SHIFT 5
+#define MV88E6XXX_PORT_RESERVED_1A_BLOCK_SHIFT 10
+#define MV88E6XXX_PORT_RESERVED_1A_CTRL_PORT 0x04
+#define MV88E6XXX_PORT_RESERVED_1A_DATA_PORT 0x05
+#define MV88E6341_PORT_RESERVED_1A_FORCE_CMODE 0x8000
+#define MV88E6341_PORT_RESERVED_1A_SGMII_AN 0x2000
int mv88e6xxx_port_read(struct mv88e6xxx_chip *chip, int port, int reg,
u16 *val);
@@ -332,6 +336,8 @@ int mv88e6097_port_pause_limit(struct mv88e6xxx_chip *chip, int port, u8 in,
u8 out);
int mv88e6390_port_pause_limit(struct mv88e6xxx_chip *chip, int port, u8 in,
u8 out);
+int mv88e6341_port_set_cmode(struct mv88e6xxx_chip *chip, int port,
+ phy_interface_t mode);
int mv88e6390_port_set_cmode(struct mv88e6xxx_chip *chip, int port,
phy_interface_t mode);
int mv88e6390x_port_set_cmode(struct mv88e6xxx_chip *chip, int port,
@@ -351,4 +357,10 @@ int mv88e6095_port_set_upstream_port(struct mv88e6xxx_chip *chip, int port,
int mv88e6xxx_port_disable_learn_limit(struct mv88e6xxx_chip *chip, int port);
int mv88e6xxx_port_disable_pri_override(struct mv88e6xxx_chip *chip, int port);
+int mv88e6xxx_port_hidden_write(struct mv88e6xxx_chip *chip, int block,
+ int port, int reg, u16 val);
+int mv88e6xxx_port_hidden_wait(struct mv88e6xxx_chip *chip);
+int mv88e6xxx_port_hidden_read(struct mv88e6xxx_chip *chip, int block, int port,
+ int reg, u16 *val);
+
#endif /* _MV88E6XXX_PORT_H */
diff --git a/drivers/net/dsa/mv88e6xxx/port_hidden.c b/drivers/net/dsa/mv88e6xxx/port_hidden.c
new file mode 100644
index 000000000000..b49d05f0e117
--- /dev/null
+++ b/drivers/net/dsa/mv88e6xxx/port_hidden.c
@@ -0,0 +1,70 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Marvell 88E6xxx Switch Hidden Registers support
+ *
+ * Copyright (c) 2008 Marvell Semiconductor
+ *
+ * Copyright (c) 2019 Andrew Lunn <andrew@lunn.ch>
+ */
+
+#include <linux/bitfield.h>
+
+#include "chip.h"
+#include "port.h"
+
+/* The mv88e6390 and mv88e6341 have some hidden registers used for debug and
+ * development. The errata workarounds also make use of them.
+ */
+int mv88e6xxx_port_hidden_write(struct mv88e6xxx_chip *chip, int block,
+ int port, int reg, u16 val)
+{
+ u16 ctrl;
+ int err;
+
+ err = mv88e6xxx_port_write(chip, MV88E6XXX_PORT_RESERVED_1A_DATA_PORT,
+ MV88E6XXX_PORT_RESERVED_1A, val);
+ if (err)
+ return err;
+
+ ctrl = MV88E6XXX_PORT_RESERVED_1A_BUSY |
+ MV88E6XXX_PORT_RESERVED_1A_WRITE |
+ block << MV88E6XXX_PORT_RESERVED_1A_BLOCK_SHIFT |
+ port << MV88E6XXX_PORT_RESERVED_1A_PORT_SHIFT |
+ reg;
+
+ return mv88e6xxx_port_write(chip, MV88E6XXX_PORT_RESERVED_1A_CTRL_PORT,
+ MV88E6XXX_PORT_RESERVED_1A, ctrl);
+}
+
+int mv88e6xxx_port_hidden_wait(struct mv88e6xxx_chip *chip)
+{
+ int bit = __bf_shf(MV88E6XXX_PORT_RESERVED_1A_BUSY);
+
+ return mv88e6xxx_wait_bit(chip, MV88E6XXX_PORT_RESERVED_1A_CTRL_PORT,
+ MV88E6XXX_PORT_RESERVED_1A, bit, 0);
+}
+
+int mv88e6xxx_port_hidden_read(struct mv88e6xxx_chip *chip, int block, int port,
+ int reg, u16 *val)
+{
+ u16 ctrl;
+ int err;
+
+ ctrl = MV88E6XXX_PORT_RESERVED_1A_BUSY |
+ MV88E6XXX_PORT_RESERVED_1A_READ |
+ block << MV88E6XXX_PORT_RESERVED_1A_BLOCK_SHIFT |
+ port << MV88E6XXX_PORT_RESERVED_1A_PORT_SHIFT |
+ reg;
+
+ err = mv88e6xxx_port_write(chip, MV88E6XXX_PORT_RESERVED_1A_CTRL_PORT,
+ MV88E6XXX_PORT_RESERVED_1A, ctrl);
+ if (err)
+ return err;
+
+ err = mv88e6xxx_port_hidden_wait(chip);
+ if (err)
+ return err;
+
+ return mv88e6xxx_port_read(chip, MV88E6XXX_PORT_RESERVED_1A_DATA_PORT,
+ MV88E6XXX_PORT_RESERVED_1A, val);
+}
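The mv88e6341 cmode unlock in port.c above is the typical consumer of these accessors: read the hidden register, OR in the required bits, and write the result back. Condensed (block/port/register values are taken from mv88e6341_port_set_cmode_writable(); holding the register lock is assumed, as for the other port accessors):

	int addr = chip->info->port_base_addr + 5;	/* hidden port 5 */
	u16 reg;
	int err;

	err = mv88e6xxx_port_hidden_read(chip, 0x7, addr, 0, &reg);
	if (err)
		return err;

	reg |= MV88E6341_PORT_RESERVED_1A_FORCE_CMODE |
	       MV88E6341_PORT_RESERVED_1A_SGMII_AN;

	return mv88e6xxx_port_hidden_write(chip, 0x7, addr, 0, reg);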
diff --git a/drivers/net/dsa/mv88e6xxx/ptp.c b/drivers/net/dsa/mv88e6xxx/ptp.c
index 768d256f7c9f..073cbd0bb91b 100644
--- a/drivers/net/dsa/mv88e6xxx/ptp.c
+++ b/drivers/net/dsa/mv88e6xxx/ptp.c
@@ -15,11 +15,31 @@
#include "hwtstamp.h"
#include "ptp.h"
-/* Raw timestamps are in units of 8-ns clock periods. */
-#define CC_SHIFT 28
-#define CC_MULT (8 << CC_SHIFT)
-#define CC_MULT_NUM (1 << 9)
-#define CC_MULT_DEM 15625ULL
+#define MV88E6XXX_MAX_ADJ_PPB 1000000
+
+/* Family MV88E6250:
+ * Raw timestamps are in units of 10-ns clock periods.
+ *
+ * clkadj = scaled_ppm * 10*2^28 / (10^6 * 2^16)
+ * simplifies to
+ * clkadj = scaled_ppm * 2^7 / 5^5
+ */
+#define MV88E6250_CC_SHIFT 28
+#define MV88E6250_CC_MULT (10 << MV88E6250_CC_SHIFT)
+#define MV88E6250_CC_MULT_NUM (1 << 7)
+#define MV88E6250_CC_MULT_DEM 3125ULL
+
+/* Other families:
+ * Raw timestamps are in units of 8-ns clock periods.
+ *
+ * clkadj = scaled_ppm * 8*2^28 / (10^6 * 2^16)
+ * simplifies to
+ * clkadj = scaled_ppm * 2^9 / 5^6
+ */
+#define MV88E6XXX_CC_SHIFT 28
+#define MV88E6XXX_CC_MULT (8 << MV88E6XXX_CC_SHIFT)
+#define MV88E6XXX_CC_MULT_NUM (1 << 9)
+#define MV88E6XXX_CC_MULT_DEM 15625ULL
#define TAI_EVENT_WORK_INTERVAL msecs_to_jiffies(100)
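Both simplifications in the comments above check out: adjfine's scaled_ppm is ppm in 16.16 fixed point, so the applied correction is diff = cc_mult * scaled_ppm / (10^6 * 2^16), and

	MV88E6250: 10 * 2^28 / (10^6 * 2^16) = 2^12 / 10^5 = 2^7 / 5^5   (num 128, dem 3125)
	others:     8 * 2^28 / (10^6 * 2^16) = 2^15 / 10^6 = 2^9 / 5^6   (num 512, dem 15625)

which matches the CC_MULT_NUM/CC_MULT_DEM pairs defined here.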
@@ -179,6 +199,7 @@ out:
static int mv88e6xxx_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
{
struct mv88e6xxx_chip *chip = ptp_to_chip(ptp);
+ const struct mv88e6xxx_ptp_ops *ptp_ops = chip->info->ops->ptp_ops;
int neg_adj = 0;
u32 diff, mult;
u64 adj;
@@ -187,10 +208,11 @@ static int mv88e6xxx_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
neg_adj = 1;
scaled_ppm = -scaled_ppm;
}
- mult = CC_MULT;
- adj = CC_MULT_NUM;
+
+ mult = ptp_ops->cc_mult;
+ adj = ptp_ops->cc_mult_num;
adj *= scaled_ppm;
- diff = div_u64(adj, CC_MULT_DEM);
+ diff = div_u64(adj, ptp_ops->cc_mult_dem);
mv88e6xxx_reg_lock(chip);
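As a quick numeric check (not driver code): 1 ppm corresponds to scaled_ppm = 65536, and the expected correction is cc_mult / 10^6.

	u64 adj = MV88E6XXX_CC_MULT_NUM;		/* 2^9 */
	u32 diff;

	adj *= 65536;					/* 1 ppm in 16.16 */
	diff = div_u64(adj, MV88E6XXX_CC_MULT_DEM);	/* 2^25 / 5^6 = 2147 */
	/* MV88E6XXX_CC_MULT / 10^6 = (8 << 28) / 10^6, about 2147.5: consistent */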
@@ -310,7 +332,27 @@ static int mv88e6352_ptp_verify(struct ptp_clock_info *ptp, unsigned int pin,
return 0;
}
-const struct mv88e6xxx_ptp_ops mv88e6352_ptp_ops = {
+const struct mv88e6xxx_ptp_ops mv88e6165_ptp_ops = {
+ .clock_read = mv88e6165_ptp_clock_read,
+ .global_enable = mv88e6165_global_enable,
+ .global_disable = mv88e6165_global_disable,
+ .arr0_sts_reg = MV88E6165_PORT_PTP_ARR0_STS,
+ .arr1_sts_reg = MV88E6165_PORT_PTP_ARR1_STS,
+ .dep_sts_reg = MV88E6165_PORT_PTP_DEP_STS,
+ .rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
+ (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
+ (1 << HWTSTAMP_FILTER_PTP_V2_L2_SYNC) |
+ (1 << HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ) |
+ (1 << HWTSTAMP_FILTER_PTP_V2_EVENT) |
+ (1 << HWTSTAMP_FILTER_PTP_V2_SYNC) |
+ (1 << HWTSTAMP_FILTER_PTP_V2_DELAY_REQ),
+ .cc_shift = MV88E6XXX_CC_SHIFT,
+ .cc_mult = MV88E6XXX_CC_MULT,
+ .cc_mult_num = MV88E6XXX_CC_MULT_NUM,
+ .cc_mult_dem = MV88E6XXX_CC_MULT_DEM,
+};
+
+const struct mv88e6xxx_ptp_ops mv88e6250_ptp_ops = {
.clock_read = mv88e6352_ptp_clock_read,
.ptp_enable = mv88e6352_ptp_enable,
.ptp_verify = mv88e6352_ptp_verify,
@@ -331,22 +373,37 @@ const struct mv88e6xxx_ptp_ops mv88e6352_ptp_ops = {
(1 << HWTSTAMP_FILTER_PTP_V2_EVENT) |
(1 << HWTSTAMP_FILTER_PTP_V2_SYNC) |
(1 << HWTSTAMP_FILTER_PTP_V2_DELAY_REQ),
+ .cc_shift = MV88E6250_CC_SHIFT,
+ .cc_mult = MV88E6250_CC_MULT,
+ .cc_mult_num = MV88E6250_CC_MULT_NUM,
+ .cc_mult_dem = MV88E6250_CC_MULT_DEM,
};
-const struct mv88e6xxx_ptp_ops mv88e6165_ptp_ops = {
- .clock_read = mv88e6165_ptp_clock_read,
- .global_enable = mv88e6165_global_enable,
- .global_disable = mv88e6165_global_disable,
- .arr0_sts_reg = MV88E6165_PORT_PTP_ARR0_STS,
- .arr1_sts_reg = MV88E6165_PORT_PTP_ARR1_STS,
- .dep_sts_reg = MV88E6165_PORT_PTP_DEP_STS,
+const struct mv88e6xxx_ptp_ops mv88e6352_ptp_ops = {
+ .clock_read = mv88e6352_ptp_clock_read,
+ .ptp_enable = mv88e6352_ptp_enable,
+ .ptp_verify = mv88e6352_ptp_verify,
+ .event_work = mv88e6352_tai_event_work,
+ .port_enable = mv88e6352_hwtstamp_port_enable,
+ .port_disable = mv88e6352_hwtstamp_port_disable,
+ .n_ext_ts = 1,
+ .arr0_sts_reg = MV88E6XXX_PORT_PTP_ARR0_STS,
+ .arr1_sts_reg = MV88E6XXX_PORT_PTP_ARR1_STS,
+ .dep_sts_reg = MV88E6XXX_PORT_PTP_DEP_STS,
.rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
+ (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT) |
+ (1 << HWTSTAMP_FILTER_PTP_V2_L4_SYNC) |
+ (1 << HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ) |
(1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
(1 << HWTSTAMP_FILTER_PTP_V2_L2_SYNC) |
(1 << HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ) |
(1 << HWTSTAMP_FILTER_PTP_V2_EVENT) |
(1 << HWTSTAMP_FILTER_PTP_V2_SYNC) |
(1 << HWTSTAMP_FILTER_PTP_V2_DELAY_REQ),
+ .cc_shift = MV88E6XXX_CC_SHIFT,
+ .cc_mult = MV88E6XXX_CC_MULT,
+ .cc_mult_num = MV88E6XXX_CC_MULT_NUM,
+ .cc_mult_dem = MV88E6XXX_CC_MULT_DEM,
};
static u64 mv88e6xxx_ptp_clock_read(const struct cyclecounter *cc)
@@ -384,8 +441,8 @@ int mv88e6xxx_ptp_setup(struct mv88e6xxx_chip *chip)
memset(&chip->tstamp_cc, 0, sizeof(chip->tstamp_cc));
chip->tstamp_cc.read = mv88e6xxx_ptp_clock_read;
chip->tstamp_cc.mask = CYCLECOUNTER_MASK(32);
- chip->tstamp_cc.mult = CC_MULT;
- chip->tstamp_cc.shift = CC_SHIFT;
+ chip->tstamp_cc.mult = ptp_ops->cc_mult;
+ chip->tstamp_cc.shift = ptp_ops->cc_shift;
timecounter_init(&chip->tstamp_tc, &chip->tstamp_cc,
ktime_to_ns(ktime_get_real()));
@@ -397,7 +454,6 @@ int mv88e6xxx_ptp_setup(struct mv88e6xxx_chip *chip)
chip->ptp_clock_info.owner = THIS_MODULE;
snprintf(chip->ptp_clock_info.name, sizeof(chip->ptp_clock_info.name),
"%s", dev_name(chip->dev));
- chip->ptp_clock_info.max_adj = 1000000;
chip->ptp_clock_info.n_ext_ts = ptp_ops->n_ext_ts;
chip->ptp_clock_info.n_per_out = 0;
@@ -413,6 +469,7 @@ int mv88e6xxx_ptp_setup(struct mv88e6xxx_chip *chip)
}
chip->ptp_clock_info.pin_config = chip->pin_config;
+ chip->ptp_clock_info.max_adj = MV88E6XXX_MAX_ADJ_PPB;
chip->ptp_clock_info.adjfine = mv88e6xxx_ptp_adjfine;
chip->ptp_clock_info.adjtime = mv88e6xxx_ptp_adjtime;
chip->ptp_clock_info.gettime64 = mv88e6xxx_ptp_gettime;
diff --git a/drivers/net/dsa/mv88e6xxx/ptp.h b/drivers/net/dsa/mv88e6xxx/ptp.h
index 0a1f8de8f062..269d5d16a466 100644
--- a/drivers/net/dsa/mv88e6xxx/ptp.h
+++ b/drivers/net/dsa/mv88e6xxx/ptp.h
@@ -148,8 +148,9 @@ void mv88e6xxx_ptp_free(struct mv88e6xxx_chip *chip);
#define ptp_to_chip(ptp) container_of(ptp, struct mv88e6xxx_chip, \
ptp_clock_info)
-extern const struct mv88e6xxx_ptp_ops mv88e6352_ptp_ops;
extern const struct mv88e6xxx_ptp_ops mv88e6165_ptp_ops;
+extern const struct mv88e6xxx_ptp_ops mv88e6250_ptp_ops;
+extern const struct mv88e6xxx_ptp_ops mv88e6352_ptp_ops;
#else /* !CONFIG_NET_DSA_MV88E6XXX_PTP */
@@ -167,8 +168,9 @@ static inline void mv88e6xxx_ptp_free(struct mv88e6xxx_chip *chip)
{
}
-static const struct mv88e6xxx_ptp_ops mv88e6352_ptp_ops = {};
static const struct mv88e6xxx_ptp_ops mv88e6165_ptp_ops = {};
+static const struct mv88e6xxx_ptp_ops mv88e6250_ptp_ops = {};
+static const struct mv88e6xxx_ptp_ops mv88e6352_ptp_ops = {};
#endif /* CONFIG_NET_DSA_MV88E6XXX_PTP */
diff --git a/drivers/net/dsa/mv88e6xxx/serdes.c b/drivers/net/dsa/mv88e6xxx/serdes.c
index 20c526c2a9ee..902feb398746 100644
--- a/drivers/net/dsa/mv88e6xxx/serdes.c
+++ b/drivers/net/dsa/mv88e6xxx/serdes.c
@@ -49,7 +49,8 @@ static int mv88e6390_serdes_write(struct mv88e6xxx_chip *chip,
return mv88e6xxx_phy_write(chip, lane, reg_c45, val);
}
-static int mv88e6352_serdes_power_set(struct mv88e6xxx_chip *chip, bool on)
+int mv88e6352_serdes_power(struct mv88e6xxx_chip *chip, int port, u8 lane,
+ bool up)
{
u16 val, new_val;
int err;
@@ -58,7 +59,7 @@ static int mv88e6352_serdes_power_set(struct mv88e6xxx_chip *chip, bool on)
if (err)
return err;
- if (on)
+ if (up)
new_val = val & ~BMCR_PDOWN;
else
new_val = val | BMCR_PDOWN;
@@ -69,29 +70,25 @@ static int mv88e6352_serdes_power_set(struct mv88e6xxx_chip *chip, bool on)
return err;
}
-static bool mv88e6352_port_has_serdes(struct mv88e6xxx_chip *chip, int port)
+u8 mv88e6352_serdes_get_lane(struct mv88e6xxx_chip *chip, int port)
{
u8 cmode = chip->ports[port].cmode;
+ u8 lane = 0;
- if ((cmode == MV88E6XXX_PORT_STS_CMODE_100BASE_X) ||
- (cmode == MV88E6XXX_PORT_STS_CMODE_1000BASE_X) ||
+ if ((cmode == MV88E6XXX_PORT_STS_CMODE_100BASEX) ||
+ (cmode == MV88E6XXX_PORT_STS_CMODE_1000BASEX) ||
(cmode == MV88E6XXX_PORT_STS_CMODE_SGMII))
- return true;
+ lane = 0xff; /* Unused */
- return false;
+ return lane;
}
-int mv88e6352_serdes_power(struct mv88e6xxx_chip *chip, int port, bool on)
+static bool mv88e6352_port_has_serdes(struct mv88e6xxx_chip *chip, int port)
{
- int err;
-
- if (mv88e6352_port_has_serdes(chip, port)) {
- err = mv88e6352_serdes_power_set(chip, on);
- if (err < 0)
- return err;
- }
+ if (mv88e6xxx_serdes_get_lane(chip, port))
+ return true;
- return 0;
+ return false;
}
struct mv88e6352_serdes_hw_stat {
@@ -186,214 +183,178 @@ static void mv88e6352_serdes_irq_link(struct mv88e6xxx_chip *chip, int port)
struct dsa_switch *ds = chip->ds;
u16 status;
bool up;
+ int err;
- mv88e6352_serdes_read(chip, MII_BMSR, &status);
+ err = mv88e6352_serdes_read(chip, MII_BMSR, &status);
+ if (err)
+ return;
/* Status must be read twice in order to give the current link
* status. Otherwise the change in link status since the last
* read of the register is returned.
*/
- mv88e6352_serdes_read(chip, MII_BMSR, &status);
+ err = mv88e6352_serdes_read(chip, MII_BMSR, &status);
+ if (err)
+ return;
up = status & BMSR_LSTATUS;
dsa_port_phylink_mac_change(ds, port, up);
}
-static irqreturn_t mv88e6352_serdes_thread_fn(int irq, void *dev_id)
+irqreturn_t mv88e6352_serdes_irq_status(struct mv88e6xxx_chip *chip, int port,
+ u8 lane)
{
- struct mv88e6xxx_port *port = dev_id;
- struct mv88e6xxx_chip *chip = port->chip;
irqreturn_t ret = IRQ_NONE;
u16 status;
int err;
- mv88e6xxx_reg_lock(chip);
-
err = mv88e6352_serdes_read(chip, MV88E6352_SERDES_INT_STATUS, &status);
if (err)
- goto out;
+ return ret;
if (status & MV88E6352_SERDES_INT_LINK_CHANGE) {
ret = IRQ_HANDLED;
- mv88e6352_serdes_irq_link(chip, port->port);
+ mv88e6352_serdes_irq_link(chip, port);
}
-out:
- mv88e6xxx_reg_unlock(chip);
return ret;
}
-static int mv88e6352_serdes_irq_enable(struct mv88e6xxx_chip *chip)
+int mv88e6352_serdes_irq_enable(struct mv88e6xxx_chip *chip, int port, u8 lane,
+ bool enable)
{
- return mv88e6352_serdes_write(chip, MV88E6352_SERDES_INT_ENABLE,
- MV88E6352_SERDES_INT_LINK_CHANGE);
-}
+ u16 val = 0;
-static int mv88e6352_serdes_irq_disable(struct mv88e6xxx_chip *chip)
-{
- return mv88e6352_serdes_write(chip, MV88E6352_SERDES_INT_ENABLE, 0);
+ if (enable)
+ val |= MV88E6352_SERDES_INT_LINK_CHANGE;
+
+ return mv88e6352_serdes_write(chip, MV88E6352_SERDES_INT_ENABLE, val);
}
-int mv88e6352_serdes_irq_setup(struct mv88e6xxx_chip *chip, int port)
+unsigned int mv88e6352_serdes_irq_mapping(struct mv88e6xxx_chip *chip, int port)
{
- int err;
-
- if (!mv88e6352_port_has_serdes(chip, port))
- return 0;
-
- chip->ports[port].serdes_irq = irq_find_mapping(chip->g2_irq.domain,
- MV88E6352_SERDES_IRQ);
- if (chip->ports[port].serdes_irq < 0) {
- dev_err(chip->dev, "Unable to map SERDES irq: %d\n",
- chip->ports[port].serdes_irq);
- return chip->ports[port].serdes_irq;
- }
-
- /* Requesting the IRQ will trigger irq callbacks. So we cannot
- * hold the reg_lock.
- */
- mv88e6xxx_reg_unlock(chip);
- err = request_threaded_irq(chip->ports[port].serdes_irq, NULL,
- mv88e6352_serdes_thread_fn,
- IRQF_ONESHOT, "mv88e6xxx-serdes",
- &chip->ports[port]);
- mv88e6xxx_reg_lock(chip);
-
- if (err) {
- dev_err(chip->dev, "Unable to request SERDES interrupt: %d\n",
- err);
- return err;
- }
-
- return mv88e6352_serdes_irq_enable(chip);
+ return irq_find_mapping(chip->g2_irq.domain, MV88E6352_SERDES_IRQ);
}
-void mv88e6352_serdes_irq_free(struct mv88e6xxx_chip *chip, int port)
+u8 mv88e6341_serdes_get_lane(struct mv88e6xxx_chip *chip, int port)
{
- if (!mv88e6352_port_has_serdes(chip, port))
- return;
-
- mv88e6352_serdes_irq_disable(chip);
+ u8 cmode = chip->ports[port].cmode;
+ u8 lane = 0;
- /* Freeing the IRQ will trigger irq callbacks. So we cannot
- * hold the reg_lock.
- */
- mv88e6xxx_reg_unlock(chip);
- free_irq(chip->ports[port].serdes_irq, &chip->ports[port]);
- mv88e6xxx_reg_lock(chip);
+ switch (port) {
+ case 5:
+ if (cmode == MV88E6XXX_PORT_STS_CMODE_1000BASEX ||
+ cmode == MV88E6XXX_PORT_STS_CMODE_SGMII ||
+ cmode == MV88E6XXX_PORT_STS_CMODE_2500BASEX)
+ lane = MV88E6341_PORT5_LANE;
+ break;
+ }
- chip->ports[port].serdes_irq = 0;
+ return lane;
}
-/* Return the SERDES lane address a port is using. Only Ports 9 and 10
- * have SERDES lanes. Returns -ENODEV if a port does not have a lane.
- */
-static int mv88e6390_serdes_get_lane(struct mv88e6xxx_chip *chip, int port)
+u8 mv88e6390_serdes_get_lane(struct mv88e6xxx_chip *chip, int port)
{
u8 cmode = chip->ports[port].cmode;
+ u8 lane = 0;
switch (port) {
case 9:
- if (cmode == MV88E6XXX_PORT_STS_CMODE_1000BASE_X ||
+ if (cmode == MV88E6XXX_PORT_STS_CMODE_1000BASEX ||
cmode == MV88E6XXX_PORT_STS_CMODE_SGMII ||
cmode == MV88E6XXX_PORT_STS_CMODE_2500BASEX)
- return MV88E6390_PORT9_LANE0;
- return -ENODEV;
+ lane = MV88E6390_PORT9_LANE0;
+ break;
case 10:
- if (cmode == MV88E6XXX_PORT_STS_CMODE_1000BASE_X ||
+ if (cmode == MV88E6XXX_PORT_STS_CMODE_1000BASEX ||
cmode == MV88E6XXX_PORT_STS_CMODE_SGMII ||
cmode == MV88E6XXX_PORT_STS_CMODE_2500BASEX)
- return MV88E6390_PORT10_LANE0;
- return -ENODEV;
- default:
- return -ENODEV;
+ lane = MV88E6390_PORT10_LANE0;
+ break;
}
+
+ return lane;
}
-/* Return the SERDES lane address a port is using. Ports 9 and 10 can
- * use multiple lanes. If so, return the first lane the port uses.
- * Returns -ENODEV if a port does not have a lane.
- */
-int mv88e6390x_serdes_get_lane(struct mv88e6xxx_chip *chip, int port)
+u8 mv88e6390x_serdes_get_lane(struct mv88e6xxx_chip *chip, int port)
{
- u8 cmode_port9, cmode_port10, cmode_port;
-
- cmode_port9 = chip->ports[9].cmode;
- cmode_port10 = chip->ports[10].cmode;
- cmode_port = chip->ports[port].cmode;
+ u8 cmode_port = chip->ports[port].cmode;
+ u8 cmode_port10 = chip->ports[10].cmode;
+ u8 cmode_port9 = chip->ports[9].cmode;
+ u8 lane = 0;
switch (port) {
case 2:
- if (cmode_port9 == MV88E6XXX_PORT_STS_CMODE_1000BASE_X ||
+ if (cmode_port9 == MV88E6XXX_PORT_STS_CMODE_1000BASEX ||
cmode_port9 == MV88E6XXX_PORT_STS_CMODE_SGMII ||
cmode_port9 == MV88E6XXX_PORT_STS_CMODE_2500BASEX)
- if (cmode_port == MV88E6XXX_PORT_STS_CMODE_1000BASE_X)
- return MV88E6390_PORT9_LANE1;
- return -ENODEV;
+ if (cmode_port == MV88E6XXX_PORT_STS_CMODE_1000BASEX)
+ lane = MV88E6390_PORT9_LANE1;
+ break;
case 3:
- if (cmode_port9 == MV88E6XXX_PORT_STS_CMODE_1000BASE_X ||
+ if (cmode_port9 == MV88E6XXX_PORT_STS_CMODE_1000BASEX ||
cmode_port9 == MV88E6XXX_PORT_STS_CMODE_SGMII ||
cmode_port9 == MV88E6XXX_PORT_STS_CMODE_2500BASEX ||
cmode_port9 == MV88E6XXX_PORT_STS_CMODE_RXAUI)
- if (cmode_port == MV88E6XXX_PORT_STS_CMODE_1000BASE_X)
- return MV88E6390_PORT9_LANE2;
- return -ENODEV;
+ if (cmode_port == MV88E6XXX_PORT_STS_CMODE_1000BASEX)
+ lane = MV88E6390_PORT9_LANE2;
+ break;
case 4:
- if (cmode_port9 == MV88E6XXX_PORT_STS_CMODE_1000BASE_X ||
+ if (cmode_port9 == MV88E6XXX_PORT_STS_CMODE_1000BASEX ||
cmode_port9 == MV88E6XXX_PORT_STS_CMODE_SGMII ||
cmode_port9 == MV88E6XXX_PORT_STS_CMODE_2500BASEX ||
cmode_port9 == MV88E6XXX_PORT_STS_CMODE_RXAUI)
- if (cmode_port == MV88E6XXX_PORT_STS_CMODE_1000BASE_X)
- return MV88E6390_PORT9_LANE3;
- return -ENODEV;
+ if (cmode_port == MV88E6XXX_PORT_STS_CMODE_1000BASEX)
+ lane = MV88E6390_PORT9_LANE3;
+ break;
case 5:
- if (cmode_port10 == MV88E6XXX_PORT_STS_CMODE_1000BASE_X ||
+ if (cmode_port10 == MV88E6XXX_PORT_STS_CMODE_1000BASEX ||
cmode_port10 == MV88E6XXX_PORT_STS_CMODE_SGMII ||
cmode_port10 == MV88E6XXX_PORT_STS_CMODE_2500BASEX)
- if (cmode_port == MV88E6XXX_PORT_STS_CMODE_1000BASE_X)
- return MV88E6390_PORT10_LANE1;
- return -ENODEV;
+ if (cmode_port == MV88E6XXX_PORT_STS_CMODE_1000BASEX)
+ lane = MV88E6390_PORT10_LANE1;
+ break;
case 6:
- if (cmode_port10 == MV88E6XXX_PORT_STS_CMODE_1000BASE_X ||
+ if (cmode_port10 == MV88E6XXX_PORT_STS_CMODE_1000BASEX ||
cmode_port10 == MV88E6XXX_PORT_STS_CMODE_SGMII ||
cmode_port10 == MV88E6XXX_PORT_STS_CMODE_2500BASEX ||
cmode_port10 == MV88E6XXX_PORT_STS_CMODE_RXAUI)
- if (cmode_port == MV88E6XXX_PORT_STS_CMODE_1000BASE_X)
- return MV88E6390_PORT10_LANE2;
- return -ENODEV;
+ if (cmode_port == MV88E6XXX_PORT_STS_CMODE_1000BASEX)
+ lane = MV88E6390_PORT10_LANE2;
+ break;
case 7:
- if (cmode_port10 == MV88E6XXX_PORT_STS_CMODE_1000BASE_X ||
+ if (cmode_port10 == MV88E6XXX_PORT_STS_CMODE_1000BASEX ||
cmode_port10 == MV88E6XXX_PORT_STS_CMODE_SGMII ||
cmode_port10 == MV88E6XXX_PORT_STS_CMODE_2500BASEX ||
cmode_port10 == MV88E6XXX_PORT_STS_CMODE_RXAUI)
- if (cmode_port == MV88E6XXX_PORT_STS_CMODE_1000BASE_X)
- return MV88E6390_PORT10_LANE3;
- return -ENODEV;
+ if (cmode_port == MV88E6XXX_PORT_STS_CMODE_1000BASEX)
+ lane = MV88E6390_PORT10_LANE3;
+ break;
case 9:
- if (cmode_port9 == MV88E6XXX_PORT_STS_CMODE_1000BASE_X ||
+ if (cmode_port9 == MV88E6XXX_PORT_STS_CMODE_1000BASEX ||
cmode_port9 == MV88E6XXX_PORT_STS_CMODE_SGMII ||
cmode_port9 == MV88E6XXX_PORT_STS_CMODE_2500BASEX ||
cmode_port9 == MV88E6XXX_PORT_STS_CMODE_XAUI ||
cmode_port9 == MV88E6XXX_PORT_STS_CMODE_RXAUI)
- return MV88E6390_PORT9_LANE0;
- return -ENODEV;
+ lane = MV88E6390_PORT9_LANE0;
+ break;
case 10:
- if (cmode_port10 == MV88E6XXX_PORT_STS_CMODE_1000BASE_X ||
+ if (cmode_port10 == MV88E6XXX_PORT_STS_CMODE_1000BASEX ||
cmode_port10 == MV88E6XXX_PORT_STS_CMODE_SGMII ||
cmode_port10 == MV88E6XXX_PORT_STS_CMODE_2500BASEX ||
cmode_port10 == MV88E6XXX_PORT_STS_CMODE_XAUI ||
cmode_port10 == MV88E6XXX_PORT_STS_CMODE_RXAUI)
- return MV88E6390_PORT10_LANE0;
- return -ENODEV;
- default:
- return -ENODEV;
+ lane = MV88E6390_PORT10_LANE0;
+ break;
}
+
+ return lane;
}
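With this refactor a lane of 0 uniformly means "no SERDES lane" (all real lane addresses on these chips are non-zero), replacing the old int/-ENODEV convention. The resulting caller pattern, as used by the cmode code in port.c above:

	u8 lane;
	int err;

	lane = mv88e6xxx_serdes_get_lane(chip, port);
	if (!lane)
		return 0;	/* no lane: nothing to power up */

	err = mv88e6xxx_serdes_power_up(chip, port, lane);
	if (err)
		return err;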
-/* Set the power on/off for 10GBASE-R and 10GBASE-X4/X2 */
-static int mv88e6390_serdes_power_10g(struct mv88e6xxx_chip *chip, int lane,
- bool on)
+/* Set power up/down for 10GBASE-R and 10GBASE-X4/X2 */
+static int mv88e6390_serdes_power_10g(struct mv88e6xxx_chip *chip, u8 lane,
+ bool up)
{
u16 val, new_val;
int err;
@@ -404,7 +365,7 @@ static int mv88e6390_serdes_power_10g(struct mv88e6xxx_chip *chip, int lane,
if (err)
return err;
- if (on)
+ if (up)
new_val = val & ~(MV88E6390_PCS_CONTROL_1_RESET |
MV88E6390_PCS_CONTROL_1_LOOPBACK |
MV88E6390_PCS_CONTROL_1_PDOWN);
@@ -418,9 +379,9 @@ static int mv88e6390_serdes_power_10g(struct mv88e6xxx_chip *chip, int lane,
return err;
}
-/* Set the power on/off for SGMII and 1000Base-X */
-static int mv88e6390_serdes_power_sgmii(struct mv88e6xxx_chip *chip, int lane,
- bool on)
+/* Set power up/down for SGMII and 1000Base-X */
+static int mv88e6390_serdes_power_sgmii(struct mv88e6xxx_chip *chip, u8 lane,
+ bool up)
{
u16 val, new_val;
int err;
@@ -430,7 +391,7 @@ static int mv88e6390_serdes_power_sgmii(struct mv88e6xxx_chip *chip, int lane,
if (err)
return err;
- if (on)
+ if (up)
new_val = val & ~(MV88E6390_SGMII_CONTROL_RESET |
MV88E6390_SGMII_CONTROL_LOOPBACK |
MV88E6390_SGMII_CONTROL_PDOWN);
@@ -444,70 +405,32 @@ static int mv88e6390_serdes_power_sgmii(struct mv88e6xxx_chip *chip, int lane,
return err;
}
-static int mv88e6390_serdes_power_lane(struct mv88e6xxx_chip *chip, int port,
- int lane, bool on)
+int mv88e6390_serdes_power(struct mv88e6xxx_chip *chip, int port, u8 lane,
+ bool up)
{
u8 cmode = chip->ports[port].cmode;
switch (cmode) {
case MV88E6XXX_PORT_STS_CMODE_SGMII:
- case MV88E6XXX_PORT_STS_CMODE_1000BASE_X:
+ case MV88E6XXX_PORT_STS_CMODE_1000BASEX:
case MV88E6XXX_PORT_STS_CMODE_2500BASEX:
- return mv88e6390_serdes_power_sgmii(chip, lane, on);
+ return mv88e6390_serdes_power_sgmii(chip, lane, up);
case MV88E6XXX_PORT_STS_CMODE_XAUI:
case MV88E6XXX_PORT_STS_CMODE_RXAUI:
- return mv88e6390_serdes_power_10g(chip, lane, on);
- }
-
- return 0;
-}
-
-int mv88e6390_serdes_power(struct mv88e6xxx_chip *chip, int port, bool on)
-{
- int lane;
-
- lane = mv88e6390_serdes_get_lane(chip, port);
- if (lane == -ENODEV)
- return 0;
-
- if (lane < 0)
- return lane;
-
- switch (port) {
- case 9 ... 10:
- return mv88e6390_serdes_power_lane(chip, port, lane, on);
- }
-
- return 0;
-}
-
-int mv88e6390x_serdes_power(struct mv88e6xxx_chip *chip, int port, bool on)
-{
- int lane;
-
- lane = mv88e6390x_serdes_get_lane(chip, port);
- if (lane == -ENODEV)
- return 0;
-
- if (lane < 0)
- return lane;
-
- switch (port) {
- case 2 ... 4:
- case 5 ... 7:
- case 9 ... 10:
- return mv88e6390_serdes_power_lane(chip, port, lane, on);
+ return mv88e6390_serdes_power_10g(chip, lane, up);
}
return 0;
}
static void mv88e6390_serdes_irq_link_sgmii(struct mv88e6xxx_chip *chip,
- int port, int lane)
+ int port, u8 lane)
{
+ u8 cmode = chip->ports[port].cmode;
struct dsa_switch *ds = chip->ds;
int duplex = DUPLEX_UNKNOWN;
int speed = SPEED_UNKNOWN;
+ phy_interface_t mode;
int link, err;
u16 status;
@@ -527,7 +450,10 @@ static void mv88e6390_serdes_irq_link_sgmii(struct mv88e6xxx_chip *chip,
switch (status & MV88E6390_SGMII_PHY_STATUS_SPEED_MASK) {
case MV88E6390_SGMII_PHY_STATUS_SPEED_1000:
- speed = SPEED_1000;
+ if (cmode == MV88E6XXX_PORT_STS_CMODE_2500BASEX)
+ speed = SPEED_2500;
+ else
+ speed = SPEED_1000;
break;
case MV88E6390_SGMII_PHY_STATUS_SPEED_100:
speed = SPEED_100;
@@ -541,8 +467,22 @@ static void mv88e6390_serdes_irq_link_sgmii(struct mv88e6xxx_chip *chip,
}
}
+ switch (cmode) {
+ case MV88E6XXX_PORT_STS_CMODE_SGMII:
+ mode = PHY_INTERFACE_MODE_SGMII;
+ break;
+ case MV88E6XXX_PORT_STS_CMODE_1000BASEX:
+ mode = PHY_INTERFACE_MODE_1000BASEX;
+ break;
+ case MV88E6XXX_PORT_STS_CMODE_2500BASEX:
+ mode = PHY_INTERFACE_MODE_2500BASEX;
+ break;
+ default:
+ mode = PHY_INTERFACE_MODE_NA;
+ }
+
err = mv88e6xxx_port_setup_mac(chip, port, link, speed, duplex,
- PAUSE_OFF, PHY_INTERFACE_MODE_NA);
+ PAUSE_OFF, mode);
if (err)
dev_err(chip->dev, "can't propagate PHY settings to MAC: %d\n",
err);
@@ -551,55 +491,35 @@ static void mv88e6390_serdes_irq_link_sgmii(struct mv88e6xxx_chip *chip,
}
static int mv88e6390_serdes_irq_enable_sgmii(struct mv88e6xxx_chip *chip,
- int lane)
-{
- return mv88e6390_serdes_write(chip, lane, MDIO_MMD_PHYXS,
- MV88E6390_SGMII_INT_ENABLE,
- MV88E6390_SGMII_INT_LINK_DOWN |
- MV88E6390_SGMII_INT_LINK_UP);
-}
-
-static int mv88e6390_serdes_irq_disable_sgmii(struct mv88e6xxx_chip *chip,
- int lane)
+ u8 lane, bool enable)
{
- return mv88e6390_serdes_write(chip, lane, MDIO_MMD_PHYXS,
- MV88E6390_SGMII_INT_ENABLE, 0);
-}
+ u16 val = 0;
-int mv88e6390_serdes_irq_enable(struct mv88e6xxx_chip *chip, int port,
- int lane)
-{
- u8 cmode = chip->ports[port].cmode;
- int err = 0;
-
- switch (cmode) {
- case MV88E6XXX_PORT_STS_CMODE_SGMII:
- case MV88E6XXX_PORT_STS_CMODE_1000BASE_X:
- case MV88E6XXX_PORT_STS_CMODE_2500BASEX:
- err = mv88e6390_serdes_irq_enable_sgmii(chip, lane);
- }
+ if (enable)
+ val |= MV88E6390_SGMII_INT_LINK_DOWN |
+ MV88E6390_SGMII_INT_LINK_UP;
- return err;
+ return mv88e6390_serdes_write(chip, lane, MDIO_MMD_PHYXS,
+ MV88E6390_SGMII_INT_ENABLE, val);
}
-int mv88e6390_serdes_irq_disable(struct mv88e6xxx_chip *chip, int port,
- int lane)
+int mv88e6390_serdes_irq_enable(struct mv88e6xxx_chip *chip, int port, u8 lane,
+ bool enable)
{
u8 cmode = chip->ports[port].cmode;
- int err = 0;
switch (cmode) {
case MV88E6XXX_PORT_STS_CMODE_SGMII:
- case MV88E6XXX_PORT_STS_CMODE_1000BASE_X:
+ case MV88E6XXX_PORT_STS_CMODE_1000BASEX:
case MV88E6XXX_PORT_STS_CMODE_2500BASEX:
- err = mv88e6390_serdes_irq_disable_sgmii(chip, lane);
+ return mv88e6390_serdes_irq_enable_sgmii(chip, lane, enable);
}
- return err;
+ return 0;
}
static int mv88e6390_serdes_irq_status_sgmii(struct mv88e6xxx_chip *chip,
- int lane, u16 *status)
+ u8 lane, u16 *status)
{
int err;
@@ -609,129 +529,32 @@ static int mv88e6390_serdes_irq_status_sgmii(struct mv88e6xxx_chip *chip,
return err;
}
-static irqreturn_t mv88e6390_serdes_thread_fn(int irq, void *dev_id)
+irqreturn_t mv88e6390_serdes_irq_status(struct mv88e6xxx_chip *chip, int port,
+ u8 lane)
{
- struct mv88e6xxx_port *port = dev_id;
- struct mv88e6xxx_chip *chip = port->chip;
+ u8 cmode = chip->ports[port].cmode;
irqreturn_t ret = IRQ_NONE;
- u8 cmode = port->cmode;
u16 status;
- int lane;
int err;
- lane = mv88e6390x_serdes_get_lane(chip, port->port);
-
- mv88e6xxx_reg_lock(chip);
-
switch (cmode) {
case MV88E6XXX_PORT_STS_CMODE_SGMII:
- case MV88E6XXX_PORT_STS_CMODE_1000BASE_X:
+ case MV88E6XXX_PORT_STS_CMODE_1000BASEX:
case MV88E6XXX_PORT_STS_CMODE_2500BASEX:
err = mv88e6390_serdes_irq_status_sgmii(chip, lane, &status);
if (err)
- goto out;
+ return ret;
if (status & (MV88E6390_SGMII_INT_LINK_DOWN |
MV88E6390_SGMII_INT_LINK_UP)) {
ret = IRQ_HANDLED;
- mv88e6390_serdes_irq_link_sgmii(chip, port->port, lane);
+ mv88e6390_serdes_irq_link_sgmii(chip, port, lane);
}
}
-out:
- mv88e6xxx_reg_unlock(chip);
return ret;
}
-int mv88e6390x_serdes_irq_setup(struct mv88e6xxx_chip *chip, int port)
-{
- int lane;
- int err;
-
- lane = mv88e6390x_serdes_get_lane(chip, port);
-
- if (lane == -ENODEV)
- return 0;
-
- if (lane < 0)
- return lane;
-
- chip->ports[port].serdes_irq = irq_find_mapping(chip->g2_irq.domain,
- port);
- if (chip->ports[port].serdes_irq < 0) {
- dev_err(chip->dev, "Unable to map SERDES irq: %d\n",
- chip->ports[port].serdes_irq);
- return chip->ports[port].serdes_irq;
- }
-
- /* Requesting the IRQ will trigger irq callbacks. So we cannot
- * hold the reg_lock.
- */
- mv88e6xxx_reg_unlock(chip);
- err = request_threaded_irq(chip->ports[port].serdes_irq, NULL,
- mv88e6390_serdes_thread_fn,
- IRQF_ONESHOT, "mv88e6xxx-serdes",
- &chip->ports[port]);
- mv88e6xxx_reg_lock(chip);
-
- if (err) {
- dev_err(chip->dev, "Unable to request SERDES interrupt: %d\n",
- err);
- return err;
- }
-
- return mv88e6390_serdes_irq_enable(chip, port, lane);
-}
-
-int mv88e6390_serdes_irq_setup(struct mv88e6xxx_chip *chip, int port)
-{
- if (port < 9)
- return 0;
-
- return mv88e6390x_serdes_irq_setup(chip, port);
-}
-
-void mv88e6390x_serdes_irq_free(struct mv88e6xxx_chip *chip, int port)
-{
- int lane = mv88e6390x_serdes_get_lane(chip, port);
-
- if (lane == -ENODEV)
- return;
-
- if (lane < 0)
- return;
-
- mv88e6390_serdes_irq_disable(chip, port, lane);
-
- /* Freeing the IRQ will trigger irq callbacks. So we cannot
- * hold the reg_lock.
- */
- mv88e6xxx_reg_unlock(chip);
- free_irq(chip->ports[port].serdes_irq, &chip->ports[port]);
- mv88e6xxx_reg_lock(chip);
-
- chip->ports[port].serdes_irq = 0;
-}
-
-void mv88e6390_serdes_irq_free(struct mv88e6xxx_chip *chip, int port)
-{
- if (port < 9)
- return;
-
- mv88e6390x_serdes_irq_free(chip, port);
-}
-
-int mv88e6341_serdes_power(struct mv88e6xxx_chip *chip, int port, bool on)
+unsigned int mv88e6390_serdes_irq_mapping(struct mv88e6xxx_chip *chip, int port)
{
- u8 cmode = chip->ports[port].cmode;
-
- if (port != 5)
- return 0;
-
- if (cmode == MV88E6XXX_PORT_STS_CMODE_1000BASE_X ||
- cmode == MV88E6XXX_PORT_STS_CMODE_SGMII ||
- cmode == MV88E6XXX_PORT_STS_CMODE_2500BASEX)
- return mv88e6390_serdes_power_sgmii(chip, MV88E6341_ADDR_SERDES,
- on);
-
- return 0;
+ return irq_find_mapping(chip->g2_irq.domain, port);
}
diff --git a/drivers/net/dsa/mv88e6xxx/serdes.h b/drivers/net/dsa/mv88e6xxx/serdes.h
index ff5b94439335..bd8df36ab537 100644
--- a/drivers/net/dsa/mv88e6xxx/serdes.h
+++ b/drivers/net/dsa/mv88e6xxx/serdes.h
@@ -28,7 +28,7 @@
#define MV88E6352_SERDES_INT_STATUS 0x13
-#define MV88E6341_ADDR_SERDES 0x15
+#define MV88E6341_PORT5_LANE 0x15
#define MV88E6390_PORT9_LANE0 0x09
#define MV88E6390_PORT9_LANE1 0x12
@@ -74,26 +74,94 @@
#define MV88E6390_SGMII_PHY_STATUS_SPD_DPL_VALID BIT(11)
#define MV88E6390_SGMII_PHY_STATUS_LINK BIT(10)
-int mv88e6390x_serdes_get_lane(struct mv88e6xxx_chip *chip, int port);
-int mv88e6341_serdes_power(struct mv88e6xxx_chip *chip, int port, bool on);
-int mv88e6352_serdes_power(struct mv88e6xxx_chip *chip, int port, bool on);
-int mv88e6390_serdes_power(struct mv88e6xxx_chip *chip, int port, bool on);
-int mv88e6390x_serdes_power(struct mv88e6xxx_chip *chip, int port, bool on);
-int mv88e6390_serdes_irq_setup(struct mv88e6xxx_chip *chip, int port);
-void mv88e6390_serdes_irq_free(struct mv88e6xxx_chip *chip, int port);
-int mv88e6390x_serdes_irq_setup(struct mv88e6xxx_chip *chip, int port);
-void mv88e6390x_serdes_irq_free(struct mv88e6xxx_chip *chip, int port);
+u8 mv88e6341_serdes_get_lane(struct mv88e6xxx_chip *chip, int port);
+u8 mv88e6352_serdes_get_lane(struct mv88e6xxx_chip *chip, int port);
+u8 mv88e6390_serdes_get_lane(struct mv88e6xxx_chip *chip, int port);
+u8 mv88e6390x_serdes_get_lane(struct mv88e6xxx_chip *chip, int port);
+unsigned int mv88e6352_serdes_irq_mapping(struct mv88e6xxx_chip *chip,
+ int port);
+unsigned int mv88e6390_serdes_irq_mapping(struct mv88e6xxx_chip *chip,
+ int port);
+int mv88e6352_serdes_power(struct mv88e6xxx_chip *chip, int port, u8 lane,
+ bool on);
+int mv88e6390_serdes_power(struct mv88e6xxx_chip *chip, int port, u8 lane,
+ bool on);
+int mv88e6352_serdes_irq_enable(struct mv88e6xxx_chip *chip, int port, u8 lane,
+ bool enable);
+int mv88e6390_serdes_irq_enable(struct mv88e6xxx_chip *chip, int port, u8 lane,
+ bool enable);
+irqreturn_t mv88e6352_serdes_irq_status(struct mv88e6xxx_chip *chip, int port,
+ u8 lane);
+irqreturn_t mv88e6390_serdes_irq_status(struct mv88e6xxx_chip *chip, int port,
+ u8 lane);
int mv88e6352_serdes_get_sset_count(struct mv88e6xxx_chip *chip, int port);
int mv88e6352_serdes_get_strings(struct mv88e6xxx_chip *chip,
int port, uint8_t *data);
int mv88e6352_serdes_get_stats(struct mv88e6xxx_chip *chip, int port,
uint64_t *data);
-int mv88e6390_serdes_irq_enable(struct mv88e6xxx_chip *chip, int port,
- int lane);
-int mv88e6390_serdes_irq_disable(struct mv88e6xxx_chip *chip, int port,
- int lane);
-int mv88e6352_serdes_irq_setup(struct mv88e6xxx_chip *chip, int port);
-void mv88e6352_serdes_irq_free(struct mv88e6xxx_chip *chip, int port);
+/* Return the (first) SERDES lane address a port is using, or 0 if it has none. */
+static inline u8 mv88e6xxx_serdes_get_lane(struct mv88e6xxx_chip *chip,
+ int port)
+{
+ if (!chip->info->ops->serdes_get_lane)
+ return 0;
+
+ return chip->info->ops->serdes_get_lane(chip, port);
+}
+
+static inline int mv88e6xxx_serdes_power_up(struct mv88e6xxx_chip *chip,
+ int port, u8 lane)
+{
+ if (!chip->info->ops->serdes_power)
+ return -EOPNOTSUPP;
+
+ return chip->info->ops->serdes_power(chip, port, lane, true);
+}
+
+static inline int mv88e6xxx_serdes_power_down(struct mv88e6xxx_chip *chip,
+ int port, u8 lane)
+{
+ if (!chip->info->ops->serdes_power)
+ return -EOPNOTSUPP;
+
+ return chip->info->ops->serdes_power(chip, port, lane, false);
+}
+
+static inline unsigned int
+mv88e6xxx_serdes_irq_mapping(struct mv88e6xxx_chip *chip, int port)
+{
+ if (!chip->info->ops->serdes_irq_mapping)
+ return 0;
+
+ return chip->info->ops->serdes_irq_mapping(chip, port);
+}
+
+static inline int mv88e6xxx_serdes_irq_enable(struct mv88e6xxx_chip *chip,
+ int port, u8 lane)
+{
+ if (!chip->info->ops->serdes_irq_enable)
+ return -EOPNOTSUPP;
+
+ return chip->info->ops->serdes_irq_enable(chip, port, lane, true);
+}
+
+static inline int mv88e6xxx_serdes_irq_disable(struct mv88e6xxx_chip *chip,
+ int port, u8 lane)
+{
+ if (!chip->info->ops->serdes_irq_enable)
+ return -EOPNOTSUPP;
+
+ return chip->info->ops->serdes_irq_enable(chip, port, lane, false);
+}
+
+static inline irqreturn_t
+mv88e6xxx_serdes_irq_status(struct mv88e6xxx_chip *chip, int port, u8 lane)
+{
+ if (!chip->info->ops->serdes_irq_status)
+ return IRQ_NONE;
+
+ return chip->info->ops->serdes_irq_status(chip, port, lane);
+}
#endif
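These wrappers dispatch through per-chip function pointers instead of hard-coded family calls. A sketch of how a 6390 entry would wire them up (the field names are taken from the wrappers above; the actual ops tables live in chip.c and list many more callbacks):

	static const struct mv88e6xxx_ops mv88e6390_ops = {
		/* ... */
		.serdes_power = mv88e6390_serdes_power,
		.serdes_get_lane = mv88e6390_serdes_get_lane,
		.serdes_irq_mapping = mv88e6390_serdes_irq_mapping,
		.serdes_irq_enable = mv88e6390_serdes_irq_enable,
		.serdes_irq_status = mv88e6390_serdes_irq_status,
	};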
diff --git a/drivers/net/dsa/mv88e6xxx/smi.c b/drivers/net/dsa/mv88e6xxx/smi.c
index 5fc78a063843..282fe08db050 100644
--- a/drivers/net/dsa/mv88e6xxx/smi.c
+++ b/drivers/net/dsa/mv88e6xxx/smi.c
@@ -64,8 +64,10 @@ static int mv88e6xxx_smi_direct_wait(struct mv88e6xxx_chip *chip,
if (err)
return err;
- if (!!(data >> bit) == !!val)
+ if (!!(data & BIT(bit)) == !!val)
return 0;
+
+ usleep_range(1000, 2000);
}
return -ETIMEDOUT;
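Two fixes in one hunk: !!(data >> bit) is true whenever any bit at or above `bit` is set, so the old test could mistake an unrelated flag for the polled bit; data & BIT(bit) isolates exactly the bit of interest. The added usleep_range() also paces the loop instead of hammering the SMI bus. For example:

	u16 data = BIT(15);			/* unrelated flag set, bit 11 clear */
	bool old = !!(data >> 11);		/* 1: wrong, sees bit 15 */
	bool fixed = !!(data & BIT(11));	/* 0: correct */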
diff --git a/drivers/net/dsa/sja1105/sja1105_main.c b/drivers/net/dsa/sja1105/sja1105_main.c
index df976b259e43..d8cff0107ec4 100644
--- a/drivers/net/dsa/sja1105/sja1105_main.c
+++ b/drivers/net/dsa/sja1105/sja1105_main.c
@@ -1728,6 +1728,21 @@ static void sja1105_teardown(struct dsa_switch *ds)
sja1105_static_config_free(&priv->static_config);
}
+static int sja1105_port_enable(struct dsa_switch *ds, int port,
+ struct phy_device *phy)
+{
+ struct net_device *slave;
+
+ if (!dsa_is_user_port(ds, port))
+ return 0;
+
+ slave = ds->ports[port].slave;
+
+ slave->features &= ~NETIF_F_HW_VLAN_CTAG_FILTER;
+
+ return 0;
+}
+
static int sja1105_mgmt_xmit(struct dsa_switch *ds, int port, int slot,
struct sk_buff *skb, bool takets)
{
@@ -2049,6 +2064,7 @@ static const struct dsa_switch_ops sja1105_switch_ops = {
.get_ethtool_stats = sja1105_get_ethtool_stats,
.get_sset_count = sja1105_get_sset_count,
.get_ts_info = sja1105_get_ts_info,
+ .port_enable = sja1105_port_enable,
.port_fdb_dump = sja1105_fdb_dump,
.port_fdb_add = sja1105_fdb_add,
.port_fdb_del = sja1105_fdb_del,
diff --git a/drivers/net/ethernet/3com/3c59x.c b/drivers/net/ethernet/3com/3c59x.c
index 147051404194..8785c2ff3825 100644
--- a/drivers/net/ethernet/3com/3c59x.c
+++ b/drivers/net/ethernet/3com/3c59x.c
@@ -847,8 +847,7 @@ static void poll_vortex(struct net_device *dev)
static int vortex_suspend(struct device *dev)
{
- struct pci_dev *pdev = to_pci_dev(dev);
- struct net_device *ndev = pci_get_drvdata(pdev);
+ struct net_device *ndev = dev_get_drvdata(dev);
if (!ndev || !netif_running(ndev))
return 0;
@@ -861,8 +860,7 @@ static int vortex_suspend(struct device *dev)
static int vortex_resume(struct device *dev)
{
- struct pci_dev *pdev = to_pci_dev(dev);
- struct net_device *ndev = pci_get_drvdata(pdev);
+ struct net_device *ndev = dev_get_drvdata(dev);
int err;
if (!ndev || !netif_running(ndev))
@@ -2175,7 +2173,7 @@ boomerang_start_xmit(struct sk_buff *skb, struct net_device *dev)
dma_addr = skb_frag_dma_map(vp->gendev, frag,
0,
- frag->size,
+ skb_frag_size(frag),
DMA_TO_DEVICE);
if (dma_mapping_error(vp->gendev, dma_addr)) {
for(i = i-1; i >= 0; i--)
diff --git a/drivers/net/ethernet/aeroflex/greth.c b/drivers/net/ethernet/aeroflex/greth.c
index 010a2f48aea5..2a9f8643629c 100644
--- a/drivers/net/ethernet/aeroflex/greth.c
+++ b/drivers/net/ethernet/aeroflex/greth.c
@@ -110,7 +110,7 @@ static void greth_print_tx_packet(struct sk_buff *skb)
print_hex_dump(KERN_DEBUG, "TX: ", DUMP_PREFIX_OFFSET, 16, 1,
skb_frag_address(&skb_shinfo(skb)->frags[i]),
- skb_shinfo(skb)->frags[i].size, true);
+ skb_frag_size(&skb_shinfo(skb)->frags[i]), true);
}
}
diff --git a/drivers/net/ethernet/agere/et131x.c b/drivers/net/ethernet/agere/et131x.c
index edbb4b3604c7..174344c450af 100644
--- a/drivers/net/ethernet/agere/et131x.c
+++ b/drivers/net/ethernet/agere/et131x.c
@@ -2426,7 +2426,7 @@ static int nic_send_packet(struct et131x_adapter *adapter, struct tcb *tcb)
u32 thiscopy, remainder;
struct sk_buff *skb = tcb->skb;
u32 nr_frags = skb_shinfo(skb)->nr_frags + 1;
- struct skb_frag_struct *frags = &skb_shinfo(skb)->frags[0];
+ skb_frag_t *frags = &skb_shinfo(skb)->frags[0];
struct phy_device *phydev = adapter->netdev->phydev;
dma_addr_t dma_addr;
struct tx_ring *tx_ring = &adapter->tx_ring;
@@ -2488,11 +2488,11 @@ static int nic_send_packet(struct et131x_adapter *adapter, struct tcb *tcb)
frag++;
}
} else {
- desc[frag].len_vlan = frags[i - 1].size;
+ desc[frag].len_vlan = skb_frag_size(&frags[i - 1]);
dma_addr = skb_frag_dma_map(&adapter->pdev->dev,
&frags[i - 1],
0,
- frags[i - 1].size,
+ desc[frag].len_vlan,
DMA_TO_DEVICE);
desc[frag].addr_lo = lower_32_bits(dma_addr);
desc[frag].addr_hi = upper_32_bits(dma_addr);
diff --git a/drivers/net/ethernet/amd/au1000_eth.c b/drivers/net/ethernet/amd/au1000_eth.c
index 650d1bae5f56..1793950f0582 100644
--- a/drivers/net/ethernet/amd/au1000_eth.c
+++ b/drivers/net/ethernet/amd/au1000_eth.c
@@ -1100,7 +1100,6 @@ static int au1000_probe(struct platform_device *pdev)
irq = platform_get_irq(pdev, 0);
if (irq < 0) {
- dev_err(&pdev->dev, "failed to retrieve IRQ\n");
err = -ENODEV;
goto out;
}
diff --git a/drivers/net/ethernet/amd/ni65.c b/drivers/net/ethernet/amd/ni65.c
index 87ff5d6d1b22..c6c2a54c1121 100644
--- a/drivers/net/ethernet/amd/ni65.c
+++ b/drivers/net/ethernet/amd/ni65.c
@@ -697,16 +697,14 @@ static void ni65_free_buffer(struct priv *p)
for(i=0;i<TMDNUM;i++) {
kfree(p->tmdbounce[i]);
#ifdef XMT_VIA_SKB
- if(p->tmd_skb[i])
- dev_kfree_skb(p->tmd_skb[i]);
+ dev_kfree_skb(p->tmd_skb[i]);
#endif
}
for(i=0;i<RMDNUM;i++)
{
#ifdef RCV_VIA_SKB
- if(p->recv_skb[i])
- dev_kfree_skb(p->recv_skb[i]);
+ dev_kfree_skb(p->recv_skb[i]);
#else
kfree(p->recvbounce[i]);
#endif
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-debugfs.c b/drivers/net/ethernet/amd/xgbe/xgbe-debugfs.c
index b91143947ed2..b0a6c96b6ef4 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-debugfs.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-debugfs.c
@@ -438,7 +438,6 @@ static const struct file_operations xi2c_reg_value_fops = {
void xgbe_debugfs_init(struct xgbe_prv_data *pdata)
{
- struct dentry *pfile;
char *buf;
/* Set defaults */
@@ -451,88 +450,48 @@ void xgbe_debugfs_init(struct xgbe_prv_data *pdata)
return;
pdata->xgbe_debugfs = debugfs_create_dir(buf, NULL);
- if (!pdata->xgbe_debugfs) {
- netdev_err(pdata->netdev, "debugfs_create_dir failed\n");
- kfree(buf);
- return;
- }
- pfile = debugfs_create_file("xgmac_register", 0600,
- pdata->xgbe_debugfs, pdata,
- &xgmac_reg_addr_fops);
- if (!pfile)
- netdev_err(pdata->netdev, "debugfs_create_file failed\n");
+ debugfs_create_file("xgmac_register", 0600, pdata->xgbe_debugfs, pdata,
+ &xgmac_reg_addr_fops);
- pfile = debugfs_create_file("xgmac_register_value", 0600,
- pdata->xgbe_debugfs, pdata,
- &xgmac_reg_value_fops);
- if (!pfile)
- netdev_err(pdata->netdev, "debugfs_create_file failed\n");
+ debugfs_create_file("xgmac_register_value", 0600, pdata->xgbe_debugfs,
+ pdata, &xgmac_reg_value_fops);
- pfile = debugfs_create_file("xpcs_mmd", 0600,
- pdata->xgbe_debugfs, pdata,
- &xpcs_mmd_fops);
- if (!pfile)
- netdev_err(pdata->netdev, "debugfs_create_file failed\n");
+ debugfs_create_file("xpcs_mmd", 0600, pdata->xgbe_debugfs, pdata,
+ &xpcs_mmd_fops);
- pfile = debugfs_create_file("xpcs_register", 0600,
- pdata->xgbe_debugfs, pdata,
- &xpcs_reg_addr_fops);
- if (!pfile)
- netdev_err(pdata->netdev, "debugfs_create_file failed\n");
+ debugfs_create_file("xpcs_register", 0600, pdata->xgbe_debugfs, pdata,
+ &xpcs_reg_addr_fops);
- pfile = debugfs_create_file("xpcs_register_value", 0600,
- pdata->xgbe_debugfs, pdata,
- &xpcs_reg_value_fops);
- if (!pfile)
- netdev_err(pdata->netdev, "debugfs_create_file failed\n");
+ debugfs_create_file("xpcs_register_value", 0600, pdata->xgbe_debugfs,
+ pdata, &xpcs_reg_value_fops);
if (pdata->xprop_regs) {
- pfile = debugfs_create_file("xprop_register", 0600,
- pdata->xgbe_debugfs, pdata,
- &xprop_reg_addr_fops);
- if (!pfile)
- netdev_err(pdata->netdev,
- "debugfs_create_file failed\n");
-
- pfile = debugfs_create_file("xprop_register_value", 0600,
- pdata->xgbe_debugfs, pdata,
- &xprop_reg_value_fops);
- if (!pfile)
- netdev_err(pdata->netdev,
- "debugfs_create_file failed\n");
+ debugfs_create_file("xprop_register", 0600, pdata->xgbe_debugfs,
+ pdata, &xprop_reg_addr_fops);
+
+ debugfs_create_file("xprop_register_value", 0600,
+ pdata->xgbe_debugfs, pdata,
+ &xprop_reg_value_fops);
}
if (pdata->xi2c_regs) {
- pfile = debugfs_create_file("xi2c_register", 0600,
- pdata->xgbe_debugfs, pdata,
- &xi2c_reg_addr_fops);
- if (!pfile)
- netdev_err(pdata->netdev,
- "debugfs_create_file failed\n");
-
- pfile = debugfs_create_file("xi2c_register_value", 0600,
- pdata->xgbe_debugfs, pdata,
- &xi2c_reg_value_fops);
- if (!pfile)
- netdev_err(pdata->netdev,
- "debugfs_create_file failed\n");
+ debugfs_create_file("xi2c_register", 0600, pdata->xgbe_debugfs,
+ pdata, &xi2c_reg_addr_fops);
+
+ debugfs_create_file("xi2c_register_value", 0600,
+ pdata->xgbe_debugfs, pdata,
+ &xi2c_reg_value_fops);
}
if (pdata->vdata->an_cdr_workaround) {
- pfile = debugfs_create_bool("an_cdr_workaround", 0600,
- pdata->xgbe_debugfs,
- &pdata->debugfs_an_cdr_workaround);
- if (!pfile)
- netdev_err(pdata->netdev,
- "debugfs_create_bool failed\n");
-
- pfile = debugfs_create_bool("an_cdr_track_early", 0600,
- pdata->xgbe_debugfs,
- &pdata->debugfs_an_cdr_track_early);
- if (!pfile)
- netdev_err(pdata->netdev,
- "debugfs_create_bool failed\n");
+ debugfs_create_bool("an_cdr_workaround", 0600,
+ pdata->xgbe_debugfs,
+ &pdata->debugfs_an_cdr_workaround);
+
+ debugfs_create_bool("an_cdr_track_early", 0600,
+ pdata->xgbe_debugfs,
+ &pdata->debugfs_an_cdr_track_early);
}
kfree(buf);
@@ -546,7 +505,6 @@ void xgbe_debugfs_exit(struct xgbe_prv_data *pdata)
void xgbe_debugfs_rename(struct xgbe_prv_data *pdata)
{
- struct dentry *pfile;
char *buf;
if (!pdata->xgbe_debugfs)
@@ -559,11 +517,8 @@ void xgbe_debugfs_rename(struct xgbe_prv_data *pdata)
if (!strcmp(pdata->xgbe_debugfs->d_name.name, buf))
goto out;
- pfile = debugfs_rename(pdata->xgbe_debugfs->d_parent,
- pdata->xgbe_debugfs,
- pdata->xgbe_debugfs->d_parent, buf);
- if (!pfile)
- netdev_err(pdata->netdev, "debugfs_rename failed\n");
+ debugfs_rename(pdata->xgbe_debugfs->d_parent, pdata->xgbe_debugfs,
+ pdata->xgbe_debugfs->d_parent, buf);
out:
kfree(buf);
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-desc.c b/drivers/net/ethernet/amd/xgbe/xgbe-desc.c
index 533094233659..230726d7b74f 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-desc.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-desc.c
@@ -526,7 +526,7 @@ static int xgbe_map_tx_skb(struct xgbe_channel *channel, struct sk_buff *skb)
struct xgbe_ring *ring = channel->tx_ring;
struct xgbe_ring_data *rdata;
struct xgbe_packet_data *packet;
- struct skb_frag_struct *frag;
+ skb_frag_t *frag;
dma_addr_t skb_dma;
unsigned int start_index, cur_index;
unsigned int offset, tso, vlan, datalen, len;
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
index 3dd0cecddba8..98f8f2033154 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
@@ -1833,7 +1833,7 @@ static void xgbe_packet_info(struct xgbe_prv_data *pdata,
struct xgbe_ring *ring, struct sk_buff *skb,
struct xgbe_packet_data *packet)
{
- struct skb_frag_struct *frag;
+ skb_frag_t *frag;
unsigned int context_desc;
unsigned int len;
unsigned int i;
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-platform.c b/drivers/net/ethernet/amd/xgbe/xgbe-platform.c
index d0f3dfb88202..4ebd2410185a 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-platform.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-platform.c
@@ -301,7 +301,6 @@ static int xgbe_platform_probe(struct platform_device *pdev)
struct xgbe_prv_data *pdata;
struct device *dev = &pdev->dev;
struct platform_device *phy_pdev;
- struct resource *res;
const char *phy_mode;
unsigned int phy_memnum, phy_irqnum;
unsigned int dma_irqnum, dma_irqend;
@@ -353,8 +352,7 @@ static int xgbe_platform_probe(struct platform_device *pdev)
}
/* Obtain the mmio areas for the device */
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- pdata->xgmac_regs = devm_ioremap_resource(dev, res);
+ pdata->xgmac_regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(pdata->xgmac_regs)) {
dev_err(dev, "xgmac ioremap failed\n");
ret = PTR_ERR(pdata->xgmac_regs);
@@ -363,8 +361,7 @@ static int xgbe_platform_probe(struct platform_device *pdev)
if (netif_msg_probe(pdata))
dev_dbg(dev, "xgmac_regs = %p\n", pdata->xgmac_regs);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
- pdata->xpcs_regs = devm_ioremap_resource(dev, res);
+ pdata->xpcs_regs = devm_platform_ioremap_resource(pdev, 1);
if (IS_ERR(pdata->xpcs_regs)) {
dev_err(dev, "xpcs ioremap failed\n");
ret = PTR_ERR(pdata->xpcs_regs);
@@ -373,8 +370,8 @@ static int xgbe_platform_probe(struct platform_device *pdev)
if (netif_msg_probe(pdata))
dev_dbg(dev, "xpcs_regs = %p\n", pdata->xpcs_regs);
- res = platform_get_resource(phy_pdev, IORESOURCE_MEM, phy_memnum++);
- pdata->rxtx_regs = devm_ioremap_resource(dev, res);
+ pdata->rxtx_regs = devm_platform_ioremap_resource(phy_pdev,
+ phy_memnum++);
if (IS_ERR(pdata->rxtx_regs)) {
dev_err(dev, "rxtx ioremap failed\n");
ret = PTR_ERR(pdata->rxtx_regs);
@@ -383,8 +380,8 @@ static int xgbe_platform_probe(struct platform_device *pdev)
if (netif_msg_probe(pdata))
dev_dbg(dev, "rxtx_regs = %p\n", pdata->rxtx_regs);
- res = platform_get_resource(phy_pdev, IORESOURCE_MEM, phy_memnum++);
- pdata->sir0_regs = devm_ioremap_resource(dev, res);
+ pdata->sir0_regs = devm_platform_ioremap_resource(phy_pdev,
+ phy_memnum++);
if (IS_ERR(pdata->sir0_regs)) {
dev_err(dev, "sir0 ioremap failed\n");
ret = PTR_ERR(pdata->sir0_regs);
@@ -393,8 +390,8 @@ static int xgbe_platform_probe(struct platform_device *pdev)
if (netif_msg_probe(pdata))
dev_dbg(dev, "sir0_regs = %p\n", pdata->sir0_regs);
- res = platform_get_resource(phy_pdev, IORESOURCE_MEM, phy_memnum++);
- pdata->sir1_regs = devm_ioremap_resource(dev, res);
+ pdata->sir1_regs = devm_platform_ioremap_resource(phy_pdev,
+ phy_memnum++);
if (IS_ERR(pdata->sir1_regs)) {
dev_err(dev, "sir1 ioremap failed\n");
ret = PTR_ERR(pdata->sir1_regs);
@@ -467,10 +464,8 @@ static int xgbe_platform_probe(struct platform_device *pdev)
/* Get the device interrupt */
ret = platform_get_irq(pdev, 0);
- if (ret < 0) {
- dev_err(dev, "platform_get_irq 0 failed\n");
+ if (ret < 0)
goto err_io;
- }
pdata->dev_irq = ret;
/* Get the per channel DMA interrupts */
@@ -479,12 +474,8 @@ static int xgbe_platform_probe(struct platform_device *pdev)
for (i = 0; (i < max) && (dma_irqnum < dma_irqend); i++) {
ret = platform_get_irq(pdata->platdev, dma_irqnum++);
- if (ret < 0) {
- netdev_err(pdata->netdev,
- "platform_get_irq %u failed\n",
- dma_irqnum - 1);
+ if (ret < 0)
goto err_io;
- }
pdata->channel_irq[i] = ret;
}
@@ -496,10 +487,8 @@ static int xgbe_platform_probe(struct platform_device *pdev)
/* Get the auto-negotiation interrupt */
ret = platform_get_irq(phy_pdev, phy_irqnum++);
- if (ret < 0) {
- dev_err(dev, "platform_get_irq phy 0 failed\n");
+ if (ret < 0)
goto err_io;
- }
pdata->an_irq = ret;
/* Configure the netdev resource */
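
Two related cleanups run through this probe function. devm_platform_ioremap_resource(pdev, n) collapses the platform_get_resource() + devm_ioremap_resource() pair into one call, and the dev_err()/netdev_err() after platform_get_irq() goes away because the driver core has logged IRQ lookup failures itself since v5.3. A sketch of the combined pattern in a hypothetical probe:

        #include <linux/platform_device.h>
        #include <linux/io.h>

        static int foo_probe(struct platform_device *pdev)
        {
                void __iomem *regs;
                int irq;

                /* platform_get_resource(IORESOURCE_MEM, 0) plus
                 * devm_ioremap_resource() in a single call.
                 */
                regs = devm_platform_ioremap_resource(pdev, 0);
                if (IS_ERR(regs))
                        return PTR_ERR(regs);

                irq = platform_get_irq(pdev, 0);
                if (irq < 0)
                        return irq;     /* core already printed the error */

                return 0;
        }
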
diff --git a/drivers/net/ethernet/apm/xgene-v2/main.c b/drivers/net/ethernet/apm/xgene-v2/main.c
index 79048cc46703..02b4f3af02b5 100644
--- a/drivers/net/ethernet/apm/xgene-v2/main.c
+++ b/drivers/net/ethernet/apm/xgene-v2/main.c
@@ -54,10 +54,8 @@ static int xge_get_resources(struct xge_pdata *pdata)
}
ret = platform_get_irq(pdev, 0);
- if (ret < 0) {
- dev_err(dev, "Unable to get irq\n");
+ if (ret < 0)
return ret;
- }
pdata->resources.irq = ret;
return 0;
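
Same platform_get_irq() message removal as above. Note that the nb8800 hunk further down keeps its `irq <= 0` form and maps a bad IRQ to -EINVAL rather than propagating it; both conventions survive the cleanup, sketched side by side:

        /* Common form: propagate the negative errno. */
        irq = platform_get_irq(pdev, 0);
        if (irq < 0)
                return irq;

        /* Defensive form (as in nb8800): also reject a zero IRQ. */
        irq = platform_get_irq(pdev, 0);
        if (irq <= 0)
                return -EINVAL;
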
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c
index 61a465097cb8..5f657879134e 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c
@@ -712,11 +712,11 @@ static int xgene_enet_reset(struct xgene_enet_pdata *pdata)
udelay(5);
} else {
#ifdef CONFIG_ACPI
- if (acpi_has_method(ACPI_HANDLE(&pdata->pdev->dev), "_RST")) {
- acpi_evaluate_object(ACPI_HANDLE(&pdata->pdev->dev),
- "_RST", NULL, NULL);
- } else if (acpi_has_method(ACPI_HANDLE(&pdata->pdev->dev),
- "_INI")) {
+ acpi_status status;
+
+ status = acpi_evaluate_object(ACPI_HANDLE(&pdata->pdev->dev),
+ "_RST", NULL, NULL);
+ if (ACPI_FAILURE(status)) {
acpi_evaluate_object(ACPI_HANDLE(&pdata->pdev->dev),
"_INI", NULL, NULL);
}
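
This _RST/_INI restructuring (repeated for the sgmac and xgmac variants below) drops the acpi_has_method() probe: acpi_evaluate_object() on a missing method simply returns a failure status, so one call plus ACPI_FAILURE() replaces two namespace walks and removes the check-then-act gap. A sketch against a hypothetical dev:

        #ifdef CONFIG_ACPI
                acpi_status status;

                /* Try _RST directly; absence shows up as a failure
                 * status, so no separate acpi_has_method() is needed.
                 */
                status = acpi_evaluate_object(ACPI_HANDLE(dev), "_RST",
                                              NULL, NULL);
                if (ACPI_FAILURE(status))
                        acpi_evaluate_object(ACPI_HANDLE(dev), "_INI",
                                             NULL, NULL);
        #endif
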
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
index 10b1c053e70a..d8612131c55e 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
@@ -340,7 +340,8 @@ static int xgene_enet_work_msg(struct sk_buff *skb, u64 *hopinfo)
nr_frags = skb_shinfo(skb)->nr_frags;
for (i = 0; i < 2 && i < nr_frags; i++)
- len += skb_shinfo(skb)->frags[i].size;
+ len += skb_frag_size(
+ &skb_shinfo(skb)->frags[i]);
/* HW requires header must reside in 3 buffer */
if (unlikely(hdr_len > len)) {
@@ -1616,7 +1617,6 @@ static int xgene_get_rx_delay(struct xgene_enet_pdata *pdata)
static int xgene_enet_get_irqs(struct xgene_enet_pdata *pdata)
{
struct platform_device *pdev = pdata->pdev;
- struct device *dev = &pdev->dev;
int i, ret, max_irqs;
if (phy_interface_mode_is_rgmii(pdata->phy_mode))
@@ -1636,9 +1636,7 @@ static int xgene_enet_get_irqs(struct xgene_enet_pdata *pdata)
pdata->cq_cnt = max_irqs / 2;
break;
}
- dev_err(dev, "Unable to get ENET IRQ\n");
- ret = ret ? : -ENXIO;
- return ret;
+ return ret ? : -ENXIO;
}
pdata->irqs[i] = ret;
}
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c b/drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c
index 6453fc2ebb1f..f482ced2cadd 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c
@@ -460,12 +460,14 @@ static int xgene_enet_reset(struct xgene_enet_pdata *p)
}
} else {
#ifdef CONFIG_ACPI
- if (acpi_has_method(ACPI_HANDLE(&p->pdev->dev), "_RST"))
- acpi_evaluate_object(ACPI_HANDLE(&p->pdev->dev),
- "_RST", NULL, NULL);
- else if (acpi_has_method(ACPI_HANDLE(&p->pdev->dev), "_INI"))
+ acpi_status status;
+
+ status = acpi_evaluate_object(ACPI_HANDLE(&p->pdev->dev),
+ "_RST", NULL, NULL);
+ if (ACPI_FAILURE(status)) {
acpi_evaluate_object(ACPI_HANDLE(&p->pdev->dev),
"_INI", NULL, NULL);
+ }
#endif
}
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.c b/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.c
index 133eb91c542e..304b5d43f236 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.c
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.c
@@ -393,11 +393,11 @@ static int xgene_enet_reset(struct xgene_enet_pdata *pdata)
udelay(5);
} else {
#ifdef CONFIG_ACPI
- if (acpi_has_method(ACPI_HANDLE(&pdata->pdev->dev), "_RST")) {
- acpi_evaluate_object(ACPI_HANDLE(&pdata->pdev->dev),
- "_RST", NULL, NULL);
- } else if (acpi_has_method(ACPI_HANDLE(&pdata->pdev->dev),
- "_INI")) {
+ acpi_status status;
+
+ status = acpi_evaluate_object(ACPI_HANDLE(&pdata->pdev->dev),
+ "_RST", NULL, NULL);
+ if (ACPI_FAILURE(status)) {
acpi_evaluate_object(ACPI_HANDLE(&pdata->pdev->dev),
"_INI", NULL, NULL);
}
diff --git a/drivers/net/ethernet/apple/bmac.c b/drivers/net/ethernet/apple/bmac.c
index c40daad515d5..a58185b1d8bf 100644
--- a/drivers/net/ethernet/apple/bmac.c
+++ b/drivers/net/ethernet/apple/bmac.c
@@ -815,8 +815,8 @@ static int reverse6[64] = {
static unsigned int
crc416(unsigned int curval, unsigned short nxtval)
{
- register unsigned int counter, cur = curval, next = nxtval;
- register int high_crc_set, low_data_set;
+ unsigned int counter, cur = curval, next = nxtval;
+ int high_crc_set, low_data_set;
/* Swap bytes */
next = ((next & 0x00FF) << 8) | (next >> 8);
diff --git a/drivers/net/ethernet/atheros/ag71xx.c b/drivers/net/ethernet/atheros/ag71xx.c
index 6703960c7cf5..7548247455d7 100644
--- a/drivers/net/ethernet/atheros/ag71xx.c
+++ b/drivers/net/ethernet/atheros/ag71xx.c
@@ -1148,7 +1148,7 @@ static int ag71xx_rings_init(struct ag71xx *ag)
return -ENOMEM;
}
- rx->buf = &tx->buf[BIT(tx->order)];
+ rx->buf = &tx->buf[tx_size];
rx->descs_cpu = ((void *)tx->descs_cpu) + tx_size * AG71XX_DESC_SIZE;
rx->descs_dma = tx->descs_dma + tx_size * AG71XX_DESC_SIZE;
@@ -1686,7 +1686,7 @@ static int ag71xx_probe(struct platform_device *pdev)
}
ag->mac_base = devm_ioremap_nocache(&pdev->dev, res->start,
- res->end - res->start + 1);
+ resource_size(res));
if (!ag->mac_base) {
err = -ENOMEM;
goto err_free;
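
Two independent ag71xx cleanups here: rx->buf now indexes with the existing tx_size local, matching the tx_size-based descriptor offsets computed just below it, and the ioremap length uses resource_size() instead of the open-coded end - start + 1. A sketch of the helper, which encodes the same inclusive-range arithmetic:

        #include <linux/ioport.h>

        /* resource_size(res) == res->end - res->start + 1; resource
         * ranges are inclusive at both ends, hence the +1.
         */
        static resource_size_t foo_len(const struct resource *res)
        {
                return resource_size(res);
        }
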
diff --git a/drivers/net/ethernet/atheros/alx/main.c b/drivers/net/ethernet/atheros/alx/main.c
index e3538ba7d0e7..d4bbcdfd691a 100644
--- a/drivers/net/ethernet/atheros/alx/main.c
+++ b/drivers/net/ethernet/atheros/alx/main.c
@@ -1465,9 +1465,7 @@ static int alx_map_tx_skb(struct alx_tx_queue *txq, struct sk_buff *skb)
tpd->len = cpu_to_le16(maplen);
for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) {
- struct skb_frag_struct *frag;
-
- frag = &skb_shinfo(skb)->frags[f];
+ skb_frag_t *frag = &skb_shinfo(skb)->frags[f];
if (++txq->write_idx == txq->count)
txq->write_idx = 0;
@@ -1879,8 +1877,7 @@ static void alx_remove(struct pci_dev *pdev)
#ifdef CONFIG_PM_SLEEP
static int alx_suspend(struct device *dev)
{
- struct pci_dev *pdev = to_pci_dev(dev);
- struct alx_priv *alx = pci_get_drvdata(pdev);
+ struct alx_priv *alx = dev_get_drvdata(dev);
if (!netif_running(alx->dev))
return 0;
@@ -1891,8 +1888,7 @@ static int alx_suspend(struct device *dev)
static int alx_resume(struct device *dev)
{
- struct pci_dev *pdev = to_pci_dev(dev);
- struct alx_priv *alx = pci_get_drvdata(pdev);
+ struct alx_priv *alx = dev_get_drvdata(dev);
struct alx_hw *hw = &alx->hw;
int err;
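
The suspend/resume conversions here and in the atl1c/atl1e/atl1/bnx2 hunks below rely on pci_set_drvdata() storing through dev_set_drvdata(&pdev->dev, ...), so a dev_pm_ops callback can read the driver data straight off its struct device argument and skip the to_pci_dev()/pci_get_drvdata() round-trip. A minimal sketch with a hypothetical private struct:

        #include <linux/device.h>

        struct foo_priv { bool wol_enabled; };  /* hypothetical state */

        static int foo_suspend(struct device *dev)
        {
                /* Same pointer pci_get_drvdata(to_pci_dev(dev)) returns. */
                struct foo_priv *priv = dev_get_drvdata(dev);

                priv->wol_enabled = false;
                return 0;
        }
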
diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
index be7f9cebb675..2b239ecea05f 100644
--- a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
+++ b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
@@ -2150,9 +2150,7 @@ static int atl1c_tx_map(struct atl1c_adapter *adapter,
}
for (f = 0; f < nr_frags; f++) {
- struct skb_frag_struct *frag;
-
- frag = &skb_shinfo(skb)->frags[f];
+ skb_frag_t *frag = &skb_shinfo(skb)->frags[f];
use_tpd = atl1c_get_tpd(adapter, type);
memcpy(use_tpd, tpd, sizeof(struct atl1c_tpd_desc));
@@ -2422,8 +2420,7 @@ static int atl1c_close(struct net_device *netdev)
static int atl1c_suspend(struct device *dev)
{
- struct pci_dev *pdev = to_pci_dev(dev);
- struct net_device *netdev = pci_get_drvdata(pdev);
+ struct net_device *netdev = dev_get_drvdata(dev);
struct atl1c_adapter *adapter = netdev_priv(netdev);
struct atl1c_hw *hw = &adapter->hw;
u32 wufc = adapter->wol;
@@ -2437,7 +2434,7 @@ static int atl1c_suspend(struct device *dev)
if (wufc)
if (atl1c_phy_to_ps_link(hw) != 0)
- dev_dbg(&pdev->dev, "phy power saving failed");
+ dev_dbg(dev, "phy power saving failed");
atl1c_power_saving(hw, wufc);
@@ -2447,8 +2444,7 @@ static int atl1c_suspend(struct device *dev)
#ifdef CONFIG_PM_SLEEP
static int atl1c_resume(struct device *dev)
{
- struct pci_dev *pdev = to_pci_dev(dev);
- struct net_device *netdev = pci_get_drvdata(pdev);
+ struct net_device *netdev = dev_get_drvdata(dev);
struct atl1c_adapter *adapter = netdev_priv(netdev);
AT_WRITE_REG(&adapter->hw, REG_WOL_CTRL, 0);
diff --git a/drivers/net/ethernet/atheros/atl1e/atl1e_main.c b/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
index 7f14e010bfeb..4f7b65825c15 100644
--- a/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
+++ b/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
@@ -1770,11 +1770,10 @@ static int atl1e_tx_map(struct atl1e_adapter *adapter,
}
for (f = 0; f < nr_frags; f++) {
- const struct skb_frag_struct *frag;
+ const skb_frag_t *frag = &skb_shinfo(skb)->frags[f];
u16 i;
u16 seg_num;
- frag = &skb_shinfo(skb)->frags[f];
buf_len = skb_frag_size(frag);
seg_num = (buf_len + MAX_TX_BUF_LEN - 1) / MAX_TX_BUF_LEN;
diff --git a/drivers/net/ethernet/atheros/atlx/atl1.c b/drivers/net/ethernet/atheros/atlx/atl1.c
index b5c6dc914720..b498fd6a47d0 100644
--- a/drivers/net/ethernet/atheros/atlx/atl1.c
+++ b/drivers/net/ethernet/atheros/atlx/atl1.c
@@ -2256,10 +2256,9 @@ static void atl1_tx_map(struct atl1_adapter *adapter, struct sk_buff *skb,
}
for (f = 0; f < nr_frags; f++) {
- const struct skb_frag_struct *frag;
+ const skb_frag_t *frag = &skb_shinfo(skb)->frags[f];
u16 i, nseg;
- frag = &skb_shinfo(skb)->frags[f];
buf_len = skb_frag_size(frag);
nseg = (buf_len + ATL1_MAX_TX_BUF_LEN - 1) /
@@ -2754,8 +2753,7 @@ static int atl1_close(struct net_device *netdev)
#ifdef CONFIG_PM_SLEEP
static int atl1_suspend(struct device *dev)
{
- struct pci_dev *pdev = to_pci_dev(dev);
- struct net_device *netdev = pci_get_drvdata(pdev);
+ struct net_device *netdev = dev_get_drvdata(dev);
struct atl1_adapter *adapter = netdev_priv(netdev);
struct atl1_hw *hw = &adapter->hw;
u32 ctrl = 0;
@@ -2780,7 +2778,7 @@ static int atl1_suspend(struct device *dev)
val = atl1_get_speed_and_duplex(hw, &speed, &duplex);
if (val) {
if (netif_msg_ifdown(adapter))
- dev_printk(KERN_DEBUG, &pdev->dev,
+ dev_printk(KERN_DEBUG, dev,
"error getting speed/duplex\n");
goto disable_wol;
}
@@ -2837,8 +2835,7 @@ static int atl1_suspend(struct device *dev)
static int atl1_resume(struct device *dev)
{
- struct pci_dev *pdev = to_pci_dev(dev);
- struct net_device *netdev = pci_get_drvdata(pdev);
+ struct net_device *netdev = dev_get_drvdata(dev);
struct atl1_adapter *adapter = netdev_priv(netdev);
iowrite32(0, adapter->hw.hw_addr + REG_WOL_CTRL);
diff --git a/drivers/net/ethernet/aurora/nb8800.c b/drivers/net/ethernet/aurora/nb8800.c
index 3b3370a94a9c..37752d9514e7 100644
--- a/drivers/net/ethernet/aurora/nb8800.c
+++ b/drivers/net/ethernet/aurora/nb8800.c
@@ -1351,10 +1351,8 @@ static int nb8800_probe(struct platform_device *pdev)
ops = match->data;
irq = platform_get_irq(pdev, 0);
- if (irq <= 0) {
- dev_err(&pdev->dev, "No IRQ\n");
+ if (irq <= 0)
return -EINVAL;
- }
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
base = devm_ioremap_resource(&pdev->dev, res);
diff --git a/drivers/net/ethernet/broadcom/bcm63xx_enet.c b/drivers/net/ethernet/broadcom/bcm63xx_enet.c
index 291e4afd4a1a..620cd3fc1fbc 100644
--- a/drivers/net/ethernet/broadcom/bcm63xx_enet.c
+++ b/drivers/net/ethernet/broadcom/bcm63xx_enet.c
@@ -1693,7 +1693,7 @@ static int bcm_enet_probe(struct platform_device *pdev)
struct bcm_enet_priv *priv;
struct net_device *dev;
struct bcm63xx_enet_platform_data *pd;
- struct resource *res_mem, *res_irq, *res_irq_rx, *res_irq_tx;
+ struct resource *res_irq, *res_irq_rx, *res_irq_tx;
struct mii_bus *bus;
int i, ret;
@@ -1719,8 +1719,7 @@ static int bcm_enet_probe(struct platform_device *pdev)
if (ret)
goto out;
- res_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- priv->base = devm_ioremap_resource(&pdev->dev, res_mem);
+ priv->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(priv->base)) {
ret = PTR_ERR(priv->base);
goto out;
@@ -2762,15 +2761,13 @@ struct platform_driver bcm63xx_enetsw_driver = {
/* reserve & remap memory space shared between all macs */
static int bcm_enet_shared_probe(struct platform_device *pdev)
{
- struct resource *res;
void __iomem *p[3];
unsigned int i;
memset(bcm_enet_shared_base, 0, sizeof(bcm_enet_shared_base));
for (i = 0; i < 3; i++) {
- res = platform_get_resource(pdev, IORESOURCE_MEM, i);
- p[i] = devm_ioremap_resource(&pdev->dev, res);
+ p[i] = devm_platform_ioremap_resource(pdev, i);
if (IS_ERR(p[i]))
return PTR_ERR(p[i]);
}
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c
index 9483553ce444..7df887e4024c 100644
--- a/drivers/net/ethernet/broadcom/bcmsysport.c
+++ b/drivers/net/ethernet/broadcom/bcmsysport.c
@@ -708,8 +708,7 @@ static int bcm_sysport_alloc_rx_bufs(struct bcm_sysport_priv *priv)
for (i = 0; i < priv->num_rx_bds; i++) {
cb = &priv->rx_cbs[i];
skb = bcm_sysport_rx_refill(priv, cb);
- if (skb)
- dev_kfree_skb(skb);
+ dev_kfree_skb(skb);
if (!cb->skb)
return -ENOMEM;
}
@@ -2420,12 +2419,10 @@ static int bcm_sysport_probe(struct platform_device *pdev)
struct device_node *dn;
struct net_device *dev;
const void *macaddr;
- struct resource *r;
u32 txq, rxq;
int ret;
dn = pdev->dev.of_node;
- r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
of_id = of_match_node(bcm_sysport_of_match, dn);
if (!of_id || !of_id->data)
return -EINVAL;
@@ -2473,7 +2470,7 @@ static int bcm_sysport_probe(struct platform_device *pdev)
goto err_free_netdev;
}
- priv->base = devm_ioremap_resource(&pdev->dev, r);
+ priv->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(priv->base)) {
ret = PTR_ERR(priv->base);
goto err_free_netdev;
diff --git a/drivers/net/ethernet/broadcom/bgmac-platform.c b/drivers/net/ethernet/broadcom/bgmac-platform.c
index 6dc0dd91ad11..c46c1b1416f7 100644
--- a/drivers/net/ethernet/broadcom/bgmac-platform.c
+++ b/drivers/net/ethernet/broadcom/bgmac-platform.c
@@ -199,10 +199,8 @@ static int bgmac_probe(struct platform_device *pdev)
dev_warn(&pdev->dev, "MAC address not present in device tree\n");
bgmac->irq = platform_get_irq(pdev, 0);
- if (bgmac->irq < 0) {
- dev_err(&pdev->dev, "Unable to obtain IRQ\n");
+ if (bgmac->irq < 0)
return bgmac->irq;
- }
regs = platform_get_resource_byname(pdev, IORESOURCE_MEM, "amac_base");
if (!regs) {
diff --git a/drivers/net/ethernet/broadcom/bgmac.c b/drivers/net/ethernet/broadcom/bgmac.c
index 4632dd5dbad1..148734b166f0 100644
--- a/drivers/net/ethernet/broadcom/bgmac.c
+++ b/drivers/net/ethernet/broadcom/bgmac.c
@@ -172,7 +172,7 @@ static netdev_tx_t bgmac_dma_tx_add(struct bgmac *bgmac,
flags = 0;
for (i = 0; i < nr_frags; i++) {
- struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
+ skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
int len = skb_frag_size(frag);
index = (index + 1) % BGMAC_TX_RING_SLOTS;
diff --git a/drivers/net/ethernet/broadcom/bnx2.c b/drivers/net/ethernet/broadcom/bnx2.c
index dfdd14eadd57..fbc196b480b6 100644
--- a/drivers/net/ethernet/broadcom/bnx2.c
+++ b/drivers/net/ethernet/broadcom/bnx2.c
@@ -8673,8 +8673,7 @@ bnx2_remove_one(struct pci_dev *pdev)
static int
bnx2_suspend(struct device *device)
{
- struct pci_dev *pdev = to_pci_dev(device);
- struct net_device *dev = pci_get_drvdata(pdev);
+ struct net_device *dev = dev_get_drvdata(device);
struct bnx2 *bp = netdev_priv(dev);
if (netif_running(dev)) {
@@ -8693,8 +8692,7 @@ bnx2_suspend(struct device *device)
static int
bnx2_resume(struct device *device)
{
- struct pci_dev *pdev = to_pci_dev(device);
- struct net_device *dev = pci_get_drvdata(pdev);
+ struct net_device *dev = dev_get_drvdata(device);
struct bnx2 *bp = netdev_priv(dev);
if (!netif_running(dev))
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index 8dce4069472b..402d9f50d92c 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -116,6 +116,9 @@ enum board_idx {
BCM57508,
BCM57504,
BCM57502,
+ BCM57508_NPAR,
+ BCM57504_NPAR,
+ BCM57502_NPAR,
BCM58802,
BCM58804,
BCM58808,
@@ -161,6 +164,9 @@ static const struct {
[BCM57508] = { "Broadcom BCM57508 NetXtreme-E 10Gb/25Gb/50Gb/100Gb/200Gb Ethernet" },
[BCM57504] = { "Broadcom BCM57504 NetXtreme-E 10Gb/25Gb/50Gb/100Gb/200Gb Ethernet" },
[BCM57502] = { "Broadcom BCM57502 NetXtreme-E 10Gb/25Gb/50Gb Ethernet" },
+ [BCM57508_NPAR] = { "Broadcom BCM57508 NetXtreme-E Ethernet Partition" },
+ [BCM57504_NPAR] = { "Broadcom BCM57504 NetXtreme-E Ethernet Partition" },
+ [BCM57502_NPAR] = { "Broadcom BCM57502 NetXtreme-E Ethernet Partition" },
[BCM58802] = { "Broadcom BCM58802 NetXtreme-S 10Gb/25Gb/40Gb/50Gb Ethernet" },
[BCM58804] = { "Broadcom BCM58804 NetXtreme-S 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" },
[BCM58808] = { "Broadcom BCM58808 NetXtreme-S 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" },
@@ -209,6 +215,12 @@ static const struct pci_device_id bnxt_pci_tbl[] = {
{ PCI_VDEVICE(BROADCOM, 0x1750), .driver_data = BCM57508 },
{ PCI_VDEVICE(BROADCOM, 0x1751), .driver_data = BCM57504 },
{ PCI_VDEVICE(BROADCOM, 0x1752), .driver_data = BCM57502 },
+ { PCI_VDEVICE(BROADCOM, 0x1800), .driver_data = BCM57508_NPAR },
+ { PCI_VDEVICE(BROADCOM, 0x1801), .driver_data = BCM57504_NPAR },
+ { PCI_VDEVICE(BROADCOM, 0x1802), .driver_data = BCM57502_NPAR },
+ { PCI_VDEVICE(BROADCOM, 0x1803), .driver_data = BCM57508_NPAR },
+ { PCI_VDEVICE(BROADCOM, 0x1804), .driver_data = BCM57504_NPAR },
+ { PCI_VDEVICE(BROADCOM, 0x1805), .driver_data = BCM57502_NPAR },
{ PCI_VDEVICE(BROADCOM, 0xd802), .driver_data = BCM58802 },
{ PCI_VDEVICE(BROADCOM, 0xd804), .driver_data = BCM58804 },
#ifdef CONFIG_BNXT_SRIOV
@@ -242,6 +254,8 @@ static const u16 bnxt_async_events_arr[] = {
ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED,
ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE,
ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE,
+ ASYNC_EVENT_CMPL_EVENT_ID_RESET_NOTIFY,
+ ASYNC_EVENT_CMPL_EVENT_ID_ERROR_RECOVERY,
};
static struct workqueue_struct *bnxt_pf_wq;
@@ -828,16 +842,41 @@ static inline int bnxt_alloc_rx_page(struct bnxt *bp,
return 0;
}
-static void bnxt_reuse_rx_agg_bufs(struct bnxt_cp_ring_info *cpr, u16 cp_cons,
- u32 agg_bufs)
+static struct rx_agg_cmp *bnxt_get_agg(struct bnxt *bp,
+ struct bnxt_cp_ring_info *cpr,
+ u16 cp_cons, u16 curr)
+{
+ struct rx_agg_cmp *agg;
+
+ cp_cons = RING_CMP(ADV_RAW_CMP(cp_cons, curr));
+ agg = (struct rx_agg_cmp *)
+ &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
+ return agg;
+}
+
+static struct rx_agg_cmp *bnxt_get_tpa_agg_p5(struct bnxt *bp,
+ struct bnxt_rx_ring_info *rxr,
+ u16 agg_id, u16 curr)
+{
+ struct bnxt_tpa_info *tpa_info = &rxr->rx_tpa[agg_id];
+
+ return &tpa_info->agg_arr[curr];
+}
+
+static void bnxt_reuse_rx_agg_bufs(struct bnxt_cp_ring_info *cpr, u16 idx,
+ u16 start, u32 agg_bufs, bool tpa)
{
struct bnxt_napi *bnapi = cpr->bnapi;
struct bnxt *bp = bnapi->bp;
struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
u16 prod = rxr->rx_agg_prod;
u16 sw_prod = rxr->rx_sw_agg_prod;
+ bool p5_tpa = false;
u32 i;
+ if ((bp->flags & BNXT_FLAG_CHIP_P5) && tpa)
+ p5_tpa = true;
+
for (i = 0; i < agg_bufs; i++) {
u16 cons;
struct rx_agg_cmp *agg;
@@ -845,8 +884,10 @@ static void bnxt_reuse_rx_agg_bufs(struct bnxt_cp_ring_info *cpr, u16 cp_cons,
struct rx_bd *prod_bd;
struct page *page;
- agg = (struct rx_agg_cmp *)
- &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
+ if (p5_tpa)
+ agg = bnxt_get_tpa_agg_p5(bp, rxr, idx, start + i);
+ else
+ agg = bnxt_get_agg(bp, cpr, idx, start + i);
cons = agg->rx_agg_cmp_opaque;
__clear_bit(cons, rxr->rx_agg_bmap);
@@ -874,7 +915,6 @@ static void bnxt_reuse_rx_agg_bufs(struct bnxt_cp_ring_info *cpr, u16 cp_cons,
prod = NEXT_RX_AGG(prod);
sw_prod = NEXT_RX_AGG(sw_prod);
- cp_cons = NEXT_CMP(cp_cons);
}
rxr->rx_agg_prod = prod;
rxr->rx_sw_agg_prod = sw_prod;
@@ -888,7 +928,7 @@ static struct sk_buff *bnxt_rx_page_skb(struct bnxt *bp,
{
unsigned int payload = offset_and_len >> 16;
unsigned int len = offset_and_len & 0xffff;
- struct skb_frag_struct *frag;
+ skb_frag_t *frag;
struct page *page = data;
u16 prod = rxr->rx_prod;
struct sk_buff *skb;
@@ -919,7 +959,7 @@ static struct sk_buff *bnxt_rx_page_skb(struct bnxt *bp,
frag = &skb_shinfo(skb)->frags[0];
skb_frag_size_sub(frag, payload);
- frag->page_offset += payload;
+ skb_frag_off_add(frag, payload);
skb->data_len -= payload;
skb->tail += payload;
@@ -957,15 +997,19 @@ static struct sk_buff *bnxt_rx_skb(struct bnxt *bp,
static struct sk_buff *bnxt_rx_pages(struct bnxt *bp,
struct bnxt_cp_ring_info *cpr,
- struct sk_buff *skb, u16 cp_cons,
- u32 agg_bufs)
+ struct sk_buff *skb, u16 idx,
+ u32 agg_bufs, bool tpa)
{
struct bnxt_napi *bnapi = cpr->bnapi;
struct pci_dev *pdev = bp->pdev;
struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
u16 prod = rxr->rx_agg_prod;
+ bool p5_tpa = false;
u32 i;
+ if ((bp->flags & BNXT_FLAG_CHIP_P5) && tpa)
+ p5_tpa = true;
+
for (i = 0; i < agg_bufs; i++) {
u16 cons, frag_len;
struct rx_agg_cmp *agg;
@@ -973,8 +1017,10 @@ static struct sk_buff *bnxt_rx_pages(struct bnxt *bp,
struct page *page;
dma_addr_t mapping;
- agg = (struct rx_agg_cmp *)
- &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
+ if (p5_tpa)
+ agg = bnxt_get_tpa_agg_p5(bp, rxr, idx, i);
+ else
+ agg = bnxt_get_agg(bp, cpr, idx, i);
cons = agg->rx_agg_cmp_opaque;
frag_len = (le32_to_cpu(agg->rx_agg_cmp_len_flags_type) &
RX_AGG_CMP_LEN) >> RX_AGG_CMP_LEN_SHIFT;
@@ -1008,7 +1054,7 @@ static struct sk_buff *bnxt_rx_pages(struct bnxt *bp,
* allocated already.
*/
rxr->rx_agg_prod = prod;
- bnxt_reuse_rx_agg_bufs(cpr, cp_cons, agg_bufs - i);
+ bnxt_reuse_rx_agg_bufs(cpr, idx, i, agg_bufs - i, tpa);
return NULL;
}
@@ -1021,7 +1067,6 @@ static struct sk_buff *bnxt_rx_pages(struct bnxt *bp,
skb->truesize += PAGE_SIZE;
prod = NEXT_RX_AGG(prod);
- cp_cons = NEXT_CMP(cp_cons);
}
rxr->rx_agg_prod = prod;
return skb;
@@ -1081,9 +1126,10 @@ static int bnxt_discard_rx(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
} else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) {
struct rx_tpa_end_cmp *tpa_end = cmp;
- agg_bufs = (le32_to_cpu(tpa_end->rx_tpa_end_cmp_misc_v1) &
- RX_TPA_END_CMP_AGG_BUFS) >>
- RX_TPA_END_CMP_AGG_BUFS_SHIFT;
+ if (bp->flags & BNXT_FLAG_CHIP_P5)
+ return 0;
+
+ agg_bufs = TPA_END_AGG_BUFS(tpa_end);
}
if (agg_bufs) {
@@ -1094,6 +1140,14 @@ static int bnxt_discard_rx(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
return 0;
}
+static void bnxt_queue_fw_reset_work(struct bnxt *bp, unsigned long delay)
+{
+ if (BNXT_PF(bp))
+ queue_delayed_work(bnxt_pf_wq, &bp->fw_reset_task, delay);
+ else
+ schedule_delayed_work(&bp->fw_reset_task, delay);
+}
+
static void bnxt_queue_sp_work(struct bnxt *bp)
{
if (BNXT_PF(bp))
@@ -1120,26 +1174,60 @@ static void bnxt_sched_reset(struct bnxt *bp, struct bnxt_rx_ring_info *rxr)
rxr->rx_next_cons = 0xffff;
}
+static u16 bnxt_alloc_agg_idx(struct bnxt_rx_ring_info *rxr, u16 agg_id)
+{
+ struct bnxt_tpa_idx_map *map = rxr->rx_tpa_idx_map;
+ u16 idx = agg_id & MAX_TPA_P5_MASK;
+
+ if (test_bit(idx, map->agg_idx_bmap))
+ idx = find_first_zero_bit(map->agg_idx_bmap,
+ BNXT_AGG_IDX_BMAP_SIZE);
+ __set_bit(idx, map->agg_idx_bmap);
+ map->agg_id_tbl[agg_id] = idx;
+ return idx;
+}
+
+static void bnxt_free_agg_idx(struct bnxt_rx_ring_info *rxr, u16 idx)
+{
+ struct bnxt_tpa_idx_map *map = rxr->rx_tpa_idx_map;
+
+ __clear_bit(idx, map->agg_idx_bmap);
+}
+
+static u16 bnxt_lookup_agg_idx(struct bnxt_rx_ring_info *rxr, u16 agg_id)
+{
+ struct bnxt_tpa_idx_map *map = rxr->rx_tpa_idx_map;
+
+ return map->agg_id_tbl[agg_id];
+}
+
static void bnxt_tpa_start(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
struct rx_tpa_start_cmp *tpa_start,
struct rx_tpa_start_cmp_ext *tpa_start1)
{
- u8 agg_id = TPA_START_AGG_ID(tpa_start);
- u16 cons, prod;
- struct bnxt_tpa_info *tpa_info;
struct bnxt_sw_rx_bd *cons_rx_buf, *prod_rx_buf;
+ struct bnxt_tpa_info *tpa_info;
+ u16 cons, prod, agg_id;
struct rx_bd *prod_bd;
dma_addr_t mapping;
+ if (bp->flags & BNXT_FLAG_CHIP_P5) {
+ agg_id = TPA_START_AGG_ID_P5(tpa_start);
+ agg_id = bnxt_alloc_agg_idx(rxr, agg_id);
+ } else {
+ agg_id = TPA_START_AGG_ID(tpa_start);
+ }
cons = tpa_start->rx_tpa_start_cmp_opaque;
prod = rxr->rx_prod;
cons_rx_buf = &rxr->rx_buf_ring[cons];
prod_rx_buf = &rxr->rx_buf_ring[prod];
tpa_info = &rxr->rx_tpa[agg_id];
- if (unlikely(cons != rxr->rx_next_cons)) {
- netdev_warn(bp->dev, "TPA cons %x != expected cons %x\n",
- cons, rxr->rx_next_cons);
+ if (unlikely(cons != rxr->rx_next_cons ||
+ TPA_START_ERROR(tpa_start))) {
+ netdev_warn(bp->dev, "TPA cons %x, expected cons %x, error code %x\n",
+ cons, rxr->rx_next_cons,
+ TPA_START_ERROR_CODE(tpa_start1));
bnxt_sched_reset(bp, rxr);
return;
}
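
On P5 (57500) chips the hardware hands back a 16-bit agg_id that can exceed the driver's TPA table, so the helpers above remap it to a dense index: take the low bits, fall back to find_first_zero_bit() on collision, and record the mapping so the TPA_END completion can look it up again. A generic sketch of the same idea with hypothetical sizes:

        #include <linux/types.h>
        #include <linux/bitmap.h>
        #include <linux/bitops.h>

        #define FOO_SLOTS       256             /* hypothetical table size */

        struct foo_idx_map {
                DECLARE_BITMAP(bmap, FOO_SLOTS);
                u16 tbl[1024];                  /* hw id -> dense index */
        };

        static u16 foo_alloc_idx(struct foo_idx_map *map, u16 hw_id)
        {
                u16 idx = hw_id & (FOO_SLOTS - 1);

                if (test_bit(idx, map->bmap))   /* low-bits slot taken */
                        idx = find_first_zero_bit(map->bmap, FOO_SLOTS);
                __set_bit(idx, map->bmap);
                map->tbl[hw_id] = idx;          /* kept for the END event */
                return idx;
        }
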
@@ -1184,6 +1272,7 @@ static void bnxt_tpa_start(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
tpa_info->flags2 = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_flags2);
tpa_info->metadata = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_metadata);
tpa_info->hdr_info = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_hdr_info);
+ tpa_info->agg_count = 0;
rxr->rx_prod = NEXT_RX(prod);
cons = NEXT_RX(cons);
@@ -1195,13 +1284,37 @@ static void bnxt_tpa_start(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
cons_rx_buf->data = NULL;
}
-static void bnxt_abort_tpa(struct bnxt_cp_ring_info *cpr, u16 cp_cons,
- u32 agg_bufs)
+static void bnxt_abort_tpa(struct bnxt_cp_ring_info *cpr, u16 idx, u32 agg_bufs)
{
if (agg_bufs)
- bnxt_reuse_rx_agg_bufs(cpr, cp_cons, agg_bufs);
+ bnxt_reuse_rx_agg_bufs(cpr, idx, 0, agg_bufs, true);
}
+#ifdef CONFIG_INET
+static void bnxt_gro_tunnel(struct sk_buff *skb, __be16 ip_proto)
+{
+ struct udphdr *uh = NULL;
+
+ if (ip_proto == htons(ETH_P_IP)) {
+ struct iphdr *iph = (struct iphdr *)skb->data;
+
+ if (iph->protocol == IPPROTO_UDP)
+ uh = (struct udphdr *)(iph + 1);
+ } else {
+ struct ipv6hdr *iph = (struct ipv6hdr *)skb->data;
+
+ if (iph->nexthdr == IPPROTO_UDP)
+ uh = (struct udphdr *)(iph + 1);
+ }
+ if (uh) {
+ if (uh->check)
+ skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL_CSUM;
+ else
+ skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL;
+ }
+}
+#endif
+
static struct sk_buff *bnxt_gro_func_5731x(struct bnxt_tpa_info *tpa_info,
int payload_off, int tcp_ts,
struct sk_buff *skb)
@@ -1259,28 +1372,39 @@ static struct sk_buff *bnxt_gro_func_5731x(struct bnxt_tpa_info *tpa_info,
}
if (inner_mac_off) { /* tunnel */
- struct udphdr *uh = NULL;
__be16 proto = *((__be16 *)(skb->data + outer_ip_off -
ETH_HLEN - 2));
- if (proto == htons(ETH_P_IP)) {
- struct iphdr *iph = (struct iphdr *)skb->data;
+ bnxt_gro_tunnel(skb, proto);
+ }
+#endif
+ return skb;
+}
- if (iph->protocol == IPPROTO_UDP)
- uh = (struct udphdr *)(iph + 1);
- } else {
- struct ipv6hdr *iph = (struct ipv6hdr *)skb->data;
+static struct sk_buff *bnxt_gro_func_5750x(struct bnxt_tpa_info *tpa_info,
+ int payload_off, int tcp_ts,
+ struct sk_buff *skb)
+{
+#ifdef CONFIG_INET
+ u16 outer_ip_off, inner_ip_off, inner_mac_off;
+ u32 hdr_info = tpa_info->hdr_info;
+ int iphdr_len, nw_off;
- if (iph->nexthdr == IPPROTO_UDP)
- uh = (struct udphdr *)(iph + 1);
- }
- if (uh) {
- if (uh->check)
- skb_shinfo(skb)->gso_type |=
- SKB_GSO_UDP_TUNNEL_CSUM;
- else
- skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL;
- }
+ inner_ip_off = BNXT_TPA_INNER_L3_OFF(hdr_info);
+ inner_mac_off = BNXT_TPA_INNER_L2_OFF(hdr_info);
+ outer_ip_off = BNXT_TPA_OUTER_L3_OFF(hdr_info);
+
+ nw_off = inner_ip_off - ETH_HLEN;
+ skb_set_network_header(skb, nw_off);
+ iphdr_len = (tpa_info->flags2 & RX_TPA_START_CMP_FLAGS2_IP_TYPE) ?
+ sizeof(struct ipv6hdr) : sizeof(struct iphdr);
+ skb_set_transport_header(skb, nw_off + iphdr_len);
+
+ if (inner_mac_off) { /* tunnel */
+ __be16 proto = *((__be16 *)(skb->data + outer_ip_off -
+ ETH_HLEN - 2));
+
+ bnxt_gro_tunnel(skb, proto);
}
#endif
return skb;
@@ -1327,28 +1451,8 @@ static struct sk_buff *bnxt_gro_func_5730x(struct bnxt_tpa_info *tpa_info,
return NULL;
}
- if (nw_off) { /* tunnel */
- struct udphdr *uh = NULL;
-
- if (skb->protocol == htons(ETH_P_IP)) {
- struct iphdr *iph = (struct iphdr *)skb->data;
-
- if (iph->protocol == IPPROTO_UDP)
- uh = (struct udphdr *)(iph + 1);
- } else {
- struct ipv6hdr *iph = (struct ipv6hdr *)skb->data;
-
- if (iph->nexthdr == IPPROTO_UDP)
- uh = (struct udphdr *)(iph + 1);
- }
- if (uh) {
- if (uh->check)
- skb_shinfo(skb)->gso_type |=
- SKB_GSO_UDP_TUNNEL_CSUM;
- else
- skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL;
- }
- }
+ if (nw_off) /* tunnel */
+ bnxt_gro_tunnel(skb, skb->protocol);
#endif
return skb;
}
@@ -1371,9 +1475,10 @@ static inline struct sk_buff *bnxt_gro_skb(struct bnxt *bp,
skb_shinfo(skb)->gso_size =
le32_to_cpu(tpa_end1->rx_tpa_end_cmp_seg_len);
skb_shinfo(skb)->gso_type = tpa_info->gso_type;
- payload_off = (le32_to_cpu(tpa_end->rx_tpa_end_cmp_misc_v1) &
- RX_TPA_END_CMP_PAYLOAD_OFFSET) >>
- RX_TPA_END_CMP_PAYLOAD_OFFSET_SHIFT;
+ if (bp->flags & BNXT_FLAG_CHIP_P5)
+ payload_off = TPA_END_PAYLOAD_OFF_P5(tpa_end1);
+ else
+ payload_off = TPA_END_PAYLOAD_OFF(tpa_end);
skb = bp->gro_func(tpa_info, payload_off, TPA_END_GRO_TS(tpa_end), skb);
if (likely(skb))
tcp_gro_complete(skb);
@@ -1401,14 +1506,14 @@ static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp,
{
struct bnxt_napi *bnapi = cpr->bnapi;
struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
- u8 agg_id = TPA_END_AGG_ID(tpa_end);
u8 *data_ptr, agg_bufs;
- u16 cp_cons = RING_CMP(*raw_cons);
unsigned int len;
struct bnxt_tpa_info *tpa_info;
dma_addr_t mapping;
struct sk_buff *skb;
+ u16 idx = 0, agg_id;
void *data;
+ bool gro;
if (unlikely(bnapi->in_reset)) {
int rc = bnxt_discard_rx(bp, cpr, raw_cons, tpa_end);
@@ -1418,26 +1523,43 @@ static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp,
return NULL;
}
- tpa_info = &rxr->rx_tpa[agg_id];
+ if (bp->flags & BNXT_FLAG_CHIP_P5) {
+ agg_id = TPA_END_AGG_ID_P5(tpa_end);
+ agg_id = bnxt_lookup_agg_idx(rxr, agg_id);
+ agg_bufs = TPA_END_AGG_BUFS_P5(tpa_end1);
+ tpa_info = &rxr->rx_tpa[agg_id];
+ if (unlikely(agg_bufs != tpa_info->agg_count)) {
+ netdev_warn(bp->dev, "TPA end agg_buf %d != expected agg_bufs %d\n",
+ agg_bufs, tpa_info->agg_count);
+ agg_bufs = tpa_info->agg_count;
+ }
+ tpa_info->agg_count = 0;
+ *event |= BNXT_AGG_EVENT;
+ bnxt_free_agg_idx(rxr, agg_id);
+ idx = agg_id;
+ gro = !!(bp->flags & BNXT_FLAG_GRO);
+ } else {
+ agg_id = TPA_END_AGG_ID(tpa_end);
+ agg_bufs = TPA_END_AGG_BUFS(tpa_end);
+ tpa_info = &rxr->rx_tpa[agg_id];
+ idx = RING_CMP(*raw_cons);
+ if (agg_bufs) {
+ if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, raw_cons))
+ return ERR_PTR(-EBUSY);
+
+ *event |= BNXT_AGG_EVENT;
+ idx = NEXT_CMP(idx);
+ }
+ gro = !!TPA_END_GRO(tpa_end);
+ }
data = tpa_info->data;
data_ptr = tpa_info->data_ptr;
prefetch(data_ptr);
len = tpa_info->len;
mapping = tpa_info->mapping;
- agg_bufs = (le32_to_cpu(tpa_end->rx_tpa_end_cmp_misc_v1) &
- RX_TPA_END_CMP_AGG_BUFS) >> RX_TPA_END_CMP_AGG_BUFS_SHIFT;
-
- if (agg_bufs) {
- if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, raw_cons))
- return ERR_PTR(-EBUSY);
-
- *event |= BNXT_AGG_EVENT;
- cp_cons = NEXT_CMP(cp_cons);
- }
-
if (unlikely(agg_bufs > MAX_SKB_FRAGS || TPA_END_ERRORS(tpa_end1))) {
- bnxt_abort_tpa(cpr, cp_cons, agg_bufs);
+ bnxt_abort_tpa(cpr, idx, agg_bufs);
if (agg_bufs > MAX_SKB_FRAGS)
netdev_warn(bp->dev, "TPA frags %d exceeded MAX_SKB_FRAGS %d\n",
agg_bufs, (int)MAX_SKB_FRAGS);
@@ -1447,7 +1569,7 @@ static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp,
if (len <= bp->rx_copy_thresh) {
skb = bnxt_copy_skb(bnapi, data_ptr, len, mapping);
if (!skb) {
- bnxt_abort_tpa(cpr, cp_cons, agg_bufs);
+ bnxt_abort_tpa(cpr, idx, agg_bufs);
return NULL;
}
} else {
@@ -1456,7 +1578,7 @@ static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp,
new_data = __bnxt_alloc_rx_data(bp, &new_mapping, GFP_ATOMIC);
if (!new_data) {
- bnxt_abort_tpa(cpr, cp_cons, agg_bufs);
+ bnxt_abort_tpa(cpr, idx, agg_bufs);
return NULL;
}
@@ -1471,7 +1593,7 @@ static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp,
if (!skb) {
kfree(data);
- bnxt_abort_tpa(cpr, cp_cons, agg_bufs);
+ bnxt_abort_tpa(cpr, idx, agg_bufs);
return NULL;
}
skb_reserve(skb, bp->rx_offset);
@@ -1479,7 +1601,7 @@ static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp,
}
if (agg_bufs) {
- skb = bnxt_rx_pages(bp, cpr, skb, cp_cons, agg_bufs);
+ skb = bnxt_rx_pages(bp, cpr, skb, idx, agg_bufs, true);
if (!skb) {
/* Page reuse already handled by bnxt_rx_pages(). */
return NULL;
@@ -1508,12 +1630,24 @@ static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp,
(tpa_info->flags2 & RX_CMP_FLAGS2_T_L4_CS_CALC) >> 3;
}
- if (TPA_END_GRO(tpa_end))
+ if (gro)
skb = bnxt_gro_skb(bp, tpa_info, tpa_end, tpa_end1, skb);
return skb;
}
+static void bnxt_tpa_agg(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
+ struct rx_agg_cmp *rx_agg)
+{
+ u16 agg_id = TPA_AGG_AGG_ID(rx_agg);
+ struct bnxt_tpa_info *tpa_info;
+
+ agg_id = bnxt_lookup_agg_idx(rxr, agg_id);
+ tpa_info = &rxr->rx_tpa[agg_id];
+ BUG_ON(tpa_info->agg_count >= MAX_SKB_FRAGS);
+ tpa_info->agg_arr[tpa_info->agg_count++] = *rx_agg;
+}
+
static void bnxt_deliver_skb(struct bnxt *bp, struct bnxt_napi *bnapi,
struct sk_buff *skb)
{
@@ -1555,6 +1689,13 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
rxcmp = (struct rx_cmp *)
&cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
+ cmp_type = RX_CMP_TYPE(rxcmp);
+
+ if (cmp_type == CMP_TYPE_RX_TPA_AGG_CMP) {
+ bnxt_tpa_agg(bp, rxr, (struct rx_agg_cmp *)rxcmp);
+ goto next_rx_no_prod_no_len;
+ }
+
tmp_raw_cons = NEXT_RAW_CMP(tmp_raw_cons);
cp_cons = RING_CMP(tmp_raw_cons);
rxcmp1 = (struct rx_cmp_ext *)
@@ -1563,8 +1704,6 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
if (!RX_CMP_VALID(rxcmp1, tmp_raw_cons))
return -EBUSY;
- cmp_type = RX_CMP_TYPE(rxcmp);
-
prod = rxr->rx_prod;
if (cmp_type == CMP_TYPE_RX_L2_TPA_START_CMP) {
@@ -1623,7 +1762,8 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
bnxt_reuse_rx_data(rxr, cons, data);
if (agg_bufs)
- bnxt_reuse_rx_agg_bufs(cpr, cp_cons, agg_bufs);
+ bnxt_reuse_rx_agg_bufs(cpr, cp_cons, 0, agg_bufs,
+ false);
rc = -EIO;
if (rx_err & RX_CMPL_ERRORS_BUFFER_ERROR_MASK) {
@@ -1646,7 +1786,8 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
bnxt_reuse_rx_data(rxr, cons, data);
if (!skb) {
if (agg_bufs)
- bnxt_reuse_rx_agg_bufs(cpr, cp_cons, agg_bufs);
+ bnxt_reuse_rx_agg_bufs(cpr, cp_cons, 0,
+ agg_bufs, false);
rc = -ENOMEM;
goto next_rx;
}
@@ -1666,7 +1807,7 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
}
if (agg_bufs) {
- skb = bnxt_rx_pages(bp, cpr, skb, cp_cons, agg_bufs);
+ skb = bnxt_rx_pages(bp, cpr, skb, cp_cons, agg_bufs, false);
if (!skb) {
rc = -ENOMEM;
goto next_rx;
@@ -1765,6 +1906,33 @@ static int bnxt_force_rx_discard(struct bnxt *bp,
return bnxt_rx_pkt(bp, cpr, raw_cons, event);
}
+u32 bnxt_fw_health_readl(struct bnxt *bp, int reg_idx)
+{
+ struct bnxt_fw_health *fw_health = bp->fw_health;
+ u32 reg = fw_health->regs[reg_idx];
+ u32 reg_type, reg_off, val = 0;
+
+ reg_type = BNXT_FW_HEALTH_REG_TYPE(reg);
+ reg_off = BNXT_FW_HEALTH_REG_OFF(reg);
+ switch (reg_type) {
+ case BNXT_FW_HEALTH_REG_TYPE_CFG:
+ pci_read_config_dword(bp->pdev, reg_off, &val);
+ break;
+ case BNXT_FW_HEALTH_REG_TYPE_GRC:
+ reg_off = fw_health->mapped_regs[reg_idx];
+ /* fall through */
+ case BNXT_FW_HEALTH_REG_TYPE_BAR0:
+ val = readl(bp->bar0 + reg_off);
+ break;
+ case BNXT_FW_HEALTH_REG_TYPE_BAR1:
+ val = readl(bp->bar1 + reg_off);
+ break;
+ }
+ if (reg_idx == BNXT_FW_RESET_INPROG_REG)
+ val &= fw_health->fw_reset_inprog_reg_mask;
+ return val;
+}
+
#define BNXT_GET_EVENT_PORT(data) \
((data) & \
ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_PORT_ID_MASK)
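
bnxt_fw_health_readl() hides where a health register actually lives: PCI config space, a GRC address pre-mapped through a BAR0 window, or a direct BAR0/BAR1 offset, selected by the type bits of the firmware-supplied descriptor. A sketch of how a heartbeat poll might consume it (the helper and register index are from this patch; the surrounding logic is illustrative):

        /* Returns true if firmware advanced its heartbeat counter. */
        static bool foo_fw_alive(struct bnxt *bp, u32 *last_beat)
        {
                u32 beat = bnxt_fw_health_readl(bp, BNXT_FW_HEARTBEAT_REG);
                bool alive = (beat != *last_beat);

                *last_beat = beat;
                return alive;
        }
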
@@ -1820,6 +1988,55 @@ static int bnxt_async_event_process(struct bnxt *bp,
goto async_event_process_exit;
set_bit(BNXT_RESET_TASK_SILENT_SP_EVENT, &bp->sp_event);
break;
+ case ASYNC_EVENT_CMPL_EVENT_ID_RESET_NOTIFY: {
+ u32 data1 = le32_to_cpu(cmpl->event_data1);
+
+ bp->fw_reset_timestamp = jiffies;
+ bp->fw_reset_min_dsecs = cmpl->timestamp_lo;
+ if (!bp->fw_reset_min_dsecs)
+ bp->fw_reset_min_dsecs = BNXT_DFLT_FW_RST_MIN_DSECS;
+ bp->fw_reset_max_dsecs = le16_to_cpu(cmpl->timestamp_hi);
+ if (!bp->fw_reset_max_dsecs)
+ bp->fw_reset_max_dsecs = BNXT_DFLT_FW_RST_MAX_DSECS;
+ if (EVENT_DATA1_RESET_NOTIFY_FATAL(data1)) {
+ netdev_warn(bp->dev, "Firmware fatal reset event received\n");
+ set_bit(BNXT_STATE_FW_FATAL_COND, &bp->state);
+ } else {
+ netdev_warn(bp->dev, "Firmware non-fatal reset event received, max wait time %d msec\n",
+ bp->fw_reset_max_dsecs * 100);
+ }
+ set_bit(BNXT_FW_RESET_NOTIFY_SP_EVENT, &bp->sp_event);
+ break;
+ }
+ case ASYNC_EVENT_CMPL_EVENT_ID_ERROR_RECOVERY: {
+ struct bnxt_fw_health *fw_health = bp->fw_health;
+ u32 data1 = le32_to_cpu(cmpl->event_data1);
+
+ if (!fw_health)
+ goto async_event_process_exit;
+
+ fw_health->enabled = EVENT_DATA1_RECOVERY_ENABLED(data1);
+ fw_health->master = EVENT_DATA1_RECOVERY_MASTER_FUNC(data1);
+ if (!fw_health->enabled)
+ break;
+
+ if (netif_msg_drv(bp))
+ netdev_info(bp->dev, "Error recovery info: error recovery[%d], master[%d], reset count[0x%x], health status: 0x%x\n",
+ fw_health->enabled, fw_health->master,
+ bnxt_fw_health_readl(bp,
+ BNXT_FW_RESET_CNT_REG),
+ bnxt_fw_health_readl(bp,
+ BNXT_FW_HEALTH_REG));
+ fw_health->tmr_multiplier =
+ DIV_ROUND_UP(fw_health->polling_dsecs * HZ,
+ bp->current_interval * 10);
+ fw_health->tmr_counter = fw_health->tmr_multiplier;
+ fw_health->last_fw_heartbeat =
+ bnxt_fw_health_readl(bp, BNXT_FW_HEARTBEAT_REG);
+ fw_health->last_fw_reset_cnt =
+ bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG);
+ goto async_event_process_exit;
+ }
default:
goto async_event_process_exit;
}
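
For RESET_NOTIFY the completion's timestamp_lo/timestamp_hi fields are repurposed to carry minimum and maximum wait times in deciseconds, with driver defaults substituted when firmware sends zero; the *100 in the warning converts to milliseconds. A sketch of the corresponding delay conversion (illustrative helper, not from the patch):

        #include <linux/jiffies.h>

        static unsigned long foo_dsecs_to_jiffies(u16 dsecs)
        {
                return msecs_to_jiffies(dsecs * 100u);  /* 1 dsec = 100 ms */
        }
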
@@ -2325,10 +2542,11 @@ static void bnxt_free_rx_skbs(struct bnxt *bp)
max_agg_idx = bp->rx_agg_nr_pages * RX_DESC_CNT;
for (i = 0; i < bp->rx_nr_rings; i++) {
struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
+ struct bnxt_tpa_idx_map *map;
int j;
if (rxr->rx_tpa) {
- for (j = 0; j < MAX_TPA; j++) {
+ for (j = 0; j < bp->max_tpa; j++) {
struct bnxt_tpa_info *tpa_info =
&rxr->rx_tpa[j];
u8 *data = tpa_info->data;
@@ -2395,6 +2613,9 @@ static void bnxt_free_rx_skbs(struct bnxt *bp)
__free_page(rxr->rx_page);
rxr->rx_page = NULL;
}
+ map = rxr->rx_tpa_idx_map;
+ if (map)
+ memset(map->agg_idx_bmap, 0, sizeof(map->agg_idx_bmap));
}
}
@@ -2483,6 +2704,61 @@ static int bnxt_alloc_ring(struct bnxt *bp, struct bnxt_ring_mem_info *rmem)
return 0;
}
+static void bnxt_free_tpa_info(struct bnxt *bp)
+{
+ int i;
+
+ for (i = 0; i < bp->rx_nr_rings; i++) {
+ struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
+
+ kfree(rxr->rx_tpa_idx_map);
+ rxr->rx_tpa_idx_map = NULL;
+ if (rxr->rx_tpa) {
+ kfree(rxr->rx_tpa[0].agg_arr);
+ rxr->rx_tpa[0].agg_arr = NULL;
+ }
+ kfree(rxr->rx_tpa);
+ rxr->rx_tpa = NULL;
+ }
+}
+
+static int bnxt_alloc_tpa_info(struct bnxt *bp)
+{
+ int i, j, total_aggs = 0;
+
+ bp->max_tpa = MAX_TPA;
+ if (bp->flags & BNXT_FLAG_CHIP_P5) {
+ if (!bp->max_tpa_v2)
+ return 0;
+ bp->max_tpa = max_t(u16, bp->max_tpa_v2, MAX_TPA_P5);
+ total_aggs = bp->max_tpa * MAX_SKB_FRAGS;
+ }
+
+ for (i = 0; i < bp->rx_nr_rings; i++) {
+ struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
+ struct rx_agg_cmp *agg;
+
+ rxr->rx_tpa = kcalloc(bp->max_tpa, sizeof(struct bnxt_tpa_info),
+ GFP_KERNEL);
+ if (!rxr->rx_tpa)
+ return -ENOMEM;
+
+ if (!(bp->flags & BNXT_FLAG_CHIP_P5))
+ continue;
+ agg = kcalloc(total_aggs, sizeof(*agg), GFP_KERNEL);
+ rxr->rx_tpa[0].agg_arr = agg;
+ if (!agg)
+ return -ENOMEM;
+ for (j = 1; j < bp->max_tpa; j++)
+ rxr->rx_tpa[j].agg_arr = agg + j * MAX_SKB_FRAGS;
+ rxr->rx_tpa_idx_map = kzalloc(sizeof(*rxr->rx_tpa_idx_map),
+ GFP_KERNEL);
+ if (!rxr->rx_tpa_idx_map)
+ return -ENOMEM;
+ }
+ return 0;
+}
+
static void bnxt_free_rx_rings(struct bnxt *bp)
{
int i;
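
bnxt_alloc_tpa_info() above grabs the per-ring aggregation storage as one kcalloc of max_tpa * MAX_SKB_FRAGS completions and hands each TPA slot a MAX_SKB_FRAGS-sized slice; bnxt_free_tpa_info() correspondingly frees only rx_tpa[0].agg_arr. A generic sketch of the slicing (the foo types are stand-ins for the patch's rx_agg_cmp/bnxt_tpa_info):

        #include <linux/slab.h>

        struct foo_agg { u32 raw[4]; };         /* stand-in element */
        struct foo_slot { struct foo_agg *agg_arr; };

        static int foo_slice(struct foo_slot *slots, int nslots, int per_slot)
        {
                struct foo_agg *base;
                int j;

                base = kcalloc(nslots * per_slot, sizeof(*base), GFP_KERNEL);
                if (!base)
                        return -ENOMEM;
                for (j = 0; j < nslots; j++)
                        slots[j].agg_arr = base + j * per_slot;
                /* Teardown frees slots[0].agg_arr only. */
                return 0;
        }
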
@@ -2490,6 +2766,7 @@ static void bnxt_free_rx_rings(struct bnxt *bp)
if (!bp->rx_ring)
return;
+ bnxt_free_tpa_info(bp);
for (i = 0; i < bp->rx_nr_rings; i++) {
struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
struct bnxt_ring_struct *ring;
@@ -2503,9 +2780,6 @@ static void bnxt_free_rx_rings(struct bnxt *bp)
page_pool_destroy(rxr->page_pool);
rxr->page_pool = NULL;
- kfree(rxr->rx_tpa);
- rxr->rx_tpa = NULL;
-
kfree(rxr->rx_agg_bmap);
rxr->rx_agg_bmap = NULL;
@@ -2539,7 +2813,7 @@ static int bnxt_alloc_rx_page_pool(struct bnxt *bp,
static int bnxt_alloc_rx_rings(struct bnxt *bp)
{
- int i, rc, agg_rings = 0, tpa_rings = 0;
+ int i, rc = 0, agg_rings = 0;
if (!bp->rx_ring)
return -ENOMEM;
@@ -2547,9 +2821,6 @@ static int bnxt_alloc_rx_rings(struct bnxt *bp)
if (bp->flags & BNXT_FLAG_AGG_RINGS)
agg_rings = 1;
- if (bp->flags & BNXT_FLAG_TPA)
- tpa_rings = 1;
-
for (i = 0; i < bp->rx_nr_rings; i++) {
struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
struct bnxt_ring_struct *ring;
@@ -2591,17 +2862,11 @@ static int bnxt_alloc_rx_rings(struct bnxt *bp)
rxr->rx_agg_bmap = kzalloc(mem_size, GFP_KERNEL);
if (!rxr->rx_agg_bmap)
return -ENOMEM;
-
- if (tpa_rings) {
- rxr->rx_tpa = kcalloc(MAX_TPA,
- sizeof(struct bnxt_tpa_info),
- GFP_KERNEL);
- if (!rxr->rx_tpa)
- return -ENOMEM;
- }
}
}
- return 0;
+ if (bp->flags & BNXT_FLAG_TPA)
+ rc = bnxt_alloc_tpa_info(bp);
+ return rc;
}
static void bnxt_free_tx_rings(struct bnxt *bp)
@@ -2953,7 +3218,7 @@ static int bnxt_init_one_rx_ring(struct bnxt *bp, int ring_nr)
u8 *data;
dma_addr_t mapping;
- for (i = 0; i < MAX_TPA; i++) {
+ for (i = 0; i < bp->max_tpa; i++) {
data = __bnxt_alloc_rx_data(bp, &mapping,
GFP_KERNEL);
if (!data)
@@ -3376,6 +3641,9 @@ static int bnxt_alloc_kong_hwrm_resources(struct bnxt *bp)
{
struct pci_dev *pdev = bp->pdev;
+ if (bp->hwrm_cmd_kong_resp_addr)
+ return 0;
+
bp->hwrm_cmd_kong_resp_addr =
dma_alloc_coherent(&pdev->dev, PAGE_SIZE,
&bp->hwrm_cmd_kong_resp_dma_addr,
@@ -3415,6 +3683,9 @@ static int bnxt_alloc_hwrm_short_cmd_req(struct bnxt *bp)
{
struct pci_dev *pdev = bp->pdev;
+ if (bp->hwrm_short_cmd_req_addr)
+ return 0;
+
bp->hwrm_short_cmd_req_addr =
dma_alloc_coherent(&pdev->dev, bp->hwrm_max_ext_req_len,
&bp->hwrm_short_cmd_req_dma_addr,
@@ -3468,7 +3739,7 @@ static void bnxt_free_ring_stats(struct bnxt *bp)
if (!bp->bnapi)
return;
- size = sizeof(struct ctx_hw_stats);
+ size = bp->hw_ring_stats_size;
for (i = 0; i < bp->cp_nr_rings; i++) {
struct bnxt_napi *bnapi = bp->bnapi[i];
@@ -3487,7 +3758,7 @@ static int bnxt_alloc_stats(struct bnxt *bp)
u32 size, i;
struct pci_dev *pdev = bp->pdev;
- size = sizeof(struct ctx_hw_stats);
+ size = bp->hw_ring_stats_size;
for (i = 0; i < bp->cp_nr_rings; i++) {
struct bnxt_napi *bnapi = bp->bnapi[i];
@@ -3869,6 +4140,32 @@ void bnxt_hwrm_cmd_hdr_init(struct bnxt *bp, void *request, u16 req_type,
req->resp_addr = cpu_to_le64(bp->hwrm_cmd_resp_dma_addr);
}
+static int bnxt_hwrm_to_stderr(u32 hwrm_err)
+{
+ switch (hwrm_err) {
+ case HWRM_ERR_CODE_SUCCESS:
+ return 0;
+ case HWRM_ERR_CODE_RESOURCE_ACCESS_DENIED:
+ return -EACCES;
+ case HWRM_ERR_CODE_RESOURCE_ALLOC_ERROR:
+ return -ENOSPC;
+ case HWRM_ERR_CODE_INVALID_PARAMS:
+ case HWRM_ERR_CODE_INVALID_FLAGS:
+ case HWRM_ERR_CODE_INVALID_ENABLES:
+ case HWRM_ERR_CODE_UNSUPPORTED_TLV:
+ case HWRM_ERR_CODE_UNSUPPORTED_OPTION_ERR:
+ return -EINVAL;
+ case HWRM_ERR_CODE_NO_BUFFER:
+ return -ENOMEM;
+ case HWRM_ERR_CODE_HOT_RESET_PROGRESS:
+ return -EAGAIN;
+ case HWRM_ERR_CODE_CMD_NOT_SUPPORTED:
+ return -EOPNOTSUPP;
+ default:
+ return -EIO;
+ }
+}
+
static int bnxt_hwrm_do_send_msg(struct bnxt *bp, void *msg, u32 msg_len,
int timeout, bool silent)
{
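
bnxt_hwrm_to_stderr() from the previous hunk maps firmware HWRM status codes onto standard errnos at the single exit point of bnxt_hwrm_do_send_msg(); the many later hunks that delete call-site `rc = -EIO` / `return -ENOMEM` remaps depend on it. A sketch of the simplified caller shape (foo_req is hypothetical):

        rc = hwrm_send_message(bp, &foo_req, sizeof(foo_req),
                               HWRM_CMD_TIMEOUT);
        return rc;      /* already a negative errno; no -EIO remap */
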
@@ -3886,6 +4183,9 @@ static int bnxt_hwrm_do_send_msg(struct bnxt *bp, void *msg, u32 msg_len,
u32 bar_offset = BNXT_GRCPF_REG_CHIMP_COMM;
u16 dst = BNXT_HWRM_CHNL_CHIMP;
+ if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state))
+ return -EBUSY;
+
if (msg_len > BNXT_HWRM_MAX_REQ_LEN) {
if (msg_len > bp->hwrm_max_ext_req_len ||
!bp->hwrm_short_cmd_req_addr)
@@ -3950,6 +4250,9 @@ static int bnxt_hwrm_do_send_msg(struct bnxt *bp, void *msg, u32 msg_len,
/* Ring channel doorbell */
writel(1, bp->bar0 + doorbell_offset);
+ if (!pci_is_enabled(bp->pdev))
+ return 0;
+
if (!timeout)
timeout = DFLT_HWRM_CMD_TIMEOUT;
/* convert timeout to usec */
@@ -3981,9 +4284,10 @@ static int bnxt_hwrm_do_send_msg(struct bnxt *bp, void *msg, u32 msg_len,
}
if (bp->hwrm_intr_seq_id != (u16)~seq_id) {
- netdev_err(bp->dev, "Resp cmpl intr err msg: 0x%x\n",
- le16_to_cpu(req->req_type));
- return -1;
+ if (!silent)
+ netdev_err(bp->dev, "Resp cmpl intr err msg: 0x%x\n",
+ le16_to_cpu(req->req_type));
+ return -EBUSY;
}
len = (le32_to_cpu(*resp_len) & HWRM_RESP_LEN_MASK) >>
HWRM_RESP_LEN_SFT;
@@ -4007,11 +4311,12 @@ static int bnxt_hwrm_do_send_msg(struct bnxt *bp, void *msg, u32 msg_len,
}
if (i >= tmo_count) {
- netdev_err(bp->dev, "Error (timeout: %d) msg {0x%x 0x%x} len:%d\n",
- HWRM_TOTAL_TIMEOUT(i),
- le16_to_cpu(req->req_type),
- le16_to_cpu(req->seq_id), len);
- return -1;
+ if (!silent)
+ netdev_err(bp->dev, "Error (timeout: %d) msg {0x%x 0x%x} len:%d\n",
+ HWRM_TOTAL_TIMEOUT(i),
+ le16_to_cpu(req->req_type),
+ le16_to_cpu(req->seq_id), len);
+ return -EBUSY;
}
/* Last byte of resp contains valid bit */
@@ -4025,11 +4330,13 @@ static int bnxt_hwrm_do_send_msg(struct bnxt *bp, void *msg, u32 msg_len,
}
if (j >= HWRM_VALID_BIT_DELAY_USEC) {
- netdev_err(bp->dev, "Error (timeout: %d) msg {0x%x 0x%x} len:%d v:%d\n",
- HWRM_TOTAL_TIMEOUT(i),
- le16_to_cpu(req->req_type),
- le16_to_cpu(req->seq_id), len, *valid);
- return -1;
+ if (!silent)
+ netdev_err(bp->dev, "Error (timeout: %d) msg {0x%x 0x%x} len:%d v:%d\n",
+ HWRM_TOTAL_TIMEOUT(i),
+ le16_to_cpu(req->req_type),
+ le16_to_cpu(req->seq_id), len,
+ *valid);
+ return -EBUSY;
}
}
@@ -4043,7 +4350,7 @@ static int bnxt_hwrm_do_send_msg(struct bnxt *bp, void *msg, u32 msg_len,
netdev_err(bp->dev, "hwrm req_type 0x%x seq id 0x%x error 0x%x\n",
le16_to_cpu(resp->req_type),
le16_to_cpu(resp->seq_id), rc);
- return rc;
+ return bnxt_hwrm_to_stderr(rc);
}
int _hwrm_send_message(struct bnxt *bp, void *msg, u32 msg_len, int timeout)
@@ -4092,9 +4399,14 @@ int bnxt_hwrm_func_rgtr_async_events(struct bnxt *bp, unsigned long *bmap,
cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_ASYNC_EVENT_FWD);
memset(async_events_bmap, 0, sizeof(async_events_bmap));
- for (i = 0; i < ARRAY_SIZE(bnxt_async_events_arr); i++)
- __set_bit(bnxt_async_events_arr[i], async_events_bmap);
+ for (i = 0; i < ARRAY_SIZE(bnxt_async_events_arr); i++) {
+ u16 event_id = bnxt_async_events_arr[i];
+ if (event_id == ASYNC_EVENT_CMPL_EVENT_ID_ERROR_RECOVERY &&
+ !(bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY))
+ continue;
+ __set_bit(bnxt_async_events_arr[i], async_events_bmap);
+ }
if (bmap && bmap_size) {
for (i = 0; i < bmap_size; i++) {
if (test_bit(i, bmap))
@@ -4112,6 +4424,7 @@ static int bnxt_hwrm_func_drv_rgtr(struct bnxt *bp)
{
struct hwrm_func_drv_rgtr_output *resp = bp->hwrm_cmd_resp_addr;
struct hwrm_func_drv_rgtr_input req = {0};
+ u32 flags;
int rc;
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_DRV_RGTR, -1, -1);
@@ -4121,7 +4434,11 @@ static int bnxt_hwrm_func_drv_rgtr(struct bnxt *bp)
FUNC_DRV_RGTR_REQ_ENABLES_VER);
req.os_type = cpu_to_le16(FUNC_DRV_RGTR_REQ_OS_TYPE_LINUX);
- req.flags = cpu_to_le32(FUNC_DRV_RGTR_REQ_FLAGS_16BIT_VER_MODE);
+ flags = FUNC_DRV_RGTR_REQ_FLAGS_16BIT_VER_MODE |
+ FUNC_DRV_RGTR_REQ_FLAGS_HOT_RESET_SUPPORT;
+ if (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY)
+ flags |= FUNC_DRV_RGTR_REQ_FLAGS_ERROR_RECOVERY_SUPPORT;
+ req.flags = cpu_to_le32(flags);
req.ver_maj_8b = DRV_VER_MAJ;
req.ver_min_8b = DRV_VER_MIN;
req.ver_upd_8b = DRV_VER_UPD;
@@ -4156,10 +4473,8 @@ static int bnxt_hwrm_func_drv_rgtr(struct bnxt *bp)
mutex_lock(&bp->hwrm_cmd_lock);
rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
- if (rc)
- rc = -EIO;
- else if (resp->flags &
- cpu_to_le32(FUNC_DRV_RGTR_RESP_FLAGS_IF_CHANGE_SUPPORTED))
+ if (!rc && (resp->flags &
+ cpu_to_le32(FUNC_DRV_RGTR_RESP_FLAGS_IF_CHANGE_SUPPORTED)))
bp->fw_cap |= BNXT_FW_CAP_IF_CHANGE;
mutex_unlock(&bp->hwrm_cmd_lock);
return rc;
@@ -4414,6 +4729,7 @@ static int bnxt_hwrm_clear_vnic_filter(struct bnxt *bp)
static int bnxt_hwrm_vnic_set_tpa(struct bnxt *bp, u16 vnic_id, u32 tpa_flags)
{
struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
+ u16 max_aggs = VNIC_TPA_CFG_REQ_MAX_AGGS_MAX;
struct hwrm_vnic_tpa_cfg_input req = {0};
if (vnic->fw_vnic_id == INVALID_HW_RING_ID)
@@ -4453,9 +4769,14 @@ static int bnxt_hwrm_vnic_set_tpa(struct bnxt *bp, u16 vnic_id, u32 tpa_flags)
nsegs = (MAX_SKB_FRAGS - n) / n;
}
- segs = ilog2(nsegs);
+ if (bp->flags & BNXT_FLAG_CHIP_P5) {
+ segs = MAX_TPA_SEGS_P5;
+ max_aggs = bp->max_tpa;
+ } else {
+ segs = ilog2(nsegs);
+ }
req.max_agg_segs = cpu_to_le16(segs);
- req.max_aggs = cpu_to_le16(VNIC_TPA_CFG_REQ_MAX_AGGS_MAX);
+ req.max_aggs = cpu_to_le16(max_aggs);
req.min_agg_len = cpu_to_le32(512);
}
@@ -4576,7 +4897,7 @@ static int bnxt_hwrm_vnic_set_rss_p5(struct bnxt *bp, u16 vnic_id, bool set_rss)
}
rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
if (rc)
- return -EIO;
+ return rc;
}
return 0;
}
@@ -4739,8 +5060,6 @@ static int bnxt_hwrm_vnic_free_one(struct bnxt *bp, u16 vnic_id)
cpu_to_le32(bp->vnic_info[vnic_id].fw_vnic_id);
rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
- if (rc)
- return rc;
bp->vnic_info[vnic_id].fw_vnic_id = INVALID_HW_RING_ID;
}
return rc;
@@ -4800,6 +5119,8 @@ static int bnxt_hwrm_vnic_qcaps(struct bnxt *bp)
struct hwrm_vnic_qcaps_input req = {0};
int rc;
+ bp->hw_ring_stats_size = sizeof(struct ctx_hw_stats);
+ bp->flags &= ~(BNXT_FLAG_NEW_RSS_CAP | BNXT_FLAG_ROCE_MIRROR_CAP);
if (bp->hwrm_spec_code < 0x10600)
return 0;
@@ -4815,6 +5136,10 @@ static int bnxt_hwrm_vnic_qcaps(struct bnxt *bp)
if (flags &
VNIC_QCAPS_RESP_FLAGS_ROCE_MIRRORING_CAPABLE_VNIC_CAP)
bp->flags |= BNXT_FLAG_ROCE_MIRROR_CAP;
+ bp->max_tpa_v2 = le16_to_cpu(resp->max_aggs_supported);
+ if (bp->max_tpa_v2)
+ bp->hw_ring_stats_size =
+ sizeof(struct ctx_hw_stats_ext);
}
mutex_unlock(&bp->hwrm_cmd_lock);
return rc;
@@ -4874,8 +5199,6 @@ static int bnxt_hwrm_ring_grp_free(struct bnxt *bp)
rc = _hwrm_send_message(bp, &req, sizeof(req),
HWRM_CMD_TIMEOUT);
- if (rc)
- break;
bp->grp_info[i].fw_grp_id = INVALID_HW_RING_ID;
}
mutex_unlock(&bp->hwrm_cmd_lock);
@@ -5194,6 +5517,9 @@ static int hwrm_ring_free_send_msg(struct bnxt *bp,
struct hwrm_ring_free_output *resp = bp->hwrm_cmd_resp_addr;
u16 error_code;
+ if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state))
+ return 0;
+
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_FREE, cmpl_ring_id, -1);
req.ring_type = ring_type;
req.ring_id = cpu_to_le16(ring->fw_ring_id);
@@ -5331,7 +5657,7 @@ static int bnxt_hwrm_get_rings(struct bnxt *bp)
rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
if (rc) {
mutex_unlock(&bp->hwrm_cmd_lock);
- return -EIO;
+ return rc;
}
hw_resc->resv_tx_rings = le16_to_cpu(resp->alloc_tx_rings);
@@ -5495,7 +5821,7 @@ bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
if (rc)
- return -ENOMEM;
+ return rc;
if (bp->hwrm_spec_code < 0x10601)
bp->hw_resc.resv_tx_rings = tx_rings;
@@ -5520,7 +5846,7 @@ bnxt_hwrm_reserve_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
cp_rings, stats, vnics);
rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
if (rc)
- return -ENOMEM;
+ return rc;
rc = bnxt_hwrm_get_rings(bp);
return rc;
@@ -5701,9 +6027,7 @@ static int bnxt_hwrm_check_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
req.flags = cpu_to_le32(flags);
rc = hwrm_send_message_silent(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
- if (rc)
- return -ENOMEM;
- return 0;
+ return rc;
}
static int bnxt_hwrm_check_pf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
@@ -5731,9 +6055,7 @@ static int bnxt_hwrm_check_pf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
req.flags = cpu_to_le32(flags);
rc = hwrm_send_message_silent(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
- if (rc)
- return -ENOMEM;
- return 0;
+ return rc;
}
static int bnxt_hwrm_check_rings(struct bnxt *bp, int tx_rings, int rx_rings,
@@ -5995,8 +6317,6 @@ static int bnxt_hwrm_stat_ctx_free(struct bnxt *bp)
rc = _hwrm_send_message(bp, &req, sizeof(req),
HWRM_CMD_TIMEOUT);
- if (rc)
- break;
cpr->hw_stats_ctx_id = INVALID_STATS_CTX_ID;
}
@@ -6016,6 +6336,7 @@ static int bnxt_hwrm_stat_ctx_alloc(struct bnxt *bp)
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_STAT_CTX_ALLOC, -1, -1);
+ req.stats_dma_length = cpu_to_le16(bp->hw_ring_stats_size);
req.update_period_ms = cpu_to_le32(bp->stats_coal_ticks / 1000);
mutex_lock(&bp->hwrm_cmd_lock);
@@ -6057,6 +6378,8 @@ static int bnxt_hwrm_func_qcfg(struct bnxt *bp)
struct bnxt_vf_info *vf = &bp->vf;
vf->vlan = le16_to_cpu(resp->vlan) & VLAN_VID_MASK;
+ } else {
+ bp->pf.registered_vfs = le16_to_cpu(resp->registered_vfs);
}
#endif
flags = le16_to_cpu(resp->flags);
@@ -6292,8 +6615,6 @@ static int bnxt_hwrm_func_backing_store_cfg(struct bnxt *bp, u32 enables)
}
req.flags = cpu_to_le32(flags);
rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
- if (rc)
- rc = -EIO;
return rc;
}
@@ -6555,10 +6876,8 @@ int bnxt_hwrm_func_resc_qcaps(struct bnxt *bp, bool all)
mutex_lock(&bp->hwrm_cmd_lock);
rc = _hwrm_send_message_silent(bp, &req, sizeof(req),
HWRM_CMD_TIMEOUT);
- if (rc) {
- rc = -EIO;
+ if (rc)
goto hwrm_func_resc_qcaps_exit;
- }
hw_resc->max_tx_sch_inputs = le16_to_cpu(resp->max_tx_scheduler_inputs);
if (!all)
@@ -6626,6 +6945,8 @@ static int __bnxt_hwrm_func_qcaps(struct bnxt *bp)
bp->fw_cap |= BNXT_FW_CAP_PCIE_STATS_SUPPORTED;
if (flags & FUNC_QCAPS_RESP_FLAGS_EXT_STATS_SUPPORTED)
bp->fw_cap |= BNXT_FW_CAP_EXT_STATS_SUPPORTED;
+ if (flags & FUNC_QCAPS_RESP_FLAGS_ERROR_RECOVERY_CAPABLE)
+ bp->fw_cap |= BNXT_FW_CAP_ERROR_RECOVERY;
bp->tx_push_thresh = 0;
if (flags & FUNC_QCAPS_RESP_FLAGS_PUSH_MODE_SUPPORTED)
@@ -6657,6 +6978,7 @@ static int __bnxt_hwrm_func_qcaps(struct bnxt *bp)
pf->max_tx_wm_flows = le32_to_cpu(resp->max_tx_wm_flows);
pf->max_rx_em_flows = le32_to_cpu(resp->max_rx_em_flows);
pf->max_rx_wm_flows = le32_to_cpu(resp->max_rx_wm_flows);
+ bp->flags &= ~BNXT_FLAG_WOL_CAP;
if (flags & FUNC_QCAPS_RESP_FLAGS_WOL_MAGICPKT_SUPPORTED)
bp->flags |= BNXT_FLAG_WOL_CAP;
} else {
@@ -6726,6 +7048,103 @@ hwrm_cfa_adv_qcaps_exit:
return rc;
}
+static int bnxt_map_fw_health_regs(struct bnxt *bp)
+{
+ struct bnxt_fw_health *fw_health = bp->fw_health;
+ u32 reg_base = 0xffffffff;
+ int i;
+
+ /* Only pre-map the monitoring GRC registers using window 3 */
+ for (i = 0; i < 4; i++) {
+ u32 reg = fw_health->regs[i];
+
+ if (BNXT_FW_HEALTH_REG_TYPE(reg) != BNXT_FW_HEALTH_REG_TYPE_GRC)
+ continue;
+ if (reg_base == 0xffffffff)
+ reg_base = reg & BNXT_GRC_BASE_MASK;
+ if ((reg & BNXT_GRC_BASE_MASK) != reg_base)
+ return -ERANGE;
+ fw_health->mapped_regs[i] = BNXT_FW_HEALTH_WIN_BASE +
+ (reg & BNXT_GRC_OFFSET_MASK);
+ }
+ if (reg_base == 0xffffffff)
+ return 0;
+
+ writel(reg_base, bp->bar0 + BNXT_GRCPF_REG_WINDOW_BASE_OUT +
+ BNXT_FW_HEALTH_WIN_MAP_OFF);
+ return 0;
+}
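
The window scheme above is easy to misread: each firmware-reported register value packs a 4 KB-aligned GRC page base (BNXT_GRC_BASE_MASK), a dword offset (BNXT_GRC_OFFSET_MASK), and a type in the low two bits, and all health registers must share one page so a single window covers them. A minimal user-space sketch of the address split, reusing the masks and window base defined later in bnxt.h (the sample register value is hypothetical):

    #include <stdint.h>
    #include <stdio.h>

    #define BNXT_GRC_BASE_MASK      0xfffff000u
    #define BNXT_GRC_OFFSET_MASK    0x00000ffcu
    #define BNXT_FW_HEALTH_WIN_BASE 0x3000u

    int main(void)
    {
        uint32_t reg = 0x301008c5;  /* hypothetical GRC register, type bits = 1 */

        /* Window 3 is programmed with the page base; the register is then
         * readable at the fixed window base plus its in-page offset.
         */
        printf("page base %#x, BAR0 offset %#x\n",
               reg & BNXT_GRC_BASE_MASK,
               BNXT_FW_HEALTH_WIN_BASE + (reg & BNXT_GRC_OFFSET_MASK));
        return 0;
    }
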
+
+static int bnxt_hwrm_error_recovery_qcfg(struct bnxt *bp)
+{
+ struct hwrm_error_recovery_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
+ struct bnxt_fw_health *fw_health = bp->fw_health;
+ struct hwrm_error_recovery_qcfg_input req = {0};
+ int rc, i;
+
+ if (!(bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY))
+ return 0;
+
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_ERROR_RECOVERY_QCFG, -1, -1);
+ mutex_lock(&bp->hwrm_cmd_lock);
+ rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ if (rc)
+ goto err_recovery_out;
+ if (!fw_health) {
+ fw_health = kzalloc(sizeof(*fw_health), GFP_KERNEL);
+ bp->fw_health = fw_health;
+ if (!fw_health) {
+ rc = -ENOMEM;
+ goto err_recovery_out;
+ }
+ }
+ fw_health->flags = le32_to_cpu(resp->flags);
+ if ((fw_health->flags & ERROR_RECOVERY_QCFG_RESP_FLAGS_CO_CPU) &&
+ !(bp->fw_cap & BNXT_FW_CAP_KONG_MB_CHNL)) {
+ rc = -EINVAL;
+ goto err_recovery_out;
+ }
+ fw_health->polling_dsecs = le32_to_cpu(resp->driver_polling_freq);
+ fw_health->master_func_wait_dsecs =
+ le32_to_cpu(resp->master_func_wait_period);
+ fw_health->normal_func_wait_dsecs =
+ le32_to_cpu(resp->normal_func_wait_period);
+ fw_health->post_reset_wait_dsecs =
+ le32_to_cpu(resp->master_func_wait_period_after_reset);
+ fw_health->post_reset_max_wait_dsecs =
+ le32_to_cpu(resp->max_bailout_time_after_reset);
+ fw_health->regs[BNXT_FW_HEALTH_REG] =
+ le32_to_cpu(resp->fw_health_status_reg);
+ fw_health->regs[BNXT_FW_HEARTBEAT_REG] =
+ le32_to_cpu(resp->fw_heartbeat_reg);
+ fw_health->regs[BNXT_FW_RESET_CNT_REG] =
+ le32_to_cpu(resp->fw_reset_cnt_reg);
+ fw_health->regs[BNXT_FW_RESET_INPROG_REG] =
+ le32_to_cpu(resp->reset_inprogress_reg);
+ fw_health->fw_reset_inprog_reg_mask =
+ le32_to_cpu(resp->reset_inprogress_reg_mask);
+ fw_health->fw_reset_seq_cnt = resp->reg_array_cnt;
+ if (fw_health->fw_reset_seq_cnt >= 16) {
+ rc = -EINVAL;
+ goto err_recovery_out;
+ }
+ for (i = 0; i < fw_health->fw_reset_seq_cnt; i++) {
+ fw_health->fw_reset_seq_regs[i] =
+ le32_to_cpu(resp->reset_reg[i]);
+ fw_health->fw_reset_seq_vals[i] =
+ le32_to_cpu(resp->reset_reg_val[i]);
+ fw_health->fw_reset_seq_delay_msec[i] =
+ resp->delay_after_reset[i];
+ }
+err_recovery_out:
+ mutex_unlock(&bp->hwrm_cmd_lock);
+ if (!rc)
+ rc = bnxt_map_fw_health_regs(bp);
+ if (rc)
+ bp->fw_cap &= ~BNXT_FW_CAP_ERROR_RECOVERY;
+ return rc;
+}
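
All of the *_wait_period fields above are in deciseconds (hence the _dsecs suffix in the driver's copies), and everywhere the driver schedules delayed work it converts with dsecs * HZ / 10. A tiny standalone check of that arithmetic, with HZ pinned to a common config value for the demo:

    #include <stdio.h>

    #define HZ 250                          /* assumed CONFIG_HZ for the demo */

    static unsigned long dsecs_to_jiffies(unsigned int dsecs)
    {
        return (unsigned long)dsecs * HZ / 10;  /* 1 dsec == 100 ms */
    }

    int main(void)
    {
        unsigned int dsecs = 20;    /* e.g. BNXT_DFLT_FW_RST_MIN_DSECS */

        printf("%u dsecs = %lu jiffies = %u ms\n",
               dsecs, dsecs_to_jiffies(dsecs), dsecs * 100);
        return 0;
    }
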
+
static int bnxt_hwrm_func_reset(struct bnxt *bp)
{
struct hwrm_func_reset_input req = {0};
@@ -6785,20 +7204,30 @@ qportcfg_exit:
return rc;
}
-static int bnxt_hwrm_ver_get(struct bnxt *bp)
+static int __bnxt_hwrm_ver_get(struct bnxt *bp, bool silent)
{
- int rc;
struct hwrm_ver_get_input req = {0};
- struct hwrm_ver_get_output *resp = bp->hwrm_cmd_resp_addr;
- u32 dev_caps_cfg;
+ int rc;
- bp->hwrm_max_req_len = HWRM_MAX_REQ_LEN;
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VER_GET, -1, -1);
req.hwrm_intf_maj = HWRM_VERSION_MAJOR;
req.hwrm_intf_min = HWRM_VERSION_MINOR;
req.hwrm_intf_upd = HWRM_VERSION_UPDATE;
+
+ rc = bnxt_hwrm_do_send_msg(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT,
+ silent);
+ return rc;
+}
+
+static int bnxt_hwrm_ver_get(struct bnxt *bp)
+{
+ struct hwrm_ver_get_output *resp = bp->hwrm_cmd_resp_addr;
+ u32 dev_caps_cfg;
+ int rc;
+
+ bp->hwrm_max_req_len = HWRM_MAX_REQ_LEN;
mutex_lock(&bp->hwrm_cmd_lock);
- rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ rc = __bnxt_hwrm_ver_get(bp, false);
if (rc)
goto hwrm_ver_get_exit;
@@ -7001,6 +7430,8 @@ static int bnxt_set_tpa(struct bnxt *bp, bool set_tpa)
if (set_tpa)
tpa_flags = bp->flags & BNXT_FLAG_TPA;
+ else if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state))
+ return 0;
for (i = 0; i < bp->nr_vnics; i++) {
rc = bnxt_hwrm_vnic_set_tpa(bp, i, tpa_flags);
if (rc) {
@@ -7066,8 +7497,6 @@ static int bnxt_hwrm_set_br_mode(struct bnxt *bp, u16 br_mode)
else
return -EINVAL;
rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
- if (rc)
- rc = -EIO;
return rc;
}
@@ -7087,8 +7516,6 @@ static int bnxt_hwrm_set_cache_line_size(struct bnxt *bp, int size)
req.options = FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_SIZE_128;
rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
- if (rc)
- rc = -EIO;
return rc;
}
@@ -7971,6 +8398,9 @@ static int bnxt_hwrm_phy_qcaps(struct bnxt *bp)
struct hwrm_port_phy_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
struct bnxt_link_info *link_info = &bp->link_info;
+ bp->flags &= ~BNXT_FLAG_EEE_CAP;
+ if (bp->test_info)
+ bp->test_info->flags &= ~BNXT_TEST_FL_EXT_LPBK;
if (bp->hwrm_spec_code < 0x10201)
return 0;
@@ -8292,11 +8722,14 @@ static int bnxt_hwrm_shutdown_link(struct bnxt *bp)
return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
}
+static int bnxt_fw_init_one(struct bnxt *bp);
+
static int bnxt_hwrm_if_change(struct bnxt *bp, bool up)
{
struct hwrm_func_drv_if_change_output *resp = bp->hwrm_cmd_resp_addr;
struct hwrm_func_drv_if_change_input req = {0};
- bool resc_reinit = false;
+ bool resc_reinit = false, fw_reset = false;
+ u32 flags = 0;
int rc;
if (!(bp->fw_cap & BNXT_FW_CAP_IF_CHANGE))
@@ -8307,26 +8740,57 @@ static int bnxt_hwrm_if_change(struct bnxt *bp, bool up)
req.flags = cpu_to_le32(FUNC_DRV_IF_CHANGE_REQ_FLAGS_UP);
mutex_lock(&bp->hwrm_cmd_lock);
rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
- if (!rc && (resp->flags &
- cpu_to_le32(FUNC_DRV_IF_CHANGE_RESP_FLAGS_RESC_CHANGE)))
- resc_reinit = true;
+ if (!rc)
+ flags = le32_to_cpu(resp->flags);
mutex_unlock(&bp->hwrm_cmd_lock);
+ if (rc)
+ return rc;
- if (up && resc_reinit && BNXT_NEW_RM(bp)) {
- struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
+ if (!up)
+ return 0;
- rc = bnxt_hwrm_func_resc_qcaps(bp, true);
- hw_resc->resv_cp_rings = 0;
- hw_resc->resv_stat_ctxs = 0;
- hw_resc->resv_irqs = 0;
- hw_resc->resv_tx_rings = 0;
- hw_resc->resv_rx_rings = 0;
- hw_resc->resv_hw_ring_grps = 0;
- hw_resc->resv_vnics = 0;
- bp->tx_nr_rings = 0;
- bp->rx_nr_rings = 0;
+ if (flags & FUNC_DRV_IF_CHANGE_RESP_FLAGS_RESC_CHANGE)
+ resc_reinit = true;
+ if (flags & FUNC_DRV_IF_CHANGE_RESP_FLAGS_HOT_FW_RESET_DONE)
+ fw_reset = true;
+
+ if (test_bit(BNXT_STATE_IN_FW_RESET, &bp->state) && !fw_reset) {
+ netdev_err(bp->dev, "RESET_DONE not set during FW reset.\n");
+ return -ENODEV;
}
- return rc;
+ if (resc_reinit || fw_reset) {
+ if (fw_reset) {
+ rc = bnxt_fw_init_one(bp);
+ if (rc) {
+ set_bit(BNXT_STATE_ABORT_ERR, &bp->state);
+ return rc;
+ }
+ bnxt_clear_int_mode(bp);
+ rc = bnxt_init_int_mode(bp);
+ if (rc) {
+ netdev_err(bp->dev, "init int mode failed\n");
+ return rc;
+ }
+ set_bit(BNXT_STATE_FW_RESET_DET, &bp->state);
+ }
+ if (BNXT_NEW_RM(bp)) {
+ struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
+
+ rc = bnxt_hwrm_func_resc_qcaps(bp, true);
+ hw_resc->resv_cp_rings = 0;
+ hw_resc->resv_stat_ctxs = 0;
+ hw_resc->resv_irqs = 0;
+ hw_resc->resv_tx_rings = 0;
+ hw_resc->resv_rx_rings = 0;
+ hw_resc->resv_hw_ring_grps = 0;
+ hw_resc->resv_vnics = 0;
+ if (!fw_reset) {
+ bp->tx_nr_rings = 0;
+ bp->rx_nr_rings = 0;
+ }
+ }
+ }
+ return 0;
}
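
Two response flags drive the reworked function: RESC_CHANGE means reserved resources must be re-queried, while HOT_FW_RESET_DONE means firmware rebooted while the port was down and the full init sequence must be replayed. A compact truth-table demo (the flag values here are placeholders; the real ones come from the generated HWRM header, which this hunk does not touch):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define FLAGS_RESC_CHANGE        0x1u   /* placeholder value */
    #define FLAGS_HOT_FW_RESET_DONE  0x2u   /* placeholder value */

    int main(void)
    {
        for (uint32_t flags = 0; flags < 4; flags++) {
            bool resc_reinit = flags & FLAGS_RESC_CHANGE;
            bool fw_reset = flags & FLAGS_HOT_FW_RESET_DONE;

            printf("flags=%u -> re-query resources: %d, full re-init: %d\n",
                   flags, resc_reinit, fw_reset);
        }
        return 0;
    }
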
static int bnxt_hwrm_port_led_qcaps(struct bnxt *bp)
@@ -8336,6 +8800,7 @@ static int bnxt_hwrm_port_led_qcaps(struct bnxt *bp)
struct bnxt_pf_info *pf = &bp->pf;
int rc;
+ bp->num_leds = 0;
if (BNXT_VF(bp) || bp->hwrm_spec_code < 0x10601)
return 0;
@@ -8430,6 +8895,7 @@ static void bnxt_get_wol_settings(struct bnxt *bp)
{
u16 handle = 0;
+ bp->wol = 0;
if (!BNXT_PF(bp) || !(bp->flags & BNXT_FLAG_WOL_CAP))
return;
@@ -8476,6 +8942,9 @@ static void bnxt_hwmon_open(struct bnxt *bp)
{
struct pci_dev *pdev = bp->pdev;
+ if (bp->hwmon_dev)
+ return;
+
bp->hwmon_dev = hwmon_device_register_with_groups(&pdev->dev,
DRV_MODULE_NAME, bp,
bnxt_groups);
@@ -8741,12 +9210,28 @@ static int bnxt_open(struct net_device *dev)
struct bnxt *bp = netdev_priv(dev);
int rc;
- bnxt_hwrm_if_change(bp, true);
- rc = __bnxt_open_nic(bp, true, true);
+ if (test_bit(BNXT_STATE_ABORT_ERR, &bp->state)) {
+ netdev_err(bp->dev, "A previous firmware reset did not complete, aborting\n");
+ return -ENODEV;
+ }
+
+ rc = bnxt_hwrm_if_change(bp, true);
if (rc)
+ return rc;
+ rc = __bnxt_open_nic(bp, true, true);
+ if (rc) {
bnxt_hwrm_if_change(bp, false);
+ } else {
+ if (test_and_clear_bit(BNXT_STATE_FW_RESET_DET, &bp->state) &&
+ BNXT_PF(bp)) {
+ struct bnxt_pf_info *pf = &bp->pf;
+ int n = pf->active_vfs;
- bnxt_hwmon_open(bp);
+ if (n)
+ bnxt_cfg_hw_sriov(bp, &n, true);
+ }
+ bnxt_hwmon_open(bp);
+ }
return rc;
}
@@ -8783,6 +9268,10 @@ static void __bnxt_close_nic(struct bnxt *bp, bool irq_re_init,
bnxt_debug_dev_exit(bp);
bnxt_disable_napi(bp);
del_timer_sync(&bp->timer);
+ if (test_bit(BNXT_STATE_IN_FW_RESET, &bp->state) &&
+ pci_is_enabled(bp->pdev))
+ pci_disable_device(bp->pdev);
+
bnxt_free_skbs(bp);
/* Save ring stats before shutdown */
@@ -8799,6 +9288,18 @@ int bnxt_close_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
{
int rc = 0;
+ if (test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) {
+ /* If we get here, it means firmware reset is in progress
+ * while we are trying to close. We can safely proceed with
+ * the close because we are holding rtnl_lock(). Some firmware
+ * messages may fail as we proceed to close. We set the
+ * ABORT_ERR flag here so that the FW reset thread will later
+ * abort when it gets the rtnl_lock() and sees the flag.
+ */
+ netdev_warn(bp->dev, "FW reset in progress during close, FW reset will be aborted\n");
+ set_bit(BNXT_STATE_ABORT_ERR, &bp->state);
+ }
+
#ifdef CONFIG_BNXT_SRIOV
if (bp->sriov_cfg) {
rc = wait_event_interruptible_timeout(bp->sriov_cfg_wait,
@@ -9306,7 +9807,8 @@ static int bnxt_set_features(struct net_device *dev, netdev_features_t features)
if (changes & BNXT_FLAG_TPA) {
update_tpa = true;
if ((bp->flags & BNXT_FLAG_TPA) == 0 ||
- (flags & BNXT_FLAG_TPA) == 0)
+ (flags & BNXT_FLAG_TPA) == 0 ||
+ (bp->flags & BNXT_FLAG_CHIP_P5))
re_init = true;
}
@@ -9316,9 +9818,8 @@ static int bnxt_set_features(struct net_device *dev, netdev_features_t features)
if (flags != bp->flags) {
u32 old_flags = bp->flags;
- bp->flags = flags;
-
if (!test_bit(BNXT_STATE_OPEN, &bp->state)) {
+ bp->flags = flags;
if (update_tpa)
bnxt_set_ring_params(bp);
return rc;
@@ -9326,12 +9827,14 @@ static int bnxt_set_features(struct net_device *dev, netdev_features_t features)
if (re_init) {
bnxt_close_nic(bp, false, false);
+ bp->flags = flags;
if (update_tpa)
bnxt_set_ring_params(bp);
return bnxt_open_nic(bp, false, false);
}
if (update_tpa) {
+ bp->flags = flags;
rc = bnxt_set_tpa(bp,
(flags & BNXT_FLAG_TPA) ?
true : false);
@@ -9438,6 +9941,38 @@ static void bnxt_tx_timeout(struct net_device *dev)
bnxt_queue_sp_work(bp);
}
+static void bnxt_fw_health_check(struct bnxt *bp)
+{
+ struct bnxt_fw_health *fw_health = bp->fw_health;
+ u32 val;
+
+ if (!fw_health || !fw_health->enabled ||
+ test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
+ return;
+
+ if (fw_health->tmr_counter) {
+ fw_health->tmr_counter--;
+ return;
+ }
+
+ val = bnxt_fw_health_readl(bp, BNXT_FW_HEARTBEAT_REG);
+ if (val == fw_health->last_fw_heartbeat)
+ goto fw_reset;
+
+ fw_health->last_fw_heartbeat = val;
+
+ val = bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG);
+ if (val != fw_health->last_fw_reset_cnt)
+ goto fw_reset;
+
+ fw_health->tmr_counter = fw_health->tmr_multiplier;
+ return;
+
+fw_reset:
+ set_bit(BNXT_FW_EXCEPTION_SP_EVENT, &bp->sp_event);
+ bnxt_queue_sp_work(bp);
+}
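
The health check divides the driver timer down with tmr_counter/tmr_multiplier, then applies two independent triggers: a heartbeat that stopped advancing, or a reset counter that moved without the driver's involvement. The core decision, extracted into a self-contained sketch (the two register values stand in for bnxt_fw_health_readl() reads):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    struct health { uint32_t last_hb, last_rst; };

    static bool needs_reset(struct health *h, uint32_t hb, uint32_t rst)
    {
        if (hb == h->last_hb)
            return true;            /* heartbeat stalled */
        h->last_hb = hb;
        if (rst != h->last_rst)
            return true;            /* firmware reset behind our back */
        return false;
    }

    int main(void)
    {
        struct health h = { .last_hb = 7, .last_rst = 1 };

        printf("%d\n", needs_reset(&h, 7, 1));  /* 1: heartbeat stalled */
        printf("%d\n", needs_reset(&h, 8, 2));  /* 1: reset count moved */
        return 0;
    }
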
+
static void bnxt_timer(struct timer_list *t)
{
struct bnxt *bp = from_timer(bp, t, timer);
@@ -9449,6 +9984,9 @@ static void bnxt_timer(struct timer_list *t)
if (atomic_read(&bp->intr_sem) != 0)
goto bnxt_restart_timer;
+ if (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY)
+ bnxt_fw_health_check(bp);
+
if (bp->link_info.link_up && (bp->flags & BNXT_FLAG_PORT_STATS) &&
bp->stats_coal_ticks) {
set_bit(BNXT_PERIODIC_STATS_SP_EVENT, &bp->sp_event);
@@ -9504,6 +10042,130 @@ static void bnxt_reset(struct bnxt *bp, bool silent)
bnxt_rtnl_unlock_sp(bp);
}
+static void bnxt_fw_reset_close(struct bnxt *bp)
+{
+ __bnxt_close_nic(bp, true, false);
+ bnxt_ulp_irq_stop(bp);
+ bnxt_clear_int_mode(bp);
+ bnxt_hwrm_func_drv_unrgtr(bp);
+ bnxt_free_ctx_mem(bp);
+ kfree(bp->ctx);
+ bp->ctx = NULL;
+}
+
+static bool is_bnxt_fw_ok(struct bnxt *bp)
+{
+ struct bnxt_fw_health *fw_health = bp->fw_health;
+ bool no_heartbeat = false, has_reset = false;
+ u32 val;
+
+ val = bnxt_fw_health_readl(bp, BNXT_FW_HEARTBEAT_REG);
+ if (val == fw_health->last_fw_heartbeat)
+ no_heartbeat = true;
+
+ val = bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG);
+ if (val != fw_health->last_fw_reset_cnt)
+ has_reset = true;
+
+ if (!no_heartbeat && has_reset)
+ return true;
+
+ return false;
+}
+
+/* rtnl_lock is acquired before calling this function */
+static void bnxt_force_fw_reset(struct bnxt *bp)
+{
+ struct bnxt_fw_health *fw_health = bp->fw_health;
+ u32 wait_dsecs;
+
+ if (!test_bit(BNXT_STATE_OPEN, &bp->state) ||
+ test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
+ return;
+
+ set_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
+ bnxt_fw_reset_close(bp);
+ wait_dsecs = fw_health->master_func_wait_dsecs;
+ if (fw_health->master) {
+ if (fw_health->flags & ERROR_RECOVERY_QCFG_RESP_FLAGS_CO_CPU)
+ wait_dsecs = 0;
+ bp->fw_reset_state = BNXT_FW_RESET_STATE_RESET_FW;
+ } else {
+ bp->fw_reset_timestamp = jiffies + wait_dsecs * HZ / 10;
+ wait_dsecs = fw_health->normal_func_wait_dsecs;
+ bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV;
+ }
+ bp->fw_reset_max_dsecs = fw_health->post_reset_max_wait_dsecs;
+ bnxt_queue_fw_reset_work(bp, wait_dsecs * HZ / 10);
+}
+
+void bnxt_fw_exception(struct bnxt *bp)
+{
+ set_bit(BNXT_STATE_FW_FATAL_COND, &bp->state);
+ bnxt_rtnl_lock_sp(bp);
+ bnxt_force_fw_reset(bp);
+ bnxt_rtnl_unlock_sp(bp);
+}
+
+/* Returns the number of registered VFs, or 1 if VF configuration is pending, or
+ * < 0 on error.
+ */
+static int bnxt_get_registered_vfs(struct bnxt *bp)
+{
+#ifdef CONFIG_BNXT_SRIOV
+ int rc;
+
+ if (!BNXT_PF(bp))
+ return 0;
+
+ rc = bnxt_hwrm_func_qcfg(bp);
+ if (rc) {
+ netdev_err(bp->dev, "func_qcfg cmd failed, rc = %d\n", rc);
+ return rc;
+ }
+ if (bp->pf.registered_vfs)
+ return bp->pf.registered_vfs;
+ if (bp->sriov_cfg)
+ return 1;
+#endif
+ return 0;
+}
+
+void bnxt_fw_reset(struct bnxt *bp)
+{
+ bnxt_rtnl_lock_sp(bp);
+ if (test_bit(BNXT_STATE_OPEN, &bp->state) &&
+ !test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) {
+ int n = 0;
+
+ set_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
+ if (bp->pf.active_vfs &&
+ !test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state))
+ n = bnxt_get_registered_vfs(bp);
+ if (n < 0) {
+ netdev_err(bp->dev, "Firmware reset aborted, rc = %d\n",
+ n);
+ clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
+ dev_close(bp->dev);
+ goto fw_reset_exit;
+ } else if (n > 0) {
+ u16 vf_tmo_dsecs = n * 10;
+
+ if (bp->fw_reset_max_dsecs < vf_tmo_dsecs)
+ bp->fw_reset_max_dsecs = vf_tmo_dsecs;
+ bp->fw_reset_state =
+ BNXT_FW_RESET_STATE_POLL_VF;
+ bnxt_queue_fw_reset_work(bp, HZ / 10);
+ goto fw_reset_exit;
+ }
+ bnxt_fw_reset_close(bp);
+ bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV;
+ bnxt_queue_fw_reset_work(bp, bp->fw_reset_min_dsecs * HZ / 10);
+ }
+fw_reset_exit:
+ bnxt_rtnl_unlock_sp(bp);
+}
+
static void bnxt_chk_missed_irq(struct bnxt *bp)
{
int i;
@@ -9634,6 +10296,15 @@ static void bnxt_sp_task(struct work_struct *work)
if (test_and_clear_bit(BNXT_RESET_TASK_SILENT_SP_EVENT, &bp->sp_event))
bnxt_reset(bp, true);
+ if (test_and_clear_bit(BNXT_FW_RESET_NOTIFY_SP_EVENT, &bp->sp_event))
+ bnxt_devlink_health_report(bp, BNXT_FW_RESET_NOTIFY_SP_EVENT);
+
+ if (test_and_clear_bit(BNXT_FW_EXCEPTION_SP_EVENT, &bp->sp_event)) {
+ if (!is_bnxt_fw_ok(bp))
+ bnxt_devlink_health_report(bp,
+ BNXT_FW_EXCEPTION_SP_EVENT);
+ }
+
smp_mb__before_atomic();
clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
}
@@ -9728,6 +10399,308 @@ static void bnxt_init_dflt_coal(struct bnxt *bp)
bp->stats_coal_ticks = BNXT_DEF_STATS_COAL_TICKS;
}
+static int bnxt_fw_init_one_p1(struct bnxt *bp)
+{
+ int rc;
+
+ bp->fw_cap = 0;
+ rc = bnxt_hwrm_ver_get(bp);
+ if (rc)
+ return rc;
+
+ if (bp->fw_cap & BNXT_FW_CAP_KONG_MB_CHNL) {
+ rc = bnxt_alloc_kong_hwrm_resources(bp);
+ if (rc)
+ bp->fw_cap &= ~BNXT_FW_CAP_KONG_MB_CHNL;
+ }
+
+ if ((bp->fw_cap & BNXT_FW_CAP_SHORT_CMD) ||
+ bp->hwrm_max_ext_req_len > BNXT_HWRM_MAX_REQ_LEN) {
+ rc = bnxt_alloc_hwrm_short_cmd_req(bp);
+ if (rc)
+ return rc;
+ }
+ rc = bnxt_hwrm_func_reset(bp);
+ if (rc)
+ return -ENODEV;
+
+ bnxt_hwrm_fw_set_time(bp);
+ return 0;
+}
+
+static int bnxt_fw_init_one_p2(struct bnxt *bp)
+{
+ int rc;
+
+ /* Get the MAX capabilities for this function */
+ rc = bnxt_hwrm_func_qcaps(bp);
+ if (rc) {
+ netdev_err(bp->dev, "hwrm query capability failure rc: %x\n",
+ rc);
+ return -ENODEV;
+ }
+
+ rc = bnxt_hwrm_cfa_adv_flow_mgnt_qcaps(bp);
+ if (rc)
+ netdev_warn(bp->dev, "hwrm query adv flow mgnt failure rc: %d\n",
+ rc);
+
+ rc = bnxt_hwrm_error_recovery_qcfg(bp);
+ if (rc)
+ netdev_warn(bp->dev, "hwrm query error recovery failure rc: %d\n",
+ rc);
+
+ rc = bnxt_hwrm_func_drv_rgtr(bp);
+ if (rc)
+ return -ENODEV;
+
+ rc = bnxt_hwrm_func_rgtr_async_events(bp, NULL, 0);
+ if (rc)
+ return -ENODEV;
+
+ bnxt_hwrm_func_qcfg(bp);
+ bnxt_hwrm_vnic_qcaps(bp);
+ bnxt_hwrm_port_led_qcaps(bp);
+ bnxt_ethtool_init(bp);
+ bnxt_dcb_init(bp);
+ return 0;
+}
+
+static void bnxt_set_dflt_rss_hash_type(struct bnxt *bp)
+{
+ bp->flags &= ~BNXT_FLAG_UDP_RSS_CAP;
+ bp->rss_hash_cfg = VNIC_RSS_CFG_REQ_HASH_TYPE_IPV4 |
+ VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV4 |
+ VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6 |
+ VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV6;
+ if (BNXT_CHIP_P4(bp) && bp->hwrm_spec_code >= 0x10501) {
+ bp->flags |= BNXT_FLAG_UDP_RSS_CAP;
+ bp->rss_hash_cfg |= VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV4 |
+ VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV6;
+ }
+}
+
+static void bnxt_set_dflt_rfs(struct bnxt *bp)
+{
+ struct net_device *dev = bp->dev;
+
+ dev->hw_features &= ~NETIF_F_NTUPLE;
+ dev->features &= ~NETIF_F_NTUPLE;
+ bp->flags &= ~BNXT_FLAG_RFS;
+ if (bnxt_rfs_supported(bp)) {
+ dev->hw_features |= NETIF_F_NTUPLE;
+ if (bnxt_rfs_capable(bp)) {
+ bp->flags |= BNXT_FLAG_RFS;
+ dev->features |= NETIF_F_NTUPLE;
+ }
+ }
+}
+
+static void bnxt_fw_init_one_p3(struct bnxt *bp)
+{
+ struct pci_dev *pdev = bp->pdev;
+
+ bnxt_set_dflt_rss_hash_type(bp);
+ bnxt_set_dflt_rfs(bp);
+
+ bnxt_get_wol_settings(bp);
+ if (bp->flags & BNXT_FLAG_WOL_CAP)
+ device_set_wakeup_enable(&pdev->dev, bp->wol);
+ else
+ device_set_wakeup_capable(&pdev->dev, false);
+
+ bnxt_hwrm_set_cache_line_size(bp, cache_line_size());
+ bnxt_hwrm_coal_params_qcaps(bp);
+}
+
+static int bnxt_fw_init_one(struct bnxt *bp)
+{
+ int rc;
+
+ rc = bnxt_fw_init_one_p1(bp);
+ if (rc) {
+ netdev_err(bp->dev, "Firmware init phase 1 failed\n");
+ return rc;
+ }
+ rc = bnxt_fw_init_one_p2(bp);
+ if (rc) {
+ netdev_err(bp->dev, "Firmware init phase 2 failed\n");
+ return rc;
+ }
+ rc = bnxt_approve_mac(bp, bp->dev->dev_addr, false);
+ if (rc)
+ return rc;
+ bnxt_fw_init_one_p3(bp);
+ return 0;
+}
+
+static void bnxt_fw_reset_writel(struct bnxt *bp, int reg_idx)
+{
+ struct bnxt_fw_health *fw_health = bp->fw_health;
+ u32 reg = fw_health->fw_reset_seq_regs[reg_idx];
+ u32 val = fw_health->fw_reset_seq_vals[reg_idx];
+ u32 reg_type, reg_off, delay_msecs;
+
+ delay_msecs = fw_health->fw_reset_seq_delay_msec[reg_idx];
+ reg_type = BNXT_FW_HEALTH_REG_TYPE(reg);
+ reg_off = BNXT_FW_HEALTH_REG_OFF(reg);
+ switch (reg_type) {
+ case BNXT_FW_HEALTH_REG_TYPE_CFG:
+ pci_write_config_dword(bp->pdev, reg_off, val);
+ break;
+ case BNXT_FW_HEALTH_REG_TYPE_GRC:
+ writel(reg_off & BNXT_GRC_BASE_MASK,
+ bp->bar0 + BNXT_GRCPF_REG_WINDOW_BASE_OUT + 4);
+ reg_off = (reg_off & BNXT_GRC_OFFSET_MASK) + 0x2000;
+ /* fall through */
+ case BNXT_FW_HEALTH_REG_TYPE_BAR0:
+ writel(val, bp->bar0 + reg_off);
+ break;
+ case BNXT_FW_HEALTH_REG_TYPE_BAR1:
+ writel(val, bp->bar1 + reg_off);
+ break;
+ }
+ if (delay_msecs) {
+ pci_read_config_dword(bp->pdev, 0, &val);
+ msleep(delay_msecs);
+ }
+}
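
The pci_read_config_dword() just before msleep() is not a stray read: it flushes any posted write in the reset sequence out to the device before the settle delay starts counting. The pattern, condensed into a kernel-style sketch (the helper name is invented, not a drop-in function):

    /* Sketch only: write a reset-sequence register, flush posted writes
     * with a harmless config-space read, then honor the firmware-specified
     * settle delay.
     */
    static void bnxt_write_and_settle(struct bnxt *bp, u32 bar_off, u32 val,
                                      u32 delay_msecs)
    {
        writel(val, bp->bar0 + bar_off);
        if (delay_msecs) {
            u32 dummy;

            pci_read_config_dword(bp->pdev, 0, &dummy);  /* read flush */
            msleep(delay_msecs);
        }
    }
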
+
+static void bnxt_reset_all(struct bnxt *bp)
+{
+ struct bnxt_fw_health *fw_health = bp->fw_health;
+ int i;
+
+ if (fw_health->flags & ERROR_RECOVERY_QCFG_RESP_FLAGS_HOST) {
+ for (i = 0; i < fw_health->fw_reset_seq_cnt; i++)
+ bnxt_fw_reset_writel(bp, i);
+ } else if (fw_health->flags & ERROR_RECOVERY_QCFG_RESP_FLAGS_CO_CPU) {
+ struct hwrm_fw_reset_input req = {0};
+ int rc;
+
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FW_RESET, -1, -1);
+ req.resp_addr = cpu_to_le64(bp->hwrm_cmd_kong_resp_dma_addr);
+ req.embedded_proc_type = FW_RESET_REQ_EMBEDDED_PROC_TYPE_CHIP;
+ req.selfrst_status = FW_RESET_REQ_SELFRST_STATUS_SELFRSTASAP;
+ req.flags = FW_RESET_REQ_FLAGS_RESET_GRACEFUL;
+ rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ if (rc)
+ netdev_warn(bp->dev, "Unable to reset FW rc=%d\n", rc);
+ }
+ bp->fw_reset_timestamp = jiffies;
+}
+
+static void bnxt_fw_reset_task(struct work_struct *work)
+{
+ struct bnxt *bp = container_of(work, struct bnxt, fw_reset_task.work);
+ int rc;
+
+ if (!test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) {
+ netdev_err(bp->dev, "bnxt_fw_reset_task() called when not in fw reset mode!\n");
+ return;
+ }
+
+ switch (bp->fw_reset_state) {
+ case BNXT_FW_RESET_STATE_POLL_VF: {
+ int n = bnxt_get_registered_vfs(bp);
+
+ if (n < 0) {
+ netdev_err(bp->dev, "Firmware reset aborted, subsequent func_qcfg cmd failed, rc = %d, %d msecs since reset timestamp\n",
+ n, jiffies_to_msecs(jiffies -
+ bp->fw_reset_timestamp));
+ goto fw_reset_abort;
+ } else if (n > 0) {
+ if (time_after(jiffies, bp->fw_reset_timestamp +
+ (bp->fw_reset_max_dsecs * HZ / 10))) {
+ clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
+ bp->fw_reset_state = 0;
+ netdev_err(bp->dev, "Firmware reset aborted, bnxt_get_registered_vfs() returns %d\n",
+ n);
+ return;
+ }
+ bnxt_queue_fw_reset_work(bp, HZ / 10);
+ return;
+ }
+ bp->fw_reset_timestamp = jiffies;
+ rtnl_lock();
+ bnxt_fw_reset_close(bp);
+ bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV;
+ rtnl_unlock();
+ bnxt_queue_fw_reset_work(bp, bp->fw_reset_min_dsecs * HZ / 10);
+ return;
+ }
+ case BNXT_FW_RESET_STATE_RESET_FW: {
+ u32 wait_dsecs = bp->fw_health->post_reset_wait_dsecs;
+
+ bnxt_reset_all(bp);
+ bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV;
+ bnxt_queue_fw_reset_work(bp, wait_dsecs * HZ / 10);
+ return;
+ }
+ case BNXT_FW_RESET_STATE_ENABLE_DEV:
+ if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state) &&
+ bp->fw_health) {
+ u32 val;
+
+ val = bnxt_fw_health_readl(bp,
+ BNXT_FW_RESET_INPROG_REG);
+ if (val)
+ netdev_warn(bp->dev, "FW reset inprog %x after min wait time.\n",
+ val);
+ }
+ clear_bit(BNXT_STATE_FW_FATAL_COND, &bp->state);
+ if (pci_enable_device(bp->pdev)) {
+ netdev_err(bp->dev, "Cannot re-enable PCI device\n");
+ goto fw_reset_abort;
+ }
+ pci_set_master(bp->pdev);
+ bp->fw_reset_state = BNXT_FW_RESET_STATE_POLL_FW;
+ /* fall through */
+ case BNXT_FW_RESET_STATE_POLL_FW:
+ bp->hwrm_cmd_timeout = SHORT_HWRM_CMD_TIMEOUT;
+ rc = __bnxt_hwrm_ver_get(bp, true);
+ if (rc) {
+ if (time_after(jiffies, bp->fw_reset_timestamp +
+ (bp->fw_reset_max_dsecs * HZ / 10))) {
+ netdev_err(bp->dev, "Firmware reset aborted\n");
+ goto fw_reset_abort;
+ }
+ bnxt_queue_fw_reset_work(bp, HZ / 5);
+ return;
+ }
+ bp->hwrm_cmd_timeout = DFLT_HWRM_CMD_TIMEOUT;
+ bp->fw_reset_state = BNXT_FW_RESET_STATE_OPENING;
+ /* fall through */
+ case BNXT_FW_RESET_STATE_OPENING:
+ while (!rtnl_trylock()) {
+ bnxt_queue_fw_reset_work(bp, HZ / 10);
+ return;
+ }
+ rc = bnxt_open(bp->dev);
+ if (rc) {
+ netdev_err(bp->dev, "bnxt_open_nic() failed\n");
+ clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
+ dev_close(bp->dev);
+ }
+ bnxt_ulp_irq_restart(bp, rc);
+ rtnl_unlock();
+
+ bp->fw_reset_state = 0;
+ /* Make sure fw_reset_state is 0 before clearing the flag */
+ smp_mb__before_atomic();
+ clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
+ break;
+ }
+ return;
+
+fw_reset_abort:
+ clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
+ bp->fw_reset_state = 0;
+ rtnl_lock();
+ dev_close(bp->dev);
+ rtnl_unlock();
+}
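
For orientation, the state machine above walks the following normal path; POLL_VF and RESET_FW are entered only in the PF-with-registered-VFs and master-function cases respectively. A compressed map (state names from the patch, comments ours):

    /* Normal progression of bp->fw_reset_state, as implemented above. */
    enum {
        POLL_VF = 1,    /* PF: wait for all VF drivers to unregister       */
        RESET_FW,       /* master function: poke the FW reset sequence     */
        ENABLE_DEV,     /* minimum wait elapsed: re-enable the PCI device  */
        POLL_FW,        /* poll __bnxt_hwrm_ver_get() with the short
                         * SHORT_HWRM_CMD_TIMEOUT until firmware answers   */
        OPENING,        /* retake rtnl_lock and reopen the netdev          */
    };
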
+
static int bnxt_init_board(struct pci_dev *pdev, struct net_device *dev)
{
int rc;
@@ -9790,6 +10763,7 @@ static int bnxt_init_board(struct pci_dev *pdev, struct net_device *dev)
pci_enable_pcie_error_reporting(pdev);
INIT_WORK(&bp->sp_task, bnxt_sp_task);
+ INIT_DELAYED_WORK(&bp->fw_reset_task, bnxt_fw_reset_task);
spin_lock_init(&bp->ntp_fltr_lock);
#if BITS_PER_LONG == 32
@@ -10333,7 +11307,7 @@ static void bnxt_remove_one(struct pci_dev *pdev)
free_netdev(dev);
}
-static int bnxt_probe_phy(struct bnxt *bp)
+static int bnxt_probe_phy(struct bnxt *bp, bool fw_dflt)
{
int rc = 0;
struct bnxt_link_info *link_info = &bp->link_info;
@@ -10344,8 +11318,6 @@ static int bnxt_probe_phy(struct bnxt *bp)
rc);
return rc;
}
- mutex_init(&bp->link_lock);
-
rc = bnxt_update_link(bp, false);
if (rc) {
netdev_err(bp->dev, "Probe phy can't update link (rc: %x)\n",
@@ -10359,6 +11331,9 @@ static int bnxt_probe_phy(struct bnxt *bp)
if (link_info->auto_link_speeds && !link_info->support_auto_speeds)
link_info->support_auto_speeds = link_info->support_speeds;
+ if (!fw_dflt)
+ return 0;
+
/* initialize the ethtool settings copy with NVM settings */
if (BNXT_AUTO_MODE(link_info->auto_mode)) {
link_info->autoneg = BNXT_AUTONEG_SPEED;
@@ -10379,7 +11354,7 @@ static int bnxt_probe_phy(struct bnxt *bp)
link_info->auto_pause_setting & BNXT_LINK_PAUSE_BOTH;
else
link_info->req_flow_ctrl = link_info->force_pause_setting;
- return rc;
+ return 0;
}
static int bnxt_get_max_irq(struct pci_dev *pdev)
@@ -10683,32 +11658,19 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
goto init_err_pci_clean;
mutex_init(&bp->hwrm_cmd_lock);
- rc = bnxt_hwrm_ver_get(bp);
+ mutex_init(&bp->link_lock);
+
+ rc = bnxt_fw_init_one_p1(bp);
if (rc)
goto init_err_pci_clean;
- if (bp->fw_cap & BNXT_FW_CAP_KONG_MB_CHNL) {
- rc = bnxt_alloc_kong_hwrm_resources(bp);
- if (rc)
- bp->fw_cap &= ~BNXT_FW_CAP_KONG_MB_CHNL;
- }
-
- if ((bp->fw_cap & BNXT_FW_CAP_SHORT_CMD) ||
- bp->hwrm_max_ext_req_len > BNXT_HWRM_MAX_REQ_LEN) {
- rc = bnxt_alloc_hwrm_short_cmd_req(bp);
- if (rc)
- goto init_err_pci_clean;
- }
-
if (BNXT_CHIP_P5(bp))
bp->flags |= BNXT_FLAG_CHIP_P5;
- rc = bnxt_hwrm_func_reset(bp);
+ rc = bnxt_fw_init_one_p2(bp);
if (rc)
goto init_err_pci_clean;
- bnxt_hwrm_fw_set_time(bp);
-
dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG |
NETIF_F_TSO | NETIF_F_TSO6 |
NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_GRE |
@@ -10746,41 +11708,14 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
bp->gro_func = bnxt_gro_func_5730x;
if (BNXT_CHIP_P4(bp))
bp->gro_func = bnxt_gro_func_5731x;
+ else if (BNXT_CHIP_P5(bp))
+ bp->gro_func = bnxt_gro_func_5750x;
}
if (!BNXT_CHIP_P4_PLUS(bp))
bp->flags |= BNXT_FLAG_DOUBLE_DB;
- rc = bnxt_hwrm_func_drv_rgtr(bp);
- if (rc)
- goto init_err_pci_clean;
-
- rc = bnxt_hwrm_func_rgtr_async_events(bp, NULL, 0);
- if (rc)
- goto init_err_pci_clean;
-
bp->ulp_probe = bnxt_ulp_probe;
- rc = bnxt_hwrm_queue_qportcfg(bp);
- if (rc) {
- netdev_err(bp->dev, "hwrm query qportcfg failure rc: %x\n",
- rc);
- rc = -1;
- goto init_err_pci_clean;
- }
- /* Get the MAX capabilities for this function */
- rc = bnxt_hwrm_func_qcaps(bp);
- if (rc) {
- netdev_err(bp->dev, "hwrm query capability failure rc: %x\n",
- rc);
- rc = -1;
- goto init_err_pci_clean;
- }
-
- rc = bnxt_hwrm_cfa_adv_flow_mgnt_qcaps(bp);
- if (rc)
- netdev_warn(bp->dev, "hwrm query adv flow mgnt failure rc: %d\n",
- rc);
-
rc = bnxt_init_mac_addr(bp);
if (rc) {
dev_err(&pdev->dev, "Unable to initialize mac address.\n");
@@ -10794,17 +11729,12 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
if (rc)
goto init_err_pci_clean;
}
- bnxt_hwrm_func_qcfg(bp);
- bnxt_hwrm_vnic_qcaps(bp);
- bnxt_hwrm_port_led_qcaps(bp);
- bnxt_ethtool_init(bp);
- bnxt_dcb_init(bp);
/* MTU range: 60 - FW defined max */
dev->min_mtu = ETH_ZLEN;
dev->max_mtu = bp->max_mtu;
- rc = bnxt_probe_phy(bp);
+ rc = bnxt_probe_phy(bp, true);
if (rc)
goto init_err_pci_clean;
@@ -10818,24 +11748,7 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
goto init_err_pci_clean;
}
- /* Default RSS hash cfg. */
- bp->rss_hash_cfg = VNIC_RSS_CFG_REQ_HASH_TYPE_IPV4 |
- VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV4 |
- VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6 |
- VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV6;
- if (BNXT_CHIP_P4(bp) && bp->hwrm_spec_code >= 0x10501) {
- bp->flags |= BNXT_FLAG_UDP_RSS_CAP;
- bp->rss_hash_cfg |= VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV4 |
- VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV6;
- }
-
- if (bnxt_rfs_supported(bp)) {
- dev->hw_features |= NETIF_F_NTUPLE;
- if (bnxt_rfs_capable(bp)) {
- bp->flags |= BNXT_FLAG_RFS;
- dev->features |= NETIF_F_NTUPLE;
- }
- }
+ bnxt_fw_init_one_p3(bp);
if (dev->hw_features & NETIF_F_HW_VLAN_CTAG_RX)
bp->flags |= BNXT_FLAG_STRIP_VLAN;
@@ -10849,16 +11762,6 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
*/
bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
- bnxt_get_wol_settings(bp);
- if (bp->flags & BNXT_FLAG_WOL_CAP)
- device_set_wakeup_enable(&pdev->dev, bp->wol);
- else
- device_set_wakeup_capable(&pdev->dev, false);
-
- bnxt_hwrm_set_cache_line_size(bp, cache_line_size());
-
- bnxt_hwrm_coal_params_qcaps(bp);
-
if (BNXT_PF(bp)) {
if (!bnxt_pf_wq) {
bnxt_pf_wq =
@@ -10895,6 +11798,8 @@ init_err_pci_clean:
bnxt_free_ctx_mem(bp);
kfree(bp->ctx);
bp->ctx = NULL;
+ kfree(bp->fw_health);
+ bp->fw_health = NULL;
bnxt_cleanup_pci(bp);
init_err_free:
@@ -10934,8 +11839,7 @@ shutdown_exit:
#ifdef CONFIG_PM_SLEEP
static int bnxt_suspend(struct device *device)
{
- struct pci_dev *pdev = to_pci_dev(device);
- struct net_device *dev = pci_get_drvdata(pdev);
+ struct net_device *dev = dev_get_drvdata(device);
struct bnxt *bp = netdev_priv(dev);
int rc = 0;
@@ -10951,8 +11855,7 @@ static int bnxt_suspend(struct device *device)
static int bnxt_resume(struct device *device)
{
- struct pci_dev *pdev = to_pci_dev(device);
- struct net_device *dev = pci_get_drvdata(pdev);
+ struct net_device *dev = dev_get_drvdata(device);
struct bnxt *bp = netdev_priv(dev);
int rc = 0;
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
index 16694b704d15..333b0a85a7fd 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
@@ -113,6 +113,7 @@ struct tx_cmp {
#define CMP_TYPE_RX_AGG_CMP 18
#define CMP_TYPE_RX_L2_TPA_START_CMP 19
#define CMP_TYPE_RX_L2_TPA_END_CMP 21
+ #define CMP_TYPE_RX_TPA_AGG_CMP 22
#define CMP_TYPE_STATUS_CMP 32
#define CMP_TYPE_REMOTE_DRIVER_REQ 34
#define CMP_TYPE_REMOTE_DRIVER_RESP 36
@@ -263,14 +264,21 @@ struct rx_agg_cmp {
u32 rx_agg_cmp_opaque;
__le32 rx_agg_cmp_v;
#define RX_AGG_CMP_V (1 << 0)
+ #define RX_AGG_CMP_AGG_ID (0xffff << 16)
+ #define RX_AGG_CMP_AGG_ID_SHIFT 16
__le32 rx_agg_cmp_unused;
};
+#define TPA_AGG_AGG_ID(rx_agg) \
+ ((le32_to_cpu((rx_agg)->rx_agg_cmp_v) & \
+ RX_AGG_CMP_AGG_ID) >> RX_AGG_CMP_AGG_ID_SHIFT)
+
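
These extraction macros all follow one pattern: endian-convert, mask, shift. A user-space rendering of TPA_AGG_AGG_ID, with le32_to_cpu() elided on the assumption of a little-endian host and a U suffix added so the 16-bit mask shift stays well-defined:

    #include <stdint.h>
    #include <stdio.h>

    #define RX_AGG_CMP_AGG_ID       (0xffffu << 16)
    #define RX_AGG_CMP_AGG_ID_SHIFT 16

    int main(void)
    {
        uint32_t rx_agg_cmp_v = (42u << 16) | 1;    /* agg ID 42, V bit set */

        printf("agg_id = %u\n",
               (rx_agg_cmp_v & RX_AGG_CMP_AGG_ID) >> RX_AGG_CMP_AGG_ID_SHIFT);
        return 0;
    }
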
struct rx_tpa_start_cmp {
__le32 rx_tpa_start_cmp_len_flags_type;
#define RX_TPA_START_CMP_TYPE (0x3f << 0)
#define RX_TPA_START_CMP_FLAGS (0x3ff << 6)
#define RX_TPA_START_CMP_FLAGS_SHIFT 6
+ #define RX_TPA_START_CMP_FLAGS_ERROR (0x1 << 6)
#define RX_TPA_START_CMP_FLAGS_PLACEMENT (0x7 << 7)
#define RX_TPA_START_CMP_FLAGS_PLACEMENT_SHIFT 7
#define RX_TPA_START_CMP_FLAGS_PLACEMENT_JUMBO (0x1 << 7)
@@ -278,6 +286,7 @@ struct rx_tpa_start_cmp {
#define RX_TPA_START_CMP_FLAGS_PLACEMENT_GRO_JUMBO (0x5 << 7)
#define RX_TPA_START_CMP_FLAGS_PLACEMENT_GRO_HDS (0x6 << 7)
#define RX_TPA_START_CMP_FLAGS_RSS_VALID (0x1 << 10)
+ #define RX_TPA_START_CMP_FLAGS_TIMESTAMP (0x1 << 11)
#define RX_TPA_START_CMP_FLAGS_ITYPES (0xf << 12)
#define RX_TPA_START_CMP_FLAGS_ITYPES_SHIFT 12
#define RX_TPA_START_CMP_FLAGS_ITYPE_TCP (0x2 << 12)
@@ -291,6 +300,8 @@ struct rx_tpa_start_cmp {
#define RX_TPA_START_CMP_RSS_HASH_TYPE_SHIFT 9
#define RX_TPA_START_CMP_AGG_ID (0x7f << 25)
#define RX_TPA_START_CMP_AGG_ID_SHIFT 25
+ #define RX_TPA_START_CMP_AGG_ID_P5 (0xffff << 16)
+ #define RX_TPA_START_CMP_AGG_ID_SHIFT_P5 16
__le32 rx_tpa_start_cmp_rss_hash;
};
@@ -308,6 +319,14 @@ struct rx_tpa_start_cmp {
((le32_to_cpu((rx_tpa_start)->rx_tpa_start_cmp_misc_v1) & \
RX_TPA_START_CMP_AGG_ID) >> RX_TPA_START_CMP_AGG_ID_SHIFT)
+#define TPA_START_AGG_ID_P5(rx_tpa_start) \
+ ((le32_to_cpu((rx_tpa_start)->rx_tpa_start_cmp_misc_v1) & \
+ RX_TPA_START_CMP_AGG_ID_P5) >> RX_TPA_START_CMP_AGG_ID_SHIFT_P5)
+
+#define TPA_START_ERROR(rx_tpa_start) \
+ ((rx_tpa_start)->rx_tpa_start_cmp_len_flags_type & \
+ cpu_to_le32(RX_TPA_START_CMP_FLAGS_ERROR))
+
struct rx_tpa_start_cmp_ext {
__le32 rx_tpa_start_cmp_flags2;
#define RX_TPA_START_CMP_FLAGS2_IP_CS_CALC (0x1 << 0)
@@ -315,10 +334,20 @@ struct rx_tpa_start_cmp_ext {
#define RX_TPA_START_CMP_FLAGS2_T_IP_CS_CALC (0x1 << 2)
#define RX_TPA_START_CMP_FLAGS2_T_L4_CS_CALC (0x1 << 3)
#define RX_TPA_START_CMP_FLAGS2_IP_TYPE (0x1 << 8)
+ #define RX_TPA_START_CMP_FLAGS2_CSUM_CMPL_VALID (0x1 << 9)
+ #define RX_TPA_START_CMP_FLAGS2_EXT_META_FORMAT (0x3 << 10)
+ #define RX_TPA_START_CMP_FLAGS2_EXT_META_FORMAT_SHIFT 10
+ #define RX_TPA_START_CMP_FLAGS2_CSUM_CMPL (0xffff << 16)
+ #define RX_TPA_START_CMP_FLAGS2_CSUM_CMPL_SHIFT 16
__le32 rx_tpa_start_cmp_metadata;
__le32 rx_tpa_start_cmp_cfa_code_v2;
#define RX_TPA_START_CMP_V2 (0x1 << 0)
+ #define RX_TPA_START_CMP_ERRORS_BUFFER_ERROR_MASK (0x7 << 1)
+ #define RX_TPA_START_CMP_ERRORS_BUFFER_ERROR_SHIFT 1
+ #define RX_TPA_START_CMP_ERRORS_BUFFER_ERROR_NO_BUFFER (0x0 << 1)
+ #define RX_TPA_START_CMP_ERRORS_BUFFER_ERROR_BAD_FORMAT (0x3 << 1)
+ #define RX_TPA_START_CMP_ERRORS_BUFFER_ERROR_FLUSH (0x5 << 1)
#define RX_TPA_START_CMP_CFA_CODE (0xffff << 16)
#define RX_TPA_START_CMPL_CFA_CODE_SHIFT 16
__le32 rx_tpa_start_cmp_hdr_info;
@@ -332,6 +361,11 @@ struct rx_tpa_start_cmp_ext {
(!!((rx_tpa_start)->rx_tpa_start_cmp_flags2 & \
cpu_to_le32(RX_TPA_START_CMP_FLAGS2_IP_TYPE)))
+#define TPA_START_ERROR_CODE(rx_tpa_start) \
+ ((le32_to_cpu((rx_tpa_start)->rx_tpa_start_cmp_cfa_code_v2) & \
+ RX_TPA_START_CMP_ERRORS_BUFFER_ERROR_MASK) >> \
+ RX_TPA_START_CMP_ERRORS_BUFFER_ERROR_SHIFT)
+
struct rx_tpa_end_cmp {
__le32 rx_tpa_end_cmp_len_flags_type;
#define RX_TPA_END_CMP_TYPE (0x3f << 0)
@@ -361,6 +395,8 @@ struct rx_tpa_end_cmp {
#define RX_TPA_END_CMP_PAYLOAD_OFFSET_SHIFT 16
#define RX_TPA_END_CMP_AGG_ID (0x7f << 25)
#define RX_TPA_END_CMP_AGG_ID_SHIFT 25
+ #define RX_TPA_END_CMP_AGG_ID_P5 (0xffff << 16)
+ #define RX_TPA_END_CMP_AGG_ID_SHIFT_P5 16
__le32 rx_tpa_end_cmp_tsdelta;
#define RX_TPA_END_GRO_TS (0x1 << 31)
@@ -370,6 +406,18 @@ struct rx_tpa_end_cmp {
((le32_to_cpu((rx_tpa_end)->rx_tpa_end_cmp_misc_v1) & \
RX_TPA_END_CMP_AGG_ID) >> RX_TPA_END_CMP_AGG_ID_SHIFT)
+#define TPA_END_AGG_ID_P5(rx_tpa_end) \
+ ((le32_to_cpu((rx_tpa_end)->rx_tpa_end_cmp_misc_v1) & \
+ RX_TPA_END_CMP_AGG_ID_P5) >> RX_TPA_END_CMP_AGG_ID_SHIFT_P5)
+
+#define TPA_END_PAYLOAD_OFF(rx_tpa_end) \
+ ((le32_to_cpu((rx_tpa_end)->rx_tpa_end_cmp_misc_v1) & \
+ RX_TPA_END_CMP_PAYLOAD_OFFSET) >> RX_TPA_END_CMP_PAYLOAD_OFFSET_SHIFT)
+
+#define TPA_END_AGG_BUFS(rx_tpa_end) \
+ ((le32_to_cpu((rx_tpa_end)->rx_tpa_end_cmp_misc_v1) & \
+ RX_TPA_END_CMP_AGG_BUFS) >> RX_TPA_END_CMP_AGG_BUFS_SHIFT)
+
#define TPA_END_TPA_SEGS(rx_tpa_end) \
((le32_to_cpu((rx_tpa_end)->rx_tpa_end_cmp_misc_v1) & \
RX_TPA_END_CMP_TPA_SEGS) >> RX_TPA_END_CMP_TPA_SEGS_SHIFT)
@@ -389,6 +437,10 @@ struct rx_tpa_end_cmp {
struct rx_tpa_end_cmp_ext {
__le32 rx_tpa_end_cmp_dup_acks;
#define RX_TPA_END_CMP_TPA_DUP_ACKS (0xf << 0)
+ #define RX_TPA_END_CMP_PAYLOAD_OFFSET_P5 (0xff << 16)
+ #define RX_TPA_END_CMP_PAYLOAD_OFFSET_SHIFT_P5 16
+ #define RX_TPA_END_CMP_AGG_BUFS_P5 (0xff << 24)
+ #define RX_TPA_END_CMP_AGG_BUFS_SHIFT_P5 24
__le32 rx_tpa_end_cmp_seg_len;
#define RX_TPA_END_CMP_TPA_SEG_LEN (0xffff << 0)
@@ -396,7 +448,13 @@ struct rx_tpa_end_cmp_ext {
__le32 rx_tpa_end_cmp_errors_v2;
#define RX_TPA_END_CMP_V2 (0x1 << 0)
#define RX_TPA_END_CMP_ERRORS (0x3 << 1)
+ #define RX_TPA_END_CMP_ERRORS_P5 (0x7 << 1)
#define RX_TPA_END_CMPL_ERRORS_SHIFT 1
+ #define RX_TPA_END_CMP_ERRORS_BUFFER_ERROR_NO_BUFFER (0x0 << 1)
+ #define RX_TPA_END_CMP_ERRORS_BUFFER_ERROR_NOT_ON_CHIP (0x2 << 1)
+ #define RX_TPA_END_CMP_ERRORS_BUFFER_ERROR_BAD_FORMAT (0x3 << 1)
+ #define RX_TPA_END_CMP_ERRORS_BUFFER_ERROR_RSV_ERROR (0x4 << 1)
+ #define RX_TPA_END_CMP_ERRORS_BUFFER_ERROR_FLUSH (0x5 << 1)
u32 rx_tpa_end_cmp_start_opaque;
};
@@ -405,6 +463,28 @@ struct rx_tpa_end_cmp_ext {
((rx_tpa_end_ext)->rx_tpa_end_cmp_errors_v2 & \
cpu_to_le32(RX_TPA_END_CMP_ERRORS))
+#define TPA_END_PAYLOAD_OFF_P5(rx_tpa_end_ext) \
+ ((le32_to_cpu((rx_tpa_end_ext)->rx_tpa_end_cmp_dup_acks) & \
+ RX_TPA_END_CMP_PAYLOAD_OFFSET_P5) >> \
+ RX_TPA_END_CMP_PAYLOAD_OFFSET_SHIFT_P5)
+
+#define TPA_END_AGG_BUFS_P5(rx_tpa_end_ext) \
+ ((le32_to_cpu((rx_tpa_end_ext)->rx_tpa_end_cmp_dup_acks) & \
+ RX_TPA_END_CMP_AGG_BUFS_P5) >> RX_TPA_END_CMP_AGG_BUFS_SHIFT_P5)
+
+#define EVENT_DATA1_RESET_NOTIFY_FATAL(data1) \
+ (((data1) & \
+ ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_REASON_CODE_MASK) ==\
+ ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_REASON_CODE_FW_EXCEPTION_FATAL)
+
+#define EVENT_DATA1_RECOVERY_MASTER_FUNC(data1) \
+ !!((data1) & \
+ ASYNC_EVENT_CMPL_ERROR_RECOVERY_EVENT_DATA1_FLAGS_MASTER_FUNC)
+
+#define EVENT_DATA1_RECOVERY_ENABLED(data1) \
+ !!((data1) & \
+ ASYNC_EVENT_CMPL_ERROR_RECOVERY_EVENT_DATA1_FLAGS_RECOVERY_ENABLED)
+
struct nqe_cn {
__le16 type;
#define NQ_CN_TYPE_MASK 0x3fUL
@@ -487,6 +567,9 @@ struct nqe_cn {
#define BNXT_DEFAULT_TX_RING_SIZE 511
#define MAX_TPA 64
+#define MAX_TPA_P5 256
+#define MAX_TPA_P5_MASK (MAX_TPA_P5 - 1)
+#define MAX_TPA_SEGS_P5 0x3f
#if (BNXT_PAGE_SHIFT == 16)
#define MAX_RX_PAGES 1
@@ -562,6 +645,7 @@ struct nqe_cn {
#define BNXT_HWRM_MAX_REQ_LEN (bp->hwrm_max_req_len)
#define BNXT_HWRM_SHORT_REQ_LEN sizeof(struct hwrm_short_input)
#define DFLT_HWRM_CMD_TIMEOUT 500
+#define SHORT_HWRM_CMD_TIMEOUT 20
#define HWRM_CMD_TIMEOUT (bp->hwrm_cmd_timeout)
#define HWRM_RESET_TIMEOUT ((HWRM_CMD_TIMEOUT) * 4)
#define HWRM_RESP_ERR_CODE_MASK 0xffff
@@ -768,6 +852,15 @@ struct bnxt_tpa_info {
((hdr_info) & 0x1ff)
u16 cfa_code; /* cfa_code in TPA start compl */
+ u8 agg_count;
+ struct rx_agg_cmp *agg_arr;
+};
+
+#define BNXT_AGG_IDX_BMAP_SIZE (MAX_TPA_P5 / BITS_PER_LONG)
+
+struct bnxt_tpa_idx_map {
+ u16 agg_id_tbl[1024];
+ unsigned long agg_idx_bmap[BNXT_AGG_IDX_BMAP_SIZE];
};
struct bnxt_rx_ring_info {
@@ -797,6 +890,7 @@ struct bnxt_rx_ring_info {
dma_addr_t rx_agg_desc_mapping[MAX_RX_AGG_PAGES];
struct bnxt_tpa_info *rx_tpa;
+ struct bnxt_tpa_idx_map *rx_tpa_idx_map;
struct bnxt_ring_struct rx_ring_struct;
struct bnxt_ring_struct rx_agg_ring_struct;
@@ -978,6 +1072,7 @@ struct bnxt_pf_info {
u8 mac_addr[ETH_ALEN];
u32 first_vf_id;
u16 active_vfs;
+ u16 registered_vfs;
u16 max_vfs;
u32 max_encap_records;
u32 max_decap_records;
@@ -1137,6 +1232,9 @@ struct bnxt_test_info {
#define BNXT_GRCPF_REG_KONG_COMM 0xA00
#define BNXT_GRCPF_REG_KONG_COMM_TRIGGER 0xB00
+#define BNXT_GRC_BASE_MASK 0xfffff000
+#define BNXT_GRC_OFFSET_MASK 0x00000ffc
+
struct bnxt_tc_flow_stats {
u64 packets;
u64 bytes;
@@ -1253,6 +1351,53 @@ struct bnxt_ctx_mem_info {
struct bnxt_ctx_pg_info *tqm_mem[9];
};
+struct bnxt_fw_health {
+ u32 flags;
+ u32 polling_dsecs;
+ u32 master_func_wait_dsecs;
+ u32 normal_func_wait_dsecs;
+ u32 post_reset_wait_dsecs;
+ u32 post_reset_max_wait_dsecs;
+ u32 regs[4];
+ u32 mapped_regs[4];
+#define BNXT_FW_HEALTH_REG 0
+#define BNXT_FW_HEARTBEAT_REG 1
+#define BNXT_FW_RESET_CNT_REG 2
+#define BNXT_FW_RESET_INPROG_REG 3
+ u32 fw_reset_inprog_reg_mask;
+ u32 last_fw_heartbeat;
+ u32 last_fw_reset_cnt;
+ u8 enabled:1;
+ u8 master:1;
+ u8 tmr_multiplier;
+ u8 tmr_counter;
+ u8 fw_reset_seq_cnt;
+ u32 fw_reset_seq_regs[16];
+ u32 fw_reset_seq_vals[16];
+ u32 fw_reset_seq_delay_msec[16];
+ struct devlink_health_reporter *fw_reporter;
+ struct devlink_health_reporter *fw_reset_reporter;
+ struct devlink_health_reporter *fw_fatal_reporter;
+};
+
+struct bnxt_fw_reporter_ctx {
+ unsigned long sp_event;
+};
+
+#define BNXT_FW_HEALTH_REG_TYPE_MASK 3
+#define BNXT_FW_HEALTH_REG_TYPE_CFG 0
+#define BNXT_FW_HEALTH_REG_TYPE_GRC 1
+#define BNXT_FW_HEALTH_REG_TYPE_BAR0 2
+#define BNXT_FW_HEALTH_REG_TYPE_BAR1 3
+
+#define BNXT_FW_HEALTH_REG_TYPE(reg) ((reg) & BNXT_FW_HEALTH_REG_TYPE_MASK)
+#define BNXT_FW_HEALTH_REG_OFF(reg) ((reg) & ~BNXT_FW_HEALTH_REG_TYPE_MASK)
+
+#define BNXT_FW_HEALTH_WIN_BASE 0x3000
+#define BNXT_FW_HEALTH_WIN_MAP_OFF 8
+
+#define BNXT_FW_STATUS_HEALTHY 0x8000
+
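
bnxt_fw_health_readl() itself is declared at the bottom of this header but its body is outside these hunks; given the type/offset encoding above, its shape is roughly the following (a sketch inferred from the defines, not the driver's exact code):

    u32 bnxt_fw_health_readl(struct bnxt *bp, int reg_idx)
    {
        struct bnxt_fw_health *fw_health = bp->fw_health;
        u32 reg = fw_health->regs[reg_idx];
        u32 reg_off = BNXT_FW_HEALTH_REG_OFF(reg);
        u32 val = 0;

        switch (BNXT_FW_HEALTH_REG_TYPE(reg)) {
        case BNXT_FW_HEALTH_REG_TYPE_CFG:
            pci_read_config_dword(bp->pdev, reg_off, &val);
            break;
        case BNXT_FW_HEALTH_REG_TYPE_GRC:
            reg_off = fw_health->mapped_regs[reg_idx]; /* pre-mapped window */
            /* fall through */
        case BNXT_FW_HEALTH_REG_TYPE_BAR0:
            val = readl(bp->bar0 + reg_off);
            break;
        case BNXT_FW_HEALTH_REG_TYPE_BAR1:
            val = readl(bp->bar1 + reg_off);
            break;
        }
        return val;
    }
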
struct bnxt {
void __iomem *bar0;
void __iomem *bar1;
@@ -1282,7 +1427,9 @@ struct bnxt {
#define CHIP_NUM_5745X 0xd730
-#define CHIP_NUM_57500 0x1750
+#define CHIP_NUM_57508 0x1750
+#define CHIP_NUM_57504 0x1751
+#define CHIP_NUM_57502 0x1752
#define CHIP_NUM_58802 0xd802
#define CHIP_NUM_58804 0xd804
@@ -1379,12 +1526,14 @@ struct bnxt {
#define BNXT_CHIP_TYPE_NITRO_A0(bp) ((bp)->flags & BNXT_FLAG_CHIP_NITRO_A0)
#define BNXT_RX_PAGE_MODE(bp) ((bp)->flags & BNXT_FLAG_RX_PAGE_MODE)
#define BNXT_SUPPORTS_TPA(bp) (!BNXT_CHIP_TYPE_NITRO_A0(bp) && \
- !(bp->flags & BNXT_FLAG_CHIP_P5) && \
- !is_kdump_kernel())
+ (!((bp)->flags & BNXT_FLAG_CHIP_P5) || \
+ (bp)->max_tpa_v2) && !is_kdump_kernel())
/* Chip class phase 5 */
#define BNXT_CHIP_P5(bp) \
- ((bp)->chip_num == CHIP_NUM_57500)
+ ((bp)->chip_num == CHIP_NUM_57508 || \
+ (bp)->chip_num == CHIP_NUM_57504 || \
+ (bp)->chip_num == CHIP_NUM_57502)
/* Chip class phase 4.x */
#define BNXT_CHIP_P4(bp) \
@@ -1414,6 +1563,8 @@ struct bnxt {
u16, void *, u8 *, dma_addr_t,
unsigned int);
+ u16 max_tpa_v2;
+ u16 max_tpa;
u32 rx_buf_size;
u32 rx_buf_use_size; /* useable size */
u16 rx_offset;
@@ -1469,6 +1620,10 @@ struct bnxt {
#define BNXT_STATE_OPEN 0
#define BNXT_STATE_IN_SP_TASK 1
#define BNXT_STATE_READ_STATS 2
+#define BNXT_STATE_FW_RESET_DET 3
+#define BNXT_STATE_IN_FW_RESET 4
+#define BNXT_STATE_ABORT_ERR 5
+#define BNXT_STATE_FW_FATAL_COND 6
struct bnxt_irq *irq_tbl;
int total_irqs;
@@ -1493,6 +1648,7 @@ struct bnxt {
#define BNXT_FW_CAP_KONG_MB_CHNL 0x00000080
#define BNXT_FW_CAP_OVS_64BIT_HANDLE 0x00000400
#define BNXT_FW_CAP_TRUSTED_VF 0x00000800
+ #define BNXT_FW_CAP_ERROR_RECOVERY 0x00002000
#define BNXT_FW_CAP_PKG_VER 0x00004000
#define BNXT_FW_CAP_CFA_ADV_FLOW 0x00008000
#define BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX 0x00010000
@@ -1525,6 +1681,7 @@ struct bnxt {
int hw_port_stats_size;
u16 fw_rx_stats_ext_size;
u16 fw_tx_stats_ext_size;
+ u16 hw_ring_stats_size;
u8 pri2cos[8];
u8 pri2cos_valid;
@@ -1576,6 +1733,24 @@ struct bnxt {
#define BNXT_FLOW_STATS_SP_EVENT 15
#define BNXT_UPDATE_PHY_SP_EVENT 16
#define BNXT_RING_COAL_NOW_SP_EVENT 17
+#define BNXT_FW_RESET_NOTIFY_SP_EVENT 18
+#define BNXT_FW_EXCEPTION_SP_EVENT 19
+
+ struct delayed_work fw_reset_task;
+ int fw_reset_state;
+#define BNXT_FW_RESET_STATE_POLL_VF 1
+#define BNXT_FW_RESET_STATE_RESET_FW 2
+#define BNXT_FW_RESET_STATE_ENABLE_DEV 3
+#define BNXT_FW_RESET_STATE_POLL_FW 4
+#define BNXT_FW_RESET_STATE_OPENING 5
+
+ u16 fw_reset_min_dsecs;
+#define BNXT_DFLT_FW_RST_MIN_DSECS 20
+ u16 fw_reset_max_dsecs;
+#define BNXT_DFLT_FW_RST_MAX_DSECS 60
+ unsigned long fw_reset_timestamp;
+
+ struct bnxt_fw_health *fw_health;
struct bnxt_hw_resc hw_resc;
struct bnxt_pf_info pf;
@@ -1637,7 +1812,6 @@ struct bnxt {
u8 switch_id[8];
struct bnxt_tc_info *tc_info;
struct dentry *debugfs_pdev;
- struct dentry *debugfs_dim;
struct device *hwmon_dev;
};
@@ -1782,6 +1956,7 @@ extern const u16 bnxt_lhint_arr[];
int bnxt_alloc_rx_data(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
u16 prod, gfp_t gfp);
void bnxt_reuse_rx_data(struct bnxt_rx_ring_info *rxr, u16 cons, void *data);
+u32 bnxt_fw_health_readl(struct bnxt *bp, int reg_idx);
void bnxt_set_tpa_flags(struct bnxt *bp);
void bnxt_set_ring_params(struct bnxt *);
int bnxt_set_rx_skb_mode(struct bnxt *bp, bool page_mode);
@@ -1814,6 +1989,8 @@ int bnxt_open_nic(struct bnxt *, bool, bool);
int bnxt_half_open_nic(struct bnxt *bp);
void bnxt_half_close_nic(struct bnxt *bp);
int bnxt_close_nic(struct bnxt *, bool, bool);
+void bnxt_fw_exception(struct bnxt *bp);
+void bnxt_fw_reset(struct bnxt *bp);
int bnxt_check_rings(struct bnxt *bp, int tx, int rx, bool sh, int tcs,
int tx_xdp);
int bnxt_setup_mq_tc(struct net_device *dev, u8 tc);
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c
index 07301cb87c03..fb6f30d0d1d0 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c
@@ -377,8 +377,6 @@ static int bnxt_hwrm_set_dcbx_app(struct bnxt *bp, struct dcb_app *app,
set.data_len = cpu_to_le16(sizeof(*data) + sizeof(*fw_app) * n);
set.hdr_cnt = 1;
rc = hwrm_send_message(bp, &set, sizeof(set), HWRM_CMD_TIMEOUT);
- if (rc)
- rc = -EIO;
set_app_exit:
dma_free_coherent(&bp->pdev->dev, data_len, data, mapping);
@@ -391,6 +389,7 @@ static int bnxt_hwrm_queue_dscp_qcaps(struct bnxt *bp)
struct hwrm_queue_dscp_qcaps_input req = {0};
int rc;
+ bp->max_dscp_value = 0;
if (bp->hwrm_spec_code < 0x10800 || BNXT_VF(bp))
return 0;
@@ -433,8 +432,6 @@ static int bnxt_hwrm_queue_dscp2pri_cfg(struct bnxt *bp, struct dcb_app *app,
dscp2pri->pri = app->priority;
req.entry_cnt = cpu_to_le16(1);
rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
- if (rc)
- rc = -EIO;
dma_free_coherent(&bp->pdev->dev, sizeof(*dscp2pri), dscp2pri,
mapping);
return rc;
@@ -722,6 +719,7 @@ static const struct dcbnl_rtnl_ops dcbnl_ops = {
void bnxt_dcb_init(struct bnxt *bp)
{
+ bp->dcbx_cap = 0;
if (bp->hwrm_spec_code < 0x10501)
return;
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_debugfs.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_debugfs.c
index 61393f351a77..156c2404854f 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_debugfs.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_debugfs.c
@@ -61,45 +61,30 @@ static const struct file_operations debugfs_dim_fops = {
.read = debugfs_dim_read,
};
-static struct dentry *debugfs_dim_ring_init(struct dim *dim, int ring_idx,
- struct dentry *dd)
+static void debugfs_dim_ring_init(struct dim *dim, int ring_idx,
+ struct dentry *dd)
{
static char qname[16];
snprintf(qname, 10, "%d", ring_idx);
- return debugfs_create_file(qname, 0600, dd,
- dim, &debugfs_dim_fops);
+ debugfs_create_file(qname, 0600, dd, dim, &debugfs_dim_fops);
}
void bnxt_debug_dev_init(struct bnxt *bp)
{
const char *pname = pci_name(bp->pdev);
- struct dentry *pdevf;
+ struct dentry *dir;
int i;
bp->debugfs_pdev = debugfs_create_dir(pname, bnxt_debug_mnt);
- if (bp->debugfs_pdev) {
- pdevf = debugfs_create_dir("dim", bp->debugfs_pdev);
- if (!pdevf) {
- pr_err("failed to create debugfs entry %s/dim\n",
- pname);
- return;
- }
- bp->debugfs_dim = pdevf;
- /* create files for each rx ring */
- for (i = 0; i < bp->cp_nr_rings; i++) {
- struct bnxt_cp_ring_info *cpr = &bp->bnapi[i]->cp_ring;
+ dir = debugfs_create_dir("dim", bp->debugfs_pdev);
- if (cpr && bp->bnapi[i]->rx_ring) {
- pdevf = debugfs_dim_ring_init(&cpr->dim, i,
- bp->debugfs_dim);
- if (!pdevf)
- pr_err("failed to create debugfs entry %s/dim/%d\n",
- pname, i);
- }
- }
- } else {
- pr_err("failed to create debugfs entry %s\n", pname);
+ /* create files for each rx ring */
+ for (i = 0; i < bp->cp_nr_rings; i++) {
+ struct bnxt_cp_ring_info *cpr = &bp->bnapi[i]->cp_ring;
+
+ if (cpr && bp->bnapi[i]->rx_ring)
+ debugfs_dim_ring_init(&cpr->dim, i, dir);
}
}
@@ -114,8 +99,6 @@ void bnxt_debug_dev_exit(struct bnxt *bp)
void bnxt_debug_init(void)
{
bnxt_debug_mnt = debugfs_create_dir("bnxt_en", NULL);
- if (!bnxt_debug_mnt)
- pr_err("failed to init bnxt_en debugfs\n");
}
void bnxt_debug_exit(void)
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
index c05d663212b2..e664392dccc0 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
@@ -15,6 +15,192 @@
#include "bnxt_vfr.h"
#include "bnxt_devlink.h"
+static int bnxt_fw_reporter_diagnose(struct devlink_health_reporter *reporter,
+ struct devlink_fmsg *fmsg)
+{
+ struct bnxt *bp = devlink_health_reporter_priv(reporter);
+ struct bnxt_fw_health *health = bp->fw_health;
+ u32 val, health_status;
+ int rc;
+
+ if (!health || test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
+ return 0;
+
+ val = bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG);
+ health_status = val & 0xffff;
+
+ if (health_status == BNXT_FW_STATUS_HEALTHY) {
+ rc = devlink_fmsg_string_pair_put(fmsg, "FW status",
+ "Healthy;");
+ if (rc)
+ return rc;
+ } else if (health_status < BNXT_FW_STATUS_HEALTHY) {
+ rc = devlink_fmsg_string_pair_put(fmsg, "FW status",
+ "Not yet completed initialization;");
+ if (rc)
+ return rc;
+ } else if (health_status > BNXT_FW_STATUS_HEALTHY) {
+ rc = devlink_fmsg_string_pair_put(fmsg, "FW status",
+ "Encountered fatal error and cannot recover;");
+ if (rc)
+ return rc;
+ }
+
+ if (val >> 16) {
+ rc = devlink_fmsg_u32_pair_put(fmsg, "Error", val >> 16);
+ if (rc)
+ return rc;
+ }
+
+ val = bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG);
+ rc = devlink_fmsg_u32_pair_put(fmsg, "Reset count", val);
+ if (rc)
+ return rc;
+
+ return 0;
+}
+
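
The diagnose callback treats the health register as two half-words: bits 15:0 are the status code (exactly 0x8000 means healthy, lower values mean still booting, higher values mean fatal) and bits 31:16 carry an error code. A quick decode, with a hypothetical register value:

    #include <stdint.h>
    #include <stdio.h>

    #define BNXT_FW_STATUS_HEALTHY 0x8000

    int main(void)
    {
        uint32_t val = 0x8000;          /* hypothetical health register */
        uint32_t status = val & 0xffff;

        printf("status %#x: %s, error code %#x\n", status,
               status == BNXT_FW_STATUS_HEALTHY ? "healthy" :
               status <  BNXT_FW_STATUS_HEALTHY ? "still initializing" :
                                                  "fatal, cannot recover",
               val >> 16);
        return 0;
    }
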
+static const struct devlink_health_reporter_ops bnxt_dl_fw_reporter_ops = {
+ .name = "fw",
+ .diagnose = bnxt_fw_reporter_diagnose,
+};
+
+static int bnxt_fw_reset_recover(struct devlink_health_reporter *reporter,
+ void *priv_ctx)
+{
+ struct bnxt *bp = devlink_health_reporter_priv(reporter);
+
+ if (!priv_ctx)
+ return -EOPNOTSUPP;
+
+ bnxt_fw_reset(bp);
+ return 0;
+}
+
+static const
+struct devlink_health_reporter_ops bnxt_dl_fw_reset_reporter_ops = {
+ .name = "fw_reset",
+ .recover = bnxt_fw_reset_recover,
+};
+
+static int bnxt_fw_fatal_recover(struct devlink_health_reporter *reporter,
+ void *priv_ctx)
+{
+ struct bnxt *bp = devlink_health_reporter_priv(reporter);
+ struct bnxt_fw_reporter_ctx *fw_reporter_ctx = priv_ctx;
+ unsigned long event;
+
+ if (!priv_ctx)
+ return -EOPNOTSUPP;
+
+ event = fw_reporter_ctx->sp_event;
+ if (event == BNXT_FW_RESET_NOTIFY_SP_EVENT)
+ bnxt_fw_reset(bp);
+ else if (event == BNXT_FW_EXCEPTION_SP_EVENT)
+ bnxt_fw_exception(bp);
+
+ return 0;
+}
+
+static const
+struct devlink_health_reporter_ops bnxt_dl_fw_fatal_reporter_ops = {
+ .name = "fw_fatal",
+ .recover = bnxt_fw_fatal_recover,
+};
+
+static void bnxt_dl_fw_reporters_create(struct bnxt *bp)
+{
+ struct bnxt_fw_health *health = bp->fw_health;
+
+ if (!health)
+ return;
+
+ health->fw_reporter =
+ devlink_health_reporter_create(bp->dl, &bnxt_dl_fw_reporter_ops,
+ 0, false, bp);
+ if (IS_ERR(health->fw_reporter)) {
+ netdev_warn(bp->dev, "Failed to create FW health reporter, rc = %ld\n",
+ PTR_ERR(health->fw_reporter));
+ health->fw_reporter = NULL;
+ }
+
+ health->fw_reset_reporter =
+ devlink_health_reporter_create(bp->dl,
+ &bnxt_dl_fw_reset_reporter_ops,
+ 0, true, bp);
+ if (IS_ERR(health->fw_reset_reporter)) {
+ netdev_warn(bp->dev, "Failed to create FW fatal health reporter, rc = %ld\n",
+ PTR_ERR(health->fw_reset_reporter));
+ health->fw_reset_reporter = NULL;
+ }
+
+ health->fw_fatal_reporter =
+ devlink_health_reporter_create(bp->dl,
+ &bnxt_dl_fw_fatal_reporter_ops,
+ 0, true, bp);
+ if (IS_ERR(health->fw_fatal_reporter)) {
+ netdev_warn(bp->dev, "Failed to create FW fatal health reporter, rc = %ld\n",
+ PTR_ERR(health->fw_fatal_reporter));
+ health->fw_fatal_reporter = NULL;
+ }
+}
+
+static void bnxt_dl_fw_reporters_destroy(struct bnxt *bp)
+{
+ struct bnxt_fw_health *health = bp->fw_health;
+
+ if (!health)
+ return;
+
+ if (health->fw_reporter)
+ devlink_health_reporter_destroy(health->fw_reporter);
+
+ if (health->fw_reset_reporter)
+ devlink_health_reporter_destroy(health->fw_reset_reporter);
+
+ if (health->fw_fatal_reporter)
+ devlink_health_reporter_destroy(health->fw_fatal_reporter);
+}
+
+void bnxt_devlink_health_report(struct bnxt *bp, unsigned long event)
+{
+ struct bnxt_fw_health *fw_health = bp->fw_health;
+ struct bnxt_fw_reporter_ctx fw_reporter_ctx;
+
+ if (!fw_health)
+ return;
+
+ fw_reporter_ctx.sp_event = event;
+ switch (event) {
+ case BNXT_FW_RESET_NOTIFY_SP_EVENT:
+ if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state)) {
+ if (!fw_health->fw_fatal_reporter)
+ return;
+
+ devlink_health_report(fw_health->fw_fatal_reporter,
+ "FW fatal async event received",
+ &fw_reporter_ctx);
+ return;
+ }
+ if (!fw_health->fw_reset_reporter)
+ return;
+
+ devlink_health_report(fw_health->fw_reset_reporter,
+ "FW non-fatal reset event received",
+ &fw_reporter_ctx);
+ return;
+
+ case BNXT_FW_EXCEPTION_SP_EVENT:
+ if (!fw_health->fw_fatal_reporter)
+ return;
+
+ devlink_health_report(fw_health->fw_fatal_reporter,
+ "FW fatal error reported",
+ &fw_reporter_ctx);
+ return;
+ }
+}
+
static const struct devlink_ops bnxt_dl_ops = {
#ifdef CONFIG_BNXT_SRIOV
.eswitch_mode_set = bnxt_dl_eswitch_mode_set,
@@ -109,13 +295,9 @@ static int bnxt_hwrm_nvm_req(struct bnxt *bp, u32 param_id, void *msg,
memcpy(buf, data_addr, bytesize);
dma_free_coherent(&bp->pdev->dev, bytesize, data_addr, data_dma_addr);
- if (rc == HWRM_ERR_CODE_RESOURCE_ACCESS_DENIED) {
+ if (rc == -EACCES)
netdev_err(bp->dev, "PF does not have admin privileges to modify NVM config\n");
- return -EACCES;
- } else if (rc) {
- return -EIO;
- }
- return 0;
+ return rc;
}
static int bnxt_dl_nvm_param_get(struct devlink *dl, u32 id,
@@ -251,6 +433,8 @@ int bnxt_dl_register(struct bnxt *bp)
devlink_params_publish(dl);
+ bnxt_dl_fw_reporters_create(bp);
+
return 0;
err_dl_port_unreg:
@@ -273,6 +457,7 @@ void bnxt_dl_unregister(struct bnxt *bp)
if (!dl)
return;
+ bnxt_dl_fw_reporters_destroy(bp);
devlink_port_params_unregister(&bp->dl_port, bnxt_dl_port_params,
ARRAY_SIZE(bnxt_dl_port_params));
devlink_port_unregister(&bp->dl_port);
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.h
index 5b6b2c7d97cf..b97e0baeb42d 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.h
@@ -55,6 +55,7 @@ struct bnxt_dl_nvm_param {
u16 num_bits;
};
+void bnxt_devlink_health_report(struct bnxt *bp, unsigned long event);
int bnxt_dl_register(struct bnxt *bp);
void bnxt_dl_unregister(struct bnxt *bp);
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
index 8445a0cce849..235265eeec7d 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
@@ -137,7 +137,44 @@ reset_coalesce:
return rc;
}
-#define BNXT_NUM_STATS 22
+static const char * const bnxt_ring_stats_str[] = {
+ "rx_ucast_packets",
+ "rx_mcast_packets",
+ "rx_bcast_packets",
+ "rx_discards",
+ "rx_drops",
+ "rx_ucast_bytes",
+ "rx_mcast_bytes",
+ "rx_bcast_bytes",
+ "tx_ucast_packets",
+ "tx_mcast_packets",
+ "tx_bcast_packets",
+ "tx_discards",
+ "tx_drops",
+ "tx_ucast_bytes",
+ "tx_mcast_bytes",
+ "tx_bcast_bytes",
+};
+
+static const char * const bnxt_ring_tpa_stats_str[] = {
+ "tpa_packets",
+ "tpa_bytes",
+ "tpa_events",
+ "tpa_aborts",
+};
+
+static const char * const bnxt_ring_tpa2_stats_str[] = {
+ "rx_tpa_eligible_pkt",
+ "rx_tpa_eligible_bytes",
+ "rx_tpa_pkt",
+ "rx_tpa_bytes",
+ "rx_tpa_errors",
+};
+
+static const char * const bnxt_ring_sw_stats_str[] = {
+ "rx_l4_csum_errors",
+ "missed_irqs",
+};
#define BNXT_RX_STATS_ENTRY(counter) \
{ BNXT_RX_STATS_OFFSET(counter), __stringify(counter) }
@@ -207,6 +244,20 @@ reset_coalesce:
BNXT_TX_STATS_EXT_COS_ENTRY(6), \
BNXT_TX_STATS_EXT_COS_ENTRY(7) \
+#define BNXT_RX_STATS_EXT_DISCARD_COS_ENTRY(n) \
+ BNXT_RX_STATS_EXT_ENTRY(rx_discard_bytes_cos##n), \
+ BNXT_RX_STATS_EXT_ENTRY(rx_discard_packets_cos##n)
+
+#define BNXT_RX_STATS_EXT_DISCARD_COS_ENTRIES \
+ BNXT_RX_STATS_EXT_DISCARD_COS_ENTRY(0), \
+ BNXT_RX_STATS_EXT_DISCARD_COS_ENTRY(1), \
+ BNXT_RX_STATS_EXT_DISCARD_COS_ENTRY(2), \
+ BNXT_RX_STATS_EXT_DISCARD_COS_ENTRY(3), \
+ BNXT_RX_STATS_EXT_DISCARD_COS_ENTRY(4), \
+ BNXT_RX_STATS_EXT_DISCARD_COS_ENTRY(5), \
+ BNXT_RX_STATS_EXT_DISCARD_COS_ENTRY(6), \
+ BNXT_RX_STATS_EXT_DISCARD_COS_ENTRY(7)
+
#define BNXT_RX_STATS_PRI_ENTRY(counter, n) \
{ BNXT_RX_STATS_EXT_OFFSET(counter##_cos0), \
__stringify(counter##_pri##n) }
@@ -352,6 +403,7 @@ static const struct {
BNXT_RX_STATS_EXT_ENTRY(rx_buffer_passed_threshold),
BNXT_RX_STATS_EXT_ENTRY(rx_pcs_symbol_err),
BNXT_RX_STATS_EXT_ENTRY(rx_corrected_bits),
+ BNXT_RX_STATS_EXT_DISCARD_COS_ENTRIES,
};
static const struct {
@@ -417,9 +469,29 @@ static const struct {
ARRAY_SIZE(bnxt_tx_pkts_pri_arr))
#define BNXT_NUM_PCIE_STATS ARRAY_SIZE(bnxt_pcie_stats_arr)
+static int bnxt_get_num_tpa_ring_stats(struct bnxt *bp)
+{
+ if (BNXT_SUPPORTS_TPA(bp)) {
+ if (bp->max_tpa_v2)
+ return ARRAY_SIZE(bnxt_ring_tpa2_stats_str);
+ return ARRAY_SIZE(bnxt_ring_tpa_stats_str);
+ }
+ return 0;
+}
+
+static int bnxt_get_num_ring_stats(struct bnxt *bp)
+{
+ int num_stats;
+
+ num_stats = ARRAY_SIZE(bnxt_ring_stats_str) +
+ ARRAY_SIZE(bnxt_ring_sw_stats_str) +
+ bnxt_get_num_tpa_ring_stats(bp);
+ return num_stats * bp->cp_nr_rings;
+}
+
static int bnxt_get_num_stats(struct bnxt *bp)
{
- int num_stats = BNXT_NUM_STATS * bp->cp_nr_rings;
+ int num_stats = bnxt_get_num_ring_stats(bp);
num_stats += BNXT_NUM_SW_FUNC_STATS;
@@ -460,10 +532,11 @@ static void bnxt_get_ethtool_stats(struct net_device *dev,
{
u32 i, j = 0;
struct bnxt *bp = netdev_priv(dev);
- u32 stat_fields = sizeof(struct ctx_hw_stats) / 8;
+ u32 stat_fields = ARRAY_SIZE(bnxt_ring_stats_str) +
+ bnxt_get_num_tpa_ring_stats(bp);
if (!bp->bnapi) {
- j += BNXT_NUM_STATS * bp->cp_nr_rings + BNXT_NUM_SW_FUNC_STATS;
+ j += bnxt_get_num_ring_stats(bp) + BNXT_NUM_SW_FUNC_STATS;
goto skip_ring_stats;
}
@@ -551,56 +624,39 @@ skip_ring_stats:
static void bnxt_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
{
struct bnxt *bp = netdev_priv(dev);
- u32 i;
+ static const char * const *str;
+ u32 i, j, num_str;
switch (stringset) {
- /* The number of strings must match BNXT_NUM_STATS defined above. */
case ETH_SS_STATS:
for (i = 0; i < bp->cp_nr_rings; i++) {
- sprintf(buf, "[%d]: rx_ucast_packets", i);
- buf += ETH_GSTRING_LEN;
- sprintf(buf, "[%d]: rx_mcast_packets", i);
- buf += ETH_GSTRING_LEN;
- sprintf(buf, "[%d]: rx_bcast_packets", i);
- buf += ETH_GSTRING_LEN;
- sprintf(buf, "[%d]: rx_discards", i);
- buf += ETH_GSTRING_LEN;
- sprintf(buf, "[%d]: rx_drops", i);
- buf += ETH_GSTRING_LEN;
- sprintf(buf, "[%d]: rx_ucast_bytes", i);
- buf += ETH_GSTRING_LEN;
- sprintf(buf, "[%d]: rx_mcast_bytes", i);
- buf += ETH_GSTRING_LEN;
- sprintf(buf, "[%d]: rx_bcast_bytes", i);
- buf += ETH_GSTRING_LEN;
- sprintf(buf, "[%d]: tx_ucast_packets", i);
- buf += ETH_GSTRING_LEN;
- sprintf(buf, "[%d]: tx_mcast_packets", i);
- buf += ETH_GSTRING_LEN;
- sprintf(buf, "[%d]: tx_bcast_packets", i);
- buf += ETH_GSTRING_LEN;
- sprintf(buf, "[%d]: tx_discards", i);
- buf += ETH_GSTRING_LEN;
- sprintf(buf, "[%d]: tx_drops", i);
- buf += ETH_GSTRING_LEN;
- sprintf(buf, "[%d]: tx_ucast_bytes", i);
- buf += ETH_GSTRING_LEN;
- sprintf(buf, "[%d]: tx_mcast_bytes", i);
- buf += ETH_GSTRING_LEN;
- sprintf(buf, "[%d]: tx_bcast_bytes", i);
- buf += ETH_GSTRING_LEN;
- sprintf(buf, "[%d]: tpa_packets", i);
- buf += ETH_GSTRING_LEN;
- sprintf(buf, "[%d]: tpa_bytes", i);
- buf += ETH_GSTRING_LEN;
- sprintf(buf, "[%d]: tpa_events", i);
- buf += ETH_GSTRING_LEN;
- sprintf(buf, "[%d]: tpa_aborts", i);
- buf += ETH_GSTRING_LEN;
- sprintf(buf, "[%d]: rx_l4_csum_errors", i);
- buf += ETH_GSTRING_LEN;
- sprintf(buf, "[%d]: missed_irqs", i);
- buf += ETH_GSTRING_LEN;
+ num_str = ARRAY_SIZE(bnxt_ring_stats_str);
+ for (j = 0; j < num_str; j++) {
+ sprintf(buf, "[%d]: %s", i,
+ bnxt_ring_stats_str[j]);
+ buf += ETH_GSTRING_LEN;
+ }
+ if (!BNXT_SUPPORTS_TPA(bp))
+ goto skip_tpa_stats;
+
+ if (bp->max_tpa_v2) {
+ num_str = ARRAY_SIZE(bnxt_ring_tpa2_stats_str);
+ str = bnxt_ring_tpa2_stats_str;
+ } else {
+ num_str = ARRAY_SIZE(bnxt_ring_tpa_stats_str);
+ str = bnxt_ring_tpa_stats_str;
+ }
+ for (j = 0; j < num_str; j++) {
+ sprintf(buf, "[%d]: %s", i, str[j]);
+ buf += ETH_GSTRING_LEN;
+ }
+skip_tpa_stats:
+ num_str = ARRAY_SIZE(bnxt_ring_sw_stats_str);
+ for (j = 0; j < num_str; j++) {
+ sprintf(buf, "[%d]: %s", i,
+ bnxt_ring_sw_stats_str[j]);
+ buf += ETH_GSTRING_LEN;
+ }
}
for (i = 0; i < BNXT_NUM_SW_FUNC_STATS; i++) {
strcpy(buf, bnxt_sw_func_stats[i].string);
@@ -1643,6 +1699,11 @@ static u32 bnxt_get_link(struct net_device *dev)
return bp->link_info.link_up;
}
+static void bnxt_print_admin_err(struct bnxt *bp)
+{
+ netdev_info(bp->dev, "PF does not have admin privileges to flash or reset the device\n");
+}
+
static int bnxt_find_nvram_item(struct net_device *dev, u16 type, u16 ordinal,
u16 ext, u16 *index, u32 *item_length,
u32 *data_length);
@@ -1682,13 +1743,8 @@ static int bnxt_flash_nvram(struct net_device *dev,
rc = hwrm_send_message(bp, &req, sizeof(req), FLASH_NVRAM_TIMEOUT);
dma_free_coherent(&bp->pdev->dev, data_len, kmem, dma_handle);
- if (rc == HWRM_ERR_CODE_RESOURCE_ACCESS_DENIED) {
- netdev_info(dev,
- "PF does not have admin privileges to flash the device\n");
- rc = -EACCES;
- } else if (rc) {
- rc = -EIO;
- }
+ if (rc == -EACCES)
+ bnxt_print_admin_err(bp);
return rc;
}
@@ -1738,13 +1794,8 @@ static int bnxt_firmware_reset(struct net_device *dev,
}
rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
- if (rc == HWRM_ERR_CODE_RESOURCE_ACCESS_DENIED) {
- netdev_info(dev,
- "PF does not have admin privileges to reset the device\n");
- rc = -EACCES;
- } else if (rc) {
- rc = -EIO;
- }
+ if (rc == -EACCES)
+ bnxt_print_admin_err(bp);
return rc;
}
@@ -2039,13 +2090,8 @@ static int bnxt_flash_package_from_file(struct net_device *dev,
flash_pkg_exit:
mutex_unlock(&bp->hwrm_cmd_lock);
err_exit:
- if (hwrm_err == HWRM_ERR_CODE_RESOURCE_ACCESS_DENIED) {
- netdev_info(dev,
- "PF does not have admin privileges to flash the device\n");
- rc = -EACCES;
- } else if (hwrm_err) {
- rc = -EOPNOTSUPP;
- }
+ if (hwrm_err == -EACCES)
+ bnxt_print_admin_err(bp);
return rc;
}
@@ -2584,8 +2630,6 @@ static int bnxt_set_phys_id(struct net_device *dev,
led_cfg->led_group_id = bp->leds[i].led_group_id;
}
rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
- if (rc)
- rc = -EIO;
return rc;
}
@@ -3306,6 +3350,7 @@ void bnxt_ethtool_init(struct bnxt *bp)
if (!(bp->fw_cap & BNXT_FW_CAP_PKG_VER))
bnxt_get_pkgver(dev);
+ bp->num_tests = 0;
if (bp->hwrm_spec_code < 0x10704 || !BNXT_SINGLE_PF(bp))
return;
@@ -3315,7 +3360,9 @@ void bnxt_ethtool_init(struct bnxt *bp)
if (rc)
goto ethtool_init_exit;
- test_info = kzalloc(sizeof(*bp->test_info), GFP_KERNEL);
+ test_info = bp->test_info;
+ if (!test_info)
+ test_info = kzalloc(sizeof(*bp->test_info), GFP_KERNEL);
if (!test_info)
goto ethtool_init_exit;
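
The get_strings() rewrite above trades twenty-two hand-rolled sprintf() calls for loops over const char * const tables, so adding a counter now means adding one array entry. The core idiom, as a sketch (the helper name is ours, not the driver's):

	/* Emit one fixed-width ethtool string per table entry. */
	static u8 *emit_ring_strings(u8 *buf, int ring,
				     const char * const *strs, size_t n)
	{
		size_t j;

		for (j = 0; j < n; j++) {
			snprintf(buf, ETH_GSTRING_LEN, "[%d]: %s", ring, strs[j]);
			buf += ETH_GSTRING_LEN;	/* strings live in fixed-size slots */
		}
		return buf;
	}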
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h
index 12bbb2a207d0..2cdef753a1bc 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h
@@ -1,7 +1,8 @@
/* Broadcom NetXtreme-C/E network driver.
*
* Copyright (c) 2014-2016 Broadcom Corporation
- * Copyright (c) 2016-2019 Broadcom Limited
+ * Copyright (c) 2014-2018 Broadcom Limited
+ * Copyright (c) 2018-2019 Broadcom Inc.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -39,15 +40,15 @@ struct hwrm_resp_hdr {
#define TLV_TYPE_ROCE_SP_COMMAND 0x3UL
#define TLV_TYPE_QUERY_ROCE_CC_GEN1 0x4UL
#define TLV_TYPE_MODIFY_ROCE_CC_GEN1 0x5UL
-#define TLV_TYPE_ENGINE_CKV_DEVICE_SERIAL_NUMBER 0x8001UL
-#define TLV_TYPE_ENGINE_CKV_NONCE 0x8002UL
+#define TLV_TYPE_ENGINE_CKV_ALIAS_ECC_PUBLIC_KEY 0x8001UL
#define TLV_TYPE_ENGINE_CKV_IV 0x8003UL
#define TLV_TYPE_ENGINE_CKV_AUTH_TAG 0x8004UL
#define TLV_TYPE_ENGINE_CKV_CIPHERTEXT 0x8005UL
#define TLV_TYPE_ENGINE_CKV_ALGORITHMS 0x8006UL
-#define TLV_TYPE_ENGINE_CKV_ECC_PUBLIC_KEY 0x8007UL
+#define TLV_TYPE_ENGINE_CKV_HOST_ECC_PUBLIC_KEY 0x8007UL
#define TLV_TYPE_ENGINE_CKV_ECDSA_SIGNATURE 0x8008UL
-#define TLV_TYPE_LAST TLV_TYPE_ENGINE_CKV_ECDSA_SIGNATURE
+#define TLV_TYPE_ENGINE_CKV_SRT_ECC_PUBLIC_KEY 0x8009UL
+#define TLV_TYPE_LAST TLV_TYPE_ENGINE_CKV_SRT_ECC_PUBLIC_KEY
/* tlv (size:64b/8B) */
@@ -267,7 +268,6 @@ struct cmd_nums {
#define HWRM_CFA_EEM_OP 0x123UL
#define HWRM_CFA_ADV_FLOW_MGNT_QCAPS 0x124UL
#define HWRM_CFA_TFLIB 0x125UL
- #define HWRM_ENGINE_CKV_HELLO 0x12dUL
#define HWRM_ENGINE_CKV_STATUS 0x12eUL
#define HWRM_ENGINE_CKV_CKEK_ADD 0x12fUL
#define HWRM_ENGINE_CKV_CKEK_DELETE 0x130UL
@@ -313,6 +313,7 @@ struct cmd_nums {
#define HWRM_FUNC_BACKING_STORE_QCFG 0x194UL
#define HWRM_FUNC_VF_BW_CFG 0x195UL
#define HWRM_FUNC_VF_BW_QCFG 0x196UL
+ #define HWRM_FUNC_HOST_PF_IDS_QUERY 0x197UL
#define HWRM_SELFTEST_QLIST 0x200UL
#define HWRM_SELFTEST_EXEC 0x201UL
#define HWRM_SELFTEST_IRQ 0x202UL
@@ -410,8 +411,8 @@ struct hwrm_err_output {
#define HWRM_VERSION_MAJOR 1
#define HWRM_VERSION_MINOR 10
#define HWRM_VERSION_UPDATE 0
-#define HWRM_VERSION_RSVD 69
-#define HWRM_VERSION_STR "1.10.0.69"
+#define HWRM_VERSION_RSVD 89
+#define HWRM_VERSION_STR "1.10.0.89"
/* hwrm_ver_get_input (size:192b/24B) */
struct hwrm_ver_get_input {
@@ -624,6 +625,8 @@ struct hwrm_async_event_cmpl {
#define ASYNC_EVENT_CMPL_EVENT_ID_TCP_FLAG_ACTION_CHANGE 0x3aUL
#define ASYNC_EVENT_CMPL_EVENT_ID_EEM_FLOW_ACTIVE 0x3bUL
#define ASYNC_EVENT_CMPL_EVENT_ID_EEM_CFG_CHANGE 0x3cUL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_TFLIB_DEFAULT_VNIC_CHANGE 0x3dUL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_TFLIB_LINK_STATUS_CHANGE 0x3eUL
#define ASYNC_EVENT_CMPL_EVENT_ID_FW_TRACE_MSG 0xfeUL
#define ASYNC_EVENT_CMPL_EVENT_ID_HWRM_ERROR 0xffUL
#define ASYNC_EVENT_CMPL_EVENT_ID_LAST ASYNC_EVENT_CMPL_EVENT_ID_HWRM_ERROR
@@ -1122,6 +1125,7 @@ struct hwrm_func_qcfg_output {
#define FUNC_QCFG_RESP_FLAGS_MULTI_HOST 0x20UL
#define FUNC_QCFG_RESP_FLAGS_TRUSTED_VF 0x40UL
#define FUNC_QCFG_RESP_FLAGS_SECURE_MODE_ENABLED 0x80UL
+ #define FUNC_QCFG_RESP_FLAGS_PREBOOT_LEGACY_L2_RINGS 0x100UL
u8 mac_address[6];
__le16 pci_id;
__le16 alloc_rsscos_ctx;
@@ -1241,6 +1245,7 @@ struct hwrm_func_cfg_input {
#define FUNC_CFG_REQ_FLAGS_DYNAMIC_TX_RING_ALLOC 0x400000UL
#define FUNC_CFG_REQ_FLAGS_NQ_ASSETS_TEST 0x800000UL
#define FUNC_CFG_REQ_FLAGS_TRUSTED_VF_DISABLE 0x1000000UL
+ #define FUNC_CFG_REQ_FLAGS_PREBOOT_LEGACY_L2_RINGS 0x2000000UL
__le32 enables;
#define FUNC_CFG_REQ_ENABLES_MTU 0x1UL
#define FUNC_CFG_REQ_ENABLES_MRU 0x2UL
@@ -2916,7 +2921,7 @@ struct tx_port_stats_ext {
__le64 pfc_pri7_tx_transitions;
};
-/* rx_port_stats_ext (size:2624b/328B) */
+/* rx_port_stats_ext (size:3648b/456B) */
struct rx_port_stats_ext {
__le64 link_down_events;
__le64 continuous_pause_events;
@@ -2959,6 +2964,22 @@ struct rx_port_stats_ext {
__le64 rx_buffer_passed_threshold;
__le64 rx_pcs_symbol_err;
__le64 rx_corrected_bits;
+ __le64 rx_discard_bytes_cos0;
+ __le64 rx_discard_bytes_cos1;
+ __le64 rx_discard_bytes_cos2;
+ __le64 rx_discard_bytes_cos3;
+ __le64 rx_discard_bytes_cos4;
+ __le64 rx_discard_bytes_cos5;
+ __le64 rx_discard_bytes_cos6;
+ __le64 rx_discard_bytes_cos7;
+ __le64 rx_discard_packets_cos0;
+ __le64 rx_discard_packets_cos1;
+ __le64 rx_discard_packets_cos2;
+ __le64 rx_discard_packets_cos3;
+ __le64 rx_discard_packets_cos4;
+ __le64 rx_discard_packets_cos5;
+ __le64 rx_discard_packets_cos6;
+ __le64 rx_discard_packets_cos7;
};
/* hwrm_port_qstats_ext_input (size:320b/40B) */
@@ -6115,6 +6136,21 @@ struct hwrm_cfa_flow_alloc_output {
u8 valid;
};
+/* hwrm_cfa_flow_alloc_cmd_err (size:64b/8B) */
+struct hwrm_cfa_flow_alloc_cmd_err {
+ u8 code;
+ #define CFA_FLOW_ALLOC_CMD_ERR_CODE_UNKNOWN 0x0UL
+ #define CFA_FLOW_ALLOC_CMD_ERR_CODE_L2_CONTEXT_TCAM 0x1UL
+ #define CFA_FLOW_ALLOC_CMD_ERR_CODE_ACTION_RECORD 0x2UL
+ #define CFA_FLOW_ALLOC_CMD_ERR_CODE_FLOW_COUNTER 0x3UL
+ #define CFA_FLOW_ALLOC_CMD_ERR_CODE_WILD_CARD_TCAM 0x4UL
+ #define CFA_FLOW_ALLOC_CMD_ERR_CODE_HASH_COLLISION 0x5UL
+ #define CFA_FLOW_ALLOC_CMD_ERR_CODE_KEY_EXISTS 0x6UL
+ #define CFA_FLOW_ALLOC_CMD_ERR_CODE_FLOW_CTXT_DB 0x7UL
+ #define CFA_FLOW_ALLOC_CMD_ERR_CODE_LAST CFA_FLOW_ALLOC_CMD_ERR_CODE_FLOW_CTXT_DB
+ u8 unused_0[7];
+};
+
/* hwrm_cfa_flow_free_input (size:256b/32B) */
struct hwrm_cfa_flow_free_input {
__le16 req_type;
@@ -6305,7 +6341,7 @@ struct hwrm_cfa_eem_qcaps_input {
__le32 unused_0;
};
-/* hwrm_cfa_eem_qcaps_output (size:256b/32B) */
+/* hwrm_cfa_eem_qcaps_output (size:320b/40B) */
struct hwrm_cfa_eem_qcaps_output {
__le16 error_code;
__le16 req_type;
@@ -6322,15 +6358,17 @@ struct hwrm_cfa_eem_qcaps_output {
#define CFA_EEM_QCAPS_RESP_SUPPORTED_KEY1_TABLE 0x2UL
#define CFA_EEM_QCAPS_RESP_SUPPORTED_EXTERNAL_RECORD_TABLE 0x4UL
#define CFA_EEM_QCAPS_RESP_SUPPORTED_EXTERNAL_FLOW_COUNTERS_TABLE 0x8UL
+ #define CFA_EEM_QCAPS_RESP_SUPPORTED_FID_TABLE 0x10UL
__le32 max_entries_supported;
__le16 key_entry_size;
__le16 record_entry_size;
__le16 efc_entry_size;
- u8 unused_1;
+ __le16 fid_entry_size;
+ u8 unused_1[7];
u8 valid;
};
-/* hwrm_cfa_eem_cfg_input (size:320b/40B) */
+/* hwrm_cfa_eem_cfg_input (size:384b/48B) */
struct hwrm_cfa_eem_cfg_input {
__le16 req_type;
__le16 cmpl_ring;
@@ -6350,6 +6388,9 @@ struct hwrm_cfa_eem_cfg_input {
__le16 key1_ctx_id;
__le16 record_ctx_id;
__le16 efc_ctx_id;
+ __le16 fid_ctx_id;
+ __le16 unused_2;
+ __le32 unused_3;
};
/* hwrm_cfa_eem_cfg_output (size:128b/16B) */
@@ -6375,7 +6416,7 @@ struct hwrm_cfa_eem_qcfg_input {
__le32 unused_0;
};
-/* hwrm_cfa_eem_qcfg_output (size:192b/24B) */
+/* hwrm_cfa_eem_qcfg_output (size:256b/32B) */
struct hwrm_cfa_eem_qcfg_output {
__le16 error_code;
__le16 req_type;
@@ -6386,7 +6427,12 @@ struct hwrm_cfa_eem_qcfg_output {
#define CFA_EEM_QCFG_RESP_FLAGS_PATH_RX 0x2UL
#define CFA_EEM_QCFG_RESP_FLAGS_PREFERRED_OFFLOAD 0x4UL
__le32 num_entries;
- u8 unused_0[7];
+ __le16 key0_ctx_id;
+ __le16 key1_ctx_id;
+ __le16 record_ctx_id;
+ __le16 efc_ctx_id;
+ __le16 fid_ctx_id;
+ u8 unused_2[5];
u8 valid;
};
@@ -6567,6 +6613,31 @@ struct ctx_hw_stats {
__le64 tpa_aborts;
};
+/* ctx_hw_stats_ext (size:1344b/168B) */
+struct ctx_hw_stats_ext {
+ __le64 rx_ucast_pkts;
+ __le64 rx_mcast_pkts;
+ __le64 rx_bcast_pkts;
+ __le64 rx_discard_pkts;
+ __le64 rx_drop_pkts;
+ __le64 rx_ucast_bytes;
+ __le64 rx_mcast_bytes;
+ __le64 rx_bcast_bytes;
+ __le64 tx_ucast_pkts;
+ __le64 tx_mcast_pkts;
+ __le64 tx_bcast_pkts;
+ __le64 tx_discard_pkts;
+ __le64 tx_drop_pkts;
+ __le64 tx_ucast_bytes;
+ __le64 tx_mcast_bytes;
+ __le64 tx_bcast_bytes;
+ __le64 rx_tpa_eligible_pkt;
+ __le64 rx_tpa_eligible_bytes;
+ __le64 rx_tpa_pkt;
+ __le64 rx_tpa_bytes;
+ __le64 rx_tpa_errors;
+};
+
/* hwrm_stat_ctx_alloc_input (size:256b/32B) */
struct hwrm_stat_ctx_alloc_input {
__le16 req_type;
@@ -6578,7 +6649,8 @@ struct hwrm_stat_ctx_alloc_input {
__le32 update_period_ms;
u8 stat_ctx_flags;
#define STAT_CTX_ALLOC_REQ_STAT_CTX_FLAGS_ROCE 0x1UL
- u8 unused_0[3];
+ u8 unused_0;
+ __le16 stats_dma_length;
};
/* hwrm_stat_ctx_alloc_output (size:128b/16B) */
@@ -7204,7 +7276,9 @@ struct coredump_segment_record {
u8 version_hi;
u8 version_low;
u8 seg_flags;
- u8 unused_0[7];
+ u8 compress_flags;
+ #define SFLAG_COMPRESSED_ZLIB 0x1UL
+ u8 unused_0[6];
};
/* hwrm_dbg_coredump_list_input (size:256b/32B) */
@@ -7729,6 +7803,9 @@ struct hwrm_nvm_set_variable_input {
#define NVM_SET_VARIABLE_REQ_FLAGS_ENCRYPT_MODE_AES256 (0x2UL << 1)
#define NVM_SET_VARIABLE_REQ_FLAGS_ENCRYPT_MODE_HMAC_SHA1_AUTH (0x3UL << 1)
#define NVM_SET_VARIABLE_REQ_FLAGS_ENCRYPT_MODE_LAST NVM_SET_VARIABLE_REQ_FLAGS_ENCRYPT_MODE_HMAC_SHA1_AUTH
+ #define NVM_SET_VARIABLE_REQ_FLAGS_FLAGS_UNUSED_0_MASK 0x70UL
+ #define NVM_SET_VARIABLE_REQ_FLAGS_FLAGS_UNUSED_0_SFT 4
+ #define NVM_SET_VARIABLE_REQ_FLAGS_FACTORY_DEFAULT 0x80UL
u8 unused_0;
};
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
index 2b90a2bb1a1d..f6f3454d6059 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
@@ -25,7 +25,6 @@
static int bnxt_hwrm_fwd_async_event_cmpl(struct bnxt *bp,
struct bnxt_vf_info *vf, u16 event_id)
{
- struct hwrm_fwd_async_event_cmpl_output *resp = bp->hwrm_cmd_resp_addr;
struct hwrm_fwd_async_event_cmpl_input req = {0};
struct hwrm_async_event_cmpl *async_cmpl;
int rc = 0;
@@ -40,23 +39,10 @@ static int bnxt_hwrm_fwd_async_event_cmpl(struct bnxt *bp,
async_cmpl->type = cpu_to_le16(ASYNC_EVENT_CMPL_TYPE_HWRM_ASYNC_EVENT);
async_cmpl->event_id = cpu_to_le16(event_id);
- mutex_lock(&bp->hwrm_cmd_lock);
- rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
-
- if (rc) {
+ rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ if (rc)
netdev_err(bp->dev, "hwrm_fwd_async_event_cmpl failed. rc:%d\n",
rc);
- goto fwd_async_event_cmpl_exit;
- }
-
- if (resp->error_code) {
- netdev_err(bp->dev, "hwrm_fwd_async_event_cmpl error %d\n",
- resp->error_code);
- rc = -1;
- }
-
-fwd_async_event_cmpl_exit:
- mutex_unlock(&bp->hwrm_cmd_lock);
return rc;
}
@@ -133,7 +119,7 @@ static int bnxt_hwrm_func_qcfg_flags(struct bnxt *bp, struct bnxt_vf_info *vf)
rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
if (rc) {
mutex_unlock(&bp->hwrm_cmd_lock);
- return -EIO;
+ return rc;
}
vf->func_qcfg_flags = le16_to_cpu(resp->flags);
mutex_unlock(&bp->hwrm_cmd_lock);
@@ -164,9 +150,7 @@ static int bnxt_hwrm_set_trusted_vf(struct bnxt *bp, struct bnxt_vf_info *vf)
else
req.flags = cpu_to_le32(FUNC_CFG_REQ_FLAGS_TRUSTED_VF_DISABLE);
rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
- if (rc)
- return -EIO;
- return 0;
+ return rc;
}
int bnxt_set_vf_trust(struct net_device *dev, int vf_id, bool trusted)
@@ -486,10 +470,43 @@ static int bnxt_hwrm_func_buf_rgtr(struct bnxt *bp)
return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
}
+/* Caller holds the bp->hwrm_cmd_lock mutex */
+static void __bnxt_set_vf_params(struct bnxt *bp, int vf_id)
+{
+ struct hwrm_func_cfg_input req = {0};
+ struct bnxt_vf_info *vf;
+
+ vf = &bp->pf.vf[vf_id];
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
+ req.fid = cpu_to_le16(vf->fw_fid);
+ req.flags = cpu_to_le32(vf->func_flags);
+
+ if (is_valid_ether_addr(vf->mac_addr)) {
+ req.enables |= cpu_to_le32(FUNC_CFG_REQ_ENABLES_DFLT_MAC_ADDR);
+ memcpy(req.dflt_mac_addr, vf->mac_addr, ETH_ALEN);
+ }
+ if (vf->vlan) {
+ req.enables |= cpu_to_le32(FUNC_CFG_REQ_ENABLES_DFLT_VLAN);
+ req.dflt_vlan = cpu_to_le16(vf->vlan);
+ }
+ if (vf->max_tx_rate) {
+ req.enables |= cpu_to_le32(FUNC_CFG_REQ_ENABLES_MAX_BW);
+ req.max_bw = cpu_to_le32(vf->max_tx_rate);
+#ifdef HAVE_IFLA_TX_RATE
+ req.enables |= cpu_to_le32(FUNC_CFG_REQ_ENABLES_MIN_BW);
+ req.min_bw = cpu_to_le32(vf->min_tx_rate);
+#endif
+ }
+ if (vf->flags & BNXT_VF_TRUST)
+ req.flags |= cpu_to_le32(FUNC_CFG_REQ_FLAGS_TRUSTED_VF_ENABLE);
+
+ _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+}
+
/* Only called by PF to reserve resources for VFs, returns actual number of
* VFs configured, or < 0 on error.
*/
-static int bnxt_hwrm_func_vf_resc_cfg(struct bnxt *bp, int num_vfs)
+static int bnxt_hwrm_func_vf_resc_cfg(struct bnxt *bp, int num_vfs, bool reset)
{
struct hwrm_func_vf_resource_cfg_input req = {0};
struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
@@ -561,13 +578,14 @@ static int bnxt_hwrm_func_vf_resc_cfg(struct bnxt *bp, int num_vfs)
mutex_lock(&bp->hwrm_cmd_lock);
for (i = 0; i < num_vfs; i++) {
+ if (reset)
+ __bnxt_set_vf_params(bp, i);
+
req.vf_id = cpu_to_le16(pf->first_vf_id + i);
rc = _hwrm_send_message(bp, &req, sizeof(req),
HWRM_CMD_TIMEOUT);
- if (rc) {
- rc = -ENOMEM;
+ if (rc)
break;
- }
pf->active_vfs = i + 1;
pf->vf[i].fw_fid = pf->first_vf_id + i;
}
@@ -664,8 +682,6 @@ static int bnxt_hwrm_func_cfg(struct bnxt *bp, int num_vfs)
total_vf_tx_rings += vf_tx_rsvd;
}
mutex_unlock(&bp->hwrm_cmd_lock);
- if (rc)
- rc = -ENOMEM;
if (pf->active_vfs) {
hw_resc->max_tx_rings -= total_vf_tx_rings;
hw_resc->max_rx_rings -= vf_rx_rings * num_vfs;
@@ -679,14 +695,40 @@ static int bnxt_hwrm_func_cfg(struct bnxt *bp, int num_vfs)
return rc;
}
-static int bnxt_func_cfg(struct bnxt *bp, int num_vfs)
+static int bnxt_func_cfg(struct bnxt *bp, int num_vfs, bool reset)
{
if (BNXT_NEW_RM(bp))
- return bnxt_hwrm_func_vf_resc_cfg(bp, num_vfs);
+ return bnxt_hwrm_func_vf_resc_cfg(bp, num_vfs, reset);
else
return bnxt_hwrm_func_cfg(bp, num_vfs);
}
+int bnxt_cfg_hw_sriov(struct bnxt *bp, int *num_vfs, bool reset)
+{
+ int rc;
+
+ /* Register buffers for VFs */
+ rc = bnxt_hwrm_func_buf_rgtr(bp);
+ if (rc)
+ return rc;
+
+ /* Reserve resources for VFs */
+ rc = bnxt_func_cfg(bp, *num_vfs, reset);
+ if (rc != *num_vfs) {
+ if (rc <= 0) {
+ netdev_warn(bp->dev, "Unable to reserve resources for SRIOV.\n");
+ *num_vfs = 0;
+ return rc;
+ }
+ netdev_warn(bp->dev, "Only able to reserve resources for %d VFs.\n",
+ rc);
+ *num_vfs = rc;
+ }
+
+ bnxt_ulp_sriov_cfg(bp, *num_vfs);
+ return 0;
+}
+
static int bnxt_sriov_enable(struct bnxt *bp, int *num_vfs)
{
int rc = 0, vfs_supported;
@@ -752,25 +794,10 @@ static int bnxt_sriov_enable(struct bnxt *bp, int *num_vfs)
if (rc)
goto err_out1;
- /* Reserve resources for VFs */
- rc = bnxt_func_cfg(bp, *num_vfs);
- if (rc != *num_vfs) {
- if (rc <= 0) {
- netdev_warn(bp->dev, "Unable to reserve resources for SRIOV.\n");
- *num_vfs = 0;
- goto err_out2;
- }
- netdev_warn(bp->dev, "Only able to reserve resources for %d VFs.\n", rc);
- *num_vfs = rc;
- }
-
- /* Register buffers for VFs */
- rc = bnxt_hwrm_func_buf_rgtr(bp);
+ rc = bnxt_cfg_hw_sriov(bp, num_vfs, false);
if (rc)
goto err_out2;
- bnxt_ulp_sriov_cfg(bp, *num_vfs);
-
rc = pci_enable_sriov(bp->pdev, *num_vfs);
if (rc)
goto err_out2;
@@ -837,6 +864,11 @@ int bnxt_sriov_configure(struct pci_dev *pdev, int num_vfs)
rtnl_unlock();
return 0;
}
+ if (test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) {
+ netdev_warn(dev, "Reject SRIOV config request when FW reset is in progress\n");
+ rtnl_unlock();
+ return 0;
+ }
bp->sriov_cfg = true;
rtnl_unlock();
@@ -870,7 +902,6 @@ static int bnxt_hwrm_fwd_resp(struct bnxt *bp, struct bnxt_vf_info *vf,
{
int rc = 0;
struct hwrm_fwd_resp_input req = {0};
- struct hwrm_fwd_resp_output *resp = bp->hwrm_cmd_resp_addr;
if (BNXT_FWD_RESP_SIZE_ERR(msg_size))
return -EINVAL;
@@ -885,22 +916,9 @@ static int bnxt_hwrm_fwd_resp(struct bnxt *bp, struct bnxt_vf_info *vf,
req.encap_resp_cmpl_ring = encap_resp_cpr;
memcpy(req.encap_resp, encap_resp, msg_size);
- mutex_lock(&bp->hwrm_cmd_lock);
- rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
-
- if (rc) {
+ rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ if (rc)
netdev_err(bp->dev, "hwrm_fwd_resp failed. rc:%d\n", rc);
- goto fwd_resp_exit;
- }
-
- if (resp->error_code) {
- netdev_err(bp->dev, "hwrm_fwd_resp error %d\n",
- resp->error_code);
- rc = -1;
- }
-
-fwd_resp_exit:
- mutex_unlock(&bp->hwrm_cmd_lock);
return rc;
}
@@ -909,7 +927,6 @@ static int bnxt_hwrm_fwd_err_resp(struct bnxt *bp, struct bnxt_vf_info *vf,
{
int rc = 0;
struct hwrm_reject_fwd_resp_input req = {0};
- struct hwrm_reject_fwd_resp_output *resp = bp->hwrm_cmd_resp_addr;
if (BNXT_REJ_FWD_RESP_SIZE_ERR(msg_size))
return -EINVAL;
@@ -920,22 +937,9 @@ static int bnxt_hwrm_fwd_err_resp(struct bnxt *bp, struct bnxt_vf_info *vf,
req.encap_resp_target_id = cpu_to_le16(vf->fw_fid);
memcpy(req.encap_request, vf->hwrm_cmd_req_addr, msg_size);
- mutex_lock(&bp->hwrm_cmd_lock);
- rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
-
- if (rc) {
+ rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ if (rc)
netdev_err(bp->dev, "hwrm_fwd_err_resp failed. rc:%d\n", rc);
- goto fwd_err_resp_exit;
- }
-
- if (resp->error_code) {
- netdev_err(bp->dev, "hwrm_fwd_err_resp error %d\n",
- resp->error_code);
- rc = -1;
- }
-
-fwd_err_resp_exit:
- mutex_unlock(&bp->hwrm_cmd_lock);
return rc;
}
@@ -944,7 +948,6 @@ static int bnxt_hwrm_exec_fwd_resp(struct bnxt *bp, struct bnxt_vf_info *vf,
{
int rc = 0;
struct hwrm_exec_fwd_resp_input req = {0};
- struct hwrm_exec_fwd_resp_output *resp = bp->hwrm_cmd_resp_addr;
if (BNXT_EXEC_FWD_RESP_SIZE_ERR(msg_size))
return -EINVAL;
@@ -955,22 +958,9 @@ static int bnxt_hwrm_exec_fwd_resp(struct bnxt *bp, struct bnxt_vf_info *vf,
req.encap_resp_target_id = cpu_to_le16(vf->fw_fid);
memcpy(req.encap_request, vf->hwrm_cmd_req_addr, msg_size);
- mutex_lock(&bp->hwrm_cmd_lock);
- rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
-
- if (rc) {
+ rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ if (rc)
netdev_err(bp->dev, "hwrm_exec_fw_resp failed. rc:%d\n", rc);
- goto exec_fwd_resp_exit;
- }
-
- if (resp->error_code) {
- netdev_err(bp->dev, "hwrm_exec_fw_resp error %d\n",
- resp->error_code);
- rc = -1;
- }
-
-exec_fwd_resp_exit:
- mutex_unlock(&bp->hwrm_cmd_lock);
return rc;
}
@@ -1190,6 +1180,13 @@ mac_done:
}
#else
+int bnxt_cfg_hw_sriov(struct bnxt *bp, int *num_vfs, bool reset)
+{
+ if (*num_vfs)
+ return -EOPNOTSUPP;
+ return 0;
+}
+
void bnxt_sriov_disable(struct bnxt *bp)
{
}
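
All three forwarded-response helpers in this file shrink the same way: hwrm_send_message() already takes hwrm_cmd_lock internally, and with firmware status mapped to errno a failing resp->error_code now surfaces as a non-zero rc, so the explicit lock/check/unlock sequence was dead weight. Each helper reduces to the shape:

	rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (rc)
		netdev_err(bp->dev, "forwarding failed. rc:%d\n", rc);
	return rc;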
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.h
index 2eed9eda1195..629641bf6fc5 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.h
@@ -36,6 +36,7 @@ int bnxt_set_vf_link_state(struct net_device *, int, int);
int bnxt_set_vf_spoofchk(struct net_device *, int, bool);
int bnxt_set_vf_trust(struct net_device *dev, int vf_id, bool trust);
int bnxt_sriov_configure(struct pci_dev *pdev, int num_vfs);
+int bnxt_cfg_hw_sriov(struct bnxt *bp, int *num_vfs, bool reset);
void bnxt_sriov_disable(struct bnxt *);
void bnxt_hwrm_exec_fwd_req(struct bnxt *);
void bnxt_update_vf_mac(struct bnxt *);
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c
index dd621f6bd127..c8062d020d1e 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c
@@ -319,8 +319,6 @@ static int bnxt_hwrm_cfa_flow_free(struct bnxt *bp,
if (rc)
netdev_info(bp->dev, "%s: Error rc=%d", __func__, rc);
- if (rc)
- rc = -EIO;
return rc;
}
@@ -515,11 +513,6 @@ static int bnxt_hwrm_cfa_flow_alloc(struct bnxt *bp, struct bnxt_tc_flow *flow,
}
}
mutex_unlock(&bp->hwrm_cmd_lock);
-
- if (rc == HWRM_ERR_CODE_RESOURCE_ALLOC_ERROR)
- rc = -ENOSPC;
- else if (rc)
- rc = -EIO;
return rc;
}
@@ -591,8 +584,6 @@ static int hwrm_cfa_decap_filter_alloc(struct bnxt *bp,
}
mutex_unlock(&bp->hwrm_cmd_lock);
- if (rc)
- rc = -EIO;
return rc;
}
@@ -609,8 +600,6 @@ static int hwrm_cfa_decap_filter_free(struct bnxt *bp,
if (rc)
netdev_info(bp->dev, "%s: Error rc=%d", __func__, rc);
- if (rc)
- rc = -EIO;
return rc;
}
@@ -660,8 +649,6 @@ static int hwrm_cfa_encap_record_alloc(struct bnxt *bp,
}
mutex_unlock(&bp->hwrm_cmd_lock);
- if (rc)
- rc = -EIO;
return rc;
}
@@ -678,8 +665,6 @@ static int hwrm_cfa_encap_record_free(struct bnxt *bp,
if (rc)
netdev_info(bp->dev, "%s: Error rc=%d", __func__, rc);
- if (rc)
- rc = -EIO;
return rc;
}
@@ -1457,8 +1442,6 @@ bnxt_hwrm_cfa_flow_stats_get(struct bnxt *bp, int num_flows,
}
mutex_unlock(&bp->hwrm_cmd_lock);
- if (rc)
- rc = -EIO;
return rc;
}
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
index fc77caf0a076..b2c160947fc8 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
@@ -226,6 +226,9 @@ static int bnxt_send_msg(struct bnxt_en_dev *edev, int ulp_id,
struct input *req;
int rc;
+ if (ulp_id != BNXT_ROCE_ULP && bp->fw_reset_state)
+ return -EBUSY;
+
mutex_lock(&bp->hwrm_cmd_lock);
req = fw_msg->msg;
req->resp_addr = cpu_to_le64(bp->hwrm_cmd_resp_dma_addr);
diff --git a/drivers/net/ethernet/broadcom/cnic.c b/drivers/net/ethernet/broadcom/cnic.c
index 57dc3cbff36e..155599dcee76 100644
--- a/drivers/net/ethernet/broadcom/cnic.c
+++ b/drivers/net/ethernet/broadcom/cnic.c
@@ -4096,12 +4096,16 @@ static int cnic_cm_alloc_mem(struct cnic_dev *dev)
{
struct cnic_local *cp = dev->cnic_priv;
u32 port_id;
+ int i;
cp->csk_tbl = kvcalloc(MAX_CM_SK_TBL_SZ, sizeof(struct cnic_sock),
GFP_KERNEL);
if (!cp->csk_tbl)
return -ENOMEM;
+ for (i = 0; i < MAX_CM_SK_TBL_SZ; i++)
+ atomic_set(&cp->csk_tbl[i].ref_count, 0);
+
port_id = prandom_u32();
port_id %= CNIC_LOCAL_PORT_RANGE;
if (cnic_init_id_tbl(&cp->csk_port_tbl, CNIC_LOCAL_PORT_RANGE,
@@ -5480,6 +5484,7 @@ static struct cnic_dev *cnic_alloc_dev(struct net_device *dev,
cdev->unregister_device = cnic_unregister_device;
cdev->iscsi_nl_msg_recv = cnic_iscsi_nl_msg_recv;
cdev->get_fc_npiv_tbl = cnic_get_fc_npiv_tbl;
+ atomic_set(&cdev->ref_count, 0);
cp = cdev->cnic_priv;
cp->dev = cdev;
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
index b22196880d6d..12cb77ef1081 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
@@ -2516,19 +2516,14 @@ static int bcmgenet_dma_teardown(struct bcmgenet_priv *priv)
static void bcmgenet_fini_dma(struct bcmgenet_priv *priv)
{
struct netdev_queue *txq;
- struct sk_buff *skb;
- struct enet_cb *cb;
int i;
bcmgenet_fini_rx_napi(priv);
bcmgenet_fini_tx_napi(priv);
- for (i = 0; i < priv->num_tx_bds; i++) {
- cb = priv->tx_cbs + i;
- skb = bcmgenet_free_tx_cb(&priv->pdev->dev, cb);
- if (skb)
- dev_kfree_skb(skb);
- }
+ for (i = 0; i < priv->num_tx_bds; i++)
+ dev_kfree_skb(bcmgenet_free_tx_cb(&priv->pdev->dev,
+ priv->tx_cbs + i));
for (i = 0; i < priv->hw_params->tx_queues; i++) {
txq = netdev_get_tx_queue(priv->dev, priv->tx_rings[i].queue);
@@ -3438,7 +3433,6 @@ static int bcmgenet_probe(struct platform_device *pdev)
struct bcmgenet_priv *priv;
struct net_device *dev;
const void *macaddr;
- struct resource *r;
unsigned int i;
int err = -EIO;
const char *phy_mode_str;
@@ -3478,8 +3472,7 @@ static int bcmgenet_probe(struct platform_device *pdev)
macaddr = pd->mac_address;
}
- r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- priv->base = devm_ioremap_resource(&pdev->dev, r);
+ priv->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(priv->base)) {
err = PTR_ERR(priv->base);
goto err;
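
bcmgenet is one of several drivers in this patch (cs89x0 and nps_enet below are the others) converted to devm_platform_ioremap_resource(), which collapses the platform_get_resource() + devm_ioremap_resource() pair into one call. A minimal probe sketch, with illustrative names:

	static int foo_probe(struct platform_device *pdev)
	{
		void __iomem *base;

		base = devm_platform_ioremap_resource(pdev, 0); /* MEM resource 0 */
		if (IS_ERR(base))
			return PTR_ERR(base);	/* the helper logs the failure */
		/* ... program the device through base ... */
		return 0;
	}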
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index 4c404d2213f9..77f3511b97de 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -18041,8 +18041,7 @@ static void tg3_remove_one(struct pci_dev *pdev)
#ifdef CONFIG_PM_SLEEP
static int tg3_suspend(struct device *device)
{
- struct pci_dev *pdev = to_pci_dev(device);
- struct net_device *dev = pci_get_drvdata(pdev);
+ struct net_device *dev = dev_get_drvdata(device);
struct tg3 *tp = netdev_priv(dev);
int err = 0;
@@ -18098,8 +18097,7 @@ unlock:
static int tg3_resume(struct device *device)
{
- struct pci_dev *pdev = to_pci_dev(device);
- struct net_device *dev = pci_get_drvdata(pdev);
+ struct net_device *dev = dev_get_drvdata(device);
struct tg3 *tp = netdev_priv(dev);
int err = 0;
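
The tg3 suspend/resume callbacks receive a struct device *, and dev_get_drvdata() returns the same pointer pci_get_drvdata() would, so the to_pci_dev() detour buys nothing. The pattern for any dev_pm_ops callback, sketched with an illustrative priv type:

	static int foo_suspend(struct device *device)
	{
		struct net_device *dev = dev_get_drvdata(device);
		struct foo_priv *priv = netdev_priv(dev);

		/* ... quiesce the hardware via priv ... */
		return 0;
	}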
diff --git a/drivers/net/ethernet/brocade/bna/bnad.c b/drivers/net/ethernet/brocade/bna/bnad.c
index 7767ae6fa1fd..e338272931d1 100644
--- a/drivers/net/ethernet/brocade/bna/bnad.c
+++ b/drivers/net/ethernet/brocade/bna/bnad.c
@@ -3032,7 +3032,7 @@ bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev)
head_unmap->nvecs++;
for (i = 0, vect_id = 0; i < vectors - 1; i++) {
- const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
+ const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
u32 size = skb_frag_size(frag);
if (unlikely(size == 0)) {
diff --git a/drivers/net/ethernet/calxeda/xgmac.c b/drivers/net/ethernet/calxeda/xgmac.c
index 99f49d059414..f96a42af1014 100644
--- a/drivers/net/ethernet/calxeda/xgmac.c
+++ b/drivers/net/ethernet/calxeda/xgmac.c
@@ -1104,7 +1104,7 @@ static netdev_tx_t xgmac_xmit(struct sk_buff *skb, struct net_device *dev)
for (i = 0; i < nfrags; i++) {
skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
- len = frag->size;
+ len = skb_frag_size(frag);
paddr = skb_frag_dma_map(priv->device, frag, 0, len,
DMA_TO_DEVICE);
diff --git a/drivers/net/ethernet/cavium/liquidio/lio_main.c b/drivers/net/ethernet/cavium/liquidio/lio_main.c
index eab805579f96..7f3b2e3b0868 100644
--- a/drivers/net/ethernet/cavium/liquidio/lio_main.c
+++ b/drivers/net/ethernet/cavium/liquidio/lio_main.c
@@ -1492,11 +1492,11 @@ static void free_netsgbuf(void *buf)
i = 1;
while (frags--) {
- struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i - 1];
+ skb_frag_t *frag = &skb_shinfo(skb)->frags[i - 1];
pci_unmap_page((lio->oct_dev)->pci_dev,
g->sg[(i >> 2)].ptr[(i & 3)],
- frag->size, DMA_TO_DEVICE);
+ skb_frag_size(frag), DMA_TO_DEVICE);
i++;
}
@@ -1535,11 +1535,11 @@ static void free_netsgbuf_with_resp(void *buf)
i = 1;
while (frags--) {
- struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i - 1];
+ skb_frag_t *frag = &skb_shinfo(skb)->frags[i - 1];
pci_unmap_page((lio->oct_dev)->pci_dev,
g->sg[(i >> 2)].ptr[(i & 3)],
- frag->size, DMA_TO_DEVICE);
+ skb_frag_size(frag), DMA_TO_DEVICE);
i++;
}
@@ -2424,7 +2424,7 @@ static netdev_tx_t liquidio_xmit(struct sk_buff *skb, struct net_device *netdev)
} else {
int i, frags;
- struct skb_frag_struct *frag;
+ skb_frag_t *frag;
struct octnic_gather *g;
spin_lock(&lio->glist_lock[q_idx]);
@@ -2462,11 +2462,9 @@ static netdev_tx_t liquidio_xmit(struct sk_buff *skb, struct net_device *netdev)
frag = &skb_shinfo(skb)->frags[i - 1];
g->sg[(i >> 2)].ptr[(i & 3)] =
- dma_map_page(&oct->pci_dev->dev,
- frag->page.p,
- frag->page_offset,
- frag->size,
- DMA_TO_DEVICE);
+ skb_frag_dma_map(&oct->pci_dev->dev,
+ frag, 0, skb_frag_size(frag),
+ DMA_TO_DEVICE);
if (dma_mapping_error(&oct->pci_dev->dev,
g->sg[i >> 2].ptr[i & 3])) {
@@ -2478,7 +2476,7 @@ static netdev_tx_t liquidio_xmit(struct sk_buff *skb, struct net_device *netdev)
frag = &skb_shinfo(skb)->frags[j - 1];
dma_unmap_page(&oct->pci_dev->dev,
g->sg[j >> 2].ptr[j & 3],
- frag->size,
+ skb_frag_size(frag),
DMA_TO_DEVICE);
}
dev_err(&oct->pci_dev->dev, "%s DMA mapping error 3\n",
@@ -2486,7 +2484,8 @@ static netdev_tx_t liquidio_xmit(struct sk_buff *skb, struct net_device *netdev)
return NETDEV_TX_BUSY;
}
- add_sg_size(&g->sg[(i >> 2)], frag->size, (i & 3));
+ add_sg_size(&g->sg[(i >> 2)], skb_frag_size(frag),
+ (i & 3));
i++;
}
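
The liquidio hunks replace direct pokes at frag->page.p, frag->page_offset and frag->size with the skb_frag accessor API, insulating the driver from the layout of skb_frag_t. The mapping idiom these conversions converge on, sketched:

	const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
	dma_addr_t map;

	map = skb_frag_dma_map(dev, frag, 0, skb_frag_size(frag),
			       DMA_TO_DEVICE);
	if (dma_mapping_error(dev, map))
		goto unwind;	/* undo the frags mapped so far */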
diff --git a/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c b/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c
index db0b90555acb..370d76822ee0 100644
--- a/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c
+++ b/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c
@@ -837,11 +837,11 @@ static void free_netsgbuf(void *buf)
i = 1;
while (frags--) {
- struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i - 1];
+ skb_frag_t *frag = &skb_shinfo(skb)->frags[i - 1];
pci_unmap_page((lio->oct_dev)->pci_dev,
g->sg[(i >> 2)].ptr[(i & 3)],
- frag->size, DMA_TO_DEVICE);
+ skb_frag_size(frag), DMA_TO_DEVICE);
i++;
}
@@ -881,11 +881,11 @@ static void free_netsgbuf_with_resp(void *buf)
i = 1;
while (frags--) {
- struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i - 1];
+ skb_frag_t *frag = &skb_shinfo(skb)->frags[i - 1];
pci_unmap_page((lio->oct_dev)->pci_dev,
g->sg[(i >> 2)].ptr[(i & 3)],
- frag->size, DMA_TO_DEVICE);
+ skb_frag_size(frag), DMA_TO_DEVICE);
i++;
}
@@ -1497,7 +1497,7 @@ static netdev_tx_t liquidio_xmit(struct sk_buff *skb, struct net_device *netdev)
ndata.reqtype = REQTYPE_NORESP_NET;
} else {
- struct skb_frag_struct *frag;
+ skb_frag_t *frag;
struct octnic_gather *g;
int i, frags;
@@ -1535,11 +1535,9 @@ static netdev_tx_t liquidio_xmit(struct sk_buff *skb, struct net_device *netdev)
frag = &skb_shinfo(skb)->frags[i - 1];
g->sg[(i >> 2)].ptr[(i & 3)] =
- dma_map_page(&oct->pci_dev->dev,
- frag->page.p,
- frag->page_offset,
- frag->size,
- DMA_TO_DEVICE);
+ skb_frag_dma_map(&oct->pci_dev->dev,
+ frag, 0, skb_frag_size(frag),
+ DMA_TO_DEVICE);
if (dma_mapping_error(&oct->pci_dev->dev,
g->sg[i >> 2].ptr[i & 3])) {
dma_unmap_single(&oct->pci_dev->dev,
@@ -1550,7 +1548,7 @@ static netdev_tx_t liquidio_xmit(struct sk_buff *skb, struct net_device *netdev)
frag = &skb_shinfo(skb)->frags[j - 1];
dma_unmap_page(&oct->pci_dev->dev,
g->sg[j >> 2].ptr[j & 3],
- frag->size,
+ skb_frag_size(frag),
DMA_TO_DEVICE);
}
dev_err(&oct->pci_dev->dev, "%s DMA mapping error 3\n",
@@ -1558,7 +1556,8 @@ static netdev_tx_t liquidio_xmit(struct sk_buff *skb, struct net_device *netdev)
return NETDEV_TX_BUSY;
}
- add_sg_size(&g->sg[(i >> 2)], frag->size, (i & 3));
+ add_sg_size(&g->sg[(i >> 2)], skb_frag_size(frag),
+ (i & 3));
i++;
}
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_mailbox.c b/drivers/net/ethernet/cavium/liquidio/octeon_mailbox.c
index 021d99cd1665..614d07be7181 100644
--- a/drivers/net/ethernet/cavium/liquidio/octeon_mailbox.c
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_mailbox.c
@@ -260,9 +260,7 @@ static int octeon_mbox_process_cmd(struct octeon_mbox *mbox,
dev_info(&oct->pci_dev->dev,
"got a request for FLR from VF that owns DPI ring %u\n",
mbox->q_no);
- pcie_capability_set_word(
- oct->sriov_info.dpiring_to_vfpcidev_lut[mbox->q_no],
- PCI_EXP_DEVCTL, PCI_EXP_DEVCTL_BCR_FLR);
+ pcie_flr(oct->sriov_info.dpiring_to_vfpcidev_lut[mbox->q_no]);
break;
case OCTEON_PF_CHANGED_VF_MACADDR:
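
pcie_flr() performs the full Function Level Reset sequence (waiting out pending transactions, writing PCI_EXP_DEVCTL_BCR_FLR, then honoring the mandatory post-reset delay), all of which the bare pcie_capability_set_word() write skipped. Hedged usage sketch, assuming the VF's pci_dev is at hand:

	if (pcie_has_flr(vf_pdev))	/* vf_pdev: illustrative pci_dev pointer */
		pcie_flr(vf_pdev);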
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
index 192bc92da881..4ab57d33a87e 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
@@ -1588,15 +1588,13 @@ int nicvf_sq_append_skb(struct nicvf *nic, struct snd_queue *sq,
goto doorbell;
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
- const struct skb_frag_struct *frag;
-
- frag = &skb_shinfo(skb)->frags[i];
+ const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
qentry = nicvf_get_nxt_sqentry(sq, qentry);
size = skb_frag_size(frag);
dma_addr = dma_map_page_attrs(&nic->pdev->dev,
skb_frag_page(frag),
- frag->page_offset, size,
+ skb_frag_off(frag), size,
DMA_TO_DEVICE,
DMA_ATTR_SKIP_CPU_SYNC);
if (dma_mapping_error(&nic->pdev->dev, dma_addr)) {
diff --git a/drivers/net/ethernet/chelsio/cxgb3/sge.c b/drivers/net/ethernet/chelsio/cxgb3/sge.c
index 89db739b7819..6dabbf1502c7 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb3/sge.c
@@ -2132,7 +2132,7 @@ static void lro_add_page(struct adapter *adap, struct sge_qset *qs,
struct port_info *pi = netdev_priv(qs->netdev);
struct sk_buff *skb = NULL;
struct cpl_rx_pkt *cpl;
- struct skb_frag_struct *rx_frag;
+ skb_frag_t *rx_frag;
int nr_frags;
int offset = 0;
@@ -2182,7 +2182,7 @@ static void lro_add_page(struct adapter *adap, struct sge_qset *qs,
rx_frag += nr_frags;
__skb_frag_set_page(rx_frag, sd->pg_chunk.page);
- rx_frag->page_offset = sd->pg_chunk.offset + offset;
+ skb_frag_off_set(rx_frag, sd->pg_chunk.offset + offset);
skb_frag_size_set(rx_frag, len);
skb->len += len;
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
index d692251ee252..ae6a47dd7dc9 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
@@ -3531,7 +3531,6 @@ int t4_setup_debugfs(struct adapter *adap)
{
int i;
u32 size = 0;
- struct dentry *de;
static struct t4_debugfs_entry t4_debugfs_files[] = {
{ "cim_la", &cim_la_fops, 0400, 0 },
@@ -3642,8 +3641,8 @@ int t4_setup_debugfs(struct adapter *adap)
}
}
- de = debugfs_create_file_size("flash", 0400, adap->debugfs_root, adap,
- &flash_debugfs_fops, adap->params.sf_size);
+ debugfs_create_file_size("flash", 0400, adap->debugfs_root, adap,
+ &flash_debugfs_fops, adap->params.sf_size);
debugfs_create_bool("use_backdoor", 0600,
adap->debugfs_root, &adap->use_bd);
debugfs_create_bool("trace_rss", 0600,
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index 4311ad9c84b2..71854a19cebe 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -6269,10 +6269,7 @@ static int __init cxgb4_init_module(void)
{
int ret;
- /* Debugfs support is optional, just warn if this fails */
cxgb4_debugfs_root = debugfs_create_dir(KBUILD_MODNAME, NULL);
- if (!cxgb4_debugfs_root)
- pr_warn("could not create debugfs entry, continuing\n");
ret = pci_register_driver(&cxgb4_driver);
if (ret < 0)
diff --git a/drivers/net/ethernet/chelsio/cxgb4/smt.c b/drivers/net/ethernet/chelsio/cxgb4/smt.c
index eaf1fb74689c..01c65d13fc0e 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/smt.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/smt.c
@@ -57,7 +57,7 @@ struct smt_data *t4_init_smt(void)
s->smtab[i].state = SMT_STATE_UNUSED;
memset(&s->smtab[i].src_mac, 0, ETH_ALEN);
spin_lock_init(&s->smtab[i].lock);
- atomic_set(&s->smtab[i].refcnt, 0);
+ s->smtab[i].refcnt = 0;
}
return s;
}
@@ -68,7 +68,7 @@ static struct smt_entry *find_or_alloc_smte(struct smt_data *s, u8 *smac)
struct smt_entry *e, *end;
for (e = &s->smtab[0], end = &s->smtab[s->smt_size]; e != end; ++e) {
- if (atomic_read(&e->refcnt) == 0) {
+ if (e->refcnt == 0) {
if (!first_free)
first_free = e;
} else {
@@ -97,11 +97,9 @@ found_reuse:
static void t4_smte_free(struct smt_entry *e)
{
- spin_lock_bh(&e->lock);
- if (atomic_read(&e->refcnt) == 0) { /* hasn't been recycled */
+ if (e->refcnt == 0) { /* hasn't been recycled */
e->state = SMT_STATE_UNUSED;
}
- spin_unlock_bh(&e->lock);
}
/**
@@ -111,8 +109,10 @@ static void t4_smte_free(struct smt_entry *e)
*/
void cxgb4_smt_release(struct smt_entry *e)
{
- if (atomic_dec_and_test(&e->refcnt))
+ spin_lock_bh(&e->lock);
+ if ((--e->refcnt) == 0)
t4_smte_free(e);
+ spin_unlock_bh(&e->lock);
}
EXPORT_SYMBOL(cxgb4_smt_release);
@@ -215,14 +215,14 @@ static struct smt_entry *t4_smt_alloc_switching(struct adapter *adap, u16 pfvf,
e = find_or_alloc_smte(s, smac);
if (e) {
spin_lock(&e->lock);
- if (!atomic_read(&e->refcnt)) {
- atomic_set(&e->refcnt, 1);
+ if (!e->refcnt) {
+ e->refcnt = 1;
e->state = SMT_STATE_SWITCHING;
e->pfvf = pfvf;
memcpy(e->src_mac, smac, ETH_ALEN);
write_smt_entry(adap, e);
} else {
- atomic_inc(&e->refcnt);
+ ++e->refcnt;
}
spin_unlock(&e->lock);
}
diff --git a/drivers/net/ethernet/chelsio/cxgb4/smt.h b/drivers/net/ethernet/chelsio/cxgb4/smt.h
index d6c2cc271398..1268d6e93a47 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/smt.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/smt.h
@@ -59,7 +59,7 @@ struct smt_entry {
u16 idx;
u16 pfvf;
u8 src_mac[ETH_ALEN];
- atomic_t refcnt;
+ int refcnt;
spinlock_t lock; /* protect smt entry add,removal */
};
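
smt.c can drop atomic_t because every refcnt access now happens under e->lock; the atomicity never protected the check-then-free in cxgb4_smt_release(), the lock does. Distilled into a sketch (the struct and free helper are illustrative, not the driver's):

	struct entry {
		spinlock_t lock;
		int refcnt;		/* only touched under lock */
	};

	static void entry_put(struct entry *e)
	{
		spin_lock_bh(&e->lock);
		if (--e->refcnt == 0)
			entry_free_locked(e);	/* recycle while still locked */
		spin_unlock_bh(&e->lock);
	}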
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
index 6d4cf3d0b2f0..f6fc0875d5b0 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
@@ -2478,11 +2478,10 @@ static int setup_debugfs(struct adapter *adapter)
* Debugfs support is best effort.
*/
for (i = 0; i < ARRAY_SIZE(debugfs_files); i++)
- (void)debugfs_create_file(debugfs_files[i].name,
- debugfs_files[i].mode,
- adapter->debugfs_root,
- (void *)adapter,
- debugfs_files[i].fops);
+ debugfs_create_file(debugfs_files[i].name,
+ debugfs_files[i].mode,
+ adapter->debugfs_root, (void *)adapter,
+ debugfs_files[i].fops);
return 0;
}
@@ -3257,11 +3256,7 @@ static int cxgb4vf_pci_probe(struct pci_dev *pdev,
adapter->debugfs_root =
debugfs_create_dir(pci_name(pdev),
cxgb4vf_debugfs_root);
- if (IS_ERR_OR_NULL(adapter->debugfs_root))
- dev_warn(&pdev->dev, "could not create debugfs"
- " directory");
- else
- setup_debugfs(adapter);
+ setup_debugfs(adapter);
}
/*
@@ -3486,13 +3481,11 @@ static int __init cxgb4vf_module_init(void)
return -EINVAL;
}
- /* Debugfs support is optional, just warn if this fails */
+ /* Debugfs support is optional; debugfs will warn if this fails */
cxgb4vf_debugfs_root = debugfs_create_dir(KBUILD_MODNAME, NULL);
- if (IS_ERR_OR_NULL(cxgb4vf_debugfs_root))
- pr_warn("could not create debugfs entry, continuing\n");
ret = pci_register_driver(&cxgb4vf_driver);
- if (ret < 0 && !IS_ERR_OR_NULL(cxgb4vf_debugfs_root))
+ if (ret < 0)
debugfs_remove(cxgb4vf_debugfs_root);
return ret;
}
diff --git a/drivers/net/ethernet/cirrus/cs89x0.c b/drivers/net/ethernet/cirrus/cs89x0.c
index b3e7fafee3df..c9aebcde403a 100644
--- a/drivers/net/ethernet/cirrus/cs89x0.c
+++ b/drivers/net/ethernet/cirrus/cs89x0.c
@@ -1844,16 +1844,12 @@ cleanup_module(void)
static int __init cs89x0_platform_probe(struct platform_device *pdev)
{
struct net_device *dev = alloc_etherdev(sizeof(struct net_local));
- struct net_local *lp;
- struct resource *mem_res;
void __iomem *virt_addr;
int err;
if (!dev)
return -ENOMEM;
- lp = netdev_priv(dev);
-
dev->irq = platform_get_irq(pdev, 0);
if (dev->irq <= 0) {
dev_warn(&dev->dev, "interrupt resource missing\n");
@@ -1861,8 +1857,7 @@ static int __init cs89x0_platform_probe(struct platform_device *pdev)
goto free;
}
- mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- virt_addr = devm_ioremap_resource(&pdev->dev, mem_res);
+ virt_addr = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(virt_addr)) {
err = PTR_ERR(virt_addr);
goto free;
diff --git a/drivers/net/ethernet/cortina/gemini.c b/drivers/net/ethernet/cortina/gemini.c
index 9003eb6716cd..e736ce2c58ca 100644
--- a/drivers/net/ethernet/cortina/gemini.c
+++ b/drivers/net/ethernet/cortina/gemini.c
@@ -1182,9 +1182,8 @@ static int gmac_map_tx_bufs(struct net_device *netdev, struct sk_buff *skb,
buflen = skb_headlen(skb);
} else {
skb_frag = skb_si->frags + frag;
- buffer = page_address(skb_frag_page(skb_frag)) +
- skb_frag->page_offset;
- buflen = skb_frag->size;
+ buffer = skb_frag_address(skb_frag);
+ buflen = skb_frag_size(skb_frag);
}
if (frag == last_frag) {
@@ -2423,10 +2422,8 @@ static int gemini_ethernet_port_probe(struct platform_device *pdev)
/* Interrupt */
irq = platform_get_irq(pdev, 0);
- if (irq <= 0) {
- dev_err(dev, "no IRQ\n");
+ if (irq <= 0)
return irq ? irq : -ENODEV;
- }
port->irq = irq;
/* Clock the port */
diff --git a/drivers/net/ethernet/davicom/dm9000.c b/drivers/net/ethernet/davicom/dm9000.c
index 386bdc1378d1..cce90b5925d9 100644
--- a/drivers/net/ethernet/davicom/dm9000.c
+++ b/drivers/net/ethernet/davicom/dm9000.c
@@ -1500,8 +1500,6 @@ dm9000_probe(struct platform_device *pdev)
ndev->irq = platform_get_irq(pdev, 0);
if (ndev->irq < 0) {
- dev_err(db->dev, "interrupt resource unavailable: %d\n",
- ndev->irq);
ret = ndev->irq;
goto out;
}
diff --git a/drivers/net/ethernet/emulex/benet/be.h b/drivers/net/ethernet/emulex/benet/be.h
index f287b5da5546..cf3e6f2892ff 100644
--- a/drivers/net/ethernet/emulex/benet/be.h
+++ b/drivers/net/ethernet/emulex/benet/be.h
@@ -192,7 +192,6 @@ struct be_eq_obj {
} ____cacheline_aligned_in_smp;
struct be_aic_obj { /* Adaptive interrupt coalescing (AIC) info */
- bool enable;
u32 min_eqd; /* in usecs */
u32 max_eqd; /* in usecs */
u32 prev_eqd; /* in usecs */
@@ -589,6 +588,7 @@ struct be_adapter {
struct be_drv_stats drv_stats;
struct be_aic_obj aic_obj[MAX_EVT_QS];
+ bool aic_enabled;
u8 vlan_prio_bmap; /* Available Priority BitMap */
u16 recommended_prio_bits;/* Recommended Priority bits in vlan tag */
struct be_dma_mem rx_filter; /* Cmd DMA mem for rx-filter */
diff --git a/drivers/net/ethernet/emulex/benet/be_ethtool.c b/drivers/net/ethernet/emulex/benet/be_ethtool.c
index 492f8769ac12..5bb5abf99588 100644
--- a/drivers/net/ethernet/emulex/benet/be_ethtool.c
+++ b/drivers/net/ethernet/emulex/benet/be_ethtool.c
@@ -329,8 +329,8 @@ static int be_get_coalesce(struct net_device *netdev,
et->tx_coalesce_usecs_high = aic->max_eqd;
et->tx_coalesce_usecs_low = aic->min_eqd;
- et->use_adaptive_rx_coalesce = aic->enable;
- et->use_adaptive_tx_coalesce = aic->enable;
+ et->use_adaptive_rx_coalesce = adapter->aic_enabled;
+ et->use_adaptive_tx_coalesce = adapter->aic_enabled;
return 0;
}
@@ -346,8 +346,9 @@ static int be_set_coalesce(struct net_device *netdev,
struct be_eq_obj *eqo;
int i;
+ adapter->aic_enabled = et->use_adaptive_rx_coalesce;
+
for_all_evt_queues(adapter, eqo, i) {
- aic->enable = et->use_adaptive_rx_coalesce;
aic->max_eqd = min(et->rx_coalesce_usecs_high, BE_MAX_EQD);
aic->min_eqd = min(et->rx_coalesce_usecs_low, aic->max_eqd);
aic->et_eqd = min(et->rx_coalesce_usecs, aic->max_eqd);
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index 4d8e40ac66d2..39eb7d525043 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -1014,7 +1014,7 @@ static u32 be_xmit_enqueue(struct be_adapter *adapter, struct be_tx_obj *txo,
}
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
- const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
+ const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
len = skb_frag_size(frag);
busaddr = skb_frag_dma_map(dev, frag, 0, len, DMA_TO_DEVICE);
@@ -2147,7 +2147,7 @@ static int be_get_new_eqd(struct be_eq_obj *eqo)
int i;
aic = &adapter->aic_obj[eqo->idx];
- if (!aic->enable) {
+ if (!adapter->aic_enabled) {
if (aic->jiffies)
aic->jiffies = 0;
eqd = aic->et_eqd;
@@ -2204,7 +2204,7 @@ static u32 be_get_eq_delay_mult_enc(struct be_eq_obj *eqo)
int eqd;
u32 mult_enc;
- if (!aic->enable)
+ if (!adapter->aic_enabled)
return 0;
if (jiffies_to_msecs(now - aic->jiffies) < 1)
@@ -2346,8 +2346,8 @@ static void skb_fill_rx_data(struct be_rx_obj *rxo, struct sk_buff *skb,
memcpy(skb->data, start, hdr_len);
skb_shinfo(skb)->nr_frags = 1;
skb_frag_set_page(skb, 0, page_info->page);
- skb_shinfo(skb)->frags[0].page_offset =
- page_info->page_offset + hdr_len;
+ skb_frag_off_set(&skb_shinfo(skb)->frags[0],
+ page_info->page_offset + hdr_len);
skb_frag_size_set(&skb_shinfo(skb)->frags[0],
curr_frag_len - hdr_len);
skb->data_len = curr_frag_len - hdr_len;
@@ -2372,8 +2372,8 @@ static void skb_fill_rx_data(struct be_rx_obj *rxo, struct sk_buff *skb,
/* Fresh page */
j++;
skb_frag_set_page(skb, j, page_info->page);
- skb_shinfo(skb)->frags[j].page_offset =
- page_info->page_offset;
+ skb_frag_off_set(&skb_shinfo(skb)->frags[j],
+ page_info->page_offset);
skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
skb_shinfo(skb)->nr_frags++;
} else {
@@ -2454,8 +2454,8 @@ static void be_rx_compl_process_gro(struct be_rx_obj *rxo,
/* First frag or Fresh page */
j++;
skb_frag_set_page(skb, j, page_info->page);
- skb_shinfo(skb)->frags[j].page_offset =
- page_info->page_offset;
+ skb_frag_off_set(&skb_shinfo(skb)->frags[j],
+ page_info->page_offset);
skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
} else {
put_page(page_info->page);
@@ -2959,6 +2959,8 @@ static int be_evt_queues_create(struct be_adapter *adapter)
max(adapter->cfg_num_rx_irqs,
adapter->cfg_num_tx_irqs));
+ adapter->aic_enabled = true;
+
for_all_evt_queues(adapter, eqo, i) {
int numa_node = dev_to_node(&adapter->pdev->dev);
@@ -2966,7 +2968,6 @@ static int be_evt_queues_create(struct be_adapter *adapter)
eqo->adapter = adapter;
eqo->idx = i;
aic->max_eqd = BE_MAX_EQD;
- aic->enable = true;
eq = &eqo->q;
rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
diff --git a/drivers/net/ethernet/ezchip/nps_enet.c b/drivers/net/ethernet/ezchip/nps_enet.c
index 027225e1ade2..815fb62c4b02 100644
--- a/drivers/net/ethernet/ezchip/nps_enet.c
+++ b/drivers/net/ethernet/ezchip/nps_enet.c
@@ -576,7 +576,6 @@ static s32 nps_enet_probe(struct platform_device *pdev)
struct nps_enet_priv *priv;
s32 err = 0;
const char *mac_addr;
- struct resource *res_regs;
if (!dev->of_node)
return -ENODEV;
@@ -595,8 +594,7 @@ static s32 nps_enet_probe(struct platform_device *pdev)
/* FIXME :: no multicast support yet */
ndev->flags &= ~IFF_MULTICAST;
- res_regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- priv->regs_base = devm_ioremap_resource(dev, res_regs);
+ priv->regs_base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(priv->regs_base)) {
err = PTR_ERR(priv->regs_base);
goto out_netdev;
diff --git a/drivers/net/ethernet/faraday/Kconfig b/drivers/net/ethernet/faraday/Kconfig
index a9b105803fb7..73e4f2648e49 100644
--- a/drivers/net/ethernet/faraday/Kconfig
+++ b/drivers/net/ethernet/faraday/Kconfig
@@ -32,6 +32,7 @@ config FTGMAC100
depends on ARM || NDS32 || COMPILE_TEST
depends on !64BIT || BROKEN
select PHYLIB
+ select MDIO_ASPEED if MACH_ASPEED_G6
---help---
This driver supports the FTGMAC100 Gigabit Ethernet controller
from Faraday. It is used on Faraday A369, Andes AG102 and some
diff --git a/drivers/net/ethernet/faraday/ftgmac100.c b/drivers/net/ethernet/faraday/ftgmac100.c
index 030fed65393e..9b7af94a40bb 100644
--- a/drivers/net/ethernet/faraday/ftgmac100.c
+++ b/drivers/net/ethernet/faraday/ftgmac100.c
@@ -17,6 +17,7 @@
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/of.h>
+#include <linux/of_mdio.h>
#include <linux/phy.h>
#include <linux/platform_device.h>
#include <linux/property.h>
@@ -774,7 +775,7 @@ static netdev_tx_t ftgmac100_hard_start_xmit(struct sk_buff *skb,
for (i = 0; i < nfrags; i++) {
skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
- len = frag->size;
+ len = skb_frag_size(frag);
/* Map it */
map = skb_frag_dma_map(priv->dev, frag, 0, len,
@@ -1619,8 +1620,13 @@ static int ftgmac100_setup_mdio(struct net_device *netdev)
if (!priv->mii_bus)
return -EIO;
- if (priv->is_aspeed) {
- /* This driver supports the old MDIO interface */
+ if (of_device_is_compatible(np, "aspeed,ast2400-mac") ||
+ of_device_is_compatible(np, "aspeed,ast2500-mac")) {
+ /* The AST2600 has a separate MDIO controller */
+
+ /* For the AST2400 and AST2500 this driver only supports the
+ * old MDIO interface
+ */
reg = ioread32(priv->base + FTGMAC100_OFFSET_REVR);
reg &= ~FTGMAC100_REVR_NEW_MDIO_INTERFACE;
iowrite32(reg, priv->base + FTGMAC100_OFFSET_REVR);
@@ -1797,7 +1803,8 @@ static int ftgmac100_probe(struct platform_device *pdev)
np = pdev->dev.of_node;
if (np && (of_device_is_compatible(np, "aspeed,ast2400-mac") ||
- of_device_is_compatible(np, "aspeed,ast2500-mac"))) {
+ of_device_is_compatible(np, "aspeed,ast2500-mac") ||
+ of_device_is_compatible(np, "aspeed,ast2600-mac"))) {
priv->rxdes0_edorr_mask = BIT(30);
priv->txdes0_edotr_mask = BIT(30);
priv->is_aspeed = true;
@@ -1817,7 +1824,29 @@ static int ftgmac100_probe(struct platform_device *pdev)
priv->ndev = ncsi_register_dev(netdev, ftgmac100_ncsi_handler);
if (!priv->ndev)
goto err_ncsi_dev;
- } else {
+ } else if (np && of_get_property(np, "phy-handle", NULL)) {
+ struct phy_device *phy;
+
+ phy = of_phy_get_and_connect(priv->netdev, np,
+ &ftgmac100_adjust_link);
+ if (!phy) {
+ dev_err(&pdev->dev, "Failed to connect to phy\n");
+ goto err_setup_mdio;
+ }
+
+ /* Indicate that we support PAUSE frames (see comment in
+ * Documentation/networking/phy.txt)
+ */
+ phy_support_asym_pause(phy);
+
+ /* Display what we found */
+ phy_attached_info(phy);
+ } else if (np && !of_get_child_by_name(np, "mdio")) {
+ /* Support legacy ASPEED devicetree descriptions that describe a
+ * MAC with an embedded MDIO controller but have no "mdio"
+ * child node. Automatically scan the MDIO bus for available
+ * PHYs.
+ */
priv->use_ncsi = false;
err = ftgmac100_setup_mdio(netdev);
if (err)
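
The new phy-handle branch above leans on of_phy_get_and_connect(), which resolves the devicetree phandle, reads the interface mode, and connects the PHY in one call, returning NULL on failure. A minimal sketch (the adjust_link callback name is ours):

	struct phy_device *phy;

	phy = of_phy_get_and_connect(netdev, pdev->dev.of_node,
				     &foo_adjust_link);
	if (!phy)
		return -ENODEV;

	phy_support_asym_pause(phy);	/* advertise PAUSE, including asymmetric */
	phy_attached_info(phy);		/* log what was found */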
diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
index f38c3fa7d705..b4b82b9c5cd6 100644
--- a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
+++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
@@ -485,7 +485,7 @@ static struct dpaa_bp *dpaa_bpid2pool(int bpid)
static bool dpaa_bpid2pool_use(int bpid)
{
if (dpaa_bpid2pool(bpid)) {
- atomic_inc(&dpaa_bp_array[bpid]->refs);
+ refcount_inc(&dpaa_bp_array[bpid]->refs);
return true;
}
@@ -496,7 +496,7 @@ static bool dpaa_bpid2pool_use(int bpid)
static void dpaa_bpid2pool_map(int bpid, struct dpaa_bp *dpaa_bp)
{
dpaa_bp_array[bpid] = dpaa_bp;
- atomic_set(&dpaa_bp->refs, 1);
+ refcount_set(&dpaa_bp->refs, 1);
}
static int dpaa_bp_alloc_pool(struct dpaa_bp *dpaa_bp)
@@ -584,7 +584,7 @@ static void dpaa_bp_free(struct dpaa_bp *dpaa_bp)
if (!bp)
return;
- if (!atomic_dec_and_test(&bp->refs))
+ if (!refcount_dec_and_test(&bp->refs))
return;
if (bp->free_buf_cb)
@@ -1958,7 +1958,7 @@ static int skb_to_sg_fd(struct dpaa_priv *priv,
/* populate the rest of SGT entries */
for (i = 0; i < nr_frags; i++) {
frag = &skb_shinfo(skb)->frags[i];
- frag_len = frag->size;
+ frag_len = skb_frag_size(frag);
WARN_ON(!skb_frag_page(frag));
addr = skb_frag_dma_map(dev, frag, 0,
frag_len, dma_dir);
diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.h b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.h
index af320f83c742..f7e59e8db075 100644
--- a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.h
+++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.h
@@ -32,6 +32,7 @@
#define __DPAA_H
#include <linux/netdevice.h>
+#include <linux/refcount.h>
#include <soc/fsl/qman.h>
#include <soc/fsl/bman.h>
@@ -99,7 +100,7 @@ struct dpaa_bp {
int (*seed_cb)(struct dpaa_bp *);
/* bpool can be emptied before freeing by this cb */
void (*free_buf_cb)(const struct dpaa_bp *, struct bm_buffer *);
- atomic_t refs;
+ refcount_t refs;
};
struct dpaa_rx_errors {
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-debugfs.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-debugfs.c
index a027f4a9d0cc..a9afe46b837f 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-debugfs.c
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-debugfs.c
@@ -164,70 +164,30 @@ static const struct file_operations dpaa2_dbg_ch_ops = {
void dpaa2_dbg_add(struct dpaa2_eth_priv *priv)
{
- if (!dpaa2_dbg_root)
- return;
+ struct dentry *dir;
/* Create a directory for the interface */
- priv->dbg.dir = debugfs_create_dir(priv->net_dev->name,
- dpaa2_dbg_root);
- if (!priv->dbg.dir) {
- netdev_err(priv->net_dev, "debugfs_create_dir() failed\n");
- return;
- }
+ dir = debugfs_create_dir(priv->net_dev->name, dpaa2_dbg_root);
+ priv->dbg.dir = dir;
/* per-cpu stats file */
- priv->dbg.cpu_stats = debugfs_create_file("cpu_stats", 0444,
- priv->dbg.dir, priv,
- &dpaa2_dbg_cpu_ops);
- if (!priv->dbg.cpu_stats) {
- netdev_err(priv->net_dev, "debugfs_create_file() failed\n");
- goto err_cpu_stats;
- }
+ debugfs_create_file("cpu_stats", 0444, dir, priv, &dpaa2_dbg_cpu_ops);
/* per-fq stats file */
- priv->dbg.fq_stats = debugfs_create_file("fq_stats", 0444,
- priv->dbg.dir, priv,
- &dpaa2_dbg_fq_ops);
- if (!priv->dbg.fq_stats) {
- netdev_err(priv->net_dev, "debugfs_create_file() failed\n");
- goto err_fq_stats;
- }
+ debugfs_create_file("fq_stats", 0444, dir, priv, &dpaa2_dbg_fq_ops);
/* per-fq stats file */
- priv->dbg.ch_stats = debugfs_create_file("ch_stats", 0444,
- priv->dbg.dir, priv,
- &dpaa2_dbg_ch_ops);
- if (!priv->dbg.fq_stats) {
- netdev_err(priv->net_dev, "debugfs_create_file() failed\n");
- goto err_ch_stats;
- }
-
- return;
-
-err_ch_stats:
- debugfs_remove(priv->dbg.fq_stats);
-err_fq_stats:
- debugfs_remove(priv->dbg.cpu_stats);
-err_cpu_stats:
- debugfs_remove(priv->dbg.dir);
+ debugfs_create_file("ch_stats", 0444, dir, priv, &dpaa2_dbg_ch_ops);
}
void dpaa2_dbg_remove(struct dpaa2_eth_priv *priv)
{
- debugfs_remove(priv->dbg.fq_stats);
- debugfs_remove(priv->dbg.ch_stats);
- debugfs_remove(priv->dbg.cpu_stats);
- debugfs_remove(priv->dbg.dir);
+ debugfs_remove_recursive(priv->dbg.dir);
}
void dpaa2_eth_dbg_init(void)
{
dpaa2_dbg_root = debugfs_create_dir(DPAA2_ETH_DBG_ROOT, NULL);
- if (!dpaa2_dbg_root) {
- pr_err("DPAA2-ETH: debugfs create failed\n");
- return;
- }
-
pr_debug("DPAA2-ETH: debugfs created\n");
}
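
The simplification above leans on the debugfs design: callers are not meant to check return values, since debugfs_create_dir() returns an ERR_PTR on failure and passing that to later debugfs calls is handled gracefully. A minimal sketch of the resulting pattern (names hypothetical):

    #include <linux/debugfs.h>

    static struct dentry *my_dbg_dir;
    static const struct file_operations my_dbg_fops;    /* stub for the sketch */

    static void my_dbg_add(void *priv)
    {
            /* no error checking needed; an ERR_PTR dir is tolerated below */
            my_dbg_dir = debugfs_create_dir("my-iface", NULL);
            debugfs_create_file("stats", 0444, my_dbg_dir, priv, &my_dbg_fops);
    }

    static void my_dbg_remove(void)
    {
            debugfs_remove_recursive(my_dbg_dir);   /* dir and all contents */
    }
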
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-debugfs.h b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-debugfs.h
index 4f63de997a26..15598b28f03b 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-debugfs.h
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-debugfs.h
@@ -11,9 +11,6 @@ struct dpaa2_eth_priv;
struct dpaa2_debugfs {
struct dentry *dir;
- struct dentry *fq_stats;
- struct dentry *ch_stats;
- struct dentry *cpu_stats;
};
#ifdef CONFIG_DEBUG_FS
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
index 0acb11557ed1..5402867272be 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
@@ -1208,9 +1208,37 @@ static void disable_ch_napi(struct dpaa2_eth_priv *priv)
}
}
+static void dpaa2_eth_set_rx_taildrop(struct dpaa2_eth_priv *priv, bool enable)
+{
+ struct dpni_taildrop td = {0};
+ int i, err;
+
+ if (priv->rx_td_enabled == enable)
+ return;
+
+ td.enable = enable;
+ td.threshold = DPAA2_ETH_TAILDROP_THRESH;
+
+ for (i = 0; i < priv->num_fqs; i++) {
+ if (priv->fq[i].type != DPAA2_RX_FQ)
+ continue;
+ err = dpni_set_taildrop(priv->mc_io, 0, priv->mc_token,
+ DPNI_CP_QUEUE, DPNI_QUEUE_RX, 0,
+ priv->fq[i].flowid, &td);
+ if (err) {
+ netdev_err(priv->net_dev,
+ "dpni_set_taildrop() failed\n");
+ break;
+ }
+ }
+
+ priv->rx_td_enabled = enable;
+}
+
static int link_state_update(struct dpaa2_eth_priv *priv)
{
struct dpni_link_state state = {0};
+ bool tx_pause;
int err;
err = dpni_get_link_state(priv->mc_io, 0, priv->mc_token, &state);
@@ -1220,11 +1248,18 @@ static int link_state_update(struct dpaa2_eth_priv *priv)
return err;
}
+ /* If Tx pause frame settings have changed, we need to update
+ * Rx FQ taildrop configuration as well. We configure taildrop
+ * only when pause frame generation is disabled.
+ */
+ tx_pause = !!(state.options & DPNI_LINK_OPT_PAUSE) ^
+ !!(state.options & DPNI_LINK_OPT_ASYM_PAUSE);
+ dpaa2_eth_set_rx_taildrop(priv, !tx_pause);
+
/* Check link state; speed / duplex changes are not treated yet */
if (priv->link_state.up == state.up)
- return 0;
+ goto out;
- priv->link_state = state;
if (state.up) {
netif_carrier_on(priv->net_dev);
netif_tx_start_all_queues(priv->net_dev);
@@ -1236,6 +1271,9 @@ static int link_state_update(struct dpaa2_eth_priv *priv)
netdev_info(priv->net_dev, "Link Event: state %s\n",
state.up ? "up" : "down");
+out:
+ priv->link_state = state;
+
return 0;
}
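
The PAUSE/ASYM_PAUSE decoding above is worth spelling out: Rx pause is the PAUSE bit alone, Tx pause is PAUSE XOR ASYM_PAUSE, and Rx taildrop is enabled only when Tx pause is off, since pause frames already provide backpressure. A standalone sketch of the decode (helper name hypothetical):

    /* Truth table for the firmware link options used above:
     *
     *   PAUSE  ASYM_PAUSE | Rx pause  Tx pause
     *     0        0      |    no        no
     *     0        1      |    no        yes
     *     1        0      |    yes       yes
     *     1        1      |    yes       no
     */
    static bool my_tx_pause_enabled(u64 options)
    {
            return !!(options & DPNI_LINK_OPT_PAUSE) ^
                   !!(options & DPNI_LINK_OPT_ASYM_PAUSE);
    }
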
@@ -2443,6 +2481,33 @@ static void set_enqueue_mode(struct dpaa2_eth_priv *priv)
priv->enqueue = dpaa2_eth_enqueue_fq;
}
+static int set_pause(struct dpaa2_eth_priv *priv)
+{
+ struct device *dev = priv->net_dev->dev.parent;
+ struct dpni_link_cfg link_cfg = {0};
+ int err;
+
+ /* Get the default link options so we don't override other flags */
+ err = dpni_get_link_cfg(priv->mc_io, 0, priv->mc_token, &link_cfg);
+ if (err) {
+ dev_err(dev, "dpni_get_link_cfg() failed\n");
+ return err;
+ }
+
+ /* By default, enable both Rx and Tx pause frames */
+ link_cfg.options |= DPNI_LINK_OPT_PAUSE;
+ link_cfg.options &= ~DPNI_LINK_OPT_ASYM_PAUSE;
+ err = dpni_set_link_cfg(priv->mc_io, 0, priv->mc_token, &link_cfg);
+ if (err) {
+ dev_err(dev, "dpni_set_link_cfg() failed\n");
+ return err;
+ }
+
+ priv->link_state.options = link_cfg.options;
+
+ return 0;
+}
+
/* Configure the DPNI object this interface is associated with */
static int setup_dpni(struct fsl_mc_device *ls_dev)
{
@@ -2498,6 +2563,13 @@ static int setup_dpni(struct fsl_mc_device *ls_dev)
set_enqueue_mode(priv);
+ /* Enable pause frame support */
+ if (dpaa2_eth_has_pause_support(priv)) {
+ err = set_pause(priv);
+ if (err)
+ goto close;
+ }
+
priv->cls_rules = devm_kzalloc(dev, sizeof(struct dpaa2_eth_cls_rule) *
dpaa2_eth_fs_count(priv), GFP_KERNEL);
if (!priv->cls_rules)
@@ -2529,7 +2601,6 @@ static int setup_rx_flow(struct dpaa2_eth_priv *priv,
struct device *dev = priv->net_dev->dev.parent;
struct dpni_queue queue;
struct dpni_queue_id qid;
- struct dpni_taildrop td;
int err;
err = dpni_get_queue(priv->mc_io, 0, priv->mc_token,
@@ -2554,15 +2625,6 @@ static int setup_rx_flow(struct dpaa2_eth_priv *priv,
return err;
}
- td.enable = 1;
- td.threshold = DPAA2_ETH_TAILDROP_THRESH;
- err = dpni_set_taildrop(priv->mc_io, 0, priv->mc_token, DPNI_CP_QUEUE,
- DPNI_QUEUE_RX, 0, fq->flowid, &td);
- if (err) {
- dev_err(dev, "dpni_set_threshold() failed\n");
- return err;
- }
-
/* xdp_rxq setup */
err = xdp_rxq_info_reg(&fq->channel->xdp_rxq, priv->net_dev,
fq->flowid);
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h
index 9af18c24221f..8a0e65b3267f 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h
@@ -392,6 +392,7 @@ struct dpaa2_eth_priv {
struct dpaa2_eth_drv_stats __percpu *percpu_extras;
u16 mc_token;
+ u8 rx_td_enabled;
struct dpni_link_state link_state;
bool do_link_poll;
@@ -476,6 +477,12 @@ enum dpaa2_eth_rx_dist {
#define DPAA2_ETH_DIST_L4DST BIT(8)
#define DPAA2_ETH_DIST_ALL (~0ULL)
+#define DPNI_PAUSE_VER_MAJOR 7
+#define DPNI_PAUSE_VER_MINOR 13
+#define dpaa2_eth_has_pause_support(priv) \
+ (dpaa2_eth_cmp_dpni_ver((priv), DPNI_PAUSE_VER_MAJOR, \
+ DPNI_PAUSE_VER_MINOR) >= 0)
+
static inline
unsigned int dpaa2_eth_needed_headroom(struct dpaa2_eth_priv *priv,
struct sk_buff *skb)
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c
index 7b182f4b263c..93076fe01b45 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c
@@ -78,71 +78,67 @@ static int
dpaa2_eth_get_link_ksettings(struct net_device *net_dev,
struct ethtool_link_ksettings *link_settings)
{
- struct dpni_link_state state = {0};
- int err = 0;
struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
- err = dpni_get_link_state(priv->mc_io, 0, priv->mc_token, &state);
- if (err) {
- netdev_err(net_dev, "ERROR %d getting link state\n", err);
- goto out;
- }
-
- /* At the moment, we have no way of interrogating the DPMAC
- * from the DPNI side - and for that matter there may exist
- * no DPMAC at all. So for now we just don't report anything
- * beyond the DPNI attributes.
- */
- if (state.options & DPNI_LINK_OPT_AUTONEG)
- link_settings->base.autoneg = AUTONEG_ENABLE;
- if (!(state.options & DPNI_LINK_OPT_HALF_DUPLEX))
+ link_settings->base.autoneg = AUTONEG_DISABLE;
+ if (!(priv->link_state.options & DPNI_LINK_OPT_HALF_DUPLEX))
link_settings->base.duplex = DUPLEX_FULL;
- link_settings->base.speed = state.rate;
+ link_settings->base.speed = priv->link_state.rate;
-out:
- return err;
+ return 0;
}
-#define DPNI_DYNAMIC_LINK_SET_VER_MAJOR 7
-#define DPNI_DYNAMIC_LINK_SET_VER_MINOR 1
-static int
-dpaa2_eth_set_link_ksettings(struct net_device *net_dev,
- const struct ethtool_link_ksettings *link_settings)
+static void dpaa2_eth_get_pauseparam(struct net_device *net_dev,
+ struct ethtool_pauseparam *pause)
{
- struct dpni_link_cfg cfg = {0};
struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
- int err = 0;
+ u64 link_options = priv->link_state.options;
- /* If using an older MC version, the DPNI must be down
- * in order to be able to change link settings. Taking steps to let
- * the user know that.
- */
- if (dpaa2_eth_cmp_dpni_ver(priv, DPNI_DYNAMIC_LINK_SET_VER_MAJOR,
- DPNI_DYNAMIC_LINK_SET_VER_MINOR) < 0) {
- if (netif_running(net_dev)) {
- netdev_info(net_dev, "Interface must be brought down first.\n");
- return -EACCES;
- }
+ pause->rx_pause = !!(link_options & DPNI_LINK_OPT_PAUSE);
+ pause->tx_pause = pause->rx_pause ^
+ !!(link_options & DPNI_LINK_OPT_ASYM_PAUSE);
+ pause->autoneg = AUTONEG_DISABLE;
+}
+
+static int dpaa2_eth_set_pauseparam(struct net_device *net_dev,
+ struct ethtool_pauseparam *pause)
+{
+ struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
+ struct dpni_link_cfg cfg = {0};
+ int err;
+
+ if (!dpaa2_eth_has_pause_support(priv)) {
+ netdev_info(net_dev, "No pause frame support for DPNI version < %d.%d\n",
+ DPNI_PAUSE_VER_MAJOR, DPNI_PAUSE_VER_MINOR);
+ return -EOPNOTSUPP;
}
- cfg.rate = link_settings->base.speed;
- if (link_settings->base.autoneg == AUTONEG_ENABLE)
- cfg.options |= DPNI_LINK_OPT_AUTONEG;
+ if (pause->autoneg)
+ return -EOPNOTSUPP;
+
+ cfg.rate = priv->link_state.rate;
+ cfg.options = priv->link_state.options;
+ if (pause->rx_pause)
+ cfg.options |= DPNI_LINK_OPT_PAUSE;
else
- cfg.options &= ~DPNI_LINK_OPT_AUTONEG;
- if (link_settings->base.duplex == DUPLEX_HALF)
- cfg.options |= DPNI_LINK_OPT_HALF_DUPLEX;
+ cfg.options &= ~DPNI_LINK_OPT_PAUSE;
+ if (!!pause->rx_pause ^ !!pause->tx_pause)
+ cfg.options |= DPNI_LINK_OPT_ASYM_PAUSE;
else
- cfg.options &= ~DPNI_LINK_OPT_HALF_DUPLEX;
+ cfg.options &= ~DPNI_LINK_OPT_ASYM_PAUSE;
+
+ if (cfg.options == priv->link_state.options)
+ return 0;
err = dpni_set_link_cfg(priv->mc_io, 0, priv->mc_token, &cfg);
- if (err)
- /* ethtool will be loud enough if we return an error; no point
- * in putting our own error message on the console by default
- */
- netdev_dbg(net_dev, "ERROR %d setting link cfg\n", err);
+ if (err) {
+ netdev_err(net_dev, "dpni_set_link_state failed\n");
+ return err;
+ }
- return err;
+ priv->link_state.options = cfg.options;
+
+ return 0;
}
static void dpaa2_eth_get_strings(struct net_device *netdev, u32 stringset,
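
With the pauseparam hooks wired up, the usual ethtool pause controls should apply (interface name is a placeholder):

    ethtool -a eth0               # query -> dpaa2_eth_get_pauseparam()
    ethtool -A eth0 rx on tx off  # set   -> dpaa2_eth_set_pauseparam()

Requesting pause autonegotiation is rejected with -EOPNOTSUPP, as the DPNI link options carry no autoneg state here.
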
@@ -721,7 +717,8 @@ const struct ethtool_ops dpaa2_ethtool_ops = {
.get_drvinfo = dpaa2_eth_get_drvinfo,
.get_link = ethtool_op_get_link,
.get_link_ksettings = dpaa2_eth_get_link_ksettings,
- .set_link_ksettings = dpaa2_eth_set_link_ksettings,
+ .get_pauseparam = dpaa2_eth_get_pauseparam,
+ .set_pauseparam = dpaa2_eth_set_pauseparam,
.get_sset_count = dpaa2_eth_get_sset_count,
.get_ethtool_stats = dpaa2_eth_get_ethtool_stats,
.get_strings = dpaa2_eth_get_strings,
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpni-cmd.h b/drivers/net/ethernet/freescale/dpaa2/dpni-cmd.h
index 7b44d7d9b19a..d9b6918807af 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpni-cmd.h
+++ b/drivers/net/ethernet/freescale/dpaa2/dpni-cmd.h
@@ -84,6 +84,7 @@
#define DPNI_CMDID_SET_RX_FS_DIST DPNI_CMD(0x273)
#define DPNI_CMDID_SET_RX_HASH_DIST DPNI_CMD(0x274)
+#define DPNI_CMDID_GET_LINK_CFG DPNI_CMD(0x278)
/* Macros for accessing command fields smaller than 1byte */
#define DPNI_MASK(field) \
@@ -284,7 +285,7 @@ struct dpni_rsp_get_statistics {
__le64 counter[DPNI_STATISTICS_CNT];
};
-struct dpni_cmd_set_link_cfg {
+struct dpni_cmd_link_cfg {
/* cmd word 0 */
__le64 pad0;
/* cmd word 1 */
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpni.c b/drivers/net/ethernet/freescale/dpaa2/dpni.c
index 220dfc806a24..05e30893dee6 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpni.c
+++ b/drivers/net/ethernet/freescale/dpaa2/dpni.c
@@ -838,13 +838,13 @@ int dpni_set_link_cfg(struct fsl_mc_io *mc_io,
const struct dpni_link_cfg *cfg)
{
struct fsl_mc_command cmd = { 0 };
- struct dpni_cmd_set_link_cfg *cmd_params;
+ struct dpni_cmd_link_cfg *cmd_params;
/* prepare command */
cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_LINK_CFG,
cmd_flags,
token);
- cmd_params = (struct dpni_cmd_set_link_cfg *)cmd.params;
+ cmd_params = (struct dpni_cmd_link_cfg *)cmd.params;
cmd_params->rate = cpu_to_le32(cfg->rate);
cmd_params->options = cpu_to_le64(cfg->options);
@@ -853,6 +853,42 @@ int dpni_set_link_cfg(struct fsl_mc_io *mc_io,
}
/**
+ * dpni_get_link_cfg() - return the link configuration
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ * @cfg: Link configuration from dpni object
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpni_get_link_cfg(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ struct dpni_link_cfg *cfg)
+{
+ struct fsl_mc_command cmd = { 0 };
+ struct dpni_cmd_link_cfg *rsp_params;
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_LINK_CFG,
+ cmd_flags,
+ token);
+
+ /* send command to mc */

+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ rsp_params = (struct dpni_cmd_link_cfg *)cmd.params;
+ cfg->rate = le32_to_cpu(rsp_params->rate);
+ cfg->options = le64_to_cpu(rsp_params->options);
+
+ return err;
+}
+
+/**
* dpni_get_link_state() - Return the link state (either up or down)
* @mc_io: Pointer to MC portal's I/O object
* @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpni.h b/drivers/net/ethernet/freescale/dpaa2/dpni.h
index a521242e2353..3e8fc6c7a8da 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpni.h
+++ b/drivers/net/ethernet/freescale/dpaa2/dpni.h
@@ -485,6 +485,11 @@ int dpni_set_link_cfg(struct fsl_mc_io *mc_io,
u16 token,
const struct dpni_link_cfg *cfg);
+int dpni_get_link_cfg(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ struct dpni_link_cfg *cfg);
+
/**
* struct dpni_link_state - Structure representing DPNI link state
* @rate: Rate
diff --git a/drivers/net/ethernet/freescale/enetc/Kconfig b/drivers/net/ethernet/freescale/enetc/Kconfig
index 04a59db03f2b..c219587bd334 100644
--- a/drivers/net/ethernet/freescale/enetc/Kconfig
+++ b/drivers/net/ethernet/freescale/enetc/Kconfig
@@ -20,6 +20,15 @@ config FSL_ENETC_VF
If compiled as module (M), the module name is fsl-enetc-vf.
+config FSL_ENETC_MDIO
+ tristate "ENETC MDIO driver"
+ depends on PCI && (ARCH_LAYERSCAPE || COMPILE_TEST)
+ help
+ This driver supports NXP ENETC Central MDIO controller as a PCIe
+ physical function (PF) device.
+
+ If compiled as module (M), the module name is fsl-enetc-mdio.
+
config FSL_ENETC_PTP_CLOCK
tristate "ENETC PTP clock driver"
depends on PTP_1588_CLOCK_QORIQ && (FSL_ENETC || FSL_ENETC_VF)
diff --git a/drivers/net/ethernet/freescale/enetc/Makefile b/drivers/net/ethernet/freescale/enetc/Makefile
index 7139e414dccf..d200c27c3bf6 100644
--- a/drivers/net/ethernet/freescale/enetc/Makefile
+++ b/drivers/net/ethernet/freescale/enetc/Makefile
@@ -1,19 +1,16 @@
# SPDX-License-Identifier: GPL-2.0
+
+common-objs := enetc.o enetc_cbdr.o enetc_ethtool.o
+
obj-$(CONFIG_FSL_ENETC) += fsl-enetc.o
-fsl-enetc-$(CONFIG_FSL_ENETC) += enetc.o enetc_cbdr.o enetc_ethtool.o \
- enetc_mdio.o
+fsl-enetc-y := enetc_pf.o enetc_mdio.o $(common-objs)
fsl-enetc-$(CONFIG_PCI_IOV) += enetc_msg.o
-fsl-enetc-objs := enetc_pf.o $(fsl-enetc-y)
obj-$(CONFIG_FSL_ENETC_VF) += fsl-enetc-vf.o
+fsl-enetc-vf-y := enetc_vf.o $(common-objs)
-ifeq ($(CONFIG_FSL_ENETC)$(CONFIG_FSL_ENETC_VF), yy)
-fsl-enetc-vf-objs := enetc_vf.o
-else
-fsl-enetc-vf-$(CONFIG_FSL_ENETC_VF) += enetc.o enetc_cbdr.o \
- enetc_ethtool.o
-fsl-enetc-vf-objs := enetc_vf.o $(fsl-enetc-vf-y)
-endif
+obj-$(CONFIG_FSL_ENETC_MDIO) += fsl-enetc-mdio.o
+fsl-enetc-mdio-y := enetc_pci_mdio.o enetc_mdio.o
obj-$(CONFIG_FSL_ENETC_PTP_CLOCK) += fsl-enetc-ptp.o
-fsl-enetc-ptp-$(CONFIG_FSL_ENETC_PTP_CLOCK) += enetc_ptp.o
+fsl-enetc-ptp-y := enetc_ptp.o
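
The rewritten Makefile uses the plain kbuild composite-object idiom in place of the old ifeq dance; roughly (names hypothetical):

    # common-objs is an ordinary make variable holding the shared list:
    #
    #   common-objs        := enetc.o enetc_cbdr.o enetc_ethtool.o
    #   obj-$(CONFIG_FOO)  += foo.o             # module built for y/m
    #   foo-y              := foo_main.o $(common-objs)
    #
    # Each module's <name>-y list names the objects linked into it, so the
    # PF and VF modules can share the common objects without conditionals.
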
diff --git a/drivers/net/ethernet/freescale/enetc/enetc.c b/drivers/net/ethernet/freescale/enetc/enetc.c
index 223709443ea4..b6ff89307409 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc.c
@@ -110,7 +110,7 @@ static int enetc_map_tx_buffs(struct enetc_bdr *tx_ring, struct sk_buff *skb,
int active_offloads)
{
struct enetc_tx_swbd *tx_swbd;
- struct skb_frag_struct *frag;
+ skb_frag_t *frag;
int len = skb_headlen(skb);
union enetc_tx_bd temp_bd;
union enetc_tx_bd *txbd;
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_mdio.c b/drivers/net/ethernet/freescale/enetc/enetc_mdio.c
index 77b9cd10ba2b..149883c8f0b8 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_mdio.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc_mdio.c
@@ -6,18 +6,20 @@
#include <linux/iopoll.h>
#include <linux/of.h>
-#include "enetc_pf.h"
+#include "enetc_mdio.h"
-struct enetc_mdio_regs {
- u32 mdio_cfg; /* MDIO configuration and status */
- u32 mdio_ctl; /* MDIO control */
- u32 mdio_data; /* MDIO data */
- u32 mdio_addr; /* MDIO address */
-};
+#define ENETC_MDIO_REG_OFFSET 0x1c00
+#define ENETC_MDIO_CFG 0x0 /* MDIO configuration and status */
+#define ENETC_MDIO_CTL 0x4 /* MDIO control */
+#define ENETC_MDIO_DATA 0x8 /* MDIO data */
+#define ENETC_MDIO_ADDR 0xc /* MDIO address */
-#define bus_to_enetc_regs(bus) (struct enetc_mdio_regs __iomem *)((bus)->priv)
+#define enetc_mdio_rd(hw, off) \
+ enetc_port_rd(hw, ENETC_##off + ENETC_MDIO_REG_OFFSET)
+#define enetc_mdio_wr(hw, off, val) \
+ enetc_port_wr(hw, ENETC_##off + ENETC_MDIO_REG_OFFSET, val)
+#define enetc_mdio_rd_reg(off) enetc_mdio_rd(hw, off)
-#define ENETC_MDIO_REG_OFFSET 0x1c00
#define ENETC_MDC_DIV 258
#define MDIO_CFG_CLKDIV(x) ((((x) >> 1) & 0xff) << 8)
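
The token-pasting accessors above keep call sites short; an illustrative expansion (not part of the patch):

    /*
     *   enetc_mdio_rd(hw, MDIO_CFG)
     *     -> enetc_port_rd(hw, ENETC_MDIO_CFG + ENETC_MDIO_REG_OFFSET)
     *     -> a read of port register 0x1c00 + 0x0
     *
     * The one-argument enetc_mdio_rd_reg(off) wrapper exists because
     * readx_poll_timeout() invokes its read op with a single argument;
     * the wrapper picks up "hw" from the calling function's scope.
     */
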
@@ -33,18 +35,18 @@ struct enetc_mdio_regs {
#define MDIO_DATA(x) ((x) & 0xffff)
#define TIMEOUT 1000
-static int enetc_mdio_wait_complete(struct enetc_mdio_regs __iomem *regs)
+static int enetc_mdio_wait_complete(struct enetc_hw *hw)
{
u32 val;
- return readx_poll_timeout(enetc_rd_reg, &regs->mdio_cfg, val,
+ return readx_poll_timeout(enetc_mdio_rd_reg, MDIO_CFG, val,
!(val & MDIO_CFG_BSY), 10, 10 * TIMEOUT);
}
-static int enetc_mdio_write(struct mii_bus *bus, int phy_id, int regnum,
- u16 value)
+int enetc_mdio_write(struct mii_bus *bus, int phy_id, int regnum, u16 value)
{
- struct enetc_mdio_regs __iomem *regs = bus_to_enetc_regs(bus);
+ struct enetc_mdio_priv *mdio_priv = bus->priv;
+ struct enetc_hw *hw = mdio_priv->hw;
u32 mdio_ctl, mdio_cfg;
u16 dev_addr;
int ret;
@@ -59,38 +61,39 @@ static int enetc_mdio_write(struct mii_bus *bus, int phy_id, int regnum,
mdio_cfg &= ~MDIO_CFG_ENC45;
}
- enetc_wr_reg(&regs->mdio_cfg, mdio_cfg);
+ enetc_mdio_wr(hw, MDIO_CFG, mdio_cfg);
- ret = enetc_mdio_wait_complete(regs);
+ ret = enetc_mdio_wait_complete(hw);
if (ret)
return ret;
/* set port and dev addr */
mdio_ctl = MDIO_CTL_PORT_ADDR(phy_id) | MDIO_CTL_DEV_ADDR(dev_addr);
- enetc_wr_reg(&regs->mdio_ctl, mdio_ctl);
+ enetc_mdio_wr(hw, MDIO_CTL, mdio_ctl);
/* set the register address */
if (regnum & MII_ADDR_C45) {
- enetc_wr_reg(&regs->mdio_addr, regnum & 0xffff);
+ enetc_mdio_wr(hw, MDIO_ADDR, regnum & 0xffff);
- ret = enetc_mdio_wait_complete(regs);
+ ret = enetc_mdio_wait_complete(hw);
if (ret)
return ret;
}
/* write the value */
- enetc_wr_reg(&regs->mdio_data, MDIO_DATA(value));
+ enetc_mdio_wr(hw, MDIO_DATA, MDIO_DATA(value));
- ret = enetc_mdio_wait_complete(regs);
+ ret = enetc_mdio_wait_complete(hw);
if (ret)
return ret;
return 0;
}
-static int enetc_mdio_read(struct mii_bus *bus, int phy_id, int regnum)
+int enetc_mdio_read(struct mii_bus *bus, int phy_id, int regnum)
{
- struct enetc_mdio_regs __iomem *regs = bus_to_enetc_regs(bus);
+ struct enetc_mdio_priv *mdio_priv = bus->priv;
+ struct enetc_hw *hw = mdio_priv->hw;
u32 mdio_ctl, mdio_cfg;
u16 dev_addr, value;
int ret;
@@ -104,41 +107,41 @@ static int enetc_mdio_read(struct mii_bus *bus, int phy_id, int regnum)
mdio_cfg &= ~MDIO_CFG_ENC45;
}
- enetc_wr_reg(&regs->mdio_cfg, mdio_cfg);
+ enetc_mdio_wr(hw, MDIO_CFG, mdio_cfg);
- ret = enetc_mdio_wait_complete(regs);
+ ret = enetc_mdio_wait_complete(hw);
if (ret)
return ret;
/* set port and device addr */
mdio_ctl = MDIO_CTL_PORT_ADDR(phy_id) | MDIO_CTL_DEV_ADDR(dev_addr);
- enetc_wr_reg(&regs->mdio_ctl, mdio_ctl);
+ enetc_mdio_wr(hw, MDIO_CTL, mdio_ctl);
/* set the register address */
if (regnum & MII_ADDR_C45) {
- enetc_wr_reg(&regs->mdio_addr, regnum & 0xffff);
+ enetc_mdio_wr(hw, MDIO_ADDR, regnum & 0xffff);
- ret = enetc_mdio_wait_complete(regs);
+ ret = enetc_mdio_wait_complete(hw);
if (ret)
return ret;
}
/* initiate the read */
- enetc_wr_reg(&regs->mdio_ctl, mdio_ctl | MDIO_CTL_READ);
+ enetc_mdio_wr(hw, MDIO_CTL, mdio_ctl | MDIO_CTL_READ);
- ret = enetc_mdio_wait_complete(regs);
+ ret = enetc_mdio_wait_complete(hw);
if (ret)
return ret;
/* return all Fs if nothing was there */
- if (enetc_rd_reg(&regs->mdio_cfg) & MDIO_CFG_RD_ER) {
+ if (enetc_mdio_rd(hw, MDIO_CFG) & MDIO_CFG_RD_ER) {
dev_dbg(&bus->dev,
"Error while reading PHY%d reg at %d.%hhu\n",
phy_id, dev_addr, regnum);
return 0xffff;
}
- value = enetc_rd_reg(&regs->mdio_data) & 0xffff;
+ value = enetc_mdio_rd(hw, MDIO_DATA) & 0xffff;
return value;
}
@@ -146,12 +149,12 @@ static int enetc_mdio_read(struct mii_bus *bus, int phy_id, int regnum)
int enetc_mdio_probe(struct enetc_pf *pf)
{
struct device *dev = &pf->si->pdev->dev;
- struct enetc_mdio_regs __iomem *regs;
+ struct enetc_mdio_priv *mdio_priv;
struct device_node *np;
struct mii_bus *bus;
- int ret;
+ int err;
- bus = mdiobus_alloc_size(sizeof(regs));
+ bus = devm_mdiobus_alloc_size(dev, sizeof(*mdio_priv));
if (!bus)
return -ENOMEM;
@@ -159,41 +162,31 @@ int enetc_mdio_probe(struct enetc_pf *pf)
bus->read = enetc_mdio_read;
bus->write = enetc_mdio_write;
bus->parent = dev;
+ mdio_priv = bus->priv;
+ mdio_priv->hw = &pf->si->hw;
snprintf(bus->id, MII_BUS_ID_SIZE, "%s", dev_name(dev));
- /* store the enetc mdio base address for this bus */
- regs = pf->si->hw.port + ENETC_MDIO_REG_OFFSET;
- bus->priv = regs;
-
np = of_get_child_by_name(dev->of_node, "mdio");
if (!np) {
dev_err(dev, "MDIO node missing\n");
- ret = -EINVAL;
- goto err_registration;
+ return -EINVAL;
}
- ret = of_mdiobus_register(bus, np);
- if (ret) {
+ err = of_mdiobus_register(bus, np);
+ if (err) {
of_node_put(np);
dev_err(dev, "cannot register MDIO bus\n");
- goto err_registration;
+ return err;
}
of_node_put(np);
pf->mdio = bus;
return 0;
-
-err_registration:
- mdiobus_free(bus);
-
- return ret;
}
void enetc_mdio_remove(struct enetc_pf *pf)
{
- if (pf->mdio) {
+ if (pf->mdio)
mdiobus_unregister(pf->mdio);
- mdiobus_free(pf->mdio);
- }
}
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_mdio.h b/drivers/net/ethernet/freescale/enetc/enetc_mdio.h
new file mode 100644
index 000000000000..60c9a3889824
--- /dev/null
+++ b/drivers/net/ethernet/freescale/enetc/enetc_mdio.h
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */
+/* Copyright 2019 NXP */
+
+#include <linux/phy.h>
+#include "enetc_pf.h"
+
+struct enetc_mdio_priv {
+ struct enetc_hw *hw;
+};
+
+int enetc_mdio_write(struct mii_bus *bus, int phy_id, int regnum, u16 value);
+int enetc_mdio_read(struct mii_bus *bus, int phy_id, int regnum);
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_pci_mdio.c b/drivers/net/ethernet/freescale/enetc/enetc_pci_mdio.c
new file mode 100644
index 000000000000..fbd41ce01f06
--- /dev/null
+++ b/drivers/net/ethernet/freescale/enetc/enetc_pci_mdio.c
@@ -0,0 +1,101 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
+/* Copyright 2019 NXP */
+#include <linux/of_mdio.h>
+#include "enetc_mdio.h"
+
+#define ENETC_MDIO_DEV_ID 0xee01
+#define ENETC_MDIO_DEV_NAME "FSL PCIe IE Central MDIO"
+#define ENETC_MDIO_BUS_NAME ENETC_MDIO_DEV_NAME " Bus"
+#define ENETC_MDIO_DRV_NAME ENETC_MDIO_DEV_NAME " driver"
+
+static int enetc_pci_mdio_probe(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ struct enetc_mdio_priv *mdio_priv;
+ struct device *dev = &pdev->dev;
+ struct enetc_hw *hw;
+ struct mii_bus *bus;
+ int err;
+
+ hw = devm_kzalloc(dev, sizeof(*hw), GFP_KERNEL);
+ if (!hw)
+ return -ENOMEM;
+
+ bus = devm_mdiobus_alloc_size(dev, sizeof(*mdio_priv));
+ if (!bus)
+ return -ENOMEM;
+
+ bus->name = ENETC_MDIO_BUS_NAME;
+ bus->read = enetc_mdio_read;
+ bus->write = enetc_mdio_write;
+ bus->parent = dev;
+ mdio_priv = bus->priv;
+ mdio_priv->hw = hw;
+ snprintf(bus->id, MII_BUS_ID_SIZE, "%s", dev_name(dev));
+
+ pcie_flr(pdev);
+ err = pci_enable_device_mem(pdev);
+ if (err) {
+ dev_err(dev, "device enable failed\n");
+ return err;
+ }
+
+ err = pci_request_region(pdev, 0, KBUILD_MODNAME);
+ if (err) {
+ dev_err(dev, "pci_request_region failed\n");
+ goto err_pci_mem_reg;
+ }
+
+ hw->port = pci_iomap(pdev, 0, 0);
+ if (!hw->port) {
+ err = -ENXIO;
+ dev_err(dev, "iomap failed\n");
+ goto err_ioremap;
+ }
+
+ err = of_mdiobus_register(bus, dev->of_node);
+ if (err)
+ goto err_mdiobus_reg;
+
+ pci_set_drvdata(pdev, bus);
+
+ return 0;
+
+err_mdiobus_reg:
+ iounmap(mdio_priv->hw->port);
+err_ioremap:
+ pci_release_mem_regions(pdev);
+err_pci_mem_reg:
+ pci_disable_device(pdev);
+
+ return err;
+}
+
+static void enetc_pci_mdio_remove(struct pci_dev *pdev)
+{
+ struct mii_bus *bus = pci_get_drvdata(pdev);
+ struct enetc_mdio_priv *mdio_priv;
+
+ mdiobus_unregister(bus);
+ mdio_priv = bus->priv;
+ iounmap(mdio_priv->hw->port);
+ pci_release_mem_regions(pdev);
+ pci_disable_device(pdev);
+}
+
+static const struct pci_device_id enetc_pci_mdio_id_table[] = {
+ { PCI_DEVICE(PCI_VENDOR_ID_FREESCALE, ENETC_MDIO_DEV_ID) },
+ { 0, } /* End of table. */
+};
+MODULE_DEVICE_TABLE(pci, enetc_pci_mdio_id_table);
+
+static struct pci_driver enetc_pci_mdio_driver = {
+ .name = KBUILD_MODNAME,
+ .id_table = enetc_pci_mdio_id_table,
+ .probe = enetc_pci_mdio_probe,
+ .remove = enetc_pci_mdio_remove,
+};
+module_pci_driver(enetc_pci_mdio_driver);
+
+MODULE_DESCRIPTION(ENETC_MDIO_DRV_NAME);
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_pf.c b/drivers/net/ethernet/freescale/enetc/enetc_pf.c
index 258b3cb38a6f..7d6513ff8507 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_pf.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc_pf.c
@@ -750,6 +750,7 @@ static int enetc_of_get_phy(struct enetc_ndev_priv *priv)
{
struct enetc_pf *pf = enetc_si_priv(priv->si);
struct device_node *np = priv->dev->of_node;
+ struct device_node *mdio_np;
int err;
if (!np) {
@@ -773,7 +774,9 @@ static int enetc_of_get_phy(struct enetc_ndev_priv *priv)
priv->phy_node = of_node_get(np);
}
- if (!of_phy_is_fixed_link(np)) {
+ mdio_np = of_get_child_by_name(np, "mdio");
+ if (mdio_np) {
+ of_node_put(mdio_np);
err = enetc_mdio_probe(pf);
if (err) {
of_node_put(priv->phy_node);
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index e5610a4da539..d4d4c72adf49 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -208,8 +208,11 @@ MODULE_PARM_DESC(macaddr, "FEC Ethernet MAC address");
/* FEC MII MMFR bits definition */
#define FEC_MMFR_ST (1 << 30)
+#define FEC_MMFR_ST_C45 (0)
#define FEC_MMFR_OP_READ (2 << 28)
+#define FEC_MMFR_OP_READ_C45 (3 << 28)
#define FEC_MMFR_OP_WRITE (1 << 28)
+#define FEC_MMFR_OP_ADDR_WRITE (0)
#define FEC_MMFR_PA(v) ((v & 0x1f) << 23)
#define FEC_MMFR_RA(v) ((v & 0x1f) << 18)
#define FEC_MMFR_TA (2 << 16)
@@ -365,7 +368,7 @@ fec_enet_txq_submit_frag_skb(struct fec_enet_priv_tx_q *txq,
status = fec16_to_cpu(bdp->cbd_sc);
status &= ~BD_ENET_TX_STATS;
status |= (BD_ENET_TX_TC | BD_ENET_TX_READY);
- frag_len = skb_shinfo(skb)->frags[frag].size;
+ frag_len = skb_frag_size(&skb_shinfo(skb)->frags[frag]);
/* Handle the last BD specially */
if (frag == nr_frags - 1) {
@@ -387,7 +390,7 @@ fec_enet_txq_submit_frag_skb(struct fec_enet_priv_tx_q *txq,
ebdp->cbd_esc = cpu_to_fec32(estatus);
}
- bufaddr = page_address(this_frag->page.p) + this_frag->page_offset;
+ bufaddr = skb_frag_address(this_frag);
index = fec_enet_get_bd_index(bdp, &txq->bd);
if (((unsigned long) bufaddr) & fep->tx_align ||
@@ -1767,7 +1770,8 @@ static int fec_enet_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
struct fec_enet_private *fep = bus->priv;
struct device *dev = &fep->pdev->dev;
unsigned long time_left;
- int ret = 0;
+ int ret = 0, frame_start, frame_addr, frame_op;
+ bool is_c45 = !!(regnum & MII_ADDR_C45);
ret = pm_runtime_get_sync(dev);
if (ret < 0)
@@ -1775,9 +1779,37 @@ static int fec_enet_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
reinit_completion(&fep->mdio_done);
+ if (is_c45) {
+ frame_start = FEC_MMFR_ST_C45;
+
+ /* write address */
+ frame_addr = (regnum >> 16);
+ writel(frame_start | FEC_MMFR_OP_ADDR_WRITE |
+ FEC_MMFR_PA(mii_id) | FEC_MMFR_RA(frame_addr) |
+ FEC_MMFR_TA | (regnum & 0xFFFF),
+ fep->hwp + FEC_MII_DATA);
+
+ /* wait for end of transfer */
+ time_left = wait_for_completion_timeout(&fep->mdio_done,
+ usecs_to_jiffies(FEC_MII_TIMEOUT));
+ if (time_left == 0) {
+ netdev_err(fep->netdev, "MDIO address write timeout\n");
+ ret = -ETIMEDOUT;
+ goto out;
+ }
+
+ frame_op = FEC_MMFR_OP_READ_C45;
+
+ } else {
+ /* C22 read */
+ frame_op = FEC_MMFR_OP_READ;
+ frame_start = FEC_MMFR_ST;
+ frame_addr = regnum;
+ }
+
/* start a read op */
- writel(FEC_MMFR_ST | FEC_MMFR_OP_READ |
- FEC_MMFR_PA(mii_id) | FEC_MMFR_RA(regnum) |
+ writel(frame_start | frame_op |
+ FEC_MMFR_PA(mii_id) | FEC_MMFR_RA(frame_addr) |
FEC_MMFR_TA, fep->hwp + FEC_MII_DATA);
/* wait for end of transfer */
@@ -1804,7 +1836,8 @@ static int fec_enet_mdio_write(struct mii_bus *bus, int mii_id, int regnum,
struct fec_enet_private *fep = bus->priv;
struct device *dev = &fep->pdev->dev;
unsigned long time_left;
- int ret;
+ int ret, frame_start, frame_addr;
+ bool is_c45 = !!(regnum & MII_ADDR_C45);
ret = pm_runtime_get_sync(dev);
if (ret < 0)
@@ -1814,9 +1847,33 @@ static int fec_enet_mdio_write(struct mii_bus *bus, int mii_id, int regnum,
reinit_completion(&fep->mdio_done);
+ if (is_c45) {
+ frame_start = FEC_MMFR_ST_C45;
+
+ /* write address */
+ frame_addr = (regnum >> 16);
+ writel(frame_start | FEC_MMFR_OP_ADDR_WRITE |
+ FEC_MMFR_PA(mii_id) | FEC_MMFR_RA(frame_addr) |
+ FEC_MMFR_TA | (regnum & 0xFFFF),
+ fep->hwp + FEC_MII_DATA);
+
+ /* wait for end of transfer */
+ time_left = wait_for_completion_timeout(&fep->mdio_done,
+ usecs_to_jiffies(FEC_MII_TIMEOUT));
+ if (time_left == 0) {
+ netdev_err(fep->netdev, "MDIO address write timeout\n");
+ ret = -ETIMEDOUT;
+ goto out;
+ }
+ } else {
+ /* C22 write */
+ frame_start = FEC_MMFR_ST;
+ frame_addr = regnum;
+ }
+
/* start a write op */
- writel(FEC_MMFR_ST | FEC_MMFR_OP_WRITE |
- FEC_MMFR_PA(mii_id) | FEC_MMFR_RA(regnum) |
+ writel(frame_start | FEC_MMFR_OP_WRITE |
+ FEC_MMFR_PA(mii_id) | FEC_MMFR_RA(frame_addr) |
FEC_MMFR_TA | FEC_MMFR_DATA(value),
fep->hwp + FEC_MII_DATA);
@@ -1828,6 +1885,7 @@ static int fec_enet_mdio_write(struct mii_bus *bus, int mii_id, int regnum,
ret = -ETIMEDOUT;
}
+out:
pm_runtime_mark_last_busy(dev);
pm_runtime_put_autosuspend(dev);
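
For reference, the Clause 45 encoding consumed above follows the kernel MDIO convention: MII_ADDR_C45 flags the access, bits 20:16 of regnum carry the MMD (device) address and bits 15:0 the register, which is why the read/write paths do frame_addr = regnum >> 16 and mask the data with 0xffff. A consumer-side sketch (function name hypothetical):

    #include <linux/phy.h>
    #include <linux/mdio.h>

    static int my_c45_read(struct mii_bus *bus, int phy_id)
    {
            int devad = MDIO_MMD_PMAPMD;    /* MMD 1 */
            int reg = MDIO_DEVID1;          /* register within that MMD */

            /* fec_enet_mdio_read() splits this back into an address write
             * cycle (ST=00, OP=00) followed by a C45 read (OP=11).
             */
            return mdiobus_read(bus, phy_id,
                                MII_ADDR_C45 | (devad << 16) | reg);
    }
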
@@ -3338,7 +3396,6 @@ fec_probe(struct platform_device *pdev)
struct fec_platform_data *pdata;
struct net_device *ndev;
int i, irq, ret = 0;
- struct resource *r;
const struct of_device_id *of_id;
static int dev_id;
struct device_node *np = pdev->dev.of_node, *phy_node;
@@ -3378,8 +3435,7 @@ fec_probe(struct platform_device *pdev)
/* Select default pin state */
pinctrl_pm_select_default_state(&pdev->dev);
- r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- fep->hwp = devm_ioremap_resource(&pdev->dev, r);
+ fep->hwp = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(fep->hwp)) {
ret = PTR_ERR(fep->hwp);
goto failed_ioremap;
diff --git a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
index 5fad73b2e123..3981c06f082f 100644
--- a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
+++ b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
@@ -501,7 +501,7 @@ fs_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
nr_frags = skb_shinfo(skb)->nr_frags;
frag = skb_shinfo(skb)->frags;
for (i = 0; i < nr_frags; i++, frag++) {
- if (!IS_ALIGNED(frag->page_offset, 4)) {
+ if (!IS_ALIGNED(skb_frag_off(frag), 4)) {
is_aligned = 0;
break;
}
diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c
index 7ea19e173339..412c0340fed9 100644
--- a/drivers/net/ethernet/freescale/gianfar.c
+++ b/drivers/net/ethernet/freescale/gianfar.c
@@ -2005,8 +2005,7 @@ static void free_skb_rx_queue(struct gfar_priv_rx_q *rx_queue)
struct rxbd8 *rxbdp = rx_queue->rx_bd_base;
- if (rx_queue->skb)
- dev_kfree_skb(rx_queue->skb);
+ dev_kfree_skb(rx_queue->skb);
for (i = 0; i < rx_queue->rx_ring_size; i++) {
struct gfar_rx_buff *rxb = &rx_queue->rx_buff[i];
diff --git a/drivers/net/ethernet/hisilicon/hisi_femac.c b/drivers/net/ethernet/hisilicon/hisi_femac.c
index 689f18e3100f..90ab7ade44c4 100644
--- a/drivers/net/ethernet/hisilicon/hisi_femac.c
+++ b/drivers/net/ethernet/hisilicon/hisi_femac.c
@@ -877,7 +877,6 @@ static int hisi_femac_drv_probe(struct platform_device *pdev)
ndev->irq = platform_get_irq(pdev, 0);
if (ndev->irq <= 0) {
- dev_err(dev, "No irq resource\n");
ret = -ENODEV;
goto out_disconnect_phy;
}
diff --git a/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c b/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c
index 349970557c52..95a6b0926170 100644
--- a/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c
+++ b/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c
@@ -719,7 +719,7 @@ static int hix5hd2_fill_sg_desc(struct hix5hd2_priv *priv,
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
- int len = frag->size;
+ int len = skb_frag_size(frag);
addr = skb_frag_dma_map(priv->dev, frag, 0, len, DMA_TO_DEVICE);
ret = dma_mapping_error(priv->dev, addr);
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_enet.c b/drivers/net/ethernet/hisilicon/hns/hns_enet.c
index 2235dd55fab2..a48396dd4ebb 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_enet.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_enet.c
@@ -245,7 +245,7 @@ static int hns_nic_maybe_stop_tso(
int frag_num;
struct sk_buff *skb = *out_skb;
struct sk_buff *new_skb = NULL;
- struct skb_frag_struct *frag;
+ skb_frag_t *frag;
size = skb_headlen(skb);
buf_num = (size + BD_MAX_SEND_SIZE - 1) / BD_MAX_SEND_SIZE;
@@ -309,7 +309,7 @@ netdev_tx_t hns_nic_net_xmit_hw(struct net_device *ndev,
struct hnae_ring *ring = ring_data->ring;
struct device *dev = ring_to_dev(ring);
struct netdev_queue *dev_queue;
- struct skb_frag_struct *frag;
+ skb_frag_t *frag;
int buf_num;
int seg_num;
dma_addr_t dma;
@@ -1182,6 +1182,8 @@ int hns_nic_init_phy(struct net_device *ndev, struct hnae_handle *h)
if (unlikely(ret))
return -ENODEV;
+ phy_attached_info(phy_dev);
+
return 0;
}
diff --git a/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h b/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h
index 75329ab775a6..f8a87f8ca983 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h
@@ -47,6 +47,8 @@ enum HCLGE_MBX_OPCODE {
HCLGE_MBX_GET_MEDIA_TYPE, /* (VF -> PF) get media type */
HCLGE_MBX_GET_VF_FLR_STATUS = 200, /* (M7 -> PF) get vf reset status */
+ HCLGE_MBX_PUSH_LINK_STATUS, /* (M7 -> PF) get port link status */
+ HCLGE_MBX_NCSI_ERROR, /* (M7 -> PF) receive a NCSI error */
};
/* below are per-VF mac-vlan subcodes */
diff --git a/drivers/net/ethernet/hisilicon/hns3/hnae3.c b/drivers/net/ethernet/hisilicon/hns3/hnae3.c
index 908d4f45c06a..528f6243cdc6 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hnae3.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hnae3.c
@@ -46,7 +46,7 @@ void hnae3_set_client_init_flag(struct hnae3_client *client,
EXPORT_SYMBOL(hnae3_set_client_init_flag);
static int hnae3_get_client_init_flag(struct hnae3_client *client,
- struct hnae3_ae_dev *ae_dev)
+ struct hnae3_ae_dev *ae_dev)
{
int inited = 0;
@@ -104,7 +104,6 @@ int hnae3_register_client(struct hnae3_client *client)
{
struct hnae3_client *client_tmp;
struct hnae3_ae_dev *ae_dev;
- int ret = 0;
if (!client)
return -ENODEV;
@@ -123,7 +122,7 @@ int hnae3_register_client(struct hnae3_client *client)
/* if the client could not be initialized on current port, for
* any error reasons, move on to next available port
*/
- ret = hnae3_init_client_instance(client, ae_dev);
+ int ret = hnae3_init_client_instance(client, ae_dev);
if (ret)
dev_err(&ae_dev->pdev->dev,
"match and instantiation failed for port, ret = %d\n",
@@ -164,7 +163,7 @@ void hnae3_register_ae_algo(struct hnae3_ae_algo *ae_algo)
const struct pci_device_id *id;
struct hnae3_ae_dev *ae_dev;
struct hnae3_client *client;
- int ret = 0;
+ int ret;
if (!ae_algo)
return;
@@ -258,7 +257,7 @@ int hnae3_register_ae_dev(struct hnae3_ae_dev *ae_dev)
const struct pci_device_id *id;
struct hnae3_ae_algo *ae_algo;
struct hnae3_client *client;
- int ret = 0;
+ int ret;
if (!ae_dev)
return -ENODEV;
diff --git a/drivers/net/ethernet/hisilicon/hns3/hnae3.h b/drivers/net/ethernet/hisilicon/hns3/hnae3.h
index 48c7b70fc2c4..c4b7bf851a28 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hnae3.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hnae3.h
@@ -58,10 +58,10 @@
BIT(HNAE3_DEV_SUPPORT_ROCE_B))
#define hnae3_dev_roce_supported(hdev) \
- hnae3_get_bit(hdev->ae_dev->flag, HNAE3_DEV_SUPPORT_ROCE_B)
+ hnae3_get_bit((hdev)->ae_dev->flag, HNAE3_DEV_SUPPORT_ROCE_B)
#define hnae3_dev_dcb_supported(hdev) \
- hnae3_get_bit(hdev->ae_dev->flag, HNAE3_DEV_SUPPORT_DCB_B)
+ hnae3_get_bit((hdev)->ae_dev->flag, HNAE3_DEV_SUPPORT_DCB_B)
#define hnae3_dev_fd_supported(hdev) \
hnae3_get_bit((hdev)->ae_dev->flag, HNAE3_DEV_SUPPORT_FD_B)
@@ -85,13 +85,18 @@ struct hnae3_queue {
void __iomem *io_base;
struct hnae3_ae_algo *ae_algo;
struct hnae3_handle *handle;
- int tqp_index; /* index in a handle */
- u32 buf_size; /* size for hnae_desc->addr, preset by AE */
- u16 tx_desc_num;/* total number of tx desc */
- u16 rx_desc_num;/* total number of rx desc */
+ int tqp_index; /* index in a handle */
+ u32 buf_size; /* size for hnae_desc->addr, preset by AE */
+ u16 tx_desc_num; /* total number of tx desc */
+ u16 rx_desc_num; /* total number of rx desc */
};
-/*hnae3 loop mode*/
+struct hns3_mac_stats {
+ u64 tx_pause_cnt;
+ u64 rx_pause_cnt;
+};
+
+/* hnae3 loop mode */
enum hnae3_loop {
HNAE3_LOOP_APP,
HNAE3_LOOP_SERIAL_SERDES,
@@ -141,6 +146,12 @@ enum hnae3_reset_notify_type {
HNAE3_RESTORE_CLIENT,
};
+enum hnae3_hw_error_type {
+ HNAE3_PPU_POISON_ERROR,
+ HNAE3_CMDQ_ECC_ERROR,
+ HNAE3_IMP_RD_POISON_ERROR,
+};
+
enum hnae3_reset_type {
HNAE3_VF_RESET,
HNAE3_VF_FUNC_RESET,
@@ -179,6 +190,15 @@ struct hnae3_vector_info {
#define HNAE3_RING_GL_RX 0
#define HNAE3_RING_GL_TX 1
+#define HNAE3_FW_VERSION_BYTE3_SHIFT 24
+#define HNAE3_FW_VERSION_BYTE3_MASK GENMASK(31, 24)
+#define HNAE3_FW_VERSION_BYTE2_SHIFT 16
+#define HNAE3_FW_VERSION_BYTE2_MASK GENMASK(23, 16)
+#define HNAE3_FW_VERSION_BYTE1_SHIFT 8
+#define HNAE3_FW_VERSION_BYTE1_MASK GENMASK(15, 8)
+#define HNAE3_FW_VERSION_BYTE0_SHIFT 0
+#define HNAE3_FW_VERSION_BYTE0_MASK GENMASK(7, 0)
+
struct hnae3_ring_chain_node {
struct hnae3_ring_chain_node *next;
u32 tqp_index;
@@ -196,7 +216,8 @@ struct hnae3_client_ops {
int (*setup_tc)(struct hnae3_handle *handle, u8 tc);
int (*reset_notify)(struct hnae3_handle *handle,
enum hnae3_reset_notify_type type);
- enum hnae3_reset_type (*process_hw_error)(struct hnae3_handle *handle);
+ void (*process_hw_error)(struct hnae3_handle *handle,
+ enum hnae3_hw_error_type);
};
#define HNAE3_CLIENT_NAME_LENGTH 16
@@ -289,6 +310,8 @@ struct hnae3_ae_dev {
* Remove multicast address from mac table
* update_stats()
* Update Old network device statistics
+ * get_mac_stats()
+ * get mac pause statistics including tx_cnt and rx_cnt
* get_ethtool_stats()
* Get ethtool network device statistics
* get_strings()
@@ -417,8 +440,8 @@ struct hnae3_ae_ops {
void (*update_stats)(struct hnae3_handle *handle,
struct net_device_stats *net_stats);
void (*get_stats)(struct hnae3_handle *handle, u64 *data);
- void (*get_mac_pause_stats)(struct hnae3_handle *handle, u64 *tx_cnt,
- u64 *rx_cnt);
+ void (*get_mac_stats)(struct hnae3_handle *handle,
+ struct hns3_mac_stats *mac_stats);
void (*get_strings)(struct hnae3_handle *handle,
u32 stringset, u8 *data);
int (*get_sset_count)(struct hnae3_handle *handle, int stringset);
@@ -605,7 +628,7 @@ struct hnae3_handle {
struct pci_dev *pdev;
void *priv;
struct hnae3_ae_algo *ae_algo; /* the class who provides this handle */
- u64 flags; /* Indicate the capabilities for this handle*/
+ u64 flags; /* Indicate the capabilities for this handle */
union {
struct net_device *netdev; /* first member */
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c b/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c
index a4b937286f55..5cf4c1ecc5b8 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c
@@ -8,6 +8,7 @@
#include "hns3_enet.h"
#define HNS3_DBG_READ_LEN 256
+#define HNS3_DBG_WRITE_LEN 1024
static struct dentry *hns3_dbgfs_root;
@@ -38,7 +39,7 @@ static int hns3_dbg_queue_info(struct hnae3_handle *h,
if (queue_num >= h->kinfo.num_tqps) {
dev_err(&h->pdev->dev,
- "Queue number(%u) is out of range(%u)\n", queue_num,
+ "Queue number(%u) is out of range(0-%u)\n", queue_num,
h->kinfo.num_tqps - 1);
return -EINVAL;
}
@@ -176,7 +177,7 @@ static int hns3_dbg_bd_info(struct hnae3_handle *h, const char *cmd_buf)
}
if (q_num >= h->kinfo.num_tqps) {
- dev_err(dev, "Queue number(%u) is out of range(%u)\n", q_num,
+ dev_err(dev, "Queue number(%u) is out of range(0-%u)\n", q_num,
h->kinfo.num_tqps - 1);
return -EINVAL;
}
@@ -187,14 +188,14 @@ static int hns3_dbg_bd_info(struct hnae3_handle *h, const char *cmd_buf)
tx_index = (cnt == 1) ? value : tx_index;
if (tx_index >= ring->desc_num) {
- dev_err(dev, "bd index (%u) is out of range(%u)\n", tx_index,
+ dev_err(dev, "bd index(%u) is out of range(0-%u)\n", tx_index,
ring->desc_num - 1);
return -EINVAL;
}
tx_desc = &ring->desc[tx_index];
dev_info(dev, "TX Queue Num: %u, BD Index: %u\n", q_num, tx_index);
- dev_info(dev, "(TX) addr: 0x%llx\n", tx_desc->addr);
+ dev_info(dev, "(TX)addr: 0x%llx\n", tx_desc->addr);
dev_info(dev, "(TX)vlan_tag: %u\n", tx_desc->tx.vlan_tag);
dev_info(dev, "(TX)send_size: %u\n", tx_desc->tx.send_size);
dev_info(dev, "(TX)vlan_tso: %u\n", tx_desc->tx.type_cs_vlan_tso);
@@ -218,6 +219,7 @@ static int hns3_dbg_bd_info(struct hnae3_handle *h, const char *cmd_buf)
dev_info(dev, "RX Queue Num: %u, BD Index: %u\n", q_num, rx_index);
dev_info(dev, "(RX)addr: 0x%llx\n", rx_desc->addr);
+ dev_info(dev, "(RX)l234_info: %u\n", rx_desc->rx.l234_info);
dev_info(dev, "(RX)pkt_len: %u\n", rx_desc->rx.pkt_len);
dev_info(dev, "(RX)size: %u\n", rx_desc->rx.size);
dev_info(dev, "(RX)rss_hash: %u\n", rx_desc->rx.rss_hash);
@@ -237,16 +239,16 @@ static void hns3_dbg_help(struct hnae3_handle *h)
char printf_buf[HNS3_DBG_BUF_LEN];
dev_info(&h->pdev->dev, "available commands\n");
- dev_info(&h->pdev->dev, "queue info [number]\n");
+ dev_info(&h->pdev->dev, "queue info <number>\n");
dev_info(&h->pdev->dev, "queue map\n");
- dev_info(&h->pdev->dev, "bd info [q_num] <bd index>\n");
+ dev_info(&h->pdev->dev, "bd info <q_num> <bd index>\n");
if (!hns3_is_phys_func(h->pdev))
return;
dev_info(&h->pdev->dev, "dump fd tcam\n");
dev_info(&h->pdev->dev, "dump tc\n");
- dev_info(&h->pdev->dev, "dump tm map [q_num]\n");
+ dev_info(&h->pdev->dev, "dump tm map <q_num>\n");
dev_info(&h->pdev->dev, "dump tm\n");
dev_info(&h->pdev->dev, "dump qos pause cfg\n");
dev_info(&h->pdev->dev, "dump qos pri map\n");
@@ -258,20 +260,20 @@ static void hns3_dbg_help(struct hnae3_handle *h)
dev_info(&h->pdev->dev, "dump mac tnl status\n");
memset(printf_buf, 0, HNS3_DBG_BUF_LEN);
- strncat(printf_buf, "dump reg [[bios common] [ssu <prt_id>]",
+ strncat(printf_buf, "dump reg [[bios common] [ssu <port_id>]",
HNS3_DBG_BUF_LEN - 1);
strncat(printf_buf + strlen(printf_buf),
- " [igu egu <prt_id>] [rpu <tc_queue_num>]",
+ " [igu egu <port_id>] [rpu <tc_queue_num>]",
HNS3_DBG_BUF_LEN - strlen(printf_buf) - 1);
strncat(printf_buf + strlen(printf_buf),
- " [rtc] [ppp] [rcb] [tqp <q_num>]]\n",
+ " [rtc] [ppp] [rcb] [tqp <queue_num>]]\n",
HNS3_DBG_BUF_LEN - strlen(printf_buf) - 1);
dev_info(&h->pdev->dev, "%s", printf_buf);
memset(printf_buf, 0, HNS3_DBG_BUF_LEN);
- strncat(printf_buf, "dump reg dcb [port_id] [pri_id] [pg_id]",
+ strncat(printf_buf, "dump reg dcb <port_id> <pri_id> <pg_id>",
HNS3_DBG_BUF_LEN - 1);
- strncat(printf_buf + strlen(printf_buf), " [rq_id] [nq_id] [qset_id]\n",
+ strncat(printf_buf + strlen(printf_buf), " <rq_id> <nq_id> <qset_id>\n",
HNS3_DBG_BUF_LEN - strlen(printf_buf) - 1);
dev_info(&h->pdev->dev, "%s", printf_buf);
}
@@ -322,6 +324,9 @@ static ssize_t hns3_dbg_cmd_write(struct file *filp, const char __user *buffer,
test_bit(HNS3_NIC_STATE_RESETTING, &priv->state))
return 0;
+ if (count > HNS3_DBG_WRITE_LEN)
+ return -ENOSPC;
+
cmd_buf = kzalloc(count + 1, GFP_KERNEL);
if (!cmd_buf)
return count;
@@ -372,20 +377,11 @@ static const struct file_operations hns3_dbg_cmd_fops = {
void hns3_dbg_init(struct hnae3_handle *handle)
{
const char *name = pci_name(handle->pdev);
- struct dentry *pfile;
handle->hnae3_dbgfs = debugfs_create_dir(name, hns3_dbgfs_root);
- if (!handle->hnae3_dbgfs)
- return;
- pfile = debugfs_create_file("cmd", 0600, handle->hnae3_dbgfs, handle,
- &hns3_dbg_cmd_fops);
- if (!pfile) {
- debugfs_remove_recursive(handle->hnae3_dbgfs);
- handle->hnae3_dbgfs = NULL;
- dev_warn(&handle->pdev->dev, "create file for %s fail\n",
- name);
- }
+ debugfs_create_file("cmd", 0600, handle->hnae3_dbgfs, handle,
+ &hns3_dbg_cmd_fops);
}
void hns3_dbg_uninit(struct hnae3_handle *handle)
@@ -397,10 +393,6 @@ void hns3_dbg_uninit(struct hnae3_handle *handle)
void hns3_dbg_register_debugfs(const char *debugfs_dir_name)
{
hns3_dbgfs_root = debugfs_create_dir(debugfs_dir_name, NULL);
- if (!hns3_dbgfs_root) {
- pr_warn("Register debugfs for %s fail\n", debugfs_dir_name);
- return;
- }
}
void hns3_dbg_unregister_debugfs(void)
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
index 310afa708831..9f3f8e3cb2c2 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
@@ -28,6 +28,12 @@
#define hns3_set_field(origin, shift, val) ((origin) |= ((val) << (shift)))
#define hns3_tx_bd_count(S) DIV_ROUND_UP(S, HNS3_MAX_BD_SIZE)
+#define hns3_rl_err(fmt, ...) \
+ do { \
+ if (net_ratelimit()) \
+ netdev_err(fmt, ##__VA_ARGS__); \
+ } while (0)
+
static void hns3_clear_all_ring(struct hnae3_handle *h, bool force);
static void hns3_remove_hw_addr(struct net_device *netdev);
@@ -45,6 +51,9 @@ MODULE_PARM_DESC(debug, " Network interface message level setting");
#define DEFAULT_MSG_LEVEL (NETIF_MSG_PROBE | NETIF_MSG_LINK | \
NETIF_MSG_IFDOWN | NETIF_MSG_IFUP)
+#define HNS3_INNER_VLAN_TAG 1
+#define HNS3_OUTER_VLAN_TAG 2
+
/* hns3_pci_tbl - PCI Device ID Table
*
* Last entry must be all 0s
@@ -220,9 +229,9 @@ static void hns3_vector_gl_rl_init(struct hns3_enet_tqp_vector *tqp_vector,
/* initialize the configuration for interrupt coalescing.
* 1. GL (Interrupt Gap Limiter)
* 2. RL (Interrupt Rate Limiter)
+ *
+ * Default: enable interrupt coalescing self-adaptive and GL
*/
-
- /* Default: enable interrupt coalescing self-adaptive and GL */
tqp_vector->tx_group.coal.gl_adapt_enable = 1;
tqp_vector->rx_group.coal.gl_adapt_enable = 1;
@@ -459,6 +468,9 @@ static int hns3_nic_net_open(struct net_device *netdev)
h->ae_algo->ops->set_timer_task(priv->ae_handle, true);
hns3_config_xps(priv);
+
+ netif_dbg(h, drv, netdev, "net open\n");
+
return 0;
}
@@ -519,6 +531,8 @@ static int hns3_nic_net_stop(struct net_device *netdev)
if (test_and_set_bit(HNS3_NIC_STATE_DOWN, &priv->state))
return 0;
+ netif_dbg(h, drv, netdev, "net stop\n");
+
if (h->ae_algo->ops->set_timer_task)
h->ae_algo->ops->set_timer_task(priv->ae_handle, false);
@@ -956,16 +970,16 @@ static void hns3_set_txbd_baseinfo(u16 *bdtp_fe_sc_vld_ra_ri, int frag_end)
hns3_set_field(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_VLD_B, 1U);
}
-static int hns3_fill_desc_vtags(struct sk_buff *skb,
- struct hns3_enet_ring *tx_ring,
- u32 *inner_vlan_flag,
- u32 *out_vlan_flag,
- u16 *inner_vtag,
- u16 *out_vtag)
+static int hns3_handle_vtags(struct hns3_enet_ring *tx_ring,
+ struct sk_buff *skb)
{
-#define HNS3_TX_VLAN_PRIO_SHIFT 13
-
struct hnae3_handle *handle = tx_ring->tqp->handle;
+ struct vlan_ethhdr *vhdr;
+ int rc;
+
+ if (!(skb->protocol == htons(ETH_P_8021Q) ||
+ skb_vlan_tag_present(skb)))
+ return 0;
/* Since HW limitation, if port based insert VLAN enabled, only one VLAN
* header is allowed in skb, otherwise it will cause RAS error.
@@ -976,8 +990,7 @@ static int hns3_fill_desc_vtags(struct sk_buff *skb,
return -EINVAL;
if (skb->protocol == htons(ETH_P_8021Q) &&
- !(tx_ring->tqp->handle->kinfo.netdev->features &
- NETIF_F_HW_VLAN_CTAG_TX)) {
+ !(handle->kinfo.netdev->features & NETIF_F_HW_VLAN_CTAG_TX)) {
/* When HW VLAN acceleration is turned off, and the stack
* sets the protocol to 802.1q, the driver just need to
* set the protocol to the encapsulated ethertype.
@@ -987,45 +1000,107 @@ static int hns3_fill_desc_vtags(struct sk_buff *skb,
}
if (skb_vlan_tag_present(skb)) {
- u16 vlan_tag;
-
- vlan_tag = skb_vlan_tag_get(skb);
- vlan_tag |= (skb->priority & 0x7) << HNS3_TX_VLAN_PRIO_SHIFT;
-
/* Based on hw strategy, use out_vtag in two layer tag case,
* and use inner_vtag in one tag case.
*/
- if (skb->protocol == htons(ETH_P_8021Q)) {
- if (handle->port_base_vlan_state ==
- HNAE3_PORT_BASE_VLAN_DISABLE){
- hns3_set_field(*out_vlan_flag,
- HNS3_TXD_OVLAN_B, 1);
- *out_vtag = vlan_tag;
- } else {
- hns3_set_field(*inner_vlan_flag,
- HNS3_TXD_VLAN_B, 1);
- *inner_vtag = vlan_tag;
- }
- } else {
- hns3_set_field(*inner_vlan_flag, HNS3_TXD_VLAN_B, 1);
- *inner_vtag = vlan_tag;
- }
- } else if (skb->protocol == htons(ETH_P_8021Q)) {
- struct vlan_ethhdr *vhdr;
- int rc;
+ if (skb->protocol == htons(ETH_P_8021Q) &&
+ handle->port_base_vlan_state ==
+ HNAE3_PORT_BASE_VLAN_DISABLE)
+ rc = HNS3_OUTER_VLAN_TAG;
+ else
+ rc = HNS3_INNER_VLAN_TAG;
- rc = skb_cow_head(skb, 0);
- if (unlikely(rc < 0))
- return rc;
- vhdr = (struct vlan_ethhdr *)skb->data;
- vhdr->h_vlan_TCI |= cpu_to_be16((skb->priority & 0x7)
- << HNS3_TX_VLAN_PRIO_SHIFT);
+ skb->protocol = vlan_get_protocol(skb);
+ return rc;
}
+ rc = skb_cow_head(skb, 0);
+ if (unlikely(rc < 0))
+ return rc;
+
+ vhdr = (struct vlan_ethhdr *)skb->data;
+ vhdr->h_vlan_TCI |= cpu_to_be16((skb->priority << VLAN_PRIO_SHIFT)
+ & VLAN_PRIO_MASK);
+
skb->protocol = vlan_get_protocol(skb);
return 0;
}
+static int hns3_fill_skb_desc(struct hns3_enet_ring *ring,
+ struct sk_buff *skb, struct hns3_desc *desc)
+{
+ u32 ol_type_vlan_len_msec = 0;
+ u32 type_cs_vlan_tso = 0;
+ u32 paylen = skb->len;
+ u16 inner_vtag = 0;
+ u16 out_vtag = 0;
+ u16 mss = 0;
+ int ret;
+
+ ret = hns3_handle_vtags(ring, skb);
+ if (unlikely(ret < 0)) {
+ u64_stats_update_begin(&ring->syncp);
+ ring->stats.tx_vlan_err++;
+ u64_stats_update_end(&ring->syncp);
+ return ret;
+ } else if (ret == HNS3_INNER_VLAN_TAG) {
+ inner_vtag = skb_vlan_tag_get(skb);
+ inner_vtag |= (skb->priority << VLAN_PRIO_SHIFT) &
+ VLAN_PRIO_MASK;
+ hns3_set_field(type_cs_vlan_tso, HNS3_TXD_VLAN_B, 1);
+ } else if (ret == HNS3_OUTER_VLAN_TAG) {
+ out_vtag = skb_vlan_tag_get(skb);
+ out_vtag |= (skb->priority << VLAN_PRIO_SHIFT) &
+ VLAN_PRIO_MASK;
+ hns3_set_field(ol_type_vlan_len_msec, HNS3_TXD_OVLAN_B,
+ 1);
+ }
+
+ if (skb->ip_summed == CHECKSUM_PARTIAL) {
+ u8 ol4_proto, il4_proto;
+
+ skb_reset_mac_len(skb);
+
+ ret = hns3_get_l4_protocol(skb, &ol4_proto, &il4_proto);
+ if (unlikely(ret)) {
+ u64_stats_update_begin(&ring->syncp);
+ ring->stats.tx_l4_proto_err++;
+ u64_stats_update_end(&ring->syncp);
+ return ret;
+ }
+
+ ret = hns3_set_l2l3l4(skb, ol4_proto, il4_proto,
+ &type_cs_vlan_tso,
+ &ol_type_vlan_len_msec);
+ if (unlikely(ret)) {
+ u64_stats_update_begin(&ring->syncp);
+ ring->stats.tx_l2l3l4_err++;
+ u64_stats_update_end(&ring->syncp);
+ return ret;
+ }
+
+ ret = hns3_set_tso(skb, &paylen, &mss,
+ &type_cs_vlan_tso);
+ if (unlikely(ret)) {
+ u64_stats_update_begin(&ring->syncp);
+ ring->stats.tx_tso_err++;
+ u64_stats_update_end(&ring->syncp);
+ return ret;
+ }
+ }
+
+ /* Set txbd */
+ desc->tx.ol_type_vlan_len_msec =
+ cpu_to_le32(ol_type_vlan_len_msec);
+ desc->tx.type_cs_vlan_tso_len = cpu_to_le32(type_cs_vlan_tso);
+ desc->tx.paylen = cpu_to_le32(paylen);
+ desc->tx.mss = cpu_to_le16(mss);
+ desc->tx.vlan_tag = cpu_to_le16(inner_vtag);
+ desc->tx.outer_vlan_tag = cpu_to_le16(out_vtag);
+
+ return 0;
+}
+
static int hns3_fill_desc(struct hns3_enet_ring *ring, void *priv,
unsigned int size, int frag_end,
enum hns_desc_type type)
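
hns3_handle_vtags() now returns which descriptor slot (inner vs outer) the tag belongs in, and hns3_fill_skb_desc() composes the TCI. A standalone sketch of that composition (helper name hypothetical):

    #include <linux/if_vlan.h>

    static u16 my_build_vtag(struct sk_buff *skb)
    {
            u16 vtag = skb_vlan_tag_get(skb);       /* VID plus original PCP */

            /* OR skb->priority into the PCP field, bits 15:13;
             * VLAN_PRIO_SHIFT is 13, VLAN_PRIO_MASK is 0xe000.
             */
            vtag |= (skb->priority << VLAN_PRIO_SHIFT) & VLAN_PRIO_MASK;
            return vtag;
    }
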
@@ -1033,65 +1108,29 @@ static int hns3_fill_desc(struct hns3_enet_ring *ring, void *priv,
struct hns3_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_use];
struct hns3_desc *desc = &ring->desc[ring->next_to_use];
struct device *dev = ring_to_dev(ring);
- struct skb_frag_struct *frag;
+ skb_frag_t *frag;
unsigned int frag_buf_num;
int k, sizeoflast;
dma_addr_t dma;
if (type == DESC_TYPE_SKB) {
struct sk_buff *skb = (struct sk_buff *)priv;
- u32 ol_type_vlan_len_msec = 0;
- u32 type_cs_vlan_tso = 0;
- u32 paylen = skb->len;
- u16 inner_vtag = 0;
- u16 out_vtag = 0;
- u16 mss = 0;
int ret;
- ret = hns3_fill_desc_vtags(skb, ring, &type_cs_vlan_tso,
- &ol_type_vlan_len_msec,
- &inner_vtag, &out_vtag);
+ ret = hns3_fill_skb_desc(ring, skb, desc);
if (unlikely(ret))
return ret;
- if (skb->ip_summed == CHECKSUM_PARTIAL) {
- u8 ol4_proto, il4_proto;
-
- skb_reset_mac_len(skb);
-
- ret = hns3_get_l4_protocol(skb, &ol4_proto, &il4_proto);
- if (unlikely(ret))
- return ret;
-
- ret = hns3_set_l2l3l4(skb, ol4_proto, il4_proto,
- &type_cs_vlan_tso,
- &ol_type_vlan_len_msec);
- if (unlikely(ret))
- return ret;
-
- ret = hns3_set_tso(skb, &paylen, &mss,
- &type_cs_vlan_tso);
- if (unlikely(ret))
- return ret;
- }
-
- /* Set txbd */
- desc->tx.ol_type_vlan_len_msec =
- cpu_to_le32(ol_type_vlan_len_msec);
- desc->tx.type_cs_vlan_tso_len = cpu_to_le32(type_cs_vlan_tso);
- desc->tx.paylen = cpu_to_le32(paylen);
- desc->tx.mss = cpu_to_le16(mss);
- desc->tx.vlan_tag = cpu_to_le16(inner_vtag);
- desc->tx.outer_vlan_tag = cpu_to_le16(out_vtag);
-
dma = dma_map_single(dev, skb->data, size, DMA_TO_DEVICE);
} else {
- frag = (struct skb_frag_struct *)priv;
+ frag = (skb_frag_t *)priv;
dma = skb_frag_dma_map(dev, frag, 0, size, DMA_TO_DEVICE);
}
if (unlikely(dma_mapping_error(dev, dma))) {
+ u64_stats_update_begin(&ring->syncp);
ring->stats.sw_err_cnt++;
+ u64_stats_update_end(&ring->syncp);
return -ENOMEM;
}
@@ -1147,28 +1186,20 @@ static int hns3_fill_desc(struct hns3_enet_ring *ring, void *priv,
return 0;
}
-static int hns3_nic_bd_num(struct sk_buff *skb)
+static unsigned int hns3_nic_bd_num(struct sk_buff *skb)
{
- int size = skb_headlen(skb);
- int i, bd_num;
+ unsigned int bd_num;
+ int i;
/* if the total len is within the max bd limit */
if (likely(skb->len <= HNS3_MAX_BD_SIZE))
return skb_shinfo(skb)->nr_frags + 1;
- bd_num = hns3_tx_bd_count(size);
+ bd_num = hns3_tx_bd_count(skb_headlen(skb));
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
- struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
- int frag_bd_num;
-
- size = skb_frag_size(frag);
- frag_bd_num = hns3_tx_bd_count(size);
-
- if (unlikely(frag_bd_num > HNS3_MAX_BD_PER_FRAG))
- return -ENOMEM;
-
- bd_num += frag_bd_num;
+ skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+ bd_num += hns3_tx_bd_count(skb_frag_size(frag));
}
return bd_num;
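/* A hedged worked example for hns3_nic_bd_num() above, assuming
 * hns3_tx_bd_count() behaves essentially like the helper below with
 * HNS3_MAX_BD_SIZE == 65535: a 100000-byte linear area costs 2 BDs and
 * each 16000-byte frag costs 1, so a skb with that head and 3 such frags
 * needs 2 + 3 = 5 BDs.  Short skbs (len <= 65535) take the fast path and
 * simply report nr_frags + 1.
 */
static unsigned int demo_bd_count(unsigned int size)
{
	return DIV_ROUND_UP(size, HNS3_MAX_BD_SIZE);	/* assumption, see above */
}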
@@ -1189,7 +1220,7 @@ static unsigned int hns3_gso_hdr_len(struct sk_buff *skb)
*/
static bool hns3_skb_need_linearized(struct sk_buff *skb)
{
- int bd_limit = HNS3_MAX_BD_PER_FRAG - 1;
+ int bd_limit = HNS3_MAX_BD_NUM_NORMAL - 1;
unsigned int tot_len = 0;
int i;
@@ -1219,21 +1250,16 @@ static int hns3_nic_maybe_stop_tx(struct hns3_enet_ring *ring,
struct sk_buff **out_skb)
{
struct sk_buff *skb = *out_skb;
- int bd_num;
+ unsigned int bd_num;
bd_num = hns3_nic_bd_num(skb);
- if (bd_num < 0)
- return bd_num;
-
- if (unlikely(bd_num > HNS3_MAX_BD_PER_FRAG)) {
+ if (unlikely(bd_num > HNS3_MAX_BD_NUM_NORMAL)) {
struct sk_buff *new_skb;
- if (skb_is_gso(skb) && !hns3_skb_need_linearized(skb))
+ if (skb_is_gso(skb) && bd_num <= HNS3_MAX_BD_NUM_TSO &&
+ !hns3_skb_need_linearized(skb))
goto out;
- bd_num = hns3_tx_bd_count(skb->len);
- if (unlikely(ring_space(ring) < bd_num))
- return -EBUSY;
/* manually split the packet to be sent */
new_skb = skb_copy(skb, GFP_ATOMIC);
if (!new_skb)
@@ -1241,6 +1267,11 @@ static int hns3_nic_maybe_stop_tx(struct hns3_enet_ring *ring,
dev_kfree_skb_any(skb);
*out_skb = new_skb;
+ bd_num = hns3_nic_bd_num(new_skb);
+ if ((skb_is_gso(new_skb) && bd_num > HNS3_MAX_BD_NUM_TSO) ||
+ (!skb_is_gso(new_skb) && bd_num > HNS3_MAX_BD_NUM_NORMAL))
+ return -ENOMEM;
+
u64_stats_update_begin(&ring->syncp);
ring->stats.tx_copy++;
u64_stats_update_end(&ring->syncp);
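/* A sketch of the over-limit decision the refactored code above now makes
 * (illustrative helper, not driver code): GSO frames get the larger
 * HNS3_MAX_BD_NUM_TSO budget, everything else must fit the normal limit
 * or be linearized with skb_copy() and re-counted.
 */
static bool demo_need_linearize(struct sk_buff *skb, unsigned int bd_num)
{
	if (bd_num <= HNS3_MAX_BD_NUM_NORMAL)
		return false;			/* fits as-is */

	/* TSO frames may chain up to HNS3_MAX_BD_NUM_TSO BDs, provided
	 * hns3_skb_need_linearized() does not flag them.
	 */
	if (skb_is_gso(skb) && bd_num <= HNS3_MAX_BD_NUM_TSO &&
	    !hns3_skb_need_linearized(skb))
		return false;

	return true;	/* caller copies via skb_copy() and re-counts BDs */
}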
@@ -1290,7 +1321,7 @@ netdev_tx_t hns3_nic_net_xmit(struct sk_buff *skb, struct net_device *netdev)
&tx_ring_data(priv, skb->queue_mapping);
struct hns3_enet_ring *ring = ring_data->ring;
struct netdev_queue *dev_queue;
- struct skb_frag_struct *frag;
+ skb_frag_t *frag;
int next_to_use_head;
int buf_num;
int seg_num;
@@ -1314,9 +1345,7 @@ netdev_tx_t hns3_nic_net_xmit(struct sk_buff *skb, struct net_device *netdev)
u64_stats_update_end(&ring->syncp);
}
- if (net_ratelimit())
- netdev_err(netdev, "xmit error: %d!\n", buf_num);
-
+ hns3_rl_err(netdev, "xmit error: %d!\n", buf_num);
goto out_err_tx_ok;
}
@@ -1482,7 +1511,15 @@ static void hns3_nic_get_stats64(struct net_device *netdev,
tx_bytes += ring->stats.tx_bytes;
tx_pkts += ring->stats.tx_pkts;
tx_drop += ring->stats.sw_err_cnt;
+ tx_drop += ring->stats.tx_vlan_err;
+ tx_drop += ring->stats.tx_l4_proto_err;
+ tx_drop += ring->stats.tx_l2l3l4_err;
+ tx_drop += ring->stats.tx_tso_err;
tx_errors += ring->stats.sw_err_cnt;
+ tx_errors += ring->stats.tx_vlan_err;
+ tx_errors += ring->stats.tx_l4_proto_err;
+ tx_errors += ring->stats.tx_l2l3l4_err;
+ tx_errors += ring->stats.tx_tso_err;
} while (u64_stats_fetch_retry_irq(&ring->syncp, start));
/* fetch the rx stats */
@@ -1550,6 +1587,8 @@ static int hns3_setup_tc(struct net_device *netdev, void *type_data)
h = hns3_get_handle(netdev);
kinfo = &h->kinfo;
+ netif_dbg(h, drv, netdev, "setup tc: num_tc=%u\n", tc);
+
return (kinfo->dcb_ops && kinfo->dcb_ops->setup_tc) ?
kinfo->dcb_ops->setup_tc(h, tc, prio_tc) : -EOPNOTSUPP;
}
@@ -1593,6 +1632,10 @@ static int hns3_ndo_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan,
struct hnae3_handle *h = hns3_get_handle(netdev);
int ret = -EIO;
+ netif_dbg(h, drv, netdev,
+ "set vf vlan: vf=%d, vlan=%u, qos=%u, vlan_proto=%u\n",
+ vf, vlan, qos, vlan_proto);
+
if (h->ae_algo->ops->set_vf_vlan_filter)
ret = h->ae_algo->ops->set_vf_vlan_filter(h, vf, vlan,
qos, vlan_proto);
@@ -1611,6 +1654,9 @@ static int hns3_nic_change_mtu(struct net_device *netdev, int new_mtu)
if (!h->ae_algo->ops->set_mtu)
return -EOPNOTSUPP;
+ netif_dbg(h, drv, netdev,
+ "change mtu from %u to %d\n", netdev->mtu, new_mtu);
+
ret = h->ae_algo->ops->set_mtu(h, new_mtu);
if (ret)
netdev_err(netdev, "failed to change MTU in hardware %d\n",
@@ -1680,15 +1726,12 @@ static bool hns3_get_tx_timeo_queue_info(struct net_device *ndev)
/* When the mac receives many pause frames continuously, it is unable to
 * send packets, which may cause a tx timeout
*/
- if (h->ae_algo->ops->update_stats &&
- h->ae_algo->ops->get_mac_pause_stats) {
- u64 tx_pause_cnt, rx_pause_cnt;
+ if (h->ae_algo->ops->get_mac_stats) {
+ struct hns3_mac_stats mac_stats;
- h->ae_algo->ops->update_stats(h, &ndev->stats);
- h->ae_algo->ops->get_mac_pause_stats(h, &tx_pause_cnt,
- &rx_pause_cnt);
+ h->ae_algo->ops->get_mac_stats(h, &mac_stats);
netdev_info(ndev, "tx_pause_cnt: %llu, rx_pause_cnt: %llu\n",
- tx_pause_cnt, rx_pause_cnt);
+ mac_stats.tx_pause_cnt, mac_stats.rx_pause_cnt);
}
hw_head = readl_relaxed(tx_ring->tqp->io_base +
@@ -1963,7 +2006,7 @@ static pci_ers_result_t hns3_slot_reset(struct pci_dev *pdev)
ops = ae_dev->ops;
/* request the reset */
- if (ops->reset_event) {
+ if (ops->reset_event && ops->get_reset_level) {
if (ae_dev->hw_err_reset_req) {
reset_type = ops->get_reset_level(ae_dev,
&ae_dev->hw_err_reset_req);
@@ -2067,7 +2110,7 @@ static void hns3_set_default_feature(struct net_device *netdev)
static int hns3_alloc_buffer(struct hns3_enet_ring *ring,
struct hns3_desc_cb *cb)
{
- unsigned int order = hnae3_page_order(ring);
+ unsigned int order = hns3_page_order(ring);
struct page *p;
p = dev_alloc_pages(order);
@@ -2078,7 +2121,7 @@ static int hns3_alloc_buffer(struct hns3_enet_ring *ring,
cb->page_offset = 0;
cb->reuse_flag = 0;
cb->buf = page_address(p);
- cb->length = hnae3_page_size(ring);
+ cb->length = hns3_page_size(ring);
cb->type = DESC_TYPE_PAGE;
return 0;
@@ -2357,8 +2400,9 @@ static void hns3_nic_alloc_rx_buffers(struct hns3_enet_ring *ring,
ring->stats.sw_err_cnt++;
u64_stats_update_end(&ring->syncp);
- netdev_err(ring->tqp->handle->kinfo.netdev,
- "hnae reserve buffer map failed.\n");
+ hns3_rl_err(ring->tqp_vector->napi.dev,
+ "alloc rx buffer failed: %d\n",
+ ret);
break;
}
hns3_replace_buffer(ring, ring->next_to_use, &res_cbs);
@@ -2381,7 +2425,7 @@ static void hns3_nic_reuse_page(struct sk_buff *skb, int i,
{
struct hns3_desc *desc = &ring->desc[ring->next_to_clean];
int size = le16_to_cpu(desc->rx.size);
- u32 truesize = hnae3_buf_size(ring);
+ u32 truesize = hns3_buf_size(ring);
skb_add_rx_frag(skb, i, desc_cb->priv, desc_cb->page_offset + pull_len,
size - pull_len, truesize);
@@ -2396,7 +2440,7 @@ static void hns3_nic_reuse_page(struct sk_buff *skb, int i,
/* Move offset up to the next cache line */
desc_cb->page_offset += truesize;
- if (desc_cb->page_offset + truesize <= hnae3_page_size(ring)) {
+ if (desc_cb->page_offset + truesize <= hns3_page_size(ring)) {
desc_cb->reuse_flag = 1;
/* Bump ref count on page before it is given */
get_page(desc_cb->priv);
@@ -2443,9 +2487,9 @@ static int hns3_gro_complete(struct sk_buff *skb, u32 l234info)
th->check = ~tcp_v6_check(skb->len - depth, &iph->saddr,
&iph->daddr, 0);
} else {
- netdev_err(skb->dev,
- "Error: FW GRO supports only IPv4/IPv6, not 0x%04x, depth: %d\n",
- be16_to_cpu(type), depth);
+ hns3_rl_err(skb->dev,
+ "Error: FW GRO supports only IPv4/IPv6, not 0x%04x, depth: %d\n",
+ be16_to_cpu(type), depth);
return -EFAULT;
}
@@ -2587,7 +2631,7 @@ static int hns3_alloc_skb(struct hns3_enet_ring *ring, unsigned int length,
ring->skb = napi_alloc_skb(&ring->tqp_vector->napi, HNS3_RX_HEAD_SIZE);
skb = ring->skb;
if (unlikely(!skb)) {
- netdev_err(netdev, "alloc rx skb fail\n");
+ hns3_rl_err(netdev, "alloc rx skb fail\n");
u64_stats_update_begin(&ring->syncp);
ring->stats.sw_err_cnt++;
@@ -2662,8 +2706,8 @@ static int hns3_add_frag(struct hns3_enet_ring *ring, struct hns3_desc *desc,
new_skb = napi_alloc_skb(&ring->tqp_vector->napi,
HNS3_RX_HEAD_SIZE);
if (unlikely(!new_skb)) {
- netdev_err(ring->tqp->handle->kinfo.netdev,
- "alloc rx skb frag fail\n");
+ hns3_rl_err(ring->tqp_vector->napi.dev,
+ "alloc rx fraglist skb fail\n");
return -ENXIO;
}
ring->frag_num = 0;
@@ -2678,7 +2722,7 @@ static int hns3_add_frag(struct hns3_enet_ring *ring, struct hns3_desc *desc,
}
if (ring->tail_skb) {
- head_skb->truesize += hnae3_buf_size(ring);
+ head_skb->truesize += hns3_buf_size(ring);
head_skb->data_len += le16_to_cpu(desc->rx.size);
head_skb->len += le16_to_cpu(desc->rx.size);
skb = ring->tail_skb;
@@ -2895,24 +2939,22 @@ int hns3_clean_rx_ring(struct hns3_enet_ring *ring, int budget,
void (*rx_fn)(struct hns3_enet_ring *, struct sk_buff *))
{
#define RCB_NOF_ALLOC_RX_BUFF_ONCE 16
- int recv_pkts, recv_bds, clean_count, err;
int unused_count = hns3_desc_unused(ring);
struct sk_buff *skb = ring->skb;
- int num;
+ int recv_pkts = 0;
+ int recv_bds = 0;
+ int err, num;
num = readl_relaxed(ring->tqp->io_base + HNS3_RING_RX_RING_FBDNUM_REG);
rmb(); /* Make sure num has taken effect before the other data is touched */
- recv_pkts = 0, recv_bds = 0, clean_count = 0;
num -= unused_count;
unused_count -= ring->pending_buf;
while (recv_pkts < budget && recv_bds < num) {
/* Reuse or realloc buffers */
- if (clean_count + unused_count >= RCB_NOF_ALLOC_RX_BUFF_ONCE) {
- hns3_nic_alloc_rx_buffers(ring,
- clean_count + unused_count);
- clean_count = 0;
+ if (unused_count >= RCB_NOF_ALLOC_RX_BUFF_ONCE) {
+ hns3_nic_alloc_rx_buffers(ring, unused_count);
unused_count = hns3_desc_unused(ring) -
ring->pending_buf;
}
@@ -2926,7 +2968,7 @@ int hns3_clean_rx_ring(struct hns3_enet_ring *ring, int budget,
goto out;
} else if (unlikely(err)) { /* Do jump the err */
recv_bds += ring->pending_buf;
- clean_count += ring->pending_buf;
+ unused_count += ring->pending_buf;
ring->skb = NULL;
ring->pending_buf = 0;
continue;
@@ -2934,7 +2976,7 @@ int hns3_clean_rx_ring(struct hns3_enet_ring *ring, int budget,
rx_fn(ring, skb);
recv_bds += ring->pending_buf;
- clean_count += ring->pending_buf;
+ unused_count += ring->pending_buf;
ring->skb = NULL;
ring->pending_buf = 0;
@@ -2943,8 +2985,8 @@ int hns3_clean_rx_ring(struct hns3_enet_ring *ring, int budget,
out:
/* Make sure all data has been written before submit */
- if (clean_count + unused_count > 0)
- hns3_nic_alloc_rx_buffers(ring, clean_count + unused_count);
+ if (unused_count > 0)
+ hns3_nic_alloc_rx_buffers(ring, unused_count);
return recv_pkts;
}
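/* Accounting sketch for the RX clean loop above: a single unused_count now
 * tracks descriptors eligible for refill, folding in the old clean_count.
 * The flow below is illustrative only:
 *
 *	unused = hns3_desc_unused(ring) - ring->pending_buf;
 *	while (work left) {
 *		if (unused >= RCB_NOF_ALLOC_RX_BUFF_ONCE) {
 *			hns3_nic_alloc_rx_buffers(ring, unused);
 *			unused = hns3_desc_unused(ring) - ring->pending_buf;
 *		}
 *		receive one packet;
 *		unused += ring->pending_buf;	// consumed BDs become refillable
 *		ring->pending_buf = 0;
 *	}
 *	if (unused > 0)				// final top-up on exit
 *		hns3_nic_alloc_rx_buffers(ring, unused);
 */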
@@ -3574,7 +3616,7 @@ out:
return ret;
}
-static void hns3_fini_ring(struct hns3_enet_ring *ring)
+void hns3_fini_ring(struct hns3_enet_ring *ring)
{
hns3_free_desc(ring);
devm_kfree(ring_to_dev(ring), ring->desc_cb);
@@ -4165,8 +4207,8 @@ int hns3_nic_reset_all_ring(struct hnae3_handle *h)
static void hns3_store_coal(struct hns3_nic_priv *priv)
{
/* ethtool only supports setting and querying one coal
- * configuation for now, so save the vector 0' coal
- * configuation here in order to restore it.
+	 * configuration for now, so save the vector 0's coal
+ * configuration here in order to restore it.
*/
memcpy(&priv->tx_coal, &priv->tqp_vector[0].tx_group.coal,
sizeof(struct hns3_enet_coalesce));
@@ -4378,6 +4420,9 @@ int hns3_set_channels(struct net_device *netdev,
u16 org_tqp_num;
int ret;
+ if (hns3_nic_resetting(netdev))
+ return -EBUSY;
+
if (ch->rx_count || ch->tx_count)
return -EINVAL;
@@ -4392,6 +4437,10 @@ int hns3_set_channels(struct net_device *netdev,
if (kinfo->rss_size == new_tqp_num)
return 0;
+ netif_dbg(h, drv, netdev,
+ "set channels: tqp_num=%u, rxfh=%d\n",
+ new_tqp_num, rxfh_configured);
+
ret = hns3_reset_notify(h, HNAE3_DOWN_CLIENT);
if (ret)
return ret;
@@ -4421,12 +4470,36 @@ int hns3_set_channels(struct net_device *netdev,
return hns3_reset_notify(h, HNAE3_UP_CLIENT);
}
+static const struct hns3_hw_error_info hns3_hw_err[] = {
+ { .type = HNAE3_PPU_POISON_ERROR,
+ .msg = "PPU poison" },
+ { .type = HNAE3_CMDQ_ECC_ERROR,
+ .msg = "IMP CMDQ error" },
+ { .type = HNAE3_IMP_RD_POISON_ERROR,
+ .msg = "IMP RD poison" },
+};
+
+static void hns3_process_hw_error(struct hnae3_handle *handle,
+ enum hnae3_hw_error_type type)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(hns3_hw_err); i++) {
+ if (hns3_hw_err[i].type == type) {
+ dev_err(&handle->pdev->dev, "Detected %s!\n",
+ hns3_hw_err[i].msg);
+ break;
+ }
+ }
+}
+
static const struct hnae3_client_ops client_ops = {
.init_instance = hns3_client_init,
.uninit_instance = hns3_client_uninit,
.link_status_change = hns3_link_status_change,
.setup_tc = hns3_client_setup_tc,
.reset_notify = hns3_reset_notify,
+ .process_hw_error = hns3_process_hw_error,
};
/* hns3_init_module - Driver registration routine
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
index 848b866761df..2110fa3b4479 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
@@ -75,7 +75,7 @@ enum hns3_nic_state {
#define HNS3_TX_TIMEOUT (5 * HZ)
#define HNS3_RING_NAME_LEN 16
#define HNS3_BUFFER_SIZE_2048 2048
-#define HNS3_RING_MAX_PENDING 32768
+#define HNS3_RING_MAX_PENDING 32760
#define HNS3_RING_MIN_PENDING 24
#define HNS3_RING_BD_MULTIPLE 8
/* max frame size of mac */
@@ -195,7 +195,8 @@ enum hns3_nic_state {
#define HNS3_VECTOR_INITED 1
#define HNS3_MAX_BD_SIZE 65535
-#define HNS3_MAX_BD_PER_FRAG 8
+#define HNS3_MAX_BD_NUM_NORMAL 8
+#define HNS3_MAX_BD_NUM_TSO 63
#define HNS3_MAX_BD_PER_PKT MAX_SKB_FRAGS
#define HNS3_VECTOR_GL0_OFFSET 0x100
@@ -301,7 +302,7 @@ struct hns3_desc_cb {
dma_addr_t dma; /* dma address of this desc */
void *buf; /* cpu addr for a desc */
- /* priv data for the desc, e.g. skb when use with ip stack*/
+ /* priv data for the desc, e.g. skb when use with ip stack */
void *priv;
u32 page_offset;
u32 length; /* length of the buffer */
@@ -324,11 +325,11 @@ enum hns3_pkt_l3type {
HNS3_L3_TYPE_MAC_PAUSE,
HNS3_L3_TYPE_PFC_PAUSE, /* 0x9 */
- /* reserved for 0xA~0xB*/
+ /* reserved for 0xA~0xB */
HNS3_L3_TYPE_CNM = 0xc,
- /* reserved for 0xD~0xE*/
+ /* reserved for 0xD~0xE */
HNS3_L3_TYPE_PARSE_FAIL = 0xf /* must be last */
};
@@ -353,7 +354,7 @@ enum hns3_pkt_ol3type {
HNS3_OL3_TYPE_IPV4_OPT = 4,
HNS3_OL3_TYPE_IPV6_EXT,
- /* reserved for 0x6~0xE*/
+ /* reserved for 0x6~0xE */
HNS3_OL3_TYPE_PARSE_FAIL = 0xf /* must be last */
};
@@ -377,6 +378,10 @@ struct ring_stats {
u64 restart_queue;
u64 tx_busy;
u64 tx_copy;
+ u64 tx_vlan_err;
+ u64 tx_l4_proto_err;
+ u64 tx_l2l3l4_err;
+ u64 tx_tso_err;
};
struct {
u64 rx_pkts;
@@ -547,6 +552,11 @@ union l4_hdr_info {
unsigned char *hdr;
};
+struct hns3_hw_error_info {
+ enum hnae3_hw_error_type type;
+ const char *msg;
+};
+
static inline int ring_space(struct hns3_enet_ring *ring)
{
/* This smp_load_acquire() pairs with smp_store_release() in
@@ -608,9 +618,18 @@ static inline bool hns3_nic_resetting(struct net_device *netdev)
#define tx_ring_data(priv, idx) ((priv)->ring_data[idx])
-#define hnae3_buf_size(_ring) ((_ring)->buf_size)
-#define hnae3_page_order(_ring) (get_order(hnae3_buf_size(_ring)))
-#define hnae3_page_size(_ring) (PAGE_SIZE << (u32)hnae3_page_order(_ring))
+#define hns3_buf_size(_ring) ((_ring)->buf_size)
+
+static inline unsigned int hns3_page_order(struct hns3_enet_ring *ring)
+{
+#if (PAGE_SIZE < 8192)
+ if (ring->buf_size > (PAGE_SIZE / 2))
+ return 1;
+#endif
+ return 0;
+}
+
+#define hns3_page_size(_ring) (PAGE_SIZE << hns3_page_order(_ring))
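/* Worked example for hns3_page_order()/hns3_page_size() above, assuming
 * PAGE_SIZE == 4096: a 2048-byte RX buffer fits two per page, so the order
 * is 0 and hns3_page_size() is 4096; a 4096-byte buffer needs an order-1
 * (8 KiB) compound page.  On 64 KiB-page systems the #if branch compiles
 * away and the order is always 0.
 */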
/* iterator for handling rings in ring group */
#define hns3_for_each_ring(pos, head) \
@@ -633,6 +652,7 @@ void hns3_clean_tx_ring(struct hns3_enet_ring *ring);
int hns3_init_all_ring(struct hns3_nic_priv *priv);
int hns3_uninit_all_ring(struct hns3_nic_priv *priv);
int hns3_nic_reset_all_ring(struct hnae3_handle *h);
+void hns3_fini_ring(struct hns3_enet_ring *ring);
netdev_tx_t hns3_nic_net_xmit(struct sk_buff *skb, struct net_device *netdev);
bool hns3_is_phys_func(struct pci_dev *pdev);
int hns3_clean_rx_ring(
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
index 5bff98a9b0dc..c52eccc1621d 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
@@ -30,6 +30,10 @@ static const struct hns3_stats hns3_txq_stats[] = {
HNS3_TQP_STAT("wake", restart_queue),
HNS3_TQP_STAT("busy", tx_busy),
HNS3_TQP_STAT("copy", tx_copy),
+ HNS3_TQP_STAT("vlan_err", tx_vlan_err),
+ HNS3_TQP_STAT("l4_proto_err", tx_l4_proto_err),
+ HNS3_TQP_STAT("l2l3l4_err", tx_l2l3l4_err),
+ HNS3_TQP_STAT("tso_err", tx_tso_err),
};
#define HNS3_TXQ_STATS_COUNT ARRAY_SIZE(hns3_txq_stats)
@@ -55,7 +59,7 @@ static const struct hns3_stats hns3_rxq_stats[] = {
#define HNS3_TQP_STATS_COUNT (HNS3_TXQ_STATS_COUNT + HNS3_RXQ_STATS_COUNT)
-#define HNS3_SELF_TEST_TYPE_NUM 3
+#define HNS3_SELF_TEST_TYPE_NUM 4
#define HNS3_NIC_LB_TEST_PKT_NUM 1
#define HNS3_NIC_LB_TEST_RING_ID 0
#define HNS3_NIC_LB_TEST_PACKET_SIZE 128
@@ -85,6 +89,7 @@ static int hns3_lp_setup(struct net_device *ndev, enum hnae3_loop loop, bool en)
case HNAE3_LOOP_SERIAL_SERDES:
case HNAE3_LOOP_PARALLEL_SERDES:
case HNAE3_LOOP_APP:
+ case HNAE3_LOOP_PHY:
ret = h->ae_algo->ops->set_loopback(h, loop, en);
break;
default:
@@ -92,7 +97,7 @@ static int hns3_lp_setup(struct net_device *ndev, enum hnae3_loop loop, bool en)
break;
}
- if (ret)
+ if (ret || h->pdev->revision >= 0x21)
return ret;
if (en) {
@@ -139,7 +144,10 @@ static int hns3_lp_down(struct net_device *ndev, enum hnae3_loop loop_mode)
static void hns3_lp_setup_skb(struct sk_buff *skb)
{
+#define HNS3_NIC_LB_DST_MAC_ADDR 0x1f
+
struct net_device *ndev = skb->dev;
+ struct hnae3_handle *handle;
unsigned char *packet;
struct ethhdr *ethh;
unsigned int i;
@@ -155,7 +163,9 @@ static void hns3_lp_setup_skb(struct sk_buff *skb)
* before the packet reaches mac or serdes, which will defeat
* the purpose of mac or serdes selftest.
*/
- ethh->h_dest[5] += 0x1f;
+ handle = hns3_get_handle(ndev);
+ if (handle->pdev->revision == 0x20)
+ ethh->h_dest[5] += HNS3_NIC_LB_DST_MAC_ADDR;
eth_zero_addr(ethh->h_source);
ethh->h_proto = htons(ETH_P_ARP);
skb_reset_mac_header(skb);
@@ -311,6 +321,8 @@ static void hns3_self_test(struct net_device *ndev,
if (eth_test->flags != ETH_TEST_FL_OFFLINE)
return;
+	netif_dbg(h, drv, ndev, "self test start\n");
+
st_param[HNAE3_LOOP_APP][0] = HNAE3_LOOP_APP;
st_param[HNAE3_LOOP_APP][1] =
h->flags & HNAE3_SUPPORT_APP_LOOPBACK;
@@ -324,6 +336,10 @@ static void hns3_self_test(struct net_device *ndev,
st_param[HNAE3_LOOP_PARALLEL_SERDES][1] =
h->flags & HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK;
+ st_param[HNAE3_LOOP_PHY][0] = HNAE3_LOOP_PHY;
+ st_param[HNAE3_LOOP_PHY][1] =
+ h->flags & HNAE3_SUPPORT_PHY_LOOPBACK;
+
if (if_running)
ndev->netdev_ops->ndo_stop(ndev);
@@ -374,6 +390,8 @@ static void hns3_self_test(struct net_device *ndev,
if (if_running)
ndev->netdev_ops->ndo_open(ndev);
+
+ netif_dbg(h, drv, ndev, "self test end\n");
}
static int hns3_get_sset_count(struct net_device *netdev, int stringset)
@@ -527,6 +545,7 @@ static void hns3_get_drvinfo(struct net_device *netdev,
{
struct hns3_nic_priv *priv = netdev_priv(netdev);
struct hnae3_handle *h = priv->ae_handle;
+ u32 fw_version;
if (!h->ae_algo->ops->get_fw_version) {
netdev_err(netdev, "could not get fw version!\n");
@@ -545,8 +564,18 @@ static void hns3_get_drvinfo(struct net_device *netdev,
sizeof(drvinfo->bus_info));
drvinfo->bus_info[ETHTOOL_BUSINFO_LEN - 1] = '\0';
- snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version), "0x%08x",
- priv->ae_handle->ae_algo->ops->get_fw_version(h));
+ fw_version = priv->ae_handle->ae_algo->ops->get_fw_version(h);
+
+ snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
+ "%lu.%lu.%lu.%lu",
+ hnae3_get_field(fw_version, HNAE3_FW_VERSION_BYTE3_MASK,
+ HNAE3_FW_VERSION_BYTE3_SHIFT),
+ hnae3_get_field(fw_version, HNAE3_FW_VERSION_BYTE2_MASK,
+ HNAE3_FW_VERSION_BYTE2_SHIFT),
+ hnae3_get_field(fw_version, HNAE3_FW_VERSION_BYTE1_MASK,
+ HNAE3_FW_VERSION_BYTE1_SHIFT),
+ hnae3_get_field(fw_version, HNAE3_FW_VERSION_BYTE0_MASK,
+ HNAE3_FW_VERSION_BYTE0_SHIFT));
}
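/* A minimal sketch of the dotted-version decode used above, assuming the
 * HNAE3_FW_VERSION_BYTEn masks/shifts select consecutive bytes of the
 * 32-bit word (so e.g. 0x01080214 prints as "1.8.2.20"):
 */
static void demo_fw_version_str(u32 ver, char *buf, size_t len)
{
	snprintf(buf, len, "%u.%u.%u.%u",
		 (ver >> 24) & 0xff, (ver >> 16) & 0xff,
		 (ver >> 8) & 0xff, ver & 0xff);
}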
static u32 hns3_get_link(struct net_device *netdev)
@@ -593,6 +622,10 @@ static int hns3_set_pauseparam(struct net_device *netdev,
{
struct hnae3_handle *h = hns3_get_handle(netdev);
+ netif_dbg(h, drv, netdev,
+ "set pauseparam: autoneg=%u, rx:%u, tx:%u\n",
+ param->autoneg, param->rx_pause, param->tx_pause);
+
if (h->ae_algo->ops->set_pauseparam)
return h->ae_algo->ops->set_pauseparam(h, param->autoneg,
param->rx_pause,
@@ -612,7 +645,7 @@ static void hns3_get_ksettings(struct hnae3_handle *h,
&cmd->base.speed,
&cmd->base.duplex);
- /* 2.get link mode*/
+ /* 2.get link mode */
if (ops->get_link_mode)
ops->get_link_mode(h,
cmd->link_modes.supported,
@@ -681,7 +714,7 @@ static int hns3_get_link_ksettings(struct net_device *netdev,
return 0;
}
-static int hns3_check_ksettings_param(struct net_device *netdev,
+static int hns3_check_ksettings_param(const struct net_device *netdev,
const struct ethtool_link_ksettings *cmd)
{
struct hnae3_handle *handle = hns3_get_handle(netdev);
@@ -726,12 +759,17 @@ static int hns3_set_link_ksettings(struct net_device *netdev,
{
struct hnae3_handle *handle = hns3_get_handle(netdev);
const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
- int ret = 0;
+ int ret;
/* Chip doesn't support this mode. */
if (cmd->base.speed == SPEED_1000 && cmd->base.duplex == DUPLEX_HALF)
return -EINVAL;
+ netif_dbg(handle, drv, netdev,
+ "set link(%s): autoneg=%u, speed=%u, duplex=%u\n",
+ netdev->phydev ? "phy" : "mac",
+ cmd->base.autoneg, cmd->base.speed, cmd->base.duplex);
+
/* Only support ksettings_set for netdev with phy attached for now */
if (netdev->phydev)
return phy_ethtool_ksettings_set(netdev->phydev, cmd);
@@ -843,8 +881,8 @@ static int hns3_get_rxnfc(struct net_device *netdev,
}
}
-static int hns3_change_all_ring_bd_num(struct hns3_nic_priv *priv,
- u32 tx_desc_num, u32 rx_desc_num)
+static void hns3_change_all_ring_bd_num(struct hns3_nic_priv *priv,
+ u32 tx_desc_num, u32 rx_desc_num)
{
struct hnae3_handle *h = priv->ae_handle;
int i;
@@ -857,21 +895,29 @@ static int hns3_change_all_ring_bd_num(struct hns3_nic_priv *priv,
priv->ring_data[i + h->kinfo.num_tqps].ring->desc_num =
rx_desc_num;
}
-
- return hns3_init_all_ring(priv);
}
-static int hns3_set_ringparam(struct net_device *ndev,
- struct ethtool_ringparam *param)
+static struct hns3_enet_ring *hns3_backup_ringparam(struct hns3_nic_priv *priv)
{
- struct hns3_nic_priv *priv = netdev_priv(ndev);
- struct hnae3_handle *h = priv->ae_handle;
- bool if_running = netif_running(ndev);
- u32 old_tx_desc_num, new_tx_desc_num;
- u32 old_rx_desc_num, new_rx_desc_num;
- int queue_num = h->kinfo.num_tqps;
- int ret;
+ struct hnae3_handle *handle = priv->ae_handle;
+ struct hns3_enet_ring *tmp_rings;
+ int i;
+
+ tmp_rings = kcalloc(handle->kinfo.num_tqps * 2,
+ sizeof(struct hns3_enet_ring), GFP_KERNEL);
+ if (!tmp_rings)
+ return NULL;
+ for (i = 0; i < handle->kinfo.num_tqps * 2; i++)
+ memcpy(&tmp_rings[i], priv->ring_data[i].ring,
+ sizeof(struct hns3_enet_ring));
+
+ return tmp_rings;
+}
+
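/* Usage sketch for hns3_backup_ringparam() above: hns3_set_ringparam()
 * (further down in this patch) snapshots every ring, tries the new BD
 * numbers, and on failure copies the snapshot back before freeing it, so
 * a failed resize leaves the rings exactly as they were:
 *
 *	tmp_rings = hns3_backup_ringparam(priv);	// snapshot
 *	if (!tmp_rings)
 *		return -ENOMEM;
 *	// ... try new BD numbers; on failure memcpy() tmp_rings back ...
 *	kfree(tmp_rings);
 */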
+static int hns3_check_ringparam(struct net_device *ndev,
+ struct ethtool_ringparam *param)
+{
if (hns3_nic_resetting(ndev))
return -EBUSY;
@@ -887,6 +933,25 @@ static int hns3_set_ringparam(struct net_device *ndev,
return -EINVAL;
}
+ return 0;
+}
+
+static int hns3_set_ringparam(struct net_device *ndev,
+ struct ethtool_ringparam *param)
+{
+ struct hns3_nic_priv *priv = netdev_priv(ndev);
+ struct hnae3_handle *h = priv->ae_handle;
+ struct hns3_enet_ring *tmp_rings;
+ bool if_running = netif_running(ndev);
+ u32 old_tx_desc_num, new_tx_desc_num;
+ u32 old_rx_desc_num, new_rx_desc_num;
+ u16 queue_num = h->kinfo.num_tqps;
+ int ret, i;
+
+ ret = hns3_check_ringparam(ndev, param);
+ if (ret)
+ return ret;
+
/* Hardware requires that its descriptors must be a multiple of eight */
new_tx_desc_num = ALIGN(param->tx_pending, HNS3_RING_BD_MULTIPLE);
new_rx_desc_num = ALIGN(param->rx_pending, HNS3_RING_BD_MULTIPLE);
@@ -896,6 +961,13 @@ static int hns3_set_ringparam(struct net_device *ndev,
old_rx_desc_num == new_rx_desc_num)
return 0;
+ tmp_rings = hns3_backup_ringparam(priv);
+ if (!tmp_rings) {
+ netdev_err(ndev,
+			"failed to back up ring param: out of memory\n");
+ return -ENOMEM;
+ }
+
netdev_info(ndev,
"Changing Tx/Rx ring depth from %d/%d to %d/%d\n",
old_tx_desc_num, old_rx_desc_num,
@@ -904,22 +976,24 @@ static int hns3_set_ringparam(struct net_device *ndev,
if (if_running)
ndev->netdev_ops->ndo_stop(ndev);
- ret = hns3_uninit_all_ring(priv);
- if (ret)
- return ret;
-
- ret = hns3_change_all_ring_bd_num(priv, new_tx_desc_num,
- new_rx_desc_num);
+ hns3_change_all_ring_bd_num(priv, new_tx_desc_num, new_rx_desc_num);
+ ret = hns3_init_all_ring(priv);
if (ret) {
- ret = hns3_change_all_ring_bd_num(priv, old_tx_desc_num,
- old_rx_desc_num);
- if (ret) {
- netdev_err(ndev,
- "Revert to old bd num fail, ret=%d.\n", ret);
- return ret;
- }
+		netdev_err(ndev, "Change bd num fail (%d), revert to old value\n",
+ ret);
+
+ hns3_change_all_ring_bd_num(priv, old_tx_desc_num,
+ old_rx_desc_num);
+ for (i = 0; i < h->kinfo.num_tqps * 2; i++)
+ memcpy(priv->ring_data[i].ring, &tmp_rings[i],
+ sizeof(struct hns3_enet_ring));
+ } else {
+ for (i = 0; i < h->kinfo.num_tqps * 2; i++)
+ hns3_fini_ring(&tmp_rings[i]);
}
+ kfree(tmp_rings);
+
if (if_running)
ret = ndev->netdev_ops->ndo_open(ndev);
@@ -973,6 +1047,9 @@ static int hns3_nway_reset(struct net_device *netdev)
return -EINVAL;
}
+ netif_dbg(handle, drv, netdev,
+ "nway reset (using %s)\n", phy ? "phy" : "mac");
+
if (phy)
return genphy_restart_aneg(phy);
@@ -1297,6 +1374,9 @@ static int hns3_set_fecparam(struct net_device *netdev,
if (!ops->set_fec)
return -EOPNOTSUPP;
fec_mode = eth_to_loc_fec(fec->fec);
+
+ netif_dbg(handle, drv, netdev, "set fecparam: mode=%u\n", fec_mode);
+
return ops->set_fec(handle, fec_mode);
}
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c
index 22f6acd45d9a..ecf58cfd253d 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c
@@ -103,14 +103,17 @@ static void hclge_cmd_config_regs(struct hclge_cmq_ring *ring)
dma_addr_t dma = ring->desc_dma_addr;
struct hclge_dev *hdev = ring->dev;
struct hclge_hw *hw = &hdev->hw;
+ u32 reg_val;
if (ring->ring_type == HCLGE_TYPE_CSQ) {
hclge_write_dev(hw, HCLGE_NIC_CSQ_BASEADDR_L_REG,
lower_32_bits(dma));
hclge_write_dev(hw, HCLGE_NIC_CSQ_BASEADDR_H_REG,
upper_32_bits(dma));
- hclge_write_dev(hw, HCLGE_NIC_CSQ_DEPTH_REG,
- ring->desc_num >> HCLGE_NIC_CMQ_DESC_NUM_S);
+ reg_val = hclge_read_dev(hw, HCLGE_NIC_CSQ_DEPTH_REG);
+ reg_val &= HCLGE_NIC_SW_RST_RDY;
+ reg_val |= ring->desc_num >> HCLGE_NIC_CMQ_DESC_NUM_S;
+ hclge_write_dev(hw, HCLGE_NIC_CSQ_DEPTH_REG, reg_val);
hclge_write_dev(hw, HCLGE_NIC_CSQ_HEAD_REG, 0);
hclge_write_dev(hw, HCLGE_NIC_CSQ_TAIL_REG, 0);
} else {
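/* Why the read-modify-write above: the CSQ depth register now shares bits
 * with the "software reset ready" flag (bit 16, HCLGE_NIC_SW_RST_RDY,
 * defined later in this patch), so the depth field must be merged in
 * without clobbering that bit.  Generic sketch of the idiom:
 */
static u32 demo_merge_field(u32 old, u32 keep_mask, u32 field)
{
	return (old & keep_mask) | field;	/* preserve keep_mask bits only */
}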
@@ -383,6 +386,23 @@ err_csq:
return ret;
}
+static int hclge_firmware_compat_config(struct hclge_dev *hdev)
+{
+ struct hclge_firmware_compat_cmd *req;
+ struct hclge_desc desc;
+ u32 compat = 0;
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_M7_COMPAT_CFG, false);
+
+ req = (struct hclge_firmware_compat_cmd *)desc.data;
+
+ hnae3_set_bit(compat, HCLGE_LINK_EVENT_REPORT_EN_B, 1);
+ hnae3_set_bit(compat, HCLGE_NCSI_ERROR_REPORT_EN_B, 1);
+ req->compat = cpu_to_le32(compat);
+
+ return hclge_cmd_send(&hdev->hw, &desc, 1);
+}
+
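/* Design note with a tiny worked example: with HCLGE_LINK_EVENT_REPORT_EN_B
 * at bit 0 and HCLGE_NCSI_ERROR_REPORT_EN_B at bit 1, the compat word sent
 * above is BIT(0) | BIT(1) == 0x3 (a little-endian __le32 on the wire).
 * The caller below treats failure as non-fatal, so firmware that predates
 * HCLGE_OPC_M7_COMPAT_CFG still initializes cleanly.
 */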
int hclge_cmd_init(struct hclge_dev *hdev)
{
u32 version;
@@ -419,7 +439,24 @@ int hclge_cmd_init(struct hclge_dev *hdev)
}
hdev->fw_version = version;
- dev_info(&hdev->pdev->dev, "The firmware version is %08x\n", version);
+ dev_info(&hdev->pdev->dev, "The firmware version is %lu.%lu.%lu.%lu\n",
+ hnae3_get_field(version, HNAE3_FW_VERSION_BYTE3_MASK,
+ HNAE3_FW_VERSION_BYTE3_SHIFT),
+ hnae3_get_field(version, HNAE3_FW_VERSION_BYTE2_MASK,
+ HNAE3_FW_VERSION_BYTE2_SHIFT),
+ hnae3_get_field(version, HNAE3_FW_VERSION_BYTE1_MASK,
+ HNAE3_FW_VERSION_BYTE1_SHIFT),
+ hnae3_get_field(version, HNAE3_FW_VERSION_BYTE0_MASK,
+ HNAE3_FW_VERSION_BYTE0_SHIFT));
+
+	/* ask the firmware to enable some features, the driver can work
+	 * without them.
+ */
+ ret = hclge_firmware_compat_config(hdev);
+ if (ret)
+ dev_warn(&hdev->pdev->dev,
+			 "Firmware compatible features not enabled (%d).\n",
+ ret);
return 0;
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h
index 96840d8f3e24..4821fe08b5e4 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h
@@ -86,6 +86,8 @@ enum hclge_opcode_type {
HCLGE_OPC_QUERY_PF_RSRC = 0x0023,
HCLGE_OPC_QUERY_VF_RSRC = 0x0024,
HCLGE_OPC_GET_CFG_PARAM = 0x0025,
+ HCLGE_OPC_PF_RST_DONE = 0x0026,
+ HCLGE_OPC_QUERY_VF_RST_RDY = 0x0027,
HCLGE_OPC_STATS_64_BIT = 0x0030,
HCLGE_OPC_STATS_32_BIT = 0x0031,
@@ -221,6 +223,9 @@ enum hclge_opcode_type {
HCLGE_OPC_MAC_ETHTYPE_ADD = 0x1010,
HCLGE_OPC_MAC_ETHTYPE_REMOVE = 0x1011,
+ /* MAC VLAN commands */
+ HCLGE_OPC_MAC_VLAN_SWITCH_PARAM = 0x1033,
+
/* VLAN commands */
HCLGE_OPC_VLAN_FILTER_CTRL = 0x1100,
HCLGE_OPC_VLAN_FILTER_PF_CFG = 0x1101,
@@ -257,6 +262,7 @@ enum hclge_opcode_type {
/* M7 stats command */
HCLGE_OPC_M7_STATS_BD = 0x7012,
HCLGE_OPC_M7_STATS_INFO = 0x7013,
+ HCLGE_OPC_M7_COMPAT_CFG = 0x701A,
/* SFP command */
HCLGE_OPC_GET_SFP_INFO = 0x7104,
@@ -586,6 +592,12 @@ struct hclge_config_mac_mode_cmd {
u8 rsv[20];
};
+struct hclge_pf_rst_sync_cmd {
+#define HCLGE_PF_RST_ALL_VF_RDY_B 0
+ u8 all_vf_ready;
+ u8 rsv[23];
+};
+
#define HCLGE_CFG_SPEED_S 0
#define HCLGE_CFG_SPEED_M GENMASK(5, 0)
@@ -762,6 +774,31 @@ struct hclge_vlan_filter_vf_cfg_cmd {
u8 vf_bitmap[16];
};
+#define HCLGE_SWITCH_ANTI_SPOOF_B 0U
+#define HCLGE_SWITCH_ALW_LPBK_B 1U
+#define HCLGE_SWITCH_ALW_LCL_LPBK_B 2U
+#define HCLGE_SWITCH_ALW_DST_OVRD_B 3U
+#define HCLGE_SWITCH_NO_MASK 0x0
+#define HCLGE_SWITCH_ANTI_SPOOF_MASK 0xFE
+#define HCLGE_SWITCH_ALW_LPBK_MASK 0xFD
+#define HCLGE_SWITCH_ALW_LCL_LPBK_MASK 0xFB
+#define HCLGE_SWITCH_LW_DST_OVRD_MASK 0xF7
+
+struct hclge_mac_vlan_switch_cmd {
+ u8 roce_sel;
+ u8 rsv1[3];
+ __le32 func_id;
+ u8 switch_param;
+ u8 rsv2[3];
+ u8 param_mask;
+ u8 rsv3[11];
+};
+
+enum hclge_mac_vlan_cfg_sel {
+ HCLGE_MAC_VLAN_NIC_SEL = 0,
+ HCLGE_MAC_VLAN_ROCE_SEL,
+};
+
#define HCLGE_ACCEPT_TAG1_B 0
#define HCLGE_ACCEPT_UNTAG1_B 1
#define HCLGE_PORT_INS_TAG1_EN_B 2
@@ -827,7 +864,7 @@ struct hclge_mac_ethertype_idx_rd_cmd {
u8 flags;
u8 resp_code;
__le16 vlan_tag;
- u8 mac_add[6];
+ u8 mac_addr[6];
__le16 index;
__le16 ethter_type;
__le16 egress_port;
@@ -877,6 +914,13 @@ struct hclge_reset_cmd {
u8 rsv[22];
};
+#define HCLGE_PF_RESET_DONE_BIT BIT(0)
+
+struct hclge_pf_rst_done_cmd {
+ u8 pf_rst_done;
+ u8 rsv[23];
+};
+
#define HCLGE_CMD_SERDES_SERIAL_INNER_LOOP_B BIT(0)
#define HCLGE_CMD_SERDES_PARALLEL_INNER_LOOP_B BIT(2)
#define HCLGE_CMD_SERDES_DONE_B BIT(0)
@@ -906,8 +950,11 @@ struct hclge_serdes_lb_cmd {
#define HCLGE_NIC_CRQ_DEPTH_REG 0x27020
#define HCLGE_NIC_CRQ_TAIL_REG 0x27024
#define HCLGE_NIC_CRQ_HEAD_REG 0x27028
-#define HCLGE_NIC_CMQ_EN_B 16
-#define HCLGE_NIC_CMQ_ENABLE BIT(HCLGE_NIC_CMQ_EN_B)
+
+/* this bit indicates that the driver is ready for hardware reset */
+#define HCLGE_NIC_SW_RST_RDY_B 16
+#define HCLGE_NIC_SW_RST_RDY BIT(HCLGE_NIC_SW_RST_RDY_B)
+
#define HCLGE_NIC_CMQ_DESC_NUM 1024
#define HCLGE_NIC_CMQ_DESC_NUM_S 3
@@ -1009,6 +1056,13 @@ struct hclge_query_ppu_pf_other_int_dfx_cmd {
u8 rsv[4];
};
+#define HCLGE_LINK_EVENT_REPORT_EN_B 0
+#define HCLGE_NCSI_ERROR_REPORT_EN_B 1
+struct hclge_firmware_compat_cmd {
+ __le32 compat;
+ u8 rsv[20];
+};
+
int hclge_cmd_init(struct hclge_dev *hdev);
static inline void hclge_write_reg(void __iomem *base, u32 reg, u32 value)
{
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c
index bac4ce13f6ae..816f92084138 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c
@@ -198,9 +198,32 @@ static int hclge_client_setup_tc(struct hclge_dev *hdev)
return 0;
}
+static int hclge_notify_down_uinit(struct hclge_dev *hdev)
+{
+ int ret;
+
+ ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
+ if (ret)
+ return ret;
+
+ return hclge_notify_client(hdev, HNAE3_UNINIT_CLIENT);
+}
+
+static int hclge_notify_init_up(struct hclge_dev *hdev)
+{
+ int ret;
+
+ ret = hclge_notify_client(hdev, HNAE3_INIT_CLIENT);
+ if (ret)
+ return ret;
+
+ return hclge_notify_client(hdev, HNAE3_UP_CLIENT);
+}
+
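/* Usage sketch for the two helpers above: the DCB reconfiguration paths
 * below now bracket hardware changes with one call on each side instead of
 * four open-coded notifications (flow is illustrative):
 *
 *	ret = hclge_notify_down_uinit(hdev);	// HNAE3_DOWN + HNAE3_UNINIT
 *	if (ret)
 *		return ret;
 *
 *	// ... reprogram TC mapping / scheduler ...
 *
 *	return hclge_notify_init_up(hdev);	// HNAE3_INIT + HNAE3_UP
 */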
static int hclge_ieee_setets(struct hnae3_handle *h, struct ieee_ets *ets)
{
struct hclge_vport *vport = hclge_get_vport(h);
+ struct net_device *netdev = h->kinfo.netdev;
struct hclge_dev *hdev = vport->back;
bool map_changed = false;
u8 num_tc = 0;
@@ -215,11 +238,9 @@ static int hclge_ieee_setets(struct hnae3_handle *h, struct ieee_ets *ets)
return ret;
if (map_changed) {
- ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
- if (ret)
- return ret;
+ netif_dbg(h, drv, netdev, "set ets\n");
- ret = hclge_notify_client(hdev, HNAE3_UNINIT_CLIENT);
+ ret = hclge_notify_down_uinit(hdev);
if (ret)
return ret;
}
@@ -239,11 +260,7 @@ static int hclge_ieee_setets(struct hnae3_handle *h, struct ieee_ets *ets)
if (ret)
goto err_out;
- ret = hclge_notify_client(hdev, HNAE3_INIT_CLIENT);
- if (ret)
- return ret;
-
- ret = hclge_notify_client(hdev, HNAE3_UP_CLIENT);
+ ret = hclge_notify_init_up(hdev);
if (ret)
return ret;
}
@@ -254,10 +271,8 @@ err_out:
if (!map_changed)
return ret;
- if (hclge_notify_client(hdev, HNAE3_INIT_CLIENT))
- return ret;
+ hclge_notify_init_up(hdev);
- hclge_notify_client(hdev, HNAE3_UP_CLIENT);
return ret;
}
@@ -300,6 +315,7 @@ static int hclge_ieee_getpfc(struct hnae3_handle *h, struct ieee_pfc *pfc)
static int hclge_ieee_setpfc(struct hnae3_handle *h, struct ieee_pfc *pfc)
{
struct hclge_vport *vport = hclge_get_vport(h);
+ struct net_device *netdev = h->kinfo.netdev;
struct hclge_dev *hdev = vport->back;
u8 i, j, pfc_map, *prio_tc;
@@ -325,6 +341,10 @@ static int hclge_ieee_setpfc(struct hnae3_handle *h, struct ieee_pfc *pfc)
hdev->tm_info.hw_pfc_map = pfc_map;
hdev->tm_info.pfc_en = pfc->pfc_en;
+ netif_dbg(h, drv, netdev,
+ "set pfc: pfc_en=%u, pfc_map=%u, num_tc=%u\n",
+ pfc->pfc_en, pfc_map, hdev->tm_info.num_tc);
+
hclge_tm_pfc_info_update(hdev);
return hclge_pause_setup_hw(hdev, false);
@@ -345,8 +365,11 @@ static u8 hclge_getdcbx(struct hnae3_handle *h)
static u8 hclge_setdcbx(struct hnae3_handle *h, u8 mode)
{
struct hclge_vport *vport = hclge_get_vport(h);
+ struct net_device *netdev = h->kinfo.netdev;
struct hclge_dev *hdev = vport->back;
+ netif_dbg(h, drv, netdev, "set dcbx: mode=%u\n", mode);
+
/* No support for LLD_MANAGED modes or CEE */
if ((mode & DCB_CAP_DCBX_LLD_MANAGED) ||
(mode & DCB_CAP_DCBX_VER_CEE) ||
@@ -372,11 +395,7 @@ static int hclge_setup_tc(struct hnae3_handle *h, u8 tc, u8 *prio_tc)
if (ret)
return -EINVAL;
- ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
- if (ret)
- return ret;
-
- ret = hclge_notify_client(hdev, HNAE3_UNINIT_CLIENT);
+ ret = hclge_notify_down_uinit(hdev);
if (ret)
return ret;
@@ -398,17 +417,11 @@ static int hclge_setup_tc(struct hnae3_handle *h, u8 tc, u8 *prio_tc)
else
hdev->flag &= ~HCLGE_FLAG_MQPRIO_ENABLE;
- ret = hclge_notify_client(hdev, HNAE3_INIT_CLIENT);
- if (ret)
- return ret;
-
- return hclge_notify_client(hdev, HNAE3_UP_CLIENT);
+ return hclge_notify_init_up(hdev);
err_out:
- if (hclge_notify_client(hdev, HNAE3_INIT_CLIENT))
- return ret;
+ hclge_notify_init_up(hdev);
- hclge_notify_client(hdev, HNAE3_UP_CLIENT);
return ret;
}
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c
index ab625c757a95..1c6b501fb7ca 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c
@@ -4,32 +4,92 @@
#include <linux/device.h>
#include "hclge_debugfs.h"
-#include "hclge_cmd.h"
#include "hclge_main.h"
#include "hclge_tm.h"
#include "hnae3.h"
+static struct hclge_dbg_reg_type_info hclge_dbg_reg_info[] = {
+ { .reg_type = "bios common",
+ .dfx_msg = &hclge_dbg_bios_common_reg[0],
+ .reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_bios_common_reg),
+ .offset = HCLGE_DBG_DFX_BIOS_OFFSET,
+ .cmd = HCLGE_OPC_DFX_BIOS_COMMON_REG } },
+ { .reg_type = "ssu",
+ .dfx_msg = &hclge_dbg_ssu_reg_0[0],
+ .reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_ssu_reg_0),
+ .offset = HCLGE_DBG_DFX_SSU_0_OFFSET,
+ .cmd = HCLGE_OPC_DFX_SSU_REG_0 } },
+ { .reg_type = "ssu",
+ .dfx_msg = &hclge_dbg_ssu_reg_1[0],
+ .reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_ssu_reg_1),
+ .offset = HCLGE_DBG_DFX_SSU_1_OFFSET,
+ .cmd = HCLGE_OPC_DFX_SSU_REG_1 } },
+ { .reg_type = "ssu",
+ .dfx_msg = &hclge_dbg_ssu_reg_2[0],
+ .reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_ssu_reg_2),
+ .offset = HCLGE_DBG_DFX_SSU_2_OFFSET,
+ .cmd = HCLGE_OPC_DFX_SSU_REG_2 } },
+ { .reg_type = "igu egu",
+ .dfx_msg = &hclge_dbg_igu_egu_reg[0],
+ .reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_igu_egu_reg),
+ .offset = HCLGE_DBG_DFX_IGU_OFFSET,
+ .cmd = HCLGE_OPC_DFX_IGU_EGU_REG } },
+ { .reg_type = "rpu",
+ .dfx_msg = &hclge_dbg_rpu_reg_0[0],
+ .reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_rpu_reg_0),
+ .offset = HCLGE_DBG_DFX_RPU_0_OFFSET,
+ .cmd = HCLGE_OPC_DFX_RPU_REG_0 } },
+ { .reg_type = "rpu",
+ .dfx_msg = &hclge_dbg_rpu_reg_1[0],
+ .reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_rpu_reg_1),
+ .offset = HCLGE_DBG_DFX_RPU_1_OFFSET,
+ .cmd = HCLGE_OPC_DFX_RPU_REG_1 } },
+ { .reg_type = "ncsi",
+ .dfx_msg = &hclge_dbg_ncsi_reg[0],
+ .reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_ncsi_reg),
+ .offset = HCLGE_DBG_DFX_NCSI_OFFSET,
+ .cmd = HCLGE_OPC_DFX_NCSI_REG } },
+ { .reg_type = "rtc",
+ .dfx_msg = &hclge_dbg_rtc_reg[0],
+ .reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_rtc_reg),
+ .offset = HCLGE_DBG_DFX_RTC_OFFSET,
+ .cmd = HCLGE_OPC_DFX_RTC_REG } },
+ { .reg_type = "ppp",
+ .dfx_msg = &hclge_dbg_ppp_reg[0],
+ .reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_ppp_reg),
+ .offset = HCLGE_DBG_DFX_PPP_OFFSET,
+ .cmd = HCLGE_OPC_DFX_PPP_REG } },
+ { .reg_type = "rcb",
+ .dfx_msg = &hclge_dbg_rcb_reg[0],
+ .reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_rcb_reg),
+ .offset = HCLGE_DBG_DFX_RCB_OFFSET,
+ .cmd = HCLGE_OPC_DFX_RCB_REG } },
+ { .reg_type = "tqp",
+ .dfx_msg = &hclge_dbg_tqp_reg[0],
+ .reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_tqp_reg),
+ .offset = HCLGE_DBG_DFX_TQP_OFFSET,
+ .cmd = HCLGE_OPC_DFX_TQP_REG } },
+};
+
static int hclge_dbg_get_dfx_bd_num(struct hclge_dev *hdev, int offset)
{
- struct hclge_desc desc[4];
- int ret;
+#define HCLGE_GET_DFX_REG_TYPE_CNT 4
- hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_DFX_BD_NUM, true);
- desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
- hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_DFX_BD_NUM, true);
- desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
- hclge_cmd_setup_basic_desc(&desc[2], HCLGE_OPC_DFX_BD_NUM, true);
- desc[2].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
- hclge_cmd_setup_basic_desc(&desc[3], HCLGE_OPC_DFX_BD_NUM, true);
+ struct hclge_desc desc[HCLGE_GET_DFX_REG_TYPE_CNT];
+ int entries_per_desc;
+ int index;
+ int ret;
- ret = hclge_cmd_send(&hdev->hw, desc, 4);
- if (ret != HCLGE_CMD_EXEC_SUCCESS) {
+ ret = hclge_query_bd_num_cmd_send(hdev, desc);
+ if (ret) {
dev_err(&hdev->pdev->dev,
- "get dfx bdnum fail, status is %d.\n", ret);
+ "get dfx bdnum fail, ret = %d\n", ret);
return ret;
}
- return (int)desc[offset / 6].data[offset % 6];
+ entries_per_desc = ARRAY_SIZE(desc[0].data);
+ index = offset % entries_per_desc;
+ return (int)desc[offset / entries_per_desc].data[index];
}
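/* Index math behind the return above: each command descriptor carries
 * ARRAY_SIZE(desc[0].data) == 6 u32 entries, so BD-number slot `offset`
 * lives at desc[offset / 6].data[offset % 6]; e.g. offset 9 resolves to
 * desc[1].data[3].  The old code hard-coded the 6.
 */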
static int hclge_dbg_cmd_send(struct hclge_dev *hdev,
@@ -50,35 +110,40 @@ static int hclge_dbg_cmd_send(struct hclge_dev *hdev,
}
ret = hclge_cmd_send(&hdev->hw, desc_src, bd_num);
- if (ret) {
+ if (ret)
dev_err(&hdev->pdev->dev,
- "read reg cmd send fail, status is %d.\n", ret);
- return ret;
- }
-
+ "cmd(0x%x) send fail, ret = %d\n", cmd, ret);
return ret;
}
static void hclge_dbg_dump_reg_common(struct hclge_dev *hdev,
- struct hclge_dbg_dfx_message *dfx_message,
- const char *cmd_buf, int msg_num,
- int offset, enum hclge_opcode_type cmd)
+ struct hclge_dbg_reg_type_info *reg_info,
+ const char *cmd_buf)
{
-#define BD_DATA_NUM 6
+#define IDX_OFFSET 1
+ const char *s = &cmd_buf[strlen(reg_info->reg_type) + IDX_OFFSET];
+ struct hclge_dbg_dfx_message *dfx_message = reg_info->dfx_msg;
+ struct hclge_dbg_reg_common_msg *reg_msg = &reg_info->reg_msg;
struct hclge_desc *desc_src;
struct hclge_desc *desc;
+ int entries_per_desc;
int bd_num, buf_len;
+ int index = 0;
+ int min_num;
int ret, i;
- int index;
- int max;
- ret = kstrtouint(cmd_buf, 10, &index);
- index = (ret != 0) ? 0 : index;
+ if (*s) {
+ ret = kstrtouint(s, 0, &index);
+ index = (ret != 0) ? 0 : index;
+ }
- bd_num = hclge_dbg_get_dfx_bd_num(hdev, offset);
- if (bd_num <= 0)
+ bd_num = hclge_dbg_get_dfx_bd_num(hdev, reg_msg->offset);
+ if (bd_num <= 0) {
+ dev_err(&hdev->pdev->dev, "get cmd(%d) bd num(%d) failed\n",
+ reg_msg->offset, bd_num);
return;
+ }
buf_len = sizeof(struct hclge_desc) * bd_num;
desc_src = kzalloc(buf_len, GFP_KERNEL);
@@ -88,22 +153,23 @@ static void hclge_dbg_dump_reg_common(struct hclge_dev *hdev,
}
desc = desc_src;
- ret = hclge_dbg_cmd_send(hdev, desc, index, bd_num, cmd);
- if (ret != HCLGE_CMD_EXEC_SUCCESS) {
+ ret = hclge_dbg_cmd_send(hdev, desc, index, bd_num, reg_msg->cmd);
+ if (ret) {
kfree(desc_src);
return;
}
- max = (bd_num * BD_DATA_NUM) <= msg_num ?
- (bd_num * BD_DATA_NUM) : msg_num;
+ entries_per_desc = ARRAY_SIZE(desc->data);
+ min_num = min_t(int, bd_num * entries_per_desc, reg_msg->msg_num);
desc = desc_src;
- for (i = 0; i < max; i++) {
- ((i > 0) && ((i % BD_DATA_NUM) == 0)) ? desc++ : desc;
+ for (i = 0; i < min_num; i++) {
+ if (i > 0 && (i % entries_per_desc) == 0)
+ desc++;
if (dfx_message->flag)
dev_info(&hdev->pdev->dev, "%s: 0x%x\n",
dfx_message->message,
- desc->data[i % BD_DATA_NUM]);
+ desc->data[i % entries_per_desc]);
dfx_message++;
}
@@ -213,95 +279,25 @@ static void hclge_dbg_dump_dcb(struct hclge_dev *hdev, const char *cmd_buf)
static void hclge_dbg_dump_reg_cmd(struct hclge_dev *hdev, const char *cmd_buf)
{
- int msg_num;
-
- if (strncmp(&cmd_buf[9], "bios common", 11) == 0) {
- msg_num = sizeof(hclge_dbg_bios_common_reg) /
- sizeof(struct hclge_dbg_dfx_message);
- hclge_dbg_dump_reg_common(hdev, hclge_dbg_bios_common_reg,
- &cmd_buf[21], msg_num,
- HCLGE_DBG_DFX_BIOS_OFFSET,
- HCLGE_OPC_DFX_BIOS_COMMON_REG);
- } else if (strncmp(&cmd_buf[9], "ssu", 3) == 0) {
- msg_num = sizeof(hclge_dbg_ssu_reg_0) /
- sizeof(struct hclge_dbg_dfx_message);
- hclge_dbg_dump_reg_common(hdev, hclge_dbg_ssu_reg_0,
- &cmd_buf[13], msg_num,
- HCLGE_DBG_DFX_SSU_0_OFFSET,
- HCLGE_OPC_DFX_SSU_REG_0);
-
- msg_num = sizeof(hclge_dbg_ssu_reg_1) /
- sizeof(struct hclge_dbg_dfx_message);
- hclge_dbg_dump_reg_common(hdev, hclge_dbg_ssu_reg_1,
- &cmd_buf[13], msg_num,
- HCLGE_DBG_DFX_SSU_1_OFFSET,
- HCLGE_OPC_DFX_SSU_REG_1);
-
- msg_num = sizeof(hclge_dbg_ssu_reg_2) /
- sizeof(struct hclge_dbg_dfx_message);
- hclge_dbg_dump_reg_common(hdev, hclge_dbg_ssu_reg_2,
- &cmd_buf[13], msg_num,
- HCLGE_DBG_DFX_SSU_2_OFFSET,
- HCLGE_OPC_DFX_SSU_REG_2);
- } else if (strncmp(&cmd_buf[9], "igu egu", 7) == 0) {
- msg_num = sizeof(hclge_dbg_igu_egu_reg) /
- sizeof(struct hclge_dbg_dfx_message);
- hclge_dbg_dump_reg_common(hdev, hclge_dbg_igu_egu_reg,
- &cmd_buf[17], msg_num,
- HCLGE_DBG_DFX_IGU_OFFSET,
- HCLGE_OPC_DFX_IGU_EGU_REG);
- } else if (strncmp(&cmd_buf[9], "rpu", 3) == 0) {
- msg_num = sizeof(hclge_dbg_rpu_reg_0) /
- sizeof(struct hclge_dbg_dfx_message);
- hclge_dbg_dump_reg_common(hdev, hclge_dbg_rpu_reg_0,
- &cmd_buf[13], msg_num,
- HCLGE_DBG_DFX_RPU_0_OFFSET,
- HCLGE_OPC_DFX_RPU_REG_0);
-
- msg_num = sizeof(hclge_dbg_rpu_reg_1) /
- sizeof(struct hclge_dbg_dfx_message);
- hclge_dbg_dump_reg_common(hdev, hclge_dbg_rpu_reg_1,
- &cmd_buf[13], msg_num,
- HCLGE_DBG_DFX_RPU_1_OFFSET,
- HCLGE_OPC_DFX_RPU_REG_1);
- } else if (strncmp(&cmd_buf[9], "ncsi", 4) == 0) {
- msg_num = sizeof(hclge_dbg_ncsi_reg) /
- sizeof(struct hclge_dbg_dfx_message);
- hclge_dbg_dump_reg_common(hdev, hclge_dbg_ncsi_reg,
- &cmd_buf[14], msg_num,
- HCLGE_DBG_DFX_NCSI_OFFSET,
- HCLGE_OPC_DFX_NCSI_REG);
- } else if (strncmp(&cmd_buf[9], "rtc", 3) == 0) {
- msg_num = sizeof(hclge_dbg_rtc_reg) /
- sizeof(struct hclge_dbg_dfx_message);
- hclge_dbg_dump_reg_common(hdev, hclge_dbg_rtc_reg,
- &cmd_buf[13], msg_num,
- HCLGE_DBG_DFX_RTC_OFFSET,
- HCLGE_OPC_DFX_RTC_REG);
- } else if (strncmp(&cmd_buf[9], "ppp", 3) == 0) {
- msg_num = sizeof(hclge_dbg_ppp_reg) /
- sizeof(struct hclge_dbg_dfx_message);
- hclge_dbg_dump_reg_common(hdev, hclge_dbg_ppp_reg,
- &cmd_buf[13], msg_num,
- HCLGE_DBG_DFX_PPP_OFFSET,
- HCLGE_OPC_DFX_PPP_REG);
- } else if (strncmp(&cmd_buf[9], "rcb", 3) == 0) {
- msg_num = sizeof(hclge_dbg_rcb_reg) /
- sizeof(struct hclge_dbg_dfx_message);
- hclge_dbg_dump_reg_common(hdev, hclge_dbg_rcb_reg,
- &cmd_buf[13], msg_num,
- HCLGE_DBG_DFX_RCB_OFFSET,
- HCLGE_OPC_DFX_RCB_REG);
- } else if (strncmp(&cmd_buf[9], "tqp", 3) == 0) {
- msg_num = sizeof(hclge_dbg_tqp_reg) /
- sizeof(struct hclge_dbg_dfx_message);
- hclge_dbg_dump_reg_common(hdev, hclge_dbg_tqp_reg,
- &cmd_buf[13], msg_num,
- HCLGE_DBG_DFX_TQP_OFFSET,
- HCLGE_OPC_DFX_TQP_REG);
- } else if (strncmp(&cmd_buf[9], "dcb", 3) == 0) {
- hclge_dbg_dump_dcb(hdev, &cmd_buf[13]);
- } else {
+ struct hclge_dbg_reg_type_info *reg_info;
+ bool has_dump = false;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(hclge_dbg_reg_info); i++) {
+ reg_info = &hclge_dbg_reg_info[i];
+ if (!strncmp(cmd_buf, reg_info->reg_type,
+ strlen(reg_info->reg_type))) {
+ hclge_dbg_dump_reg_common(hdev, reg_info, cmd_buf);
+ has_dump = true;
+ }
+ }
+
+ if (strncmp(cmd_buf, "dcb", 3) == 0) {
+ hclge_dbg_dump_dcb(hdev, &cmd_buf[sizeof("dcb")]);
+ has_dump = true;
+ }
+
+ if (!has_dump) {
dev_info(&hdev->pdev->dev, "unknown command\n");
return;
}
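/* With the table-driven dispatch above, supporting another register block
 * is one new hclge_dbg_reg_info[] entry instead of another strncmp branch.
 * Hypothetical entry (these names are placeholders, not real opcodes):
 */
	{ .reg_type = "new block",
	  .dfx_msg = &hclge_dbg_new_block_reg[0],
	  .reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_new_block_reg),
		       .offset = HCLGE_DBG_DFX_NEW_BLOCK_OFFSET,
		       .cmd = HCLGE_OPC_DFX_NEW_BLOCK_REG } },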
@@ -325,11 +321,17 @@ static void hclge_dbg_dump_tc(struct hclge_dev *hdev)
struct hclge_desc desc;
int i, ret;
+ if (!hnae3_dev_dcb_supported(hdev)) {
+ dev_info(&hdev->pdev->dev,
+ "Only DCB-supported dev supports tc\n");
+ return;
+ }
+
hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_ETS_TC_WEIGHT, true);
ret = hclge_cmd_send(&hdev->hw, &desc, 1);
if (ret) {
- dev_err(&hdev->pdev->dev, "dump tc fail, status is %d.\n", ret);
+ dev_err(&hdev->pdev->dev, "dump tc fail, ret = %d\n", ret);
return;
}
@@ -409,6 +411,12 @@ static void hclge_dbg_dump_tm_pg(struct hclge_dev *hdev)
dev_info(&hdev->pdev->dev, "QS_SCH qs_id: %u\n", desc.data[0]);
+ if (!hnae3_dev_dcb_supported(hdev)) {
+ dev_info(&hdev->pdev->dev,
+ "Only DCB-supported dev supports tm mapping\n");
+ return;
+ }
+
cmd = HCLGE_OPC_TM_BP_TO_QSET_MAPPING;
hclge_cmd_setup_basic_desc(&desc, cmd, true);
ret = hclge_cmd_send(&hdev->hw, &desc, 1);
@@ -425,7 +433,7 @@ static void hclge_dbg_dump_tm_pg(struct hclge_dev *hdev)
return;
err_tm_pg_cmd_send:
- dev_err(&hdev->pdev->dev, "dump tm_pg fail(0x%x), status is %d\n",
+ dev_err(&hdev->pdev->dev, "dump tm_pg fail(0x%x), ret = %d\n",
cmd, ret);
}
@@ -537,7 +545,7 @@ static void hclge_dbg_dump_tm(struct hclge_dev *hdev)
return;
err_tm_cmd_send:
- dev_err(&hdev->pdev->dev, "dump tm fail(0x%x), status is %d\n",
+ dev_err(&hdev->pdev->dev, "dump tm fail(0x%x), ret = %d\n",
cmd, ret);
}
@@ -556,7 +564,7 @@ static void hclge_dbg_dump_tm_map(struct hclge_dev *hdev,
int pri_id, ret;
u32 i;
- ret = kstrtouint(&cmd_buf[12], 10, &queue_id);
+ ret = kstrtouint(cmd_buf, 0, &queue_id);
queue_id = (ret != 0) ? 0 : queue_id;
cmd = HCLGE_OPC_TM_NQ_TO_QS_LINK;
@@ -590,6 +598,12 @@ static void hclge_dbg_dump_tm_map(struct hclge_dev *hdev,
dev_info(&hdev->pdev->dev, "%04d | %04d | %02d | %02d\n",
queue_id, qset_id, pri_id, tc_id);
+ if (!hnae3_dev_dcb_supported(hdev)) {
+ dev_info(&hdev->pdev->dev,
+ "Only DCB-supported dev supports tm mapping\n");
+ return;
+ }
+
cmd = HCLGE_OPC_TM_BP_TO_QSET_MAPPING;
bp_to_qs_map_cmd = (struct hclge_bp_to_qs_map_cmd *)desc.data;
for (group_id = 0; group_id < 32; group_id++) {
@@ -620,7 +634,7 @@ static void hclge_dbg_dump_tm_map(struct hclge_dev *hdev,
return;
err_tm_map_cmd_send:
- dev_err(&hdev->pdev->dev, "dump tqp map fail(0x%x), status is %d\n",
+ dev_err(&hdev->pdev->dev, "dump tqp map fail(0x%x), ret = %d\n",
cmd, ret);
}
@@ -634,7 +648,7 @@ static void hclge_dbg_dump_qos_pause_cfg(struct hclge_dev *hdev)
ret = hclge_cmd_send(&hdev->hw, &desc, 1);
if (ret) {
- dev_err(&hdev->pdev->dev, "dump checksum fail, status is %d.\n",
+ dev_err(&hdev->pdev->dev, "dump checksum fail, ret = %d\n",
ret);
return;
}
@@ -658,7 +672,7 @@ static void hclge_dbg_dump_qos_pri_map(struct hclge_dev *hdev)
ret = hclge_cmd_send(&hdev->hw, &desc, 1);
if (ret) {
dev_err(&hdev->pdev->dev,
- "dump qos pri map fail, status is %d.\n", ret);
+ "dump qos pri map fail, ret = %d\n", ret);
return;
}
@@ -715,6 +729,34 @@ static void hclge_dbg_dump_qos_buf_cfg(struct hclge_dev *hdev)
dev_info(&hdev->pdev->dev, "rx_share_buf: 0x%x\n",
rx_buf_cmd->shared_buf);
+ cmd = HCLGE_OPC_RX_COM_WL_ALLOC;
+ hclge_cmd_setup_basic_desc(desc, cmd, true);
+ ret = hclge_cmd_send(&hdev->hw, desc, 1);
+ if (ret)
+ goto err_qos_cmd_send;
+
+ rx_com_wl = (struct hclge_rx_com_wl *)desc[0].data;
+ dev_info(&hdev->pdev->dev, "\n");
+ dev_info(&hdev->pdev->dev, "rx_com_wl: high: 0x%x, low: 0x%x\n",
+ rx_com_wl->com_wl.high, rx_com_wl->com_wl.low);
+
+ cmd = HCLGE_OPC_RX_GBL_PKT_CNT;
+ hclge_cmd_setup_basic_desc(desc, cmd, true);
+ ret = hclge_cmd_send(&hdev->hw, desc, 1);
+ if (ret)
+ goto err_qos_cmd_send;
+
+ rx_packet_cnt = (struct hclge_rx_com_wl *)desc[0].data;
+ dev_info(&hdev->pdev->dev,
+ "rx_global_packet_cnt: high: 0x%x, low: 0x%x\n",
+ rx_packet_cnt->com_wl.high, rx_packet_cnt->com_wl.low);
+ dev_info(&hdev->pdev->dev, "\n");
+
+ if (!hnae3_dev_dcb_supported(hdev)) {
+ dev_info(&hdev->pdev->dev,
+ "Only DCB-supported dev supports rx priv wl\n");
+ return;
+ }
cmd = HCLGE_OPC_RX_PRIV_WL_ALLOC;
hclge_cmd_setup_basic_desc(&desc[0], cmd, true);
desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
@@ -723,7 +765,6 @@ static void hclge_dbg_dump_qos_buf_cfg(struct hclge_dev *hdev)
if (ret)
goto err_qos_cmd_send;
- dev_info(&hdev->pdev->dev, "\n");
rx_priv_wl = (struct hclge_rx_priv_wl_buf *)desc[0].data;
for (i = 0; i < HCLGE_TC_NUM_ONE_DESC; i++)
dev_info(&hdev->pdev->dev,
@@ -733,7 +774,8 @@ static void hclge_dbg_dump_qos_buf_cfg(struct hclge_dev *hdev)
rx_priv_wl = (struct hclge_rx_priv_wl_buf *)desc[1].data;
for (i = 0; i < HCLGE_TC_NUM_ONE_DESC; i++)
dev_info(&hdev->pdev->dev,
- "rx_priv_wl_tc_%d: high: 0x%x, low: 0x%x\n", i + 4,
+ "rx_priv_wl_tc_%d: high: 0x%x, low: 0x%x\n",
+ i + HCLGE_TC_NUM_ONE_DESC,
rx_priv_wl->tc_wl[i].high, rx_priv_wl->tc_wl[i].low);
cmd = HCLGE_OPC_RX_COM_THRD_ALLOC;
@@ -755,37 +797,15 @@ static void hclge_dbg_dump_qos_buf_cfg(struct hclge_dev *hdev)
rx_com_thrd = (struct hclge_rx_com_thrd *)desc[1].data;
for (i = 0; i < HCLGE_TC_NUM_ONE_DESC; i++)
dev_info(&hdev->pdev->dev,
- "rx_com_thrd_tc_%d: high: 0x%x, low: 0x%x\n", i + 4,
+ "rx_com_thrd_tc_%d: high: 0x%x, low: 0x%x\n",
+ i + HCLGE_TC_NUM_ONE_DESC,
rx_com_thrd->com_thrd[i].high,
rx_com_thrd->com_thrd[i].low);
-
- cmd = HCLGE_OPC_RX_COM_WL_ALLOC;
- hclge_cmd_setup_basic_desc(desc, cmd, true);
- ret = hclge_cmd_send(&hdev->hw, desc, 1);
- if (ret)
- goto err_qos_cmd_send;
-
- rx_com_wl = (struct hclge_rx_com_wl *)desc[0].data;
- dev_info(&hdev->pdev->dev, "\n");
- dev_info(&hdev->pdev->dev, "rx_com_wl: high: 0x%x, low: 0x%x\n",
- rx_com_wl->com_wl.high, rx_com_wl->com_wl.low);
-
- cmd = HCLGE_OPC_RX_GBL_PKT_CNT;
- hclge_cmd_setup_basic_desc(desc, cmd, true);
- ret = hclge_cmd_send(&hdev->hw, desc, 1);
- if (ret)
- goto err_qos_cmd_send;
-
- rx_packet_cnt = (struct hclge_rx_com_wl *)desc[0].data;
- dev_info(&hdev->pdev->dev,
- "rx_global_packet_cnt: high: 0x%x, low: 0x%x\n",
- rx_packet_cnt->com_wl.high, rx_packet_cnt->com_wl.low);
-
return;
err_qos_cmd_send:
dev_err(&hdev->pdev->dev,
- "dump qos buf cfg fail(0x%x), status is %d\n", cmd, ret);
+ "dump qos buf cfg fail(0x%x), ret = %d\n", cmd, ret);
}
static void hclge_dbg_dump_mng_table(struct hclge_dev *hdev)
@@ -825,9 +845,9 @@ static void hclge_dbg_dump_mng_table(struct hclge_dev *hdev)
memset(printf_buf, 0, HCLGE_DBG_BUF_LEN);
snprintf(printf_buf, HCLGE_DBG_BUF_LEN,
"%02u |%02x:%02x:%02x:%02x:%02x:%02x|",
- req0->index, req0->mac_add[0], req0->mac_add[1],
- req0->mac_add[2], req0->mac_add[3], req0->mac_add[4],
- req0->mac_add[5]);
+ req0->index, req0->mac_addr[0], req0->mac_addr[1],
+ req0->mac_addr[2], req0->mac_addr[3],
+ req0->mac_addr[4], req0->mac_addr[5]);
snprintf(printf_buf + strlen(printf_buf),
HCLGE_DBG_BUF_LEN - strlen(printf_buf),
@@ -883,14 +903,17 @@ static void hclge_dbg_fd_tcam_read(struct hclge_dev *hdev, u8 stage,
dev_info(&hdev->pdev->dev, " read result tcam key %s(%u):\n",
sel_x ? "x" : "y", loc);
+ /* tcam_data0 ~ tcam_data1 */
req = (u32 *)req1->tcam_data;
for (i = 0; i < 2; i++)
dev_info(&hdev->pdev->dev, "%08x\n", *req++);
+ /* tcam_data2 ~ tcam_data7 */
req = (u32 *)req2->tcam_data;
for (i = 0; i < 6; i++)
dev_info(&hdev->pdev->dev, "%08x\n", *req++);
+ /* tcam_data8 ~ tcam_data12 */
req = (u32 *)req3->tcam_data;
for (i = 0; i < 5; i++)
dev_info(&hdev->pdev->dev, "%08x\n", *req++);
@@ -940,7 +963,7 @@ void hclge_dbg_get_m7_stats_info(struct hclge_dev *hdev)
ret = hclge_cmd_send(&hdev->hw, &desc, 1);
if (ret) {
dev_err(&hdev->pdev->dev,
- "get firmware statistics bd number failed, ret=%d\n",
+ "get firmware statistics bd number failed, ret = %d\n",
ret);
return;
}
@@ -961,7 +984,7 @@ void hclge_dbg_get_m7_stats_info(struct hclge_dev *hdev)
if (ret) {
kfree(desc_src);
dev_err(&hdev->pdev->dev,
- "get firmware statistics failed, ret=%d\n", ret);
+ "get firmware statistics failed, ret = %d\n", ret);
return;
}
@@ -981,6 +1004,33 @@ void hclge_dbg_get_m7_stats_info(struct hclge_dev *hdev)
kfree(desc_src);
}
+#define HCLGE_CMD_NCL_CONFIG_BD_NUM 5
+
+static void hclge_ncl_config_data_print(struct hclge_dev *hdev,
+ struct hclge_desc *desc, int *offset,
+ int *length)
+{
+#define HCLGE_CMD_DATA_NUM 6
+
+ int i;
+ int j;
+
+ for (i = 0; i < HCLGE_CMD_NCL_CONFIG_BD_NUM; i++) {
+ for (j = 0; j < HCLGE_CMD_DATA_NUM; j++) {
+ if (i == 0 && j == 0)
+ continue;
+
+ dev_info(&hdev->pdev->dev, "0x%04x | 0x%08x\n",
+ *offset,
+ le32_to_cpu(desc[i].data[j]));
+ *offset += sizeof(u32);
+ *length -= sizeof(u32);
+ if (*length <= 0)
+ return;
+ }
+ }
+}
+
/* hclge_dbg_dump_ncl_config: print specified range of NCL_CONFIG file
* @hdev: pointer to struct hclge_dev
* @cmd_buf: string that contains offset and length
@@ -990,17 +1040,13 @@ static void hclge_dbg_dump_ncl_config(struct hclge_dev *hdev,
{
#define HCLGE_MAX_NCL_CONFIG_OFFSET 4096
#define HCLGE_MAX_NCL_CONFIG_LENGTH (20 + 24 * 4)
-#define HCLGE_CMD_DATA_NUM 6
- struct hclge_desc desc[5];
- u32 byte_offset;
- int bd_num = 5;
+ struct hclge_desc desc[HCLGE_CMD_NCL_CONFIG_BD_NUM];
+ int bd_num = HCLGE_CMD_NCL_CONFIG_BD_NUM;
int offset;
int length;
int data0;
int ret;
- int i;
- int j;
ret = sscanf(cmd_buf, "%x %x", &offset, &length);
if (ret != 2 || offset >= HCLGE_MAX_NCL_CONFIG_OFFSET ||
@@ -1026,22 +1072,7 @@ static void hclge_dbg_dump_ncl_config(struct hclge_dev *hdev,
if (ret)
return;
- byte_offset = offset;
- for (i = 0; i < bd_num; i++) {
- for (j = 0; j < HCLGE_CMD_DATA_NUM; j++) {
- if (i == 0 && j == 0)
- continue;
-
- dev_info(&hdev->pdev->dev, "0x%04x | 0x%08x\n",
- byte_offset,
- le32_to_cpu(desc[i].data[j]));
- byte_offset += sizeof(u32);
- length -= sizeof(u32);
- if (length <= 0)
- return;
- }
- }
- offset += HCLGE_MAX_NCL_CONFIG_LENGTH;
+ hclge_ncl_config_data_print(hdev, desc, &offset, &length);
}
}
@@ -1067,6 +1098,9 @@ static void hclge_dbg_dump_mac_tnl_status(struct hclge_dev *hdev)
int hclge_dbg_run_cmd(struct hnae3_handle *handle, const char *cmd_buf)
{
+#define DUMP_REG "dump reg"
+#define DUMP_TM_MAP "dump tm map"
+
struct hclge_vport *vport = hclge_get_vport(handle);
struct hclge_dev *hdev = vport->back;
@@ -1074,8 +1108,8 @@ int hclge_dbg_run_cmd(struct hnae3_handle *handle, const char *cmd_buf)
hclge_dbg_fd_tcam(hdev);
} else if (strncmp(cmd_buf, "dump tc", 7) == 0) {
hclge_dbg_dump_tc(hdev);
- } else if (strncmp(cmd_buf, "dump tm map", 11) == 0) {
- hclge_dbg_dump_tm_map(hdev, cmd_buf);
+ } else if (strncmp(cmd_buf, DUMP_TM_MAP, strlen(DUMP_TM_MAP)) == 0) {
+ hclge_dbg_dump_tm_map(hdev, &cmd_buf[sizeof(DUMP_TM_MAP)]);
} else if (strncmp(cmd_buf, "dump tm", 7) == 0) {
hclge_dbg_dump_tm(hdev);
} else if (strncmp(cmd_buf, "dump qos pause cfg", 18) == 0) {
@@ -1086,8 +1120,8 @@ int hclge_dbg_run_cmd(struct hnae3_handle *handle, const char *cmd_buf)
hclge_dbg_dump_qos_buf_cfg(hdev);
} else if (strncmp(cmd_buf, "dump mng tbl", 12) == 0) {
hclge_dbg_dump_mng_table(hdev);
- } else if (strncmp(cmd_buf, "dump reg", 8) == 0) {
- hclge_dbg_dump_reg_cmd(hdev, cmd_buf);
+ } else if (strncmp(cmd_buf, DUMP_REG, strlen(DUMP_REG)) == 0) {
+ hclge_dbg_dump_reg_cmd(hdev, &cmd_buf[sizeof(DUMP_REG)]);
} else if (strncmp(cmd_buf, "dump reset info", 15) == 0) {
hclge_dbg_dump_rst_info(hdev);
} else if (strncmp(cmd_buf, "dump m7 info", 12) == 0) {
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.h
index d055fda41775..38b79321c4c4 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.h
@@ -4,6 +4,9 @@
#ifndef __HCLGE_DEBUGFS_H
#define __HCLGE_DEBUGFS_H
+#include <linux/etherdevice.h>
+#include "hclge_cmd.h"
+
#define HCLGE_DBG_BUF_LEN 256
#define HCLGE_DBG_MNG_TBL_MAX 64
@@ -63,9 +66,23 @@ struct hclge_dbg_bitmap_cmd {
};
};
+struct hclge_dbg_reg_common_msg {
+ int msg_num;
+ int offset;
+ enum hclge_opcode_type cmd;
+};
+
+#define HCLGE_DBG_MAX_DFX_MSG_LEN 60
struct hclge_dbg_dfx_message {
int flag;
- char message[60];
+ char message[HCLGE_DBG_MAX_DFX_MSG_LEN];
+};
+
+#define HCLGE_DBG_MAC_REG_TYPE_LEN 32
+struct hclge_dbg_reg_type_info {
+ const char *reg_type;
+ struct hclge_dbg_dfx_message *dfx_msg;
+ struct hclge_dbg_reg_common_msg reg_msg;
};
#pragma pack()
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c
index 0a7243825e7b..58c6231aaa00 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c
@@ -637,8 +637,8 @@ static void hclge_log_error(struct device *dev, char *reg,
{
while (err->msg) {
if (err->int_msk & err_sts) {
- dev_warn(dev, "%s %s found [error status=0x%x]\n",
- reg, err->msg, err_sts);
+ dev_err(dev, "%s %s found [error status=0x%x]\n",
+ reg, err->msg, err_sts);
if (err->reset_level &&
err->reset_level != HNAE3_NONE_RESET)
set_bit(err->reset_level, reset_requests);
@@ -652,16 +652,11 @@ static void hclge_log_error(struct device *dev, char *reg,
* @desc: descriptor for describing the command
* @cmd: command opcode
* @flag: flag for extended command structure
- * @w_num: offset for setting the read interrupt type.
- * @int_type: select which type of the interrupt for which the error
- * info will be read(RAS-CE/RAS-NFE/RAS-FE etc).
*
 * This function queries the error info from hw register(s) using a command
*/
static int hclge_cmd_query_error(struct hclge_dev *hdev,
- struct hclge_desc *desc, u32 cmd,
- u16 flag, u8 w_num,
- enum hclge_err_int_type int_type)
+ struct hclge_desc *desc, u32 cmd, u16 flag)
{
struct device *dev = &hdev->pdev->dev;
int desc_num = 1;
@@ -673,8 +668,6 @@ static int hclge_cmd_query_error(struct hclge_dev *hdev,
hclge_cmd_setup_basic_desc(&desc[1], cmd, true);
desc_num = 2;
}
- if (w_num)
- desc[0].data[w_num] = cpu_to_le32(int_type);
ret = hclge_cmd_send(&hdev->hw, &desc[0], desc_num);
if (ret)
@@ -872,8 +865,7 @@ static int hclge_config_tm_hw_err_int(struct hclge_dev *hdev, bool en)
}
/* configure TM QCN hw errors */
- ret = hclge_cmd_query_error(hdev, &desc, HCLGE_TM_QCN_MEM_INT_CFG,
- 0, 0, 0);
+ ret = hclge_cmd_query_error(hdev, &desc, HCLGE_TM_QCN_MEM_INT_CFG, 0);
if (ret) {
dev_err(dev, "fail(%d) to read TM QCN CFG status\n", ret);
return ret;
@@ -938,32 +930,44 @@ static int hclge_config_ppu_error_interrupts(struct hclge_dev *hdev, u32 cmd,
/* configure PPU error interrupts */
if (cmd == HCLGE_PPU_MPF_ECC_INT_CMD) {
hclge_cmd_setup_basic_desc(&desc[0], cmd, false);
- desc[0].flag |= HCLGE_CMD_FLAG_NEXT;
+ desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
hclge_cmd_setup_basic_desc(&desc[1], cmd, false);
if (en) {
- desc[0].data[0] = HCLGE_PPU_MPF_ABNORMAL_INT0_EN;
- desc[0].data[1] = HCLGE_PPU_MPF_ABNORMAL_INT1_EN;
- desc[1].data[3] = HCLGE_PPU_MPF_ABNORMAL_INT3_EN;
- desc[1].data[4] = HCLGE_PPU_MPF_ABNORMAL_INT2_EN;
+ desc[0].data[0] =
+ cpu_to_le32(HCLGE_PPU_MPF_ABNORMAL_INT0_EN);
+ desc[0].data[1] =
+ cpu_to_le32(HCLGE_PPU_MPF_ABNORMAL_INT1_EN);
+ desc[1].data[3] =
+ cpu_to_le32(HCLGE_PPU_MPF_ABNORMAL_INT3_EN);
+ desc[1].data[4] =
+ cpu_to_le32(HCLGE_PPU_MPF_ABNORMAL_INT2_EN);
}
- desc[1].data[0] = HCLGE_PPU_MPF_ABNORMAL_INT0_EN_MASK;
- desc[1].data[1] = HCLGE_PPU_MPF_ABNORMAL_INT1_EN_MASK;
- desc[1].data[2] = HCLGE_PPU_MPF_ABNORMAL_INT2_EN_MASK;
- desc[1].data[3] |= HCLGE_PPU_MPF_ABNORMAL_INT3_EN_MASK;
+ desc[1].data[0] =
+ cpu_to_le32(HCLGE_PPU_MPF_ABNORMAL_INT0_EN_MASK);
+ desc[1].data[1] =
+ cpu_to_le32(HCLGE_PPU_MPF_ABNORMAL_INT1_EN_MASK);
+ desc[1].data[2] =
+ cpu_to_le32(HCLGE_PPU_MPF_ABNORMAL_INT2_EN_MASK);
+ desc[1].data[3] |=
+ cpu_to_le32(HCLGE_PPU_MPF_ABNORMAL_INT3_EN_MASK);
desc_num = 2;
} else if (cmd == HCLGE_PPU_MPF_OTHER_INT_CMD) {
hclge_cmd_setup_basic_desc(&desc[0], cmd, false);
if (en)
- desc[0].data[0] = HCLGE_PPU_MPF_ABNORMAL_INT2_EN2;
+ desc[0].data[0] =
+ cpu_to_le32(HCLGE_PPU_MPF_ABNORMAL_INT2_EN2);
- desc[0].data[2] = HCLGE_PPU_MPF_ABNORMAL_INT2_EN2_MASK;
+ desc[0].data[2] =
+ cpu_to_le32(HCLGE_PPU_MPF_ABNORMAL_INT2_EN2_MASK);
} else if (cmd == HCLGE_PPU_PF_OTHER_INT_CMD) {
hclge_cmd_setup_basic_desc(&desc[0], cmd, false);
if (en)
- desc[0].data[0] = HCLGE_PPU_PF_ABNORMAL_INT_EN;
+ desc[0].data[0] =
+ cpu_to_le32(HCLGE_PPU_PF_ABNORMAL_INT_EN);
- desc[0].data[2] = HCLGE_PPU_PF_ABNORMAL_INT_EN_MASK;
+ desc[0].data[2] =
+ cpu_to_le32(HCLGE_PPU_PF_ABNORMAL_INT_EN_MASK);
} else {
dev_err(dev, "Invalid cmd to configure PPU error interrupts\n");
return -EINVAL;
@@ -1171,8 +1175,8 @@ static int hclge_handle_mpf_ras_error(struct hclge_dev *hdev,
status = le32_to_cpu(*(desc_data + 3)) & BIT(0);
if (status) {
- dev_warn(dev, "SSU_ECC_MULTI_BIT_INT_1 ssu_mem32_ecc_mbit_err found [error status=0x%x]\n",
- status);
+ dev_err(dev, "SSU_ECC_MULTI_BIT_INT_1 ssu_mem32_ecc_mbit_err found [error status=0x%x]\n",
+ status);
set_bit(HNAE3_GLOBAL_RESET, &ae_dev->hw_err_reset_req);
}
@@ -1208,8 +1212,8 @@ static int hclge_handle_mpf_ras_error(struct hclge_dev *hdev,
desc_data = (__le32 *)&desc[5];
status = le32_to_cpu(*(desc_data + 1));
if (status) {
- dev_warn(dev, "PPU_MPF_ABNORMAL_INT_ST1 %s found\n",
- "rpu_rx_pkt_ecc_mbit_err");
+ dev_err(dev,
+ "PPU_MPF_ABNORMAL_INT_ST1 rpu_rx_pkt_ecc_mbit_err found\n");
set_bit(HNAE3_GLOBAL_RESET, &ae_dev->hw_err_reset_req);
}
@@ -1321,10 +1325,12 @@ static int hclge_handle_pf_ras_error(struct hclge_dev *hdev,
/* log PPU(RCB) errors */
desc_data = (__le32 *)&desc[3];
status = le32_to_cpu(*desc_data) & HCLGE_PPU_PF_INT_RAS_MASK;
- if (status)
+ if (status) {
hclge_log_error(dev, "PPU_PF_ABNORMAL_INT_ST0",
&hclge_ppu_pf_abnormal_int[0], status,
&ae_dev->hw_err_reset_req);
+ hclge_report_hw_error(hdev, HNAE3_PPU_POISON_ERROR);
+ }
/* clear all PF RAS errors */
hclge_cmd_reuse_desc(&desc[0], false);
@@ -1387,17 +1393,17 @@ static int hclge_log_rocee_axi_error(struct hclge_dev *hdev)
return ret;
}
- dev_info(dev, "AXI1: %08X %08X %08X %08X %08X %08X\n",
- le32_to_cpu(desc[0].data[0]), le32_to_cpu(desc[0].data[1]),
- le32_to_cpu(desc[0].data[2]), le32_to_cpu(desc[0].data[3]),
- le32_to_cpu(desc[0].data[4]), le32_to_cpu(desc[0].data[5]));
- dev_info(dev, "AXI2: %08X %08X %08X %08X %08X %08X\n",
- le32_to_cpu(desc[1].data[0]), le32_to_cpu(desc[1].data[1]),
- le32_to_cpu(desc[1].data[2]), le32_to_cpu(desc[1].data[3]),
- le32_to_cpu(desc[1].data[4]), le32_to_cpu(desc[1].data[5]));
- dev_info(dev, "AXI3: %08X %08X %08X %08X\n",
- le32_to_cpu(desc[2].data[0]), le32_to_cpu(desc[2].data[1]),
- le32_to_cpu(desc[2].data[2]), le32_to_cpu(desc[2].data[3]));
+ dev_err(dev, "AXI1: %08X %08X %08X %08X %08X %08X\n",
+ le32_to_cpu(desc[0].data[0]), le32_to_cpu(desc[0].data[1]),
+ le32_to_cpu(desc[0].data[2]), le32_to_cpu(desc[0].data[3]),
+ le32_to_cpu(desc[0].data[4]), le32_to_cpu(desc[0].data[5]));
+ dev_err(dev, "AXI2: %08X %08X %08X %08X %08X %08X\n",
+ le32_to_cpu(desc[1].data[0]), le32_to_cpu(desc[1].data[1]),
+ le32_to_cpu(desc[1].data[2]), le32_to_cpu(desc[1].data[3]),
+ le32_to_cpu(desc[1].data[4]), le32_to_cpu(desc[1].data[5]));
+ dev_err(dev, "AXI3: %08X %08X %08X %08X\n",
+ le32_to_cpu(desc[2].data[0]), le32_to_cpu(desc[2].data[1]),
+ le32_to_cpu(desc[2].data[2]), le32_to_cpu(desc[2].data[3]));
return 0;
}
@@ -1410,18 +1416,18 @@ static int hclge_log_rocee_ecc_error(struct hclge_dev *hdev)
ret = hclge_cmd_query_error(hdev, &desc[0],
HCLGE_QUERY_ROCEE_ECC_RAS_INFO_CMD,
- HCLGE_CMD_FLAG_NEXT, 0, 0);
+ HCLGE_CMD_FLAG_NEXT);
if (ret) {
dev_err(dev, "failed(%d) to query ROCEE ECC error sts\n", ret);
return ret;
}
- dev_info(dev, "ECC1: %08X %08X %08X %08X %08X %08X\n",
- le32_to_cpu(desc[0].data[0]), le32_to_cpu(desc[0].data[1]),
- le32_to_cpu(desc[0].data[2]), le32_to_cpu(desc[0].data[3]),
- le32_to_cpu(desc[0].data[4]), le32_to_cpu(desc[0].data[5]));
- dev_info(dev, "ECC2: %08X %08X %08X\n", le32_to_cpu(desc[1].data[0]),
- le32_to_cpu(desc[1].data[1]), le32_to_cpu(desc[1].data[2]));
+ dev_err(dev, "ECC1: %08X %08X %08X %08X %08X %08X\n",
+ le32_to_cpu(desc[0].data[0]), le32_to_cpu(desc[0].data[1]),
+ le32_to_cpu(desc[0].data[2]), le32_to_cpu(desc[0].data[3]),
+ le32_to_cpu(desc[0].data[4]), le32_to_cpu(desc[0].data[5]));
+ dev_err(dev, "ECC2: %08X %08X %08X\n", le32_to_cpu(desc[1].data[0]),
+ le32_to_cpu(desc[1].data[1]), le32_to_cpu(desc[1].data[2]));
return 0;
}
@@ -1434,7 +1440,7 @@ static int hclge_log_rocee_ovf_error(struct hclge_dev *hdev)
/* read overflow error status */
ret = hclge_cmd_query_error(hdev, &desc[0], HCLGE_ROCEE_PF_RAS_INT_CMD,
- 0, 0, 0);
+ 0);
if (ret) {
dev_err(dev, "failed(%d) to query ROCEE OVF error sts\n", ret);
return ret;
@@ -1450,9 +1456,9 @@ static int hclge_log_rocee_ovf_error(struct hclge_dev *hdev)
le32_to_cpu(desc[0].data[0]);
while (err->msg) {
if (err->int_msk == err_sts) {
- dev_warn(dev, "%s [error status=0x%x] found\n",
- err->msg,
- le32_to_cpu(desc[0].data[0]));
+ dev_err(dev, "%s [error status=0x%x] found\n",
+ err->msg,
+ le32_to_cpu(desc[0].data[0]));
break;
}
err++;
@@ -1460,13 +1466,13 @@ static int hclge_log_rocee_ovf_error(struct hclge_dev *hdev)
}
if (le32_to_cpu(desc[0].data[1]) & HCLGE_ROCEE_OVF_ERR_INT_MASK) {
- dev_warn(dev, "ROCEE TSP OVF [error status=0x%x] found\n",
- le32_to_cpu(desc[0].data[1]));
+ dev_err(dev, "ROCEE TSP OVF [error status=0x%x] found\n",
+ le32_to_cpu(desc[0].data[1]));
}
if (le32_to_cpu(desc[0].data[2]) & HCLGE_ROCEE_OVF_ERR_INT_MASK) {
- dev_warn(dev, "ROCEE SCC OVF [error status=0x%x] found\n",
- le32_to_cpu(desc[0].data[2]));
+ dev_err(dev, "ROCEE SCC OVF [error status=0x%x] found\n",
+ le32_to_cpu(desc[0].data[2]));
}
return 0;
@@ -1483,8 +1489,7 @@ hclge_log_and_clear_rocee_ras_error(struct hclge_dev *hdev)
/* read RAS error interrupt status */
ret = hclge_cmd_query_error(hdev, &desc[0],
- HCLGE_QUERY_CLEAR_ROCEE_RAS_INT,
- 0, 0, 0);
+ HCLGE_QUERY_CLEAR_ROCEE_RAS_INT, 0);
if (ret) {
dev_err(dev, "failed(%d) to query ROCEE RAS INT SRC\n", ret);
/* reset everything for now */
@@ -1495,10 +1500,10 @@ hclge_log_and_clear_rocee_ras_error(struct hclge_dev *hdev)
if (status & HCLGE_ROCEE_AXI_ERR_INT_MASK) {
if (status & HCLGE_ROCEE_RERR_INT_MASK)
- dev_warn(dev, "ROCEE RAS AXI rresp error\n");
+ dev_err(dev, "ROCEE RAS AXI rresp error\n");
if (status & HCLGE_ROCEE_BERR_INT_MASK)
- dev_warn(dev, "ROCEE RAS AXI bresp error\n");
+ dev_err(dev, "ROCEE RAS AXI bresp error\n");
reset_type = HNAE3_FUNC_RESET;
@@ -1508,7 +1513,7 @@ hclge_log_and_clear_rocee_ras_error(struct hclge_dev *hdev)
}
if (status & HCLGE_ROCEE_ECC_INT_MASK) {
- dev_warn(dev, "ROCEE RAS 2bit ECC error\n");
+ dev_err(dev, "ROCEE RAS 2bit ECC error\n");
reset_type = HNAE3_GLOBAL_RESET;
ret = hclge_log_rocee_ecc_error(hdev);
@@ -1566,8 +1571,8 @@ int hclge_config_rocee_ras_interrupt(struct hclge_dev *hdev, bool en)
static void hclge_handle_rocee_ras_error(struct hnae3_ae_dev *ae_dev)
{
- enum hnae3_reset_type reset_type = HNAE3_NONE_RESET;
struct hclge_dev *hdev = ae_dev->priv;
+ enum hnae3_reset_type reset_type;
if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
hdev->pdev->revision < 0x21)
@@ -1649,16 +1654,16 @@ pci_ers_result_t hclge_handle_hw_ras_error(struct hnae3_ae_dev *ae_dev)
/* Handling Non-fatal HNS RAS errors */
if (status & HCLGE_RAS_REG_NFE_MASK) {
- dev_warn(dev,
- "HNS Non-Fatal RAS error(status=0x%x) identified\n",
- status);
+ dev_err(dev,
+ "HNS Non-Fatal RAS error(status=0x%x) identified\n",
+ status);
hclge_handle_all_ras_errors(hdev);
}
/* Handling Non-fatal Rocee RAS errors */
if (hdev->pdev->revision >= 0x21 &&
status & HCLGE_RAS_REG_ROCEE_ERR_MASK) {
- dev_warn(dev, "ROCEE Non-Fatal RAS error identified\n");
+ dev_err(dev, "ROCEE Non-Fatal RAS error identified\n");
hclge_handle_rocee_ras_error(ae_dev);
}
@@ -1737,8 +1742,8 @@ static void hclge_handle_over_8bd_err(struct hclge_dev *hdev,
return;
}
- dev_warn(dev, "PPU_PF_ABNORMAL_INT_ST over_8bd_no_fe found, vf_id(%d), queue_id(%d)\n",
- vf_id, q_id);
+ dev_err(dev, "PPU_PF_ABNORMAL_INT_ST over_8bd_no_fe found, vf_id(%u), queue_id(%u)\n",
+ vf_id, q_id);
if (vf_id) {
if (vf_id >= hdev->num_alloc_vport) {
@@ -1755,8 +1760,8 @@ static void hclge_handle_over_8bd_err(struct hclge_dev *hdev,
ret = hclge_inform_reset_assert_to_vf(&hdev->vport[vf_id]);
if (ret)
- dev_warn(dev, "inform reset to vf(%d) failed %d!\n",
- hdev->vport->vport_id, ret);
+ dev_err(dev, "inform reset to vf(%u) failed %d!\n",
+ hdev->vport->vport_id, ret);
} else {
set_bit(HNAE3_FUNC_RESET, reset_requests);
}
@@ -1802,8 +1807,8 @@ static int hclge_handle_mpf_msix_error(struct hclge_dev *hdev,
status = le32_to_cpu(*(desc_data + 2)) &
HCLGE_PPU_MPF_INT_ST2_MSIX_MASK;
if (status)
- dev_warn(dev, "PPU_MPF_ABNORMAL_INT_ST2 rx_q_search_miss found [dfx status=0x%x\n]",
- status);
+ dev_err(dev, "PPU_MPF_ABNORMAL_INT_ST2 rx_q_search_miss found [dfx status=0x%x\n]",
+ status);
/* clear all main PF MSIx errors */
ret = hclge_clear_hw_msix_error(hdev, desc, true, mpf_bd_num);
@@ -1997,7 +2002,7 @@ void hclge_handle_all_hns_hw_errors(struct hnae3_ae_dev *ae_dev)
/* Handle Non-fatal HNS RAS errors */
if (status & HCLGE_RAS_REG_NFE_MASK) {
- dev_warn(dev, "HNS hw error(RAS) identified during init\n");
+ dev_err(dev, "HNS hw error(RAS) identified during init\n");
hclge_handle_all_ras_errors(hdev);
}
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.h
index 7ea8bb28a0cb..876fd81ad2f1 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.h
@@ -5,6 +5,7 @@
#define __HCLGE_ERR_H
#include "hclge_main.h"
+#include "hnae3.h"
#define HCLGE_MPF_RAS_INT_MIN_BD_NUM 10
#define HCLGE_PF_RAS_INT_MIN_BD_NUM 4
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
index 3fde5471e1c0..2b65f2799846 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
@@ -35,6 +35,25 @@
#define BUF_RESERVE_PERCENT 90
#define HCLGE_RESET_MAX_FAIL_CNT 5
+#define HCLGE_RESET_SYNC_TIME 100
+#define HCLGE_PF_RESET_SYNC_TIME 20
+#define HCLGE_PF_RESET_SYNC_CNT 1500
+
+/* Get DFX BD number offset */
+#define HCLGE_DFX_BIOS_BD_OFFSET 1
+#define HCLGE_DFX_SSU_0_BD_OFFSET 2
+#define HCLGE_DFX_SSU_1_BD_OFFSET 3
+#define HCLGE_DFX_IGU_BD_OFFSET 4
+#define HCLGE_DFX_RPU_0_BD_OFFSET 5
+#define HCLGE_DFX_RPU_1_BD_OFFSET 6
+#define HCLGE_DFX_NCSI_BD_OFFSET 7
+#define HCLGE_DFX_RTC_BD_OFFSET 8
+#define HCLGE_DFX_PPP_BD_OFFSET 9
+#define HCLGE_DFX_RCB_BD_OFFSET 10
+#define HCLGE_DFX_TQP_BD_OFFSET 11
+#define HCLGE_DFX_SSU_2_BD_OFFSET 12
+
+#define HCLGE_LINK_STATUS_MS 10
static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps);
static int hclge_init_vlan_config(struct hclge_dev *hdev);
@@ -317,6 +336,80 @@ static const u8 hclge_hash_key[] = {
0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA
};
+static const u32 hclge_dfx_bd_offset_list[] = {
+ HCLGE_DFX_BIOS_BD_OFFSET,
+ HCLGE_DFX_SSU_0_BD_OFFSET,
+ HCLGE_DFX_SSU_1_BD_OFFSET,
+ HCLGE_DFX_IGU_BD_OFFSET,
+ HCLGE_DFX_RPU_0_BD_OFFSET,
+ HCLGE_DFX_RPU_1_BD_OFFSET,
+ HCLGE_DFX_NCSI_BD_OFFSET,
+ HCLGE_DFX_RTC_BD_OFFSET,
+ HCLGE_DFX_PPP_BD_OFFSET,
+ HCLGE_DFX_RCB_BD_OFFSET,
+ HCLGE_DFX_TQP_BD_OFFSET,
+ HCLGE_DFX_SSU_2_BD_OFFSET
+};
+
+static const enum hclge_opcode_type hclge_dfx_reg_opcode_list[] = {
+ HCLGE_OPC_DFX_BIOS_COMMON_REG,
+ HCLGE_OPC_DFX_SSU_REG_0,
+ HCLGE_OPC_DFX_SSU_REG_1,
+ HCLGE_OPC_DFX_IGU_EGU_REG,
+ HCLGE_OPC_DFX_RPU_REG_0,
+ HCLGE_OPC_DFX_RPU_REG_1,
+ HCLGE_OPC_DFX_NCSI_REG,
+ HCLGE_OPC_DFX_RTC_REG,
+ HCLGE_OPC_DFX_PPP_REG,
+ HCLGE_OPC_DFX_RCB_REG,
+ HCLGE_OPC_DFX_TQP_REG,
+ HCLGE_OPC_DFX_SSU_REG_2
+};
+
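+/* Flow director key layout tables: each entry is { field id, field
+ * width in bits }.
+ */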
+static const struct key_info meta_data_key_info[] = {
+ { PACKET_TYPE_ID, 6},
+ { IP_FRAGEMENT, 1},
+ { ROCE_TYPE, 1},
+ { NEXT_KEY, 5},
+ { VLAN_NUMBER, 2},
+ { SRC_VPORT, 12},
+ { DST_VPORT, 12},
+ { TUNNEL_PACKET, 1},
+};
+
+static const struct key_info tuple_key_info[] = {
+ { OUTER_DST_MAC, 48},
+ { OUTER_SRC_MAC, 48},
+ { OUTER_VLAN_TAG_FST, 16},
+ { OUTER_VLAN_TAG_SEC, 16},
+ { OUTER_ETH_TYPE, 16},
+ { OUTER_L2_RSV, 16},
+ { OUTER_IP_TOS, 8},
+ { OUTER_IP_PROTO, 8},
+ { OUTER_SRC_IP, 32},
+ { OUTER_DST_IP, 32},
+ { OUTER_L3_RSV, 16},
+ { OUTER_SRC_PORT, 16},
+ { OUTER_DST_PORT, 16},
+ { OUTER_L4_RSV, 32},
+ { OUTER_TUN_VNI, 24},
+ { OUTER_TUN_FLOW_ID, 8},
+ { INNER_DST_MAC, 48},
+ { INNER_SRC_MAC, 48},
+ { INNER_VLAN_TAG_FST, 16},
+ { INNER_VLAN_TAG_SEC, 16},
+ { INNER_ETH_TYPE, 16},
+ { INNER_L2_RSV, 16},
+ { INNER_IP_TOS, 8},
+ { INNER_IP_PROTO, 8},
+ { INNER_SRC_IP, 32},
+ { INNER_DST_IP, 32},
+ { INNER_L3_RSV, 16},
+ { INNER_SRC_PORT, 16},
+ { INNER_DST_PORT, 16},
+ { INNER_L4_RSV, 32},
+};
+
static int hclge_mac_update_stats_defective(struct hclge_dev *hdev)
{
#define HCLGE_MAC_CMD_NUM 21
@@ -364,9 +457,13 @@ static int hclge_mac_update_stats_complete(struct hclge_dev *hdev, u32 desc_num)
u16 i, k, n;
int ret;
- desc = kcalloc(desc_num, sizeof(struct hclge_desc), GFP_KERNEL);
+ /* This may be called inside atomic sections,
+ * so GFP_ATOMIC is more suitable here
+ */
+ desc = kcalloc(desc_num, sizeof(struct hclge_desc), GFP_ATOMIC);
if (!desc)
return -ENOMEM;
+
hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC_ALL, true);
ret = hclge_cmd_send(&hdev->hw, desc, desc_num);
if (ret) {
@@ -647,6 +744,12 @@ static int hclge_get_sset_count(struct hnae3_handle *handle, int stringset)
count += 2;
handle->flags |= HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK;
handle->flags |= HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK;
+
+ if (hdev->hw.mac.phydev) {
+ count += 1;
+ handle->flags |= HNAE3_SUPPORT_PHY_LOOPBACK;
+ }
+
} else if (stringset == ETH_SS_STATS) {
count = ARRAY_SIZE(g_mac_stats_string) +
hclge_tqps_get_sset_count(handle, stringset);
@@ -702,14 +805,16 @@ static void hclge_get_stats(struct hnae3_handle *handle, u64 *data)
p = hclge_tqps_get_stats(handle, p);
}
-static void hclge_get_mac_pause_stat(struct hnae3_handle *handle, u64 *tx_cnt,
- u64 *rx_cnt)
+static void hclge_get_mac_stat(struct hnae3_handle *handle,
+ struct hns3_mac_stats *mac_stats)
{
struct hclge_vport *vport = hclge_get_vport(handle);
struct hclge_dev *hdev = vport->back;
- *tx_cnt = hdev->hw_stats.mac_stats.mac_tx_mac_pause_num;
- *rx_cnt = hdev->hw_stats.mac_stats.mac_rx_mac_pause_num;
+ hclge_update_stats(handle, NULL);
+
+ mac_stats->tx_pause_cnt = hdev->hw_stats.mac_stats.mac_tx_mac_pause_num;
+ mac_stats->rx_pause_cnt = hdev->hw_stats.mac_stats.mac_rx_mac_pause_num;
}
static int hclge_parse_func_status(struct hclge_dev *hdev,
@@ -1075,6 +1180,7 @@ static void hclge_parse_link_mode(struct hclge_dev *hdev, u8 speed_ability)
else if (media_type == HNAE3_MEDIA_TYPE_BACKPLANE)
hclge_parse_backplane_link_mode(hdev, speed_ability);
}
+
static void hclge_parse_cfg(struct hclge_cfg *cfg, struct hclge_desc *desc)
{
struct hclge_cfg_param_cmd *req;
@@ -1270,6 +1376,12 @@ static int hclge_configure(struct hclge_dev *hdev)
hclge_init_kdump_kernel_config(hdev);
+ /* Set the init affinity based on pci func number */
+ i = cpumask_weight(cpumask_of_node(dev_to_node(&hdev->pdev->dev)));
+ i = i ? PCI_FUNC(hdev->pdev->devfn) % i : 0;
+ cpumask_set_cpu(cpumask_local_spread(i, dev_to_node(&hdev->pdev->dev)),
+ &hdev->affinity_mask);
+
return ret;
}
@@ -2499,22 +2611,29 @@ static void hclge_mbx_task_schedule(struct hclge_dev *hdev)
{
if (!test_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state) &&
!test_and_set_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state))
- schedule_work(&hdev->mbx_service_task);
+ queue_work_on(cpumask_first(&hdev->affinity_mask), system_wq,
+ &hdev->mbx_service_task);
}
static void hclge_reset_task_schedule(struct hclge_dev *hdev)
{
if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
!test_and_set_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state))
- schedule_work(&hdev->rst_service_task);
+ queue_work_on(cpumask_first(&hdev->affinity_mask), system_wq,
+ &hdev->rst_service_task);
}
-static void hclge_task_schedule(struct hclge_dev *hdev)
+void hclge_task_schedule(struct hclge_dev *hdev, unsigned long delay_time)
{
if (!test_bit(HCLGE_STATE_DOWN, &hdev->state) &&
!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
- !test_and_set_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state))
- (void)schedule_work(&hdev->service_task);
+ !test_and_set_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state)) {
+ hdev->hw_stats.stats_timer++;
+ hdev->fd_arfs_expire_timer++;
+ mod_delayed_work_on(cpumask_first(&hdev->affinity_mask),
+ system_wq, &hdev->service_task,
+ delay_time);
+ }
}
static int hclge_get_mac_link_status(struct hclge_dev *hdev)
@@ -2729,25 +2848,6 @@ static int hclge_get_status(struct hnae3_handle *handle)
return hdev->hw.mac.link;
}
-static void hclge_service_timer(struct timer_list *t)
-{
- struct hclge_dev *hdev = from_timer(hdev, t, service_timer);
-
- mod_timer(&hdev->service_timer, jiffies + HZ);
- hdev->hw_stats.stats_timer++;
- hdev->fd_arfs_expire_timer++;
- hclge_task_schedule(hdev);
-}
-
-static void hclge_service_complete(struct hclge_dev *hdev)
-{
- WARN_ON(!test_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state));
-
- /* Flush memory before next watchdog */
- smp_mb__before_atomic();
- clear_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state);
-}
-
static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval)
{
u32 rst_src_reg, cmdq_src_reg, msix_src_reg;
@@ -2763,9 +2863,9 @@ static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval)
* defer the processing of the mailbox events. Since, we would have not
* cleared RX CMDQ event this time we would receive again another
* interrupt from H/W just for the mailbox.
+ *
+ * check for vector0 reset event sources
*/
-
- /* check for vector0 reset event sources */
if (BIT(HCLGE_VECTOR0_IMPRESET_INT_B) & rst_src_reg) {
dev_info(&hdev->pdev->dev, "IMP reset interrupt\n");
set_bit(HNAE3_IMP_RESET, &hdev->reset_pending);
@@ -2882,10 +2982,15 @@ static irqreturn_t hclge_misc_irq_handle(int irq, void *data)
break;
}
- /* clear the source of interrupt if it is not cause by reset */
+ hclge_clear_event_cause(hdev, event_cause, clearval);
+
+ /* Enable the interrupt if it was not caused by reset. When
+ * clearval is 0, the interrupt status may already have been
+ * cleared by hardware before the driver read the status
+ * register; in that case the vector0 interrupt should also
+ * be re-enabled.
+ */
if (!clearval ||
event_cause == HCLGE_VECTOR0_EVENT_MBX) {
- hclge_clear_event_cause(hdev, event_cause, clearval);
hclge_enable_vector(&hdev->misc_vector, true);
}
@@ -2918,6 +3023,36 @@ static void hclge_get_misc_vector(struct hclge_dev *hdev)
hdev->num_msi_used += 1;
}
+static void hclge_irq_affinity_notify(struct irq_affinity_notify *notify,
+ const cpumask_t *mask)
+{
+ struct hclge_dev *hdev = container_of(notify, struct hclge_dev,
+ affinity_notify);
+
+ cpumask_copy(&hdev->affinity_mask, mask);
+}
+
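+/* Nothing to free: the notifier only mirrors the mask in the handler
+ * above, so release is a no-op.
+ */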
+static void hclge_irq_affinity_release(struct kref *ref)
+{
+}
+
+static void hclge_misc_affinity_setup(struct hclge_dev *hdev)
+{
+ irq_set_affinity_hint(hdev->misc_vector.vector_irq,
+ &hdev->affinity_mask);
+
+ hdev->affinity_notify.notify = hclge_irq_affinity_notify;
+ hdev->affinity_notify.release = hclge_irq_affinity_release;
+ irq_set_affinity_notifier(hdev->misc_vector.vector_irq,
+ &hdev->affinity_notify);
+}
+
+static void hclge_misc_affinity_teardown(struct hclge_dev *hdev)
+{
+ irq_set_affinity_notifier(hdev->misc_vector.vector_irq, NULL);
+ irq_set_affinity_hint(hdev->misc_vector.vector_irq, NULL);
+}
+
static int hclge_misc_irq_init(struct hclge_dev *hdev)
{
int ret;
@@ -3105,6 +3240,71 @@ static int hclge_set_all_vf_rst(struct hclge_dev *hdev, bool reset)
return 0;
}
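+
+/* Poll the firmware until every running VF reports that it has stopped
+ * I/O, so the PF reset does not race with in-flight VF traffic; old
+ * firmware that lacks the query gets the legacy 100 ms grace sleep.
+ */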
+static int hclge_func_reset_sync_vf(struct hclge_dev *hdev)
+{
+ struct hclge_pf_rst_sync_cmd *req;
+ struct hclge_desc desc;
+ int cnt = 0;
+ int ret;
+
+ req = (struct hclge_pf_rst_sync_cmd *)desc.data;
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_VF_RST_RDY, true);
+
+ do {
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ /* for compatibility with old firmware, wait
+ * 100 ms for the VFs to stop I/O
+ */
+ if (ret == -EOPNOTSUPP) {
+ msleep(HCLGE_RESET_SYNC_TIME);
+ return 0;
+ } else if (ret) {
+ dev_err(&hdev->pdev->dev, "sync with VF fail %d!\n",
+ ret);
+ return ret;
+ } else if (req->all_vf_ready) {
+ return 0;
+ }
+ msleep(HCLGE_PF_RESET_SYNC_TIME);
+ hclge_cmd_reuse_desc(&desc, true);
+ } while (cnt++ < HCLGE_PF_RESET_SYNC_CNT);
+
+ dev_err(&hdev->pdev->dev, "sync with VF timeout!\n");
+ return -ETIME;
+}
+
+void hclge_report_hw_error(struct hclge_dev *hdev,
+ enum hnae3_hw_error_type type)
+{
+ struct hnae3_client *client = hdev->nic_client;
+ u16 i;
+
+ if (!client || !client->ops->process_hw_error ||
+ !test_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state))
+ return;
+
+ for (i = 0; i < hdev->num_vmdq_vport + 1; i++)
+ client->ops->process_hw_error(&hdev->vport[i].nic, type);
+}
+
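+/* Check HCLGE_PF_OTHER_INT_REG for RD-poison and CMDQ ECC errors raised
+ * by the IMP, report them to the client and clear the handled bits.
+ */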
+static void hclge_handle_imp_error(struct hclge_dev *hdev)
+{
+ u32 reg_val;
+
+ reg_val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG);
+ if (reg_val & BIT(HCLGE_VECTOR0_IMP_RD_POISON_B)) {
+ hclge_report_hw_error(hdev, HNAE3_IMP_RD_POISON_ERROR);
+ reg_val &= ~BIT(HCLGE_VECTOR0_IMP_RD_POISON_B);
+ hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG, reg_val);
+ }
+
+ if (reg_val & BIT(HCLGE_VECTOR0_IMP_CMDQ_ERR_B)) {
+ hclge_report_hw_error(hdev, HNAE3_CMDQ_ECC_ERROR);
+ reg_val &= ~BIT(HCLGE_VECTOR0_IMP_CMDQ_ERR_B);
+ hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG, reg_val);
+ }
+}
+
int hclge_func_reset_cmd(struct hclge_dev *hdev, int func_id)
{
struct hclge_desc desc;
@@ -3229,7 +3429,13 @@ static void hclge_clear_reset_cause(struct hclge_dev *hdev)
if (!clearval)
return;
- hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG, clearval);
+ /* For revision 0x20, the reset interrupt source
+ * can only be cleared after the hardware reset is done
+ */
+ if (hdev->pdev->revision == 0x20)
+ hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG,
+ clearval);
+
hclge_enable_vector(&hdev->misc_vector, true);
}
@@ -3250,19 +3456,33 @@ static int hclge_reset_prepare_down(struct hclge_dev *hdev)
return ret;
}
-static int hclge_reset_prepare_wait(struct hclge_dev *hdev)
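+/* Tell the hardware whether the driver is ready for reset by toggling
+ * the HCLGE_NIC_SW_RST_RDY bit in the command-queue depth register.
+ */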
+static void hclge_reset_handshake(struct hclge_dev *hdev, bool enable)
{
-#define HCLGE_RESET_SYNC_TIME 100
+ u32 reg_val;
+
+ reg_val = hclge_read_dev(&hdev->hw, HCLGE_NIC_CSQ_DEPTH_REG);
+ if (enable)
+ reg_val |= HCLGE_NIC_SW_RST_RDY;
+ else
+ reg_val &= ~HCLGE_NIC_SW_RST_RDY;
+
+ hclge_write_dev(&hdev->hw, HCLGE_NIC_CSQ_DEPTH_REG, reg_val);
+}
+static int hclge_reset_prepare_wait(struct hclge_dev *hdev)
+{
u32 reg_val;
int ret = 0;
switch (hdev->reset_type) {
case HNAE3_FUNC_RESET:
- /* There is no mechanism for PF to know if VF has stopped IO
- * for now, just wait 100 ms for VF to stop IO
+ /* confirm that all running VFs are ready
+ * before requesting the PF reset
*/
- msleep(HCLGE_RESET_SYNC_TIME);
+ ret = hclge_func_reset_sync_vf(hdev);
+ if (ret)
+ return ret;
+
ret = hclge_func_reset_cmd(hdev, 0);
if (ret) {
dev_err(&hdev->pdev->dev,
@@ -3279,15 +3499,19 @@ static int hclge_reset_prepare_wait(struct hclge_dev *hdev)
hdev->rst_stats.pf_rst_cnt++;
break;
case HNAE3_FLR_RESET:
- /* There is no mechanism for PF to know if VF has stopped IO
- * for now, just wait 100 ms for VF to stop IO
+ /* confirm that all running VFs are ready
+ * before requesting the PF reset
*/
- msleep(HCLGE_RESET_SYNC_TIME);
+ ret = hclge_func_reset_sync_vf(hdev);
+ if (ret)
+ return ret;
+
set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
set_bit(HNAE3_FLR_DOWN, &hdev->flr_state);
hdev->rst_stats.flr_rst_cnt++;
break;
case HNAE3_IMP_RESET:
+ hclge_handle_imp_error(hdev);
reg_val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG);
hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG,
BIT(HCLGE_VECTOR0_IMP_RESET_INT_B) | reg_val);
@@ -3298,14 +3522,13 @@ static int hclge_reset_prepare_wait(struct hclge_dev *hdev)
/* inform hardware that preparatory work is done */
msleep(HCLGE_RESET_SYNC_TIME);
- hclge_write_dev(&hdev->hw, HCLGE_NIC_CSQ_DEPTH_REG,
- HCLGE_NIC_CMQ_ENABLE);
+ hclge_reset_handshake(hdev, true);
dev_info(&hdev->pdev->dev, "prepare wait ok\n");
return ret;
}
-static bool hclge_reset_err_handle(struct hclge_dev *hdev, bool is_timeout)
+static bool hclge_reset_err_handle(struct hclge_dev *hdev)
{
#define MAX_RESET_FAIL_CNT 5
@@ -3313,36 +3536,42 @@ static bool hclge_reset_err_handle(struct hclge_dev *hdev, bool is_timeout)
dev_info(&hdev->pdev->dev, "Reset pending %lu\n",
hdev->reset_pending);
return true;
- } else if ((hdev->reset_type != HNAE3_IMP_RESET) &&
- (hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG) &
- BIT(HCLGE_IMP_RESET_BIT))) {
+ } else if (hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS) &
+ HCLGE_RESET_INT_M) {
dev_info(&hdev->pdev->dev,
- "reset failed because IMP Reset is pending\n");
+ "reset failed because new reset interrupt\n");
hclge_clear_reset_cause(hdev);
return false;
} else if (hdev->reset_fail_cnt < MAX_RESET_FAIL_CNT) {
hdev->reset_fail_cnt++;
- if (is_timeout) {
- set_bit(hdev->reset_type, &hdev->reset_pending);
- dev_info(&hdev->pdev->dev,
- "re-schedule to wait for hw reset done\n");
- return true;
- }
-
- dev_info(&hdev->pdev->dev, "Upgrade reset level\n");
- hclge_clear_reset_cause(hdev);
- set_bit(HNAE3_GLOBAL_RESET, &hdev->default_reset_request);
- mod_timer(&hdev->reset_timer,
- jiffies + HCLGE_RESET_INTERVAL);
-
- return false;
+ set_bit(hdev->reset_type, &hdev->reset_pending);
+ dev_info(&hdev->pdev->dev,
+ "re-schedule reset task(%d)\n",
+ hdev->reset_fail_cnt);
+ return true;
}
hclge_clear_reset_cause(hdev);
+
+ /* recover the handshake status when reset fails */
+ hclge_reset_handshake(hdev, true);
+
dev_err(&hdev->pdev->dev, "Reset fail!\n");
return false;
}
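+
+/* Notify the firmware that the PF has finished re-initialising; sent
+ * after global and IMP resets (see hclge_reset_prepare_up()).
+ */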
+static int hclge_set_rst_done(struct hclge_dev *hdev)
+{
+ struct hclge_pf_rst_done_cmd *req;
+ struct hclge_desc desc;
+
+ req = (struct hclge_pf_rst_done_cmd *)desc.data;
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_PF_RST_DONE, false);
+ req->pf_rst_done |= HCLGE_PF_RESET_DONE_BIT;
+
+ return hclge_cmd_send(&hdev->hw, &desc, 1);
+}
+
static int hclge_reset_prepare_up(struct hclge_dev *hdev)
{
int ret = 0;
@@ -3353,10 +3582,18 @@ static int hclge_reset_prepare_up(struct hclge_dev *hdev)
case HNAE3_FLR_RESET:
ret = hclge_set_all_vf_rst(hdev, false);
break;
+ case HNAE3_GLOBAL_RESET:
+ /* fall through */
+ case HNAE3_IMP_RESET:
+ ret = hclge_set_rst_done(hdev);
+ break;
default:
break;
}
+ /* clear up the handshake status after re-initialize done */
+ hclge_reset_handshake(hdev, false);
+
return ret;
}
@@ -3382,7 +3619,6 @@ static int hclge_reset_stack(struct hclge_dev *hdev)
static void hclge_reset(struct hclge_dev *hdev)
{
struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
- bool is_timeout = false;
int ret;
/* Initialize ae_dev reset status as well, in case enet layer wants to
@@ -3410,10 +3646,8 @@ static void hclge_reset(struct hclge_dev *hdev)
if (ret)
goto err_reset;
- if (hclge_reset_wait(hdev)) {
- is_timeout = true;
+ if (hclge_reset_wait(hdev))
goto err_reset;
- }
hdev->rst_stats.hw_reset_done_cnt++;
@@ -3458,14 +3692,22 @@ static void hclge_reset(struct hclge_dev *hdev)
hdev->reset_fail_cnt = 0;
hdev->rst_stats.reset_done_cnt++;
ae_dev->reset_type = HNAE3_NONE_RESET;
- del_timer(&hdev->reset_timer);
+
+ /* if default_reset_request has a higher level reset request,
+ * it should be handled as soon as possible. since some errors
+ * need this kind of reset to fix.
+ */
+ hdev->reset_level = hclge_get_reset_level(ae_dev,
+ &hdev->default_reset_request);
+ if (hdev->reset_level != HNAE3_NONE_RESET)
+ set_bit(hdev->reset_level, &hdev->reset_request);
return;
err_reset_lock:
rtnl_unlock();
err_reset:
- if (hclge_reset_err_handle(hdev, is_timeout))
+ if (hclge_reset_err_handle(hdev))
hclge_reset_task_schedule(hdev);
}
@@ -3493,9 +3735,10 @@ static void hclge_reset_event(struct pci_dev *pdev, struct hnae3_handle *handle)
handle = &hdev->vport[0].nic;
if (time_before(jiffies, (hdev->last_reset_time +
- HCLGE_RESET_INTERVAL)))
+ HCLGE_RESET_INTERVAL))) {
+ mod_timer(&hdev->reset_timer, jiffies + HCLGE_RESET_INTERVAL);
return;
- else if (hdev->default_reset_request)
+ } else if (hdev->default_reset_request)
hdev->reset_level =
hclge_get_reset_level(ae_dev,
&hdev->default_reset_request);
@@ -3525,6 +3768,12 @@ static void hclge_reset_timer(struct timer_list *t)
{
struct hclge_dev *hdev = from_timer(hdev, t, reset_timer);
+ /* if default_reset_request has no value, this reset request has
+ * already been handled, so just return here
+ */
+ if (!hdev->default_reset_request)
+ return;
+
dev_info(&hdev->pdev->dev,
"triggering reset in reset timer\n");
hclge_reset_event(hdev->pdev, NULL);
@@ -3606,7 +3855,9 @@ static void hclge_update_vport_alive(struct hclge_dev *hdev)
static void hclge_service_task(struct work_struct *work)
{
struct hclge_dev *hdev =
- container_of(work, struct hclge_dev, service_task);
+ container_of(work, struct hclge_dev, service_task.work);
+
+ clear_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state);
if (hdev->hw_stats.stats_timer >= HCLGE_STATS_TIMER_INTERVAL) {
hclge_update_stats_for_all(hdev);
@@ -3621,7 +3872,8 @@ static void hclge_service_task(struct work_struct *work)
hclge_rfs_filter_expire(hdev);
hdev->fd_arfs_expire_timer = 0;
}
- hclge_service_complete(hdev);
+
+ hclge_task_schedule(hdev, round_jiffies_relative(HZ));
}
struct hclge_vport *hclge_get_vport(struct hnae3_handle *handle)
@@ -4197,8 +4449,8 @@ int hclge_bind_ring_with_vector(struct hclge_vport *vport,
struct hclge_dev *hdev = vport->back;
struct hnae3_ring_chain_node *node;
struct hclge_desc desc;
- struct hclge_ctrl_vector_chain_cmd *req
- = (struct hclge_ctrl_vector_chain_cmd *)desc.data;
+ struct hclge_ctrl_vector_chain_cmd *req =
+ (struct hclge_ctrl_vector_chain_cmd *)desc.data;
enum hclge_cmd_status status;
enum hclge_opcode_type op;
u16 tqp_type_and_id;
@@ -5808,7 +6060,7 @@ static int hclge_add_fd_entry_by_arfs(struct hnae3_handle *handle, u16 queue_id,
return -ENOSPC;
}
- rule = kzalloc(sizeof(*rule), GFP_KERNEL);
+ rule = kzalloc(sizeof(*rule), GFP_ATOMIC);
if (!rule) {
spin_unlock_bh(&hdev->fd_rule_lock);
@@ -5959,6 +6211,89 @@ static void hclge_cfg_mac_mode(struct hclge_dev *hdev, bool enable)
"mac enable fail, ret =%d.\n", ret);
}
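+
+/* Program a per-function MAC/VLAN switch parameter; used below to
+ * control whether self-addressed packets are looped back in the SSU
+ * during loopback tests.
+ */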
+static int hclge_config_switch_param(struct hclge_dev *hdev, int vfid,
+ u8 switch_param, u8 param_mask)
+{
+ struct hclge_mac_vlan_switch_cmd *req;
+ struct hclge_desc desc;
+ u32 func_id;
+ int ret;
+
+ func_id = hclge_get_port_number(HOST_PORT, 0, vfid, 0);
+ req = (struct hclge_mac_vlan_switch_cmd *)desc.data;
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_SWITCH_PARAM,
+ false);
+ req->roce_sel = HCLGE_MAC_VLAN_NIC_SEL;
+ req->func_id = cpu_to_le32(func_id);
+ req->switch_param = switch_param;
+ req->param_mask = param_mask;
+
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret)
+ dev_err(&hdev->pdev->dev,
+ "set mac vlan switch parameter fail, ret = %d\n", ret);
+ return ret;
+}
+
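+/* Poll the PHY until its link state matches @link_ret (1 = up, 0 =
+ * down), giving the PHY time to settle after a loopback change.
+ */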
+static void hclge_phy_link_status_wait(struct hclge_dev *hdev,
+ int link_ret)
+{
+#define HCLGE_PHY_LINK_STATUS_NUM 200
+
+ struct phy_device *phydev = hdev->hw.mac.phydev;
+ int i = 0;
+ int ret;
+
+ do {
+ ret = phy_read_status(phydev);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "phy update link status fail, ret = %d\n", ret);
+ return;
+ }
+
+ if (phydev->link == link_ret)
+ break;
+
+ msleep(HCLGE_LINK_STATUS_MS);
+ } while (++i < HCLGE_PHY_LINK_STATUS_NUM);
+}
+
+static int hclge_mac_link_status_wait(struct hclge_dev *hdev, int link_ret)
+{
+#define HCLGE_MAC_LINK_STATUS_NUM 100
+
+ int i = 0;
+ int ret;
+
+ do {
+ ret = hclge_get_mac_link_status(hdev);
+ if (ret < 0)
+ return ret;
+ else if (ret == link_ret)
+ return 0;
+
+ msleep(HCLGE_LINK_STATUS_MS);
+ } while (++i < HCLGE_MAC_LINK_STATUS_NUM);
+ return -EBUSY;
+}
+
+static int hclge_mac_phy_link_status_wait(struct hclge_dev *hdev, bool en,
+ bool is_phy)
+{
+#define HCLGE_LINK_STATUS_DOWN 0
+#define HCLGE_LINK_STATUS_UP 1
+
+ int link_ret;
+
+ link_ret = en ? HCLGE_LINK_STATUS_UP : HCLGE_LINK_STATUS_DOWN;
+
+ if (is_phy)
+ hclge_phy_link_status_wait(hdev, link_ret);
+
+ return hclge_mac_link_status_wait(hdev, link_ret);
+}
+
static int hclge_set_app_loopback(struct hclge_dev *hdev, bool en)
{
struct hclge_config_mac_mode_cmd *req;
@@ -6001,14 +6336,8 @@ static int hclge_set_serdes_loopback(struct hclge_dev *hdev, bool en,
#define HCLGE_SERDES_RETRY_MS 10
#define HCLGE_SERDES_RETRY_NUM 100
-#define HCLGE_MAC_LINK_STATUS_MS 10
-#define HCLGE_MAC_LINK_STATUS_NUM 100
-#define HCLGE_MAC_LINK_STATUS_DOWN 0
-#define HCLGE_MAC_LINK_STATUS_UP 1
-
struct hclge_serdes_lb_cmd *req;
struct hclge_desc desc;
- int mac_link_ret = 0;
int ret, i = 0;
u8 loop_mode_b;
@@ -6031,10 +6360,8 @@ static int hclge_set_serdes_loopback(struct hclge_dev *hdev, bool en,
if (en) {
req->enable = loop_mode_b;
req->mask = loop_mode_b;
- mac_link_ret = HCLGE_MAC_LINK_STATUS_UP;
} else {
req->mask = loop_mode_b;
- mac_link_ret = HCLGE_MAC_LINK_STATUS_DOWN;
}
ret = hclge_cmd_send(&hdev->hw, &desc, 1);
@@ -6067,18 +6394,70 @@ static int hclge_set_serdes_loopback(struct hclge_dev *hdev, bool en,
hclge_cfg_mac_mode(hdev, en);
- i = 0;
- do {
- /* serdes Internal loopback, independent of the network cable.*/
- msleep(HCLGE_MAC_LINK_STATUS_MS);
- ret = hclge_get_mac_link_status(hdev);
- if (ret == mac_link_ret)
- return 0;
- } while (++i < HCLGE_MAC_LINK_STATUS_NUM);
+ ret = hclge_mac_phy_link_status_wait(hdev, en, false);
+ if (ret)
+ dev_err(&hdev->pdev->dev,
+ "serdes loopback config mac mode timeout\n");
- dev_err(&hdev->pdev->dev, "config mac mode timeout\n");
+ return ret;
+}
- return -EBUSY;
+static int hclge_enable_phy_loopback(struct hclge_dev *hdev,
+ struct phy_device *phydev)
+{
+ int ret;
+
+ if (!phydev->suspended) {
+ ret = phy_suspend(phydev);
+ if (ret)
+ return ret;
+ }
+
+ ret = phy_resume(phydev);
+ if (ret)
+ return ret;
+
+ return phy_loopback(phydev, true);
+}
+
+static int hclge_disable_phy_loopback(struct hclge_dev *hdev,
+ struct phy_device *phydev)
+{
+ int ret;
+
+ ret = phy_loopback(phydev, false);
+ if (ret)
+ return ret;
+
+ return phy_suspend(phydev);
+}
+
+static int hclge_set_phy_loopback(struct hclge_dev *hdev, bool en)
+{
+ struct phy_device *phydev = hdev->hw.mac.phydev;
+ int ret;
+
+ if (!phydev)
+ return -ENOTSUPP;
+
+ if (en)
+ ret = hclge_enable_phy_loopback(hdev, phydev);
+ else
+ ret = hclge_disable_phy_loopback(hdev, phydev);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "set phy loopback fail, ret = %d\n", ret);
+ return ret;
+ }
+
+ hclge_cfg_mac_mode(hdev, en);
+
+ ret = hclge_mac_phy_link_status_wait(hdev, en, true);
+ if (ret)
+ dev_err(&hdev->pdev->dev,
+ "phy loopback config mac mode timeout\n");
+
+ return ret;
}
static int hclge_tqp_enable(struct hclge_dev *hdev, unsigned int tqp_id,
@@ -6110,6 +6489,20 @@ static int hclge_set_loopback(struct hnae3_handle *handle,
struct hclge_dev *hdev = vport->back;
int i, ret;
+ /* Loopback can be enabled in three places: SSU, MAC, and serdes. By
+ * default, SSU loopback is enabled, so if the SMAC and the DMAC are
+ * the same, the packets are looped back in the SSU. If SSU loopback
+ * is disabled, packets can reach MAC even if SMAC is the same as DMAC.
+ */
+ if (hdev->pdev->revision >= 0x21) {
+ u8 switch_param = en ? 0 : BIT(HCLGE_SWITCH_ALW_LPBK_B);
+
+ ret = hclge_config_switch_param(hdev, PF_VPORT_ID, switch_param,
+ HCLGE_SWITCH_ALW_LPBK_MASK);
+ if (ret)
+ return ret;
+ }
+
switch (loop_mode) {
case HNAE3_LOOP_APP:
ret = hclge_set_app_loopback(hdev, en);
@@ -6118,6 +6511,9 @@ static int hclge_set_loopback(struct hnae3_handle *handle,
case HNAE3_LOOP_PARALLEL_SERDES:
ret = hclge_set_serdes_loopback(hdev, en, loop_mode);
break;
+ case HNAE3_LOOP_PHY:
+ ret = hclge_set_phy_loopback(hdev, en);
+ break;
default:
ret = -ENOTSUPP;
dev_err(&hdev->pdev->dev,
@@ -6160,10 +6556,13 @@ static void hclge_set_timer_task(struct hnae3_handle *handle, bool enable)
struct hclge_dev *hdev = vport->back;
if (enable) {
- mod_timer(&hdev->service_timer, jiffies + HZ);
+ hclge_task_schedule(hdev, round_jiffies_relative(HZ));
} else {
- del_timer_sync(&hdev->service_timer);
- cancel_work_sync(&hdev->service_task);
+ /* Set the DOWN flag here to prevent the service task from
+ * being scheduled again
+ */
+ set_bit(HCLGE_STATE_DOWN, &hdev->state);
+ cancel_delayed_work_sync(&hdev->service_task);
clear_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state);
}
}
@@ -6202,12 +6601,15 @@ static void hclge_ae_stop(struct hnae3_handle *handle)
if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) &&
hdev->reset_type != HNAE3_FUNC_RESET) {
hclge_mac_stop_phy(hdev);
+ hclge_update_link_status(hdev);
return;
}
for (i = 0; i < handle->kinfo.num_tqps; i++)
hclge_reset_tqp(handle, i);
+ hclge_config_mac_tnl_int(hdev, false);
+
/* Mac disable */
hclge_cfg_mac_mode(hdev, false);
@@ -6249,7 +6651,6 @@ static int hclge_get_mac_vlan_cmd_status(struct hclge_vport *vport,
enum hclge_mac_vlan_tbl_opcode op)
{
struct hclge_dev *hdev = vport->back;
- int return_status = -EIO;
if (cmdq_resp) {
dev_err(&hdev->pdev->dev,
@@ -6260,52 +6661,53 @@ static int hclge_get_mac_vlan_cmd_status(struct hclge_vport *vport,
if (op == HCLGE_MAC_VLAN_ADD) {
if ((!resp_code) || (resp_code == 1)) {
- return_status = 0;
+ return 0;
} else if (resp_code == HCLGE_ADD_UC_OVERFLOW) {
- return_status = -ENOSPC;
dev_err(&hdev->pdev->dev,
"add mac addr failed for uc_overflow.\n");
+ return -ENOSPC;
} else if (resp_code == HCLGE_ADD_MC_OVERFLOW) {
- return_status = -ENOSPC;
dev_err(&hdev->pdev->dev,
"add mac addr failed for mc_overflow.\n");
- } else {
- dev_err(&hdev->pdev->dev,
- "add mac addr failed for undefined, code=%d.\n",
- resp_code);
+ return -ENOSPC;
}
+
+ dev_err(&hdev->pdev->dev,
+ "add mac addr failed for undefined, code=%u.\n",
+ resp_code);
+ return -EIO;
} else if (op == HCLGE_MAC_VLAN_REMOVE) {
if (!resp_code) {
- return_status = 0;
+ return 0;
} else if (resp_code == 1) {
- return_status = -ENOENT;
dev_dbg(&hdev->pdev->dev,
"remove mac addr failed for miss.\n");
- } else {
- dev_err(&hdev->pdev->dev,
- "remove mac addr failed for undefined, code=%d.\n",
- resp_code);
+ return -ENOENT;
}
+
+ dev_err(&hdev->pdev->dev,
+ "remove mac addr failed for undefined, code=%u.\n",
+ resp_code);
+ return -EIO;
} else if (op == HCLGE_MAC_VLAN_LKUP) {
if (!resp_code) {
- return_status = 0;
+ return 0;
} else if (resp_code == 1) {
- return_status = -ENOENT;
dev_dbg(&hdev->pdev->dev,
"lookup mac addr failed for miss.\n");
- } else {
- dev_err(&hdev->pdev->dev,
- "lookup mac addr failed for undefined, code=%d.\n",
- resp_code);
+ return -ENOENT;
}
- } else {
- return_status = -EINVAL;
+
dev_err(&hdev->pdev->dev,
- "unknown opcode for get_mac_vlan_cmd_status,opcode=%d.\n",
- op);
+ "lookup mac addr failed for undefined, code=%u.\n",
+ resp_code);
+ return -EIO;
}
- return return_status;
+ dev_err(&hdev->pdev->dev,
+ "unknown opcode for get_mac_vlan_cmd_status, opcode=%d.\n", op);
+
+ return -EINVAL;
}
static int hclge_update_desc_vfid(struct hclge_desc *desc, int vfid, bool clr)
@@ -7019,7 +7421,7 @@ static int hclge_set_mac_addr(struct hnae3_handle *handle, void *p,
is_broadcast_ether_addr(new_addr) ||
is_multicast_ether_addr(new_addr)) {
dev_err(&hdev->pdev->dev,
- "Change uc mac err! invalid mac:%p.\n",
+ "Change uc mac err! invalid mac:%pM.\n",
new_addr);
return -EINVAL;
}
@@ -7124,7 +7526,7 @@ static void hclge_enable_vlan_filter(struct hnae3_handle *handle, bool enable)
}
static int hclge_set_vf_vlan_common(struct hclge_dev *hdev, u16 vfid,
- bool is_kill, u16 vlan, u8 qos,
+ bool is_kill, u16 vlan,
__be16 proto)
{
#define HCLGE_MAX_VF_BYTES 16
@@ -7235,7 +7637,7 @@ static int hclge_set_port_vlan_filter(struct hclge_dev *hdev, __be16 proto,
}
static int hclge_set_vlan_filter_hw(struct hclge_dev *hdev, __be16 proto,
- u16 vport_id, u16 vlan_id, u8 qos,
+ u16 vport_id, u16 vlan_id,
bool is_kill)
{
u16 vport_idx, vport_num = 0;
@@ -7245,7 +7647,7 @@ static int hclge_set_vlan_filter_hw(struct hclge_dev *hdev, __be16 proto,
return 0;
ret = hclge_set_vf_vlan_common(hdev, vport_id, is_kill, vlan_id,
- 0, proto);
+ proto);
if (ret) {
dev_err(&hdev->pdev->dev,
"Set %d vport vlan filter config fail, ret =%d.\n",
@@ -7532,7 +7934,7 @@ static int hclge_add_vport_all_vlan_table(struct hclge_vport *vport)
if (!vlan->hd_tbl_status) {
ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
vport->vport_id,
- vlan->vlan_id, 0, false);
+ vlan->vlan_id, false);
if (ret) {
dev_err(&hdev->pdev->dev,
"restore vport vlan list failed, ret=%d\n",
@@ -7558,7 +7960,7 @@ static void hclge_rm_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id,
hclge_set_vlan_filter_hw(hdev,
htons(ETH_P_8021Q),
vport->vport_id,
- vlan_id, 0,
+ vlan_id,
true);
list_del(&vlan->node);
@@ -7578,7 +7980,7 @@ void hclge_rm_vport_all_vlan_table(struct hclge_vport *vport, bool is_del_list)
hclge_set_vlan_filter_hw(hdev,
htons(ETH_P_8021Q),
vport->vport_id,
- vlan->vlan_id, 0,
+ vlan->vlan_id,
true);
vlan->hd_tbl_status = false;
@@ -7611,7 +8013,7 @@ static void hclge_restore_vlan_table(struct hnae3_handle *handle)
struct hclge_vport *vport = hclge_get_vport(handle);
struct hclge_vport_vlan_cfg *vlan, *tmp;
struct hclge_dev *hdev = vport->back;
- u16 vlan_proto, qos;
+ u16 vlan_proto;
u16 state, vlan_id;
int i;
@@ -7620,12 +8022,11 @@ static void hclge_restore_vlan_table(struct hnae3_handle *handle)
vport = &hdev->vport[i];
vlan_proto = vport->port_base_vlan_cfg.vlan_info.vlan_proto;
vlan_id = vport->port_base_vlan_cfg.vlan_info.vlan_tag;
- qos = vport->port_base_vlan_cfg.vlan_info.qos;
state = vport->port_base_vlan_cfg.state;
if (state != HNAE3_PORT_BASE_VLAN_DISABLE) {
hclge_set_vlan_filter_hw(hdev, htons(vlan_proto),
- vport->vport_id, vlan_id, qos,
+ vport->vport_id, vlan_id,
false);
continue;
}
@@ -7635,7 +8036,7 @@ static void hclge_restore_vlan_table(struct hnae3_handle *handle)
hclge_set_vlan_filter_hw(hdev,
htons(ETH_P_8021Q),
vport->vport_id,
- vlan->vlan_id, 0,
+ vlan->vlan_id,
false);
}
}
@@ -7675,12 +8076,12 @@ static int hclge_update_vlan_filter_entries(struct hclge_vport *vport,
htons(new_info->vlan_proto),
vport->vport_id,
new_info->vlan_tag,
- new_info->qos, false);
+ false);
}
ret = hclge_set_vlan_filter_hw(hdev, htons(old_info->vlan_proto),
vport->vport_id, old_info->vlan_tag,
- old_info->qos, true);
+ true);
if (ret)
return ret;
@@ -7707,7 +8108,7 @@ int hclge_update_port_base_vlan_cfg(struct hclge_vport *vport, u16 state,
htons(vlan_info->vlan_proto),
vport->vport_id,
vlan_info->vlan_tag,
- vlan_info->qos, false);
+ false);
if (ret)
return ret;
@@ -7716,7 +8117,7 @@ int hclge_update_port_base_vlan_cfg(struct hclge_vport *vport, u16 state,
htons(old_vlan_info->vlan_proto),
vport->vport_id,
old_vlan_info->vlan_tag,
- old_vlan_info->qos, true);
+ true);
if (ret)
return ret;
@@ -7829,7 +8230,7 @@ int hclge_set_vlan_filter(struct hnae3_handle *handle, __be16 proto,
return -EBUSY;
}
- /* When port base vlan enabled, we use port base vlan as the vlan
+ /* when port base vlan is enabled, we use the port base vlan as the vlan
 * filter entry. In this case, we don't update the vlan filter table
 * when the user adds a new vlan or removes an existing one, just update
 * the vport vlan list. The vlan ids in the vlan list will be written to the vlan filter
@@ -7837,7 +8238,7 @@ int hclge_set_vlan_filter(struct hnae3_handle *handle, __be16 proto,
*/
if (handle->port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
ret = hclge_set_vlan_filter_hw(hdev, proto, vport->vport_id,
- vlan_id, 0, is_kill);
+ vlan_id, is_kill);
writen_to_tbl = true;
}
@@ -7848,7 +8249,7 @@ int hclge_set_vlan_filter(struct hnae3_handle *handle, __be16 proto,
hclge_add_vport_vlan_table(vport, vlan_id,
writen_to_tbl);
} else if (is_kill) {
- /* When remove hw vlan filter failed, record the vlan id,
+ /* when removing the hw vlan filter fails, record the vlan id,
 * and try to remove it from hw later, to stay consistent
 * with the stack
*/
@@ -7873,7 +8274,7 @@ static void hclge_sync_vlan_filter(struct hclge_dev *hdev)
while (vlan_id != VLAN_N_VID) {
ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
vport->vport_id, vlan_id,
- 0, true);
+ true);
if (ret && ret != -EINVAL)
return;
@@ -8044,11 +8445,12 @@ int hclge_reset_tqp(struct hnae3_handle *handle, u16 queue_id)
}
while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) {
- /* Wait for tqp hw reset */
- msleep(20);
reset_status = hclge_get_reset_status(hdev, queue_gid);
if (reset_status)
break;
+
+ /* Wait for tqp hw reset */
+ usleep_range(1000, 1200);
}
if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) {
@@ -8082,11 +8484,12 @@ void hclge_reset_vf_queue(struct hclge_vport *vport, u16 queue_id)
}
while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) {
- /* Wait for tqp hw reset */
- msleep(20);
reset_status = hclge_get_reset_status(hdev, queue_gid);
if (reset_status)
break;
+
+ /* Wait for tqp hw reset */
+ usleep_range(1000, 1200);
}
if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) {
@@ -8122,28 +8525,15 @@ static int hclge_cfg_pauseparam(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
{
int ret;
- if (rx_en && tx_en)
- hdev->fc_mode_last_time = HCLGE_FC_FULL;
- else if (rx_en && !tx_en)
- hdev->fc_mode_last_time = HCLGE_FC_RX_PAUSE;
- else if (!rx_en && tx_en)
- hdev->fc_mode_last_time = HCLGE_FC_TX_PAUSE;
- else
- hdev->fc_mode_last_time = HCLGE_FC_NONE;
-
if (hdev->tm_info.fc_mode == HCLGE_FC_PFC)
return 0;
ret = hclge_mac_pause_en_cfg(hdev, tx_en, rx_en);
- if (ret) {
- dev_err(&hdev->pdev->dev, "configure pauseparam error, ret = %d.\n",
- ret);
- return ret;
- }
-
- hdev->tm_info.fc_mode = hdev->fc_mode_last_time;
+ if (ret)
+ dev_err(&hdev->pdev->dev,
+ "configure pauseparam error, ret = %d.\n", ret);
- return 0;
+ return ret;
}
int hclge_cfg_flowctrl(struct hclge_dev *hdev)
@@ -8208,6 +8598,21 @@ static void hclge_get_pauseparam(struct hnae3_handle *handle, u32 *auto_neg,
}
}
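+
+/* Record the pause mode requested by the user so it becomes the current
+ * fc_mode and can be restored later from fc_mode_last_time.
+ */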
+static void hclge_record_user_pauseparam(struct hclge_dev *hdev,
+ u32 rx_en, u32 tx_en)
+{
+ if (rx_en && tx_en)
+ hdev->fc_mode_last_time = HCLGE_FC_FULL;
+ else if (rx_en && !tx_en)
+ hdev->fc_mode_last_time = HCLGE_FC_RX_PAUSE;
+ else if (!rx_en && tx_en)
+ hdev->fc_mode_last_time = HCLGE_FC_TX_PAUSE;
+ else
+ hdev->fc_mode_last_time = HCLGE_FC_NONE;
+
+ hdev->tm_info.fc_mode = hdev->fc_mode_last_time;
+}
+
static int hclge_set_pauseparam(struct hnae3_handle *handle, u32 auto_neg,
u32 rx_en, u32 tx_en)
{
@@ -8233,6 +8638,8 @@ static int hclge_set_pauseparam(struct hnae3_handle *handle, u32 auto_neg,
hclge_set_flowctrl_adv(hdev, rx_en, tx_en);
+ hclge_record_user_pauseparam(hdev, rx_en, tx_en);
+
if (!auto_neg)
return hclge_cfg_pauseparam(hdev, rx_en, tx_en);
@@ -8481,7 +8888,7 @@ static int hclge_init_client_instance(struct hnae3_client *client,
}
}
- return ret;
+ return 0;
clear_nic:
hdev->nic_client = NULL;
@@ -8602,12 +9009,10 @@ static void hclge_state_uninit(struct hclge_dev *hdev)
set_bit(HCLGE_STATE_DOWN, &hdev->state);
set_bit(HCLGE_STATE_REMOVING, &hdev->state);
- if (hdev->service_timer.function)
- del_timer_sync(&hdev->service_timer);
if (hdev->reset_timer.function)
del_timer_sync(&hdev->reset_timer);
- if (hdev->service_task.func)
- cancel_work_sync(&hdev->service_task);
+ if (hdev->service_task.work.func)
+ cancel_delayed_work_sync(&hdev->service_task);
if (hdev->rst_service_task.func)
cancel_work_sync(&hdev->rst_service_task);
if (hdev->mbx_service_task.func)
@@ -8812,12 +9217,16 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
hclge_dcb_ops_set(hdev);
- timer_setup(&hdev->service_timer, hclge_service_timer, 0);
timer_setup(&hdev->reset_timer, hclge_reset_timer, 0);
- INIT_WORK(&hdev->service_task, hclge_service_task);
+ INIT_DELAYED_WORK(&hdev->service_task, hclge_service_task);
INIT_WORK(&hdev->rst_service_task, hclge_reset_service_task);
INIT_WORK(&hdev->mbx_service_task, hclge_mailbox_service_task);
+ /* Set up affinity after the service task setup because add_timer_on
+ * is called in the affinity notify path.
+ */
+ hclge_misc_affinity_setup(hdev);
+
hclge_clear_all_event_cause(hdev);
hclge_clear_resetting_state(hdev);
@@ -8842,7 +9251,9 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
hclge_state_init(hdev);
hdev->last_reset_time = jiffies;
- pr_info("%s driver initialization finished.\n", HCLGE_DRIVER_NAME);
+ dev_info(&hdev->pdev->dev, "%s driver initialization finished.\n",
+ HCLGE_DRIVER_NAME);
+
return 0;
err_mdiobus_unreg:
@@ -8979,6 +9390,7 @@ static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
struct hclge_dev *hdev = ae_dev->priv;
struct hclge_mac *mac = &hdev->hw.mac;
+ hclge_misc_affinity_teardown(hdev);
hclge_state_uninit(hdev);
if (mac->phydev)
@@ -9238,106 +9650,314 @@ static int hclge_get_64_bit_regs(struct hclge_dev *hdev, u32 regs_num,
}
#define MAX_SEPARATE_NUM 4
-#define SEPARATOR_VALUE 0xFFFFFFFF
+#define SEPARATOR_VALUE 0xFDFCFBFA
#define REG_NUM_PER_LINE 4
#define REG_LEN_PER_LINE (REG_NUM_PER_LINE * sizeof(u32))
+#define REG_SEPARATOR_LINE 1
+#define REG_NUM_REMAIN_MASK 3
+#define BD_LIST_MAX_NUM 30
-static int hclge_get_regs_len(struct hnae3_handle *handle)
+int hclge_query_bd_num_cmd_send(struct hclge_dev *hdev, struct hclge_desc *desc)
{
- int cmdq_lines, common_lines, ring_lines, tqp_intr_lines;
- struct hnae3_knic_private_info *kinfo = &handle->kinfo;
- struct hclge_vport *vport = hclge_get_vport(handle);
- struct hclge_dev *hdev = vport->back;
- u32 regs_num_32_bit, regs_num_64_bit;
+ /* prepare 4 commands to query DFX BD number */
+ hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_DFX_BD_NUM, true);
+ desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
+ hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_DFX_BD_NUM, true);
+ desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
+ hclge_cmd_setup_basic_desc(&desc[2], HCLGE_OPC_DFX_BD_NUM, true);
+ desc[2].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
+ hclge_cmd_setup_basic_desc(&desc[3], HCLGE_OPC_DFX_BD_NUM, true);
+
+ return hclge_cmd_send(&hdev->hw, desc, 4);
+}
+
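+/* Read back, for each DFX register type, how many BDs its dump needs;
+ * hclge_dfx_bd_offset_list[] gives each type's slot in the reply.
+ */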
+static int hclge_get_dfx_reg_bd_num(struct hclge_dev *hdev,
+ int *bd_num_list,
+ u32 type_num)
+{
+#define HCLGE_DFX_REG_BD_NUM 4
+
+ u32 entries_per_desc, desc_index, index, offset, i;
+ struct hclge_desc desc[HCLGE_DFX_REG_BD_NUM];
int ret;
- ret = hclge_get_regs_num(hdev, &regs_num_32_bit, &regs_num_64_bit);
+ ret = hclge_query_bd_num_cmd_send(hdev, desc);
if (ret) {
dev_err(&hdev->pdev->dev,
- "Get register number failed, ret = %d.\n", ret);
- return -EOPNOTSUPP;
+ "Get dfx bd num fail, status is %d.\n", ret);
+ return ret;
}
- cmdq_lines = sizeof(cmdq_reg_addr_list) / REG_LEN_PER_LINE + 1;
- common_lines = sizeof(common_reg_addr_list) / REG_LEN_PER_LINE + 1;
- ring_lines = sizeof(ring_reg_addr_list) / REG_LEN_PER_LINE + 1;
- tqp_intr_lines = sizeof(tqp_intr_reg_addr_list) / REG_LEN_PER_LINE + 1;
+ entries_per_desc = ARRAY_SIZE(desc[0].data);
+ for (i = 0; i < type_num; i++) {
+ offset = hclge_dfx_bd_offset_list[i];
+ index = offset % entries_per_desc;
+ desc_index = offset / entries_per_desc;
+ bd_num_list[i] = le32_to_cpu(desc[desc_index].data[index]);
+ }
- return (cmdq_lines + common_lines + ring_lines * kinfo->num_tqps +
- tqp_intr_lines * (hdev->num_msi_used - 1)) * REG_LEN_PER_LINE +
- regs_num_32_bit * sizeof(u32) + regs_num_64_bit * sizeof(u64);
+ return ret;
}
-static void hclge_get_regs(struct hnae3_handle *handle, u32 *version,
- void *data)
+static int hclge_dfx_reg_cmd_send(struct hclge_dev *hdev,
+ struct hclge_desc *desc_src, int bd_num,
+ enum hclge_opcode_type cmd)
{
- struct hnae3_knic_private_info *kinfo = &handle->kinfo;
- struct hclge_vport *vport = hclge_get_vport(handle);
- struct hclge_dev *hdev = vport->back;
- u32 regs_num_32_bit, regs_num_64_bit;
- int i, j, reg_um, separator_num;
+ struct hclge_desc *desc = desc_src;
+ int i, ret;
+
+ hclge_cmd_setup_basic_desc(desc, cmd, true);
+ for (i = 0; i < bd_num - 1; i++) {
+ desc->flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
+ desc++;
+ hclge_cmd_setup_basic_desc(desc, cmd, true);
+ }
+
+ desc = desc_src;
+ ret = hclge_cmd_send(&hdev->hw, desc, bd_num);
+ if (ret)
+ dev_err(&hdev->pdev->dev,
+ "Query dfx reg cmd(0x%x) send fail, status is %d.\n",
+ cmd, ret);
+
+ return ret;
+}
+
+static int hclge_dfx_reg_fetch_data(struct hclge_desc *desc_src, int bd_num,
+ void *data)
+{
+ int entries_per_desc, reg_num, separator_num, desc_index, index, i;
+ struct hclge_desc *desc = desc_src;
u32 *reg = data;
+
+ entries_per_desc = ARRAY_SIZE(desc->data);
+ reg_num = entries_per_desc * bd_num;
+ separator_num = REG_NUM_PER_LINE - (reg_num & REG_NUM_REMAIN_MASK);
+ for (i = 0; i < reg_num; i++) {
+ index = i % entries_per_desc;
+ desc_index = i / entries_per_desc;
+ *reg++ = le32_to_cpu(desc[desc_index].data[index]);
+ }
+ for (i = 0; i < separator_num; i++)
+ *reg++ = SEPARATOR_VALUE;
+
+ return reg_num + separator_num;
+}
+
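
The padding scheme above is worth spelling out: REG_NUM_PER_LINE is a power of two, so masking with REG_NUM_REMAIN_MASK gives how many words the last line already holds, and enough SEPARATOR_VALUE words are appended to complete it (a whole separator line when the block is already aligned, matching REG_SEPARATOR_LINE in the length math). A minimal user-space sketch of just this arithmetic, using the constants above with otherwise hypothetical inputs:

#include <stdio.h>

#define REG_NUM_PER_LINE	4
#define REG_NUM_REMAIN_MASK	3
#define SEPARATOR_VALUE		0xFDFCFBFA

/* Pad a block of reg_num words out to a whole 4-word line with
 * separator marks; returns the total number of words emitted.
 */
static int pad_block(unsigned int *dst, int reg_num)
{
	int separator_num = REG_NUM_PER_LINE - (reg_num & REG_NUM_REMAIN_MASK);
	int i;

	for (i = 0; i < separator_num; i++)
		dst[i] = SEPARATOR_VALUE;
	return reg_num + separator_num;
}

int main(void)
{
	unsigned int pad[REG_NUM_PER_LINE];
	int reg_num;

	/* 5..7 pad up to 8 words; 8 gets a full extra separator line */
	for (reg_num = 5; reg_num <= 8; reg_num++)
		printf("reg_num=%d -> %d words total\n",
		       reg_num, pad_block(pad, reg_num));
	return 0;
}
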
+static int hclge_get_dfx_reg_len(struct hclge_dev *hdev, int *len)
+{
+ u32 dfx_reg_type_num = ARRAY_SIZE(hclge_dfx_bd_offset_list);
+ int data_len_per_desc, data_len, bd_num, i;
+ int bd_num_list[BD_LIST_MAX_NUM];
int ret;
- *version = hdev->fw_version;
+ ret = hclge_get_dfx_reg_bd_num(hdev, bd_num_list, dfx_reg_type_num);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "Get dfx reg bd num fail, status is %d.\n", ret);
+ return ret;
+ }
- ret = hclge_get_regs_num(hdev, &regs_num_32_bit, &regs_num_64_bit);
+ data_len_per_desc = FIELD_SIZEOF(struct hclge_desc, data);
+ *len = 0;
+ for (i = 0; i < dfx_reg_type_num; i++) {
+ bd_num = bd_num_list[i];
+ data_len = data_len_per_desc * bd_num;
+ *len += (data_len / REG_LEN_PER_LINE + 1) * REG_LEN_PER_LINE;
+ }
+
+ return ret;
+}
+
+static int hclge_get_dfx_reg(struct hclge_dev *hdev, void *data)
+{
+ u32 dfx_reg_type_num = ARRAY_SIZE(hclge_dfx_bd_offset_list);
+ int bd_num, bd_num_max, buf_len, i;
+ int bd_num_list[BD_LIST_MAX_NUM];
+ struct hclge_desc *desc_src;
+ u32 *reg = data;
+ int ret;
+
+ ret = hclge_get_dfx_reg_bd_num(hdev, bd_num_list, dfx_reg_type_num);
if (ret) {
dev_err(&hdev->pdev->dev,
- "Get register number failed, ret = %d.\n", ret);
- return;
+ "Get dfx reg bd num fail, status is %d.\n", ret);
+ return ret;
+ }
+
+ bd_num_max = bd_num_list[0];
+ for (i = 1; i < dfx_reg_type_num; i++)
+ bd_num_max = max_t(int, bd_num_max, bd_num_list[i]);
+
+ buf_len = sizeof(*desc_src) * bd_num_max;
+ desc_src = kzalloc(buf_len, GFP_KERNEL);
+ if (!desc_src) {
+ dev_err(&hdev->pdev->dev, "%s kzalloc failed\n", __func__);
+ return -ENOMEM;
}
+ for (i = 0; i < dfx_reg_type_num; i++) {
+ bd_num = bd_num_list[i];
+ ret = hclge_dfx_reg_cmd_send(hdev, desc_src, bd_num,
+ hclge_dfx_reg_opcode_list[i]);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "Get dfx reg fail, status is %d.\n", ret);
+ break;
+ }
+
+ reg += hclge_dfx_reg_fetch_data(desc_src, bd_num, reg);
+ }
+
+ kfree(desc_src);
+ return ret;
+}
+
+static int hclge_fetch_pf_reg(struct hclge_dev *hdev, void *data,
+ struct hnae3_knic_private_info *kinfo)
+{
+#define HCLGE_RING_REG_OFFSET 0x200
+#define HCLGE_RING_INT_REG_OFFSET 0x4
+
+ int i, j, reg_num, separator_num;
+ int data_num_sum;
+ u32 *reg = data;
+
/* fetching per-PF registers values from PF PCIe register space */
- reg_um = sizeof(cmdq_reg_addr_list) / sizeof(u32);
- separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE;
- for (i = 0; i < reg_um; i++)
+ reg_num = ARRAY_SIZE(cmdq_reg_addr_list);
+ separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
+ for (i = 0; i < reg_num; i++)
*reg++ = hclge_read_dev(&hdev->hw, cmdq_reg_addr_list[i]);
for (i = 0; i < separator_num; i++)
*reg++ = SEPARATOR_VALUE;
+ data_num_sum = reg_num + separator_num;
- reg_um = sizeof(common_reg_addr_list) / sizeof(u32);
- separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE;
- for (i = 0; i < reg_um; i++)
+ reg_num = ARRAY_SIZE(common_reg_addr_list);
+ separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
+ for (i = 0; i < reg_num; i++)
*reg++ = hclge_read_dev(&hdev->hw, common_reg_addr_list[i]);
for (i = 0; i < separator_num; i++)
*reg++ = SEPARATOR_VALUE;
+ data_num_sum += reg_num + separator_num;
- reg_um = sizeof(ring_reg_addr_list) / sizeof(u32);
- separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE;
+ reg_num = ARRAY_SIZE(ring_reg_addr_list);
+ separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
for (j = 0; j < kinfo->num_tqps; j++) {
- for (i = 0; i < reg_um; i++)
+ for (i = 0; i < reg_num; i++)
*reg++ = hclge_read_dev(&hdev->hw,
ring_reg_addr_list[i] +
- 0x200 * j);
+ HCLGE_RING_REG_OFFSET * j);
for (i = 0; i < separator_num; i++)
*reg++ = SEPARATOR_VALUE;
}
+ data_num_sum += (reg_num + separator_num) * kinfo->num_tqps;
- reg_um = sizeof(tqp_intr_reg_addr_list) / sizeof(u32);
- separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE;
+ reg_num = ARRAY_SIZE(tqp_intr_reg_addr_list);
+ separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
for (j = 0; j < hdev->num_msi_used - 1; j++) {
- for (i = 0; i < reg_um; i++)
+ for (i = 0; i < reg_num; i++)
*reg++ = hclge_read_dev(&hdev->hw,
tqp_intr_reg_addr_list[i] +
- 4 * j);
+ HCLGE_RING_INT_REG_OFFSET * j);
for (i = 0; i < separator_num; i++)
*reg++ = SEPARATOR_VALUE;
}
+ data_num_sum += (reg_num + separator_num) * (hdev->num_msi_used - 1);
+
+ return data_num_sum;
+}
+
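
For context on the two stride constants introduced above: each TQP's copy of the ring registers repeats at a fixed 0x200 byte stride, and each vector's interrupt registers at a 0x4 stride. The sketch below shows only the addressing pattern; the base addresses are made up, since the real ones come from the reg_addr_list tables not shown in this hunk:

#include <stdio.h>

#define HCLGE_RING_REG_OFFSET		0x200
#define HCLGE_RING_INT_REG_OFFSET	0x4

int main(void)
{
	unsigned int ring_base = 0x10000;	/* hypothetical ring register */
	unsigned int intr_base = 0x20000;	/* hypothetical interrupt register */
	int j;

	/* queue j's copy of a register sits at base + stride * j */
	for (j = 0; j < 4; j++)
		printf("tqp %d: ring reg 0x%05x, intr reg 0x%05x\n", j,
		       ring_base + HCLGE_RING_REG_OFFSET * j,
		       intr_base + HCLGE_RING_INT_REG_OFFSET * j);
	return 0;
}
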
+static int hclge_get_regs_len(struct hnae3_handle *handle)
+{
+ int cmdq_lines, common_lines, ring_lines, tqp_intr_lines;
+ struct hnae3_knic_private_info *kinfo = &handle->kinfo;
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hclge_dev *hdev = vport->back;
+ int regs_num_32_bit, regs_num_64_bit, dfx_regs_len;
+ int regs_lines_32_bit, regs_lines_64_bit;
+ int ret;
+
+ ret = hclge_get_regs_num(hdev, &regs_num_32_bit, &regs_num_64_bit);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "Get register number failed, ret = %d.\n", ret);
+ return ret;
+ }
+
+ ret = hclge_get_dfx_reg_len(hdev, &dfx_regs_len);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "Get dfx reg len failed, ret = %d.\n", ret);
+ return ret;
+ }
+
+ cmdq_lines = sizeof(cmdq_reg_addr_list) / REG_LEN_PER_LINE +
+ REG_SEPARATOR_LINE;
+ common_lines = sizeof(common_reg_addr_list) / REG_LEN_PER_LINE +
+ REG_SEPARATOR_LINE;
+ ring_lines = sizeof(ring_reg_addr_list) / REG_LEN_PER_LINE +
+ REG_SEPARATOR_LINE;
+ tqp_intr_lines = sizeof(tqp_intr_reg_addr_list) / REG_LEN_PER_LINE +
+ REG_SEPARATOR_LINE;
+ regs_lines_32_bit = regs_num_32_bit * sizeof(u32) / REG_LEN_PER_LINE +
+ REG_SEPARATOR_LINE;
+ regs_lines_64_bit = regs_num_64_bit * sizeof(u64) / REG_LEN_PER_LINE +
+ REG_SEPARATOR_LINE;
+
+ return (cmdq_lines + common_lines + ring_lines * kinfo->num_tqps +
+ tqp_intr_lines * (hdev->num_msi_used - 1) + regs_lines_32_bit +
+ regs_lines_64_bit) * REG_LEN_PER_LINE + dfx_regs_len;
+}
+
+static void hclge_get_regs(struct hnae3_handle *handle, u32 *version,
+ void *data)
+{
+ struct hnae3_knic_private_info *kinfo = &handle->kinfo;
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hclge_dev *hdev = vport->back;
+ u32 regs_num_32_bit, regs_num_64_bit;
+ int i, reg_num, separator_num, ret;
+ u32 *reg = data;
+
+ *version = hdev->fw_version;
+
+ ret = hclge_get_regs_num(hdev, &regs_num_32_bit, &regs_num_64_bit);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "Get register number failed, ret = %d.\n", ret);
+ return;
+ }
+
+ reg += hclge_fetch_pf_reg(hdev, reg, kinfo);
- /* fetching PF common registers values from firmware */
ret = hclge_get_32_bit_regs(hdev, regs_num_32_bit, reg);
if (ret) {
dev_err(&hdev->pdev->dev,
"Get 32 bit register failed, ret = %d.\n", ret);
return;
}
+ reg_num = regs_num_32_bit;
+ reg += reg_num;
+ separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
+ for (i = 0; i < separator_num; i++)
+ *reg++ = SEPARATOR_VALUE;
- reg += regs_num_32_bit;
ret = hclge_get_64_bit_regs(hdev, regs_num_64_bit, reg);
- if (ret)
+ if (ret) {
dev_err(&hdev->pdev->dev,
"Get 64 bit register failed, ret = %d.\n", ret);
+ return;
+ }
+ reg_num = regs_num_64_bit * 2;
+ reg += reg_num;
+ separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
+ for (i = 0; i < separator_num; i++)
+ *reg++ = SEPARATOR_VALUE;
+
+ ret = hclge_get_dfx_reg(hdev, reg);
+ if (ret)
+ dev_err(&hdev->pdev->dev,
+ "Get dfx register failed, ret = %d.\n", ret);
}
static int hclge_set_led_status(struct hclge_dev *hdev, u8 locate_led_status)
@@ -9452,7 +10072,7 @@ static const struct hnae3_ae_ops hclge_ops = {
.set_mtu = hclge_set_mtu,
.reset_queue = hclge_reset_tqp,
.get_stats = hclge_get_stats,
- .get_mac_pause_stats = hclge_get_mac_pause_stat,
+ .get_mac_stats = hclge_get_mac_stat,
.update_stats = hclge_update_stats,
.get_strings = hclge_get_strings,
.get_sset_count = hclge_get_sset_count,
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
index 6a12285f4c76..870550fa9ff1 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
@@ -119,7 +119,7 @@
#define HCLGE_DEFAULT_UMV_SPACE_PER_PF \
(HCLGE_UMV_TBL_SIZE / HCLGE_MAX_PF_NUM)
-#define HCLGE_TQP_RESET_TRY_TIMES 10
+#define HCLGE_TQP_RESET_TRY_TIMES 200
#define HCLGE_PHY_PAGE_MDIX 0
#define HCLGE_PHY_PAGE_COPPER 0
@@ -148,6 +148,8 @@ enum HLCGE_PORT_TYPE {
NETWORK_PORT
};
+#define PF_VPORT_ID 0
+
#define HCLGE_PF_ID_S 0
#define HCLGE_PF_ID_M GENMASK(2, 0)
#define HCLGE_VF_ID_S 3
@@ -164,6 +166,7 @@ enum HLCGE_PORT_TYPE {
#define HCLGE_GLOBAL_RESET_BIT 0
#define HCLGE_CORE_RESET_BIT 1
#define HCLGE_IMP_RESET_BIT 2
+#define HCLGE_RESET_INT_M GENMASK(2, 0)
#define HCLGE_FUN_RST_ING 0x20C00
#define HCLGE_FUN_RST_ING_B 0
@@ -178,6 +181,8 @@ enum HLCGE_PORT_TYPE {
#define HCLGE_VECTOR0_RX_CMDQ_INT_B 1
#define HCLGE_VECTOR0_IMP_RESET_INT_B 1
+#define HCLGE_VECTOR0_IMP_CMDQ_ERR_B 4U
+#define HCLGE_VECTOR0_IMP_RD_POISON_B 5U
#define HCLGE_MAC_DEFAULT_FRAME \
(ETH_HLEN + ETH_FCS_LEN + 2 * VLAN_HLEN + ETH_DATA_LEN)
@@ -302,6 +307,13 @@ enum hclge_fc_mode {
HCLGE_FC_DEFAULT
};
+enum hclge_link_fail_code {
+ HCLGE_LF_NORMAL,
+ HCLGE_LF_REF_CLOCK_LOST,
+ HCLGE_LF_XSFP_TX_DISABLE,
+ HCLGE_LF_XSFP_ABSENT,
+};
+
#define HCLGE_PG_NUM 4
#define HCLGE_SCH_MODE_SP 0
#define HCLGE_SCH_MODE_DWRR 1
@@ -532,50 +544,6 @@ struct key_info {
u8 key_length; /* use bit as unit */
};
-static const struct key_info meta_data_key_info[] = {
- { PACKET_TYPE_ID, 6},
- { IP_FRAGEMENT, 1},
- { ROCE_TYPE, 1},
- { NEXT_KEY, 5},
- { VLAN_NUMBER, 2},
- { SRC_VPORT, 12},
- { DST_VPORT, 12},
- { TUNNEL_PACKET, 1},
-};
-
-static const struct key_info tuple_key_info[] = {
- { OUTER_DST_MAC, 48},
- { OUTER_SRC_MAC, 48},
- { OUTER_VLAN_TAG_FST, 16},
- { OUTER_VLAN_TAG_SEC, 16},
- { OUTER_ETH_TYPE, 16},
- { OUTER_L2_RSV, 16},
- { OUTER_IP_TOS, 8},
- { OUTER_IP_PROTO, 8},
- { OUTER_SRC_IP, 32},
- { OUTER_DST_IP, 32},
- { OUTER_L3_RSV, 16},
- { OUTER_SRC_PORT, 16},
- { OUTER_DST_PORT, 16},
- { OUTER_L4_RSV, 32},
- { OUTER_TUN_VNI, 24},
- { OUTER_TUN_FLOW_ID, 8},
- { INNER_DST_MAC, 48},
- { INNER_SRC_MAC, 48},
- { INNER_VLAN_TAG_FST, 16},
- { INNER_VLAN_TAG_SEC, 16},
- { INNER_ETH_TYPE, 16},
- { INNER_L2_RSV, 16},
- { INNER_IP_TOS, 8},
- { INNER_IP_PROTO, 8},
- { INNER_SRC_IP, 32},
- { INNER_DST_IP, 32},
- { INNER_L3_RSV, 16},
- { INNER_SRC_PORT, 16},
- { INNER_DST_PORT, 16},
- { INNER_L4_RSV, 32},
-};
-
#define MAX_KEY_LENGTH 400
#define MAX_KEY_DWORDS DIV_ROUND_UP(MAX_KEY_LENGTH / 8, 4)
#define MAX_KEY_BYTES (MAX_KEY_DWORDS * 4)
@@ -806,9 +774,8 @@ struct hclge_dev {
u16 adminq_work_limit; /* Num of admin receive queue desc to process */
unsigned long service_timer_period;
unsigned long service_timer_previous;
- struct timer_list service_timer;
struct timer_list reset_timer;
- struct work_struct service_task;
+ struct delayed_work service_task;
struct work_struct rst_service_task;
struct work_struct mbx_service_task;
@@ -864,6 +831,10 @@ struct hclge_dev {
DECLARE_KFIFO(mac_tnl_log, struct hclge_mac_tnl_stats,
HCLGE_MAC_TNL_LOG_SIZE);
+
+ /* affinity mask and notify for misc interrupt */
+ cpumask_t affinity_mask;
+ struct irq_affinity_notify affinity_notify;
};
/* VPort level vlan tag configuration for TX direction */
@@ -990,7 +961,6 @@ int hclge_buffer_alloc(struct hclge_dev *hdev);
int hclge_rss_init_hw(struct hclge_dev *hdev);
void hclge_rss_indir_init_cfg(struct hclge_dev *hdev);
-int hclge_inform_reset_assert_to_vf(struct hclge_vport *vport);
void hclge_mbx_handler(struct hclge_dev *hdev);
int hclge_reset_tqp(struct hnae3_handle *handle, u16 queue_id);
void hclge_reset_vf_queue(struct hclge_vport *vport, u16 queue_id);
@@ -1018,4 +988,9 @@ int hclge_update_port_base_vlan_cfg(struct hclge_vport *vport, u16 state,
int hclge_push_vf_port_base_vlan_info(struct hclge_vport *vport, u8 vfid,
u16 state, u16 vlan_tag, u16 qos,
u16 vlan_proto);
+void hclge_task_schedule(struct hclge_dev *hdev, unsigned long delay_time);
+int hclge_query_bd_num_cmd_send(struct hclge_dev *hdev,
+ struct hclge_desc *desc);
+void hclge_report_hw_error(struct hclge_dev *hdev,
+ enum hnae3_hw_error_type type);
#endif
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
index 690b9990215c..f5da28a60d00 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
@@ -479,7 +479,7 @@ static void hclge_mbx_reset_vf_queue(struct hclge_vport *vport,
hclge_reset_vf_queue(vport, queue_id);
- /* send response msg to VF after queue reset complete*/
+ /* send response msg to VF after queue reset complete */
hclge_gen_resp_to_vf(vport, mbx_req, 0, NULL, 0);
}
@@ -545,6 +545,36 @@ static int hclge_get_rss_key(struct hclge_vport *vport,
HCLGE_RSS_MBX_RESP_LEN);
}
+static void hclge_link_fail_parse(struct hclge_dev *hdev, u8 link_fail_code)
+{
+ switch (link_fail_code) {
+ case HCLGE_LF_REF_CLOCK_LOST:
+ dev_warn(&hdev->pdev->dev, "Reference clock lost!\n");
+ break;
+ case HCLGE_LF_XSFP_TX_DISABLE:
+ dev_warn(&hdev->pdev->dev, "SFP tx is disabled!\n");
+ break;
+ case HCLGE_LF_XSFP_ABSENT:
+ dev_warn(&hdev->pdev->dev, "SFP is absent!\n");
+ break;
+ default:
+ break;
+ }
+}
+
+static void hclge_handle_link_change_event(struct hclge_dev *hdev,
+ struct hclge_mbx_vf_to_pf_cmd *req)
+{
+#define LINK_STATUS_OFFSET 1
+#define LINK_FAIL_CODE_OFFSET 2
+
+ clear_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state);
+ hclge_task_schedule(hdev, 0);
+
+ if (!req->msg[LINK_STATUS_OFFSET])
+ hclge_link_fail_parse(hdev, req->msg[LINK_FAIL_CODE_OFFSET]);
+}
+
static bool hclge_cmd_crq_empty(struct hclge_hw *hw)
{
u32 tail = hclge_read_dev(hw, HCLGE_NIC_CRQ_TAIL_REG);
@@ -552,6 +582,15 @@ static bool hclge_cmd_crq_empty(struct hclge_hw *hw)
return tail == hw->cmq.crq.next_to_use;
}
+static void hclge_handle_ncsi_error(struct hclge_dev *hdev)
+{
+ struct hnae3_ae_dev *ae_dev = hdev->ae_dev;
+
+ ae_dev->ops->set_default_reset_request(ae_dev, HNAE3_GLOBAL_RESET);
+ dev_warn(&hdev->pdev->dev, "requesting reset due to NCSI error\n");
+ ae_dev->ops->reset_event(hdev->pdev, NULL);
+}
+
void hclge_mbx_handler(struct hclge_dev *hdev)
{
struct hclge_cmq_ring *crq = &hdev->hw.cmq.crq;
@@ -707,6 +746,12 @@ void hclge_mbx_handler(struct hclge_dev *hdev)
"PF fail(%d) to media type for VF\n",
ret);
break;
+ case HCLGE_MBX_PUSH_LINK_STATUS:
+ hclge_handle_link_change_event(hdev, req);
+ break;
+ case HCLGE_MBX_NCSI_ERROR:
+ hclge_handle_ncsi_error(hdev);
+ break;
default:
dev_err(&hdev->pdev->dev,
"un-supported mailbox message, code = %d\n",
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c
index abb1b438564e..dc4dfd4602ab 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c
@@ -231,6 +231,8 @@ int hclge_mac_connect_phy(struct hnae3_handle *handle)
linkmode_clear_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT,
phydev->advertising);
+ phy_attached_info(phydev);
+
return 0;
}
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
index 3f41fa2bc414..e829101d576c 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
@@ -404,8 +404,8 @@ static int hclge_tm_port_shaper_cfg(struct hclge_dev *hdev)
{
struct hclge_port_shapping_cmd *shap_cfg_cmd;
struct hclge_desc desc;
- u32 shapping_para = 0;
u8 ir_u, ir_b, ir_s;
+ u32 shapping_para;
int ret;
ret = hclge_shaper_para_calc(hdev->hw.mac.speed,
@@ -650,12 +650,8 @@ static void hclge_pfc_info_init(struct hclge_dev *hdev)
}
}
-static int hclge_tm_schd_info_init(struct hclge_dev *hdev)
+static void hclge_tm_schd_info_init(struct hclge_dev *hdev)
{
- if ((hdev->tx_sch_mode != HCLGE_FLAG_TC_BASE_SCH_MODE) &&
- (hdev->tm_info.num_pg != 1))
- return -EINVAL;
-
hclge_tm_pg_info_init(hdev);
hclge_tm_tc_info_init(hdev);
@@ -663,8 +659,6 @@ static int hclge_tm_schd_info_init(struct hclge_dev *hdev)
hclge_tm_vport_info_update(hdev);
hclge_pfc_info_init(hdev);
-
- return 0;
}
static int hclge_tm_pg_to_pri_map(struct hclge_dev *hdev)
@@ -1428,15 +1422,15 @@ int hclge_tm_init_hw(struct hclge_dev *hdev, bool init)
int hclge_tm_schd_init(struct hclge_dev *hdev)
{
- int ret;
-
/* fc_mode is HCLGE_FC_FULL on reset */
hdev->tm_info.fc_mode = HCLGE_FC_FULL;
hdev->fc_mode_last_time = hdev->tm_info.fc_mode;
- ret = hclge_tm_schd_info_init(hdev);
- if (ret)
- return ret;
+ if (hdev->tx_sch_mode != HCLGE_FLAG_TC_BASE_SCH_MODE &&
+ hdev->tm_info.num_pg != 1)
+ return -EINVAL;
+
+ hclge_tm_schd_info_init(hdev);
return hclge_tm_init_hw(hdev, true);
}
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c
index 652b796044e3..4c2c9458648f 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c
@@ -43,7 +43,7 @@ static int hclgevf_cmd_csq_clean(struct hclgevf_hw *hw)
{
struct hclgevf_dev *hdev = container_of(hw, struct hclgevf_dev, hw);
struct hclgevf_cmq_ring *csq = &hw->cmq.csq;
- int clean = 0;
+ int clean;
u32 head;
head = hclgevf_read_dev(hw, HCLGEVF_NIC_CSQ_HEAD_REG);
@@ -97,7 +97,9 @@ static void hclgevf_cmd_config_regs(struct hclgevf_cmq_ring *ring)
reg_val = (u32)((ring->desc_dma_addr >> 31) >> 1);
hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_BASEADDR_H_REG, reg_val);
- reg_val = (ring->desc_num >> HCLGEVF_NIC_CMQ_DESC_NUM_S);
+ reg_val = hclgevf_read_dev(hw, HCLGEVF_NIC_CSQ_DEPTH_REG);
+ reg_val &= HCLGEVF_NIC_SW_RST_RDY;
+ reg_val |= (ring->desc_num >> HCLGEVF_NIC_CMQ_DESC_NUM_S);
hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_DEPTH_REG, reg_val);
hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_HEAD_REG, 0);
@@ -405,7 +407,15 @@ int hclgevf_cmd_init(struct hclgevf_dev *hdev)
}
hdev->fw_version = version;
- dev_info(&hdev->pdev->dev, "The firmware version is %08x\n", version);
+ dev_info(&hdev->pdev->dev, "The firmware version is %lu.%lu.%lu.%lu\n",
+ hnae3_get_field(version, HNAE3_FW_VERSION_BYTE3_MASK,
+ HNAE3_FW_VERSION_BYTE3_SHIFT),
+ hnae3_get_field(version, HNAE3_FW_VERSION_BYTE2_MASK,
+ HNAE3_FW_VERSION_BYTE2_SHIFT),
+ hnae3_get_field(version, HNAE3_FW_VERSION_BYTE1_MASK,
+ HNAE3_FW_VERSION_BYTE1_SHIFT),
+ hnae3_get_field(version, HNAE3_FW_VERSION_BYTE0_MASK,
+ HNAE3_FW_VERSION_BYTE0_SHIFT));
return 0;
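
The dotted firmware version printed above is just the four byte lanes of the 32-bit version word. A standalone sketch of the mask/shift extraction follows; the plain 0xFF byte masks are an assumption standing in for the HNAE3_FW_VERSION_BYTE* definitions, which live outside this diff:

#include <stdio.h>

/* assumed byte-lane layout: byte3.byte2.byte1.byte0, high byte first */
static unsigned int get_field(unsigned int value, unsigned int mask,
			      unsigned int shift)
{
	return (value & mask) >> shift;
}

int main(void)
{
	unsigned int version = 0x01020304;	/* hypothetical firmware word */

	printf("The firmware version is %u.%u.%u.%u\n",
	       get_field(version, 0xFF000000, 24),
	       get_field(version, 0x00FF0000, 16),
	       get_field(version, 0x0000FF00, 8),
	       get_field(version, 0x000000FF, 0));
	return 0;
}
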
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.h b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.h
index 127a434a56f3..f830eef02e5c 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.h
@@ -244,8 +244,11 @@ struct hclgevf_cfg_tx_queue_pointer_cmd {
#define HCLGEVF_NIC_CRQ_DEPTH_REG 0x27020
#define HCLGEVF_NIC_CRQ_TAIL_REG 0x27024
#define HCLGEVF_NIC_CRQ_HEAD_REG 0x27028
-#define HCLGEVF_NIC_CMQ_EN_B 16
-#define HCLGEVF_NIC_CMQ_ENABLE BIT(HCLGEVF_NIC_CMQ_EN_B)
+
+/* this bit indicates that the driver is ready for hardware reset */
+#define HCLGEVF_NIC_SW_RST_RDY_B 16
+#define HCLGEVF_NIC_SW_RST_RDY BIT(HCLGEVF_NIC_SW_RST_RDY_B)
+
#define HCLGEVF_NIC_CMQ_DESC_NUM 1024
#define HCLGEVF_NIC_CMQ_DESC_NUM_S 3
#define HCLGEVF_NIC_CMDQ_INT_SRC_REG 0x27100
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
index a13a0e101c3b..594cae8c7410 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
@@ -1269,7 +1269,7 @@ static int hclgevf_set_vlan_filter(struct hnae3_handle *handle,
HCLGE_MBX_VLAN_FILTER, msg_data,
HCLGEVF_VLAN_MBX_MSG_LEN, false, NULL, 0);
- /* When remove hw vlan filter failed, record the vlan id,
+ /* when removing the hw vlan filter fails, record the vlan id,
* and try to remove it from hw later, to be consistent
* with the stack.
*/
@@ -1396,19 +1396,22 @@ static int hclgevf_reset_wait(struct hclgevf_dev *hdev)
u32 val;
int ret;
- /* wait to check the hardware reset completion status */
- val = hclgevf_read_dev(&hdev->hw, HCLGEVF_RST_ING);
- dev_info(&hdev->pdev->dev, "checking vf resetting status: %x\n", val);
-
if (hdev->reset_type == HNAE3_FLR_RESET)
return hclgevf_flr_poll_timeout(hdev,
HCLGEVF_RESET_WAIT_US,
HCLGEVF_RESET_WAIT_CNT);
-
- ret = readl_poll_timeout(hdev->hw.io_base + HCLGEVF_RST_ING, val,
- !(val & HCLGEVF_RST_ING_BITS),
- HCLGEVF_RESET_WAIT_US,
- HCLGEVF_RESET_WAIT_TIMEOUT_US);
+ else if (hdev->reset_type == HNAE3_VF_RESET)
+ ret = readl_poll_timeout(hdev->hw.io_base +
+ HCLGEVF_VF_RST_ING, val,
+ !(val & HCLGEVF_VF_RST_ING_BIT),
+ HCLGEVF_RESET_WAIT_US,
+ HCLGEVF_RESET_WAIT_TIMEOUT_US);
+ else
+ ret = readl_poll_timeout(hdev->hw.io_base +
+ HCLGEVF_RST_ING, val,
+ !(val & HCLGEVF_RST_ING_BITS),
+ HCLGEVF_RESET_WAIT_US,
+ HCLGEVF_RESET_WAIT_TIMEOUT_US);
/* hardware completion status should be available by this time */
if (ret) {
@@ -1426,6 +1429,20 @@ static int hclgevf_reset_wait(struct hclgevf_dev *hdev)
return 0;
}
+static void hclgevf_reset_handshake(struct hclgevf_dev *hdev, bool enable)
+{
+ u32 reg_val;
+
+ reg_val = hclgevf_read_dev(&hdev->hw, HCLGEVF_NIC_CSQ_DEPTH_REG);
+ if (enable)
+ reg_val |= HCLGEVF_NIC_SW_RST_RDY;
+ else
+ reg_val &= ~HCLGEVF_NIC_SW_RST_RDY;
+
+ hclgevf_write_dev(&hdev->hw, HCLGEVF_NIC_CSQ_DEPTH_REG,
+ reg_val);
+}
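
The helper above piggybacks a software flag on a hardware register: bit 16 of the CSQ depth register (HCLGEVF_NIC_SW_RST_RDY) now means "driver ready for reset", which is why both this helper and the hclgevf_cmd_config_regs() change earlier in the diff read-modify-write the register instead of overwriting the descriptor-number field. A sketch of the bit handling with a made-up register image:

#include <stdio.h>

#define NIC_SW_RST_RDY		(1U << 16)	/* matches bit 16 above */
#define NIC_CMQ_DESC_NUM_S	3

static unsigned int set_handshake(unsigned int reg, int enable)
{
	/* touch only the ready bit, preserve the depth field */
	return enable ? (reg | NIC_SW_RST_RDY) : (reg & ~NIC_SW_RST_RDY);
}

int main(void)
{
	unsigned int reg = 1024 >> NIC_CMQ_DESC_NUM_S; /* depth field only */

	reg = set_handshake(reg, 1);
	printf("ready set:     0x%08x\n", reg);
	reg = set_handshake(reg, 0);
	printf("ready cleared: 0x%08x\n", reg);
	return 0;
}
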
+
static int hclgevf_reset_stack(struct hclgevf_dev *hdev)
{
int ret;
@@ -1448,7 +1465,14 @@ static int hclgevf_reset_stack(struct hclgevf_dev *hdev)
if (ret)
return ret;
- return hclgevf_notify_client(hdev, HNAE3_RESTORE_CLIENT);
+ ret = hclgevf_notify_client(hdev, HNAE3_RESTORE_CLIENT);
+ if (ret)
+ return ret;
+
+ /* clear handshake status with IMP */
+ hclgevf_reset_handshake(hdev, false);
+
+ return 0;
}
static int hclgevf_reset_prepare_wait(struct hclgevf_dev *hdev)
@@ -1474,8 +1498,7 @@ static int hclgevf_reset_prepare_wait(struct hclgevf_dev *hdev)
set_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state);
/* inform hardware that preparatory work is done */
msleep(HCLGEVF_RESET_SYNC_TIME);
- hclgevf_write_dev(&hdev->hw, HCLGEVF_NIC_CSQ_DEPTH_REG,
- HCLGEVF_NIC_CMQ_ENABLE);
+ hclgevf_reset_handshake(hdev, true);
dev_info(&hdev->pdev->dev, "prepare reset(%d) wait done, ret:%d\n",
hdev->reset_type, ret);
@@ -1484,6 +1507,8 @@ static int hclgevf_reset_prepare_wait(struct hclgevf_dev *hdev)
static void hclgevf_reset_err_handle(struct hclgevf_dev *hdev)
{
+ /* recover handshake status with IMP when reset fails */
+ hclgevf_reset_handshake(hdev, true);
hdev->rst_stats.rst_fail_cnt++;
dev_err(&hdev->pdev->dev, "failed to reset VF(%d)\n",
hdev->rst_stats.rst_fail_cnt);
@@ -1494,9 +1519,6 @@ static void hclgevf_reset_err_handle(struct hclgevf_dev *hdev)
if (hclgevf_is_reset_pending(hdev)) {
set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state);
hclgevf_reset_task_schedule(hdev);
- } else {
- hclgevf_write_dev(&hdev->hw, HCLGEVF_NIC_CSQ_DEPTH_REG,
- HCLGEVF_NIC_CMQ_ENABLE);
}
}
@@ -1539,7 +1561,7 @@ static int hclgevf_reset(struct hclgevf_dev *hdev)
rtnl_lock();
- /* now, re-initialize the nic client and ae device*/
+ /* now, re-initialize the nic client and ae device */
ret = hclgevf_reset_stack(hdev);
if (ret) {
dev_err(&hdev->pdev->dev, "failed to reset VF stack\n");
@@ -1762,9 +1784,8 @@ static void hclgevf_reset_service_task(struct work_struct *work)
* 1b and 2. cases but we will not get any intimation about 1a
* from PF as cmdq would be in unreliable state i.e. mailbox
* communication between PF and VF would be broken.
- */
-
- /* if we are never geting into pending state it means either:
+ *
+ * if we are never getting into the pending state it means either:
* 1. PF is not receiving our request which could be due to IMP
* reset
* 2. PF is screwed
@@ -1867,29 +1888,45 @@ static void hclgevf_clear_event_cause(struct hclgevf_dev *hdev, u32 regclr)
static enum hclgevf_evt_cause hclgevf_check_evt_cause(struct hclgevf_dev *hdev,
u32 *clearval)
{
- u32 cmdq_src_reg, rst_ing_reg;
+ u32 val, cmdq_stat_reg, rst_ing_reg;
/* fetch the events from their corresponding regs */
- cmdq_src_reg = hclgevf_read_dev(&hdev->hw,
- HCLGEVF_VECTOR0_CMDQ_SRC_REG);
+ cmdq_stat_reg = hclgevf_read_dev(&hdev->hw,
+ HCLGEVF_VECTOR0_CMDQ_STAT_REG);
- if (BIT(HCLGEVF_VECTOR0_RST_INT_B) & cmdq_src_reg) {
+ if (BIT(HCLGEVF_VECTOR0_RST_INT_B) & cmdq_stat_reg) {
rst_ing_reg = hclgevf_read_dev(&hdev->hw, HCLGEVF_RST_ING);
dev_info(&hdev->pdev->dev,
"receive reset interrupt 0x%x!\n", rst_ing_reg);
set_bit(HNAE3_VF_RESET, &hdev->reset_pending);
set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state);
set_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state);
- cmdq_src_reg &= ~BIT(HCLGEVF_VECTOR0_RST_INT_B);
- *clearval = cmdq_src_reg;
+ *clearval = ~(1U << HCLGEVF_VECTOR0_RST_INT_B);
hdev->rst_stats.vf_rst_cnt++;
+ /* set up VF hardware reset status; the PF will clear
+ * this status once it has finished initializing.
+ */
+ val = hclgevf_read_dev(&hdev->hw, HCLGEVF_VF_RST_ING);
+ hclgevf_write_dev(&hdev->hw, HCLGEVF_VF_RST_ING,
+ val | HCLGEVF_VF_RST_ING_BIT);
return HCLGEVF_VECTOR0_EVENT_RST;
}
/* check for vector0 mailbox(=CMDQ RX) event source */
- if (BIT(HCLGEVF_VECTOR0_RX_CMDQ_INT_B) & cmdq_src_reg) {
- cmdq_src_reg &= ~BIT(HCLGEVF_VECTOR0_RX_CMDQ_INT_B);
- *clearval = cmdq_src_reg;
+ if (BIT(HCLGEVF_VECTOR0_RX_CMDQ_INT_B) & cmdq_stat_reg) {
+ /* for revision 0x21, writing 0 to a bit of the clear
+ * register clears the corresponding interrupt, while
+ * writing 1 keeps the old value.
+ * for revision 0x20, the clear register is a plain read &
+ * write register, so we should just write 0 to the bit we
+ * are handling, and keep the other bits as in cmdq_stat_reg.
+ */
+ if (hdev->pdev->revision >= 0x21)
+ *clearval = ~(1U << HCLGEVF_VECTOR0_RX_CMDQ_INT_B);
+ else
+ *clearval = cmdq_stat_reg &
+ ~BIT(HCLGEVF_VECTOR0_RX_CMDQ_INT_B);
+
return HCLGEVF_VECTOR0_EVENT_MBX;
}
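
The two conventions in the comment above yield different clear values for the same event. Under the rev 0x21 write-0-to-clear scheme the clear value is all ones except the handled bit; under the rev 0x20 read/write scheme it is the current status image with only that bit zeroed. A sketch with a hypothetical status value:

#include <stdio.h>

#define RX_CMDQ_INT_B 1	/* bit position of the mailbox event */

int main(void)
{
	unsigned int cmdq_stat_reg = 0x0000000A;	/* hypothetical image */
	unsigned int clear_rev21 = ~(1U << RX_CMDQ_INT_B);
	unsigned int clear_rev20 = cmdq_stat_reg & ~(1U << RX_CMDQ_INT_B);

	printf("rev 0x21 clearval: 0x%08x\n", clear_rev21);
	printf("rev 0x20 clearval: 0x%08x\n", clear_rev20);
	return 0;
}
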
@@ -2265,7 +2302,7 @@ static void hclgevf_uninit_msi(struct hclgevf_dev *hdev)
static int hclgevf_misc_irq_init(struct hclgevf_dev *hdev)
{
- int ret = 0;
+ int ret;
hclgevf_get_misc_vector(hdev);
@@ -2695,7 +2732,8 @@ static int hclgevf_init_hdev(struct hclgevf_dev *hdev)
}
hdev->last_reset_time = jiffies;
- pr_info("finished initializing %s driver\n", HCLGEVF_DRIVER_NAME);
+ dev_info(&hdev->pdev->dev, "finished initializing %s driver\n",
+ HCLGEVF_DRIVER_NAME);
return 0;
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h
index 5a9e30998a8f..bdde3afc286b 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h
@@ -87,6 +87,8 @@
/* Vector0 interrupt CMDQ event source register(RW) */
#define HCLGEVF_VECTOR0_CMDQ_SRC_REG 0x27100
+/* Vector0 interrupt CMDQ event status register(RO) */
+#define HCLGEVF_VECTOR0_CMDQ_STAT_REG 0x27104
/* CMDQ register bits for RX event(=MBX event) */
#define HCLGEVF_VECTOR0_RX_CMDQ_INT_B 1
/* RST register bits for RESET event */
@@ -103,6 +105,9 @@
(HCLGEVF_FUN_RST_ING_BIT | HCLGEVF_GLOBAL_RST_ING_BIT | \
HCLGEVF_CORE_RST_ING_BIT | HCLGEVF_IMP_RST_ING_BIT)
+#define HCLGEVF_VF_RST_ING 0x07008
+#define HCLGEVF_VF_RST_ING_BIT BIT(16)
+
#define HCLGEVF_RSS_IND_TBL_SIZE 512
#define HCLGEVF_RSS_SET_BITMAP_MSK 0xffff
#define HCLGEVF_RSS_KEY_SIZE 40
@@ -120,7 +125,7 @@
#define HCLGEVF_S_IP_BIT BIT(3)
#define HCLGEVF_V_TAG_BIT BIT(4)
-#define HCLGEVF_STATS_TIMER_INTERVAL (36)
+#define HCLGEVF_STATS_TIMER_INTERVAL 36U
enum hclgevf_evt_cause {
HCLGEVF_VECTOR0_EVENT_RST,
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c
index 6a96987bd8f0..a108191c9e50 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c
@@ -277,9 +277,9 @@ void hclgevf_mbx_async_handler(struct hclgevf_dev *hdev)
switch (msg_q[0]) {
case HCLGE_MBX_LINK_STAT_CHANGE:
- link_status = le16_to_cpu(msg_q[1]);
+ link_status = msg_q[1];
memcpy(&speed, &msg_q[2], sizeof(speed));
- duplex = (u8)le16_to_cpu(msg_q[4]);
+ duplex = (u8)msg_q[4];
/* update upper layer with the new link status */
hclgevf_update_link_status(hdev, link_status);
@@ -287,7 +287,7 @@ void hclgevf_mbx_async_handler(struct hclgevf_dev *hdev)
break;
case HCLGE_MBX_LINK_STAT_MODE:
- idx = (u8)le16_to_cpu(msg_q[1]);
+ idx = (u8)msg_q[1];
if (idx)
memcpy(&hdev->hw.mac.supported, &msg_q[2],
sizeof(unsigned long));
@@ -301,14 +301,14 @@ void hclgevf_mbx_async_handler(struct hclgevf_dev *hdev)
* has been completely reset. After this stack should
* eventually be re-initialized.
*/
- reset_type = le16_to_cpu(msg_q[1]);
+ reset_type = (enum hnae3_reset_type)msg_q[1];
set_bit(reset_type, &hdev->reset_pending);
set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state);
hclgevf_reset_task_schedule(hdev);
break;
case HCLGE_MBX_PUSH_VLAN_INFO:
- state = le16_to_cpu(msg_q[1]);
+ state = msg_q[1];
vlan_info = &msg_q[1];
hclgevf_update_port_base_vlan_info(hdev, state,
(u8 *)vlan_info, 8);
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_tx.c b/drivers/net/ethernet/huawei/hinic/hinic_tx.c
index 9c78251f9c39..0e13d1c7e474 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_tx.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_tx.c
@@ -136,7 +136,7 @@ static int tx_map_skb(struct hinic_dev *nic_dev, struct sk_buff *skb,
struct hinic_hwdev *hwdev = nic_dev->hwdev;
struct hinic_hwif *hwif = hwdev->hwif;
struct pci_dev *pdev = hwif->pdev;
- struct skb_frag_struct *frag;
+ skb_frag_t *frag;
dma_addr_t dma_addr;
int i, j;
diff --git a/drivers/net/ethernet/ibm/ehea/ehea_main.c b/drivers/net/ethernet/ibm/ehea/ehea_main.c
index cca71ba7a74a..13e30eba5349 100644
--- a/drivers/net/ethernet/ibm/ehea/ehea_main.c
+++ b/drivers/net/ethernet/ibm/ehea/ehea_main.c
@@ -1577,20 +1577,16 @@ static int ehea_clean_portres(struct ehea_port *port, struct ehea_port_res *pr)
ehea_destroy_eq(pr->eq);
for (i = 0; i < pr->rq1_skba.len; i++)
- if (pr->rq1_skba.arr[i])
- dev_kfree_skb(pr->rq1_skba.arr[i]);
+ dev_kfree_skb(pr->rq1_skba.arr[i]);
for (i = 0; i < pr->rq2_skba.len; i++)
- if (pr->rq2_skba.arr[i])
- dev_kfree_skb(pr->rq2_skba.arr[i]);
+ dev_kfree_skb(pr->rq2_skba.arr[i]);
for (i = 0; i < pr->rq3_skba.len; i++)
- if (pr->rq3_skba.arr[i])
- dev_kfree_skb(pr->rq3_skba.arr[i]);
+ dev_kfree_skb(pr->rq3_skba.arr[i]);
for (i = 0; i < pr->sq_skba.len; i++)
- if (pr->sq_skba.arr[i])
- dev_kfree_skb(pr->sq_skba.arr[i]);
+ dev_kfree_skb(pr->sq_skba.arr[i]);
vfree(pr->rq1_skba.arr);
vfree(pr->rq2_skba.arr);
diff --git a/drivers/net/ethernet/ibm/emac/core.c b/drivers/net/ethernet/ibm/emac/core.c
index 395dde444483..9e43c9ace9c2 100644
--- a/drivers/net/ethernet/ibm/emac/core.c
+++ b/drivers/net/ethernet/ibm/emac/core.c
@@ -1549,7 +1549,7 @@ emac_start_xmit_sg(struct sk_buff *skb, struct net_device *ndev)
ctrl);
/* skb fragments */
for (i = 0; i < nr_frags; ++i) {
- struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
+ skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
len = skb_frag_size(frag);
if (unlikely(dev->tx_cnt + mal_tx_chunks(len) >= NUM_TX_BUFF))
diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
index fa4bb940665c..4f83f97ffe8b 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.c
+++ b/drivers/net/ethernet/ibm/ibmvnic.c
@@ -1485,7 +1485,7 @@ static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
memcpy(dst + cur,
page_address(skb_frag_page(frag)) +
- frag->page_offset, skb_frag_size(frag));
+ skb_frag_off(frag), skb_frag_size(frag));
cur += skb_frag_size(frag);
}
} else {
diff --git a/drivers/net/ethernet/intel/e1000/e1000_ethtool.c b/drivers/net/ethernet/intel/e1000/e1000_ethtool.c
index a41008523c98..71d3d8854d8f 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_ethtool.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_ethtool.c
@@ -937,8 +937,7 @@ static void e1000_free_desc_rings(struct e1000_adapter *adapter)
txdr->buffer_info[i].dma,
txdr->buffer_info[i].length,
DMA_TO_DEVICE);
- if (txdr->buffer_info[i].skb)
- dev_kfree_skb(txdr->buffer_info[i].skb);
+ dev_kfree_skb(txdr->buffer_info[i].skb);
}
}
diff --git a/drivers/net/ethernet/intel/e1000/e1000_main.c b/drivers/net/ethernet/intel/e1000/e1000_main.c
index f703fa58458e..86493fea56e4 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_main.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_main.c
@@ -2889,9 +2889,8 @@ static int e1000_tx_map(struct e1000_adapter *adapter,
}
for (f = 0; f < nr_frags; f++) {
- const struct skb_frag_struct *frag;
+ const skb_frag_t *frag = &skb_shinfo(skb)->frags[f];
- frag = &skb_shinfo(skb)->frags[f];
len = skb_frag_size(frag);
offset = 0;
@@ -4176,8 +4175,7 @@ static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter,
/* an error means any chain goes out the window
* too
*/
- if (rx_ring->rx_skb_top)
- dev_kfree_skb(rx_ring->rx_skb_top);
+ dev_kfree_skb(rx_ring->rx_skb_top);
rx_ring->rx_skb_top = NULL;
goto next_desc;
}
diff --git a/drivers/net/ethernet/intel/e1000e/ethtool.c b/drivers/net/ethernet/intel/e1000e/ethtool.c
index 08342698386d..de8c5818a305 100644
--- a/drivers/net/ethernet/intel/e1000e/ethtool.c
+++ b/drivers/net/ethernet/intel/e1000e/ethtool.c
@@ -1126,8 +1126,7 @@ static void e1000_free_desc_rings(struct e1000_adapter *adapter)
buffer_info->dma,
buffer_info->length,
DMA_TO_DEVICE);
- if (buffer_info->skb)
- dev_kfree_skb(buffer_info->skb);
+ dev_kfree_skb(buffer_info->skb);
}
}
@@ -1139,8 +1138,7 @@ static void e1000_free_desc_rings(struct e1000_adapter *adapter)
dma_unmap_single(&pdev->dev,
buffer_info->dma,
2048, DMA_FROM_DEVICE);
- if (buffer_info->skb)
- dev_kfree_skb(buffer_info->skb);
+ dev_kfree_skb(buffer_info->skb);
}
}
diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.c b/drivers/net/ethernet/intel/e1000e/ich8lan.c
index 395b05701480..a1fab77b2096 100644
--- a/drivers/net/ethernet/intel/e1000e/ich8lan.c
+++ b/drivers/net/ethernet/intel/e1000e/ich8lan.c
@@ -1429,6 +1429,16 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)
else
phy_reg |= 0xFA;
e1e_wphy_locked(hw, I217_PLL_CLOCK_GATE_REG, phy_reg);
+
+ if (speed == SPEED_1000) {
+ hw->phy.ops.read_reg_locked(hw, HV_PM_CTRL,
+ &phy_reg);
+
+ phy_reg |= HV_PM_CTRL_K1_CLK_REQ;
+
+ hw->phy.ops.write_reg_locked(hw, HV_PM_CTRL,
+ phy_reg);
+ }
}
hw->phy.ops.release(hw);
diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.h b/drivers/net/ethernet/intel/e1000e/ich8lan.h
index eb09c755fa17..1502895eb45d 100644
--- a/drivers/net/ethernet/intel/e1000e/ich8lan.h
+++ b/drivers/net/ethernet/intel/e1000e/ich8lan.h
@@ -210,7 +210,7 @@
/* PHY Power Management Control */
#define HV_PM_CTRL PHY_REG(770, 17)
-#define HV_PM_CTRL_PLL_STOP_IN_K1_GIGA 0x100
+#define HV_PM_CTRL_K1_CLK_REQ 0x200
#define HV_PM_CTRL_K1_ENABLE 0x4000
#define I217_PLL_CLOCK_GATE_REG PHY_REG(772, 28)
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
index e4baa13b3cda..8a3f035c3a5f 100644
--- a/drivers/net/ethernet/intel/e1000e/netdev.c
+++ b/drivers/net/ethernet/intel/e1000e/netdev.c
@@ -5579,9 +5579,8 @@ static int e1000_tx_map(struct e1000_ring *tx_ring, struct sk_buff *skb,
}
for (f = 0; f < nr_frags; f++) {
- const struct skb_frag_struct *frag;
+ const skb_frag_t *frag = &skb_shinfo(skb)->frags[f];
- frag = &skb_shinfo(skb)->frags[f];
len = skb_frag_size(frag);
offset = 0;
@@ -6297,7 +6296,7 @@ fl_out:
static int e1000e_pm_freeze(struct device *dev)
{
- struct net_device *netdev = pci_get_drvdata(to_pci_dev(dev));
+ struct net_device *netdev = dev_get_drvdata(dev);
struct e1000_adapter *adapter = netdev_priv(netdev);
netif_device_detach(netdev);
@@ -6630,7 +6629,7 @@ static int __e1000_resume(struct pci_dev *pdev)
#ifdef CONFIG_PM_SLEEP
static int e1000e_pm_thaw(struct device *dev)
{
- struct net_device *netdev = pci_get_drvdata(to_pci_dev(dev));
+ struct net_device *netdev = dev_get_drvdata(dev);
struct e1000_adapter *adapter = netdev_priv(netdev);
e1000e_set_interrupt_capability(adapter);
@@ -6679,8 +6678,7 @@ static int e1000e_pm_resume(struct device *dev)
static int e1000e_pm_runtime_idle(struct device *dev)
{
- struct pci_dev *pdev = to_pci_dev(dev);
- struct net_device *netdev = pci_get_drvdata(pdev);
+ struct net_device *netdev = dev_get_drvdata(dev);
struct e1000_adapter *adapter = netdev_priv(netdev);
u16 eee_lp;
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k.h b/drivers/net/ethernet/intel/fm10k/fm10k.h
index 7d42582ed48d..b14441944b4b 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k.h
+++ b/drivers/net/ethernet/intel/fm10k/fm10k.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright(c) 2013 - 2018 Intel Corporation. */
+/* Copyright(c) 2013 - 2019 Intel Corporation. */
#ifndef _FM10K_H_
#define _FM10K_H_
@@ -177,14 +177,10 @@ static inline struct netdev_queue *txring_txq(const struct fm10k_ring *ring)
#define MIN_Q_VECTORS 1
enum fm10k_non_q_vectors {
FM10K_MBX_VECTOR,
-#define NON_Q_VECTORS_VF NON_Q_VECTORS_PF
- NON_Q_VECTORS_PF
+ NON_Q_VECTORS
};
-#define NON_Q_VECTORS(hw) (((hw)->mac.type == fm10k_mac_pf) ? \
- NON_Q_VECTORS_PF : \
- NON_Q_VECTORS_VF)
-#define MIN_MSIX_COUNT(hw) (MIN_Q_VECTORS + NON_Q_VECTORS(hw))
+#define MIN_MSIX_COUNT(hw) (MIN_Q_VECTORS + NON_Q_VECTORS)
struct fm10k_q_vector {
struct fm10k_intfc *interface;
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_dcbnl.c b/drivers/net/ethernet/intel/fm10k/fm10k_dcbnl.c
index 20768ac7f17e..c45315472245 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_dcbnl.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_dcbnl.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/* Copyright(c) 2013 - 2018 Intel Corporation. */
+/* Copyright(c) 2013 - 2019 Intel Corporation. */
#include "fm10k.h"
@@ -36,7 +36,7 @@ static int fm10k_dcbnl_ieee_getets(struct net_device *dev, struct ieee_ets *ets)
static int fm10k_dcbnl_ieee_setets(struct net_device *dev, struct ieee_ets *ets)
{
u8 num_tc = 0;
- int i, err;
+ int i;
/* verify type and determine num_tcs needed */
for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
@@ -57,7 +57,7 @@ static int fm10k_dcbnl_ieee_setets(struct net_device *dev, struct ieee_ets *ets)
/* update TC hardware mapping if necessary */
if (num_tc != netdev_get_num_tc(dev)) {
- err = fm10k_setup_tc(dev, num_tc);
+ int err = fm10k_setup_tc(dev, num_tc);
if (err)
return err;
}
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_debugfs.c b/drivers/net/ethernet/intel/fm10k/fm10k_debugfs.c
index dca104121c05..1d27b2fb23af 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_debugfs.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_debugfs.c
@@ -160,8 +160,6 @@ void fm10k_dbg_q_vector_init(struct fm10k_q_vector *q_vector)
snprintf(name, sizeof(name), "q_vector.%03d", q_vector->v_idx);
q_vector->dbg_q_vector = debugfs_create_dir(name, interface->dbg_intfc);
- if (!q_vector->dbg_q_vector)
- return;
/* Generate a file for each rx ring in the q_vector */
for (i = 0; i < q_vector->tx.count; i++) {
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c b/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c
index 4895dd83dd08..c681d2d28107 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/* Copyright(c) 2013 - 2018 Intel Corporation. */
+/* Copyright(c) 2013 - 2019 Intel Corporation. */
#include <linux/vmalloc.h>
@@ -222,7 +222,6 @@ static void __fm10k_add_ethtool_stats(u64 **data, void *pointer,
const unsigned int size)
{
unsigned int i;
- char *p;
if (!pointer) {
/* memory is not zero allocated so we have to clear it */
@@ -232,7 +231,7 @@ static void __fm10k_add_ethtool_stats(u64 **data, void *pointer,
}
for (i = 0; i < size; i++) {
- p = (char *)pointer + stats[i].stat_offset;
+ char *p = (char *)pointer + stats[i].stat_offset;
switch (stats[i].sizeof_stat) {
case sizeof(u64):
@@ -651,7 +650,6 @@ static int fm10k_set_coalesce(struct net_device *dev,
struct ethtool_coalesce *ec)
{
struct fm10k_intfc *interface = netdev_priv(dev);
- struct fm10k_q_vector *qv;
u16 tx_itr, rx_itr;
int i;
@@ -677,7 +675,8 @@ static int fm10k_set_coalesce(struct net_device *dev,
/* update q_vectors */
for (i = 0; i < interface->num_q_vectors; i++) {
- qv = interface->q_vector[i];
+ struct fm10k_q_vector *qv = interface->q_vector[i];
+
qv->tx.itr = tx_itr;
qv->rx.itr = rx_itr;
}
@@ -1115,13 +1114,12 @@ static void fm10k_get_channels(struct net_device *dev,
struct ethtool_channels *ch)
{
struct fm10k_intfc *interface = netdev_priv(dev);
- struct fm10k_hw *hw = &interface->hw;
/* report maximum channels */
ch->max_combined = fm10k_max_channels(dev);
/* report info for other vector */
- ch->max_other = NON_Q_VECTORS(hw);
+ ch->max_other = NON_Q_VECTORS;
ch->other_count = ch->max_other;
/* record RSS queues */
@@ -1133,14 +1131,13 @@ static int fm10k_set_channels(struct net_device *dev,
{
struct fm10k_intfc *interface = netdev_priv(dev);
unsigned int count = ch->combined_count;
- struct fm10k_hw *hw = &interface->hw;
/* verify they are not requesting separate vectors */
if (!count || ch->rx_count || ch->tx_count)
return -EINVAL;
/* verify other_count has not changed */
- if (ch->other_count != NON_Q_VECTORS(hw))
+ if (ch->other_count != NON_Q_VECTORS)
return -EINVAL;
/* verify the number of channels does not exceed hardware limits */
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_iov.c b/drivers/net/ethernet/intel/fm10k/fm10k_iov.c
index 8de77155f2e7..afe1fafd2447 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_iov.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_iov.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/* Copyright(c) 2013 - 2018 Intel Corporation. */
+/* Copyright(c) 2013 - 2019 Intel Corporation. */
#include "fm10k.h"
#include "fm10k_vf.h"
@@ -426,7 +426,7 @@ static s32 fm10k_iov_alloc_data(struct pci_dev *pdev, int num_vfs)
struct fm10k_iov_data *iov_data = interface->iov_data;
struct fm10k_hw *hw = &interface->hw;
size_t size;
- int i, err;
+ int i;
/* return error if iov_data is already populated */
if (iov_data)
@@ -452,6 +452,7 @@ static s32 fm10k_iov_alloc_data(struct pci_dev *pdev, int num_vfs)
/* loop through vf_info structures initializing each entry */
for (i = 0; i < num_vfs; i++) {
struct fm10k_vf_info *vf_info = &iov_data->vf_info[i];
+ int err;
/* Record VF VSI value */
vf_info->vsi = i + 1;
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_main.c b/drivers/net/ethernet/intel/fm10k/fm10k_main.c
index 90270b4a1682..e0a2be534b20 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_main.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_main.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/* Copyright(c) 2013 - 2018 Intel Corporation. */
+/* Copyright(c) 2013 - 2019 Intel Corporation. */
#include <linux/types.h>
#include <linux/module.h>
@@ -17,7 +17,7 @@ const char fm10k_driver_version[] = DRV_VERSION;
char fm10k_driver_name[] = "fm10k";
static const char fm10k_driver_string[] = DRV_SUMMARY;
static const char fm10k_copyright[] =
- "Copyright(c) 2013 - 2018 Intel Corporation.";
+ "Copyright(c) 2013 - 2019 Intel Corporation.";
MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
MODULE_DESCRIPTION(DRV_SUMMARY);
@@ -315,7 +315,7 @@ static struct sk_buff *fm10k_fetch_rx_buffer(struct fm10k_ring *rx_ring,
/* prefetch first cache line of first page */
prefetch(page_addr);
#if L1_CACHE_BYTES < 128
- prefetch(page_addr + L1_CACHE_BYTES);
+ prefetch((void *)((u8 *)page_addr + L1_CACHE_BYTES));
#endif
/* allocate a skb to store the frags */
@@ -946,7 +946,7 @@ static void fm10k_tx_map(struct fm10k_ring *tx_ring,
struct sk_buff *skb = first->skb;
struct fm10k_tx_buffer *tx_buffer;
struct fm10k_tx_desc *tx_desc;
- struct skb_frag_struct *frag;
+ skb_frag_t *frag;
unsigned char *data;
dma_addr_t dma;
unsigned int data_len, size;
@@ -1074,7 +1074,8 @@ netdev_tx_t fm10k_xmit_frame_ring(struct sk_buff *skb,
* otherwise try next time
*/
for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
- count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size);
+ count += TXD_USE_COUNT(skb_frag_size(
+ &skb_shinfo(skb)->frags[f]));
if (fm10k_maybe_stop_tx(tx_ring, count + 3)) {
tx_ring->tx_stats.tx_busy++;
@@ -1823,7 +1824,7 @@ static int fm10k_init_msix_capability(struct fm10k_intfc *interface)
v_budget = min_t(u16, v_budget, num_online_cpus());
/* account for vectors not related to queues */
- v_budget += NON_Q_VECTORS(hw);
+ v_budget += NON_Q_VECTORS;
/* At the same time, hardware can only support a maximum of
* hw.mac->max_msix_vectors vectors. With features
@@ -1855,7 +1856,7 @@ static int fm10k_init_msix_capability(struct fm10k_intfc *interface)
}
/* record the number of queues available for q_vectors */
- interface->num_q_vectors = v_budget - NON_Q_VECTORS(hw);
+ interface->num_q_vectors = v_budget - NON_Q_VECTORS;
return 0;
}
@@ -1869,7 +1870,7 @@ static int fm10k_init_msix_capability(struct fm10k_intfc *interface)
static bool fm10k_cache_ring_qos(struct fm10k_intfc *interface)
{
struct net_device *dev = interface->netdev;
- int pc, offset, rss_i, i, q_idx;
+ int pc, offset, rss_i, i;
u16 pc_stride = interface->ring_feature[RING_F_QOS].mask + 1;
u8 num_pcs = netdev_get_num_tc(dev);
@@ -1879,7 +1880,8 @@ static bool fm10k_cache_ring_qos(struct fm10k_intfc *interface)
rss_i = interface->ring_feature[RING_F_RSS].indices;
for (pc = 0, offset = 0; pc < num_pcs; pc++, offset += rss_i) {
- q_idx = pc;
+ int q_idx = pc;
+
for (i = 0; i < rss_i; i++) {
interface->tx_ring[offset + i]->reg_idx = q_idx;
interface->tx_ring[offset + i]->qos_pc = pc;
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_mbx.c b/drivers/net/ethernet/intel/fm10k/fm10k_mbx.c
index 21021fe4f1c3..75e51f91036c 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_mbx.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_mbx.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/* Copyright(c) 2013 - 2018 Intel Corporation. */
+/* Copyright(c) 2013 - 2019 Intel Corporation. */
#include "fm10k_common.h"
@@ -297,13 +297,14 @@ static u16 fm10k_mbx_validate_msg_size(struct fm10k_mbx_info *mbx, u16 len)
{
struct fm10k_mbx_fifo *fifo = &mbx->rx;
u16 total_len = 0, msg_len;
- u32 *msg;
/* length should include previous amounts pushed */
len += mbx->pushed;
/* offset in message is based off of current message size */
do {
+ u32 *msg;
+
msg = fifo->buffer + fm10k_fifo_tail_offset(fifo, total_len);
msg_len = FM10K_TLV_DWORD_LEN(*msg);
total_len += msg_len;
@@ -1920,7 +1921,6 @@ static void fm10k_sm_mbx_transmit(struct fm10k_hw *hw,
/* reduce length by 1 to convert to a mask */
u16 mbmem_len = mbx->mbmem_len - 1;
u16 tail_len, len = 0;
- u32 *msg;
/* push head behind tail */
if (mbx->tail < head)
@@ -1930,6 +1930,8 @@ static void fm10k_sm_mbx_transmit(struct fm10k_hw *hw,
/* determine msg aligned offset for end of buffer */
do {
+ u32 *msg;
+
msg = fifo->buffer + fm10k_fifo_head_offset(fifo, len);
tail_len = len;
len += FM10K_TLV_DWORD_LEN(*msg);
@@ -2132,7 +2134,8 @@ fifo_err:
* DWORDs, not bytes. Any invalid values will cause the mailbox to return
* error.
**/
-s32 fm10k_sm_mbx_init(struct fm10k_hw *hw, struct fm10k_mbx_info *mbx,
+s32 fm10k_sm_mbx_init(struct fm10k_hw __always_unused *hw,
+ struct fm10k_mbx_info *mbx,
const struct fm10k_msg_data *msg_data)
{
mbx->mbx_reg = FM10K_GMBX;
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c b/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c
index 538a8467f434..09f7a246e134 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/* Copyright(c) 2013 - 2018 Intel Corporation. */
+/* Copyright(c) 2013 - 2019 Intel Corporation. */
#include "fm10k.h"
#include <linux/vmalloc.h>
@@ -54,7 +54,7 @@ err:
**/
static int fm10k_setup_all_tx_resources(struct fm10k_intfc *interface)
{
- int i, err = 0;
+ int i, err;
for (i = 0; i < interface->num_tx_queues; i++) {
err = fm10k_setup_tx_resources(interface->tx_ring[i]);
@@ -121,7 +121,7 @@ err:
**/
static int fm10k_setup_all_rx_resources(struct fm10k_intfc *interface)
{
- int i, err = 0;
+ int i, err;
for (i = 0; i < interface->num_rx_queues; i++) {
err = fm10k_setup_rx_resources(interface->rx_ring[i]);
@@ -169,7 +169,6 @@ void fm10k_unmap_and_free_tx_resource(struct fm10k_ring *ring,
**/
static void fm10k_clean_tx_ring(struct fm10k_ring *tx_ring)
{
- struct fm10k_tx_buffer *tx_buffer;
unsigned long size;
u16 i;
@@ -179,7 +178,8 @@ static void fm10k_clean_tx_ring(struct fm10k_ring *tx_ring)
/* Free all the Tx ring sk_buffs */
for (i = 0; i < tx_ring->count; i++) {
- tx_buffer = &tx_ring->tx_buffer[i];
+ struct fm10k_tx_buffer *tx_buffer = &tx_ring->tx_buffer[i];
+
fm10k_unmap_and_free_tx_resource(tx_ring, tx_buffer);
}
@@ -253,8 +253,7 @@ static void fm10k_clean_rx_ring(struct fm10k_ring *rx_ring)
if (!rx_ring->rx_buffer)
return;
- if (rx_ring->skb)
- dev_kfree_skb(rx_ring->skb);
+ dev_kfree_skb(rx_ring->skb);
rx_ring->skb = NULL;
/* Free all the Rx ring sk_buffs */
@@ -871,7 +870,7 @@ static int fm10k_uc_vlan_unsync(struct net_device *netdev,
u16 glort = interface->glort;
u16 vid = interface->vid;
bool set = !!(vid / VLAN_N_VID);
- int err = -EHOSTDOWN;
+ int err;
/* drop any leading bits on the VLAN ID */
vid &= VLAN_N_VID - 1;
@@ -891,7 +890,7 @@ static int fm10k_mc_vlan_unsync(struct net_device *netdev,
u16 glort = interface->glort;
u16 vid = interface->vid;
bool set = !!(vid / VLAN_N_VID);
- int err = -EHOSTDOWN;
+ int err;
/* drop any leading bits on the VLAN ID */
vid &= VLAN_N_VID - 1;
@@ -1444,11 +1443,11 @@ static int __fm10k_setup_tc(struct net_device *dev, enum tc_setup_type type,
static void fm10k_assign_l2_accel(struct fm10k_intfc *interface,
struct fm10k_l2_accel *l2_accel)
{
- struct fm10k_ring *ring;
int i;
for (i = 0; i < interface->num_rx_queues; i++) {
- ring = interface->rx_ring[i];
+ struct fm10k_ring *ring = interface->rx_ring[i];
+
rcu_assign_pointer(ring->l2_accel, l2_accel);
}
@@ -1463,7 +1462,7 @@ static void *fm10k_dfwd_add_station(struct net_device *dev,
struct fm10k_l2_accel *old_l2_accel = NULL;
struct fm10k_dglort_cfg dglort = { 0 };
struct fm10k_hw *hw = &interface->hw;
- int size = 0, i;
+ int size, i;
u16 vid, glort;
/* The hardware supported by fm10k only filters on the destination MAC
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_pci.c b/drivers/net/ethernet/intel/fm10k/fm10k_pci.c
index e49fb51d3613..bb236fa44048 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_pci.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_pci.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/* Copyright(c) 2013 - 2018 Intel Corporation. */
+/* Copyright(c) 2013 - 2019 Intel Corporation. */
#include <linux/module.h>
#include <linux/interrupt.h>
@@ -344,7 +344,6 @@ static void fm10k_detach_subtask(struct fm10k_intfc *interface)
struct net_device *netdev = interface->netdev;
u32 __iomem *hw_addr;
u32 value;
- int err;
/* do nothing if netdev is still present or hw_addr is set */
if (netif_device_present(netdev) || interface->hw.hw_addr)
@@ -362,6 +361,8 @@ static void fm10k_detach_subtask(struct fm10k_intfc *interface)
hw_addr = READ_ONCE(interface->uc_addr);
value = readl(hw_addr);
if (~value) {
+ int err;
+
/* Make sure the reset was initiated because we detached,
* otherwise we might race with a different reset flow.
*/
@@ -697,8 +698,6 @@ static void fm10k_watchdog_subtask(struct fm10k_intfc *interface)
*/
static void fm10k_check_hang_subtask(struct fm10k_intfc *interface)
{
- int i;
-
/* If we're down or resetting, just bail */
if (test_bit(__FM10K_DOWN, interface->state) ||
test_bit(__FM10K_RESETTING, interface->state))
@@ -710,6 +709,8 @@ static void fm10k_check_hang_subtask(struct fm10k_intfc *interface)
interface->next_tx_hang_check = jiffies + (2 * HZ);
if (netif_carrier_ok(interface->netdev)) {
+ int i;
+
/* Force detection of hung controller */
for (i = 0; i < interface->num_tx_queues; i++)
set_check_for_tx_hang(interface->tx_ring[i]);
@@ -897,7 +898,7 @@ static void fm10k_configure_tx_ring(struct fm10k_intfc *interface,
/* Map interrupt */
if (ring->q_vector) {
- txint = ring->q_vector->v_idx + NON_Q_VECTORS(hw);
+ txint = ring->q_vector->v_idx + NON_Q_VECTORS;
txint |= FM10K_INT_MAP_TIMER0;
}
@@ -1036,7 +1037,7 @@ static void fm10k_configure_rx_ring(struct fm10k_intfc *interface,
/* Map interrupt */
if (ring->q_vector) {
- rxint = ring->q_vector->v_idx + NON_Q_VECTORS(hw);
+ rxint = ring->q_vector->v_idx + NON_Q_VECTORS;
rxint |= FM10K_INT_MAP_TIMER1;
}
@@ -1719,10 +1720,9 @@ int fm10k_mbx_request_irq(struct fm10k_intfc *interface)
void fm10k_qv_free_irq(struct fm10k_intfc *interface)
{
int vector = interface->num_q_vectors;
- struct fm10k_hw *hw = &interface->hw;
struct msix_entry *entry;
- entry = &interface->msix_entries[NON_Q_VECTORS(hw) + vector];
+ entry = &interface->msix_entries[NON_Q_VECTORS + vector];
while (vector) {
struct fm10k_q_vector *q_vector;
@@ -1759,7 +1759,7 @@ int fm10k_qv_request_irq(struct fm10k_intfc *interface)
unsigned int ri = 0, ti = 0;
int vector, err;
- entry = &interface->msix_entries[NON_Q_VECTORS(hw)];
+ entry = &interface->msix_entries[NON_Q_VECTORS];
for (vector = 0; vector < interface->num_q_vectors; vector++) {
struct fm10k_q_vector *q_vector = interface->q_vector[vector];
@@ -2339,7 +2339,7 @@ static int fm10k_handle_resume(struct fm10k_intfc *interface)
/* Restart the MAC/VLAN request queue in-case of outstanding events */
fm10k_macvlan_schedule(interface);
- return err;
+ return 0;
}
/**
@@ -2352,7 +2352,7 @@ static int fm10k_handle_resume(struct fm10k_intfc *interface)
**/
static int __maybe_unused fm10k_resume(struct device *dev)
{
- struct fm10k_intfc *interface = pci_get_drvdata(to_pci_dev(dev));
+ struct fm10k_intfc *interface = dev_get_drvdata(dev);
struct net_device *netdev = interface->netdev;
struct fm10k_hw *hw = &interface->hw;
int err;
@@ -2379,7 +2379,7 @@ static int __maybe_unused fm10k_resume(struct device *dev)
**/
static int __maybe_unused fm10k_suspend(struct device *dev)
{
- struct fm10k_intfc *interface = pci_get_drvdata(to_pci_dev(dev));
+ struct fm10k_intfc *interface = dev_get_drvdata(dev);
struct net_device *netdev = interface->netdev;
netif_device_detach(netdev);
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_pf.c b/drivers/net/ethernet/intel/fm10k/fm10k_pf.c
index cb4d02629b86..be07bfdb0bb4 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_pf.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_pf.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/* Copyright(c) 2013 - 2018 Intel Corporation. */
+/* Copyright(c) 2013 - 2019 Intel Corporation. */
#include "fm10k_pf.h"
#include "fm10k_vf.h"
@@ -1152,7 +1152,7 @@ static void fm10k_iov_update_stats_pf(struct fm10k_hw *hw,
* assumption is that in this case it is acceptable to just directly
* hand off the message from the VF to the underlying shared code.
**/
-s32 fm10k_iov_msg_msix_pf(struct fm10k_hw *hw, u32 **results,
+s32 fm10k_iov_msg_msix_pf(struct fm10k_hw *hw, u32 __always_unused **results,
struct fm10k_mbx_info *mbx)
{
struct fm10k_vf_info *vf_info = (struct fm10k_vf_info *)mbx;
@@ -1352,7 +1352,6 @@ s32 fm10k_iov_msg_lport_state_pf(struct fm10k_hw *hw, u32 **results,
struct fm10k_mbx_info *mbx)
{
struct fm10k_vf_info *vf_info = (struct fm10k_vf_info *)mbx;
- u32 *result;
s32 err = 0;
u32 msg[2];
u8 mode = 0;
@@ -1362,7 +1361,7 @@ s32 fm10k_iov_msg_lport_state_pf(struct fm10k_hw *hw, u32 **results,
return FM10K_ERR_PARAM;
if (!!results[FM10K_LPORT_STATE_MSG_XCAST_MODE]) {
- result = results[FM10K_LPORT_STATE_MSG_XCAST_MODE];
+ u32 *result = results[FM10K_LPORT_STATE_MSG_XCAST_MODE];
/* XCAST mode update requested */
err = fm10k_tlv_attr_get_u8(result, &mode);
@@ -1566,7 +1565,7 @@ static s32 fm10k_get_fault_pf(struct fm10k_hw *hw, int type,
/* read remaining fields */
fault->address = fm10k_read_reg(hw, type + FM10K_FAULT_ADDR_HI);
fault->address <<= 32;
- fault->address = fm10k_read_reg(hw, type + FM10K_FAULT_ADDR_LO);
+ fault->address |= fm10k_read_reg(hw, type + FM10K_FAULT_ADDR_LO);
fault->specinfo = fm10k_read_reg(hw, type + FM10K_FAULT_SPECINFO);
/* clear valid bit to allow for next error */
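The one-character fix above (= becomes |=) matters because the 64-bit fault address is assembled from two 32-bit registers; plain assignment of the low half silently discarded the high half that had just been shifted into place. A minimal sketch of the intended pattern, with illustrative values and a hypothetical read_reg() helper:

    u64 address;

    address = read_reg(FAULT_ADDR_HI);   /* e.g. 0x00000001         */
    address <<= 32;                      /* 0x0000000100000000      */
    address |= read_reg(FAULT_ADDR_LO);  /* e.g. 0x00000001deadbeef */
    /* with '=' instead of '|=', address would be 0x00000000deadbeef */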
@@ -1642,7 +1641,7 @@ const struct fm10k_tlv_attr fm10k_lport_map_msg_attr[] = {
* switch API.
**/
s32 fm10k_msg_lport_map_pf(struct fm10k_hw *hw, u32 **results,
- struct fm10k_mbx_info *mbx)
+ struct fm10k_mbx_info __always_unused *mbx)
{
u16 glort, mask;
u32 dglort_map;
@@ -1685,7 +1684,7 @@ const struct fm10k_tlv_attr fm10k_update_pvid_msg_attr[] = {
* This handler configures the default VLAN for the PF
**/
static s32 fm10k_msg_update_pvid_pf(struct fm10k_hw *hw, u32 **results,
- struct fm10k_mbx_info *mbx)
+ struct fm10k_mbx_info __always_unused *mbx)
{
u16 glort, pvid;
u32 pvid_update;
@@ -1746,7 +1745,7 @@ const struct fm10k_tlv_attr fm10k_err_msg_attr[] = {
* messages that the PF has sent.
**/
s32 fm10k_msg_err_pf(struct fm10k_hw *hw, u32 **results,
- struct fm10k_mbx_info *mbx)
+ struct fm10k_mbx_info __always_unused *mbx)
{
struct fm10k_swapi_error err_msg;
s32 err;
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_tlv.c b/drivers/net/ethernet/intel/fm10k/fm10k_tlv.c
index 2a7a40bf2b1c..21eff0895a7a 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_tlv.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_tlv.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/* Copyright(c) 2013 - 2018 Intel Corporation. */
+/* Copyright(c) 2013 - 2019 Intel Corporation. */
#include "fm10k_tlv.h"
@@ -472,7 +472,7 @@ static s32 fm10k_tlv_attr_parse(u32 *attr, u32 **results,
const struct fm10k_tlv_attr *tlv_attr)
{
u32 i, attr_id, offset = 0;
- s32 err = 0;
+ s32 err;
u16 len;
/* verify pointers are not NULL */
@@ -587,8 +587,9 @@ s32 fm10k_tlv_msg_parse(struct fm10k_hw *hw, u32 *msg,
* a minimum it just indicates that the message requested was
* unimplemented.
**/
-s32 fm10k_tlv_msg_error(struct fm10k_hw *hw, u32 **results,
- struct fm10k_mbx_info *mbx)
+s32 fm10k_tlv_msg_error(struct fm10k_hw __always_unused *hw,
+ u32 __always_unused **results,
+ struct fm10k_mbx_info __always_unused *mbx)
{
return FM10K_NOT_IMPLEMENTED;
}
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_type.h b/drivers/net/ethernet/intel/fm10k/fm10k_type.h
index 9fb9fca375e3..15ac1c7885bc 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_type.h
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_type.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright(c) 2013 - 2018 Intel Corporation. */
+/* Copyright(c) 2013 - 2019 Intel Corporation. */
#ifndef _FM10K_TYPE_H_
#define _FM10K_TYPE_H_
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_vf.c b/drivers/net/ethernet/intel/fm10k/fm10k_vf.c
index a8519c1f0406..dc8ccd378ec9 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_vf.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_vf.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/* Copyright(c) 2013 - 2018 Intel Corporation. */
+/* Copyright(c) 2013 - 2019 Intel Corporation. */
#include "fm10k_vf.h"
@@ -198,7 +198,7 @@ static s32 fm10k_update_vlan_vf(struct fm10k_hw *hw, u32 vid, u8 vsi, bool set)
* This function should determine the MAC address for the VF
**/
s32 fm10k_msg_mac_vlan_vf(struct fm10k_hw *hw, u32 **results,
- struct fm10k_mbx_info *mbx)
+ struct fm10k_mbx_info __always_unused *mbx)
{
u8 perm_addr[ETH_ALEN];
u16 vid;
@@ -267,8 +267,10 @@ static s32 fm10k_read_mac_addr_vf(struct fm10k_hw *hw)
* This function is used to add or remove unicast MAC addresses for
* the VF.
**/
-static s32 fm10k_update_uc_addr_vf(struct fm10k_hw *hw, u16 glort,
- const u8 *mac, u16 vid, bool add, u8 flags)
+static s32 fm10k_update_uc_addr_vf(struct fm10k_hw *hw,
+ u16 __always_unused glort,
+ const u8 *mac, u16 vid, bool add,
+ u8 __always_unused flags)
{
struct fm10k_mbx_info *mbx = &hw->mbx;
u32 msg[7];
@@ -309,7 +311,8 @@ static s32 fm10k_update_uc_addr_vf(struct fm10k_hw *hw, u16 glort,
* This function is used to add or remove multicast MAC addresses for
* the VF.
**/
-static s32 fm10k_update_mc_addr_vf(struct fm10k_hw *hw, u16 glort,
+static s32 fm10k_update_mc_addr_vf(struct fm10k_hw *hw,
+ u16 __always_unused glort,
const u8 *mac, u16 vid, bool add)
{
struct fm10k_mbx_info *mbx = &hw->mbx;
@@ -373,7 +376,7 @@ const struct fm10k_tlv_attr fm10k_lport_state_msg_attr[] = {
* are ready to bring up the interface.
**/
s32 fm10k_msg_lport_state_vf(struct fm10k_hw *hw, u32 **results,
- struct fm10k_mbx_info *mbx)
+ struct fm10k_mbx_info __always_unused *mbx)
{
hw->mac.dglort_map = !results[FM10K_LPORT_STATE_MSG_READY] ?
FM10K_DGLORTMAP_NONE : FM10K_DGLORTMAP_ZERO;
@@ -392,8 +395,9 @@ s32 fm10k_msg_lport_state_vf(struct fm10k_hw *hw, u32 **results,
* enabled we can add filters, if it is disabled all filters for this
* logical port are flushed.
**/
-static s32 fm10k_update_lport_state_vf(struct fm10k_hw *hw, u16 glort,
- u16 count, bool enable)
+static s32 fm10k_update_lport_state_vf(struct fm10k_hw *hw,
+ u16 __always_unused glort,
+ u16 __always_unused count, bool enable)
{
struct fm10k_mbx_info *mbx = &hw->mbx;
u32 msg[2];
@@ -420,7 +424,8 @@ static s32 fm10k_update_lport_state_vf(struct fm10k_hw *hw, u16 glort,
* so that it can enable either multicast, multicast promiscuous, or
* promiscuous mode of operation.
**/
-static s32 fm10k_update_xcast_mode_vf(struct fm10k_hw *hw, u16 glort, u8 mode)
+static s32 fm10k_update_xcast_mode_vf(struct fm10k_hw *hw,
+ u16 __always_unused glort, u8 mode)
{
struct fm10k_mbx_info *mbx = &hw->mbx;
u32 msg[3];
@@ -475,7 +480,7 @@ static void fm10k_rebind_hw_stats_vf(struct fm10k_hw *hw,
* that information to then populate a DGLORTMAP/DEC entry and the queues
* to which it has been assigned.
**/
-static s32 fm10k_configure_dglort_map_vf(struct fm10k_hw *hw,
+static s32 fm10k_configure_dglort_map_vf(struct fm10k_hw __always_unused *hw,
struct fm10k_dglort_cfg *dglort)
{
/* verify the dglort pointer */
diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h
index 84bd06901014..3e535d3263b3 100644
--- a/drivers/net/ethernet/intel/i40e/i40e.h
+++ b/drivers/net/ethernet/intel/i40e/i40e.h
@@ -1021,6 +1021,7 @@ i40e_find_vsi_by_type(struct i40e_pf *pf, u16 type)
return NULL;
}
void i40e_update_stats(struct i40e_vsi *vsi);
+void i40e_update_veb_stats(struct i40e_veb *veb);
void i40e_update_eth_stats(struct i40e_vsi *vsi);
struct rtnl_link_stats64 *i40e_get_vsi_stats_struct(struct i40e_vsi *vsi);
int i40e_fetch_switch_configuration(struct i40e_pf *pf,
diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq.c b/drivers/net/ethernet/intel/i40e/i40e_adminq.c
index 814acbe79ffd..72c04881d290 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_adminq.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_adminq.c
@@ -610,8 +610,10 @@ i40e_status i40e_init_adminq(struct i40e_hw *hw)
if (hw->aq.api_maj_ver > 1 ||
(hw->aq.api_maj_ver == 1 &&
- hw->aq.api_min_ver >= 8))
+ hw->aq.api_min_ver >= 8)) {
hw->flags |= I40E_HW_FLAG_FW_LLDP_PERSISTENT;
+ hw->flags |= I40E_HW_FLAG_DROP_MODE;
+ }
if (hw->aq.api_maj_ver > I40E_FW_API_VERSION_MAJOR) {
ret_code = I40E_ERR_FIRMWARE_API_VERSION;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
index 6536023fa074..21cccec328e3 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
@@ -11,8 +11,8 @@
*/
#define I40E_FW_API_VERSION_MAJOR 0x0001
-#define I40E_FW_API_VERSION_MINOR_X722 0x0008
-#define I40E_FW_API_VERSION_MINOR_X710 0x0008
+#define I40E_FW_API_VERSION_MINOR_X722 0x0009
+#define I40E_FW_API_VERSION_MINOR_X710 0x0009
#define I40E_FW_MINOR_VERSION(_h) ((_h)->mac.type == I40E_MAC_XL710 ? \
I40E_FW_API_VERSION_MINOR_X710 : \
@@ -2051,20 +2051,21 @@ I40E_CHECK_CMD_LENGTH(i40e_aq_set_phy_config);
struct i40e_aq_set_mac_config {
__le16 max_frame_size;
u8 params;
-#define I40E_AQ_SET_MAC_CONFIG_CRC_EN 0x04
-#define I40E_AQ_SET_MAC_CONFIG_PACING_MASK 0x78
-#define I40E_AQ_SET_MAC_CONFIG_PACING_SHIFT 3
-#define I40E_AQ_SET_MAC_CONFIG_PACING_NONE 0x0
-#define I40E_AQ_SET_MAC_CONFIG_PACING_1B_13TX 0xF
-#define I40E_AQ_SET_MAC_CONFIG_PACING_1DW_9TX 0x9
-#define I40E_AQ_SET_MAC_CONFIG_PACING_1DW_4TX 0x8
-#define I40E_AQ_SET_MAC_CONFIG_PACING_3DW_7TX 0x7
-#define I40E_AQ_SET_MAC_CONFIG_PACING_2DW_3TX 0x6
-#define I40E_AQ_SET_MAC_CONFIG_PACING_1DW_1TX 0x5
-#define I40E_AQ_SET_MAC_CONFIG_PACING_3DW_2TX 0x4
-#define I40E_AQ_SET_MAC_CONFIG_PACING_7DW_3TX 0x3
-#define I40E_AQ_SET_MAC_CONFIG_PACING_4DW_1TX 0x2
-#define I40E_AQ_SET_MAC_CONFIG_PACING_9DW_1TX 0x1
+#define I40E_AQ_SET_MAC_CONFIG_CRC_EN 0x04
+#define I40E_AQ_SET_MAC_CONFIG_PACING_MASK 0x78
+#define I40E_AQ_SET_MAC_CONFIG_PACING_SHIFT 3
+#define I40E_AQ_SET_MAC_CONFIG_PACING_NONE 0x0
+#define I40E_AQ_SET_MAC_CONFIG_PACING_1B_13TX 0xF
+#define I40E_AQ_SET_MAC_CONFIG_PACING_1DW_9TX 0x9
+#define I40E_AQ_SET_MAC_CONFIG_PACING_1DW_4TX 0x8
+#define I40E_AQ_SET_MAC_CONFIG_PACING_3DW_7TX 0x7
+#define I40E_AQ_SET_MAC_CONFIG_PACING_2DW_3TX 0x6
+#define I40E_AQ_SET_MAC_CONFIG_PACING_1DW_1TX 0x5
+#define I40E_AQ_SET_MAC_CONFIG_PACING_3DW_2TX 0x4
+#define I40E_AQ_SET_MAC_CONFIG_PACING_7DW_3TX 0x3
+#define I40E_AQ_SET_MAC_CONFIG_PACING_4DW_1TX 0x2
+#define I40E_AQ_SET_MAC_CONFIG_PACING_9DW_1TX 0x1
+#define I40E_AQ_SET_MAC_CONFIG_DROP_BLOCKING_PACKET_EN 0x80
u8 tx_timer_priority; /* bitmap */
__le16 tx_timer_value;
__le16 fc_refresh_threshold;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_common.c b/drivers/net/ethernet/intel/i40e/i40e_common.c
index 906cf68d3453..46e649c09f72 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_common.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_common.c
@@ -13,7 +13,7 @@
* This function sets the mac type of the adapter based on the
* vendor ID and device ID stored in the hw structure.
**/
-static i40e_status i40e_set_mac_type(struct i40e_hw *hw)
+i40e_status i40e_set_mac_type(struct i40e_hw *hw)
{
i40e_status status = 0;
@@ -1577,19 +1577,22 @@ i40e_status i40e_aq_get_phy_capabilities(struct i40e_hw *hw,
status = i40e_asq_send_command(hw, &desc, abilities,
abilities_size, cmd_details);
- if (status)
- break;
-
- if (hw->aq.asq_last_status == I40E_AQ_RC_EIO) {
+ switch (hw->aq.asq_last_status) {
+ case I40E_AQ_RC_EIO:
status = I40E_ERR_UNKNOWN_PHY;
break;
- } else if (hw->aq.asq_last_status == I40E_AQ_RC_EAGAIN) {
+ case I40E_AQ_RC_EAGAIN:
usleep_range(1000, 2000);
total_delay++;
status = I40E_ERR_TIMEOUT;
+ break;
+ /* also covers I40E_AQ_RC_OK */
+ default:
+ break;
}
- } while ((hw->aq.asq_last_status != I40E_AQ_RC_OK) &&
- (total_delay < max_delay));
+
+ } while ((hw->aq.asq_last_status == I40E_AQ_RC_EAGAIN) &&
+ (total_delay < max_delay));
if (status)
return status;
@@ -1643,25 +1646,15 @@ enum i40e_status_code i40e_aq_set_phy_config(struct i40e_hw *hw,
return status;
}
-/**
- * i40e_set_fc
- * @hw: pointer to the hw struct
- * @aq_failures: buffer to return AdminQ failure information
- * @atomic_restart: whether to enable atomic link restart
- *
- * Set the requested flow control mode using set_phy_config.
- **/
-enum i40e_status_code i40e_set_fc(struct i40e_hw *hw, u8 *aq_failures,
- bool atomic_restart)
+static noinline_for_stack enum i40e_status_code
+i40e_set_fc_status(struct i40e_hw *hw,
+ struct i40e_aq_get_phy_abilities_resp *abilities,
+ bool atomic_restart)
{
- enum i40e_fc_mode fc_mode = hw->fc.requested_mode;
- struct i40e_aq_get_phy_abilities_resp abilities;
struct i40e_aq_set_phy_config config;
- enum i40e_status_code status;
+ enum i40e_fc_mode fc_mode = hw->fc.requested_mode;
u8 pause_mask = 0x0;
- *aq_failures = 0x0;
-
switch (fc_mode) {
case I40E_FC_FULL:
pause_mask |= I40E_AQ_PHY_FLAG_PAUSE_TX;
@@ -1677,6 +1670,48 @@ enum i40e_status_code i40e_set_fc(struct i40e_hw *hw, u8 *aq_failures,
break;
}
+ memset(&config, 0, sizeof(struct i40e_aq_set_phy_config));
+ /* clear the old pause settings */
+ config.abilities = abilities->abilities & ~(I40E_AQ_PHY_FLAG_PAUSE_TX) &
+ ~(I40E_AQ_PHY_FLAG_PAUSE_RX);
+ /* set the new abilities */
+ config.abilities |= pause_mask;
+ /* If the abilities have changed, then set the new config */
+ if (config.abilities == abilities->abilities)
+ return 0;
+
+ /* Auto restart link so settings take effect */
+ if (atomic_restart)
+ config.abilities |= I40E_AQ_PHY_ENABLE_ATOMIC_LINK;
+ /* Copy over all the old settings */
+ config.phy_type = abilities->phy_type;
+ config.phy_type_ext = abilities->phy_type_ext;
+ config.link_speed = abilities->link_speed;
+ config.eee_capability = abilities->eee_capability;
+ config.eeer = abilities->eeer_val;
+ config.low_power_ctrl = abilities->d3_lpan;
+ config.fec_config = abilities->fec_cfg_curr_mod_ext_info &
+ I40E_AQ_PHY_FEC_CONFIG_MASK;
+
+ return i40e_aq_set_phy_config(hw, &config, NULL);
+}
+
+/**
+ * i40e_set_fc
+ * @hw: pointer to the hw struct
+ * @aq_failures: buffer to return AdminQ failure information
+ * @atomic_restart: whether to enable atomic link restart
+ *
+ * Set the requested flow control mode using set_phy_config.
+ **/
+enum i40e_status_code i40e_set_fc(struct i40e_hw *hw, u8 *aq_failures,
+ bool atomic_restart)
+{
+ struct i40e_aq_get_phy_abilities_resp abilities;
+ enum i40e_status_code status;
+
+ *aq_failures = 0x0;
+
/* Get the current phy config */
status = i40e_aq_get_phy_capabilities(hw, false, false, &abilities,
NULL);
@@ -1685,31 +1720,10 @@ enum i40e_status_code i40e_set_fc(struct i40e_hw *hw, u8 *aq_failures,
return status;
}
- memset(&config, 0, sizeof(struct i40e_aq_set_phy_config));
- /* clear the old pause settings */
- config.abilities = abilities.abilities & ~(I40E_AQ_PHY_FLAG_PAUSE_TX) &
- ~(I40E_AQ_PHY_FLAG_PAUSE_RX);
- /* set the new abilities */
- config.abilities |= pause_mask;
- /* If the abilities have changed, then set the new config */
- if (config.abilities != abilities.abilities) {
- /* Auto restart link so settings take effect */
- if (atomic_restart)
- config.abilities |= I40E_AQ_PHY_ENABLE_ATOMIC_LINK;
- /* Copy over all the old settings */
- config.phy_type = abilities.phy_type;
- config.phy_type_ext = abilities.phy_type_ext;
- config.link_speed = abilities.link_speed;
- config.eee_capability = abilities.eee_capability;
- config.eeer = abilities.eeer_val;
- config.low_power_ctrl = abilities.d3_lpan;
- config.fec_config = abilities.fec_cfg_curr_mod_ext_info &
- I40E_AQ_PHY_FEC_CONFIG_MASK;
- status = i40e_aq_set_phy_config(hw, &config, NULL);
+ status = i40e_set_fc_status(hw, &abilities, atomic_restart);
+ if (status)
+ *aq_failures |= I40E_SET_FC_AQ_FAIL_SET;
- if (status)
- *aq_failures |= I40E_SET_FC_AQ_FAIL_SET;
- }
/* Update the link info */
status = i40e_update_link_info(hw);
if (status) {
@@ -2537,7 +2551,7 @@ i40e_status i40e_get_link_status(struct i40e_hw *hw, bool *link_up)
 * i40e_update_link_info - update status of the HW network link
* @hw: pointer to the hw struct
**/
-i40e_status i40e_update_link_info(struct i40e_hw *hw)
+noinline_for_stack i40e_status i40e_update_link_info(struct i40e_hw *hw)
{
struct i40e_aq_get_phy_abilities_resp abilities;
i40e_status status = 0;
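Both the new i40e_set_fc_status() helper and i40e_update_link_info() are tagged noinline_for_stack: each keeps a sizable i40e_aq_get_phy_abilities_resp (plus config) structure in its own frame, and letting the compiler inline them into already-deep callers could exceed the kernel's fixed stack budget. A minimal sketch of the idea; all example_* names are hypothetical:

    /* noinline_for_stack is just 'noinline' with self-documenting
     * intent: keep this frame's large locals out of the caller.
     */
    static noinline_for_stack int example_query(struct example_hw *hw)
    {
            struct example_big_response resp = {};   /* large local */

            return example_fill(hw, &resp);          /* hypothetical */
    }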
diff --git a/drivers/net/ethernet/intel/i40e/i40e_dcb.c b/drivers/net/ethernet/intel/i40e/i40e_dcb.c
index 292eeb3def10..200a1cb3b536 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_dcb.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_dcb.c
@@ -877,7 +877,23 @@ i40e_status i40e_init_dcb(struct i40e_hw *hw, bool enable_mib_change)
return I40E_NOT_SUPPORTED;
/* Read LLDP NVM area */
- ret = i40e_read_lldp_cfg(hw, &lldp_cfg);
+ if (hw->flags & I40E_HW_FLAG_FW_LLDP_PERSISTENT) {
+ u8 offset = 0;
+
+ if (hw->mac.type == I40E_MAC_XL710)
+ offset = I40E_LLDP_CURRENT_STATUS_XL710_OFFSET;
+ else if (hw->mac.type == I40E_MAC_X722)
+ offset = I40E_LLDP_CURRENT_STATUS_X722_OFFSET;
+ else
+ return I40E_NOT_SUPPORTED;
+
+ ret = i40e_read_nvm_module_data(hw,
+ I40E_SR_EMP_SR_SETTINGS_PTR,
+ offset, 1,
+ &lldp_cfg.adminstatus);
+ } else {
+ ret = i40e_read_lldp_cfg(hw, &lldp_cfg);
+ }
if (ret)
return I40E_ERR_NOT_READY;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_dcb.h b/drivers/net/ethernet/intel/i40e/i40e_dcb.h
index ddb48ae7cce4..2a80c5daa376 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_dcb.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_dcb.h
@@ -30,6 +30,8 @@
#define I40E_CEE_SUBTYPE_APP_PRI 4
#define I40E_CEE_MAX_FEAT_TYPE 3
+#define I40E_LLDP_CURRENT_STATUS_XL710_OFFSET 0x2B
+#define I40E_LLDP_CURRENT_STATUS_X722_OFFSET 0x31
/* Defines for LLDP TLV header */
#define I40E_LLDP_TLV_LEN_SHIFT 0
#define I40E_LLDP_TLV_LEN_MASK (0x01FF << I40E_LLDP_TLV_LEN_SHIFT)
diff --git a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
index 55d20acfcf70..41232898d8ae 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
@@ -1732,29 +1732,15 @@ static const struct file_operations i40e_dbg_netdev_ops_fops = {
**/
void i40e_dbg_pf_init(struct i40e_pf *pf)
{
- struct dentry *pfile;
const char *name = pci_name(pf->pdev);
- const struct device *dev = &pf->pdev->dev;
pf->i40e_dbg_pf = debugfs_create_dir(name, i40e_dbg_root);
- if (!pf->i40e_dbg_pf)
- return;
-
- pfile = debugfs_create_file("command", 0600, pf->i40e_dbg_pf, pf,
- &i40e_dbg_command_fops);
- if (!pfile)
- goto create_failed;
- pfile = debugfs_create_file("netdev_ops", 0600, pf->i40e_dbg_pf, pf,
- &i40e_dbg_netdev_ops_fops);
- if (!pfile)
- goto create_failed;
+ debugfs_create_file("command", 0600, pf->i40e_dbg_pf, pf,
+ &i40e_dbg_command_fops);
- return;
-
-create_failed:
- dev_info(dev, "debugfs dir/file for %s failed\n", name);
- debugfs_remove_recursive(pf->i40e_dbg_pf);
+ debugfs_create_file("netdev_ops", 0600, pf->i40e_dbg_pf, pf,
+ &i40e_dbg_netdev_ops_fops);
}
/**
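The removed error handling reflects the current debugfs convention: creation calls should not be checked, since a debugfs failure must never change driver behavior and the API tolerates error pointers being passed back in as parents. A minimal sketch of the resulting pattern, with hypothetical example_* names:

    void example_dbg_init(struct example_pf *pf)
    {
            pf->dbg_dir = debugfs_create_dir(pci_name(pf->pdev), dbg_root);

            /* No NULL/IS_ERR checks: a failed dir is still a valid
             * parent argument; the files below simply won't appear.
             */
            debugfs_create_file("command", 0600, pf->dbg_dir, pf,
                                &example_cmd_fops);
    }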
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
index 527eb52c5401..41e1240acaea 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
@@ -711,6 +711,35 @@ static void i40e_phy_type_to_ethtool(struct i40e_pf *pf,
}
/**
+ * i40e_get_settings_link_up_fec - Get the FEC mode encoding from mask
+ * @req_fec_info: mask request FEC info
+ * @ks: ethtool ksettings to fill in
+ **/
+static void i40e_get_settings_link_up_fec(u8 req_fec_info,
+ struct ethtool_link_ksettings *ks)
+{
+ ethtool_link_ksettings_add_link_mode(ks, supported, FEC_NONE);
+ ethtool_link_ksettings_add_link_mode(ks, supported, FEC_RS);
+ ethtool_link_ksettings_add_link_mode(ks, supported, FEC_BASER);
+
+ if (I40E_AQ_SET_FEC_REQUEST_RS & req_fec_info) {
+ ethtool_link_ksettings_add_link_mode(ks, advertising, FEC_RS);
+ } else if (I40E_AQ_SET_FEC_REQUEST_KR & req_fec_info) {
+ ethtool_link_ksettings_add_link_mode(ks, advertising,
+ FEC_BASER);
+ } else {
+ ethtool_link_ksettings_add_link_mode(ks, advertising,
+ FEC_NONE);
+ if (I40E_AQ_SET_FEC_AUTO & req_fec_info) {
+ ethtool_link_ksettings_add_link_mode(ks, advertising,
+ FEC_RS);
+ ethtool_link_ksettings_add_link_mode(ks, advertising,
+ FEC_BASER);
+ }
+ }
+}
+
+/**
* i40e_get_settings_link_up - Get the Link settings for when link is up
* @hw: hw structure
* @ks: ethtool ksettings to fill in
@@ -769,13 +798,7 @@ static void i40e_get_settings_link_up(struct i40e_hw *hw,
25000baseSR_Full);
ethtool_link_ksettings_add_link_mode(ks, advertising,
25000baseSR_Full);
- ethtool_link_ksettings_add_link_mode(ks, supported, FEC_NONE);
- ethtool_link_ksettings_add_link_mode(ks, supported, FEC_RS);
- ethtool_link_ksettings_add_link_mode(ks, supported, FEC_BASER);
- ethtool_link_ksettings_add_link_mode(ks, advertising, FEC_NONE);
- ethtool_link_ksettings_add_link_mode(ks, advertising, FEC_RS);
- ethtool_link_ksettings_add_link_mode(ks, advertising,
- FEC_BASER);
+ i40e_get_settings_link_up_fec(hw_link_info->req_fec_info, ks);
ethtool_link_ksettings_add_link_mode(ks, supported,
10000baseSR_Full);
ethtool_link_ksettings_add_link_mode(ks, advertising,
@@ -892,9 +915,6 @@ static void i40e_get_settings_link_up(struct i40e_hw *hw,
40000baseKR4_Full);
ethtool_link_ksettings_add_link_mode(ks, supported,
25000baseKR_Full);
- ethtool_link_ksettings_add_link_mode(ks, supported, FEC_NONE);
- ethtool_link_ksettings_add_link_mode(ks, supported, FEC_RS);
- ethtool_link_ksettings_add_link_mode(ks, supported, FEC_BASER);
ethtool_link_ksettings_add_link_mode(ks, supported,
20000baseKR2_Full);
ethtool_link_ksettings_add_link_mode(ks, supported,
@@ -908,10 +928,7 @@ static void i40e_get_settings_link_up(struct i40e_hw *hw,
40000baseKR4_Full);
ethtool_link_ksettings_add_link_mode(ks, advertising,
25000baseKR_Full);
- ethtool_link_ksettings_add_link_mode(ks, advertising, FEC_NONE);
- ethtool_link_ksettings_add_link_mode(ks, advertising, FEC_RS);
- ethtool_link_ksettings_add_link_mode(ks, advertising,
- FEC_BASER);
+ i40e_get_settings_link_up_fec(hw_link_info->req_fec_info, ks);
ethtool_link_ksettings_add_link_mode(ks, advertising,
20000baseKR2_Full);
ethtool_link_ksettings_add_link_mode(ks, advertising,
@@ -929,13 +946,8 @@ static void i40e_get_settings_link_up(struct i40e_hw *hw,
25000baseCR_Full);
ethtool_link_ksettings_add_link_mode(ks, advertising,
25000baseCR_Full);
- ethtool_link_ksettings_add_link_mode(ks, supported, FEC_NONE);
- ethtool_link_ksettings_add_link_mode(ks, supported, FEC_RS);
- ethtool_link_ksettings_add_link_mode(ks, supported, FEC_BASER);
- ethtool_link_ksettings_add_link_mode(ks, advertising, FEC_NONE);
- ethtool_link_ksettings_add_link_mode(ks, advertising, FEC_RS);
- ethtool_link_ksettings_add_link_mode(ks, advertising,
- FEC_BASER);
+ i40e_get_settings_link_up_fec(hw_link_info->req_fec_info, ks);
+
break;
case I40E_PHY_TYPE_25GBASE_AOC:
case I40E_PHY_TYPE_25GBASE_ACC:
@@ -945,13 +957,8 @@ static void i40e_get_settings_link_up(struct i40e_hw *hw,
25000baseCR_Full);
ethtool_link_ksettings_add_link_mode(ks, advertising,
25000baseCR_Full);
- ethtool_link_ksettings_add_link_mode(ks, supported, FEC_NONE);
- ethtool_link_ksettings_add_link_mode(ks, supported, FEC_RS);
- ethtool_link_ksettings_add_link_mode(ks, supported, FEC_BASER);
- ethtool_link_ksettings_add_link_mode(ks, advertising, FEC_NONE);
- ethtool_link_ksettings_add_link_mode(ks, advertising, FEC_RS);
- ethtool_link_ksettings_add_link_mode(ks, advertising,
- FEC_BASER);
+ i40e_get_settings_link_up_fec(hw_link_info->req_fec_info, ks);
+
ethtool_link_ksettings_add_link_mode(ks, supported,
10000baseCR_Full);
ethtool_link_ksettings_add_link_mode(ks, advertising,
@@ -2250,7 +2257,7 @@ static void i40e_get_ethtool_stats(struct net_device *netdev,
struct i40e_netdev_priv *np = netdev_priv(netdev);
struct i40e_vsi *vsi = np->vsi;
struct i40e_pf *pf = vsi->back;
- struct i40e_veb *veb = pf->veb[pf->lan_veb];
+ struct i40e_veb *veb = NULL;
unsigned int i;
bool veb_stats;
u64 *p = data;
@@ -2273,8 +2280,14 @@ static void i40e_get_ethtool_stats(struct net_device *netdev,
goto check_data_pointer;
veb_stats = ((pf->lan_veb != I40E_NO_VEB) &&
+ (pf->lan_veb < I40E_MAX_VEB) &&
(pf->flags & I40E_FLAG_VEB_STATS_ENABLED));
+ if (veb_stats) {
+ veb = pf->veb[pf->lan_veb];
+ i40e_update_veb_stats(veb);
+ }
+
/* If veb stats aren't enabled, pass NULL instead of the veb so that
* we initialize stats to zero and update the data pointer
* intelligently
@@ -2329,7 +2342,7 @@ static void i40e_get_stat_strings(struct net_device *netdev, u8 *data)
}
if (vsi != pf->vsi[pf->lan_vsi] || pf->hw.partition_id != 1)
- return;
+ goto check_data_pointer;
i40e_add_stat_strings(&data, i40e_gstrings_veb_stats);
@@ -2341,6 +2354,7 @@ static void i40e_get_stat_strings(struct net_device *netdev, u8 *data)
for (i = 0; i < I40E_MAX_USER_PRIORITY; i++)
i40e_add_stat_strings(&data, i40e_gstrings_pfc_stats, i);
+check_data_pointer:
WARN_ONCE(data - p != i40e_get_stats_count(netdev) * ETH_GSTRING_LEN,
"stat strings count mismatch!");
}
@@ -5123,6 +5137,12 @@ static int i40e_get_module_info(struct net_device *netdev,
/* Module is not SFF-8472 compliant */
modinfo->type = ETH_MODULE_SFF_8079;
modinfo->eeprom_len = ETH_MODULE_SFF_8079_LEN;
+ } else if (!(sff8472_swap & I40E_MODULE_SFF_DDM_IMPLEMENTED)) {
+ /* Module is SFF-8472 compliant but doesn't implement
+ * Digital Diagnostic Monitoring (DDM).
+ */
+ modinfo->type = ETH_MODULE_SFF_8079;
+ modinfo->eeprom_len = ETH_MODULE_SFF_8079_LEN;
} else {
modinfo->type = ETH_MODULE_SFF_8472;
modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN;
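The new branch distinguishes SFF-8472 modules that omit the optional Digital Diagnostic Monitoring page. Assuming the standard SFF-8472 layout, bit 6 of the diagnostic monitoring type byte advertises the A2h DDM page; without it only the 256-byte A0h page is readable, so the EEPROM is reported at the SFF-8079 length. A sketch of the decision, with hypothetical variable names:

    /* sff8472_comp_byte: SFF-8472 diagnostic monitoring type byte */
    if (!(sff8472_comp_byte & 0x40))        /* DDM page absent         */
            eeprom_len = 256;               /* ETH_MODULE_SFF_8079_LEN */
    else
            eeprom_len = 512;               /* ETH_MODULE_SFF_8472_LEN */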
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index 9ebbe3da61bb..fdf43d87e983 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -534,6 +534,10 @@ void i40e_pf_reset_stats(struct i40e_pf *pf)
sizeof(pf->veb[i]->stats));
memset(&pf->veb[i]->stats_offsets, 0,
sizeof(pf->veb[i]->stats_offsets));
+ memset(&pf->veb[i]->tc_stats, 0,
+ sizeof(pf->veb[i]->tc_stats));
+ memset(&pf->veb[i]->tc_stats_offsets, 0,
+ sizeof(pf->veb[i]->tc_stats_offsets));
pf->veb[i]->stat_offsets_loaded = false;
}
}
@@ -677,7 +681,7 @@ void i40e_update_eth_stats(struct i40e_vsi *vsi)
* i40e_update_veb_stats - Update Switch component statistics
* @veb: the VEB being updated
**/
-static void i40e_update_veb_stats(struct i40e_veb *veb)
+void i40e_update_veb_stats(struct i40e_veb *veb)
{
struct i40e_pf *pf = veb->pf;
struct i40e_hw *hw = &pf->hw;
@@ -2530,6 +2534,10 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
vsi_name,
i40e_stat_str(hw, aq_ret),
i40e_aq_str(hw, hw->aq.asq_last_status));
+ } else {
+ dev_info(&pf->pdev->dev, "%s is %s allmulti mode.\n",
+ vsi->netdev->name,
+ cur_multipromisc ? "entering" : "leaving");
}
}
@@ -3360,7 +3368,7 @@ static int i40e_vsi_configure_tx(struct i40e_vsi *vsi)
for (i = 0; (i < vsi->num_queue_pairs) && !err; i++)
err = i40e_configure_tx_ring(vsi->tx_rings[i]);
- if (!i40e_enabled_xdp_vsi(vsi))
+ if (err || !i40e_enabled_xdp_vsi(vsi))
return err;
for (i = 0; (i < vsi->num_queue_pairs) && !err; i++)
@@ -6412,50 +6420,6 @@ static int i40e_resume_port_tx(struct i40e_pf *pf)
}
/**
- * i40e_update_dcb_config
- * @hw: pointer to the HW struct
- * @enable_mib_change: enable MIB change event
- *
- * Update DCB configuration from the firmware
- **/
-static enum i40e_status_code
-i40e_update_dcb_config(struct i40e_hw *hw, bool enable_mib_change)
-{
- struct i40e_lldp_variables lldp_cfg;
- i40e_status ret;
-
- if (!hw->func_caps.dcb)
- return I40E_NOT_SUPPORTED;
-
- /* Read LLDP NVM area */
- ret = i40e_read_lldp_cfg(hw, &lldp_cfg);
- if (ret)
- return I40E_ERR_NOT_READY;
-
- /* Get DCBX status */
- ret = i40e_get_dcbx_status(hw, &hw->dcbx_status);
- if (ret)
- return ret;
-
- /* Check the DCBX Status */
- if (hw->dcbx_status == I40E_DCBX_STATUS_DONE ||
- hw->dcbx_status == I40E_DCBX_STATUS_IN_PROGRESS) {
- /* Get current DCBX configuration */
- ret = i40e_get_dcb_config(hw);
- if (ret)
- return ret;
- } else if (hw->dcbx_status == I40E_DCBX_STATUS_DISABLED) {
- return I40E_ERR_NOT_READY;
- }
-
- /* Configure the LLDP MIB change event */
- if (enable_mib_change)
- ret = i40e_aq_cfg_lldp_mib_change_event(hw, true, NULL);
-
- return ret;
-}
-
-/**
* i40e_init_pf_dcb - Initialize DCB configuration
* @pf: PF being configured
*
@@ -6477,7 +6441,7 @@ static int i40e_init_pf_dcb(struct i40e_pf *pf)
goto out;
}
- err = i40e_update_dcb_config(hw, true);
+ err = i40e_init_dcb(hw, true);
if (!err) {
/* Device/Function is not DCBX capable */
if ((!hw->func_caps.dcb) ||
@@ -8486,6 +8450,11 @@ void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags, bool lock_acquired)
dev_dbg(&pf->pdev->dev, "PFR requested\n");
i40e_handle_reset_warning(pf, lock_acquired);
+ dev_info(&pf->pdev->dev,
+ pf->flags & I40E_FLAG_DISABLE_FW_LLDP ?
+ "FW LLDP is disabled\n" :
+ "FW LLDP is enabled\n");
+
} else if (reset_flags & BIT_ULL(__I40E_REINIT_REQUESTED)) {
int v;
@@ -14569,9 +14538,20 @@ void i40e_set_fec_in_flags(u8 fec_cfg, u32 *flags)
**/
static bool i40e_check_recovery_mode(struct i40e_pf *pf)
{
- u32 val = rd32(&pf->hw, I40E_GL_FWSTS);
-
- if (val & I40E_GL_FWSTS_FWS1B_MASK) {
+ u32 val = rd32(&pf->hw, I40E_GL_FWSTS) & I40E_GL_FWSTS_FWS1B_MASK;
+ bool is_recovery_mode = false;
+
+ if (pf->hw.mac.type == I40E_MAC_XL710)
+ is_recovery_mode =
+ val == I40E_XL710_GL_FWSTS_FWS1B_REC_MOD_CORER_MASK ||
+ val == I40E_XL710_GL_FWSTS_FWS1B_REC_MOD_GLOBR_MASK ||
+ val == I40E_XL710_GL_FWSTS_FWS1B_REC_MOD_TRANSITION_MASK ||
+ val == I40E_XL710_GL_FWSTS_FWS1B_REC_MOD_NVM_MASK;
+ if (pf->hw.mac.type == I40E_MAC_X722)
+ is_recovery_mode =
+ val == I40E_X722_GL_FWSTS_FWS1B_REC_MOD_CORER_MASK ||
+ val == I40E_X722_GL_FWSTS_FWS1B_REC_MOD_GLOBR_MASK;
+ if (is_recovery_mode) {
dev_notice(&pf->pdev->dev, "Firmware recovery mode detected. Limiting functionality.\n");
dev_notice(&pf->pdev->dev, "Refer to the Intel(R) Ethernet Adapters and Devices User Guide for details on firmware recovery mode.\n");
set_bit(__I40E_RECOVERY_MODE, pf->state);
@@ -14585,6 +14565,51 @@ static bool i40e_check_recovery_mode(struct i40e_pf *pf)
}
/**
+ * i40e_pf_loop_reset - perform reset in a loop.
+ * @pf: board private structure
+ *
+ * This function is useful when a NIC is about to enter recovery mode.
+ * When a NIC's internal data structures become corrupted, its firmware
+ * eventually enters recovery mode. Right after a POR it can take about
+ * 7 minutes for the firmware to get there; until then the NIC is in an
+ * intermediate state. The only way for a driver to detect that
+ * intermediate state is to issue a series of PF resets and check the
+ * return value. A PF reset that returns success may still mean the
+ * firmware is in recovery mode, so the caller of this code needs to
+ * check for recovery mode after this function returns success. There
+ * is a small chance that the firmware hangs in the intermediate state
+ * forever. Since waiting the full 7 minutes is quite a lot of time,
+ * this function waits 10 seconds and then gives up by returning an
+ * error.
+ *
+ * Return 0 on success, negative on failure.
+ **/
+static i40e_status i40e_pf_loop_reset(struct i40e_pf *pf)
+{
+ const unsigned short MAX_CNT = 1000;
+ const unsigned short MSECS = 10;
+ struct i40e_hw *hw = &pf->hw;
+ i40e_status ret;
+ int cnt;
+
+ for (cnt = 0; cnt < MAX_CNT; ++cnt) {
+ ret = i40e_pf_reset(hw);
+ if (!ret)
+ break;
+ msleep(MSECS);
+ }
+
+ if (cnt == MAX_CNT) {
+ dev_info(&pf->pdev->dev, "PF reset failed: %d\n", ret);
+ return ret;
+ }
+
+ pf->pfr_count++;
+ return ret;
+}
+
+/**
* i40e_init_recovery_mode - initialize subsystems needed in recovery mode
* @pf: board private structure
* @hw: ptr to the hardware info
@@ -14812,14 +14837,22 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
/* Reset here to make sure all is clean and to define PF 'n' */
i40e_clear_hw(hw);
- if (!i40e_check_recovery_mode(pf)) {
- err = i40e_pf_reset(hw);
- if (err) {
- dev_info(&pdev->dev, "Initial pf_reset failed: %d\n", err);
- goto err_pf_reset;
- }
- pf->pfr_count++;
+
+ err = i40e_set_mac_type(hw);
+ if (err) {
+ dev_warn(&pdev->dev, "unidentified MAC or BLANK NVM: %d\n",
+ err);
+ goto err_pf_reset;
+ }
+
+ err = i40e_pf_loop_reset(pf);
+ if (err) {
+ dev_info(&pdev->dev, "Initial pf_reset failed: %d\n", err);
+ goto err_pf_reset;
}
+
+ i40e_check_recovery_mode(pf);
+
hw->aq.num_arq_entries = I40E_AQ_LEN;
hw->aq.num_asq_entries = I40E_AQ_LEN;
hw->aq.arq_buf_size = I40E_MAX_AQ_BUF_SIZE;
@@ -15605,8 +15638,7 @@ static void i40e_shutdown(struct pci_dev *pdev)
**/
static int __maybe_unused i40e_suspend(struct device *dev)
{
- struct pci_dev *pdev = to_pci_dev(dev);
- struct i40e_pf *pf = pci_get_drvdata(pdev);
+ struct i40e_pf *pf = dev_get_drvdata(dev);
struct i40e_hw *hw = &pf->hw;
/* If we're already suspended, then there is nothing to do */
@@ -15656,8 +15688,7 @@ static int __maybe_unused i40e_suspend(struct device *dev)
**/
static int __maybe_unused i40e_resume(struct device *dev)
{
- struct pci_dev *pdev = to_pci_dev(dev);
- struct i40e_pf *pf = pci_get_drvdata(pdev);
+ struct i40e_pf *pf = dev_get_drvdata(dev);
int err;
/* If we're not suspended, then there is nothing to do */
@@ -15674,7 +15705,7 @@ static int __maybe_unused i40e_resume(struct device *dev)
*/
err = i40e_restore_interrupt_scheme(pf);
if (err) {
- dev_err(&pdev->dev, "Cannot restore interrupt scheme: %d\n",
+ dev_err(dev, "Cannot restore interrupt scheme: %d\n",
err);
}
diff --git a/drivers/net/ethernet/intel/i40e/i40e_nvm.c b/drivers/net/ethernet/intel/i40e/i40e_nvm.c
index c508b75c3c09..e4d8d20baf3b 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_nvm.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_nvm.c
@@ -322,6 +322,77 @@ i40e_status i40e_read_nvm_word(struct i40e_hw *hw, u16 offset,
}
/**
+ * i40e_read_nvm_module_data - Reads NVM Buffer to specified memory location
+ * @hw: pointer to the HW structure
+ * @module_ptr: Pointer to module in words with respect to NVM beginning
+ * @offset: offset in words from module start
+ * @words_data_size: Words to read from NVM
+ * @data_ptr: Pointer to memory location where resulting buffer will be stored
+ **/
+i40e_status i40e_read_nvm_module_data(struct i40e_hw *hw,
+ u8 module_ptr, u16 offset,
+ u16 words_data_size,
+ u16 *data_ptr)
+{
+ i40e_status status;
+ u16 ptr_value = 0;
+ u32 flat_offset;
+
+ if (module_ptr != 0) {
+ status = i40e_read_nvm_word(hw, module_ptr, &ptr_value);
+ if (status) {
+ i40e_debug(hw, I40E_DEBUG_ALL,
+ "Reading nvm word failed.Error code: %d.\n",
+ status);
+ return I40E_ERR_NVM;
+ }
+ }
+#define I40E_NVM_INVALID_PTR_VAL 0x7FFF
+#define I40E_NVM_INVALID_VAL 0xFFFF
+
+ /* Pointer not initialized */
+ if (ptr_value == I40E_NVM_INVALID_PTR_VAL ||
+ ptr_value == I40E_NVM_INVALID_VAL)
+ return I40E_ERR_BAD_PTR;
+
+ /* Check whether the module is in SR mapped area or outside */
+ if (ptr_value & I40E_PTR_TYPE) {
+ /* Pointer points outside of the Shared RAM mapped area */
+ ptr_value &= ~I40E_PTR_TYPE;
+
+ /* ptr_value is in 4kB units; convert it to a word offset */
+ ptr_value /= 2;
+ flat_offset = ((u32)ptr_value * 0x1000) + (u32)offset;
+ status = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
+ if (!status) {
+ status = i40e_aq_read_nvm(hw, 0, 2 * flat_offset,
+ 2 * words_data_size,
+ data_ptr, true, NULL);
+ i40e_release_nvm(hw);
+ if (status) {
+ i40e_debug(hw, I40E_DEBUG_ALL,
+ "Reading nvm aq failed.Error code: %d.\n",
+ status);
+ return I40E_ERR_NVM;
+ }
+ } else {
+ return I40E_ERR_NVM;
+ }
+ } else {
+ /* Read from the Shadow RAM */
+ status = i40e_read_nvm_buffer(hw, ptr_value + offset,
+ &words_data_size, data_ptr);
+ if (status) {
+ i40e_debug(hw, I40E_DEBUG_ALL,
+ "Reading nvm buffer failed.Error code: %d.\n",
+ status);
+ }
+ }
+
+ return status;
+}
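A worked example of the conversion above may help. The module pointer read from the NVM carries a type bit (I40E_PTR_TYPE) selecting flat flash addressing, with the remaining bits counted in 4 KB units; since 4 KB is 0x800 16-bit words, dividing the unit count by two and multiplying by 0x1000 yields a word offset. With an illustrative pointer of 0x8004 and a 3-word offset:

    ptr_value = 0x8004;                    /* type bit set: flash area */
    ptr_value &= ~I40E_PTR_TYPE;           /* 0x0004: four 4 KB blocks */
    ptr_value /= 2;                        /* 2                        */
    flat_offset = ptr_value * 0x1000 + 3;  /* 0x2003 words             */
    /* i40e_aq_read_nvm() then takes byte counts: 2 * flat_offset.     */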
+
+/**
* i40e_read_nvm_buffer_srctl - Reads Shadow RAM buffer via SRCTL register
* @hw: pointer to the HW structure
* @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF).
@@ -430,6 +501,36 @@ static i40e_status __i40e_read_nvm_buffer(struct i40e_hw *hw,
}
/**
+ * i40e_read_nvm_buffer - Reads Shadow RAM buffer and acquire lock if necessary
+ * @hw: pointer to the HW structure
+ * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF).
+ * @words: (in) number of words to read; (out) number of words actually read
+ * @data: words read from the Shadow RAM
+ *
+ * Reads 16-bit words (a data buffer) from the Shadow RAM, using either
+ * the AQ or the SRCTL access method depending on the HW flags. On the
+ * AQ path the buffer read is preceded by taking NVM ownership and
+ * followed by its release.
+ **/
+i40e_status i40e_read_nvm_buffer(struct i40e_hw *hw, u16 offset,
+ u16 *words, u16 *data)
+{
+ i40e_status ret_code = 0;
+
+ if (hw->flags & I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE) {
+ ret_code = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
+ if (!ret_code) {
+ ret_code = i40e_read_nvm_buffer_aq(hw, offset, words,
+ data);
+ i40e_release_nvm(hw);
+ }
+ } else {
+ ret_code = i40e_read_nvm_buffer_srctl(hw, offset, words, data);
+ }
+
+ return ret_code;
+}
+
+/**
* i40e_write_nvm_aq - Writes Shadow RAM.
* @hw: pointer to the HW structure.
* @module_pointer: module pointer location in words from the NVM beginning
diff --git a/drivers/net/ethernet/intel/i40e/i40e_prototype.h b/drivers/net/ethernet/intel/i40e/i40e_prototype.h
index eac88bcc6c06..5250441bf75b 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_prototype.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_prototype.h
@@ -315,6 +315,12 @@ i40e_status i40e_acquire_nvm(struct i40e_hw *hw,
void i40e_release_nvm(struct i40e_hw *hw);
i40e_status i40e_read_nvm_word(struct i40e_hw *hw, u16 offset,
u16 *data);
+i40e_status i40e_read_nvm_module_data(struct i40e_hw *hw,
+ u8 module_ptr, u16 offset,
+ u16 words_data_size,
+ u16 *data_ptr);
+i40e_status i40e_read_nvm_buffer(struct i40e_hw *hw, u16 offset,
+ u16 *words, u16 *data);
i40e_status i40e_update_nvm_checksum(struct i40e_hw *hw);
i40e_status i40e_validate_nvm_checksum(struct i40e_hw *hw,
u16 *checksum);
@@ -326,6 +332,8 @@ void i40e_nvmupd_check_wait_event(struct i40e_hw *hw, u16 opcode,
void i40e_nvmupd_clear_wait_state(struct i40e_hw *hw);
void i40e_set_pci_config_data(struct i40e_hw *hw, u16 link_status);
+i40e_status i40e_set_mac_type(struct i40e_hw *hw);
+
extern struct i40e_rx_ptype_decoded i40e_ptype_lookup[];
static inline struct i40e_rx_ptype_decoded decode_rx_desc_ptype(u8 ptype)
diff --git a/drivers/net/ethernet/intel/i40e/i40e_register.h b/drivers/net/ethernet/intel/i40e/i40e_register.h
index 52e3680c57f8..d35d690ca10f 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_register.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_register.h
@@ -58,7 +58,7 @@
#define I40E_PF_ARQLEN_ARQCRIT_SHIFT 30
#define I40E_PF_ARQLEN_ARQCRIT_MASK I40E_MASK(0x1, I40E_PF_ARQLEN_ARQCRIT_SHIFT)
#define I40E_PF_ARQLEN_ARQENABLE_SHIFT 31
-#define I40E_PF_ARQLEN_ARQENABLE_MASK I40E_MASK(0x1, I40E_PF_ARQLEN_ARQENABLE_SHIFT)
+#define I40E_PF_ARQLEN_ARQENABLE_MASK I40E_MASK(0x1u, I40E_PF_ARQLEN_ARQENABLE_SHIFT)
#define I40E_PF_ARQT 0x00080480 /* Reset: EMPR */
#define I40E_PF_ARQT_ARQT_SHIFT 0
#define I40E_PF_ARQT_ARQT_MASK I40E_MASK(0x3FF, I40E_PF_ARQT_ARQT_SHIFT)
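The bit-31 masks in these hunks switch from 0x1 to 0x1u. Assuming I40E_MASK expands to a plain left shift, shifting a signed int 1 into bit 31 lands in the sign bit, which is undefined behavior in C; an unsigned constant keeps the result a well-defined 0x80000000. A minimal standalone illustration (EXAMPLE_MASK is not the driver's actual macro):

    #define EXAMPLE_MASK(mask, shift) ((mask) << (shift))

    /* 0x1 is a signed int, so EXAMPLE_MASK(0x1, 31) shifts into the
     * sign bit; 0x1u makes the shift unsigned and well-defined.
     */
    unsigned int enable = EXAMPLE_MASK(0x1u, 31);  /* 0x80000000u */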
@@ -81,7 +81,7 @@
#define I40E_PF_ATQLEN_ATQCRIT_SHIFT 30
#define I40E_PF_ATQLEN_ATQCRIT_MASK I40E_MASK(0x1, I40E_PF_ATQLEN_ATQCRIT_SHIFT)
#define I40E_PF_ATQLEN_ATQENABLE_SHIFT 31
-#define I40E_PF_ATQLEN_ATQENABLE_MASK I40E_MASK(0x1, I40E_PF_ATQLEN_ATQENABLE_SHIFT)
+#define I40E_PF_ATQLEN_ATQENABLE_MASK I40E_MASK(0x1u, I40E_PF_ATQLEN_ATQENABLE_SHIFT)
#define I40E_PF_ATQT 0x00080400 /* Reset: EMPR */
#define I40E_PF_ATQT_ATQT_SHIFT 0
#define I40E_PF_ATQT_ATQT_MASK I40E_MASK(0x3FF, I40E_PF_ATQT_ATQT_SHIFT)
@@ -108,7 +108,7 @@
#define I40E_VF_ARQLEN_ARQCRIT_SHIFT 30
#define I40E_VF_ARQLEN_ARQCRIT_MASK I40E_MASK(0x1, I40E_VF_ARQLEN_ARQCRIT_SHIFT)
#define I40E_VF_ARQLEN_ARQENABLE_SHIFT 31
-#define I40E_VF_ARQLEN_ARQENABLE_MASK I40E_MASK(0x1, I40E_VF_ARQLEN_ARQENABLE_SHIFT)
+#define I40E_VF_ARQLEN_ARQENABLE_MASK I40E_MASK(0x1u, I40E_VF_ARQLEN_ARQENABLE_SHIFT)
#define I40E_VF_ARQT(_VF) (0x00082C00 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: EMPR */
#define I40E_VF_ARQT_MAX_INDEX 127
#define I40E_VF_ARQT_ARQT_SHIFT 0
@@ -136,7 +136,7 @@
#define I40E_VF_ATQLEN_ATQCRIT_SHIFT 30
#define I40E_VF_ATQLEN_ATQCRIT_MASK I40E_MASK(0x1, I40E_VF_ATQLEN_ATQCRIT_SHIFT)
#define I40E_VF_ATQLEN_ATQENABLE_SHIFT 31
-#define I40E_VF_ATQLEN_ATQENABLE_MASK I40E_MASK(0x1, I40E_VF_ATQLEN_ATQENABLE_SHIFT)
+#define I40E_VF_ATQLEN_ATQENABLE_MASK I40E_MASK(0x1u, I40E_VF_ATQLEN_ATQENABLE_SHIFT)
#define I40E_VF_ATQT(_VF) (0x00082800 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: EMPR */
#define I40E_VF_ATQT_MAX_INDEX 127
#define I40E_VF_ATQT_ATQT_SHIFT 0
@@ -259,7 +259,7 @@
#define I40E_PRTDCB_RETSTCC_UPINTC_MODE_SHIFT 30
#define I40E_PRTDCB_RETSTCC_UPINTC_MODE_MASK I40E_MASK(0x1, I40E_PRTDCB_RETSTCC_UPINTC_MODE_SHIFT)
#define I40E_PRTDCB_RETSTCC_ETSTC_SHIFT 31
-#define I40E_PRTDCB_RETSTCC_ETSTC_MASK I40E_MASK(0x1, I40E_PRTDCB_RETSTCC_ETSTC_SHIFT)
+#define I40E_PRTDCB_RETSTCC_ETSTC_MASK I40E_MASK(0x1u, I40E_PRTDCB_RETSTCC_ETSTC_SHIFT)
#define I40E_PRTDCB_RPPMC 0x001223A0 /* Reset: CORER */
#define I40E_PRTDCB_RPPMC_LANRPPM_SHIFT 0
#define I40E_PRTDCB_RPPMC_LANRPPM_MASK I40E_MASK(0xFF, I40E_PRTDCB_RPPMC_LANRPPM_SHIFT)
@@ -363,6 +363,12 @@
#define I40E_GL_FWSTS_FWRI_MASK I40E_MASK(0x1, I40E_GL_FWSTS_FWRI_SHIFT)
#define I40E_GL_FWSTS_FWS1B_SHIFT 16
#define I40E_GL_FWSTS_FWS1B_MASK I40E_MASK(0xFF, I40E_GL_FWSTS_FWS1B_SHIFT)
+#define I40E_XL710_GL_FWSTS_FWS1B_REC_MOD_CORER_MASK I40E_MASK(0x30, I40E_GL_FWSTS_FWS1B_SHIFT)
+#define I40E_XL710_GL_FWSTS_FWS1B_REC_MOD_GLOBR_MASK I40E_MASK(0x31, I40E_GL_FWSTS_FWS1B_SHIFT)
+#define I40E_XL710_GL_FWSTS_FWS1B_REC_MOD_TRANSITION_MASK I40E_MASK(0x32, I40E_GL_FWSTS_FWS1B_SHIFT)
+#define I40E_XL710_GL_FWSTS_FWS1B_REC_MOD_NVM_MASK I40E_MASK(0x33, I40E_GL_FWSTS_FWS1B_SHIFT)
+#define I40E_X722_GL_FWSTS_FWS1B_REC_MOD_CORER_MASK I40E_MASK(0xB, I40E_GL_FWSTS_FWS1B_SHIFT)
+#define I40E_X722_GL_FWSTS_FWS1B_REC_MOD_GLOBR_MASK I40E_MASK(0xC, I40E_GL_FWSTS_FWS1B_SHIFT)
#define I40E_GLGEN_CLKSTAT 0x000B8184 /* Reset: POR */
#define I40E_GLGEN_CLKSTAT_CLKMODE_SHIFT 0
#define I40E_GLGEN_CLKSTAT_CLKMODE_MASK I40E_MASK(0x1, I40E_GLGEN_CLKSTAT_CLKMODE_SHIFT)
@@ -503,7 +509,7 @@
#define I40E_GLGEN_MSCA_MDICMD_SHIFT 30
#define I40E_GLGEN_MSCA_MDICMD_MASK I40E_MASK(0x1, I40E_GLGEN_MSCA_MDICMD_SHIFT)
#define I40E_GLGEN_MSCA_MDIINPROGEN_SHIFT 31
-#define I40E_GLGEN_MSCA_MDIINPROGEN_MASK I40E_MASK(0x1, I40E_GLGEN_MSCA_MDIINPROGEN_SHIFT)
+#define I40E_GLGEN_MSCA_MDIINPROGEN_MASK I40E_MASK(0x1u, I40E_GLGEN_MSCA_MDIINPROGEN_SHIFT)
#define I40E_GLGEN_MSRWD(_i) (0x0008819C + ((_i) * 4)) /* _i=0...3 */ /* Reset: POR */
#define I40E_GLGEN_MSRWD_MAX_INDEX 3
#define I40E_GLGEN_MSRWD_MDIWRDATA_SHIFT 0
@@ -1242,14 +1248,14 @@
#define I40E_GLLAN_TXPRE_QDIS_SET_QDIS_SHIFT 30
#define I40E_GLLAN_TXPRE_QDIS_SET_QDIS_MASK I40E_MASK(0x1, I40E_GLLAN_TXPRE_QDIS_SET_QDIS_SHIFT)
#define I40E_GLLAN_TXPRE_QDIS_CLEAR_QDIS_SHIFT 31
-#define I40E_GLLAN_TXPRE_QDIS_CLEAR_QDIS_MASK I40E_MASK(0x1, I40E_GLLAN_TXPRE_QDIS_CLEAR_QDIS_SHIFT)
+#define I40E_GLLAN_TXPRE_QDIS_CLEAR_QDIS_MASK I40E_MASK(0x1u, I40E_GLLAN_TXPRE_QDIS_CLEAR_QDIS_SHIFT)
#define I40E_PFLAN_QALLOC 0x001C0400 /* Reset: CORER */
#define I40E_PFLAN_QALLOC_FIRSTQ_SHIFT 0
#define I40E_PFLAN_QALLOC_FIRSTQ_MASK I40E_MASK(0x7FF, I40E_PFLAN_QALLOC_FIRSTQ_SHIFT)
#define I40E_PFLAN_QALLOC_LASTQ_SHIFT 16
#define I40E_PFLAN_QALLOC_LASTQ_MASK I40E_MASK(0x7FF, I40E_PFLAN_QALLOC_LASTQ_SHIFT)
#define I40E_PFLAN_QALLOC_VALID_SHIFT 31
-#define I40E_PFLAN_QALLOC_VALID_MASK I40E_MASK(0x1, I40E_PFLAN_QALLOC_VALID_SHIFT)
+#define I40E_PFLAN_QALLOC_VALID_MASK I40E_MASK(0x1u, I40E_PFLAN_QALLOC_VALID_SHIFT)
#define I40E_QRX_ENA(_Q) (0x00120000 + ((_Q) * 4)) /* _i=0...1535 */ /* Reset: PFR */
#define I40E_QRX_ENA_MAX_INDEX 1535
#define I40E_QRX_ENA_QENA_REQ_SHIFT 0
@@ -1658,7 +1664,7 @@
#define I40E_GLNVM_SRCTL_START_SHIFT 30
#define I40E_GLNVM_SRCTL_START_MASK I40E_MASK(0x1, I40E_GLNVM_SRCTL_START_SHIFT)
#define I40E_GLNVM_SRCTL_DONE_SHIFT 31
-#define I40E_GLNVM_SRCTL_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_SRCTL_DONE_SHIFT)
+#define I40E_GLNVM_SRCTL_DONE_MASK I40E_MASK(0x1u, I40E_GLNVM_SRCTL_DONE_SHIFT)
#define I40E_GLNVM_SRDATA 0x000B6114 /* Reset: POR */
#define I40E_GLNVM_SRDATA_WRDATA_SHIFT 0
#define I40E_GLNVM_SRDATA_WRDATA_MASK I40E_MASK(0xFFFF, I40E_GLNVM_SRDATA_WRDATA_SHIFT)
@@ -3025,7 +3031,7 @@
#define I40E_PF_VT_PFALLOC_LASTVF_SHIFT 8
#define I40E_PF_VT_PFALLOC_LASTVF_MASK I40E_MASK(0xFF, I40E_PF_VT_PFALLOC_LASTVF_SHIFT)
#define I40E_PF_VT_PFALLOC_VALID_SHIFT 31
-#define I40E_PF_VT_PFALLOC_VALID_MASK I40E_MASK(0x1, I40E_PF_VT_PFALLOC_VALID_SHIFT)
+#define I40E_PF_VT_PFALLOC_VALID_MASK I40E_MASK(0x1u, I40E_PF_VT_PFALLOC_VALID_SHIFT)
#define I40E_VP_MDET_RX(_VF) (0x0012A000 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: CORER */
#define I40E_VP_MDET_RX_MAX_INDEX 127
#define I40E_VP_MDET_RX_VALID_SHIFT 0
@@ -3161,7 +3167,7 @@
#define I40E_VF_ARQLEN1_ARQCRIT_SHIFT 30
#define I40E_VF_ARQLEN1_ARQCRIT_MASK I40E_MASK(0x1, I40E_VF_ARQLEN1_ARQCRIT_SHIFT)
#define I40E_VF_ARQLEN1_ARQENABLE_SHIFT 31
-#define I40E_VF_ARQLEN1_ARQENABLE_MASK I40E_MASK(0x1, I40E_VF_ARQLEN1_ARQENABLE_SHIFT)
+#define I40E_VF_ARQLEN1_ARQENABLE_MASK I40E_MASK(0x1u, I40E_VF_ARQLEN1_ARQENABLE_SHIFT)
#define I40E_VF_ARQT1 0x00007000 /* Reset: EMPR */
#define I40E_VF_ARQT1_ARQT_SHIFT 0
#define I40E_VF_ARQT1_ARQT_MASK I40E_MASK(0x3FF, I40E_VF_ARQT1_ARQT_SHIFT)
@@ -3184,7 +3190,7 @@
#define I40E_VF_ATQLEN1_ATQCRIT_SHIFT 30
#define I40E_VF_ATQLEN1_ATQCRIT_MASK I40E_MASK(0x1, I40E_VF_ATQLEN1_ATQCRIT_SHIFT)
#define I40E_VF_ATQLEN1_ATQENABLE_SHIFT 31
-#define I40E_VF_ATQLEN1_ATQENABLE_MASK I40E_MASK(0x1, I40E_VF_ATQLEN1_ATQENABLE_SHIFT)
+#define I40E_VF_ATQLEN1_ATQENABLE_MASK I40E_MASK(0x1u, I40E_VF_ATQLEN1_ATQENABLE_SHIFT)
#define I40E_VF_ATQT1 0x00008400 /* Reset: EMPR */
#define I40E_VF_ATQT1_ATQT_SHIFT 0
#define I40E_VF_ATQT1_ATQT_MASK I40E_MASK(0x3FF, I40E_VF_ATQT1_ATQT_SHIFT)
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
index 2a2fe3ec7926..e3f29dc8b290 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
@@ -3262,7 +3262,7 @@ int __i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
**/
bool __i40e_chk_linearize(struct sk_buff *skb)
{
- const struct skb_frag_struct *frag, *stale;
+ const skb_frag_t *frag, *stale;
int nr_frags, sum;
/* no need to check if number of frags is less than 7 */
@@ -3306,7 +3306,7 @@ bool __i40e_chk_linearize(struct sk_buff *skb)
* descriptor associated with the fragment.
*/
if (stale_size > I40E_MAX_DATA_PER_TXD) {
- int align_pad = -(stale->page_offset) &
+ int align_pad = -(skb_frag_off(stale)) &
(I40E_MAX_READ_REQ_SIZE - 1);
sum -= align_pad;
@@ -3349,7 +3349,7 @@ static inline int i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
{
unsigned int data_len = skb->data_len;
unsigned int size = skb_headlen(skb);
- struct skb_frag_struct *frag;
+ skb_frag_t *frag;
struct i40e_tx_buffer *tx_bi;
struct i40e_tx_desc *tx_desc;
u16 i = tx_ring->next_to_use;
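These hunks track the core networking change that made skb fragments opaque: struct skb_frag_struct became the skb_frag_t typedef, and direct page_offset access was replaced by the skb_frag_off() accessor from linux/skbuff.h. The alignment-padding arithmetic is unchanged; a minimal sketch of that idiom, assuming the driver's read-request size is 4096 bytes:

    /* Bytes needed to pad the stale fragment's offset up to the next
     * aligned boundary; negate-and-mask needs a power-of-two alignment.
     */
    static unsigned int frag_align_pad(const skb_frag_t *frag)
    {
            return -skb_frag_off(frag) & (4096 - 1);
    }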
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.h b/drivers/net/ethernet/intel/i40e/i40e_txrx.h
index 100e92d2982f..36d37f31a287 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.h
@@ -521,7 +521,7 @@ static inline u32 i40e_get_head(struct i40e_ring *tx_ring)
**/
static inline int i40e_xmit_descriptor_count(struct sk_buff *skb)
{
- const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0];
+ const skb_frag_t *frag = &skb_shinfo(skb)->frags[0];
unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
int count = 0, size = skb_headlen(skb);
diff --git a/drivers/net/ethernet/intel/i40e/i40e_type.h b/drivers/net/ethernet/intel/i40e/i40e_type.h
index 8f43aa47c263..b43ec94a0f29 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_type.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_type.h
@@ -443,6 +443,7 @@ struct i40e_nvm_access {
#define I40E_MODULE_SFF_8472_COMP 0x5E
#define I40E_MODULE_SFF_8472_SWAP 0x5C
#define I40E_MODULE_SFF_ADDR_MODE 0x04
+#define I40E_MODULE_SFF_DDM_IMPLEMENTED 0x40
#define I40E_MODULE_TYPE_QSFP_PLUS 0x0D
#define I40E_MODULE_TYPE_QSFP28 0x11
#define I40E_MODULE_QSFP_MAX_LEN 640
@@ -623,6 +624,7 @@ struct i40e_hw {
#define I40E_HW_FLAG_NVM_READ_REQUIRES_LOCK BIT_ULL(3)
#define I40E_HW_FLAG_FW_LLDP_STOPPABLE BIT_ULL(4)
#define I40E_HW_FLAG_FW_LLDP_PERSISTENT BIT_ULL(5)
+#define I40E_HW_FLAG_DROP_MODE BIT_ULL(7)
u64 flags;
/* Used in set switch config AQ command */
@@ -1316,6 +1318,7 @@ struct i40e_hw_port_stats {
#define I40E_SR_VPD_PTR 0x2F
#define I40E_SR_PCIE_ALT_AUTO_LOAD_PTR 0x3E
#define I40E_SR_SW_CHECKSUM_WORD 0x3F
+#define I40E_SR_EMP_SR_SETTINGS_PTR 0x48
/* Auxiliary field, mask and shift definition for Shadow RAM and NVM Flash */
#define I40E_SR_VPD_MODULE_MAX_SIZE 1024
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
index 02b09a8ad54c..f8aa4deceb5e 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
@@ -55,7 +55,12 @@ static void i40e_vc_notify_vf_link_state(struct i40e_vf *vf)
pfe.event = VIRTCHNL_EVENT_LINK_CHANGE;
pfe.severity = PF_EVENT_SEVERITY_INFO;
- if (vf->link_forced) {
+
+ /* Always report link is down if the VF queues aren't enabled */
+ if (!vf->queues_enabled) {
+ pfe.event_data.link_event.link_status = false;
+ pfe.event_data.link_event.link_speed = 0;
+ } else if (vf->link_forced) {
pfe.event_data.link_event.link_status = vf->link_up;
pfe.event_data.link_event.link_speed =
(vf->link_up ? VIRTCHNL_LINK_SPEED_40GB : 0);
@@ -65,6 +70,7 @@ static void i40e_vc_notify_vf_link_state(struct i40e_vf *vf)
pfe.event_data.link_event.link_speed =
i40e_virtchnl_link_speed(ls->link_speed);
}
+
i40e_aq_send_msg_to_vf(hw, abs_vf_id, VIRTCHNL_OP_EVENT,
0, (u8 *)&pfe, sizeof(pfe), NULL);
}
@@ -2037,30 +2043,33 @@ static int i40e_vc_config_promiscuous_mode_msg(struct i40e_vf *vf, u8 *msg)
alluni = true;
aq_ret = i40e_config_vf_promiscuous_mode(vf, info->vsi_id, allmulti,
alluni);
- if (!aq_ret) {
- if (allmulti) {
+ if (aq_ret)
+ goto err_out;
+
+ if (allmulti) {
+ if (!test_and_set_bit(I40E_VF_STATE_MC_PROMISC,
+ &vf->vf_states))
dev_info(&pf->pdev->dev,
"VF %d successfully set multicast promiscuous mode\n",
vf->vf_id);
- set_bit(I40E_VF_STATE_MC_PROMISC, &vf->vf_states);
- } else {
- dev_info(&pf->pdev->dev,
- "VF %d successfully unset multicast promiscuous mode\n",
- vf->vf_id);
- clear_bit(I40E_VF_STATE_MC_PROMISC, &vf->vf_states);
- }
- if (alluni) {
+ } else if (test_and_clear_bit(I40E_VF_STATE_MC_PROMISC,
+ &vf->vf_states))
+ dev_info(&pf->pdev->dev,
+ "VF %d successfully unset multicast promiscuous mode\n",
+ vf->vf_id);
+
+ if (alluni) {
+ if (!test_and_set_bit(I40E_VF_STATE_UC_PROMISC,
+ &vf->vf_states))
dev_info(&pf->pdev->dev,
"VF %d successfully set unicast promiscuous mode\n",
vf->vf_id);
- set_bit(I40E_VF_STATE_UC_PROMISC, &vf->vf_states);
- } else {
- dev_info(&pf->pdev->dev,
- "VF %d successfully unset unicast promiscuous mode\n",
- vf->vf_id);
- clear_bit(I40E_VF_STATE_UC_PROMISC, &vf->vf_states);
- }
- }
+ } else if (test_and_clear_bit(I40E_VF_STATE_UC_PROMISC,
+ &vf->vf_states))
+ dev_info(&pf->pdev->dev,
+ "VF %d successfully unset unicast promiscuous mode\n",
+ vf->vf_id);
+
err_out:
/* send the response to the VF */
return i40e_vc_send_resp_to_vf(vf,
@@ -2153,7 +2162,7 @@ static int i40e_vc_config_queues_msg(struct i40e_vf *vf, u8 *msg)
 * The VF does not know about these additional VSIs; all it
 * cares about is its own queues. The PF configures these queues
* to its appropriate VSIs based on TC mapping
- **/
+ */
if (vf->adq_enabled) {
if (idx >= ARRAY_SIZE(vf->ch)) {
aq_ret = I40E_ERR_NO_AVAILABLE_VSI;
@@ -2364,6 +2373,8 @@ static int i40e_vc_enable_queues_msg(struct i40e_vf *vf, u8 *msg)
}
}
+ vf->queues_enabled = true;
+
error_param:
/* send the response to the VF */
return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_ENABLE_QUEUES,
@@ -2385,6 +2396,9 @@ static int i40e_vc_disable_queues_msg(struct i40e_vf *vf, u8 *msg)
struct i40e_pf *pf = vf->pf;
i40e_status aq_ret = 0;
+ /* Immediately mark queues as disabled */
+ vf->queues_enabled = false;
+
if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
aq_ret = I40E_ERR_PARAM;
goto error_param;
@@ -3953,10 +3967,15 @@ int i40e_ndo_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac)
/* When the VF is resetting wait until it is done.
* It can take up to 200 milliseconds,
* but wait for up to 300 milliseconds to be safe.
+ * If the VF is indeed in reset, the VSI pointer must be
+ * refreshed to point at the newly loaded VSI under pf->vsi[id].
*/
for (i = 0; i < 15; i++) {
- if (test_bit(I40E_VF_STATE_INIT, &vf->vf_states))
+ if (test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) {
+ if (i > 0)
+ vsi = pf->vsi[vf->lan_vsi_idx];
break;
+ }
msleep(20);
}
if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) {
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
index f65cc0c16550..7164b9bb294f 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
@@ -99,6 +99,7 @@ struct i40e_vf {
unsigned int tx_rate; /* Tx bandwidth limit in Mbps */
bool link_forced;
bool link_up; /* only valid if VF link is forced */
+ bool queues_enabled; /* true if the VF queues are enabled */
bool spoofchk;
u16 num_mac;
u16 num_vlan;
diff --git a/drivers/net/ethernet/intel/iavf/iavf_txrx.c b/drivers/net/ethernet/intel/iavf/iavf_txrx.c
index 0cca1b589b56..7a30d5d5ef53 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_txrx.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_txrx.c
@@ -2161,7 +2161,7 @@ static void iavf_create_tx_ctx(struct iavf_ring *tx_ring,
**/
bool __iavf_chk_linearize(struct sk_buff *skb)
{
- const struct skb_frag_struct *frag, *stale;
+ const skb_frag_t *frag, *stale;
int nr_frags, sum;
/* no need to check if number of frags is less than 7 */
@@ -2205,7 +2205,7 @@ bool __iavf_chk_linearize(struct sk_buff *skb)
* descriptor associated with the fragment.
*/
if (stale_size > IAVF_MAX_DATA_PER_TXD) {
- int align_pad = -(stale->page_offset) &
+ int align_pad = -(skb_frag_off(stale)) &
(IAVF_MAX_READ_REQ_SIZE - 1);
sum -= align_pad;
@@ -2269,7 +2269,7 @@ static inline void iavf_tx_map(struct iavf_ring *tx_ring, struct sk_buff *skb,
{
unsigned int data_len = skb->data_len;
unsigned int size = skb_headlen(skb);
- struct skb_frag_struct *frag;
+ skb_frag_t *frag;
struct iavf_tx_buffer *tx_bi;
struct iavf_tx_desc *tx_desc;
u16 i = tx_ring->next_to_use;
diff --git a/drivers/net/ethernet/intel/iavf/iavf_txrx.h b/drivers/net/ethernet/intel/iavf/iavf_txrx.h
index 71e7d090f8db..dd3348f9da9d 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_txrx.h
+++ b/drivers/net/ethernet/intel/iavf/iavf_txrx.h
@@ -462,7 +462,7 @@ bool __iavf_chk_linearize(struct sk_buff *skb);
**/
static inline int iavf_xmit_descriptor_count(struct sk_buff *skb)
{
- const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0];
+ const skb_frag_t *frag = &skb_shinfo(skb)->frags[0];
unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
int count = 0, size = skb_headlen(skb);
diff --git a/drivers/net/ethernet/intel/ice/ice.h b/drivers/net/ethernet/intel/ice/ice.h
index 9ee6b55553c0..fb2bc836b20a 100644
--- a/drivers/net/ethernet/intel/ice/ice.h
+++ b/drivers/net/ethernet/intel/ice/ice.h
@@ -69,11 +69,10 @@ extern const char ice_drv_ver[];
#define ICE_INT_NAME_STR_LEN (IFNAMSIZ + 16)
#define ICE_ETHTOOL_FWVER_LEN 32
#define ICE_AQ_LEN 64
-#define ICE_MBXQ_LEN 64
+#define ICE_MBXSQ_LEN 64
+#define ICE_MBXRQ_LEN 512
#define ICE_MIN_MSIX 2
#define ICE_NO_VSI 0xffff
-#define ICE_MAX_TXQS 2048
-#define ICE_MAX_RXQS 2048
#define ICE_VSI_MAP_CONTIG 0
#define ICE_VSI_MAP_SCATTER 1
#define ICE_MAX_SCATTER_TXQS 16
@@ -86,16 +85,6 @@ extern const char ice_drv_ver[];
#define ICE_RES_MISC_VEC_ID (ICE_RES_VALID_BIT - 1)
#define ICE_INVAL_Q_INDEX 0xffff
#define ICE_INVAL_VFID 256
-#define ICE_MAX_VF_COUNT 256
-#define ICE_MAX_QS_PER_VF 256
-#define ICE_MIN_QS_PER_VF 1
-#define ICE_DFLT_QS_PER_VF 4
-#define ICE_NONQ_VECS_VF 1
-#define ICE_MAX_SCATTER_QS_PER_VF 16
-#define ICE_MAX_BASE_QS_PER_VF 16
-#define ICE_MAX_INTR_PER_VF 65
-#define ICE_MIN_INTR_PER_VF (ICE_MIN_QS_PER_VF + 1)
-#define ICE_DFLT_INTR_PER_VF (ICE_DFLT_QS_PER_VF + 1)
#define ICE_MAX_RESET_WAIT 20
@@ -220,6 +209,7 @@ enum ice_state {
__ICE_CFG_BUSY,
__ICE_SERVICE_SCHED,
__ICE_SERVICE_DIS,
+ __ICE_OICR_INTR_DIS, /* Global OICR interrupt disabled */
__ICE_STATE_NBITS /* must be last */
};
@@ -292,8 +282,8 @@ struct ice_vsi {
/* queue information */
u8 tx_mapping_mode; /* ICE_MAP_MODE_[CONTIG|SCATTER] */
u8 rx_mapping_mode; /* ICE_MAP_MODE_[CONTIG|SCATTER] */
- u16 txq_map[ICE_MAX_TXQS]; /* index in pf->avail_txqs */
- u16 rxq_map[ICE_MAX_RXQS]; /* index in pf->avail_rxqs */
+ u16 *txq_map; /* index in pf->avail_txqs */
+ u16 *rxq_map; /* index in pf->avail_rxqs */
u16 alloc_txq; /* Allocated Tx queues */
u16 num_txq; /* Used Tx queues */
u16 alloc_rxq; /* Allocated Rx queues */
@@ -329,7 +319,6 @@ struct ice_q_vector {
} ____cacheline_internodealigned_in_smp;
enum ice_pf_flags {
- ICE_FLAG_MSIX_ENA,
ICE_FLAG_FLTR_SYNC,
ICE_FLAG_RSS_ENA,
ICE_FLAG_SRIOV_ENA,
@@ -337,7 +326,8 @@ enum ice_pf_flags {
ICE_FLAG_DCB_CAPABLE,
ICE_FLAG_DCB_ENA,
ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA,
- ICE_FLAG_ENABLE_FW_LLDP,
+ ICE_FLAG_NO_MEDIA,
+ ICE_FLAG_FW_LLDP_AGENT,
ICE_FLAG_ETHTOOL_CTXT, /* set when ethtool holds RTNL lock */
ICE_PF_FLAGS_NBITS /* must be last */
};
@@ -363,9 +353,9 @@ struct ice_pf {
u16 num_vf_qps; /* num queue pairs per VF */
u16 num_vf_msix; /* num vectors per VF */
DECLARE_BITMAP(state, __ICE_STATE_NBITS);
- DECLARE_BITMAP(avail_txqs, ICE_MAX_TXQS);
- DECLARE_BITMAP(avail_rxqs, ICE_MAX_RXQS);
DECLARE_BITMAP(flags, ICE_PF_FLAGS_NBITS);
+ unsigned long *avail_txqs; /* bitmap to track PF Tx queue usage */
+ unsigned long *avail_rxqs; /* bitmap to track PF Rx queue usage */
unsigned long serv_tmr_period;
unsigned long serv_tmr_prev;
struct timer_list serv_tmr;
@@ -376,6 +366,8 @@ struct ice_pf {
u32 hw_csum_rx_error;
u32 oicr_idx; /* Other interrupt cause MSIX vector index */
u32 num_avail_sw_msix; /* remaining MSIX SW vectors left unclaimed */
+ u16 max_pf_txqs; /* Total Tx queues PF wide */
+ u16 max_pf_rxqs; /* Total Rx queues PF wide */
u32 num_lan_msix; /* Total MSIX vectors for base driver */
u16 num_lan_tx; /* num LAN Tx queues setup */
u16 num_lan_rx; /* num LAN Rx queues setup */
@@ -455,6 +447,8 @@ ice_find_vsi_by_type(struct ice_pf *pf, enum ice_vsi_type type)
int ice_vsi_setup_tx_rings(struct ice_vsi *vsi);
int ice_vsi_setup_rx_rings(struct ice_vsi *vsi);
void ice_set_ethtool_ops(struct net_device *netdev);
+void ice_update_vsi_stats(struct ice_vsi *vsi);
+void ice_update_pf_stats(struct ice_pf *pf);
int ice_up(struct ice_vsi *vsi);
int ice_down(struct ice_vsi *vsi);
int ice_vsi_cfg(struct ice_vsi *vsi);
diff --git a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
index 765e3c2ed045..bf9aa533a7c6 100644
--- a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
+++ b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
@@ -1610,6 +1610,7 @@ enum ice_aq_err {
ICE_AQ_RC_EBUSY = 12, /* Device or resource busy */
ICE_AQ_RC_EEXIST = 13, /* Object already exists */
ICE_AQ_RC_ENOSPC = 16, /* No space left or allocation failure */
+ ICE_AQ_RC_ENOSYS = 17, /* Function not implemented */
};
/* Admin Queue command opcodes */
diff --git a/drivers/net/ethernet/intel/ice/ice_common.c b/drivers/net/ethernet/intel/ice/ice_common.c
index 2e0731c1e1a3..302ad981129c 100644
--- a/drivers/net/ethernet/intel/ice/ice_common.c
+++ b/drivers/net/ethernet/intel/ice/ice_common.c
@@ -263,21 +263,23 @@ enum ice_status
ice_aq_get_link_info(struct ice_port_info *pi, bool ena_lse,
struct ice_link_status *link, struct ice_sq_cd *cd)
{
- struct ice_link_status *hw_link_info_old, *hw_link_info;
struct ice_aqc_get_link_status_data link_data = { 0 };
struct ice_aqc_get_link_status *resp;
+ struct ice_link_status *li_old, *li;
enum ice_media_type *hw_media_type;
struct ice_fc_info *hw_fc_info;
bool tx_pause, rx_pause;
struct ice_aq_desc desc;
enum ice_status status;
+ struct ice_hw *hw;
u16 cmd_flags;
if (!pi)
return ICE_ERR_PARAM;
- hw_link_info_old = &pi->phy.link_info_old;
+ hw = pi->hw;
+ li_old = &pi->phy.link_info_old;
hw_media_type = &pi->phy.media_type;
- hw_link_info = &pi->phy.link_info;
+ li = &pi->phy.link_info;
hw_fc_info = &pi->fc;
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_link_status);
@@ -286,27 +288,27 @@ ice_aq_get_link_info(struct ice_port_info *pi, bool ena_lse,
resp->cmd_flags = cpu_to_le16(cmd_flags);
resp->lport_num = pi->lport;
- status = ice_aq_send_cmd(pi->hw, &desc, &link_data, sizeof(link_data),
- cd);
+ status = ice_aq_send_cmd(hw, &desc, &link_data, sizeof(link_data), cd);
if (status)
return status;
/* save off old link status information */
- *hw_link_info_old = *hw_link_info;
+ *li_old = *li;
/* update current link status information */
- hw_link_info->link_speed = le16_to_cpu(link_data.link_speed);
- hw_link_info->phy_type_low = le64_to_cpu(link_data.phy_type_low);
- hw_link_info->phy_type_high = le64_to_cpu(link_data.phy_type_high);
+ li->link_speed = le16_to_cpu(link_data.link_speed);
+ li->phy_type_low = le64_to_cpu(link_data.phy_type_low);
+ li->phy_type_high = le64_to_cpu(link_data.phy_type_high);
*hw_media_type = ice_get_media_type(pi);
- hw_link_info->link_info = link_data.link_info;
- hw_link_info->an_info = link_data.an_info;
- hw_link_info->ext_info = link_data.ext_info;
- hw_link_info->max_frame_size = le16_to_cpu(link_data.max_frame_size);
- hw_link_info->fec_info = link_data.cfg & ICE_AQ_FEC_MASK;
- hw_link_info->topo_media_conflict = link_data.topo_media_conflict;
- hw_link_info->pacing = link_data.cfg & ICE_AQ_CFG_PACING_M;
+ li->link_info = link_data.link_info;
+ li->an_info = link_data.an_info;
+ li->ext_info = link_data.ext_info;
+ li->max_frame_size = le16_to_cpu(link_data.max_frame_size);
+ li->fec_info = link_data.cfg & ICE_AQ_FEC_MASK;
+ li->topo_media_conflict = link_data.topo_media_conflict;
+ li->pacing = link_data.cfg & (ICE_AQ_CFG_PACING_M |
+ ICE_AQ_CFG_PACING_TYPE_M);
/* update fc info */
tx_pause = !!(link_data.an_info & ICE_AQ_LINK_PAUSE_TX);
@@ -320,12 +322,24 @@ ice_aq_get_link_info(struct ice_port_info *pi, bool ena_lse,
else
hw_fc_info->current_mode = ICE_FC_NONE;
- hw_link_info->lse_ena =
- !!(resp->cmd_flags & cpu_to_le16(ICE_AQ_LSE_IS_ENABLED));
+ li->lse_ena = !!(resp->cmd_flags & cpu_to_le16(ICE_AQ_LSE_IS_ENABLED));
+
+ ice_debug(hw, ICE_DBG_LINK, "link_speed = 0x%x\n", li->link_speed);
+ ice_debug(hw, ICE_DBG_LINK, "phy_type_low = 0x%llx\n",
+ (unsigned long long)li->phy_type_low);
+ ice_debug(hw, ICE_DBG_LINK, "phy_type_high = 0x%llx\n",
+ (unsigned long long)li->phy_type_high);
+ ice_debug(hw, ICE_DBG_LINK, "media_type = 0x%x\n", *hw_media_type);
+ ice_debug(hw, ICE_DBG_LINK, "link_info = 0x%x\n", li->link_info);
+ ice_debug(hw, ICE_DBG_LINK, "an_info = 0x%x\n", li->an_info);
+ ice_debug(hw, ICE_DBG_LINK, "ext_info = 0x%x\n", li->ext_info);
+ ice_debug(hw, ICE_DBG_LINK, "lse_ena = 0x%x\n", li->lse_ena);
+ ice_debug(hw, ICE_DBG_LINK, "max_frame = 0x%x\n", li->max_frame_size);
+ ice_debug(hw, ICE_DBG_LINK, "pacing = 0x%x\n", li->pacing);
/* save link status information */
if (link)
- *link = *hw_link_info;
+ *link = *li;
/* flag cleared so calling functions don't call AQ again */
pi->phy.get_link_info = false;
@@ -740,7 +754,7 @@ enum ice_status ice_init_hw(struct ice_hw *hw)
ice_get_itr_intrl_gran(hw);
- status = ice_init_all_ctrlq(hw);
+ status = ice_create_all_ctrlq(hw);
if (status)
goto err_unroll_cqinit;
@@ -855,7 +869,7 @@ err_unroll_sched:
err_unroll_alloc:
devm_kfree(ice_hw_to_dev(hw), hw->port_info);
err_unroll_cqinit:
- ice_shutdown_all_ctrlq(hw);
+ ice_destroy_all_ctrlq(hw);
return status;
}
@@ -881,7 +895,7 @@ void ice_deinit_hw(struct ice_hw *hw)
/* Attempt to disable FW logging before shutting down control queues */
ice_cfg_fw_log(hw, false);
- ice_shutdown_all_ctrlq(hw);
+ ice_destroy_all_ctrlq(hw);
/* Clear VSI contexts if not already cleared */
ice_clear_all_vsi_ctx(hw);
@@ -1078,6 +1092,7 @@ static const struct ice_ctx_ele ice_rlan_ctx_info[] = {
ICE_CTX_STORE(ice_rlan_ctx, tphdata_ena, 1, 195),
ICE_CTX_STORE(ice_rlan_ctx, tphhead_ena, 1, 196),
ICE_CTX_STORE(ice_rlan_ctx, lrxqthresh, 3, 198),
+ ICE_CTX_STORE(ice_rlan_ctx, prefena, 1, 201),
{ 0 }
};
@@ -1088,7 +1103,8 @@ static const struct ice_ctx_ele ice_rlan_ctx_info[] = {
* @rxq_index: the index of the Rx queue
*
* Converts rxq context from sparse to dense structure and then writes
- * it to HW register space
+ * it to HW register space, also enabling the hardware to prefetch
+ * descriptors instead of fetching them only on demand
*/
enum ice_status
ice_write_rxq_ctx(struct ice_hw *hw, struct ice_rlan_ctx *rlan_ctx,
@@ -1096,6 +1112,11 @@ ice_write_rxq_ctx(struct ice_hw *hw, struct ice_rlan_ctx *rlan_ctx,
{
u8 ctx_buf[ICE_RXQ_CTX_SZ] = { 0 };
+ if (!rlan_ctx)
+ return ICE_ERR_BAD_PTR;
+
+ rlan_ctx->prefena = 1;
+
ice_set_ctx((u8 *)rlan_ctx, ctx_buf, ice_rlan_ctx_info);
return ice_copy_rxq_ctx_to_hw(hw, ctx_buf, rxq_index);
}
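For reference, the sparse-to-dense conversion that ice_set_ctx performs is essentially per-field bit packing driven by the ice_rlan_ctx_info table. A simplified standalone model (assuming a 32-byte Rx queue context buffer and LSB-first packing, both illustrative) of placing the 1-bit prefena field at bit offset 201:

        #include <stdint.h>

        /* Pack a single-bit field at a given bit offset inside the dense
         * context buffer; prefena, per the table above, lives at offset 201.
         */
        static void ctx_pack_bit(uint8_t *buf, unsigned int bit_off, int val)
        {
                if (val)
                        buf[bit_off / 8] |= (uint8_t)(1u << (bit_off % 8));
                else
                        buf[bit_off / 8] &= (uint8_t)~(1u << (bit_off % 8));
        }

        int main(void)
        {
                uint8_t ctx_buf[32] = { 0 };   /* modeled after ICE_RXQ_CTX_SZ */

                ctx_pack_bit(ctx_buf, 201, 1); /* rlan_ctx.prefena = 1 */
                return ctx_buf[201 / 8] == (1u << (201 % 8)) ? 0 : 1;
        }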
@@ -1993,6 +2014,17 @@ ice_aq_set_phy_cfg(struct ice_hw *hw, u8 lport,
desc.params.set_phy.lport_num = lport;
desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
+ ice_debug(hw, ICE_DBG_LINK, "phy_type_low = 0x%llx\n",
+ (unsigned long long)le64_to_cpu(cfg->phy_type_low));
+ ice_debug(hw, ICE_DBG_LINK, "phy_type_high = 0x%llx\n",
+ (unsigned long long)le64_to_cpu(cfg->phy_type_high));
+ ice_debug(hw, ICE_DBG_LINK, "caps = 0x%x\n", cfg->caps);
+ ice_debug(hw, ICE_DBG_LINK, "low_power_ctrl = 0x%x\n",
+ cfg->low_power_ctrl);
+ ice_debug(hw, ICE_DBG_LINK, "eee_cap = 0x%x\n", cfg->eee_cap);
+ ice_debug(hw, ICE_DBG_LINK, "eeer_value = 0x%x\n", cfg->eeer_value);
+ ice_debug(hw, ICE_DBG_LINK, "link_fec_opt = 0x%x\n", cfg->link_fec_opt);
+
return ice_aq_send_cmd(hw, &desc, cfg, sizeof(*cfg), cd);
}
@@ -2024,7 +2056,7 @@ enum ice_status ice_update_link_info(struct ice_port_info *pi)
if (!pcaps)
return ICE_ERR_NO_MEMORY;
- status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_SW_CFG,
+ status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP,
pcaps, NULL);
if (!status)
memcpy(li->module_type, &pcaps->module_type,
@@ -2174,27 +2206,24 @@ ice_cfg_phy_fec(struct ice_aqc_set_phy_cfg_data *cfg, enum ice_fec_mode fec)
{
switch (fec) {
case ICE_FEC_BASER:
- /* Clear auto FEC and RS bits, and AND BASE-R ability
+ /* Clear RS bits, and AND BASE-R ability
* bits and OR request bits.
*/
- cfg->caps &= ~ICE_AQC_PHY_EN_AUTO_FEC;
cfg->link_fec_opt &= ICE_AQC_PHY_FEC_10G_KR_40G_KR4_EN |
ICE_AQC_PHY_FEC_25G_KR_CLAUSE74_EN;
cfg->link_fec_opt |= ICE_AQC_PHY_FEC_10G_KR_40G_KR4_REQ |
ICE_AQC_PHY_FEC_25G_KR_REQ;
break;
case ICE_FEC_RS:
- /* Clear auto FEC and BASE-R bits, and AND RS ability
+ /* Clear BASE-R bits, and AND RS ability
* bits and OR request bits.
*/
- cfg->caps &= ~ICE_AQC_PHY_EN_AUTO_FEC;
cfg->link_fec_opt &= ICE_AQC_PHY_FEC_25G_RS_CLAUSE91_EN;
cfg->link_fec_opt |= ICE_AQC_PHY_FEC_25G_RS_528_REQ |
ICE_AQC_PHY_FEC_25G_RS_544_REQ;
break;
case ICE_FEC_NONE:
- /* Clear auto FEC and all FEC option bits. */
- cfg->caps &= ~ICE_AQC_PHY_EN_AUTO_FEC;
+ /* Clear all FEC option bits. */
cfg->link_fec_opt &= ~ICE_AQC_PHY_FEC_MASK;
break;
case ICE_FEC_AUTO:
@@ -3240,40 +3269,44 @@ void ice_replay_post(struct ice_hw *hw)
/**
* ice_stat_update40 - read 40 bit stat from the chip and update stat values
* @hw: ptr to the hardware info
- * @hireg: high 32 bit HW register to read from
- * @loreg: low 32 bit HW register to read from
+ * @reg: offset of 64 bit HW register to read from
* @prev_stat_loaded: bool to specify if previous stats are loaded
* @prev_stat: ptr to previous loaded stat value
* @cur_stat: ptr to current stat value
*/
void
-ice_stat_update40(struct ice_hw *hw, u32 hireg, u32 loreg,
- bool prev_stat_loaded, u64 *prev_stat, u64 *cur_stat)
+ice_stat_update40(struct ice_hw *hw, u32 reg, bool prev_stat_loaded,
+ u64 *prev_stat, u64 *cur_stat)
{
- u64 new_data;
-
- new_data = rd32(hw, loreg);
- new_data |= ((u64)(rd32(hw, hireg) & 0xFFFF)) << 32;
+ u64 new_data = rd64(hw, reg) & (BIT_ULL(40) - 1);
/* device stats are not reset at PFR, they likely will not be zeroed
- * when the driver starts. So save the first values read and use them as
- * offsets to be subtracted from the raw values in order to report stats
- * that count from zero.
+ * when the driver starts. Thus, save the value from the first read
+ * without adding to the statistic value so that we report stats which
+ * count up from zero.
*/
- if (!prev_stat_loaded)
+ if (!prev_stat_loaded) {
*prev_stat = new_data;
+ return;
+ }
+
+ /* Calculate the difference between the new and old values, and then
+ * add it to the software stat value.
+ */
if (new_data >= *prev_stat)
- *cur_stat = new_data - *prev_stat;
+ *cur_stat += new_data - *prev_stat;
else
/* to manage the potential roll-over */
- *cur_stat = (new_data + BIT_ULL(40)) - *prev_stat;
- *cur_stat &= 0xFFFFFFFFFFULL;
+ *cur_stat += (new_data + BIT_ULL(40)) - *prev_stat;
+
+ /* Update the previously stored value to prepare for next read */
+ *prev_stat = new_data;
}
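The shared roll-over arithmetic is small enough to model standalone. A minimal userspace C sketch (illustrative only, not driver code) of the 40-bit delta that ice_stat_update40 accumulates:

        #include <stdint.h>
        #include <stdio.h>

        /* The counter wraps at 2^40: when the new reading is below the
         * previous one, assume a single wrap and add 2^40 back before
         * subtracting.
         */
        static uint64_t stat_delta40(uint64_t prev, uint64_t cur)
        {
                cur &= (1ULL << 40) - 1;  /* rd64() value masked to 40 bits */
                if (cur >= prev)
                        return cur - prev;
                return (cur + (1ULL << 40)) - prev;
        }

        int main(void)
        {
                uint64_t prev = (1ULL << 40) - 5;  /* just before the wrap */
                uint64_t sw_stat = 0;

                sw_stat += stat_delta40(prev, 10); /* wrapped HW reading */
                prev = 10;                         /* saved for next read */
                printf("%llu\n", (unsigned long long)sw_stat); /* prints 15 */
                return 0;
        }

The same logic applies to ice_stat_update32 with BIT_ULL(32) as the wrap point.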
/**
* ice_stat_update32 - read 32 bit stat from the chip and update stat values
* @hw: ptr to the hardware info
- * @reg: HW register to read from
+ * @reg: offset of HW register to read from
* @prev_stat_loaded: bool to specify if previous stats are loaded
* @prev_stat: ptr to previous loaded stat value
* @cur_stat: ptr to current stat value
@@ -3287,17 +3320,26 @@ ice_stat_update32(struct ice_hw *hw, u32 reg, bool prev_stat_loaded,
new_data = rd32(hw, reg);
/* device stats are not reset at PFR, they likely will not be zeroed
- * when the driver starts. So save the first values read and use them as
- * offsets to be subtracted from the raw values in order to report stats
- * that count from zero.
+ * when the driver starts. Thus, save the value from the first read
+ * without adding to the statistic value so that we report stats which
+ * count up from zero.
*/
- if (!prev_stat_loaded)
+ if (!prev_stat_loaded) {
*prev_stat = new_data;
+ return;
+ }
+
+ /* Calculate the difference between the new and old values, and then
+ * add it to the software stat value.
+ */
if (new_data >= *prev_stat)
- *cur_stat = new_data - *prev_stat;
+ *cur_stat += new_data - *prev_stat;
else
/* to manage the potential roll-over */
- *cur_stat = (new_data + BIT_ULL(32)) - *prev_stat;
+ *cur_stat += (new_data + BIT_ULL(32)) - *prev_stat;
+
+ /* Update the previously stored value to prepare for next read */
+ *prev_stat = new_data;
}
/**
diff --git a/drivers/net/ethernet/intel/ice/ice_common.h b/drivers/net/ethernet/intel/ice/ice_common.h
index d1f8353fe6bb..e376d1eadba4 100644
--- a/drivers/net/ethernet/intel/ice/ice_common.h
+++ b/drivers/net/ethernet/intel/ice/ice_common.h
@@ -17,8 +17,10 @@ enum ice_status ice_init_hw(struct ice_hw *hw);
void ice_deinit_hw(struct ice_hw *hw);
enum ice_status ice_check_reset(struct ice_hw *hw);
enum ice_status ice_reset(struct ice_hw *hw, enum ice_reset_req req);
+enum ice_status ice_create_all_ctrlq(struct ice_hw *hw);
enum ice_status ice_init_all_ctrlq(struct ice_hw *hw);
void ice_shutdown_all_ctrlq(struct ice_hw *hw);
+void ice_destroy_all_ctrlq(struct ice_hw *hw);
enum ice_status
ice_clean_rq_elem(struct ice_hw *hw, struct ice_ctl_q_info *cq,
struct ice_rq_event_info *e, u16 *pending);
@@ -123,8 +125,8 @@ enum ice_status ice_replay_vsi(struct ice_hw *hw, u16 vsi_handle);
void ice_replay_post(struct ice_hw *hw);
void ice_output_fw_log(struct ice_hw *hw, struct ice_aq_desc *desc, void *buf);
void
-ice_stat_update40(struct ice_hw *hw, u32 hireg, u32 loreg,
- bool prev_stat_loaded, u64 *prev_stat, u64 *cur_stat);
+ice_stat_update40(struct ice_hw *hw, u32 reg, bool prev_stat_loaded,
+ u64 *prev_stat, u64 *cur_stat);
void
ice_stat_update32(struct ice_hw *hw, u32 reg, bool prev_stat_loaded,
u64 *prev_stat, u64 *cur_stat);
diff --git a/drivers/net/ethernet/intel/ice/ice_controlq.c b/drivers/net/ethernet/intel/ice/ice_controlq.c
index e91ac4df0242..2353166c654e 100644
--- a/drivers/net/ethernet/intel/ice/ice_controlq.c
+++ b/drivers/net/ethernet/intel/ice/ice_controlq.c
@@ -310,7 +310,7 @@ ice_cfg_rq_regs(struct ice_hw *hw, struct ice_ctl_q_info *cq)
* @cq: pointer to the specific Control queue
*
* This is the main initialization routine for the Control Send Queue
- * Prior to calling this function, drivers *MUST* set the following fields
+ * Prior to calling this function, the driver *MUST* set the following fields
* in the cq->structure:
* - cq->num_sq_entries
* - cq->sq_buf_size
@@ -369,7 +369,7 @@ init_ctrlq_exit:
* @cq: pointer to the specific Control queue
*
* The main initialization routine for the Admin Receive (Event) Queue.
- * Prior to calling this function, drivers *MUST* set the following fields
+ * Prior to calling this function, the driver *MUST* set the following fields
* in the cq->structure:
* - cq->num_rq_entries
* - cq->rq_buf_size
@@ -569,14 +569,8 @@ static enum ice_status ice_init_check_adminq(struct ice_hw *hw)
return 0;
init_ctrlq_free_rq:
- if (cq->rq.count) {
- ice_shutdown_rq(hw, cq);
- mutex_destroy(&cq->rq_lock);
- }
- if (cq->sq.count) {
- ice_shutdown_sq(hw, cq);
- mutex_destroy(&cq->sq_lock);
- }
+ ice_shutdown_rq(hw, cq);
+ ice_shutdown_sq(hw, cq);
return status;
}
@@ -585,12 +579,14 @@ init_ctrlq_free_rq:
* @hw: pointer to the hardware structure
* @q_type: specific Control queue type
*
- * Prior to calling this function, drivers *MUST* set the following fields
+ * Prior to calling this function, the driver *MUST* set the following fields
* in the cq->structure:
* - cq->num_sq_entries
* - cq->num_rq_entries
* - cq->rq_buf_size
* - cq->sq_buf_size
+ *
+ * NOTE: this function does not initialize the controlq locks.
*/
static enum ice_status ice_init_ctrlq(struct ice_hw *hw, enum ice_ctl_q q_type)
{
@@ -616,8 +612,6 @@ static enum ice_status ice_init_ctrlq(struct ice_hw *hw, enum ice_ctl_q q_type)
!cq->rq_buf_size || !cq->sq_buf_size) {
return ICE_ERR_CFG;
}
- mutex_init(&cq->sq_lock);
- mutex_init(&cq->rq_lock);
/* setup SQ command write back timeout */
cq->sq_cmd_timeout = ICE_CTL_Q_SQ_CMD_TIMEOUT;
@@ -625,7 +619,7 @@ static enum ice_status ice_init_ctrlq(struct ice_hw *hw, enum ice_ctl_q q_type)
/* allocate the ATQ */
ret_code = ice_init_sq(hw, cq);
if (ret_code)
- goto init_ctrlq_destroy_locks;
+ return ret_code;
/* allocate the ARQ */
ret_code = ice_init_rq(hw, cq);
@@ -637,9 +631,6 @@ static enum ice_status ice_init_ctrlq(struct ice_hw *hw, enum ice_ctl_q q_type)
init_ctrlq_free_sq:
ice_shutdown_sq(hw, cq);
-init_ctrlq_destroy_locks:
- mutex_destroy(&cq->sq_lock);
- mutex_destroy(&cq->rq_lock);
return ret_code;
}
@@ -647,12 +638,14 @@ init_ctrlq_destroy_locks:
* ice_init_all_ctrlq - main initialization routine for all control queues
* @hw: pointer to the hardware structure
*
- * Prior to calling this function, drivers *MUST* set the following fields
+ * Prior to calling this function, the driver *MUST* set the following fields
* in the cq->structure for all control queues:
* - cq->num_sq_entries
* - cq->num_rq_entries
* - cq->rq_buf_size
* - cq->sq_buf_size
+ *
+ * NOTE: this function does not initialize the controlq locks.
*/
enum ice_status ice_init_all_ctrlq(struct ice_hw *hw)
{
@@ -672,9 +665,47 @@ enum ice_status ice_init_all_ctrlq(struct ice_hw *hw)
}
/**
+ * ice_init_ctrlq_locks - Initialize locks for a control queue
+ * @cq: pointer to the control queue
+ *
+ * Initializes the send and receive queue locks for a given control queue.
+ */
+static void ice_init_ctrlq_locks(struct ice_ctl_q_info *cq)
+{
+ mutex_init(&cq->sq_lock);
+ mutex_init(&cq->rq_lock);
+}
+
+/**
+ * ice_create_all_ctrlq - main initialization routine for all control queues
+ * @hw: pointer to the hardware structure
+ *
+ * Prior to calling this function, the driver *MUST* set the following fields
+ * in the cq->structure for all control queues:
+ * - cq->num_sq_entries
+ * - cq->num_rq_entries
+ * - cq->rq_buf_size
+ * - cq->sq_buf_size
+ *
+ * This function creates all the control queue locks and then calls
+ * ice_init_all_ctrlq. It should be called once during driver load. If the
+ * driver needs to re-initialize control queues at run time it should call
+ * ice_init_all_ctrlq instead.
+ */
+enum ice_status ice_create_all_ctrlq(struct ice_hw *hw)
+{
+ ice_init_ctrlq_locks(&hw->adminq);
+ ice_init_ctrlq_locks(&hw->mailboxq);
+
+ return ice_init_all_ctrlq(hw);
+}
+
+/**
* ice_shutdown_ctrlq - shutdown routine for any control queue
* @hw: pointer to the hardware structure
* @q_type: specific Control queue type
+ *
+ * NOTE: this function does not destroy the control queue locks.
*/
static void ice_shutdown_ctrlq(struct ice_hw *hw, enum ice_ctl_q q_type)
{
@@ -693,19 +724,17 @@ static void ice_shutdown_ctrlq(struct ice_hw *hw, enum ice_ctl_q q_type)
return;
}
- if (cq->sq.count) {
- ice_shutdown_sq(hw, cq);
- mutex_destroy(&cq->sq_lock);
- }
- if (cq->rq.count) {
- ice_shutdown_rq(hw, cq);
- mutex_destroy(&cq->rq_lock);
- }
+ ice_shutdown_sq(hw, cq);
+ ice_shutdown_rq(hw, cq);
}
/**
* ice_shutdown_all_ctrlq - shutdown routine for all control queues
* @hw: pointer to the hardware structure
+ *
+ * NOTE: this function does not destroy the control queue locks. The driver
+ * may call this at runtime to shut down and later restart control queues, such
+ * as in response to a reset event.
*/
void ice_shutdown_all_ctrlq(struct ice_hw *hw)
{
@@ -716,6 +745,37 @@ void ice_shutdown_all_ctrlq(struct ice_hw *hw)
}
/**
+ * ice_destroy_ctrlq_locks - Destroy locks for a control queue
+ * @cq: pointer to the control queue
+ *
+ * Destroys the send and receive queue locks for a given control queue.
+ */
+static void
+ice_destroy_ctrlq_locks(struct ice_ctl_q_info *cq)
+{
+ mutex_destroy(&cq->sq_lock);
+ mutex_destroy(&cq->rq_lock);
+}
+
+/**
+ * ice_destroy_all_ctrlq - exit routine for all control queues
+ * @hw: pointer to the hardware structure
+ *
+ * This function shuts down all the control queues and then destroys the
+ * control queue locks. It should be called once during driver unload. The
+ * driver should call ice_shutdown_all_ctrlq if it needs to shut down and
+ * reinitialize control queues, such as in response to a reset event.
+ */
+void ice_destroy_all_ctrlq(struct ice_hw *hw)
+{
+ /* shut down all the control queues first */
+ ice_shutdown_all_ctrlq(hw);
+
+ ice_destroy_ctrlq_locks(&hw->adminq);
+ ice_destroy_ctrlq_locks(&hw->mailboxq);
+}
+
+/**
* ice_clean_sq - cleans Admin send queue (ATQ)
* @hw: pointer to the hardware structure
* @cq: pointer to the specific Control queue
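The split between lock lifetime and queue lifetime is the point of the new create/destroy pair. A minimal pthread-based sketch (userspace model, hypothetical names) of the intended call ordering:

        #include <pthread.h>

        struct ctlq {
                pthread_mutex_t sq_lock;
                pthread_mutex_t rq_lock;
                int up;
        };

        static void ctlq_create(struct ctlq *cq)   /* once, at driver load */
        {
                pthread_mutex_init(&cq->sq_lock, NULL);
                pthread_mutex_init(&cq->rq_lock, NULL);
        }

        static void ctlq_init(struct ctlq *cq)     { cq->up = 1; } /* may repeat */
        static void ctlq_shutdown(struct ctlq *cq) { cq->up = 0; } /* may repeat */

        static void ctlq_destroy(struct ctlq *cq)  /* once, at driver unload */
        {
                ctlq_shutdown(cq);
                pthread_mutex_destroy(&cq->sq_lock);
                pthread_mutex_destroy(&cq->rq_lock);
        }

        int main(void)
        {
                struct ctlq q;

                ctlq_create(&q);   /* ice_create_all_ctrlq                */
                ctlq_shutdown(&q); /* reset event: ice_shutdown_all_ctrlq */
                ctlq_init(&q);     /* reset done:  ice_init_all_ctrlq     */
                ctlq_destroy(&q);  /* ice_destroy_all_ctrlq               */
                return 0;
        }

Keeping mutex_init/mutex_destroy out of the init/shutdown pair means a reset path can bounce the queues without re-creating locks that another thread might still be holding.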
diff --git a/drivers/net/ethernet/intel/ice/ice_dcb.c b/drivers/net/ethernet/intel/ice/ice_dcb.c
index c2002ded65f6..d60c942249e8 100644
--- a/drivers/net/ethernet/intel/ice/ice_dcb.c
+++ b/drivers/net/ethernet/intel/ice/ice_dcb.c
@@ -954,7 +954,8 @@ enum ice_status ice_init_dcb(struct ice_hw *hw)
pi->dcbx_status = ice_get_dcbx_status(hw);
if (pi->dcbx_status == ICE_DCBX_STATUS_DONE ||
- pi->dcbx_status == ICE_DCBX_STATUS_IN_PROGRESS) {
+ pi->dcbx_status == ICE_DCBX_STATUS_IN_PROGRESS ||
+ pi->dcbx_status == ICE_DCBX_STATUS_NOT_STARTED) {
/* Get current DCBX configuration */
ret = ice_get_dcb_cfg(pi);
pi->is_sw_lldp = (hw->adminq.sq_last_status == ICE_AQ_RC_EPERM);
diff --git a/drivers/net/ethernet/intel/ice/ice_dcb_lib.c b/drivers/net/ethernet/intel/ice/ice_dcb_lib.c
index fe88b127ca42..d9578919aad8 100644
--- a/drivers/net/ethernet/intel/ice/ice_dcb_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_dcb_lib.c
@@ -204,15 +204,86 @@ out:
}
/**
+ * ice_cfg_etsrec_defaults - Set default ETS recommended DCB config
+ * @pi: port information structure
+ */
+static void ice_cfg_etsrec_defaults(struct ice_port_info *pi)
+{
+ struct ice_dcbx_cfg *dcbcfg = &pi->local_dcbx_cfg;
+ u8 i;
+
+ /* Ensure ETS recommended DCB configuration is not already set */
+ if (dcbcfg->etsrec.maxtcs)
+ return;
+
+ /* In CEE mode, set the default to 1 TC */
+ dcbcfg->etsrec.maxtcs = 1;
+ for (i = 0; i < ICE_MAX_TRAFFIC_CLASS; i++) {
+ dcbcfg->etsrec.tcbwtable[i] = i ? 0 : 100;
+ dcbcfg->etsrec.tsatable[i] = i ? ICE_IEEE_TSA_STRICT :
+ ICE_IEEE_TSA_ETS;
+ }
+}
+
+/**
+ * ice_dcb_need_recfg - Check if DCB needs reconfig
+ * @pf: board private structure
+ * @old_cfg: current DCB config
+ * @new_cfg: new DCB config
+ */
+static bool
+ice_dcb_need_recfg(struct ice_pf *pf, struct ice_dcbx_cfg *old_cfg,
+ struct ice_dcbx_cfg *new_cfg)
+{
+ bool need_reconfig = false;
+
+ /* Check if ETS configuration has changed */
+ if (memcmp(&new_cfg->etscfg, &old_cfg->etscfg,
+ sizeof(new_cfg->etscfg))) {
+ /* If Priority Table has changed reconfig is needed */
+ if (memcmp(&new_cfg->etscfg.prio_table,
+ &old_cfg->etscfg.prio_table,
+ sizeof(new_cfg->etscfg.prio_table))) {
+ need_reconfig = true;
+ dev_dbg(&pf->pdev->dev, "ETS UP2TC changed.\n");
+ }
+
+ if (memcmp(&new_cfg->etscfg.tcbwtable,
+ &old_cfg->etscfg.tcbwtable,
+ sizeof(new_cfg->etscfg.tcbwtable)))
+ dev_dbg(&pf->pdev->dev, "ETS TC BW Table changed.\n");
+
+ if (memcmp(&new_cfg->etscfg.tsatable,
+ &old_cfg->etscfg.tsatable,
+ sizeof(new_cfg->etscfg.tsatable)))
+ dev_dbg(&pf->pdev->dev, "ETS TSA Table changed.\n");
+ }
+
+ /* Check if PFC configuration has changed */
+ if (memcmp(&new_cfg->pfc, &old_cfg->pfc, sizeof(new_cfg->pfc))) {
+ need_reconfig = true;
+ dev_dbg(&pf->pdev->dev, "PFC config change detected.\n");
+ }
+
+ /* Check if APP Table has changed */
+ if (memcmp(&new_cfg->app, &old_cfg->app, sizeof(new_cfg->app))) {
+ need_reconfig = true;
+ dev_dbg(&pf->pdev->dev, "APP Table change detected.\n");
+ }
+
+ dev_dbg(&pf->pdev->dev, "dcb need_reconfig=%d\n", need_reconfig);
+ return need_reconfig;
+}
+
+/**
* ice_dcb_rebuild - rebuild DCB post reset
* @pf: physical function instance
*/
void ice_dcb_rebuild(struct ice_pf *pf)
{
+ struct ice_dcbx_cfg *local_dcbx_cfg, *desired_dcbx_cfg, *prev_cfg;
struct ice_aqc_port_ets_elem buf = { 0 };
- struct ice_dcbx_cfg *prev_cfg;
enum ice_status ret;
- u8 willing;
ret = ice_query_port_ets(pf->hw.port_info, &buf, sizeof(buf), NULL);
if (ret) {
@@ -224,9 +295,15 @@ void ice_dcb_rebuild(struct ice_pf *pf)
if (!test_bit(ICE_FLAG_DCB_ENA, pf->flags))
return;
+ local_dcbx_cfg = &pf->hw.port_info->local_dcbx_cfg;
+ desired_dcbx_cfg = &pf->hw.port_info->desired_dcbx_cfg;
+
/* Save current willing state and force FW to unwilling */
- willing = pf->hw.port_info->local_dcbx_cfg.etscfg.willing;
- pf->hw.port_info->local_dcbx_cfg.etscfg.willing = 0x0;
+ local_dcbx_cfg->etscfg.willing = 0x0;
+ local_dcbx_cfg->pfc.willing = 0x0;
+ local_dcbx_cfg->app_mode = ICE_DCBX_APPS_NON_WILLING;
+
+ ice_cfg_etsrec_defaults(pf->hw.port_info);
ret = ice_set_dcb_cfg(pf->hw.port_info);
if (ret) {
dev_err(&pf->pdev->dev, "Failed to set DCB to unwilling\n");
@@ -234,8 +311,7 @@ void ice_dcb_rebuild(struct ice_pf *pf)
}
/* Retrieve DCB config and ensure same as current in SW */
- prev_cfg = devm_kmemdup(&pf->pdev->dev,
- &pf->hw.port_info->local_dcbx_cfg,
+ prev_cfg = devm_kmemdup(&pf->pdev->dev, local_dcbx_cfg,
sizeof(*prev_cfg), GFP_KERNEL);
if (!prev_cfg) {
dev_err(&pf->pdev->dev, "Failed to alloc space for DCB cfg\n");
@@ -243,22 +319,29 @@ void ice_dcb_rebuild(struct ice_pf *pf)
}
ice_init_dcb(&pf->hw);
- if (memcmp(prev_cfg, &pf->hw.port_info->local_dcbx_cfg,
- sizeof(*prev_cfg))) {
+ if (pf->hw.port_info->dcbx_status == ICE_DCBX_STATUS_DIS)
+ pf->hw.port_info->is_sw_lldp = true;
+ else
+ pf->hw.port_info->is_sw_lldp = false;
+
+ if (ice_dcb_need_recfg(pf, prev_cfg, local_dcbx_cfg)) {
/* difference in cfg detected - disable DCB till next MIB */
dev_err(&pf->pdev->dev, "Set local MIB not accurate\n");
- devm_kfree(&pf->pdev->dev, prev_cfg);
goto dcb_error;
}
/* fetched config congruent to previous configuration */
devm_kfree(&pf->pdev->dev, prev_cfg);
- /* Configuration replayed - reset willing state to previous */
- pf->hw.port_info->local_dcbx_cfg.etscfg.willing = willing;
+ /* Set the local desired config */
+ if (local_dcbx_cfg->dcbx_mode == ICE_DCBX_MODE_CEE)
+ memcpy(local_dcbx_cfg, desired_dcbx_cfg,
+ sizeof(*local_dcbx_cfg));
+
+ ice_cfg_etsrec_defaults(pf->hw.port_info);
ret = ice_set_dcb_cfg(pf->hw.port_info);
if (ret) {
- dev_err(&pf->pdev->dev, "Fail restoring prev willing state\n");
+ dev_err(&pf->pdev->dev, "Failed to set desired config\n");
goto dcb_error;
}
dev_info(&pf->pdev->dev, "DCB restored after reset\n");
@@ -364,35 +447,17 @@ int ice_init_pf_dcb(struct ice_pf *pf, bool locked)
struct device *dev = &pf->pdev->dev;
struct ice_port_info *port_info;
struct ice_hw *hw = &pf->hw;
- int sw_default = 0;
int err;
port_info = hw->port_info;
err = ice_init_dcb(hw);
if (err) {
- /* FW LLDP is not active, default to SW DCBX/LLDP */
- dev_info(&pf->pdev->dev, "FW LLDP is not active\n");
- hw->port_info->dcbx_status = ICE_DCBX_STATUS_NOT_STARTED;
- hw->port_info->is_sw_lldp = true;
- }
-
- if (port_info->dcbx_status == ICE_DCBX_STATUS_DIS)
- dev_info(&pf->pdev->dev, "DCBX disabled\n");
-
- /* LLDP disabled in FW */
- if (port_info->is_sw_lldp) {
- sw_default = 1;
- dev_info(&pf->pdev->dev, "DCBx/LLDP in SW mode.\n");
- clear_bit(ICE_FLAG_ENABLE_FW_LLDP, pf->flags);
- } else {
- set_bit(ICE_FLAG_ENABLE_FW_LLDP, pf->flags);
- }
-
- if (port_info->dcbx_status == ICE_DCBX_STATUS_NOT_STARTED)
- dev_info(&pf->pdev->dev, "DCBX not started\n");
-
- if (sw_default) {
+ /* FW LLDP is disabled, activate SW DCBX/LLDP mode */
+ dev_info(&pf->pdev->dev,
+ "FW LLDP is disabled, DCBx/LLDP in SW mode.\n");
+ port_info->is_sw_lldp = true;
+ clear_bit(ICE_FLAG_FW_LLDP_AGENT, pf->flags);
err = ice_dcb_sw_dflt_cfg(pf, locked);
if (err) {
dev_err(&pf->pdev->dev,
@@ -407,6 +472,9 @@ int ice_init_pf_dcb(struct ice_pf *pf, bool locked)
return 0;
}
+ port_info->is_sw_lldp = false;
+ set_bit(ICE_FLAG_FW_LLDP_AGENT, pf->flags);
+
/* DCBX in FW and LLDP enabled in FW */
pf->dcbx_cap = DCB_CAP_DCBX_LLD_MANAGED | DCB_CAP_DCBX_VER_IEEE;
@@ -432,30 +500,31 @@ void ice_update_dcb_stats(struct ice_pf *pf)
{
struct ice_hw_port_stats *prev_ps, *cur_ps;
struct ice_hw *hw = &pf->hw;
- u8 pf_id = hw->pf_id;
+ u8 port;
int i;
+ port = hw->port_info->lport;
prev_ps = &pf->stats_prev;
cur_ps = &pf->stats;
for (i = 0; i < 8; i++) {
- ice_stat_update32(hw, GLPRT_PXOFFRXC(pf_id, i),
+ ice_stat_update32(hw, GLPRT_PXOFFRXC(port, i),
pf->stat_prev_loaded,
&prev_ps->priority_xoff_rx[i],
&cur_ps->priority_xoff_rx[i]);
- ice_stat_update32(hw, GLPRT_PXONRXC(pf_id, i),
+ ice_stat_update32(hw, GLPRT_PXONRXC(port, i),
pf->stat_prev_loaded,
&prev_ps->priority_xon_rx[i],
&cur_ps->priority_xon_rx[i]);
- ice_stat_update32(hw, GLPRT_PXONTXC(pf_id, i),
+ ice_stat_update32(hw, GLPRT_PXONTXC(port, i),
pf->stat_prev_loaded,
&prev_ps->priority_xon_tx[i],
&cur_ps->priority_xon_tx[i]);
- ice_stat_update32(hw, GLPRT_PXOFFTXC(pf_id, i),
+ ice_stat_update32(hw, GLPRT_PXOFFTXC(port, i),
pf->stat_prev_loaded,
&prev_ps->priority_xoff_tx[i],
&cur_ps->priority_xoff_tx[i]);
- ice_stat_update32(hw, GLPRT_RXON2OFFCNT(pf_id, i),
+ ice_stat_update32(hw, GLPRT_RXON2OFFCNT(port, i),
pf->stat_prev_loaded,
&prev_ps->priority_xon_2_xoff[i],
&cur_ps->priority_xon_2_xoff[i]);
@@ -502,55 +571,6 @@ ice_tx_prepare_vlan_flags_dcb(struct ice_ring *tx_ring,
}
/**
- * ice_dcb_need_recfg - Check if DCB needs reconfig
- * @pf: board private structure
- * @old_cfg: current DCB config
- * @new_cfg: new DCB config
- */
-static bool ice_dcb_need_recfg(struct ice_pf *pf, struct ice_dcbx_cfg *old_cfg,
- struct ice_dcbx_cfg *new_cfg)
-{
- bool need_reconfig = false;
-
- /* Check if ETS configuration has changed */
- if (memcmp(&new_cfg->etscfg, &old_cfg->etscfg,
- sizeof(new_cfg->etscfg))) {
- /* If Priority Table has changed reconfig is needed */
- if (memcmp(&new_cfg->etscfg.prio_table,
- &old_cfg->etscfg.prio_table,
- sizeof(new_cfg->etscfg.prio_table))) {
- need_reconfig = true;
- dev_dbg(&pf->pdev->dev, "ETS UP2TC changed.\n");
- }
-
- if (memcmp(&new_cfg->etscfg.tcbwtable,
- &old_cfg->etscfg.tcbwtable,
- sizeof(new_cfg->etscfg.tcbwtable)))
- dev_dbg(&pf->pdev->dev, "ETS TC BW Table changed.\n");
-
- if (memcmp(&new_cfg->etscfg.tsatable,
- &old_cfg->etscfg.tsatable,
- sizeof(new_cfg->etscfg.tsatable)))
- dev_dbg(&pf->pdev->dev, "ETS TSA Table changed.\n");
- }
-
- /* Check if PFC configuration has changed */
- if (memcmp(&new_cfg->pfc, &old_cfg->pfc, sizeof(new_cfg->pfc))) {
- need_reconfig = true;
- dev_dbg(&pf->pdev->dev, "PFC config change detected.\n");
- }
-
- /* Check if APP Table has changed */
- if (memcmp(&new_cfg->app, &old_cfg->app, sizeof(new_cfg->app))) {
- need_reconfig = true;
- dev_dbg(&pf->pdev->dev, "APP Table change detected.\n");
- }
-
- dev_dbg(&pf->pdev->dev, "dcb need_reconfig=%d\n", need_reconfig);
- return need_reconfig;
-}
-
-/**
* ice_dcb_process_lldp_set_mib_change - Process MIB change
* @pf: ptr to ice_pf
* @event: pointer to the admin queue receive event
diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool.c b/drivers/net/ethernet/intel/ice/ice_ethtool.c
index 52083a63dee6..f7dd0bd03d39 100644
--- a/drivers/net/ethernet/intel/ice/ice_ethtool.c
+++ b/drivers/net/ethernet/intel/ice/ice_ethtool.c
@@ -155,7 +155,7 @@ struct ice_priv_flag {
static const struct ice_priv_flag ice_gstrings_priv_flags[] = {
ICE_PRIV_FLAG("link-down-on-close", ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA),
- ICE_PRIV_FLAG("enable-fw-lldp", ICE_FLAG_ENABLE_FW_LLDP),
+ ICE_PRIV_FLAG("fw-lldp-agent", ICE_FLAG_FW_LLDP_AGENT),
};
#define ICE_PRIV_FLAG_ARRAY_SIZE ARRAY_SIZE(ice_gstrings_priv_flags)
@@ -1201,8 +1201,8 @@ static int ice_set_priv_flags(struct net_device *netdev, u32 flags)
bitmap_xor(change_flags, pf->flags, orig_flags, ICE_PF_FLAGS_NBITS);
- if (test_bit(ICE_FLAG_ENABLE_FW_LLDP, change_flags)) {
- if (!test_bit(ICE_FLAG_ENABLE_FW_LLDP, pf->flags)) {
+ if (test_bit(ICE_FLAG_FW_LLDP_AGENT, change_flags)) {
+ if (!test_bit(ICE_FLAG_FW_LLDP_AGENT, pf->flags)) {
enum ice_status status;
/* Disable FW LLDP engine */
@@ -1319,14 +1319,17 @@ ice_get_ethtool_stats(struct net_device *netdev,
struct ice_vsi *vsi = np->vsi;
struct ice_pf *pf = vsi->back;
struct ice_ring *ring;
- unsigned int j = 0;
+ unsigned int j;
int i = 0;
char *p;
+ ice_update_pf_stats(pf);
+ ice_update_vsi_stats(vsi);
+
for (j = 0; j < ICE_VSI_STATS_LEN; j++) {
p = (char *)vsi + ice_gstrings_vsi_stats[j].stat_offset;
data[i++] = (ice_gstrings_vsi_stats[j].sizeof_stat ==
- sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
+ sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
}
/* populate per queue stats */
@@ -1716,6 +1719,7 @@ ice_get_settings_link_up(struct ethtool_link_ksettings *ks,
struct net_device *netdev)
{
struct ice_netdev_priv *np = netdev_priv(netdev);
+ struct ice_port_info *pi = np->vsi->port_info;
struct ethtool_link_ksettings cap_ksettings;
struct ice_link_status *link_info;
struct ice_vsi *vsi = np->vsi;
@@ -2040,6 +2044,33 @@ ice_get_settings_link_up(struct ethtool_link_ksettings *ks,
break;
}
ks->base.duplex = DUPLEX_FULL;
+
+ if (link_info->an_info & ICE_AQ_AN_COMPLETED)
+ ethtool_link_ksettings_add_link_mode(ks, lp_advertising,
+ Autoneg);
+
+ /* Set flow control negotiated Rx/Tx pause */
+ switch (pi->fc.current_mode) {
+ case ICE_FC_FULL:
+ ethtool_link_ksettings_add_link_mode(ks, lp_advertising, Pause);
+ break;
+ case ICE_FC_TX_PAUSE:
+ ethtool_link_ksettings_add_link_mode(ks, lp_advertising, Pause);
+ ethtool_link_ksettings_add_link_mode(ks, lp_advertising,
+ Asym_Pause);
+ break;
+ case ICE_FC_RX_PAUSE:
+ ethtool_link_ksettings_add_link_mode(ks, lp_advertising,
+ Asym_Pause);
+ break;
+ case ICE_FC_PFC:
+ /* fall through */
+ default:
+ ethtool_link_ksettings_del_link_mode(ks, lp_advertising, Pause);
+ ethtool_link_ksettings_del_link_mode(ks, lp_advertising,
+ Asym_Pause);
+ break;
+ }
}
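The lp_advertising mapping is the inverse of the pause resolution: the driver reports what the link partner must have advertised for the negotiated mode to come out this way, consistent with the IEEE 802.3 pause autonegotiation table. A standalone sketch (hypothetical bit values, not the real ethtool link-mode API) mirroring the switch above:

        enum fc_mode { FC_NONE, FC_RX_PAUSE, FC_TX_PAUSE, FC_FULL };

        #define LP_PAUSE        0x1  /* models the Pause link mode bit      */
        #define LP_ASYM_PAUSE   0x2  /* models the Asym_Pause link mode bit */

        /* A local Tx-only result implies the partner advertised both Pause
         * and Asym_Pause; a local Rx-only result implies Asym_Pause alone.
         */
        static unsigned int lp_pause_bits(enum fc_mode cur)
        {
                switch (cur) {
                case FC_FULL:     return LP_PAUSE;
                case FC_TX_PAUSE: return LP_PAUSE | LP_ASYM_PAUSE;
                case FC_RX_PAUSE: return LP_ASYM_PAUSE;
                default:          return 0;
                }
        }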
/**
@@ -2078,9 +2109,12 @@ ice_get_link_ksettings(struct net_device *netdev,
struct ice_aqc_get_phy_caps_data *caps;
struct ice_link_status *hw_link_info;
struct ice_vsi *vsi = np->vsi;
+ enum ice_status status;
+ int err = 0;
ethtool_link_ksettings_zero_link_mode(ks, supported);
ethtool_link_ksettings_zero_link_mode(ks, advertising);
+ ethtool_link_ksettings_zero_link_mode(ks, lp_advertising);
hw_link_info = &vsi->port_info->phy.link_info;
/* set speed and duplex */
@@ -2125,48 +2159,36 @@ ice_get_link_ksettings(struct net_device *netdev,
/* flow control is symmetric and always supported */
ethtool_link_ksettings_add_link_mode(ks, supported, Pause);
- switch (vsi->port_info->fc.req_mode) {
- case ICE_FC_FULL:
+ caps = devm_kzalloc(&vsi->back->pdev->dev, sizeof(*caps), GFP_KERNEL);
+ if (!caps)
+ return -ENOMEM;
+
+ status = ice_aq_get_phy_caps(vsi->port_info, false,
+ ICE_AQC_REPORT_SW_CFG, caps, NULL);
+ if (status) {
+ err = -EIO;
+ goto done;
+ }
+
+ /* Set the advertised flow control based on the PHY capability */
+ if ((caps->caps & ICE_AQC_PHY_EN_TX_LINK_PAUSE) &&
+ (caps->caps & ICE_AQC_PHY_EN_RX_LINK_PAUSE)) {
ethtool_link_ksettings_add_link_mode(ks, advertising, Pause);
- break;
- case ICE_FC_TX_PAUSE:
ethtool_link_ksettings_add_link_mode(ks, advertising,
Asym_Pause);
- break;
- case ICE_FC_RX_PAUSE:
+ } else if (caps->caps & ICE_AQC_PHY_EN_TX_LINK_PAUSE) {
+ ethtool_link_ksettings_add_link_mode(ks, advertising,
+ Asym_Pause);
+ } else if (caps->caps & ICE_AQC_PHY_EN_RX_LINK_PAUSE) {
ethtool_link_ksettings_add_link_mode(ks, advertising, Pause);
ethtool_link_ksettings_add_link_mode(ks, advertising,
Asym_Pause);
- break;
- case ICE_FC_PFC:
- default:
+ } else {
ethtool_link_ksettings_del_link_mode(ks, advertising, Pause);
ethtool_link_ksettings_del_link_mode(ks, advertising,
Asym_Pause);
- break;
}
- caps = devm_kzalloc(&vsi->back->pdev->dev, sizeof(*caps), GFP_KERNEL);
- if (!caps)
- goto done;
-
- if (ice_aq_get_phy_caps(vsi->port_info, false, ICE_AQC_REPORT_TOPO_CAP,
- caps, NULL))
- netdev_info(netdev, "Get phy capability failed.\n");
-
- /* Set supported FEC modes based on PHY capability */
- ethtool_link_ksettings_add_link_mode(ks, supported, FEC_NONE);
-
- if (caps->link_fec_options & ICE_AQC_PHY_FEC_10G_KR_40G_KR4_EN ||
- caps->link_fec_options & ICE_AQC_PHY_FEC_25G_KR_CLAUSE74_EN)
- ethtool_link_ksettings_add_link_mode(ks, supported, FEC_BASER);
- if (caps->link_fec_options & ICE_AQC_PHY_FEC_25G_RS_CLAUSE91_EN)
- ethtool_link_ksettings_add_link_mode(ks, supported, FEC_RS);
-
- if (ice_aq_get_phy_caps(vsi->port_info, false, ICE_AQC_REPORT_SW_CFG,
- caps, NULL))
- netdev_info(netdev, "Get phy capability failed.\n");
-
/* Set advertised FEC modes based on PHY capability */
ethtool_link_ksettings_add_link_mode(ks, advertising, FEC_NONE);
@@ -2178,9 +2200,25 @@ ice_get_link_ksettings(struct net_device *netdev,
caps->link_fec_options & ICE_AQC_PHY_FEC_25G_RS_544_REQ)
ethtool_link_ksettings_add_link_mode(ks, advertising, FEC_RS);
+ status = ice_aq_get_phy_caps(vsi->port_info, false,
+ ICE_AQC_REPORT_TOPO_CAP, caps, NULL);
+ if (status) {
+ err = -EIO;
+ goto done;
+ }
+
+ /* Set supported FEC modes based on PHY capability */
+ ethtool_link_ksettings_add_link_mode(ks, supported, FEC_NONE);
+
+ if (caps->link_fec_options & ICE_AQC_PHY_FEC_10G_KR_40G_KR4_EN ||
+ caps->link_fec_options & ICE_AQC_PHY_FEC_25G_KR_CLAUSE74_EN)
+ ethtool_link_ksettings_add_link_mode(ks, supported, FEC_BASER);
+ if (caps->link_fec_options & ICE_AQC_PHY_FEC_25G_RS_CLAUSE91_EN)
+ ethtool_link_ksettings_add_link_mode(ks, supported, FEC_RS);
+
done:
devm_kfree(&vsi->back->pdev->dev, caps);
- return 0;
+ return err;
}
/**
@@ -2763,6 +2801,11 @@ static int ice_nway_reset(struct net_device *netdev)
* ice_get_pauseparam - Get Flow Control status
* @netdev: network interface device structure
* @pause: ethernet pause (flow control) parameters
+ *
+ * Get requested flow control status from PHY capability.
+ * If autoneg is true, ethtool will send the ETHTOOL_GSET ioctl, which is
+ * handled by ice_get_link_ksettings; that function reports the negotiated
+ * Rx/Tx pause via lp_advertising.
*/
static void
ice_get_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause)
@@ -2816,6 +2859,7 @@ static int
ice_set_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause)
{
struct ice_netdev_priv *np = netdev_priv(netdev);
+ struct ice_aqc_get_phy_caps_data *pcaps;
struct ice_link_status *hw_link_info;
struct ice_pf *pf = np->vsi->back;
struct ice_dcbx_cfg *dcbx_cfg;
@@ -2826,6 +2870,7 @@ ice_set_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause)
u8 aq_failures;
bool link_up;
int err = 0;
+ u32 is_an;
pi = vsi->port_info;
hw_link_info = &pi->phy.link_info;
@@ -2840,7 +2885,30 @@ ice_set_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause)
return -EOPNOTSUPP;
}
- if (pause->autoneg != (hw_link_info->an_info & ICE_AQ_AN_COMPLETED)) {
+	/* With ETHTOOL_GLINKSETTINGS defined, get pause param reports the
+	 * configured and negotiated flow control pause, and pause->autoneg
+	 * reports the SW configured setting. Compare pause->autoneg with the
+	 * SW configured value to prevent the user from using set pause param
+	 * to change autoneg.
+	 */
+ pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL);
+ if (!pcaps)
+ return -ENOMEM;
+
+ /* Get current PHY config */
+ status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_SW_CFG, pcaps,
+ NULL);
+ if (status) {
+ kfree(pcaps);
+ return -EIO;
+ }
+
+ is_an = ((pcaps->caps & ICE_AQC_PHY_AN_MODE) ?
+ AUTONEG_ENABLE : AUTONEG_DISABLE);
+
+ kfree(pcaps);
+
+ if (pause->autoneg != is_an) {
netdev_info(netdev, "To change autoneg please use: ethtool -s <dev> autoneg <on|off>\n");
return -EOPNOTSUPP;
}
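The guard boils down to deriving the SW configured autoneg state from the PHY capability bit and refusing a mismatched request, since autoneg itself must be changed through ethtool -s. A standalone sketch (the PHY_AN_MODE value is a stand-in, not the real ICE_AQC_PHY_AN_MODE definition):

        #include <stdio.h>

        #define AUTONEG_DISABLE 0x00
        #define AUTONEG_ENABLE  0x01
        #define PHY_AN_MODE     0x80  /* hypothetical capability bit */

        /* Reject a pauseparam request whose autoneg field disagrees with the
         * SW configured PHY setting.
         */
        static int check_pause_autoneg(unsigned char phy_caps, unsigned int req_an)
        {
                unsigned int is_an = (phy_caps & PHY_AN_MODE) ? AUTONEG_ENABLE
                                                              : AUTONEG_DISABLE;

                return req_an == is_an ? 0 : -95; /* -EOPNOTSUPP on Linux */
        }

        int main(void)
        {
                printf("%d\n", check_pause_autoneg(PHY_AN_MODE, AUTONEG_DISABLE));
                return 0;
        }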
diff --git a/drivers/net/ethernet/intel/ice/ice_hw_autogen.h b/drivers/net/ethernet/intel/ice/ice_hw_autogen.h
index 6c5ce05742b1..6f78ff5534af 100644
--- a/drivers/net/ethernet/intel/ice/ice_hw_autogen.h
+++ b/drivers/net/ethernet/intel/ice/ice_hw_autogen.h
@@ -127,8 +127,11 @@
#define GLINT_DYN_CTL_CLEARPBA_M BIT(1)
#define GLINT_DYN_CTL_SWINT_TRIG_M BIT(2)
#define GLINT_DYN_CTL_ITR_INDX_S 3
+#define GLINT_DYN_CTL_ITR_INDX_M ICE_M(0x3, 3)
#define GLINT_DYN_CTL_INTERVAL_S 5
+#define GLINT_DYN_CTL_INTERVAL_M ICE_M(0xFFF, 5)
#define GLINT_DYN_CTL_SW_ITR_INDX_M ICE_M(0x3, 25)
+#define GLINT_DYN_CTL_WB_ON_ITR_M BIT(30)
#define GLINT_DYN_CTL_INTENA_MSK_M BIT(31)
#define GLINT_ITR(_i, _INT) (0x00154000 + ((_i) * 8192 + (_INT) * 4))
#define GLINT_RATE(_INT) (0x0015A000 + ((_INT) * 4))
@@ -281,14 +284,10 @@
#define GL_PWR_MODE_CTL 0x000B820C
#define GL_PWR_MODE_CTL_CAR_MAX_BW_S 30
#define GL_PWR_MODE_CTL_CAR_MAX_BW_M ICE_M(0x3, 30)
-#define GLPRT_BPRCH(_i) (0x00381384 + ((_i) * 8))
#define GLPRT_BPRCL(_i) (0x00381380 + ((_i) * 8))
-#define GLPRT_BPTCH(_i) (0x00381244 + ((_i) * 8))
#define GLPRT_BPTCL(_i) (0x00381240 + ((_i) * 8))
#define GLPRT_CRCERRS(_i) (0x00380100 + ((_i) * 8))
-#define GLPRT_GORCH(_i) (0x00380004 + ((_i) * 8))
#define GLPRT_GORCL(_i) (0x00380000 + ((_i) * 8))
-#define GLPRT_GOTCH(_i) (0x00380B44 + ((_i) * 8))
#define GLPRT_GOTCL(_i) (0x00380B40 + ((_i) * 8))
#define GLPRT_ILLERRC(_i) (0x003801C0 + ((_i) * 8))
#define GLPRT_LXOFFRXC(_i) (0x003802C0 + ((_i) * 8))
@@ -296,38 +295,22 @@
#define GLPRT_LXONRXC(_i) (0x00380280 + ((_i) * 8))
#define GLPRT_LXONTXC(_i) (0x00381140 + ((_i) * 8))
#define GLPRT_MLFC(_i) (0x00380040 + ((_i) * 8))
-#define GLPRT_MPRCH(_i) (0x00381344 + ((_i) * 8))
#define GLPRT_MPRCL(_i) (0x00381340 + ((_i) * 8))
-#define GLPRT_MPTCH(_i) (0x00381204 + ((_i) * 8))
#define GLPRT_MPTCL(_i) (0x00381200 + ((_i) * 8))
#define GLPRT_MRFC(_i) (0x00380080 + ((_i) * 8))
-#define GLPRT_PRC1023H(_i) (0x00380A04 + ((_i) * 8))
#define GLPRT_PRC1023L(_i) (0x00380A00 + ((_i) * 8))
-#define GLPRT_PRC127H(_i) (0x00380944 + ((_i) * 8))
#define GLPRT_PRC127L(_i) (0x00380940 + ((_i) * 8))
-#define GLPRT_PRC1522H(_i) (0x00380A44 + ((_i) * 8))
#define GLPRT_PRC1522L(_i) (0x00380A40 + ((_i) * 8))
-#define GLPRT_PRC255H(_i) (0x00380984 + ((_i) * 8))
#define GLPRT_PRC255L(_i) (0x00380980 + ((_i) * 8))
-#define GLPRT_PRC511H(_i) (0x003809C4 + ((_i) * 8))
#define GLPRT_PRC511L(_i) (0x003809C0 + ((_i) * 8))
-#define GLPRT_PRC64H(_i) (0x00380904 + ((_i) * 8))
#define GLPRT_PRC64L(_i) (0x00380900 + ((_i) * 8))
-#define GLPRT_PRC9522H(_i) (0x00380A84 + ((_i) * 8))
#define GLPRT_PRC9522L(_i) (0x00380A80 + ((_i) * 8))
-#define GLPRT_PTC1023H(_i) (0x00380C84 + ((_i) * 8))
#define GLPRT_PTC1023L(_i) (0x00380C80 + ((_i) * 8))
-#define GLPRT_PTC127H(_i) (0x00380BC4 + ((_i) * 8))
#define GLPRT_PTC127L(_i) (0x00380BC0 + ((_i) * 8))
-#define GLPRT_PTC1522H(_i) (0x00380CC4 + ((_i) * 8))
#define GLPRT_PTC1522L(_i) (0x00380CC0 + ((_i) * 8))
-#define GLPRT_PTC255H(_i) (0x00380C04 + ((_i) * 8))
#define GLPRT_PTC255L(_i) (0x00380C00 + ((_i) * 8))
-#define GLPRT_PTC511H(_i) (0x00380C44 + ((_i) * 8))
#define GLPRT_PTC511L(_i) (0x00380C40 + ((_i) * 8))
-#define GLPRT_PTC64H(_i) (0x00380B84 + ((_i) * 8))
#define GLPRT_PTC64L(_i) (0x00380B80 + ((_i) * 8))
-#define GLPRT_PTC9522H(_i) (0x00380D04 + ((_i) * 8))
#define GLPRT_PTC9522L(_i) (0x00380D00 + ((_i) * 8))
#define GLPRT_PXOFFRXC(_i, _j) (0x00380500 + ((_i) * 8 + (_j) * 64))
#define GLPRT_PXOFFTXC(_i, _j) (0x00380F40 + ((_i) * 8 + (_j) * 64))
@@ -340,32 +323,23 @@
#define GLPRT_RUC(_i) (0x00380200 + ((_i) * 8))
#define GLPRT_RXON2OFFCNT(_i, _j) (0x00380700 + ((_i) * 8 + (_j) * 64))
#define GLPRT_TDOLD(_i) (0x00381280 + ((_i) * 8))
-#define GLPRT_UPRCH(_i) (0x00381304 + ((_i) * 8))
#define GLPRT_UPRCL(_i) (0x00381300 + ((_i) * 8))
-#define GLPRT_UPTCH(_i) (0x003811C4 + ((_i) * 8))
#define GLPRT_UPTCL(_i) (0x003811C0 + ((_i) * 8))
-#define GLV_BPRCH(_i) (0x003B6004 + ((_i) * 8))
#define GLV_BPRCL(_i) (0x003B6000 + ((_i) * 8))
-#define GLV_BPTCH(_i) (0x0030E004 + ((_i) * 8))
#define GLV_BPTCL(_i) (0x0030E000 + ((_i) * 8))
-#define GLV_GORCH(_i) (0x003B0004 + ((_i) * 8))
#define GLV_GORCL(_i) (0x003B0000 + ((_i) * 8))
-#define GLV_GOTCH(_i) (0x00300004 + ((_i) * 8))
#define GLV_GOTCL(_i) (0x00300000 + ((_i) * 8))
-#define GLV_MPRCH(_i) (0x003B4004 + ((_i) * 8))
#define GLV_MPRCL(_i) (0x003B4000 + ((_i) * 8))
-#define GLV_MPTCH(_i) (0x0030C004 + ((_i) * 8))
#define GLV_MPTCL(_i) (0x0030C000 + ((_i) * 8))
#define GLV_RDPC(_i) (0x00294C04 + ((_i) * 4))
#define GLV_TEPC(_VSI) (0x00312000 + ((_VSI) * 4))
-#define GLV_UPRCH(_i) (0x003B2004 + ((_i) * 8))
#define GLV_UPRCL(_i) (0x003B2000 + ((_i) * 8))
-#define GLV_UPTCH(_i) (0x0030A004 + ((_i) * 8))
#define GLV_UPTCL(_i) (0x0030A000 + ((_i) * 8))
#define PF_VT_PFALLOC_HIF 0x0009DD80
#define VSIQF_HKEY_MAX_INDEX 12
#define VSIQF_HLUT_MAX_INDEX 15
#define VFINT_DYN_CTLN(_i) (0x00003800 + ((_i) * 4))
#define VFINT_DYN_CTLN_CLEARPBA_M BIT(1)
+#define PRTRPB_RDPC 0x000AC260
#endif /* _ICE_HW_AUTOGEN_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h b/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h
index 510a8c900e61..57ea6811fe2c 100644
--- a/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h
+++ b/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h
@@ -290,6 +290,7 @@ struct ice_rlan_ctx {
u8 tphdata_ena;
u8 tphhead_ena;
u16 lrxqthresh; /* bigger than needed, see above for reason */
+ u8 prefena; /* NOTE: normally must be set to 1 at init */
};
struct ice_ctx_ele {
diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c
index a19f5920733b..a39767e8c2a2 100644
--- a/drivers/net/ethernet/intel/ice/ice_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_lib.c
@@ -191,41 +191,58 @@ static int ice_pf_rxq_wait(struct ice_pf *pf, int pf_q, bool ena)
}
/**
- * ice_vsi_ctrl_rx_rings - Start or stop a VSI's Rx rings
+ * ice_vsi_ctrl_rx_ring - Start or stop a VSI's Rx ring
* @vsi: the VSI being configured
* @ena: start or stop the Rx rings
+ * @rxq_idx: Rx queue index
*/
-static int ice_vsi_ctrl_rx_rings(struct ice_vsi *vsi, bool ena)
+#ifndef CONFIG_PCI_IOV
+static
+#endif /* !CONFIG_PCI_IOV */
+int ice_vsi_ctrl_rx_ring(struct ice_vsi *vsi, bool ena, u16 rxq_idx)
{
+ int pf_q = vsi->rxq_map[rxq_idx];
struct ice_pf *pf = vsi->back;
struct ice_hw *hw = &pf->hw;
- int i, ret = 0;
+ int ret = 0;
+ u32 rx_reg;
- for (i = 0; i < vsi->num_rxq; i++) {
- int pf_q = vsi->rxq_map[i];
- u32 rx_reg;
+ rx_reg = rd32(hw, QRX_CTRL(pf_q));
- rx_reg = rd32(hw, QRX_CTRL(pf_q));
+ /* Skip if the queue is already in the requested state */
+ if (ena == !!(rx_reg & QRX_CTRL_QENA_STAT_M))
+ return 0;
- /* Skip if the queue is already in the requested state */
- if (ena == !!(rx_reg & QRX_CTRL_QENA_STAT_M))
- continue;
+ /* turn on/off the queue */
+ if (ena)
+ rx_reg |= QRX_CTRL_QENA_REQ_M;
+ else
+ rx_reg &= ~QRX_CTRL_QENA_REQ_M;
+ wr32(hw, QRX_CTRL(pf_q), rx_reg);
- /* turn on/off the queue */
- if (ena)
- rx_reg |= QRX_CTRL_QENA_REQ_M;
- else
- rx_reg &= ~QRX_CTRL_QENA_REQ_M;
- wr32(hw, QRX_CTRL(pf_q), rx_reg);
-
- /* wait for the change to finish */
- ret = ice_pf_rxq_wait(pf, pf_q, ena);
- if (ret) {
- dev_err(&pf->pdev->dev,
- "VSI idx %d Rx ring %d %sable timeout\n",
- vsi->idx, pf_q, (ena ? "en" : "dis"));
+ /* wait for the change to finish */
+ ret = ice_pf_rxq_wait(pf, pf_q, ena);
+ if (ret)
+ dev_err(&pf->pdev->dev,
+ "VSI idx %d Rx ring %d %sable timeout\n",
+ vsi->idx, pf_q, (ena ? "en" : "dis"));
+
+ return ret;
+}
+
+/**
+ * ice_vsi_ctrl_rx_rings - Start or stop a VSI's Rx rings
+ * @vsi: the VSI being configured
+ * @ena: start or stop the Rx rings
+ */
+static int ice_vsi_ctrl_rx_rings(struct ice_vsi *vsi, bool ena)
+{
+ int i, ret = 0;
+
+ for (i = 0; i < vsi->num_rxq; i++) {
+ ret = ice_vsi_ctrl_rx_ring(vsi, ena, i);
+ if (ret)
break;
- }
}
return ret;
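The per-ring helper is a read-modify-write of the request bit followed by a bounded poll of the status bit. A self-contained userspace model (fake register, hypothetical bit positions) of that pattern:

        #include <stdint.h>
        #include <stdio.h>

        #define QENA_REQ  (1u << 0)  /* hypothetical request bit */
        #define QENA_STAT (1u << 2)  /* hypothetical status bit  */

        static uint32_t fake_reg;    /* stands in for QRX_CTRL(pf_q) */

        static uint32_t rd32(void) { return fake_reg; }
        static void wr32(uint32_t v)
        {
                fake_reg = v;
                /* model the HW latching status after the request bit */
                if (v & QENA_REQ)
                        fake_reg |= QENA_STAT;
                else
                        fake_reg &= ~QENA_STAT;
        }

        static int ctrl_rx_ring(int ena)
        {
                uint32_t v = rd32();

                if (ena == !!(v & QENA_STAT))
                        return 0;          /* already in requested state */
                if (ena)
                        v |= QENA_REQ;
                else
                        v &= ~QENA_REQ;
                wr32(v);

                for (int i = 0; i < 100; i++) /* bounded wait */
                        if (!!(rd32() & QENA_STAT) == ena)
                                return 0;
                return -1;                 /* timeout */
        }

        int main(void)
        {
                printf("enable: %d, disable: %d\n",
                       ctrl_rx_ring(1), ctrl_rx_ring(0));
                return 0;
        }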
@@ -246,12 +263,24 @@ static int ice_vsi_alloc_arrays(struct ice_vsi *vsi)
vsi->tx_rings = devm_kcalloc(&pf->pdev->dev, vsi->alloc_txq,
sizeof(*vsi->tx_rings), GFP_KERNEL);
if (!vsi->tx_rings)
- goto err_txrings;
+ return -ENOMEM;
vsi->rx_rings = devm_kcalloc(&pf->pdev->dev, vsi->alloc_rxq,
sizeof(*vsi->rx_rings), GFP_KERNEL);
if (!vsi->rx_rings)
- goto err_rxrings;
+ goto err_rings;
+
+ vsi->txq_map = devm_kcalloc(&pf->pdev->dev, vsi->alloc_txq,
+ sizeof(*vsi->txq_map), GFP_KERNEL);
+
+ if (!vsi->txq_map)
+ goto err_txq_map;
+
+ vsi->rxq_map = devm_kcalloc(&pf->pdev->dev, vsi->alloc_rxq,
+ sizeof(*vsi->rxq_map), GFP_KERNEL);
+ if (!vsi->rxq_map)
+ goto err_rxq_map;
+
/* There is no need to allocate q_vectors for a loopback VSI. */
if (vsi->type == ICE_VSI_LB)
@@ -266,10 +295,13 @@ static int ice_vsi_alloc_arrays(struct ice_vsi *vsi)
return 0;
err_vectors:
+ devm_kfree(&pf->pdev->dev, vsi->rxq_map);
+err_rxq_map:
+ devm_kfree(&pf->pdev->dev, vsi->txq_map);
+err_txq_map:
devm_kfree(&pf->pdev->dev, vsi->rx_rings);
-err_rxrings:
+err_rings:
devm_kfree(&pf->pdev->dev, vsi->tx_rings);
-err_txrings:
return -ENOMEM;
}
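The reworked error path follows the usual kernel goto-unwind idiom: each label is named for the failing step and frees everything allocated before it, in reverse order. A minimal userspace model:

        #include <stdlib.h>

        static int alloc_arrays(void **a, void **b, void **c, void **d)
        {
                *a = calloc(4, sizeof(int));
                if (!*a)
                        return -1;
                *b = calloc(4, sizeof(int));
                if (!*b)
                        goto err_b;
                *c = calloc(4, sizeof(int));
                if (!*c)
                        goto err_c;
                *d = calloc(4, sizeof(int));
                if (!*d)
                        goto err_d;
                return 0;

        err_d:
                free(*c);
        err_c:
                free(*b);
        err_b:
                free(*a);
                return -1;
        }

        int main(void)
        {
                void *a, *b, *c, *d;

                return alloc_arrays(&a, &b, &c, &d);
        }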
@@ -416,6 +448,14 @@ static void ice_vsi_free_arrays(struct ice_vsi *vsi)
devm_kfree(&pf->pdev->dev, vsi->rx_rings);
vsi->rx_rings = NULL;
}
+ if (vsi->txq_map) {
+ devm_kfree(&pf->pdev->dev, vsi->txq_map);
+ vsi->txq_map = NULL;
+ }
+ if (vsi->rxq_map) {
+ devm_kfree(&pf->pdev->dev, vsi->rxq_map);
+ vsi->rxq_map = NULL;
+ }
}
/**
@@ -647,7 +687,7 @@ static int ice_vsi_get_qs(struct ice_vsi *vsi)
struct ice_qs_cfg tx_qs_cfg = {
.qs_mutex = &pf->avail_q_mutex,
.pf_map = pf->avail_txqs,
- .pf_map_size = ICE_MAX_TXQS,
+ .pf_map_size = pf->max_pf_txqs,
.q_count = vsi->alloc_txq,
.scatter_count = ICE_MAX_SCATTER_TXQS,
.vsi_map = vsi->txq_map,
@@ -657,7 +697,7 @@ static int ice_vsi_get_qs(struct ice_vsi *vsi)
struct ice_qs_cfg rx_qs_cfg = {
.qs_mutex = &pf->avail_q_mutex,
.pf_map = pf->avail_rxqs,
- .pf_map_size = ICE_MAX_RXQS,
+ .pf_map_size = pf->max_pf_rxqs,
.q_count = vsi->alloc_rxq,
.scatter_count = ICE_MAX_SCATTER_RXQS,
.vsi_map = vsi->rxq_map,
@@ -1010,6 +1050,13 @@ static int ice_vsi_init(struct ice_vsi *vsi)
ICE_AQ_VSI_SEC_FLAG_ENA_MAC_ANTI_SPOOF;
}
+ /* Allow control frames out of main VSI */
+ if (vsi->type == ICE_VSI_PF) {
+ ctxt->info.sec_flags |= ICE_AQ_VSI_SEC_FLAG_ALLOW_DEST_OVRD;
+ ctxt->info.valid_sections |=
+ cpu_to_le16(ICE_AQ_VSI_PROP_SECURITY_VALID);
+ }
+
ret = ice_add_vsi(hw, vsi->idx, ctxt, NULL);
if (ret) {
dev_err(&pf->pdev->dev,
@@ -1129,12 +1176,7 @@ static int ice_vsi_alloc_q_vectors(struct ice_vsi *vsi)
return -EEXIST;
}
- if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags)) {
- num_q_vectors = vsi->num_q_vectors;
- } else {
- err = -EINVAL;
- goto err_out;
- }
+ num_q_vectors = vsi->num_q_vectors;
for (v_idx = 0; v_idx < num_q_vectors; v_idx++) {
err = ice_vsi_alloc_q_vector(vsi, v_idx);
@@ -1180,9 +1222,6 @@ static int ice_vsi_setup_vector_base(struct ice_vsi *vsi)
return -EEXIST;
}
- if (!test_bit(ICE_FLAG_MSIX_ENA, pf->flags))
- return -ENOENT;
-
num_q_vectors = vsi->num_q_vectors;
/* reserve slots from OS requested IRQs */
vsi->base_vector = ice_get_res(pf, pf->irq_tracker, num_q_vectors,
@@ -1477,40 +1516,32 @@ void ice_update_eth_stats(struct ice_vsi *vsi)
prev_es = &vsi->eth_stats_prev;
cur_es = &vsi->eth_stats;
- ice_stat_update40(hw, GLV_GORCH(vsi_num), GLV_GORCL(vsi_num),
- vsi->stat_offsets_loaded, &prev_es->rx_bytes,
- &cur_es->rx_bytes);
+ ice_stat_update40(hw, GLV_GORCL(vsi_num), vsi->stat_offsets_loaded,
+ &prev_es->rx_bytes, &cur_es->rx_bytes);
- ice_stat_update40(hw, GLV_UPRCH(vsi_num), GLV_UPRCL(vsi_num),
- vsi->stat_offsets_loaded, &prev_es->rx_unicast,
- &cur_es->rx_unicast);
+ ice_stat_update40(hw, GLV_UPRCL(vsi_num), vsi->stat_offsets_loaded,
+ &prev_es->rx_unicast, &cur_es->rx_unicast);
- ice_stat_update40(hw, GLV_MPRCH(vsi_num), GLV_MPRCL(vsi_num),
- vsi->stat_offsets_loaded, &prev_es->rx_multicast,
- &cur_es->rx_multicast);
+ ice_stat_update40(hw, GLV_MPRCL(vsi_num), vsi->stat_offsets_loaded,
+ &prev_es->rx_multicast, &cur_es->rx_multicast);
- ice_stat_update40(hw, GLV_BPRCH(vsi_num), GLV_BPRCL(vsi_num),
- vsi->stat_offsets_loaded, &prev_es->rx_broadcast,
- &cur_es->rx_broadcast);
+ ice_stat_update40(hw, GLV_BPRCL(vsi_num), vsi->stat_offsets_loaded,
+ &prev_es->rx_broadcast, &cur_es->rx_broadcast);
ice_stat_update32(hw, GLV_RDPC(vsi_num), vsi->stat_offsets_loaded,
&prev_es->rx_discards, &cur_es->rx_discards);
- ice_stat_update40(hw, GLV_GOTCH(vsi_num), GLV_GOTCL(vsi_num),
- vsi->stat_offsets_loaded, &prev_es->tx_bytes,
- &cur_es->tx_bytes);
+ ice_stat_update40(hw, GLV_GOTCL(vsi_num), vsi->stat_offsets_loaded,
+ &prev_es->tx_bytes, &cur_es->tx_bytes);
- ice_stat_update40(hw, GLV_UPTCH(vsi_num), GLV_UPTCL(vsi_num),
- vsi->stat_offsets_loaded, &prev_es->tx_unicast,
- &cur_es->tx_unicast);
+ ice_stat_update40(hw, GLV_UPTCL(vsi_num), vsi->stat_offsets_loaded,
+ &prev_es->tx_unicast, &cur_es->tx_unicast);
- ice_stat_update40(hw, GLV_MPTCH(vsi_num), GLV_MPTCL(vsi_num),
- vsi->stat_offsets_loaded, &prev_es->tx_multicast,
- &cur_es->tx_multicast);
+ ice_stat_update40(hw, GLV_MPTCL(vsi_num), vsi->stat_offsets_loaded,
+ &prev_es->tx_multicast, &cur_es->tx_multicast);
- ice_stat_update40(hw, GLV_BPTCH(vsi_num), GLV_BPTCL(vsi_num),
- vsi->stat_offsets_loaded, &prev_es->tx_broadcast,
- &cur_es->tx_broadcast);
+ ice_stat_update40(hw, GLV_BPTCL(vsi_num), vsi->stat_offsets_loaded,
+ &prev_es->tx_broadcast, &cur_es->tx_broadcast);
ice_stat_update32(hw, GLV_TEPC(vsi_num), vsi->stat_offsets_loaded,
&prev_es->tx_errors, &cur_es->tx_errors);
@@ -1658,6 +1689,62 @@ setup_rings:
}
/**
+ * ice_vsi_cfg_txq - Configure single Tx queue
+ * @vsi: the VSI that queue belongs to
+ * @ring: Tx ring to be configured
+ * @tc_q_idx: queue index within given TC
+ * @qg_buf: queue group buffer
+ * @tc: TC that Tx ring belongs to
+ */
+static int
+ice_vsi_cfg_txq(struct ice_vsi *vsi, struct ice_ring *ring, u16 tc_q_idx,
+ struct ice_aqc_add_tx_qgrp *qg_buf, u8 tc)
+{
+ struct ice_tlan_ctx tlan_ctx = { 0 };
+ struct ice_aqc_add_txqs_perq *txq;
+ struct ice_pf *pf = vsi->back;
+ u8 buf_len = sizeof(*qg_buf);
+ enum ice_status status;
+ u16 pf_q;
+
+ pf_q = ring->reg_idx;
+ ice_setup_tx_ctx(ring, &tlan_ctx, pf_q);
+ /* copy context contents into the qg_buf */
+ qg_buf->txqs[0].txq_id = cpu_to_le16(pf_q);
+ ice_set_ctx((u8 *)&tlan_ctx, qg_buf->txqs[0].txq_ctx,
+ ice_tlan_ctx_info);
+
+	/* init queue specific tail reg. It is referred to as
+ * transmit comm scheduler queue doorbell.
+ */
+ ring->tail = pf->hw.hw_addr + QTX_COMM_DBELL(pf_q);
+
+ /* Add unique software queue handle of the Tx queue per
+ * TC into the VSI Tx ring
+ */
+ ring->q_handle = tc_q_idx;
+
+ status = ice_ena_vsi_txq(vsi->port_info, vsi->idx, tc, ring->q_handle,
+ 1, qg_buf, buf_len, NULL);
+ if (status) {
+ dev_err(&pf->pdev->dev,
+ "Failed to set LAN Tx queue context, error: %d\n",
+ status);
+ return -ENODEV;
+ }
+
+ /* Add Tx Queue TEID into the VSI Tx ring from the
+ * response. This will complete configuring and
+ * enabling the queue.
+ */
+ txq = &qg_buf->txqs[0];
+ if (pf_q == le16_to_cpu(txq->txq_id))
+ ring->txq_teid = le32_to_cpu(txq->q_teid);
+
+ return 0;
+}
+
+/**
* ice_vsi_cfg_txqs - Configure the VSI for Tx
* @vsi: the VSI being configured
* @rings: Tx ring array to be configured
@@ -1670,20 +1757,16 @@ static int
ice_vsi_cfg_txqs(struct ice_vsi *vsi, struct ice_ring **rings, int offset)
{
struct ice_aqc_add_tx_qgrp *qg_buf;
- struct ice_aqc_add_txqs_perq *txq;
struct ice_pf *pf = vsi->back;
- u8 num_q_grps, q_idx = 0;
- enum ice_status status;
- u16 buf_len, i, pf_q;
- int err = 0, tc;
+ u16 q_idx = 0, i;
+ int err = 0;
+ u8 tc;
- buf_len = sizeof(*qg_buf);
- qg_buf = devm_kzalloc(&pf->pdev->dev, buf_len, GFP_KERNEL);
+ qg_buf = devm_kzalloc(&pf->pdev->dev, sizeof(*qg_buf), GFP_KERNEL);
if (!qg_buf)
return -ENOMEM;
qg_buf->num_txqs = 1;
- num_q_grps = 1;
/* set up and configure the Tx queues for each enabled TC */
ice_for_each_traffic_class(tc) {
@@ -1691,39 +1774,10 @@ ice_vsi_cfg_txqs(struct ice_vsi *vsi, struct ice_ring **rings, int offset)
break;
for (i = 0; i < vsi->tc_cfg.tc_info[tc].qcount_tx; i++) {
- struct ice_tlan_ctx tlan_ctx = { 0 };
-
- pf_q = vsi->txq_map[q_idx + offset];
- ice_setup_tx_ctx(rings[q_idx], &tlan_ctx, pf_q);
- /* copy context contents into the qg_buf */
- qg_buf->txqs[0].txq_id = cpu_to_le16(pf_q);
- ice_set_ctx((u8 *)&tlan_ctx, qg_buf->txqs[0].txq_ctx,
- ice_tlan_ctx_info);
-
- /* init queue specific tail reg. It is referred as
- * transmit comm scheduler queue doorbell.
- */
- rings[q_idx]->tail =
- pf->hw.hw_addr + QTX_COMM_DBELL(pf_q);
- status = ice_ena_vsi_txq(vsi->port_info, vsi->idx, tc,
- i, num_q_grps, qg_buf,
- buf_len, NULL);
- if (status) {
- dev_err(&pf->pdev->dev,
- "Failed to set LAN Tx queue context, error: %d\n",
- status);
- err = -ENODEV;
+ err = ice_vsi_cfg_txq(vsi, rings[q_idx], i + offset,
+ qg_buf, tc);
+ if (err)
goto err_cfg_txqs;
- }
-
- /* Add Tx Queue TEID into the VSI Tx ring from the
- * response. This will complete configuring and
- * enabling the queue.
- */
- txq = &qg_buf->txqs[0];
- if (pf_q == le16_to_cpu(txq->txq_id))
- rings[q_idx]->txq_teid =
- le32_to_cpu(txq->q_teid);
q_idx++;
}
@@ -2070,45 +2124,112 @@ void ice_trigger_sw_intr(struct ice_hw *hw, struct ice_q_vector *q_vector)
}
/**
- * ice_vsi_stop_tx_rings - Disable Tx rings
+ * ice_vsi_stop_tx_ring - Disable single Tx ring
* @vsi: the VSI being configured
* @rst_src: reset source
* @rel_vmvf_num: Relative ID of VF/VM
- * @rings: Tx ring array to be stopped
- * @offset: offset within vsi->txq_map
+ * @ring: Tx ring to be stopped
+ * @txq_meta: Metadata of the Tx ring to be stopped
*/
-static int
-ice_vsi_stop_tx_rings(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src,
- u16 rel_vmvf_num, struct ice_ring **rings, int offset)
+#ifndef CONFIG_PCI_IOV
+static
+#endif /* !CONFIG_PCI_IOV */
+int
+ice_vsi_stop_tx_ring(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src,
+ u16 rel_vmvf_num, struct ice_ring *ring,
+ struct ice_txq_meta *txq_meta)
{
struct ice_pf *pf = vsi->back;
+ struct ice_q_vector *q_vector;
struct ice_hw *hw = &pf->hw;
- int tc, q_idx = 0, err = 0;
- u16 *q_ids, *q_handles, i;
enum ice_status status;
- u32 *q_teids, val;
+ u32 val;
- if (vsi->num_txq > ICE_LAN_TXQ_MAX_QDIS)
- return -EINVAL;
+ /* clear cause_ena bit for disabled queues */
+ val = rd32(hw, QINT_TQCTL(ring->reg_idx));
+ val &= ~QINT_TQCTL_CAUSE_ENA_M;
+ wr32(hw, QINT_TQCTL(ring->reg_idx), val);
- q_teids = devm_kcalloc(&pf->pdev->dev, vsi->num_txq, sizeof(*q_teids),
- GFP_KERNEL);
- if (!q_teids)
- return -ENOMEM;
+ /* software is expected to wait for 100 ns */
+ ndelay(100);
- q_ids = devm_kcalloc(&pf->pdev->dev, vsi->num_txq, sizeof(*q_ids),
- GFP_KERNEL);
- if (!q_ids) {
- err = -ENOMEM;
- goto err_alloc_q_ids;
+	/* trigger a software interrupt for the vector
+	 * associated with the queue to schedule the NAPI handler
+	 */
+ q_vector = ring->q_vector;
+ if (q_vector)
+ ice_trigger_sw_intr(hw, q_vector);
+
+ status = ice_dis_vsi_txq(vsi->port_info, txq_meta->vsi_idx,
+ txq_meta->tc, 1, &txq_meta->q_handle,
+ &txq_meta->q_id, &txq_meta->q_teid, rst_src,
+ rel_vmvf_num, NULL);
+
+ /* if the disable queue command was exercised during an
+ * active reset flow, ICE_ERR_RESET_ONGOING is returned.
+ * This is not an error as the reset operation disables
+ * queues at the hardware level anyway.
+ */
+ if (status == ICE_ERR_RESET_ONGOING) {
+ dev_dbg(&vsi->back->pdev->dev,
+ "Reset in progress. LAN Tx queues already disabled\n");
+ } else if (status == ICE_ERR_DOES_NOT_EXIST) {
+ dev_dbg(&vsi->back->pdev->dev,
+ "LAN Tx queues do not exist, nothing to disable\n");
+ } else if (status) {
+ dev_err(&vsi->back->pdev->dev,
+ "Failed to disable LAN Tx queues, error: %d\n", status);
+ return -ENODEV;
}
- q_handles = devm_kcalloc(&pf->pdev->dev, vsi->num_txq,
- sizeof(*q_handles), GFP_KERNEL);
- if (!q_handles) {
- err = -ENOMEM;
- goto err_alloc_q_handles;
- }
+ return 0;
+}
+
+/**
+ * ice_fill_txq_meta - Prepare the Tx queue's meta data
+ * @vsi: VSI that ring belongs to
+ * @ring: ring that txq_meta will be based on
+ * @txq_meta: a helper struct that wraps the Tx queue's information
+ *
+ * Set up a helper struct with all the fields needed to stop
+ * the Tx queue
+ */
+#ifndef CONFIG_PCI_IOV
+static
+#endif /* !CONFIG_PCI_IOV */
+void
+ice_fill_txq_meta(struct ice_vsi *vsi, struct ice_ring *ring,
+ struct ice_txq_meta *txq_meta)
+{
+ u8 tc = 0;
+
+#ifdef CONFIG_DCB
+ tc = ring->dcb_tc;
+#endif /* CONFIG_DCB */
+ txq_meta->q_id = ring->reg_idx;
+ txq_meta->q_teid = ring->txq_teid;
+ txq_meta->q_handle = ring->q_handle;
+ txq_meta->vsi_idx = vsi->idx;
+ txq_meta->tc = tc;
+}
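
Taken together, stopping a single queue is now a two-step call: fill the
metadata from the ring, then issue the stop. A minimal sketch of that
pattern, assuming ICE_NO_RESET as the reset source (error unwinding
elided; the real loop appears in ice_vsi_stop_tx_rings() below):

	struct ice_txq_meta txq_meta = { };
	int err;

	ice_fill_txq_meta(vsi, ring, &txq_meta);
	err = ice_vsi_stop_tx_ring(vsi, ICE_NO_RESET, 0, ring, &txq_meta);
	if (err)
		return err;	/* caller decides how to unwind */
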
+
+/**
+ * ice_vsi_stop_tx_rings - Disable Tx rings
+ * @vsi: the VSI being configured
+ * @rst_src: reset source
+ * @rel_vmvf_num: Relative ID of VF/VM
+ * @rings: Tx ring array to be stopped
+ */
+static int
+ice_vsi_stop_tx_rings(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src,
+ u16 rel_vmvf_num, struct ice_ring **rings)
+{
+ u16 i, q_idx = 0;
+ int status;
+ u8 tc;
+
+ if (vsi->num_txq > ICE_LAN_TXQ_MAX_QDIS)
+ return -EINVAL;
/* set up the Tx queue list to be disabled for each enabled TC */
ice_for_each_traffic_class(tc) {
@@ -2116,64 +2237,24 @@ ice_vsi_stop_tx_rings(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src,
break;
for (i = 0; i < vsi->tc_cfg.tc_info[tc].qcount_tx; i++) {
- struct ice_q_vector *q_vector;
-
- if (!rings || !rings[q_idx]) {
- err = -EINVAL;
- goto err_out;
- }
+ struct ice_txq_meta txq_meta = { };
- q_ids[i] = vsi->txq_map[q_idx + offset];
- q_teids[i] = rings[q_idx]->txq_teid;
- q_handles[i] = i;
+ if (!rings || !rings[q_idx])
+ return -EINVAL;
- /* clear cause_ena bit for disabled queues */
- val = rd32(hw, QINT_TQCTL(rings[i]->reg_idx));
- val &= ~QINT_TQCTL_CAUSE_ENA_M;
- wr32(hw, QINT_TQCTL(rings[i]->reg_idx), val);
+ ice_fill_txq_meta(vsi, rings[q_idx], &txq_meta);
+ status = ice_vsi_stop_tx_ring(vsi, rst_src,
+ rel_vmvf_num,
+ rings[q_idx], &txq_meta);
- /* software is expected to wait for 100 ns */
- ndelay(100);
-
- /* trigger a software interrupt for the vector
- * associated to the queue to schedule NAPI handler
- */
- q_vector = rings[i]->q_vector;
- if (q_vector)
- ice_trigger_sw_intr(hw, q_vector);
+ if (status)
+ return status;
q_idx++;
}
- status = ice_dis_vsi_txq(vsi->port_info, vsi->idx, tc,
- vsi->num_txq, q_handles, q_ids,
- q_teids, rst_src, rel_vmvf_num, NULL);
-
- /* if the disable queue command was exercised during an active
- * reset flow, ICE_ERR_RESET_ONGOING is returned. This is not
- * an error as the reset operation disables queues at the
- * hardware level anyway.
- */
- if (status == ICE_ERR_RESET_ONGOING) {
- dev_dbg(&pf->pdev->dev,
- "Reset in progress. LAN Tx queues already disabled\n");
- } else if (status) {
- dev_err(&pf->pdev->dev,
- "Failed to disable LAN Tx queues, error: %d\n",
- status);
- err = -ENODEV;
- }
}
-err_out:
- devm_kfree(&pf->pdev->dev, q_handles);
-
-err_alloc_q_handles:
- devm_kfree(&pf->pdev->dev, q_ids);
-
-err_alloc_q_ids:
- devm_kfree(&pf->pdev->dev, q_teids);
-
- return err;
+ return 0;
}
/**
@@ -2186,8 +2267,7 @@ int
ice_vsi_stop_lan_tx_rings(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src,
u16 rel_vmvf_num)
{
- return ice_vsi_stop_tx_rings(vsi, rst_src, rel_vmvf_num, vsi->tx_rings,
- 0);
+ return ice_vsi_stop_tx_rings(vsi, rst_src, rel_vmvf_num, vsi->tx_rings);
}
/**
@@ -2519,7 +2599,7 @@ ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi,
/* configure VSI nodes based on number of queues and TC's */
for (i = 0; i < vsi->tc_cfg.numtc; i++)
- max_txqs[i] = pf->num_lan_tx;
+ max_txqs[i] = vsi->alloc_txq;
status = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc,
max_txqs);
@@ -2547,7 +2627,7 @@ ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi,
ice_cfg_sw_lldp(vsi, true, true);
/* Rx LLDP packets */
- if (!test_bit(ICE_FLAG_ENABLE_FW_LLDP, pf->flags))
+ if (!test_bit(ICE_FLAG_FW_LLDP_AGENT, pf->flags))
ice_cfg_sw_lldp(vsi, false, true);
}
@@ -2610,39 +2690,36 @@ void ice_vsi_free_irq(struct ice_vsi *vsi)
{
struct ice_pf *pf = vsi->back;
int base = vsi->base_vector;
+ int i;
- if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags)) {
- int i;
-
- if (!vsi->q_vectors || !vsi->irqs_ready)
- return;
+ if (!vsi->q_vectors || !vsi->irqs_ready)
+ return;
- ice_vsi_release_msix(vsi);
- if (vsi->type == ICE_VSI_VF)
- return;
+ ice_vsi_release_msix(vsi);
+ if (vsi->type == ICE_VSI_VF)
+ return;
- vsi->irqs_ready = false;
- ice_for_each_q_vector(vsi, i) {
- u16 vector = i + base;
- int irq_num;
+ vsi->irqs_ready = false;
+ ice_for_each_q_vector(vsi, i) {
+ u16 vector = i + base;
+ int irq_num;
- irq_num = pf->msix_entries[vector].vector;
+ irq_num = pf->msix_entries[vector].vector;
- /* free only the irqs that were actually requested */
- if (!vsi->q_vectors[i] ||
- !(vsi->q_vectors[i]->num_ring_tx ||
- vsi->q_vectors[i]->num_ring_rx))
- continue;
+ /* free only the irqs that were actually requested */
+ if (!vsi->q_vectors[i] ||
+ !(vsi->q_vectors[i]->num_ring_tx ||
+ vsi->q_vectors[i]->num_ring_rx))
+ continue;
- /* clear the affinity notifier in the IRQ descriptor */
- irq_set_affinity_notifier(irq_num, NULL);
+ /* clear the affinity notifier in the IRQ descriptor */
+ irq_set_affinity_notifier(irq_num, NULL);
- /* clear the affinity_mask in the IRQ descriptor */
- irq_set_affinity_hint(irq_num, NULL);
- synchronize_irq(irq_num);
- devm_free_irq(&pf->pdev->dev, irq_num,
- vsi->q_vectors[i]);
- }
+ /* clear the affinity_mask in the IRQ descriptor */
+ irq_set_affinity_hint(irq_num, NULL);
+ synchronize_irq(irq_num);
+ devm_free_irq(&pf->pdev->dev, irq_num,
+ vsi->q_vectors[i]);
}
}
@@ -2821,15 +2898,17 @@ void ice_vsi_dis_irq(struct ice_vsi *vsi)
}
/* disable each interrupt */
- if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags)) {
- ice_for_each_q_vector(vsi, i)
- wr32(hw, GLINT_DYN_CTL(vsi->q_vectors[i]->reg_idx), 0);
+ ice_for_each_q_vector(vsi, i)
+ wr32(hw, GLINT_DYN_CTL(vsi->q_vectors[i]->reg_idx), 0);
- ice_flush(hw);
+ ice_flush(hw);
- ice_for_each_q_vector(vsi, i)
- synchronize_irq(pf->msix_entries[i + base].vector);
- }
+ /* don't call synchronize_irq() for VF's from the host */
+ if (vsi->type == ICE_VSI_VF)
+ return;
+
+ ice_for_each_q_vector(vsi, i)
+ synchronize_irq(pf->msix_entries[i + base].vector);
}
/**
@@ -2895,7 +2974,7 @@ int ice_vsi_release(struct ice_vsi *vsi)
/* The Rx rule will only exist to remove if the LLDP FW
* engine is currently stopped
*/
- if (!test_bit(ICE_FLAG_ENABLE_FW_LLDP, pf->flags))
+ if (!test_bit(ICE_FLAG_FW_LLDP_AGENT, pf->flags))
ice_cfg_sw_lldp(vsi, false, false);
}
@@ -2962,6 +3041,7 @@ int ice_vsi_rebuild(struct ice_vsi *vsi)
vsi->base_vector = 0;
}
+ ice_vsi_put_qs(vsi);
ice_vsi_clear_rings(vsi);
ice_vsi_free_arrays(vsi);
ice_dev_onetime_setup(&pf->hw);
@@ -2969,6 +3049,12 @@ int ice_vsi_rebuild(struct ice_vsi *vsi)
ice_vsi_set_num_qs(vsi, vf->vf_id);
else
ice_vsi_set_num_qs(vsi, ICE_INVAL_VFID);
+
+ ret = ice_vsi_alloc_arrays(vsi);
+ if (ret < 0)
+ goto err_vsi;
+
+ ice_vsi_get_qs(vsi);
ice_vsi_set_tc_cfg(vsi);
/* Initialize VSI struct elements and create VSI in FW */
@@ -2976,9 +3062,6 @@ int ice_vsi_rebuild(struct ice_vsi *vsi)
if (ret < 0)
goto err_vsi;
- ret = ice_vsi_alloc_arrays(vsi);
- if (ret < 0)
- goto err_vsi;
switch (vsi->type) {
case ICE_VSI_PF:
@@ -2986,6 +3069,10 @@ int ice_vsi_rebuild(struct ice_vsi *vsi)
if (ret)
goto err_rings;
+ ret = ice_vsi_setup_vector_base(vsi);
+ if (ret)
+ goto err_vectors;
+
ret = ice_vsi_set_q_vectors_reg_idx(vsi);
if (ret)
goto err_vectors;
@@ -3007,10 +3094,6 @@ int ice_vsi_rebuild(struct ice_vsi *vsi)
if (ret)
goto err_rings;
- ret = ice_vsi_setup_vector_base(vsi);
- if (ret)
- goto err_vectors;
-
ret = ice_vsi_set_q_vectors_reg_idx(vsi);
if (ret)
goto err_vectors;
@@ -3028,7 +3111,7 @@ int ice_vsi_rebuild(struct ice_vsi *vsi)
/* configure VSI nodes based on number of queues and TC's */
for (i = 0; i < vsi->tc_cfg.numtc; i++)
- max_txqs[i] = pf->num_lan_tx;
+ max_txqs[i] = vsi->alloc_txq;
status = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc,
max_txqs);
@@ -3145,7 +3228,7 @@ int ice_vsi_cfg_tc(struct ice_vsi *vsi, u8 ena_tc)
if (ena_tc & BIT(i))
num_tc++;
/* populate max_txqs per TC */
- max_txqs[i] = pf->num_lan_tx;
+ max_txqs[i] = vsi->alloc_txq;
}
vsi->tc_cfg.ena_tc = ena_tc;
@@ -3188,3 +3271,33 @@ out:
return ret;
}
#endif /* CONFIG_DCB */
+
+/**
+ * ice_vsi_cfg_mac_fltr - Add or remove a MAC address filter for a VSI
+ * @vsi: the VSI to configure the MAC filter for
+ * @macaddr: the MAC address to be added or removed
+ * @set: true to add the MAC filter, false to delete it
+ *
+ * Adds or removes a MAC address filter entry for the given VSI
+ */
+enum ice_status
+ice_vsi_cfg_mac_fltr(struct ice_vsi *vsi, const u8 *macaddr, bool set)
+{
+ LIST_HEAD(tmp_add_list);
+ enum ice_status status;
+
+ /* Update MAC filter list to be added or removed for a VSI */
+ if (ice_add_mac_to_list(vsi, &tmp_add_list, macaddr)) {
+ status = ICE_ERR_NO_MEMORY;
+ goto cfg_mac_fltr_exit;
+ }
+
+ if (set)
+ status = ice_add_mac(&vsi->back->hw, &tmp_add_list);
+ else
+ status = ice_remove_mac(&vsi->back->hw, &tmp_add_list);
+
+cfg_mac_fltr_exit:
+ ice_free_fltr_list(&vsi->back->pdev->dev, &tmp_add_list);
+ return status;
+}
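
A hedged usage sketch: swapping a VSI's MAC address is one remove
followed by one add (old_mac and new_mac are placeholders; both calls
return enum ice_status, which callers map to an errno, as
ice_set_mac_address() does below):

	enum ice_status status;

	status = ice_vsi_cfg_mac_fltr(vsi, old_mac, false); /* remove old */
	if (!status)
		status = ice_vsi_cfg_mac_fltr(vsi, new_mac, true); /* add new */
	if (status)
		return -EADDRNOTAVAIL;
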
diff --git a/drivers/net/ethernet/intel/ice/ice_lib.h b/drivers/net/ethernet/intel/ice/ice_lib.h
index 6e43ef03bfc3..7faf8db844f6 100644
--- a/drivers/net/ethernet/intel/ice/ice_lib.h
+++ b/drivers/net/ethernet/intel/ice/ice_lib.h
@@ -6,8 +6,22 @@
#include "ice.h"
-int ice_add_mac_to_list(struct ice_vsi *vsi, struct list_head *add_list,
- const u8 *macaddr);
+struct ice_txq_meta {
+ /* Tx-scheduler element identifier */
+ u32 q_teid;
+ /* Entry in VSI's txq_map bitmap */
+ u16 q_id;
+ /* Relative index of Tx queue within TC */
+ u16 q_handle;
+ /* VSI index that Tx queue belongs to */
+ u16 vsi_idx;
+ /* TC number that Tx queue belongs to */
+ u8 tc;
+};
+
+int
+ice_add_mac_to_list(struct ice_vsi *vsi, struct list_head *add_list,
+ const u8 *macaddr);
void ice_free_fltr_list(struct device *dev, struct list_head *h);
@@ -25,6 +39,16 @@ ice_cfg_txq_interrupt(struct ice_vsi *vsi, u16 txq, u16 msix_idx, u16 itr_idx);
void
ice_cfg_rxq_interrupt(struct ice_vsi *vsi, u16 rxq, u16 msix_idx, u16 itr_idx);
+
+int
+ice_vsi_stop_tx_ring(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src,
+ u16 rel_vmvf_num, struct ice_ring *ring,
+ struct ice_txq_meta *txq_meta);
+
+void ice_fill_txq_meta(struct ice_vsi *vsi, struct ice_ring *ring,
+ struct ice_txq_meta *txq_meta);
+
+int ice_vsi_ctrl_rx_ring(struct ice_vsi *vsi, bool ena, u16 rxq_idx);
#endif /* CONFIG_PCI_IOV */
int ice_vsi_add_vlan(struct ice_vsi *vsi, u16 vid);
@@ -95,4 +119,8 @@ void ice_vsi_free_tx_rings(struct ice_vsi *vsi);
int ice_vsi_manage_rss_lut(struct ice_vsi *vsi, bool ena);
u32 ice_intrl_usec_to_reg(u8 intrl, u8 gran);
+
+enum ice_status
+ice_vsi_cfg_mac_fltr(struct ice_vsi *vsi, const u8 *macaddr, bool set);
+
#endif /* !_ICE_LIB_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c
index 63db08d9bafa..f029aee32913 100644
--- a/drivers/net/ethernet/intel/ice/ice_main.c
+++ b/drivers/net/ethernet/intel/ice/ice_main.c
@@ -9,7 +9,7 @@
#include "ice_lib.h"
#include "ice_dcb_lib.h"
-#define DRV_VERSION "0.7.4-k"
+#define DRV_VERSION "0.7.5-k"
#define DRV_SUMMARY "Intel(R) Ethernet Connection E800 Series Linux Driver"
const char ice_drv_ver[] = DRV_VERSION;
static const char ice_driver_string[] = DRV_SUMMARY;
@@ -34,19 +34,17 @@ static const struct net_device_ops ice_netdev_ops;
static void ice_rebuild(struct ice_pf *pf);
static void ice_vsi_release_all(struct ice_pf *pf);
-static void ice_update_vsi_stats(struct ice_vsi *vsi);
-static void ice_update_pf_stats(struct ice_pf *pf);
/**
* ice_get_tx_pending - returns number of Tx descriptors not processed
* @ring: the ring of descriptors
*/
-static u32 ice_get_tx_pending(struct ice_ring *ring)
+static u16 ice_get_tx_pending(struct ice_ring *ring)
{
- u32 head, tail;
+ u16 head, tail;
head = ring->next_to_clean;
- tail = readl(ring->tail);
+ tail = ring->next_to_use;
if (head != tail)
return (head < tail) ?
@@ -118,10 +116,9 @@ static void ice_check_for_hang_subtask(struct ice_pf *pf)
*/
static int ice_init_mac_fltr(struct ice_pf *pf)
{
- LIST_HEAD(tmp_add_list);
+ enum ice_status status;
u8 broadcast[ETH_ALEN];
struct ice_vsi *vsi;
- int status;
vsi = ice_find_vsi_by_type(pf, ICE_VSI_PF);
if (!vsi)
@@ -132,8 +129,7 @@ static int ice_init_mac_fltr(struct ice_pf *pf)
*/
/* Add a unicast MAC filter so the VSI can get its packets */
- status = ice_add_mac_to_list(vsi, &tmp_add_list,
- vsi->port_info->mac.perm_addr);
+ status = ice_vsi_cfg_mac_fltr(vsi, vsi->port_info->mac.perm_addr, true);
if (status)
goto unregister;
@@ -141,18 +137,11 @@ static int ice_init_mac_fltr(struct ice_pf *pf)
* MAC address to the list as well.
*/
eth_broadcast_addr(broadcast);
- status = ice_add_mac_to_list(vsi, &tmp_add_list, broadcast);
+ status = ice_vsi_cfg_mac_fltr(vsi, broadcast, true);
if (status)
- goto free_mac_list;
-
- /* Program MAC filters for entries in tmp_add_list */
- status = ice_add_mac(&pf->hw, &tmp_add_list);
- if (status)
- status = -ENOMEM;
-
-free_mac_list:
- ice_free_fltr_list(&pf->pdev->dev, &tmp_add_list);
+ goto unregister;
+ return 0;
unregister:
/* We aren't useful with no MAC filters, so unregister if we
* had an error
@@ -166,7 +155,7 @@ unregister:
vsi->netdev = NULL;
}
- return status;
+ return -EIO;
}
/**
@@ -447,13 +436,13 @@ static void ice_dis_vsi(struct ice_vsi *vsi, bool locked)
if (vsi->type == ICE_VSI_PF && vsi->netdev) {
if (netif_running(vsi->netdev)) {
- if (!locked) {
+ if (!locked)
rtnl_lock();
- vsi->netdev->netdev_ops->ndo_stop(vsi->netdev);
+
+ ice_stop(vsi->netdev);
+
+ if (!locked)
rtnl_unlock();
- } else {
- vsi->netdev->netdev_ops->ndo_stop(vsi->netdev);
- }
} else {
ice_vsi_close(vsi);
}
@@ -488,6 +477,7 @@ static void
ice_prepare_for_reset(struct ice_pf *pf)
{
struct ice_hw *hw = &pf->hw;
+ int i;
/* already prepared for reset */
if (test_bit(__ICE_PREPARED_FOR_RESET, pf->state))
@@ -497,6 +487,10 @@ ice_prepare_for_reset(struct ice_pf *pf)
if (ice_check_sq_alive(hw, &hw->mailboxq))
ice_vc_notify_reset(pf);
+ /* Disable VFs until reset is completed */
+ for (i = 0; i < pf->num_alloc_vfs; i++)
+ ice_set_vf_state_qs_dis(&pf->vf[i]);
+
/* disable the VSIs and their queues that are not already DOWN */
ice_pf_dis_all_vsi(pf, false);
@@ -810,6 +804,20 @@ ice_link_event(struct ice_pf *pf, struct ice_port_info *pi, bool link_up,
if (!vsi || !vsi->port_info)
return -EINVAL;
+ /* turn off PHY if media was removed */
+ if (!test_bit(ICE_FLAG_NO_MEDIA, pf->flags) &&
+ !(pi->phy.link_info.link_info & ICE_AQ_MEDIA_AVAILABLE)) {
+ set_bit(ICE_FLAG_NO_MEDIA, pf->flags);
+
+ result = ice_aq_set_link_restart_an(pi, false, NULL);
+ if (result) {
+ dev_dbg(&pf->pdev->dev,
+ "Failed to set link down, VSI %d error %d\n",
+ vsi->vsi_num, result);
+ return result;
+ }
+ }
+
ice_vsi_link_event(vsi, link_up);
ice_print_link_msg(vsi, link_up);
@@ -1307,14 +1315,134 @@ static void ice_handle_mdd_event(struct ice_pf *pf)
if (vf_mdd_detected) {
vf->num_mdd_events++;
- if (vf->num_mdd_events > 1)
- dev_info(&pf->pdev->dev, "VF %d has had %llu MDD events since last boot\n",
+ if (vf->num_mdd_events &&
+ vf->num_mdd_events <= ICE_MDD_EVENTS_THRESHOLD)
+ dev_info(&pf->pdev->dev,
+ "VF %d has had %llu MDD events since last boot, Admin might need to reload AVF driver with this number of events\n",
i, vf->num_mdd_events);
}
}
}
/**
+ * ice_force_phys_link_state - Force the physical link state
+ * @vsi: VSI to force the physical link state to up/down
+ * @link_up: true/false indicates to set the physical link to up/down
+ *
+ * Force the physical link state by getting the current PHY capabilities from
+ * hardware and setting the PHY config based on the determined capabilities. If
+ * link changes a link event will be triggered because both the Enable Automatic
+ * Link Update and LESM Enable bits are set when setting the PHY capabilities.
+ *
+ * Returns 0 on success, negative on failure
+ */
+static int ice_force_phys_link_state(struct ice_vsi *vsi, bool link_up)
+{
+ struct ice_aqc_get_phy_caps_data *pcaps;
+ struct ice_aqc_set_phy_cfg_data *cfg;
+ struct ice_port_info *pi;
+ struct device *dev;
+ int retcode;
+
+ if (!vsi || !vsi->port_info || !vsi->back)
+ return -EINVAL;
+ if (vsi->type != ICE_VSI_PF)
+ return 0;
+
+ dev = &vsi->back->pdev->dev;
+
+ pi = vsi->port_info;
+
+ pcaps = devm_kzalloc(dev, sizeof(*pcaps), GFP_KERNEL);
+ if (!pcaps)
+ return -ENOMEM;
+
+ retcode = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_SW_CFG, pcaps,
+ NULL);
+ if (retcode) {
+ dev_err(dev,
+ "Failed to get phy capabilities, VSI %d error %d\n",
+ vsi->vsi_num, retcode);
+ retcode = -EIO;
+ goto out;
+ }
+
+ /* No change in link */
+ if (link_up == !!(pcaps->caps & ICE_AQC_PHY_EN_LINK) &&
+ link_up == !!(pi->phy.link_info.link_info & ICE_AQ_LINK_UP))
+ goto out;
+
+ cfg = devm_kzalloc(dev, sizeof(*cfg), GFP_KERNEL);
+ if (!cfg) {
+ retcode = -ENOMEM;
+ goto out;
+ }
+
+ cfg->phy_type_low = pcaps->phy_type_low;
+ cfg->phy_type_high = pcaps->phy_type_high;
+ cfg->caps = pcaps->caps | ICE_AQ_PHY_ENA_AUTO_LINK_UPDT;
+ cfg->low_power_ctrl = pcaps->low_power_ctrl;
+ cfg->eee_cap = pcaps->eee_cap;
+ cfg->eeer_value = pcaps->eeer_value;
+ cfg->link_fec_opt = pcaps->link_fec_options;
+ if (link_up)
+ cfg->caps |= ICE_AQ_PHY_ENA_LINK;
+ else
+ cfg->caps &= ~ICE_AQ_PHY_ENA_LINK;
+
+ retcode = ice_aq_set_phy_cfg(&vsi->back->hw, pi->lport, cfg, NULL);
+ if (retcode) {
+ dev_err(dev, "Failed to set phy config, VSI %d error %d\n",
+ vsi->vsi_num, retcode);
+ retcode = -EIO;
+ }
+
+ devm_kfree(dev, cfg);
+out:
+ devm_kfree(dev, pcaps);
+ return retcode;
+}
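
A minimal usage sketch (the driver's own call sites are ice_open() and
the media subtask below; the message text is illustrative):

	/* e.g. force the port link down while the interface is stopped */
	err = ice_force_phys_link_state(vsi, false);
	if (err)
		netdev_err(vsi->netdev,
			   "set physical link down failed, error %d\n", err);
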
+
+/**
+ * ice_check_media_subtask - Check for media; bring link up if detected.
+ * @pf: pointer to PF struct
+ */
+static void ice_check_media_subtask(struct ice_pf *pf)
+{
+ struct ice_port_info *pi;
+ struct ice_vsi *vsi;
+ int err;
+
+ vsi = ice_find_vsi_by_type(pf, ICE_VSI_PF);
+ if (!vsi)
+ return;
+
+ /* No need to check for media if it's already present or the interface
+ * is down
+ */
+ if (!test_bit(ICE_FLAG_NO_MEDIA, pf->flags) ||
+ test_bit(__ICE_DOWN, vsi->state))
+ return;
+
+ /* Refresh link info and check if media is present */
+ pi = vsi->port_info;
+ err = ice_update_link_info(pi);
+ if (err)
+ return;
+
+ if (pi->phy.link_info.link_info & ICE_AQ_MEDIA_AVAILABLE) {
+ err = ice_force_phys_link_state(vsi, true);
+ if (err)
+ return;
+ clear_bit(ICE_FLAG_NO_MEDIA, pf->flags);
+
+ /* A Link Status Event will be generated; the event handler
+ * will complete bringing the interface up
+ */
+ }
+}
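
ICE_FLAG_NO_MEDIA acts as a small state machine across these paths; the
transitions, as implemented in this patch, are:

	/* media removed   : ice_link_event() / ice_open()
	 *                   -> set_bit(ICE_FLAG_NO_MEDIA), PHY turned off
	 * media reinserted : ice_check_media_subtask() via the service task
	 *                   -> PHY turned on, clear_bit(ICE_FLAG_NO_MEDIA)
	 */
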
+
+/**
* ice_service_task - manage and run subtasks
* @work: pointer to work_struct contained by the PF struct
*/
@@ -1336,6 +1464,7 @@ static void ice_service_task(struct work_struct *work)
return;
}
+ ice_check_media_subtask(pf);
ice_check_for_hang_subtask(pf);
ice_sync_fltr_subtask(pf);
ice_handle_mdd_event(pf);
@@ -1369,8 +1498,8 @@ static void ice_set_ctrlq_len(struct ice_hw *hw)
hw->adminq.num_sq_entries = ICE_AQ_LEN;
hw->adminq.rq_buf_size = ICE_AQ_MAX_BUF_LEN;
hw->adminq.sq_buf_size = ICE_AQ_MAX_BUF_LEN;
- hw->mailboxq.num_rq_entries = ICE_MBXQ_LEN;
- hw->mailboxq.num_sq_entries = ICE_MBXQ_LEN;
+ hw->mailboxq.num_rq_entries = ICE_MBXRQ_LEN;
+ hw->mailboxq.num_sq_entries = ICE_MBXSQ_LEN;
hw->mailboxq.rq_buf_size = ICE_MBXQ_MAX_BUF_LEN;
hw->mailboxq.sq_buf_size = ICE_MBXQ_MAX_BUF_LEN;
}
@@ -1409,15 +1538,11 @@ static void ice_irq_affinity_release(struct kref __always_unused *ref) {}
*/
static int ice_vsi_ena_irq(struct ice_vsi *vsi)
{
- struct ice_pf *pf = vsi->back;
- struct ice_hw *hw = &pf->hw;
+ struct ice_hw *hw = &vsi->back->hw;
+ int i;
- if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags)) {
- int i;
-
- ice_for_each_q_vector(vsi, i)
- ice_irq_dynamic_ena(hw, vsi, vsi->q_vectors[i]);
- }
+ ice_for_each_q_vector(vsi, i)
+ ice_irq_dynamic_ena(hw, vsi, vsi->q_vectors[i]);
ice_flush(hw);
return 0;
@@ -1665,7 +1790,7 @@ static void ice_free_irq_msix_misc(struct ice_pf *pf)
wr32(hw, PFINT_OICR_ENA, 0);
ice_flush(hw);
- if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags) && pf->msix_entries) {
+ if (pf->msix_entries) {
synchronize_irq(pf->msix_entries[pf->oicr_idx].vector);
devm_free_irq(&pf->pdev->dev,
pf->msix_entries[pf->oicr_idx].vector, pf);
@@ -2082,16 +2207,25 @@ static void ice_deinit_pf(struct ice_pf *pf)
ice_service_task_stop(pf);
mutex_destroy(&pf->sw_mutex);
mutex_destroy(&pf->avail_q_mutex);
+
+ if (pf->avail_txqs) {
+ bitmap_free(pf->avail_txqs);
+ pf->avail_txqs = NULL;
+ }
+
+ if (pf->avail_rxqs) {
+ bitmap_free(pf->avail_rxqs);
+ pf->avail_rxqs = NULL;
+ }
}
/**
* ice_init_pf - Initialize general software structures (struct ice_pf)
* @pf: board private structure to initialize
*/
-static void ice_init_pf(struct ice_pf *pf)
+static int ice_init_pf(struct ice_pf *pf)
{
bitmap_zero(pf->flags, ICE_PF_FLAGS_NBITS);
- set_bit(ICE_FLAG_MSIX_ENA, pf->flags);
#ifdef CONFIG_PCI_IOV
if (pf->hw.func_caps.common_cap.sr_iov_1_1) {
struct ice_hw *hw = &pf->hw;
@@ -2105,12 +2239,6 @@ static void ice_init_pf(struct ice_pf *pf)
mutex_init(&pf->sw_mutex);
mutex_init(&pf->avail_q_mutex);
- /* Clear avail_[t|r]x_qs bitmaps (set all to avail) */
- mutex_lock(&pf->avail_q_mutex);
- bitmap_zero(pf->avail_txqs, ICE_MAX_TXQS);
- bitmap_zero(pf->avail_rxqs, ICE_MAX_RXQS);
- mutex_unlock(&pf->avail_q_mutex);
-
if (pf->hw.func_caps.common_cap.rss_table_size)
set_bit(ICE_FLAG_RSS_ENA, pf->flags);
@@ -2119,6 +2247,22 @@ static void ice_init_pf(struct ice_pf *pf)
pf->serv_tmr_period = HZ;
INIT_WORK(&pf->serv_task, ice_service_task);
clear_bit(__ICE_SERVICE_SCHED, pf->state);
+
+ pf->max_pf_txqs = pf->hw.func_caps.common_cap.num_txq;
+ pf->max_pf_rxqs = pf->hw.func_caps.common_cap.num_rxq;
+
+ pf->avail_txqs = bitmap_zalloc(pf->max_pf_txqs, GFP_KERNEL);
+ if (!pf->avail_txqs)
+ return -ENOMEM;
+
+ pf->avail_rxqs = bitmap_zalloc(pf->max_pf_rxqs, GFP_KERNEL);
+ if (!pf->avail_rxqs) {
+ devm_kfree(&pf->pdev->dev, pf->avail_txqs);
+ pf->avail_txqs = NULL;
+ return -ENOMEM;
+ }
+
+ return 0;
}
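
The avail_txqs/avail_rxqs bitmaps replace fixed-size queue-tracking
arrays and are sized from the device capabilities. A hedged sketch of
how a queue index would be claimed from such a bitmap (the driver's
actual allocator lives elsewhere in ice_lib.c):

	unsigned long qid;

	mutex_lock(&pf->avail_q_mutex);
	qid = find_first_zero_bit(pf->avail_txqs, pf->max_pf_txqs);
	if (qid < pf->max_pf_txqs)
		set_bit(qid, pf->avail_txqs);	/* claim the queue */
	mutex_unlock(&pf->avail_q_mutex);
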
/**
@@ -2137,13 +2281,18 @@ static int ice_ena_msix_range(struct ice_pf *pf)
/* reserve one vector for miscellaneous handler */
needed = 1;
+ if (v_left < needed)
+ goto no_hw_vecs_left_err;
v_budget += needed;
v_left -= needed;
/* reserve vectors for LAN traffic */
- pf->num_lan_msix = min_t(int, num_online_cpus(), v_left);
- v_budget += pf->num_lan_msix;
- v_left -= pf->num_lan_msix;
+ needed = min_t(int, num_online_cpus(), v_left);
+ if (v_left < needed)
+ goto no_hw_vecs_left_err;
+ pf->num_lan_msix = needed;
+ v_budget += needed;
+ v_left -= needed;
pf->msix_entries = devm_kcalloc(&pf->pdev->dev, v_budget,
sizeof(*pf->msix_entries), GFP_KERNEL);
@@ -2168,18 +2317,18 @@ static int ice_ena_msix_range(struct ice_pf *pf)
if (v_actual < v_budget) {
dev_warn(&pf->pdev->dev,
- "not enough vectors. requested = %d, obtained = %d\n",
+ "not enough OS MSI-X vectors. requested = %d, obtained = %d\n",
v_budget, v_actual);
- if (v_actual >= (pf->num_lan_msix + 1)) {
- pf->num_avail_sw_msix = v_actual -
- (pf->num_lan_msix + 1);
- } else if (v_actual >= 2) {
- pf->num_lan_msix = 1;
- pf->num_avail_sw_msix = v_actual - 2;
- } else {
+/* 2 vectors for LAN (traffic + OICR) */
+#define ICE_MIN_LAN_VECS 2
+
+ if (v_actual < ICE_MIN_LAN_VECS) {
+ /* error if we can't get minimum vectors */
pci_disable_msix(pf->pdev);
err = -ERANGE;
goto msix_err;
+ } else {
+ pf->num_lan_msix = ICE_MIN_LAN_VECS;
}
}
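
A worked example of the new fallback, assuming 8 online CPUs:

	/* budget  = 1 (misc/OICR) + 8 (LAN, one per CPU) = 9
	 * granted = 4  -> warn, fall back to pf->num_lan_msix = 2
	 * granted = 1  -> below ICE_MIN_LAN_VECS: pci_disable_msix(),
	 *                 return -ERANGE
	 */
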
@@ -2189,9 +2338,13 @@ msix_err:
devm_kfree(&pf->pdev->dev, pf->msix_entries);
goto exit_err;
+no_hw_vecs_left_err:
+ dev_err(&pf->pdev->dev,
+ "not enough device MSI-X vectors. requested = %d, available = %d\n",
+ needed, v_left);
+ err = -ERANGE;
exit_err:
pf->num_lan_msix = 0;
- clear_bit(ICE_FLAG_MSIX_ENA, pf->flags);
return err;
}
@@ -2204,7 +2357,6 @@ static void ice_dis_msix(struct ice_pf *pf)
pci_disable_msix(pf->pdev);
devm_kfree(&pf->pdev->dev, pf->msix_entries);
pf->msix_entries = NULL;
- clear_bit(ICE_FLAG_MSIX_ENA, pf->flags);
}
/**
@@ -2213,8 +2365,7 @@ static void ice_dis_msix(struct ice_pf *pf)
*/
static void ice_clear_interrupt_scheme(struct ice_pf *pf)
{
- if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags))
- ice_dis_msix(pf);
+ ice_dis_msix(pf);
if (pf->irq_tracker) {
devm_kfree(&pf->pdev->dev, pf->irq_tracker);
@@ -2230,10 +2381,7 @@ static int ice_init_interrupt_scheme(struct ice_pf *pf)
{
int vectors;
- if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags))
- vectors = ice_ena_msix_range(pf);
- else
- return -ENODEV;
+ vectors = ice_ena_msix_range(pf);
if (vectors < 0)
return vectors;
@@ -2349,7 +2497,11 @@ ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent)
hw->fw_maj_ver, hw->fw_min_ver, hw->fw_build,
hw->api_maj_ver, hw->api_min_ver);
- ice_init_pf(pf);
+ err = ice_init_pf(pf);
+ if (err) {
+ dev_err(dev, "ice_init_pf failed: %d\n", err);
+ goto err_init_pf_unroll;
+ }
err = ice_init_pf_dcb(pf, false);
if (err) {
@@ -2390,12 +2542,10 @@ ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent)
* the misc functionality and queue processing is combined in
* the same vector and that gets setup at open.
*/
- if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags)) {
- err = ice_req_irq_msix_misc(pf);
- if (err) {
- dev_err(dev, "setup of misc vector failed: %d\n", err);
- goto err_init_interrupt_unroll;
- }
+ err = ice_req_irq_msix_misc(pf);
+ if (err) {
+ dev_err(dev, "setup of misc vector failed: %d\n", err);
+ goto err_init_interrupt_unroll;
}
/* create switch struct for the switch element created by FW on boot */
@@ -2483,9 +2633,9 @@ static void ice_remove(struct pci_dev *pdev)
continue;
ice_vsi_free_q_vectors(pf->vsi[i]);
}
- ice_clear_interrupt_scheme(pf);
ice_deinit_pf(pf);
ice_deinit_hw(&pf->hw);
+ ice_clear_interrupt_scheme(pf);
pci_disable_pcie_error_reporting(pdev);
}
@@ -2711,10 +2861,8 @@ static int ice_set_mac_address(struct net_device *netdev, void *pi)
struct ice_hw *hw = &pf->hw;
struct sockaddr *addr = pi;
enum ice_status status;
- LIST_HEAD(a_mac_list);
- LIST_HEAD(r_mac_list);
u8 flags = 0;
- int err;
+ int err = 0;
u8 *mac;
mac = (u8 *)addr->sa_data;
@@ -2737,42 +2885,23 @@ static int ice_set_mac_address(struct net_device *netdev, void *pi)
/* When we change the MAC address we also have to change the MAC address
* based filter rules that were created previously for the old MAC
* address. So first, we remove the old filter rule using ice_remove_mac
- * and then create a new filter rule using ice_add_mac. Note that for
- * both these operations, we first need to form a "list" of MAC
- * addresses (even though in this case, we have only 1 MAC address to be
- * added/removed) and this done using ice_add_mac_to_list. Depending on
- * the ensuing operation this "list" of MAC addresses is either to be
- * added or removed from the filter.
+ * and then create a new filter rule using ice_add_mac. Both
+ * operations now go through the ice_vsi_cfg_mac_fltr() helper.
*/
- err = ice_add_mac_to_list(vsi, &r_mac_list, netdev->dev_addr);
- if (err) {
- err = -EADDRNOTAVAIL;
- goto free_lists;
- }
-
- status = ice_remove_mac(hw, &r_mac_list);
+ status = ice_vsi_cfg_mac_fltr(vsi, netdev->dev_addr, false);
if (status) {
err = -EADDRNOTAVAIL;
- goto free_lists;
+ goto err_update_filters;
}
- err = ice_add_mac_to_list(vsi, &a_mac_list, mac);
- if (err) {
- err = -EADDRNOTAVAIL;
- goto free_lists;
- }
-
- status = ice_add_mac(hw, &a_mac_list);
+ status = ice_vsi_cfg_mac_fltr(vsi, mac, true);
if (status) {
err = -EADDRNOTAVAIL;
- goto free_lists;
+ goto err_update_filters;
}
-free_lists:
- /* free list entries */
- ice_free_fltr_list(&pf->pdev->dev, &r_mac_list);
- ice_free_fltr_list(&pf->pdev->dev, &a_mac_list);
-
+err_update_filters:
if (err) {
netdev_err(netdev, "can't set MAC %pM. filter update failed\n",
mac);
@@ -2788,8 +2917,8 @@ free_lists:
flags = ICE_AQC_MAN_MAC_UPDATE_LAA_WOL;
status = ice_aq_manage_mac_write(hw, mac, flags, NULL);
if (status) {
- netdev_err(netdev, "can't set MAC %pM. write to firmware failed.\n",
- mac);
+ netdev_err(netdev, "can't set MAC %pM. write to firmware failed error %d\n",
+ mac, status);
}
return 0;
}
@@ -3008,10 +3137,7 @@ static int ice_up_complete(struct ice_vsi *vsi)
struct ice_pf *pf = vsi->back;
int err;
- if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags))
- ice_vsi_cfg_msix(vsi);
- else
- return -ENOTSUPP;
+ ice_vsi_cfg_msix(vsi);
/* Enable only Rx rings, Tx rings were enabled by the FW when the
* Tx queue group list was configured and the context bits were
@@ -3132,7 +3258,7 @@ static void ice_update_vsi_ring_stats(struct ice_vsi *vsi)
* ice_update_vsi_stats - Update VSI stats counters
* @vsi: the VSI to be updated
*/
-static void ice_update_vsi_stats(struct ice_vsi *vsi)
+void ice_update_vsi_stats(struct ice_vsi *vsi)
{
struct rtnl_link_stats64 *cur_ns = &vsi->net_stats;
struct ice_eth_stats *cur_es = &vsi->eth_stats;
@@ -3159,6 +3285,8 @@ static void ice_update_vsi_stats(struct ice_vsi *vsi)
cur_ns->rx_errors = pf->stats.crc_errors +
pf->stats.illegal_bytes;
cur_ns->rx_length_errors = pf->stats.rx_len_errors;
+ /* record drops from the port level */
+ cur_ns->rx_missed_errors = pf->stats.eth.rx_discards;
}
}
@@ -3166,149 +3294,139 @@ static void ice_update_vsi_stats(struct ice_vsi *vsi)
* ice_update_pf_stats - Update PF port stats counters
* @pf: PF whose stats needs to be updated
*/
-static void ice_update_pf_stats(struct ice_pf *pf)
+void ice_update_pf_stats(struct ice_pf *pf)
{
struct ice_hw_port_stats *prev_ps, *cur_ps;
struct ice_hw *hw = &pf->hw;
- u8 pf_id;
+ u8 port;
+ port = hw->port_info->lport;
prev_ps = &pf->stats_prev;
cur_ps = &pf->stats;
- pf_id = hw->pf_id;
- ice_stat_update40(hw, GLPRT_GORCH(pf_id), GLPRT_GORCL(pf_id),
- pf->stat_prev_loaded, &prev_ps->eth.rx_bytes,
+ ice_stat_update40(hw, GLPRT_GORCL(port), pf->stat_prev_loaded,
+ &prev_ps->eth.rx_bytes,
&cur_ps->eth.rx_bytes);
- ice_stat_update40(hw, GLPRT_UPRCH(pf_id), GLPRT_UPRCL(pf_id),
- pf->stat_prev_loaded, &prev_ps->eth.rx_unicast,
+ ice_stat_update40(hw, GLPRT_UPRCL(port), pf->stat_prev_loaded,
+ &prev_ps->eth.rx_unicast,
&cur_ps->eth.rx_unicast);
- ice_stat_update40(hw, GLPRT_MPRCH(pf_id), GLPRT_MPRCL(pf_id),
- pf->stat_prev_loaded, &prev_ps->eth.rx_multicast,
+ ice_stat_update40(hw, GLPRT_MPRCL(port), pf->stat_prev_loaded,
+ &prev_ps->eth.rx_multicast,
&cur_ps->eth.rx_multicast);
- ice_stat_update40(hw, GLPRT_BPRCH(pf_id), GLPRT_BPRCL(pf_id),
- pf->stat_prev_loaded, &prev_ps->eth.rx_broadcast,
+ ice_stat_update40(hw, GLPRT_BPRCL(port), pf->stat_prev_loaded,
+ &prev_ps->eth.rx_broadcast,
&cur_ps->eth.rx_broadcast);
- ice_stat_update40(hw, GLPRT_GOTCH(pf_id), GLPRT_GOTCL(pf_id),
- pf->stat_prev_loaded, &prev_ps->eth.tx_bytes,
+ ice_stat_update32(hw, PRTRPB_RDPC, pf->stat_prev_loaded,
+ &prev_ps->eth.rx_discards,
+ &cur_ps->eth.rx_discards);
+
+ ice_stat_update40(hw, GLPRT_GOTCL(port), pf->stat_prev_loaded,
+ &prev_ps->eth.tx_bytes,
&cur_ps->eth.tx_bytes);
- ice_stat_update40(hw, GLPRT_UPTCH(pf_id), GLPRT_UPTCL(pf_id),
- pf->stat_prev_loaded, &prev_ps->eth.tx_unicast,
+ ice_stat_update40(hw, GLPRT_UPTCL(port), pf->stat_prev_loaded,
+ &prev_ps->eth.tx_unicast,
&cur_ps->eth.tx_unicast);
- ice_stat_update40(hw, GLPRT_MPTCH(pf_id), GLPRT_MPTCL(pf_id),
- pf->stat_prev_loaded, &prev_ps->eth.tx_multicast,
+ ice_stat_update40(hw, GLPRT_MPTCL(port), pf->stat_prev_loaded,
+ &prev_ps->eth.tx_multicast,
&cur_ps->eth.tx_multicast);
- ice_stat_update40(hw, GLPRT_BPTCH(pf_id), GLPRT_BPTCL(pf_id),
- pf->stat_prev_loaded, &prev_ps->eth.tx_broadcast,
+ ice_stat_update40(hw, GLPRT_BPTCL(port), pf->stat_prev_loaded,
+ &prev_ps->eth.tx_broadcast,
&cur_ps->eth.tx_broadcast);
- ice_stat_update32(hw, GLPRT_TDOLD(pf_id), pf->stat_prev_loaded,
+ ice_stat_update32(hw, GLPRT_TDOLD(port), pf->stat_prev_loaded,
&prev_ps->tx_dropped_link_down,
&cur_ps->tx_dropped_link_down);
- ice_stat_update40(hw, GLPRT_PRC64H(pf_id), GLPRT_PRC64L(pf_id),
- pf->stat_prev_loaded, &prev_ps->rx_size_64,
- &cur_ps->rx_size_64);
+ ice_stat_update40(hw, GLPRT_PRC64L(port), pf->stat_prev_loaded,
+ &prev_ps->rx_size_64, &cur_ps->rx_size_64);
- ice_stat_update40(hw, GLPRT_PRC127H(pf_id), GLPRT_PRC127L(pf_id),
- pf->stat_prev_loaded, &prev_ps->rx_size_127,
- &cur_ps->rx_size_127);
+ ice_stat_update40(hw, GLPRT_PRC127L(port), pf->stat_prev_loaded,
+ &prev_ps->rx_size_127, &cur_ps->rx_size_127);
- ice_stat_update40(hw, GLPRT_PRC255H(pf_id), GLPRT_PRC255L(pf_id),
- pf->stat_prev_loaded, &prev_ps->rx_size_255,
- &cur_ps->rx_size_255);
+ ice_stat_update40(hw, GLPRT_PRC255L(port), pf->stat_prev_loaded,
+ &prev_ps->rx_size_255, &cur_ps->rx_size_255);
- ice_stat_update40(hw, GLPRT_PRC511H(pf_id), GLPRT_PRC511L(pf_id),
- pf->stat_prev_loaded, &prev_ps->rx_size_511,
- &cur_ps->rx_size_511);
+ ice_stat_update40(hw, GLPRT_PRC511L(port), pf->stat_prev_loaded,
+ &prev_ps->rx_size_511, &cur_ps->rx_size_511);
- ice_stat_update40(hw, GLPRT_PRC1023H(pf_id),
- GLPRT_PRC1023L(pf_id), pf->stat_prev_loaded,
+ ice_stat_update40(hw, GLPRT_PRC1023L(port), pf->stat_prev_loaded,
&prev_ps->rx_size_1023, &cur_ps->rx_size_1023);
- ice_stat_update40(hw, GLPRT_PRC1522H(pf_id),
- GLPRT_PRC1522L(pf_id), pf->stat_prev_loaded,
+ ice_stat_update40(hw, GLPRT_PRC1522L(port), pf->stat_prev_loaded,
&prev_ps->rx_size_1522, &cur_ps->rx_size_1522);
- ice_stat_update40(hw, GLPRT_PRC9522H(pf_id),
- GLPRT_PRC9522L(pf_id), pf->stat_prev_loaded,
+ ice_stat_update40(hw, GLPRT_PRC9522L(port), pf->stat_prev_loaded,
&prev_ps->rx_size_big, &cur_ps->rx_size_big);
- ice_stat_update40(hw, GLPRT_PTC64H(pf_id), GLPRT_PTC64L(pf_id),
- pf->stat_prev_loaded, &prev_ps->tx_size_64,
- &cur_ps->tx_size_64);
+ ice_stat_update40(hw, GLPRT_PTC64L(port), pf->stat_prev_loaded,
+ &prev_ps->tx_size_64, &cur_ps->tx_size_64);
- ice_stat_update40(hw, GLPRT_PTC127H(pf_id), GLPRT_PTC127L(pf_id),
- pf->stat_prev_loaded, &prev_ps->tx_size_127,
- &cur_ps->tx_size_127);
+ ice_stat_update40(hw, GLPRT_PTC127L(port), pf->stat_prev_loaded,
+ &prev_ps->tx_size_127, &cur_ps->tx_size_127);
- ice_stat_update40(hw, GLPRT_PTC255H(pf_id), GLPRT_PTC255L(pf_id),
- pf->stat_prev_loaded, &prev_ps->tx_size_255,
- &cur_ps->tx_size_255);
+ ice_stat_update40(hw, GLPRT_PTC255L(port), pf->stat_prev_loaded,
+ &prev_ps->tx_size_255, &cur_ps->tx_size_255);
- ice_stat_update40(hw, GLPRT_PTC511H(pf_id), GLPRT_PTC511L(pf_id),
- pf->stat_prev_loaded, &prev_ps->tx_size_511,
- &cur_ps->tx_size_511);
+ ice_stat_update40(hw, GLPRT_PTC511L(port), pf->stat_prev_loaded,
+ &prev_ps->tx_size_511, &cur_ps->tx_size_511);
- ice_stat_update40(hw, GLPRT_PTC1023H(pf_id),
- GLPRT_PTC1023L(pf_id), pf->stat_prev_loaded,
+ ice_stat_update40(hw, GLPRT_PTC1023L(port), pf->stat_prev_loaded,
&prev_ps->tx_size_1023, &cur_ps->tx_size_1023);
- ice_stat_update40(hw, GLPRT_PTC1522H(pf_id),
- GLPRT_PTC1522L(pf_id), pf->stat_prev_loaded,
+ ice_stat_update40(hw, GLPRT_PTC1522L(port), pf->stat_prev_loaded,
&prev_ps->tx_size_1522, &cur_ps->tx_size_1522);
- ice_stat_update40(hw, GLPRT_PTC9522H(pf_id),
- GLPRT_PTC9522L(pf_id), pf->stat_prev_loaded,
+ ice_stat_update40(hw, GLPRT_PTC9522L(port), pf->stat_prev_loaded,
&prev_ps->tx_size_big, &cur_ps->tx_size_big);
- ice_stat_update32(hw, GLPRT_LXONRXC(pf_id), pf->stat_prev_loaded,
+ ice_stat_update32(hw, GLPRT_LXONRXC(port), pf->stat_prev_loaded,
&prev_ps->link_xon_rx, &cur_ps->link_xon_rx);
- ice_stat_update32(hw, GLPRT_LXOFFRXC(pf_id), pf->stat_prev_loaded,
+ ice_stat_update32(hw, GLPRT_LXOFFRXC(port), pf->stat_prev_loaded,
&prev_ps->link_xoff_rx, &cur_ps->link_xoff_rx);
- ice_stat_update32(hw, GLPRT_LXONTXC(pf_id), pf->stat_prev_loaded,
+ ice_stat_update32(hw, GLPRT_LXONTXC(port), pf->stat_prev_loaded,
&prev_ps->link_xon_tx, &cur_ps->link_xon_tx);
- ice_stat_update32(hw, GLPRT_LXOFFTXC(pf_id), pf->stat_prev_loaded,
+ ice_stat_update32(hw, GLPRT_LXOFFTXC(port), pf->stat_prev_loaded,
&prev_ps->link_xoff_tx, &cur_ps->link_xoff_tx);
ice_update_dcb_stats(pf);
- ice_stat_update32(hw, GLPRT_CRCERRS(pf_id), pf->stat_prev_loaded,
+ ice_stat_update32(hw, GLPRT_CRCERRS(port), pf->stat_prev_loaded,
&prev_ps->crc_errors, &cur_ps->crc_errors);
- ice_stat_update32(hw, GLPRT_ILLERRC(pf_id), pf->stat_prev_loaded,
+ ice_stat_update32(hw, GLPRT_ILLERRC(port), pf->stat_prev_loaded,
&prev_ps->illegal_bytes, &cur_ps->illegal_bytes);
- ice_stat_update32(hw, GLPRT_MLFC(pf_id), pf->stat_prev_loaded,
+ ice_stat_update32(hw, GLPRT_MLFC(port), pf->stat_prev_loaded,
&prev_ps->mac_local_faults,
&cur_ps->mac_local_faults);
- ice_stat_update32(hw, GLPRT_MRFC(pf_id), pf->stat_prev_loaded,
+ ice_stat_update32(hw, GLPRT_MRFC(port), pf->stat_prev_loaded,
&prev_ps->mac_remote_faults,
&cur_ps->mac_remote_faults);
- ice_stat_update32(hw, GLPRT_RLEC(pf_id), pf->stat_prev_loaded,
+ ice_stat_update32(hw, GLPRT_RLEC(port), pf->stat_prev_loaded,
&prev_ps->rx_len_errors, &cur_ps->rx_len_errors);
- ice_stat_update32(hw, GLPRT_RUC(pf_id), pf->stat_prev_loaded,
+ ice_stat_update32(hw, GLPRT_RUC(port), pf->stat_prev_loaded,
&prev_ps->rx_undersize, &cur_ps->rx_undersize);
- ice_stat_update32(hw, GLPRT_RFC(pf_id), pf->stat_prev_loaded,
+ ice_stat_update32(hw, GLPRT_RFC(port), pf->stat_prev_loaded,
&prev_ps->rx_fragments, &cur_ps->rx_fragments);
- ice_stat_update32(hw, GLPRT_ROC(pf_id), pf->stat_prev_loaded,
+ ice_stat_update32(hw, GLPRT_ROC(port), pf->stat_prev_loaded,
&prev_ps->rx_oversize, &cur_ps->rx_oversize);
- ice_stat_update32(hw, GLPRT_RJC(pf_id), pf->stat_prev_loaded,
+ ice_stat_update32(hw, GLPRT_RJC(port), pf->stat_prev_loaded,
&prev_ps->rx_jabber, &cur_ps->rx_jabber);
pf->stat_prev_loaded = true;
@@ -3372,85 +3490,6 @@ static void ice_napi_disable_all(struct ice_vsi *vsi)
}
/**
- * ice_force_phys_link_state - Force the physical link state
- * @vsi: VSI to force the physical link state to up/down
- * @link_up: true/false indicates to set the physical link to up/down
- *
- * Force the physical link state by getting the current PHY capabilities from
- * hardware and setting the PHY config based on the determined capabilities. If
- * link changes a link event will be triggered because both the Enable Automatic
- * Link Update and LESM Enable bits are set when setting the PHY capabilities.
- *
- * Returns 0 on success, negative on failure
- */
-static int ice_force_phys_link_state(struct ice_vsi *vsi, bool link_up)
-{
- struct ice_aqc_get_phy_caps_data *pcaps;
- struct ice_aqc_set_phy_cfg_data *cfg;
- struct ice_port_info *pi;
- struct device *dev;
- int retcode;
-
- if (!vsi || !vsi->port_info || !vsi->back)
- return -EINVAL;
- if (vsi->type != ICE_VSI_PF)
- return 0;
-
- dev = &vsi->back->pdev->dev;
-
- pi = vsi->port_info;
-
- pcaps = devm_kzalloc(dev, sizeof(*pcaps), GFP_KERNEL);
- if (!pcaps)
- return -ENOMEM;
-
- retcode = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_SW_CFG, pcaps,
- NULL);
- if (retcode) {
- dev_err(dev,
- "Failed to get phy capabilities, VSI %d error %d\n",
- vsi->vsi_num, retcode);
- retcode = -EIO;
- goto out;
- }
-
- /* No change in link */
- if (link_up == !!(pcaps->caps & ICE_AQC_PHY_EN_LINK) &&
- link_up == !!(pi->phy.link_info.link_info & ICE_AQ_LINK_UP))
- goto out;
-
- cfg = devm_kzalloc(dev, sizeof(*cfg), GFP_KERNEL);
- if (!cfg) {
- retcode = -ENOMEM;
- goto out;
- }
-
- cfg->phy_type_low = pcaps->phy_type_low;
- cfg->phy_type_high = pcaps->phy_type_high;
- cfg->caps = pcaps->caps | ICE_AQ_PHY_ENA_AUTO_LINK_UPDT;
- cfg->low_power_ctrl = pcaps->low_power_ctrl;
- cfg->eee_cap = pcaps->eee_cap;
- cfg->eeer_value = pcaps->eeer_value;
- cfg->link_fec_opt = pcaps->link_fec_options;
- if (link_up)
- cfg->caps |= ICE_AQ_PHY_ENA_LINK;
- else
- cfg->caps &= ~ICE_AQ_PHY_ENA_LINK;
-
- retcode = ice_aq_set_phy_cfg(&vsi->back->hw, pi->lport, cfg, NULL);
- if (retcode) {
- dev_err(dev, "Failed to set phy config, VSI %d error %d\n",
- vsi->vsi_num, retcode);
- retcode = -EIO;
- }
-
- devm_kfree(dev, cfg);
-out:
- devm_kfree(dev, pcaps);
- return retcode;
-}
-
-/**
* ice_down - Shutdown the connection
* @vsi: The VSI being stopped
*/
@@ -3559,24 +3598,6 @@ int ice_vsi_setup_rx_rings(struct ice_vsi *vsi)
}
/**
- * ice_vsi_req_irq - Request IRQ from the OS
- * @vsi: The VSI IRQ is being requested for
- * @basename: name for the vector
- *
- * Return 0 on success and a negative value on error
- */
-static int ice_vsi_req_irq(struct ice_vsi *vsi, char *basename)
-{
- struct ice_pf *pf = vsi->back;
- int err = -EINVAL;
-
- if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags))
- err = ice_vsi_req_irq_msix(vsi, basename);
-
- return err;
-}
-
-/**
* ice_vsi_open - Called when a network interface is made active
* @vsi: the VSI to open
*
@@ -3605,7 +3626,7 @@ static int ice_vsi_open(struct ice_vsi *vsi)
snprintf(int_name, sizeof(int_name) - 1, "%s-%s",
dev_driver_string(&pf->pdev->dev), vsi->netdev->name);
- err = ice_vsi_req_irq(vsi, int_name);
+ err = ice_vsi_req_irq_msix(vsi, int_name);
if (err)
goto err_setup_rx;
@@ -3669,23 +3690,19 @@ static int ice_ena_vsi(struct ice_vsi *vsi, bool locked)
int err = 0;
if (!test_bit(__ICE_NEEDS_RESTART, vsi->state))
- return err;
+ return 0;
clear_bit(__ICE_NEEDS_RESTART, vsi->state);
if (vsi->netdev && vsi->type == ICE_VSI_PF) {
- struct net_device *netd = vsi->netdev;
-
if (netif_running(vsi->netdev)) {
- if (locked) {
- err = netd->netdev_ops->ndo_open(netd);
- } else {
+ if (!locked)
rtnl_lock();
- err = netd->netdev_ops->ndo_open(netd);
+
+ err = ice_open(vsi->netdev);
+
+ if (!locked)
rtnl_unlock();
- }
- } else {
- err = ice_vsi_open(vsi);
}
}
@@ -3723,22 +3740,23 @@ static int ice_vsi_rebuild_all(struct ice_pf *pf)
/* loop through pf->vsi array and reinit the VSI if found */
ice_for_each_vsi(pf, i) {
+ struct ice_vsi *vsi = pf->vsi[i];
int err;
- if (!pf->vsi[i])
+ if (!vsi)
continue;
- err = ice_vsi_rebuild(pf->vsi[i]);
+ err = ice_vsi_rebuild(vsi);
if (err) {
dev_err(&pf->pdev->dev,
"VSI at index %d rebuild failed\n",
- pf->vsi[i]->idx);
+ vsi->idx);
return err;
}
dev_info(&pf->pdev->dev,
"VSI at index %d rebuilt. vsi_num = 0x%x\n",
- pf->vsi[i]->idx, pf->vsi[i]->vsi_num);
+ vsi->idx, vsi->vsi_num);
}
return 0;
@@ -3756,25 +3774,27 @@ static int ice_vsi_replay_all(struct ice_pf *pf)
/* loop through pf->vsi array and replay the VSI if found */
ice_for_each_vsi(pf, i) {
- if (!pf->vsi[i])
+ struct ice_vsi *vsi = pf->vsi[i];
+
+ if (!vsi)
continue;
- ret = ice_replay_vsi(hw, pf->vsi[i]->idx);
+ ret = ice_replay_vsi(hw, vsi->idx);
if (ret) {
dev_err(&pf->pdev->dev,
"VSI at index %d replay failed %d\n",
- pf->vsi[i]->idx, ret);
+ vsi->idx, ret);
return -EIO;
}
/* Re-map HW VSI number, using VSI handle that has been
* previously validated in ice_replay_vsi() call above
*/
- pf->vsi[i]->vsi_num = ice_get_hw_vsi_num(hw, pf->vsi[i]->idx);
+ vsi->vsi_num = ice_get_hw_vsi_num(hw, vsi->idx);
dev_info(&pf->pdev->dev,
"VSI at index %d filter replayed successfully - vsi_num %i\n",
- pf->vsi[i]->idx, pf->vsi[i]->vsi_num);
+ vsi->idx, vsi->vsi_num);
}
/* Clean up replay filter after successful re-configuration */
@@ -3842,12 +3862,10 @@ static void ice_rebuild(struct ice_pf *pf)
}
/* start misc vector */
- if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags)) {
- err = ice_req_irq_msix_misc(pf);
- if (err) {
- dev_err(dev, "misc vector setup failed: %d\n", err);
- goto err_vsi_rebuild;
- }
+ err = ice_req_irq_msix_misc(pf);
+ if (err) {
+ dev_err(dev, "misc vector setup failed: %d\n", err);
+ goto err_vsi_rebuild;
}
/* restart the VSIs that were rebuilt and running before the reset */
@@ -4244,9 +4262,7 @@ static void ice_tx_timeout(struct net_device *netdev)
head = (rd32(hw, QTX_COMM_HEAD(vsi->txq_map[hung_queue])) &
QTX_COMM_HEAD_HEAD_M) >> QTX_COMM_HEAD_HEAD_S;
/* Read interrupt register */
- if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags))
- val = rd32(hw,
- GLINT_DYN_CTL(tx_ring->q_vector->reg_idx));
+ val = rd32(hw, GLINT_DYN_CTL(tx_ring->q_vector->reg_idx));
netdev_info(netdev, "tx_timeout: VSI_num: %d, Q %d, NTC: 0x%x, HW_HEAD: 0x%x, NTU: 0x%x, INT: 0x%x\n",
vsi->vsi_num, hung_queue, tx_ring->next_to_clean,
@@ -4295,6 +4311,7 @@ int ice_open(struct net_device *netdev)
{
struct ice_netdev_priv *np = netdev_priv(netdev);
struct ice_vsi *vsi = np->vsi;
+ struct ice_port_info *pi;
int err;
if (test_bit(__ICE_NEEDS_RESTART, vsi->back->state)) {
@@ -4304,13 +4321,33 @@ int ice_open(struct net_device *netdev)
netif_carrier_off(netdev);
- err = ice_force_phys_link_state(vsi, true);
+ pi = vsi->port_info;
+ err = ice_update_link_info(pi);
if (err) {
- netdev_err(netdev,
- "Failed to set physical link up, error %d\n", err);
+ netdev_err(netdev, "Failed to get link info, error %d\n",
+ err);
return err;
}
+ /* Set PHY if there is media, otherwise, turn off PHY */
+ if (pi->phy.link_info.link_info & ICE_AQ_MEDIA_AVAILABLE) {
+ err = ice_force_phys_link_state(vsi, true);
+ if (err) {
+ netdev_err(netdev,
+ "Failed to set physical link up, error %d\n",
+ err);
+ return err;
+ }
+ } else {
+ err = ice_aq_set_link_restart_an(pi, false, NULL);
+ if (err) {
+ netdev_err(netdev, "Failed to set PHY state, VSI %d error %d\n",
+ vsi->vsi_num, err);
+ return err;
+ }
+ set_bit(ICE_FLAG_NO_MEDIA, vsi->back->flags);
+ }
+
err = ice_vsi_open(vsi);
if (err)
netdev_err(netdev, "Failed to open VSI 0x%04X on switch 0x%04X\n",
diff --git a/drivers/net/ethernet/intel/ice/ice_sched.c b/drivers/net/ethernet/intel/ice/ice_sched.c
index 2a232504379d..79d64f9ed609 100644
--- a/drivers/net/ethernet/intel/ice/ice_sched.c
+++ b/drivers/net/ethernet/intel/ice/ice_sched.c
@@ -260,33 +260,17 @@ ice_sched_remove_elems(struct ice_hw *hw, struct ice_sched_node *parent,
/**
* ice_sched_get_first_node - get the first node of the given layer
- * @hw: pointer to the HW struct
+ * @pi: port information structure
* @parent: pointer the base node of the subtree
* @layer: layer number
*
* This function retrieves the first node of the given layer from the subtree
*/
static struct ice_sched_node *
-ice_sched_get_first_node(struct ice_hw *hw, struct ice_sched_node *parent,
- u8 layer)
+ice_sched_get_first_node(struct ice_port_info *pi,
+ struct ice_sched_node *parent, u8 layer)
{
- u8 i;
-
- if (layer < hw->sw_entry_point_layer)
- return NULL;
- for (i = 0; i < parent->num_children; i++) {
- struct ice_sched_node *node = parent->children[i];
-
- if (node) {
- if (node->tx_sched_layer == layer)
- return node;
- /* this recursion is intentional, and wouldn't
- * go more than 9 calls
- */
- return ice_sched_get_first_node(hw, node, layer);
- }
- }
- return NULL;
+ return pi->sib_head[parent->tc_num][layer];
}
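
With the per-port sib_head[tc][layer] table the first node of a layer is
found in O(1) instead of by the old recursive scan, and call sites walk
the remaining siblings through the ->sibling links, a pattern of the
form:

	struct ice_sched_node *node;

	for (node = ice_sched_get_first_node(pi, tc_node, layer);
	     node; node = node->sibling) {
		/* visit node */
	}
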
/**
@@ -342,7 +326,7 @@ void ice_free_sched_node(struct ice_port_info *pi, struct ice_sched_node *node)
parent = node->parent;
/* root has no parent */
if (parent) {
- struct ice_sched_node *p, *tc_node;
+ struct ice_sched_node *p;
/* update the parent */
for (i = 0; i < parent->num_children; i++)
@@ -354,16 +338,7 @@ void ice_free_sched_node(struct ice_port_info *pi, struct ice_sched_node *node)
break;
}
- /* search for previous sibling that points to this node and
- * remove the reference
- */
- tc_node = ice_sched_get_tc_node(pi, node->tc_num);
- if (!tc_node) {
- ice_debug(hw, ICE_DBG_SCHED,
- "Invalid TC number %d\n", node->tc_num);
- goto err_exit;
- }
- p = ice_sched_get_first_node(hw, tc_node, node->tx_sched_layer);
+ p = ice_sched_get_first_node(pi, node, node->tx_sched_layer);
while (p) {
if (p->sibling == node) {
p->sibling = node->sibling;
@@ -371,8 +346,13 @@ void ice_free_sched_node(struct ice_port_info *pi, struct ice_sched_node *node)
}
p = p->sibling;
}
+
+ /* update the sibling head if head is getting removed */
+ if (pi->sib_head[node->tc_num][node->tx_sched_layer] == node)
+ pi->sib_head[node->tc_num][node->tx_sched_layer] =
+ node->sibling;
}
-err_exit:
+
/* leaf nodes have no children */
if (node->children)
devm_kfree(ice_hw_to_dev(hw), node->children);
@@ -743,13 +723,17 @@ ice_sched_add_elems(struct ice_port_info *pi, struct ice_sched_node *tc_node,
/* add it to previous node sibling pointer */
/* Note: siblings are not linked across branches */
- prev = ice_sched_get_first_node(hw, tc_node, layer);
+ prev = ice_sched_get_first_node(pi, tc_node, layer);
if (prev && prev != new_node) {
while (prev->sibling)
prev = prev->sibling;
prev->sibling = new_node;
}
+ /* initialize the sibling head */
+ if (!pi->sib_head[tc_node->tc_num][layer])
+ pi->sib_head[tc_node->tc_num][layer] = new_node;
+
if (i == 0)
*first_node_teid = teid;
}
@@ -1160,7 +1144,7 @@ ice_sched_get_free_qparent(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
goto lan_q_exit;
/* get the first queue group node from VSI sub-tree */
- qgrp_node = ice_sched_get_first_node(pi->hw, vsi_node, qgrp_layer);
+ qgrp_node = ice_sched_get_first_node(pi, vsi_node, qgrp_layer);
while (qgrp_node) {
/* make sure the qgroup node is part of the VSI subtree */
if (ice_sched_find_node_in_subtree(pi->hw, vsi_node, qgrp_node))
@@ -1191,7 +1175,7 @@ ice_sched_get_vsi_node(struct ice_hw *hw, struct ice_sched_node *tc_node,
u8 vsi_layer;
vsi_layer = ice_sched_get_vsi_layer(hw);
- node = ice_sched_get_first_node(hw, tc_node, vsi_layer);
+ node = ice_sched_get_first_node(hw->port_info, tc_node, vsi_layer);
/* Check whether it already exists */
while (node) {
@@ -1316,7 +1300,8 @@ ice_sched_calc_vsi_support_nodes(struct ice_hw *hw,
/* If intermediate nodes are reached max children
* then add a new one.
*/
- node = ice_sched_get_first_node(hw, tc_node, (u8)i);
+ node = ice_sched_get_first_node(hw->port_info, tc_node,
+ (u8)i);
/* scan all the siblings */
while (node) {
if (node->num_children < hw->max_children[i])
diff --git a/drivers/net/ethernet/intel/ice/ice_switch.c b/drivers/net/ethernet/intel/ice/ice_switch.c
index 8271fd651725..99cf527d2b1a 100644
--- a/drivers/net/ethernet/intel/ice/ice_switch.c
+++ b/drivers/net/ethernet/intel/ice/ice_switch.c
@@ -2137,6 +2137,38 @@ out:
}
/**
+ * ice_find_ucast_rule_entry - Search for a unicast MAC filter rule entry
+ * @hw: pointer to the hardware structure
+ * @recp_id: lookup type for which the specified rule needs to be searched
+ * @f_info: rule information
+ *
+ * Helper function to search for a unicast rule entry - this is to be used
+ * to remove a unicast MAC filter that is not shared with other VSIs on the
+ * PF switch.
+ *
+ * Returns pointer to entry storing the rule if found
+ */
+static struct ice_fltr_mgmt_list_entry *
+ice_find_ucast_rule_entry(struct ice_hw *hw, u8 recp_id,
+ struct ice_fltr_info *f_info)
+{
+ struct ice_switch_info *sw = hw->switch_info;
+ struct ice_fltr_mgmt_list_entry *list_itr;
+ struct list_head *list_head;
+
+ list_head = &sw->recp_list[recp_id].filt_rules;
+ list_for_each_entry(list_itr, list_head, list_entry) {
+ if (!memcmp(&f_info->l_data, &list_itr->fltr_info.l_data,
+ sizeof(f_info->l_data)) &&
+ f_info->fwd_id.hw_vsi_id ==
+ list_itr->fltr_info.fwd_id.hw_vsi_id &&
+ f_info->flag == list_itr->fltr_info.flag)
+ return list_itr;
+ }
+ return NULL;
+}
+
+/**
* ice_remove_mac - remove a MAC address based filter rule
* @hw: pointer to the hardware structure
* @m_list: list of MAC addresses and forwarding information
@@ -2153,15 +2185,39 @@ enum ice_status
ice_remove_mac(struct ice_hw *hw, struct list_head *m_list)
{
struct ice_fltr_list_entry *list_itr, *tmp;
+ struct mutex *rule_lock; /* Lock to protect filter rule list */
if (!m_list)
return ICE_ERR_PARAM;
+ rule_lock = &hw->switch_info->recp_list[ICE_SW_LKUP_MAC].filt_rule_lock;
list_for_each_entry_safe(list_itr, tmp, m_list, list_entry) {
enum ice_sw_lkup_type l_type = list_itr->fltr_info.lkup_type;
+ u8 *add = &list_itr->fltr_info.l_data.mac.mac_addr[0];
+ u16 vsi_handle;
if (l_type != ICE_SW_LKUP_MAC)
return ICE_ERR_PARAM;
+
+ vsi_handle = list_itr->fltr_info.vsi_handle;
+ if (!ice_is_vsi_valid(hw, vsi_handle))
+ return ICE_ERR_PARAM;
+
+ list_itr->fltr_info.fwd_id.hw_vsi_id =
+ ice_get_hw_vsi_num(hw, vsi_handle);
+ if (is_unicast_ether_addr(add) && !hw->ucast_shared) {
+ /* Don't remove the unicast address that belongs to
+ * another VSI on the switch, since it is not being
+ * shared...
+ */
+ mutex_lock(rule_lock);
+ if (!ice_find_ucast_rule_entry(hw, ICE_SW_LKUP_MAC,
+ &list_itr->fltr_info)) {
+ mutex_unlock(rule_lock);
+ return ICE_ERR_DOES_NOT_EXIST;
+ }
+ mutex_unlock(rule_lock);
+ }
list_itr->status = ice_remove_rule_internal(hw,
ICE_SW_LKUP_MAC,
list_itr);
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.c b/drivers/net/ethernet/intel/ice/ice_txrx.c
index 3c83230434b6..5bf5c179a738 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx.c
+++ b/drivers/net/ethernet/intel/ice/ice_txrx.c
@@ -377,18 +377,28 @@ err:
*/
static void ice_release_rx_desc(struct ice_ring *rx_ring, u32 val)
{
+ u16 prev_ntu = rx_ring->next_to_use;
+
rx_ring->next_to_use = val;
/* update next to alloc since we have filled the ring */
rx_ring->next_to_alloc = val;
- /* Force memory writes to complete before letting h/w
- * know there are new descriptors to fetch. (Only
- * applicable for weak-ordered memory model archs,
- * such as IA-64).
+	/* QRX_TAIL accepts any tail value, but the hardware ignores
+	 * the lower 3 bits, so only bump tail on 8-descriptor
+	 * boundaries. This batches tail writes in intervals of 8, up
+	 * to the budget, depending on the current traffic load.
+	 */
- wmb();
- writel(val, rx_ring->tail);
+ val &= ~0x7;
+ if (prev_ntu != val) {
+ /* Force memory writes to complete before letting h/w
+ * know there are new descriptors to fetch. (Only
+ * applicable for weak-ordered memory model archs,
+ * such as IA-64).
+ */
+ wmb();
+ writel(val, rx_ring->tail);
+ }
}
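
A worked example of the masking, assuming the previous next_to_use was
120:

	/* val = 125: val &= ~0x7 -> 120 == prev_ntu -> no tail write
	 * val = 128: val &= ~0x7 -> 128 != prev_ntu -> wmb() + writel(128)
	 */
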
/**
@@ -445,7 +455,13 @@ ice_alloc_mapped_page(struct ice_ring *rx_ring, struct ice_rx_buf *bi)
* @rx_ring: ring to place buffers on
* @cleaned_count: number of buffers to replace
*
- * Returns false if all allocations were successful, true if any fail
+ * Returns false if all allocations were successful, true if any fail. Returning
+ * true signals to the caller that we didn't replace cleaned_count buffers and
+ * there is more work to do.
+ *
+ * First, try to clean "cleaned_count" Rx buffers. Then refill the cleaned Rx
+ * buffers. Then bump tail at most one time. Grouping like this lets us avoid
+ * multiple tail writes per call.
*/
bool ice_alloc_rx_bufs(struct ice_ring *rx_ring, u16 cleaned_count)
{
@@ -462,8 +478,9 @@ bool ice_alloc_rx_bufs(struct ice_ring *rx_ring, u16 cleaned_count)
bi = &rx_ring->rx_buf[ntu];
do {
+ /* if we fail here, we have work remaining */
if (!ice_alloc_mapped_page(rx_ring, bi))
- goto no_bufs;
+ break;
/* sync the buffer for use by the device */
dma_sync_single_range_for_device(rx_ring->dev, bi->dma,
@@ -494,16 +511,7 @@ bool ice_alloc_rx_bufs(struct ice_ring *rx_ring, u16 cleaned_count)
if (rx_ring->next_to_use != ntu)
ice_release_rx_desc(rx_ring, ntu);
- return false;
-
-no_bufs:
- if (rx_ring->next_to_use != ntu)
- ice_release_rx_desc(rx_ring, ntu);
-
- /* make sure to come back via polling to try again after
- * allocation failure
- */
- return true;
+ return !!cleaned_count;
}
/**
@@ -599,6 +607,8 @@ ice_add_rx_frag(struct ice_rx_buf *rx_buf, struct sk_buff *skb,
unsigned int truesize = ICE_RXBUF_2048;
#endif
+ if (!size)
+ return;
skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buf->page,
rx_buf->page_offset, size, truesize);
@@ -654,6 +664,8 @@ ice_get_rx_buf(struct ice_ring *rx_ring, struct sk_buff **skb,
prefetchw(rx_buf->page);
*skb = rx_buf->skb;
+ if (!size)
+ return rx_buf;
/* we are reusing so sync this buffer for CPU use */
dma_sync_single_range_for_cpu(rx_ring->dev, rx_buf->dma,
rx_buf->page_offset, size,
@@ -737,8 +749,11 @@ ice_construct_skb(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf,
*/
static void ice_put_rx_buf(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf)
{
- /* hand second half of page back to the ring */
+ if (!rx_buf)
+ return;
+
if (ice_can_reuse_rx_page(rx_buf)) {
+ /* hand second half of page back to the ring */
ice_reuse_rx_page(rx_ring, rx_buf);
rx_ring->rx_stats.page_reuse_count++;
} else {
@@ -990,7 +1005,7 @@ static int ice_clean_rx_irq(struct ice_ring *rx_ring, int budget)
{
unsigned int total_rx_bytes = 0, total_rx_pkts = 0;
u16 cleaned_count = ICE_DESC_UNUSED(rx_ring);
- bool failure = false;
+ bool failure;
/* start the loop to process Rx packets bounded by 'budget' */
while (likely(total_rx_pkts < (unsigned int)budget)) {
@@ -1002,13 +1017,6 @@ static int ice_clean_rx_irq(struct ice_ring *rx_ring, int budget)
u16 vlan_tag = 0;
u8 rx_ptype;
- /* return some buffers to hardware, one at a time is too slow */
- if (cleaned_count >= ICE_RX_BUF_WRITE) {
- failure = failure ||
- ice_alloc_rx_bufs(rx_ring, cleaned_count);
- cleaned_count = 0;
- }
-
/* get the Rx desc from Rx ring based on 'next_to_clean' */
rx_desc = ICE_RX_DESC(rx_ring, rx_ring->next_to_clean);
@@ -1030,8 +1038,9 @@ static int ice_clean_rx_irq(struct ice_ring *rx_ring, int budget)
size = le16_to_cpu(rx_desc->wb.pkt_len) &
ICE_RX_FLX_DESC_PKT_LEN_M;
+ /* retrieve a buffer from the ring */
rx_buf = ice_get_rx_buf(rx_ring, &skb, size);
- /* allocate (if needed) and populate skb */
+
if (skb)
ice_add_rx_frag(rx_buf, skb, size);
else
@@ -1040,7 +1049,8 @@ static int ice_clean_rx_irq(struct ice_ring *rx_ring, int budget)
/* exit if we failed to retrieve a buffer */
if (!skb) {
rx_ring->rx_stats.alloc_buf_failed++;
- rx_buf->pagecnt_bias++;
+ if (rx_buf)
+ rx_buf->pagecnt_bias++;
break;
}
@@ -1085,6 +1095,9 @@ static int ice_clean_rx_irq(struct ice_ring *rx_ring, int budget)
total_rx_pkts++;
}
+ /* return up to cleaned_count buffers to hardware */
+ failure = ice_alloc_rx_bufs(rx_ring, cleaned_count);
+
/* update queue and vector specific stats */
u64_stats_update_begin(&rx_ring->syncp);
rx_ring->stats.pkts += total_rx_pkts;
@@ -1351,6 +1364,23 @@ ice_update_ena_itr(struct ice_vsi *vsi, struct ice_q_vector *q_vector)
struct ice_ring_container *rx = &q_vector->rx;
u32 itr_val;
+ /* When exiting WB_ON_ITR, set a low ITR value and trigger
+ * interrupts to expire right away in case we already have more work
+ * ready to go

+ */
+ if (q_vector->itr_countdown == ICE_IN_WB_ON_ITR_MODE) {
+ itr_val = ice_buildreg_itr(rx->itr_idx, ICE_WB_ON_ITR_USECS);
+ wr32(&vsi->back->hw, GLINT_DYN_CTL(q_vector->reg_idx), itr_val);
+ /* set target back to last user set value */
+ rx->target_itr = rx->itr_setting;
+ /* set current to what we just wrote and dynamic if needed */
+ rx->current_itr = ICE_WB_ON_ITR_USECS |
+ (rx->itr_setting & ICE_ITR_DYNAMIC);
+ /* allow normal interrupt flow to start */
+ q_vector->itr_countdown = 0;
+ return;
+ }
+
/* This will do nothing if dynamic updates are not enabled */
ice_update_itr(q_vector, tx);
ice_update_itr(q_vector, rx);
@@ -1396,6 +1426,41 @@ ice_update_ena_itr(struct ice_vsi *vsi, struct ice_q_vector *q_vector)
}
/**
+ * ice_set_wb_on_itr - set WB_ON_ITR for this q_vector
+ * @vsi: pointer to the VSI structure
+ * @q_vector: q_vector to set WB_ON_ITR on
+ *
+ * We need to tell hardware to write back completed descriptors even when
+ * interrupts are disabled. Without WB_ON_ITR, descriptors are written back
+ * only on cache line boundaries, so a partially filled cache line may not
+ * be written back until the next interrupt.
+ *
+ * This sets the write-back frequency to 2 microseconds as that is the minimum
+ * value that's not 0 due to ITR granularity. Also, set the INTENA_MSK bit to
+ * make sure hardware knows we aren't meddling with the INTENA_M bit.
+ */
+static void
+ice_set_wb_on_itr(struct ice_vsi *vsi, struct ice_q_vector *q_vector)
+{
+ /* already in WB_ON_ITR mode; no need to change it */
+ if (q_vector->itr_countdown == ICE_IN_WB_ON_ITR_MODE)
+ return;
+
+ if (q_vector->num_ring_rx)
+ wr32(&vsi->back->hw, GLINT_DYN_CTL(q_vector->reg_idx),
+ ICE_GLINT_DYN_CTL_WB_ON_ITR(ICE_WB_ON_ITR_USECS,
+ ICE_RX_ITR));
+
+ if (q_vector->num_ring_tx)
+ wr32(&vsi->back->hw, GLINT_DYN_CTL(q_vector->reg_idx),
+ ICE_GLINT_DYN_CTL_WB_ON_ITR(ICE_WB_ON_ITR_USECS,
+ ICE_TX_ITR));
+
+ q_vector->itr_countdown = ICE_IN_WB_ON_ITR_MODE;
+}
+
+/**
* ice_napi_poll - NAPI polling Rx/Tx cleanup routine
* @napi: napi struct with our devices info in it
* @budget: amount of work driver is allowed to do this pass, in packets
@@ -1409,10 +1474,9 @@ int ice_napi_poll(struct napi_struct *napi, int budget)
struct ice_q_vector *q_vector =
container_of(napi, struct ice_q_vector, napi);
struct ice_vsi *vsi = q_vector->vsi;
- struct ice_pf *pf = vsi->back;
bool clean_complete = true;
- int budget_per_ring = 0;
struct ice_ring *ring;
+ int budget_per_ring;
int work_done = 0;
/* Since the actual Tx work is minimal, we can give the Tx a larger
@@ -1426,11 +1490,16 @@ int ice_napi_poll(struct napi_struct *napi, int budget)
if (budget <= 0)
return budget;
- /* We attempt to distribute budget to each Rx queue fairly, but don't
- * allow the budget to go below 1 because that would exit polling early.
- */
- if (q_vector->num_ring_rx)
+ /* normally we have 1 Rx ring per q_vector */
+ if (unlikely(q_vector->num_ring_rx > 1))
+ /* We attempt to distribute budget to each Rx queue fairly, but
+ * don't allow the budget to go below 1 because that would exit
+ * polling early.
+ */
budget_per_ring = max(budget / q_vector->num_ring_rx, 1);
+ else
+ /* Max of 1 Rx ring in this q_vector so give it the budget */
+ budget_per_ring = budget;
ice_for_each_ring(ring, q_vector->rx) {
int cleaned;
@@ -1450,8 +1519,9 @@ int ice_napi_poll(struct napi_struct *napi, int budget)
* poll us due to busy-polling
*/
if (likely(napi_complete_done(napi, work_done)))
- if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags))
- ice_update_ena_itr(vsi, q_vector);
+ ice_update_ena_itr(vsi, q_vector);
+ else
+ ice_set_wb_on_itr(vsi, q_vector);
return min_t(int, work_done, budget - 1);
}
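
A quick stand-alone illustration of the budget split above (demo helper, not driver code):

/* Mirror of the NAPI budget distribution: multiple Rx rings share the
 * budget fairly but never drop below 1; a single ring takes it all.
 */
#include <stdio.h>

static int budget_per_ring(int budget, int num_ring_rx)
{
        if (num_ring_rx > 1) {
                int per = budget / num_ring_rx;

                return per > 0 ? per : 1; /* never go below 1 */
        }
        return budget; /* single Rx ring gets the whole budget */
}

int main(void)
{
        printf("%d\n", budget_per_ring(64, 1)); /* 64 */
        printf("%d\n", budget_per_ring(64, 3)); /* 21 */
        printf("%d\n", budget_per_ring(2, 4));  /* 1, clamped */
        return 0;
}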
@@ -1521,7 +1591,7 @@ ice_tx_map(struct ice_ring *tx_ring, struct ice_tx_buf *first,
{
u64 td_offset, td_tag, td_cmd;
u16 i = tx_ring->next_to_use;
- struct skb_frag_struct *frag;
+ skb_frag_t *frag;
unsigned int data_len, size;
struct ice_tx_desc *tx_desc;
struct ice_tx_buf *tx_buf;
@@ -1923,7 +1993,7 @@ static unsigned int ice_txd_use_count(unsigned int size)
*/
static unsigned int ice_xmit_desc_count(struct sk_buff *skb)
{
- const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0];
+ const skb_frag_t *frag = &skb_shinfo(skb)->frags[0];
unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
unsigned int count = 0, size = skb_headlen(skb);
@@ -1954,7 +2024,7 @@ static unsigned int ice_xmit_desc_count(struct sk_buff *skb)
*/
static bool __ice_chk_linearize(struct sk_buff *skb)
{
- const struct skb_frag_struct *frag, *stale;
+ const skb_frag_t *frag, *stale;
int nr_frags, sum;
/* no need to check if number of frags is less than 7 */
@@ -2036,6 +2106,7 @@ static netdev_tx_t
ice_xmit_frame_ring(struct sk_buff *skb, struct ice_ring *tx_ring)
{
struct ice_tx_offload_params offload = { 0 };
+ struct ice_vsi *vsi = tx_ring->vsi;
struct ice_tx_buf *first;
unsigned int count;
int tso, csum;
@@ -2083,7 +2154,15 @@ ice_xmit_frame_ring(struct sk_buff *skb, struct ice_ring *tx_ring)
if (csum < 0)
goto out_drop;
- if (tso || offload.cd_tunnel_params) {
+ /* allow CONTROL frames to egress from the main VSI if FW LLDP is disabled */
+ if (unlikely(skb->priority == TC_PRIO_CONTROL &&
+ vsi->type == ICE_VSI_PF &&
+ vsi->port_info->is_sw_lldp))
+ offload.cd_qw1 |= (u64)(ICE_TX_DESC_DTYPE_CTX |
+ ICE_TX_CTX_DESC_SWTCH_UPLINK <<
+ ICE_TXD_CTX_QW1_CMD_S);
+
+ if (offload.cd_qw1 & ICE_TX_DESC_DTYPE_CTX) {
struct ice_tx_ctx_desc *cdesc;
int i = tx_ring->next_to_use;
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.h b/drivers/net/ethernet/intel/ice/ice_txrx.h
index ec76aba347b9..94a9280193e2 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx.h
+++ b/drivers/net/ethernet/intel/ice/ice_txrx.h
@@ -144,6 +144,19 @@ enum ice_rx_dtype {
#define ICE_DFLT_INTRL 0
#define ICE_MAX_INTRL 236
+#define ICE_WB_ON_ITR_USECS 2
+#define ICE_IN_WB_ON_ITR_MODE 255
+/* Sets WB_ON_ITR and assumes INTENA bit is already cleared, which allows
+ * setting the MSK_M bit to tell hardware to ignore the INTENA_M bit. Also,
+ * set the write-back latency to the usecs passed in.
+ */
+#define ICE_GLINT_DYN_CTL_WB_ON_ITR(usecs, itr_idx) \
+ ((((usecs) << (GLINT_DYN_CTL_INTERVAL_S - ICE_ITR_GRAN_S)) & \
+ GLINT_DYN_CTL_INTERVAL_M) | \
+ (((itr_idx) << GLINT_DYN_CTL_ITR_INDX_S) & \
+ GLINT_DYN_CTL_ITR_INDX_M) | GLINT_DYN_CTL_INTENA_MSK_M | \
+ GLINT_DYN_CTL_WB_ON_ITR_M)
+
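A stand-alone sketch of how this macro packs its fields; the DEMO_* shift and mask values below are illustrative assumptions for the demo, not the hardware's authoritative register definitions:

/* Pack a write-back interval, ITR index, INTENA_MSK and WB_ON_ITR bits
 * into one 32-bit register value, mirroring the macro's shape.
 */
#include <stdint.h>
#include <stdio.h>

#define DEMO_INTERVAL_S   5
#define DEMO_INTERVAL_M   (0xFFFu << DEMO_INTERVAL_S)
#define DEMO_ITR_INDX_S   3
#define DEMO_ITR_INDX_M   (0x3u << DEMO_ITR_INDX_S)
#define DEMO_INTENA_MSK_M (1u << 31)
#define DEMO_WB_ON_ITR_M  (1u << 30)
#define DEMO_ITR_GRAN_S   1 /* assumed 2-usec ITR granularity */

static uint32_t wb_on_itr_reg(uint32_t usecs, uint32_t itr_idx)
{
        return (((usecs << (DEMO_INTERVAL_S - DEMO_ITR_GRAN_S)) &
                 DEMO_INTERVAL_M) |
                ((itr_idx << DEMO_ITR_INDX_S) & DEMO_ITR_INDX_M) |
                DEMO_INTENA_MSK_M | DEMO_WB_ON_ITR_M);
}

int main(void)
{
        /* 2-usec write-back latency on ITR index 0 (Rx) */
        printf("0x%08x\n", (unsigned int)wb_on_itr_reg(2, 0));
        return 0;
}
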
/* Legacy or Advanced Mode Queue */
#define ICE_TX_ADVANCED 0
#define ICE_TX_LEGACY 1
diff --git a/drivers/net/ethernet/intel/ice/ice_type.h b/drivers/net/ethernet/intel/ice/ice_type.h
index 24bbef8bbe69..b538d0b9eb80 100644
--- a/drivers/net/ethernet/intel/ice/ice_type.h
+++ b/drivers/net/ethernet/intel/ice/ice_type.h
@@ -13,9 +13,9 @@
#define ICE_BYTES_PER_WORD 2
#define ICE_BYTES_PER_DWORD 4
-static inline bool ice_is_tc_ena(u8 bitmap, u8 tc)
+static inline bool ice_is_tc_ena(unsigned long bitmap, u8 tc)
{
- return test_bit(tc, (unsigned long *)&bitmap);
+ return test_bit(tc, &bitmap);
}
/* Driver always calls main vsi_handle first */
@@ -347,6 +347,8 @@ struct ice_port_info {
struct ice_mac_info mac;
struct ice_phy_info phy;
struct mutex sched_lock; /* protect access to TXSched tree */
+ struct ice_sched_node *
+ sib_head[ICE_MAX_TRAFFIC_CLASS][ICE_AQC_TOPO_MAX_LEVEL_NUM];
struct ice_dcbx_cfg local_dcbx_cfg; /* Oper/Local Cfg */
/* DCBX info */
struct ice_dcbx_cfg remote_dcbx_cfg; /* Peer Cfg */
diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c
index 5d24b539648f..b93324e9f4bc 100644
--- a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c
+++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c
@@ -252,6 +252,35 @@ static int ice_sriov_free_msix_res(struct ice_pf *pf)
}
/**
+ * ice_set_vf_state_qs_dis - Set VF queues state to disabled
+ * @vf: pointer to the VF structure
+ */
+void ice_set_vf_state_qs_dis(struct ice_vf *vf)
+{
+ /* Clear Rx/Tx enabled queues flag */
+ bitmap_zero(vf->txq_ena, ICE_MAX_BASE_QS_PER_VF);
+ bitmap_zero(vf->rxq_ena, ICE_MAX_BASE_QS_PER_VF);
+ vf->num_qs_ena = 0;
+ clear_bit(ICE_VF_STATE_QS_ENA, vf->vf_states);
+}
+
+/**
+ * ice_dis_vf_qs - Disable the VF queues
+ * @vf: pointer to the VF structure
+ */
+static void ice_dis_vf_qs(struct ice_vf *vf)
+{
+ struct ice_pf *pf = vf->pf;
+ struct ice_vsi *vsi;
+
+ vsi = pf->vsi[vf->lan_vsi_idx];
+
+ ice_vsi_stop_lan_tx_rings(vsi, ICE_NO_RESET, vf->vf_id);
+ ice_vsi_stop_rx_rings(vsi);
+ ice_set_vf_state_qs_dis(vf);
+}
+
+/**
* ice_free_vfs - Free all VFs
* @pf: pointer to the PF structure
*/
@@ -267,19 +296,9 @@ void ice_free_vfs(struct ice_pf *pf)
usleep_range(1000, 2000);
/* Avoid wait time by stopping all VFs at the same time */
- for (i = 0; i < pf->num_alloc_vfs; i++) {
- struct ice_vsi *vsi;
-
- if (!test_bit(ICE_VF_STATE_ENA, pf->vf[i].vf_states))
- continue;
-
- vsi = pf->vsi[pf->vf[i].lan_vsi_idx];
- /* stop rings without wait time */
- ice_vsi_stop_lan_tx_rings(vsi, ICE_NO_RESET, i);
- ice_vsi_stop_rx_rings(vsi);
-
- clear_bit(ICE_VF_STATE_ENA, pf->vf[i].vf_states);
- }
+ for (i = 0; i < pf->num_alloc_vfs; i++)
+ if (test_bit(ICE_VF_STATE_QS_ENA, pf->vf[i].vf_states))
+ ice_dis_vf_qs(&pf->vf[i]);
/* Disable IOV before freeing resources. This lets any VF drivers
* running in the host get themselves cleaned up before we yank
@@ -297,13 +316,6 @@ void ice_free_vfs(struct ice_pf *pf)
if (test_bit(ICE_VF_STATE_INIT, pf->vf[i].vf_states)) {
/* disable VF qp mappings */
ice_dis_vf_mappings(&pf->vf[i]);
-
- /* Set this state so that assigned VF vectors can be
- * reclaimed by PF for reuse in ice_vsi_release(). No
- * need to clear this bit since pf->vf array is being
- * freed anyways after this for loop
- */
- set_bit(ICE_VF_STATE_CFG_INTR, pf->vf[i].vf_states);
ice_free_vf_res(&pf->vf[i]);
}
}
@@ -389,12 +401,15 @@ static void ice_trigger_vf_reset(struct ice_vf *vf, bool is_vflr)
wr32(hw, PF_PCI_CIAA,
VF_DEVICE_STATUS | (vf_abs_id << PF_PCI_CIAA_VF_NUM_S));
- for (i = 0; i < 100; i++) {
+ for (i = 0; i < ICE_PCI_CIAD_WAIT_COUNT; i++) {
reg = rd32(hw, PF_PCI_CIAD);
- if ((reg & VF_TRANS_PENDING_M) != 0)
- dev_err(&pf->pdev->dev,
- "VF %d PCI transactions stuck\n", vf->vf_id);
- udelay(1);
+ /* no transactions pending so stop polling */
+ if ((reg & VF_TRANS_PENDING_M) == 0)
+ break;
+
+ dev_err(&pf->pdev->dev,
+ "VF %d PCI transactions stuck\n", vf->vf_id);
+ udelay(ICE_PCI_CIAD_WAIT_DELAY_US);
}
}
@@ -481,19 +496,20 @@ ice_vf_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi, u16 vf_id)
}
/**
- * ice_calc_vf_first_vector_idx - Calculate absolute MSIX vector index in HW
+ * ice_calc_vf_first_vector_idx - Calculate MSIX vector index in the PF space
* @pf: pointer to PF structure
* @vf: pointer to VF that the first MSIX vector index is being calculated for
*
- * This returns the first MSIX vector index in HW that is used by this VF and
- * this will always be the OICR index in the AVF driver so any functionality
+ * This returns the first MSIX vector index in PF space that is used by this VF.
+ * This index is used when accessing PF relative registers such as
+ * GLINT_VECT2FUNC and GLINT_DYN_CTL.
+ * This will always be the OICR index in the AVF driver so any functionality
* using vf->first_vector_idx for queue configuration will have to increment by
* 1 to avoid meddling with the OICR index.
*/
static int ice_calc_vf_first_vector_idx(struct ice_pf *pf, struct ice_vf *vf)
{
- return pf->hw.func_caps.common_cap.msix_vector_first_id +
- pf->sriov_base_vector + vf->vf_id * pf->num_vf_msix;
+ return pf->sriov_base_vector + vf->vf_id * pf->num_vf_msix;
}
/**
@@ -543,7 +559,10 @@ static int ice_alloc_vsi_res(struct ice_vf *vf)
status = ice_add_mac(&pf->hw, &tmp_add_list);
if (status)
- dev_err(&pf->pdev->dev, "could not add mac filters\n");
+ dev_err(&pf->pdev->dev,
+ "could not add mac filters error %d\n", status);
+ else
+ vf->num_mac = 1;
/* Clear this bit after VF initialization since we shouldn't reclaim
* and reassign interrupts for synchronous or asynchronous VFR events.
@@ -551,7 +570,6 @@ static int ice_alloc_vsi_res(struct ice_vf *vf)
* expect vector assignment to be changed unless there is a request for
* more vectors.
*/
- clear_bit(ICE_VF_STATE_CFG_INTR, vf->vf_states);
ice_alloc_vsi_res_exit:
ice_free_fltr_list(&pf->pdev->dev, &tmp_add_list);
return status;
@@ -567,11 +585,6 @@ static int ice_alloc_vf_res(struct ice_vf *vf)
int tx_rx_queue_left;
int status;
- /* setup VF VSI and necessary resources */
- status = ice_alloc_vsi_res(vf);
- if (status)
- goto ice_alloc_vf_res_exit;
-
/* Update number of VF queues, in case VF had requested for queue
* changes
*/
@@ -581,6 +594,11 @@ static int ice_alloc_vf_res(struct ice_vf *vf)
vf->num_req_qs != vf->num_vf_qs)
vf->num_vf_qs = vf->num_req_qs;
+ /* setup VF VSI and necessary resources */
+ status = ice_alloc_vsi_res(vf);
+ if (status)
+ goto ice_alloc_vf_res_exit;
+
if (vf->trusted)
set_bit(ICE_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps);
else
@@ -605,27 +623,30 @@ ice_alloc_vf_res_exit:
*/
static void ice_ena_vf_mappings(struct ice_vf *vf)
{
+ int abs_vf_id, abs_first, abs_last;
struct ice_pf *pf = vf->pf;
struct ice_vsi *vsi;
int first, last, v;
struct ice_hw *hw;
- int abs_vf_id;
u32 reg;
hw = &pf->hw;
vsi = pf->vsi[vf->lan_vsi_idx];
first = vf->first_vector_idx;
last = (first + pf->num_vf_msix) - 1;
+ abs_first = first + pf->hw.func_caps.common_cap.msix_vector_first_id;
+ abs_last = (abs_first + pf->num_vf_msix) - 1;
abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id;
/* VF Vector allocation */
- reg = (((first << VPINT_ALLOC_FIRST_S) & VPINT_ALLOC_FIRST_M) |
- ((last << VPINT_ALLOC_LAST_S) & VPINT_ALLOC_LAST_M) |
+ reg = (((abs_first << VPINT_ALLOC_FIRST_S) & VPINT_ALLOC_FIRST_M) |
+ ((abs_last << VPINT_ALLOC_LAST_S) & VPINT_ALLOC_LAST_M) |
VPINT_ALLOC_VALID_M);
wr32(hw, VPINT_ALLOC(vf->vf_id), reg);
- reg = (((first << VPINT_ALLOC_PCI_FIRST_S) & VPINT_ALLOC_PCI_FIRST_M) |
- ((last << VPINT_ALLOC_PCI_LAST_S) & VPINT_ALLOC_PCI_LAST_M) |
+ reg = (((abs_first << VPINT_ALLOC_PCI_FIRST_S)
+ & VPINT_ALLOC_PCI_FIRST_M) |
+ ((abs_last << VPINT_ALLOC_PCI_LAST_S) & VPINT_ALLOC_PCI_LAST_M) |
VPINT_ALLOC_PCI_VALID_M);
wr32(hw, VPINT_ALLOC_PCI(vf->vf_id), reg);
/* map the interrupts to its functions */
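
The VPINT_ALLOC writes above follow the usual pack-fields-into-one-register pattern for the first/last absolute vector index plus a valid bit. A sketch with made-up field positions (the real layout lives in the hardware headers):

/* Pack first/last MSIX vector indexes and a valid bit into one register.
 * DEMO_* positions are assumptions for illustration only.
 */
#include <stdint.h>
#include <stdio.h>

#define DEMO_FIRST_S 0
#define DEMO_FIRST_M (0x7FFu << DEMO_FIRST_S)
#define DEMO_LAST_S  12
#define DEMO_LAST_M  (0x7FFu << DEMO_LAST_S)
#define DEMO_VALID_M (1u << 31)

static uint32_t pack_vpint_alloc(uint32_t abs_first, uint32_t abs_last)
{
        return ((abs_first << DEMO_FIRST_S) & DEMO_FIRST_M) |
               ((abs_last << DEMO_LAST_S) & DEMO_LAST_M) |
               DEMO_VALID_M;
}

int main(void)
{
        /* a VF with 5 vectors starting at absolute index 70: [70, 74] */
        printf("0x%08x\n", (unsigned int)pack_vpint_alloc(70, 74));
        return 0;
}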
@@ -983,6 +1004,47 @@ ice_vf_set_vsi_promisc(struct ice_vf *vf, struct ice_vsi *vsi, u8 promisc_m,
}
/**
+ * ice_config_res_vfs - Finalize allocation of VFs resources in one go
+ * @pf: pointer to the PF structure
+ *
+ * This function is called as the last part of resetting all VFs, or when
+ * configuring VFs for the first time, where there is no resource to be freed.
+ * Returns true if resources were properly allocated for all VFs, and false
+ * otherwise.
+ */
+static bool ice_config_res_vfs(struct ice_pf *pf)
+{
+ struct ice_hw *hw = &pf->hw;
+ int v;
+
+ if (ice_check_avail_res(pf)) {
+ dev_err(&pf->pdev->dev,
+ "Cannot allocate VF resources, try with fewer number of VFs\n");
+ return false;
+ }
+
+ /* rearm global interrupts */
+ if (test_and_clear_bit(__ICE_OICR_INTR_DIS, pf->state))
+ ice_irq_dynamic_ena(hw, NULL, NULL);
+
+ /* Finish resetting each VF and allocate resources */
+ for (v = 0; v < pf->num_alloc_vfs; v++) {
+ struct ice_vf *vf = &pf->vf[v];
+
+ vf->num_vf_qs = pf->num_vf_qps;
+ dev_dbg(&pf->pdev->dev,
+ "VF-id %d has %d queues configured\n",
+ vf->vf_id, vf->num_vf_qs);
+ ice_cleanup_and_realloc_vf(vf);
+ }
+
+ ice_flush(hw);
+ clear_bit(__ICE_VF_DIS, pf->state);
+
+ return true;
+}
+
+/**
* ice_reset_all_vfs - reset all allocated VFs in one go
* @pf: pointer to the PF structure
* @is_vflr: true if VFLR was issued, false if not
@@ -1012,17 +1074,9 @@ bool ice_reset_all_vfs(struct ice_pf *pf, bool is_vflr)
for (v = 0; v < pf->num_alloc_vfs; v++)
ice_trigger_vf_reset(&pf->vf[v], is_vflr);
- for (v = 0; v < pf->num_alloc_vfs; v++) {
- struct ice_vsi *vsi;
-
- vf = &pf->vf[v];
- vsi = pf->vsi[vf->lan_vsi_idx];
- if (test_bit(ICE_VF_STATE_ENA, vf->vf_states)) {
- ice_vsi_stop_lan_tx_rings(vsi, ICE_VF_RESET, vf->vf_id);
- ice_vsi_stop_rx_rings(vsi);
- clear_bit(ICE_VF_STATE_ENA, vf->vf_states);
- }
- }
+ for (v = 0; v < pf->num_alloc_vfs; v++)
+ if (test_bit(ICE_VF_STATE_QS_ENA, pf->vf[v].vf_states))
+ ice_dis_vf_qs(&pf->vf[v]);
/* HW requires some time to make sure it can flush the FIFO for a VF
* when it resets it. Poll the VPGEN_VFRSTAT register for each VF in
@@ -1031,7 +1085,6 @@ bool ice_reset_all_vfs(struct ice_pf *pf, bool is_vflr)
* finished resetting.
*/
for (i = 0, v = 0; i < 10 && v < pf->num_alloc_vfs; i++) {
- usleep_range(10000, 20000);
/* Check each VF in sequence */
while (v < pf->num_alloc_vfs) {
@@ -1039,8 +1092,11 @@ bool ice_reset_all_vfs(struct ice_pf *pf, bool is_vflr)
vf = &pf->vf[v];
reg = rd32(hw, VPGEN_VFRSTAT(vf->vf_id));
- if (!(reg & VPGEN_VFRSTAT_VFRD_M))
+ if (!(reg & VPGEN_VFRSTAT_VFRD_M)) {
+ /* only delay if the check failed */
+ usleep_range(10, 20);
break;
+ }
/* If the current VF has finished resetting, move on
* to the next VF in sequence.
@@ -1054,7 +1110,6 @@ bool ice_reset_all_vfs(struct ice_pf *pf, bool is_vflr)
*/
if (v < pf->num_alloc_vfs)
dev_warn(&pf->pdev->dev, "VF reset check timeout\n");
- usleep_range(10000, 20000);
/* free VF resources to begin resetting the VSI state */
for (v = 0; v < pf->num_alloc_vfs; v++) {
@@ -1074,25 +1129,8 @@ bool ice_reset_all_vfs(struct ice_pf *pf, bool is_vflr)
dev_err(&pf->pdev->dev,
"Failed to free MSIX resources used by SR-IOV\n");
- if (ice_check_avail_res(pf)) {
- dev_err(&pf->pdev->dev,
- "Cannot allocate VF resources, try with fewer number of VFs\n");
+ if (!ice_config_res_vfs(pf))
return false;
- }
-
- /* Finish the reset on each VF */
- for (v = 0; v < pf->num_alloc_vfs; v++) {
- vf = &pf->vf[v];
-
- vf->num_vf_qs = pf->num_vf_qps;
- dev_dbg(&pf->pdev->dev,
- "VF-id %d has %d queues configured\n",
- vf->vf_id, vf->num_vf_qs);
- ice_cleanup_and_realloc_vf(vf);
- }
-
- ice_flush(hw);
- clear_bit(__ICE_VF_DIS, pf->state);
return true;
}
@@ -1114,27 +1152,31 @@ static bool ice_reset_vf(struct ice_vf *vf, bool is_vflr)
u32 reg;
int i;
- /* If the VFs have been disabled, this means something else is
- * resetting the VF, so we shouldn't continue.
+ /* If the PF has been disabled, there is no need to reset the VF
+ * until the PF is active again.
*/
- if (test_and_set_bit(__ICE_VF_DIS, pf->state))
+ if (test_bit(__ICE_VF_DIS, pf->state))
+ return false;
+
+ /* If the VF has been disabled, this means something else is
+ * resetting the VF, so we shouldn't continue. Otherwise, set the
+ * VF disabled-state bit for the actual reset, and continue.
+ */
+ if (test_and_set_bit(ICE_VF_STATE_DIS, vf->vf_states))
return false;
ice_trigger_vf_reset(vf, is_vflr);
vsi = pf->vsi[vf->lan_vsi_idx];
- if (test_bit(ICE_VF_STATE_ENA, vf->vf_states)) {
- ice_vsi_stop_lan_tx_rings(vsi, ICE_VF_RESET, vf->vf_id);
- ice_vsi_stop_rx_rings(vsi);
- clear_bit(ICE_VF_STATE_ENA, vf->vf_states);
- } else {
+ if (test_bit(ICE_VF_STATE_QS_ENA, vf->vf_states))
+ ice_dis_vf_qs(vf);
+ else
/* Call Disable LAN Tx queue AQ call even when queues are not
- * enabled. This is needed for successful completiom of VFR
+ * enabled. This is needed for successful completion of VFR
*/
ice_dis_vsi_txq(vsi->port_info, vsi->idx, 0, 0, NULL, NULL,
NULL, ICE_VF_RESET, vf->vf_id, NULL);
- }
hw = &pf->hw;
/* poll VPGEN_VFRSTAT reg to make sure
@@ -1145,12 +1187,14 @@ static bool ice_reset_vf(struct ice_vf *vf, bool is_vflr)
* poll the status register to make sure that the reset
* completed successfully.
*/
- usleep_range(10000, 20000);
reg = rd32(hw, VPGEN_VFRSTAT(vf->vf_id));
if (reg & VPGEN_VFRSTAT_VFRD_M) {
rsd = true;
break;
}
+
+ /* only sleep if the reset is not done */
+ usleep_range(10, 20);
}
/* Display a warning if VF didn't manage to reset in time, but need to
@@ -1160,8 +1204,6 @@ static bool ice_reset_vf(struct ice_vf *vf, bool is_vflr)
dev_warn(&pf->pdev->dev, "VF reset check timeout on VF %d\n",
vf->vf_id);
- usleep_range(10000, 20000);
-
/* disable promiscuous modes in case they were enabled
* ignore any error if disabling process failed
*/
@@ -1183,7 +1225,6 @@ static bool ice_reset_vf(struct ice_vf *vf, bool is_vflr)
ice_cleanup_and_realloc_vf(vf);
ice_flush(hw);
- clear_bit(__ICE_VF_DIS, pf->state);
return true;
}
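
The reset path above uses a bounded poll that sleeps only when the status check fails, rather than sleeping unconditionally each iteration. The same pattern in a stand-alone sketch, where read_status_done() is a stand-in for the VPGEN_VFRSTAT register read:

/* Bounded polling: break as soon as the status is good, delay only on
 * failed checks, give up after max_tries attempts.
 */
#include <stdbool.h>
#include <stdio.h>

static int fake_status_calls;

static bool read_status_done(void)
{
        return ++fake_status_calls >= 4; /* pretend reset finishes on poll 4 */
}

static bool poll_until_done(int max_tries)
{
        for (int i = 0; i < max_tries; i++) {
                if (read_status_done())
                        return true;
                /* in the driver: usleep_range(10, 20) only when not done */
        }
        return false;
}

int main(void)
{
        bool done = poll_until_done(10);

        printf("done=%d after %d polls\n", done, fake_status_calls);
        return 0;
}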
@@ -1257,7 +1298,7 @@ static int ice_alloc_vfs(struct ice_pf *pf, u16 num_alloc_vfs)
/* Disable global interrupt 0 so we don't try to handle the VFLR. */
wr32(hw, GLINT_DYN_CTL(pf->oicr_idx),
ICE_ITR_NONE << GLINT_DYN_CTL_ITR_INDX_S);
-
+ set_bit(__ICE_OICR_INTR_DIS, pf->state);
ice_flush(hw);
ret = pci_enable_sriov(pf->pdev, num_alloc_vfs);
@@ -1283,19 +1324,16 @@ static int ice_alloc_vfs(struct ice_pf *pf, u16 num_alloc_vfs)
/* assign default capabilities */
set_bit(ICE_VIRTCHNL_VF_CAP_L2, &vfs[i].vf_caps);
vfs[i].spoofchk = true;
-
- /* Set this state so that PF driver does VF vector assignment */
- set_bit(ICE_VF_STATE_CFG_INTR, vfs[i].vf_states);
}
pf->num_alloc_vfs = num_alloc_vfs;
- /* VF resources get allocated during reset */
- if (!ice_reset_all_vfs(pf, true)) {
+ /* VF resources get allocated during initialization */
+ if (!ice_config_res_vfs(pf)) {
ret = -EIO;
goto err_unroll_sriov;
}
- goto err_unroll_intr;
+ return ret;
err_unroll_sriov:
pf->vf = NULL;
@@ -1307,6 +1345,7 @@ err_pci_disable_sriov:
err_unroll_intr:
/* rearm interrupts here */
ice_irq_dynamic_ena(hw, NULL, NULL);
+ clear_bit(__ICE_OICR_INTR_DIS, pf->state);
return ret;
}
@@ -1490,10 +1529,10 @@ ice_vc_send_msg_to_vf(struct ice_vf *vf, u32 v_opcode,
aq_ret = ice_aq_send_msg_to_vf(&pf->hw, vf->vf_id, v_opcode, v_retval,
msg, msglen, NULL);
- if (aq_ret) {
+ if (aq_ret && pf->hw.mailboxq.sq_last_status != ICE_AQ_RC_ENOSYS) {
dev_info(&pf->pdev->dev,
- "Unable to send the message to VF %d aq_err %d\n",
- vf->vf_id, pf->hw.mailboxq.sq_last_status);
+ "Unable to send the message to VF %d ret %d aq_err %d\n",
+ vf->vf_id, aq_ret, pf->hw.mailboxq.sq_last_status);
return -EIO;
}
@@ -1688,6 +1727,21 @@ static bool ice_vc_isvalid_q_id(struct ice_vf *vf, u16 vsi_id, u8 qid)
}
/**
+ * ice_vc_isvalid_ring_len
+ * @ring_len: length of ring
+ *
+ * Check for a valid ring count: it must be zero, or a multiple of
+ * ICE_REQ_DESC_MULTIPLE within the supported descriptor range
+ */
+static bool ice_vc_isvalid_ring_len(u16 ring_len)
+{
+ return ring_len == 0 ||
+ (ring_len >= ICE_MIN_NUM_DESC &&
+ ring_len <= ICE_MAX_NUM_DESC &&
+ !(ring_len % ICE_REQ_DESC_MULTIPLE));
+}
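
For illustration, the same validity rule in a runnable form; the DEMO_* bounds are assumed values for the demo, not necessarily the driver's actual ICE_MIN_NUM_DESC/ICE_MAX_NUM_DESC/ICE_REQ_DESC_MULTIPLE:

/* A ring length is valid if it is zero ("leave as-is") or an in-range
 * multiple of the required descriptor granularity.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define DEMO_MIN_NUM_DESC      64
#define DEMO_MAX_NUM_DESC      8160
#define DEMO_REQ_DESC_MULTIPLE 32

static bool demo_isvalid_ring_len(uint16_t ring_len)
{
        return ring_len == 0 ||
               (ring_len >= DEMO_MIN_NUM_DESC &&
                ring_len <= DEMO_MAX_NUM_DESC &&
                !(ring_len % DEMO_REQ_DESC_MULTIPLE));
}

int main(void)
{
        printf("%d %d %d %d\n",
               demo_isvalid_ring_len(0),    /* 1: zero means "leave as-is" */
               demo_isvalid_ring_len(512),  /* 1: in range, multiple of 32 */
               demo_isvalid_ring_len(100),  /* 0: not a multiple of 32 */
               demo_isvalid_ring_len(16));  /* 0: below the minimum */
        return 0;
}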
+
+/**
* ice_vc_config_rss_key
* @vf: pointer to the VF info
* @msg: pointer to the msg buffer
@@ -1712,18 +1766,18 @@ static int ice_vc_config_rss_key(struct ice_vf *vf, u8 *msg)
goto error_param;
}
- vsi = pf->vsi[vf->lan_vsi_idx];
- if (!vsi) {
+ if (vrk->key_len != ICE_VSIQF_HKEY_ARRAY_SIZE) {
v_ret = VIRTCHNL_STATUS_ERR_PARAM;
goto error_param;
}
- if (vrk->key_len != ICE_VSIQF_HKEY_ARRAY_SIZE) {
+ if (!test_bit(ICE_FLAG_RSS_ENA, vf->pf->flags)) {
v_ret = VIRTCHNL_STATUS_ERR_PARAM;
goto error_param;
}
- if (!test_bit(ICE_FLAG_RSS_ENA, vf->pf->flags)) {
+ vsi = pf->vsi[vf->lan_vsi_idx];
+ if (!vsi) {
v_ret = VIRTCHNL_STATUS_ERR_PARAM;
goto error_param;
}
@@ -1759,18 +1813,18 @@ static int ice_vc_config_rss_lut(struct ice_vf *vf, u8 *msg)
goto error_param;
}
- vsi = pf->vsi[vf->lan_vsi_idx];
- if (!vsi) {
+ if (vrl->lut_entries != ICE_VSIQF_HLUT_ARRAY_SIZE) {
v_ret = VIRTCHNL_STATUS_ERR_PARAM;
goto error_param;
}
- if (vrl->lut_entries != ICE_VSIQF_HLUT_ARRAY_SIZE) {
+ if (!test_bit(ICE_FLAG_RSS_ENA, vf->pf->flags)) {
v_ret = VIRTCHNL_STATUS_ERR_PARAM;
goto error_param;
}
- if (!test_bit(ICE_FLAG_RSS_ENA, vf->pf->flags)) {
+ vsi = pf->vsi[vf->lan_vsi_idx];
+ if (!vsi) {
v_ret = VIRTCHNL_STATUS_ERR_PARAM;
goto error_param;
}
@@ -1839,6 +1893,8 @@ static int ice_vc_ena_qs_msg(struct ice_vf *vf, u8 *msg)
(struct virtchnl_queue_select *)msg;
struct ice_pf *pf = vf->pf;
struct ice_vsi *vsi;
+ unsigned long q_map;
+ u16 vf_q_id;
if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
v_ret = VIRTCHNL_STATUS_ERR_PARAM;
@@ -1855,6 +1911,12 @@ static int ice_vc_ena_qs_msg(struct ice_vf *vf, u8 *msg)
goto error_param;
}
+ if (vqs->rx_queues > ICE_MAX_BASE_QS_PER_VF ||
+ vqs->tx_queues > ICE_MAX_BASE_QS_PER_VF) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
vsi = pf->vsi[vf->lan_vsi_idx];
if (!vsi) {
v_ret = VIRTCHNL_STATUS_ERR_PARAM;
@@ -1865,12 +1927,48 @@ static int ice_vc_ena_qs_msg(struct ice_vf *vf, u8 *msg)
* Tx queue group list was configured and the context bits were
* programmed using ice_vsi_cfg_txqs
*/
- if (ice_vsi_start_rx_rings(vsi))
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ q_map = vqs->rx_queues;
+ for_each_set_bit(vf_q_id, &q_map, ICE_MAX_BASE_QS_PER_VF) {
+ if (!ice_vc_isvalid_q_id(vf, vqs->vsi_id, vf_q_id)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
+ /* Skip queue if enabled */
+ if (test_bit(vf_q_id, vf->rxq_ena))
+ continue;
+
+ if (ice_vsi_ctrl_rx_ring(vsi, true, vf_q_id)) {
+ dev_err(&vsi->back->pdev->dev,
+ "Failed to enable Rx ring %d on VSI %d\n",
+ vf_q_id, vsi->vsi_num);
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
+ set_bit(vf_q_id, vf->rxq_ena);
+ vf->num_qs_ena++;
+ }
+
+ vsi = pf->vsi[vf->lan_vsi_idx];
+ q_map = vqs->tx_queues;
+ for_each_set_bit(vf_q_id, &q_map, ICE_MAX_BASE_QS_PER_VF) {
+ if (!ice_vc_isvalid_q_id(vf, vqs->vsi_id, vf_q_id)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
+ /* Skip queue if enabled */
+ if (test_bit(vf_q_id, vf->txq_ena))
+ continue;
+
+ set_bit(vf_q_id, vf->txq_ena);
+ vf->num_qs_ena++;
+ }
/* Set flag to indicate that queues are enabled */
if (v_ret == VIRTCHNL_STATUS_SUCCESS)
- set_bit(ICE_VF_STATE_ENA, vf->vf_states);
+ set_bit(ICE_VF_STATE_QS_ENA, vf->vf_states);
error_param:
/* send the response to the VF */
@@ -1893,9 +1991,11 @@ static int ice_vc_dis_qs_msg(struct ice_vf *vf, u8 *msg)
(struct virtchnl_queue_select *)msg;
struct ice_pf *pf = vf->pf;
struct ice_vsi *vsi;
+ unsigned long q_map;
+ u16 vf_q_id;
if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states) &&
- !test_bit(ICE_VF_STATE_ENA, vf->vf_states)) {
+ !test_bit(ICE_VF_STATE_QS_ENA, vf->vf_states)) {
v_ret = VIRTCHNL_STATUS_ERR_PARAM;
goto error_param;
}
@@ -1910,29 +2010,81 @@ static int ice_vc_dis_qs_msg(struct ice_vf *vf, u8 *msg)
goto error_param;
}
+ if (vqs->rx_queues > ICE_MAX_BASE_QS_PER_VF ||
+ vqs->tx_queues > ICE_MAX_BASE_QS_PER_VF) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
vsi = pf->vsi[vf->lan_vsi_idx];
if (!vsi) {
v_ret = VIRTCHNL_STATUS_ERR_PARAM;
goto error_param;
}
- if (ice_vsi_stop_lan_tx_rings(vsi, ICE_NO_RESET, vf->vf_id)) {
- dev_err(&vsi->back->pdev->dev,
- "Failed to stop tx rings on VSI %d\n",
- vsi->vsi_num);
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ if (vqs->tx_queues) {
+ q_map = vqs->tx_queues;
+
+ for_each_set_bit(vf_q_id, &q_map, ICE_MAX_BASE_QS_PER_VF) {
+ struct ice_ring *ring = vsi->tx_rings[vf_q_id];
+ struct ice_txq_meta txq_meta = { 0 };
+
+ if (!ice_vc_isvalid_q_id(vf, vqs->vsi_id, vf_q_id)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
+ /* Skip queue if not enabled */
+ if (!test_bit(vf_q_id, vf->txq_ena))
+ continue;
+
+ ice_fill_txq_meta(vsi, ring, &txq_meta);
+
+ if (ice_vsi_stop_tx_ring(vsi, ICE_NO_RESET, vf->vf_id,
+ ring, &txq_meta)) {
+ dev_err(&vsi->back->pdev->dev,
+ "Failed to stop Tx ring %d on VSI %d\n",
+ vf_q_id, vsi->vsi_num);
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
+ /* Clear enabled queues flag */
+ clear_bit(vf_q_id, vf->txq_ena);
+ vf->num_qs_ena--;
+ }
}
- if (ice_vsi_stop_rx_rings(vsi)) {
- dev_err(&vsi->back->pdev->dev,
- "Failed to stop rx rings on VSI %d\n",
- vsi->vsi_num);
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ if (vqs->rx_queues) {
+ q_map = vqs->rx_queues;
+
+ for_each_set_bit(vf_q_id, &q_map, ICE_MAX_BASE_QS_PER_VF) {
+ if (!ice_vc_isvalid_q_id(vf, vqs->vsi_id, vf_q_id)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
+ /* Skip queue if not enabled */
+ if (!test_bit(vf_q_id, vf->rxq_ena))
+ continue;
+
+ if (ice_vsi_ctrl_rx_ring(vsi, false, vf_q_id)) {
+ dev_err(&vsi->back->pdev->dev,
+ "Failed to stop Rx ring %d on VSI %d\n",
+ vf_q_id, vsi->vsi_num);
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
+ /* Clear enabled queues flag */
+ clear_bit(vf_q_id, vf->rxq_ena);
+ vf->num_qs_ena--;
+ }
}
/* Clear enabled queues flag */
- if (v_ret == VIRTCHNL_STATUS_SUCCESS)
- clear_bit(ICE_VF_STATE_ENA, vf->vf_states);
+ if (v_ret == VIRTCHNL_STATUS_SUCCESS && !vf->num_qs_ena)
+ clear_bit(ICE_VF_STATE_QS_ENA, vf->vf_states);
error_param:
/* send the response to the VF */
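
The per-queue bookkeeping in the enable/disable loops above reduces to one bitmap per direction plus a counter, so ICE_VF_STATE_QS_ENA can be cleared only once the last queue goes down. A simplified sketch with stand-in types, not the driver's structures:

/* Track per-queue enable state idempotently and keep a running count of
 * enabled queues across both directions.
 */
#include <stdio.h>

struct demo_vf {
        unsigned long rxq_ena; /* bit per Rx queue */
        unsigned long txq_ena; /* bit per Tx queue */
        int num_qs_ena;
};

static void demo_ena_rxq(struct demo_vf *vf, int q)
{
        if (vf->rxq_ena & (1ul << q))
                return;               /* skip queue if already enabled */
        vf->rxq_ena |= 1ul << q;
        vf->num_qs_ena++;
}

static void demo_dis_rxq(struct demo_vf *vf, int q)
{
        if (!(vf->rxq_ena & (1ul << q)))
                return;               /* skip queue if not enabled */
        vf->rxq_ena &= ~(1ul << q);
        vf->num_qs_ena--;
}

int main(void)
{
        struct demo_vf vf = { 0 };

        demo_ena_rxq(&vf, 3);
        demo_ena_rxq(&vf, 3);         /* idempotent: no double count */
        demo_dis_rxq(&vf, 3);
        printf("all queues down: %d\n", vf.num_qs_ena == 0); /* 1 */
        return 0;
}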
@@ -1962,12 +2114,6 @@ static int ice_vc_cfg_irq_map_msg(struct ice_vf *vf, u8 *msg)
irqmap_info = (struct virtchnl_irq_map_info *)msg;
num_q_vectors_mapped = irqmap_info->num_vectors;
- vsi = pf->vsi[vf->lan_vsi_idx];
- if (!vsi) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto error_param;
- }
-
/* Check to make sure number of VF vectors mapped is not greater than
* number of VF vectors originally allocated, and check that
* there is actually at least a single VF queue vector mapped
@@ -1979,6 +2125,12 @@ static int ice_vc_cfg_irq_map_msg(struct ice_vf *vf, u8 *msg)
goto error_param;
}
+ vsi = pf->vsi[vf->lan_vsi_idx];
+ if (!vsi) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
for (i = 0; i < num_q_vectors_mapped; i++) {
struct ice_q_vector *q_vector;
@@ -2056,6 +2208,7 @@ static int ice_vc_cfg_qs_msg(struct ice_vf *vf, u8 *msg)
struct virtchnl_vsi_queue_config_info *qci =
(struct virtchnl_vsi_queue_config_info *)msg;
struct virtchnl_queue_pair_info *qpi;
+ u16 num_rxq = 0, num_txq = 0;
struct ice_pf *pf = vf->pf;
struct ice_vsi *vsi;
int i;
@@ -2071,13 +2224,16 @@ static int ice_vc_cfg_qs_msg(struct ice_vf *vf, u8 *msg)
}
vsi = pf->vsi[vf->lan_vsi_idx];
- if (!vsi)
+ if (!vsi) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
goto error_param;
+ }
- if (qci->num_queue_pairs > ICE_MAX_BASE_QS_PER_VF) {
+ if (qci->num_queue_pairs > ICE_MAX_BASE_QS_PER_VF ||
+ qci->num_queue_pairs > min_t(u16, vsi->alloc_txq, vsi->alloc_rxq)) {
dev_err(&pf->pdev->dev,
"VF-%d requesting more than supported number of queues: %d\n",
- vf->vf_id, qci->num_queue_pairs);
+ vf->vf_id, min_t(u16, vsi->alloc_txq, vsi->alloc_rxq));
v_ret = VIRTCHNL_STATUS_ERR_PARAM;
goto error_param;
}
@@ -2087,37 +2243,52 @@ static int ice_vc_cfg_qs_msg(struct ice_vf *vf, u8 *msg)
if (qpi->txq.vsi_id != qci->vsi_id ||
qpi->rxq.vsi_id != qci->vsi_id ||
qpi->rxq.queue_id != qpi->txq.queue_id ||
+ qpi->txq.headwb_enabled ||
+ !ice_vc_isvalid_ring_len(qpi->txq.ring_len) ||
+ !ice_vc_isvalid_ring_len(qpi->rxq.ring_len) ||
!ice_vc_isvalid_q_id(vf, qci->vsi_id, qpi->txq.queue_id)) {
v_ret = VIRTCHNL_STATUS_ERR_PARAM;
goto error_param;
}
/* copy Tx queue info from VF into VSI */
- vsi->tx_rings[i]->dma = qpi->txq.dma_ring_addr;
- vsi->tx_rings[i]->count = qpi->txq.ring_len;
- /* copy Rx queue info from VF into VSI */
- vsi->rx_rings[i]->dma = qpi->rxq.dma_ring_addr;
- vsi->rx_rings[i]->count = qpi->rxq.ring_len;
- if (qpi->rxq.databuffer_size > ((16 * 1024) - 128)) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto error_param;
+ if (qpi->txq.ring_len > 0) {
+ num_txq++;
+ vsi->tx_rings[i]->dma = qpi->txq.dma_ring_addr;
+ vsi->tx_rings[i]->count = qpi->txq.ring_len;
}
- vsi->rx_buf_len = qpi->rxq.databuffer_size;
- if (qpi->rxq.max_pkt_size >= (16 * 1024) ||
- qpi->rxq.max_pkt_size < 64) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto error_param;
+
+ /* copy Rx queue info from VF into VSI */
+ if (qpi->rxq.ring_len > 0) {
+ num_rxq++;
+ vsi->rx_rings[i]->dma = qpi->rxq.dma_ring_addr;
+ vsi->rx_rings[i]->count = qpi->rxq.ring_len;
+
+ if (qpi->rxq.databuffer_size != 0 &&
+ (qpi->rxq.databuffer_size > ((16 * 1024) - 128) ||
+ qpi->rxq.databuffer_size < 1024)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+ vsi->rx_buf_len = qpi->rxq.databuffer_size;
+ vsi->rx_rings[i]->rx_buf_len = vsi->rx_buf_len;
+ if (qpi->rxq.max_pkt_size >= (16 * 1024) ||
+ qpi->rxq.max_pkt_size < 64) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
}
+
vsi->max_frame = qpi->rxq.max_pkt_size;
}
/* VF can request to configure less than allocated queues
* or default allocated queues. So update the VSI with new number
*/
- vsi->num_txq = qci->num_queue_pairs;
- vsi->num_rxq = qci->num_queue_pairs;
+ vsi->num_txq = num_txq;
+ vsi->num_rxq = num_rxq;
/* All queues of VF VSI are in TC 0 */
- vsi->tc_cfg.tc_info[0].qcount_tx = qci->num_queue_pairs;
- vsi->tc_cfg.tc_info[0].qcount_rx = qci->num_queue_pairs;
+ vsi->tc_cfg.tc_info[0].qcount_tx = num_txq;
+ vsi->tc_cfg.tc_info[0].qcount_rx = num_rxq;
if (ice_vsi_cfg_lan_txqs(vsi) || ice_vsi_cfg_rxqs(vsi))
v_ret = VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR;
@@ -2171,7 +2342,7 @@ ice_vc_handle_mac_addr_msg(struct ice_vf *vf, u8 *msg, bool set)
(struct virtchnl_ether_addr_list *)msg;
struct ice_pf *pf = vf->pf;
enum virtchnl_ops vc_op;
- LIST_HEAD(mac_list);
+ enum ice_status status;
struct ice_vsi *vsi;
int mac_count = 0;
int i;
@@ -2245,33 +2416,32 @@ ice_vc_handle_mac_addr_msg(struct ice_vf *vf, u8 *msg, bool set)
goto handle_mac_exit;
}
- /* get here if maddr is multicast or if VF can change MAC */
- if (ice_add_mac_to_list(vsi, &mac_list, al->list[i].addr)) {
- v_ret = VIRTCHNL_STATUS_ERR_NO_MEMORY;
+ /* program the updated filter list */
+ status = ice_vsi_cfg_mac_fltr(vsi, maddr, set);
+ if (status == ICE_ERR_DOES_NOT_EXIST ||
+ status == ICE_ERR_ALREADY_EXISTS) {
+ dev_info(&pf->pdev->dev,
+ "can't %s MAC filters %pM for VF %d, error %d\n",
+ set ? "add" : "remove", maddr, vf->vf_id,
+ status);
+ } else if (status) {
+ dev_err(&pf->pdev->dev,
+ "can't %s MAC filters for VF %d, error %d\n",
+ set ? "add" : "remove", vf->vf_id, status);
+ v_ret = ice_err_to_virt_err(status);
goto handle_mac_exit;
}
+
mac_count++;
}
- /* program the updated filter list */
+ /* Track number of MAC filters programmed for the VF VSI */
if (set)
- v_ret = ice_err_to_virt_err(ice_add_mac(&pf->hw, &mac_list));
+ vf->num_mac += mac_count;
else
- v_ret = ice_err_to_virt_err(ice_remove_mac(&pf->hw, &mac_list));
-
- if (v_ret) {
- dev_err(&pf->pdev->dev,
- "can't update MAC filters for VF %d, error %d\n",
- vf->vf_id, v_ret);
- } else {
- if (set)
- vf->num_mac += mac_count;
- else
- vf->num_mac -= mac_count;
- }
+ vf->num_mac -= mac_count;
handle_mac_exit:
- ice_free_fltr_list(&pf->pdev->dev, &mac_list);
/* send the response to the VF */
return ice_vc_send_msg_to_vf(vf, vc_op, v_ret, NULL, 0);
}
@@ -2315,11 +2485,11 @@ static int ice_vc_request_qs_msg(struct ice_vf *vf, u8 *msg)
enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
struct virtchnl_vf_res_request *vfres =
(struct virtchnl_vf_res_request *)msg;
- int req_queues = vfres->num_queue_pairs;
+ u16 req_queues = vfres->num_queue_pairs;
struct ice_pf *pf = vf->pf;
- int max_allowed_vf_queues;
- int tx_rx_queue_left;
- int cur_queues;
+ u16 max_allowed_vf_queues;
+ u16 tx_rx_queue_left;
+ u16 cur_queues;
if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
v_ret = VIRTCHNL_STATUS_ERR_PARAM;
@@ -2327,29 +2497,30 @@ static int ice_vc_request_qs_msg(struct ice_vf *vf, u8 *msg)
}
cur_queues = vf->num_vf_qs;
- tx_rx_queue_left = min_t(int, pf->q_left_tx, pf->q_left_rx);
+ tx_rx_queue_left = min_t(u16, pf->q_left_tx, pf->q_left_rx);
max_allowed_vf_queues = tx_rx_queue_left + cur_queues;
- if (req_queues <= 0) {
+ if (!req_queues) {
dev_err(&pf->pdev->dev,
- "VF %d tried to request %d queues. Ignoring.\n",
- vf->vf_id, req_queues);
+ "VF %d tried to request 0 queues. Ignoring.\n",
+ vf->vf_id);
} else if (req_queues > ICE_MAX_BASE_QS_PER_VF) {
dev_err(&pf->pdev->dev,
"VF %d tried to request more than %d queues.\n",
vf->vf_id, ICE_MAX_BASE_QS_PER_VF);
vfres->num_queue_pairs = ICE_MAX_BASE_QS_PER_VF;
- } else if (req_queues - cur_queues > tx_rx_queue_left) {
+ } else if (req_queues > cur_queues &&
+ req_queues - cur_queues > tx_rx_queue_left) {
dev_warn(&pf->pdev->dev,
- "VF %d requested %d more queues, but only %d left.\n",
+ "VF %d requested %u more queues, but only %u left.\n",
vf->vf_id, req_queues - cur_queues, tx_rx_queue_left);
- vfres->num_queue_pairs = min_t(int, max_allowed_vf_queues,
+ vfres->num_queue_pairs = min_t(u16, max_allowed_vf_queues,
ICE_MAX_BASE_QS_PER_VF);
} else {
/* request is successful, then reset VF */
vf->num_req_qs = req_queues;
ice_vc_dis_vf(vf);
dev_info(&pf->pdev->dev,
- "VF %d granted request of %d queues.\n",
+ "VF %d granted request of %u queues.\n",
vf->vf_id, req_queues);
return 0;
}
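
The request-granting policy above, condensed into a stand-alone sketch; DEMO_MAX_QS_PER_VF and grant_queues() are demo stand-ins, not driver symbols:

/* A zero request is ignored, a request past the per-VF cap clamps to the
 * cap, and a request needing more spare queues than remain clamps to what
 * can actually be granted.
 */
#include <stdint.h>
#include <stdio.h>

#define DEMO_MAX_QS_PER_VF 16

static uint16_t grant_queues(uint16_t req, uint16_t cur, uint16_t left)
{
        if (!req)
                return cur;                /* ignore a zero request */
        if (req > DEMO_MAX_QS_PER_VF)
                return DEMO_MAX_QS_PER_VF; /* clamp to the per-VF cap */
        if (req > cur && req - cur > left) {
                uint16_t max_allowed = left + cur;

                return max_allowed < DEMO_MAX_QS_PER_VF ?
                       max_allowed : DEMO_MAX_QS_PER_VF;
        }
        return req;                        /* grantable as requested */
}

int main(void)
{
        printf("%u\n", grant_queues(0, 4, 8));  /* 4: keep current */
        printf("%u\n", grant_queues(64, 4, 8)); /* 16: capped */
        printf("%u\n", grant_queues(14, 4, 8)); /* 12: only 8 spare */
        printf("%u\n", grant_queues(8, 4, 8));  /* 8: fits */
        return 0;
}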
@@ -2731,20 +2902,6 @@ void ice_vc_process_vf_msg(struct ice_pf *pf, struct ice_rq_event_info *event)
err = -EPERM;
else
err = -EINVAL;
- goto error_handler;
- }
-
- /* Perform additional checks specific to RSS and Virtchnl */
- if (v_opcode == VIRTCHNL_OP_CONFIG_RSS_KEY) {
- struct virtchnl_rss_key *vrk = (struct virtchnl_rss_key *)msg;
-
- if (vrk->key_len != ICE_VSIQF_HKEY_ARRAY_SIZE)
- err = -EINVAL;
- } else if (v_opcode == VIRTCHNL_OP_CONFIG_RSS_LUT) {
- struct virtchnl_rss_lut *vrl = (struct virtchnl_rss_lut *)msg;
-
- if (vrl->lut_entries != ICE_VSIQF_HLUT_ARRAY_SIZE)
- err = -EINVAL;
}
error_handler:
diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h
index c3ca522c245a..0d9880c8bba3 100644
--- a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h
+++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h
@@ -15,26 +15,38 @@
#define ICE_MAX_MACADDR_PER_VF 12
/* Malicious Driver Detection */
-#define ICE_DFLT_NUM_MDD_EVENTS_ALLOWED 3
#define ICE_DFLT_NUM_INVAL_MSGS_ALLOWED 10
+#define ICE_MDD_EVENTS_THRESHOLD 30
/* Static VF transaction/status register def */
#define VF_DEVICE_STATUS 0xAA
#define VF_TRANS_PENDING_M 0x20
+/* wait defines for polling PF_PCI_CIAD register status */
+#define ICE_PCI_CIAD_WAIT_COUNT 100
+#define ICE_PCI_CIAD_WAIT_DELAY_US 1
+
+/* VF resources default values and limitation */
+#define ICE_MAX_VF_COUNT 256
+#define ICE_MAX_QS_PER_VF 256
+#define ICE_MIN_QS_PER_VF 1
+#define ICE_DFLT_QS_PER_VF 4
+#define ICE_NONQ_VECS_VF 1
+#define ICE_MAX_SCATTER_QS_PER_VF 16
+#define ICE_MAX_BASE_QS_PER_VF 16
+#define ICE_MAX_INTR_PER_VF 65
+#define ICE_MAX_POLICY_INTR_PER_VF 33
+#define ICE_MIN_INTR_PER_VF (ICE_MIN_QS_PER_VF + 1)
+#define ICE_DFLT_INTR_PER_VF (ICE_DFLT_QS_PER_VF + 1)
+
/* Specific VF states */
enum ice_vf_states {
- ICE_VF_STATE_INIT = 0,
- ICE_VF_STATE_ACTIVE,
- ICE_VF_STATE_ENA,
+ ICE_VF_STATE_INIT = 0, /* PF is initializing VF */
+ ICE_VF_STATE_ACTIVE, /* VF resources are allocated for use */
+ ICE_VF_STATE_QS_ENA, /* VF queue(s) enabled */
ICE_VF_STATE_DIS,
ICE_VF_STATE_MC_PROMISC,
ICE_VF_STATE_UC_PROMISC,
- /* state to indicate if PF needs to do vector assignment for VF.
- * This needs to be set during first time VF initialization or later
- * when VF asks for more Vectors through virtchnl OP.
- */
- ICE_VF_STATE_CFG_INTR,
ICE_VF_STATES_NBITS
};
@@ -50,11 +62,14 @@ struct ice_vf {
s16 vf_id; /* VF ID in the PF space */
u16 lan_vsi_idx; /* index into PF struct */
- int first_vector_idx; /* first vector index of this VF */
+ /* first vector index of this VF in the PF space */
+ int first_vector_idx;
struct ice_sw *vf_sw_id; /* switch ID the VF VSIs connect to */
struct virtchnl_version_info vf_ver;
u32 driver_caps; /* reported by VF driver */
struct virtchnl_ether_addr dflt_lan_addr;
+ DECLARE_BITMAP(txq_ena, ICE_MAX_BASE_QS_PER_VF);
+ DECLARE_BITMAP(rxq_ena, ICE_MAX_BASE_QS_PER_VF);
u16 port_vlan_id;
u8 pf_set_mac:1; /* VF MAC address set by VMM admin */
u8 trusted:1;
@@ -77,6 +92,7 @@ struct ice_vf {
u16 num_mac;
u16 num_vlan;
u16 num_vf_qs; /* num of queue configured per VF */
+ u16 num_qs_ena; /* total num of Tx/Rx queue enabled */
};
#ifdef CONFIG_PCI_IOV
@@ -103,12 +119,15 @@ int ice_set_vf_link_state(struct net_device *netdev, int vf_id, int link_state);
int ice_set_vf_spoofchk(struct net_device *netdev, int vf_id, bool ena);
int ice_calc_vf_reg_idx(struct ice_vf *vf, struct ice_q_vector *q_vector);
+
+void ice_set_vf_state_qs_dis(struct ice_vf *vf);
#else /* CONFIG_PCI_IOV */
#define ice_process_vflr_event(pf) do {} while (0)
#define ice_free_vfs(pf) do {} while (0)
#define ice_vc_process_vf_msg(pf, event) do {} while (0)
#define ice_vc_notify_link_state(pf) do {} while (0)
#define ice_vc_notify_reset(pf) do {} while (0)
+#define ice_set_vf_state_qs_dis(vf) do {} while (0)
static inline bool
ice_reset_all_vfs(struct ice_pf __always_unused *pf,
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index b4df3e319467..105b0624081a 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -4731,8 +4731,7 @@ static void igb_clean_rx_ring(struct igb_ring *rx_ring)
{
u16 i = rx_ring->next_to_clean;
- if (rx_ring->skb)
- dev_kfree_skb(rx_ring->skb);
+ dev_kfree_skb(rx_ring->skb);
rx_ring->skb = NULL;
/* Free all the Rx ring sk_buffs */
@@ -5918,7 +5917,7 @@ static int igb_tx_map(struct igb_ring *tx_ring,
struct sk_buff *skb = first->skb;
struct igb_tx_buffer *tx_buffer;
union e1000_adv_tx_desc *tx_desc;
- struct skb_frag_struct *frag;
+ skb_frag_t *frag;
dma_addr_t dma;
unsigned int data_len, size;
u32 tx_flags = first->tx_flags;
@@ -6074,7 +6073,8 @@ netdev_tx_t igb_xmit_frame_ring(struct sk_buff *skb,
* otherwise try next time
*/
for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
- count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size);
+ count += TXD_USE_COUNT(skb_frag_size(
+ &skb_shinfo(skb)->frags[f]));
if (igb_maybe_stop_tx(tx_ring, count + 3)) {
/* this is a hard error */
@@ -8879,8 +8879,7 @@ static int __maybe_unused igb_resume(struct device *dev)
static int __maybe_unused igb_runtime_idle(struct device *dev)
{
- struct pci_dev *pdev = to_pci_dev(dev);
- struct net_device *netdev = pci_get_drvdata(pdev);
+ struct net_device *netdev = dev_get_drvdata(dev);
struct igb_adapter *adapter = netdev_priv(netdev);
if (!igb_has_link(adapter))
diff --git a/drivers/net/ethernet/intel/igbvf/netdev.c b/drivers/net/ethernet/intel/igbvf/netdev.c
index 34cd30d7162f..0f2b68f4bb0f 100644
--- a/drivers/net/ethernet/intel/igbvf/netdev.c
+++ b/drivers/net/ethernet/intel/igbvf/netdev.c
@@ -2174,7 +2174,7 @@ static inline int igbvf_tx_map_adv(struct igbvf_adapter *adapter,
goto dma_error;
for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) {
- const struct skb_frag_struct *frag;
+ const skb_frag_t *frag;
count++;
i++;
diff --git a/drivers/net/ethernet/intel/igc/igc_base.c b/drivers/net/ethernet/intel/igc/igc_base.c
index 59258d791106..db289bcce21d 100644
--- a/drivers/net/ethernet/intel/igc/igc_base.c
+++ b/drivers/net/ethernet/intel/igc/igc_base.c
@@ -40,7 +40,7 @@ static s32 igc_reset_hw_base(struct igc_hw *hw)
ctrl = rd32(IGC_CTRL);
hw_dbg("Issuing a global reset to MAC\n");
- wr32(IGC_CTRL, ctrl | IGC_CTRL_RST);
+ wr32(IGC_CTRL, ctrl | IGC_CTRL_DEV_RST);
ret_val = igc_get_auto_rd_done(hw);
if (ret_val) {
@@ -209,6 +209,9 @@ static s32 igc_get_invariants_base(struct igc_hw *hw)
switch (hw->device_id) {
case IGC_DEV_ID_I225_LM:
case IGC_DEV_ID_I225_V:
+ case IGC_DEV_ID_I225_I:
+ case IGC_DEV_ID_I220_V:
+ case IGC_DEV_ID_I225_K:
mac->type = igc_i225;
break;
default:
diff --git a/drivers/net/ethernet/intel/igc/igc_defines.h b/drivers/net/ethernet/intel/igc/igc_defines.h
index fc0ccfe38a20..11b99acf4abe 100644
--- a/drivers/net/ethernet/intel/igc/igc_defines.h
+++ b/drivers/net/ethernet/intel/igc/igc_defines.h
@@ -54,7 +54,7 @@
#define IGC_ERR_SWFW_SYNC 13
/* Device Control */
-#define IGC_CTRL_RST 0x04000000 /* Global reset */
+#define IGC_CTRL_DEV_RST 0x20000000 /* Device reset */
#define IGC_CTRL_PHY_RST 0x80000000 /* PHY Reset */
#define IGC_CTRL_SLU 0x00000040 /* Set link up (Force Link) */
diff --git a/drivers/net/ethernet/intel/igc/igc_hw.h b/drivers/net/ethernet/intel/igc/igc_hw.h
index 1039a224ac80..abb2d72911ff 100644
--- a/drivers/net/ethernet/intel/igc/igc_hw.h
+++ b/drivers/net/ethernet/intel/igc/igc_hw.h
@@ -18,6 +18,9 @@
#define IGC_DEV_ID_I225_LM 0x15F2
#define IGC_DEV_ID_I225_V 0x15F3
+#define IGC_DEV_ID_I225_I 0x15F8
+#define IGC_DEV_ID_I220_V 0x15F7
+#define IGC_DEV_ID_I225_K 0x3100
#define IGC_FUNC_0 0
@@ -151,16 +154,10 @@ struct igc_phy_info {
u16 autoneg_advertised;
u16 autoneg_mask;
- u16 cable_length;
- u16 max_cable_length;
- u16 min_cable_length;
- u16 pair_length[4];
u8 mdix;
- bool disable_polarity_correction;
bool is_mdix;
- bool polarity_correction;
bool reset_disable;
bool speed_downgraded;
bool autoneg_wait_to_complete;
@@ -190,12 +187,7 @@ struct igc_fc_info {
};
struct igc_dev_spec_base {
- bool global_device_reset;
- bool eee_disable;
bool clear_semaphore_once;
- bool module_plugged;
- u8 media_port;
- bool mas_capable;
};
struct igc_hw {
diff --git a/drivers/net/ethernet/intel/igc/igc_main.c b/drivers/net/ethernet/intel/igc/igc_main.c
index aa9323e55406..251552855c40 100644
--- a/drivers/net/ethernet/intel/igc/igc_main.c
+++ b/drivers/net/ethernet/intel/igc/igc_main.c
@@ -36,6 +36,9 @@ static const struct igc_info *igc_info_tbl[] = {
static const struct pci_device_id igc_pci_tbl[] = {
{ PCI_VDEVICE(INTEL, IGC_DEV_ID_I225_LM), board_base },
{ PCI_VDEVICE(INTEL, IGC_DEV_ID_I225_V), board_base },
+ { PCI_VDEVICE(INTEL, IGC_DEV_ID_I225_I), board_base },
+ { PCI_VDEVICE(INTEL, IGC_DEV_ID_I220_V), board_base },
+ { PCI_VDEVICE(INTEL, IGC_DEV_ID_I225_K), board_base },
/* required last entry */
{0, }
};
@@ -349,8 +352,7 @@ static void igc_clean_rx_ring(struct igc_ring *rx_ring)
{
u16 i = rx_ring->next_to_clean;
- if (rx_ring->skb)
- dev_kfree_skb(rx_ring->skb);
+ dev_kfree_skb(rx_ring->skb);
rx_ring->skb = NULL;
/* Free all the Rx ring sk_buffs */
@@ -861,7 +863,7 @@ static int igc_tx_map(struct igc_ring *tx_ring,
struct igc_tx_buffer *tx_buffer;
union igc_adv_tx_desc *tx_desc;
u32 tx_flags = first->tx_flags;
- struct skb_frag_struct *frag;
+ skb_frag_t *frag;
u16 i = tx_ring->next_to_use;
unsigned int data_len, size;
dma_addr_t dma;
@@ -1015,7 +1017,8 @@ static netdev_tx_t igc_xmit_frame_ring(struct sk_buff *skb,
* otherwise try next time
*/
for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
- count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size);
+ count += TXD_USE_COUNT(skb_frag_size(
+ &skb_shinfo(skb)->frags[f]));
if (igc_maybe_stop_tx(tx_ring, count + 3)) {
/* this is a hard error */
diff --git a/drivers/net/ethernet/intel/ixgb/ixgb_main.c b/drivers/net/ethernet/intel/ixgb/ixgb_main.c
index e5ac2d3fd816..0940a0da16f2 100644
--- a/drivers/net/ethernet/intel/ixgb/ixgb_main.c
+++ b/drivers/net/ethernet/intel/ixgb/ixgb_main.c
@@ -1331,9 +1331,7 @@ ixgb_tx_map(struct ixgb_adapter *adapter, struct sk_buff *skb,
}
for (f = 0; f < nr_frags; f++) {
- const struct skb_frag_struct *frag;
-
- frag = &skb_shinfo(skb)->frags[f];
+ const skb_frag_t *frag = &skb_shinfo(skb)->frags[f];
len = skb_frag_size(frag);
offset = 0;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_debugfs.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_debugfs.c
index 50dfb02fa34c..171cdc552961 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_debugfs.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_debugfs.c
@@ -190,22 +190,12 @@ static const struct file_operations ixgbe_dbg_netdev_ops_fops = {
void ixgbe_dbg_adapter_init(struct ixgbe_adapter *adapter)
{
const char *name = pci_name(adapter->pdev);
- struct dentry *pfile;
+
adapter->ixgbe_dbg_adapter = debugfs_create_dir(name, ixgbe_dbg_root);
- if (adapter->ixgbe_dbg_adapter) {
- pfile = debugfs_create_file("reg_ops", 0600,
- adapter->ixgbe_dbg_adapter, adapter,
- &ixgbe_dbg_reg_ops_fops);
- if (!pfile)
- e_dev_err("debugfs reg_ops for %s failed\n", name);
- pfile = debugfs_create_file("netdev_ops", 0600,
- adapter->ixgbe_dbg_adapter, adapter,
- &ixgbe_dbg_netdev_ops_fops);
- if (!pfile)
- e_dev_err("debugfs netdev_ops for %s failed\n", name);
- } else {
- e_dev_err("debugfs entry for %s failed\n", name);
- }
+ debugfs_create_file("reg_ops", 0600, adapter->ixgbe_dbg_adapter,
+ adapter, &ixgbe_dbg_reg_ops_fops);
+ debugfs_create_file("netdev_ops", 0600, adapter->ixgbe_dbg_adapter,
+ adapter, &ixgbe_dbg_netdev_ops_fops);
}
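
This simplification relies on debugfs being best-effort: creation failures are non-fatal and subsequent calls tolerate an error-valued parent, so return checks add nothing. A minimal module sketch of the same style (demo names, not ixgbe code):

/* "Fire and forget" debugfs usage: no return-value checks; worst case the
 * directory or file is simply absent.
 */
#include <linux/debugfs.h>
#include <linux/module.h>

static struct dentry *demo_root;
static u32 demo_value;

static int __init demo_init(void)
{
        demo_root = debugfs_create_dir("demo_dbg", NULL);
        debugfs_create_u32("value", 0600, demo_root, &demo_value);
        return 0;
}

static void __exit demo_exit(void)
{
        debugfs_remove_recursive(demo_root);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");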
/**
@@ -224,8 +214,6 @@ void ixgbe_dbg_adapter_exit(struct ixgbe_adapter *adapter)
void ixgbe_dbg_init(void)
{
ixgbe_dbg_root = debugfs_create_dir(ixgbe_driver_name, NULL);
- if (ixgbe_dbg_root == NULL)
- pr_err("init of debugfs failed\n");
}
/**
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index 7882148abb43..17b7ae9f46ec 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -1785,7 +1785,7 @@ static bool ixgbe_is_non_eop(struct ixgbe_ring *rx_ring,
static void ixgbe_pull_tail(struct ixgbe_ring *rx_ring,
struct sk_buff *skb)
{
- struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0];
+ skb_frag_t *frag = &skb_shinfo(skb)->frags[0];
unsigned char *va;
unsigned int pull_len;
@@ -1807,7 +1807,7 @@ static void ixgbe_pull_tail(struct ixgbe_ring *rx_ring,
/* update all of the pointers */
skb_frag_size_sub(frag, pull_len);
- frag->page_offset += pull_len;
+ skb_frag_off_add(frag, pull_len);
skb->data_len -= pull_len;
skb->tail += pull_len;
}
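
These hunks are part of a tree-wide move from poking skb_frag_struct fields directly to the skb_frag_off()/skb_frag_size() accessor family. A user-space mock of the idea, with simplified stand-in types rather than the real skb_frag_t:

/* Accessors hide the frag's internal layout; callers adjust offset/size
 * through helpers instead of touching fields.
 */
#include <stdio.h>

typedef struct {
        unsigned int page_offset; /* private in the real skb_frag_t */
        unsigned int size;
} demo_frag_t;

static unsigned int demo_frag_off(const demo_frag_t *f)  { return f->page_offset; }
static unsigned int demo_frag_size(const demo_frag_t *f) { return f->size; }
static void demo_frag_off_add(demo_frag_t *f, int d)     { f->page_offset += d; }
static void demo_frag_size_sub(demo_frag_t *f, int d)    { f->size -= d; }

int main(void)
{
        demo_frag_t frag = { .page_offset = 0, .size = 256 };
        unsigned int pull_len = 64;

        /* mirrors ixgbe_pull_tail(): consume pull_len from the first frag */
        demo_frag_size_sub(&frag, pull_len);
        demo_frag_off_add(&frag, pull_len);
        printf("off=%u size=%u\n", demo_frag_off(&frag), demo_frag_size(&frag));
        return 0;
}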
@@ -1840,11 +1840,11 @@ static void ixgbe_dma_sync_frag(struct ixgbe_ring *rx_ring,
skb_headlen(skb),
DMA_FROM_DEVICE);
} else {
- struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0];
+ skb_frag_t *frag = &skb_shinfo(skb)->frags[0];
dma_sync_single_range_for_cpu(rx_ring->dev,
IXGBE_CB(skb)->dma,
- frag->page_offset,
+ skb_frag_off(frag),
skb_frag_size(frag),
DMA_FROM_DEVICE);
}
@@ -8183,7 +8183,7 @@ static int ixgbe_tx_map(struct ixgbe_ring *tx_ring,
struct sk_buff *skb = first->skb;
struct ixgbe_tx_buffer *tx_buffer;
union ixgbe_adv_tx_desc *tx_desc;
- struct skb_frag_struct *frag;
+ skb_frag_t *frag;
dma_addr_t dma;
unsigned int data_len, size;
u32 tx_flags = first->tx_flags;
@@ -8602,7 +8602,8 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb,
* otherwise try next time
*/
for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
- count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size);
+ count += TXD_USE_COUNT(skb_frag_size(
+ &skb_shinfo(skb)->frags[f]));
if (ixgbe_maybe_stop_tx(tx_ring, count + 3)) {
tx_ring->tx_stats.tx_busy++;
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
index d2b41f9f87f8..8c011d4ce7a9 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
@@ -3949,7 +3949,7 @@ static void ixgbevf_tx_map(struct ixgbevf_ring *tx_ring,
struct sk_buff *skb = first->skb;
struct ixgbevf_tx_buffer *tx_buffer;
union ixgbe_adv_tx_desc *tx_desc;
- struct skb_frag_struct *frag;
+ skb_frag_t *frag;
dma_addr_t dma;
unsigned int data_len, size;
u32 tx_flags = first->tx_flags;
@@ -4134,8 +4134,11 @@ static int ixgbevf_xmit_frame_ring(struct sk_buff *skb,
* otherwise try next time
*/
#if PAGE_SIZE > IXGBE_MAX_DATA_PER_TXD
- for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
- count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size);
+ for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) {
+ skb_frag_t *frag = &skb_shinfo(skb)->frags[f];
+
+ count += TXD_USE_COUNT(skb_frag_size(frag));
+ }
#else
count += skb_shinfo(skb)->nr_frags;
#endif
diff --git a/drivers/net/ethernet/jme.c b/drivers/net/ethernet/jme.c
index 0b668357db4d..6d52cf5ce20e 100644
--- a/drivers/net/ethernet/jme.c
+++ b/drivers/net/ethernet/jme.c
@@ -2030,23 +2030,22 @@ jme_map_tx_skb(struct jme_adapter *jme, struct sk_buff *skb, int idx)
bool hidma = jme->dev->features & NETIF_F_HIGHDMA;
int i, nr_frags = skb_shinfo(skb)->nr_frags;
int mask = jme->tx_ring_mask;
- const struct skb_frag_struct *frag;
u32 len;
int ret = 0;
for (i = 0 ; i < nr_frags ; ++i) {
- frag = &skb_shinfo(skb)->frags[i];
+ const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+
ctxdesc = txdesc + ((idx + i + 2) & (mask));
ctxbi = txbi + ((idx + i + 2) & (mask));
ret = jme_fill_tx_map(jme->pdev, ctxdesc, ctxbi,
- skb_frag_page(frag),
- frag->page_offset, skb_frag_size(frag), hidma);
+ skb_frag_page(frag), skb_frag_off(frag),
+ skb_frag_size(frag), hidma);
if (ret) {
jme_drop_tx_map(jme, idx, i);
goto out;
}
-
}
len = skb_is_nonlinear(skb) ? skb_headlen(skb) : skb->len;
@@ -3193,8 +3192,7 @@ jme_shutdown(struct pci_dev *pdev)
static int
jme_suspend(struct device *dev)
{
- struct pci_dev *pdev = to_pci_dev(dev);
- struct net_device *netdev = pci_get_drvdata(pdev);
+ struct net_device *netdev = dev_get_drvdata(dev);
struct jme_adapter *jme = netdev_priv(netdev);
if (!netif_running(netdev))
@@ -3236,8 +3234,7 @@ jme_suspend(struct device *dev)
static int
jme_resume(struct device *dev)
{
- struct pci_dev *pdev = to_pci_dev(dev);
- struct net_device *netdev = pci_get_drvdata(pdev);
+ struct net_device *netdev = dev_get_drvdata(dev);
struct jme_adapter *jme = netdev_priv(netdev);
if (!netif_running(netdev))
diff --git a/drivers/net/ethernet/lantiq_xrx200.c b/drivers/net/ethernet/lantiq_xrx200.c
index cda641ef89af..900affbdcc0e 100644
--- a/drivers/net/ethernet/lantiq_xrx200.c
+++ b/drivers/net/ethernet/lantiq_xrx200.c
@@ -458,17 +458,11 @@ static int xrx200_probe(struct platform_device *pdev)
}
priv->chan_rx.dma.irq = platform_get_irq_byname(pdev, "rx");
- if (priv->chan_rx.dma.irq < 0) {
- dev_err(dev, "failed to get RX IRQ, %i\n",
- priv->chan_rx.dma.irq);
+ if (priv->chan_rx.dma.irq < 0)
return -ENOENT;
- }
priv->chan_tx.dma.irq = platform_get_irq_byname(pdev, "tx");
- if (priv->chan_tx.dma.irq < 0) {
- dev_err(dev, "failed to get TX IRQ, %i\n",
- priv->chan_tx.dma.irq);
+ if (priv->chan_tx.dma.irq < 0)
return -ENOENT;
- }
/* get the clock */
priv->clk = devm_clk_get(dev, NULL);
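The deleted dev_err() calls duplicated a message the core already emits: by this kernel generation platform_get_irq_byname() logs the failure itself. What remains on the caller side is just the check; as an aside, propagating the helper's own code rather than a flat -ENOENT would also keep -EPROBE_DEFER intact:

	int irq;

	irq = platform_get_irq_byname(pdev, "rx");
	if (irq < 0)
		return irq;	/* the helper already printed an error */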
diff --git a/drivers/net/ethernet/marvell/mv643xx_eth.c b/drivers/net/ethernet/marvell/mv643xx_eth.c
index 88ea5ac83c93..82ea55ae5053 100644
--- a/drivers/net/ethernet/marvell/mv643xx_eth.c
+++ b/drivers/net/ethernet/marvell/mv643xx_eth.c
@@ -659,7 +659,7 @@ static inline unsigned int has_tiny_unaligned_frags(struct sk_buff *skb)
for (frag = 0; frag < skb_shinfo(skb)->nr_frags; frag++) {
const skb_frag_t *fragp = &skb_shinfo(skb)->frags[frag];
- if (skb_frag_size(fragp) <= 8 && fragp->page_offset & 7)
+ if (skb_frag_size(fragp) <= 8 && skb_frag_off(fragp) & 7)
return 1;
}
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index 895bfed26a8a..e49820675c8c 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -2350,10 +2350,10 @@ static int mvneta_tx_frag_process(struct mvneta_port *pp, struct sk_buff *skb,
for (i = 0; i < nr_frags; i++) {
skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
- void *addr = page_address(frag->page.p) + frag->page_offset;
+ void *addr = skb_frag_address(frag);
tx_desc = mvneta_txq_next_desc_get(txq);
- tx_desc->data_size = frag->size;
+ tx_desc->data_size = skb_frag_size(frag);
tx_desc->buf_phys_addr =
dma_map_single(pp->dev->dev.parent, addr,
@@ -4469,7 +4469,6 @@ static int mvneta_port_power_up(struct mvneta_port *pp, int phy_mode)
/* Device initialization routine */
static int mvneta_probe(struct platform_device *pdev)
{
- struct resource *res;
struct device_node *dn = pdev->dev.of_node;
struct device_node *bm_node;
struct mvneta_port *pp;
@@ -4553,8 +4552,7 @@ static int mvneta_probe(struct platform_device *pdev)
if (!IS_ERR(pp->clk_bus))
clk_prepare_enable(pp->clk_bus);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- pp->base = devm_ioremap_resource(&pdev->dev, res);
+ pp->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(pp->base)) {
err = PTR_ERR(pp->base);
goto err_clk;
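devm_platform_ioremap_resource() is an exact fusion of the two-call pattern it replaces here and in the mvneta_bm/mvpp2/pxa168/mtk hunks below. Probe-time shape, with a hypothetical foo_probe():

#include <linux/err.h>
#include <linux/platform_device.h>

static int foo_probe(struct platform_device *pdev)
{
	void __iomem *base;

	/* Same as platform_get_resource(pdev, IORESOURCE_MEM, 0)
	 * followed by devm_ioremap_resource(&pdev->dev, res).
	 */
	base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(base))
		return PTR_ERR(base);

	return 0;
}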
diff --git a/drivers/net/ethernet/marvell/mvneta_bm.c b/drivers/net/ethernet/marvell/mvneta_bm.c
index 82ee2bcca6fd..46c942ef2287 100644
--- a/drivers/net/ethernet/marvell/mvneta_bm.c
+++ b/drivers/net/ethernet/marvell/mvneta_bm.c
@@ -411,15 +411,13 @@ static int mvneta_bm_probe(struct platform_device *pdev)
{
struct device_node *dn = pdev->dev.of_node;
struct mvneta_bm *priv;
- struct resource *res;
int err;
priv = devm_kzalloc(&pdev->dev, sizeof(struct mvneta_bm), GFP_KERNEL);
if (!priv)
return -ENOMEM;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- priv->reg_base = devm_ioremap_resource(&pdev->dev, res);
+ priv->reg_base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(priv->reg_base))
return PTR_ERR(priv->reg_base);
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2.h b/drivers/net/ethernet/marvell/mvpp2/mvpp2.h
index 4d9564ba68f6..ee3bab508ee8 100644
--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2.h
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2.h
@@ -829,9 +829,8 @@ struct mvpp2_pcpu_stats {
/* Per-CPU port control */
struct mvpp2_port_pcpu {
struct hrtimer tx_done_timer;
+ struct net_device *dev;
bool timer_scheduled;
- /* Tasklet for egress finalization */
- struct tasklet_struct tx_done_tasklet;
};
struct mvpp2_queue_vector {
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_debugfs.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_debugfs.c
index 274fb07362cb..4a3baa7e0142 100644
--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_debugfs.c
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_debugfs.c
@@ -452,8 +452,6 @@ static int mvpp2_dbgfs_flow_port_init(struct dentry *parent,
struct dentry *port_dir;
port_dir = debugfs_create_dir(port->dev->name, parent);
- if (IS_ERR(port_dir))
- return PTR_ERR(port_dir);
port_entry = &port->priv->dbgfs_entries->port_flow_entries[port->id];
@@ -480,8 +478,6 @@ static int mvpp2_dbgfs_flow_entry_init(struct dentry *parent,
sprintf(flow_entry_name, "%02d", flow);
flow_entry_dir = debugfs_create_dir(flow_entry_name, parent);
- if (!flow_entry_dir)
- return -ENOMEM;
entry = &priv->dbgfs_entries->flow_entries[flow];
@@ -514,8 +510,6 @@ static int mvpp2_dbgfs_flow_init(struct dentry *parent, struct mvpp2 *priv)
int i, ret;
flow_dir = debugfs_create_dir("flows", parent);
- if (!flow_dir)
- return -ENOMEM;
for (i = 0; i < MVPP2_N_PRS_FLOWS; i++) {
ret = mvpp2_dbgfs_flow_entry_init(flow_dir, priv, i);
@@ -539,8 +533,6 @@ static int mvpp2_dbgfs_prs_entry_init(struct dentry *parent,
sprintf(prs_entry_name, "%03d", tid);
prs_entry_dir = debugfs_create_dir(prs_entry_name, parent);
- if (!prs_entry_dir)
- return -ENOMEM;
entry = &priv->dbgfs_entries->prs_entries[tid];
@@ -578,8 +570,6 @@ static int mvpp2_dbgfs_prs_init(struct dentry *parent, struct mvpp2 *priv)
int i, ret;
prs_dir = debugfs_create_dir("parser", parent);
- if (!prs_dir)
- return -ENOMEM;
for (i = 0; i < MVPP2_PRS_TCAM_SRAM_SIZE; i++) {
ret = mvpp2_dbgfs_prs_entry_init(prs_dir, priv, i);
@@ -688,8 +678,6 @@ static int mvpp2_dbgfs_port_init(struct dentry *parent,
struct dentry *port_dir;
port_dir = debugfs_create_dir(port->dev->name, parent);
- if (IS_ERR(port_dir))
- return PTR_ERR(port_dir);
debugfs_create_file("parser_entries", 0444, port_dir, port,
&mvpp2_dbgfs_port_parser_fops);
@@ -716,15 +704,10 @@ void mvpp2_dbgfs_init(struct mvpp2 *priv, const char *name)
int ret, i;
mvpp2_root = debugfs_lookup(MVPP2_DRIVER_NAME, NULL);
- if (!mvpp2_root) {
+ if (!mvpp2_root)
mvpp2_root = debugfs_create_dir(MVPP2_DRIVER_NAME, NULL);
- if (IS_ERR(mvpp2_root))
- return;
- }
mvpp2_dir = debugfs_create_dir(name, mvpp2_root);
- if (IS_ERR(mvpp2_dir))
- return;
priv->dbgfs_dir = mvpp2_dir;
priv->dbgfs_entries = kzalloc(sizeof(*priv->dbgfs_entries), GFP_KERNEL);
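The removed checks follow the debugfs rule that callers should ignore return values: debugfs_create_dir() hands back an ERR_PTR on failure, and passing that ERR_PTR as a parent makes the follow-up create calls fail quietly instead of oopsing. Sketch; priv and foo_stats_fops are stand-ins:

	struct dentry *dir;

	dir = debugfs_create_dir("foo", NULL);
	/* no IS_ERR()/NULL check on purpose: an ERR_PTR parent just
	 * propagates through the next call
	 */
	debugfs_create_file("stats", 0444, dir, priv, &foo_stats_fops);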
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
index ccdd47f3b8fb..12e799e99803 100644
--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
@@ -2651,31 +2651,21 @@ handled:
return IRQ_HANDLED;
}
-static void mvpp2_timer_set(struct mvpp2_port_pcpu *port_pcpu)
-{
- ktime_t interval;
-
- if (!port_pcpu->timer_scheduled) {
- port_pcpu->timer_scheduled = true;
- interval = MVPP2_TXDONE_HRTIMER_PERIOD_NS;
- hrtimer_start(&port_pcpu->tx_done_timer, interval,
- HRTIMER_MODE_REL_PINNED);
- }
-}
-
-static void mvpp2_tx_proc_cb(unsigned long data)
+static enum hrtimer_restart mvpp2_hr_timer_cb(struct hrtimer *timer)
{
- struct net_device *dev = (struct net_device *)data;
- struct mvpp2_port *port = netdev_priv(dev);
+ struct net_device *dev;
+ struct mvpp2_port *port;
struct mvpp2_port_pcpu *port_pcpu;
unsigned int tx_todo, cause;
- port_pcpu = per_cpu_ptr(port->pcpu,
- mvpp2_cpu_to_thread(port->priv, smp_processor_id()));
+ port_pcpu = container_of(timer, struct mvpp2_port_pcpu, tx_done_timer);
+ dev = port_pcpu->dev;
if (!netif_running(dev))
- return;
+ return HRTIMER_NORESTART;
+
port_pcpu->timer_scheduled = false;
+ port = netdev_priv(dev);
/* Process all the Tx queues */
cause = (1 << port->ntxqs) - 1;
@@ -2683,18 +2673,13 @@ static void mvpp2_tx_proc_cb(unsigned long data)
mvpp2_cpu_to_thread(port->priv, smp_processor_id()));
/* Set the timer in case not all the packets were processed */
- if (tx_todo)
- mvpp2_timer_set(port_pcpu);
-}
-
-static enum hrtimer_restart mvpp2_hr_timer_cb(struct hrtimer *timer)
-{
- struct mvpp2_port_pcpu *port_pcpu = container_of(timer,
- struct mvpp2_port_pcpu,
- tx_done_timer);
-
- tasklet_schedule(&port_pcpu->tx_done_tasklet);
+ if (tx_todo && !port_pcpu->timer_scheduled) {
+ port_pcpu->timer_scheduled = true;
+ hrtimer_forward_now(&port_pcpu->tx_done_timer,
+ MVPP2_TXDONE_HRTIMER_PERIOD_NS);
+ return HRTIMER_RESTART;
+ }
return HRTIMER_NORESTART;
}
@@ -2923,14 +2908,15 @@ static int mvpp2_tx_frag_process(struct mvpp2_port *port, struct sk_buff *skb,
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
- void *addr = page_address(frag->page.p) + frag->page_offset;
+ void *addr = skb_frag_address(frag);
tx_desc = mvpp2_txq_next_desc_get(aggr_txq);
mvpp2_txdesc_txq_set(port, tx_desc, txq->id);
- mvpp2_txdesc_size_set(port, tx_desc, frag->size);
+ mvpp2_txdesc_size_set(port, tx_desc, skb_frag_size(frag));
buf_dma_addr = dma_map_single(port->dev->dev.parent, addr,
- frag->size, DMA_TO_DEVICE);
+ skb_frag_size(frag),
+ DMA_TO_DEVICE);
if (dma_mapping_error(port->dev->dev.parent, buf_dma_addr)) {
mvpp2_txq_desc_put(txq);
goto cleanup;
@@ -3181,7 +3167,12 @@ out:
txq_pcpu->count > 0) {
struct mvpp2_port_pcpu *port_pcpu = per_cpu_ptr(port->pcpu, thread);
- mvpp2_timer_set(port_pcpu);
+ if (!port_pcpu->timer_scheduled) {
+ port_pcpu->timer_scheduled = true;
+ hrtimer_start(&port_pcpu->tx_done_timer,
+ MVPP2_TXDONE_HRTIMER_PERIOD_NS,
+ HRTIMER_MODE_REL_PINNED_SOFT);
+ }
}
if (test_bit(thread, &port->priv->lock_map))
@@ -3618,7 +3609,6 @@ static int mvpp2_stop(struct net_device *dev)
hrtimer_cancel(&port_pcpu->tx_done_timer);
port_pcpu->timer_scheduled = false;
- tasklet_kill(&port_pcpu->tx_done_tasklet);
}
}
mvpp2_cleanup_rxqs(port);
@@ -5010,7 +5000,6 @@ static int mvpp2_port_probe(struct platform_device *pdev,
struct device_node *port_node = to_of_node(port_fwnode);
netdev_features_t features;
struct net_device *dev;
- struct resource *res;
struct phylink *phylink;
char *mac_from = "";
unsigned int ntxqs, nrxqs, thread;
@@ -5114,8 +5103,7 @@ static int mvpp2_port_probe(struct platform_device *pdev,
port->comphy = comphy;
if (priv->hw_version == MVPP21) {
- res = platform_get_resource(pdev, IORESOURCE_MEM, 2 + id);
- port->base = devm_ioremap_resource(&pdev->dev, res);
+ port->base = devm_platform_ioremap_resource(pdev, 2 + id);
if (IS_ERR(port->base)) {
err = PTR_ERR(port->base);
goto err_free_irq;
@@ -5184,13 +5172,10 @@ static int mvpp2_port_probe(struct platform_device *pdev,
port_pcpu = per_cpu_ptr(port->pcpu, thread);
hrtimer_init(&port_pcpu->tx_done_timer, CLOCK_MONOTONIC,
- HRTIMER_MODE_REL_PINNED);
+ HRTIMER_MODE_REL_PINNED_SOFT);
port_pcpu->tx_done_timer.function = mvpp2_hr_timer_cb;
port_pcpu->timer_scheduled = false;
-
- tasklet_init(&port_pcpu->tx_done_tasklet,
- mvpp2_tx_proc_cb,
- (unsigned long)dev);
+ port_pcpu->dev = dev;
}
}
@@ -5544,14 +5529,12 @@ static int mvpp2_probe(struct platform_device *pdev)
if (priv->hw_version == MVPP21)
queue_mode = MVPP2_QDIST_SINGLE_MODE;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- base = devm_ioremap_resource(&pdev->dev, res);
+ base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(base))
return PTR_ERR(base);
if (priv->hw_version == MVPP21) {
- res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
- priv->lms_base = devm_ioremap_resource(&pdev->dev, res);
+ priv->lms_base = devm_platform_ioremap_resource(pdev, 1);
if (IS_ERR(priv->lms_base))
return PTR_ERR(priv->lms_base);
} else {
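The mvpp2 rework above folds the tasklet body into the hrtimer callback itself: HRTIMER_MODE_REL_PINNED_SOFT runs the callback in softirq context, exactly where the tasklet used to run, and hrtimer_forward_now() plus HRTIMER_RESTART re-arms without a separate mvpp2_timer_set()-style helper. A minimal sketch of that pattern; the foo_* names and the 1 ms period are hypothetical:

#include <linux/hrtimer.h>
#include <linux/ktime.h>

struct foo_pcpu {
	struct hrtimer timer;
	bool scheduled;
};

static bool foo_more_work(struct foo_pcpu *pcpu)	/* stub */
{
	return false;
}

static enum hrtimer_restart foo_timer_cb(struct hrtimer *t)
{
	struct foo_pcpu *pcpu = container_of(t, struct foo_pcpu, timer);

	pcpu->scheduled = false;
	/* ... work the old tasklet did; we are in softirq context ... */

	if (foo_more_work(pcpu)) {
		pcpu->scheduled = true;
		hrtimer_forward_now(t, ns_to_ktime(NSEC_PER_MSEC));
		return HRTIMER_RESTART;
	}
	return HRTIMER_NORESTART;
}

/* setup, e.g. in probe:
 *	hrtimer_init(&pcpu->timer, CLOCK_MONOTONIC,
 *		     HRTIMER_MODE_REL_PINNED_SOFT);
 *	pcpu->timer.function = foo_timer_cb;
 */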
diff --git a/drivers/net/ethernet/marvell/pxa168_eth.c b/drivers/net/ethernet/marvell/pxa168_eth.c
index 3aa998797bc1..51b77c2de400 100644
--- a/drivers/net/ethernet/marvell/pxa168_eth.c
+++ b/drivers/net/ethernet/marvell/pxa168_eth.c
@@ -1425,8 +1425,7 @@ static int pxa168_eth_probe(struct platform_device *pdev)
pep->dev = dev;
pep->clk = clk;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- pep->base = devm_ioremap_resource(&pdev->dev, res);
+ pep->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(pep->base)) {
err = -ENOMEM;
goto err_netdev;
diff --git a/drivers/net/ethernet/marvell/skge.c b/drivers/net/ethernet/marvell/skge.c
index 9ac854c2b371..0a2ec387a482 100644
--- a/drivers/net/ethernet/marvell/skge.c
+++ b/drivers/net/ethernet/marvell/skge.c
@@ -3731,7 +3731,6 @@ static int skge_device_event(struct notifier_block *unused,
{
struct net_device *dev = netdev_notifier_info_to_dev(ptr);
struct skge_port *skge;
- struct dentry *d;
if (dev->netdev_ops->ndo_open != &skge_up || !skge_debug)
goto done;
@@ -3739,33 +3738,20 @@ static int skge_device_event(struct notifier_block *unused,
skge = netdev_priv(dev);
switch (event) {
case NETDEV_CHANGENAME:
- if (skge->debugfs) {
- d = debugfs_rename(skge_debug, skge->debugfs,
- skge_debug, dev->name);
- if (d)
- skge->debugfs = d;
- else {
- netdev_info(dev, "rename failed\n");
- debugfs_remove(skge->debugfs);
- }
- }
+ if (skge->debugfs)
+ skge->debugfs = debugfs_rename(skge_debug,
+ skge->debugfs,
+ skge_debug, dev->name);
break;
case NETDEV_GOING_DOWN:
- if (skge->debugfs) {
- debugfs_remove(skge->debugfs);
- skge->debugfs = NULL;
- }
+ debugfs_remove(skge->debugfs);
+ skge->debugfs = NULL;
break;
case NETDEV_UP:
- d = debugfs_create_file(dev->name, 0444,
- skge_debug, dev,
- &skge_debug_fops);
- if (!d || IS_ERR(d))
- netdev_info(dev, "debugfs create failed\n");
- else
- skge->debugfs = d;
+ skge->debugfs = debugfs_create_file(dev->name, 0444, skge_debug,
+ dev, &skge_debug_fops);
break;
}
@@ -3780,15 +3766,8 @@ static struct notifier_block skge_notifier = {
static __init void skge_debug_init(void)
{
- struct dentry *ent;
+ skge_debug = debugfs_create_dir("skge", NULL);
- ent = debugfs_create_dir("skge", NULL);
- if (!ent || IS_ERR(ent)) {
- pr_info("debugfs create directory failed\n");
- return;
- }
-
- skge_debug = ent;
register_netdevice_notifier(&skge_notifier);
}
@@ -4078,8 +4057,7 @@ static void skge_remove(struct pci_dev *pdev)
#ifdef CONFIG_PM_SLEEP
static int skge_suspend(struct device *dev)
{
- struct pci_dev *pdev = to_pci_dev(dev);
- struct skge_hw *hw = pci_get_drvdata(pdev);
+ struct skge_hw *hw = dev_get_drvdata(dev);
int i;
if (!hw)
@@ -4103,8 +4081,7 @@ static int skge_suspend(struct device *dev)
static int skge_resume(struct device *dev)
{
- struct pci_dev *pdev = to_pci_dev(dev);
- struct skge_hw *hw = pci_get_drvdata(pdev);
+ struct skge_hw *hw = dev_get_drvdata(dev);
int i, err;
if (!hw)
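The skge simplification leans on one more debugfs property: debugfs_remove() silently accepts NULL and ERR_PTR, so whatever debugfs_rename()/debugfs_create_file() returned can be stored and torn down unchecked. Shape of the pattern (foo is a stand-in):

	foo->debugfs = debugfs_rename(parent, foo->debugfs,
				      parent, new_name);
	/* ... later ... */
	debugfs_remove(foo->debugfs);	/* NULL/ERR_PTR safe */
	foo->debugfs = NULL;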
diff --git a/drivers/net/ethernet/marvell/sky2.c b/drivers/net/ethernet/marvell/sky2.c
index e0363870f3a5..5f56ee83e3b1 100644
--- a/drivers/net/ethernet/marvell/sky2.c
+++ b/drivers/net/ethernet/marvell/sky2.c
@@ -5174,8 +5174,7 @@ static void sky2_remove(struct pci_dev *pdev)
static int sky2_suspend(struct device *dev)
{
- struct pci_dev *pdev = to_pci_dev(dev);
- struct sky2_hw *hw = pci_get_drvdata(pdev);
+ struct sky2_hw *hw = dev_get_drvdata(dev);
int i;
if (!hw)
diff --git a/drivers/net/ethernet/mediatek/Kconfig b/drivers/net/ethernet/mediatek/Kconfig
index 1f7fff81f24d..4968352ba188 100644
--- a/drivers/net/ethernet/mediatek/Kconfig
+++ b/drivers/net/ethernet/mediatek/Kconfig
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0-only
config NET_VENDOR_MEDIATEK
bool "MediaTek ethernet driver"
- depends on ARCH_MEDIATEK || SOC_MT7621
+ depends on ARCH_MEDIATEK || SOC_MT7621 || SOC_MT7620
---help---
If you have a Mediatek SoC with ethernet, say Y.
@@ -9,7 +9,7 @@ if NET_VENDOR_MEDIATEK
config NET_MEDIATEK_SOC
tristate "MediaTek SoC Gigabit Ethernet support"
- select PHYLIB
+ select PHYLINK
---help---
This driver supports the gigabit ethernet MACs in the
MediaTek SoC family.
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_path.c b/drivers/net/ethernet/mediatek/mtk_eth_path.c
index 7f05880cf9ef..ef11cf3d1ccc 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_path.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_path.c
@@ -239,10 +239,9 @@ out:
return err;
}
-static int mtk_gmac_sgmii_path_setup(struct mtk_eth *eth, int mac_id)
+int mtk_gmac_sgmii_path_setup(struct mtk_eth *eth, int mac_id)
{
- unsigned int val = 0;
- int sid, err, path;
+ int err, path;
path = (mac_id == 0) ? MTK_ETH_PATH_GMAC1_SGMII :
MTK_ETH_PATH_GMAC2_SGMII;
@@ -252,33 +251,10 @@ static int mtk_gmac_sgmii_path_setup(struct mtk_eth *eth, int mac_id)
if (err)
return err;
- /* The path GMAC to SGMII will be enabled once the SGMIISYS is being
- * setup done.
- */
- regmap_read(eth->ethsys, ETHSYS_SYSCFG0, &val);
-
- regmap_update_bits(eth->ethsys, ETHSYS_SYSCFG0,
- SYSCFG0_SGMII_MASK, ~(u32)SYSCFG0_SGMII_MASK);
-
- /* Decide how GMAC and SGMIISYS be mapped */
- sid = (MTK_HAS_CAPS(eth->soc->caps, MTK_SHARED_SGMII)) ? 0 : mac_id;
-
- /* Setup SGMIISYS with the determined property */
- if (MTK_HAS_FLAGS(eth->sgmii->flags[sid], MTK_SGMII_PHYSPEED_AN))
- err = mtk_sgmii_setup_mode_an(eth->sgmii, sid);
- else
- err = mtk_sgmii_setup_mode_force(eth->sgmii, sid);
-
- if (err)
- return err;
-
- regmap_update_bits(eth->ethsys, ETHSYS_SYSCFG0,
- SYSCFG0_SGMII_MASK, val);
-
return 0;
}
-static int mtk_gmac_gephy_path_setup(struct mtk_eth *eth, int mac_id)
+int mtk_gmac_gephy_path_setup(struct mtk_eth *eth, int mac_id)
{
int err, path = 0;
@@ -296,7 +272,7 @@ static int mtk_gmac_gephy_path_setup(struct mtk_eth *eth, int mac_id)
return 0;
}
-static int mtk_gmac_rgmii_path_setup(struct mtk_eth *eth, int mac_id)
+int mtk_gmac_rgmii_path_setup(struct mtk_eth *eth, int mac_id)
{
int err, path;
@@ -311,42 +287,3 @@ static int mtk_gmac_rgmii_path_setup(struct mtk_eth *eth, int mac_id)
return 0;
}
-int mtk_setup_hw_path(struct mtk_eth *eth, int mac_id, int phymode)
-{
- int err;
-
- switch (phymode) {
- case PHY_INTERFACE_MODE_TRGMII:
- case PHY_INTERFACE_MODE_RGMII_TXID:
- case PHY_INTERFACE_MODE_RGMII_RXID:
- case PHY_INTERFACE_MODE_RGMII_ID:
- case PHY_INTERFACE_MODE_RGMII:
- case PHY_INTERFACE_MODE_MII:
- case PHY_INTERFACE_MODE_REVMII:
- case PHY_INTERFACE_MODE_RMII:
- if (MTK_HAS_CAPS(eth->soc->caps, MTK_RGMII)) {
- err = mtk_gmac_rgmii_path_setup(eth, mac_id);
- if (err)
- return err;
- }
- break;
- case PHY_INTERFACE_MODE_SGMII:
- if (MTK_HAS_CAPS(eth->soc->caps, MTK_SGMII)) {
- err = mtk_gmac_sgmii_path_setup(eth, mac_id);
- if (err)
- return err;
- }
- break;
- case PHY_INTERFACE_MODE_GMII:
- if (MTK_HAS_CAPS(eth->soc->caps, MTK_GEPHY)) {
- err = mtk_gmac_gephy_path_setup(eth, mac_id);
- if (err)
- return err;
- }
- break;
- default:
- break;
- }
-
- return 0;
-}
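Dropping static from the three path-setup helpers implies matching prototypes visible to mtk_eth_soc.c; the header is outside this excerpt, so take these declarations (presumably in mtk_eth_soc.h) as an assumption:

int mtk_gmac_rgmii_path_setup(struct mtk_eth *eth, int mac_id);
int mtk_gmac_sgmii_path_setup(struct mtk_eth *eth, int mac_id);
int mtk_gmac_gephy_path_setup(struct mtk_eth *eth, int mac_id);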
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
index c39d7f4ab1d4..c61069340f4f 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
@@ -18,6 +18,7 @@
#include <linux/tcp.h>
#include <linux/interrupt.h>
#include <linux/pinctrl/devinfo.h>
+#include <linux/phylink.h>
#include "mtk_eth_soc.h"
@@ -186,165 +187,339 @@ static void mtk_gmac0_rgmii_adjust(struct mtk_eth *eth, int speed)
mtk_w32(eth, val, TRGMII_TCK_CTRL);
}
-static void mtk_phy_link_adjust(struct net_device *dev)
+static void mtk_mac_config(struct phylink_config *config, unsigned int mode,
+ const struct phylink_link_state *state)
{
- struct mtk_mac *mac = netdev_priv(dev);
- u16 lcl_adv = 0, rmt_adv = 0;
- u8 flowctrl;
- u32 mcr = MAC_MCR_MAX_RX_1536 | MAC_MCR_IPG_CFG |
- MAC_MCR_FORCE_MODE | MAC_MCR_TX_EN |
- MAC_MCR_RX_EN | MAC_MCR_BACKOFF_EN |
- MAC_MCR_BACKPR_EN;
+ struct mtk_mac *mac = container_of(config, struct mtk_mac,
+ phylink_config);
+ struct mtk_eth *eth = mac->hw;
+ u32 mcr_cur, mcr_new, sid;
+ int val, ge_mode, err;
+
+ /* MT76x8 has no hardware settings for the MAC */
+ if (!MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628) &&
+ mac->interface != state->interface) {
+ /* Setup soc pin functions */
+ switch (state->interface) {
+ case PHY_INTERFACE_MODE_TRGMII:
+ if (mac->id)
+ goto err_phy;
+ if (!MTK_HAS_CAPS(mac->hw->soc->caps,
+ MTK_GMAC1_TRGMII))
+ goto err_phy;
+ /* fall through */
+ case PHY_INTERFACE_MODE_RGMII_TXID:
+ case PHY_INTERFACE_MODE_RGMII_RXID:
+ case PHY_INTERFACE_MODE_RGMII_ID:
+ case PHY_INTERFACE_MODE_RGMII:
+ case PHY_INTERFACE_MODE_MII:
+ case PHY_INTERFACE_MODE_REVMII:
+ case PHY_INTERFACE_MODE_RMII:
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_RGMII)) {
+ err = mtk_gmac_rgmii_path_setup(eth, mac->id);
+ if (err)
+ goto init_err;
+ }
+ break;
+ case PHY_INTERFACE_MODE_1000BASEX:
+ case PHY_INTERFACE_MODE_2500BASEX:
+ case PHY_INTERFACE_MODE_SGMII:
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_SGMII)) {
+ err = mtk_gmac_sgmii_path_setup(eth, mac->id);
+ if (err)
+ goto init_err;
+ }
+ break;
+ case PHY_INTERFACE_MODE_GMII:
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_GEPHY)) {
+ err = mtk_gmac_gephy_path_setup(eth, mac->id);
+ if (err)
+ goto init_err;
+ }
+ break;
+ default:
+ goto err_phy;
+ }
- if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
+ /* Setup clock for 1st gmac */
+ if (!mac->id && state->interface != PHY_INTERFACE_MODE_SGMII &&
+ !phy_interface_mode_is_8023z(state->interface) &&
+ MTK_HAS_CAPS(mac->hw->soc->caps, MTK_GMAC1_TRGMII)) {
+ if (MTK_HAS_CAPS(mac->hw->soc->caps,
+ MTK_TRGMII_MT7621_CLK)) {
+ if (mt7621_gmac0_rgmii_adjust(mac->hw,
+ state->interface))
+ goto err_phy;
+ } else {
+ if (state->interface !=
+ PHY_INTERFACE_MODE_TRGMII)
+ mtk_gmac0_rgmii_adjust(mac->hw,
+ state->speed);
+ }
+ }
+
+ ge_mode = 0;
+ switch (state->interface) {
+ case PHY_INTERFACE_MODE_MII:
+ ge_mode = 1;
+ break;
+ case PHY_INTERFACE_MODE_REVMII:
+ ge_mode = 2;
+ break;
+ case PHY_INTERFACE_MODE_RMII:
+ if (mac->id)
+ goto err_phy;
+ ge_mode = 3;
+ break;
+ default:
+ break;
+ }
+
+ /* put the gmac into the right mode */
+ regmap_read(eth->ethsys, ETHSYS_SYSCFG0, &val);
+ val &= ~SYSCFG0_GE_MODE(SYSCFG0_GE_MASK, mac->id);
+ val |= SYSCFG0_GE_MODE(ge_mode, mac->id);
+ regmap_write(eth->ethsys, ETHSYS_SYSCFG0, val);
+
+ mac->interface = state->interface;
+ }
+
+ /* SGMII */
+ if (state->interface == PHY_INTERFACE_MODE_SGMII ||
+ phy_interface_mode_is_8023z(state->interface)) {
+ /* The path from GMAC to SGMII will be enabled once the SGMIISYS
+ * setup is done.
+ */
+ regmap_read(eth->ethsys, ETHSYS_SYSCFG0, &val);
+
+ regmap_update_bits(eth->ethsys, ETHSYS_SYSCFG0,
+ SYSCFG0_SGMII_MASK,
+ ~(u32)SYSCFG0_SGMII_MASK);
+
+ /* Decide how GMAC and SGMIISYS be mapped */
+ sid = (MTK_HAS_CAPS(eth->soc->caps, MTK_SHARED_SGMII)) ?
+ 0 : mac->id;
+
+ /* Setup SGMIISYS with the determined property */
+ if (state->interface != PHY_INTERFACE_MODE_SGMII)
+ err = mtk_sgmii_setup_mode_force(eth->sgmii, sid,
+ state);
+ else if (phylink_autoneg_inband(mode))
+ err = mtk_sgmii_setup_mode_an(eth->sgmii, sid);
+
+ if (err)
+ goto init_err;
+
+ regmap_update_bits(eth->ethsys, ETHSYS_SYSCFG0,
+ SYSCFG0_SGMII_MASK, val);
+ } else if (phylink_autoneg_inband(mode)) {
+ dev_err(eth->dev,
+ "In-band mode not supported in non SGMII mode!\n");
return;
+ }
- switch (dev->phydev->speed) {
+ /* Setup gmac */
+ mcr_cur = mtk_r32(mac->hw, MTK_MAC_MCR(mac->id));
+ mcr_new = mcr_cur;
+ mcr_new &= ~(MAC_MCR_SPEED_100 | MAC_MCR_SPEED_1000 |
+ MAC_MCR_FORCE_DPX | MAC_MCR_FORCE_TX_FC |
+ MAC_MCR_FORCE_RX_FC);
+ mcr_new |= MAC_MCR_MAX_RX_1536 | MAC_MCR_IPG_CFG | MAC_MCR_FORCE_MODE |
+ MAC_MCR_BACKOFF_EN | MAC_MCR_BACKPR_EN | MAC_MCR_FORCE_LINK;
+
+ switch (state->speed) {
+ case SPEED_2500:
case SPEED_1000:
- mcr |= MAC_MCR_SPEED_1000;
+ mcr_new |= MAC_MCR_SPEED_1000;
break;
case SPEED_100:
- mcr |= MAC_MCR_SPEED_100;
+ mcr_new |= MAC_MCR_SPEED_100;
break;
}
-
- if (MTK_HAS_CAPS(mac->hw->soc->caps, MTK_GMAC1_TRGMII) && !mac->id) {
- if (MTK_HAS_CAPS(mac->hw->soc->caps, MTK_TRGMII_MT7621_CLK)) {
- if (mt7621_gmac0_rgmii_adjust(mac->hw,
- dev->phydev->interface))
- return;
- } else {
- if (!mac->trgmii)
- mtk_gmac0_rgmii_adjust(mac->hw,
- dev->phydev->speed);
- }
+ if (state->duplex == DUPLEX_FULL) {
+ mcr_new |= MAC_MCR_FORCE_DPX;
+ if (state->pause & MLO_PAUSE_TX)
+ mcr_new |= MAC_MCR_FORCE_TX_FC;
+ if (state->pause & MLO_PAUSE_RX)
+ mcr_new |= MAC_MCR_FORCE_RX_FC;
}
- if (dev->phydev->link)
- mcr |= MAC_MCR_FORCE_LINK;
+ /* Only update control register when needed! */
+ if (mcr_new != mcr_cur)
+ mtk_w32(mac->hw, mcr_new, MTK_MAC_MCR(mac->id));
- if (dev->phydev->duplex) {
- mcr |= MAC_MCR_FORCE_DPX;
+ return;
- if (dev->phydev->pause)
- rmt_adv = LPA_PAUSE_CAP;
- if (dev->phydev->asym_pause)
- rmt_adv |= LPA_PAUSE_ASYM;
+err_phy:
+ dev_err(eth->dev, "%s: GMAC%d mode %s not supported!\n", __func__,
+ mac->id, phy_modes(state->interface));
+ return;
- lcl_adv = linkmode_adv_to_lcl_adv_t(dev->phydev->advertising);
- flowctrl = mii_resolve_flowctrl_fdx(lcl_adv, rmt_adv);
+init_err:
+ dev_err(eth->dev, "%s: GMAC%d mode %s err: %d!\n", __func__,
+ mac->id, phy_modes(state->interface), err);
+}
- if (flowctrl & FLOW_CTRL_TX)
- mcr |= MAC_MCR_FORCE_TX_FC;
- if (flowctrl & FLOW_CTRL_RX)
- mcr |= MAC_MCR_FORCE_RX_FC;
+static int mtk_mac_link_state(struct phylink_config *config,
+ struct phylink_link_state *state)
+{
+ struct mtk_mac *mac = container_of(config, struct mtk_mac,
+ phylink_config);
+ u32 pmsr = mtk_r32(mac->hw, MTK_MAC_MSR(mac->id));
- netif_dbg(mac->hw, link, dev, "rx pause %s, tx pause %s\n",
- flowctrl & FLOW_CTRL_RX ? "enabled" : "disabled",
- flowctrl & FLOW_CTRL_TX ? "enabled" : "disabled");
+ state->link = (pmsr & MAC_MSR_LINK);
+ state->duplex = (pmsr & MAC_MSR_DPX) >> 1;
+
+ switch (pmsr & (MAC_MSR_SPEED_1000 | MAC_MSR_SPEED_100)) {
+ case 0:
+ state->speed = SPEED_10;
+ break;
+ case MAC_MSR_SPEED_100:
+ state->speed = SPEED_100;
+ break;
+ case MAC_MSR_SPEED_1000:
+ state->speed = SPEED_1000;
+ break;
+ default:
+ state->speed = SPEED_UNKNOWN;
+ break;
}
- mtk_w32(mac->hw, mcr, MTK_MAC_MCR(mac->id));
+ state->pause &= (MLO_PAUSE_RX | MLO_PAUSE_TX);
+ if (pmsr & MAC_MSR_RX_FC)
+ state->pause |= MLO_PAUSE_RX;
+ if (pmsr & MAC_MSR_TX_FC)
+ state->pause |= MLO_PAUSE_TX;
- if (!of_phy_is_fixed_link(mac->of_node))
- phy_print_status(dev->phydev);
+ return 1;
}
-static int mtk_phy_connect_node(struct mtk_eth *eth, struct mtk_mac *mac,
- struct device_node *phy_node)
+static void mtk_mac_an_restart(struct phylink_config *config)
{
- struct phy_device *phydev;
- int phy_mode;
-
- phy_mode = of_get_phy_mode(phy_node);
- if (phy_mode < 0) {
- dev_err(eth->dev, "incorrect phy-mode %d\n", phy_mode);
- return -EINVAL;
- }
+ struct mtk_mac *mac = container_of(config, struct mtk_mac,
+ phylink_config);
- phydev = of_phy_connect(eth->netdev[mac->id], phy_node,
- mtk_phy_link_adjust, 0, phy_mode);
- if (!phydev) {
- dev_err(eth->dev, "could not connect to PHY\n");
- return -ENODEV;
- }
+ mtk_sgmii_restart_an(mac->hw, mac->id);
+}
- dev_info(eth->dev,
- "connected mac %d to PHY at %s [uid=%08x, driver=%s]\n",
- mac->id, phydev_name(phydev), phydev->phy_id,
- phydev->drv->name);
+static void mtk_mac_link_down(struct phylink_config *config, unsigned int mode,
+ phy_interface_t interface)
+{
+ struct mtk_mac *mac = container_of(config, struct mtk_mac,
+ phylink_config);
+ u32 mcr = mtk_r32(mac->hw, MTK_MAC_MCR(mac->id));
- return 0;
+ mcr &= ~(MAC_MCR_TX_EN | MAC_MCR_RX_EN);
+ mtk_w32(mac->hw, mcr, MTK_MAC_MCR(mac->id));
}
-static int mtk_phy_connect(struct net_device *dev)
+static void mtk_mac_link_up(struct phylink_config *config, unsigned int mode,
+ phy_interface_t interface,
+ struct phy_device *phy)
{
- struct mtk_mac *mac = netdev_priv(dev);
- struct mtk_eth *eth;
- struct device_node *np;
- u32 val;
- int err;
+ struct mtk_mac *mac = container_of(config, struct mtk_mac,
+ phylink_config);
+ u32 mcr = mtk_r32(mac->hw, MTK_MAC_MCR(mac->id));
- eth = mac->hw;
- np = of_parse_phandle(mac->of_node, "phy-handle", 0);
- if (!np && of_phy_is_fixed_link(mac->of_node))
- if (!of_phy_register_fixed_link(mac->of_node))
- np = of_node_get(mac->of_node);
- if (!np)
- return -ENODEV;
+ mcr |= MAC_MCR_TX_EN | MAC_MCR_RX_EN;
+ mtk_w32(mac->hw, mcr, MTK_MAC_MCR(mac->id));
+}
- err = mtk_setup_hw_path(eth, mac->id, of_get_phy_mode(np));
- if (err)
- goto err_phy;
+static void mtk_validate(struct phylink_config *config,
+ unsigned long *supported,
+ struct phylink_link_state *state)
+{
+ struct mtk_mac *mac = container_of(config, struct mtk_mac,
+ phylink_config);
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
+
+ if (state->interface != PHY_INTERFACE_MODE_NA &&
+ state->interface != PHY_INTERFACE_MODE_MII &&
+ state->interface != PHY_INTERFACE_MODE_GMII &&
+ !(MTK_HAS_CAPS(mac->hw->soc->caps, MTK_RGMII) &&
+ phy_interface_mode_is_rgmii(state->interface)) &&
+ !(MTK_HAS_CAPS(mac->hw->soc->caps, MTK_TRGMII) &&
+ !mac->id && state->interface == PHY_INTERFACE_MODE_TRGMII) &&
+ !(MTK_HAS_CAPS(mac->hw->soc->caps, MTK_SGMII) &&
+ (state->interface == PHY_INTERFACE_MODE_SGMII ||
+ phy_interface_mode_is_8023z(state->interface)))) {
+ linkmode_zero(supported);
+ return;
+ }
+
+ phylink_set_port_modes(mask);
+ phylink_set(mask, Autoneg);
- mac->ge_mode = 0;
- switch (of_get_phy_mode(np)) {
+ switch (state->interface) {
case PHY_INTERFACE_MODE_TRGMII:
- mac->trgmii = true;
- case PHY_INTERFACE_MODE_RGMII_TXID:
- case PHY_INTERFACE_MODE_RGMII_RXID:
- case PHY_INTERFACE_MODE_RGMII_ID:
+ phylink_set(mask, 1000baseT_Full);
+ break;
+ case PHY_INTERFACE_MODE_1000BASEX:
+ case PHY_INTERFACE_MODE_2500BASEX:
+ phylink_set(mask, 1000baseX_Full);
+ phylink_set(mask, 2500baseX_Full);
+ break;
+ case PHY_INTERFACE_MODE_GMII:
case PHY_INTERFACE_MODE_RGMII:
+ case PHY_INTERFACE_MODE_RGMII_ID:
+ case PHY_INTERFACE_MODE_RGMII_RXID:
+ case PHY_INTERFACE_MODE_RGMII_TXID:
+ phylink_set(mask, 1000baseT_Half);
+ /* fall through */
case PHY_INTERFACE_MODE_SGMII:
- break;
+ phylink_set(mask, 1000baseT_Full);
+ phylink_set(mask, 1000baseX_Full);
+ /* fall through */
case PHY_INTERFACE_MODE_MII:
- case PHY_INTERFACE_MODE_GMII:
- mac->ge_mode = 1;
- break;
- case PHY_INTERFACE_MODE_REVMII:
- mac->ge_mode = 2;
- break;
case PHY_INTERFACE_MODE_RMII:
- if (!mac->id)
- goto err_phy;
- mac->ge_mode = 3;
- break;
+ case PHY_INTERFACE_MODE_REVMII:
+ case PHY_INTERFACE_MODE_NA:
default:
- goto err_phy;
+ phylink_set(mask, 10baseT_Half);
+ phylink_set(mask, 10baseT_Full);
+ phylink_set(mask, 100baseT_Half);
+ phylink_set(mask, 100baseT_Full);
+ break;
}
- /* put the gmac into the right mode */
- regmap_read(eth->ethsys, ETHSYS_SYSCFG0, &val);
- val &= ~SYSCFG0_GE_MODE(SYSCFG0_GE_MASK, mac->id);
- val |= SYSCFG0_GE_MODE(mac->ge_mode, mac->id);
- regmap_write(eth->ethsys, ETHSYS_SYSCFG0, val);
-
- /* couple phydev to net_device */
- if (mtk_phy_connect_node(eth, mac, np))
- goto err_phy;
+ if (state->interface == PHY_INTERFACE_MODE_NA) {
+ if (MTK_HAS_CAPS(mac->hw->soc->caps, MTK_SGMII)) {
+ phylink_set(mask, 1000baseT_Full);
+ phylink_set(mask, 1000baseX_Full);
+ phylink_set(mask, 2500baseX_Full);
+ }
+ if (MTK_HAS_CAPS(mac->hw->soc->caps, MTK_RGMII)) {
+ phylink_set(mask, 1000baseT_Full);
+ phylink_set(mask, 1000baseT_Half);
+ phylink_set(mask, 1000baseX_Full);
+ }
+ if (MTK_HAS_CAPS(mac->hw->soc->caps, MTK_GEPHY)) {
+ phylink_set(mask, 1000baseT_Full);
+ phylink_set(mask, 1000baseT_Half);
+ }
+ }
- of_node_put(np);
+ phylink_set(mask, Pause);
+ phylink_set(mask, Asym_Pause);
- return 0;
+ linkmode_and(supported, supported, mask);
+ linkmode_and(state->advertising, state->advertising, mask);
-err_phy:
- if (of_phy_is_fixed_link(mac->of_node))
- of_phy_deregister_fixed_link(mac->of_node);
- of_node_put(np);
- dev_err(eth->dev, "%s: invalid phy\n", __func__);
- return -EINVAL;
+ /* We can only operate at 2500BaseX or 1000BaseX. If requested
+ * to advertise both, only report advertising at 2500BaseX.
+ */
+ phylink_helper_basex_speed(state);
}
+static const struct phylink_mac_ops mtk_phylink_ops = {
+ .validate = mtk_validate,
+ .mac_link_state = mtk_mac_link_state,
+ .mac_an_restart = mtk_mac_an_restart,
+ .mac_config = mtk_mac_config,
+ .mac_link_down = mtk_mac_link_down,
+ .mac_link_up = mtk_mac_link_up,
+};
+
static int mtk_mdio_init(struct mtk_eth *eth)
{
struct device_node *mii_np;
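Condensed from the calls this hunk actually adds, the phylink contract is small: publish a phylink_mac_ops table, create one instance per netdev at probe time, and bracket ndo_open/ndo_stop with connect/start and stop/disconnect (error handling trimmed for the sketch):

	/* probe */
	mac->phylink = phylink_create(&mac->phylink_config,
				      of_fwnode_handle(np), phy_mode,
				      &mtk_phylink_ops);

	/* ndo_open */
	err = phylink_of_phy_connect(mac->phylink, np, 0);
	phylink_start(mac->phylink);

	/* ndo_stop */
	phylink_stop(mac->phylink);
	phylink_disconnect_phy(mac->phylink);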
@@ -395,8 +570,8 @@ static inline void mtk_tx_irq_disable(struct mtk_eth *eth, u32 mask)
u32 val;
spin_lock_irqsave(&eth->tx_irq_lock, flags);
- val = mtk_r32(eth, MTK_QDMA_INT_MASK);
- mtk_w32(eth, val & ~mask, MTK_QDMA_INT_MASK);
+ val = mtk_r32(eth, eth->tx_int_mask_reg);
+ mtk_w32(eth, val & ~mask, eth->tx_int_mask_reg);
spin_unlock_irqrestore(&eth->tx_irq_lock, flags);
}
@@ -406,8 +581,8 @@ static inline void mtk_tx_irq_enable(struct mtk_eth *eth, u32 mask)
u32 val;
spin_lock_irqsave(&eth->tx_irq_lock, flags);
- val = mtk_r32(eth, MTK_QDMA_INT_MASK);
- mtk_w32(eth, val | mask, MTK_QDMA_INT_MASK);
+ val = mtk_r32(eth, eth->tx_int_mask_reg);
+ mtk_w32(eth, val | mask, eth->tx_int_mask_reg);
spin_unlock_irqrestore(&eth->tx_irq_lock, flags);
}
@@ -437,6 +612,7 @@ static int mtk_set_mac_address(struct net_device *dev, void *p)
{
int ret = eth_mac_addr(dev, p);
struct mtk_mac *mac = netdev_priv(dev);
+ struct mtk_eth *eth = mac->hw;
const char *macaddr = dev->dev_addr;
if (ret)
@@ -446,11 +622,19 @@ static int mtk_set_mac_address(struct net_device *dev, void *p)
return -EBUSY;
spin_lock_bh(&mac->hw->page_lock);
- mtk_w32(mac->hw, (macaddr[0] << 8) | macaddr[1],
- MTK_GDMA_MAC_ADRH(mac->id));
- mtk_w32(mac->hw, (macaddr[2] << 24) | (macaddr[3] << 16) |
- (macaddr[4] << 8) | macaddr[5],
- MTK_GDMA_MAC_ADRL(mac->id));
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
+ mtk_w32(mac->hw, (macaddr[0] << 8) | macaddr[1],
+ MT7628_SDM_MAC_ADRH);
+ mtk_w32(mac->hw, (macaddr[2] << 24) | (macaddr[3] << 16) |
+ (macaddr[4] << 8) | macaddr[5],
+ MT7628_SDM_MAC_ADRL);
+ } else {
+ mtk_w32(mac->hw, (macaddr[0] << 8) | macaddr[1],
+ MTK_GDMA_MAC_ADRH(mac->id));
+ mtk_w32(mac->hw, (macaddr[2] << 24) | (macaddr[3] << 16) |
+ (macaddr[4] << 8) | macaddr[5],
+ MTK_GDMA_MAC_ADRL(mac->id));
+ }
spin_unlock_bh(&mac->hw->page_lock);
return 0;
@@ -626,19 +810,47 @@ static inline struct mtk_tx_buf *mtk_desc_to_tx_buf(struct mtk_tx_ring *ring,
return &ring->buf[idx];
}
+static struct mtk_tx_dma *qdma_to_pdma(struct mtk_tx_ring *ring,
+ struct mtk_tx_dma *dma)
+{
+ return ring->dma_pdma - ring->dma + dma;
+}
+
+static int txd_to_idx(struct mtk_tx_ring *ring, struct mtk_tx_dma *dma)
+{
+ return ((void *)dma - (void *)ring->dma) / sizeof(*dma);
+}
+
static void mtk_tx_unmap(struct mtk_eth *eth, struct mtk_tx_buf *tx_buf)
{
- if (tx_buf->flags & MTK_TX_FLAGS_SINGLE0) {
- dma_unmap_single(eth->dev,
- dma_unmap_addr(tx_buf, dma_addr0),
- dma_unmap_len(tx_buf, dma_len0),
- DMA_TO_DEVICE);
- } else if (tx_buf->flags & MTK_TX_FLAGS_PAGE0) {
- dma_unmap_page(eth->dev,
- dma_unmap_addr(tx_buf, dma_addr0),
- dma_unmap_len(tx_buf, dma_len0),
- DMA_TO_DEVICE);
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
+ if (tx_buf->flags & MTK_TX_FLAGS_SINGLE0) {
+ dma_unmap_single(eth->dev,
+ dma_unmap_addr(tx_buf, dma_addr0),
+ dma_unmap_len(tx_buf, dma_len0),
+ DMA_TO_DEVICE);
+ } else if (tx_buf->flags & MTK_TX_FLAGS_PAGE0) {
+ dma_unmap_page(eth->dev,
+ dma_unmap_addr(tx_buf, dma_addr0),
+ dma_unmap_len(tx_buf, dma_len0),
+ DMA_TO_DEVICE);
+ }
+ } else {
+ if (dma_unmap_len(tx_buf, dma_len0)) {
+ dma_unmap_page(eth->dev,
+ dma_unmap_addr(tx_buf, dma_addr0),
+ dma_unmap_len(tx_buf, dma_len0),
+ DMA_TO_DEVICE);
+ }
+
+ if (dma_unmap_len(tx_buf, dma_len1)) {
+ dma_unmap_page(eth->dev,
+ dma_unmap_addr(tx_buf, dma_addr1),
+ dma_unmap_len(tx_buf, dma_len1),
+ DMA_TO_DEVICE);
+ }
}
+
tx_buf->flags = 0;
if (tx_buf->skb &&
(tx_buf->skb != (struct sk_buff *)MTK_DMA_DUMMY_DESC))
@@ -646,19 +858,45 @@ static void mtk_tx_unmap(struct mtk_eth *eth, struct mtk_tx_buf *tx_buf)
tx_buf->skb = NULL;
}
+static void setup_tx_buf(struct mtk_eth *eth, struct mtk_tx_buf *tx_buf,
+ struct mtk_tx_dma *txd, dma_addr_t mapped_addr,
+ size_t size, int idx)
+{
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
+ dma_unmap_addr_set(tx_buf, dma_addr0, mapped_addr);
+ dma_unmap_len_set(tx_buf, dma_len0, size);
+ } else {
+ if (idx & 1) {
+ txd->txd3 = mapped_addr;
+ txd->txd2 |= TX_DMA_PLEN1(size);
+ dma_unmap_addr_set(tx_buf, dma_addr1, mapped_addr);
+ dma_unmap_len_set(tx_buf, dma_len1, size);
+ } else {
+ tx_buf->skb = (struct sk_buff *)MTK_DMA_DUMMY_DESC;
+ txd->txd1 = mapped_addr;
+ txd->txd2 = TX_DMA_PLEN0(size);
+ dma_unmap_addr_set(tx_buf, dma_addr0, mapped_addr);
+ dma_unmap_len_set(tx_buf, dma_len0, size);
+ }
+ }
+}
+
static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev,
int tx_num, struct mtk_tx_ring *ring, bool gso)
{
struct mtk_mac *mac = netdev_priv(dev);
struct mtk_eth *eth = mac->hw;
struct mtk_tx_dma *itxd, *txd;
+ struct mtk_tx_dma *itxd_pdma, *txd_pdma;
struct mtk_tx_buf *itx_buf, *tx_buf;
dma_addr_t mapped_addr;
unsigned int nr_frags;
int i, n_desc = 1;
u32 txd4 = 0, fport;
+ int k = 0;
itxd = ring->next_free;
+ itxd_pdma = qdma_to_pdma(ring, itxd);
if (itxd == ring->last_free)
return -ENOMEM;
@@ -689,26 +927,37 @@ static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev,
itx_buf->flags |= MTK_TX_FLAGS_SINGLE0;
itx_buf->flags |= (!mac->id) ? MTK_TX_FLAGS_FPORT0 :
MTK_TX_FLAGS_FPORT1;
- dma_unmap_addr_set(itx_buf, dma_addr0, mapped_addr);
- dma_unmap_len_set(itx_buf, dma_len0, skb_headlen(skb));
+ setup_tx_buf(eth, itx_buf, itxd_pdma, mapped_addr, skb_headlen(skb),
+ k++);
/* TX SG offload */
txd = itxd;
+ txd_pdma = qdma_to_pdma(ring, txd);
nr_frags = skb_shinfo(skb)->nr_frags;
+
for (i = 0; i < nr_frags; i++) {
- struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
+ skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
unsigned int offset = 0;
int frag_size = skb_frag_size(frag);
while (frag_size) {
bool last_frag = false;
unsigned int frag_map_size;
+ bool new_desc = true;
+
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA) ||
+ (i & 0x1)) {
+ txd = mtk_qdma_phys_to_virt(ring, txd->txd2);
+ txd_pdma = qdma_to_pdma(ring, txd);
+ if (txd == ring->last_free)
+ goto err_dma;
+
+ n_desc++;
+ } else {
+ new_desc = false;
+ }
- txd = mtk_qdma_phys_to_virt(ring, txd->txd2);
- if (txd == ring->last_free)
- goto err_dma;
- n_desc++;
frag_map_size = min(frag_size, MTK_TX_DMA_BUF_LEN);
mapped_addr = skb_frag_dma_map(eth->dev, frag, offset,
frag_map_size,
@@ -727,14 +976,16 @@ static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev,
WRITE_ONCE(txd->txd4, fport);
tx_buf = mtk_desc_to_tx_buf(ring, txd);
- memset(tx_buf, 0, sizeof(*tx_buf));
+ if (new_desc)
+ memset(tx_buf, 0, sizeof(*tx_buf));
tx_buf->skb = (struct sk_buff *)MTK_DMA_DUMMY_DESC;
tx_buf->flags |= MTK_TX_FLAGS_PAGE0;
tx_buf->flags |= (!mac->id) ? MTK_TX_FLAGS_FPORT0 :
MTK_TX_FLAGS_FPORT1;
- dma_unmap_addr_set(tx_buf, dma_addr0, mapped_addr);
- dma_unmap_len_set(tx_buf, dma_len0, frag_map_size);
+ setup_tx_buf(eth, tx_buf, txd_pdma, mapped_addr,
+ frag_map_size, k++);
+
frag_size -= frag_map_size;
offset += frag_map_size;
}
@@ -746,6 +997,12 @@ static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev,
WRITE_ONCE(itxd->txd4, txd4);
WRITE_ONCE(itxd->txd3, (TX_DMA_SWC | TX_DMA_PLEN0(skb_headlen(skb)) |
(!nr_frags * TX_DMA_LS0)));
+ if (!MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
+ if (k & 0x1)
+ txd_pdma->txd2 |= TX_DMA_LS0;
+ else
+ txd_pdma->txd2 |= TX_DMA_LS1;
+ }
netdev_sent_queue(dev, skb->len);
skb_tx_timestamp(skb);
@@ -758,9 +1015,15 @@ static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev,
*/
wmb();
- if (netif_xmit_stopped(netdev_get_tx_queue(dev, 0)) ||
- !netdev_xmit_more())
- mtk_w32(eth, txd->txd2, MTK_QTX_CTX_PTR);
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
+ if (netif_xmit_stopped(netdev_get_tx_queue(dev, 0)) ||
+ !netdev_xmit_more())
+ mtk_w32(eth, txd->txd2, MTK_QTX_CTX_PTR);
+ } else {
+ int next_idx = NEXT_DESP_IDX(txd_to_idx(ring, txd),
+ ring->dma_size);
+ mtk_w32(eth, next_idx, MT7628_TX_CTX_IDX0);
+ }
return 0;
@@ -772,7 +1035,11 @@ err_dma:
mtk_tx_unmap(eth, tx_buf);
itxd->txd3 = TX_DMA_LS0 | TX_DMA_OWNER_CPU;
+ if (!MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
+ itxd_pdma->txd2 = TX_DMA_DESP2_DEF;
+
itxd = mtk_qdma_phys_to_virt(ring, itxd->txd2);
+ itxd_pdma = qdma_to_pdma(ring, itxd);
} while (itxd != txd);
return -ENOMEM;
@@ -781,13 +1048,14 @@ err_dma:
static inline int mtk_cal_txd_req(struct sk_buff *skb)
{
int i, nfrags;
- struct skb_frag_struct *frag;
+ skb_frag_t *frag;
nfrags = 1;
if (skb_is_gso(skb)) {
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
frag = &skb_shinfo(skb)->frags[i];
- nfrags += DIV_ROUND_UP(frag->size, MTK_TX_DMA_BUF_LEN);
+ nfrags += DIV_ROUND_UP(skb_frag_size(frag),
+ MTK_TX_DMA_BUF_LEN);
}
} else {
nfrags += skb_shinfo(skb)->nr_frags;
@@ -902,7 +1170,7 @@ static struct mtk_rx_ring *mtk_get_rx_ring(struct mtk_eth *eth)
for (i = 0; i < MTK_MAX_RX_RING_NUM; i++) {
ring = &eth->rx_ring[i];
- idx = NEXT_RX_DESP_IDX(ring->calc_idx, ring->dma_size);
+ idx = NEXT_DESP_IDX(ring->calc_idx, ring->dma_size);
if (ring->dma[idx].rxd2 & RX_DMA_DONE) {
ring->calc_idx_update = true;
return ring;
@@ -945,13 +1213,13 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget,
struct net_device *netdev;
unsigned int pktlen;
dma_addr_t dma_addr;
- int mac = 0;
+ int mac;
ring = mtk_get_rx_ring(eth);
if (unlikely(!ring))
goto rx_done;
- idx = NEXT_RX_DESP_IDX(ring->calc_idx, ring->dma_size);
+ idx = NEXT_DESP_IDX(ring->calc_idx, ring->dma_size);
rxd = &ring->dma[idx];
data = ring->data[idx];
@@ -960,9 +1228,13 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget,
break;
/* find out which mac the packet comes from. values start at 1 */
- mac = (trxd.rxd4 >> RX_DMA_FPORT_SHIFT) &
- RX_DMA_FPORT_MASK;
- mac--;
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
+ mac = 0;
+ } else {
+ mac = (trxd.rxd4 >> RX_DMA_FPORT_SHIFT) &
+ RX_DMA_FPORT_MASK;
+ mac--;
+ }
if (unlikely(mac < 0 || mac >= MTK_MAC_COUNT ||
!eth->netdev[mac]))
@@ -980,7 +1252,8 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget,
goto release_desc;
}
dma_addr = dma_map_single(eth->dev,
- new_data + NET_SKB_PAD,
+ new_data + NET_SKB_PAD +
+ eth->ip_align,
ring->buf_size,
DMA_FROM_DEVICE);
if (unlikely(dma_mapping_error(eth->dev, dma_addr))) {
@@ -1003,7 +1276,7 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget,
pktlen = RX_DMA_GET_PLEN0(trxd.rxd2);
skb->dev = netdev;
skb_put(skb, pktlen);
- if (trxd.rxd4 & RX_DMA_L4_VALID)
+ if (trxd.rxd4 & eth->rx_dma_l4_valid)
skb->ip_summed = CHECKSUM_UNNECESSARY;
else
skb_checksum_none_assert(skb);
@@ -1020,7 +1293,10 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget,
rxd->rxd1 = (unsigned int)dma_addr;
release_desc:
- rxd->rxd2 = RX_DMA_PLEN0(ring->buf_size);
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628))
+ rxd->rxd2 = RX_DMA_LSO;
+ else
+ rxd->rxd2 = RX_DMA_PLEN0(ring->buf_size);
ring->calc_idx = idx;
@@ -1039,19 +1315,14 @@ rx_done:
return done;
}
-static int mtk_poll_tx(struct mtk_eth *eth, int budget)
+static int mtk_poll_tx_qdma(struct mtk_eth *eth, int budget,
+ unsigned int *done, unsigned int *bytes)
{
struct mtk_tx_ring *ring = &eth->tx_ring;
struct mtk_tx_dma *desc;
struct sk_buff *skb;
struct mtk_tx_buf *tx_buf;
- unsigned int done[MTK_MAX_DEVS];
- unsigned int bytes[MTK_MAX_DEVS];
u32 cpu, dma;
- int total = 0, i;
-
- memset(done, 0, sizeof(done));
- memset(bytes, 0, sizeof(bytes));
cpu = mtk_r32(eth, MTK_QTX_CRX_PTR);
dma = mtk_r32(eth, MTK_QTX_DRX_PTR);
@@ -1089,6 +1360,62 @@ static int mtk_poll_tx(struct mtk_eth *eth, int budget)
mtk_w32(eth, cpu, MTK_QTX_CRX_PTR);
+ return budget;
+}
+
+static int mtk_poll_tx_pdma(struct mtk_eth *eth, int budget,
+ unsigned int *done, unsigned int *bytes)
+{
+ struct mtk_tx_ring *ring = &eth->tx_ring;
+ struct mtk_tx_dma *desc;
+ struct sk_buff *skb;
+ struct mtk_tx_buf *tx_buf;
+ u32 cpu, dma;
+
+ cpu = ring->cpu_idx;
+ dma = mtk_r32(eth, MT7628_TX_DTX_IDX0);
+
+ while ((cpu != dma) && budget) {
+ tx_buf = &ring->buf[cpu];
+ skb = tx_buf->skb;
+ if (!skb)
+ break;
+
+ if (skb != (struct sk_buff *)MTK_DMA_DUMMY_DESC) {
+ bytes[0] += skb->len;
+ done[0]++;
+ budget--;
+ }
+
+ mtk_tx_unmap(eth, tx_buf);
+
+ desc = &ring->dma[cpu];
+ ring->last_free = desc;
+ atomic_inc(&ring->free_count);
+
+ cpu = NEXT_DESP_IDX(cpu, ring->dma_size);
+ }
+
+ ring->cpu_idx = cpu;
+
+ return budget;
+}
+
+static int mtk_poll_tx(struct mtk_eth *eth, int budget)
+{
+ struct mtk_tx_ring *ring = &eth->tx_ring;
+ unsigned int done[MTK_MAX_DEVS];
+ unsigned int bytes[MTK_MAX_DEVS];
+ int total = 0, i;
+
+ memset(done, 0, sizeof(done));
+ memset(bytes, 0, sizeof(bytes));
+
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
+ budget = mtk_poll_tx_qdma(eth, budget, done, bytes);
+ else
+ budget = mtk_poll_tx_pdma(eth, budget, done, bytes);
+
for (i = 0; i < MTK_MAC_COUNT; i++) {
if (!eth->netdev[i] || !done[i])
continue;
@@ -1120,13 +1447,14 @@ static int mtk_napi_tx(struct napi_struct *napi, int budget)
u32 status, mask;
int tx_done = 0;
- mtk_handle_status_irq(eth);
- mtk_w32(eth, MTK_TX_DONE_INT, MTK_QMTK_INT_STATUS);
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
+ mtk_handle_status_irq(eth);
+ mtk_w32(eth, MTK_TX_DONE_INT, eth->tx_int_status_reg);
tx_done = mtk_poll_tx(eth, budget);
if (unlikely(netif_msg_intr(eth))) {
- status = mtk_r32(eth, MTK_QMTK_INT_STATUS);
- mask = mtk_r32(eth, MTK_QDMA_INT_MASK);
+ status = mtk_r32(eth, eth->tx_int_status_reg);
+ mask = mtk_r32(eth, eth->tx_int_mask_reg);
dev_info(eth->dev,
"done tx %d, intr 0x%08x/0x%x\n",
tx_done, status, mask);
@@ -1135,7 +1463,7 @@ static int mtk_napi_tx(struct napi_struct *napi, int budget)
if (tx_done == budget)
return budget;
- status = mtk_r32(eth, MTK_QMTK_INT_STATUS);
+ status = mtk_r32(eth, eth->tx_int_status_reg);
if (status & MTK_TX_DONE_INT)
return budget;
@@ -1202,6 +1530,24 @@ static int mtk_tx_alloc(struct mtk_eth *eth)
ring->dma[i].txd3 = TX_DMA_LS0 | TX_DMA_OWNER_CPU;
}
+ /* On MT7688 (PDMA only) this driver uses the ring->dma structs
+ * only as the framework. The real HW descriptors are the PDMA
+ * descriptors in ring->dma_pdma.
+ */
+ if (!MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
+ ring->dma_pdma = dma_alloc_coherent(eth->dev, MTK_DMA_SIZE * sz,
+ &ring->phys_pdma,
+ GFP_ATOMIC);
+ if (!ring->dma_pdma)
+ goto no_tx_mem;
+
+ for (i = 0; i < MTK_DMA_SIZE; i++) {
+ ring->dma_pdma[i].txd2 = TX_DMA_DESP2_DEF;
+ ring->dma_pdma[i].txd4 = 0;
+ }
+ }
+
+ ring->dma_size = MTK_DMA_SIZE;
atomic_set(&ring->free_count, MTK_DMA_SIZE - 2);
ring->next_free = &ring->dma[0];
ring->last_free = &ring->dma[MTK_DMA_SIZE - 1];
@@ -1212,15 +1558,23 @@ static int mtk_tx_alloc(struct mtk_eth *eth)
*/
wmb();
- mtk_w32(eth, ring->phys, MTK_QTX_CTX_PTR);
- mtk_w32(eth, ring->phys, MTK_QTX_DTX_PTR);
- mtk_w32(eth,
- ring->phys + ((MTK_DMA_SIZE - 1) * sz),
- MTK_QTX_CRX_PTR);
- mtk_w32(eth,
- ring->phys + ((MTK_DMA_SIZE - 1) * sz),
- MTK_QTX_DRX_PTR);
- mtk_w32(eth, (QDMA_RES_THRES << 8) | QDMA_RES_THRES, MTK_QTX_CFG(0));
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
+ mtk_w32(eth, ring->phys, MTK_QTX_CTX_PTR);
+ mtk_w32(eth, ring->phys, MTK_QTX_DTX_PTR);
+ mtk_w32(eth,
+ ring->phys + ((MTK_DMA_SIZE - 1) * sz),
+ MTK_QTX_CRX_PTR);
+ mtk_w32(eth,
+ ring->phys + ((MTK_DMA_SIZE - 1) * sz),
+ MTK_QTX_DRX_PTR);
+ mtk_w32(eth, (QDMA_RES_THRES << 8) | QDMA_RES_THRES,
+ MTK_QTX_CFG(0));
+ } else {
+ mtk_w32(eth, ring->phys_pdma, MT7628_TX_BASE_PTR0);
+ mtk_w32(eth, MTK_DMA_SIZE, MT7628_TX_MAX_CNT0);
+ mtk_w32(eth, 0, MT7628_TX_CTX_IDX0);
+ mtk_w32(eth, MT7628_PST_DTX_IDX0, MTK_PDMA_RST_IDX);
+ }
return 0;
@@ -1247,6 +1601,14 @@ static void mtk_tx_clean(struct mtk_eth *eth)
ring->phys);
ring->dma = NULL;
}
+
+ if (ring->dma_pdma) {
+ dma_free_coherent(eth->dev,
+ MTK_DMA_SIZE * sizeof(*ring->dma_pdma),
+ ring->dma_pdma,
+ ring->phys_pdma);
+ ring->dma_pdma = NULL;
+ }
}
static int mtk_rx_alloc(struct mtk_eth *eth, int ring_no, int rx_flag)
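The PDMA shadow ring added above is a plain coherent allocation mirroring the QDMA ring index-for-index, and mtk_tx_clean() frees it symmetrically. Reduced to its shape (dev, ring and N are stand-ins):

	ring->desc = dma_alloc_coherent(dev, N * sizeof(*ring->desc),
					&ring->phys, GFP_ATOMIC);
	if (!ring->desc)
		return -ENOMEM;

	/* ... teardown ... */
	dma_free_coherent(dev, N * sizeof(*ring->desc),
			  ring->desc, ring->phys);
	ring->desc = NULL;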
@@ -1294,14 +1656,17 @@ static int mtk_rx_alloc(struct mtk_eth *eth, int ring_no, int rx_flag)
for (i = 0; i < rx_dma_size; i++) {
dma_addr_t dma_addr = dma_map_single(eth->dev,
- ring->data[i] + NET_SKB_PAD,
+ ring->data[i] + NET_SKB_PAD + eth->ip_align,
ring->buf_size,
DMA_FROM_DEVICE);
if (unlikely(dma_mapping_error(eth->dev, dma_addr)))
return -ENOMEM;
ring->dma[i].rxd1 = (unsigned int)dma_addr;
- ring->dma[i].rxd2 = RX_DMA_PLEN0(ring->buf_size);
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628))
+ ring->dma[i].rxd2 = RX_DMA_LSO;
+ else
+ ring->dma[i].rxd2 = RX_DMA_PLEN0(ring->buf_size);
}
ring->dma_size = rx_dma_size;
ring->calc_idx_update = false;
@@ -1617,9 +1982,16 @@ static int mtk_dma_busy_wait(struct mtk_eth *eth)
unsigned long t_start = jiffies;
while (1) {
- if (!(mtk_r32(eth, MTK_QDMA_GLO_CFG) &
- (MTK_RX_DMA_BUSY | MTK_TX_DMA_BUSY)))
- return 0;
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
+ if (!(mtk_r32(eth, MTK_QDMA_GLO_CFG) &
+ (MTK_RX_DMA_BUSY | MTK_TX_DMA_BUSY)))
+ return 0;
+ } else {
+ if (!(mtk_r32(eth, MTK_PDMA_GLO_CFG) &
+ (MTK_RX_DMA_BUSY | MTK_TX_DMA_BUSY)))
+ return 0;
+ }
+
if (time_after(jiffies, t_start + MTK_DMA_BUSY_TIMEOUT))
break;
}
@@ -1636,20 +2008,24 @@ static int mtk_dma_init(struct mtk_eth *eth)
if (mtk_dma_busy_wait(eth))
return -EBUSY;
- /* QDMA needs scratch memory for internal reordering of the
- * descriptors
- */
- err = mtk_init_fq_dma(eth);
- if (err)
- return err;
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
+ /* QDMA needs scratch memory for internal reordering of the
+ * descriptors
+ */
+ err = mtk_init_fq_dma(eth);
+ if (err)
+ return err;
+ }
err = mtk_tx_alloc(eth);
if (err)
return err;
- err = mtk_rx_alloc(eth, 0, MTK_RX_FLAGS_QDMA);
- if (err)
- return err;
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
+ err = mtk_rx_alloc(eth, 0, MTK_RX_FLAGS_QDMA);
+ if (err)
+ return err;
+ }
err = mtk_rx_alloc(eth, 0, MTK_RX_FLAGS_NORMAL);
if (err)
@@ -1666,10 +2042,14 @@ static int mtk_dma_init(struct mtk_eth *eth)
return err;
}
- /* Enable random early drop and set drop threshold automatically */
- mtk_w32(eth, FC_THRES_DROP_MODE | FC_THRES_DROP_EN | FC_THRES_MIN,
- MTK_QDMA_FC_THRES);
- mtk_w32(eth, 0x0, MTK_QDMA_HRED2);
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
+ /* Enable random early drop and set drop threshold
+ * automatically
+ */
+ mtk_w32(eth, FC_THRES_DROP_MODE | FC_THRES_DROP_EN |
+ FC_THRES_MIN, MTK_QDMA_FC_THRES);
+ mtk_w32(eth, 0x0, MTK_QDMA_HRED2);
+ }
return 0;
}
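Every QDMA/PDMA fork in this file keys off one capability bitmap in the per-SoC match data. The test is a plain mask comparison; the bit position below is hypothetical, only the pattern matters:

#define MTK_QDMA		BIT(9)	/* hypothetical bit */
#define MTK_HAS_CAPS(caps, _x)	(((caps) & (_x)) == (_x))

	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
		foo_setup_qdma(eth);	/* QDMA register block */
	else
		foo_setup_pdma(eth);	/* PDMA-only (MT7628/88) */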
@@ -1745,8 +2125,8 @@ static irqreturn_t mtk_handle_irq(int irq, void *_eth)
if (mtk_r32(eth, MTK_PDMA_INT_STATUS) & MTK_RX_DONE_INT)
mtk_handle_irq_rx(irq, _eth);
}
- if (mtk_r32(eth, MTK_QDMA_INT_MASK) & MTK_TX_DONE_INT) {
- if (mtk_r32(eth, MTK_QMTK_INT_STATUS) & MTK_TX_DONE_INT)
+ if (mtk_r32(eth, eth->tx_int_mask_reg) & MTK_TX_DONE_INT) {
+ if (mtk_r32(eth, eth->tx_int_status_reg) & MTK_TX_DONE_INT)
mtk_handle_irq_tx(irq, _eth);
}
@@ -1778,17 +2158,23 @@ static int mtk_start_dma(struct mtk_eth *eth)
return err;
}
- mtk_w32(eth,
- MTK_TX_WB_DDONE | MTK_TX_DMA_EN |
- MTK_DMA_SIZE_16DWORDS | MTK_NDP_CO_PRO |
- MTK_RX_DMA_EN | MTK_RX_2B_OFFSET |
- MTK_RX_BT_32DWORDS,
- MTK_QDMA_GLO_CFG);
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
+ mtk_w32(eth,
+ MTK_TX_WB_DDONE | MTK_TX_DMA_EN |
+ MTK_DMA_SIZE_16DWORDS | MTK_NDP_CO_PRO |
+ MTK_RX_DMA_EN | MTK_RX_2B_OFFSET |
+ MTK_RX_BT_32DWORDS,
+ MTK_QDMA_GLO_CFG);
- mtk_w32(eth,
- MTK_RX_DMA_EN | rx_2b_offset |
- MTK_RX_BT_32DWORDS | MTK_MULTI_EN,
- MTK_PDMA_GLO_CFG);
+ mtk_w32(eth,
+ MTK_RX_DMA_EN | rx_2b_offset |
+ MTK_RX_BT_32DWORDS | MTK_MULTI_EN,
+ MTK_PDMA_GLO_CFG);
+ } else {
+ mtk_w32(eth, MTK_TX_WB_DDONE | MTK_TX_DMA_EN | MTK_RX_DMA_EN |
+ MTK_MULTI_EN | MTK_PDMA_SIZE_8DWORDS,
+ MTK_PDMA_GLO_CFG);
+ }
return 0;
}
@@ -1797,6 +2183,14 @@ static int mtk_open(struct net_device *dev)
{
struct mtk_mac *mac = netdev_priv(dev);
struct mtk_eth *eth = mac->hw;
+ int err;
+
+ err = phylink_of_phy_connect(mac->phylink, mac->of_node, 0);
+ if (err) {
+ netdev_err(dev, "%s: could not attach PHY: %d\n", __func__,
+ err);
+ return err;
+ }
/* we run 2 netdevs on the same dma ring so we only bring it up once */
if (!refcount_read(&eth->dma_refcnt)) {
@@ -1814,9 +2208,8 @@ static int mtk_open(struct net_device *dev)
else
refcount_inc(&eth->dma_refcnt);
- phy_start(dev->phydev);
+ phylink_start(mac->phylink);
netif_start_queue(dev);
-
return 0;
}
@@ -1848,8 +2241,11 @@ static int mtk_stop(struct net_device *dev)
struct mtk_mac *mac = netdev_priv(dev);
struct mtk_eth *eth = mac->hw;
+ phylink_stop(mac->phylink);
+
netif_tx_disable(dev);
- phy_stop(dev->phydev);
+
+ phylink_disconnect_phy(mac->phylink);
/* only shutdown DMA if this is the last user */
if (!refcount_dec_and_test(&eth->dma_refcnt))
@@ -1860,7 +2256,8 @@ static int mtk_stop(struct net_device *dev)
napi_disable(&eth->tx_napi);
napi_disable(&eth->rx_napi);
- mtk_stop_dma(eth, MTK_QDMA_GLO_CFG);
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
+ mtk_stop_dma(eth, MTK_QDMA_GLO_CFG);
mtk_stop_dma(eth, MTK_PDMA_GLO_CFG);
mtk_dma_free(eth);
@@ -1922,17 +2319,26 @@ static int mtk_hw_init(struct mtk_eth *eth)
if (ret)
goto err_disable_pm;
- ethsys_reset(eth, RSTCTRL_FE);
- ethsys_reset(eth, RSTCTRL_PPE);
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
+ ret = device_reset(eth->dev);
+ if (ret) {
+ dev_err(eth->dev, "MAC reset failed!\n");
+ goto err_disable_pm;
+ }
- regmap_read(eth->ethsys, ETHSYS_SYSCFG0, &val);
- for (i = 0; i < MTK_MAC_COUNT; i++) {
- if (!eth->mac[i])
- continue;
- val &= ~SYSCFG0_GE_MODE(SYSCFG0_GE_MASK, eth->mac[i]->id);
- val |= SYSCFG0_GE_MODE(eth->mac[i]->ge_mode, eth->mac[i]->id);
+ /* enable interrupt delay for RX */
+ mtk_w32(eth, MTK_PDMA_DELAY_RX_DELAY, MTK_PDMA_DELAY_INT);
+
+ /* disable delay and normal interrupt */
+ mtk_tx_irq_disable(eth, ~0);
+ mtk_rx_irq_disable(eth, ~0);
+
+ return 0;
}
- regmap_write(eth->ethsys, ETHSYS_SYSCFG0, val);
+
+ /* Non-MT7628 handling... */
+ ethsys_reset(eth, RSTCTRL_FE);
+ ethsys_reset(eth, RSTCTRL_PPE);
if (eth->pctl) {
/* Set GE2 driving and slew rate */
@@ -1946,11 +2352,11 @@ static int mtk_hw_init(struct mtk_eth *eth)
}
/* Set linkdown as the default for each GMAC. Its own MCR would be set
- * up with the more appropriate value when mtk_phy_link_adjust call is
- * being invoked.
+ * up with a more appropriate value when mtk_mac_config() is
+ * invoked.
*/
for (i = 0; i < MTK_MAC_COUNT; i++)
- mtk_w32(eth, 0, MTK_MAC_MCR(i));
+ mtk_w32(eth, MAC_MCR_FORCE_LINK_DOWN, MTK_MAC_MCR(i));
/* Tell the CDM to parse the MTK special tag from the CPU,
* which also works for untagged packets.
@@ -1978,7 +2384,7 @@ static int mtk_hw_init(struct mtk_eth *eth)
mtk_w32(eth, MTK_RX_DONE_INT, MTK_QDMA_INT_GRP2);
mtk_w32(eth, 0x21021000, MTK_FE_INT_GRP);
- for (i = 0; i < 2; i++) {
+ for (i = 0; i < MTK_MAC_COUNT; i++) {
u32 val = mtk_r32(eth, MTK_GDMA_FWD_CFG(i));
/* setup the forward port to send frame to PDMA */
@@ -2030,7 +2436,7 @@ static int __init mtk_init(struct net_device *dev)
dev->dev_addr);
}
- return mtk_phy_connect(dev);
+ return 0;
}
static void mtk_uninit(struct net_device *dev)
@@ -2038,20 +2444,20 @@ static void mtk_uninit(struct net_device *dev)
struct mtk_mac *mac = netdev_priv(dev);
struct mtk_eth *eth = mac->hw;
- phy_disconnect(dev->phydev);
- if (of_phy_is_fixed_link(mac->of_node))
- of_phy_deregister_fixed_link(mac->of_node);
+ phylink_disconnect_phy(mac->phylink);
mtk_tx_irq_disable(eth, ~0);
mtk_rx_irq_disable(eth, ~0);
}
static int mtk_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
+ struct mtk_mac *mac = netdev_priv(dev);
+
switch (cmd) {
case SIOCGMIIPHY:
case SIOCGMIIREG:
case SIOCSMIIREG:
- return phy_mii_ioctl(dev->phydev, ifr, cmd);
+ return phylink_mii_ioctl(mac->phylink, ifr, cmd);
default:
break;
}
@@ -2092,16 +2498,6 @@ static void mtk_pending_work(struct work_struct *work)
eth->dev->pins->default_state);
mtk_hw_init(eth);
- for (i = 0; i < MTK_MAC_COUNT; i++) {
- if (!eth->mac[i] ||
- of_phy_is_fixed_link(eth->mac[i]->of_node))
- continue;
- err = phy_init_hw(eth->netdev[i]->phydev);
- if (err)
- dev_err(eth->dev, "%s: PHY init failed.\n",
- eth->netdev[i]->name);
- }
-
/* restart DMA and enable IRQs */
for (i = 0; i < MTK_MAC_COUNT; i++) {
if (!test_bit(i, &restart))
@@ -2164,9 +2560,7 @@ static int mtk_get_link_ksettings(struct net_device *ndev,
if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
return -EBUSY;
- phy_ethtool_ksettings_get(ndev->phydev, cmd);
-
- return 0;
+ return phylink_ethtool_ksettings_get(mac->phylink, cmd);
}
static int mtk_set_link_ksettings(struct net_device *ndev,
@@ -2177,7 +2571,7 @@ static int mtk_set_link_ksettings(struct net_device *ndev,
if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
return -EBUSY;
- return phy_ethtool_ksettings_set(ndev->phydev, cmd);
+ return phylink_ethtool_ksettings_set(mac->phylink, cmd);
}
static void mtk_get_drvinfo(struct net_device *dev,
@@ -2211,22 +2605,10 @@ static int mtk_nway_reset(struct net_device *dev)
if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
return -EBUSY;
- return genphy_restart_aneg(dev->phydev);
-}
+ if (!mac->phylink)
+ return -ENOTSUPP;
-static u32 mtk_get_link(struct net_device *dev)
-{
- struct mtk_mac *mac = netdev_priv(dev);
- int err;
-
- if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
- return -EBUSY;
-
- err = genphy_update_link(dev->phydev);
- if (err)
- return ethtool_op_get_link(dev);
-
- return dev->phydev->link;
+ return phylink_ethtool_nway_reset(mac->phylink);
}
static void mtk_get_strings(struct net_device *dev, u32 stringset, u8 *data)
@@ -2346,7 +2728,7 @@ static const struct ethtool_ops mtk_ethtool_ops = {
.get_msglevel = mtk_get_msglevel,
.set_msglevel = mtk_set_msglevel,
.nway_reset = mtk_nway_reset,
- .get_link = mtk_get_link,
+ .get_link = ethtool_op_get_link,
.get_strings = mtk_get_strings,
.get_sset_count = mtk_get_sset_count,
.get_ethtool_stats = mtk_get_ethtool_stats,
@@ -2374,9 +2756,10 @@ static const struct net_device_ops mtk_netdev_ops = {
static int mtk_add_mac(struct mtk_eth *eth, struct device_node *np)
{
- struct mtk_mac *mac;
const __be32 *_id = of_get_property(np, "reg", NULL);
- int id, err;
+ struct phylink *phylink;
+ int phy_mode, id, err;
+ struct mtk_mac *mac;
if (!_id) {
dev_err(eth->dev, "missing mac id\n");
@@ -2420,18 +2803,44 @@ static int mtk_add_mac(struct mtk_eth *eth, struct device_node *np)
u64_stats_init(&mac->hw_stats->syncp);
mac->hw_stats->reg_offset = id * MTK_STAT_OFFSET;
+ /* phylink create */
+ phy_mode = of_get_phy_mode(np);
+ if (phy_mode < 0) {
+ dev_err(eth->dev, "incorrect phy-mode\n");
+ err = -EINVAL;
+ goto free_netdev;
+ }
+
+ /* mac config is not set */
+ mac->interface = PHY_INTERFACE_MODE_NA;
+ mac->mode = MLO_AN_PHY;
+ mac->speed = SPEED_UNKNOWN;
+
+ mac->phylink_config.dev = &eth->netdev[id]->dev;
+ mac->phylink_config.type = PHYLINK_NETDEV;
+
+ phylink = phylink_create(&mac->phylink_config,
+ of_fwnode_handle(mac->of_node),
+ phy_mode, &mtk_phylink_ops);
+ if (IS_ERR(phylink)) {
+ err = PTR_ERR(phylink);
+ goto free_netdev;
+ }
+
+ mac->phylink = phylink;
+
SET_NETDEV_DEV(eth->netdev[id], eth->dev);
eth->netdev[id]->watchdog_timeo = 5 * HZ;
eth->netdev[id]->netdev_ops = &mtk_netdev_ops;
eth->netdev[id]->base_addr = (unsigned long)eth->base;
- eth->netdev[id]->hw_features = MTK_HW_FEATURES;
+ eth->netdev[id]->hw_features = eth->soc->hw_features;
if (eth->hwlro)
eth->netdev[id]->hw_features |= NETIF_F_LRO;
- eth->netdev[id]->vlan_features = MTK_HW_FEATURES &
+ eth->netdev[id]->vlan_features = eth->soc->hw_features &
~(NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX);
- eth->netdev[id]->features |= MTK_HW_FEATURES;
+ eth->netdev[id]->features |= eth->soc->hw_features;
eth->netdev[id]->ethtool_ops = &mtk_ethtool_ops;
eth->netdev[id]->irq = eth->irq[0];
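phylink_create() only builds the instance; the attach/detach side of a phylink conversion usually lives in ndo_open/ndo_stop. A minimal sketch of that companion pattern, assuming the mtk_mac fields added in this hunk (example_open/example_stop are illustrative names, not part of the patch):

static int example_open(struct net_device *dev)
{
	struct mtk_mac *mac = netdev_priv(dev);
	int err;

	/* Parse phy-handle/fixed-link from DT and attach the PHY */
	err = phylink_of_phy_connect(mac->phylink, mac->of_node, 0);
	if (err)
		return err;

	phylink_start(mac->phylink);	/* begin link management */
	return 0;
}

static int example_stop(struct net_device *dev)
{
	struct mtk_mac *mac = netdev_priv(dev);

	phylink_stop(mac->phylink);
	phylink_disconnect_phy(mac->phylink);
	return 0;
}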
@@ -2446,11 +2855,9 @@ free_netdev:
static int mtk_probe(struct platform_device *pdev)
{
- struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
struct device_node *mac_np;
struct mtk_eth *eth;
- int err;
- int i;
+ int err, i;
eth = devm_kzalloc(&pdev->dev, sizeof(*eth), GFP_KERNEL);
if (!eth)
@@ -2459,19 +2866,36 @@ static int mtk_probe(struct platform_device *pdev)
eth->soc = of_device_get_match_data(&pdev->dev);
eth->dev = &pdev->dev;
- eth->base = devm_ioremap_resource(&pdev->dev, res);
+ eth->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(eth->base))
return PTR_ERR(eth->base);
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
+ eth->tx_int_mask_reg = MTK_QDMA_INT_MASK;
+ eth->tx_int_status_reg = MTK_QDMA_INT_STATUS;
+ } else {
+ eth->tx_int_mask_reg = MTK_PDMA_INT_MASK;
+ eth->tx_int_status_reg = MTK_PDMA_INT_STATUS;
+ }
+
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
+ eth->rx_dma_l4_valid = RX_DMA_L4_VALID_PDMA;
+ eth->ip_align = NET_IP_ALIGN;
+ } else {
+ eth->rx_dma_l4_valid = RX_DMA_L4_VALID;
+ }
+
spin_lock_init(&eth->page_lock);
spin_lock_init(&eth->tx_irq_lock);
spin_lock_init(&eth->rx_irq_lock);
- eth->ethsys = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
- "mediatek,ethsys");
- if (IS_ERR(eth->ethsys)) {
- dev_err(&pdev->dev, "no ethsys regmap found\n");
- return PTR_ERR(eth->ethsys);
+ if (!MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
+ eth->ethsys = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
+ "mediatek,ethsys");
+ if (IS_ERR(eth->ethsys)) {
+ dev_err(&pdev->dev, "no ethsys regmap found\n");
+ return PTR_ERR(eth->ethsys);
+ }
}
if (MTK_HAS_CAPS(eth->soc->caps, MTK_INFRA)) {
@@ -2572,9 +2996,12 @@ static int mtk_probe(struct platform_device *pdev)
if (err)
goto err_free_dev;
- err = mtk_mdio_init(eth);
- if (err)
- goto err_free_dev;
+ /* No MT7628/88 support yet */
+ if (!MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
+ err = mtk_mdio_init(eth);
+ if (err)
+ goto err_free_dev;
+ }
for (i = 0; i < MTK_MAX_DEVS; i++) {
if (!eth->netdev[i])
@@ -2616,6 +3043,7 @@ err_deinit_hw:
static int mtk_remove(struct platform_device *pdev)
{
struct mtk_eth *eth = platform_get_drvdata(pdev);
+ struct mtk_mac *mac;
int i;
/* stop all devices to make sure that dma is properly shut down */
@@ -2623,6 +3051,8 @@ static int mtk_remove(struct platform_device *pdev)
if (!eth->netdev[i])
continue;
mtk_stop(eth->netdev[i]);
+ mac = netdev_priv(eth->netdev[i]);
+ phylink_disconnect_phy(mac->phylink);
}
mtk_hw_deinit(eth);
@@ -2637,12 +3067,14 @@ static int mtk_remove(struct platform_device *pdev)
static const struct mtk_soc_data mt2701_data = {
.caps = MT7623_CAPS | MTK_HWLRO,
+ .hw_features = MTK_HW_FEATURES,
.required_clks = MT7623_CLKS_BITMAP,
.required_pctl = true,
};
static const struct mtk_soc_data mt7621_data = {
.caps = MT7621_CAPS,
+ .hw_features = MTK_HW_FEATURES,
.required_clks = MT7621_CLKS_BITMAP,
.required_pctl = false,
};
@@ -2650,12 +3082,14 @@ static const struct mtk_soc_data mt7621_data = {
static const struct mtk_soc_data mt7622_data = {
.ana_rgc3 = 0x2028,
.caps = MT7622_CAPS | MTK_HWLRO,
+ .hw_features = MTK_HW_FEATURES,
.required_clks = MT7622_CLKS_BITMAP,
.required_pctl = false,
};
static const struct mtk_soc_data mt7623_data = {
.caps = MT7623_CAPS | MTK_HWLRO,
+ .hw_features = MTK_HW_FEATURES,
.required_clks = MT7623_CLKS_BITMAP,
.required_pctl = true,
};
@@ -2663,16 +3097,25 @@ static const struct mtk_soc_data mt7623_data = {
static const struct mtk_soc_data mt7629_data = {
.ana_rgc3 = 0x128,
.caps = MT7629_CAPS | MTK_HWLRO,
+ .hw_features = MTK_HW_FEATURES,
.required_clks = MT7629_CLKS_BITMAP,
.required_pctl = false,
};
+static const struct mtk_soc_data rt5350_data = {
+ .caps = MT7628_CAPS,
+ .hw_features = MTK_HW_FEATURES_MT7628,
+ .required_clks = MT7628_CLKS_BITMAP,
+ .required_pctl = false,
+};
+
const struct of_device_id of_mtk_match[] = {
{ .compatible = "mediatek,mt2701-eth", .data = &mt2701_data},
{ .compatible = "mediatek,mt7621-eth", .data = &mt7621_data},
{ .compatible = "mediatek,mt7622-eth", .data = &mt7622_data},
{ .compatible = "mediatek,mt7623-eth", .data = &mt7623_data},
{ .compatible = "mediatek,mt7629-eth", .data = &mt7629_data},
+ { .compatible = "ralink,rt5350-eth", .data = &rt5350_data},
{},
};
MODULE_DEVICE_TABLE(of, of_mtk_match);
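Each .data pointer in the match table above is retrieved at probe time through of_device_get_match_data(); a hedged sketch of that flow, mirroring the mtk_probe() hunk earlier in the patch (example_probe is an illustrative name):

#include <linux/of_device.h>
#include <linux/platform_device.h>

static int example_probe(struct platform_device *pdev)
{
	/* Returns the .data field of the matching of_device_id entry */
	const struct mtk_soc_data *soc = of_device_get_match_data(&pdev->dev);

	if (!soc)
		return -EINVAL;

	if (MTK_HAS_CAPS(soc->caps, MTK_SOC_MT7628))
		; /* take the PDMA-only code paths */

	return 0;
}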
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.h b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
index bab94f763e2c..76bd12cb8150 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
@@ -14,6 +14,7 @@
#include <linux/of_net.h>
#include <linux/u64_stats_sync.h>
#include <linux/refcount.h>
+#include <linux/phylink.h>
#define MTK_QDMA_PAGE_SIZE 2048
#define MTK_MAX_RX_LENGTH 1536
@@ -39,7 +40,8 @@
NETIF_F_SG | NETIF_F_TSO | \
NETIF_F_TSO6 | \
NETIF_F_IPV6_CSUM)
-#define NEXT_RX_DESP_IDX(X, Y) (((X) + 1) & ((Y) - 1))
+#define MTK_HW_FEATURES_MT7628 (NETIF_F_SG | NETIF_F_RXCSUM)
+#define NEXT_DESP_IDX(X, Y) (((X) + 1) & ((Y) - 1))
#define MTK_MAX_RX_RING_NUM 4
#define MTK_HW_LRO_DMA_SIZE 8
@@ -118,6 +120,7 @@
/* PDMA Global Configuration Register */
#define MTK_PDMA_GLO_CFG 0xa04
#define MTK_MULTI_EN BIT(10)
+#define MTK_PDMA_SIZE_8DWORDS (1 << 4)
/* PDMA Reset Index Register */
#define MTK_PDMA_RST_IDX 0xa08
@@ -212,7 +215,7 @@
#define FC_THRES_MIN 0x4444
/* QDMA Interrupt Status Register */
-#define MTK_QMTK_INT_STATUS 0x1A18
+#define MTK_QDMA_INT_STATUS 0x1A18
#define MTK_RX_DONE_DLY BIT(30)
#define MTK_RX_DONE_INT3 BIT(19)
#define MTK_RX_DONE_INT2 BIT(18)
@@ -276,11 +279,18 @@
#define TX_DMA_OWNER_CPU BIT(31)
#define TX_DMA_LS0 BIT(30)
#define TX_DMA_PLEN0(_x) (((_x) & MTK_TX_DMA_BUF_LEN) << 16)
+#define TX_DMA_PLEN1(_x) ((_x) & MTK_TX_DMA_BUF_LEN)
#define TX_DMA_SWC BIT(14)
#define TX_DMA_SDL(_x) (((_x) & 0x3fff) << 16)
+/* PDMA on MT7628 */
+#define TX_DMA_DONE BIT(31)
+#define TX_DMA_LS1 BIT(14)
+#define TX_DMA_DESP2_DEF (TX_DMA_LS0 | TX_DMA_DONE)
+
/* QDMA descriptor rxd2 */
#define RX_DMA_DONE BIT(31)
+#define RX_DMA_LSO BIT(30)
#define RX_DMA_PLEN0(_x) (((_x) & 0x3fff) << 16)
#define RX_DMA_GET_PLEN0(_x) (((_x) >> 16) & 0x3fff)
@@ -289,6 +299,7 @@
/* QDMA descriptor rxd4 */
#define RX_DMA_L4_VALID BIT(24)
+#define RX_DMA_L4_VALID_PDMA BIT(30) /* when PDMA is used */
#define RX_DMA_FPORT_SHIFT 19
#define RX_DMA_FPORT_MASK 0x7
@@ -320,12 +331,19 @@
#define MAC_MCR_SPEED_100 BIT(2)
#define MAC_MCR_FORCE_DPX BIT(1)
#define MAC_MCR_FORCE_LINK BIT(0)
-#define MAC_MCR_FIXED_LINK (MAC_MCR_MAX_RX_1536 | MAC_MCR_IPG_CFG | \
- MAC_MCR_FORCE_MODE | MAC_MCR_TX_EN | \
- MAC_MCR_RX_EN | MAC_MCR_BACKOFF_EN | \
- MAC_MCR_BACKPR_EN | MAC_MCR_FORCE_RX_FC | \
- MAC_MCR_FORCE_TX_FC | MAC_MCR_SPEED_1000 | \
- MAC_MCR_FORCE_DPX | MAC_MCR_FORCE_LINK)
+#define MAC_MCR_FORCE_LINK_DOWN (MAC_MCR_FORCE_MODE)
+
+/* MAC status registers */
+#define MTK_MAC_MSR(x) (0x10108 + (x * 0x100))
+#define MAC_MSR_EEE1G BIT(7)
+#define MAC_MSR_EEE100M BIT(6)
+#define MAC_MSR_RX_FC BIT(5)
+#define MAC_MSR_TX_FC BIT(4)
+#define MAC_MSR_SPEED_1000 BIT(3)
+#define MAC_MSR_SPEED_100 BIT(2)
+#define MAC_MSR_SPEED_MASK (MAC_MSR_SPEED_1000 | MAC_MSR_SPEED_100)
+#define MAC_MSR_DPX BIT(1)
+#define MAC_MSR_LINK BIT(0)
/* TRGMII RXC control register */
#define TRGMII_RCK_CTRL 0x10300
@@ -394,14 +412,38 @@
/* Register to auto-negotiation restart */
#define SGMSYS_PCS_CONTROL_1 0x0
#define SGMII_AN_RESTART BIT(9)
+#define SGMII_ISOLATE BIT(10)
+#define SGMII_AN_ENABLE BIT(12)
+#define SGMII_LINK_STATUS		BIT(18)
+#define SGMII_AN_ABILITY BIT(19)
+#define SGMII_AN_COMPLETE BIT(21)
+#define SGMII_PCS_FAULT BIT(23)
+#define SGMII_AN_EXPANSION_CLR BIT(30)
/* Register for the programmable link timer; the unit is 2 * 8 ns */
#define SGMSYS_PCS_LINK_TIMER 0x18
#define SGMII_LINK_TIMER_DEFAULT (0x186a0 & GENMASK(19, 0))
/* Register to control remote fault */
-#define SGMSYS_SGMII_MODE 0x20
-#define SGMII_REMOTE_FAULT_DIS BIT(8)
+#define SGMSYS_SGMII_MODE 0x20
+#define SGMII_IF_MODE_BIT0 BIT(0)
+#define SGMII_SPEED_DUPLEX_AN BIT(1)
+#define SGMII_SPEED_10 0x0
+#define SGMII_SPEED_100 BIT(2)
+#define SGMII_SPEED_1000 BIT(3)
+#define SGMII_DUPLEX_FULL BIT(4)
+#define SGMII_IF_MODE_BIT5 BIT(5)
+#define SGMII_REMOTE_FAULT_DIS BIT(8)
+#define SGMII_CODE_SYNC_SET_VAL BIT(9)
+#define SGMII_CODE_SYNC_SET_EN BIT(10)
+#define SGMII_SEND_AN_ERROR_EN BIT(11)
+#define SGMII_IF_MODE_MASK GENMASK(5, 1)
+
+/* Register to set SGMII speed, ANA RG_ Control Signals III */
+#define SGMSYS_ANA_RG_CS3 0x2028
+#define RG_PHY_SPEED_MASK (BIT(2) | BIT(3))
+#define RG_PHY_SPEED_1_25G 0x0
+#define RG_PHY_SPEED_3_125G BIT(2)
/* Register to power up QPHY */
#define SGMSYS_QPHY_PWR_STATE_CTRL 0xe8
@@ -412,6 +454,19 @@
#define CO_QPHY_SEL BIT(0)
#define GEPHY_MAC_SEL BIT(1)
+/* MT7628/88-specific registers */
+#define MT7628_PDMA_OFFSET 0x0800
+#define MT7628_SDM_OFFSET 0x0c00
+
+#define MT7628_TX_BASE_PTR0 (MT7628_PDMA_OFFSET + 0x00)
+#define MT7628_TX_MAX_CNT0 (MT7628_PDMA_OFFSET + 0x04)
+#define MT7628_TX_CTX_IDX0 (MT7628_PDMA_OFFSET + 0x08)
+#define MT7628_TX_DTX_IDX0 (MT7628_PDMA_OFFSET + 0x0c)
+#define MT7628_PST_DTX_IDX0 BIT(0)
+
+#define MT7628_SDM_MAC_ADRL (MT7628_SDM_OFFSET + 0x0c)
+#define MT7628_SDM_MAC_ADRH (MT7628_SDM_OFFSET + 0x10)
+
struct mtk_rx_dma {
unsigned int rxd1;
unsigned int rxd2;
@@ -509,6 +564,7 @@ enum mtk_clks_map {
BIT(MTK_CLK_SGMII_CK) | \
BIT(MTK_CLK_ETH2PLL))
#define MT7621_CLKS_BITMAP (0)
+#define MT7628_CLKS_BITMAP (0)
#define MT7629_CLKS_BITMAP (BIT(MTK_CLK_ETHIF) | BIT(MTK_CLK_ESW) | \
BIT(MTK_CLK_GP0) | BIT(MTK_CLK_GP1) | \
BIT(MTK_CLK_GP2) | BIT(MTK_CLK_FE) | \
@@ -563,6 +619,10 @@ struct mtk_tx_ring {
struct mtk_tx_dma *last_free;
u16 thresh;
atomic_t free_count;
+ int dma_size;
+ struct mtk_tx_dma *dma_pdma; /* For MT7628/88 PDMA handling */
+ dma_addr_t phys_pdma;
+ int cpu_idx;
};
/* PDMA rx ring mode */
@@ -604,6 +664,8 @@ enum mkt_eth_capabilities {
MTK_HWLRO_BIT,
MTK_SHARED_INT_BIT,
MTK_TRGMII_MT7621_CLK_BIT,
+ MTK_QDMA_BIT,
+ MTK_SOC_MT7628_BIT,
/* MUX BITS*/
MTK_ETH_MUX_GDM1_TO_GMAC1_ESW_BIT,
@@ -634,6 +696,8 @@ enum mkt_eth_capabilities {
#define MTK_HWLRO BIT(MTK_HWLRO_BIT)
#define MTK_SHARED_INT BIT(MTK_SHARED_INT_BIT)
#define MTK_TRGMII_MT7621_CLK BIT(MTK_TRGMII_MT7621_CLK_BIT)
+#define MTK_QDMA BIT(MTK_QDMA_BIT)
+#define MTK_SOC_MT7628 BIT(MTK_SOC_MT7628_BIT)
#define MTK_ETH_MUX_GDM1_TO_GMAC1_ESW \
BIT(MTK_ETH_MUX_GDM1_TO_GMAC1_ESW_BIT)
@@ -687,26 +751,31 @@ enum mkt_eth_capabilities {
#define MTK_HAS_CAPS(caps, _x) (((caps) & (_x)) == (_x))
#define MT7621_CAPS (MTK_GMAC1_RGMII | MTK_GMAC1_TRGMII | \
- MTK_GMAC2_RGMII | MTK_SHARED_INT | MTK_TRGMII_MT7621_CLK)
+ MTK_GMAC2_RGMII | MTK_SHARED_INT | \
+ MTK_TRGMII_MT7621_CLK | MTK_QDMA)
#define MT7622_CAPS (MTK_GMAC1_RGMII | MTK_GMAC1_SGMII | MTK_GMAC2_RGMII | \
MTK_GMAC2_SGMII | MTK_GDM1_ESW | \
MTK_MUX_GDM1_TO_GMAC1_ESW | \
- MTK_MUX_GMAC1_GMAC2_TO_SGMII_RGMII)
+ MTK_MUX_GMAC1_GMAC2_TO_SGMII_RGMII | MTK_QDMA)
-#define MT7623_CAPS (MTK_GMAC1_RGMII | MTK_GMAC1_TRGMII | MTK_GMAC2_RGMII)
+#define MT7623_CAPS (MTK_GMAC1_RGMII | MTK_GMAC1_TRGMII | MTK_GMAC2_RGMII | \
+ MTK_QDMA)
+
+#define MT7628_CAPS (MTK_SHARED_INT | MTK_SOC_MT7628)
#define MT7629_CAPS (MTK_GMAC1_SGMII | MTK_GMAC2_SGMII | MTK_GMAC2_GEPHY | \
MTK_GDM1_ESW | MTK_MUX_GDM1_TO_GMAC1_ESW | \
MTK_MUX_GMAC2_GMAC0_TO_GEPHY | \
MTK_MUX_U3_GMAC2_TO_QPHY | \
- MTK_MUX_GMAC12_TO_GEPHY_SGMII)
+ MTK_MUX_GMAC12_TO_GEPHY_SGMII | MTK_QDMA)
/* struct mtk_soc_data - This is the structure holding all differences
 * among various platforms
 * @ana_rgc3:                   The offset of register ANA_RGC3 within the
 *				sgmiisys syscon
 * @caps			Flags showing the extra capabilities of the SoC
+ * @hw_features		Flags showing the hardware features
 * @required_clks		Bitmap of the clocks required on
 *				the target SoC
 * @required_pctl		A bool indicating whether the SoC requires
@@ -717,6 +786,7 @@ struct mtk_soc_data {
u32 caps;
u32 required_clks;
bool required_pctl;
+ netdev_features_t hw_features;
};
/* currently no SoC has more than 2 macs */
@@ -810,27 +880,33 @@ struct mtk_eth {
unsigned long state;
const struct mtk_soc_data *soc;
+
+ u32 tx_int_mask_reg;
+ u32 tx_int_status_reg;
+ u32 rx_dma_l4_valid;
+ int ip_align;
};
/* struct mtk_mac - the structure that holds the info about the MACs of the
* SoC
* @id: The number of the MAC
- * @ge_mode: Interface mode kept for setup restoring
+ * @interface: Interface mode kept for detecting changes in HW settings
 * @of_node: Our devicetree node
 * @hw: Backpointer to our main data structure
* @hw_stats: Packet statistics counter
- * @trgmii Indicate if the MAC uses TRGMII connected to internal
- switch
*/
struct mtk_mac {
int id;
- int ge_mode;
+ phy_interface_t interface;
+ unsigned int mode;
+ int speed;
struct device_node *of_node;
+ struct phylink *phylink;
+ struct phylink_config phylink_config;
struct mtk_eth *hw;
struct mtk_hw_stats *hw_stats;
__be32 hwlro_ip[MTK_MAX_LRO_IP_CNT];
int hwlro_ip_cnt;
- bool trgmii;
};
/* the struct describing the SoC. these are declared in the soc_xyz.c files */
@@ -845,7 +921,12 @@ u32 mtk_r32(struct mtk_eth *eth, unsigned reg);
int mtk_sgmii_init(struct mtk_sgmii *ss, struct device_node *np,
u32 ana_rgc3);
int mtk_sgmii_setup_mode_an(struct mtk_sgmii *ss, int id);
-int mtk_sgmii_setup_mode_force(struct mtk_sgmii *ss, int id);
-int mtk_setup_hw_path(struct mtk_eth *eth, int mac_id, int phymode);
+int mtk_sgmii_setup_mode_force(struct mtk_sgmii *ss, int id,
+ const struct phylink_link_state *state);
+void mtk_sgmii_restart_an(struct mtk_eth *eth, int mac_id);
+
+int mtk_gmac_sgmii_path_setup(struct mtk_eth *eth, int mac_id);
+int mtk_gmac_gephy_path_setup(struct mtk_eth *eth, int mac_id);
+int mtk_gmac_rgmii_path_setup(struct mtk_eth *eth, int mac_id);
#endif /* MTK_ETH_H */
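One subtlety in the capability test used throughout this header: MTK_HAS_CAPS() requires every requested bit to be present, not just one of them. A short worked example:

/* MTK_HAS_CAPS(caps, _x) expands to (((caps) & (_x)) == (_x)), so a
 * multi-bit query only succeeds when all bits are set:
 *
 *   caps = MTK_QDMA | MTK_HWLRO;
 *   MTK_HAS_CAPS(caps, MTK_QDMA)                -> true
 *   MTK_HAS_CAPS(caps, MTK_QDMA | MTK_GMAC1_RGMII) -> false
 */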
diff --git a/drivers/net/ethernet/mediatek/mtk_sgmii.c b/drivers/net/ethernet/mediatek/mtk_sgmii.c
index ff509d42d818..4db27dfc7ec1 100644
--- a/drivers/net/ethernet/mediatek/mtk_sgmii.c
+++ b/drivers/net/ethernet/mediatek/mtk_sgmii.c
@@ -16,8 +16,7 @@
int mtk_sgmii_init(struct mtk_sgmii *ss, struct device_node *r, u32 ana_rgc3)
{
struct device_node *np;
- const char *str;
- int i, err;
+ int i;
ss->ana_rgc3 = ana_rgc3;
@@ -29,19 +28,6 @@ int mtk_sgmii_init(struct mtk_sgmii *ss, struct device_node *r, u32 ana_rgc3)
ss->regmap[i] = syscon_node_to_regmap(np);
if (IS_ERR(ss->regmap[i]))
return PTR_ERR(ss->regmap[i]);
-
- err = of_property_read_string(np, "mediatek,physpeed", &str);
- if (err)
- return err;
-
- if (!strcmp(str, "2500"))
- ss->flags[i] |= MTK_SGMII_PHYSPEED_2500;
- else if (!strcmp(str, "1000"))
- ss->flags[i] |= MTK_SGMII_PHYSPEED_1000;
- else if (!strcmp(str, "auto"))
- ss->flags[i] |= MTK_SGMII_PHYSPEED_AN;
- else
- return -EINVAL;
}
return 0;
@@ -73,27 +59,45 @@ int mtk_sgmii_setup_mode_an(struct mtk_sgmii *ss, int id)
return 0;
}
-int mtk_sgmii_setup_mode_force(struct mtk_sgmii *ss, int id)
+int mtk_sgmii_setup_mode_force(struct mtk_sgmii *ss, int id,
+ const struct phylink_link_state *state)
{
unsigned int val;
- int mode;
if (!ss->regmap[id])
return -EINVAL;
regmap_read(ss->regmap[id], ss->ana_rgc3, &val);
- val &= ~GENMASK(3, 2);
- mode = ss->flags[id] & MTK_SGMII_PHYSPEED_MASK;
- val |= (mode == MTK_SGMII_PHYSPEED_1000) ? 0 : BIT(2);
+ val &= ~RG_PHY_SPEED_MASK;
+ if (state->interface == PHY_INTERFACE_MODE_2500BASEX)
+ val |= RG_PHY_SPEED_3_125G;
regmap_write(ss->regmap[id], ss->ana_rgc3, val);
/* Disable SGMII AN */
regmap_read(ss->regmap[id], SGMSYS_PCS_CONTROL_1, &val);
- val &= ~BIT(12);
+ val &= ~SGMII_AN_ENABLE;
regmap_write(ss->regmap[id], SGMSYS_PCS_CONTROL_1, val);
/* SGMII force mode setting */
- val = 0x31120019;
+ regmap_read(ss->regmap[id], SGMSYS_SGMII_MODE, &val);
+ val &= ~SGMII_IF_MODE_MASK;
+
+ switch (state->speed) {
+ case SPEED_10:
+ val |= SGMII_SPEED_10;
+ break;
+ case SPEED_100:
+ val |= SGMII_SPEED_100;
+ break;
+ case SPEED_2500:
+ case SPEED_1000:
+ val |= SGMII_SPEED_1000;
+ break;
+	}
+
+ if (state->duplex == DUPLEX_FULL)
+ val |= SGMII_DUPLEX_FULL;
+
regmap_write(ss->regmap[id], SGMSYS_SGMII_MODE, val);
/* Release PHYA power down state */
@@ -103,3 +107,20 @@ int mtk_sgmii_setup_mode_force(struct mtk_sgmii *ss, int id)
return 0;
}
+
+void mtk_sgmii_restart_an(struct mtk_eth *eth, int mac_id)
+{
+ struct mtk_sgmii *ss = eth->sgmii;
+ unsigned int val, sid;
+
+	/* Decide how the GMAC and SGMIISYS are mapped */
+ sid = (MTK_HAS_CAPS(eth->soc->caps, MTK_SHARED_SGMII)) ?
+ 0 : mac_id;
+
+ if (!ss->regmap[sid])
+ return;
+
+ regmap_read(ss->regmap[sid], SGMSYS_PCS_CONTROL_1, &val);
+ val |= SGMII_AN_RESTART;
+ regmap_write(ss->regmap[sid], SGMSYS_PCS_CONTROL_1, val);
+}
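All of the SGMII helpers above share the same regmap read-modify-write shape. A minimal standalone sketch of that pattern (example_rmw is an illustrative name, not part of the driver):

#include <linux/regmap.h>

/* Clear a field, then set a new value, preserving the register's
 * other bits.
 */
static int example_rmw(struct regmap *map, unsigned int reg,
		       unsigned int mask, unsigned int set)
{
	unsigned int val;
	int err;

	err = regmap_read(map, reg, &val);
	if (err)
		return err;

	val &= ~mask;
	val |= set;
	return regmap_write(map, reg, val);
}

/* Note: regmap_update_bits(map, reg, mask, set) performs the same
 * sequence under regmap's internal lock and could replace the
 * open-coded read/modify/write.
 */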
diff --git a/drivers/net/ethernet/mellanox/mlx4/crdump.c b/drivers/net/ethernet/mellanox/mlx4/crdump.c
index 88316c743820..eaf08f7ad128 100644
--- a/drivers/net/ethernet/mellanox/mlx4/crdump.c
+++ b/drivers/net/ethernet/mellanox/mlx4/crdump.c
@@ -99,8 +99,7 @@ static void mlx4_crdump_collect_crspace(struct mlx4_dev *dev,
readl(cr_space + offset);
err = devlink_region_snapshot_create(crdump->region_crspace,
- cr_res_size, crspace_data,
- id, &kvfree);
+ crspace_data, id, &kvfree);
if (err) {
kvfree(crspace_data);
mlx4_warn(dev, "crdump: devlink create %s snapshot id %d err %d\n",
@@ -139,9 +138,7 @@ static void mlx4_crdump_collect_fw_health(struct mlx4_dev *dev,
readl(health_buf_start + offset);
err = devlink_region_snapshot_create(crdump->region_fw_health,
- HEALTH_BUFFER_SIZE,
- health_data,
- id, &kvfree);
+ health_data, id, &kvfree);
if (err) {
kvfree(health_data);
mlx4_warn(dev, "crdump: devlink create %s snapshot id %d err %d\n",
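These crdump hunks track a devlink API change: devlink_region_snapshot_create() no longer takes a length argument, the snapshot size being implied by the region itself. A hedged usage sketch (REGION_SIZE and the fill step are placeholders):

/* Snapshot data sized to match the region created earlier with
 * devlink_region_create(); the destructor frees the buffer when the
 * snapshot is deleted.
 */
static int example_snapshot(struct devlink_region *region, u32 id)
{
	u8 *data = kvzalloc(REGION_SIZE, GFP_KERNEL);	/* REGION_SIZE: assumed */
	int err;

	if (!data)
		return -ENOMEM;

	/* ... fill data from the device ... */

	err = devlink_region_snapshot_create(region, data, id, &kvfree);
	if (err)
		kvfree(data);	/* ownership only transfers on success */
	return err;
}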
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
index c1438ae52a11..40ec5acf79c0 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
@@ -2645,14 +2645,6 @@ out:
en_err(priv, "failed setting L2 tunnel configuration ret %d\n", ret);
return;
}
-
- /* set offloads */
- priv->dev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
- NETIF_F_RXCSUM |
- NETIF_F_TSO | NETIF_F_TSO6 |
- NETIF_F_GSO_UDP_TUNNEL |
- NETIF_F_GSO_UDP_TUNNEL_CSUM |
- NETIF_F_GSO_PARTIAL;
}
static void mlx4_en_del_vxlan_offloads(struct work_struct *work)
@@ -2660,14 +2652,6 @@ static void mlx4_en_del_vxlan_offloads(struct work_struct *work)
int ret;
struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv,
vxlan_del_task);
- /* unset offloads */
- priv->dev->hw_enc_features &= ~(NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
- NETIF_F_RXCSUM |
- NETIF_F_TSO | NETIF_F_TSO6 |
- NETIF_F_GSO_UDP_TUNNEL |
- NETIF_F_GSO_UDP_TUNNEL_CSUM |
- NETIF_F_GSO_PARTIAL);
-
ret = mlx4_SET_PORT_VXLAN(priv->mdev->dev, priv->port,
VXLAN_STEER_BY_OUTER_MAC, 0);
if (ret)
@@ -3415,6 +3399,23 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
if (mdev->LSO_support)
dev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6;
+ if (mdev->dev->caps.tunnel_offload_mode ==
+ MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) {
+ dev->hw_features |= NETIF_F_GSO_UDP_TUNNEL |
+ NETIF_F_GSO_UDP_TUNNEL_CSUM |
+ NETIF_F_GSO_PARTIAL;
+ dev->features |= NETIF_F_GSO_UDP_TUNNEL |
+ NETIF_F_GSO_UDP_TUNNEL_CSUM |
+ NETIF_F_GSO_PARTIAL;
+ dev->gso_partial_features = NETIF_F_GSO_UDP_TUNNEL_CSUM;
+ dev->hw_enc_features = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
+ NETIF_F_RXCSUM |
+ NETIF_F_TSO | NETIF_F_TSO6 |
+ NETIF_F_GSO_UDP_TUNNEL |
+ NETIF_F_GSO_UDP_TUNNEL_CSUM |
+ NETIF_F_GSO_PARTIAL;
+ }
+
dev->vlan_features = dev->hw_features;
dev->hw_features |= NETIF_F_RXCSUM | NETIF_F_RXHASH;
@@ -3483,16 +3484,6 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
priv->rss_hash_fn = ETH_RSS_HASH_TOP;
}
- if (mdev->dev->caps.tunnel_offload_mode == MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) {
- dev->hw_features |= NETIF_F_GSO_UDP_TUNNEL |
- NETIF_F_GSO_UDP_TUNNEL_CSUM |
- NETIF_F_GSO_PARTIAL;
- dev->features |= NETIF_F_GSO_UDP_TUNNEL |
- NETIF_F_GSO_UDP_TUNNEL_CSUM |
- NETIF_F_GSO_PARTIAL;
- dev->gso_partial_features = NETIF_F_GSO_UDP_TUNNEL_CSUM;
- }
-
/* MTU range: 68 - hw-specific max */
dev->min_mtu = ETH_MIN_MTU;
dev->max_mtu = priv->max_mtu;
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
index 36a92b19e613..4d5ca302c067 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
@@ -772,9 +772,7 @@ static bool mlx4_en_build_dma_wqe(struct mlx4_en_priv *priv,
/* Map fragments if any */
for (i_frag = shinfo->nr_frags - 1; i_frag >= 0; i_frag--) {
- const struct skb_frag_struct *frag;
-
- frag = &shinfo->frags[i_frag];
+ const skb_frag_t *frag = &shinfo->frags[i_frag];
byte_count = skb_frag_size(frag);
dma = skb_frag_dma_map(ddev, frag,
0, byte_count,
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
index 1f6e16d5ea6b..07c204bd3fc4 100644
--- a/drivers/net/ethernet/mellanox/mlx4/main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/main.c
@@ -2292,23 +2292,31 @@ static int mlx4_init_fw(struct mlx4_dev *dev)
static int mlx4_init_hca(struct mlx4_dev *dev)
{
struct mlx4_priv *priv = mlx4_priv(dev);
+ struct mlx4_init_hca_param *init_hca = NULL;
+ struct mlx4_dev_cap *dev_cap = NULL;
struct mlx4_adapter adapter;
- struct mlx4_dev_cap dev_cap;
struct mlx4_profile profile;
- struct mlx4_init_hca_param init_hca;
u64 icm_size;
struct mlx4_config_dev_params params;
int err;
if (!mlx4_is_slave(dev)) {
- err = mlx4_dev_cap(dev, &dev_cap);
+ dev_cap = kzalloc(sizeof(*dev_cap), GFP_KERNEL);
+ init_hca = kzalloc(sizeof(*init_hca), GFP_KERNEL);
+
+ if (!dev_cap || !init_hca) {
+ err = -ENOMEM;
+ goto out_free;
+ }
+
+ err = mlx4_dev_cap(dev, dev_cap);
if (err) {
mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting\n");
- return err;
+ goto out_free;
}
- choose_steering_mode(dev, &dev_cap);
- choose_tunnel_offload_mode(dev, &dev_cap);
+ choose_steering_mode(dev, dev_cap);
+ choose_tunnel_offload_mode(dev, dev_cap);
if (dev->caps.dmfs_high_steer_mode == MLX4_STEERING_DMFS_A0_STATIC &&
mlx4_is_master(dev))
@@ -2331,48 +2339,48 @@ static int mlx4_init_hca(struct mlx4_dev *dev)
MLX4_STEERING_MODE_DEVICE_MANAGED)
profile.num_mcg = MLX4_FS_NUM_MCG;
- icm_size = mlx4_make_profile(dev, &profile, &dev_cap,
- &init_hca);
+ icm_size = mlx4_make_profile(dev, &profile, dev_cap,
+ init_hca);
if ((long long) icm_size < 0) {
err = icm_size;
- return err;
+ goto out_free;
}
dev->caps.max_fmr_maps = (1 << (32 - ilog2(dev->caps.num_mpts))) - 1;
if (enable_4k_uar || !dev->persist->num_vfs) {
- init_hca.log_uar_sz = ilog2(dev->caps.num_uars) +
+ init_hca->log_uar_sz = ilog2(dev->caps.num_uars) +
PAGE_SHIFT - DEFAULT_UAR_PAGE_SHIFT;
- init_hca.uar_page_sz = DEFAULT_UAR_PAGE_SHIFT - 12;
+ init_hca->uar_page_sz = DEFAULT_UAR_PAGE_SHIFT - 12;
} else {
- init_hca.log_uar_sz = ilog2(dev->caps.num_uars);
- init_hca.uar_page_sz = PAGE_SHIFT - 12;
+ init_hca->log_uar_sz = ilog2(dev->caps.num_uars);
+ init_hca->uar_page_sz = PAGE_SHIFT - 12;
}
- init_hca.mw_enabled = 0;
+ init_hca->mw_enabled = 0;
if (dev->caps.flags & MLX4_DEV_CAP_FLAG_MEM_WINDOW ||
dev->caps.bmme_flags & MLX4_BMME_FLAG_TYPE_2_WIN)
- init_hca.mw_enabled = INIT_HCA_TPT_MW_ENABLE;
+ init_hca->mw_enabled = INIT_HCA_TPT_MW_ENABLE;
- err = mlx4_init_icm(dev, &dev_cap, &init_hca, icm_size);
+ err = mlx4_init_icm(dev, dev_cap, init_hca, icm_size);
if (err)
- return err;
+ goto out_free;
- err = mlx4_INIT_HCA(dev, &init_hca);
+ err = mlx4_INIT_HCA(dev, init_hca);
if (err) {
mlx4_err(dev, "INIT_HCA command failed, aborting\n");
goto err_free_icm;
}
- if (dev_cap.flags2 & MLX4_DEV_CAP_FLAG2_SYS_EQS) {
- err = mlx4_query_func(dev, &dev_cap);
+ if (dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_SYS_EQS) {
+ err = mlx4_query_func(dev, dev_cap);
if (err < 0) {
mlx4_err(dev, "QUERY_FUNC command failed, aborting.\n");
goto err_close;
} else if (err & MLX4_QUERY_FUNC_NUM_SYS_EQS) {
- dev->caps.num_eqs = dev_cap.max_eqs;
- dev->caps.reserved_eqs = dev_cap.reserved_eqs;
- dev->caps.reserved_uars = dev_cap.reserved_uars;
+ dev->caps.num_eqs = dev_cap->max_eqs;
+ dev->caps.reserved_eqs = dev_cap->reserved_eqs;
+ dev->caps.reserved_uars = dev_cap->reserved_uars;
}
}
@@ -2381,14 +2389,13 @@ static int mlx4_init_hca(struct mlx4_dev *dev)
* read HCA frequency by QUERY_HCA command
*/
if (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_TS) {
- memset(&init_hca, 0, sizeof(init_hca));
- err = mlx4_QUERY_HCA(dev, &init_hca);
+ err = mlx4_QUERY_HCA(dev, init_hca);
if (err) {
mlx4_err(dev, "QUERY_HCA command failed, disable timestamp\n");
dev->caps.flags2 &= ~MLX4_DEV_CAP_FLAG2_TS;
} else {
dev->caps.hca_core_clock =
- init_hca.hca_core_clock;
+ init_hca->hca_core_clock;
}
/* In case we got HCA frequency 0 - disable timestamping
@@ -2464,7 +2471,8 @@ static int mlx4_init_hca(struct mlx4_dev *dev)
priv->eq_table.inta_pin = adapter.inta_pin;
memcpy(dev->board_id, adapter.board_id, sizeof(dev->board_id));
- return 0;
+ err = 0;
+ goto out_free;
unmap_bf:
unmap_internal_clock(dev);
@@ -2483,6 +2491,10 @@ err_free_icm:
if (!mlx4_is_slave(dev))
mlx4_free_icms(dev);
+out_free:
+ kfree(dev_cap);
+ kfree(init_hca);
+
return err;
}
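The mlx4_init_hca() rework above is a stock kernel pattern: mlx4_dev_cap and mlx4_init_hca_param are large enough to risk overflowing conservative stack-frame limits, so they move from locals to kzalloc() with a single exit label. The shape, condensed (struct big_caps and query_hw() are assumed stand-ins):

static int example_init(struct device *dev)
{
	struct big_caps *caps;	/* too large for the stack */
	int err;

	caps = kzalloc(sizeof(*caps), GFP_KERNEL);
	if (!caps)
		return -ENOMEM;

	err = query_hw(dev, caps);	/* query_hw(): assumed helper */
	if (err)
		goto out_free;

	/* ... use caps ... */
	err = 0;
out_free:
	kfree(caps);
	return err;
}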
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Makefile b/drivers/net/ethernet/mellanox/mlx5/core/Makefile
index 57d2cc666fe3..f4de9ccb5df1 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/Makefile
+++ b/drivers/net/ethernet/mellanox/mlx5/core/Makefile
@@ -23,8 +23,9 @@ mlx5_core-y := main.o cmd.o debugfs.o fw.o eq.o uar.o pagealloc.o \
#
mlx5_core-$(CONFIG_MLX5_CORE_EN) += en_main.o en_common.o en_fs.o en_ethtool.o \
en_tx.o en_rx.o en_dim.o en_txrx.o en/xdp.o en_stats.o \
- en_selftest.o en/port.o en/monitor_stats.o en/reporter_tx.o \
- en/params.o en/xsk/umem.o en/xsk/setup.o en/xsk/rx.o en/xsk/tx.o
+ en_selftest.o en/port.o en/monitor_stats.o en/health.o \
+ en/reporter_tx.o en/reporter_rx.o en/params.o en/xsk/umem.o \
+ en/xsk/setup.o en/xsk/rx.o en/xsk/tx.o
#
# Netdev extra
@@ -34,7 +35,8 @@ mlx5_core-$(CONFIG_MLX5_EN_RXNFC) += en_fs_ethtool.o
mlx5_core-$(CONFIG_MLX5_CORE_EN_DCB) += en_dcbnl.o en/port_buffer.o
mlx5_core-$(CONFIG_MLX5_ESWITCH) += en_rep.o en_tc.o en/tc_tun.o lib/port_tun.o lag_mp.o \
lib/geneve.o en/tc_tun_vxlan.o en/tc_tun_gre.o \
- en/tc_tun_geneve.o
+ en/tc_tun_geneve.o diag/en_tc_tracepoint.o
+mlx5_core-$(CONFIG_PCI_HYPERV_INTERFACE) += en/hv_vhca_stats.o
#
# Core extra
@@ -44,6 +46,7 @@ mlx5_core-$(CONFIG_MLX5_ESWITCH) += eswitch.o eswitch_offloads.o eswitch_offlo
mlx5_core-$(CONFIG_MLX5_MPFS) += lib/mpfs.o
mlx5_core-$(CONFIG_VXLAN) += lib/vxlan.o
mlx5_core-$(CONFIG_PTP_1588_CLOCK) += lib/clock.o
+mlx5_core-$(CONFIG_PCI_HYPERV_INTERFACE) += lib/hv.o lib/hv_vhca.o
#
# Ipoib netdev
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
index 8cdd7e66f8df..ea934cd02448 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
@@ -446,6 +446,8 @@ static int mlx5_internal_err_ret_value(struct mlx5_core_dev *dev, u16 op,
case MLX5_CMD_OP_CREATE_UMEM:
case MLX5_CMD_OP_DESTROY_UMEM:
case MLX5_CMD_OP_ALLOC_MEMIC:
+ case MLX5_CMD_OP_MODIFY_XRQ:
+ case MLX5_CMD_OP_RELEASE_XRQ_ERROR:
*status = MLX5_DRIVER_STATUS_ABORTED;
*synd = MLX5_DRIVER_SYND;
return -EIO;
@@ -637,6 +639,8 @@ const char *mlx5_command_str(int command)
MLX5_COMMAND_STR_CASE(DESTROY_UCTX);
MLX5_COMMAND_STR_CASE(CREATE_UMEM);
MLX5_COMMAND_STR_CASE(DESTROY_UMEM);
+ MLX5_COMMAND_STR_CASE(RELEASE_XRQ_ERROR);
+ MLX5_COMMAND_STR_CASE(MODIFY_XRQ);
default: return "unknown command opcode";
}
}
@@ -1368,49 +1372,19 @@ static void clean_debug_files(struct mlx5_core_dev *dev)
debugfs_remove_recursive(dbg->dbg_root);
}
-static int create_debugfs_files(struct mlx5_core_dev *dev)
+static void create_debugfs_files(struct mlx5_core_dev *dev)
{
struct mlx5_cmd_debug *dbg = &dev->cmd.dbg;
- int err = -ENOMEM;
-
- if (!mlx5_debugfs_root)
- return 0;
dbg->dbg_root = debugfs_create_dir("cmd", dev->priv.dbg_root);
- if (!dbg->dbg_root)
- return err;
-
- dbg->dbg_in = debugfs_create_file("in", 0400, dbg->dbg_root,
- dev, &dfops);
- if (!dbg->dbg_in)
- goto err_dbg;
- dbg->dbg_out = debugfs_create_file("out", 0200, dbg->dbg_root,
- dev, &dfops);
- if (!dbg->dbg_out)
- goto err_dbg;
-
- dbg->dbg_outlen = debugfs_create_file("out_len", 0600, dbg->dbg_root,
- dev, &olfops);
- if (!dbg->dbg_outlen)
- goto err_dbg;
-
- dbg->dbg_status = debugfs_create_u8("status", 0600, dbg->dbg_root,
- &dbg->status);
- if (!dbg->dbg_status)
- goto err_dbg;
-
- dbg->dbg_run = debugfs_create_file("run", 0200, dbg->dbg_root, dev, &fops);
- if (!dbg->dbg_run)
- goto err_dbg;
+ debugfs_create_file("in", 0400, dbg->dbg_root, dev, &dfops);
+ debugfs_create_file("out", 0200, dbg->dbg_root, dev, &dfops);
+ debugfs_create_file("out_len", 0600, dbg->dbg_root, dev, &olfops);
+ debugfs_create_u8("status", 0600, dbg->dbg_root, &dbg->status);
+ debugfs_create_file("run", 0200, dbg->dbg_root, dev, &fops);
mlx5_cmdif_debugfs_init(dev);
-
- return 0;
-
-err_dbg:
- clean_debug_files(dev);
- return err;
}
static void mlx5_cmd_change_mod(struct mlx5_core_dev *dev, int mode)
@@ -2007,17 +1981,10 @@ int mlx5_cmd_init(struct mlx5_core_dev *dev)
goto err_cache;
}
- err = create_debugfs_files(dev);
- if (err) {
- err = -ENOMEM;
- goto err_wq;
- }
+ create_debugfs_files(dev);
return 0;
-err_wq:
- destroy_workqueue(cmd->wq);
-
err_cache:
destroy_msg_cache(dev);
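This change, and the debugfs.c one that follows, track the tree-wide debugfs convention that creation helpers are fire-and-forget: callers no longer check for NULL, because a failed dentry propagates harmlessly through later calls and removing an error pointer is a no-op. The resulting idiom, sketched:

#include <linux/debugfs.h>

static u32 some_counter;	/* illustrative value to expose */

static struct dentry *example_debugfs_init(void)
{
	struct dentry *root = debugfs_create_dir("example", NULL);

	/* No error checks: on failure the helpers return an error
	 * pointer that subsequent calls accept and turn into no-ops.
	 */
	debugfs_create_u32("counter", 0444, root, &some_counter);
	return root;	/* keep only the root for debugfs_remove_recursive() */
}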
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/debugfs.c b/drivers/net/ethernet/mellanox/mlx5/core/debugfs.c
index a11e22d0b0cc..04854e5fbcd7 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/debugfs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/debugfs.c
@@ -92,8 +92,6 @@ EXPORT_SYMBOL(mlx5_debugfs_root);
void mlx5_register_debugfs(void)
{
mlx5_debugfs_root = debugfs_create_dir("mlx5", NULL);
- if (IS_ERR_OR_NULL(mlx5_debugfs_root))
- mlx5_debugfs_root = NULL;
}
void mlx5_unregister_debugfs(void)
@@ -101,45 +99,25 @@ void mlx5_unregister_debugfs(void)
debugfs_remove(mlx5_debugfs_root);
}
-int mlx5_qp_debugfs_init(struct mlx5_core_dev *dev)
+void mlx5_qp_debugfs_init(struct mlx5_core_dev *dev)
{
- if (!mlx5_debugfs_root)
- return 0;
-
atomic_set(&dev->num_qps, 0);
dev->priv.qp_debugfs = debugfs_create_dir("QPs", dev->priv.dbg_root);
- if (!dev->priv.qp_debugfs)
- return -ENOMEM;
-
- return 0;
}
void mlx5_qp_debugfs_cleanup(struct mlx5_core_dev *dev)
{
- if (!mlx5_debugfs_root)
- return;
-
debugfs_remove_recursive(dev->priv.qp_debugfs);
}
-int mlx5_eq_debugfs_init(struct mlx5_core_dev *dev)
+void mlx5_eq_debugfs_init(struct mlx5_core_dev *dev)
{
- if (!mlx5_debugfs_root)
- return 0;
-
dev->priv.eq_debugfs = debugfs_create_dir("EQs", dev->priv.dbg_root);
- if (!dev->priv.eq_debugfs)
- return -ENOMEM;
-
- return 0;
}
void mlx5_eq_debugfs_cleanup(struct mlx5_core_dev *dev)
{
- if (!mlx5_debugfs_root)
- return;
-
debugfs_remove_recursive(dev->priv.eq_debugfs);
}
@@ -183,85 +161,41 @@ static const struct file_operations stats_fops = {
.write = average_write,
};
-int mlx5_cmdif_debugfs_init(struct mlx5_core_dev *dev)
+void mlx5_cmdif_debugfs_init(struct mlx5_core_dev *dev)
{
struct mlx5_cmd_stats *stats;
struct dentry **cmd;
const char *namep;
- int err;
int i;
- if (!mlx5_debugfs_root)
- return 0;
-
cmd = &dev->priv.cmdif_debugfs;
*cmd = debugfs_create_dir("commands", dev->priv.dbg_root);
- if (!*cmd)
- return -ENOMEM;
for (i = 0; i < ARRAY_SIZE(dev->cmd.stats); i++) {
stats = &dev->cmd.stats[i];
namep = mlx5_command_str(i);
if (strcmp(namep, "unknown command opcode")) {
stats->root = debugfs_create_dir(namep, *cmd);
- if (!stats->root) {
- mlx5_core_warn(dev, "failed adding command %d\n",
- i);
- err = -ENOMEM;
- goto out;
- }
-
- stats->avg = debugfs_create_file("average", 0400,
- stats->root, stats,
- &stats_fops);
- if (!stats->avg) {
- mlx5_core_warn(dev, "failed creating debugfs file\n");
- err = -ENOMEM;
- goto out;
- }
-
- stats->count = debugfs_create_u64("n", 0400,
- stats->root,
- &stats->n);
- if (!stats->count) {
- mlx5_core_warn(dev, "failed creating debugfs file\n");
- err = -ENOMEM;
- goto out;
- }
+
+ debugfs_create_file("average", 0400, stats->root, stats,
+ &stats_fops);
+ debugfs_create_u64("n", 0400, stats->root, &stats->n);
}
}
-
- return 0;
-out:
- debugfs_remove_recursive(dev->priv.cmdif_debugfs);
- return err;
}
void mlx5_cmdif_debugfs_cleanup(struct mlx5_core_dev *dev)
{
- if (!mlx5_debugfs_root)
- return;
-
debugfs_remove_recursive(dev->priv.cmdif_debugfs);
}
-int mlx5_cq_debugfs_init(struct mlx5_core_dev *dev)
+void mlx5_cq_debugfs_init(struct mlx5_core_dev *dev)
{
- if (!mlx5_debugfs_root)
- return 0;
-
dev->priv.cq_debugfs = debugfs_create_dir("CQs", dev->priv.dbg_root);
- if (!dev->priv.cq_debugfs)
- return -ENOMEM;
-
- return 0;
}
void mlx5_cq_debugfs_cleanup(struct mlx5_core_dev *dev)
{
- if (!mlx5_debugfs_root)
- return;
-
debugfs_remove_recursive(dev->priv.cq_debugfs);
}
@@ -484,7 +418,6 @@ static int add_res_tree(struct mlx5_core_dev *dev, enum dbg_rsc_type type,
{
struct mlx5_rsc_debug *d;
char resn[32];
- int err;
int i;
d = kzalloc(struct_size(d, fields, nfile), GFP_KERNEL);
@@ -496,30 +429,15 @@ static int add_res_tree(struct mlx5_core_dev *dev, enum dbg_rsc_type type,
d->type = type;
sprintf(resn, "0x%x", rsn);
d->root = debugfs_create_dir(resn, root);
- if (!d->root) {
- err = -ENOMEM;
- goto out_free;
- }
for (i = 0; i < nfile; i++) {
d->fields[i].i = i;
- d->fields[i].dent = debugfs_create_file(field[i], 0400,
- d->root, &d->fields[i],
- &fops);
- if (!d->fields[i].dent) {
- err = -ENOMEM;
- goto out_rem;
- }
+ debugfs_create_file(field[i], 0400, d->root, &d->fields[i],
+ &fops);
}
*dbg = d;
return 0;
-out_rem:
- debugfs_remove_recursive(d->root);
-
-out_free:
- kfree(d);
- return err;
}
static void rem_res_tree(struct mlx5_rsc_debug *d)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/diag/en_rep_tracepoint.h b/drivers/net/ethernet/mellanox/mlx5/core/diag/en_rep_tracepoint.h
new file mode 100644
index 000000000000..1177860a2ee4
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/diag/en_rep_tracepoint.h
@@ -0,0 +1,54 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2019 Mellanox Technologies. */
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM mlx5
+
+#if !defined(_MLX5_EN_REP_TP_) || defined(TRACE_HEADER_MULTI_READ)
+#define _MLX5_EN_REP_TP_
+
+#include <linux/tracepoint.h>
+#include <linux/trace_seq.h>
+#include "en_rep.h"
+
+TRACE_EVENT(mlx5e_rep_neigh_update,
+ TP_PROTO(const struct mlx5e_neigh_hash_entry *nhe, const u8 *ha,
+ bool neigh_connected),
+ TP_ARGS(nhe, ha, neigh_connected),
+ TP_STRUCT__entry(__string(devname, nhe->m_neigh.dev->name)
+ __array(u8, ha, ETH_ALEN)
+ __array(u8, v4, 4)
+ __array(u8, v6, 16)
+ __field(bool, neigh_connected)
+ ),
+ TP_fast_assign(const struct mlx5e_neigh *mn = &nhe->m_neigh;
+ struct in6_addr *pin6;
+ __be32 *p32;
+
+ __assign_str(devname, mn->dev->name);
+ __entry->neigh_connected = neigh_connected;
+ memcpy(__entry->ha, ha, ETH_ALEN);
+
+ p32 = (__be32 *)__entry->v4;
+ pin6 = (struct in6_addr *)__entry->v6;
+ if (mn->family == AF_INET) {
+ *p32 = mn->dst_ip.v4;
+ ipv6_addr_set_v4mapped(*p32, pin6);
+ } else if (mn->family == AF_INET6) {
+ *pin6 = mn->dst_ip.v6;
+ }
+ ),
+ TP_printk("netdev: %s MAC: %pM IPv4: %pI4 IPv6: %pI6c neigh_connected=%d\n",
+ __get_str(devname), __entry->ha,
+ __entry->v4, __entry->v6, __entry->neigh_connected
+ )
+);
+
+#endif /* _MLX5_EN_REP_TP_ */
+
+/* This part must be outside protection */
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH ./diag
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE en_rep_tracepoint
+#include <trace/define_trace.h>
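A TRACE_EVENT() definition such as the one above expands into a trace_<name>() inline that call sites invoke directly; sketched below (the surrounding function is illustrative, not part of this patch):

/* Illustrative call site, e.g. somewhere in en_rep.c: */
#include "diag/en_rep_tracepoint.h"

static void example_neigh_update(const struct mlx5e_neigh_hash_entry *nhe,
				 const u8 *ha, bool connected)
{
	/* Compiles to a static-key no-op unless the
	 * mlx5:mlx5e_rep_neigh_update event is enabled.
	 */
	trace_mlx5e_rep_neigh_update(nhe, ha, connected);
}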
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/diag/en_tc_tracepoint.c b/drivers/net/ethernet/mellanox/mlx5/core/diag/en_tc_tracepoint.c
new file mode 100644
index 000000000000..c5dc6c50fa87
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/diag/en_tc_tracepoint.c
@@ -0,0 +1,58 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2019 Mellanox Technologies. */
+
+#define CREATE_TRACE_POINTS
+#include "en_tc_tracepoint.h"
+
+void put_ids_to_array(int *ids,
+ const struct flow_action_entry *entries,
+ unsigned int num)
+{
+ unsigned int i;
+
+ for (i = 0; i < num; i++)
+ ids[i] = entries[i].id;
+}
+
+#define NAME_SIZE 16
+
+static const char FLOWACT2STR[NUM_FLOW_ACTIONS][NAME_SIZE] = {
+ [FLOW_ACTION_ACCEPT] = "ACCEPT",
+ [FLOW_ACTION_DROP] = "DROP",
+ [FLOW_ACTION_TRAP] = "TRAP",
+ [FLOW_ACTION_GOTO] = "GOTO",
+ [FLOW_ACTION_REDIRECT] = "REDIRECT",
+ [FLOW_ACTION_MIRRED] = "MIRRED",
+ [FLOW_ACTION_VLAN_PUSH] = "VLAN_PUSH",
+ [FLOW_ACTION_VLAN_POP] = "VLAN_POP",
+ [FLOW_ACTION_VLAN_MANGLE] = "VLAN_MANGLE",
+ [FLOW_ACTION_TUNNEL_ENCAP] = "TUNNEL_ENCAP",
+ [FLOW_ACTION_TUNNEL_DECAP] = "TUNNEL_DECAP",
+ [FLOW_ACTION_MANGLE] = "MANGLE",
+ [FLOW_ACTION_ADD] = "ADD",
+ [FLOW_ACTION_CSUM] = "CSUM",
+ [FLOW_ACTION_MARK] = "MARK",
+ [FLOW_ACTION_WAKE] = "WAKE",
+ [FLOW_ACTION_QUEUE] = "QUEUE",
+ [FLOW_ACTION_SAMPLE] = "SAMPLE",
+ [FLOW_ACTION_POLICE] = "POLICE",
+ [FLOW_ACTION_CT] = "CT",
+};
+
+const char *parse_action(struct trace_seq *p,
+ int *ids,
+ unsigned int num)
+{
+ const char *ret = trace_seq_buffer_ptr(p);
+ unsigned int i;
+
+ for (i = 0; i < num; i++) {
+ if (ids[i] < NUM_FLOW_ACTIONS)
+ trace_seq_printf(p, "%s ", FLOWACT2STR[ids[i]]);
+ else
+ trace_seq_printf(p, "UNKNOWN ");
+ }
+
+ trace_seq_putc(p, 0);
+ return ret;
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/diag/en_tc_tracepoint.h b/drivers/net/ethernet/mellanox/mlx5/core/diag/en_tc_tracepoint.h
new file mode 100644
index 000000000000..d4e6cfaaade3
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/diag/en_tc_tracepoint.h
@@ -0,0 +1,114 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2019 Mellanox Technologies. */
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM mlx5
+
+#if !defined(_MLX5_TC_TP_) || defined(TRACE_HEADER_MULTI_READ)
+#define _MLX5_TC_TP_
+
+#include <linux/tracepoint.h>
+#include <linux/trace_seq.h>
+#include <net/flow_offload.h>
+#include "en_rep.h"
+
+#define __parse_action(ids, num) parse_action(p, ids, num)
+
+void put_ids_to_array(int *ids,
+ const struct flow_action_entry *entries,
+ unsigned int num);
+
+const char *parse_action(struct trace_seq *p,
+ int *ids,
+ unsigned int num);
+
+DECLARE_EVENT_CLASS(mlx5e_flower_template,
+ TP_PROTO(const struct flow_cls_offload *f),
+ TP_ARGS(f),
+ TP_STRUCT__entry(__field(void *, cookie)
+ __field(unsigned int, num)
+ __dynamic_array(int, ids, f->rule ?
+ f->rule->action.num_entries : 0)
+ ),
+ TP_fast_assign(__entry->cookie = (void *)f->cookie;
+ __entry->num = (f->rule ?
+ f->rule->action.num_entries : 0);
+ if (__entry->num)
+ put_ids_to_array(__get_dynamic_array(ids),
+ f->rule->action.entries,
+ f->rule->action.num_entries);
+ ),
+ TP_printk("cookie=%p actions= %s\n",
+ __entry->cookie, __entry->num ?
+ __parse_action(__get_dynamic_array(ids),
+ __entry->num) : "NULL"
+ )
+);
+
+DEFINE_EVENT(mlx5e_flower_template, mlx5e_configure_flower,
+ TP_PROTO(const struct flow_cls_offload *f),
+ TP_ARGS(f)
+ );
+
+DEFINE_EVENT(mlx5e_flower_template, mlx5e_delete_flower,
+ TP_PROTO(const struct flow_cls_offload *f),
+ TP_ARGS(f)
+ );
+
+TRACE_EVENT(mlx5e_stats_flower,
+ TP_PROTO(const struct flow_cls_offload *f),
+ TP_ARGS(f),
+ TP_STRUCT__entry(__field(void *, cookie)
+ __field(u64, bytes)
+ __field(u64, packets)
+ __field(u64, lastused)
+ ),
+ TP_fast_assign(__entry->cookie = (void *)f->cookie;
+ __entry->bytes = f->stats.bytes;
+ __entry->packets = f->stats.pkts;
+ __entry->lastused = f->stats.lastused;
+ ),
+ TP_printk("cookie=%p bytes=%llu packets=%llu lastused=%llu\n",
+ __entry->cookie, __entry->bytes,
+ __entry->packets, __entry->lastused
+ )
+);
+
+TRACE_EVENT(mlx5e_tc_update_neigh_used_value,
+ TP_PROTO(const struct mlx5e_neigh_hash_entry *nhe, bool neigh_used),
+ TP_ARGS(nhe, neigh_used),
+ TP_STRUCT__entry(__string(devname, nhe->m_neigh.dev->name)
+ __array(u8, v4, 4)
+ __array(u8, v6, 16)
+ __field(bool, neigh_used)
+ ),
+ TP_fast_assign(const struct mlx5e_neigh *mn = &nhe->m_neigh;
+ struct in6_addr *pin6;
+ __be32 *p32;
+
+ __assign_str(devname, mn->dev->name);
+ __entry->neigh_used = neigh_used;
+
+ p32 = (__be32 *)__entry->v4;
+ pin6 = (struct in6_addr *)__entry->v6;
+ if (mn->family == AF_INET) {
+ *p32 = mn->dst_ip.v4;
+ ipv6_addr_set_v4mapped(*p32, pin6);
+ } else if (mn->family == AF_INET6) {
+ *pin6 = mn->dst_ip.v6;
+ }
+ ),
+ TP_printk("netdev: %s IPv4: %pI4 IPv6: %pI6c neigh_used=%d\n",
+ __get_str(devname), __entry->v4, __entry->v6,
+ __entry->neigh_used
+ )
+);
+
+#endif /* _MLX5_TC_TP_ */
+
+/* This part must be outside protection */
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH ./diag
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE en_tc_tracepoint
+#include <trace/define_trace.h>
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c b/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c
index 8a4930c8bf62..2011eaf15cc5 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c
@@ -546,7 +546,7 @@ static void mlx5_fw_tracer_save_trace(struct mlx5_fw_tracer *tracer,
trace_data->timestamp = timestamp;
trace_data->lost = lost;
trace_data->event_id = event_id;
- strncpy(trace_data->msg, msg, TRACE_STR_MSG);
+ strscpy_pad(trace_data->msg, msg, TRACE_STR_MSG);
tracer->st_arr.saved_traces_index =
(tracer->st_arr.saved_traces_index + 1) & (SAVED_TRACES_NUM - 1);
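The strncpy() to strscpy_pad() change closes the classic termination hole: strncpy() leaves the destination unterminated whenever the source fills the buffer, while strscpy_pad() always NUL-terminates, zero-fills the tail, and reports truncation. A worked example:

char buf[8];

/* strncpy(buf, "a-long-message", sizeof(buf)) would leave buf with
 * no terminating NUL. strscpy_pad() truncates safely instead:
 */
strscpy_pad(buf, "a-long-message", sizeof(buf));
/* buf now holds "a-long-" plus a NUL terminator; the return value is
 * -E2BIG, signalling truncation.
 */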
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
index 65bec19a438f..8d76452cacdc 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
@@ -54,6 +54,7 @@
#include "mlx5_core.h"
#include "en_stats.h"
#include "en/fs.h"
+#include "lib/hv_vhca.h"
extern const struct net_device_ops mlx5e_netdev_ops;
struct page_pool;
@@ -162,6 +163,14 @@ enum mlx5e_rq_group {
#define MLX5E_NUM_RQ_GROUPS(g) (1 + MLX5E_RQ_GROUP_##g)
};
+static inline u8 mlx5e_get_num_lag_ports(struct mlx5_core_dev *mdev)
+{
+ if (mlx5_lag_is_lacp_owner(mdev))
+ return 1;
+
+ return clamp_t(u8, MLX5_CAP_GEN(mdev, num_lag_ports), 1, MLX5_MAX_PORTS);
+}
+
static inline u16 mlx5_min_rx_wqes(int wq_type, u32 wq_size)
{
switch (wq_type) {
@@ -300,6 +309,7 @@ struct mlx5e_dcbx_dp {
enum {
MLX5E_RQ_STATE_ENABLED,
+ MLX5E_RQ_STATE_RECOVERING,
MLX5E_RQ_STATE_AM,
MLX5E_RQ_STATE_NO_CSUM_COMPLETE,
MLX5E_RQ_STATE_CSUM_FULL, /* cqe_csum_full hw bit is set */
@@ -356,6 +366,7 @@ enum {
MLX5E_SQ_STATE_IPSEC,
MLX5E_SQ_STATE_AM,
MLX5E_SQ_STATE_TLS,
+ MLX5E_SQ_STATE_VLAN_NEED_L2_INLINE,
};
struct mlx5e_sq_wqe_info {
@@ -480,8 +491,6 @@ struct mlx5e_xdp_mpwqe {
struct mlx5e_tx_wqe *wqe;
u8 ds_count;
u8 pkt_count;
- u8 max_ds_count;
- u8 complete;
u8 inline_on;
};
@@ -552,6 +561,8 @@ struct mlx5e_icosq {
/* control path */
struct mlx5_wq_ctrl wq_ctrl;
struct mlx5e_channel *channel;
+
+ struct work_struct recover_work;
} ____cacheline_aligned_in_smp;
struct mlx5e_wqe_frag_info {
@@ -671,6 +682,8 @@ struct mlx5e_rq {
struct zero_copy_allocator zca;
struct xdp_umem *umem;
+ struct work_struct recover_work;
+
/* control */
struct mlx5_wq_ctrl wq_ctrl;
__be32 mkey_be;
@@ -700,6 +713,7 @@ struct mlx5e_channel {
struct net_device *netdev;
__be32 mkey_be;
u8 num_tc;
+ u8 lag_port;
/* XDP_REDIRECT */
struct mlx5e_xdpsq xdpsq;
@@ -778,6 +792,15 @@ struct mlx5e_modify_sq_param {
int rl_index;
};
+#if IS_ENABLED(CONFIG_PCI_HYPERV_INTERFACE)
+struct mlx5e_hv_vhca_stats_agent {
+ struct mlx5_hv_vhca_agent *agent;
+ struct delayed_work work;
+ u16 delay;
+ void *buf;
+};
+#endif
+
struct mlx5e_xsk {
/* UMEMs are stored separately from channels, because we don't want to
* lose them when channels are recreated. The kernel also stores UMEMs,
@@ -804,7 +827,7 @@ struct mlx5e_priv {
struct mlx5e_rq drop_rq;
struct mlx5e_channels channels;
- u32 tisn[MLX5E_MAX_NUM_TC];
+ u32 tisn[MLX5_MAX_PORTS][MLX5E_MAX_NUM_TC];
struct mlx5e_rqt indir_rqt;
struct mlx5e_tir indir_tir[MLX5E_NUM_INDIR_TIRS];
struct mlx5e_tir inner_indir_tir[MLX5E_NUM_INDIR_TIRS];
@@ -847,7 +870,11 @@ struct mlx5e_priv {
struct mlx5e_tls *tls;
#endif
struct devlink_health_reporter *tx_reporter;
+ struct devlink_health_reporter *rx_reporter;
struct mlx5e_xsk xsk;
+#if IS_ENABLED(CONFIG_PCI_HYPERV_INTERFACE)
+ struct mlx5e_hv_vhca_stats_agent stats_agent;
+#endif
};
struct mlx5e_profile {
@@ -888,6 +915,26 @@ bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget);
int mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget);
void mlx5e_free_txqsq_descs(struct mlx5e_txqsq *sq);
+static inline u32 mlx5e_rqwq_get_size(struct mlx5e_rq *rq)
+{
+ switch (rq->wq_type) {
+ case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
+ return mlx5_wq_ll_get_size(&rq->mpwqe.wq);
+ default:
+ return mlx5_wq_cyc_get_size(&rq->wqe.wq);
+ }
+}
+
+static inline u32 mlx5e_rqwq_get_cur_sz(struct mlx5e_rq *rq)
+{
+ switch (rq->wq_type) {
+ case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
+ return rq->mpwqe.wq.cur_sz;
+ default:
+ return rq->wqe.wq.cur_sz;
+ }
+}
+
bool mlx5e_check_fragmented_striding_rq_cap(struct mlx5_core_dev *mdev);
bool mlx5e_striding_rq_possible(struct mlx5_core_dev *mdev,
struct mlx5e_params *params);
@@ -1006,18 +1053,18 @@ void mlx5e_set_rx_cq_mode_params(struct mlx5e_params *params,
void mlx5e_set_rq_type(struct mlx5_core_dev *mdev, struct mlx5e_params *params);
void mlx5e_init_rq_type_params(struct mlx5_core_dev *mdev,
struct mlx5e_params *params);
+int mlx5e_modify_rq_state(struct mlx5e_rq *rq, int curr_state, int next_state);
+void mlx5e_activate_rq(struct mlx5e_rq *rq);
+void mlx5e_deactivate_rq(struct mlx5e_rq *rq);
+void mlx5e_free_rx_descs(struct mlx5e_rq *rq);
+void mlx5e_activate_icosq(struct mlx5e_icosq *icosq);
+void mlx5e_deactivate_icosq(struct mlx5e_icosq *icosq);
int mlx5e_modify_sq(struct mlx5_core_dev *mdev, u32 sqn,
struct mlx5e_modify_sq_param *p);
void mlx5e_activate_txqsq(struct mlx5e_txqsq *sq);
void mlx5e_tx_disable_queue(struct netdev_queue *txq);
-static inline bool mlx5e_tunnel_inner_ft_supported(struct mlx5_core_dev *mdev)
-{
- return (MLX5_CAP_ETH(mdev, tunnel_stateless_gre) &&
- MLX5_CAP_FLOWTABLE_NIC_RX(mdev, ft_field_support.inner_ip_version));
-}
-
static inline bool mlx5_tx_swp_supported(struct mlx5_core_dev *mdev)
{
return MLX5_CAP_ETH(mdev, swp) &&
@@ -1063,6 +1110,7 @@ int mlx5e_create_tis(struct mlx5_core_dev *mdev, void *in, u32 *tisn);
void mlx5e_destroy_tis(struct mlx5_core_dev *mdev, u32 tisn);
int mlx5e_create_tises(struct mlx5e_priv *priv);
+void mlx5e_destroy_tises(struct mlx5e_priv *priv);
int mlx5e_update_nic_rx(struct mlx5e_priv *priv);
void mlx5e_update_carrier(struct mlx5e_priv *priv);
int mlx5e_close(struct net_device *netdev);
@@ -1135,7 +1183,6 @@ void mlx5e_build_rq_params(struct mlx5_core_dev *mdev,
struct mlx5e_params *params);
void mlx5e_build_rss_params(struct mlx5e_rss_params *rss_params,
u16 num_channels);
-u8 mlx5e_params_calculate_tx_min_inline(struct mlx5_core_dev *mdev);
void mlx5e_rx_dim_work(struct work_struct *work);
void mlx5e_tx_dim_work(struct work_struct *work);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h b/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h
index be5961ff24cc..68d593074f6c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h
@@ -10,11 +10,14 @@ enum {
};
struct mlx5e_tc_table {
+ /* protects flow table */
+ struct mutex t_lock;
struct mlx5_flow_table *t;
struct rhashtable ht;
- DECLARE_HASHTABLE(mod_hdr_tbl, 8);
+ struct mod_hdr_tbl mod_hdr;
+ struct mutex hairpin_tbl_lock; /* protects hairpin_tbl */
DECLARE_HASHTABLE(hairpin_tbl, 8);
struct notifier_block netdevice_nb;
@@ -92,9 +95,15 @@ struct mlx5e_tirc_config {
enum mlx5e_tunnel_types {
MLX5E_TT_IPV4_GRE,
MLX5E_TT_IPV6_GRE,
+ MLX5E_TT_IPV4_IPIP,
+ MLX5E_TT_IPV6_IPIP,
+ MLX5E_TT_IPV4_IPV6,
+ MLX5E_TT_IPV6_IPV6,
MLX5E_NUM_TUNNEL_TT,
};
+bool mlx5e_tunnel_inner_ft_supported(struct mlx5_core_dev *mdev);
+
/* L3/L4 traffic type classifier */
struct mlx5e_ttc_table {
struct mlx5e_flow_table ft;
@@ -132,12 +141,17 @@ struct mlx5e_ethtool_steering {
void mlx5e_ethtool_init_steering(struct mlx5e_priv *priv);
void mlx5e_ethtool_cleanup_steering(struct mlx5e_priv *priv);
-int mlx5e_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd);
-int mlx5e_get_rxnfc(struct net_device *dev,
- struct ethtool_rxnfc *info, u32 *rule_locs);
+int mlx5e_ethtool_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd);
+int mlx5e_ethtool_get_rxnfc(struct net_device *dev,
+ struct ethtool_rxnfc *info, u32 *rule_locs);
#else
static inline void mlx5e_ethtool_init_steering(struct mlx5e_priv *priv) { }
static inline void mlx5e_ethtool_cleanup_steering(struct mlx5e_priv *priv) { }
+static inline int mlx5e_ethtool_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
+{ return -EOPNOTSUPP; }
+static inline int mlx5e_ethtool_get_rxnfc(struct net_device *dev,
+ struct ethtool_rxnfc *info, u32 *rule_locs)
+{ return -EOPNOTSUPP; }
#endif /* CONFIG_MLX5_EN_RXNFC */
#ifdef CONFIG_MLX5_EN_ARFS
@@ -224,5 +238,8 @@ void mlx5e_disable_cvlan_filter(struct mlx5e_priv *priv);
int mlx5e_create_flow_steering(struct mlx5e_priv *priv);
void mlx5e_destroy_flow_steering(struct mlx5e_priv *priv);
+bool mlx5e_tunnel_proto_supported(struct mlx5_core_dev *mdev, u8 proto_type);
+bool mlx5e_any_tunnel_proto_supported(struct mlx5_core_dev *mdev);
+
#endif /* __MLX5E_FLOW_STEER_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/health.c b/drivers/net/ethernet/mellanox/mlx5/core/en/health.c
new file mode 100644
index 000000000000..1d6b58860da6
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/health.c
@@ -0,0 +1,205 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2019 Mellanox Technologies.
+
+#include "health.h"
+#include "lib/eq.h"
+
+int mlx5e_reporter_named_obj_nest_start(struct devlink_fmsg *fmsg, char *name)
+{
+ int err;
+
+ err = devlink_fmsg_pair_nest_start(fmsg, name);
+ if (err)
+ return err;
+
+ err = devlink_fmsg_obj_nest_start(fmsg);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+int mlx5e_reporter_named_obj_nest_end(struct devlink_fmsg *fmsg)
+{
+ int err;
+
+ err = devlink_fmsg_obj_nest_end(fmsg);
+ if (err)
+ return err;
+
+ err = devlink_fmsg_pair_nest_end(fmsg);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+int mlx5e_reporter_cq_diagnose(struct mlx5e_cq *cq, struct devlink_fmsg *fmsg)
+{
+ struct mlx5e_priv *priv = cq->channel->priv;
+ u32 out[MLX5_ST_SZ_DW(query_cq_out)] = {};
+ u8 hw_status;
+ void *cqc;
+ int err;
+
+ err = mlx5_core_query_cq(priv->mdev, &cq->mcq, out, sizeof(out));
+ if (err)
+ return err;
+
+ cqc = MLX5_ADDR_OF(query_cq_out, out, cq_context);
+ hw_status = MLX5_GET(cqc, cqc, status);
+
+ err = mlx5e_reporter_named_obj_nest_start(fmsg, "CQ");
+ if (err)
+ return err;
+
+ err = devlink_fmsg_u32_pair_put(fmsg, "cqn", cq->mcq.cqn);
+ if (err)
+ return err;
+
+ err = devlink_fmsg_u8_pair_put(fmsg, "HW status", hw_status);
+ if (err)
+ return err;
+
+ err = mlx5e_reporter_named_obj_nest_end(fmsg);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+int mlx5e_reporter_cq_common_diagnose(struct mlx5e_cq *cq, struct devlink_fmsg *fmsg)
+{
+ u8 cq_log_stride;
+ u32 cq_sz;
+ int err;
+
+ cq_sz = mlx5_cqwq_get_size(&cq->wq);
+ cq_log_stride = mlx5_cqwq_get_log_stride_size(&cq->wq);
+
+ err = mlx5e_reporter_named_obj_nest_start(fmsg, "CQ");
+ if (err)
+ return err;
+
+ err = devlink_fmsg_u64_pair_put(fmsg, "stride size", BIT(cq_log_stride));
+ if (err)
+ return err;
+
+ err = devlink_fmsg_u32_pair_put(fmsg, "size", cq_sz);
+ if (err)
+ return err;
+
+ err = mlx5e_reporter_named_obj_nest_end(fmsg);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+int mlx5e_health_create_reporters(struct mlx5e_priv *priv)
+{
+ int err;
+
+ err = mlx5e_reporter_tx_create(priv);
+ if (err)
+ return err;
+
+ err = mlx5e_reporter_rx_create(priv);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+void mlx5e_health_destroy_reporters(struct mlx5e_priv *priv)
+{
+ mlx5e_reporter_rx_destroy(priv);
+ mlx5e_reporter_tx_destroy(priv);
+}
+
+void mlx5e_health_channels_update(struct mlx5e_priv *priv)
+{
+ if (priv->tx_reporter)
+ devlink_health_reporter_state_update(priv->tx_reporter,
+ DEVLINK_HEALTH_REPORTER_STATE_HEALTHY);
+ if (priv->rx_reporter)
+ devlink_health_reporter_state_update(priv->rx_reporter,
+ DEVLINK_HEALTH_REPORTER_STATE_HEALTHY);
+}
+
+int mlx5e_health_sq_to_ready(struct mlx5e_channel *channel, u32 sqn)
+{
+ struct mlx5_core_dev *mdev = channel->mdev;
+ struct net_device *dev = channel->netdev;
+ struct mlx5e_modify_sq_param msp = {};
+ int err;
+
+ msp.curr_state = MLX5_SQC_STATE_ERR;
+ msp.next_state = MLX5_SQC_STATE_RST;
+
+ err = mlx5e_modify_sq(mdev, sqn, &msp);
+ if (err) {
+ netdev_err(dev, "Failed to move sq 0x%x to reset\n", sqn);
+ return err;
+ }
+
+ memset(&msp, 0, sizeof(msp));
+ msp.curr_state = MLX5_SQC_STATE_RST;
+ msp.next_state = MLX5_SQC_STATE_RDY;
+
+ err = mlx5e_modify_sq(mdev, sqn, &msp);
+ if (err) {
+ netdev_err(dev, "Failed to move sq 0x%x to ready\n", sqn);
+ return err;
+ }
+
+ return 0;
+}
+
+int mlx5e_health_recover_channels(struct mlx5e_priv *priv)
+{
+ int err = 0;
+
+ rtnl_lock();
+ mutex_lock(&priv->state_lock);
+
+ if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
+ goto out;
+
+ err = mlx5e_safe_reopen_channels(priv);
+
+out:
+ mutex_unlock(&priv->state_lock);
+ rtnl_unlock();
+
+ return err;
+}
+
+int mlx5e_health_channel_eq_recover(struct mlx5_eq_comp *eq, struct mlx5e_channel *channel)
+{
+ u32 eqe_count;
+
+ netdev_err(channel->netdev, "EQ 0x%x: Cons = 0x%x, irqn = 0x%x\n",
+ eq->core.eqn, eq->core.cons_index, eq->core.irqn);
+
+ eqe_count = mlx5_eq_poll_irq_disabled(eq);
+ if (!eqe_count)
+ return -EIO;
+
+ netdev_err(channel->netdev, "Recovered %d eqes on EQ 0x%x\n",
+ eqe_count, eq->core.eqn);
+
+ channel->stats->eq_rearm++;
+ return 0;
+}
+
+int mlx5e_health_report(struct mlx5e_priv *priv,
+ struct devlink_health_reporter *reporter, char *err_str,
+ struct mlx5e_err_ctx *err_ctx)
+{
+ if (!reporter) {
+		netdev_err(priv->netdev, "%s\n", err_str);
+ return err_ctx->recover(&err_ctx->ctx);
+ }
+ return devlink_health_report(reporter, err_str, err_ctx);
+}
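mlx5e_health_report() lets a caller behave identically whether or not a devlink reporter is registered: with no reporter it logs and runs the recovery callback directly. A hedged sketch of a call site wiring up an mlx5e_err_ctx (names prefixed example_ are illustrative):

static int example_recover(void *ctx)
{
	struct mlx5e_txqsq *sq = ctx;

	return mlx5e_health_sq_to_ready(sq->channel, sq->sqn);
}

static void example_report(struct mlx5e_priv *priv, struct mlx5e_txqsq *sq)
{
	struct mlx5e_err_ctx err_ctx = {
		.recover = example_recover,
		.ctx	 = sq,
	};

	/* Falls back to err_ctx.recover() when no reporter exists */
	mlx5e_health_report(priv, priv->tx_reporter, "TX error", &err_ctx);
}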
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/health.h b/drivers/net/ethernet/mellanox/mlx5/core/en/health.h
new file mode 100644
index 000000000000..d3693fa547ac
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/health.h
@@ -0,0 +1,53 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2019 Mellanox Technologies. */
+
+#ifndef __MLX5E_EN_HEALTH_H
+#define __MLX5E_EN_HEALTH_H
+
+#include "en.h"
+
+#define MLX5E_RX_ERR_CQE(cqe) (get_cqe_opcode(cqe) != MLX5_CQE_RESP_SEND)
+
+static inline bool cqe_syndrome_needs_recover(u8 syndrome)
+{
+ return syndrome == MLX5_CQE_SYNDROME_LOCAL_LENGTH_ERR ||
+ syndrome == MLX5_CQE_SYNDROME_LOCAL_QP_OP_ERR ||
+ syndrome == MLX5_CQE_SYNDROME_LOCAL_PROT_ERR ||
+ syndrome == MLX5_CQE_SYNDROME_WR_FLUSH_ERR;
+}
+
+int mlx5e_reporter_tx_create(struct mlx5e_priv *priv);
+void mlx5e_reporter_tx_destroy(struct mlx5e_priv *priv);
+void mlx5e_reporter_tx_err_cqe(struct mlx5e_txqsq *sq);
+int mlx5e_reporter_tx_timeout(struct mlx5e_txqsq *sq);
+
+int mlx5e_reporter_cq_diagnose(struct mlx5e_cq *cq, struct devlink_fmsg *fmsg);
+int mlx5e_reporter_cq_common_diagnose(struct mlx5e_cq *cq, struct devlink_fmsg *fmsg);
+int mlx5e_reporter_named_obj_nest_start(struct devlink_fmsg *fmsg, char *name);
+int mlx5e_reporter_named_obj_nest_end(struct devlink_fmsg *fmsg);
+
+int mlx5e_reporter_rx_create(struct mlx5e_priv *priv);
+void mlx5e_reporter_rx_destroy(struct mlx5e_priv *priv);
+void mlx5e_reporter_icosq_cqe_err(struct mlx5e_icosq *icosq);
+void mlx5e_reporter_rq_cqe_err(struct mlx5e_rq *rq);
+void mlx5e_reporter_rx_timeout(struct mlx5e_rq *rq);
+
+#define MLX5E_REPORTER_PER_Q_MAX_LEN 256
+
+struct mlx5e_err_ctx {
+ int (*recover)(void *ctx);
+ void *ctx;
+};
+
+int mlx5e_health_sq_to_ready(struct mlx5e_channel *channel, u32 sqn);
+int mlx5e_health_channel_eq_recover(struct mlx5_eq_comp *eq, struct mlx5e_channel *channel);
+int mlx5e_health_recover_channels(struct mlx5e_priv *priv);
+int mlx5e_health_report(struct mlx5e_priv *priv,
+ struct devlink_health_reporter *reporter, char *err_str,
+ struct mlx5e_err_ctx *err_ctx);
+int mlx5e_health_create_reporters(struct mlx5e_priv *priv);
+void mlx5e_health_destroy_reporters(struct mlx5e_priv *priv);
+void mlx5e_health_channels_update(struct mlx5e_priv *priv);
+
+#endif
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/hv_vhca_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en/hv_vhca_stats.c
new file mode 100644
index 000000000000..c37b4acd9bd5
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/hv_vhca_stats.c
@@ -0,0 +1,162 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+// Copyright (c) 2018 Mellanox Technologies
+
+#include "en.h"
+#include "en/hv_vhca_stats.h"
+#include "lib/hv_vhca.h"
+#include "lib/hv.h"
+
+struct mlx5e_hv_vhca_per_ring_stats {
+ u64 rx_packets;
+ u64 rx_bytes;
+ u64 tx_packets;
+ u64 tx_bytes;
+};
+
+static void
+mlx5e_hv_vhca_fill_ring_stats(struct mlx5e_priv *priv, int ch,
+ struct mlx5e_hv_vhca_per_ring_stats *data)
+{
+ struct mlx5e_channel_stats *stats;
+ int tc;
+
+ stats = &priv->channel_stats[ch];
+ data->rx_packets = stats->rq.packets;
+ data->rx_bytes = stats->rq.bytes;
+
+ for (tc = 0; tc < priv->max_opened_tc; tc++) {
+ data->tx_packets += stats->sq[tc].packets;
+ data->tx_bytes += stats->sq[tc].bytes;
+ }
+}
+
+static void mlx5e_hv_vhca_fill_stats(struct mlx5e_priv *priv, void *data,
+ int buf_len)
+{
+ int ch, i = 0;
+
+ for (ch = 0; ch < priv->max_nch; ch++) {
+ void *buf = data + i;
+
+ if (WARN_ON_ONCE(buf +
+ sizeof(struct mlx5e_hv_vhca_per_ring_stats) >
+ data + buf_len))
+ return;
+
+ mlx5e_hv_vhca_fill_ring_stats(priv, ch, buf);
+ i += sizeof(struct mlx5e_hv_vhca_per_ring_stats);
+ }
+}
+
+static int mlx5e_hv_vhca_stats_buf_size(struct mlx5e_priv *priv)
+{
+ return (sizeof(struct mlx5e_hv_vhca_per_ring_stats) *
+ priv->max_nch);
+}
+
+static void mlx5e_hv_vhca_stats_work(struct work_struct *work)
+{
+ struct mlx5e_hv_vhca_stats_agent *sagent;
+ struct mlx5_hv_vhca_agent *agent;
+ struct delayed_work *dwork;
+ struct mlx5e_priv *priv;
+ int buf_len, rc;
+ void *buf;
+
+ dwork = to_delayed_work(work);
+ sagent = container_of(dwork, struct mlx5e_hv_vhca_stats_agent, work);
+ priv = container_of(sagent, struct mlx5e_priv, stats_agent);
+ buf_len = mlx5e_hv_vhca_stats_buf_size(priv);
+ agent = sagent->agent;
+ buf = sagent->buf;
+
+ memset(buf, 0, buf_len);
+ mlx5e_hv_vhca_fill_stats(priv, buf, buf_len);
+
+ rc = mlx5_hv_vhca_agent_write(agent, buf, buf_len);
+ if (rc) {
+ mlx5_core_err(priv->mdev,
+ "%s: Failed to write stats, err = %d\n",
+ __func__, rc);
+ return;
+ }
+
+ if (sagent->delay)
+ queue_delayed_work(priv->wq, &sagent->work, sagent->delay);
+}
+
+enum {
+ MLX5_HV_VHCA_STATS_VERSION = 1,
+ MLX5_HV_VHCA_STATS_UPDATE_ONCE = 0xFFFF,
+};
+
+static void mlx5e_hv_vhca_stats_control(struct mlx5_hv_vhca_agent *agent,
+ struct mlx5_hv_vhca_control_block *block)
+{
+ struct mlx5e_hv_vhca_stats_agent *sagent;
+ struct mlx5e_priv *priv;
+
+ priv = mlx5_hv_vhca_agent_priv(agent);
+ sagent = &priv->stats_agent;
+
+ block->version = MLX5_HV_VHCA_STATS_VERSION;
+ block->rings = priv->max_nch;
+
+ if (!block->command) {
+ cancel_delayed_work_sync(&priv->stats_agent.work);
+ return;
+ }
+
+ sagent->delay = block->command == MLX5_HV_VHCA_STATS_UPDATE_ONCE ? 0 :
+ msecs_to_jiffies(block->command * 100);
+
+ queue_delayed_work(priv->wq, &sagent->work, sagent->delay);
+}
+
+static void mlx5e_hv_vhca_stats_cleanup(struct mlx5_hv_vhca_agent *agent)
+{
+ struct mlx5e_priv *priv = mlx5_hv_vhca_agent_priv(agent);
+
+ cancel_delayed_work_sync(&priv->stats_agent.work);
+}
+
+int mlx5e_hv_vhca_stats_create(struct mlx5e_priv *priv)
+{
+ int buf_len = mlx5e_hv_vhca_stats_buf_size(priv);
+ struct mlx5_hv_vhca_agent *agent;
+
+ priv->stats_agent.buf = kvzalloc(buf_len, GFP_KERNEL);
+ if (!priv->stats_agent.buf)
+ return -ENOMEM;
+
+ agent = mlx5_hv_vhca_agent_create(priv->mdev->hv_vhca,
+ MLX5_HV_VHCA_AGENT_STATS,
+ mlx5e_hv_vhca_stats_control, NULL,
+ mlx5e_hv_vhca_stats_cleanup,
+ priv);
+
+ if (IS_ERR_OR_NULL(agent)) {
+ if (IS_ERR(agent))
+ netdev_warn(priv->netdev,
+ "Failed to create hv vhca stats agent, err = %ld\n",
+ PTR_ERR(agent));
+
+ kvfree(priv->stats_agent.buf);
+ return IS_ERR_OR_NULL(agent);
+ }
+
+ priv->stats_agent.agent = agent;
+ INIT_DELAYED_WORK(&priv->stats_agent.work, mlx5e_hv_vhca_stats_work);
+
+ return 0;
+}
+
+void mlx5e_hv_vhca_stats_destroy(struct mlx5e_priv *priv)
+{
+ if (IS_ERR_OR_NULL(priv->stats_agent.agent))
+ return;
+
+ mlx5_hv_vhca_agent_destroy(priv->stats_agent.agent);
+ kvfree(priv->stats_agent.buf);
+}
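
One subtlety in the fill path above is pointer arithmetic: adding sizeof(struct ...) to a u64 * cursor advances eight times too far, which is why the buffer is walked as void * in byte offsets (and why the buffer, allocated with kvzalloc, must be released with kvfree). A stand-alone demonstration of the difference; the struct layout mimics mlx5e_hv_vhca_per_ring_stats:

#include <stdio.h>
#include <stdint.h>

/* Same shape as mlx5e_hv_vhca_per_ring_stats: four u64 counters. */
struct ring_stats {
	uint64_t rx_packets, rx_bytes, tx_packets, tx_bytes;
};

int main(void)
{
	uint64_t buf[64];
	uint64_t *u64p = buf;
	char *bytep = (char *)buf;

	/* Adding sizeof() to a u64 * advances sizeof() * 8 bytes... */
	printf("u64* step:  %td bytes\n",
	       (char *)(u64p + sizeof(struct ring_stats)) - (char *)buf);
	/* ...while a byte cursor advances exactly sizeof() bytes. */
	printf("byte step:  %td bytes\n",
	       (bytep + sizeof(struct ring_stats)) - (char *)buf);
	return 0;
}
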
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/hv_vhca_stats.h b/drivers/net/ethernet/mellanox/mlx5/core/en/hv_vhca_stats.h
new file mode 100644
index 000000000000..664463faf77b
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/hv_vhca_stats.h
@@ -0,0 +1,25 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2019 Mellanox Technologies. */
+
+#ifndef __MLX5_EN_STATS_VHCA_H__
+#define __MLX5_EN_STATS_VHCA_H__
+#include "en.h"
+
+#if IS_ENABLED(CONFIG_PCI_HYPERV_INTERFACE)
+
+int mlx5e_hv_vhca_stats_create(struct mlx5e_priv *priv);
+void mlx5e_hv_vhca_stats_destroy(struct mlx5e_priv *priv);
+
+#else
+
+static inline int mlx5e_hv_vhca_stats_create(struct mlx5e_priv *priv)
+{
+ return 0;
+}
+
+static inline void mlx5e_hv_vhca_stats_destroy(struct mlx5e_priv *priv)
+{
+}
+#endif
+
+#endif /* __MLX5_EN_STATS_VHCA_H__ */
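
The header above uses the usual Kconfig stub pattern: when the Hyper-V interface is compiled out, callers link against empty static inlines instead of sprouting #ifdefs, and the optimizer removes the calls entirely. A self-contained sketch of the idiom, with FEATURE_X standing in for the config symbol:

#include <stdio.h>

/* Flip to 1 to compile the real implementation instead of the stubs. */
#define FEATURE_X 0

#if FEATURE_X
int stats_create(void) { puts("stats agent created"); return 0; }
void stats_destroy(void) { puts("stats agent destroyed"); }
#else
/* Stubs: callers need no #ifdef and pay no runtime cost. */
static inline int stats_create(void) { return 0; }
static inline void stats_destroy(void) { }
#endif

int main(void)
{
	int err = stats_create();

	stats_destroy();
	return err;
}
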
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter.h b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter.h
deleted file mode 100644
index e78e92753d73..000000000000
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter.h
+++ /dev/null
@@ -1,15 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright (c) 2019 Mellanox Technologies. */
-
-#ifndef __MLX5E_EN_REPORTER_H
-#define __MLX5E_EN_REPORTER_H
-
-#include <linux/mlx5/driver.h>
-#include "en.h"
-
-int mlx5e_tx_reporter_create(struct mlx5e_priv *priv);
-void mlx5e_tx_reporter_destroy(struct mlx5e_priv *priv);
-void mlx5e_tx_reporter_err_cqe(struct mlx5e_txqsq *sq);
-int mlx5e_tx_reporter_timeout(struct mlx5e_txqsq *sq);
-
-#endif
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c
new file mode 100644
index 000000000000..b860569d4247
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c
@@ -0,0 +1,404 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2019 Mellanox Technologies.
+
+#include "health.h"
+#include "params.h"
+
+static int mlx5e_query_rq_state(struct mlx5_core_dev *dev, u32 rqn, u8 *state)
+{
+ int outlen = MLX5_ST_SZ_BYTES(query_rq_out);
+ void *out;
+ void *rqc;
+ int err;
+
+ out = kvzalloc(outlen, GFP_KERNEL);
+ if (!out)
+ return -ENOMEM;
+
+ err = mlx5_core_query_rq(dev, rqn, out);
+ if (err)
+ goto out;
+
+ rqc = MLX5_ADDR_OF(query_rq_out, out, rq_context);
+ *state = MLX5_GET(rqc, rqc, state);
+
+out:
+ kvfree(out);
+ return err;
+}
+
+static int mlx5e_wait_for_icosq_flush(struct mlx5e_icosq *icosq)
+{
+ unsigned long exp_time = jiffies + msecs_to_jiffies(2000);
+
+ while (time_before(jiffies, exp_time)) {
+ if (icosq->cc == icosq->pc)
+ return 0;
+
+ msleep(20);
+ }
+
+ netdev_err(icosq->channel->netdev,
+ "Wait for ICOSQ 0x%x flush timeout (cc = 0x%x, pc = 0x%x)\n",
+ icosq->sqn, icosq->cc, icosq->pc);
+
+ return -ETIMEDOUT;
+}
+
+static void mlx5e_reset_icosq_cc_pc(struct mlx5e_icosq *icosq)
+{
+ WARN_ONCE(icosq->cc != icosq->pc, "ICOSQ 0x%x: cc (0x%x) != pc (0x%x)\n",
+ icosq->sqn, icosq->cc, icosq->pc);
+ icosq->cc = 0;
+ icosq->pc = 0;
+}
+
+static int mlx5e_rx_reporter_err_icosq_cqe_recover(void *ctx)
+{
+ struct mlx5_core_dev *mdev;
+ struct mlx5e_icosq *icosq;
+ struct net_device *dev;
+ struct mlx5e_rq *rq;
+ u8 state;
+ int err;
+
+ icosq = ctx;
+ rq = &icosq->channel->rq;
+ mdev = icosq->channel->mdev;
+ dev = icosq->channel->netdev;
+ err = mlx5_core_query_sq_state(mdev, icosq->sqn, &state);
+ if (err) {
+ netdev_err(dev, "Failed to query ICOSQ 0x%x state. err = %d\n",
+ icosq->sqn, err);
+ goto out;
+ }
+
+ if (state != MLX5_SQC_STATE_ERR)
+ goto out;
+
+ mlx5e_deactivate_rq(rq);
+ err = mlx5e_wait_for_icosq_flush(icosq);
+ if (err)
+ goto out;
+
+ mlx5e_deactivate_icosq(icosq);
+
+ /* At this point, both the rq and the icosq are disabled */
+
+ err = mlx5e_health_sq_to_ready(icosq->channel, icosq->sqn);
+ if (err)
+ goto out;
+
+ mlx5e_reset_icosq_cc_pc(icosq);
+ mlx5e_free_rx_descs(rq);
+ clear_bit(MLX5E_SQ_STATE_RECOVERING, &icosq->state);
+ mlx5e_activate_icosq(icosq);
+ mlx5e_activate_rq(rq);
+
+ rq->stats->recover++;
+ return 0;
+out:
+ clear_bit(MLX5E_SQ_STATE_RECOVERING, &icosq->state);
+ return err;
+}
+
+void mlx5e_reporter_icosq_cqe_err(struct mlx5e_icosq *icosq)
+{
+ struct mlx5e_priv *priv = icosq->channel->priv;
+ char err_str[MLX5E_REPORTER_PER_Q_MAX_LEN];
+ struct mlx5e_err_ctx err_ctx = {};
+
+ err_ctx.ctx = icosq;
+ err_ctx.recover = mlx5e_rx_reporter_err_icosq_cqe_recover;
+ sprintf(err_str, "ERR CQE on ICOSQ: 0x%x", icosq->sqn);
+
+ mlx5e_health_report(priv, priv->rx_reporter, err_str, &err_ctx);
+}
+
+static int mlx5e_rq_to_ready(struct mlx5e_rq *rq, int curr_state)
+{
+ struct net_device *dev = rq->netdev;
+ int err;
+
+ err = mlx5e_modify_rq_state(rq, curr_state, MLX5_RQC_STATE_RST);
+ if (err) {
+ netdev_err(dev, "Failed to move rq 0x%x to reset\n", rq->rqn);
+ return err;
+ }
+ err = mlx5e_modify_rq_state(rq, MLX5_RQC_STATE_RST, MLX5_RQC_STATE_RDY);
+ if (err) {
+ netdev_err(dev, "Failed to move rq 0x%x to ready\n", rq->rqn);
+ return err;
+ }
+
+ return 0;
+}
+
+static int mlx5e_rx_reporter_err_rq_cqe_recover(void *ctx)
+{
+ struct mlx5_core_dev *mdev;
+ struct net_device *dev;
+ struct mlx5e_rq *rq;
+ u8 state;
+ int err;
+
+ rq = ctx;
+ mdev = rq->mdev;
+ dev = rq->netdev;
+ err = mlx5e_query_rq_state(mdev, rq->rqn, &state);
+ if (err) {
+ netdev_err(dev, "Failed to query RQ 0x%x state. err = %d\n",
+ rq->rqn, err);
+ goto out;
+ }
+
+ if (state != MLX5_RQC_STATE_ERR)
+ goto out;
+
+ mlx5e_deactivate_rq(rq);
+ mlx5e_free_rx_descs(rq);
+
+ err = mlx5e_rq_to_ready(rq, MLX5_RQC_STATE_ERR);
+ if (err)
+ goto out;
+
+ clear_bit(MLX5E_RQ_STATE_RECOVERING, &rq->state);
+ mlx5e_activate_rq(rq);
+ rq->stats->recover++;
+ return 0;
+out:
+ clear_bit(MLX5E_RQ_STATE_RECOVERING, &rq->state);
+ return err;
+}
+
+void mlx5e_reporter_rq_cqe_err(struct mlx5e_rq *rq)
+{
+ struct mlx5e_priv *priv = rq->channel->priv;
+ char err_str[MLX5E_REPORTER_PER_Q_MAX_LEN];
+ struct mlx5e_err_ctx err_ctx = {};
+
+ err_ctx.ctx = rq;
+ err_ctx.recover = mlx5e_rx_reporter_err_rq_cqe_recover;
+ sprintf(err_str, "ERR CQE on RQ: 0x%x", rq->rqn);
+
+ mlx5e_health_report(priv, priv->rx_reporter, err_str, &err_ctx);
+}
+
+static int mlx5e_rx_reporter_timeout_recover(void *ctx)
+{
+ struct mlx5e_icosq *icosq;
+ struct mlx5_eq_comp *eq;
+ struct mlx5e_rq *rq;
+ int err;
+
+ rq = ctx;
+ icosq = &rq->channel->icosq;
+ eq = rq->cq.mcq.eq;
+ err = mlx5e_health_channel_eq_recover(eq, rq->channel);
+ if (err)
+ clear_bit(MLX5E_SQ_STATE_ENABLED, &icosq->state);
+
+ return err;
+}
+
+void mlx5e_reporter_rx_timeout(struct mlx5e_rq *rq)
+{
+ struct mlx5e_icosq *icosq = &rq->channel->icosq;
+ struct mlx5e_priv *priv = rq->channel->priv;
+ char err_str[MLX5E_REPORTER_PER_Q_MAX_LEN];
+ struct mlx5e_err_ctx err_ctx = {};
+
+ err_ctx.ctx = rq;
+ err_ctx.recover = mlx5e_rx_reporter_timeout_recover;
+ sprintf(err_str, "RX timeout on channel: %d, ICOSQ: 0x%x RQ: 0x%x, CQ: 0x%x\n",
+ icosq->channel->ix, icosq->sqn, rq->rqn, rq->cq.mcq.cqn);
+
+ mlx5e_health_report(priv, priv->rx_reporter, err_str, &err_ctx);
+}
+
+static int mlx5e_rx_reporter_recover_from_ctx(struct mlx5e_err_ctx *err_ctx)
+{
+ return err_ctx->recover(err_ctx->ctx);
+}
+
+static int mlx5e_rx_reporter_recover(struct devlink_health_reporter *reporter,
+ void *context)
+{
+ struct mlx5e_priv *priv = devlink_health_reporter_priv(reporter);
+ struct mlx5e_err_ctx *err_ctx = context;
+
+ return err_ctx ? mlx5e_rx_reporter_recover_from_ctx(err_ctx) :
+ mlx5e_health_recover_channels(priv);
+}
+
+static int mlx5e_rx_reporter_build_diagnose_output(struct mlx5e_rq *rq,
+ struct devlink_fmsg *fmsg)
+{
+ struct mlx5e_priv *priv = rq->channel->priv;
+ struct mlx5e_params *params;
+ struct mlx5e_icosq *icosq;
+ u8 icosq_hw_state;
+ int wqes_sz;
+ u8 hw_state;
+ u16 wq_head;
+ int err;
+
+ params = &priv->channels.params;
+ icosq = &rq->channel->icosq;
+ err = mlx5e_query_rq_state(priv->mdev, rq->rqn, &hw_state);
+ if (err)
+ return err;
+
+ err = mlx5_core_query_sq_state(priv->mdev, icosq->sqn, &icosq_hw_state);
+ if (err)
+ return err;
+
+ wqes_sz = mlx5e_rqwq_get_cur_sz(rq);
+ wq_head = params->rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ ?
+ rq->mpwqe.wq.head : mlx5_wq_cyc_get_head(&rq->wqe.wq);
+
+ err = devlink_fmsg_obj_nest_start(fmsg);
+ if (err)
+ return err;
+
+ err = devlink_fmsg_u32_pair_put(fmsg, "channel ix", rq->channel->ix);
+ if (err)
+ return err;
+
+ err = devlink_fmsg_u32_pair_put(fmsg, "rqn", rq->rqn);
+ if (err)
+ return err;
+
+ err = devlink_fmsg_u8_pair_put(fmsg, "HW state", hw_state);
+ if (err)
+ return err;
+
+ err = devlink_fmsg_u8_pair_put(fmsg, "SW state", rq->state);
+ if (err)
+ return err;
+
+ err = devlink_fmsg_u32_pair_put(fmsg, "posted WQEs", wqes_sz);
+ if (err)
+ return err;
+
+ err = devlink_fmsg_u32_pair_put(fmsg, "cc", wq_head);
+ if (err)
+ return err;
+
+ err = devlink_fmsg_u8_pair_put(fmsg, "ICOSQ HW state", icosq_hw_state);
+ if (err)
+ return err;
+
+ err = mlx5e_reporter_cq_diagnose(&rq->cq, fmsg);
+ if (err)
+ return err;
+
+ err = devlink_fmsg_obj_nest_end(fmsg);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+static int mlx5e_rx_reporter_diagnose(struct devlink_health_reporter *reporter,
+ struct devlink_fmsg *fmsg)
+{
+ struct mlx5e_priv *priv = devlink_health_reporter_priv(reporter);
+ struct mlx5e_params *params = &priv->channels.params;
+ struct mlx5e_rq *generic_rq;
+ u32 rq_stride, rq_sz;
+ int i, err = 0;
+
+ mutex_lock(&priv->state_lock);
+
+ if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
+ goto unlock;
+
+ generic_rq = &priv->channels.c[0]->rq;
+ rq_sz = mlx5e_rqwq_get_size(generic_rq);
+ rq_stride = BIT(mlx5e_mpwqe_get_log_stride_size(priv->mdev, params, NULL));
+
+ err = mlx5e_reporter_named_obj_nest_start(fmsg, "Common config");
+ if (err)
+ goto unlock;
+
+ err = mlx5e_reporter_named_obj_nest_start(fmsg, "RQ");
+ if (err)
+ goto unlock;
+
+ err = devlink_fmsg_u8_pair_put(fmsg, "type", params->rq_wq_type);
+ if (err)
+ goto unlock;
+
+ err = devlink_fmsg_u64_pair_put(fmsg, "stride size", rq_stride);
+ if (err)
+ goto unlock;
+
+ err = devlink_fmsg_u32_pair_put(fmsg, "size", rq_sz);
+ if (err)
+ goto unlock;
+
+ err = mlx5e_reporter_named_obj_nest_end(fmsg);
+ if (err)
+ goto unlock;
+
+ err = mlx5e_reporter_cq_common_diagnose(&generic_rq->cq, fmsg);
+ if (err)
+ goto unlock;
+
+ err = mlx5e_reporter_named_obj_nest_end(fmsg);
+ if (err)
+ goto unlock;
+
+ err = devlink_fmsg_arr_pair_nest_start(fmsg, "RQs");
+ if (err)
+ goto unlock;
+
+ for (i = 0; i < priv->channels.num; i++) {
+ struct mlx5e_rq *rq = &priv->channels.c[i]->rq;
+
+ err = mlx5e_rx_reporter_build_diagnose_output(rq, fmsg);
+ if (err)
+ goto unlock;
+ }
+ err = devlink_fmsg_arr_pair_nest_end(fmsg);
+ if (err)
+ goto unlock;
+unlock:
+ mutex_unlock(&priv->state_lock);
+ return err;
+}
+
+static const struct devlink_health_reporter_ops mlx5_rx_reporter_ops = {
+ .name = "rx",
+ .recover = mlx5e_rx_reporter_recover,
+ .diagnose = mlx5e_rx_reporter_diagnose,
+};
+
+#define MLX5E_REPORTER_RX_GRACEFUL_PERIOD 500
+
+int mlx5e_reporter_rx_create(struct mlx5e_priv *priv)
+{
+ struct devlink *devlink = priv_to_devlink(priv->mdev);
+ struct devlink_health_reporter *reporter;
+
+ reporter = devlink_health_reporter_create(devlink,
+ &mlx5_rx_reporter_ops,
+ MLX5E_REPORTER_RX_GRACEFUL_PERIOD,
+ true, priv);
+ if (IS_ERR(reporter)) {
+ netdev_warn(priv->netdev, "Failed to create rx reporter, err = %ld\n",
+ PTR_ERR(reporter));
+ return PTR_ERR(reporter);
+ }
+ priv->rx_reporter = reporter;
+ return 0;
+}
+
+void mlx5e_reporter_rx_destroy(struct mlx5e_priv *priv)
+{
+ if (!priv->rx_reporter)
+ return;
+
+ devlink_health_reporter_destroy(priv->rx_reporter);
+}
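
The recovery paths in this file share one shape: deactivate the queue, wait for outstanding work to drain under a deadline, reset the software counters, then reactivate. A toy model of the drain-with-timeout step; the 2-second budget mirrors mlx5e_wait_for_icosq_flush, everything else is simplified:

#include <stdio.h>
#include <time.h>

/* Minimal model of a send queue: pc counts posted WQEs, cc completed. */
struct sq { unsigned int cc, pc; };

static int sq_flushed(struct sq *sq)
{
	sq->cc++;			/* stand-in for hardware progress */
	return sq->cc == sq->pc;
}

/* Poll for cc == pc, but give up after a deadline instead of blocking
 * recovery forever (the kernel sleeps 20ms between polls).
 */
static int wait_for_flush(struct sq *sq)
{
	time_t deadline = time(NULL) + 2;

	while (time(NULL) <= deadline) {
		if (sq_flushed(sq))
			return 0;
	}
	return -1;			/* -ETIMEDOUT */
}

int main(void)
{
	struct sq sq = { .cc = 0, .pc = 5 };

	printf("flush %s\n", wait_for_flush(&sq) ? "timed out" : "done");
	return 0;
}
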
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c
index c7f86453c638..bfed558637c2 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c
@@ -1,16 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2019 Mellanox Technologies. */
-#include <net/devlink.h>
-#include "reporter.h"
-#include "lib/eq.h"
-
-#define MLX5E_TX_REPORTER_PER_SQ_MAX_LEN 256
-
-struct mlx5e_tx_err_ctx {
- int (*recover)(struct mlx5e_txqsq *sq);
- struct mlx5e_txqsq *sq;
-};
+#include "health.h"
static int mlx5e_wait_for_sq_flush(struct mlx5e_txqsq *sq)
{
@@ -40,41 +31,20 @@ static void mlx5e_reset_txqsq_cc_pc(struct mlx5e_txqsq *sq)
sq->pc = 0;
}
-static int mlx5e_sq_to_ready(struct mlx5e_txqsq *sq, int curr_state)
+static int mlx5e_tx_reporter_err_cqe_recover(void *ctx)
{
- struct mlx5_core_dev *mdev = sq->channel->mdev;
- struct net_device *dev = sq->channel->netdev;
- struct mlx5e_modify_sq_param msp = {0};
+ struct mlx5_core_dev *mdev;
+ struct net_device *dev;
+ struct mlx5e_txqsq *sq;
+ u8 state;
int err;
- msp.curr_state = curr_state;
- msp.next_state = MLX5_SQC_STATE_RST;
-
- err = mlx5e_modify_sq(mdev, sq->sqn, &msp);
- if (err) {
- netdev_err(dev, "Failed to move sq 0x%x to reset\n", sq->sqn);
- return err;
- }
-
- memset(&msp, 0, sizeof(msp));
- msp.curr_state = MLX5_SQC_STATE_RST;
- msp.next_state = MLX5_SQC_STATE_RDY;
-
- err = mlx5e_modify_sq(mdev, sq->sqn, &msp);
- if (err) {
- netdev_err(dev, "Failed to move sq 0x%x to ready\n", sq->sqn);
- return err;
- }
-
- return 0;
-}
+ sq = ctx;
+ mdev = sq->channel->mdev;
+ dev = sq->channel->netdev;
-static int mlx5e_tx_reporter_err_cqe_recover(struct mlx5e_txqsq *sq)
-{
- struct mlx5_core_dev *mdev = sq->channel->mdev;
- struct net_device *dev = sq->channel->netdev;
- u8 state;
- int err;
+ if (!test_bit(MLX5E_SQ_STATE_RECOVERING, &sq->state))
+ return 0;
err = mlx5_core_query_sq_state(mdev, sq->sqn, &state);
if (err) {
@@ -97,7 +67,7 @@ static int mlx5e_tx_reporter_err_cqe_recover(struct mlx5e_txqsq *sq)
* pending WQEs. The SQ can now be safely reset.
*/
- err = mlx5e_sq_to_ready(sq, state);
+ err = mlx5e_health_sq_to_ready(sq->channel, sq->sqn);
if (err)
goto out;
@@ -112,115 +82,98 @@ out:
return err;
}
-static int mlx5_tx_health_report(struct devlink_health_reporter *tx_reporter,
- char *err_str,
- struct mlx5e_tx_err_ctx *err_ctx)
+void mlx5e_reporter_tx_err_cqe(struct mlx5e_txqsq *sq)
{
- if (IS_ERR_OR_NULL(tx_reporter)) {
- netdev_err(err_ctx->sq->channel->netdev, err_str);
- return err_ctx->recover(err_ctx->sq);
- }
-
- return devlink_health_report(tx_reporter, err_str, err_ctx);
-}
+ struct mlx5e_priv *priv = sq->channel->priv;
+ char err_str[MLX5E_REPORTER_PER_Q_MAX_LEN];
+ struct mlx5e_err_ctx err_ctx = {0};
-void mlx5e_tx_reporter_err_cqe(struct mlx5e_txqsq *sq)
-{
- char err_str[MLX5E_TX_REPORTER_PER_SQ_MAX_LEN];
- struct mlx5e_tx_err_ctx err_ctx = {0};
-
- err_ctx.sq = sq;
- err_ctx.recover = mlx5e_tx_reporter_err_cqe_recover;
+ err_ctx.ctx = sq;
+ err_ctx.recover = mlx5e_tx_reporter_err_cqe_recover;
sprintf(err_str, "ERR CQE on SQ: 0x%x", sq->sqn);
- mlx5_tx_health_report(sq->channel->priv->tx_reporter, err_str,
- &err_ctx);
+ mlx5e_health_report(priv, priv->tx_reporter, err_str, &err_ctx);
}
-static int mlx5e_tx_reporter_timeout_recover(struct mlx5e_txqsq *sq)
+static int mlx5e_tx_reporter_timeout_recover(void *ctx)
{
- struct mlx5_eq_comp *eq = sq->cq.mcq.eq;
- u32 eqe_count;
-
- netdev_err(sq->channel->netdev, "EQ 0x%x: Cons = 0x%x, irqn = 0x%x\n",
- eq->core.eqn, eq->core.cons_index, eq->core.irqn);
+ struct mlx5_eq_comp *eq;
+ struct mlx5e_txqsq *sq;
+ int err;
- eqe_count = mlx5_eq_poll_irq_disabled(eq);
- if (!eqe_count) {
+ sq = ctx;
+ eq = sq->cq.mcq.eq;
+ err = mlx5e_health_channel_eq_recover(eq, sq->channel);
+ if (err)
clear_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
- return -EIO;
- }
- netdev_err(sq->channel->netdev, "Recover %d eqes on EQ 0x%x\n",
- eqe_count, eq->core.eqn);
- sq->channel->stats->eq_rearm++;
- return 0;
+ return err;
}
-int mlx5e_tx_reporter_timeout(struct mlx5e_txqsq *sq)
+int mlx5e_reporter_tx_timeout(struct mlx5e_txqsq *sq)
{
- char err_str[MLX5E_TX_REPORTER_PER_SQ_MAX_LEN];
- struct mlx5e_tx_err_ctx err_ctx;
+ struct mlx5e_priv *priv = sq->channel->priv;
+ char err_str[MLX5E_REPORTER_PER_Q_MAX_LEN];
+ struct mlx5e_err_ctx err_ctx;
- err_ctx.sq = sq;
- err_ctx.recover = mlx5e_tx_reporter_timeout_recover;
+ err_ctx.ctx = sq;
+ err_ctx.recover = mlx5e_tx_reporter_timeout_recover;
sprintf(err_str,
"TX timeout on queue: %d, SQ: 0x%x, CQ: 0x%x, SQ Cons: 0x%x SQ Prod: 0x%x, usecs since last trans: %u\n",
sq->channel->ix, sq->sqn, sq->cq.mcq.cqn, sq->cc, sq->pc,
jiffies_to_usecs(jiffies - sq->txq->trans_start));
- return mlx5_tx_health_report(sq->channel->priv->tx_reporter, err_str,
- &err_ctx);
+ return mlx5e_health_report(priv, priv->tx_reporter, err_str, &err_ctx);
}
/* state lock cannot be grabbed within this function.
* It can cause a deadlock or a use-after-free.
*/
-static int mlx5e_tx_reporter_recover_from_ctx(struct mlx5e_tx_err_ctx *err_ctx)
-{
- return err_ctx->recover(err_ctx->sq);
-}
-
-static int mlx5e_tx_reporter_recover_all(struct mlx5e_priv *priv)
+static int mlx5e_tx_reporter_recover_from_ctx(struct mlx5e_err_ctx *err_ctx)
{
- int err = 0;
-
- rtnl_lock();
- mutex_lock(&priv->state_lock);
-
- if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
- goto out;
-
- err = mlx5e_safe_reopen_channels(priv);
-
-out:
- mutex_unlock(&priv->state_lock);
- rtnl_unlock();
-
- return err;
+ return err_ctx->recover(err_ctx->ctx);
}
static int mlx5e_tx_reporter_recover(struct devlink_health_reporter *reporter,
void *context)
{
struct mlx5e_priv *priv = devlink_health_reporter_priv(reporter);
- struct mlx5e_tx_err_ctx *err_ctx = context;
+ struct mlx5e_err_ctx *err_ctx = context;
return err_ctx ? mlx5e_tx_reporter_recover_from_ctx(err_ctx) :
- mlx5e_tx_reporter_recover_all(priv);
+ mlx5e_health_recover_channels(priv);
}
static int
mlx5e_tx_reporter_build_diagnose_output(struct devlink_fmsg *fmsg,
- u32 sqn, u8 state, bool stopped)
+ struct mlx5e_txqsq *sq, int tc)
{
+ struct mlx5e_priv *priv = sq->channel->priv;
+ bool stopped = netif_xmit_stopped(sq->txq);
+ u8 state;
int err;
+ err = mlx5_core_query_sq_state(priv->mdev, sq->sqn, &state);
+ if (err)
+ return err;
+
err = devlink_fmsg_obj_nest_start(fmsg);
if (err)
return err;
- err = devlink_fmsg_u32_pair_put(fmsg, "sqn", sqn);
+ err = devlink_fmsg_u32_pair_put(fmsg, "channel ix", sq->ch_ix);
+ if (err)
+ return err;
+
+ err = devlink_fmsg_u32_pair_put(fmsg, "tc", tc);
+ if (err)
+ return err;
+
+ err = devlink_fmsg_u32_pair_put(fmsg, "txq ix", sq->txq_ix);
+ if (err)
+ return err;
+
+ err = devlink_fmsg_u32_pair_put(fmsg, "sqn", sq->sqn);
if (err)
return err;
@@ -232,6 +185,18 @@ mlx5e_tx_reporter_build_diagnose_output(struct devlink_fmsg *fmsg,
if (err)
return err;
+ err = devlink_fmsg_u32_pair_put(fmsg, "cc", sq->cc);
+ if (err)
+ return err;
+
+ err = devlink_fmsg_u32_pair_put(fmsg, "pc", sq->pc);
+ if (err)
+ return err;
+
+ err = mlx5e_reporter_cq_diagnose(&sq->cq, fmsg);
+ if (err)
+ return err;
+
err = devlink_fmsg_obj_nest_end(fmsg);
if (err)
return err;
@@ -243,31 +208,61 @@ static int mlx5e_tx_reporter_diagnose(struct devlink_health_reporter *reporter,
struct devlink_fmsg *fmsg)
{
struct mlx5e_priv *priv = devlink_health_reporter_priv(reporter);
- int i, err = 0;
+ struct mlx5e_txqsq *generic_sq = priv->txq2sq[0];
+ u32 sq_stride, sq_sz;
+ int i, tc, err = 0;
mutex_lock(&priv->state_lock);
if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
goto unlock;
+ sq_sz = mlx5_wq_cyc_get_size(&generic_sq->wq);
+ sq_stride = MLX5_SEND_WQE_BB;
+
+ err = mlx5e_reporter_named_obj_nest_start(fmsg, "Common Config");
+ if (err)
+ goto unlock;
+
+ err = mlx5e_reporter_named_obj_nest_start(fmsg, "SQ");
+ if (err)
+ goto unlock;
+
+ err = devlink_fmsg_u64_pair_put(fmsg, "stride size", sq_stride);
+ if (err)
+ goto unlock;
+
+ err = devlink_fmsg_u32_pair_put(fmsg, "size", sq_sz);
+ if (err)
+ goto unlock;
+
+ err = mlx5e_reporter_cq_common_diagnose(&generic_sq->cq, fmsg);
+ if (err)
+ goto unlock;
+
+ err = mlx5e_reporter_named_obj_nest_end(fmsg);
+ if (err)
+ goto unlock;
+
+ err = mlx5e_reporter_named_obj_nest_end(fmsg);
+ if (err)
+ goto unlock;
+
err = devlink_fmsg_arr_pair_nest_start(fmsg, "SQs");
if (err)
goto unlock;
- for (i = 0; i < priv->channels.num * priv->channels.params.num_tc;
- i++) {
- struct mlx5e_txqsq *sq = priv->txq2sq[i];
- u8 state;
+ for (i = 0; i < priv->channels.num; i++) {
+ struct mlx5e_channel *c = priv->channels.c[i];
- err = mlx5_core_query_sq_state(priv->mdev, sq->sqn, &state);
- if (err)
- goto unlock;
+ for (tc = 0; tc < priv->channels.params.num_tc; tc++) {
+ struct mlx5e_txqsq *sq = &c->sq[tc];
- err = mlx5e_tx_reporter_build_diagnose_output(fmsg, sq->sqn,
- state,
- netif_xmit_stopped(sq->txq));
- if (err)
- goto unlock;
+ err = mlx5e_tx_reporter_build_diagnose_output(fmsg, sq, tc);
+ if (err)
+ goto unlock;
+ }
}
err = devlink_fmsg_arr_pair_nest_end(fmsg);
if (err)
@@ -286,25 +281,30 @@ static const struct devlink_health_reporter_ops mlx5_tx_reporter_ops = {
#define MLX5_REPORTER_TX_GRACEFUL_PERIOD 500
-int mlx5e_tx_reporter_create(struct mlx5e_priv *priv)
+int mlx5e_reporter_tx_create(struct mlx5e_priv *priv)
{
+ struct devlink_health_reporter *reporter;
struct mlx5_core_dev *mdev = priv->mdev;
- struct devlink *devlink = priv_to_devlink(mdev);
+ struct devlink *devlink;
- priv->tx_reporter =
+ devlink = priv_to_devlink(mdev);
+ reporter =
devlink_health_reporter_create(devlink, &mlx5_tx_reporter_ops,
MLX5_REPORTER_TX_GRACEFUL_PERIOD,
true, priv);
- if (IS_ERR(priv->tx_reporter))
+ if (IS_ERR(reporter)) {
netdev_warn(priv->netdev,
"Failed to create tx reporter, err = %ld\n",
- PTR_ERR(priv->tx_reporter));
- return IS_ERR_OR_NULL(priv->tx_reporter);
+ PTR_ERR(reporter));
+ return PTR_ERR(reporter);
+ }
+ priv->tx_reporter = reporter;
+ return 0;
}
-void mlx5e_tx_reporter_destroy(struct mlx5e_priv *priv)
+void mlx5e_reporter_tx_destroy(struct mlx5e_priv *priv)
{
- if (IS_ERR_OR_NULL(priv->tx_reporter))
+ if (!priv->tx_reporter)
return;
devlink_health_reporter_destroy(priv->tx_reporter);
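
Both reporters build their diagnose output the same way: every devlink_fmsg call can fail, so each pair-put is followed by an error check and an early return. A compact user-space rendition of that chain, with a FILE * standing in for the fmsg handle (the emitter name is invented):

#include <stdio.h>

/* Toy stand-in for devlink_fmsg_u32_pair_put(): emitters can fail,
 * so diagnose callbacks check and propagate after every call.
 */
static int fmsg_u32_pair_put(FILE *fmsg, const char *name, unsigned int val)
{
	return fprintf(fmsg, "%s: %u\n", name, val) < 0 ? -1 : 0;
}

struct txq { unsigned int sqn, cc, pc; };

static int diagnose_txq(FILE *fmsg, const struct txq *sq)
{
	int err;

	err = fmsg_u32_pair_put(fmsg, "sqn", sq->sqn);
	if (err)
		return err;
	err = fmsg_u32_pair_put(fmsg, "cc", sq->cc);
	if (err)
		return err;
	return fmsg_u32_pair_put(fmsg, "pc", sq->pc);
}

int main(void)
{
	struct txq sq = { .sqn = 0x1a, .cc = 7, .pc = 9 };

	return diagnose_txq(stdout, &sq);
}
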
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c
index a6a52806be45..4c4620db3d31 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c
@@ -31,29 +31,36 @@ static int get_route_and_out_devs(struct mlx5e_priv *priv,
real_dev = is_vlan_dev(dev) ? vlan_dev_real_dev(dev) : dev;
uplink_dev = mlx5_eswitch_uplink_get_proto_dev(esw, REP_ETH);
- uplink_upper = netdev_master_upper_dev_get(uplink_dev);
+
+ rcu_read_lock();
+ uplink_upper = netdev_master_upper_dev_get_rcu(uplink_dev);
+ /* mlx5_lag_is_sriov() is a blocking function which can't be called
+ * while holding the RCU read lock. Take a reference on the
+ * net_device so it stays valid after the lock is dropped.
+ */
+ if (uplink_upper)
+ dev_hold(uplink_upper);
+ rcu_read_unlock();
+
dst_is_lag_dev = (uplink_upper &&
netif_is_lag_master(uplink_upper) &&
real_dev == uplink_upper &&
mlx5_lag_is_sriov(priv->mdev));
+ if (uplink_upper)
+ dev_put(uplink_upper);
/* if the egress device isn't on the same HW e-switch or
* it's a LAG device, use the uplink
*/
+ *route_dev = dev;
if (!netdev_port_same_parent_id(priv->netdev, real_dev) ||
- dst_is_lag_dev) {
- *route_dev = dev;
+ dst_is_lag_dev || is_vlan_dev(*route_dev))
*out_dev = uplink_dev;
- } else {
- *route_dev = dev;
- if (is_vlan_dev(*route_dev))
- *out_dev = uplink_dev;
- else if (mlx5e_eswitch_rep(dev) &&
- mlx5e_is_valid_eswitch_fwd_dev(priv, dev))
- *out_dev = *route_dev;
- else
- return -EOPNOTSUPP;
- }
+ else if (mlx5e_eswitch_rep(dev) &&
+ mlx5e_is_valid_eswitch_fwd_dev(priv, dev))
+ *out_dev = *route_dev;
+ else
+ return -EOPNOTSUPP;
if (!(mlx5e_eswitch_rep(*out_dev) &&
mlx5e_is_uplink_rep(netdev_priv(*out_dev))))
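
The tc_tun hunk is a classic lock-scope fix: the upper device is looked up under rcu_read_lock(), but mlx5_lag_is_sriov() may sleep, so the code pins the device with dev_hold(), leaves the RCU section, and drops the reference when done. The shape of the pattern, modeled with a mutex and a plain refcount since user space has no RCU:

#include <pthread.h>
#include <stdio.h>

struct dev {
	int refcnt;
	const char *name;
};

static pthread_mutex_t lookup_lock = PTHREAD_MUTEX_INITIALIZER;
static struct dev upper = { .refcnt = 1, .name = "bond0" };

static void blocking_check(struct dev *d)
{
	/* may sleep: must not run under the lookup lock */
	printf("checking %s (refcnt %d)\n", d->name, d->refcnt);
}

int main(void)
{
	struct dev *d;

	pthread_mutex_lock(&lookup_lock);	/* rcu_read_lock() */
	d = &upper;			/* netdev_master_upper_dev_get_rcu() */
	if (d)
		d->refcnt++;		/* dev_hold(): keep it alive */
	pthread_mutex_unlock(&lookup_lock);	/* rcu_read_unlock() */

	if (d) {
		blocking_check(d);	/* safe: we hold a reference */
		d->refcnt--;		/* dev_put() */
	}
	return 0;
}
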
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h b/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h
index ddfe19adb3d9..87be96747902 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h
@@ -6,7 +6,7 @@
#include "en.h"
-#define MLX5E_SQ_NOPS_ROOM MLX5_SEND_WQE_MAX_WQEBBS
+#define MLX5E_SQ_NOPS_ROOM (MLX5_SEND_WQE_MAX_WQEBBS - 1)
#define MLX5E_SQ_STOP_ROOM (MLX5_SEND_WQE_MAX_WQEBBS +\
MLX5E_SQ_NOPS_ROOM)
@@ -117,9 +117,27 @@ mlx5e_notify_hw(struct mlx5_wq_cyc *wq, u16 pc, void __iomem *uar_map,
mlx5_write64((__be32 *)ctrl, uar_map);
}
-static inline bool mlx5e_transport_inline_tx_wqe(struct mlx5e_tx_wqe *wqe)
+static inline bool mlx5e_transport_inline_tx_wqe(struct mlx5_wqe_ctrl_seg *cseg)
{
- return !!wqe->ctrl.tisn;
+ return cseg && !!cseg->tisn;
+}
+
+static inline u8
+mlx5e_tx_wqe_inline_mode(struct mlx5e_txqsq *sq, struct mlx5_wqe_ctrl_seg *cseg,
+ struct sk_buff *skb)
+{
+ u8 mode;
+
+ if (mlx5e_transport_inline_tx_wqe(cseg))
+ return MLX5_INLINE_MODE_TCP_UDP;
+
+ mode = sq->min_inline_mode;
+
+ if (skb_vlan_tag_present(skb) &&
+ test_bit(MLX5E_SQ_STATE_VLAN_NEED_L2_INLINE, &sq->state))
+ mode = max_t(u8, MLX5_INLINE_MODE_L2, mode);
+
+ return mode;
}
static inline void mlx5e_cq_arm(struct mlx5e_cq *cq)
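
mlx5e_tx_wqe_inline_mode() above resolves competing requirements into one value: transport WQEs always inline TCP/UDP headers, and a VLAN-tagged skb on an SQ flagged VLAN_NEED_L2_INLINE raises the floor to L2. A sketch of the same selection; the enum values are invented and only their relative order matters:

#include <stdio.h>

/* Illustrative ordering: higher value == more header bytes inlined. */
enum inline_mode { MODE_NONE, MODE_L2, MODE_IP, MODE_TCP_UDP };

static enum inline_mode tx_inline_mode(int transport_wqe, int vlan_tagged,
				       int need_l2_for_vlan,
				       enum inline_mode min_inline)
{
	enum inline_mode mode;

	if (transport_wqe)		/* e.g. TLS: always inline TCP/UDP */
		return MODE_TCP_UDP;

	mode = min_inline;
	if (vlan_tagged && need_l2_for_vlan && mode < MODE_L2)
		mode = MODE_L2;		/* HW cannot insert the VLAN tag */
	return mode;
}

int main(void)
{
	printf("%d\n", tx_inline_mode(0, 1, 1, MODE_NONE)); /* -> MODE_L2 */
	printf("%d\n", tx_inline_mode(1, 0, 0, MODE_NONE)); /* -> MODE_TCP_UDP */
	return 0;
}
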
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
index b0b982cf69bb..1ed5c33e022f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
@@ -179,33 +179,19 @@ static void mlx5e_xdp_mpwqe_session_start(struct mlx5e_xdpsq *sq)
struct mlx5e_xdp_mpwqe *session = &sq->mpwqe;
struct mlx5e_xdpsq_stats *stats = sq->stats;
struct mlx5_wq_cyc *wq = &sq->wq;
- u8 wqebbs;
- u16 pi;
-
- mlx5e_xdpsq_fetch_wqe(sq, &session->wqe);
-
- prefetchw(session->wqe->data);
- session->ds_count = MLX5E_XDP_TX_EMPTY_DS_COUNT;
- session->pkt_count = 0;
- session->complete = 0;
+ u16 pi, contig_wqebbs;
pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
+ contig_wqebbs = mlx5_wq_cyc_get_contig_wqebbs(wq, pi);
-/* The mult of MLX5_SEND_WQE_MAX_WQEBBS * MLX5_SEND_WQEBB_NUM_DS
- * (16 * 4 == 64) does not fit in the 6-bit DS field of Ctrl Segment.
- * We use a bound lower that MLX5_SEND_WQE_MAX_WQEBBS to let a
- * full-session WQE be cache-aligned.
- */
-#if L1_CACHE_BYTES < 128
-#define MLX5E_XDP_MPW_MAX_WQEBBS (MLX5_SEND_WQE_MAX_WQEBBS - 1)
-#else
-#define MLX5E_XDP_MPW_MAX_WQEBBS (MLX5_SEND_WQE_MAX_WQEBBS - 2)
-#endif
+ if (unlikely(contig_wqebbs < MLX5_SEND_WQE_MAX_WQEBBS))
+ mlx5e_fill_xdpsq_frag_edge(sq, wq, pi, contig_wqebbs);
- wqebbs = min_t(u16, mlx5_wq_cyc_get_contig_wqebbs(wq, pi),
- MLX5E_XDP_MPW_MAX_WQEBBS);
+ session->wqe = mlx5e_xdpsq_fetch_wqe(sq, &pi);
- session->max_ds_count = MLX5_SEND_WQEBB_NUM_DS * wqebbs;
+ prefetchw(session->wqe->data);
+ session->ds_count = MLX5E_XDP_TX_EMPTY_DS_COUNT;
+ session->pkt_count = 0;
mlx5e_xdp_update_inline_state(sq);
@@ -244,7 +230,7 @@ static int mlx5e_xmit_xdp_frame_check_mpwqe(struct mlx5e_xdpsq *sq)
{
if (unlikely(!sq->mpwqe.wqe)) {
if (unlikely(!mlx5e_wqc_has_room_for(&sq->wq, sq->cc, sq->pc,
- MLX5_SEND_WQE_MAX_WQEBBS))) {
+ MLX5E_XDPSQ_STOP_ROOM))) {
/* SQ is full, ring doorbell */
mlx5e_xmit_xdp_doorbell(sq);
sq->stats->full++;
@@ -285,8 +271,8 @@ static bool mlx5e_xmit_xdp_frame_mpwqe(struct mlx5e_xdpsq *sq,
mlx5e_xdp_mpwqe_add_dseg(sq, xdptxd, stats);
- if (unlikely(session->complete ||
- session->ds_count == session->max_ds_count))
+ if (unlikely(mlx5e_xdp_no_room_for_inline_pkt(session) ||
+ session->ds_count == MLX5E_XDP_MPW_MAX_NUM_DS))
mlx5e_xdp_mpwqe_complete(sq);
mlx5e_xdpi_fifo_push(&sq->db.xdpi_fifo, xdpi);
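
The session-size cap that moved into xdp.h follows from simple arithmetic: a full 16-WQEBB session would carry 16 * 4 = 64 data segments, one more than the 6-bit DS field can encode, so the bound is lowered (and lowered further on 128-byte cachelines). Checking the numbers in a standalone program:

#include <stdio.h>

#define SEND_WQE_MAX_WQEBBS 16	/* max WQE size in basic blocks */
#define SEND_WQEBB_NUM_DS    4	/* data segments per basic block */
#define DS_FIELD_BITS        6	/* width of the ctrl segment DS count */

int main(void)
{
	int max_ds_encodable = (1 << DS_FIELD_BITS) - 1;	/* 63 */
	int full_session_ds = SEND_WQE_MAX_WQEBBS * SEND_WQEBB_NUM_DS;
	int capped_wqebbs = SEND_WQE_MAX_WQEBBS - 1;	/* 64B cachelines */

	printf("full session: %d DS (limit %d) -> %s\n",
	       full_session_ds, max_ds_encodable,
	       full_session_ds > max_ds_encodable ? "overflows" : "fits");
	printf("capped session: %d DS\n", capped_wqebbs * SEND_WQEBB_NUM_DS);
	return 0;
}
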
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h
index b90923932668..36ac1e3816b9 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h
@@ -40,6 +40,26 @@
(sizeof(struct mlx5e_tx_wqe) / MLX5_SEND_WQE_DS)
#define MLX5E_XDP_TX_DS_COUNT (MLX5E_XDP_TX_EMPTY_DS_COUNT + 1 /* SG DS */)
+#define MLX5E_XDPSQ_STOP_ROOM (MLX5E_SQ_STOP_ROOM)
+
+#define MLX5E_XDP_INLINE_WQE_SZ_THRSD (256 - sizeof(struct mlx5_wqe_inline_seg))
+#define MLX5E_XDP_INLINE_WQE_MAX_DS_CNT \
+ DIV_ROUND_UP(MLX5E_XDP_INLINE_WQE_SZ_THRSD, MLX5_SEND_WQE_DS)
+
+/* The product MLX5_SEND_WQE_MAX_WQEBBS * MLX5_SEND_WQEBB_NUM_DS
+ * (16 * 4 == 64) does not fit in the 6-bit DS field of the Ctrl Segment.
+ * We use a bound lower than MLX5_SEND_WQE_MAX_WQEBBS to let a
+ * full-session WQE be cache-aligned.
+ */
+#if L1_CACHE_BYTES < 128
+#define MLX5E_XDP_MPW_MAX_WQEBBS (MLX5_SEND_WQE_MAX_WQEBBS - 1)
+#else
+#define MLX5E_XDP_MPW_MAX_WQEBBS (MLX5_SEND_WQE_MAX_WQEBBS - 2)
+#endif
+
+#define MLX5E_XDP_MPW_MAX_NUM_DS \
+ (MLX5E_XDP_MPW_MAX_WQEBBS * MLX5_SEND_WQEBB_NUM_DS)
+
struct mlx5e_xsk_param;
int mlx5e_xdp_max_mtu(struct mlx5e_params *params, struct mlx5e_xsk_param *xsk);
bool mlx5e_xdp_handle(struct mlx5e_rq *rq, struct mlx5e_dma_info *di,
@@ -114,6 +134,30 @@ static inline void mlx5e_xdp_update_inline_state(struct mlx5e_xdpsq *sq)
session->inline_on = 1;
}
+static inline bool
+mlx5e_xdp_no_room_for_inline_pkt(struct mlx5e_xdp_mpwqe *session)
+{
+ return session->inline_on &&
+ session->ds_count + MLX5E_XDP_INLINE_WQE_MAX_DS_CNT > MLX5E_XDP_MPW_MAX_NUM_DS;
+}
+
+static inline void
+mlx5e_fill_xdpsq_frag_edge(struct mlx5e_xdpsq *sq, struct mlx5_wq_cyc *wq,
+ u16 pi, u16 nnops)
+{
+ struct mlx5e_xdp_wqe_info *edge_wi, *wi = &sq->db.wqe_info[pi];
+
+ edge_wi = wi + nnops;
+ /* fill sq frag edge with nops to avoid wqe wrapping two pages */
+ for (; wi < edge_wi; wi++) {
+ wi->num_wqebbs = 1;
+ wi->num_pkts = 0;
+ mlx5e_post_nop(wq, sq->sqn, &sq->pc);
+ }
+
+ sq->stats->nops += nnops;
+}
+
static inline void
mlx5e_xdp_mpwqe_add_dseg(struct mlx5e_xdpsq *sq,
struct mlx5e_xdp_xmit_data *xdptxd,
@@ -126,20 +170,12 @@ mlx5e_xdp_mpwqe_add_dseg(struct mlx5e_xdpsq *sq,
session->pkt_count++;
-#define MLX5E_XDP_INLINE_WQE_SZ_THRSD (256 - sizeof(struct mlx5_wqe_inline_seg))
-
if (session->inline_on && dma_len <= MLX5E_XDP_INLINE_WQE_SZ_THRSD) {
struct mlx5_wqe_inline_seg *inline_dseg =
(struct mlx5_wqe_inline_seg *)dseg;
u16 ds_len = sizeof(*inline_dseg) + dma_len;
u16 ds_cnt = DIV_ROUND_UP(ds_len, MLX5_SEND_WQE_DS);
- if (unlikely(session->ds_count + ds_cnt > session->max_ds_count)) {
- /* Not enough space for inline wqe, send with memory pointer */
- session->complete = true;
- goto no_inline;
- }
-
inline_dseg->byte_count = cpu_to_be32(dma_len | MLX5_INLINE_SEG);
memcpy(inline_dseg->data, xdptxd->data, dma_len);
@@ -148,21 +184,23 @@ mlx5e_xdp_mpwqe_add_dseg(struct mlx5e_xdpsq *sq,
return;
}
-no_inline:
dseg->addr = cpu_to_be64(xdptxd->dma_addr);
dseg->byte_count = cpu_to_be32(dma_len);
dseg->lkey = sq->mkey_be;
session->ds_count++;
}
-static inline void mlx5e_xdpsq_fetch_wqe(struct mlx5e_xdpsq *sq,
- struct mlx5e_tx_wqe **wqe)
+static inline struct mlx5e_tx_wqe *
+mlx5e_xdpsq_fetch_wqe(struct mlx5e_xdpsq *sq, u16 *pi)
{
struct mlx5_wq_cyc *wq = &sq->wq;
- u16 pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
+ struct mlx5e_tx_wqe *wqe;
+
+ *pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
+ wqe = mlx5_wq_cyc_get_wqe(wq, *pi);
+ memset(wqe, 0, sizeof(*wqe));
- *wqe = mlx5_wq_cyc_get_wqe(wq, pi);
- memset(*wqe, 0, sizeof(**wqe));
+ return wqe;
}
static inline void
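
mlx5e_fill_xdpsq_frag_edge() exists so a multi-WQEBB WQE never wraps across a fragment (page) boundary: if the contiguous room left in the fragment is too small, it is burned with one-WQEBB NOPs first. A small ring simulation of that decision, with arbitrary fragment and session sizes:

#include <stdio.h>

#define FRAG_WQEBBS 8	/* WQEBBs per page-sized fragment (illustrative) */

static unsigned int pc;	/* producer counter, in WQEBBs */

static void post_nop(void) { pc++; }

/* Before starting an N-WQEBB session, make sure it fits in the current
 * fragment; otherwise pad the remainder with 1-WQEBB NOPs.
 */
static void start_session(unsigned int wqebbs)
{
	unsigned int contig = FRAG_WQEBBS - (pc % FRAG_WQEBBS);

	if (contig < wqebbs) {
		printf("filling %u WQEBBs with NOPs at pc=%u\n", contig, pc);
		while (contig--)
			post_nop();
	}
	printf("session of %u WQEBBs starts at pc=%u\n", wqebbs, pc);
	pc += wqebbs;
}

int main(void)
{
	start_session(5);	/* fits in fragment 0 */
	start_session(5);	/* 3 WQEBBs left -> NOP-fill, start in frag 1 */
	return 0;
}
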
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c
index 7f78c004d12f..d360750b25b7 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c
@@ -60,24 +60,28 @@ int mlx5e_open_xsk(struct mlx5e_priv *priv, struct mlx5e_params *params,
struct mlx5e_xsk_param *xsk, struct xdp_umem *umem,
struct mlx5e_channel *c)
{
- struct mlx5e_channel_param cparam = {};
+ struct mlx5e_channel_param *cparam;
struct dim_cq_moder icocq_moder = {};
int err;
if (!mlx5e_validate_xsk_param(params, xsk, priv->mdev))
return -EINVAL;
- mlx5e_build_xsk_cparam(priv, params, xsk, &cparam);
+ cparam = kvzalloc(sizeof(*cparam), GFP_KERNEL);
+ if (!cparam)
+ return -ENOMEM;
- err = mlx5e_open_cq(c, params->rx_cq_moderation, &cparam.rx_cq, &c->xskrq.cq);
+ mlx5e_build_xsk_cparam(priv, params, xsk, cparam);
+
+ err = mlx5e_open_cq(c, params->rx_cq_moderation, &cparam->rx_cq, &c->xskrq.cq);
if (unlikely(err))
- return err;
+ goto err_free_cparam;
- err = mlx5e_open_rq(c, params, &cparam.rq, xsk, umem, &c->xskrq);
+ err = mlx5e_open_rq(c, params, &cparam->rq, xsk, umem, &c->xskrq);
if (unlikely(err))
goto err_close_rx_cq;
- err = mlx5e_open_cq(c, params->tx_cq_moderation, &cparam.tx_cq, &c->xsksq.cq);
+ err = mlx5e_open_cq(c, params->tx_cq_moderation, &cparam->tx_cq, &c->xsksq.cq);
if (unlikely(err))
goto err_close_rq;
@@ -87,21 +91,23 @@ int mlx5e_open_xsk(struct mlx5e_priv *priv, struct mlx5e_params *params,
* is disabled and then reenabled, but the SQ continues receiving CQEs
* from the old UMEM.
*/
- err = mlx5e_open_xdpsq(c, params, &cparam.xdp_sq, umem, &c->xsksq, true);
+ err = mlx5e_open_xdpsq(c, params, &cparam->xdp_sq, umem, &c->xsksq, true);
if (unlikely(err))
goto err_close_tx_cq;
- err = mlx5e_open_cq(c, icocq_moder, &cparam.icosq_cq, &c->xskicosq.cq);
+ err = mlx5e_open_cq(c, icocq_moder, &cparam->icosq_cq, &c->xskicosq.cq);
if (unlikely(err))
goto err_close_sq;
/* Create a dedicated SQ for posting NOPs whenever we need an IRQ to be
* triggered and NAPI to be called on the correct CPU.
*/
- err = mlx5e_open_icosq(c, params, &cparam.icosq, &c->xskicosq);
+ err = mlx5e_open_icosq(c, params, &cparam->icosq, &c->xskicosq);
if (unlikely(err))
goto err_close_icocq;
+ kvfree(cparam);
+
spin_lock_init(&c->xskicosq_lock);
set_bit(MLX5E_CHANNEL_STATE_XSK, c->state);
@@ -123,6 +129,9 @@ err_close_rq:
err_close_rx_cq:
mlx5e_close_cq(&c->xskrq.cq);
+err_free_cparam:
+ kvfree(cparam);
+
return err;
}
@@ -141,6 +150,7 @@ void mlx5e_close_xsk(struct mlx5e_channel *c)
void mlx5e_activate_xsk(struct mlx5e_channel *c)
{
+ mlx5e_activate_icosq(&c->xskicosq);
set_bit(MLX5E_RQ_STATE_ENABLED, &c->xskrq.state);
/* TX queue is created active. */
@@ -153,6 +163,7 @@ void mlx5e_deactivate_xsk(struct mlx5e_channel *c)
{
mlx5e_deactivate_rq(&c->xskrq);
/* TX queue is disabled on close. */
+ mlx5e_deactivate_icosq(&c->xskicosq);
}
static int mlx5e_redirect_xsk_rqt(struct mlx5e_priv *priv, u16 ix, u32 rqn)
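
mlx5e_open_xsk() moves the large channel-param struct off the stack onto the heap, which adds one more label to the goto-based unwind so every early exit frees it. The canonical shape of that error handling, reduced to user space with placeholder resources:

#include <stdlib.h>
#include <stdio.h>

struct cparam { char big[8192]; };	/* too large for a kernel stack */

static int open_cq(void) { return 0; }
static void close_cq(void) { }
static int open_rq(void) { return 0; }

int open_channel(void)
{
	struct cparam *cparam;
	int err;

	cparam = calloc(1, sizeof(*cparam));	/* kvzalloc() in the kernel */
	if (!cparam)
		return -1;			/* -ENOMEM */

	err = open_cq();
	if (err)
		goto err_free_cparam;

	err = open_rq();
	if (err)
		goto err_close_cq;

	free(cparam);	/* only needed during setup, free on success too */
	return 0;

err_close_cq:
	close_cq();
err_free_cparam:
	free(cparam);
	return err;
}

int main(void)
{
	printf("open_channel: %d\n", open_channel());
	return 0;
}
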
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/tx.c
index 35e188cf4ea4..fd2c75b4b519 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/tx.c
@@ -26,6 +26,13 @@ int mlx5e_xsk_async_xmit(struct net_device *dev, u32 qid)
return -ENXIO;
if (!napi_if_scheduled_mark_missed(&c->napi)) {
+ /* To avoid WQE overrun, don't post a NOP if XSKICOSQ is not
+ * active and not polled by NAPI. Return 0, because the upcoming
+ * activate will trigger the IRQ for us.
+ */
+ if (unlikely(!test_bit(MLX5E_SQ_STATE_ENABLED, &c->xskicosq.state)))
+ return 0;
+
spin_lock(&c->xskicosq_lock);
mlx5e_trigger_irq(&c->xskicosq);
spin_unlock(&c->xskicosq_lock);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c
index 7833ddef0427..e5222d17df35 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c
@@ -408,7 +408,7 @@ struct sk_buff *mlx5e_ktls_handle_tx_skb(struct net_device *netdev,
goto out;
tls_ctx = tls_get_ctx(skb->sk);
- if (unlikely(WARN_ON_ONCE(tls_ctx->netdev != netdev)))
+ if (WARN_ON_ONCE(tls_ctx->netdev != netdev))
goto err_out;
priv_tx = mlx5e_get_ktls_tx_priv_ctx(tls_ctx);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_common.c b/drivers/net/ethernet/mellanox/mlx5/core/en_common.c
index 1539cf3de5dc..f7890e0ce96c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_common.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_common.c
@@ -180,15 +180,3 @@ out:
return err;
}
-
-u8 mlx5e_params_calculate_tx_min_inline(struct mlx5_core_dev *mdev)
-{
- u8 min_inline_mode;
-
- mlx5_query_min_inline(mdev, &min_inline_mode);
- if (min_inline_mode == MLX5_INLINE_MODE_NONE &&
- !MLX5_CAP_ETH(mdev, wqe_vlan_insert))
- min_inline_mode = MLX5_INLINE_MODE_L2;
-
- return min_inline_mode;
-}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c b/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
index 8dd31b5c740c..01f2918063af 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
@@ -1101,7 +1101,7 @@ void mlx5e_dcbnl_delete_app(struct mlx5e_priv *priv)
static void mlx5e_trust_update_tx_min_inline_mode(struct mlx5e_priv *priv,
struct mlx5e_params *params)
{
- params->tx_min_inline_mode = mlx5e_params_calculate_tx_min_inline(priv->mdev);
+ mlx5_query_min_inline(priv->mdev, &params->tx_min_inline_mode);
if (priv->dcbx_dp.trust_state == MLX5_QPTS_TRUST_DSCP &&
params->tx_min_inline_mode == MLX5_INLINE_MODE_L2)
params->tx_min_inline_mode = MLX5_INLINE_MODE_IP;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
index 20e628c907e5..c5a9c20d7f00 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
@@ -1431,7 +1431,7 @@ static __u32 mlx5e_get_wol_supported(struct mlx5_core_dev *mdev)
return ret;
}
-static __u32 mlx5e_refomrat_wol_mode_mlx5_to_linux(u8 mode)
+static __u32 mlx5e_reformat_wol_mode_mlx5_to_linux(u8 mode)
{
__u32 ret = 0;
@@ -1459,7 +1459,7 @@ static __u32 mlx5e_refomrat_wol_mode_mlx5_to_linux(u8 mode)
return ret;
}
-static u8 mlx5e_refomrat_wol_mode_linux_to_mlx5(__u32 mode)
+static u8 mlx5e_reformat_wol_mode_linux_to_mlx5(__u32 mode)
{
u8 ret = 0;
@@ -1505,7 +1505,7 @@ static void mlx5e_get_wol(struct net_device *netdev,
if (err)
return;
- wol->wolopts = mlx5e_refomrat_wol_mode_mlx5_to_linux(mlx5_wol_mode);
+ wol->wolopts = mlx5e_reformat_wol_mode_mlx5_to_linux(mlx5_wol_mode);
}
static int mlx5e_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
@@ -1521,7 +1521,7 @@ static int mlx5e_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
if (wol->wolopts & ~wol_supported)
return -EINVAL;
- mlx5_wol_mode = mlx5e_refomrat_wol_mode_linux_to_mlx5(wol->wolopts);
+ mlx5_wol_mode = mlx5e_reformat_wol_mode_linux_to_mlx5(wol->wolopts);
return mlx5_set_port_wol(mdev, mlx5_wol_mode);
}
@@ -1958,21 +1958,27 @@ static u32 mlx5e_get_priv_flags(struct net_device *netdev)
return priv->channels.params.pflags;
}
-#ifndef CONFIG_MLX5_EN_RXNFC
-/* When CONFIG_MLX5_EN_RXNFC=n we only support ETHTOOL_GRXRINGS
- * otherwise this function will be defined from en_fs_ethtool.c
- */
static int mlx5e_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info, u32 *rule_locs)
{
struct mlx5e_priv *priv = netdev_priv(dev);
- if (info->cmd != ETHTOOL_GRXRINGS)
- return -EOPNOTSUPP;
- /* ring_count is needed by ethtool -x */
- info->data = priv->channels.params.num_channels;
- return 0;
+ /* ETHTOOL_GRXRINGS is needed by ethtool -x, which is not part
+ * of rxnfc. We keep this logic out of mlx5e_ethtool_get_rxnfc
+ * to avoid breaking "ethtool -x" when mlx5e_ethtool_get_rxnfc
+ * is compiled out via CONFIG_MLX5_EN_RXNFC=n.
+ */
+ if (info->cmd == ETHTOOL_GRXRINGS) {
+ info->data = priv->channels.params.num_channels;
+ return 0;
+ }
+
+ return mlx5e_ethtool_get_rxnfc(dev, info, rule_locs);
+}
+
+static int mlx5e_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
+{
+ return mlx5e_ethtool_set_rxnfc(dev, cmd);
}
-#endif
const struct ethtool_ops mlx5e_ethtool_ops = {
.get_drvinfo = mlx5e_get_drvinfo,
@@ -1993,9 +1999,7 @@ const struct ethtool_ops mlx5e_ethtool_ops = {
.get_rxfh = mlx5e_get_rxfh,
.set_rxfh = mlx5e_set_rxfh,
.get_rxnfc = mlx5e_get_rxnfc,
-#ifdef CONFIG_MLX5_EN_RXNFC
.set_rxnfc = mlx5e_set_rxnfc,
-#endif
.get_tunable = mlx5e_get_tunable,
.set_tunable = mlx5e_set_tunable,
.get_pauseparam = mlx5e_get_pauseparam,
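
The ethtool rework keeps the ETHTOOL_GRXRINGS answer in the always-built wrapper and forwards everything else to mlx5e_ethtool_get_rxnfc, whose stub returns -EOPNOTSUPP when CONFIG_MLX5_EN_RXNFC=n. A sketch of that dispatch split; command values and the error code are illustrative:

#include <stdio.h>

enum { GRXRINGS, GRXCLSRLCNT };

static int backend_get_rxnfc(int cmd)
{
	/* compiled out -> a stub returning -EOPNOTSUPP (here: -95) */
	return cmd == GRXCLSRLCNT ? 0 : -95;
}

/* Always-built wrapper: answer the ring-count query locally, delegate
 * the real rxnfc commands to the (possibly stubbed) backend.
 */
static int get_rxnfc(int cmd, unsigned int *data, unsigned int num_channels)
{
	if (cmd == GRXRINGS) {
		*data = num_channels;
		return 0;
	}
	return backend_get_rxnfc(cmd);
}

int main(void)
{
	unsigned int rings = 0;
	int err = get_rxnfc(GRXRINGS, &rings, 8);

	printf("err=%d rings=%u\n", err, rings);
	return 0;
}
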
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
index 76cc10e44080..15b7f0f1427c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
@@ -747,8 +747,55 @@ static struct mlx5e_etype_proto ttc_tunnel_rules[] = {
.etype = ETH_P_IPV6,
.proto = IPPROTO_GRE,
},
+ [MLX5E_TT_IPV4_IPIP] = {
+ .etype = ETH_P_IP,
+ .proto = IPPROTO_IPIP,
+ },
+ [MLX5E_TT_IPV6_IPIP] = {
+ .etype = ETH_P_IPV6,
+ .proto = IPPROTO_IPIP,
+ },
+ [MLX5E_TT_IPV4_IPV6] = {
+ .etype = ETH_P_IP,
+ .proto = IPPROTO_IPV6,
+ },
+ [MLX5E_TT_IPV6_IPV6] = {
+ .etype = ETH_P_IPV6,
+ .proto = IPPROTO_IPV6,
+ },
};
+bool mlx5e_tunnel_proto_supported(struct mlx5_core_dev *mdev, u8 proto_type)
+{
+ switch (proto_type) {
+ case IPPROTO_GRE:
+ return MLX5_CAP_ETH(mdev, tunnel_stateless_gre);
+ case IPPROTO_IPIP:
+ case IPPROTO_IPV6:
+ return MLX5_CAP_ETH(mdev, tunnel_stateless_ip_over_ip);
+ default:
+ return false;
+ }
+}
+
+bool mlx5e_any_tunnel_proto_supported(struct mlx5_core_dev *mdev)
+{
+ int tt;
+
+ for (tt = 0; tt < MLX5E_NUM_TUNNEL_TT; tt++) {
+ if (mlx5e_tunnel_proto_supported(mdev, ttc_tunnel_rules[tt].proto))
+ return true;
+ }
+ return false;
+}
+
+bool mlx5e_tunnel_inner_ft_supported(struct mlx5_core_dev *mdev)
+{
+ return (mlx5e_any_tunnel_proto_supported(mdev) &&
+ MLX5_CAP_FLOWTABLE_NIC_RX(mdev, ft_field_support.inner_ip_version));
+}
+
static u8 mlx5e_etype_to_ipv(u16 ethertype)
{
if (ethertype == ETH_P_IP)
@@ -838,6 +885,9 @@ static int mlx5e_generate_ttc_table_rules(struct mlx5e_priv *priv,
dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
dest.ft = params->inner_ttc->ft.t;
for (tt = 0; tt < MLX5E_NUM_TUNNEL_TT; tt++) {
+ if (!mlx5e_tunnel_proto_supported(priv->mdev,
+ ttc_tunnel_rules[tt].proto))
+ continue;
rules[tt] = mlx5e_generate_ttc_rule(priv, ft, &dest,
ttc_tunnel_rules[tt].etype,
ttc_tunnel_rules[tt].proto);
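
The en_fs.c change gates each tunnel steering rule on a device capability query, skipping unsupported inner protocols instead of failing table setup. The gating loop in miniature, with an invented capability check:

#include <stdio.h>

enum { PROTO_GRE = 47, PROTO_IPIP = 4, PROTO_IPV6 = 41 };

struct tunnel_rule { const char *name; int proto; };

static const struct tunnel_rule rules[] = {
	{ "gre",    PROTO_GRE  },
	{ "ipip",   PROTO_IPIP },
	{ "ip6ip6", PROTO_IPV6 },
};

/* Pretend capability register: GRE offload present, IP-in-IP absent. */
static int proto_supported(int proto)
{
	return proto == PROTO_GRE;
}

int main(void)
{
	unsigned int i;

	for (i = 0; i < sizeof(rules) / sizeof(rules[0]); i++) {
		if (!proto_supported(rules[i].proto)) {
			printf("skipping %s: no HW support\n", rules[i].name);
			continue;	/* don't fail the whole table */
		}
		printf("installing rule for %s\n", rules[i].name);
	}
	return 0;
}
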
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c
index 94304abc49e9..eed7101e8bb7 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c
@@ -888,10 +888,10 @@ static int mlx5e_get_rss_hash_opt(struct mlx5e_priv *priv,
return 0;
}
-int mlx5e_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
+int mlx5e_ethtool_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
{
- int err = 0;
struct mlx5e_priv *priv = netdev_priv(dev);
+ int err = 0;
switch (cmd->cmd) {
case ETHTOOL_SRXCLSRLINS:
@@ -911,16 +911,13 @@ int mlx5e_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
return err;
}
-int mlx5e_get_rxnfc(struct net_device *dev,
- struct ethtool_rxnfc *info, u32 *rule_locs)
+int mlx5e_ethtool_get_rxnfc(struct net_device *dev,
+ struct ethtool_rxnfc *info, u32 *rule_locs)
{
struct mlx5e_priv *priv = netdev_priv(dev);
int err = 0;
switch (info->cmd) {
- case ETHTOOL_GRXRINGS:
- info->data = priv->channels.params.num_channels;
- break;
case ETHTOOL_GRXCLSRLCNT:
info->rule_cnt = priv->fs.ethtool.tot_num_rules;
break;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index 9d5f6e56188f..dadadf221087 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -56,12 +56,13 @@
#include "en/xdp.h"
#include "lib/eq.h"
#include "en/monitor_stats.h"
-#include "en/reporter.h"
+#include "en/health.h"
#include "en/params.h"
#include "en/xsk/umem.h"
#include "en/xsk/setup.h"
#include "en/xsk/rx.h"
#include "en/xsk/tx.h"
+#include "en/hv_vhca_stats.h"
bool mlx5e_check_fragmented_striding_rq_cap(struct mlx5_core_dev *mdev)
@@ -247,26 +248,6 @@ static inline void mlx5e_build_umr_wqe(struct mlx5e_rq *rq,
ucseg->mkey_mask = cpu_to_be64(MLX5_MKEY_MASK_FREE);
}
-static u32 mlx5e_rqwq_get_size(struct mlx5e_rq *rq)
-{
- switch (rq->wq_type) {
- case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
- return mlx5_wq_ll_get_size(&rq->mpwqe.wq);
- default:
- return mlx5_wq_cyc_get_size(&rq->wqe.wq);
- }
-}
-
-static u32 mlx5e_rqwq_get_cur_sz(struct mlx5e_rq *rq)
-{
- switch (rq->wq_type) {
- case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
- return rq->mpwqe.wq.cur_sz;
- default:
- return rq->wqe.wq.cur_sz;
- }
-}
-
static int mlx5e_rq_alloc_mpwqe_info(struct mlx5e_rq *rq,
struct mlx5e_channel *c)
{
@@ -382,6 +363,13 @@ static void mlx5e_free_di_list(struct mlx5e_rq *rq)
kvfree(rq->wqe.di);
}
+static void mlx5e_rq_err_cqe_work(struct work_struct *recover_work)
+{
+ struct mlx5e_rq *rq = container_of(recover_work, struct mlx5e_rq, recover_work);
+
+ mlx5e_reporter_rq_cqe_err(rq);
+}
+
static int mlx5e_alloc_rq(struct mlx5e_channel *c,
struct mlx5e_params *params,
struct mlx5e_xsk_param *xsk,
@@ -418,6 +406,7 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c,
rq->stats = &c->priv->channel_stats[c->ix].xskrq;
else
rq->stats = &c->priv->channel_stats[c->ix].rq;
+ INIT_WORK(&rq->recover_work, mlx5e_rq_err_cqe_work);
rq->xdp_prog = params->xdp_prog ? bpf_prog_inc(params->xdp_prog) : NULL;
if (IS_ERR(rq->xdp_prog)) {
@@ -720,8 +709,7 @@ static int mlx5e_create_rq(struct mlx5e_rq *rq,
return err;
}
-static int mlx5e_modify_rq_state(struct mlx5e_rq *rq, int curr_state,
- int next_state)
+int mlx5e_modify_rq_state(struct mlx5e_rq *rq, int curr_state, int next_state)
{
struct mlx5_core_dev *mdev = rq->mdev;
@@ -829,10 +817,11 @@ int mlx5e_wait_for_min_rx_wqes(struct mlx5e_rq *rq, int wait_time)
netdev_warn(c->netdev, "Failed to get min RX wqes on Channel[%d] RQN[0x%x] wq cur_sz(%d) min_rx_wqes(%d)\n",
c->ix, rq->rqn, mlx5e_rqwq_get_cur_sz(rq), min_wqes);
+ mlx5e_reporter_rx_timeout(rq);
return -ETIMEDOUT;
}
-static void mlx5e_free_rx_descs(struct mlx5e_rq *rq)
+void mlx5e_free_rx_descs(struct mlx5e_rq *rq)
{
__be16 wqe_ix_be;
u16 wqe_ix;
@@ -911,7 +900,7 @@ err_free_rq:
return err;
}
-static void mlx5e_activate_rq(struct mlx5e_rq *rq)
+void mlx5e_activate_rq(struct mlx5e_rq *rq)
{
set_bit(MLX5E_RQ_STATE_ENABLED, &rq->state);
mlx5e_trigger_irq(&rq->channel->icosq);
@@ -926,6 +915,8 @@ void mlx5e_deactivate_rq(struct mlx5e_rq *rq)
void mlx5e_close_rq(struct mlx5e_rq *rq)
{
cancel_work_sync(&rq->dim.work);
+ cancel_work_sync(&rq->channel->icosq.recover_work);
+ cancel_work_sync(&rq->recover_work);
mlx5e_destroy_rq(rq);
mlx5e_free_rx_descs(rq);
mlx5e_free_rq(rq);
@@ -1042,6 +1033,14 @@ static int mlx5e_alloc_icosq_db(struct mlx5e_icosq *sq, int numa)
return 0;
}
+static void mlx5e_icosq_err_cqe_work(struct work_struct *recover_work)
+{
+ struct mlx5e_icosq *sq = container_of(recover_work, struct mlx5e_icosq,
+ recover_work);
+
+ mlx5e_reporter_icosq_cqe_err(sq);
+}
+
static int mlx5e_alloc_icosq(struct mlx5e_channel *c,
struct mlx5e_sq_param *param,
struct mlx5e_icosq *sq)
@@ -1064,6 +1063,8 @@ static int mlx5e_alloc_icosq(struct mlx5e_channel *c,
if (err)
goto err_sq_wq_destroy;
+ INIT_WORK(&sq->recover_work, mlx5e_icosq_err_cqe_work);
+
return 0;
err_sq_wq_destroy:
@@ -1130,6 +1131,8 @@ static int mlx5e_alloc_txqsq(struct mlx5e_channel *c,
sq->stats = &c->priv->channel_stats[c->ix].sq[tc];
sq->stop_room = MLX5E_SQ_STOP_ROOM;
INIT_WORK(&sq->recover_work, mlx5e_tx_err_cqe_work);
+ if (!MLX5_CAP_ETH(mdev, wqe_vlan_insert))
+ set_bit(MLX5E_SQ_STATE_VLAN_NEED_L2_INLINE, &sq->state);
if (MLX5_IPSEC_DEV(c->priv->mdev))
set_bit(MLX5E_SQ_STATE_IPSEC, &sq->state);
if (mlx5_accel_is_tls_device(c->priv->mdev)) {
@@ -1377,7 +1380,7 @@ static void mlx5e_tx_err_cqe_work(struct work_struct *recover_work)
struct mlx5e_txqsq *sq = container_of(recover_work, struct mlx5e_txqsq,
recover_work);
- mlx5e_tx_reporter_err_cqe(sq);
+ mlx5e_reporter_tx_err_cqe(sq);
}
int mlx5e_open_icosq(struct mlx5e_channel *c, struct mlx5e_params *params,
@@ -1393,7 +1396,6 @@ int mlx5e_open_icosq(struct mlx5e_channel *c, struct mlx5e_params *params,
csp.cqn = sq->cq.mcq.cqn;
csp.wq_ctrl = &sq->wq_ctrl;
csp.min_inline_mode = params->tx_min_inline_mode;
- set_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
err = mlx5e_create_sq_rdy(c->mdev, param, &csp, &sq->sqn);
if (err)
goto err_free_icosq;
@@ -1407,12 +1409,22 @@ err_free_icosq:
return err;
}
-void mlx5e_close_icosq(struct mlx5e_icosq *sq)
+void mlx5e_activate_icosq(struct mlx5e_icosq *icosq)
{
- struct mlx5e_channel *c = sq->channel;
+ set_bit(MLX5E_SQ_STATE_ENABLED, &icosq->state);
+}
- clear_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
+void mlx5e_deactivate_icosq(struct mlx5e_icosq *icosq)
+{
+ struct mlx5e_channel *c = icosq->channel;
+
+ clear_bit(MLX5E_SQ_STATE_ENABLED, &icosq->state);
napi_synchronize(&c->napi);
+}
+
+void mlx5e_close_icosq(struct mlx5e_icosq *sq)
+{
+ struct mlx5e_channel *c = sq->channel;
mlx5e_destroy_sq(c->mdev, sq->sqn);
mlx5e_free_icosq(sq);
@@ -1430,7 +1442,7 @@ int mlx5e_open_xdpsq(struct mlx5e_channel *c, struct mlx5e_params *params,
return err;
csp.tis_lst_sz = 1;
- csp.tisn = c->priv->tisn[0]; /* tc = 0 */
+ csp.tisn = c->priv->tisn[c->lag_port][0]; /* tc = 0 */
csp.cqn = sq->cq.mcq.cqn;
csp.wq_ctrl = &sq->wq_ctrl;
csp.min_inline_mode = sq->min_inline_mode;
@@ -1680,7 +1692,7 @@ static int mlx5e_open_sqs(struct mlx5e_channel *c,
for (tc = 0; tc < params->num_tc; tc++) {
int txq_ix = c->ix + tc * priv->max_nch;
- err = mlx5e_open_txqsq(c, c->priv->tisn[tc], txq_ix,
+ err = mlx5e_open_txqsq(c, c->priv->tisn[c->lag_port][tc], txq_ix,
params, &cparam->sq, &c->sq[tc], tc);
if (err)
goto err_close_sqs;
@@ -1914,6 +1926,13 @@ static void mlx5e_close_queues(struct mlx5e_channel *c)
mlx5e_close_cq(&c->icosq.cq);
}
+static u8 mlx5e_enumerate_lag_port(struct mlx5_core_dev *mdev, int ix)
+{
+ u16 port_aff_bias = mlx5_core_is_pf(mdev) ? 0 : MLX5_CAP_GEN(mdev, vhca_id);
+
+ return (ix + port_aff_bias) % mlx5e_get_num_lag_ports(mdev);
+}
+
static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
struct mlx5e_params *params,
struct mlx5e_channel_param *cparam,
@@ -1948,6 +1967,7 @@ static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
c->xdp = !!params->xdp_prog;
c->stats = &priv->channel_stats[ix].ch;
c->irq_desc = irq_to_desc(irq);
+ c->lag_port = mlx5e_enumerate_lag_port(priv->mdev, ix);
err = mlx5e_alloc_xps_cpumask(c, params);
if (err)
@@ -1989,6 +2009,7 @@ static void mlx5e_activate_channel(struct mlx5e_channel *c)
for (tc = 0; tc < c->num_tc; tc++)
mlx5e_activate_txqsq(&c->sq[tc]);
+ mlx5e_activate_icosq(&c->icosq);
mlx5e_activate_rq(&c->rq);
netif_set_xps_queue(c->netdev, c->xps_cpumask, c->ix);
@@ -2004,6 +2025,7 @@ static void mlx5e_deactivate_channel(struct mlx5e_channel *c)
mlx5e_deactivate_xsk(c);
mlx5e_deactivate_rq(&c->rq);
+ mlx5e_deactivate_icosq(&c->icosq);
for (tc = 0; tc < c->num_tc; tc++)
mlx5e_deactivate_txqsq(&c->sq[tc]);
}
@@ -2321,10 +2343,7 @@ int mlx5e_open_channels(struct mlx5e_priv *priv,
goto err_close_channels;
}
- if (!IS_ERR_OR_NULL(priv->tx_reporter))
- devlink_health_reporter_state_update(priv->tx_reporter,
- DEVLINK_HEALTH_REPORTER_STATE_HEALTHY);
-
+ mlx5e_health_channels_update(priv);
kvfree(cparam);
return 0;
@@ -3168,40 +3187,58 @@ void mlx5e_destroy_tis(struct mlx5_core_dev *mdev, u32 tisn)
mlx5_core_destroy_tis(mdev, tisn);
}
+void mlx5e_destroy_tises(struct mlx5e_priv *priv)
+{
+ int tc, i;
+
+ for (i = 0; i < mlx5e_get_num_lag_ports(priv->mdev); i++)
+ for (tc = 0; tc < priv->profile->max_tc; tc++)
+ mlx5e_destroy_tis(priv->mdev, priv->tisn[i][tc]);
+}
+
+static bool mlx5e_lag_should_assign_affinity(struct mlx5_core_dev *mdev)
+{
+ return MLX5_CAP_GEN(mdev, lag_tx_port_affinity) && mlx5e_get_num_lag_ports(mdev) > 1;
+}
+
int mlx5e_create_tises(struct mlx5e_priv *priv)
{
+ int tc, i;
int err;
- int tc;
- for (tc = 0; tc < priv->profile->max_tc; tc++) {
- u32 in[MLX5_ST_SZ_DW(create_tis_in)] = {};
- void *tisc;
+ for (i = 0; i < mlx5e_get_num_lag_ports(priv->mdev); i++) {
+ for (tc = 0; tc < priv->profile->max_tc; tc++) {
+ u32 in[MLX5_ST_SZ_DW(create_tis_in)] = {};
+ void *tisc;
- tisc = MLX5_ADDR_OF(create_tis_in, in, ctx);
+ tisc = MLX5_ADDR_OF(create_tis_in, in, ctx);
- MLX5_SET(tisc, tisc, prio, tc << 1);
+ MLX5_SET(tisc, tisc, prio, tc << 1);
- err = mlx5e_create_tis(priv->mdev, in, &priv->tisn[tc]);
- if (err)
- goto err_close_tises;
+ if (mlx5e_lag_should_assign_affinity(priv->mdev))
+ MLX5_SET(tisc, tisc, lag_tx_port_affinity, i + 1);
+
+ err = mlx5e_create_tis(priv->mdev, in, &priv->tisn[i][tc]);
+ if (err)
+ goto err_close_tises;
+ }
}
return 0;
err_close_tises:
- for (tc--; tc >= 0; tc--)
- mlx5e_destroy_tis(priv->mdev, priv->tisn[tc]);
+ for (; i >= 0; i--) {
+ for (tc--; tc >= 0; tc--)
+ mlx5e_destroy_tis(priv->mdev, priv->tisn[i][tc]);
+ tc = priv->profile->max_tc;
+ }
return err;
}
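
The error path in the hunk above has to unwind a two-dimensional allocation: first the partially created TC row of the failing port, then every fully created row before it. A small self-contained C model of that unwind order, with hypothetical create()/destroy() stand-ins and an assumed failure point:

#include <stdio.h>

#define NUM_PORTS 2
#define MAX_TC 3

/* Illustrative stand-ins; assume creation fails at port 1, tc 1. */
static int create(int i, int tc) { return (i == 1 && tc == 1) ? -1 : 0; }
static void destroy(int i, int tc) { printf("destroy [%d][%d]\n", i, tc); }

int main(void)
{
	int i, tc;

	for (i = 0; i < NUM_PORTS; i++)
		for (tc = 0; tc < MAX_TC; tc++)
			if (create(i, tc))
				goto unwind;
	return 0;

unwind:
	/* Same shape as the hunk above: finish the partial row first
	 * (tc - 1 .. 0), then tear down every earlier complete row.
	 */
	for (; i >= 0; i--) {
		for (tc--; tc >= 0; tc--)
			destroy(i, tc);
		tc = MAX_TC;
	}
	return 1;
}
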
static void mlx5e_cleanup_nic_tx(struct mlx5e_priv *priv)
{
- int tc;
-
- mlx5e_tx_reporter_destroy(priv);
- for (tc = 0; tc < priv->profile->max_tc; tc++)
- mlx5e_destroy_tis(priv->mdev, priv->tisn[tc]);
+ mlx5e_destroy_tises(priv);
}
static void mlx5e_build_indir_tir_ctx_common(struct mlx5e_priv *priv,
@@ -3422,7 +3459,7 @@ out:
#ifdef CONFIG_MLX5_ESWITCH
static int mlx5e_setup_tc_cls_flower(struct mlx5e_priv *priv,
struct flow_cls_offload *cls_flower,
- int flags)
+ unsigned long flags)
{
switch (cls_flower->command) {
case FLOW_CLS_REPLACE:
@@ -3442,12 +3479,12 @@ static int mlx5e_setup_tc_cls_flower(struct mlx5e_priv *priv,
static int mlx5e_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
void *cb_priv)
{
+ unsigned long flags = MLX5_TC_FLAG(INGRESS) | MLX5_TC_FLAG(NIC_OFFLOAD);
struct mlx5e_priv *priv = cb_priv;
switch (type) {
case TC_SETUP_CLSFLOWER:
- return mlx5e_setup_tc_cls_flower(priv, type_data, MLX5E_TC_INGRESS |
- MLX5E_TC_NIC_OFFLOAD);
+ return mlx5e_setup_tc_cls_flower(priv, type_data, flags);
default:
return -EOPNOTSUPP;
}
@@ -3463,11 +3500,15 @@ static int mlx5e_setup_tc(struct net_device *dev, enum tc_setup_type type,
switch (type) {
#ifdef CONFIG_MLX5_ESWITCH
- case TC_SETUP_BLOCK:
+ case TC_SETUP_BLOCK: {
+ struct flow_block_offload *f = type_data;
+
+ f->unlocked_driver_cb = true;
return flow_block_cb_setup_simple(type_data,
&mlx5e_block_cb_list,
mlx5e_setup_tc_block_cb,
priv, priv, true);
+ }
#endif
case TC_SETUP_QDISC_MQPRIO:
return mlx5e_setup_tc_mqprio(priv, type_data);
@@ -3640,7 +3681,7 @@ static int set_feature_tc_num_filters(struct net_device *netdev, bool enable)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
- if (!enable && mlx5e_tc_num_filters(priv, MLX5E_TC_NIC_OFFLOAD)) {
+ if (!enable && mlx5e_tc_num_filters(priv, MLX5_TC_FLAG(NIC_OFFLOAD))) {
netdev_err(netdev,
"Active offloaded tc filters, can't turn hw_tc_offload off\n");
return -EINVAL;
@@ -3781,9 +3822,10 @@ static netdev_features_t mlx5e_fix_features(struct net_device *netdev,
netdev_warn(netdev, "Dropping C-tag vlan stripping offload due to S-tag vlan\n");
}
if (!MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_STRIDING_RQ)) {
- features &= ~NETIF_F_LRO;
- if (params->lro_en)
+ if (features & NETIF_F_LRO) {
netdev_warn(netdev, "Disabling LRO, not supported in legacy RQ\n");
+ features &= ~NETIF_F_LRO;
+ }
}
if (MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_CQE_COMPRESS)) {
@@ -3950,7 +3992,8 @@ int mlx5e_hwstamp_set(struct mlx5e_priv *priv, struct ifreq *ifr)
case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
case HWTSTAMP_FILTER_NTP_ALL:
/* Disable CQE compression */
- netdev_warn(priv->netdev, "Disabling cqe compression");
+ if (MLX5E_GET_PFLAG(&priv->channels.params, MLX5E_PFLAG_RX_CQE_COMPRESS))
+ netdev_warn(priv->netdev, "Disabling RX cqe compression\n");
err = mlx5e_modify_rx_cqe_compression_locked(priv, false);
if (err) {
netdev_err(priv->netdev, "Failed disabling cqe compression err=%d\n", err);
@@ -4202,6 +4245,8 @@ static netdev_features_t mlx5e_tunnel_features_check(struct mlx5e_priv *priv,
switch (proto) {
case IPPROTO_GRE:
+ case IPPROTO_IPIP:
+ case IPPROTO_IPV6:
return features;
case IPPROTO_UDP:
udph = udp_hdr(skb);
@@ -4267,7 +4312,7 @@ static void mlx5e_tx_timeout_work(struct work_struct *work)
if (!netif_xmit_stopped(dev_queue))
continue;
- if (mlx5e_tx_reporter_timeout(sq))
+ if (mlx5e_reporter_tx_timeout(sq))
report_failed = true;
}
@@ -4768,7 +4813,7 @@ void mlx5e_build_nic_params(struct mlx5_core_dev *mdev,
mlx5e_set_tx_cq_mode_params(params, MLX5_CQ_PERIOD_MODE_START_FROM_EQE);
/* TX inline */
- params->tx_min_inline_mode = mlx5e_params_calculate_tx_min_inline(mdev);
+ mlx5_query_min_inline(mdev, &params->tx_min_inline_mode);
/* RSS */
mlx5e_build_rss_params(rss_params, params->num_channels);
@@ -4838,7 +4883,7 @@ static void mlx5e_build_nic_netdev(struct net_device *netdev)
netdev->hw_features |= NETIF_F_HW_VLAN_STAG_TX;
if (mlx5_vxlan_allowed(mdev->vxlan) || mlx5_geneve_tx_allowed(mdev) ||
- MLX5_CAP_ETH(mdev, tunnel_stateless_gre)) {
+ mlx5e_any_tunnel_proto_supported(mdev)) {
netdev->hw_enc_features |= NETIF_F_HW_CSUM;
netdev->hw_enc_features |= NETIF_F_TSO;
netdev->hw_enc_features |= NETIF_F_TSO6;
@@ -4853,7 +4898,7 @@ static void mlx5e_build_nic_netdev(struct net_device *netdev)
netdev->gso_partial_features = NETIF_F_GSO_UDP_TUNNEL_CSUM;
}
- if (MLX5_CAP_ETH(mdev, tunnel_stateless_gre)) {
+ if (mlx5e_tunnel_proto_supported(mdev, IPPROTO_GRE)) {
netdev->hw_features |= NETIF_F_GSO_GRE |
NETIF_F_GSO_GRE_CSUM;
netdev->hw_enc_features |= NETIF_F_GSO_GRE |
@@ -4862,6 +4907,15 @@ static void mlx5e_build_nic_netdev(struct net_device *netdev)
NETIF_F_GSO_GRE_CSUM;
}
+ if (mlx5e_tunnel_proto_supported(mdev, IPPROTO_IPIP)) {
+ netdev->hw_features |= NETIF_F_GSO_IPXIP4 |
+ NETIF_F_GSO_IPXIP6;
+ netdev->hw_enc_features |= NETIF_F_GSO_IPXIP4 |
+ NETIF_F_GSO_IPXIP6;
+ netdev->gso_partial_features |= NETIF_F_GSO_IPXIP4 |
+ NETIF_F_GSO_IPXIP6;
+ }
+
netdev->hw_features |= NETIF_F_GSO_PARTIAL;
netdev->gso_partial_features |= NETIF_F_GSO_UDP_L4;
netdev->hw_features |= NETIF_F_GSO_UDP_L4;
@@ -4965,12 +5019,14 @@ static int mlx5e_nic_init(struct mlx5_core_dev *mdev,
mlx5_core_err(mdev, "TLS initialization failed, %d\n", err);
mlx5e_build_nic_netdev(netdev);
mlx5e_build_tc2txq_maps(priv);
+ mlx5e_health_create_reporters(priv);
return 0;
}
static void mlx5e_nic_cleanup(struct mlx5e_priv *priv)
{
+ mlx5e_health_destroy_reporters(priv);
mlx5e_tls_cleanup(priv);
mlx5e_ipsec_cleanup(priv);
mlx5e_netdev_cleanup(priv->netdev, priv);
@@ -5073,7 +5129,6 @@ static int mlx5e_init_nic_tx(struct mlx5e_priv *priv)
#ifdef CONFIG_MLX5_CORE_EN_DCB
mlx5e_dcbnl_initialize(priv);
#endif
- mlx5e_tx_reporter_create(priv);
return 0;
}
@@ -5097,6 +5152,7 @@ static void mlx5e_nic_enable(struct mlx5e_priv *priv)
if (mlx5e_monitor_counter_supported(priv))
mlx5e_monitor_counter_init(priv);
+ mlx5e_hv_vhca_stats_create(priv);
if (netdev->reg_state != NETREG_REGISTERED)
return;
#ifdef CONFIG_MLX5_CORE_EN_DCB
@@ -5129,6 +5185,7 @@ static void mlx5e_nic_disable(struct mlx5e_priv *priv)
queue_work(priv->wq, &priv->set_rx_mode_work);
+ mlx5e_hv_vhca_stats_destroy(priv);
if (mlx5e_monitor_counter_supported(priv))
mlx5e_monitor_counter_cleanup(priv);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
index d0684fdb69e1..1623cd32f303 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
@@ -46,6 +46,8 @@
#include "en/tc_tun.h"
#include "fs_core.h"
#include "lib/port_tun.h"
+#define CREATE_TRACE_POINTS
+#include "diag/en_rep_tracepoint.h"
#define MLX5E_REP_PARAMS_DEF_LOG_SQ_SIZE \
max(0x7, MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE)
@@ -389,24 +391,17 @@ static const struct ethtool_ops mlx5e_uplink_rep_ethtool_ops = {
.set_pauseparam = mlx5e_uplink_rep_set_pauseparam,
};
-static int mlx5e_rep_get_port_parent_id(struct net_device *dev,
- struct netdev_phys_item_id *ppid)
+static void mlx5e_rep_get_port_parent_id(struct net_device *dev,
+ struct netdev_phys_item_id *ppid)
{
- struct mlx5_eswitch *esw;
struct mlx5e_priv *priv;
u64 parent_id;
priv = netdev_priv(dev);
- esw = priv->mdev->priv.eswitch;
-
- if (esw->mode == MLX5_ESWITCH_NONE)
- return -EOPNOTSUPP;
parent_id = mlx5_query_nic_system_image_guid(priv->mdev);
ppid->id_len = sizeof(parent_id);
memcpy(ppid->id, &parent_id, sizeof(parent_id));
-
- return 0;
}
static void mlx5e_sqs2vport_stop(struct mlx5_eswitch *esw,
@@ -531,47 +526,97 @@ void mlx5e_rep_queue_neigh_stats_work(struct mlx5e_priv *priv)
neigh_update->min_interval);
}
+static bool mlx5e_rep_neigh_entry_hold(struct mlx5e_neigh_hash_entry *nhe)
+{
+ return refcount_inc_not_zero(&nhe->refcnt);
+}
+
+static void mlx5e_rep_neigh_entry_remove(struct mlx5e_neigh_hash_entry *nhe);
+
+static void mlx5e_rep_neigh_entry_release(struct mlx5e_neigh_hash_entry *nhe)
+{
+ if (refcount_dec_and_test(&nhe->refcnt)) {
+ mlx5e_rep_neigh_entry_remove(nhe);
+ kfree_rcu(nhe, rcu);
+ }
+}
+
+static struct mlx5e_neigh_hash_entry *
+mlx5e_get_next_nhe(struct mlx5e_rep_priv *rpriv,
+ struct mlx5e_neigh_hash_entry *nhe)
+{
+ struct mlx5e_neigh_hash_entry *next = NULL;
+
+ rcu_read_lock();
+
+ for (next = nhe ?
+ list_next_or_null_rcu(&rpriv->neigh_update.neigh_list,
+ &nhe->neigh_list,
+ struct mlx5e_neigh_hash_entry,
+ neigh_list) :
+ list_first_or_null_rcu(&rpriv->neigh_update.neigh_list,
+ struct mlx5e_neigh_hash_entry,
+ neigh_list);
+ next;
+ next = list_next_or_null_rcu(&rpriv->neigh_update.neigh_list,
+ &next->neigh_list,
+ struct mlx5e_neigh_hash_entry,
+ neigh_list))
+ if (mlx5e_rep_neigh_entry_hold(next))
+ break;
+
+ rcu_read_unlock();
+
+ if (nhe)
+ mlx5e_rep_neigh_entry_release(nhe);
+
+ return next;
+}
+
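
The iterator just added returns each entry with a reference held and releases the previous one, so the caller can sleep between steps without pinning the whole list. A rough user-space model of that hold-next/release-previous shape, where a mutex stands in for RCU and all names are illustrative:

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct entry {
	int refcnt;              /* 0 means the entry is being freed */
	struct entry *next;
};

static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;
static struct entry *head;

static void entry_release(struct entry *e)
{
	pthread_mutex_lock(&list_lock);
	if (--e->refcnt == 0)
		free(e);         /* kfree_rcu() in the real code */
	pthread_mutex_unlock(&list_lock);
}

/* Return the next live entry with a reference held and drop the
 * reference on the previous one; the caller may sleep in between. */
static struct entry *get_next(struct entry *prev)
{
	struct entry *next;

	pthread_mutex_lock(&list_lock);
	next = prev ? prev->next : head;
	while (next && next->refcnt == 0)  /* refcount_inc_not_zero() */
		next = next->next;
	if (next)
		next->refcnt++;
	pthread_mutex_unlock(&list_lock);

	if (prev)
		entry_release(prev);
	return next;
}

int main(void)
{
	struct entry b = { .refcnt = 1 }, a = { .refcnt = 1, .next = &b };
	struct entry *it = NULL;

	head = &a;
	while ((it = get_next(it)) != NULL)
		printf("visit entry, refcnt now %d\n", it->refcnt);
	return 0;
}
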
static void mlx5e_rep_neigh_stats_work(struct work_struct *work)
{
struct mlx5e_rep_priv *rpriv = container_of(work, struct mlx5e_rep_priv,
neigh_update.neigh_stats_work.work);
struct net_device *netdev = rpriv->netdev;
struct mlx5e_priv *priv = netdev_priv(netdev);
- struct mlx5e_neigh_hash_entry *nhe;
+ struct mlx5e_neigh_hash_entry *nhe = NULL;
rtnl_lock();
if (!list_empty(&rpriv->neigh_update.neigh_list))
mlx5e_rep_queue_neigh_stats_work(priv);
- list_for_each_entry(nhe, &rpriv->neigh_update.neigh_list, neigh_list)
+ while ((nhe = mlx5e_get_next_nhe(rpriv, nhe)) != NULL)
mlx5e_tc_update_neigh_used_value(nhe);
rtnl_unlock();
}
-static void mlx5e_rep_neigh_entry_hold(struct mlx5e_neigh_hash_entry *nhe)
-{
- refcount_inc(&nhe->refcnt);
-}
-
-static void mlx5e_rep_neigh_entry_release(struct mlx5e_neigh_hash_entry *nhe)
-{
- if (refcount_dec_and_test(&nhe->refcnt))
- kfree(nhe);
-}
-
static void mlx5e_rep_update_flows(struct mlx5e_priv *priv,
struct mlx5e_encap_entry *e,
bool neigh_connected,
unsigned char ha[ETH_ALEN])
{
struct ethhdr *eth = (struct ethhdr *)e->encap_header;
+ struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
+ bool encap_connected;
+ LIST_HEAD(flow_list);
ASSERT_RTNL();
+ /* wait for encap to be fully initialized */
+ wait_for_completion(&e->res_ready);
+
+ mutex_lock(&esw->offloads.encap_tbl_lock);
+ encap_connected = !!(e->flags & MLX5_ENCAP_ENTRY_VALID);
+ if (e->compl_result || (encap_connected == neigh_connected &&
+ ether_addr_equal(e->h_dest, ha)))
+ goto unlock;
+
+ mlx5e_take_all_encap_flows(e, &flow_list);
+
if ((e->flags & MLX5_ENCAP_ENTRY_VALID) &&
(!neigh_connected || !ether_addr_equal(e->h_dest, ha)))
- mlx5e_tc_encap_flows_del(priv, e);
+ mlx5e_tc_encap_flows_del(priv, e, &flow_list);
if (neigh_connected && !(e->flags & MLX5_ENCAP_ENTRY_VALID)) {
ether_addr_copy(e->h_dest, ha);
@@ -581,8 +626,11 @@ static void mlx5e_rep_update_flows(struct mlx5e_priv *priv,
*/
ether_addr_copy(eth->h_source, e->route_dev->dev_addr);
- mlx5e_tc_encap_flows_add(priv, e);
+ mlx5e_tc_encap_flows_add(priv, e, &flow_list);
}
+unlock:
+ mutex_unlock(&esw->offloads.encap_tbl_lock);
+ mlx5e_put_encap_flow_list(priv, &flow_list);
}
static void mlx5e_rep_neigh_update(struct work_struct *work)
@@ -594,7 +642,6 @@ static void mlx5e_rep_neigh_update(struct work_struct *work)
unsigned char ha[ETH_ALEN];
struct mlx5e_priv *priv;
bool neigh_connected;
- bool encap_connected;
u8 nud_state, dead;
rtnl_lock();
@@ -612,13 +659,15 @@ static void mlx5e_rep_neigh_update(struct work_struct *work)
neigh_connected = (nud_state & NUD_VALID) && !dead;
+ trace_mlx5e_rep_neigh_update(nhe, ha, neigh_connected);
+
list_for_each_entry(e, &nhe->encap_list, encap_list) {
- encap_connected = !!(e->flags & MLX5_ENCAP_ENTRY_VALID);
- priv = netdev_priv(e->out_dev);
+ if (!mlx5e_encap_take(e))
+ continue;
- if (encap_connected != neigh_connected ||
- !ether_addr_equal(e->h_dest, ha))
- mlx5e_rep_update_flows(priv, e, neigh_connected, ha);
+ priv = netdev_priv(e->out_dev);
+ mlx5e_rep_update_flows(priv, e, neigh_connected, ha);
+ mlx5e_encap_put(priv, e);
}
mlx5e_rep_neigh_entry_release(nhe);
rtnl_unlock();
@@ -659,8 +708,8 @@ mlx5e_rep_indr_offload(struct net_device *netdev,
struct flow_cls_offload *flower,
struct mlx5e_rep_indr_block_priv *indr_priv)
{
+ unsigned long flags = MLX5_TC_FLAG(EGRESS) | MLX5_TC_FLAG(ESW_OFFLOAD);
struct mlx5e_priv *priv = netdev_priv(indr_priv->rpriv->netdev);
- int flags = MLX5E_TC_EGRESS | MLX5E_TC_ESW_OFFLOAD;
int err = 0;
switch (flower->command) {
@@ -714,6 +763,7 @@ mlx5e_rep_indr_setup_tc_block(struct net_device *netdev,
if (f->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
return -EOPNOTSUPP;
+ f->unlocked_driver_cb = true;
f->driver_block_list = &mlx5e_block_cb_list;
switch (f->command) {
@@ -722,10 +772,6 @@ mlx5e_rep_indr_setup_tc_block(struct net_device *netdev,
if (indr_priv)
return -EEXIST;
- if (flow_block_cb_is_busy(mlx5e_rep_indr_setup_block_cb,
- indr_priv, &mlx5e_block_cb_list))
- return -EBUSY;
-
indr_priv = kmalloc(sizeof(*indr_priv), GFP_KERNEL);
if (!indr_priv)
return -ENOMEM;
@@ -785,9 +831,9 @@ static int mlx5e_rep_indr_register_block(struct mlx5e_rep_priv *rpriv,
{
int err;
- err = __tc_indr_block_cb_register(netdev, rpriv,
- mlx5e_rep_indr_setup_tc_cb,
- rpriv);
+ err = __flow_indr_block_cb_register(netdev, rpriv,
+ mlx5e_rep_indr_setup_tc_cb,
+ rpriv);
if (err) {
struct mlx5e_priv *priv = netdev_priv(rpriv->netdev);
@@ -800,8 +846,8 @@ static int mlx5e_rep_indr_register_block(struct mlx5e_rep_priv *rpriv,
static void mlx5e_rep_indr_unregister_block(struct mlx5e_rep_priv *rpriv,
struct net_device *netdev)
{
- __tc_indr_block_cb_unregister(netdev, mlx5e_rep_indr_setup_tc_cb,
- rpriv);
+ __flow_indr_block_cb_unregister(netdev, mlx5e_rep_indr_setup_tc_cb,
+ rpriv);
}
static int mlx5e_nic_rep_netdevice_event(struct notifier_block *nb,
@@ -827,6 +873,28 @@ static int mlx5e_nic_rep_netdevice_event(struct notifier_block *nb,
return NOTIFY_OK;
}
+static void
+mlx5e_rep_queue_neigh_update_work(struct mlx5e_priv *priv,
+ struct mlx5e_neigh_hash_entry *nhe,
+ struct neighbour *n)
+{
+ /* Take a reference to ensure the neighbour and mlx5 encap
+ * entry won't be destroyed until we drop the reference in
+ * delayed work.
+ */
+ neigh_hold(n);
+
+ /* This assignment is valid as long as the neigh reference
+ * is taken.
+ */
+ nhe->n = n;
+
+ if (!queue_work(priv->wq, &nhe->neigh_update_work)) {
+ mlx5e_rep_neigh_entry_release(nhe);
+ neigh_release(n);
+ }
+}
+
static struct mlx5e_neigh_hash_entry *
mlx5e_rep_neigh_entry_lookup(struct mlx5e_priv *priv,
struct mlx5e_neigh *m_neigh);
@@ -859,34 +927,13 @@ static int mlx5e_rep_netevent_event(struct notifier_block *nb,
m_neigh.family = n->ops->family;
memcpy(&m_neigh.dst_ip, n->primary_key, n->tbl->key_len);
- /* We are in atomic context and can't take RTNL mutex, so use
- * spin_lock_bh to lookup the neigh table. bh is used since
- * netevent can be called from a softirq context.
- */
- spin_lock_bh(&neigh_update->encap_lock);
+ rcu_read_lock();
nhe = mlx5e_rep_neigh_entry_lookup(priv, &m_neigh);
- if (!nhe) {
- spin_unlock_bh(&neigh_update->encap_lock);
+ rcu_read_unlock();
+ if (!nhe)
return NOTIFY_DONE;
- }
-
- /* This assignment is valid as long as the the neigh reference
- * is taken
- */
- nhe->n = n;
-
- /* Take a reference to ensure the neighbour and mlx5 encap
- * entry won't be destructed until we drop the reference in
- * delayed work.
- */
- neigh_hold(n);
- mlx5e_rep_neigh_entry_hold(nhe);
- if (!queue_work(priv->wq, &nhe->neigh_update_work)) {
- mlx5e_rep_neigh_entry_release(nhe);
- neigh_release(n);
- }
- spin_unlock_bh(&neigh_update->encap_lock);
+ mlx5e_rep_queue_neigh_update_work(priv, nhe, n);
break;
case NETEVENT_DELAY_PROBE_TIME_UPDATE:
@@ -903,19 +950,15 @@ static int mlx5e_rep_netevent_event(struct notifier_block *nb,
#endif
return NOTIFY_DONE;
- /* We are in atomic context and can't take RTNL mutex,
- * so use spin_lock_bh to walk the neigh list and look for
- * the relevant device. bh is used since netevent can be
- * called from a softirq context.
- */
- spin_lock_bh(&neigh_update->encap_lock);
- list_for_each_entry(nhe, &neigh_update->neigh_list, neigh_list) {
+ rcu_read_lock();
+ list_for_each_entry_rcu(nhe, &neigh_update->neigh_list,
+ neigh_list) {
if (p->dev == nhe->m_neigh.dev) {
found = true;
break;
}
}
- spin_unlock_bh(&neigh_update->encap_lock);
+ rcu_read_unlock();
if (!found)
return NOTIFY_DONE;
@@ -946,7 +989,7 @@ static int mlx5e_rep_neigh_init(struct mlx5e_rep_priv *rpriv)
return err;
INIT_LIST_HEAD(&neigh_update->neigh_list);
- spin_lock_init(&neigh_update->encap_lock);
+ mutex_init(&neigh_update->encap_lock);
INIT_DELAYED_WORK(&neigh_update->neigh_stats_work,
mlx5e_rep_neigh_stats_work);
mlx5e_rep_neigh_update_init_interval(rpriv);
@@ -973,6 +1016,7 @@ static void mlx5e_rep_neigh_cleanup(struct mlx5e_rep_priv *rpriv)
cancel_delayed_work_sync(&rpriv->neigh_update.neigh_stats_work);
+ mutex_destroy(&neigh_update->encap_lock);
rhashtable_destroy(&neigh_update->neigh_ht);
}
@@ -988,28 +1032,27 @@ static int mlx5e_rep_neigh_entry_insert(struct mlx5e_priv *priv,
if (err)
return err;
- list_add(&nhe->neigh_list, &rpriv->neigh_update.neigh_list);
+ list_add_rcu(&nhe->neigh_list, &rpriv->neigh_update.neigh_list);
return err;
}
-static void mlx5e_rep_neigh_entry_remove(struct mlx5e_priv *priv,
- struct mlx5e_neigh_hash_entry *nhe)
+static void mlx5e_rep_neigh_entry_remove(struct mlx5e_neigh_hash_entry *nhe)
{
- struct mlx5e_rep_priv *rpriv = priv->ppriv;
+ struct mlx5e_rep_priv *rpriv = nhe->priv->ppriv;
- spin_lock_bh(&rpriv->neigh_update.encap_lock);
+ mutex_lock(&rpriv->neigh_update.encap_lock);
- list_del(&nhe->neigh_list);
+ list_del_rcu(&nhe->neigh_list);
rhashtable_remove_fast(&rpriv->neigh_update.neigh_ht,
&nhe->rhash_node,
mlx5e_neigh_ht_params);
- spin_unlock_bh(&rpriv->neigh_update.encap_lock);
+ mutex_unlock(&rpriv->neigh_update.encap_lock);
}
-/* This function must only be called under RTNL lock or under the
- * representor's encap_lock in case RTNL mutex can't be held.
+/* This function must only be called under the representor's encap_lock or
+ * inside an RCU read-side critical section.
*/
static struct mlx5e_neigh_hash_entry *
mlx5e_rep_neigh_entry_lookup(struct mlx5e_priv *priv,
@@ -1017,9 +1060,11 @@ mlx5e_rep_neigh_entry_lookup(struct mlx5e_priv *priv,
{
struct mlx5e_rep_priv *rpriv = priv->ppriv;
struct mlx5e_neigh_update_table *neigh_update = &rpriv->neigh_update;
+ struct mlx5e_neigh_hash_entry *nhe;
- return rhashtable_lookup_fast(&neigh_update->neigh_ht, m_neigh,
- mlx5e_neigh_ht_params);
+ nhe = rhashtable_lookup_fast(&neigh_update->neigh_ht, m_neigh,
+ mlx5e_neigh_ht_params);
+ return nhe && mlx5e_rep_neigh_entry_hold(nhe) ? nhe : NULL;
}
static int mlx5e_rep_neigh_entry_create(struct mlx5e_priv *priv,
@@ -1032,8 +1077,10 @@ static int mlx5e_rep_neigh_entry_create(struct mlx5e_priv *priv,
if (!*nhe)
return -ENOMEM;
+ (*nhe)->priv = priv;
memcpy(&(*nhe)->m_neigh, &e->m_neigh, sizeof(e->m_neigh));
INIT_WORK(&(*nhe)->neigh_update_work, mlx5e_rep_neigh_update);
+ spin_lock_init(&(*nhe)->encap_list_lock);
INIT_LIST_HEAD(&(*nhe)->encap_list);
refcount_set(&(*nhe)->refcnt, 1);
@@ -1047,19 +1094,6 @@ out_free:
return err;
}
-static void mlx5e_rep_neigh_entry_destroy(struct mlx5e_priv *priv,
- struct mlx5e_neigh_hash_entry *nhe)
-{
- /* The neigh hash entry must be removed from the hash table regardless
- * of the reference count value, so it won't be found by the next
- * neigh notification call. The neigh hash entry reference count is
- * incremented only during creation and neigh notification calls and
- * protects from freeing the nhe struct.
- */
- mlx5e_rep_neigh_entry_remove(priv, nhe);
- mlx5e_rep_neigh_entry_release(nhe);
-}
-
int mlx5e_rep_encap_entry_attach(struct mlx5e_priv *priv,
struct mlx5e_encap_entry *e)
{
@@ -1072,16 +1106,26 @@ int mlx5e_rep_encap_entry_attach(struct mlx5e_priv *priv,
err = mlx5_tun_entropy_refcount_inc(tun_entropy, e->reformat_type);
if (err)
return err;
+
+ mutex_lock(&rpriv->neigh_update.encap_lock);
nhe = mlx5e_rep_neigh_entry_lookup(priv, &e->m_neigh);
if (!nhe) {
err = mlx5e_rep_neigh_entry_create(priv, e, &nhe);
if (err) {
+ mutex_unlock(&rpriv->neigh_update.encap_lock);
mlx5_tun_entropy_refcount_dec(tun_entropy,
e->reformat_type);
return err;
}
}
- list_add(&e->encap_list, &nhe->encap_list);
+
+ e->nhe = nhe;
+ spin_lock(&nhe->encap_list_lock);
+ list_add_rcu(&e->encap_list, &nhe->encap_list);
+ spin_unlock(&nhe->encap_list_lock);
+
+ mutex_unlock(&rpriv->neigh_update.encap_lock);
+
return 0;
}
@@ -1091,13 +1135,16 @@ void mlx5e_rep_encap_entry_detach(struct mlx5e_priv *priv,
struct mlx5e_rep_priv *rpriv = priv->ppriv;
struct mlx5_rep_uplink_priv *uplink_priv = &rpriv->uplink_priv;
struct mlx5_tun_entropy *tun_entropy = &uplink_priv->tun_entropy;
- struct mlx5e_neigh_hash_entry *nhe;
- list_del(&e->encap_list);
- nhe = mlx5e_rep_neigh_entry_lookup(priv, &e->m_neigh);
+ if (!e->nhe)
+ return;
- if (list_empty(&nhe->encap_list))
- mlx5e_rep_neigh_entry_destroy(priv, nhe);
+ spin_lock(&e->nhe->encap_list_lock);
+ list_del_rcu(&e->encap_list);
+ spin_unlock(&e->nhe->encap_list_lock);
+
+ mlx5e_rep_neigh_entry_release(e->nhe);
+ e->nhe = NULL;
mlx5_tun_entropy_refcount_dec(tun_entropy, e->reformat_type);
}
@@ -1160,15 +1207,34 @@ mlx5e_rep_setup_tc_cls_flower(struct mlx5e_priv *priv,
}
}
+static
+int mlx5e_rep_setup_tc_cls_matchall(struct mlx5e_priv *priv,
+ struct tc_cls_matchall_offload *ma)
+{
+ switch (ma->command) {
+ case TC_CLSMATCHALL_REPLACE:
+ return mlx5e_tc_configure_matchall(priv, ma);
+ case TC_CLSMATCHALL_DESTROY:
+ return mlx5e_tc_delete_matchall(priv, ma);
+ case TC_CLSMATCHALL_STATS:
+ mlx5e_tc_stats_matchall(priv, ma);
+ return 0;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
static int mlx5e_rep_setup_tc_cb(enum tc_setup_type type, void *type_data,
void *cb_priv)
{
+ unsigned long flags = MLX5_TC_FLAG(INGRESS) | MLX5_TC_FLAG(ESW_OFFLOAD);
struct mlx5e_priv *priv = cb_priv;
switch (type) {
case TC_SETUP_CLSFLOWER:
- return mlx5e_rep_setup_tc_cls_flower(priv, type_data, MLX5E_TC_INGRESS |
- MLX5E_TC_ESW_OFFLOAD);
+ return mlx5e_rep_setup_tc_cls_flower(priv, type_data, flags);
+ case TC_SETUP_CLSMATCHALL:
+ return mlx5e_rep_setup_tc_cls_matchall(priv, type_data);
default:
return -EOPNOTSUPP;
}
@@ -1180,9 +1246,11 @@ static int mlx5e_rep_setup_tc(struct net_device *dev, enum tc_setup_type type,
void *type_data)
{
struct mlx5e_priv *priv = netdev_priv(dev);
+ struct flow_block_offload *f = type_data;
switch (type) {
case TC_SETUP_BLOCK:
+ f->unlocked_driver_cb = true;
return flow_block_cb_setup_simple(type_data,
&mlx5e_rep_block_cb_list,
mlx5e_rep_setup_tc_cb,
@@ -1553,7 +1621,7 @@ static int mlx5e_init_rep_tx(struct mlx5e_priv *priv)
{
struct mlx5e_rep_priv *rpriv = priv->ppriv;
struct mlx5_rep_uplink_priv *uplink_priv;
- int tc, err;
+ int err;
err = mlx5e_create_tises(priv);
if (err) {
@@ -1564,6 +1632,7 @@ static int mlx5e_init_rep_tx(struct mlx5e_priv *priv)
if (rpriv->rep->vport == MLX5_VPORT_UPLINK) {
uplink_priv = &rpriv->uplink_priv;
+ mutex_init(&uplink_priv->unready_flows_lock);
INIT_LIST_HEAD(&uplink_priv->unready_flows);
/* init shared tc flow table */
@@ -1588,18 +1657,15 @@ static int mlx5e_init_rep_tx(struct mlx5e_priv *priv)
tc_esw_cleanup:
mlx5e_tc_esw_cleanup(&uplink_priv->tc_ht);
destroy_tises:
- for (tc = 0; tc < priv->profile->max_tc; tc++)
- mlx5e_destroy_tis(priv->mdev, priv->tisn[tc]);
+ mlx5e_destroy_tises(priv);
return err;
}
static void mlx5e_cleanup_rep_tx(struct mlx5e_priv *priv)
{
struct mlx5e_rep_priv *rpriv = priv->ppriv;
- int tc;
- for (tc = 0; tc < priv->profile->max_tc; tc++)
- mlx5e_destroy_tis(priv->mdev, priv->tisn[tc]);
+ mlx5e_destroy_tises(priv);
if (rpriv->rep->vport == MLX5_VPORT_UPLINK) {
/* clean indirect TC block notifications */
@@ -1608,6 +1674,7 @@ static void mlx5e_cleanup_rep_tx(struct mlx5e_priv *priv)
/* delete shared tc flow table */
mlx5e_tc_esw_cleanup(&rpriv->uplink_priv.tc_ht);
+ mutex_destroy(&rpriv->uplink_priv.unready_flows_lock);
}
}
@@ -1731,37 +1798,46 @@ is_devlink_port_supported(const struct mlx5_core_dev *dev,
mlx5_eswitch_is_vf_vport(dev->priv.eswitch, rpriv->rep->vport);
}
+static unsigned int
+vport_to_devlink_port_index(const struct mlx5_core_dev *dev, u16 vport_num)
+{
+ return (MLX5_CAP_GEN(dev, vhca_id) << 16) | vport_num;
+}
+
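
The helper above packs the vhca_id into the high 16 bits and the vport number into the low 16 bits, which keeps devlink port indices unique across functions. A trivial C check of that layout (the sample values are made up):

#include <stdio.h>

static unsigned int dl_port_index(unsigned short vhca_id,
				  unsigned short vport)
{
	return ((unsigned int)vhca_id << 16) | vport;
}

int main(void)
{
	printf("0x%x\n", dl_port_index(2, 3)); /* prints 0x20003 */
	return 0;
}
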
static int register_devlink_port(struct mlx5_core_dev *dev,
struct mlx5e_rep_priv *rpriv)
{
struct devlink *devlink = priv_to_devlink(dev);
struct mlx5_eswitch_rep *rep = rpriv->rep;
struct netdev_phys_item_id ppid = {};
- int ret;
+ unsigned int dl_port_index = 0;
if (!is_devlink_port_supported(dev, rpriv))
return 0;
- ret = mlx5e_rep_get_port_parent_id(rpriv->netdev, &ppid);
- if (ret)
- return ret;
+ mlx5e_rep_get_port_parent_id(rpriv->netdev, &ppid);
- if (rep->vport == MLX5_VPORT_UPLINK)
+ if (rep->vport == MLX5_VPORT_UPLINK) {
devlink_port_attrs_set(&rpriv->dl_port,
DEVLINK_PORT_FLAVOUR_PHYSICAL,
PCI_FUNC(dev->pdev->devfn), false, 0,
&ppid.id[0], ppid.id_len);
- else if (rep->vport == MLX5_VPORT_PF)
+ dl_port_index = vport_to_devlink_port_index(dev, rep->vport);
+ } else if (rep->vport == MLX5_VPORT_PF) {
devlink_port_attrs_pci_pf_set(&rpriv->dl_port,
&ppid.id[0], ppid.id_len,
dev->pdev->devfn);
- else if (mlx5_eswitch_is_vf_vport(dev->priv.eswitch, rpriv->rep->vport))
+ dl_port_index = rep->vport;
+ } else if (mlx5_eswitch_is_vf_vport(dev->priv.eswitch,
+ rpriv->rep->vport)) {
devlink_port_attrs_pci_vf_set(&rpriv->dl_port,
&ppid.id[0], ppid.id_len,
dev->pdev->devfn,
rep->vport - 1);
+ dl_port_index = vport_to_devlink_port_index(dev, rep->vport);
+ }
- return devlink_port_register(devlink, &rpriv->dl_port, rep->vport);
+ return devlink_port_register(devlink, &rpriv->dl_port, dl_port_index);
}
static void unregister_devlink_port(struct mlx5_core_dev *dev,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h
index c56e6ee4350c..a0ae5069d8c3 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h
@@ -35,6 +35,7 @@
#include <net/ip_tunnels.h>
#include <linux/rhashtable.h>
+#include <linux/mutex.h>
#include "eswitch.h"
#include "en.h"
#include "lib/port_tun.h"
@@ -48,7 +49,7 @@ struct mlx5e_neigh_update_table {
*/
struct list_head neigh_list;
/* protect lookup/remove operations */
- spinlock_t encap_lock;
+ struct mutex encap_lock;
struct notifier_block netevent_nb;
struct delayed_work neigh_stats_work;
unsigned long min_interval; /* jiffies */
@@ -75,6 +76,8 @@ struct mlx5_rep_uplink_priv {
struct mlx5_tun_entropy tun_entropy;
+ /* protects unready_flows */
+ struct mutex unready_flows_lock;
struct list_head unready_flows;
struct work_struct reoffload_flows_work;
};
@@ -86,6 +89,7 @@ struct mlx5e_rep_priv {
struct mlx5_flow_handle *vport_rx_rule;
struct list_head vport_sqs_list;
struct mlx5_rep_uplink_priv uplink_priv; /* valid for uplink rep */
+ struct rtnl_link_stats64 prev_vf_vport_stats;
struct devlink_port dl_port;
};
@@ -107,6 +111,7 @@ struct mlx5e_neigh {
struct mlx5e_neigh_hash_entry {
struct rhash_head rhash_node;
struct mlx5e_neigh m_neigh;
+ struct mlx5e_priv *priv;
/* Save the neigh hash entry in a list on the representor in
* addition to the hash table. In order to iterate easily over the
@@ -114,6 +119,8 @@ struct mlx5e_neigh_hash_entry {
*/
struct list_head neigh_list;
+ /* protects encap list */
+ spinlock_t encap_list_lock;
/* encap list sharing the same neigh */
struct list_head encap_list;
@@ -134,6 +141,8 @@ struct mlx5e_neigh_hash_entry {
* 'used' value and avoid neigh deleting by the kernel.
*/
unsigned long reported_lastuse;
+
+ struct rcu_head rcu;
};
enum {
@@ -142,6 +151,8 @@ enum {
};
struct mlx5e_encap_entry {
+ /* attached neigh hash entry */
+ struct mlx5e_neigh_hash_entry *nhe;
/* neigh hash entry list of encaps sharing the same neigh */
struct list_head encap_list;
struct mlx5e_neigh m_neigh;
@@ -161,6 +172,10 @@ struct mlx5e_encap_entry {
u8 flags;
char *encap_header;
int encap_size;
+ refcount_t refcnt;
+ struct completion res_ready;
+ int compl_result;
+ struct rcu_head rcu;
};
struct mlx5e_rep_sq {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
index ac6e586d403d..2fd2760d0bb7 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
@@ -48,6 +48,7 @@
#include "lib/clock.h"
#include "en/xdp.h"
#include "en/xsk/rx.h"
+#include "en/health.h"
static inline bool mlx5e_rx_hw_stamp(struct hwtstamp_config *config)
{
@@ -615,6 +616,8 @@ void mlx5e_poll_ico_cq(struct mlx5e_cq *cq)
if (unlikely(get_cqe_opcode(cqe) != MLX5_CQE_REQ)) {
netdev_WARN_ONCE(cq->channel->netdev,
"Bad OP in ICOSQ CQE: 0x%x\n", get_cqe_opcode(cqe));
+ if (!test_and_set_bit(MLX5E_SQ_STATE_RECOVERING, &sq->state))
+ queue_work(cq->channel->priv->wq, &sq->recover_work);
break;
}
do {
@@ -859,13 +862,24 @@ tail_padding_csum(struct sk_buff *skb, int offset,
}
static void
-mlx5e_skb_padding_csum(struct sk_buff *skb, int network_depth, __be16 proto,
- struct mlx5e_rq_stats *stats)
+mlx5e_skb_csum_fixup(struct sk_buff *skb, int network_depth, __be16 proto,
+ struct mlx5e_rq_stats *stats)
{
struct ipv6hdr *ip6;
struct iphdr *ip4;
int pkt_len;
+ /* Fixup vlan headers, if any */
+ if (network_depth > ETH_HLEN)
+ /* CQE csum is calculated from the IP header and does
+ * not cover VLAN headers (if present). This will add
+ * the checksum manually.
+ */
+ skb->csum = csum_partial(skb->data + ETH_HLEN,
+ network_depth - ETH_HLEN,
+ skb->csum);
+
+ /* Fixup tail padding, if any */
switch (proto) {
case htons(ETH_P_IP):
ip4 = (struct iphdr *)(skb->data + network_depth);
@@ -931,16 +945,7 @@ static inline void mlx5e_handle_csum(struct net_device *netdev,
return; /* CQE csum covers all received bytes */
/* csum might need some fixups ...*/
- if (network_depth > ETH_HLEN)
- /* CQE csum is calculated from the IP header and does
- * not cover VLAN headers (if present). This will add
- * the checksum manually.
- */
- skb->csum = csum_partial(skb->data + ETH_HLEN,
- network_depth - ETH_HLEN,
- skb->csum);
-
- mlx5e_skb_padding_csum(skb, network_depth, proto, stats);
+ mlx5e_skb_csum_fixup(skb, network_depth, proto, stats);
return;
}
@@ -1065,11 +1070,6 @@ mlx5e_skb_from_cqe_linear(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe,
prefetchw(va); /* xdp_frame data area */
prefetch(data);
- if (unlikely(get_cqe_opcode(cqe) != MLX5_CQE_RESP_SEND)) {
- rq->stats->wqe_err++;
- return NULL;
- }
-
rcu_read_lock();
consumed = mlx5e_xdp_handle(rq, di, va, &rx_headroom, &cqe_bcnt, false);
rcu_read_unlock();
@@ -1097,11 +1097,6 @@ mlx5e_skb_from_cqe_nonlinear(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe,
u16 byte_cnt = cqe_bcnt - headlen;
struct sk_buff *skb;
- if (unlikely(get_cqe_opcode(cqe) != MLX5_CQE_RESP_SEND)) {
- rq->stats->wqe_err++;
- return NULL;
- }
-
/* XDP is not supported in this configuration, as incoming packets
* might spread among multiple pages.
*/
@@ -1135,6 +1130,15 @@ mlx5e_skb_from_cqe_nonlinear(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe,
return skb;
}
+static void trigger_report(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
+{
+ struct mlx5_err_cqe *err_cqe = (struct mlx5_err_cqe *)cqe;
+
+ if (cqe_syndrome_needs_recover(err_cqe->syndrome) &&
+ !test_and_set_bit(MLX5E_RQ_STATE_RECOVERING, &rq->state))
+ queue_work(rq->channel->priv->wq, &rq->recover_work);
+}
+
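
trigger_report() above relies on test_and_set_bit() so that a burst of error CQEs queues the recovery work exactly once. A compact user-space analogue using a C11 atomic flag (printf stands in for queue_work(); names are illustrative):

#include <stdatomic.h>
#include <stdio.h>

static atomic_flag recovering = ATOMIC_FLAG_INIT;

static void on_error_cqe(void)
{
	/* mirrors !test_and_set_bit(RECOVERING, &rq->state) */
	if (!atomic_flag_test_and_set(&recovering))
		printf("queue recovery work\n");
}

int main(void)
{
	for (int i = 0; i < 3; i++)
		on_error_cqe();   /* prints exactly once */
	return 0;
}
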
void mlx5e_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
{
struct mlx5_wq_cyc *wq = &rq->wqe.wq;
@@ -1147,6 +1151,12 @@ void mlx5e_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
wi = get_frag(rq, ci);
cqe_bcnt = be32_to_cpu(cqe->byte_cnt);
+ if (unlikely(MLX5E_RX_ERR_CQE(cqe))) {
+ trigger_report(rq, cqe);
+ rq->stats->wqe_err++;
+ goto free_wqe;
+ }
+
skb = INDIRECT_CALL_2(rq->wqe.skb_from_cqe,
mlx5e_skb_from_cqe_linear,
mlx5e_skb_from_cqe_nonlinear,
@@ -1188,6 +1198,11 @@ void mlx5e_handle_rx_cqe_rep(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
wi = get_frag(rq, ci);
cqe_bcnt = be32_to_cpu(cqe->byte_cnt);
+ if (unlikely(MLX5E_RX_ERR_CQE(cqe))) {
+ rq->stats->wqe_err++;
+ goto free_wqe;
+ }
+
skb = rq->wqe.skb_from_cqe(rq, cqe, wi, cqe_bcnt);
if (!skb) {
/* probably for XDP */
@@ -1322,7 +1337,8 @@ void mlx5e_handle_rx_cqe_mpwrq(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
wi->consumed_strides += cstrides;
- if (unlikely(get_cqe_opcode(cqe) != MLX5_CQE_RESP_SEND)) {
+ if (unlikely(MLX5E_RX_ERR_CQE(cqe))) {
+ trigger_report(rq, cqe);
rq->stats->wqe_err++;
goto mpwrq_cqe_out;
}
@@ -1498,6 +1514,11 @@ void mlx5i_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
wi = get_frag(rq, ci);
cqe_bcnt = be32_to_cpu(cqe->byte_cnt);
+ if (unlikely(MLX5E_RX_ERR_CQE(cqe))) {
+ rq->stats->wqe_err++;
+ goto wq_free_wqe;
+ }
+
skb = INDIRECT_CALL_2(rq->wqe.skb_from_cqe,
mlx5e_skb_from_cqe_linear,
mlx5e_skb_from_cqe_nonlinear,
@@ -1533,26 +1554,27 @@ void mlx5e_ipsec_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
wi = get_frag(rq, ci);
cqe_bcnt = be32_to_cpu(cqe->byte_cnt);
+ if (unlikely(MLX5E_RX_ERR_CQE(cqe))) {
+ rq->stats->wqe_err++;
+ goto wq_free_wqe;
+ }
+
skb = INDIRECT_CALL_2(rq->wqe.skb_from_cqe,
mlx5e_skb_from_cqe_linear,
mlx5e_skb_from_cqe_nonlinear,
rq, cqe, wi, cqe_bcnt);
- if (unlikely(!skb)) {
- /* a DROP, save the page-reuse checks */
- mlx5e_free_rx_wqe(rq, wi, true);
- goto wq_cyc_pop;
- }
+ if (unlikely(!skb)) /* a DROP, save the page-reuse checks */
+ goto wq_free_wqe;
+
skb = mlx5e_ipsec_handle_rx_skb(rq->netdev, skb, &cqe_bcnt);
- if (unlikely(!skb)) {
- mlx5e_free_rx_wqe(rq, wi, true);
- goto wq_cyc_pop;
- }
+ if (unlikely(!skb))
+ goto wq_free_wqe;
mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb);
napi_gro_receive(rq->cq.napi, skb);
+wq_free_wqe:
mlx5e_free_rx_wqe(rq, wi, true);
-wq_cyc_pop:
mlx5_wq_cyc_pop(wq);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
index 57f9f346d213..f1065e78086a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
@@ -74,6 +74,7 @@ static const struct counter_desc sw_stats_desc[] = {
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx_xmit) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx_mpwqe) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx_inlnw) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx_nops) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx_full) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx_err) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx_cqe) },
@@ -90,6 +91,7 @@ static const struct counter_desc sw_stats_desc[] = {
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xdp_xmit) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xdp_mpwqe) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xdp_inlnw) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xdp_nops) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xdp_full) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xdp_err) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xdp_cqes) },
@@ -107,6 +109,7 @@ static const struct counter_desc sw_stats_desc[] = {
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cache_waive) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_congst_umr) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_arfs_err) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_recover) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, ch_events) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, ch_poll) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, ch_arm) },
@@ -200,6 +203,7 @@ static void mlx5e_grp_sw_update_stats(struct mlx5e_priv *priv)
s->rx_xdp_tx_xmit += xdpsq_stats->xmit;
s->rx_xdp_tx_mpwqe += xdpsq_stats->mpwqe;
s->rx_xdp_tx_inlnw += xdpsq_stats->inlnw;
+ s->rx_xdp_tx_nops += xdpsq_stats->nops;
s->rx_xdp_tx_full += xdpsq_stats->full;
s->rx_xdp_tx_err += xdpsq_stats->err;
s->rx_xdp_tx_cqe += xdpsq_stats->cqes;
@@ -217,6 +221,7 @@ static void mlx5e_grp_sw_update_stats(struct mlx5e_priv *priv)
s->rx_cache_waive += rq_stats->cache_waive;
s->rx_congst_umr += rq_stats->congst_umr;
s->rx_arfs_err += rq_stats->arfs_err;
+ s->rx_recover += rq_stats->recover;
s->ch_events += ch_stats->events;
s->ch_poll += ch_stats->poll;
s->ch_arm += ch_stats->arm;
@@ -227,6 +232,7 @@ static void mlx5e_grp_sw_update_stats(struct mlx5e_priv *priv)
s->tx_xdp_xmit += xdpsq_red_stats->xmit;
s->tx_xdp_mpwqe += xdpsq_red_stats->mpwqe;
s->tx_xdp_inlnw += xdpsq_red_stats->inlnw;
+ s->tx_xdp_nops += xdpsq_red_stats->nops;
s->tx_xdp_full += xdpsq_red_stats->full;
s->tx_xdp_err += xdpsq_red_stats->err;
s->tx_xdp_cqes += xdpsq_red_stats->cqes;
@@ -363,17 +369,27 @@ static void mlx5e_grp_q_update_stats(struct mlx5e_priv *priv)
}
#define VNIC_ENV_OFF(c) MLX5_BYTE_OFF(query_vnic_env_out, c)
-static const struct counter_desc vnic_env_stats_desc[] = {
+static const struct counter_desc vnic_env_stats_steer_desc[] = {
{ "rx_steer_missed_packets",
VNIC_ENV_OFF(vport_env.nic_receive_steering_discard) },
};
-#define NUM_VNIC_ENV_COUNTERS ARRAY_SIZE(vnic_env_stats_desc)
+static const struct counter_desc vnic_env_stats_dev_oob_desc[] = {
+ { "dev_internal_queue_oob",
+ VNIC_ENV_OFF(vport_env.internal_rq_out_of_buffer) },
+};
+
+#define NUM_VNIC_ENV_STEER_COUNTERS(dev) \
+ (MLX5_CAP_GEN(dev, nic_receive_steering_discard) ? \
+ ARRAY_SIZE(vnic_env_stats_steer_desc) : 0)
+#define NUM_VNIC_ENV_DEV_OOB_COUNTERS(dev) \
+ (MLX5_CAP_GEN(dev, vnic_env_int_rq_oob) ? \
+ ARRAY_SIZE(vnic_env_stats_dev_oob_desc) : 0)
static int mlx5e_grp_vnic_env_get_num_stats(struct mlx5e_priv *priv)
{
- return MLX5_CAP_GEN(priv->mdev, nic_receive_steering_discard) ?
- NUM_VNIC_ENV_COUNTERS : 0;
+ return NUM_VNIC_ENV_STEER_COUNTERS(priv->mdev) +
+ NUM_VNIC_ENV_DEV_OOB_COUNTERS(priv->mdev);
}
static int mlx5e_grp_vnic_env_fill_strings(struct mlx5e_priv *priv, u8 *data,
@@ -381,12 +397,13 @@ static int mlx5e_grp_vnic_env_fill_strings(struct mlx5e_priv *priv, u8 *data,
{
int i;
- if (!MLX5_CAP_GEN(priv->mdev, nic_receive_steering_discard))
- return idx;
+ for (i = 0; i < NUM_VNIC_ENV_STEER_COUNTERS(priv->mdev); i++)
+ strcpy(data + (idx++) * ETH_GSTRING_LEN,
+ vnic_env_stats_steer_desc[i].format);
- for (i = 0; i < NUM_VNIC_ENV_COUNTERS; i++)
+ for (i = 0; i < NUM_VNIC_ENV_DEV_OOB_COUNTERS(priv->mdev); i++)
strcpy(data + (idx++) * ETH_GSTRING_LEN,
- vnic_env_stats_desc[i].format);
+ vnic_env_stats_dev_oob_desc[i].format);
return idx;
}
@@ -395,12 +412,13 @@ static int mlx5e_grp_vnic_env_fill_stats(struct mlx5e_priv *priv, u64 *data,
{
int i;
- if (!MLX5_CAP_GEN(priv->mdev, nic_receive_steering_discard))
- return idx;
-
- for (i = 0; i < NUM_VNIC_ENV_COUNTERS; i++)
+ for (i = 0; i < NUM_VNIC_ENV_STEER_COUNTERS(priv->mdev); i++)
data[idx++] = MLX5E_READ_CTR64_BE(priv->stats.vnic.query_vnic_env_out,
- vnic_env_stats_desc, i);
+ vnic_env_stats_steer_desc, i);
+
+ for (i = 0; i < NUM_VNIC_ENV_DEV_OOB_COUNTERS(priv->mdev); i++)
+ data[idx++] = MLX5E_READ_CTR32_BE(priv->stats.vnic.query_vnic_env_out,
+ vnic_env_stats_dev_oob_desc, i);
return idx;
}
@@ -1294,6 +1312,7 @@ static const struct counter_desc rq_stats_desc[] = {
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cache_waive) },
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, congst_umr) },
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, arfs_err) },
+ { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, recover) },
};
static const struct counter_desc sq_stats_desc[] = {
@@ -1331,6 +1350,7 @@ static const struct counter_desc rq_xdpsq_stats_desc[] = {
{ MLX5E_DECLARE_RQ_XDPSQ_STAT(struct mlx5e_xdpsq_stats, xmit) },
{ MLX5E_DECLARE_RQ_XDPSQ_STAT(struct mlx5e_xdpsq_stats, mpwqe) },
{ MLX5E_DECLARE_RQ_XDPSQ_STAT(struct mlx5e_xdpsq_stats, inlnw) },
+ { MLX5E_DECLARE_RQ_XDPSQ_STAT(struct mlx5e_xdpsq_stats, nops) },
{ MLX5E_DECLARE_RQ_XDPSQ_STAT(struct mlx5e_xdpsq_stats, full) },
{ MLX5E_DECLARE_RQ_XDPSQ_STAT(struct mlx5e_xdpsq_stats, err) },
{ MLX5E_DECLARE_RQ_XDPSQ_STAT(struct mlx5e_xdpsq_stats, cqes) },
@@ -1340,6 +1360,7 @@ static const struct counter_desc xdpsq_stats_desc[] = {
{ MLX5E_DECLARE_XDPSQ_STAT(struct mlx5e_xdpsq_stats, xmit) },
{ MLX5E_DECLARE_XDPSQ_STAT(struct mlx5e_xdpsq_stats, mpwqe) },
{ MLX5E_DECLARE_XDPSQ_STAT(struct mlx5e_xdpsq_stats, inlnw) },
+ { MLX5E_DECLARE_XDPSQ_STAT(struct mlx5e_xdpsq_stats, nops) },
{ MLX5E_DECLARE_XDPSQ_STAT(struct mlx5e_xdpsq_stats, full) },
{ MLX5E_DECLARE_XDPSQ_STAT(struct mlx5e_xdpsq_stats, err) },
{ MLX5E_DECLARE_XDPSQ_STAT(struct mlx5e_xdpsq_stats, cqes) },
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
index 76ac111e14d0..c281e567711d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
@@ -81,6 +81,7 @@ struct mlx5e_sw_stats {
u64 rx_xdp_tx_xmit;
u64 rx_xdp_tx_mpwqe;
u64 rx_xdp_tx_inlnw;
+ u64 rx_xdp_tx_nops;
u64 rx_xdp_tx_full;
u64 rx_xdp_tx_err;
u64 rx_xdp_tx_cqe;
@@ -97,6 +98,7 @@ struct mlx5e_sw_stats {
u64 tx_xdp_xmit;
u64 tx_xdp_mpwqe;
u64 tx_xdp_inlnw;
+ u64 tx_xdp_nops;
u64 tx_xdp_full;
u64 tx_xdp_err;
u64 tx_xdp_cqes;
@@ -114,6 +116,7 @@ struct mlx5e_sw_stats {
u64 rx_cache_waive;
u64 rx_congst_umr;
u64 rx_arfs_err;
+ u64 rx_recover;
u64 ch_events;
u64 ch_poll;
u64 ch_arm;
@@ -247,6 +250,7 @@ struct mlx5e_rq_stats {
u64 cache_waive;
u64 congst_umr;
u64 arfs_err;
+ u64 recover;
};
struct mlx5e_sq_stats {
@@ -288,6 +292,7 @@ struct mlx5e_xdpsq_stats {
u64 xmit;
u64 mpwqe;
u64 inlnw;
+ u64 nops;
u64 full;
u64 err;
/* dirtied @completion */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
index 00b2d4a86159..5581a8045ede 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
@@ -38,6 +38,8 @@
#include <linux/mlx5/fs.h>
#include <linux/mlx5/device.h>
#include <linux/rhashtable.h>
+#include <linux/refcount.h>
+#include <linux/completion.h>
#include <net/tc_act/tc_mirred.h>
#include <net/tc_act/tc_vlan.h>
#include <net/tc_act/tc_tunnel_key.h>
@@ -54,6 +56,7 @@
#include "en/tc_tun.h"
#include "lib/devcom.h"
#include "lib/geneve.h"
+#include "diag/en_tc_tracepoint.h"
struct mlx5_nic_flow_attr {
u32 action;
@@ -65,19 +68,20 @@ struct mlx5_nic_flow_attr {
struct mlx5_fc *counter;
};
-#define MLX5E_TC_FLOW_BASE (MLX5E_TC_LAST_EXPORTED_BIT + 1)
+#define MLX5E_TC_FLOW_BASE (MLX5E_TC_FLAG_LAST_EXPORTED_BIT + 1)
enum {
- MLX5E_TC_FLOW_INGRESS = MLX5E_TC_INGRESS,
- MLX5E_TC_FLOW_EGRESS = MLX5E_TC_EGRESS,
- MLX5E_TC_FLOW_ESWITCH = MLX5E_TC_ESW_OFFLOAD,
- MLX5E_TC_FLOW_NIC = MLX5E_TC_NIC_OFFLOAD,
- MLX5E_TC_FLOW_OFFLOADED = BIT(MLX5E_TC_FLOW_BASE),
- MLX5E_TC_FLOW_HAIRPIN = BIT(MLX5E_TC_FLOW_BASE + 1),
- MLX5E_TC_FLOW_HAIRPIN_RSS = BIT(MLX5E_TC_FLOW_BASE + 2),
- MLX5E_TC_FLOW_SLOW = BIT(MLX5E_TC_FLOW_BASE + 3),
- MLX5E_TC_FLOW_DUP = BIT(MLX5E_TC_FLOW_BASE + 4),
- MLX5E_TC_FLOW_NOT_READY = BIT(MLX5E_TC_FLOW_BASE + 5),
+ MLX5E_TC_FLOW_FLAG_INGRESS = MLX5E_TC_FLAG_INGRESS_BIT,
+ MLX5E_TC_FLOW_FLAG_EGRESS = MLX5E_TC_FLAG_EGRESS_BIT,
+ MLX5E_TC_FLOW_FLAG_ESWITCH = MLX5E_TC_FLAG_ESW_OFFLOAD_BIT,
+ MLX5E_TC_FLOW_FLAG_NIC = MLX5E_TC_FLAG_NIC_OFFLOAD_BIT,
+ MLX5E_TC_FLOW_FLAG_OFFLOADED = MLX5E_TC_FLOW_BASE,
+ MLX5E_TC_FLOW_FLAG_HAIRPIN = MLX5E_TC_FLOW_BASE + 1,
+ MLX5E_TC_FLOW_FLAG_HAIRPIN_RSS = MLX5E_TC_FLOW_BASE + 2,
+ MLX5E_TC_FLOW_FLAG_SLOW = MLX5E_TC_FLOW_BASE + 3,
+ MLX5E_TC_FLOW_FLAG_DUP = MLX5E_TC_FLOW_BASE + 4,
+ MLX5E_TC_FLOW_FLAG_NOT_READY = MLX5E_TC_FLOW_BASE + 5,
+ MLX5E_TC_FLOW_FLAG_DELETED = MLX5E_TC_FLOW_BASE + 6,
};
#define MLX5E_TC_MAX_SPLITS 1
@@ -100,6 +104,7 @@ enum {
* container_of(helper item, containing struct type, helper field[index])
*/
struct encap_flow_item {
+ struct mlx5e_encap_entry *e; /* attached encap instance */
struct list_head list;
int index;
};
@@ -108,7 +113,7 @@ struct mlx5e_tc_flow {
struct rhash_head node;
struct mlx5e_priv *priv;
u64 cookie;
- u16 flags;
+ unsigned long flags;
struct mlx5_flow_handle *rule[MLX5E_TC_MAX_SPLITS + 1];
/* Flow can be associated with multiple encap IDs.
* The number of encaps is bounded by the number of supported
@@ -116,10 +121,17 @@ struct mlx5e_tc_flow {
*/
struct encap_flow_item encaps[MLX5_MAX_FLOW_FWD_VPORTS];
struct mlx5e_tc_flow *peer_flow;
+ struct mlx5e_mod_hdr_entry *mh; /* attached mod header instance */
struct list_head mod_hdr; /* flows sharing the same mod hdr ID */
+ struct mlx5e_hairpin_entry *hpe; /* attached hairpin instance */
struct list_head hairpin; /* flows sharing the same hairpin */
struct list_head peer; /* flows with peer flow */
struct list_head unready; /* flows not ready to be offloaded (e.g due to missing route) */
+ int tmp_efi_index;
+ struct list_head tmp_list; /* temporary flow list used by neigh update */
+ refcount_t refcnt;
+ struct rcu_head rcu_head;
+ struct completion init_done;
union {
struct mlx5_esw_flow_attr esw_attr[0];
struct mlx5_nic_flow_attr nic_attr[0];
@@ -157,12 +169,20 @@ struct mlx5e_hairpin_entry {
/* a node of a hash table which keeps all the hairpin entries */
struct hlist_node hairpin_hlist;
+ /* protects flows list */
+ spinlock_t flows_lock;
/* flows sharing the same hairpin */
struct list_head flows;
+ /* hpe's that were not fully initialized when the dead peer update
+ * event handler traversed them.
+ */
+ struct list_head dead_peer_wait_list;
u16 peer_vhca_id;
u8 prio;
struct mlx5e_hairpin *hp;
+ refcount_t refcnt;
+ struct completion res_ready;
};
struct mod_hdr_key {
@@ -174,16 +194,93 @@ struct mlx5e_mod_hdr_entry {
/* a node of a hash table which keeps all the mod_hdr entries */
struct hlist_node mod_hdr_hlist;
+ /* protects flows list */
+ spinlock_t flows_lock;
/* flows sharing the same mod_hdr entry */
struct list_head flows;
struct mod_hdr_key key;
u32 mod_hdr_id;
+
+ refcount_t refcnt;
+ struct completion res_ready;
+ int compl_result;
};
#define MLX5_MH_ACT_SZ MLX5_UN_SZ_BYTES(set_action_in_add_action_in_auto)
+static void mlx5e_tc_del_flow(struct mlx5e_priv *priv,
+ struct mlx5e_tc_flow *flow);
+
+static struct mlx5e_tc_flow *mlx5e_flow_get(struct mlx5e_tc_flow *flow)
+{
+ if (!flow || !refcount_inc_not_zero(&flow->refcnt))
+ return ERR_PTR(-EINVAL);
+ return flow;
+}
+
+static void mlx5e_flow_put(struct mlx5e_priv *priv,
+ struct mlx5e_tc_flow *flow)
+{
+ if (refcount_dec_and_test(&flow->refcnt)) {
+ mlx5e_tc_del_flow(priv, flow);
+ kfree_rcu(flow, rcu_head);
+ }
+}
+
+static void __flow_flag_set(struct mlx5e_tc_flow *flow, unsigned long flag)
+{
+ /* Complete all memory stores before setting bit. */
+ smp_mb__before_atomic();
+ set_bit(flag, &flow->flags);
+}
+
+#define flow_flag_set(flow, flag) __flow_flag_set(flow, MLX5E_TC_FLOW_FLAG_##flag)
+
+static bool __flow_flag_test_and_set(struct mlx5e_tc_flow *flow,
+ unsigned long flag)
+{
+ /* test_and_set_bit() provides all necessary barriers */
+ return test_and_set_bit(flag, &flow->flags);
+}
+
+#define flow_flag_test_and_set(flow, flag) \
+ __flow_flag_test_and_set(flow, \
+ MLX5E_TC_FLOW_FLAG_##flag)
+
+static void __flow_flag_clear(struct mlx5e_tc_flow *flow, unsigned long flag)
+{
+ /* Complete all memory stores before clearing bit. */
+ smp_mb__before_atomic();
+ clear_bit(flag, &flow->flags);
+}
+
+#define flow_flag_clear(flow, flag) __flow_flag_clear(flow, \
+ MLX5E_TC_FLOW_FLAG_##flag)
+
+static bool __flow_flag_test(struct mlx5e_tc_flow *flow, unsigned long flag)
+{
+ bool ret = test_bit(flag, &flow->flags);
+
+ /* Read fields of flow structure only after checking flags. */
+ smp_mb__after_atomic();
+ return ret;
+}
+
+#define flow_flag_test(flow, flag) __flow_flag_test(flow, \
+ MLX5E_TC_FLOW_FLAG_##flag)
+
+static bool mlx5e_is_eswitch_flow(struct mlx5e_tc_flow *flow)
+{
+ return flow_flag_test(flow, ESWITCH);
+}
+
+static bool mlx5e_is_offloaded_flow(struct mlx5e_tc_flow *flow)
+{
+ return flow_flag_test(flow, OFFLOADED);
+}
+
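
The flag helpers above pair smp_mb__before_atomic() on the setter with smp_mb__after_atomic() on the tester, so fields written before a flag is set are visible to any reader that observes the flag. A rough user-space analogue with C11 release/acquire atomics (illustrative only, not the kernel primitives):

#include <stdatomic.h>
#include <stdbool.h>

struct flow {
	int rule;            /* plain field published via the flag */
	atomic_ulong flags;
};

#define FLAG_OFFLOADED 0UL

static void flow_flag_set(struct flow *f, unsigned long flag)
{
	/* release: orders the ->rule store before the flag store */
	atomic_fetch_or_explicit(&f->flags, 1UL << flag,
				 memory_order_release);
}

static bool flow_flag_test(struct flow *f, unsigned long flag)
{
	/* acquire: orders the flag load before later ->rule loads */
	return atomic_load_explicit(&f->flags, memory_order_acquire) &
	       (1UL << flag);
}

int main(void)
{
	struct flow f = { .rule = 0, .flags = 0 };

	f.rule = 42;                        /* written first ... */
	flow_flag_set(&f, FLAG_OFFLOADED);  /* ... then published */
	return flow_flag_test(&f, FLAG_OFFLOADED) ? 0 : 1;
}
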
static inline u32 hash_mod_hdr_info(struct mod_hdr_key *key)
{
return jhash(key->actions,
@@ -199,15 +296,62 @@ static inline int cmp_mod_hdr_info(struct mod_hdr_key *a,
return memcmp(a->actions, b->actions, a->num_actions * MLX5_MH_ACT_SZ);
}
+static struct mod_hdr_tbl *
+get_mod_hdr_table(struct mlx5e_priv *priv, int namespace)
+{
+ struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
+
+ return namespace == MLX5_FLOW_NAMESPACE_FDB ? &esw->offloads.mod_hdr :
+ &priv->fs.tc.mod_hdr;
+}
+
+static struct mlx5e_mod_hdr_entry *
+mlx5e_mod_hdr_get(struct mod_hdr_tbl *tbl, struct mod_hdr_key *key, u32 hash_key)
+{
+ struct mlx5e_mod_hdr_entry *mh, *found = NULL;
+
+ hash_for_each_possible(tbl->hlist, mh, mod_hdr_hlist, hash_key) {
+ if (!cmp_mod_hdr_info(&mh->key, key)) {
+ refcount_inc(&mh->refcnt);
+ found = mh;
+ break;
+ }
+ }
+
+ return found;
+}
+
+static void mlx5e_mod_hdr_put(struct mlx5e_priv *priv,
+ struct mlx5e_mod_hdr_entry *mh,
+ int namespace)
+{
+ struct mod_hdr_tbl *tbl = get_mod_hdr_table(priv, namespace);
+
+ if (!refcount_dec_and_mutex_lock(&mh->refcnt, &tbl->lock))
+ return;
+ hash_del(&mh->mod_hdr_hlist);
+ mutex_unlock(&tbl->lock);
+
+ WARN_ON(!list_empty(&mh->flows));
+ if (mh->compl_result > 0)
+ mlx5_modify_header_dealloc(priv->mdev, mh->mod_hdr_id);
+
+ kfree(mh);
+}
+
+static int get_flow_name_space(struct mlx5e_tc_flow *flow)
+{
+ return mlx5e_is_eswitch_flow(flow) ?
+ MLX5_FLOW_NAMESPACE_FDB : MLX5_FLOW_NAMESPACE_KERNEL;
+}
static int mlx5e_attach_mod_hdr(struct mlx5e_priv *priv,
struct mlx5e_tc_flow *flow,
struct mlx5e_tc_flow_parse_attr *parse_attr)
{
- struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
int num_actions, actions_size, namespace, err;
struct mlx5e_mod_hdr_entry *mh;
+ struct mod_hdr_tbl *tbl;
struct mod_hdr_key key;
- bool found = false;
u32 hash_key;
num_actions = parse_attr->num_mod_hdr_actions;
@@ -218,80 +362,82 @@ static int mlx5e_attach_mod_hdr(struct mlx5e_priv *priv,
hash_key = hash_mod_hdr_info(&key);
- if (flow->flags & MLX5E_TC_FLOW_ESWITCH) {
- namespace = MLX5_FLOW_NAMESPACE_FDB;
- hash_for_each_possible(esw->offloads.mod_hdr_tbl, mh,
- mod_hdr_hlist, hash_key) {
- if (!cmp_mod_hdr_info(&mh->key, &key)) {
- found = true;
- break;
- }
- }
- } else {
- namespace = MLX5_FLOW_NAMESPACE_KERNEL;
- hash_for_each_possible(priv->fs.tc.mod_hdr_tbl, mh,
- mod_hdr_hlist, hash_key) {
- if (!cmp_mod_hdr_info(&mh->key, &key)) {
- found = true;
- break;
- }
- }
- }
+ namespace = get_flow_name_space(flow);
+ tbl = get_mod_hdr_table(priv, namespace);
- if (found)
+ mutex_lock(&tbl->lock);
+ mh = mlx5e_mod_hdr_get(tbl, &key, hash_key);
+ if (mh) {
+ mutex_unlock(&tbl->lock);
+ wait_for_completion(&mh->res_ready);
+
+ if (mh->compl_result < 0) {
+ err = -EREMOTEIO;
+ goto attach_header_err;
+ }
goto attach_flow;
+ }
mh = kzalloc(sizeof(*mh) + actions_size, GFP_KERNEL);
- if (!mh)
+ if (!mh) {
+ mutex_unlock(&tbl->lock);
return -ENOMEM;
+ }
mh->key.actions = (void *)mh + sizeof(*mh);
memcpy(mh->key.actions, key.actions, actions_size);
mh->key.num_actions = num_actions;
+ spin_lock_init(&mh->flows_lock);
INIT_LIST_HEAD(&mh->flows);
+ refcount_set(&mh->refcnt, 1);
+ init_completion(&mh->res_ready);
+
+ hash_add(tbl->hlist, &mh->mod_hdr_hlist, hash_key);
+ mutex_unlock(&tbl->lock);
err = mlx5_modify_header_alloc(priv->mdev, namespace,
mh->key.num_actions,
mh->key.actions,
&mh->mod_hdr_id);
- if (err)
- goto out_err;
-
- if (flow->flags & MLX5E_TC_FLOW_ESWITCH)
- hash_add(esw->offloads.mod_hdr_tbl, &mh->mod_hdr_hlist, hash_key);
- else
- hash_add(priv->fs.tc.mod_hdr_tbl, &mh->mod_hdr_hlist, hash_key);
+ if (err) {
+ mh->compl_result = err;
+ goto alloc_header_err;
+ }
+ mh->compl_result = 1;
+ complete_all(&mh->res_ready);
attach_flow:
+ flow->mh = mh;
+ spin_lock(&mh->flows_lock);
list_add(&flow->mod_hdr, &mh->flows);
- if (flow->flags & MLX5E_TC_FLOW_ESWITCH)
+ spin_unlock(&mh->flows_lock);
+ if (mlx5e_is_eswitch_flow(flow))
flow->esw_attr->mod_hdr_id = mh->mod_hdr_id;
else
flow->nic_attr->mod_hdr_id = mh->mod_hdr_id;
return 0;
-out_err:
- kfree(mh);
+alloc_header_err:
+ complete_all(&mh->res_ready);
+attach_header_err:
+ mlx5e_mod_hdr_put(priv, mh, namespace);
return err;
}
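
mlx5e_attach_mod_hdr() now publishes a placeholder entry under the table lock and finishes the slow allocation outside it; concurrent users find the entry, drop the lock, block on res_ready, then check compl_result. A simplified pthread model of that completion handshake (single-threaded demo, names illustrative):

#include <pthread.h>

struct entry {
	pthread_mutex_t lock;
	pthread_cond_t res_ready;
	int done;            /* models the completion being signalled */
	int compl_result;    /* < 0: init failed; > 0: usable */
};

static void entry_complete(struct entry *e, int result)
{
	pthread_mutex_lock(&e->lock);
	e->compl_result = result;
	e->done = 1;
	pthread_cond_broadcast(&e->res_ready); /* complete_all() */
	pthread_mutex_unlock(&e->lock);
}

static int entry_wait(struct entry *e)
{
	pthread_mutex_lock(&e->lock);
	while (!e->done)                       /* wait_for_completion() */
		pthread_cond_wait(&e->res_ready, &e->lock);
	pthread_mutex_unlock(&e->lock);
	return e->compl_result;
}

int main(void)
{
	struct entry e = {
		.lock = PTHREAD_MUTEX_INITIALIZER,
		.res_ready = PTHREAD_COND_INITIALIZER,
	};

	entry_complete(&e, 1);   /* creator finished successfully */
	return entry_wait(&e) > 0 ? 0 : 1;
}
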
static void mlx5e_detach_mod_hdr(struct mlx5e_priv *priv,
struct mlx5e_tc_flow *flow)
{
- struct list_head *next = flow->mod_hdr.next;
+ /* flow wasn't fully initialized */
+ if (!flow->mh)
+ return;
+ spin_lock(&flow->mh->flows_lock);
list_del(&flow->mod_hdr);
+ spin_unlock(&flow->mh->flows_lock);
- if (list_empty(next)) {
- struct mlx5e_mod_hdr_entry *mh;
-
- mh = list_entry(next, struct mlx5e_mod_hdr_entry, flows);
-
- mlx5_modify_header_dealloc(priv->mdev, mh->mod_hdr_id);
- hash_del(&mh->mod_hdr_hlist);
- kfree(mh);
- }
+ mlx5e_mod_hdr_put(priv, flow->mh, get_flow_name_space(flow));
+ flow->mh = NULL;
}
static
@@ -555,13 +701,35 @@ static struct mlx5e_hairpin_entry *mlx5e_hairpin_get(struct mlx5e_priv *priv,
hash_for_each_possible(priv->fs.tc.hairpin_tbl, hpe,
hairpin_hlist, hash_key) {
- if (hpe->peer_vhca_id == peer_vhca_id && hpe->prio == prio)
+ if (hpe->peer_vhca_id == peer_vhca_id && hpe->prio == prio) {
+ refcount_inc(&hpe->refcnt);
return hpe;
+ }
}
return NULL;
}
+static void mlx5e_hairpin_put(struct mlx5e_priv *priv,
+ struct mlx5e_hairpin_entry *hpe)
+{
+ /* no more hairpin flows for us, release the hairpin pair */
+ if (!refcount_dec_and_mutex_lock(&hpe->refcnt, &priv->fs.tc.hairpin_tbl_lock))
+ return;
+ hash_del(&hpe->hairpin_hlist);
+ mutex_unlock(&priv->fs.tc.hairpin_tbl_lock);
+
+ if (!IS_ERR_OR_NULL(hpe->hp)) {
+ netdev_dbg(priv->netdev, "del hairpin: peer %s\n",
+ dev_name(hpe->hp->pair->peer_mdev->device));
+
+ mlx5e_hairpin_destroy(hpe->hp);
+ }
+
+ WARN_ON(!list_empty(&hpe->flows));
+ kfree(hpe);
+}
+
#define UNKNOWN_MATCH_PRIO 8
static int mlx5e_hairpin_get_prio(struct mlx5e_priv *priv,
@@ -627,17 +795,37 @@ static int mlx5e_hairpin_flow_add(struct mlx5e_priv *priv,
extack);
if (err)
return err;
+
+ mutex_lock(&priv->fs.tc.hairpin_tbl_lock);
hpe = mlx5e_hairpin_get(priv, peer_id, match_prio);
- if (hpe)
+ if (hpe) {
+ mutex_unlock(&priv->fs.tc.hairpin_tbl_lock);
+ wait_for_completion(&hpe->res_ready);
+
+ if (IS_ERR(hpe->hp)) {
+ err = -EREMOTEIO;
+ goto out_err;
+ }
goto attach_flow;
+ }
hpe = kzalloc(sizeof(*hpe), GFP_KERNEL);
- if (!hpe)
+ if (!hpe) {
+ mutex_unlock(&priv->fs.tc.hairpin_tbl_lock);
return -ENOMEM;
+ }
+ spin_lock_init(&hpe->flows_lock);
INIT_LIST_HEAD(&hpe->flows);
+ INIT_LIST_HEAD(&hpe->dead_peer_wait_list);
hpe->peer_vhca_id = peer_id;
hpe->prio = match_prio;
+ refcount_set(&hpe->refcnt, 1);
+ init_completion(&hpe->res_ready);
+
+ hash_add(priv->fs.tc.hairpin_tbl, &hpe->hairpin_hlist,
+ hash_hairpin_info(peer_id, match_prio));
+ mutex_unlock(&priv->fs.tc.hairpin_tbl_lock);
params.log_data_size = 15;
params.log_data_size = min_t(u8, params.log_data_size,
@@ -659,9 +847,11 @@ static int mlx5e_hairpin_flow_add(struct mlx5e_priv *priv,
params.num_channels = link_speed64;
hp = mlx5e_hairpin_create(priv, &params, peer_ifindex);
+ hpe->hp = hp;
+ complete_all(&hpe->res_ready);
if (IS_ERR(hp)) {
err = PTR_ERR(hp);
- goto create_hairpin_err;
+ goto out_err;
}
netdev_dbg(priv->netdev, "add hairpin: tirn %x rqn %x peer %s sqn %x prio %d (log) data %d packets %d\n",
@@ -669,46 +859,39 @@ static int mlx5e_hairpin_flow_add(struct mlx5e_priv *priv,
dev_name(hp->pair->peer_mdev->device),
hp->pair->sqn[0], match_prio, params.log_data_size, params.log_num_packets);
- hpe->hp = hp;
- hash_add(priv->fs.tc.hairpin_tbl, &hpe->hairpin_hlist,
- hash_hairpin_info(peer_id, match_prio));
-
attach_flow:
if (hpe->hp->num_channels > 1) {
- flow->flags |= MLX5E_TC_FLOW_HAIRPIN_RSS;
+ flow_flag_set(flow, HAIRPIN_RSS);
flow->nic_attr->hairpin_ft = hpe->hp->ttc.ft.t;
} else {
flow->nic_attr->hairpin_tirn = hpe->hp->tirn;
}
+
+ flow->hpe = hpe;
+ spin_lock(&hpe->flows_lock);
list_add(&flow->hairpin, &hpe->flows);
+ spin_unlock(&hpe->flows_lock);
return 0;
-create_hairpin_err:
- kfree(hpe);
+out_err:
+ mlx5e_hairpin_put(priv, hpe);
return err;
}
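The rewritten add path publishes the hairpin entry in the hash table first (under hairpin_tbl_lock, with res_ready initialized) and performs the expensive pair creation after dropping the lock; concurrent adders that find the entry call wait_for_completion(&hpe->res_ready) and then inspect hpe->hp for failure. A hand-rolled userspace equivalent of the completion object this relies on (a sketch only; the kernel's struct completion also supports interruptible and timed waits):

#include <pthread.h>
#include <stdbool.h>

/* Minimal analogue of the kernel's struct completion. */
struct completion {
	pthread_mutex_t lock;
	pthread_cond_t cond;
	bool done;
};

#define COMPLETION_INITIALIZER \
	{ PTHREAD_MUTEX_INITIALIZER, PTHREAD_COND_INITIALIZER, false }

/* Producer: mark the result ready, then wake every waiter. */
static void complete_all(struct completion *c)
{
	pthread_mutex_lock(&c->lock);
	c->done = true;
	pthread_cond_broadcast(&c->cond);
	pthread_mutex_unlock(&c->lock);
}

/* Consumer: sleep until the producer has finished initialization. */
static void wait_for_completion(struct completion *c)
{
	pthread_mutex_lock(&c->lock);
	while (!c->done)
		pthread_cond_wait(&c->cond, &c->lock);
	pthread_mutex_unlock(&c->lock);
}

The property mirrored from the driver: an entry becomes findable before it becomes usable, so every lookup must pair with a wait on res_ready plus an error check, and never assume the payload is valid.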
static void mlx5e_hairpin_flow_del(struct mlx5e_priv *priv,
struct mlx5e_tc_flow *flow)
{
- struct list_head *next = flow->hairpin.next;
+ /* flow wasn't fully initialized */
+ if (!flow->hpe)
+ return;
+ spin_lock(&flow->hpe->flows_lock);
list_del(&flow->hairpin);
+ spin_unlock(&flow->hpe->flows_lock);
- /* no more hairpin flows for us, release the hairpin pair */
- if (list_empty(next)) {
- struct mlx5e_hairpin_entry *hpe;
-
- hpe = list_entry(next, struct mlx5e_hairpin_entry, flows);
-
- netdev_dbg(priv->netdev, "del hairpin: peer %s\n",
- dev_name(hpe->hp->pair->peer_mdev->device));
-
- mlx5e_hairpin_destroy(hpe->hp);
- hash_del(&hpe->hairpin_hlist);
- kfree(hpe);
- }
+ mlx5e_hairpin_put(priv, flow->hpe);
+ flow->hpe = NULL;
}
static int
@@ -727,18 +910,17 @@ mlx5e_tc_add_nic_flow(struct mlx5e_priv *priv,
.flags = FLOW_ACT_NO_APPEND,
};
struct mlx5_fc *counter = NULL;
- bool table_created = false;
int err, dest_ix = 0;
flow_context->flags |= FLOW_CONTEXT_HAS_TAG;
flow_context->flow_tag = attr->flow_tag;
- if (flow->flags & MLX5E_TC_FLOW_HAIRPIN) {
+ if (flow_flag_test(flow, HAIRPIN)) {
err = mlx5e_hairpin_flow_add(priv, flow, parse_attr, extack);
- if (err) {
- goto err_add_hairpin_flow;
- }
- if (flow->flags & MLX5E_TC_FLOW_HAIRPIN_RSS) {
+ if (err)
+ return err;
+
+ if (flow_flag_test(flow, HAIRPIN_RSS)) {
dest[dest_ix].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
dest[dest_ix].ft = attr->hairpin_ft;
} else {
@@ -754,10 +936,9 @@ mlx5e_tc_add_nic_flow(struct mlx5e_priv *priv,
if (attr->action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
counter = mlx5_fc_create(dev, true);
- if (IS_ERR(counter)) {
- err = PTR_ERR(counter);
- goto err_fc_create;
- }
+ if (IS_ERR(counter))
+ return PTR_ERR(counter);
+
dest[dest_ix].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
dest[dest_ix].counter_id = mlx5_fc_id(counter);
dest_ix++;
@@ -769,9 +950,10 @@ mlx5e_tc_add_nic_flow(struct mlx5e_priv *priv,
flow_act.modify_id = attr->mod_hdr_id;
kfree(parse_attr->mod_hdr_actions);
if (err)
- goto err_create_mod_hdr_id;
+ return err;
}
+ mutex_lock(&priv->fs.tc.t_lock);
if (IS_ERR_OR_NULL(priv->fs.tc.t)) {
int tc_grp_size, tc_tbl_size;
u32 max_flow_counter;
@@ -791,15 +973,13 @@ mlx5e_tc_add_nic_flow(struct mlx5e_priv *priv,
MLX5E_TC_TABLE_NUM_GROUPS,
MLX5E_TC_FT_LEVEL, 0);
if (IS_ERR(priv->fs.tc.t)) {
+ mutex_unlock(&priv->fs.tc.t_lock);
NL_SET_ERR_MSG_MOD(extack,
"Failed to create tc offload table\n");
netdev_err(priv->netdev,
"Failed to create tc offload table\n");
- err = PTR_ERR(priv->fs.tc.t);
- goto err_create_ft;
+ return PTR_ERR(priv->fs.tc.t);
}
-
- table_created = true;
}
if (attr->match_level != MLX5_MATCH_NONE)
@@ -807,29 +987,12 @@ mlx5e_tc_add_nic_flow(struct mlx5e_priv *priv,
flow->rule[0] = mlx5_add_flow_rules(priv->fs.tc.t, &parse_attr->spec,
&flow_act, dest, dest_ix);
+ mutex_unlock(&priv->fs.tc.t_lock);
- if (IS_ERR(flow->rule[0])) {
- err = PTR_ERR(flow->rule[0]);
- goto err_add_rule;
- }
+ if (IS_ERR(flow->rule[0]))
+ return PTR_ERR(flow->rule[0]);
return 0;
-
-err_add_rule:
- if (table_created) {
- mlx5_destroy_flow_table(priv->fs.tc.t);
- priv->fs.tc.t = NULL;
- }
-err_create_ft:
- if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
- mlx5e_detach_mod_hdr(priv, flow);
-err_create_mod_hdr_id:
- mlx5_fc_destroy(dev, counter);
-err_fc_create:
- if (flow->flags & MLX5E_TC_FLOW_HAIRPIN)
- mlx5e_hairpin_flow_del(priv, flow);
-err_add_hairpin_flow:
- return err;
}
static void mlx5e_tc_del_nic_flow(struct mlx5e_priv *priv,
@@ -839,18 +1002,21 @@ static void mlx5e_tc_del_nic_flow(struct mlx5e_priv *priv,
struct mlx5_fc *counter = NULL;
counter = attr->counter;
- mlx5_del_flow_rules(flow->rule[0]);
+ if (!IS_ERR_OR_NULL(flow->rule[0]))
+ mlx5_del_flow_rules(flow->rule[0]);
mlx5_fc_destroy(priv->mdev, counter);
- if (!mlx5e_tc_num_filters(priv, MLX5E_TC_NIC_OFFLOAD) && priv->fs.tc.t) {
+ mutex_lock(&priv->fs.tc.t_lock);
+ if (!mlx5e_tc_num_filters(priv, MLX5_TC_FLAG(NIC_OFFLOAD)) && priv->fs.tc.t) {
mlx5_destroy_flow_table(priv->fs.tc.t);
priv->fs.tc.t = NULL;
}
+ mutex_unlock(&priv->fs.tc.t_lock);
if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
mlx5e_detach_mod_hdr(priv, flow);
- if (flow->flags & MLX5E_TC_FLOW_HAIRPIN)
+ if (flow_flag_test(flow, HAIRPIN))
mlx5e_hairpin_flow_del(priv, flow);
}
@@ -885,7 +1051,6 @@ mlx5e_tc_offload_fdb_rules(struct mlx5_eswitch *esw,
}
}
- flow->flags |= MLX5E_TC_FLOW_OFFLOADED;
return rule;
}
@@ -894,7 +1059,7 @@ mlx5e_tc_unoffload_fdb_rules(struct mlx5_eswitch *esw,
struct mlx5e_tc_flow *flow,
struct mlx5_esw_flow_attr *attr)
{
- flow->flags &= ~MLX5E_TC_FLOW_OFFLOADED;
+ flow_flag_clear(flow, OFFLOADED);
if (attr->split_count)
mlx5_eswitch_del_fwd_rule(esw, flow->rule[1], attr);
@@ -917,7 +1082,7 @@ mlx5e_tc_offload_to_slow_path(struct mlx5_eswitch *esw,
rule = mlx5e_tc_offload_fdb_rules(esw, flow, spec, slow_attr);
if (!IS_ERR(rule))
- flow->flags |= MLX5E_TC_FLOW_SLOW;
+ flow_flag_set(flow, SLOW);
return rule;
}
@@ -932,7 +1097,26 @@ mlx5e_tc_unoffload_from_slow_path(struct mlx5_eswitch *esw,
slow_attr->split_count = 0;
slow_attr->dest_chain = FDB_SLOW_PATH_CHAIN;
mlx5e_tc_unoffload_fdb_rules(esw, flow, slow_attr);
- flow->flags &= ~MLX5E_TC_FLOW_SLOW;
+ flow_flag_clear(flow, SLOW);
+}
+
+/* Caller must obtain uplink_priv->unready_flows_lock mutex before calling this
+ * function.
+ */
+static void unready_flow_add(struct mlx5e_tc_flow *flow,
+ struct list_head *unready_flows)
+{
+ flow_flag_set(flow, NOT_READY);
+ list_add_tail(&flow->unready, unready_flows);
+}
+
+/* Caller must obtain uplink_priv->unready_flows_lock mutex before calling this
+ * function.
+ */
+static void unready_flow_del(struct mlx5e_tc_flow *flow)
+{
+ list_del(&flow->unready);
+ flow_flag_clear(flow, NOT_READY);
}
static void add_unready_flow(struct mlx5e_tc_flow *flow)
@@ -945,14 +1129,24 @@ static void add_unready_flow(struct mlx5e_tc_flow *flow)
rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
uplink_priv = &rpriv->uplink_priv;
- flow->flags |= MLX5E_TC_FLOW_NOT_READY;
- list_add_tail(&flow->unready, &uplink_priv->unready_flows);
+ mutex_lock(&uplink_priv->unready_flows_lock);
+ unready_flow_add(flow, &uplink_priv->unready_flows);
+ mutex_unlock(&uplink_priv->unready_flows_lock);
}
static void remove_unready_flow(struct mlx5e_tc_flow *flow)
{
- list_del(&flow->unready);
- flow->flags &= ~MLX5E_TC_FLOW_NOT_READY;
+ struct mlx5_rep_uplink_priv *uplink_priv;
+ struct mlx5e_rep_priv *rpriv;
+ struct mlx5_eswitch *esw;
+
+ esw = flow->priv->mdev->priv.eswitch;
+ rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
+ uplink_priv = &rpriv->uplink_priv;
+
+ mutex_lock(&uplink_priv->unready_flows_lock);
+ unready_flow_del(flow);
+ mutex_unlock(&uplink_priv->unready_flows_lock);
}
static int
@@ -980,14 +1174,12 @@ mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
if (attr->chain > max_chain) {
NL_SET_ERR_MSG(extack, "Requested chain is out of supported range");
- err = -EOPNOTSUPP;
- goto err_max_prio_chain;
+ return -EOPNOTSUPP;
}
if (attr->prio > max_prio) {
NL_SET_ERR_MSG(extack, "Requested priority is out of supported range");
- err = -EOPNOTSUPP;
- goto err_max_prio_chain;
+ return -EOPNOTSUPP;
}
for (out_index = 0; out_index < MLX5_MAX_FLOW_FWD_VPORTS; out_index++) {
@@ -1002,7 +1194,7 @@ mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
err = mlx5e_attach_encap(priv, flow, out_dev, out_index,
extack, &encap_dev, &encap_valid);
if (err)
- goto err_attach_encap;
+ return err;
out_priv = netdev_priv(encap_dev);
rpriv = out_priv->ppriv;
@@ -1012,21 +1204,19 @@ mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
err = mlx5_eswitch_add_vlan_action(esw, attr);
if (err)
- goto err_add_vlan;
+ return err;
if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) {
err = mlx5e_attach_mod_hdr(priv, flow, parse_attr);
kfree(parse_attr->mod_hdr_actions);
if (err)
- goto err_mod_hdr;
+ return err;
}
if (attr->action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
counter = mlx5_fc_create(attr->counter_dev, true);
- if (IS_ERR(counter)) {
- err = PTR_ERR(counter);
- goto err_create_counter;
- }
+ if (IS_ERR(counter))
+ return PTR_ERR(counter);
attr->counter = counter;
}
@@ -1044,27 +1234,12 @@ mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
flow->rule[0] = mlx5e_tc_offload_fdb_rules(esw, flow, &parse_attr->spec, attr);
}
- if (IS_ERR(flow->rule[0])) {
- err = PTR_ERR(flow->rule[0]);
- goto err_add_rule;
- }
+	if (IS_ERR(flow->rule[0]))
+		return PTR_ERR(flow->rule[0]);
+
+	flow_flag_set(flow, OFFLOADED);
return 0;
-
-err_add_rule:
- mlx5_fc_destroy(attr->counter_dev, counter);
-err_create_counter:
- if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
- mlx5e_detach_mod_hdr(priv, flow);
-err_mod_hdr:
- mlx5_eswitch_del_vlan_action(esw, attr);
-err_add_vlan:
- for (out_index = 0; out_index < MLX5_MAX_FLOW_FWD_VPORTS; out_index++)
- if (attr->dests[out_index].flags & MLX5_ESW_DEST_ENCAP)
- mlx5e_detach_encap(priv, flow, out_index);
-err_attach_encap:
-err_max_prio_chain:
- return err;
}
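Notice that the goto unwind ladder is gone from mlx5e_tc_add_fdb_flow(): every failure now simply returns, because teardown was made safe on partially-initialized flows (the NOT_READY early-out, the IS_ERR_OR_NULL guard before mlx5_del_flow_rules(), the NULL checks in the detach helpers) and is funneled through mlx5e_flow_put(). The enabling pattern is an idempotent destructor; a tiny illustration with invented names:

#include <stdlib.h>

struct flow {
	void *counter;	/* NULL until allocated */
	void *rule;	/* NULL until installed */
};

/* Idempotent teardown: every field is either fully set up or still in
 * its zero-initialized state, so the constructor may fail at any step
 * and just return, leaving cleanup to the common release path. */
static void flow_destroy(struct flow *f)
{
	if (!f)
		return;
	free(f->rule);		/* free(NULL) is a defined no-op */
	free(f->counter);
	free(f);
}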
static bool mlx5_flow_has_geneve_opt(struct mlx5e_tc_flow *flow)
@@ -1088,14 +1263,14 @@ static void mlx5e_tc_del_fdb_flow(struct mlx5e_priv *priv,
struct mlx5_esw_flow_attr slow_attr;
int out_index;
- if (flow->flags & MLX5E_TC_FLOW_NOT_READY) {
+ if (flow_flag_test(flow, NOT_READY)) {
remove_unready_flow(flow);
kvfree(attr->parse_attr);
return;
}
- if (flow->flags & MLX5E_TC_FLOW_OFFLOADED) {
- if (flow->flags & MLX5E_TC_FLOW_SLOW)
+ if (mlx5e_is_offloaded_flow(flow)) {
+ if (flow_flag_test(flow, SLOW))
mlx5e_tc_unoffload_from_slow_path(esw, flow, &slow_attr);
else
mlx5e_tc_unoffload_fdb_rules(esw, flow, attr);
@@ -1119,13 +1294,13 @@ static void mlx5e_tc_del_fdb_flow(struct mlx5e_priv *priv,
}
void mlx5e_tc_encap_flows_add(struct mlx5e_priv *priv,
- struct mlx5e_encap_entry *e)
+ struct mlx5e_encap_entry *e,
+ struct list_head *flow_list)
{
struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
struct mlx5_esw_flow_attr slow_attr, *esw_attr;
struct mlx5_flow_handle *rule;
struct mlx5_flow_spec *spec;
- struct encap_flow_item *efi;
struct mlx5e_tc_flow *flow;
int err;
@@ -1142,16 +1317,17 @@ void mlx5e_tc_encap_flows_add(struct mlx5e_priv *priv,
e->flags |= MLX5_ENCAP_ENTRY_VALID;
mlx5e_rep_queue_neigh_stats_work(priv);
- list_for_each_entry(efi, &e->flows, list) {
+ list_for_each_entry(flow, flow_list, tmp_list) {
bool all_flow_encaps_valid = true;
int i;
- flow = container_of(efi, struct mlx5e_tc_flow, encaps[efi->index]);
+ if (!mlx5e_is_offloaded_flow(flow))
+ continue;
esw_attr = flow->esw_attr;
spec = &esw_attr->parse_attr->spec;
- esw_attr->dests[efi->index].encap_id = e->encap_id;
- esw_attr->dests[efi->index].flags |= MLX5_ESW_DEST_ENCAP_VALID;
+ esw_attr->dests[flow->tmp_efi_index].encap_id = e->encap_id;
+ esw_attr->dests[flow->tmp_efi_index].flags |= MLX5_ESW_DEST_ENCAP_VALID;
/* Flow can be associated with multiple encap entries.
* Before offloading the flow verify that all of them have
* a valid neighbour.
@@ -1177,30 +1353,32 @@ void mlx5e_tc_encap_flows_add(struct mlx5e_priv *priv,
}
mlx5e_tc_unoffload_from_slow_path(esw, flow, &slow_attr);
- flow->flags |= MLX5E_TC_FLOW_OFFLOADED; /* was unset when slow path rule removed */
flow->rule[0] = rule;
+ /* was unset when slow path rule removed */
+ flow_flag_set(flow, OFFLOADED);
}
}
void mlx5e_tc_encap_flows_del(struct mlx5e_priv *priv,
- struct mlx5e_encap_entry *e)
+ struct mlx5e_encap_entry *e,
+ struct list_head *flow_list)
{
struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
struct mlx5_esw_flow_attr slow_attr;
struct mlx5_flow_handle *rule;
struct mlx5_flow_spec *spec;
- struct encap_flow_item *efi;
struct mlx5e_tc_flow *flow;
int err;
- list_for_each_entry(efi, &e->flows, list) {
- flow = container_of(efi, struct mlx5e_tc_flow, encaps[efi->index]);
+ list_for_each_entry(flow, flow_list, tmp_list) {
+ if (!mlx5e_is_offloaded_flow(flow))
+ continue;
spec = &flow->esw_attr->parse_attr->spec;
/* update from encap rule to slow path rule */
rule = mlx5e_tc_offload_to_slow_path(esw, flow, spec, &slow_attr);
/* mark the flow's encap dest as non-valid */
- flow->esw_attr->dests[efi->index].flags &= ~MLX5_ESW_DEST_ENCAP_VALID;
+ flow->esw_attr->dests[flow->tmp_efi_index].flags &= ~MLX5_ESW_DEST_ENCAP_VALID;
if (IS_ERR(rule)) {
err = PTR_ERR(rule);
@@ -1210,8 +1388,9 @@ void mlx5e_tc_encap_flows_del(struct mlx5e_priv *priv,
}
mlx5e_tc_unoffload_fdb_rules(esw, flow, flow->esw_attr);
- flow->flags |= MLX5E_TC_FLOW_OFFLOADED; /* was unset when fast path rule removed */
flow->rule[0] = rule;
+ /* was unset when fast path rule removed */
+ flow_flag_set(flow, OFFLOADED);
}
/* we know that the encap is valid */
@@ -1221,17 +1400,90 @@ void mlx5e_tc_encap_flows_del(struct mlx5e_priv *priv,
static struct mlx5_fc *mlx5e_tc_get_counter(struct mlx5e_tc_flow *flow)
{
- if (flow->flags & MLX5E_TC_FLOW_ESWITCH)
+ if (mlx5e_is_eswitch_flow(flow))
return flow->esw_attr->counter;
else
return flow->nic_attr->counter;
}
+/* Takes reference to all flows attached to encap and adds the flows to
+ * flow_list using 'tmp_list' list_head in mlx5e_tc_flow.
+ */
+void mlx5e_take_all_encap_flows(struct mlx5e_encap_entry *e, struct list_head *flow_list)
+{
+ struct encap_flow_item *efi;
+ struct mlx5e_tc_flow *flow;
+
+ list_for_each_entry(efi, &e->flows, list) {
+ flow = container_of(efi, struct mlx5e_tc_flow, encaps[efi->index]);
+ if (IS_ERR(mlx5e_flow_get(flow)))
+ continue;
+ wait_for_completion(&flow->init_done);
+
+ flow->tmp_efi_index = efi->index;
+ list_add(&flow->tmp_list, flow_list);
+ }
+}
+
+/* Iterate over tmp_list of flows attached to flow_list head. */
+void mlx5e_put_encap_flow_list(struct mlx5e_priv *priv, struct list_head *flow_list)
+{
+ struct mlx5e_tc_flow *flow, *tmp;
+
+ list_for_each_entry_safe(flow, tmp, flow_list, tmp_list)
+ mlx5e_flow_put(priv, flow);
+}
+
+static struct mlx5e_encap_entry *
+mlx5e_get_next_valid_encap(struct mlx5e_neigh_hash_entry *nhe,
+ struct mlx5e_encap_entry *e)
+{
+ struct mlx5e_encap_entry *next = NULL;
+
+retry:
+ rcu_read_lock();
+
+ /* find encap with non-zero reference counter value */
+ for (next = e ?
+ list_next_or_null_rcu(&nhe->encap_list,
+ &e->encap_list,
+ struct mlx5e_encap_entry,
+ encap_list) :
+ list_first_or_null_rcu(&nhe->encap_list,
+ struct mlx5e_encap_entry,
+ encap_list);
+ next;
+ next = list_next_or_null_rcu(&nhe->encap_list,
+ &next->encap_list,
+ struct mlx5e_encap_entry,
+ encap_list))
+ if (mlx5e_encap_take(next))
+ break;
+
+ rcu_read_unlock();
+
+ /* release starting encap */
+ if (e)
+ mlx5e_encap_put(netdev_priv(e->out_dev), e);
+ if (!next)
+ return next;
+
+ /* wait for encap to be fully initialized */
+ wait_for_completion(&next->res_ready);
+ /* continue searching if encap entry is not in valid state after completion */
+ if (!(next->flags & MLX5_ENCAP_ENTRY_VALID)) {
+ e = next;
+ goto retry;
+ }
+
+ return next;
+}
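mlx5e_get_next_valid_encap() walks nhe->encap_list under rcu_read_lock() and advances past any entry whose mlx5e_encap_take() fails, i.e. whose refcount already reached zero and is merely waiting out its RCU grace period (hence the switch to kfree_rcu() in mlx5e_encap_dealloc() above). The load-bearing primitive is inc-not-zero; a userspace analogue in C11 atomics (illustrative name):

#include <stdatomic.h>
#include <stdbool.h>

/* Analogue of refcount_inc_not_zero(): acquire a reference only if the
 * object still holds at least one, i.e. is not already being freed. */
static bool ref_get_unless_zero(atomic_int *r)
{
	int old = atomic_load(r);

	while (old != 0) {
		/* On failure the CAS reloads 'old' and we retry. */
		if (atomic_compare_exchange_weak(r, &old, old + 1))
			return true;
	}
	return false;	/* dying object: caller must skip it */
}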
+
void mlx5e_tc_update_neigh_used_value(struct mlx5e_neigh_hash_entry *nhe)
{
struct mlx5e_neigh *m_neigh = &nhe->m_neigh;
+ struct mlx5e_encap_entry *e = NULL;
struct mlx5e_tc_flow *flow;
- struct mlx5e_encap_entry *e;
struct mlx5_fc *counter;
struct neigh_table *tbl;
bool neigh_used = false;
@@ -1247,14 +1499,25 @@ void mlx5e_tc_update_neigh_used_value(struct mlx5e_neigh_hash_entry *nhe)
else
return;
- list_for_each_entry(e, &nhe->encap_list, encap_list) {
- struct encap_flow_item *efi;
- if (!(e->flags & MLX5_ENCAP_ENTRY_VALID))
- continue;
- list_for_each_entry(efi, &e->flows, list) {
+ /* mlx5e_get_next_valid_encap() releases previous encap before returning
+ * next one.
+ */
+ while ((e = mlx5e_get_next_valid_encap(nhe, e)) != NULL) {
+ struct mlx5e_priv *priv = netdev_priv(e->out_dev);
+ struct encap_flow_item *efi, *tmp;
+ struct mlx5_eswitch *esw;
+ LIST_HEAD(flow_list);
+
+ esw = priv->mdev->priv.eswitch;
+ mutex_lock(&esw->offloads.encap_tbl_lock);
+ list_for_each_entry_safe(efi, tmp, &e->flows, list) {
flow = container_of(efi, struct mlx5e_tc_flow,
encaps[efi->index]);
- if (flow->flags & MLX5E_TC_FLOW_OFFLOADED) {
+ if (IS_ERR(mlx5e_flow_get(flow)))
+ continue;
+ list_add(&flow->tmp_list, &flow_list);
+
+ if (mlx5e_is_offloaded_flow(flow)) {
counter = mlx5e_tc_get_counter(flow);
lastuse = mlx5_fc_query_lastuse(counter);
if (time_after((unsigned long)lastuse, nhe->reported_lastuse)) {
@@ -1263,10 +1526,18 @@ void mlx5e_tc_update_neigh_used_value(struct mlx5e_neigh_hash_entry *nhe)
}
}
}
- if (neigh_used)
+ mutex_unlock(&esw->offloads.encap_tbl_lock);
+
+ mlx5e_put_encap_flow_list(priv, &flow_list);
+ if (neigh_used) {
+ /* release current encap before breaking the loop */
+ mlx5e_encap_put(priv, e);
break;
+ }
}
+ trace_mlx5e_tc_update_neigh_used_value(nhe, neigh_used);
+
if (neigh_used) {
nhe->reported_lastuse = jiffies;
@@ -1282,40 +1553,69 @@ void mlx5e_tc_update_neigh_used_value(struct mlx5e_neigh_hash_entry *nhe)
}
}
-static void mlx5e_detach_encap(struct mlx5e_priv *priv,
- struct mlx5e_tc_flow *flow, int out_index)
+static void mlx5e_encap_dealloc(struct mlx5e_priv *priv, struct mlx5e_encap_entry *e)
{
- struct list_head *next = flow->encaps[out_index].list.next;
+ WARN_ON(!list_empty(&e->flows));
- list_del(&flow->encaps[out_index].list);
- if (list_empty(next)) {
- struct mlx5e_encap_entry *e;
-
- e = list_entry(next, struct mlx5e_encap_entry, flows);
+ if (e->compl_result > 0) {
mlx5e_rep_encap_entry_detach(netdev_priv(e->out_dev), e);
if (e->flags & MLX5_ENCAP_ENTRY_VALID)
mlx5_packet_reformat_dealloc(priv->mdev, e->encap_id);
+ }
- hash_del_rcu(&e->encap_hlist);
- kfree(e->encap_header);
- kfree(e);
+ kfree(e->encap_header);
+ kfree_rcu(e, rcu);
+}
+
+void mlx5e_encap_put(struct mlx5e_priv *priv, struct mlx5e_encap_entry *e)
+{
+ struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
+
+ if (!refcount_dec_and_mutex_lock(&e->refcnt, &esw->offloads.encap_tbl_lock))
+ return;
+ hash_del_rcu(&e->encap_hlist);
+ mutex_unlock(&esw->offloads.encap_tbl_lock);
+
+ mlx5e_encap_dealloc(priv, e);
+}
+
+static void mlx5e_detach_encap(struct mlx5e_priv *priv,
+ struct mlx5e_tc_flow *flow, int out_index)
+{
+ struct mlx5e_encap_entry *e = flow->encaps[out_index].e;
+ struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
+
+ /* flow wasn't fully initialized */
+ if (!e)
+ return;
+
+ mutex_lock(&esw->offloads.encap_tbl_lock);
+ list_del(&flow->encaps[out_index].list);
+ flow->encaps[out_index].e = NULL;
+ if (!refcount_dec_and_test(&e->refcnt)) {
+ mutex_unlock(&esw->offloads.encap_tbl_lock);
+ return;
}
+ hash_del_rcu(&e->encap_hlist);
+ mutex_unlock(&esw->offloads.encap_tbl_lock);
+
+ mlx5e_encap_dealloc(priv, e);
}
static void __mlx5e_tc_del_fdb_peer_flow(struct mlx5e_tc_flow *flow)
{
struct mlx5_eswitch *esw = flow->priv->mdev->priv.eswitch;
- if (!(flow->flags & MLX5E_TC_FLOW_ESWITCH) ||
- !(flow->flags & MLX5E_TC_FLOW_DUP))
+ if (!flow_flag_test(flow, ESWITCH) ||
+ !flow_flag_test(flow, DUP))
return;
mutex_lock(&esw->offloads.peer_mutex);
list_del(&flow->peer);
mutex_unlock(&esw->offloads.peer_mutex);
- flow->flags &= ~MLX5E_TC_FLOW_DUP;
+ flow_flag_clear(flow, DUP);
mlx5e_tc_del_fdb_flow(flow->peer_flow->priv, flow->peer_flow);
kvfree(flow->peer_flow);
@@ -1339,7 +1639,7 @@ static void mlx5e_tc_del_fdb_peer_flow(struct mlx5e_tc_flow *flow)
static void mlx5e_tc_del_flow(struct mlx5e_priv *priv,
struct mlx5e_tc_flow *flow)
{
- if (flow->flags & MLX5E_TC_FLOW_ESWITCH) {
+ if (mlx5e_is_eswitch_flow(flow)) {
mlx5e_tc_del_fdb_peer_flow(flow);
mlx5e_tc_del_fdb_flow(priv, flow);
} else {
@@ -1840,6 +2140,7 @@ static int parse_cls_flower(struct mlx5e_priv *priv,
struct mlx5_eswitch *esw = dev->priv.eswitch;
struct mlx5e_rep_priv *rpriv = priv->ppriv;
struct mlx5_eswitch_rep *rep;
+ bool is_eswitch_flow;
int err;
inner_match_level = MLX5_MATCH_NONE;
@@ -1850,7 +2151,8 @@ static int parse_cls_flower(struct mlx5e_priv *priv,
non_tunnel_match_level = (inner_match_level == MLX5_MATCH_NONE) ?
outer_match_level : inner_match_level;
- if (!err && (flow->flags & MLX5E_TC_FLOW_ESWITCH)) {
+ is_eswitch_flow = mlx5e_is_eswitch_flow(flow);
+ if (!err && is_eswitch_flow) {
rep = rpriv->rep;
if (rep->vport != MLX5_VPORT_UPLINK &&
(esw->offloads.inline_mode != MLX5_INLINE_MODE_NONE &&
@@ -1864,7 +2166,7 @@ static int parse_cls_flower(struct mlx5e_priv *priv,
}
}
- if (flow->flags & MLX5E_TC_FLOW_ESWITCH) {
+ if (is_eswitch_flow) {
flow->esw_attr->inner_match_level = inner_match_level;
flow->esw_attr->outer_match_level = outer_match_level;
} else {
@@ -2385,14 +2687,15 @@ static bool actions_match_supported(struct mlx5e_priv *priv,
{
u32 actions;
- if (flow->flags & MLX5E_TC_FLOW_ESWITCH)
+ if (mlx5e_is_eswitch_flow(flow))
actions = flow->esw_attr->action;
else
actions = flow->nic_attr->action;
- if (flow->flags & MLX5E_TC_FLOW_EGRESS &&
+ if (flow_flag_test(flow, EGRESS) &&
!((actions & MLX5_FLOW_CONTEXT_ACTION_DECAP) ||
- (actions & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP)))
+ (actions & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP) ||
+ (actions & MLX5_FLOW_CONTEXT_ACTION_DROP)))
return false;
if (actions & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
@@ -2542,7 +2845,7 @@ static int parse_tc_nic_actions(struct mlx5e_priv *priv,
if (priv->netdev->netdev_ops == peer_dev->netdev_ops &&
same_hw_devs(priv, netdev_priv(peer_dev))) {
parse_attr->mirred_ifindex[0] = peer_dev->ifindex;
- flow->flags |= MLX5E_TC_FLOW_HAIRPIN;
+ flow_flag_set(flow, HAIRPIN);
action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
MLX5_FLOW_CONTEXT_ACTION_COUNT;
} else {
@@ -2629,6 +2932,31 @@ static bool is_merged_eswitch_dev(struct mlx5e_priv *priv,
+bool mlx5e_encap_take(struct mlx5e_encap_entry *e)
+{
+ return refcount_inc_not_zero(&e->refcnt);
+}
+
+static struct mlx5e_encap_entry *
+mlx5e_encap_get(struct mlx5e_priv *priv, struct encap_key *key,
+ uintptr_t hash_key)
+{
+ struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
+ struct mlx5e_encap_entry *e;
+ struct encap_key e_key;
+
+ hash_for_each_possible_rcu(esw->offloads.encap_tbl, e,
+ encap_hlist, hash_key) {
+ e_key.ip_tun_key = &e->tun_info->key;
+ e_key.tc_tunnel = e->tunnel;
+ if (!cmp_encap_info(&e_key, key) &&
+ mlx5e_encap_take(e))
+ return e;
+ }
+
+ return NULL;
+}
+
static int mlx5e_attach_encap(struct mlx5e_priv *priv,
struct mlx5e_tc_flow *flow,
struct net_device *mirred_dev,
@@ -2641,11 +2969,10 @@ static int mlx5e_attach_encap(struct mlx5e_priv *priv,
struct mlx5_esw_flow_attr *attr = flow->esw_attr;
struct mlx5e_tc_flow_parse_attr *parse_attr;
const struct ip_tunnel_info *tun_info;
- struct encap_key key, e_key;
+ struct encap_key key;
struct mlx5e_encap_entry *e;
unsigned short family;
uintptr_t hash_key;
- bool found = false;
int err = 0;
parse_attr = attr->parse_attr;
@@ -2660,42 +2987,60 @@ static int mlx5e_attach_encap(struct mlx5e_priv *priv,
hash_key = hash_encap_info(&key);
- hash_for_each_possible_rcu(esw->offloads.encap_tbl, e,
- encap_hlist, hash_key) {
- e_key.ip_tun_key = &e->tun_info->key;
- e_key.tc_tunnel = e->tunnel;
- if (!cmp_encap_info(&e_key, &key)) {
- found = true;
- break;
- }
- }
+ mutex_lock(&esw->offloads.encap_tbl_lock);
+ e = mlx5e_encap_get(priv, &key, hash_key);
/* must verify if encap is valid or not */
- if (found)
+ if (e) {
+ mutex_unlock(&esw->offloads.encap_tbl_lock);
+ wait_for_completion(&e->res_ready);
+
+ /* Protect against concurrent neigh update. */
+ mutex_lock(&esw->offloads.encap_tbl_lock);
+ if (e->compl_result < 0) {
+ err = -EREMOTEIO;
+ goto out_err;
+ }
goto attach_flow;
+ }
e = kzalloc(sizeof(*e), GFP_KERNEL);
- if (!e)
- return -ENOMEM;
+ if (!e) {
+ err = -ENOMEM;
+ goto out_err;
+ }
+
+ refcount_set(&e->refcnt, 1);
+ init_completion(&e->res_ready);
e->tun_info = tun_info;
err = mlx5e_tc_tun_init_encap_attr(mirred_dev, priv, e, extack);
- if (err)
+ if (err) {
+ kfree(e);
+ e = NULL;
goto out_err;
+ }
INIT_LIST_HEAD(&e->flows);
+ hash_add_rcu(esw->offloads.encap_tbl, &e->encap_hlist, hash_key);
+ mutex_unlock(&esw->offloads.encap_tbl_lock);
if (family == AF_INET)
err = mlx5e_tc_tun_create_header_ipv4(priv, mirred_dev, e);
else if (family == AF_INET6)
err = mlx5e_tc_tun_create_header_ipv6(priv, mirred_dev, e);
- if (err)
+ /* Protect against concurrent neigh update. */
+ mutex_lock(&esw->offloads.encap_tbl_lock);
+ complete_all(&e->res_ready);
+ if (err) {
+ e->compl_result = err;
goto out_err;
-
- hash_add_rcu(esw->offloads.encap_tbl, &e->encap_hlist, hash_key);
+ }
+ e->compl_result = 1;
attach_flow:
+ flow->encaps[out_index].e = e;
list_add(&flow->encaps[out_index].list, &e->flows);
flow->encaps[out_index].index = out_index;
*encap_dev = e->out_dev;
@@ -2706,11 +3051,14 @@ attach_flow:
} else {
*encap_valid = false;
}
+ mutex_unlock(&esw->offloads.encap_tbl_lock);
return err;
out_err:
- kfree(e);
+ mutex_unlock(&esw->offloads.encap_tbl_lock);
+ if (e)
+ mlx5e_encap_put(priv, e);
return err;
}
@@ -2890,12 +3238,16 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv,
if (netdev_port_same_parent_id(priv->netdev, out_dev)) {
struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
struct net_device *uplink_dev = mlx5_eswitch_uplink_get_proto_dev(esw, REP_ETH);
- struct net_device *uplink_upper = netdev_master_upper_dev_get(uplink_dev);
+ struct net_device *uplink_upper;
+ rcu_read_lock();
+ uplink_upper =
+ netdev_master_upper_dev_get_rcu(uplink_dev);
if (uplink_upper &&
netif_is_lag_master(uplink_upper) &&
uplink_upper == out_dev)
out_dev = uplink_dev;
+ rcu_read_unlock();
if (is_vlan_dev(out_dev)) {
err = add_vlan_push_action(priv, attr,
@@ -3066,19 +3418,19 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv,
return 0;
}
-static void get_flags(int flags, u16 *flow_flags)
+static void get_flags(int flags, unsigned long *flow_flags)
{
- u16 __flow_flags = 0;
+ unsigned long __flow_flags = 0;
- if (flags & MLX5E_TC_INGRESS)
- __flow_flags |= MLX5E_TC_FLOW_INGRESS;
- if (flags & MLX5E_TC_EGRESS)
- __flow_flags |= MLX5E_TC_FLOW_EGRESS;
+ if (flags & MLX5_TC_FLAG(INGRESS))
+ __flow_flags |= BIT(MLX5E_TC_FLOW_FLAG_INGRESS);
+ if (flags & MLX5_TC_FLAG(EGRESS))
+ __flow_flags |= BIT(MLX5E_TC_FLOW_FLAG_EGRESS);
- if (flags & MLX5E_TC_ESW_OFFLOAD)
- __flow_flags |= MLX5E_TC_FLOW_ESWITCH;
- if (flags & MLX5E_TC_NIC_OFFLOAD)
- __flow_flags |= MLX5E_TC_FLOW_NIC;
+ if (flags & MLX5_TC_FLAG(ESW_OFFLOAD))
+ __flow_flags |= BIT(MLX5E_TC_FLOW_FLAG_ESWITCH);
+ if (flags & MLX5_TC_FLAG(NIC_OFFLOAD))
+ __flow_flags |= BIT(MLX5E_TC_FLOW_FLAG_NIC);
*flow_flags = __flow_flags;
}
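get_flags() now translates the exported MLX5_TC_FLAG() bits into per-flow BIT(MLX5E_TC_FLOW_FLAG_*) bits, so the per-flow flags fit the unsigned long word that the kernel's atomic bit helpers operate on. A tiny self-contained illustration of the same macro shape (names invented for the example):

#include <stdio.h>

#define BIT(n)		(1UL << (n))

enum {
	EXAMPLE_FLAG_INGRESS_BIT,
	EXAMPLE_FLAG_EGRESS_BIT,
};

/* Same shape as MLX5_TC_FLAG(): token-paste the short name into the
 * enum entry and turn it into a mask. */
#define EXAMPLE_FLAG(name)	BIT(EXAMPLE_FLAG_##name##_BIT)

int main(void)
{
	unsigned long flags = EXAMPLE_FLAG(INGRESS) | EXAMPLE_FLAG(EGRESS);

	printf("flags = %#lx\n", flags);	/* prints 0x3 */
	return 0;
}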
@@ -3090,12 +3442,13 @@ static const struct rhashtable_params tc_ht_params = {
.automatic_shrinking = true,
};
-static struct rhashtable *get_tc_ht(struct mlx5e_priv *priv, int flags)
+static struct rhashtable *get_tc_ht(struct mlx5e_priv *priv,
+ unsigned long flags)
{
struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
struct mlx5e_rep_priv *uplink_rpriv;
- if (flags & MLX5E_TC_ESW_OFFLOAD) {
+ if (flags & MLX5_TC_FLAG(ESW_OFFLOAD)) {
uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
return &uplink_rpriv->uplink_priv.tc_ht;
} else /* NIC offload */
@@ -3106,7 +3459,7 @@ static bool is_peer_flow_needed(struct mlx5e_tc_flow *flow)
{
struct mlx5_esw_flow_attr *attr = flow->esw_attr;
bool is_rep_ingress = attr->in_rep->vport != MLX5_VPORT_UPLINK &&
- flow->flags & MLX5E_TC_FLOW_INGRESS;
+ flow_flag_test(flow, INGRESS);
bool act_is_encap = !!(attr->action &
MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT);
bool esw_paired = mlx5_devcom_is_paired(attr->in_mdev->priv.devcom,
@@ -3125,13 +3478,13 @@ static bool is_peer_flow_needed(struct mlx5e_tc_flow *flow)
static int
mlx5e_alloc_flow(struct mlx5e_priv *priv, int attr_size,
- struct flow_cls_offload *f, u16 flow_flags,
+ struct flow_cls_offload *f, unsigned long flow_flags,
struct mlx5e_tc_flow_parse_attr **__parse_attr,
struct mlx5e_tc_flow **__flow)
{
struct mlx5e_tc_flow_parse_attr *parse_attr;
struct mlx5e_tc_flow *flow;
- int err;
+ int out_index, err;
flow = kzalloc(sizeof(*flow) + attr_size, GFP_KERNEL);
parse_attr = kvzalloc(sizeof(*parse_attr), GFP_KERNEL);
@@ -3143,6 +3496,12 @@ mlx5e_alloc_flow(struct mlx5e_priv *priv, int attr_size,
flow->cookie = f->cookie;
flow->flags = flow_flags;
flow->priv = priv;
+ for (out_index = 0; out_index < MLX5_MAX_FLOW_FWD_VPORTS; out_index++)
+ INIT_LIST_HEAD(&flow->encaps[out_index].list);
+ INIT_LIST_HEAD(&flow->mod_hdr);
+ INIT_LIST_HEAD(&flow->hairpin);
+ refcount_set(&flow->refcnt, 1);
+ init_completion(&flow->init_done);
*__flow = flow;
*__parse_attr = parse_attr;
@@ -3182,7 +3541,7 @@ mlx5e_flow_esw_attr_init(struct mlx5_esw_flow_attr *esw_attr,
static struct mlx5e_tc_flow *
__mlx5e_add_fdb_flow(struct mlx5e_priv *priv,
struct flow_cls_offload *f,
- u16 flow_flags,
+ unsigned long flow_flags,
struct net_device *filter_dev,
struct mlx5_eswitch_rep *in_rep,
struct mlx5_core_dev *in_mdev)
@@ -3193,7 +3552,7 @@ __mlx5e_add_fdb_flow(struct mlx5e_priv *priv,
struct mlx5e_tc_flow *flow;
int attr_size, err;
- flow_flags |= MLX5E_TC_FLOW_ESWITCH;
+ flow_flags |= BIT(MLX5E_TC_FLOW_FLAG_ESWITCH);
attr_size = sizeof(struct mlx5_esw_flow_attr);
err = mlx5e_alloc_flow(priv, attr_size, f, flow_flags,
&parse_attr, &flow);
@@ -3215,6 +3574,7 @@ __mlx5e_add_fdb_flow(struct mlx5e_priv *priv,
goto err_free;
err = mlx5e_tc_add_fdb_flow(priv, flow, extack);
+ complete_all(&flow->init_done);
if (err) {
if (!(err == -ENETUNREACH && mlx5_lag_is_multipath(in_mdev)))
goto err_free;
@@ -3225,15 +3585,14 @@ __mlx5e_add_fdb_flow(struct mlx5e_priv *priv,
return flow;
err_free:
- kfree(flow);
- kvfree(parse_attr);
+ mlx5e_flow_put(priv, flow);
out:
return ERR_PTR(err);
}
static int mlx5e_tc_add_fdb_peer_flow(struct flow_cls_offload *f,
struct mlx5e_tc_flow *flow,
- u16 flow_flags)
+ unsigned long flow_flags)
{
struct mlx5e_priv *priv = flow->priv, *peer_priv;
struct mlx5_eswitch *esw = priv->mdev->priv.eswitch, *peer_esw;
@@ -3271,7 +3630,7 @@ static int mlx5e_tc_add_fdb_peer_flow(struct flow_cls_offload *f,
}
flow->peer_flow = peer_flow;
- flow->flags |= MLX5E_TC_FLOW_DUP;
+ flow_flag_set(flow, DUP);
mutex_lock(&esw->offloads.peer_mutex);
list_add_tail(&flow->peer, &esw->offloads.peer_flows);
mutex_unlock(&esw->offloads.peer_mutex);
@@ -3284,7 +3643,7 @@ out:
static int
mlx5e_add_fdb_flow(struct mlx5e_priv *priv,
struct flow_cls_offload *f,
- u16 flow_flags,
+ unsigned long flow_flags,
struct net_device *filter_dev,
struct mlx5e_tc_flow **__flow)
{
@@ -3318,7 +3677,7 @@ out:
static int
mlx5e_add_nic_flow(struct mlx5e_priv *priv,
struct flow_cls_offload *f,
- u16 flow_flags,
+ unsigned long flow_flags,
struct net_device *filter_dev,
struct mlx5e_tc_flow **__flow)
{
@@ -3332,7 +3691,7 @@ mlx5e_add_nic_flow(struct mlx5e_priv *priv,
if (!tc_cls_can_offload_and_chain0(priv->netdev, &f->common))
return -EOPNOTSUPP;
- flow_flags |= MLX5E_TC_FLOW_NIC;
+ flow_flags |= BIT(MLX5E_TC_FLOW_FLAG_NIC);
attr_size = sizeof(struct mlx5_nic_flow_attr);
err = mlx5e_alloc_flow(priv, attr_size, f, flow_flags,
&parse_attr, &flow);
@@ -3353,14 +3712,14 @@ mlx5e_add_nic_flow(struct mlx5e_priv *priv,
if (err)
goto err_free;
- flow->flags |= MLX5E_TC_FLOW_OFFLOADED;
+ flow_flag_set(flow, OFFLOADED);
kvfree(parse_attr);
*__flow = flow;
return 0;
err_free:
- kfree(flow);
+ mlx5e_flow_put(priv, flow);
kvfree(parse_attr);
out:
return err;
@@ -3369,12 +3728,12 @@ out:
static int
mlx5e_tc_add_flow(struct mlx5e_priv *priv,
struct flow_cls_offload *f,
- int flags,
+ unsigned long flags,
struct net_device *filter_dev,
struct mlx5e_tc_flow **flow)
{
struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
- u16 flow_flags;
+ unsigned long flow_flags;
int err;
get_flags(flags, &flow_flags);
@@ -3393,14 +3752,16 @@ mlx5e_tc_add_flow(struct mlx5e_priv *priv,
}
int mlx5e_configure_flower(struct net_device *dev, struct mlx5e_priv *priv,
- struct flow_cls_offload *f, int flags)
+ struct flow_cls_offload *f, unsigned long flags)
{
struct netlink_ext_ack *extack = f->common.extack;
struct rhashtable *tc_ht = get_tc_ht(priv, flags);
struct mlx5e_tc_flow *flow;
int err = 0;
- flow = rhashtable_lookup_fast(tc_ht, &f->cookie, tc_ht_params);
+ rcu_read_lock();
+ flow = rhashtable_lookup(tc_ht, &f->cookie, tc_ht_params);
+ rcu_read_unlock();
if (flow) {
NL_SET_ERR_MSG_MOD(extack,
"flow cookie already exists, ignoring");
@@ -3411,55 +3772,68 @@ int mlx5e_configure_flower(struct net_device *dev, struct mlx5e_priv *priv,
goto out;
}
+ trace_mlx5e_configure_flower(f);
err = mlx5e_tc_add_flow(priv, f, flags, dev, &flow);
if (err)
goto out;
- err = rhashtable_insert_fast(tc_ht, &flow->node, tc_ht_params);
+ err = rhashtable_lookup_insert_fast(tc_ht, &flow->node, tc_ht_params);
if (err)
goto err_free;
return 0;
err_free:
- mlx5e_tc_del_flow(priv, flow);
- kfree(flow);
+ mlx5e_flow_put(priv, flow);
out:
return err;
}
-#define DIRECTION_MASK (MLX5E_TC_INGRESS | MLX5E_TC_EGRESS)
-#define FLOW_DIRECTION_MASK (MLX5E_TC_FLOW_INGRESS | MLX5E_TC_FLOW_EGRESS)
-
static bool same_flow_direction(struct mlx5e_tc_flow *flow, int flags)
{
- if ((flow->flags & FLOW_DIRECTION_MASK) == (flags & DIRECTION_MASK))
- return true;
+ bool dir_ingress = !!(flags & MLX5_TC_FLAG(INGRESS));
+ bool dir_egress = !!(flags & MLX5_TC_FLAG(EGRESS));
- return false;
+ return flow_flag_test(flow, INGRESS) == dir_ingress &&
+ flow_flag_test(flow, EGRESS) == dir_egress;
}
int mlx5e_delete_flower(struct net_device *dev, struct mlx5e_priv *priv,
- struct flow_cls_offload *f, int flags)
+ struct flow_cls_offload *f, unsigned long flags)
{
struct rhashtable *tc_ht = get_tc_ht(priv, flags);
struct mlx5e_tc_flow *flow;
+ int err;
+ rcu_read_lock();
flow = rhashtable_lookup_fast(tc_ht, &f->cookie, tc_ht_params);
- if (!flow || !same_flow_direction(flow, flags))
- return -EINVAL;
+ if (!flow || !same_flow_direction(flow, flags)) {
+ err = -EINVAL;
+ goto errout;
+ }
+	/* Only delete the flow if it doesn't have MLX5E_TC_FLOW_FLAG_DELETED
+	 * set.
+ */
+ if (flow_flag_test_and_set(flow, DELETED)) {
+ err = -EINVAL;
+ goto errout;
+ }
rhashtable_remove_fast(tc_ht, &flow->node, tc_ht_params);
+ rcu_read_unlock();
- mlx5e_tc_del_flow(priv, flow);
-
- kfree(flow);
+ trace_mlx5e_delete_flower(f);
+ mlx5e_flow_put(priv, flow);
return 0;
+
+errout:
+ rcu_read_unlock();
+ return err;
}
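mlx5e_delete_flower() now tolerates racing delete requests: only the caller that wins flow_flag_test_and_set(flow, DELETED) removes the flow from the hash table, while every other caller bails out with -EINVAL. Stripped to its core this is an atomic one-shot gate; a minimal analogue (illustrative):

#include <stdatomic.h>
#include <stdbool.h>

/* One-shot gate: across all concurrent callers, returns true exactly
 * once -- for whoever flipped the bit from 0 to 1. */
static bool claim_delete(atomic_ulong *flags, unsigned long deleted_bit)
{
	unsigned long old = atomic_fetch_or(flags, deleted_bit);

	return !(old & deleted_bit);
}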
int mlx5e_stats_flower(struct net_device *dev, struct mlx5e_priv *priv,
- struct flow_cls_offload *f, int flags)
+ struct flow_cls_offload *f, unsigned long flags)
{
struct mlx5_devcom *devcom = priv->mdev->priv.devcom;
struct rhashtable *tc_ht = get_tc_ht(priv, flags);
@@ -3469,15 +3843,24 @@ int mlx5e_stats_flower(struct net_device *dev, struct mlx5e_priv *priv,
u64 lastuse = 0;
u64 packets = 0;
u64 bytes = 0;
+ int err = 0;
- flow = rhashtable_lookup_fast(tc_ht, &f->cookie, tc_ht_params);
- if (!flow || !same_flow_direction(flow, flags))
- return -EINVAL;
+ rcu_read_lock();
+ flow = mlx5e_flow_get(rhashtable_lookup(tc_ht, &f->cookie,
+ tc_ht_params));
+ rcu_read_unlock();
+ if (IS_ERR(flow))
+ return PTR_ERR(flow);
- if (flow->flags & MLX5E_TC_FLOW_OFFLOADED) {
+ if (!same_flow_direction(flow, flags)) {
+ err = -EINVAL;
+ goto errout;
+ }
+
+ if (mlx5e_is_offloaded_flow(flow)) {
counter = mlx5e_tc_get_counter(flow);
if (!counter)
- return 0;
+ goto errout;
mlx5_fc_query_cached(counter, &bytes, &packets, &lastuse);
}
@@ -3489,8 +3872,8 @@ int mlx5e_stats_flower(struct net_device *dev, struct mlx5e_priv *priv,
if (!peer_esw)
goto out;
- if ((flow->flags & MLX5E_TC_FLOW_DUP) &&
- (flow->peer_flow->flags & MLX5E_TC_FLOW_OFFLOADED)) {
+ if (flow_flag_test(flow, DUP) &&
+ flow_flag_test(flow->peer_flow, OFFLOADED)) {
u64 bytes2;
u64 packets2;
u64 lastuse2;
@@ -3509,15 +3892,118 @@ no_peer_counter:
mlx5_devcom_release_peer_data(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
out:
flow_stats_update(&f->stats, bytes, packets, lastuse);
+ trace_mlx5e_stats_flower(f);
+errout:
+ mlx5e_flow_put(priv, flow);
+ return err;
+}
+
+static int apply_police_params(struct mlx5e_priv *priv, u32 rate,
+ struct netlink_ext_ack *extack)
+{
+ struct mlx5e_rep_priv *rpriv = priv->ppriv;
+ struct mlx5_eswitch *esw;
+ u16 vport_num;
+ u32 rate_mbps;
+ int err;
+
+ esw = priv->mdev->priv.eswitch;
+	/* rate is given in bytes/sec.
+	 * First convert to bits/sec and then round to the nearest Mbit/sec
+	 * (Mbit means million bits). Moreover, if rate is non-zero we choose
+	 * to configure to a minimum of 1 Mbit/sec.
+ */
+ rate_mbps = rate ? max_t(u32, (rate * 8 + 500000) / 1000000, 1) : 0;
+ vport_num = rpriv->rep->vport;
+
+ err = mlx5_esw_modify_vport_rate(esw, vport_num, rate_mbps);
+ if (err)
+ NL_SET_ERR_MSG_MOD(extack, "failed applying action to hardware");
+
+ return err;
+}
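The conversion in apply_police_params() is bytes/sec -> bits/sec with round-to-nearest (the +500000 before the divide), plus a clamp so any non-zero rate configures at least 1 Mbit/sec. A standalone check of that arithmetic (test values made up for illustration; note the u32 multiply overflows for rates above ~536 MB/s, just as in the driver expression):

#include <stdint.h>
#include <stdio.h>

static uint32_t rate_to_mbps(uint32_t rate_bytes_ps)
{
	if (!rate_bytes_ps)
		return 0;
	/* bytes/sec -> bits/sec, rounded to the nearest Mbit/sec */
	uint32_t mbps = (rate_bytes_ps * 8u + 500000u) / 1000000u;

	return mbps ? mbps : 1;	/* never configure below 1 Mbit/sec */
}

int main(void)
{
	printf("%u\n", rate_to_mbps(125000));	/* exactly 1 Mbit/sec */
	printf("%u\n", rate_to_mbps(1));	/* rounds to 0, clamped to 1 */
	printf("%u\n", rate_to_mbps(12500000));	/* 100 Mbit/sec */
	return 0;
}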
+
+static int scan_tc_matchall_fdb_actions(struct mlx5e_priv *priv,
+ struct flow_action *flow_action,
+ struct netlink_ext_ack *extack)
+{
+ struct mlx5e_rep_priv *rpriv = priv->ppriv;
+ const struct flow_action_entry *act;
+ int err;
+ int i;
+
+ if (!flow_action_has_entries(flow_action)) {
+ NL_SET_ERR_MSG_MOD(extack, "matchall called with no action");
+ return -EINVAL;
+ }
+
+ if (!flow_offload_has_one_action(flow_action)) {
+		NL_SET_ERR_MSG_MOD(extack, "matchall policing supports only a single action");
+ return -EOPNOTSUPP;
+ }
+
+ flow_action_for_each(i, act, flow_action) {
+ switch (act->id) {
+ case FLOW_ACTION_POLICE:
+ err = apply_police_params(priv, act->police.rate_bytes_ps, extack);
+ if (err)
+ return err;
+
+ rpriv->prev_vf_vport_stats = priv->stats.vf_vport;
+ break;
+ default:
+ NL_SET_ERR_MSG_MOD(extack, "mlx5 supports only police action for matchall");
+ return -EOPNOTSUPP;
+ }
+ }
return 0;
}
+int mlx5e_tc_configure_matchall(struct mlx5e_priv *priv,
+ struct tc_cls_matchall_offload *ma)
+{
+ struct netlink_ext_ack *extack = ma->common.extack;
+ int prio = TC_H_MAJ(ma->common.prio) >> 16;
+
+ if (prio != 1) {
+ NL_SET_ERR_MSG_MOD(extack, "only priority 1 is supported");
+ return -EINVAL;
+ }
+
+ return scan_tc_matchall_fdb_actions(priv, &ma->rule->action, extack);
+}
+
+int mlx5e_tc_delete_matchall(struct mlx5e_priv *priv,
+ struct tc_cls_matchall_offload *ma)
+{
+ struct netlink_ext_ack *extack = ma->common.extack;
+
+ return apply_police_params(priv, 0, extack);
+}
+
+void mlx5e_tc_stats_matchall(struct mlx5e_priv *priv,
+ struct tc_cls_matchall_offload *ma)
+{
+ struct mlx5e_rep_priv *rpriv = priv->ppriv;
+ struct rtnl_link_stats64 cur_stats;
+ u64 dbytes;
+ u64 dpkts;
+
+ cur_stats = priv->stats.vf_vport;
+ dpkts = cur_stats.rx_packets - rpriv->prev_vf_vport_stats.rx_packets;
+ dbytes = cur_stats.rx_bytes - rpriv->prev_vf_vport_stats.rx_bytes;
+ rpriv->prev_vf_vport_stats = cur_stats;
+ flow_stats_update(&ma->stats, dpkts, dbytes, jiffies);
+}
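mlx5e_tc_stats_matchall() reports deltas rather than totals: it diffs the live vport counters against rpriv->prev_vf_vport_stats and then advances that snapshot, so each call hands the TC core only the traffic seen since the previous query. The snapshot-and-diff step in isolation (illustrative types):

#include <stdint.h>

struct counters { uint64_t packets, bytes; };

/* Return traffic accumulated since the last call; the snapshot in
 * 'prev' is advanced in place, just like prev_vf_vport_stats. */
static struct counters stats_delta(struct counters cur, struct counters *prev)
{
	struct counters d = {
		.packets = cur.packets - prev->packets,
		.bytes = cur.bytes - prev->bytes,
	};

	*prev = cur;
	return d;
}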
+
static void mlx5e_tc_hairpin_update_dead_peer(struct mlx5e_priv *priv,
struct mlx5e_priv *peer_priv)
{
struct mlx5_core_dev *peer_mdev = peer_priv->mdev;
- struct mlx5e_hairpin_entry *hpe;
+ struct mlx5e_hairpin_entry *hpe, *tmp;
+ LIST_HEAD(init_wait_list);
u16 peer_vhca_id;
int bkt;
@@ -3526,9 +4012,18 @@ static void mlx5e_tc_hairpin_update_dead_peer(struct mlx5e_priv *priv,
peer_vhca_id = MLX5_CAP_GEN(peer_mdev, vhca_id);
- hash_for_each(priv->fs.tc.hairpin_tbl, bkt, hpe, hairpin_hlist) {
- if (hpe->peer_vhca_id == peer_vhca_id)
+ mutex_lock(&priv->fs.tc.hairpin_tbl_lock);
+ hash_for_each(priv->fs.tc.hairpin_tbl, bkt, hpe, hairpin_hlist)
+ if (refcount_inc_not_zero(&hpe->refcnt))
+ list_add(&hpe->dead_peer_wait_list, &init_wait_list);
+ mutex_unlock(&priv->fs.tc.hairpin_tbl_lock);
+
+ list_for_each_entry_safe(hpe, tmp, &init_wait_list, dead_peer_wait_list) {
+ wait_for_completion(&hpe->res_ready);
+ if (!IS_ERR_OR_NULL(hpe->hp) && hpe->peer_vhca_id == peer_vhca_id)
hpe->hp->pair->peer_gone = true;
+
+ mlx5e_hairpin_put(priv, hpe);
}
}
@@ -3564,7 +4059,10 @@ int mlx5e_tc_nic_init(struct mlx5e_priv *priv)
struct mlx5e_tc_table *tc = &priv->fs.tc;
int err;
- hash_init(tc->mod_hdr_tbl);
+ mutex_init(&tc->t_lock);
+ mutex_init(&tc->mod_hdr.lock);
+ hash_init(tc->mod_hdr.hlist);
+ mutex_init(&tc->hairpin_tbl_lock);
hash_init(tc->hairpin_tbl);
err = rhashtable_init(&tc->ht, &tc_ht_params);
@@ -3596,12 +4094,16 @@ void mlx5e_tc_nic_cleanup(struct mlx5e_priv *priv)
if (tc->netdevice_nb.notifier_call)
unregister_netdevice_notifier(&tc->netdevice_nb);
+ mutex_destroy(&tc->mod_hdr.lock);
+ mutex_destroy(&tc->hairpin_tbl_lock);
+
rhashtable_destroy(&tc->ht);
if (!IS_ERR_OR_NULL(tc->t)) {
mlx5_destroy_flow_table(tc->t);
tc->t = NULL;
}
+ mutex_destroy(&tc->t_lock);
}
int mlx5e_tc_esw_init(struct rhashtable *tc_ht)
@@ -3614,7 +4116,7 @@ void mlx5e_tc_esw_cleanup(struct rhashtable *tc_ht)
rhashtable_free_and_destroy(tc_ht, _mlx5e_tc_del_flow, NULL);
}
-int mlx5e_tc_num_filters(struct mlx5e_priv *priv, int flags)
+int mlx5e_tc_num_filters(struct mlx5e_priv *priv, unsigned long flags)
{
struct rhashtable *tc_ht = get_tc_ht(priv, flags);
@@ -3636,10 +4138,10 @@ void mlx5e_tc_reoffload_flows_work(struct work_struct *work)
reoffload_flows_work);
struct mlx5e_tc_flow *flow, *tmp;
- rtnl_lock();
+ mutex_lock(&rpriv->unready_flows_lock);
list_for_each_entry_safe(flow, tmp, &rpriv->unready_flows, unready) {
if (!mlx5e_tc_add_fdb_flow(flow->priv, flow, NULL))
- remove_unready_flow(flow);
+ unready_flow_del(flow);
}
- rtnl_unlock();
+ mutex_unlock(&rpriv->unready_flows_lock);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h
index 3ab39275ca7d..924c6ef86a14 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h
@@ -40,13 +40,15 @@
#ifdef CONFIG_MLX5_ESWITCH
enum {
- MLX5E_TC_INGRESS = BIT(0),
- MLX5E_TC_EGRESS = BIT(1),
- MLX5E_TC_NIC_OFFLOAD = BIT(2),
- MLX5E_TC_ESW_OFFLOAD = BIT(3),
- MLX5E_TC_LAST_EXPORTED_BIT = 3,
+ MLX5E_TC_FLAG_INGRESS_BIT,
+ MLX5E_TC_FLAG_EGRESS_BIT,
+ MLX5E_TC_FLAG_NIC_OFFLOAD_BIT,
+ MLX5E_TC_FLAG_ESW_OFFLOAD_BIT,
+ MLX5E_TC_FLAG_LAST_EXPORTED_BIT = MLX5E_TC_FLAG_ESW_OFFLOAD_BIT,
};
+#define MLX5_TC_FLAG(flag) BIT(MLX5E_TC_FLAG_##flag##_BIT)
+
int mlx5e_tc_nic_init(struct mlx5e_priv *priv);
void mlx5e_tc_nic_cleanup(struct mlx5e_priv *priv);
@@ -54,23 +56,37 @@ int mlx5e_tc_esw_init(struct rhashtable *tc_ht);
void mlx5e_tc_esw_cleanup(struct rhashtable *tc_ht);
int mlx5e_configure_flower(struct net_device *dev, struct mlx5e_priv *priv,
- struct flow_cls_offload *f, int flags);
+ struct flow_cls_offload *f, unsigned long flags);
int mlx5e_delete_flower(struct net_device *dev, struct mlx5e_priv *priv,
- struct flow_cls_offload *f, int flags);
+ struct flow_cls_offload *f, unsigned long flags);
int mlx5e_stats_flower(struct net_device *dev, struct mlx5e_priv *priv,
- struct flow_cls_offload *f, int flags);
+ struct flow_cls_offload *f, unsigned long flags);
+
+int mlx5e_tc_configure_matchall(struct mlx5e_priv *priv,
+ struct tc_cls_matchall_offload *f);
+int mlx5e_tc_delete_matchall(struct mlx5e_priv *priv,
+ struct tc_cls_matchall_offload *f);
+void mlx5e_tc_stats_matchall(struct mlx5e_priv *priv,
+ struct tc_cls_matchall_offload *ma);
struct mlx5e_encap_entry;
void mlx5e_tc_encap_flows_add(struct mlx5e_priv *priv,
- struct mlx5e_encap_entry *e);
+ struct mlx5e_encap_entry *e,
+ struct list_head *flow_list);
void mlx5e_tc_encap_flows_del(struct mlx5e_priv *priv,
- struct mlx5e_encap_entry *e);
+ struct mlx5e_encap_entry *e,
+ struct list_head *flow_list);
+bool mlx5e_encap_take(struct mlx5e_encap_entry *e);
+void mlx5e_encap_put(struct mlx5e_priv *priv, struct mlx5e_encap_entry *e);
+
+void mlx5e_take_all_encap_flows(struct mlx5e_encap_entry *e, struct list_head *flow_list);
+void mlx5e_put_encap_flow_list(struct mlx5e_priv *priv, struct list_head *flow_list);
struct mlx5e_neigh_hash_entry;
void mlx5e_tc_update_neigh_used_value(struct mlx5e_neigh_hash_entry *nhe);
-int mlx5e_tc_num_filters(struct mlx5e_priv *priv, int flags);
+int mlx5e_tc_num_filters(struct mlx5e_priv *priv, unsigned long flags);
void mlx5e_tc_reoffload_flows_work(struct work_struct *work);
@@ -80,7 +96,11 @@ bool mlx5e_is_valid_eswitch_fwd_dev(struct mlx5e_priv *priv,
#else /* CONFIG_MLX5_ESWITCH */
static inline int mlx5e_tc_nic_init(struct mlx5e_priv *priv) { return 0; }
static inline void mlx5e_tc_nic_cleanup(struct mlx5e_priv *priv) {}
-static inline int mlx5e_tc_num_filters(struct mlx5e_priv *priv, int flags) { return 0; }
+static inline int mlx5e_tc_num_filters(struct mlx5e_priv *priv,
+ unsigned long flags)
+{
+ return 0;
+}
#endif
#endif /* __MLX5_EN_TC_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
index 600e92cb629a..d3a67a9b4eba 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
@@ -210,7 +210,7 @@ mlx5e_txwqe_build_dsegs(struct mlx5e_txqsq *sq, struct sk_buff *skb,
}
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
- struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
+ skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
int fsz = skb_frag_size(frag);
dma_addr = skb_frag_dma_map(sq->pdev, frag, 0, fsz,
@@ -292,8 +292,7 @@ netdev_tx_t mlx5e_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
num_bytes = skb->len + (skb_shinfo(skb)->gso_segs - 1) * ihs;
stats->packets += skb_shinfo(skb)->gso_segs;
} else {
- u8 mode = mlx5e_transport_inline_tx_wqe(wqe) ?
- MLX5_INLINE_MODE_TCP_UDP : sq->min_inline_mode;
+ u8 mode = mlx5e_tx_wqe_inline_mode(sq, &wqe->ctrl, skb);
opcode = MLX5_OPCODE_SEND;
mss = 0;
@@ -608,9 +607,11 @@ netdev_tx_t mlx5i_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
num_bytes = skb->len + (skb_shinfo(skb)->gso_segs - 1) * ihs;
stats->packets += skb_shinfo(skb)->gso_segs;
} else {
+ u8 mode = mlx5e_tx_wqe_inline_mode(sq, NULL, skb);
+
opcode = MLX5_OPCODE_SEND;
mss = 0;
- ihs = mlx5e_calc_min_inline(sq->min_inline_mode, skb);
+ ihs = mlx5e_calc_min_inline(mode, skb);
num_bytes = max_t(unsigned int, skb->len, ETH_ZLEN);
stats->packets++;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eq.c b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
index 41f25ea2e8d9..580c71cb9dfa 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
@@ -215,11 +215,7 @@ static int mlx5_eq_async_int(struct notifier_block *nb,
*/
dma_rmb();
- if (likely(eqe->type < MLX5_EVENT_TYPE_MAX))
- atomic_notifier_call_chain(&eqt->nh[eqe->type], eqe->type, eqe);
- else
- mlx5_core_warn_once(dev, "notifier_call_chain is not setup for eqe: %d\n", eqe->type);
-
+ atomic_notifier_call_chain(&eqt->nh[eqe->type], eqe->type, eqe);
atomic_notifier_call_chain(&eqt->nh[MLX5_EVENT_TYPE_NOTIFY_ANY], eqe->type, eqe);
++eq->cons_index;
@@ -328,10 +324,13 @@ err_buf:
/**
* mlx5_eq_enable - Enable EQ for receiving EQEs
- * @dev - Device which owns the eq
- * @eq - EQ to enable
- * @nb - notifier call block
- * mlx5_eq_enable - must be called after EQ is created in device.
+ * @dev: Device which owns the eq
+ * @eq: EQ to enable
+ * @nb: Notifier call block
+ *
+ * Must be called after EQ is created in device.
+ *
+ * Return: 0 if no error
*/
int mlx5_eq_enable(struct mlx5_core_dev *dev, struct mlx5_eq *eq,
struct notifier_block *nb)
@@ -348,11 +347,12 @@ int mlx5_eq_enable(struct mlx5_core_dev *dev, struct mlx5_eq *eq,
EXPORT_SYMBOL(mlx5_eq_enable);
/**
- * mlx5_eq_disable - Enable EQ for receiving EQEs
- * @dev - Device which owns the eq
- * @eq - EQ to disable
- * @nb - notifier call block
- * mlx5_eq_disable - must be called before EQ is destroyed.
+ * mlx5_eq_disable - Disable EQ for receiving EQEs
+ * @dev: Device which owns the eq
+ * @eq: EQ to disable
+ * @nb: Notifier call block
+ *
+ * Must be called before EQ is destroyed.
*/
void mlx5_eq_disable(struct mlx5_core_dev *dev, struct mlx5_eq *eq,
struct notifier_block *nb)
@@ -415,7 +415,7 @@ void mlx5_eq_del_cq(struct mlx5_eq *eq, struct mlx5_core_cq *cq)
int mlx5_eq_table_init(struct mlx5_core_dev *dev)
{
struct mlx5_eq_table *eq_table;
- int i, err;
+ int i;
eq_table = kvzalloc(sizeof(*eq_table), GFP_KERNEL);
if (!eq_table)
@@ -423,9 +423,7 @@ int mlx5_eq_table_init(struct mlx5_core_dev *dev)
dev->priv.eq_table = eq_table;
- err = mlx5_eq_debugfs_init(dev);
- if (err)
- goto kvfree_eq_table;
+ mlx5_eq_debugfs_init(dev);
mutex_init(&eq_table->lock);
for (i = 0; i < MLX5_EVENT_TYPE_MAX; i++)
@@ -433,11 +431,6 @@ int mlx5_eq_table_init(struct mlx5_core_dev *dev)
eq_table->irq_table = dev->priv.irq_table;
return 0;
-
-kvfree_eq_table:
- kvfree(eq_table);
- dev->priv.eq_table = NULL;
- return err;
}
void mlx5_eq_table_cleanup(struct mlx5_core_dev *dev)
@@ -945,9 +938,6 @@ int mlx5_eq_notifier_register(struct mlx5_core_dev *dev, struct mlx5_nb *nb)
{
struct mlx5_eq_table *eqt = dev->priv.eq_table;
- if (nb->event_type >= MLX5_EVENT_TYPE_MAX)
- return -EINVAL;
-
return atomic_notifier_chain_register(&eqt->nh[nb->event_type], &nb->nb);
}
EXPORT_SYMBOL(mlx5_eq_notifier_register);
@@ -956,9 +946,6 @@ int mlx5_eq_notifier_unregister(struct mlx5_core_dev *dev, struct mlx5_nb *nb)
{
struct mlx5_eq_table *eqt = dev->priv.eq_table;
- if (nb->event_type >= MLX5_EVENT_TYPE_MAX)
- return -EINVAL;
-
return atomic_notifier_chain_unregister(&eqt->nh[nb->event_type], &nb->nb);
}
EXPORT_SYMBOL(mlx5_eq_notifier_unregister);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
index 1f3891fde2eb..30aae76b6a1d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
@@ -58,20 +58,9 @@ struct vport_addr {
bool mc_promisc;
};
-enum {
- UC_ADDR_CHANGE = BIT(0),
- MC_ADDR_CHANGE = BIT(1),
- PROMISC_CHANGE = BIT(3),
-};
-
static void esw_destroy_legacy_fdb_table(struct mlx5_eswitch *esw);
static void esw_cleanup_vepa_rules(struct mlx5_eswitch *esw);
-/* Vport context events */
-#define SRIOV_VPORT_EVENTS (UC_ADDR_CHANGE | \
- MC_ADDR_CHANGE | \
- PROMISC_CHANGE)
-
struct mlx5_vport *__must_check
mlx5_eswitch_get_vport(struct mlx5_eswitch *esw, u16 vport_num)
{
@@ -108,13 +97,13 @@ static int arm_vport_context_events_cmd(struct mlx5_core_dev *dev, u16 vport,
MLX5_SET(nic_vport_context, nic_vport_ctx, arm_change_event, 1);
- if (events_mask & UC_ADDR_CHANGE)
+ if (events_mask & MLX5_VPORT_UC_ADDR_CHANGE)
MLX5_SET(nic_vport_context, nic_vport_ctx,
event_on_uc_address_change, 1);
- if (events_mask & MC_ADDR_CHANGE)
+ if (events_mask & MLX5_VPORT_MC_ADDR_CHANGE)
MLX5_SET(nic_vport_context, nic_vport_ctx,
event_on_mc_address_change, 1);
- if (events_mask & PROMISC_CHANGE)
+ if (events_mask & MLX5_VPORT_PROMISC_CHANGE)
MLX5_SET(nic_vport_context, nic_vport_ctx,
event_on_promisc_change, 1);
@@ -463,6 +452,22 @@ static int esw_create_legacy_table(struct mlx5_eswitch *esw)
return err;
}
+#define MLX5_LEGACY_SRIOV_VPORT_EVENTS (MLX5_VPORT_UC_ADDR_CHANGE | \
+ MLX5_VPORT_MC_ADDR_CHANGE | \
+ MLX5_VPORT_PROMISC_CHANGE)
+
+static int esw_legacy_enable(struct mlx5_eswitch *esw)
+{
+ int ret;
+
+ ret = esw_create_legacy_table(esw);
+ if (ret)
+ return ret;
+
+ mlx5_eswitch_enable_pf_vf_vports(esw, MLX5_LEGACY_SRIOV_VPORT_EVENTS);
+ return 0;
+}
+
static void esw_destroy_legacy_table(struct mlx5_eswitch *esw)
{
esw_cleanup_vepa_rules(esw);
@@ -470,6 +475,19 @@ static void esw_destroy_legacy_table(struct mlx5_eswitch *esw)
esw_destroy_legacy_vepa_table(esw);
}
+static void esw_legacy_disable(struct mlx5_eswitch *esw)
+{
+ struct esw_mc_addr *mc_promisc;
+
+ mlx5_eswitch_disable_pf_vf_vports(esw);
+
+ mc_promisc = &esw->mc_promisc;
+ if (mc_promisc->uplink_rule)
+ mlx5_del_flow_rules(mc_promisc->uplink_rule);
+
+ esw_destroy_legacy_table(esw);
+}
+
/* E-Switch vport UC/MC lists management */
typedef int (*vport_addr_action)(struct mlx5_eswitch *esw,
struct vport_addr *vaddr);
@@ -901,21 +919,21 @@ static void esw_vport_change_handle_locked(struct mlx5_vport *vport)
esw_debug(dev, "vport[%d] Context Changed: perm mac: %pM\n",
vport->vport, mac);
- if (vport->enabled_events & UC_ADDR_CHANGE) {
+ if (vport->enabled_events & MLX5_VPORT_UC_ADDR_CHANGE) {
esw_update_vport_addr_list(esw, vport, MLX5_NVPRT_LIST_TYPE_UC);
esw_apply_vport_addr_list(esw, vport, MLX5_NVPRT_LIST_TYPE_UC);
}
- if (vport->enabled_events & MC_ADDR_CHANGE)
+ if (vport->enabled_events & MLX5_VPORT_MC_ADDR_CHANGE)
esw_update_vport_addr_list(esw, vport, MLX5_NVPRT_LIST_TYPE_MC);
- if (vport->enabled_events & PROMISC_CHANGE) {
+ if (vport->enabled_events & MLX5_VPORT_PROMISC_CHANGE) {
esw_update_vport_rx_mode(esw, vport);
if (!IS_ERR_OR_NULL(vport->allmulti_rule))
esw_update_vport_mc_promisc(esw, vport);
}
- if (vport->enabled_events & (PROMISC_CHANGE | MC_ADDR_CHANGE))
+ if (vport->enabled_events & (MLX5_VPORT_PROMISC_CHANGE | MLX5_VPORT_MC_ADDR_CHANGE))
esw_apply_vport_addr_list(esw, vport, MLX5_NVPRT_LIST_TYPE_MC);
esw_debug(esw->dev, "vport[%d] Context Changed: Done\n", vport->vport);
@@ -1393,18 +1411,49 @@ out:
return err;
}
+static bool element_type_supported(struct mlx5_eswitch *esw, int type)
+{
+ const struct mlx5_core_dev *dev = esw->dev;
+
+ switch (type) {
+ case SCHEDULING_CONTEXT_ELEMENT_TYPE_TSAR:
+ return MLX5_CAP_QOS(dev, esw_element_type) &
+ ELEMENT_TYPE_CAP_MASK_TASR;
+ case SCHEDULING_CONTEXT_ELEMENT_TYPE_VPORT:
+ return MLX5_CAP_QOS(dev, esw_element_type) &
+ ELEMENT_TYPE_CAP_MASK_VPORT;
+ case SCHEDULING_CONTEXT_ELEMENT_TYPE_VPORT_TC:
+ return MLX5_CAP_QOS(dev, esw_element_type) &
+ ELEMENT_TYPE_CAP_MASK_VPORT_TC;
+ case SCHEDULING_CONTEXT_ELEMENT_TYPE_PARA_VPORT_TC:
+ return MLX5_CAP_QOS(dev, esw_element_type) &
+ ELEMENT_TYPE_CAP_MASK_PARA_VPORT_TC;
+ }
+ return false;
+}
+
/* Vport QoS management */
-static int esw_create_tsar(struct mlx5_eswitch *esw)
+static void esw_create_tsar(struct mlx5_eswitch *esw)
{
u32 tsar_ctx[MLX5_ST_SZ_DW(scheduling_context)] = {0};
struct mlx5_core_dev *dev = esw->dev;
+ __be32 *attr;
int err;
if (!MLX5_CAP_GEN(dev, qos) || !MLX5_CAP_QOS(dev, esw_scheduling))
- return 0;
+ return;
+
+ if (!element_type_supported(esw, SCHEDULING_CONTEXT_ELEMENT_TYPE_TSAR))
+ return;
if (esw->qos.enabled)
- return -EEXIST;
+ return;
+
+ MLX5_SET(scheduling_context, tsar_ctx, element_type,
+ SCHEDULING_CONTEXT_ELEMENT_TYPE_TSAR);
+
+ attr = MLX5_ADDR_OF(scheduling_context, tsar_ctx, element_attributes);
+ *attr = cpu_to_be32(TSAR_ELEMENT_TSAR_TYPE_DWRR << 16);
err = mlx5_create_scheduling_element_cmd(dev,
SCHEDULING_HIERARCHY_E_SWITCH,
@@ -1412,11 +1461,10 @@ static int esw_create_tsar(struct mlx5_eswitch *esw)
&esw->qos.root_tsar_id);
if (err) {
esw_warn(esw->dev, "E-Switch create TSAR failed (%d)\n", err);
- return err;
+ return;
}
esw->qos.enabled = true;
- return 0;
}
static void esw_destroy_tsar(struct mlx5_eswitch *esw)
@@ -1537,6 +1585,22 @@ static int esw_vport_qos_config(struct mlx5_eswitch *esw,
return 0;
}
+int mlx5_esw_modify_vport_rate(struct mlx5_eswitch *esw, u16 vport_num,
+ u32 rate_mbps)
+{
+ u32 ctx[MLX5_ST_SZ_DW(scheduling_context)] = {};
+ struct mlx5_vport *vport;
+
+ vport = mlx5_eswitch_get_vport(esw, vport_num);
+ MLX5_SET(scheduling_context, ctx, max_average_bw, rate_mbps);
+
+ return mlx5_modify_scheduling_element_cmd(esw->dev,
+ SCHEDULING_HIERARCHY_E_SWITCH,
+ ctx,
+ vport->qos.esw_tsar_ix,
+ MODIFY_SCHEDULING_ELEMENT_IN_MODIFY_BITMASK_MAX_AVERAGE_BW);
+}
+
static void node_guid_gen_from_mac(u64 *node_guid, u8 mac[ETH_ALEN])
{
((u8 *)node_guid)[7] = mac[0];
@@ -1619,7 +1683,7 @@ static void esw_vport_destroy_drop_counters(struct mlx5_vport *vport)
}
static void esw_enable_vport(struct mlx5_eswitch *esw, struct mlx5_vport *vport,
- int enable_events)
+ enum mlx5_eswitch_vport_event enabled_events)
{
u16 vport_num = vport->vport;
@@ -1641,7 +1705,7 @@ static void esw_enable_vport(struct mlx5_eswitch *esw, struct mlx5_vport *vport,
esw_warn(esw->dev, "Failed to attach vport %d to eswitch rate limiter", vport_num);
/* Sync with current vport context */
- vport->enabled_events = enable_events;
+ vport->enabled_events = enabled_events;
vport->enabled = true;
/* Esw manager is trusted by default. Host PF (vport 0) is trusted as well
@@ -1770,11 +1834,46 @@ static void mlx5_eswitch_event_handlers_unregister(struct mlx5_eswitch *esw)
/* Public E-Switch API */
#define ESW_ALLOWED(esw) ((esw) && MLX5_ESWITCH_MANAGER((esw)->dev))
-int mlx5_eswitch_enable(struct mlx5_eswitch *esw, int mode)
+/* mlx5_eswitch_enable_pf_vf_vports() enables the PF, ECPF and VF vports,
+ * whichever of these are present on the eswitch.
+ */
+void
+mlx5_eswitch_enable_pf_vf_vports(struct mlx5_eswitch *esw,
+ enum mlx5_eswitch_vport_event enabled_events)
{
struct mlx5_vport *vport;
+ int i;
+
+ /* Enable PF vport */
+ vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_PF);
+ esw_enable_vport(esw, vport, enabled_events);
+
+ /* Enable ECPF vports */
+ if (mlx5_ecpf_vport_exists(esw->dev)) {
+ vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_ECPF);
+ esw_enable_vport(esw, vport, enabled_events);
+ }
+
+ /* Enable VF vports */
+ mlx5_esw_for_each_vf_vport(esw, i, vport, esw->esw_funcs.num_vfs)
+ esw_enable_vport(esw, vport, enabled_events);
+}
+
+/* mlx5_eswitch_disable_pf_vf_vports() disables the PF, ECPF and VF vports,
+ * whichever of these were previously enabled on the eswitch.
+ */
+void mlx5_eswitch_disable_pf_vf_vports(struct mlx5_eswitch *esw)
+{
+ struct mlx5_vport *vport;
+ int i;
+
+ mlx5_esw_for_all_vports_reverse(esw, i, vport)
+ esw_disable_vport(esw, vport);
+}
+
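The split lets each eswitch mode pick its own event mask. A hedged sketch of the two call patterns (the offloads call appears later in this patch; the legacy mask shown is an assumption based on the new event enum):

    /* Legacy mode: presumably all vport-context events. */
    mlx5_eswitch_enable_pf_vf_vports(esw, MLX5_VPORT_UC_ADDR_CHANGE |
                                          MLX5_VPORT_MC_ADDR_CHANGE |
                                          MLX5_VPORT_PROMISC_CHANGE);

    /* Offloads mode: unicast address changes only (see esw_offloads_enable()). */
    mlx5_eswitch_enable_pf_vf_vports(esw, MLX5_VPORT_UC_ADDR_CHANGE);

    /* Teardown walks all vports in reverse order. */
    mlx5_eswitch_disable_pf_vf_vports(esw);
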
+int mlx5_eswitch_enable(struct mlx5_eswitch *esw, int mode)
+{
int err;
- int i, enabled_events;
if (!ESW_ALLOWED(esw) ||
!MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, ft_support)) {
@@ -1788,44 +1887,23 @@ int mlx5_eswitch_enable(struct mlx5_eswitch *esw, int mode)
if (!MLX5_CAP_ESW_EGRESS_ACL(esw->dev, ft_support))
esw_warn(esw->dev, "engress ACL is not supported by FW\n");
+ esw_create_tsar(esw);
+
esw->mode = mode;
mlx5_lag_update(esw->dev);
if (mode == MLX5_ESWITCH_LEGACY) {
- err = esw_create_legacy_table(esw);
- if (err)
- goto abort;
+ err = esw_legacy_enable(esw);
} else {
mlx5_reload_interface(esw->dev, MLX5_INTERFACE_PROTOCOL_ETH);
mlx5_reload_interface(esw->dev, MLX5_INTERFACE_PROTOCOL_IB);
- err = esw_offloads_init(esw);
+ err = esw_offloads_enable(esw);
}
if (err)
goto abort;
- err = esw_create_tsar(esw);
- if (err)
- esw_warn(esw->dev, "Failed to create eswitch TSAR");
-
- enabled_events = (mode == MLX5_ESWITCH_LEGACY) ? SRIOV_VPORT_EVENTS :
- UC_ADDR_CHANGE;
-
- /* Enable PF vport */
- vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_PF);
- esw_enable_vport(esw, vport, enabled_events);
-
- /* Enable ECPF vports */
- if (mlx5_ecpf_vport_exists(esw->dev)) {
- vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_ECPF);
- esw_enable_vport(esw, vport, enabled_events);
- }
-
- /* Enable VF vports */
- mlx5_esw_for_each_vf_vport(esw, i, vport, esw->esw_funcs.num_vfs)
- esw_enable_vport(esw, vport, enabled_events);
-
mlx5_eswitch_event_handlers_register(esw);
esw_info(esw->dev, "Enable: mode(%s), nvfs(%d), active vports(%d)\n",
@@ -1847,10 +1925,7 @@ abort:
void mlx5_eswitch_disable(struct mlx5_eswitch *esw)
{
- struct esw_mc_addr *mc_promisc;
- struct mlx5_vport *vport;
int old_mode;
- int i;
if (!ESW_ALLOWED(esw) || esw->mode == MLX5_ESWITCH_NONE)
return;
@@ -1859,21 +1934,14 @@ void mlx5_eswitch_disable(struct mlx5_eswitch *esw)
esw->mode == MLX5_ESWITCH_LEGACY ? "LEGACY" : "OFFLOADS",
esw->esw_funcs.num_vfs, esw->enabled_vports);
- mc_promisc = &esw->mc_promisc;
mlx5_eswitch_event_handlers_unregister(esw);
- mlx5_esw_for_all_vports(esw, i, vport)
- esw_disable_vport(esw, vport);
-
- if (mc_promisc && mc_promisc->uplink_rule)
- mlx5_del_flow_rules(mc_promisc->uplink_rule);
-
- esw_destroy_tsar(esw);
-
if (esw->mode == MLX5_ESWITCH_LEGACY)
- esw_destroy_legacy_table(esw);
+ esw_legacy_disable(esw);
else if (esw->mode == MLX5_ESWITCH_OFFLOADS)
- esw_offloads_cleanup(esw);
+ esw_offloads_disable(esw);
+
+ esw_destroy_tsar(esw);
old_mode = esw->mode;
esw->mode = MLX5_ESWITCH_NONE;
@@ -1931,8 +1999,11 @@ int mlx5_eswitch_init(struct mlx5_core_dev *dev)
if (err)
goto abort;
+ mutex_init(&esw->offloads.encap_tbl_lock);
hash_init(esw->offloads.encap_tbl);
- hash_init(esw->offloads.mod_hdr_tbl);
+ mutex_init(&esw->offloads.mod_hdr.lock);
+ hash_init(esw->offloads.mod_hdr.hlist);
+ atomic64_set(&esw->offloads.num_flows, 0);
mutex_init(&esw->state_lock);
mlx5_esw_for_all_vports(esw, i, vport) {
@@ -1968,6 +2039,8 @@ void mlx5_eswitch_cleanup(struct mlx5_eswitch *esw)
esw->dev->priv.eswitch = NULL;
destroy_workqueue(esw->work_queue);
esw_offloads_cleanup_reps(esw);
+ mutex_destroy(&esw->offloads.mod_hdr.lock);
+ mutex_destroy(&esw->offloads.encap_tbl_lock);
kfree(esw->vports);
kfree(esw);
}
@@ -2085,23 +2158,19 @@ int __mlx5_eswitch_set_vport_vlan(struct mlx5_eswitch *esw,
if (vlan > 4095 || qos > 7)
return -EINVAL;
- mutex_lock(&esw->state_lock);
-
err = modify_esw_vport_cvlan(esw->dev, vport, vlan, qos, set_flags);
if (err)
- goto unlock;
+ return err;
evport->info.vlan = vlan;
evport->info.qos = qos;
if (evport->enabled && esw->mode == MLX5_ESWITCH_LEGACY) {
err = esw_vport_ingress_config(esw, evport);
if (err)
- goto unlock;
+ return err;
err = esw_vport_egress_config(esw, evport);
}
-unlock:
- mutex_unlock(&esw->state_lock);
return err;
}
@@ -2109,11 +2178,16 @@ int mlx5_eswitch_set_vport_vlan(struct mlx5_eswitch *esw,
u16 vport, u16 vlan, u8 qos)
{
u8 set_flags = 0;
+ int err;
if (vlan || qos)
set_flags = SET_VLAN_STRIP | SET_VLAN_INSERT;
- return __mlx5_eswitch_set_vport_vlan(esw, vport, vlan, qos, set_flags);
+ mutex_lock(&esw->state_lock);
+ err = __mlx5_eswitch_set_vport_vlan(esw, vport, vlan, qos, set_flags);
+ mutex_unlock(&esw->state_lock);
+
+ return err;
}
int mlx5_eswitch_set_vport_spoofchk(struct mlx5_eswitch *esw,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
index 04685dbb280c..aba9e7a6ad3c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
@@ -35,6 +35,7 @@
#include <linux/if_ether.h>
#include <linux/if_link.h>
+#include <linux/atomic.h>
#include <net/devlink.h>
#include <linux/mlx5/device.h>
#include <linux/mlx5/eswitch.h>
@@ -101,6 +102,13 @@ struct mlx5_vport_info {
bool trusted;
};
+/* Vport context events */
+enum mlx5_eswitch_vport_event {
+ MLX5_VPORT_UC_ADDR_CHANGE = BIT(0),
+ MLX5_VPORT_MC_ADDR_CHANGE = BIT(1),
+ MLX5_VPORT_PROMISC_CHANGE = BIT(3),
+};
+
struct mlx5_vport {
struct mlx5_core_dev *dev;
int vport;
@@ -122,7 +130,7 @@ struct mlx5_vport {
} qos;
bool enabled;
- u16 enabled_events;
+ enum mlx5_eswitch_vport_event enabled_events;
};
enum offloads_fdb_flags {
@@ -173,13 +181,14 @@ struct mlx5_esw_offload {
struct mlx5_eswitch_rep *vport_reps;
struct list_head peer_flows;
struct mutex peer_mutex;
+ struct mutex encap_tbl_lock; /* protects encap_tbl */
DECLARE_HASHTABLE(encap_tbl, 8);
- DECLARE_HASHTABLE(mod_hdr_tbl, 8);
+ struct mod_hdr_tbl mod_hdr;
DECLARE_HASHTABLE(termtbl_tbl, 8);
struct mutex termtbl_mutex; /* protects termtbl hash */
const struct mlx5_eswitch_rep_ops *rep_ops[NUM_REP_TYPES];
u8 inline_mode;
- u64 num_flows;
+ atomic64_t num_flows;
enum devlink_eswitch_encap_mode encap;
};
@@ -207,8 +216,11 @@ enum {
struct mlx5_eswitch {
struct mlx5_core_dev *dev;
struct mlx5_nb nb;
+ /* legacy data structures */
struct mlx5_eswitch_fdb fdb_table;
struct hlist_head mc_table[MLX5_L2_ADDR_HASH_SIZE];
+ struct esw_mc_addr mc_promisc;
+ /* end of legacy */
struct workqueue_struct *work_queue;
struct mlx5_vport *vports;
u32 flags;
@@ -218,7 +230,6 @@ struct mlx5_eswitch {
* and async SRIOV admin state changes
*/
struct mutex state_lock;
- struct esw_mc_addr mc_promisc;
struct {
bool enabled;
@@ -233,8 +244,8 @@ struct mlx5_eswitch {
struct mlx5_esw_functions esw_funcs;
};
-void esw_offloads_cleanup(struct mlx5_eswitch *esw);
-int esw_offloads_init(struct mlx5_eswitch *esw);
+void esw_offloads_disable(struct mlx5_eswitch *esw);
+int esw_offloads_enable(struct mlx5_eswitch *esw);
void esw_offloads_cleanup_reps(struct mlx5_eswitch *esw);
int esw_offloads_init_reps(struct mlx5_eswitch *esw);
void esw_vport_cleanup_ingress_rules(struct mlx5_eswitch *esw,
@@ -251,6 +262,8 @@ void esw_vport_disable_ingress_acl(struct mlx5_eswitch *esw,
struct mlx5_vport *vport);
void esw_vport_del_ingress_acl_modify_metadata(struct mlx5_eswitch *esw,
struct mlx5_vport *vport);
+int mlx5_esw_modify_vport_rate(struct mlx5_eswitch *esw, u16 vport_num,
+ u32 rate_mbps);
/* E-Switch API */
int mlx5_eswitch_init(struct mlx5_core_dev *dev);
@@ -513,6 +526,11 @@ void mlx5e_tc_clean_fdb_peer_flows(struct mlx5_eswitch *esw);
(vport) = &(esw)->vports[i], \
(i) < (esw)->total_vports; (i)++)
+#define mlx5_esw_for_all_vports_reverse(esw, i, vport) \
+ for ((i) = (esw)->total_vports - 1; \
+ (vport) = &(esw)->vports[i], \
+ (i) >= MLX5_VPORT_PF; (i)--)
+
#define mlx5_esw_for_each_vf_vport(esw, i, vport, nvfs) \
for ((i) = MLX5_VPORT_FIRST_VF; \
(vport) = &(esw)->vports[(i)], \
@@ -574,6 +592,11 @@ bool mlx5_eswitch_is_vf_vport(const struct mlx5_eswitch *esw, u16 vport_num);
void mlx5_eswitch_update_num_of_vfs(struct mlx5_eswitch *esw, const int num_vfs);
int mlx5_esw_funcs_changed_handler(struct notifier_block *nb, unsigned long type, void *data);
+void
+mlx5_eswitch_enable_pf_vf_vports(struct mlx5_eswitch *esw,
+ enum mlx5_eswitch_vport_event enabled_events);
+void mlx5_eswitch_disable_pf_vf_vports(struct mlx5_eswitch *esw);
+
#else /* CONFIG_MLX5_ESWITCH */
/* eswitch API stubs */
static inline int mlx5_eswitch_init(struct mlx5_core_dev *dev) { return 0; }
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
index 0323fd078271..7d3582ee66b7 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
@@ -229,7 +229,7 @@ mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
if (IS_ERR(rule))
goto err_add_rule;
else
- esw->offloads.num_flows++;
+ atomic64_inc(&esw->offloads.num_flows);
return rule;
@@ -294,7 +294,7 @@ mlx5_eswitch_add_fwd_rule(struct mlx5_eswitch *esw,
if (IS_ERR(rule))
goto add_err;
- esw->offloads.num_flows++;
+ atomic64_inc(&esw->offloads.num_flows);
return rule;
add_err:
@@ -322,7 +322,7 @@ __mlx5_eswitch_del_rule(struct mlx5_eswitch *esw,
mlx5_eswitch_termtbl_put(esw, attr->dests[i].termtbl);
}
- esw->offloads.num_flows--;
+ atomic64_dec(&esw->offloads.num_flows);
if (fwd_rule) {
esw_put_prio_table(esw, attr->chain, attr->prio, 1);
@@ -438,9 +438,11 @@ int mlx5_eswitch_add_vlan_action(struct mlx5_eswitch *esw,
fwd = !!((attr->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) &&
!attr->dest_chain);
+ mutex_lock(&esw->state_lock);
+
err = esw_add_vlan_action_check(attr, push, pop, fwd);
if (err)
- return err;
+ goto unlock;
attr->vlan_handled = false;
@@ -453,11 +455,11 @@ int mlx5_eswitch_add_vlan_action(struct mlx5_eswitch *esw,
attr->vlan_handled = true;
}
- return 0;
+ goto unlock;
}
if (!push && !pop)
- return 0;
+ goto unlock;
if (!(offloads->vlan_push_pop_refcount)) {
/* it's the 1st vlan rule, apply global vlan pop policy */
@@ -482,6 +484,8 @@ skip_set_push:
out:
if (!err)
attr->vlan_handled = true;
+unlock:
+ mutex_unlock(&esw->state_lock);
return err;
}
@@ -504,6 +508,8 @@ int mlx5_eswitch_del_vlan_action(struct mlx5_eswitch *esw,
pop = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP);
fwd = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST);
+ mutex_lock(&esw->state_lock);
+
vport = esw_vlan_action_get_vport(attr, push, pop);
if (!push && !pop && fwd) {
@@ -511,7 +517,7 @@ int mlx5_eswitch_del_vlan_action(struct mlx5_eswitch *esw,
if (attr->dests[0].rep->vport == MLX5_VPORT_UPLINK)
vport->vlan_refcount--;
- return 0;
+ goto out;
}
if (push) {
@@ -529,12 +535,13 @@ int mlx5_eswitch_del_vlan_action(struct mlx5_eswitch *esw,
skip_unset_push:
offloads->vlan_push_pop_refcount--;
if (offloads->vlan_push_pop_refcount)
- return 0;
+ goto out;
/* no more vlan rules, stop global vlan pop policy */
err = esw_set_global_vlan_pop(esw, 0);
out:
+ mutex_unlock(&esw->state_lock);
return err;
}
@@ -583,38 +590,15 @@ void mlx5_eswitch_del_send_to_vport_rule(struct mlx5_flow_handle *rule)
mlx5_del_flow_rules(rule);
}
-static int mlx5_eswitch_enable_passing_vport_metadata(struct mlx5_eswitch *esw)
+static int esw_set_passing_vport_metadata(struct mlx5_eswitch *esw, bool enable)
{
u32 out[MLX5_ST_SZ_DW(query_esw_vport_context_out)] = {};
u32 in[MLX5_ST_SZ_DW(modify_esw_vport_context_in)] = {};
u8 fdb_to_vport_reg_c_id;
int err;
- err = mlx5_eswitch_query_esw_vport_context(esw, esw->manager_vport,
- out, sizeof(out));
- if (err)
- return err;
-
- fdb_to_vport_reg_c_id = MLX5_GET(query_esw_vport_context_out, out,
- esw_vport_context.fdb_to_vport_reg_c_id);
-
- fdb_to_vport_reg_c_id |= MLX5_FDB_TO_VPORT_REG_C_0;
- MLX5_SET(modify_esw_vport_context_in, in,
- esw_vport_context.fdb_to_vport_reg_c_id, fdb_to_vport_reg_c_id);
-
- MLX5_SET(modify_esw_vport_context_in, in,
- field_select.fdb_to_vport_reg_c_id, 1);
-
- return mlx5_eswitch_modify_esw_vport_context(esw, esw->manager_vport,
- in, sizeof(in));
-}
-
-static int mlx5_eswitch_disable_passing_vport_metadata(struct mlx5_eswitch *esw)
-{
- u32 out[MLX5_ST_SZ_DW(query_esw_vport_context_out)] = {};
- u32 in[MLX5_ST_SZ_DW(modify_esw_vport_context_in)] = {};
- u8 fdb_to_vport_reg_c_id;
- int err;
+ if (!mlx5_eswitch_vport_match_metadata_enabled(esw))
+ return 0;
err = mlx5_eswitch_query_esw_vport_context(esw, esw->manager_vport,
out, sizeof(out));
@@ -624,7 +608,10 @@ static int mlx5_eswitch_disable_passing_vport_metadata(struct mlx5_eswitch *esw)
fdb_to_vport_reg_c_id = MLX5_GET(query_esw_vport_context_out, out,
esw_vport_context.fdb_to_vport_reg_c_id);
- fdb_to_vport_reg_c_id &= ~MLX5_FDB_TO_VPORT_REG_C_0;
+ if (enable)
+ fdb_to_vport_reg_c_id |= MLX5_FDB_TO_VPORT_REG_C_0;
+ else
+ fdb_to_vport_reg_c_id &= ~MLX5_FDB_TO_VPORT_REG_C_0;
MLX5_SET(modify_esw_vport_context_in, in,
esw_vport_context.fdb_to_vport_reg_c_id, fdb_to_vport_reg_c_id);
@@ -1402,10 +1389,9 @@ void esw_offloads_cleanup_reps(struct mlx5_eswitch *esw)
int esw_offloads_init_reps(struct mlx5_eswitch *esw)
{
int total_vports = esw->total_vports;
- struct mlx5_core_dev *dev = esw->dev;
struct mlx5_eswitch_rep *rep;
- u8 hw_id[ETH_ALEN], rep_type;
int vport_index;
+ u8 rep_type;
esw->offloads.vport_reps = kcalloc(total_vports,
sizeof(struct mlx5_eswitch_rep),
@@ -1413,12 +1399,9 @@ int esw_offloads_init_reps(struct mlx5_eswitch *esw)
if (!esw->offloads.vport_reps)
return -ENOMEM;
- mlx5_query_mac_address(dev, hw_id);
-
mlx5_esw_for_all_reps(esw, vport_index, rep) {
rep->vport = mlx5_eswitch_index_to_vport_num(esw, vport_index);
rep->vport_index = vport_index;
- ether_addr_copy(rep->hw_id, hw_id);
for (rep_type = 0; rep_type < NUM_REP_TYPES; rep_type++)
atomic_set(&rep->rep_data[rep_type].state,
@@ -2120,7 +2103,7 @@ int mlx5_esw_funcs_changed_handler(struct notifier_block *nb, unsigned long type
return NOTIFY_OK;
}
-int esw_offloads_init(struct mlx5_eswitch *esw)
+int esw_offloads_enable(struct mlx5_eswitch *esw)
{
int err;
@@ -2134,11 +2117,11 @@ int esw_offloads_init(struct mlx5_eswitch *esw)
if (err)
return err;
- if (mlx5_eswitch_vport_match_metadata_enabled(esw)) {
- err = mlx5_eswitch_enable_passing_vport_metadata(esw);
- if (err)
- goto err_vport_metadata;
- }
+ err = esw_set_passing_vport_metadata(esw, true);
+ if (err)
+ goto err_vport_metadata;
+
+ mlx5_eswitch_enable_pf_vf_vports(esw, MLX5_VPORT_UC_ADDR_CHANGE);
err = esw_offloads_load_all_reps(esw);
if (err)
@@ -2152,8 +2135,8 @@ int esw_offloads_init(struct mlx5_eswitch *esw)
return 0;
err_reps:
- if (mlx5_eswitch_vport_match_metadata_enabled(esw))
- mlx5_eswitch_disable_passing_vport_metadata(esw);
+ mlx5_eswitch_disable_pf_vf_vports(esw);
+ esw_set_passing_vport_metadata(esw, false);
err_vport_metadata:
esw_offloads_steering_cleanup(esw);
return err;
@@ -2178,13 +2161,13 @@ static int esw_offloads_stop(struct mlx5_eswitch *esw,
return err;
}
-void esw_offloads_cleanup(struct mlx5_eswitch *esw)
+void esw_offloads_disable(struct mlx5_eswitch *esw)
{
mlx5_rdma_disable_roce(esw->dev);
esw_offloads_devcom_cleanup(esw);
esw_offloads_unload_all_reps(esw);
- if (mlx5_eswitch_vport_match_metadata_enabled(esw))
- mlx5_eswitch_disable_passing_vport_metadata(esw);
+ mlx5_eswitch_disable_pf_vf_vports(esw);
+ esw_set_passing_vport_metadata(esw, false);
esw_offloads_steering_cleanup(esw);
esw->offloads.encap = DEVLINK_ESWITCH_ENCAP_MODE_NONE;
}
@@ -2345,7 +2328,7 @@ int mlx5_devlink_eswitch_inline_mode_set(struct devlink *devlink, u8 mode,
break;
}
- if (esw->offloads.num_flows > 0) {
+ if (atomic64_read(&esw->offloads.num_flows) > 0) {
NL_SET_ERR_MSG_MOD(extack,
"Can't set inline mode when flows are configured");
return -EOPNOTSUPP;
@@ -2455,7 +2438,7 @@ int mlx5_devlink_eswitch_encap_mode_set(struct devlink *devlink,
if (esw->offloads.encap == encap)
return 0;
- if (esw->offloads.num_flows > 0) {
+ if (atomic64_read(&esw->offloads.num_flows) > 0) {
NL_SET_ERR_MSG_MOD(extack,
"Can't set encapsulation when flows are configured");
return -EOPNOTSUPP;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
index 7ac1249eadc3..1e3381604b3d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
@@ -182,7 +182,7 @@ static int mlx5_cmd_create_flow_table(struct mlx5_flow_root_namespace *ns,
} else {
MLX5_SET(create_flow_table_in, in,
flow_table_context.table_miss_action,
- ns->def_miss_action);
+ ft->def_miss_action);
}
break;
@@ -262,7 +262,7 @@ static int mlx5_cmd_modify_flow_table(struct mlx5_flow_root_namespace *ns,
} else {
MLX5_SET(modify_flow_table_in, in,
flow_table_context.table_miss_action,
- ns->def_miss_action);
+ ft->def_miss_action);
}
}
@@ -566,7 +566,9 @@ static int mlx5_cmd_delete_fte(struct mlx5_flow_root_namespace *ns,
return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}
-int mlx5_cmd_fc_alloc(struct mlx5_core_dev *dev, u32 *id)
+int mlx5_cmd_fc_bulk_alloc(struct mlx5_core_dev *dev,
+ enum mlx5_fc_bulk_alloc_bitmask alloc_bitmask,
+ u32 *id)
{
u32 in[MLX5_ST_SZ_DW(alloc_flow_counter_in)] = {0};
u32 out[MLX5_ST_SZ_DW(alloc_flow_counter_out)] = {0};
@@ -574,6 +576,7 @@ int mlx5_cmd_fc_alloc(struct mlx5_core_dev *dev, u32 *id)
MLX5_SET(alloc_flow_counter_in, in, opcode,
MLX5_CMD_OP_ALLOC_FLOW_COUNTER);
+ MLX5_SET(alloc_flow_counter_in, in, flow_counter_bulk, alloc_bitmask);
err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
if (!err)
@@ -581,6 +584,11 @@ int mlx5_cmd_fc_alloc(struct mlx5_core_dev *dev, u32 *id)
return err;
}
+int mlx5_cmd_fc_alloc(struct mlx5_core_dev *dev, u32 *id)
+{
+ return mlx5_cmd_fc_bulk_alloc(dev, 0, id);
+}
+
int mlx5_cmd_fc_free(struct mlx5_core_dev *dev, u32 id)
{
u32 in[MLX5_ST_SZ_DW(dealloc_flow_counter_in)] = {0};
@@ -615,67 +623,24 @@ int mlx5_cmd_fc_query(struct mlx5_core_dev *dev, u32 id,
return 0;
}
-struct mlx5_cmd_fc_bulk {
- u32 id;
- int num;
- int outlen;
- u32 out[0];
-};
-
-struct mlx5_cmd_fc_bulk *
-mlx5_cmd_fc_bulk_alloc(struct mlx5_core_dev *dev, u32 id, int num)
-{
- struct mlx5_cmd_fc_bulk *b;
- int outlen =
- MLX5_ST_SZ_BYTES(query_flow_counter_out) +
- MLX5_ST_SZ_BYTES(traffic_counter) * num;
-
- b = kzalloc(sizeof(*b) + outlen, GFP_KERNEL);
- if (!b)
- return NULL;
-
- b->id = id;
- b->num = num;
- b->outlen = outlen;
-
- return b;
-}
-
-void mlx5_cmd_fc_bulk_free(struct mlx5_cmd_fc_bulk *b)
+int mlx5_cmd_fc_get_bulk_query_out_len(int bulk_len)
{
- kfree(b);
+ return MLX5_ST_SZ_BYTES(query_flow_counter_out) +
+ MLX5_ST_SZ_BYTES(traffic_counter) * bulk_len;
}
-int
-mlx5_cmd_fc_bulk_query(struct mlx5_core_dev *dev, struct mlx5_cmd_fc_bulk *b)
+int mlx5_cmd_fc_bulk_query(struct mlx5_core_dev *dev, u32 base_id, int bulk_len,
+ u32 *out)
{
+ int outlen = mlx5_cmd_fc_get_bulk_query_out_len(bulk_len);
u32 in[MLX5_ST_SZ_DW(query_flow_counter_in)] = {0};
MLX5_SET(query_flow_counter_in, in, opcode,
MLX5_CMD_OP_QUERY_FLOW_COUNTER);
MLX5_SET(query_flow_counter_in, in, op_mod, 0);
- MLX5_SET(query_flow_counter_in, in, flow_counter_id, b->id);
- MLX5_SET(query_flow_counter_in, in, num_of_counters, b->num);
- return mlx5_cmd_exec(dev, in, sizeof(in), b->out, b->outlen);
-}
-
-void mlx5_cmd_fc_bulk_get(struct mlx5_core_dev *dev,
- struct mlx5_cmd_fc_bulk *b, u32 id,
- u64 *packets, u64 *bytes)
-{
- int index = id - b->id;
- void *stats;
-
- if (index < 0 || index >= b->num) {
- mlx5_core_warn(dev, "Flow counter id (0x%x) out of range (0x%x..0x%x). Counter ignored.\n",
- id, b->id, b->id + b->num - 1);
- return;
- }
-
- stats = MLX5_ADDR_OF(query_flow_counter_out, b->out,
- flow_statistics[index]);
- *packets = MLX5_GET64(traffic_counter, stats, packets);
- *bytes = MLX5_GET64(traffic_counter, stats, octets);
+ MLX5_SET(query_flow_counter_in, in, flow_counter_id, base_id);
+ MLX5_SET(query_flow_counter_in, in, num_of_counters, bulk_len);
+ return mlx5_cmd_exec(dev, in, sizeof(in), out, outlen);
}
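A hedged usage sketch of the reworked API: the caller now owns the output buffer, sized via mlx5_cmd_fc_get_bulk_query_out_len() (the helper below is hypothetical):

    /* Sketch only: query bulk_len counters starting at base_id. */
    static int example_fc_bulk_query(struct mlx5_core_dev *dev,
                                     u32 base_id, int bulk_len)
    {
            int outlen = mlx5_cmd_fc_get_bulk_query_out_len(bulk_len);
            u32 *out = kvzalloc(outlen, GFP_KERNEL);
            int err;

            if (!out)
                    return -ENOMEM;
            err = mlx5_cmd_fc_bulk_query(dev, base_id, bulk_len, out);
            /* on success, parse the traffic_counter entries in out here */
            kvfree(out);
            return err;
    }
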
int mlx5_packet_reformat_alloc(struct mlx5_core_dev *dev,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.h b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.h
index e340f9af2f5a..bc4606306009 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.h
@@ -78,20 +78,16 @@ struct mlx5_flow_cmds {
};
int mlx5_cmd_fc_alloc(struct mlx5_core_dev *dev, u32 *id);
+int mlx5_cmd_fc_bulk_alloc(struct mlx5_core_dev *dev,
+ enum mlx5_fc_bulk_alloc_bitmask alloc_bitmask,
+ u32 *id);
int mlx5_cmd_fc_free(struct mlx5_core_dev *dev, u32 id);
int mlx5_cmd_fc_query(struct mlx5_core_dev *dev, u32 id,
u64 *packets, u64 *bytes);
-struct mlx5_cmd_fc_bulk;
-
-struct mlx5_cmd_fc_bulk *
-mlx5_cmd_fc_bulk_alloc(struct mlx5_core_dev *dev, u32 id, int num);
-void mlx5_cmd_fc_bulk_free(struct mlx5_cmd_fc_bulk *b);
-int
-mlx5_cmd_fc_bulk_query(struct mlx5_core_dev *dev, struct mlx5_cmd_fc_bulk *b);
-void mlx5_cmd_fc_bulk_get(struct mlx5_core_dev *dev,
- struct mlx5_cmd_fc_bulk *b, u32 id,
- u64 *packets, u64 *bytes);
+int mlx5_cmd_fc_get_bulk_query_out_len(int bulk_len);
+int mlx5_cmd_fc_bulk_query(struct mlx5_core_dev *dev, u32 base_id, int bulk_len,
+ u32 *out);
const struct mlx5_flow_cmds *mlx5_fs_cmd_get_default(enum fs_flow_table_type type);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
index 3e99799bdb40..7bdec442f0ac 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
@@ -60,7 +60,8 @@
ADD_PRIO(num_prios_val, 0, num_levels_val, {},\
__VA_ARGS__)\
-#define ADD_NS(...) {.type = FS_TYPE_NAMESPACE,\
+#define ADD_NS(def_miss_act, ...) {.type = FS_TYPE_NAMESPACE, \
+ .def_miss_action = def_miss_act,\
.children = (struct init_tree_node[]) {__VA_ARGS__},\
.ar_size = INIT_TREE_NODE_ARRAY_SIZE(__VA_ARGS__) \
}
@@ -131,33 +132,41 @@ static struct init_tree_node {
int num_leaf_prios;
int prio;
int num_levels;
+ enum mlx5_flow_table_miss_action def_miss_action;
} root_fs = {
.type = FS_TYPE_NAMESPACE,
.ar_size = 7,
- .children = (struct init_tree_node[]) {
- ADD_PRIO(0, BY_PASS_MIN_LEVEL, 0,
- FS_CHAINING_CAPS,
- ADD_NS(ADD_MULTIPLE_PRIO(MLX5_BY_PASS_NUM_PRIOS,
- BY_PASS_PRIO_NUM_LEVELS))),
- ADD_PRIO(0, LAG_MIN_LEVEL, 0,
- FS_CHAINING_CAPS,
- ADD_NS(ADD_MULTIPLE_PRIO(LAG_NUM_PRIOS,
- LAG_PRIO_NUM_LEVELS))),
- ADD_PRIO(0, OFFLOADS_MIN_LEVEL, 0, {},
- ADD_NS(ADD_MULTIPLE_PRIO(OFFLOADS_NUM_PRIOS, OFFLOADS_MAX_FT))),
- ADD_PRIO(0, ETHTOOL_MIN_LEVEL, 0,
- FS_CHAINING_CAPS,
- ADD_NS(ADD_MULTIPLE_PRIO(ETHTOOL_NUM_PRIOS,
- ETHTOOL_PRIO_NUM_LEVELS))),
- ADD_PRIO(0, KERNEL_MIN_LEVEL, 0, {},
- ADD_NS(ADD_MULTIPLE_PRIO(KERNEL_NIC_TC_NUM_PRIOS, KERNEL_NIC_TC_NUM_LEVELS),
- ADD_MULTIPLE_PRIO(KERNEL_NIC_NUM_PRIOS,
- KERNEL_NIC_PRIO_NUM_LEVELS))),
- ADD_PRIO(0, BY_PASS_MIN_LEVEL, 0,
- FS_CHAINING_CAPS,
- ADD_NS(ADD_MULTIPLE_PRIO(LEFTOVERS_NUM_PRIOS, LEFTOVERS_NUM_LEVELS))),
- ADD_PRIO(0, ANCHOR_MIN_LEVEL, 0, {},
- ADD_NS(ADD_MULTIPLE_PRIO(ANCHOR_NUM_PRIOS, ANCHOR_NUM_LEVELS))),
+ .children = (struct init_tree_node[]){
+ ADD_PRIO(0, BY_PASS_MIN_LEVEL, 0, FS_CHAINING_CAPS,
+ ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
+ ADD_MULTIPLE_PRIO(MLX5_BY_PASS_NUM_PRIOS,
+ BY_PASS_PRIO_NUM_LEVELS))),
+ ADD_PRIO(0, LAG_MIN_LEVEL, 0, FS_CHAINING_CAPS,
+ ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
+ ADD_MULTIPLE_PRIO(LAG_NUM_PRIOS,
+ LAG_PRIO_NUM_LEVELS))),
+ ADD_PRIO(0, OFFLOADS_MIN_LEVEL, 0, {},
+ ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
+ ADD_MULTIPLE_PRIO(OFFLOADS_NUM_PRIOS,
+ OFFLOADS_MAX_FT))),
+ ADD_PRIO(0, ETHTOOL_MIN_LEVEL, 0, FS_CHAINING_CAPS,
+ ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
+ ADD_MULTIPLE_PRIO(ETHTOOL_NUM_PRIOS,
+ ETHTOOL_PRIO_NUM_LEVELS))),
+ ADD_PRIO(0, KERNEL_MIN_LEVEL, 0, {},
+ ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
+ ADD_MULTIPLE_PRIO(KERNEL_NIC_TC_NUM_PRIOS,
+ KERNEL_NIC_TC_NUM_LEVELS),
+ ADD_MULTIPLE_PRIO(KERNEL_NIC_NUM_PRIOS,
+ KERNEL_NIC_PRIO_NUM_LEVELS))),
+ ADD_PRIO(0, BY_PASS_MIN_LEVEL, 0, FS_CHAINING_CAPS,
+ ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
+ ADD_MULTIPLE_PRIO(LEFTOVERS_NUM_PRIOS,
+ LEFTOVERS_NUM_LEVELS))),
+ ADD_PRIO(0, ANCHOR_MIN_LEVEL, 0, {},
+ ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
+ ADD_MULTIPLE_PRIO(ANCHOR_NUM_PRIOS,
+ ANCHOR_NUM_LEVELS))),
}
};
@@ -167,8 +176,29 @@ static struct init_tree_node egress_root_fs = {
.children = (struct init_tree_node[]) {
ADD_PRIO(0, MLX5_BY_PASS_NUM_PRIOS, 0,
FS_CHAINING_CAPS_EGRESS,
- ADD_NS(ADD_MULTIPLE_PRIO(MLX5_BY_PASS_NUM_PRIOS,
+ ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
+ ADD_MULTIPLE_PRIO(MLX5_BY_PASS_NUM_PRIOS,
+ BY_PASS_PRIO_NUM_LEVELS))),
+ }
+};
+
+#define RDMA_RX_BYPASS_PRIO 0
+#define RDMA_RX_KERNEL_PRIO 1
+static struct init_tree_node rdma_rx_root_fs = {
+ .type = FS_TYPE_NAMESPACE,
+ .ar_size = 2,
+ .children = (struct init_tree_node[]) {
+ [RDMA_RX_BYPASS_PRIO] =
+ ADD_PRIO(0, MLX5_BY_PASS_NUM_REGULAR_PRIOS, 0,
+ FS_CHAINING_CAPS,
+ ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
+ ADD_MULTIPLE_PRIO(MLX5_BY_PASS_NUM_REGULAR_PRIOS,
BY_PASS_PRIO_NUM_LEVELS))),
+ [RDMA_RX_KERNEL_PRIO] =
+ ADD_PRIO(0, MLX5_BY_PASS_NUM_REGULAR_PRIOS + 1, 0,
+ FS_CHAINING_CAPS,
+ ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_SWITCH_DOMAIN,
+ ADD_MULTIPLE_PRIO(1, 1))),
}
};
@@ -1014,6 +1044,7 @@ static struct mlx5_flow_table *__mlx5_create_flow_table(struct mlx5_flow_namespa
tree_init_node(&ft->node, del_hw_flow_table, del_sw_flow_table);
log_table_sz = ft->max_fte ? ilog2(ft->max_fte) : 0;
next_ft = find_next_chained_ft(fs_prio);
+ ft->def_miss_action = ns->def_miss_action;
err = root->cmds->create_flow_table(root, ft, log_table_sz, next_ft);
if (err)
goto free_ft;
@@ -2056,16 +2087,18 @@ struct mlx5_flow_namespace *mlx5_get_flow_namespace(struct mlx5_core_dev *dev,
if (steering->sniffer_tx_root_ns)
return &steering->sniffer_tx_root_ns->ns;
return NULL;
- case MLX5_FLOW_NAMESPACE_RDMA_RX:
- if (steering->rdma_rx_root_ns)
- return &steering->rdma_rx_root_ns->ns;
- return NULL;
default:
break;
}
if (type == MLX5_FLOW_NAMESPACE_EGRESS) {
root_ns = steering->egress_root_ns;
+ } else if (type == MLX5_FLOW_NAMESPACE_RDMA_RX) {
+ root_ns = steering->rdma_rx_root_ns;
+ prio = RDMA_RX_BYPASS_PRIO;
+ } else if (type == MLX5_FLOW_NAMESPACE_RDMA_RX_KERNEL) {
+ root_ns = steering->rdma_rx_root_ns;
+ prio = RDMA_RX_KERNEL_PRIO;
} else { /* Must be NIC RX */
root_ns = steering->root_ns;
prio = type;
@@ -2155,7 +2188,8 @@ static struct mlx5_flow_namespace *fs_init_namespace(struct mlx5_flow_namespace
return ns;
}
-static struct mlx5_flow_namespace *fs_create_namespace(struct fs_prio *prio)
+static struct mlx5_flow_namespace *fs_create_namespace(struct fs_prio *prio,
+ int def_miss_act)
{
struct mlx5_flow_namespace *ns;
@@ -2164,6 +2198,7 @@ static struct mlx5_flow_namespace *fs_create_namespace(struct fs_prio *prio)
return ERR_PTR(-ENOMEM);
fs_init_namespace(ns);
+ ns->def_miss_action = def_miss_act;
tree_init_node(&ns->node, NULL, del_sw_ns);
tree_add_node(&ns->node, &prio->node);
list_add_tail(&ns->node.list, &prio->node.children);
@@ -2230,7 +2265,7 @@ static int init_root_tree_recursive(struct mlx5_flow_steering *steering,
base = &fs_prio->node;
} else if (init_node->type == FS_TYPE_NAMESPACE) {
fs_get_obj(fs_prio, fs_parent_node);
- fs_ns = fs_create_namespace(fs_prio);
+ fs_ns = fs_create_namespace(fs_prio, init_node->def_miss_action);
if (IS_ERR(fs_ns))
return PTR_ERR(fs_ns);
base = &fs_ns->node;
@@ -2494,18 +2529,25 @@ static int init_sniffer_rx_root_ns(struct mlx5_flow_steering *steering)
static int init_rdma_rx_root_ns(struct mlx5_flow_steering *steering)
{
- struct fs_prio *prio;
+ int err;
steering->rdma_rx_root_ns = create_root_ns(steering, FS_FT_RDMA_RX);
if (!steering->rdma_rx_root_ns)
return -ENOMEM;
- steering->rdma_rx_root_ns->def_miss_action =
- MLX5_FLOW_TABLE_MISS_ACTION_SWITCH_DOMAIN;
+ err = init_root_tree(steering, &rdma_rx_root_fs,
+ &steering->rdma_rx_root_ns->ns.node);
+ if (err)
+ goto out_err;
- /* Create single prio */
- prio = fs_create_prio(&steering->rdma_rx_root_ns->ns, 0, 1);
- return PTR_ERR_OR_ZERO(prio);
+ set_prio_attrs(steering->rdma_rx_root_ns);
+
+ return 0;
+
+out_err:
+ cleanup_root_ns(steering->rdma_rx_root_ns);
+ steering->rdma_rx_root_ns = NULL;
+ return err;
}
static int init_fdb_root_ns(struct mlx5_flow_steering *steering)
{
@@ -2543,7 +2585,7 @@ static int init_fdb_root_ns(struct mlx5_flow_steering *steering)
}
for (chain = 0; chain <= FDB_MAX_CHAIN; chain++) {
- ns = fs_create_namespace(maj_prio);
+ ns = fs_create_namespace(maj_prio, MLX5_FLOW_TABLE_MISS_ACTION_DEF);
if (IS_ERR(ns)) {
err = PTR_ERR(ns);
goto out_err;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
index c1252d6be0ef..0d16b4b5ab83 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
@@ -145,6 +145,7 @@ struct mlx5_flow_table {
struct list_head fwd_rules;
u32 flags;
struct rhltable fgs_hash;
+ enum mlx5_flow_table_miss_action def_miss_action;
};
struct mlx5_ft_underlay_qp {
@@ -191,6 +192,7 @@ struct fs_prio {
struct mlx5_flow_namespace {
/* parent == NULL => root ns */
struct fs_node node;
+ enum mlx5_flow_table_miss_action def_miss_action;
};
struct mlx5_flow_group_mask {
@@ -219,7 +221,6 @@ struct mlx5_flow_root_namespace {
struct mutex chain_lock;
struct list_head underlay_qpns;
const struct mlx5_flow_cmds *cmds;
- enum mlx5_flow_table_miss_action def_miss_action;
};
int mlx5_init_fc_stats(struct mlx5_core_dev *dev);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c
index 1834d9f3aa1c..ab69effb056d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c
@@ -40,6 +40,8 @@
#define MLX5_FC_STATS_PERIOD msecs_to_jiffies(1000)
/* Max number of counters to query in bulk read is 32K */
#define MLX5_SW_MAX_COUNTERS_BULK BIT(15)
+#define MLX5_FC_POOL_MAX_THRESHOLD BIT(18)
+#define MLX5_FC_POOL_USED_BUFF_RATIO 10
struct mlx5_fc_cache {
u64 packets;
@@ -58,12 +60,18 @@ struct mlx5_fc {
u64 lastpackets;
u64 lastbytes;
+ struct mlx5_fc_bulk *bulk;
u32 id;
bool aging;
struct mlx5_fc_cache cache ____cacheline_aligned_in_smp;
};
+static void mlx5_fc_pool_init(struct mlx5_fc_pool *fc_pool, struct mlx5_core_dev *dev);
+static void mlx5_fc_pool_cleanup(struct mlx5_fc_pool *fc_pool);
+static struct mlx5_fc *mlx5_fc_pool_acquire_counter(struct mlx5_fc_pool *fc_pool);
+static void mlx5_fc_pool_release_counter(struct mlx5_fc_pool *fc_pool, struct mlx5_fc *fc);
+
/* locking scheme:
*
* It is the responsibility of the user to prevent concurrent calls or bad
@@ -75,7 +83,7 @@ struct mlx5_fc {
* access to counter list:
* - create (user context)
* - mlx5_fc_create() only adds to an addlist to be used by
- * mlx5_fc_stats_query_work(). addlist is a lockless single linked list
+ * mlx5_fc_stats_work(). addlist is a lockless singly linked list
* that doesn't require any additional synchronization when adding single
* node.
* - spawn thread to do the actual destroy
@@ -136,81 +144,87 @@ static void mlx5_fc_stats_remove(struct mlx5_core_dev *dev,
spin_unlock(&fc_stats->counters_idr_lock);
}
-/* The function returns the last counter that was queried so the caller
- * function can continue calling it till all counters are queried.
- */
-static struct mlx5_fc *mlx5_fc_stats_query(struct mlx5_core_dev *dev,
- struct mlx5_fc *first,
- u32 last_id)
+static int get_max_bulk_query_len(struct mlx5_core_dev *dev)
{
- struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats;
- struct mlx5_fc *counter = NULL;
- struct mlx5_cmd_fc_bulk *b;
- bool more = false;
- u32 afirst_id;
- int num;
- int err;
+ return min_t(int, MLX5_SW_MAX_COUNTERS_BULK,
+ (1 << MLX5_CAP_GEN(dev, log_max_flow_counter_bulk)));
+}
- int max_bulk = min_t(int, MLX5_SW_MAX_COUNTERS_BULK,
- (1 << MLX5_CAP_GEN(dev, log_max_flow_counter_bulk)));
+static void update_counter_cache(int index, u32 *bulk_raw_data,
+ struct mlx5_fc_cache *cache)
+{
+ void *stats = MLX5_ADDR_OF(query_flow_counter_out, bulk_raw_data,
+ flow_statistics[index]);
+ u64 packets = MLX5_GET64(traffic_counter, stats, packets);
+ u64 bytes = MLX5_GET64(traffic_counter, stats, octets);
- /* first id must be aligned to 4 when using bulk query */
- afirst_id = first->id & ~0x3;
+ if (cache->packets == packets)
+ return;
- /* number of counters to query inc. the last counter */
- num = ALIGN(last_id - afirst_id + 1, 4);
- if (num > max_bulk) {
- num = max_bulk;
- last_id = afirst_id + num - 1;
- }
+ cache->packets = packets;
+ cache->bytes = bytes;
+ cache->lastuse = jiffies;
+}
- b = mlx5_cmd_fc_bulk_alloc(dev, afirst_id, num);
- if (!b) {
- mlx5_core_err(dev, "Error allocating resources for bulk query\n");
- return NULL;
- }
+static void mlx5_fc_stats_query_counter_range(struct mlx5_core_dev *dev,
+ struct mlx5_fc *first,
+ u32 last_id)
+{
+ struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats;
+ bool query_more_counters = (first->id <= last_id);
+ int max_bulk_len = get_max_bulk_query_len(dev);
+ u32 *data = fc_stats->bulk_query_out;
+ struct mlx5_fc *counter = first;
+ u32 bulk_base_id;
+ int bulk_len;
+ int err;
- err = mlx5_cmd_fc_bulk_query(dev, b);
- if (err) {
- mlx5_core_err(dev, "Error doing bulk query: %d\n", err);
- goto out;
- }
+ while (query_more_counters) {
+ /* first id must be aligned to 4 when using bulk query */
+ bulk_base_id = counter->id & ~0x3;
- counter = first;
- list_for_each_entry_from(counter, &fc_stats->counters, list) {
- struct mlx5_fc_cache *c = &counter->cache;
- u64 packets;
- u64 bytes;
+ /* number of counters to query inc. the last counter */
+ bulk_len = min_t(int, max_bulk_len,
+ ALIGN(last_id - bulk_base_id + 1, 4));
- if (counter->id > last_id) {
- more = true;
- break;
+ err = mlx5_cmd_fc_bulk_query(dev, bulk_base_id, bulk_len,
+ data);
+ if (err) {
+ mlx5_core_err(dev, "Error doing bulk query: %d\n", err);
+ return;
}
+ query_more_counters = false;
- mlx5_cmd_fc_bulk_get(dev, b,
- counter->id, &packets, &bytes);
+ list_for_each_entry_from(counter, &fc_stats->counters, list) {
+ int counter_index = counter->id - bulk_base_id;
+ struct mlx5_fc_cache *cache = &counter->cache;
- if (c->packets == packets)
- continue;
+ if (counter->id >= bulk_base_id + bulk_len) {
+ query_more_counters = true;
+ break;
+ }
- c->packets = packets;
- c->bytes = bytes;
- c->lastuse = jiffies;
+ update_counter_cache(counter_index, data, cache);
+ }
}
-
-out:
- mlx5_cmd_fc_bulk_free(b);
-
- return more ? counter : NULL;
}
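Worked example of the alignment arithmetic above: a first counter id of 22 rounds down to the 4-aligned base 20 (22 & ~0x3); querying up to last_id 27 then asks for ALIGN(27 - 20 + 1, 4) = 8 counters, clamped to max_bulk_len when that is smaller.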
-static void mlx5_free_fc(struct mlx5_core_dev *dev,
- struct mlx5_fc *counter)
+static void mlx5_fc_free(struct mlx5_core_dev *dev, struct mlx5_fc *counter)
{
mlx5_cmd_fc_free(dev, counter->id);
kfree(counter);
}
+static void mlx5_fc_release(struct mlx5_core_dev *dev, struct mlx5_fc *counter)
+{
+ struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats;
+
+ if (counter->bulk)
+ mlx5_fc_pool_release_counter(&fc_stats->fc_pool, counter);
+ else
+ mlx5_fc_free(dev, counter);
+}
+
static void mlx5_fc_stats_work(struct work_struct *work)
{
struct mlx5_core_dev *dev = container_of(work, struct mlx5_core_dev,
@@ -234,7 +248,7 @@ static void mlx5_fc_stats_work(struct work_struct *work)
llist_for_each_entry_safe(counter, tmp, dellist, dellist) {
mlx5_fc_stats_remove(dev, counter);
- mlx5_free_fc(dev, counter);
+ mlx5_fc_release(dev, counter);
}
if (time_before(now, fc_stats->next_query) ||
@@ -244,32 +258,62 @@ static void mlx5_fc_stats_work(struct work_struct *work)
counter = list_first_entry(&fc_stats->counters, struct mlx5_fc,
list);
- while (counter)
- counter = mlx5_fc_stats_query(dev, counter, last->id);
+ if (counter)
+ mlx5_fc_stats_query_counter_range(dev, counter, last->id);
fc_stats->next_query = now + fc_stats->sampling_interval;
}
-struct mlx5_fc *mlx5_fc_create(struct mlx5_core_dev *dev, bool aging)
+static struct mlx5_fc *mlx5_fc_single_alloc(struct mlx5_core_dev *dev)
{
- struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats;
struct mlx5_fc *counter;
int err;
counter = kzalloc(sizeof(*counter), GFP_KERNEL);
if (!counter)
return ERR_PTR(-ENOMEM);
- INIT_LIST_HEAD(&counter->list);
err = mlx5_cmd_fc_alloc(dev, &counter->id);
- if (err)
- goto err_out;
+ if (err) {
+ kfree(counter);
+ return ERR_PTR(err);
+ }
+
+ return counter;
+}
+
+static struct mlx5_fc *mlx5_fc_acquire(struct mlx5_core_dev *dev, bool aging)
+{
+ struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats;
+ struct mlx5_fc *counter;
+
+ if (aging && MLX5_CAP_GEN(dev, flow_counter_bulk_alloc) != 0) {
+ counter = mlx5_fc_pool_acquire_counter(&fc_stats->fc_pool);
+ if (!IS_ERR(counter))
+ return counter;
+ }
+
+ return mlx5_fc_single_alloc(dev);
+}
+
+struct mlx5_fc *mlx5_fc_create(struct mlx5_core_dev *dev, bool aging)
+{
+ struct mlx5_fc *counter = mlx5_fc_acquire(dev, aging);
+ struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats;
+ int err;
+
+ if (IS_ERR(counter))
+ return counter;
+
+ INIT_LIST_HEAD(&counter->list);
+ counter->aging = aging;
if (aging) {
u32 id = counter->id;
counter->cache.lastuse = jiffies;
- counter->aging = true;
+ counter->lastbytes = counter->cache.bytes;
+ counter->lastpackets = counter->cache.packets;
idr_preload(GFP_KERNEL);
spin_lock(&fc_stats->counters_idr_lock);
@@ -290,10 +334,7 @@ struct mlx5_fc *mlx5_fc_create(struct mlx5_core_dev *dev, bool aging)
return counter;
err_out_alloc:
- mlx5_cmd_fc_free(dev, counter->id);
-err_out:
- kfree(counter);
-
+ mlx5_fc_release(dev, counter);
return ERR_PTR(err);
}
EXPORT_SYMBOL(mlx5_fc_create);
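From a caller's perspective the lifecycle is unchanged by the pooling; a hedged sketch (the helper is hypothetical, error handling abbreviated):

    /* Sketch only: an aging counter may now come from a shared bulk when
     * the device reports flow_counter_bulk_alloc; callers see no difference.
     */
    static int example_fc_lifecycle(struct mlx5_core_dev *dev)
    {
            struct mlx5_fc *fc = mlx5_fc_create(dev, true /* aging */);

            if (IS_ERR(fc))
                    return PTR_ERR(fc);
            /* ... reference mlx5_fc_id(fc) in a flow rule ... */
            mlx5_fc_destroy(dev, fc);
            return 0;
    }
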
@@ -317,13 +358,15 @@ void mlx5_fc_destroy(struct mlx5_core_dev *dev, struct mlx5_fc *counter)
return;
}
- mlx5_free_fc(dev, counter);
+ mlx5_fc_release(dev, counter);
}
EXPORT_SYMBOL(mlx5_fc_destroy);
int mlx5_init_fc_stats(struct mlx5_core_dev *dev)
{
struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats;
+ int max_bulk_len;
+ int max_out_len;
spin_lock_init(&fc_stats->counters_idr_lock);
idr_init(&fc_stats->counters_idr);
@@ -331,14 +374,25 @@ int mlx5_init_fc_stats(struct mlx5_core_dev *dev)
init_llist_head(&fc_stats->addlist);
init_llist_head(&fc_stats->dellist);
+ max_bulk_len = get_max_bulk_query_len(dev);
+ max_out_len = mlx5_cmd_fc_get_bulk_query_out_len(max_bulk_len);
+ fc_stats->bulk_query_out = kzalloc(max_out_len, GFP_KERNEL);
+ if (!fc_stats->bulk_query_out)
+ return -ENOMEM;
+
fc_stats->wq = create_singlethread_workqueue("mlx5_fc");
if (!fc_stats->wq)
- return -ENOMEM;
+ goto err_wq_create;
fc_stats->sampling_interval = MLX5_FC_STATS_PERIOD;
INIT_DELAYED_WORK(&fc_stats->work, mlx5_fc_stats_work);
+ mlx5_fc_pool_init(&fc_stats->fc_pool, dev);
return 0;
+
+err_wq_create:
+ kfree(fc_stats->bulk_query_out);
+ return -ENOMEM;
}
void mlx5_cleanup_fc_stats(struct mlx5_core_dev *dev)
@@ -352,14 +406,16 @@ void mlx5_cleanup_fc_stats(struct mlx5_core_dev *dev)
destroy_workqueue(dev->priv.fc_stats.wq);
dev->priv.fc_stats.wq = NULL;
- idr_destroy(&fc_stats->counters_idr);
-
tmplist = llist_del_all(&fc_stats->addlist);
llist_for_each_entry_safe(counter, tmp, tmplist, addlist)
- mlx5_free_fc(dev, counter);
+ mlx5_fc_release(dev, counter);
list_for_each_entry_safe(counter, tmp, &fc_stats->counters, list)
- mlx5_free_fc(dev, counter);
+ mlx5_fc_release(dev, counter);
+
+ mlx5_fc_pool_cleanup(&fc_stats->fc_pool);
+ idr_destroy(&fc_stats->counters_idr);
+ kfree(fc_stats->bulk_query_out);
}
int mlx5_fc_query(struct mlx5_core_dev *dev, struct mlx5_fc *counter,
@@ -406,3 +462,243 @@ void mlx5_fc_update_sampling_interval(struct mlx5_core_dev *dev,
fc_stats->sampling_interval = min_t(unsigned long, interval,
fc_stats->sampling_interval);
}
+
+/* Flow counter bulks */
+
+struct mlx5_fc_bulk {
+ struct list_head pool_list;
+ u32 base_id;
+ int bulk_len;
+ unsigned long *bitmask;
+ struct mlx5_fc fcs[0];
+};
+
+static void mlx5_fc_init(struct mlx5_fc *counter, struct mlx5_fc_bulk *bulk,
+ u32 id)
+{
+ counter->bulk = bulk;
+ counter->id = id;
+}
+
+static int mlx5_fc_bulk_get_free_fcs_amount(struct mlx5_fc_bulk *bulk)
+{
+ return bitmap_weight(bulk->bitmask, bulk->bulk_len);
+}
+
+static struct mlx5_fc_bulk *mlx5_fc_bulk_create(struct mlx5_core_dev *dev)
+{
+ enum mlx5_fc_bulk_alloc_bitmask alloc_bitmask;
+ struct mlx5_fc_bulk *bulk;
+ int err = -ENOMEM;
+ int bulk_len;
+ u32 base_id;
+ int i;
+
+ alloc_bitmask = MLX5_CAP_GEN(dev, flow_counter_bulk_alloc);
+ bulk_len = alloc_bitmask > 0 ? MLX5_FC_BULK_NUM_FCS(alloc_bitmask) : 1;
+
+ bulk = kzalloc(sizeof(*bulk) + bulk_len * sizeof(struct mlx5_fc),
+ GFP_KERNEL);
+ if (!bulk)
+ goto err_alloc_bulk;
+
+ bulk->bitmask = kcalloc(BITS_TO_LONGS(bulk_len), sizeof(unsigned long),
+ GFP_KERNEL);
+ if (!bulk->bitmask)
+ goto err_alloc_bitmask;
+
+ err = mlx5_cmd_fc_bulk_alloc(dev, alloc_bitmask, &base_id);
+ if (err)
+ goto err_mlx5_cmd_bulk_alloc;
+
+ bulk->base_id = base_id;
+ bulk->bulk_len = bulk_len;
+ for (i = 0; i < bulk_len; i++) {
+ mlx5_fc_init(&bulk->fcs[i], bulk, base_id + i);
+ set_bit(i, bulk->bitmask);
+ }
+
+ return bulk;
+
+err_mlx5_cmd_bulk_alloc:
+ kfree(bulk->bitmask);
+err_alloc_bitmask:
+ kfree(bulk);
+err_alloc_bulk:
+ return ERR_PTR(err);
+}
+
+static int
+mlx5_fc_bulk_destroy(struct mlx5_core_dev *dev, struct mlx5_fc_bulk *bulk)
+{
+ if (mlx5_fc_bulk_get_free_fcs_amount(bulk) < bulk->bulk_len) {
+ mlx5_core_err(dev, "Freeing bulk before all counters were released\n");
+ return -EBUSY;
+ }
+
+ mlx5_cmd_fc_free(dev, bulk->base_id);
+ kfree(bulk->bitmask);
+ kfree(bulk);
+
+ return 0;
+}
+
+static struct mlx5_fc *mlx5_fc_bulk_acquire_fc(struct mlx5_fc_bulk *bulk)
+{
+ int free_fc_index = find_first_bit(bulk->bitmask, bulk->bulk_len);
+
+ if (free_fc_index >= bulk->bulk_len)
+ return ERR_PTR(-ENOSPC);
+
+ clear_bit(free_fc_index, bulk->bitmask);
+ return &bulk->fcs[free_fc_index];
+}
+
+static int mlx5_fc_bulk_release_fc(struct mlx5_fc_bulk *bulk, struct mlx5_fc *fc)
+{
+ int fc_index = fc->id - bulk->base_id;
+
+ if (test_bit(fc_index, bulk->bitmask))
+ return -EINVAL;
+
+ set_bit(fc_index, bulk->bitmask);
+ return 0;
+}
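The bitmask tracks free slots: a set bit means fcs[i] is available. A freshly created bulk has all bulk_len bits set, mlx5_fc_bulk_acquire_fc() hands out the lowest free index first (so fcs[0], whose id equals base_id, goes first), and a double release is caught by the test_bit() check above.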
+
+/* Flow counters pool API */
+
+static void mlx5_fc_pool_init(struct mlx5_fc_pool *fc_pool, struct mlx5_core_dev *dev)
+{
+ fc_pool->dev = dev;
+ mutex_init(&fc_pool->pool_lock);
+ INIT_LIST_HEAD(&fc_pool->fully_used);
+ INIT_LIST_HEAD(&fc_pool->partially_used);
+ INIT_LIST_HEAD(&fc_pool->unused);
+ fc_pool->available_fcs = 0;
+ fc_pool->used_fcs = 0;
+ fc_pool->threshold = 0;
+}
+
+static void mlx5_fc_pool_cleanup(struct mlx5_fc_pool *fc_pool)
+{
+ struct mlx5_core_dev *dev = fc_pool->dev;
+ struct mlx5_fc_bulk *bulk;
+ struct mlx5_fc_bulk *tmp;
+
+ list_for_each_entry_safe(bulk, tmp, &fc_pool->fully_used, pool_list)
+ mlx5_fc_bulk_destroy(dev, bulk);
+ list_for_each_entry_safe(bulk, tmp, &fc_pool->partially_used, pool_list)
+ mlx5_fc_bulk_destroy(dev, bulk);
+ list_for_each_entry_safe(bulk, tmp, &fc_pool->unused, pool_list)
+ mlx5_fc_bulk_destroy(dev, bulk);
+}
+
+static void mlx5_fc_pool_update_threshold(struct mlx5_fc_pool *fc_pool)
+{
+ fc_pool->threshold = min_t(int, MLX5_FC_POOL_MAX_THRESHOLD,
+ fc_pool->used_fcs / MLX5_FC_POOL_USED_BUFF_RATIO);
+}
+
+static struct mlx5_fc_bulk *
+mlx5_fc_pool_alloc_new_bulk(struct mlx5_fc_pool *fc_pool)
+{
+ struct mlx5_core_dev *dev = fc_pool->dev;
+ struct mlx5_fc_bulk *new_bulk;
+
+ new_bulk = mlx5_fc_bulk_create(dev);
+ if (!IS_ERR(new_bulk))
+ fc_pool->available_fcs += new_bulk->bulk_len;
+ mlx5_fc_pool_update_threshold(fc_pool);
+ return new_bulk;
+}
+
+static void
+mlx5_fc_pool_free_bulk(struct mlx5_fc_pool *fc_pool, struct mlx5_fc_bulk *bulk)
+{
+ struct mlx5_core_dev *dev = fc_pool->dev;
+
+ fc_pool->available_fcs -= bulk->bulk_len;
+ mlx5_fc_bulk_destroy(dev, bulk);
+ mlx5_fc_pool_update_threshold(fc_pool);
+}
+
+static struct mlx5_fc *
+mlx5_fc_pool_acquire_from_list(struct list_head *src_list,
+ struct list_head *next_list,
+ bool move_non_full_bulk)
+{
+ struct mlx5_fc_bulk *bulk;
+ struct mlx5_fc *fc;
+
+ if (list_empty(src_list))
+ return ERR_PTR(-ENODATA);
+
+ bulk = list_first_entry(src_list, struct mlx5_fc_bulk, pool_list);
+ fc = mlx5_fc_bulk_acquire_fc(bulk);
+ if (move_non_full_bulk || mlx5_fc_bulk_get_free_fcs_amount(bulk) == 0)
+ list_move(&bulk->pool_list, next_list);
+ return fc;
+}
+
+static struct mlx5_fc *
+mlx5_fc_pool_acquire_counter(struct mlx5_fc_pool *fc_pool)
+{
+ struct mlx5_fc_bulk *new_bulk;
+ struct mlx5_fc *fc;
+
+ mutex_lock(&fc_pool->pool_lock);
+
+ fc = mlx5_fc_pool_acquire_from_list(&fc_pool->partially_used,
+ &fc_pool->fully_used, false);
+ if (IS_ERR(fc))
+ fc = mlx5_fc_pool_acquire_from_list(&fc_pool->unused,
+ &fc_pool->partially_used,
+ true);
+ if (IS_ERR(fc)) {
+ new_bulk = mlx5_fc_pool_alloc_new_bulk(fc_pool);
+ if (IS_ERR(new_bulk)) {
+ fc = ERR_CAST(new_bulk);
+ goto out;
+ }
+ fc = mlx5_fc_bulk_acquire_fc(new_bulk);
+ list_add(&new_bulk->pool_list, &fc_pool->partially_used);
+ }
+ fc_pool->available_fcs--;
+ fc_pool->used_fcs++;
+
+out:
+ mutex_unlock(&fc_pool->pool_lock);
+ return fc;
+}
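The acquisition order is deliberate: partially used bulks are drained before untouched ones, and a new bulk is allocated only when both lists are empty; a bulk whose last free counter was just handed out migrates to fully_used. This packs live counters into as few bulks as possible, so that fully free bulks can later be returned to the device.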
+
+static void
+mlx5_fc_pool_release_counter(struct mlx5_fc_pool *fc_pool, struct mlx5_fc *fc)
+{
+ struct mlx5_core_dev *dev = fc_pool->dev;
+ struct mlx5_fc_bulk *bulk = fc->bulk;
+ int bulk_free_fcs_amount;
+
+ mutex_lock(&fc_pool->pool_lock);
+
+ if (mlx5_fc_bulk_release_fc(bulk, fc)) {
+ mlx5_core_warn(dev, "Attempted to release a counter which is not acquired\n");
+ goto unlock;
+ }
+
+ fc_pool->available_fcs++;
+ fc_pool->used_fcs--;
+
+ bulk_free_fcs_amount = mlx5_fc_bulk_get_free_fcs_amount(bulk);
+ if (bulk_free_fcs_amount == 1)
+ list_move_tail(&bulk->pool_list, &fc_pool->partially_used);
+ if (bulk_free_fcs_amount == bulk->bulk_len) {
+ list_del(&bulk->pool_list);
+ if (fc_pool->available_fcs > fc_pool->threshold)
+ mlx5_fc_pool_free_bulk(fc_pool, bulk);
+ else
+ list_add(&bulk->pool_list, &fc_pool->unused);
+ }
+
+unlock:
+ mutex_unlock(&fc_pool->pool_lock);
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
index 1a2560e3bf7c..3ed8ab2d703d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
@@ -279,7 +279,7 @@ static int mlx5i_init_tx(struct mlx5e_priv *priv)
return err;
}
- err = mlx5i_create_tis(priv->mdev, ipriv->qp.qpn, &priv->tisn[0]);
+ err = mlx5i_create_tis(priv->mdev, ipriv->qp.qpn, &priv->tisn[0][0]);
if (err) {
mlx5_core_warn(priv->mdev, "create tis failed, %d\n", err);
goto err_destroy_underlay_qp;
@@ -296,7 +296,7 @@ static void mlx5i_cleanup_tx(struct mlx5e_priv *priv)
{
struct mlx5i_priv *ipriv = priv->ppriv;
- mlx5e_destroy_tis(priv->mdev, priv->tisn[0]);
+ mlx5e_destroy_tis(priv->mdev, priv->tisn[0][0]);
mlx5i_destroy_underlay_qp(priv->mdev, &ipriv->qp);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c
index c5a491e22e55..96e64187c089 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c
@@ -210,7 +210,7 @@ static int mlx5i_pkey_open(struct net_device *netdev)
goto err_uninit_underlay_qp;
}
- err = mlx5i_create_tis(mdev, ipriv->qp.qpn, &epriv->tisn[0]);
+ err = mlx5i_create_tis(mdev, ipriv->qp.qpn, &epriv->tisn[0][0]);
if (err) {
mlx5_core_warn(mdev, "create child tis failed, %d\n", err);
goto err_remove_rx_underlay_qp;
@@ -228,7 +228,7 @@ static int mlx5i_pkey_open(struct net_device *netdev)
return 0;
err_clear_state_opened_flag:
- mlx5e_destroy_tis(mdev, epriv->tisn[0]);
+ mlx5e_destroy_tis(mdev, epriv->tisn[0][0]);
err_remove_rx_underlay_qp:
mlx5_fs_remove_rx_underlay_qpn(mdev, ipriv->qp.qpn);
err_uninit_underlay_qp:
@@ -257,7 +257,7 @@ static int mlx5i_pkey_close(struct net_device *netdev)
mlx5i_uninit_underlay_qp(priv);
mlx5e_deactivate_priv_channels(priv);
mlx5e_close_channels(&priv->channels);
- mlx5e_destroy_tis(mdev, priv->tisn[0]);
+ mlx5e_destroy_tis(mdev, priv->tisn[0][0]);
unlock:
mutex_unlock(&priv->state_lock);
return 0;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/crypto.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/crypto.c
index ea1d4d26ece0..3fc575d1c3ec 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/crypto.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/crypto.c
@@ -2,6 +2,7 @@
// Copyright (c) 2019 Mellanox Technologies.
#include "mlx5_core.h"
+#include "lib/mlx5.h"
int mlx5_create_encryption_key(struct mlx5_core_dev *mdev,
void *key, u32 sz_bytes,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/eq.h b/drivers/net/ethernet/mellanox/mlx5/core/lib/eq.h
index 3dfab91ae5f2..4be4d2d36218 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/eq.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/eq.h
@@ -87,7 +87,7 @@ void mlx5_eq_synchronize_cmd_irq(struct mlx5_core_dev *dev);
int mlx5_debug_eq_add(struct mlx5_core_dev *dev, struct mlx5_eq *eq);
void mlx5_debug_eq_remove(struct mlx5_core_dev *dev, struct mlx5_eq *eq);
-int mlx5_eq_debugfs_init(struct mlx5_core_dev *dev);
+void mlx5_eq_debugfs_init(struct mlx5_core_dev *dev);
void mlx5_eq_debugfs_cleanup(struct mlx5_core_dev *dev);
/* This function should only be called after mlx5_cmd_force_teardown_hca */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/hv.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/hv.c
new file mode 100644
index 000000000000..583dc7e2aca8
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/hv.c
@@ -0,0 +1,64 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+// Copyright (c) 2018 Mellanox Technologies
+
+#include <linux/hyperv.h>
+#include "mlx5_core.h"
+#include "lib/hv.h"
+
+static int mlx5_hv_config_common(struct mlx5_core_dev *dev, void *buf, int len,
+ int offset, bool read)
+{
+ int rc = -EOPNOTSUPP;
+ int bytes_returned;
+ int block_id;
+
+ if (offset % HV_CONFIG_BLOCK_SIZE_MAX || len != HV_CONFIG_BLOCK_SIZE_MAX)
+ return -EINVAL;
+
+ block_id = offset / HV_CONFIG_BLOCK_SIZE_MAX;
+
+ rc = read ?
+ hyperv_read_cfg_blk(dev->pdev, buf,
+ HV_CONFIG_BLOCK_SIZE_MAX, block_id,
+ &bytes_returned) :
+ hyperv_write_cfg_blk(dev->pdev, buf,
+ HV_CONFIG_BLOCK_SIZE_MAX, block_id);
+
+ /* Make sure len bytes were read successfully */
+ if (read && !rc && len != bytes_returned)
+ rc = -EIO;
+
+ if (rc) {
+ mlx5_core_err(dev, "Failed to %s hv config, err = %d, len = %d, offset = %d\n",
+ read ? "read" : "write", rc, len,
+ offset);
+ return rc;
+ }
+
+ return 0;
+}
+
+int mlx5_hv_read_config(struct mlx5_core_dev *dev, void *buf, int len,
+ int offset)
+{
+ return mlx5_hv_config_common(dev, buf, len, offset, true);
+}
+
+int mlx5_hv_write_config(struct mlx5_core_dev *dev, void *buf, int len,
+ int offset)
+{
+ return mlx5_hv_config_common(dev, buf, len, offset, false);
+}
+
+int mlx5_hv_register_invalidate(struct mlx5_core_dev *dev, void *context,
+ void (*block_invalidate)(void *context,
+ u64 block_mask))
+{
+ return hyperv_reg_block_invalidate(dev->pdev, context,
+ block_invalidate);
+}
+
+void mlx5_hv_unregister_invalidate(struct mlx5_core_dev *dev)
+{
+ hyperv_reg_block_invalidate(dev->pdev, NULL, NULL);
+}
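A hedged read sketch through the new wrappers; per the checks above, len must equal HV_CONFIG_BLOCK_SIZE_MAX and offset must be a multiple of it (the helper is hypothetical):

    /* Sketch only: read config block 0 (the control block). */
    static int example_read_control_block(struct mlx5_core_dev *dev)
    {
            u8 buf[HV_CONFIG_BLOCK_SIZE_MAX];

            return mlx5_hv_read_config(dev, buf, sizeof(buf), 0);
    }
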
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/hv.h b/drivers/net/ethernet/mellanox/mlx5/core/lib/hv.h
new file mode 100644
index 000000000000..f9a45573f459
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/hv.h
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2019 Mellanox Technologies. */
+
+#ifndef __LIB_HV_H__
+#define __LIB_HV_H__
+
+#if IS_ENABLED(CONFIG_PCI_HYPERV_INTERFACE)
+
+#include <linux/hyperv.h>
+#include <linux/mlx5/driver.h>
+
+int mlx5_hv_read_config(struct mlx5_core_dev *dev, void *buf, int len,
+ int offset);
+int mlx5_hv_write_config(struct mlx5_core_dev *dev, void *buf, int len,
+ int offset);
+int mlx5_hv_register_invalidate(struct mlx5_core_dev *dev, void *context,
+ void (*block_invalidate)(void *context,
+ u64 block_mask));
+void mlx5_hv_unregister_invalidate(struct mlx5_core_dev *dev);
+#endif
+
+#endif /* __LIB_HV_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/hv_vhca.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/hv_vhca.c
new file mode 100644
index 000000000000..4047629a876b
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/hv_vhca.c
@@ -0,0 +1,371 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+// Copyright (c) 2018 Mellanox Technologies
+
+#include <linux/hyperv.h>
+#include "mlx5_core.h"
+#include "lib/hv.h"
+#include "lib/hv_vhca.h"
+
+struct mlx5_hv_vhca {
+ struct mlx5_core_dev *dev;
+ struct workqueue_struct *work_queue;
+ struct mlx5_hv_vhca_agent *agents[MLX5_HV_VHCA_AGENT_MAX];
+ struct mutex agents_lock; /* Protect agents array */
+};
+
+struct mlx5_hv_vhca_work {
+ struct work_struct invalidate_work;
+ struct mlx5_hv_vhca *hv_vhca;
+ u64 block_mask;
+};
+
+struct mlx5_hv_vhca_data_block {
+ u16 sequence;
+ u16 offset;
+ u8 reserved[4];
+ u64 data[15];
+};
+
+struct mlx5_hv_vhca_agent {
+ enum mlx5_hv_vhca_agent_type type;
+ struct mlx5_hv_vhca *hv_vhca;
+ void *priv;
+ u16 seq;
+ void (*control)(struct mlx5_hv_vhca_agent *agent,
+ struct mlx5_hv_vhca_control_block *block);
+ void (*invalidate)(struct mlx5_hv_vhca_agent *agent,
+ u64 block_mask);
+ void (*cleanup)(struct mlx5_hv_vhca_agent *agent);
+};
+
+struct mlx5_hv_vhca *mlx5_hv_vhca_create(struct mlx5_core_dev *dev)
+{
+ struct mlx5_hv_vhca *hv_vhca = NULL;
+
+ hv_vhca = kzalloc(sizeof(*hv_vhca), GFP_KERNEL);
+ if (!hv_vhca)
+ return ERR_PTR(-ENOMEM);
+
+ hv_vhca->work_queue = create_singlethread_workqueue("mlx5_hv_vhca");
+ if (!hv_vhca->work_queue) {
+ kfree(hv_vhca);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ hv_vhca->dev = dev;
+ mutex_init(&hv_vhca->agents_lock);
+
+ return hv_vhca;
+}
+
+void mlx5_hv_vhca_destroy(struct mlx5_hv_vhca *hv_vhca)
+{
+ if (IS_ERR_OR_NULL(hv_vhca))
+ return;
+
+ destroy_workqueue(hv_vhca->work_queue);
+ kfree(hv_vhca);
+}
+
+static void mlx5_hv_vhca_invalidate_work(struct work_struct *work)
+{
+ struct mlx5_hv_vhca_work *hwork;
+ struct mlx5_hv_vhca *hv_vhca;
+ int i;
+
+ hwork = container_of(work, struct mlx5_hv_vhca_work, invalidate_work);
+ hv_vhca = hwork->hv_vhca;
+
+ mutex_lock(&hv_vhca->agents_lock);
+ for (i = 0; i < MLX5_HV_VHCA_AGENT_MAX; i++) {
+ struct mlx5_hv_vhca_agent *agent = hv_vhca->agents[i];
+
+ if (!agent || !agent->invalidate)
+ continue;
+
+ if (!(BIT(agent->type) & hwork->block_mask))
+ continue;
+
+ agent->invalidate(agent, hwork->block_mask);
+ }
+ mutex_unlock(&hv_vhca->agents_lock);
+
+ kfree(hwork);
+}
+
+void mlx5_hv_vhca_invalidate(void *context, u64 block_mask)
+{
+ struct mlx5_hv_vhca *hv_vhca = (struct mlx5_hv_vhca *)context;
+ struct mlx5_hv_vhca_work *work;
+
+ work = kzalloc(sizeof(*work), GFP_ATOMIC);
+ if (!work)
+ return;
+
+ INIT_WORK(&work->invalidate_work, mlx5_hv_vhca_invalidate_work);
+ work->hv_vhca = hv_vhca;
+ work->block_mask = block_mask;
+
+ queue_work(hv_vhca->work_queue, &work->invalidate_work);
+}
+
+#define AGENT_MASK(type) (type ? BIT(type - 1) : 0 /* control */)
+
+static void mlx5_hv_vhca_agents_control(struct mlx5_hv_vhca *hv_vhca,
+ struct mlx5_hv_vhca_control_block *block)
+{
+ int i;
+
+ for (i = 0; i < MLX5_HV_VHCA_AGENT_MAX; i++) {
+ struct mlx5_hv_vhca_agent *agent = hv_vhca->agents[i];
+
+ if (!agent || !agent->control)
+ continue;
+
+ if (!(AGENT_MASK(agent->type) & block->control))
+ continue;
+
+ agent->control(agent, block);
+ }
+}
+
+static void mlx5_hv_vhca_capabilities(struct mlx5_hv_vhca *hv_vhca,
+ u32 *capabilities)
+{
+ int i;
+
+ for (i = 0; i < MLX5_HV_VHCA_AGENT_MAX; i++) {
+ struct mlx5_hv_vhca_agent *agent = hv_vhca->agents[i];
+
+ if (agent)
+ *capabilities |= AGENT_MASK(agent->type);
+ }
+}
+
+static void
+mlx5_hv_vhca_control_agent_invalidate(struct mlx5_hv_vhca_agent *agent,
+ u64 block_mask)
+{
+ struct mlx5_hv_vhca *hv_vhca = agent->hv_vhca;
+ struct mlx5_core_dev *dev = hv_vhca->dev;
+ struct mlx5_hv_vhca_control_block *block;
+ u32 capabilities = 0;
+ int err;
+
+ block = kzalloc(sizeof(*block), GFP_KERNEL);
+ if (!block)
+ return;
+
+ err = mlx5_hv_read_config(dev, block, sizeof(*block), 0);
+ if (err)
+ goto free_block;
+
+ mlx5_hv_vhca_capabilities(hv_vhca, &capabilities);
+
+ /* If there are no capabilities, send an empty block in return */
+ if (!capabilities) {
+ memset(block, 0, sizeof(*block));
+ goto write;
+ }
+
+ if (block->capabilities != capabilities)
+ block->capabilities = capabilities;
+
+ if (block->control & ~capabilities)
+ goto free_block;
+
+ mlx5_hv_vhca_agents_control(hv_vhca, block);
+ block->command_ack = block->command;
+
+write:
+ mlx5_hv_write_config(dev, block, sizeof(*block), 0);
+
+free_block:
+ kfree(block);
+}
+
+static struct mlx5_hv_vhca_agent *
+mlx5_hv_vhca_control_agent_create(struct mlx5_hv_vhca *hv_vhca)
+{
+ return mlx5_hv_vhca_agent_create(hv_vhca, MLX5_HV_VHCA_AGENT_CONTROL,
+ NULL,
+ mlx5_hv_vhca_control_agent_invalidate,
+ NULL, NULL);
+}
+
+static void mlx5_hv_vhca_control_agent_destroy(struct mlx5_hv_vhca_agent *agent)
+{
+ mlx5_hv_vhca_agent_destroy(agent);
+}
+
+int mlx5_hv_vhca_init(struct mlx5_hv_vhca *hv_vhca)
+{
+ struct mlx5_hv_vhca_agent *agent;
+ int err;
+
+ if (IS_ERR_OR_NULL(hv_vhca))
+ return IS_ERR_OR_NULL(hv_vhca);
+
+ err = mlx5_hv_register_invalidate(hv_vhca->dev, hv_vhca,
+ mlx5_hv_vhca_invalidate);
+ if (err)
+ return err;
+
+ agent = mlx5_hv_vhca_control_agent_create(hv_vhca);
+	if (IS_ERR(agent)) {
+		mlx5_hv_unregister_invalidate(hv_vhca->dev);
+		return PTR_ERR(agent);
+ }
+
+ hv_vhca->agents[MLX5_HV_VHCA_AGENT_CONTROL] = agent;
+
+ return 0;
+}
+
+void mlx5_hv_vhca_cleanup(struct mlx5_hv_vhca *hv_vhca)
+{
+ struct mlx5_hv_vhca_agent *agent;
+ int i;
+
+ if (IS_ERR_OR_NULL(hv_vhca))
+ return;
+
+ agent = hv_vhca->agents[MLX5_HV_VHCA_AGENT_CONTROL];
+ if (agent)
+ mlx5_hv_vhca_control_agent_destroy(agent);
+
+ mutex_lock(&hv_vhca->agents_lock);
+ for (i = 0; i < MLX5_HV_VHCA_AGENT_MAX; i++)
+ WARN_ON(hv_vhca->agents[i]);
+
+ mutex_unlock(&hv_vhca->agents_lock);
+
+ mlx5_hv_unregister_invalidate(hv_vhca->dev);
+}
+
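+/* Creating or destroying an agent changes the advertised capability
+ * mask, so kick the control agent to renegotiate the control block with
+ * the host.
+ */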
+static void mlx5_hv_vhca_agents_update(struct mlx5_hv_vhca *hv_vhca)
+{
+ mlx5_hv_vhca_invalidate(hv_vhca, BIT(MLX5_HV_VHCA_AGENT_CONTROL));
+}
+
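+/* A minimal consumer sketch (illustrative only; "stats_control",
+ * "stats_cleanup" and "priv" are hypothetical names, and error handling
+ * is elided):
+ *
+ *	agent = mlx5_hv_vhca_agent_create(hv_vhca, MLX5_HV_VHCA_AGENT_STATS,
+ *					  stats_control, NULL,
+ *					  stats_cleanup, priv);
+ *	if (IS_ERR_OR_NULL(agent))
+ *		return;
+ *	...
+ *	mlx5_hv_vhca_agent_write(agent, buf, len);
+ *	...
+ *	mlx5_hv_vhca_agent_destroy(agent);
+ */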
+struct mlx5_hv_vhca_agent *
+mlx5_hv_vhca_agent_create(struct mlx5_hv_vhca *hv_vhca,
+ enum mlx5_hv_vhca_agent_type type,
+ void (*control)(struct mlx5_hv_vhca_agent*,
+ struct mlx5_hv_vhca_control_block *block),
+ void (*invalidate)(struct mlx5_hv_vhca_agent*,
+ u64 block_mask),
+			  void (*cleanup)(struct mlx5_hv_vhca_agent *agent),
+ void *priv)
+{
+ struct mlx5_hv_vhca_agent *agent;
+
+	if (IS_ERR_OR_NULL(hv_vhca))
+		return ERR_PTR(-EINVAL);
+
+ if (type >= MLX5_HV_VHCA_AGENT_MAX)
+ return ERR_PTR(-EINVAL);
+
+	/* Allocate and initialize up front, so that the busy-slot check and
+	 * the insertion below happen under a single hold of agents_lock and
+	 * two concurrent callers cannot both claim the same type.
+	 */
+	agent = kzalloc(sizeof(*agent), GFP_KERNEL);
+	if (!agent)
+		return ERR_PTR(-ENOMEM);
+
+	agent->type = type;
+	agent->hv_vhca = hv_vhca;
+	agent->priv = priv;
+	agent->control = control;
+	agent->invalidate = invalidate;
+	agent->cleanup = cleanup;
+
+	mutex_lock(&hv_vhca->agents_lock);
+	if (hv_vhca->agents[type]) {
+		mutex_unlock(&hv_vhca->agents_lock);
+		kfree(agent);
+		return ERR_PTR(-EINVAL);
+	}
+	hv_vhca->agents[type] = agent;
+	mutex_unlock(&hv_vhca->agents_lock);
+
+ mlx5_hv_vhca_agents_update(hv_vhca);
+
+ return agent;
+}
+
+void mlx5_hv_vhca_agent_destroy(struct mlx5_hv_vhca_agent *agent)
+{
+ struct mlx5_hv_vhca *hv_vhca = agent->hv_vhca;
+
+ mutex_lock(&hv_vhca->agents_lock);
+
+ if (WARN_ON(agent != hv_vhca->agents[agent->type])) {
+ mutex_unlock(&hv_vhca->agents_lock);
+ return;
+ }
+
+ hv_vhca->agents[agent->type] = NULL;
+ mutex_unlock(&hv_vhca->agents_lock);
+
+ if (agent->cleanup)
+ agent->cleanup(agent);
+
+ kfree(agent);
+
+ mlx5_hv_vhca_agents_update(hv_vhca);
+}
+
+static int mlx5_hv_vhca_data_block_prepare(struct mlx5_hv_vhca_agent *agent,
+ struct mlx5_hv_vhca_data_block *data_block,
+ void *src, int len, int *offset)
+{
+ int bytes = min_t(int, (int)sizeof(data_block->data), len);
+
+ data_block->sequence = agent->seq;
+ data_block->offset = (*offset)++;
+ memcpy(data_block->data, src, bytes);
+
+ return bytes;
+}
+
+static void mlx5_hv_vhca_agent_seq_update(struct mlx5_hv_vhca_agent *agent)
+{
+ agent->seq++;
+}
+
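+/* A buffer larger than one data block is sent as a series of blocks that
+ * share the same sequence number and carry increasing offsets; the
+ * sequence is only bumped once the whole buffer has been written, which
+ * lets the reader detect a partially written multi-block update.
+ */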
+int mlx5_hv_vhca_agent_write(struct mlx5_hv_vhca_agent *agent,
+ void *buf, int len)
+{
+ int offset = agent->type * HV_CONFIG_BLOCK_SIZE_MAX;
+ int block_offset = 0;
+ int total = 0;
+ int err;
+
+ while (len) {
+ struct mlx5_hv_vhca_data_block data_block = {0};
+ int bytes;
+
+ bytes = mlx5_hv_vhca_data_block_prepare(agent, &data_block,
+ buf + total,
+ len, &block_offset);
+ if (!bytes)
+ return -ENOMEM;
+
+ err = mlx5_hv_write_config(agent->hv_vhca->dev, &data_block,
+ sizeof(data_block), offset);
+ if (err)
+ return err;
+
+ total += bytes;
+ len -= bytes;
+ }
+
+ mlx5_hv_vhca_agent_seq_update(agent);
+
+ return 0;
+}
+
+void *mlx5_hv_vhca_agent_priv(struct mlx5_hv_vhca_agent *agent)
+{
+ return agent->priv;
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/hv_vhca.h b/drivers/net/ethernet/mellanox/mlx5/core/lib/hv_vhca.h
new file mode 100644
index 000000000000..4bad6a5fde56
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/hv_vhca.h
@@ -0,0 +1,104 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2019 Mellanox Technologies. */
+
+#ifndef __LIB_HV_VHCA_H__
+#define __LIB_HV_VHCA_H__
+
+#include "en.h"
+#include "lib/hv.h"
+
+struct mlx5_hv_vhca_agent;
+struct mlx5_hv_vhca;
+struct mlx5_hv_vhca_control_block;
+
+enum mlx5_hv_vhca_agent_type {
+ MLX5_HV_VHCA_AGENT_CONTROL = 0,
+ MLX5_HV_VHCA_AGENT_STATS = 1,
+ MLX5_HV_VHCA_AGENT_MAX = 32,
+};
+
+#if IS_ENABLED(CONFIG_PCI_HYPERV_INTERFACE)
+
+struct mlx5_hv_vhca_control_block {
+ u32 capabilities;
+ u32 control;
+ u16 command;
+ u16 command_ack;
+ u16 version;
+ u16 rings;
+ u32 reserved1[28];
+};
+
+struct mlx5_hv_vhca *mlx5_hv_vhca_create(struct mlx5_core_dev *dev);
+void mlx5_hv_vhca_destroy(struct mlx5_hv_vhca *hv_vhca);
+int mlx5_hv_vhca_init(struct mlx5_hv_vhca *hv_vhca);
+void mlx5_hv_vhca_cleanup(struct mlx5_hv_vhca *hv_vhca);
+void mlx5_hv_vhca_invalidate(void *context, u64 block_mask);
+
+struct mlx5_hv_vhca_agent *
+mlx5_hv_vhca_agent_create(struct mlx5_hv_vhca *hv_vhca,
+ enum mlx5_hv_vhca_agent_type type,
+ void (*control)(struct mlx5_hv_vhca_agent*,
+ struct mlx5_hv_vhca_control_block *block),
+ void (*invalidate)(struct mlx5_hv_vhca_agent*,
+ u64 block_mask),
+ void (*cleanup)(struct mlx5_hv_vhca_agent *agent),
+ void *context);
+
+void mlx5_hv_vhca_agent_destroy(struct mlx5_hv_vhca_agent *agent);
+int mlx5_hv_vhca_agent_write(struct mlx5_hv_vhca_agent *agent,
+ void *buf, int len);
+void *mlx5_hv_vhca_agent_priv(struct mlx5_hv_vhca_agent *agent);
+
+#else
+
+static inline struct mlx5_hv_vhca *
+mlx5_hv_vhca_create(struct mlx5_core_dev *dev)
+{
+ return NULL;
+}
+
+static inline void mlx5_hv_vhca_destroy(struct mlx5_hv_vhca *hv_vhca)
+{
+}
+
+static inline int mlx5_hv_vhca_init(struct mlx5_hv_vhca *hv_vhca)
+{
+ return 0;
+}
+
+static inline void mlx5_hv_vhca_cleanup(struct mlx5_hv_vhca *hv_vhca)
+{
+}
+
+static inline void mlx5_hv_vhca_invalidate(void *context,
+ u64 block_mask)
+{
+}
+
+static inline struct mlx5_hv_vhca_agent *
+mlx5_hv_vhca_agent_create(struct mlx5_hv_vhca *hv_vhca,
+ enum mlx5_hv_vhca_agent_type type,
+ void (*control)(struct mlx5_hv_vhca_agent*,
+ struct mlx5_hv_vhca_control_block *block),
+ void (*invalidate)(struct mlx5_hv_vhca_agent*,
+ u64 block_mask),
+ void (*cleanup)(struct mlx5_hv_vhca_agent *agent),
+ void *context)
+{
+ return NULL;
+}
+
+static inline void mlx5_hv_vhca_agent_destroy(struct mlx5_hv_vhca_agent *agent)
+{
+}
+
+static inline int
+mlx5_hv_vhca_agent_write(struct mlx5_hv_vhca_agent *agent,
+			 void *buf, int len)
+{
+	return 0;
+}
+
+static inline void *mlx5_hv_vhca_agent_priv(struct mlx5_hv_vhca_agent *agent)
+{
+	return NULL;
+}
+#endif
+
+#endif /* __LIB_HV_VHCA_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/vxlan.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/vxlan.c
index b9d4f4e19ff9..148b55c3db7a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/vxlan.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/vxlan.c
@@ -32,6 +32,7 @@
#include <linux/kernel.h>
#include <linux/module.h>
+#include <linux/refcount.h>
#include <linux/mlx5/driver.h>
#include <net/vxlan.h>
#include "mlx5_core.h"
@@ -48,7 +49,7 @@ struct mlx5_vxlan {
struct mlx5_vxlan_port {
struct hlist_node hlist;
- atomic_t refcount;
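+	/* refcount_t saturates on overflow/underflow instead of wrapping,
+	 * so refcounting bugs surface as warnings rather than
+	 * use-after-free.
+	 */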
+ refcount_t refcount;
u16 udp_port;
};
@@ -113,7 +114,7 @@ int mlx5_vxlan_add_port(struct mlx5_vxlan *vxlan, u16 port)
vxlanp = mlx5_vxlan_lookup_port(vxlan, port);
if (vxlanp) {
- atomic_inc(&vxlanp->refcount);
+ refcount_inc(&vxlanp->refcount);
return 0;
}
@@ -137,7 +138,7 @@ int mlx5_vxlan_add_port(struct mlx5_vxlan *vxlan, u16 port)
}
vxlanp->udp_port = port;
- atomic_set(&vxlanp->refcount, 1);
+ refcount_set(&vxlanp->refcount, 1);
spin_lock_bh(&vxlan->lock);
hash_add(vxlan->htable, &vxlanp->hlist, port);
@@ -170,7 +171,7 @@ int mlx5_vxlan_del_port(struct mlx5_vxlan *vxlan, u16 port)
goto out_unlock;
}
- if (atomic_dec_and_test(&vxlanp->refcount)) {
+ if (refcount_dec_and_test(&vxlanp->refcount)) {
hash_del(&vxlanp->hlist);
remove = true;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index b15b27a497fc..dee1a8658c87 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -69,6 +69,7 @@
#include "lib/pci_vsc.h"
#include "diag/fw_tracer.h"
#include "ecpf.h"
+#include "lib/hv_vhca.h"
MODULE_AUTHOR("Eli Cohen <eli@mellanox.com>");
MODULE_DESCRIPTION("Mellanox 5th generation network adapters (ConnectX series) core driver");
@@ -495,6 +496,12 @@ static int handle_hca_cap_odp(struct mlx5_core_dev *dev)
ODP_CAP_SET_MAX(dev, xrc_odp_caps.write);
ODP_CAP_SET_MAX(dev, xrc_odp_caps.read);
ODP_CAP_SET_MAX(dev, xrc_odp_caps.atomic);
+ ODP_CAP_SET_MAX(dev, dc_odp_caps.srq_receive);
+ ODP_CAP_SET_MAX(dev, dc_odp_caps.send);
+ ODP_CAP_SET_MAX(dev, dc_odp_caps.receive);
+ ODP_CAP_SET_MAX(dev, dc_odp_caps.write);
+ ODP_CAP_SET_MAX(dev, dc_odp_caps.read);
+ ODP_CAP_SET_MAX(dev, dc_odp_caps.atomic);
if (do_set)
err = set_caps(dev, set_ctx, set_sz,
@@ -826,11 +833,7 @@ static int mlx5_init_once(struct mlx5_core_dev *dev)
goto err_eq_cleanup;
}
- err = mlx5_cq_debugfs_init(dev);
- if (err) {
- mlx5_core_err(dev, "failed to initialize cq debugfs\n");
- goto err_events_cleanup;
- }
+ mlx5_cq_debugfs_init(dev);
mlx5_init_qp_table(dev);
@@ -874,6 +877,7 @@ static int mlx5_init_once(struct mlx5_core_dev *dev)
}
dev->tracer = mlx5_fw_tracer_create(dev);
+ dev->hv_vhca = mlx5_hv_vhca_create(dev);
return 0;
@@ -891,7 +895,6 @@ err_tables_cleanup:
mlx5_cleanup_mkey_table(dev);
mlx5_cleanup_qp_table(dev);
mlx5_cq_debugfs_cleanup(dev);
-err_events_cleanup:
mlx5_events_cleanup(dev);
err_eq_cleanup:
mlx5_eq_table_cleanup(dev);
@@ -905,6 +908,7 @@ err_devcom:
static void mlx5_cleanup_once(struct mlx5_core_dev *dev)
{
+ mlx5_hv_vhca_destroy(dev->hv_vhca);
mlx5_fw_tracer_destroy(dev->tracer);
mlx5_fpga_cleanup(dev);
mlx5_eswitch_cleanup(dev->priv.eswitch);
@@ -1072,6 +1076,8 @@ static int mlx5_load(struct mlx5_core_dev *dev)
goto err_fw_tracer;
}
+ mlx5_hv_vhca_init(dev->hv_vhca);
+
err = mlx5_fpga_device_start(dev);
if (err) {
mlx5_core_err(dev, "fpga device start failed %d\n", err);
@@ -1127,6 +1133,7 @@ err_tls_start:
err_ipsec_start:
mlx5_fpga_device_stop(dev);
err_fpga_start:
+ mlx5_hv_vhca_cleanup(dev->hv_vhca);
mlx5_fw_tracer_cleanup(dev->tracer);
err_fw_tracer:
mlx5_eq_table_destroy(dev);
@@ -1147,6 +1154,7 @@ static void mlx5_unload(struct mlx5_core_dev *dev)
mlx5_accel_ipsec_cleanup(dev);
mlx5_accel_tls_cleanup(dev);
mlx5_fpga_device_stop(dev);
+ mlx5_hv_vhca_cleanup(dev->hv_vhca);
mlx5_fw_tracer_cleanup(dev->tracer);
mlx5_eq_table_destroy(dev);
mlx5_irq_table_destroy(dev);
@@ -1217,8 +1225,10 @@ static int mlx5_unload_one(struct mlx5_core_dev *dev, bool cleanup)
{
int err = 0;
- if (cleanup)
+ if (cleanup) {
+ mlx5_unregister_device(dev);
mlx5_drain_health_wq(dev);
+ }
mutex_lock(&dev->intf_state_mutex);
if (!test_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state)) {
@@ -1369,7 +1379,6 @@ static void remove_one(struct pci_dev *pdev)
mlx5_crdump_disable(dev);
mlx5_devlink_unregister(devlink);
- mlx5_unregister_device(dev);
if (mlx5_unload_one(dev, true)) {
mlx5_core_err(dev, "mlx5_unload_one failed\n");
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
index 471bbc48bc1f..87b75b2207c4 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
@@ -146,7 +146,7 @@ u64 mlx5_read_internal_timer(struct mlx5_core_dev *dev,
void mlx5_cmd_trigger_completions(struct mlx5_core_dev *dev);
void mlx5_cmd_flush(struct mlx5_core_dev *dev);
-int mlx5_cq_debugfs_init(struct mlx5_core_dev *dev);
+void mlx5_cq_debugfs_init(struct mlx5_core_dev *dev);
void mlx5_cq_debugfs_cleanup(struct mlx5_core_dev *dev);
int mlx5_query_pcam_reg(struct mlx5_core_dev *dev, u32 *pcam, u8 feature_group,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/qp.c b/drivers/net/ethernet/mellanox/mlx5/core/qp.c
index b8ba74de9555..c3aea4cc2fff 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/qp.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/qp.c
@@ -53,7 +53,7 @@ mlx5_get_rsc(struct mlx5_qp_table *table, u32 rsn)
common = radix_tree_lookup(&table->tree, rsn);
if (common)
- atomic_inc(&common->refcount);
+ refcount_inc(&common->refcount);
spin_unlock_irqrestore(&table->lock, flags);
@@ -62,7 +62,7 @@ mlx5_get_rsc(struct mlx5_qp_table *table, u32 rsn)
void mlx5_core_put_rsc(struct mlx5_core_rsc_common *common)
{
- if (atomic_dec_and_test(&common->refcount))
+ if (refcount_dec_and_test(&common->refcount))
complete(&common->free);
}
@@ -162,7 +162,7 @@ static int rsc_event_notifier(struct notifier_block *nb,
common = mlx5_get_rsc(table, rsn);
if (!common) {
- mlx5_core_warn(dev, "Async event for bogus resource 0x%x\n", rsn);
+ mlx5_core_dbg(dev, "Async event for unknown resource 0x%x\n", rsn);
return NOTIFY_OK;
}
@@ -209,7 +209,7 @@ static int create_resource_common(struct mlx5_core_dev *dev,
if (err)
return err;
- atomic_set(&qp->common.refcount, 1);
+ refcount_set(&qp->common.refcount, 1);
init_completion(&qp->common.free);
qp->pid = current->pid;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/rdma.c b/drivers/net/ethernet/mellanox/mlx5/core/rdma.c
index 17ce9dd56b13..18af6981e0be 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/rdma.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/rdma.c
@@ -51,7 +51,7 @@ static int mlx5_rdma_enable_roce_steering(struct mlx5_core_dev *dev)
return -ENOMEM;
}
- ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_RDMA_RX);
+ ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_RDMA_RX_KERNEL);
if (!ns) {
mlx5_core_err(dev, "Failed to get RDMA RX namespace");
err = -EOPNOTSUPP;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/vport.c b/drivers/net/ethernet/mellanox/mlx5/core/vport.c
index c912d82ca64b..30f7848a6f88 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/vport.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/vport.c
@@ -122,12 +122,13 @@ void mlx5_query_min_inline(struct mlx5_core_dev *mdev,
u8 *min_inline_mode)
{
switch (MLX5_CAP_ETH(mdev, wqe_inline_mode)) {
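+	/* Prefer the per-vport setting; if the query fails, fall back to
+	 * the plain L2 inline mode below.
+	 */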
+ case MLX5_CAP_INLINE_MODE_VPORT_CONTEXT:
+ if (!mlx5_query_nic_vport_min_inline(mdev, 0, min_inline_mode))
+ break;
+ /* fall through */
case MLX5_CAP_INLINE_MODE_L2:
*min_inline_mode = MLX5_INLINE_MODE_L2;
break;
- case MLX5_CAP_INLINE_MODE_VPORT_CONTEXT:
- mlx5_query_nic_vport_min_inline(mdev, 0, min_inline_mode);
- break;
case MLX5_CAP_INLINE_MODE_NOT_REQUIRED:
*min_inline_mode = MLX5_INLINE_MODE_NONE;
break;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/wq.c b/drivers/net/ethernet/mellanox/mlx5/core/wq.c
index 953cc8efba69..dd2315ce4441 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/wq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/wq.c
@@ -44,6 +44,11 @@ u32 mlx5_cqwq_get_size(struct mlx5_cqwq *wq)
return wq->fbc.sz_m1 + 1;
}
+u8 mlx5_cqwq_get_log_stride_size(struct mlx5_cqwq *wq)
+{
+ return wq->fbc.log_stride;
+}
+
u32 mlx5_wq_ll_get_size(struct mlx5_wq_ll *wq)
{
return (u32)wq->fbc.sz_m1 + 1;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/wq.h b/drivers/net/ethernet/mellanox/mlx5/core/wq.h
index f1ec58c9e9e3..55791f71a778 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/wq.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/wq.h
@@ -89,6 +89,7 @@ int mlx5_cqwq_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
void *cqc, struct mlx5_cqwq *wq,
struct mlx5_wq_ctrl *wq_ctrl);
u32 mlx5_cqwq_get_size(struct mlx5_cqwq *wq);
+u8 mlx5_cqwq_get_log_stride_size(struct mlx5_cqwq *wq);
int mlx5_wq_ll_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
void *wqc, struct mlx5_wq_ll *wq,
diff --git a/drivers/net/ethernet/mellanox/mlxsw/Kconfig b/drivers/net/ethernet/mellanox/mlxsw/Kconfig
index 06c80343d9ed..f458fd1ce9f8 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/Kconfig
+++ b/drivers/net/ethernet/mellanox/mlxsw/Kconfig
@@ -71,7 +71,7 @@ config MLXSW_SWITCHX2
module will be called mlxsw_switchx2.
config MLXSW_SPECTRUM
- tristate "Mellanox Technologies Spectrum support"
+ tristate "Mellanox Technologies Spectrum family support"
depends on MLXSW_CORE && MLXSW_PCI && NET_SWITCHDEV && VLAN_8021Q
depends on PSAMPLE || PSAMPLE=n
depends on BRIDGE || BRIDGE=n
@@ -87,8 +87,8 @@ config MLXSW_SPECTRUM
select NET_PTP_CLASSIFY if PTP_1588_CLOCK
default m
---help---
- This driver supports Mellanox Technologies Spectrum Ethernet
- Switch ASICs.
+ This driver supports Mellanox Technologies
+ Spectrum/Spectrum-2/Spectrum-3 Ethernet Switch ASICs.
To compile this driver as a module, choose M here: the
module will be called mlxsw_spectrum.
diff --git a/drivers/net/ethernet/mellanox/mlxsw/Makefile b/drivers/net/ethernet/mellanox/mlxsw/Makefile
index 171b36bd8a4e..0e86a581d45b 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/Makefile
+++ b/drivers/net/ethernet/mellanox/mlxsw/Makefile
@@ -29,7 +29,7 @@ mlxsw_spectrum-objs := spectrum.o spectrum_buffers.o \
spectrum_mr_tcam.o spectrum_mr.o \
spectrum_qdisc.o spectrum_span.o \
spectrum_nve.o spectrum_nve_vxlan.o \
- spectrum_dpipe.o
+ spectrum_dpipe.o spectrum_trap.o
mlxsw_spectrum-$(CONFIG_MLXSW_SPECTRUM_DCB) += spectrum_dcb.o
mlxsw_spectrum-$(CONFIG_PTP_1588_CLOCK) += spectrum_ptp.o
obj-$(CONFIG_MLXSW_MINIMAL) += mlxsw_minimal.o
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.c b/drivers/net/ethernet/mellanox/mlxsw/core.c
index 17ceac7505e5..963a2b4b61b1 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/core.c
@@ -1017,6 +1017,54 @@ static int mlxsw_devlink_flash_update(struct devlink *devlink,
component, extack);
}
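+
+/* The devlink trap ops are thin wrappers that dispatch to the per-ASIC
+ * driver callbacks; a driver that does not implement a callback reports
+ * -EOPNOTSUPP (or simply returns, for fini).
+ */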
+static int mlxsw_devlink_trap_init(struct devlink *devlink,
+ const struct devlink_trap *trap,
+ void *trap_ctx)
+{
+ struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
+ struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
+
+ if (!mlxsw_driver->trap_init)
+ return -EOPNOTSUPP;
+ return mlxsw_driver->trap_init(mlxsw_core, trap, trap_ctx);
+}
+
+static void mlxsw_devlink_trap_fini(struct devlink *devlink,
+ const struct devlink_trap *trap,
+ void *trap_ctx)
+{
+ struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
+ struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
+
+ if (!mlxsw_driver->trap_fini)
+ return;
+ mlxsw_driver->trap_fini(mlxsw_core, trap, trap_ctx);
+}
+
+static int mlxsw_devlink_trap_action_set(struct devlink *devlink,
+ const struct devlink_trap *trap,
+ enum devlink_trap_action action)
+{
+ struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
+ struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
+
+ if (!mlxsw_driver->trap_action_set)
+ return -EOPNOTSUPP;
+ return mlxsw_driver->trap_action_set(mlxsw_core, trap, action);
+}
+
+static int
+mlxsw_devlink_trap_group_init(struct devlink *devlink,
+ const struct devlink_trap_group *group)
+{
+ struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
+ struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
+
+ if (!mlxsw_driver->trap_group_init)
+ return -EOPNOTSUPP;
+ return mlxsw_driver->trap_group_init(mlxsw_core, group);
+}
+
static const struct devlink_ops mlxsw_devlink_ops = {
.reload = mlxsw_devlink_core_bus_device_reload,
.port_type_set = mlxsw_devlink_port_type_set,
@@ -1034,6 +1082,10 @@ static const struct devlink_ops mlxsw_devlink_ops = {
.sb_occ_tc_port_bind_get = mlxsw_devlink_sb_occ_tc_port_bind_get,
.info_get = mlxsw_devlink_info_get,
.flash_update = mlxsw_devlink_flash_update,
+ .trap_init = mlxsw_devlink_trap_init,
+ .trap_fini = mlxsw_devlink_trap_fini,
+ .trap_action_set = mlxsw_devlink_trap_action_set,
+ .trap_group_init = mlxsw_devlink_trap_group_init,
};
static int
@@ -1477,6 +1529,18 @@ void mlxsw_core_trap_unregister(struct mlxsw_core *mlxsw_core,
}
EXPORT_SYMBOL(mlxsw_core_trap_unregister);
+int mlxsw_core_trap_action_set(struct mlxsw_core *mlxsw_core,
+ const struct mlxsw_listener *listener,
+ enum mlxsw_reg_hpkt_action action)
+{
+ char hpkt_pl[MLXSW_REG_HPKT_LEN];
+
+ mlxsw_reg_hpkt_pack(hpkt_pl, action, listener->trap_id,
+ listener->trap_group, listener->is_ctrl);
+ return mlxsw_reg_write(mlxsw_core, MLXSW_REG(hpkt), hpkt_pl);
+}
+EXPORT_SYMBOL(mlxsw_core_trap_action_set);
+
static u64 mlxsw_core_tid_get(struct mlxsw_core *mlxsw_core)
{
return atomic64_inc_return(&mlxsw_core->emad.tid);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.h b/drivers/net/ethernet/mellanox/mlxsw/core.h
index 8efcff4b59cb..b65a17d49e43 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/core.h
@@ -128,6 +128,9 @@ int mlxsw_core_trap_register(struct mlxsw_core *mlxsw_core,
void mlxsw_core_trap_unregister(struct mlxsw_core *mlxsw_core,
const struct mlxsw_listener *listener,
void *priv);
+int mlxsw_core_trap_action_set(struct mlxsw_core *mlxsw_core,
+ const struct mlxsw_listener *listener,
+ enum mlxsw_reg_hpkt_action action);
typedef void mlxsw_reg_trans_cb_t(struct mlxsw_core *mlxsw_core, char *payload,
size_t payload_len, unsigned long cb_priv);
@@ -289,6 +292,15 @@ struct mlxsw_driver {
int (*flash_update)(struct mlxsw_core *mlxsw_core,
const char *file_name, const char *component,
struct netlink_ext_ack *extack);
+ int (*trap_init)(struct mlxsw_core *mlxsw_core,
+ const struct devlink_trap *trap, void *trap_ctx);
+ void (*trap_fini)(struct mlxsw_core *mlxsw_core,
+ const struct devlink_trap *trap, void *trap_ctx);
+ int (*trap_action_set)(struct mlxsw_core *mlxsw_core,
+ const struct devlink_trap *trap,
+ enum devlink_trap_action action);
+ int (*trap_group_init)(struct mlxsw_core *mlxsw_core,
+ const struct devlink_trap_group *group);
void (*txhdr_construct)(struct sk_buff *skb,
const struct mlxsw_tx_info *tx_info);
int (*resources_register)(struct mlxsw_core *mlxsw_core);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci.h b/drivers/net/ethernet/mellanox/mlxsw/pci.h
index 946339e13eb9..5b1323645a5d 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/pci.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/pci.h
@@ -9,6 +9,7 @@
#define PCI_DEVICE_ID_MELLANOX_SWITCHX2 0xc738
#define PCI_DEVICE_ID_MELLANOX_SPECTRUM 0xcb84
#define PCI_DEVICE_ID_MELLANOX_SPECTRUM2 0xcf6c
+#define PCI_DEVICE_ID_MELLANOX_SPECTRUM3 0xcf70
#define PCI_DEVICE_ID_MELLANOX_SWITCHIB 0xcb20
#define PCI_DEVICE_ID_MELLANOX_SWITCHIB2 0xcf08
diff --git a/drivers/net/ethernet/mellanox/mlxsw/reg.h b/drivers/net/ethernet/mellanox/mlxsw/reg.h
index ead36702549a..5494cf93f34c 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/reg.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/reg.h
@@ -4126,7 +4126,6 @@ MLXSW_ITEM32(reg, ptys, ext_eth_proto_cap, 0x08, 0, 32);
#define MLXSW_REG_PTYS_ETH_SPEED_20GBASE_KR2 BIT(5)
#define MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4 BIT(6)
#define MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4 BIT(7)
-#define MLXSW_REG_PTYS_ETH_SPEED_56GBASE_R4 BIT(8)
#define MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR BIT(12)
#define MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR BIT(13)
#define MLXSW_REG_PTYS_ETH_SPEED_10GBASE_ER_LR BIT(14)
@@ -5422,6 +5421,14 @@ enum mlxsw_reg_htgt_trap_group {
MLXSW_REG_HTGT_TRAP_GROUP_SP_LBERROR,
MLXSW_REG_HTGT_TRAP_GROUP_SP_PTP0,
MLXSW_REG_HTGT_TRAP_GROUP_SP_PTP1,
+
+ __MLXSW_REG_HTGT_TRAP_GROUP_MAX,
+ MLXSW_REG_HTGT_TRAP_GROUP_MAX = __MLXSW_REG_HTGT_TRAP_GROUP_MAX - 1
+};
+
+enum mlxsw_reg_htgt_discard_trap_group {
+ MLXSW_REG_HTGT_DISCARD_TRAP_GROUP_BASE = MLXSW_REG_HTGT_TRAP_GROUP_MAX,
+ MLXSW_REG_HTGT_TRAP_GROUP_SP_L2_DISCARDS,
};
/* reg_htgt_trap_group
@@ -5559,6 +5566,8 @@ enum mlxsw_reg_hpkt_action {
MLXSW_REG_HPKT_ACTION_DISCARD,
MLXSW_REG_HPKT_ACTION_SOFT_DISCARD,
MLXSW_REG_HPKT_ACTION_TRAP_AND_SOFT_DISCARD,
+ MLXSW_REG_HPKT_ACTION_TRAP_EXCEPTION_TO_CPU,
+ MLXSW_REG_HPKT_ACTION_SET_FW_DEFAULT = 15,
};
/* reg_hpkt_action
@@ -5569,6 +5578,8 @@ enum mlxsw_reg_hpkt_action {
* 3 - Discard.
* 4 - Soft discard (allow other traps to act on the packet).
* 5 - Trap and soft discard (allow other traps to overwrite this trap).
+ * 6 - Trap to CPU (CPU receives sole copy) and count it as error.
+ * 15 - Restore the firmware's default action.
* Access: RW
*
* Note: Must be set to 0 (forward) for event trap IDs, as they are already
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
index eda9c23e87b2..91e4792bb7e7 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
@@ -48,7 +48,7 @@
#define MLXSW_SP1_FWREV_MAJOR 13
#define MLXSW_SP1_FWREV_MINOR 2000
-#define MLXSW_SP1_FWREV_SUBMINOR 1122
+#define MLXSW_SP1_FWREV_SUBMINOR 1886
#define MLXSW_SP1_FWREV_CAN_RESET_MINOR 1702
static const struct mlxsw_fw_rev mlxsw_sp1_fw_rev = {
@@ -65,6 +65,7 @@ static const struct mlxsw_fw_rev mlxsw_sp1_fw_rev = {
static const char mlxsw_sp1_driver_name[] = "mlxsw_spectrum";
static const char mlxsw_sp2_driver_name[] = "mlxsw_spectrum2";
+static const char mlxsw_sp3_driver_name[] = "mlxsw_spectrum3";
static const char mlxsw_sp_driver_version[] = "1.0";
static const unsigned char mlxsw_sp1_mac_mask[ETH_ALEN] = {
@@ -174,6 +175,10 @@ struct mlxsw_sp_ptp_ops {
void (*shaper_work)(struct work_struct *work);
int (*get_ts_info)(struct mlxsw_sp *mlxsw_sp,
struct ethtool_ts_info *info);
+ int (*get_stats_count)(void);
+ void (*get_stats_strings)(u8 **p);
+ void (*get_stats)(struct mlxsw_sp_port *mlxsw_sp_port,
+ u64 *data, int data_index);
};
static int mlxsw_sp_component_query(struct mlxfw_dev *mlxfw_dev,
@@ -1625,7 +1630,7 @@ mlxsw_sp_setup_tc_block_flower_bind(struct mlxsw_sp_port *mlxsw_sp_port,
}
flow_block_cb_incref(block_cb);
err = mlxsw_sp_acl_block_bind(mlxsw_sp, acl_block,
- mlxsw_sp_port, ingress);
+ mlxsw_sp_port, ingress, f->extack);
if (err)
goto err_block_bind;
@@ -2328,6 +2333,7 @@ static void mlxsw_sp_port_get_tc_strings(u8 **p, int tc)
static void mlxsw_sp_port_get_strings(struct net_device *dev,
u32 stringset, u8 *data)
{
+ struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
u8 *p = data;
int i;
@@ -2369,6 +2375,7 @@ static void mlxsw_sp_port_get_strings(struct net_device *dev,
for (i = 0; i < TC_MAX_QUEUE; i++)
mlxsw_sp_port_get_tc_strings(&p, i);
+ mlxsw_sp_port->mlxsw_sp->ptp_ops->get_stats_strings(&p);
break;
}
}
@@ -2463,6 +2470,7 @@ static void __mlxsw_sp_port_get_stats(struct net_device *dev,
static void mlxsw_sp_port_get_stats(struct net_device *dev,
struct ethtool_stats *stats, u64 *data)
{
+ struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
int i, data_index = 0;
/* IEEE 802.3 Counters */
@@ -2503,13 +2511,21 @@ static void mlxsw_sp_port_get_stats(struct net_device *dev,
data, data_index);
data_index += MLXSW_SP_PORT_HW_TC_STATS_LEN;
}
+
+ /* PTP counters */
+ mlxsw_sp_port->mlxsw_sp->ptp_ops->get_stats(mlxsw_sp_port,
+ data, data_index);
+ data_index += mlxsw_sp_port->mlxsw_sp->ptp_ops->get_stats_count();
}
static int mlxsw_sp_port_get_sset_count(struct net_device *dev, int sset)
{
+ struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
+
switch (sset) {
case ETH_SS_STATS:
- return MLXSW_SP_PORT_ETHTOOL_STATS_LEN;
+ return MLXSW_SP_PORT_ETHTOOL_STATS_LEN +
+ mlxsw_sp_port->mlxsw_sp->ptp_ops->get_stats_count();
default:
return -EOPNOTSUPP;
}
@@ -2608,26 +2624,6 @@ static const struct mlxsw_sp1_port_link_mode mlxsw_sp1_port_link_mode[] = {
.speed = SPEED_50000,
},
{
- .mask = MLXSW_REG_PTYS_ETH_SPEED_56GBASE_R4,
- .mask_ethtool = ETHTOOL_LINK_MODE_56000baseKR4_Full_BIT,
- .speed = SPEED_56000,
- },
- {
- .mask = MLXSW_REG_PTYS_ETH_SPEED_56GBASE_R4,
- .mask_ethtool = ETHTOOL_LINK_MODE_56000baseCR4_Full_BIT,
- .speed = SPEED_56000,
- },
- {
- .mask = MLXSW_REG_PTYS_ETH_SPEED_56GBASE_R4,
- .mask_ethtool = ETHTOOL_LINK_MODE_56000baseSR4_Full_BIT,
- .speed = SPEED_56000,
- },
- {
- .mask = MLXSW_REG_PTYS_ETH_SPEED_56GBASE_R4,
- .mask_ethtool = ETHTOOL_LINK_MODE_56000baseLR4_Full_BIT,
- .speed = SPEED_56000,
- },
- {
.mask = MLXSW_REG_PTYS_ETH_SPEED_100GBASE_CR4,
.mask_ethtool = ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT,
.speed = SPEED_100000,
@@ -2674,7 +2670,7 @@ mlxsw_sp1_from_ptys_supported_port(struct mlxsw_sp *mlxsw_sp,
static void
mlxsw_sp1_from_ptys_link(struct mlxsw_sp *mlxsw_sp, u32 ptys_eth_proto,
- unsigned long *mode)
+ u8 width, unsigned long *mode)
{
int i;
@@ -2715,7 +2711,7 @@ mlxsw_sp1_from_ptys_speed_duplex(struct mlxsw_sp *mlxsw_sp, bool carrier_ok,
}
static u32
-mlxsw_sp1_to_ptys_advert_link(struct mlxsw_sp *mlxsw_sp,
+mlxsw_sp1_to_ptys_advert_link(struct mlxsw_sp *mlxsw_sp, u8 width,
const struct ethtool_link_ksettings *cmd)
{
u32 ptys_proto = 0;
@@ -2729,7 +2725,8 @@ mlxsw_sp1_to_ptys_advert_link(struct mlxsw_sp *mlxsw_sp,
return ptys_proto;
}
-static u32 mlxsw_sp1_to_ptys_speed(struct mlxsw_sp *mlxsw_sp, u32 speed)
+static u32 mlxsw_sp1_to_ptys_speed(struct mlxsw_sp *mlxsw_sp, u8 width,
+ u32 speed)
{
u32 ptys_proto = 0;
int i;
@@ -2917,11 +2914,31 @@ mlxsw_sp2_mask_ethtool_200gaui_4_200gbase_cr4_kr4[] = {
#define MLXSW_SP2_MASK_ETHTOOL_200GAUI_4_200GBASE_CR4_KR4_LEN \
ARRAY_SIZE(mlxsw_sp2_mask_ethtool_200gaui_4_200gbase_cr4_kr4)
+#define MLXSW_SP_PORT_MASK_WIDTH_1X BIT(0)
+#define MLXSW_SP_PORT_MASK_WIDTH_2X BIT(1)
+#define MLXSW_SP_PORT_MASK_WIDTH_4X BIT(2)
+
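+/* A link mode is only valid on certain port widths (lane counts), so
+ * each width is encoded as a bit and every mode carries a mask of the
+ * widths it supports.
+ */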
+static u8 mlxsw_sp_port_mask_width_get(u8 width)
+{
+ switch (width) {
+ case 1:
+ return MLXSW_SP_PORT_MASK_WIDTH_1X;
+ case 2:
+ return MLXSW_SP_PORT_MASK_WIDTH_2X;
+ case 4:
+ return MLXSW_SP_PORT_MASK_WIDTH_4X;
+ default:
+ WARN_ON_ONCE(1);
+ return 0;
+ }
+}
+
struct mlxsw_sp2_port_link_mode {
const enum ethtool_link_mode_bit_indices *mask_ethtool;
int m_ethtool_len;
u32 mask;
u32 speed;
+ u8 mask_width;
};
static const struct mlxsw_sp2_port_link_mode mlxsw_sp2_port_link_mode[] = {
@@ -2929,72 +2946,97 @@ static const struct mlxsw_sp2_port_link_mode mlxsw_sp2_port_link_mode[] = {
.mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_SGMII_100M,
.mask_ethtool = mlxsw_sp2_mask_ethtool_sgmii_100m,
.m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_SGMII_100M_LEN,
+ .mask_width = MLXSW_SP_PORT_MASK_WIDTH_1X |
+ MLXSW_SP_PORT_MASK_WIDTH_2X |
+ MLXSW_SP_PORT_MASK_WIDTH_4X,
.speed = SPEED_100,
},
{
.mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_1000BASE_X_SGMII,
.mask_ethtool = mlxsw_sp2_mask_ethtool_1000base_x_sgmii,
.m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_1000BASE_X_SGMII_LEN,
+ .mask_width = MLXSW_SP_PORT_MASK_WIDTH_1X |
+ MLXSW_SP_PORT_MASK_WIDTH_2X |
+ MLXSW_SP_PORT_MASK_WIDTH_4X,
.speed = SPEED_1000,
},
{
.mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_2_5GBASE_X_2_5GMII,
.mask_ethtool = mlxsw_sp2_mask_ethtool_2_5gbase_x_2_5gmii,
.m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_2_5GBASE_X_2_5GMII_LEN,
+ .mask_width = MLXSW_SP_PORT_MASK_WIDTH_1X |
+ MLXSW_SP_PORT_MASK_WIDTH_2X |
+ MLXSW_SP_PORT_MASK_WIDTH_4X,
.speed = SPEED_2500,
},
{
.mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_5GBASE_R,
.mask_ethtool = mlxsw_sp2_mask_ethtool_5gbase_r,
.m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_5GBASE_R_LEN,
+ .mask_width = MLXSW_SP_PORT_MASK_WIDTH_1X |
+ MLXSW_SP_PORT_MASK_WIDTH_2X |
+ MLXSW_SP_PORT_MASK_WIDTH_4X,
.speed = SPEED_5000,
},
{
.mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_XFI_XAUI_1_10G,
.mask_ethtool = mlxsw_sp2_mask_ethtool_xfi_xaui_1_10g,
.m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_XFI_XAUI_1_10G_LEN,
+ .mask_width = MLXSW_SP_PORT_MASK_WIDTH_1X |
+ MLXSW_SP_PORT_MASK_WIDTH_2X |
+ MLXSW_SP_PORT_MASK_WIDTH_4X,
.speed = SPEED_10000,
},
{
.mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_XLAUI_4_XLPPI_4_40G,
.mask_ethtool = mlxsw_sp2_mask_ethtool_xlaui_4_xlppi_4_40g,
.m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_XLAUI_4_XLPPI_4_40G_LEN,
+ .mask_width = MLXSW_SP_PORT_MASK_WIDTH_4X,
.speed = SPEED_40000,
},
{
.mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_25GAUI_1_25GBASE_CR_KR,
.mask_ethtool = mlxsw_sp2_mask_ethtool_25gaui_1_25gbase_cr_kr,
.m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_25GAUI_1_25GBASE_CR_KR_LEN,
+ .mask_width = MLXSW_SP_PORT_MASK_WIDTH_1X |
+ MLXSW_SP_PORT_MASK_WIDTH_2X |
+ MLXSW_SP_PORT_MASK_WIDTH_4X,
.speed = SPEED_25000,
},
{
.mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_50GAUI_2_LAUI_2_50GBASE_CR2_KR2,
.mask_ethtool = mlxsw_sp2_mask_ethtool_50gaui_2_laui_2_50gbase_cr2_kr2,
.m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_50GAUI_2_LAUI_2_50GBASE_CR2_KR2_LEN,
+ .mask_width = MLXSW_SP_PORT_MASK_WIDTH_2X |
+ MLXSW_SP_PORT_MASK_WIDTH_4X,
.speed = SPEED_50000,
},
{
.mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_50GAUI_1_LAUI_1_50GBASE_CR_KR,
.mask_ethtool = mlxsw_sp2_mask_ethtool_50gaui_1_laui_1_50gbase_cr_kr,
.m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_50GAUI_1_LAUI_1_50GBASE_CR_KR_LEN,
+ .mask_width = MLXSW_SP_PORT_MASK_WIDTH_1X,
.speed = SPEED_50000,
},
{
.mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_CAUI_4_100GBASE_CR4_KR4,
.mask_ethtool = mlxsw_sp2_mask_ethtool_caui_4_100gbase_cr4_kr4,
.m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_CAUI_4_100GBASE_CR4_KR4_LEN,
+ .mask_width = MLXSW_SP_PORT_MASK_WIDTH_4X,
.speed = SPEED_100000,
},
{
.mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_100GAUI_2_100GBASE_CR2_KR2,
.mask_ethtool = mlxsw_sp2_mask_ethtool_100gaui_2_100gbase_cr2_kr2,
.m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_100GAUI_2_100GBASE_CR2_KR2_LEN,
+ .mask_width = MLXSW_SP_PORT_MASK_WIDTH_2X,
.speed = SPEED_100000,
},
{
.mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_200GAUI_4_200GBASE_CR4_KR4,
.mask_ethtool = mlxsw_sp2_mask_ethtool_200gaui_4_200gbase_cr4_kr4,
.m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_200GAUI_4_200GBASE_CR4_KR4_LEN,
+ .mask_width = MLXSW_SP_PORT_MASK_WIDTH_4X,
.speed = SPEED_200000,
},
};
@@ -3022,12 +3064,14 @@ mlxsw_sp2_set_bit_ethtool(const struct mlxsw_sp2_port_link_mode *link_mode,
static void
mlxsw_sp2_from_ptys_link(struct mlxsw_sp *mlxsw_sp, u32 ptys_eth_proto,
- unsigned long *mode)
+ u8 width, unsigned long *mode)
{
+ u8 mask_width = mlxsw_sp_port_mask_width_get(width);
int i;
for (i = 0; i < MLXSW_SP2_PORT_LINK_MODE_LEN; i++) {
- if (ptys_eth_proto & mlxsw_sp2_port_link_mode[i].mask)
+ if ((ptys_eth_proto & mlxsw_sp2_port_link_mode[i].mask) &&
+ (mask_width & mlxsw_sp2_port_link_mode[i].mask_width))
mlxsw_sp2_set_bit_ethtool(&mlxsw_sp2_port_link_mode[i],
mode);
}
@@ -3078,27 +3122,32 @@ mlxsw_sp2_test_bit_ethtool(const struct mlxsw_sp2_port_link_mode *link_mode,
}
static u32
-mlxsw_sp2_to_ptys_advert_link(struct mlxsw_sp *mlxsw_sp,
+mlxsw_sp2_to_ptys_advert_link(struct mlxsw_sp *mlxsw_sp, u8 width,
const struct ethtool_link_ksettings *cmd)
{
+ u8 mask_width = mlxsw_sp_port_mask_width_get(width);
u32 ptys_proto = 0;
int i;
for (i = 0; i < MLXSW_SP2_PORT_LINK_MODE_LEN; i++) {
- if (mlxsw_sp2_test_bit_ethtool(&mlxsw_sp2_port_link_mode[i],
+ if ((mask_width & mlxsw_sp2_port_link_mode[i].mask_width) &&
+ mlxsw_sp2_test_bit_ethtool(&mlxsw_sp2_port_link_mode[i],
cmd->link_modes.advertising))
ptys_proto |= mlxsw_sp2_port_link_mode[i].mask;
}
return ptys_proto;
}
-static u32 mlxsw_sp2_to_ptys_speed(struct mlxsw_sp *mlxsw_sp, u32 speed)
+static u32 mlxsw_sp2_to_ptys_speed(struct mlxsw_sp *mlxsw_sp,
+ u8 width, u32 speed)
{
+ u8 mask_width = mlxsw_sp_port_mask_width_get(width);
u32 ptys_proto = 0;
int i;
for (i = 0; i < MLXSW_SP2_PORT_LINK_MODE_LEN; i++) {
- if (speed == mlxsw_sp2_port_link_mode[i].speed)
+ if ((speed == mlxsw_sp2_port_link_mode[i].speed) &&
+ (mask_width & mlxsw_sp2_port_link_mode[i].mask_width))
ptys_proto |= mlxsw_sp2_port_link_mode[i].mask;
}
return ptys_proto;
@@ -3182,7 +3231,7 @@ mlxsw_sp2_port_type_speed_ops = {
static void
mlxsw_sp_port_get_link_supported(struct mlxsw_sp *mlxsw_sp, u32 eth_proto_cap,
- struct ethtool_link_ksettings *cmd)
+ u8 width, struct ethtool_link_ksettings *cmd)
{
const struct mlxsw_sp_port_type_speed_ops *ops;
@@ -3193,12 +3242,13 @@ mlxsw_sp_port_get_link_supported(struct mlxsw_sp *mlxsw_sp, u32 eth_proto_cap,
ethtool_link_ksettings_add_link_mode(cmd, supported, Pause);
ops->from_ptys_supported_port(mlxsw_sp, eth_proto_cap, cmd);
- ops->from_ptys_link(mlxsw_sp, eth_proto_cap, cmd->link_modes.supported);
+ ops->from_ptys_link(mlxsw_sp, eth_proto_cap, width,
+ cmd->link_modes.supported);
}
static void
mlxsw_sp_port_get_link_advertise(struct mlxsw_sp *mlxsw_sp,
- u32 eth_proto_admin, bool autoneg,
+ u32 eth_proto_admin, bool autoneg, u8 width,
struct ethtool_link_ksettings *cmd)
{
const struct mlxsw_sp_port_type_speed_ops *ops;
@@ -3209,7 +3259,7 @@ mlxsw_sp_port_get_link_advertise(struct mlxsw_sp *mlxsw_sp,
return;
ethtool_link_ksettings_add_link_mode(cmd, advertising, Autoneg);
- ops->from_ptys_link(mlxsw_sp, eth_proto_admin,
+ ops->from_ptys_link(mlxsw_sp, eth_proto_admin, width,
cmd->link_modes.advertising);
}
@@ -3264,10 +3314,11 @@ static int mlxsw_sp_port_get_link_ksettings(struct net_device *dev,
ops->reg_ptys_eth_unpack(mlxsw_sp, ptys_pl, &eth_proto_cap,
&eth_proto_admin, &eth_proto_oper);
- mlxsw_sp_port_get_link_supported(mlxsw_sp, eth_proto_cap, cmd);
+ mlxsw_sp_port_get_link_supported(mlxsw_sp, eth_proto_cap,
+ mlxsw_sp_port->mapping.width, cmd);
mlxsw_sp_port_get_link_advertise(mlxsw_sp, eth_proto_admin, autoneg,
- cmd);
+ mlxsw_sp_port->mapping.width, cmd);
cmd->base.autoneg = autoneg ? AUTONEG_ENABLE : AUTONEG_DISABLE;
connector_type = mlxsw_reg_ptys_connector_type_get(ptys_pl);
@@ -3300,13 +3351,11 @@ mlxsw_sp_port_set_link_ksettings(struct net_device *dev,
ops->reg_ptys_eth_unpack(mlxsw_sp, ptys_pl, &eth_proto_cap, NULL, NULL);
autoneg = cmd->base.autoneg == AUTONEG_ENABLE;
- if (!autoneg && cmd->base.speed == SPEED_56000) {
- netdev_err(dev, "56G not supported with autoneg off\n");
- return -EINVAL;
- }
eth_proto_new = autoneg ?
- ops->to_ptys_advert_link(mlxsw_sp, cmd) :
- ops->to_ptys_speed(mlxsw_sp, cmd->base.speed);
+ ops->to_ptys_advert_link(mlxsw_sp, mlxsw_sp_port->mapping.width,
+ cmd) :
+ ops->to_ptys_speed(mlxsw_sp, mlxsw_sp_port->mapping.width,
+ cmd->base.speed);
eth_proto_new = eth_proto_new & eth_proto_cap;
if (!eth_proto_new) {
@@ -4609,6 +4658,9 @@ static const struct mlxsw_sp_ptp_ops mlxsw_sp1_ptp_ops = {
.hwtstamp_set = mlxsw_sp1_ptp_hwtstamp_set,
.shaper_work = mlxsw_sp1_ptp_shaper_work,
.get_ts_info = mlxsw_sp1_ptp_get_ts_info,
+ .get_stats_count = mlxsw_sp1_get_stats_count,
+ .get_stats_strings = mlxsw_sp1_get_stats_strings,
+ .get_stats = mlxsw_sp1_get_stats,
};
static const struct mlxsw_sp_ptp_ops mlxsw_sp2_ptp_ops = {
@@ -4622,6 +4674,9 @@ static const struct mlxsw_sp_ptp_ops mlxsw_sp2_ptp_ops = {
.hwtstamp_set = mlxsw_sp2_ptp_hwtstamp_set,
.shaper_work = mlxsw_sp2_ptp_shaper_work,
.get_ts_info = mlxsw_sp2_ptp_get_ts_info,
+ .get_stats_count = mlxsw_sp2_get_stats_count,
+ .get_stats_strings = mlxsw_sp2_get_stats_strings,
+ .get_stats = mlxsw_sp2_get_stats,
};
static int mlxsw_sp_netdevice_event(struct notifier_block *unused,
@@ -4664,6 +4719,12 @@ static int mlxsw_sp_init(struct mlxsw_core *mlxsw_core,
goto err_traps_init;
}
+ err = mlxsw_sp_devlink_traps_init(mlxsw_sp);
+ if (err) {
+ dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize devlink traps\n");
+ goto err_devlink_traps_init;
+ }
+
err = mlxsw_sp_buffers_init(mlxsw_sp);
if (err) {
dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize buffers\n");
@@ -4797,6 +4858,8 @@ err_span_init:
err_lag_init:
mlxsw_sp_buffers_fini(mlxsw_sp);
err_buffers_init:
+ mlxsw_sp_devlink_traps_fini(mlxsw_sp);
+err_devlink_traps_init:
mlxsw_sp_traps_fini(mlxsw_sp);
err_traps_init:
mlxsw_sp_fids_fini(mlxsw_sp);
@@ -4869,6 +4932,7 @@ static void mlxsw_sp_fini(struct mlxsw_core *mlxsw_core)
mlxsw_sp_span_fini(mlxsw_sp);
mlxsw_sp_lag_fini(mlxsw_sp);
mlxsw_sp_buffers_fini(mlxsw_sp);
+ mlxsw_sp_devlink_traps_fini(mlxsw_sp);
mlxsw_sp_traps_fini(mlxsw_sp);
mlxsw_sp_fids_fini(mlxsw_sp);
mlxsw_sp_kvdl_fini(mlxsw_sp);
@@ -5026,6 +5090,26 @@ static int mlxsw_sp1_resources_kvd_register(struct mlxsw_core *mlxsw_core)
return 0;
}
+static int mlxsw_sp2_resources_kvd_register(struct mlxsw_core *mlxsw_core)
+{
+ struct devlink *devlink = priv_to_devlink(mlxsw_core);
+ struct devlink_resource_size_params kvd_size_params;
+ u32 kvd_size;
+
+ if (!MLXSW_CORE_RES_VALID(mlxsw_core, KVD_SIZE))
+ return -EIO;
+
+ kvd_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE);
+ devlink_resource_size_params_init(&kvd_size_params, kvd_size, kvd_size,
+ MLXSW_SP_KVD_GRANULARITY,
+ DEVLINK_RESOURCE_UNIT_ENTRY);
+
+ return devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD,
+ kvd_size, MLXSW_SP_RESOURCE_KVD,
+ DEVLINK_RESOURCE_ID_PARENT_TOP,
+ &kvd_size_params);
+}
+
static int mlxsw_sp1_resources_register(struct mlxsw_core *mlxsw_core)
{
return mlxsw_sp1_resources_kvd_register(mlxsw_core);
@@ -5033,7 +5117,7 @@ static int mlxsw_sp1_resources_register(struct mlxsw_core *mlxsw_core)
static int mlxsw_sp2_resources_register(struct mlxsw_core *mlxsw_core)
{
- return 0;
+ return mlxsw_sp2_resources_kvd_register(mlxsw_core);
}
static int mlxsw_sp_kvd_sizes_get(struct mlxsw_core *mlxsw_core,
@@ -5230,6 +5314,10 @@ static struct mlxsw_driver mlxsw_sp1_driver = {
.sb_occ_port_pool_get = mlxsw_sp_sb_occ_port_pool_get,
.sb_occ_tc_port_bind_get = mlxsw_sp_sb_occ_tc_port_bind_get,
.flash_update = mlxsw_sp_flash_update,
+ .trap_init = mlxsw_sp_trap_init,
+ .trap_fini = mlxsw_sp_trap_fini,
+ .trap_action_set = mlxsw_sp_trap_action_set,
+ .trap_group_init = mlxsw_sp_trap_group_init,
.txhdr_construct = mlxsw_sp_txhdr_construct,
.resources_register = mlxsw_sp1_resources_register,
.kvd_sizes_get = mlxsw_sp_kvd_sizes_get,
@@ -5260,6 +5348,43 @@ static struct mlxsw_driver mlxsw_sp2_driver = {
.sb_occ_port_pool_get = mlxsw_sp_sb_occ_port_pool_get,
.sb_occ_tc_port_bind_get = mlxsw_sp_sb_occ_tc_port_bind_get,
.flash_update = mlxsw_sp_flash_update,
+ .trap_init = mlxsw_sp_trap_init,
+ .trap_fini = mlxsw_sp_trap_fini,
+ .trap_action_set = mlxsw_sp_trap_action_set,
+ .trap_group_init = mlxsw_sp_trap_group_init,
+ .txhdr_construct = mlxsw_sp_txhdr_construct,
+ .resources_register = mlxsw_sp2_resources_register,
+ .params_register = mlxsw_sp2_params_register,
+ .params_unregister = mlxsw_sp2_params_unregister,
+ .ptp_transmitted = mlxsw_sp_ptp_transmitted,
+ .txhdr_len = MLXSW_TXHDR_LEN,
+ .profile = &mlxsw_sp2_config_profile,
+ .res_query_enabled = true,
+};
+
+static struct mlxsw_driver mlxsw_sp3_driver = {
+ .kind = mlxsw_sp3_driver_name,
+ .priv_size = sizeof(struct mlxsw_sp),
+ .init = mlxsw_sp2_init,
+ .fini = mlxsw_sp_fini,
+ .basic_trap_groups_set = mlxsw_sp_basic_trap_groups_set,
+ .port_split = mlxsw_sp_port_split,
+ .port_unsplit = mlxsw_sp_port_unsplit,
+ .sb_pool_get = mlxsw_sp_sb_pool_get,
+ .sb_pool_set = mlxsw_sp_sb_pool_set,
+ .sb_port_pool_get = mlxsw_sp_sb_port_pool_get,
+ .sb_port_pool_set = mlxsw_sp_sb_port_pool_set,
+ .sb_tc_pool_bind_get = mlxsw_sp_sb_tc_pool_bind_get,
+ .sb_tc_pool_bind_set = mlxsw_sp_sb_tc_pool_bind_set,
+ .sb_occ_snapshot = mlxsw_sp_sb_occ_snapshot,
+ .sb_occ_max_clear = mlxsw_sp_sb_occ_max_clear,
+ .sb_occ_port_pool_get = mlxsw_sp_sb_occ_port_pool_get,
+ .sb_occ_tc_port_bind_get = mlxsw_sp_sb_occ_tc_port_bind_get,
+ .flash_update = mlxsw_sp_flash_update,
+ .trap_init = mlxsw_sp_trap_init,
+ .trap_fini = mlxsw_sp_trap_fini,
+ .trap_action_set = mlxsw_sp_trap_action_set,
+ .trap_group_init = mlxsw_sp_trap_group_init,
.txhdr_construct = mlxsw_sp_txhdr_construct,
.resources_register = mlxsw_sp2_resources_register,
.params_register = mlxsw_sp2_params_register,
@@ -6304,6 +6429,16 @@ static struct pci_driver mlxsw_sp2_pci_driver = {
.id_table = mlxsw_sp2_pci_id_table,
};
+static const struct pci_device_id mlxsw_sp3_pci_id_table[] = {
+ {PCI_VDEVICE(MELLANOX, PCI_DEVICE_ID_MELLANOX_SPECTRUM3), 0},
+ {0, },
+};
+
+static struct pci_driver mlxsw_sp3_pci_driver = {
+ .name = mlxsw_sp3_driver_name,
+ .id_table = mlxsw_sp3_pci_id_table,
+};
+
static int __init mlxsw_sp_module_init(void)
{
int err;
@@ -6319,6 +6454,10 @@ static int __init mlxsw_sp_module_init(void)
if (err)
goto err_sp2_core_driver_register;
+ err = mlxsw_core_driver_register(&mlxsw_sp3_driver);
+ if (err)
+ goto err_sp3_core_driver_register;
+
err = mlxsw_pci_driver_register(&mlxsw_sp1_pci_driver);
if (err)
goto err_sp1_pci_driver_register;
@@ -6327,11 +6466,19 @@ static int __init mlxsw_sp_module_init(void)
if (err)
goto err_sp2_pci_driver_register;
+ err = mlxsw_pci_driver_register(&mlxsw_sp3_pci_driver);
+ if (err)
+ goto err_sp3_pci_driver_register;
+
return 0;
+err_sp3_pci_driver_register:
+ mlxsw_pci_driver_unregister(&mlxsw_sp2_pci_driver);
err_sp2_pci_driver_register:
mlxsw_pci_driver_unregister(&mlxsw_sp1_pci_driver);
err_sp1_pci_driver_register:
+ mlxsw_core_driver_unregister(&mlxsw_sp3_driver);
+err_sp3_core_driver_register:
mlxsw_core_driver_unregister(&mlxsw_sp2_driver);
err_sp2_core_driver_register:
mlxsw_core_driver_unregister(&mlxsw_sp1_driver);
@@ -6343,8 +6490,10 @@ err_sp1_core_driver_register:
static void __exit mlxsw_sp_module_exit(void)
{
+ mlxsw_pci_driver_unregister(&mlxsw_sp3_pci_driver);
mlxsw_pci_driver_unregister(&mlxsw_sp2_pci_driver);
mlxsw_pci_driver_unregister(&mlxsw_sp1_pci_driver);
+ mlxsw_core_driver_unregister(&mlxsw_sp3_driver);
mlxsw_core_driver_unregister(&mlxsw_sp2_driver);
mlxsw_core_driver_unregister(&mlxsw_sp1_driver);
unregister_inet6addr_validator_notifier(&mlxsw_sp_inet6addr_valid_nb);
@@ -6359,4 +6508,5 @@ MODULE_AUTHOR("Jiri Pirko <jiri@mellanox.com>");
MODULE_DESCRIPTION("Mellanox Spectrum driver");
MODULE_DEVICE_TABLE(pci, mlxsw_sp1_pci_id_table);
MODULE_DEVICE_TABLE(pci, mlxsw_sp2_pci_id_table);
+MODULE_DEVICE_TABLE(pci, mlxsw_sp3_pci_id_table);
MODULE_FIRMWARE(MLXSW_SP1_FW_FILENAME);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
index 6664119fb0c8..b2a0028b1694 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
@@ -225,6 +225,16 @@ struct mlxsw_sp_port_xstats {
u64 tx_packets[IEEE_8021QAZ_MAX_TCS];
};
+struct mlxsw_sp_ptp_port_dir_stats {
+ u64 packets;
+ u64 timestamps;
+};
+
+struct mlxsw_sp_ptp_port_stats {
+ struct mlxsw_sp_ptp_port_dir_stats rx_gcd;
+ struct mlxsw_sp_ptp_port_dir_stats tx_gcd;
+};
+
struct mlxsw_sp_port {
struct net_device *dev;
struct mlxsw_sp_port_pcpu_stats __percpu *pcpu_stats;
@@ -271,6 +281,7 @@ struct mlxsw_sp_port {
struct hwtstamp_config hwtstamp_config;
u16 ing_types;
u16 egr_types;
+ struct mlxsw_sp_ptp_port_stats stats;
} ptp;
};
@@ -279,14 +290,14 @@ struct mlxsw_sp_port_type_speed_ops {
u32 ptys_eth_proto,
struct ethtool_link_ksettings *cmd);
void (*from_ptys_link)(struct mlxsw_sp *mlxsw_sp, u32 ptys_eth_proto,
- unsigned long *mode);
+ u8 width, unsigned long *mode);
u32 (*from_ptys_speed)(struct mlxsw_sp *mlxsw_sp, u32 ptys_eth_proto);
void (*from_ptys_speed_duplex)(struct mlxsw_sp *mlxsw_sp,
bool carrier_ok, u32 ptys_eth_proto,
struct ethtool_link_ksettings *cmd);
- u32 (*to_ptys_advert_link)(struct mlxsw_sp *mlxsw_sp,
+ u32 (*to_ptys_advert_link)(struct mlxsw_sp *mlxsw_sp, u8 width,
const struct ethtool_link_ksettings *cmd);
- u32 (*to_ptys_speed)(struct mlxsw_sp *mlxsw_sp, u32 speed);
+ u32 (*to_ptys_speed)(struct mlxsw_sp *mlxsw_sp, u8 width, u32 speed);
u32 (*to_ptys_upper_speed)(struct mlxsw_sp *mlxsw_sp, u32 upper_speed);
int (*port_speed_base)(struct mlxsw_sp *mlxsw_sp, u8 local_port,
u32 *base_speed);
@@ -623,7 +634,8 @@ struct mlxsw_sp_acl_rule_info {
unsigned int priority;
struct mlxsw_afk_element_values values;
struct mlxsw_afa_block *act_block;
- u8 action_created:1;
+ u8 action_created:1,
+ egress_bind_blocker:1;
unsigned int counter_index;
};
@@ -642,6 +654,7 @@ struct mlxsw_sp_acl_block {
struct mlxsw_sp *mlxsw_sp;
unsigned int rule_count;
unsigned int disable_count;
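+	/* Rules that cannot work on egress bump this count; while it is
+	 * non-zero the block must not be bound to egress.
+	 */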
+ unsigned int egress_blocker_rule_count;
struct net *net;
};
@@ -657,7 +670,8 @@ void mlxsw_sp_acl_block_destroy(struct mlxsw_sp_acl_block *block);
int mlxsw_sp_acl_block_bind(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_block *block,
struct mlxsw_sp_port *mlxsw_sp_port,
- bool ingress);
+ bool ingress,
+ struct netlink_ext_ack *extack);
int mlxsw_sp_acl_block_unbind(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_block *block,
struct mlxsw_sp_port *mlxsw_sp_port,
@@ -955,4 +969,17 @@ void mlxsw_sp_nve_fini(struct mlxsw_sp *mlxsw_sp);
int mlxsw_sp_nve_inc_parsing_depth_get(struct mlxsw_sp *mlxsw_sp);
void mlxsw_sp_nve_inc_parsing_depth_put(struct mlxsw_sp *mlxsw_sp);
+/* spectrum_trap.c */
+int mlxsw_sp_devlink_traps_init(struct mlxsw_sp *mlxsw_sp);
+void mlxsw_sp_devlink_traps_fini(struct mlxsw_sp *mlxsw_sp);
+int mlxsw_sp_trap_init(struct mlxsw_core *mlxsw_core,
+ const struct devlink_trap *trap, void *trap_ctx);
+void mlxsw_sp_trap_fini(struct mlxsw_core *mlxsw_core,
+ const struct devlink_trap *trap, void *trap_ctx);
+int mlxsw_sp_trap_action_set(struct mlxsw_core *mlxsw_core,
+ const struct devlink_trap *trap,
+ enum devlink_trap_action action);
+int mlxsw_sp_trap_group_init(struct mlxsw_core *mlxsw_core,
+ const struct devlink_trap_group *group);
+
#endif
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c
index 84a87d059333..150b3a144b83 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c
@@ -239,7 +239,8 @@ mlxsw_sp_acl_block_lookup(struct mlxsw_sp_acl_block *block,
int mlxsw_sp_acl_block_bind(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_block *block,
struct mlxsw_sp_port *mlxsw_sp_port,
- bool ingress)
+ bool ingress,
+ struct netlink_ext_ack *extack)
{
struct mlxsw_sp_acl_block_binding *binding;
int err;
@@ -247,6 +248,11 @@ int mlxsw_sp_acl_block_bind(struct mlxsw_sp *mlxsw_sp,
if (WARN_ON(mlxsw_sp_acl_block_lookup(block, mlxsw_sp_port, ingress)))
return -EEXIST;
+ if (!ingress && block->egress_blocker_rule_count) {
+ NL_SET_ERR_MSG_MOD(extack, "Block cannot be bound to egress because it contains unsupported rules");
+ return -EOPNOTSUPP;
+ }
+
binding = kzalloc(sizeof(*binding), GFP_KERNEL);
if (!binding)
return -ENOMEM;
@@ -672,6 +678,7 @@ int mlxsw_sp_acl_rule_add(struct mlxsw_sp *mlxsw_sp,
{
struct mlxsw_sp_acl_ruleset *ruleset = rule->ruleset;
const struct mlxsw_sp_acl_profile_ops *ops = ruleset->ht_key.ops;
+ struct mlxsw_sp_acl_block *block = ruleset->ht_key.block;
int err;
err = ops->rule_add(mlxsw_sp, ruleset->priv, rule->priv, rule->rulei);
@@ -689,14 +696,14 @@ int mlxsw_sp_acl_rule_add(struct mlxsw_sp *mlxsw_sp,
* one, to be directly bound to device. The rest of the
* rulesets are bound by "Goto action set".
*/
- err = mlxsw_sp_acl_ruleset_block_bind(mlxsw_sp, ruleset,
- ruleset->ht_key.block);
+ err = mlxsw_sp_acl_ruleset_block_bind(mlxsw_sp, ruleset, block);
if (err)
goto err_ruleset_block_bind;
}
list_add_tail(&rule->list, &mlxsw_sp->acl->rules);
- ruleset->ht_key.block->rule_count++;
+ block->rule_count++;
+ block->egress_blocker_rule_count += rule->rulei->egress_bind_blocker;
return 0;
err_ruleset_block_bind:
@@ -712,7 +719,9 @@ void mlxsw_sp_acl_rule_del(struct mlxsw_sp *mlxsw_sp,
{
struct mlxsw_sp_acl_ruleset *ruleset = rule->ruleset;
const struct mlxsw_sp_acl_profile_ops *ops = ruleset->ht_key.ops;
+ struct mlxsw_sp_acl_block *block = ruleset->ht_key.block;
+ block->egress_blocker_rule_count -= rule->rulei->egress_bind_blocker;
ruleset->ht_key.block->rule_count--;
list_del(&rule->list);
if (!ruleset->ht_key.chain_index &&
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c
index 202e9a246019..0ad1a24abfc6 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c
@@ -78,6 +78,16 @@ static int mlxsw_sp_flower_parse_actions(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_fid *fid;
u16 fid_index;
+ if (mlxsw_sp_acl_block_is_egress_bound(block)) {
+ NL_SET_ERR_MSG_MOD(extack, "Redirect action is not supported on egress");
+ return -EOPNOTSUPP;
+ }
+
+	/* Forbid the block holding this rule from being bound to egress
+	 * in the future.
+	 */
+ rulei->egress_bind_blocker = 1;
+
fid = mlxsw_sp_acl_dummy_fid(mlxsw_sp);
fid_index = mlxsw_sp_fid_index(fid);
err = mlxsw_sp_acl_rulei_act_fid_set(mlxsw_sp, rulei,
@@ -257,6 +267,12 @@ static int mlxsw_sp_flower_parse_tcp(struct mlxsw_sp *mlxsw_sp,
flow_rule_match_tcp(rule, &match);
+ if (match.mask->flags & htons(0x0E00)) {
+ NL_SET_ERR_MSG_MOD(f->common.extack, "TCP flags match not supported on reserved bits");
+ dev_err(mlxsw_sp->bus_info->dev, "TCP flags match not supported on reserved bits\n");
+ return -EINVAL;
+ }
+
mlxsw_sp_acl_rulei_keymask_u32(rulei, MLXSW_AFK_ELEMENT_TCP_FLAGS,
ntohs(match.key->flags),
ntohs(match.mask->flags));
@@ -390,6 +406,12 @@ static int mlxsw_sp_flower_parse(struct mlxsw_sp *mlxsw_sp,
NL_SET_ERR_MSG_MOD(f->common.extack, "vlan_id key is not supported on egress");
return -EOPNOTSUPP;
}
+
+	/* Forbid the block holding this rule from being bound to egress
+	 * in the future.
+	 */
+ rulei->egress_bind_blocker = 1;
+
if (match.mask->vlan_id != 0)
mlxsw_sp_acl_rulei_keymask_u32(rulei,
MLXSW_AFK_ELEMENT_VID,
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c
index 38bb1cfe4e8c..ec2ff3d7f41c 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c
@@ -630,6 +630,8 @@ static void
mlxsw_sp1_ptp_ht_gc_collect(struct mlxsw_sp_ptp_state *ptp_state,
struct mlxsw_sp1_ptp_unmatched *unmatched)
{
+ struct mlxsw_sp_ptp_port_dir_stats *stats;
+ struct mlxsw_sp_port *mlxsw_sp_port;
int err;
/* If an unmatched entry has an SKB, it has to be handed over to the
@@ -650,6 +652,17 @@ mlxsw_sp1_ptp_ht_gc_collect(struct mlxsw_sp_ptp_state *ptp_state,
/* The packet was matched with timestamp during the walk. */
goto out;
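+
+	/* Account an entry that timed out without being matched: an entry
+	 * still holding an SKB is a packet that never got its timestamp;
+	 * otherwise it is a timestamp that never found its packet.
+	 */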
+ mlxsw_sp_port = ptp_state->mlxsw_sp->ports[unmatched->key.local_port];
+ if (mlxsw_sp_port) {
+ stats = unmatched->key.ingress ?
+ &mlxsw_sp_port->ptp.stats.rx_gcd :
+ &mlxsw_sp_port->ptp.stats.tx_gcd;
+ if (unmatched->skb)
+ stats->packets++;
+ else
+ stats->timestamps++;
+ }
+
/* mlxsw_sp1_ptp_unmatched_finish() invokes netif_receive_skb(). While
* the comment at that function states that it can only be called in
* soft IRQ context, this pattern of local_bh_disable() +
@@ -1098,3 +1111,57 @@ int mlxsw_sp1_ptp_get_ts_info(struct mlxsw_sp *mlxsw_sp,
return 0;
}
+
+struct mlxsw_sp_ptp_port_stat {
+ char str[ETH_GSTRING_LEN];
+ ptrdiff_t offset;
+};
+
+#define MLXSW_SP_PTP_PORT_STAT(NAME, FIELD) \
+ { \
+ .str = NAME, \
+ .offset = offsetof(struct mlxsw_sp_ptp_port_stats, \
+ FIELD), \
+ }
+
+static const struct mlxsw_sp_ptp_port_stat mlxsw_sp_ptp_port_stats[] = {
+ MLXSW_SP_PTP_PORT_STAT("ptp_rx_gcd_packets", rx_gcd.packets),
+ MLXSW_SP_PTP_PORT_STAT("ptp_rx_gcd_timestamps", rx_gcd.timestamps),
+ MLXSW_SP_PTP_PORT_STAT("ptp_tx_gcd_packets", tx_gcd.packets),
+ MLXSW_SP_PTP_PORT_STAT("ptp_tx_gcd_timestamps", tx_gcd.timestamps),
+};
+
+#undef MLXSW_SP_PTP_PORT_STAT
+
+#define MLXSW_SP_PTP_PORT_STATS_LEN \
+ ARRAY_SIZE(mlxsw_sp_ptp_port_stats)
+
+int mlxsw_sp1_get_stats_count(void)
+{
+ return MLXSW_SP_PTP_PORT_STATS_LEN;
+}
+
+void mlxsw_sp1_get_stats_strings(u8 **p)
+{
+ int i;
+
+ for (i = 0; i < MLXSW_SP_PTP_PORT_STATS_LEN; i++) {
+ memcpy(*p, mlxsw_sp_ptp_port_stats[i].str,
+ ETH_GSTRING_LEN);
+ *p += ETH_GSTRING_LEN;
+ }
+}
+
+void mlxsw_sp1_get_stats(struct mlxsw_sp_port *mlxsw_sp_port,
+ u64 *data, int data_index)
+{
+ void *stats = &mlxsw_sp_port->ptp.stats;
+ ptrdiff_t offset;
+ int i;
+
+ data += data_index;
+ for (i = 0; i < MLXSW_SP_PTP_PORT_STATS_LEN; i++) {
+ offset = mlxsw_sp_ptp_port_stats[i].offset;
+ *data++ = *(u64 *)(stats + offset);
+ }
+}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.h
index 72e55f6926b9..8c386571afce 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.h
@@ -59,6 +59,11 @@ void mlxsw_sp1_ptp_shaper_work(struct work_struct *work);
int mlxsw_sp1_ptp_get_ts_info(struct mlxsw_sp *mlxsw_sp,
struct ethtool_ts_info *info);
+int mlxsw_sp1_get_stats_count(void);
+void mlxsw_sp1_get_stats_strings(u8 **p);
+void mlxsw_sp1_get_stats(struct mlxsw_sp_port *mlxsw_sp_port,
+ u64 *data, int data_index);
+
#else
static inline struct mlxsw_sp_ptp_clock *
@@ -125,6 +130,19 @@ static inline int mlxsw_sp1_ptp_get_ts_info(struct mlxsw_sp *mlxsw_sp,
return mlxsw_sp_ptp_get_ts_info_noptp(info);
}
+static inline int mlxsw_sp1_get_stats_count(void)
+{
+ return 0;
+}
+
+static inline void mlxsw_sp1_get_stats_strings(u8 **p)
+{
+}
+
+static inline void mlxsw_sp1_get_stats(struct mlxsw_sp_port *mlxsw_sp_port,
+ u64 *data, int data_index)
+{
+}
#endif
static inline struct mlxsw_sp_ptp_clock *
@@ -183,4 +201,18 @@ static inline int mlxsw_sp2_ptp_get_ts_info(struct mlxsw_sp *mlxsw_sp,
return mlxsw_sp_ptp_get_ts_info_noptp(info);
}
+static inline int mlxsw_sp2_get_stats_count(void)
+{
+ return 0;
+}
+
+static inline void mlxsw_sp2_get_stats_strings(u8 **p)
+{
+}
+
+static inline void mlxsw_sp2_get_stats(struct mlxsw_sp_port *mlxsw_sp_port,
+ u64 *data, int data_index)
+{
+}
+
#endif
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
index e618be7ce6c6..a330b369e899 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
@@ -2943,7 +2943,7 @@ static u32 mlxsw_sp_nexthop_group_hash_obj(const void *data, u32 len, u32 seed)
val = nh_grp->count;
for (i = 0; i < nh_grp->count; i++) {
nh = &nh_grp->nexthops[i];
- val ^= nh->ifindex;
+ val ^= jhash(&nh->ifindex, sizeof(nh->ifindex), seed);
}
return jhash(&val, sizeof(val), seed);
default:
@@ -2961,7 +2961,7 @@ mlxsw_sp_nexthop6_group_hash(struct mlxsw_sp_fib6_entry *fib6_entry, u32 seed)
list_for_each_entry(mlxsw_sp_rt6, &fib6_entry->rt6_list, list) {
dev = mlxsw_sp_rt6->rt->fib6_nh->fib_nh_dev;
- val ^= dev->ifindex;
+ val ^= jhash(&dev->ifindex, sizeof(dev->ifindex), seed);
}
return jhash(&val, sizeof(val), seed);
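Both hunks replace a plain XOR fold of interface indexes with a per-element jhash before folding. Ifindexes are small integers, so XOR-ing them raw keeps the accumulator in the low bits and lets distinct groups collide easily; hashing each index first spreads its contribution across all 32 bits. A small demonstration (the mixer below is a generic 32-bit finalizer standing in for the kernel's jhash(), which is not reproduced here):

#include <stdint.h>
#include <stdio.h>

/* Toy 32-bit mixer standing in for the kernel's jhash(); illustration
 * only, not the real algorithm.
 */
static uint32_t mix32(uint32_t x)
{
	x ^= x >> 16; x *= 0x7feb352d;
	x ^= x >> 15; x *= 0x846ca68b;
	x ^= x >> 16;
	return x;
}

int main(void)
{
	/* Two distinct nexthop groups built from small ifindexes. */
	uint32_t a[] = { 2, 3 }, b[] = { 1 }, i;
	uint32_t xor_a = 0, xor_b = 0, mix_a = 0, mix_b = 0;

	for (i = 0; i < 2; i++) { xor_a ^= a[i]; mix_a ^= mix32(a[i]); }
	xor_b ^= b[0]; mix_b ^= mix32(b[0]);

	/* XOR of raw ifindexes: {2, 3} and {1} both fold to 1. */
	printf("xor: %u vs %u\n", (unsigned)xor_a, (unsigned)xor_b);
	/* Hashing each ifindex first keeps the two groups apart. */
	printf("mix: %08x vs %08x\n", (unsigned)mix_a, (unsigned)mix_b);
	return 0;
}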
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.c
new file mode 100644
index 000000000000..899450b28621
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.c
@@ -0,0 +1,267 @@
+// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
+/* Copyright (c) 2019 Mellanox Technologies. All rights reserved */
+
+#include <linux/kernel.h>
+#include <net/devlink.h>
+#include <uapi/linux/devlink.h>
+
+#include "core.h"
+#include "reg.h"
+#include "spectrum.h"
+
+#define MLXSW_SP_TRAP_METADATA DEVLINK_TRAP_METADATA_TYPE_F_IN_PORT
+
+static void mlxsw_sp_rx_drop_listener(struct sk_buff *skb, u8 local_port,
+ void *priv);
+
+#define MLXSW_SP_TRAP_DROP(_id, _group_id) \
+ DEVLINK_TRAP_GENERIC(DROP, DROP, _id, \
+ DEVLINK_TRAP_GROUP_GENERIC(_group_id), \
+ MLXSW_SP_TRAP_METADATA)
+
+#define MLXSW_SP_RXL_DISCARD(_id, _group_id) \
+ MLXSW_RXL(mlxsw_sp_rx_drop_listener, DISCARD_##_id, SET_FW_DEFAULT, \
+ false, SP_##_group_id, DISCARD)
+
+static struct devlink_trap mlxsw_sp_traps_arr[] = {
+ MLXSW_SP_TRAP_DROP(SMAC_MC, L2_DROPS),
+ MLXSW_SP_TRAP_DROP(VLAN_TAG_MISMATCH, L2_DROPS),
+ MLXSW_SP_TRAP_DROP(INGRESS_VLAN_FILTER, L2_DROPS),
+ MLXSW_SP_TRAP_DROP(INGRESS_STP_FILTER, L2_DROPS),
+ MLXSW_SP_TRAP_DROP(EMPTY_TX_LIST, L2_DROPS),
+ MLXSW_SP_TRAP_DROP(PORT_LOOPBACK_FILTER, L2_DROPS),
+};
+
+static struct mlxsw_listener mlxsw_sp_listeners_arr[] = {
+ MLXSW_SP_RXL_DISCARD(ING_PACKET_SMAC_MC, L2_DISCARDS),
+ MLXSW_SP_RXL_DISCARD(ING_SWITCH_VTAG_ALLOW, L2_DISCARDS),
+ MLXSW_SP_RXL_DISCARD(ING_SWITCH_VLAN, L2_DISCARDS),
+ MLXSW_SP_RXL_DISCARD(ING_SWITCH_STP, L2_DISCARDS),
+ MLXSW_SP_RXL_DISCARD(LOOKUP_SWITCH_UC, L2_DISCARDS),
+ MLXSW_SP_RXL_DISCARD(LOOKUP_SWITCH_MC_NULL, L2_DISCARDS),
+ MLXSW_SP_RXL_DISCARD(LOOKUP_SWITCH_LB, L2_DISCARDS),
+};
+
+/* Mapping between hardware trap and devlink trap. Multiple hardware traps can
+ * be mapped to the same devlink trap. Order follows
+ * 'mlxsw_sp_listeners_arr'.
+ */

+static u16 mlxsw_sp_listener_devlink_map[] = {
+ DEVLINK_TRAP_GENERIC_ID_SMAC_MC,
+ DEVLINK_TRAP_GENERIC_ID_VLAN_TAG_MISMATCH,
+ DEVLINK_TRAP_GENERIC_ID_INGRESS_VLAN_FILTER,
+ DEVLINK_TRAP_GENERIC_ID_INGRESS_STP_FILTER,
+ DEVLINK_TRAP_GENERIC_ID_EMPTY_TX_LIST,
+ DEVLINK_TRAP_GENERIC_ID_EMPTY_TX_LIST,
+ DEVLINK_TRAP_GENERIC_ID_PORT_LOOPBACK_FILTER,
+};
+
+static int mlxsw_sp_rx_listener(struct mlxsw_sp *mlxsw_sp, struct sk_buff *skb,
+ u8 local_port,
+ struct mlxsw_sp_port *mlxsw_sp_port)
+{
+ struct mlxsw_sp_port_pcpu_stats *pcpu_stats;
+
+ if (unlikely(!mlxsw_sp_port)) {
+ dev_warn_ratelimited(mlxsw_sp->bus_info->dev, "Port %d: skb received for non-existent port\n",
+ local_port);
+ kfree_skb(skb);
+ return -EINVAL;
+ }
+
+ skb->dev = mlxsw_sp_port->dev;
+
+ pcpu_stats = this_cpu_ptr(mlxsw_sp_port->pcpu_stats);
+ u64_stats_update_begin(&pcpu_stats->syncp);
+ pcpu_stats->rx_packets++;
+ pcpu_stats->rx_bytes += skb->len;
+ u64_stats_update_end(&pcpu_stats->syncp);
+
+ skb->protocol = eth_type_trans(skb, skb->dev);
+
+ return 0;
+}
+
+static void mlxsw_sp_rx_drop_listener(struct sk_buff *skb, u8 local_port,
+ void *trap_ctx)
+{
+ struct devlink_port *in_devlink_port;
+ struct mlxsw_sp_port *mlxsw_sp_port;
+ struct mlxsw_sp *mlxsw_sp;
+ struct devlink *devlink;
+
+ mlxsw_sp = devlink_trap_ctx_priv(trap_ctx);
+ mlxsw_sp_port = mlxsw_sp->ports[local_port];
+
+ if (mlxsw_sp_rx_listener(mlxsw_sp, skb, local_port, mlxsw_sp_port))
+ return;
+
+ devlink = priv_to_devlink(mlxsw_sp->core);
+ in_devlink_port = mlxsw_core_port_devlink_port_get(mlxsw_sp->core,
+ local_port);
+ devlink_trap_report(devlink, skb, trap_ctx, in_devlink_port);
+ consume_skb(skb);
+}
+
+int mlxsw_sp_devlink_traps_init(struct mlxsw_sp *mlxsw_sp)
+{
+ struct devlink *devlink = priv_to_devlink(mlxsw_sp->core);
+
+ if (WARN_ON(ARRAY_SIZE(mlxsw_sp_listener_devlink_map) !=
+ ARRAY_SIZE(mlxsw_sp_listeners_arr)))
+ return -EINVAL;
+
+ return devlink_traps_register(devlink, mlxsw_sp_traps_arr,
+ ARRAY_SIZE(mlxsw_sp_traps_arr),
+ mlxsw_sp);
+}
+
+void mlxsw_sp_devlink_traps_fini(struct mlxsw_sp *mlxsw_sp)
+{
+ struct devlink *devlink = priv_to_devlink(mlxsw_sp->core);
+
+ devlink_traps_unregister(devlink, mlxsw_sp_traps_arr,
+ ARRAY_SIZE(mlxsw_sp_traps_arr));
+}
+
+int mlxsw_sp_trap_init(struct mlxsw_core *mlxsw_core,
+ const struct devlink_trap *trap, void *trap_ctx)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(mlxsw_sp_listener_devlink_map); i++) {
+ struct mlxsw_listener *listener;
+ int err;
+
+ if (mlxsw_sp_listener_devlink_map[i] != trap->id)
+ continue;
+ listener = &mlxsw_sp_listeners_arr[i];
+
+ err = mlxsw_core_trap_register(mlxsw_core, listener, trap_ctx);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+void mlxsw_sp_trap_fini(struct mlxsw_core *mlxsw_core,
+ const struct devlink_trap *trap, void *trap_ctx)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(mlxsw_sp_listener_devlink_map); i++) {
+ struct mlxsw_listener *listener;
+
+ if (mlxsw_sp_listener_devlink_map[i] != trap->id)
+ continue;
+ listener = &mlxsw_sp_listeners_arr[i];
+
+ mlxsw_core_trap_unregister(mlxsw_core, listener, trap_ctx);
+ }
+}
+
+int mlxsw_sp_trap_action_set(struct mlxsw_core *mlxsw_core,
+ const struct devlink_trap *trap,
+ enum devlink_trap_action action)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(mlxsw_sp_listener_devlink_map); i++) {
+ enum mlxsw_reg_hpkt_action hw_action;
+ struct mlxsw_listener *listener;
+ int err;
+
+ if (mlxsw_sp_listener_devlink_map[i] != trap->id)
+ continue;
+ listener = &mlxsw_sp_listeners_arr[i];
+
+ switch (action) {
+ case DEVLINK_TRAP_ACTION_DROP:
+ hw_action = MLXSW_REG_HPKT_ACTION_SET_FW_DEFAULT;
+ break;
+ case DEVLINK_TRAP_ACTION_TRAP:
+ hw_action = MLXSW_REG_HPKT_ACTION_TRAP_EXCEPTION_TO_CPU;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ err = mlxsw_core_trap_action_set(mlxsw_core, listener,
+ hw_action);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+#define MLXSW_SP_DISCARD_POLICER_ID (MLXSW_REG_HTGT_TRAP_GROUP_MAX + 1)
+
+static int
+mlxsw_sp_trap_group_policer_init(struct mlxsw_sp *mlxsw_sp,
+ const struct devlink_trap_group *group)
+{
+ enum mlxsw_reg_qpcr_ir_units ir_units;
+ char qpcr_pl[MLXSW_REG_QPCR_LEN];
+ u16 policer_id;
+ u8 burst_size;
+ bool is_bytes;
+ u32 rate;
+
+ switch (group->id) {
+ case DEVLINK_TRAP_GROUP_GENERIC_ID_L2_DROPS:
+ policer_id = MLXSW_SP_DISCARD_POLICER_ID;
+ ir_units = MLXSW_REG_QPCR_IR_UNITS_M;
+ is_bytes = false;
+ rate = 10 * 1024; /* 10Kpps */
+ burst_size = 7;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ mlxsw_reg_qpcr_pack(qpcr_pl, policer_id, ir_units, is_bytes, rate,
+ burst_size);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qpcr), qpcr_pl);
+}
+
+static int
+__mlxsw_sp_trap_group_init(struct mlxsw_sp *mlxsw_sp,
+ const struct devlink_trap_group *group)
+{
+ char htgt_pl[MLXSW_REG_HTGT_LEN];
+ u8 priority, tc, group_id;
+ u16 policer_id;
+
+ switch (group->id) {
+ case DEVLINK_TRAP_GROUP_GENERIC_ID_L2_DROPS:
+ group_id = MLXSW_REG_HTGT_TRAP_GROUP_SP_L2_DISCARDS;
+ policer_id = MLXSW_SP_DISCARD_POLICER_ID;
+ priority = 0;
+ tc = 1;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ mlxsw_reg_htgt_pack(htgt_pl, group_id, policer_id, priority, tc);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(htgt), htgt_pl);
+}
+
+int mlxsw_sp_trap_group_init(struct mlxsw_core *mlxsw_core,
+ const struct devlink_trap_group *group)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
+ int err;
+
+ err = mlxsw_sp_trap_group_policer_init(mlxsw_sp, group);
+ if (err)
+ return err;
+
+ err = __mlxsw_sp_trap_group_init(mlxsw_sp, group);
+ if (err)
+ return err;
+
+ return 0;
+}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/switchx2.c b/drivers/net/ethernet/mellanox/mlxsw/switchx2.c
index bdab96f5bc70..1c14c051ee52 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/switchx2.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/switchx2.c
@@ -637,12 +637,6 @@ static const struct mlxsw_sx_port_link_mode mlxsw_sx_port_link_mode[] = {
.speed = 50000,
},
{
- .mask = MLXSW_REG_PTYS_ETH_SPEED_56GBASE_R4,
- .supported = SUPPORTED_56000baseKR4_Full,
- .advertised = ADVERTISED_56000baseKR4_Full,
- .speed = 56000,
- },
- {
.mask = MLXSW_REG_PTYS_ETH_SPEED_100GBASE_CR4 |
MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4 |
MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4 |
diff --git a/drivers/net/ethernet/mellanox/mlxsw/trap.h b/drivers/net/ethernet/mellanox/mlxsw/trap.h
index 19202bdb5105..7618f084cae9 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/trap.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/trap.h
@@ -66,6 +66,13 @@ enum {
MLXSW_TRAP_ID_NVE_ENCAP_ARP = 0xBD,
MLXSW_TRAP_ID_ROUTER_ALERT_IPV4 = 0xD6,
MLXSW_TRAP_ID_ROUTER_ALERT_IPV6 = 0xD7,
+ MLXSW_TRAP_ID_DISCARD_ING_PACKET_SMAC_MC = 0x140,
+ MLXSW_TRAP_ID_DISCARD_ING_SWITCH_VTAG_ALLOW = 0x148,
+ MLXSW_TRAP_ID_DISCARD_ING_SWITCH_VLAN = 0x149,
+ MLXSW_TRAP_ID_DISCARD_ING_SWITCH_STP = 0x14A,
+ MLXSW_TRAP_ID_DISCARD_LOOKUP_SWITCH_UC = 0x150,
+ MLXSW_TRAP_ID_DISCARD_LOOKUP_SWITCH_MC_NULL = 0x151,
+ MLXSW_TRAP_ID_DISCARD_LOOKUP_SWITCH_LB = 0x152,
MLXSW_TRAP_ID_ACL0 = 0x1C0,
/* Multicast trap used for routes with trap action */
MLXSW_TRAP_ID_ACL1 = 0x1C1,
diff --git a/drivers/net/ethernet/micrel/ks8842.c b/drivers/net/ethernet/micrel/ks8842.c
index ccd06702cc56..da329ca115cc 100644
--- a/drivers/net/ethernet/micrel/ks8842.c
+++ b/drivers/net/ethernet/micrel/ks8842.c
@@ -580,9 +580,7 @@ out:
dma_unmap_single(adapter->dev, sg_dma_address(sg),
DMA_BUFFER_SIZE, DMA_FROM_DEVICE);
sg_dma_address(sg) = 0;
- if (ctl->skb)
- dev_kfree_skb(ctl->skb);
-
+ dev_kfree_skb(ctl->skb);
ctl->skb = NULL;
printk(KERN_ERR DRV_NAME": Failed to start RX DMA: %d\n", err);
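This cleanup (and the identical one in lan743x_ptp.c further down) relies on dev_kfree_skb() being a no-op for a NULL pointer, so the guard was redundant. The same idiom in portable C, using free(), whose NULL-safety is guaranteed by the standard:

#include <stdlib.h>

struct ctx { void *buf; };

/* free(NULL) is defined to do nothing, so the guard below is redundant --
 * the same reasoning the patch applies to dev_kfree_skb(NULL).
 */
static void release_guarded(struct ctx *c)
{
	if (c->buf)
		free(c->buf);
	c->buf = NULL;
}

static void release_plain(struct ctx *c)
{
	free(c->buf);	/* safe even when c->buf is NULL */
	c->buf = NULL;
}

int main(void)
{
	struct ctx c = { 0 };

	release_guarded(&c);
	release_plain(&c);
	return 0;
}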
diff --git a/drivers/net/ethernet/micrel/ks8851_mll.c b/drivers/net/ethernet/micrel/ks8851_mll.c
index e52b015e31a9..a41a90c589db 100644
--- a/drivers/net/ethernet/micrel/ks8851_mll.c
+++ b/drivers/net/ethernet/micrel/ks8851_mll.c
@@ -1225,7 +1225,6 @@ MODULE_DEVICE_TABLE(of, ks8851_ml_dt_ids);
static int ks8851_probe(struct platform_device *pdev)
{
int err;
- struct resource *io_d, *io_c;
struct net_device *netdev;
struct ks_net *ks;
u16 id, data;
@@ -1240,15 +1239,13 @@ static int ks8851_probe(struct platform_device *pdev)
ks = netdev_priv(netdev);
ks->netdev = netdev;
- io_d = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- ks->hw_addr = devm_ioremap_resource(&pdev->dev, io_d);
+ ks->hw_addr = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(ks->hw_addr)) {
err = PTR_ERR(ks->hw_addr);
goto err_free;
}
- io_c = platform_get_resource(pdev, IORESOURCE_MEM, 1);
- ks->hw_addr_cmd = devm_ioremap_resource(&pdev->dev, io_c);
+ ks->hw_addr_cmd = devm_platform_ioremap_resource(pdev, 1);
if (IS_ERR(ks->hw_addr_cmd)) {
err = PTR_ERR(ks->hw_addr_cmd);
goto err_free;
diff --git a/drivers/net/ethernet/microchip/lan743x_main.c b/drivers/net/ethernet/microchip/lan743x_main.c
index 13e6bf13ac4d..15a8be6bad27 100644
--- a/drivers/net/ethernet/microchip/lan743x_main.c
+++ b/drivers/net/ethernet/microchip/lan743x_main.c
@@ -1434,7 +1434,7 @@ static void lan743x_tx_frame_add_lso(struct lan743x_tx *tx,
}
static int lan743x_tx_frame_add_fragment(struct lan743x_tx *tx,
- const struct skb_frag_struct *fragment,
+ const skb_frag_t *fragment,
unsigned int frame_length)
{
/* called only from within lan743x_tx_xmit_frame
@@ -1607,9 +1607,8 @@ static netdev_tx_t lan743x_tx_xmit_frame(struct lan743x_tx *tx,
goto finish;
for (j = 0; j < nr_frags; j++) {
- const struct skb_frag_struct *frag;
+ const skb_frag_t *frag = &(skb_shinfo(skb)->frags[j]);
- frag = &(skb_shinfo(skb)->frags[j]);
if (lan743x_tx_frame_add_fragment(tx, frag, frame_length)) {
/* upon error no need to call
* lan743x_tx_frame_end
diff --git a/drivers/net/ethernet/microchip/lan743x_ptp.c b/drivers/net/ethernet/microchip/lan743x_ptp.c
index b2109eca81fd..57b26c2acf87 100644
--- a/drivers/net/ethernet/microchip/lan743x_ptp.c
+++ b/drivers/net/ethernet/microchip/lan743x_ptp.c
@@ -963,8 +963,7 @@ void lan743x_ptp_close(struct lan743x_adapter *adapter)
index++) {
struct sk_buff *skb = ptp->tx_ts_skb_queue[index];
- if (skb)
- dev_kfree_skb(skb);
+ dev_kfree_skb(skb);
ptp->tx_ts_skb_queue[index] = NULL;
ptp->tx_ts_seconds_queue[index] = 0;
ptp->tx_ts_nseconds_queue[index] = 0;
diff --git a/drivers/net/ethernet/mscc/ocelot.c b/drivers/net/ethernet/mscc/ocelot.c
index 6932e615d4b0..4d1bce4389c7 100644
--- a/drivers/net/ethernet/mscc/ocelot.c
+++ b/drivers/net/ethernet/mscc/ocelot.c
@@ -14,6 +14,7 @@
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/phy.h>
+#include <linux/ptp_clock_kernel.h>
#include <linux/skbuff.h>
#include <linux/iopoll.h>
#include <net/arp.h>
@@ -538,7 +539,7 @@ static int ocelot_port_stop(struct net_device *dev)
*/
static int ocelot_gen_ifh(u32 *ifh, struct frame_info *info)
{
- ifh[0] = IFH_INJ_BYPASS;
+ ifh[0] = IFH_INJ_BYPASS | ((0x1ff & info->rew_op) << 21);
ifh[1] = (0xf00 & info->port) >> 8;
ifh[2] = (0xff & info->port) << 24;
ifh[3] = (info->tag_type << 16) | info->vid;
@@ -548,6 +549,7 @@ static int ocelot_gen_ifh(u32 *ifh, struct frame_info *info)
static int ocelot_port_xmit(struct sk_buff *skb, struct net_device *dev)
{
+ struct skb_shared_info *shinfo = skb_shinfo(skb);
struct ocelot_port *port = netdev_priv(dev);
struct ocelot *ocelot = port->ocelot;
u32 val, ifh[IFH_LEN];
@@ -566,6 +568,14 @@ static int ocelot_port_xmit(struct sk_buff *skb, struct net_device *dev)
info.port = BIT(port->chip_port);
info.tag_type = IFH_TAG_TYPE_C;
info.vid = skb_vlan_tag_get(skb);
+
+ /* Check if timestamping is needed */
+ if (ocelot->ptp && shinfo->tx_flags & SKBTX_HW_TSTAMP) {
+ info.rew_op = port->ptp_cmd;
+ if (port->ptp_cmd == IFH_REW_OP_TWO_STEP_PTP)
+ info.rew_op |= (port->ts_id % 4) << 3;
+ }
+
ocelot_gen_ifh(ifh, &info);
for (i = 0; i < IFH_LEN; i++)
@@ -596,11 +606,58 @@ static int ocelot_port_xmit(struct sk_buff *skb, struct net_device *dev)
dev->stats.tx_packets++;
dev->stats.tx_bytes += skb->len;
- dev_kfree_skb_any(skb);
+ if (ocelot->ptp && shinfo->tx_flags & SKBTX_HW_TSTAMP &&
+ port->ptp_cmd == IFH_REW_OP_TWO_STEP_PTP) {
+ struct ocelot_skb *oskb =
+ kzalloc(sizeof(struct ocelot_skb), GFP_ATOMIC);
+
+ if (unlikely(!oskb))
+ goto out;
+
+ skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
+
+ oskb->skb = skb;
+ oskb->id = port->ts_id % 4;
+ port->ts_id++;
+
+ list_add_tail(&oskb->head, &port->skbs);
+
+ return NETDEV_TX_OK;
+ }
+
+out:
+ dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
}
+void ocelot_get_hwtimestamp(struct ocelot *ocelot, struct timespec64 *ts)
+{
+ unsigned long flags;
+ u32 val;
+
+ spin_lock_irqsave(&ocelot->ptp_clock_lock, flags);
+
+ /* Read current PTP time to get seconds */
+ val = ocelot_read_rix(ocelot, PTP_PIN_CFG, TOD_ACC_PIN);
+
+ val &= ~(PTP_PIN_CFG_SYNC | PTP_PIN_CFG_ACTION_MASK | PTP_PIN_CFG_DOM);
+ val |= PTP_PIN_CFG_ACTION(PTP_PIN_ACTION_SAVE);
+ ocelot_write_rix(ocelot, val, PTP_PIN_CFG, TOD_ACC_PIN);
+ ts->tv_sec = ocelot_read_rix(ocelot, PTP_PIN_TOD_SEC_LSB, TOD_ACC_PIN);
+
+ /* Read packet HW timestamp from FIFO */
+ val = ocelot_read(ocelot, SYS_PTP_TXSTAMP);
+ ts->tv_nsec = SYS_PTP_TXSTAMP_PTP_TXSTAMP(val);
+
+ /* The seconds counter has incremented since the stamp was taken */
+ if ((ts->tv_sec & 0x1) != !!(val & SYS_PTP_TXSTAMP_PTP_TXSTAMP_SEC))
+ ts->tv_sec--;
+
+ spin_unlock_irqrestore(&ocelot->ptp_clock_lock, flags);
+}
+EXPORT_SYMBOL(ocelot_get_hwtimestamp);
+
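The FIFO read above yields only the nanoseconds of the stamp plus one bit of its seconds counter, so the full seconds value is reconstructed from the current time: if the low bit of the current seconds disagrees with the stored bit, the counter must have ticked after the stamp was taken, and one second is subtracted. A worked sketch of that parity check (it assumes, as the hardware path does, that the stamp is read back well within one second):

#include <stdint.h>
#include <stdio.h>

/* Rebuild a full timestamp from (now_sec, stamp_ns, stamp_sec_lsb),
 * mirroring the parity check in ocelot_get_hwtimestamp().
 */
static void reconstruct(uint64_t now_sec, uint32_t stamp_ns,
			unsigned int stamp_sec_lsb)
{
	uint64_t sec = now_sec;

	if ((sec & 0x1) != (stamp_sec_lsb & 0x1))
		sec--;	/* seconds rolled over after the stamp was taken */

	printf("stamp = %llu.%09u\n", (unsigned long long)sec,
	       (unsigned)stamp_ns);
}

int main(void)
{
	/* Stamped late in second 99, read back when the clock already shows
	 * second 100: parities differ, so we rewind to 99.
	 */
	reconstruct(100, 999999900, 99 & 1);

	/* Stamp and readout within the same second: no correction. */
	reconstruct(100, 123456, 100 & 1);
	return 0;
}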
static int ocelot_mc_unsync(struct net_device *dev, const unsigned char *addr)
{
struct ocelot_port *port = netdev_priv(dev);
@@ -917,6 +974,97 @@ static int ocelot_get_port_parent_id(struct net_device *dev,
return 0;
}
+static int ocelot_hwstamp_get(struct ocelot_port *port, struct ifreq *ifr)
+{
+ struct ocelot *ocelot = port->ocelot;
+
+ return copy_to_user(ifr->ifr_data, &ocelot->hwtstamp_config,
+ sizeof(ocelot->hwtstamp_config)) ? -EFAULT : 0;
+}
+
+static int ocelot_hwstamp_set(struct ocelot_port *port, struct ifreq *ifr)
+{
+ struct ocelot *ocelot = port->ocelot;
+ struct hwtstamp_config cfg;
+
+ if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg)))
+ return -EFAULT;
+
+ /* reserved for future extensions */
+ if (cfg.flags)
+ return -EINVAL;
+
+ /* Tx type sanity check */
+ switch (cfg.tx_type) {
+ case HWTSTAMP_TX_ON:
+ port->ptp_cmd = IFH_REW_OP_TWO_STEP_PTP;
+ break;
+ case HWTSTAMP_TX_ONESTEP_SYNC:
+ /* IFH_REW_OP_ONE_STEP_PTP updates the correction field; here we
+ * need to update the origin timestamp instead.
+ */
+ port->ptp_cmd = IFH_REW_OP_ORIGIN_PTP;
+ break;
+ case HWTSTAMP_TX_OFF:
+ port->ptp_cmd = 0;
+ break;
+ default:
+ return -ERANGE;
+ }
+
+ mutex_lock(&ocelot->ptp_lock);
+
+ switch (cfg.rx_filter) {
+ case HWTSTAMP_FILTER_NONE:
+ break;
+ case HWTSTAMP_FILTER_ALL:
+ case HWTSTAMP_FILTER_SOME:
+ case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
+ case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
+ case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
+ case HWTSTAMP_FILTER_NTP_ALL:
+ case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
+ cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
+ break;
+ default:
+ mutex_unlock(&ocelot->ptp_lock);
+ return -ERANGE;
+ }
+
+ /* Commit back the result & save it */
+ memcpy(&ocelot->hwtstamp_config, &cfg, sizeof(cfg));
+ mutex_unlock(&ocelot->ptp_lock);
+
+ return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0;
+}
+
+static int ocelot_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+{
+ struct ocelot_port *port = netdev_priv(dev);
+ struct ocelot *ocelot = port->ocelot;
+
+ /* The function is only used for PTP operations for now */
+ if (!ocelot->ptp)
+ return -EOPNOTSUPP;
+
+ switch (cmd) {
+ case SIOCSHWTSTAMP:
+ return ocelot_hwstamp_set(port, ifr);
+ case SIOCGHWTSTAMP:
+ return ocelot_hwstamp_get(port, ifr);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
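With the ndo_do_ioctl hook wired up below, user space can enable timestamping through the standard SIOCSHWTSTAMP interface. A minimal sketch of the caller side; the interface name "swp0" is a placeholder:

#include <linux/net_tstamp.h>
#include <linux/sockios.h>
#include <net/if.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <unistd.h>

int main(void)
{
	struct hwtstamp_config cfg = {
		.tx_type   = HWTSTAMP_TX_ON,	/* two-step Tx stamping */
		.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT,
	};
	struct ifreq ifr;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0)
		return 1;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "swp0", IFNAMSIZ - 1);
	ifr.ifr_data = (char *)&cfg;

	/* The driver may adjust cfg (e.g. widen rx_filter), so read it back. */
	if (ioctl(fd, SIOCSHWTSTAMP, &ifr))
		perror("SIOCSHWTSTAMP");
	else
		printf("rx_filter granted: %d\n", cfg.rx_filter);

	close(fd);
	return 0;
}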
static const struct net_device_ops ocelot_port_netdev_ops = {
.ndo_open = ocelot_port_open,
.ndo_stop = ocelot_port_stop,
@@ -933,6 +1081,7 @@ static const struct net_device_ops ocelot_port_netdev_ops = {
.ndo_set_features = ocelot_set_features,
.ndo_get_port_parent_id = ocelot_get_port_parent_id,
.ndo_setup_tc = ocelot_setup_tc,
+ .ndo_do_ioctl = ocelot_ioctl,
};
static void ocelot_get_strings(struct net_device *netdev, u32 sset, u8 *data)
@@ -1014,12 +1163,37 @@ static int ocelot_get_sset_count(struct net_device *dev, int sset)
return ocelot->num_stats;
}
+static int ocelot_get_ts_info(struct net_device *dev,
+ struct ethtool_ts_info *info)
+{
+ struct ocelot_port *ocelot_port = netdev_priv(dev);
+ struct ocelot *ocelot = ocelot_port->ocelot;
+
+ if (!ocelot->ptp)
+ return ethtool_op_get_ts_info(dev, info);
+
+ info->phc_index = ocelot->ptp_clock ?
+ ptp_clock_index(ocelot->ptp_clock) : -1;
+ info->so_timestamping |= SOF_TIMESTAMPING_TX_SOFTWARE |
+ SOF_TIMESTAMPING_RX_SOFTWARE |
+ SOF_TIMESTAMPING_SOFTWARE |
+ SOF_TIMESTAMPING_TX_HARDWARE |
+ SOF_TIMESTAMPING_RX_HARDWARE |
+ SOF_TIMESTAMPING_RAW_HARDWARE;
+ info->tx_types = BIT(HWTSTAMP_TX_OFF) | BIT(HWTSTAMP_TX_ON) |
+ BIT(HWTSTAMP_TX_ONESTEP_SYNC);
+ info->rx_filters = BIT(HWTSTAMP_FILTER_NONE) | BIT(HWTSTAMP_FILTER_ALL);
+
+ return 0;
+}
+
static const struct ethtool_ops ocelot_ethtool_ops = {
.get_strings = ocelot_get_strings,
.get_ethtool_stats = ocelot_get_ethtool_stats,
.get_sset_count = ocelot_get_sset_count,
.get_link_ksettings = phy_ethtool_get_link_ksettings,
.set_link_ksettings = phy_ethtool_set_link_ksettings,
+ .get_ts_info = ocelot_get_ts_info,
};
static int ocelot_port_attr_stp_state_set(struct ocelot_port *ocelot_port,
@@ -1629,6 +1803,196 @@ struct notifier_block ocelot_switchdev_blocking_nb __read_mostly = {
};
EXPORT_SYMBOL(ocelot_switchdev_blocking_nb);
+int ocelot_ptp_gettime64(struct ptp_clock_info *ptp, struct timespec64 *ts)
+{
+ struct ocelot *ocelot = container_of(ptp, struct ocelot, ptp_info);
+ unsigned long flags;
+ time64_t s;
+ u32 val;
+ s64 ns;
+
+ spin_lock_irqsave(&ocelot->ptp_clock_lock, flags);
+
+ val = ocelot_read_rix(ocelot, PTP_PIN_CFG, TOD_ACC_PIN);
+ val &= ~(PTP_PIN_CFG_SYNC | PTP_PIN_CFG_ACTION_MASK | PTP_PIN_CFG_DOM);
+ val |= PTP_PIN_CFG_ACTION(PTP_PIN_ACTION_SAVE);
+ ocelot_write_rix(ocelot, val, PTP_PIN_CFG, TOD_ACC_PIN);
+
+ s = ocelot_read_rix(ocelot, PTP_PIN_TOD_SEC_MSB, TOD_ACC_PIN) & 0xffff;
+ s <<= 32;
+ s += ocelot_read_rix(ocelot, PTP_PIN_TOD_SEC_LSB, TOD_ACC_PIN);
+ ns = ocelot_read_rix(ocelot, PTP_PIN_TOD_NSEC, TOD_ACC_PIN);
+
+ spin_unlock_irqrestore(&ocelot->ptp_clock_lock, flags);
+
+ /* Deal with negative values */
+ if (ns >= 0x3ffffff0 && ns <= 0x3fffffff) {
+ s--;
+ ns &= 0xf;
+ ns += 999999984;
+ }
+
+ set_normalized_timespec64(ts, s, ns);
+ return 0;
+}
+EXPORT_SYMBOL(ocelot_ptp_gettime64);
+
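The [0x3ffffff0, 0x3fffffff] window handled above is apparently how the TOD nanoseconds register encodes the values -16..-1 ns; the fixup borrows one second and folds the low nibble onto 999999984 (= 10^9 - 16). A worked example of the same normalization:

#include <stdint.h>
#include <stdio.h>

/* Normalize a TOD readout exactly as ocelot_ptp_gettime64() does. */
static void normalize(int64_t *sec, int64_t *ns)
{
	if (*ns >= 0x3ffffff0 && *ns <= 0x3fffffff) {
		(*sec)--;
		*ns &= 0xf;
		*ns += 999999984;
	}
}

int main(void)
{
	int64_t sec = 100, ns = 0x3ffffffe;	/* encodes -2 ns */

	normalize(&sec, &ns);
	/* Prints 99.999999998, i.e. 100 s minus 2 ns. */
	printf("%lld.%09lld\n", (long long)sec, (long long)ns);
	return 0;
}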
+static int ocelot_ptp_settime64(struct ptp_clock_info *ptp,
+ const struct timespec64 *ts)
+{
+ struct ocelot *ocelot = container_of(ptp, struct ocelot, ptp_info);
+ unsigned long flags;
+ u32 val;
+
+ spin_lock_irqsave(&ocelot->ptp_clock_lock, flags);
+
+ val = ocelot_read_rix(ocelot, PTP_PIN_CFG, TOD_ACC_PIN);
+ val &= ~(PTP_PIN_CFG_SYNC | PTP_PIN_CFG_ACTION_MASK | PTP_PIN_CFG_DOM);
+ val |= PTP_PIN_CFG_ACTION(PTP_PIN_ACTION_IDLE);
+
+ ocelot_write_rix(ocelot, val, PTP_PIN_CFG, TOD_ACC_PIN);
+
+ ocelot_write_rix(ocelot, lower_32_bits(ts->tv_sec), PTP_PIN_TOD_SEC_LSB,
+ TOD_ACC_PIN);
+ ocelot_write_rix(ocelot, upper_32_bits(ts->tv_sec), PTP_PIN_TOD_SEC_MSB,
+ TOD_ACC_PIN);
+ ocelot_write_rix(ocelot, ts->tv_nsec, PTP_PIN_TOD_NSEC, TOD_ACC_PIN);
+
+ val = ocelot_read_rix(ocelot, PTP_PIN_CFG, TOD_ACC_PIN);
+ val &= ~(PTP_PIN_CFG_SYNC | PTP_PIN_CFG_ACTION_MASK | PTP_PIN_CFG_DOM);
+ val |= PTP_PIN_CFG_ACTION(PTP_PIN_ACTION_LOAD);
+
+ ocelot_write_rix(ocelot, val, PTP_PIN_CFG, TOD_ACC_PIN);
+
+ spin_unlock_irqrestore(&ocelot->ptp_clock_lock, flags);
+ return 0;
+}
+
+static int ocelot_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
+{
+ if (delta > -(NSEC_PER_SEC / 2) && delta < (NSEC_PER_SEC / 2)) {
+ struct ocelot *ocelot = container_of(ptp, struct ocelot, ptp_info);
+ unsigned long flags;
+ u32 val;
+
+ spin_lock_irqsave(&ocelot->ptp_clock_lock, flags);
+
+ val = ocelot_read_rix(ocelot, PTP_PIN_CFG, TOD_ACC_PIN);
+ val &= ~(PTP_PIN_CFG_SYNC | PTP_PIN_CFG_ACTION_MASK | PTP_PIN_CFG_DOM);
+ val |= PTP_PIN_CFG_ACTION(PTP_PIN_ACTION_IDLE);
+
+ ocelot_write_rix(ocelot, val, PTP_PIN_CFG, TOD_ACC_PIN);
+
+ ocelot_write_rix(ocelot, 0, PTP_PIN_TOD_SEC_LSB, TOD_ACC_PIN);
+ ocelot_write_rix(ocelot, 0, PTP_PIN_TOD_SEC_MSB, TOD_ACC_PIN);
+ ocelot_write_rix(ocelot, delta, PTP_PIN_TOD_NSEC, TOD_ACC_PIN);
+
+ val = ocelot_read_rix(ocelot, PTP_PIN_CFG, TOD_ACC_PIN);
+ val &= ~(PTP_PIN_CFG_SYNC | PTP_PIN_CFG_ACTION_MASK | PTP_PIN_CFG_DOM);
+ val |= PTP_PIN_CFG_ACTION(PTP_PIN_ACTION_DELTA);
+
+ ocelot_write_rix(ocelot, val, PTP_PIN_CFG, TOD_ACC_PIN);
+
+ spin_unlock_irqrestore(&ocelot->ptp_clock_lock, flags);
+ } else {
+ /* Fall back to ocelot_ptp_settime64(), which is not exact. */
+ struct timespec64 ts;
+ u64 now;
+
+ ocelot_ptp_gettime64(ptp, &ts);
+
+ now = ktime_to_ns(timespec64_to_ktime(ts));
+ ts = ns_to_timespec64(now + delta);
+
+ ocelot_ptp_settime64(ptp, &ts);
+ }
+ return 0;
+}
+
+static int ocelot_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
+{
+ struct ocelot *ocelot = container_of(ptp, struct ocelot, ptp_info);
+ u32 unit = 0, direction = 0;
+ unsigned long flags;
+ u64 adj = 0;
+
+ spin_lock_irqsave(&ocelot->ptp_clock_lock, flags);
+
+ if (!scaled_ppm)
+ goto disable_adj;
+
+ if (scaled_ppm < 0) {
+ direction = PTP_CFG_CLK_ADJ_CFG_DIR;
+ scaled_ppm = -scaled_ppm;
+ }
+
+ adj = PSEC_PER_SEC << 16;
+ do_div(adj, scaled_ppm);
+ do_div(adj, 1000);
+
+ /* If the adjustment value is too large, use ns instead */
+ if (adj >= (1L << 30)) {
+ unit = PTP_CFG_CLK_ADJ_FREQ_NS;
+ do_div(adj, 1000);
+ }
+
+ /* Still too big */
+ if (adj >= (1L << 30))
+ goto disable_adj;
+
+ ocelot_write(ocelot, unit | adj, PTP_CLK_CFG_ADJ_FREQ);
+ ocelot_write(ocelot, PTP_CFG_CLK_ADJ_CFG_ENA | direction,
+ PTP_CLK_CFG_ADJ_CFG);
+
+ spin_unlock_irqrestore(&ocelot->ptp_clock_lock, flags);
+ return 0;
+
+disable_adj:
+ ocelot_write(ocelot, 0, PTP_CLK_CFG_ADJ_CFG);
+
+ spin_unlock_irqrestore(&ocelot->ptp_clock_lock, flags);
+ return 0;
+}
+
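The adjfine math is easiest to check with a concrete value. scaled_ppm carries parts-per-million scaled by 2^16, and the register written above appears to hold the period between single-nanosecond corrections, in picoseconds by default and in nanoseconds once PTP_CFG_CLK_ADJ_FREQ_NS is set. For 1 ppm the period comes out to 10^9 ps, i.e. one 1 ns correction every millisecond:

#include <stdint.h>
#include <stdio.h>

#define PSEC_PER_SEC 1000000000000LL

int main(void)
{
	/* scaled_ppm is ppm * 2^16, so 65536 means exactly 1 ppm. */
	long scaled_ppm = 65536;
	uint64_t adj;
	int ns_unit = 0;

	adj = (uint64_t)PSEC_PER_SEC << 16;
	adj /= scaled_ppm;
	adj /= 1000;

	/* Same overflow handling as ocelot_ptp_adjfine(): switch the
	 * register from picosecond to nanosecond units when needed.
	 */
	if (adj >= (1UL << 30)) {
		ns_unit = 1;
		adj /= 1000;
	}

	/* 1 ppm -> 10^9 ps between corrections. */
	printf("adj = %llu %s\n", (unsigned long long)adj,
	       ns_unit ? "ns" : "ps");
	return 0;
}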
+static struct ptp_clock_info ocelot_ptp_clock_info = {
+ .owner = THIS_MODULE,
+ .name = "ocelot ptp",
+ .max_adj = 0x7fffffff,
+ .n_alarm = 0,
+ .n_ext_ts = 0,
+ .n_per_out = 0,
+ .n_pins = 0,
+ .pps = 0,
+ .gettime64 = ocelot_ptp_gettime64,
+ .settime64 = ocelot_ptp_settime64,
+ .adjtime = ocelot_ptp_adjtime,
+ .adjfine = ocelot_ptp_adjfine,
+};
+
+static int ocelot_init_timestamp(struct ocelot *ocelot)
+{
+ ocelot->ptp_info = ocelot_ptp_clock_info;
+ ocelot->ptp_clock = ptp_clock_register(&ocelot->ptp_info, ocelot->dev);
+ if (IS_ERR(ocelot->ptp_clock))
+ return PTR_ERR(ocelot->ptp_clock);
+ /* Check if PHC support is missing at the configuration level */
+ if (!ocelot->ptp_clock)
+ return 0;
+
+ ocelot_write(ocelot, SYS_PTP_CFG_PTP_STAMP_WID(30), SYS_PTP_CFG);
+ ocelot_write(ocelot, 0xffffffff, ANA_TABLES_PTP_ID_LOW);
+ ocelot_write(ocelot, 0xffffffff, ANA_TABLES_PTP_ID_HIGH);
+
+ ocelot_write(ocelot, PTP_CFG_MISC_PTP_EN, PTP_CFG_MISC);
+
+ /* There is no device reconfiguration; PTP Rx stamping is always
+ * enabled.
+ */
+ ocelot->hwtstamp_config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
+
+ return 0;
+}
+
int ocelot_probe_port(struct ocelot *ocelot, u8 port,
void __iomem *regs,
struct phy_device *phy)
@@ -1661,6 +2025,8 @@ int ocelot_probe_port(struct ocelot *ocelot, u8 port,
ocelot_mact_learn(ocelot, PGID_CPU, dev->dev_addr, ocelot_port->pvid,
ENTRYTYPE_LOCKED);
+ INIT_LIST_HEAD(&ocelot_port->skbs);
+
err = register_netdev(dev);
if (err) {
dev_err(ocelot->dev, "register_netdev failed\n");
@@ -1684,7 +2050,7 @@ EXPORT_SYMBOL(ocelot_probe_port);
int ocelot_init(struct ocelot *ocelot)
{
u32 port;
- int i, cpu = ocelot->num_phys_ports;
+ int i, ret, cpu = ocelot->num_phys_ports;
char queue_name[32];
ocelot->lags = devm_kcalloc(ocelot->dev, ocelot->num_phys_ports,
@@ -1699,6 +2065,8 @@ int ocelot_init(struct ocelot *ocelot)
return -ENOMEM;
mutex_init(&ocelot->stats_lock);
+ mutex_init(&ocelot->ptp_lock);
+ spin_lock_init(&ocelot->ptp_clock_lock);
snprintf(queue_name, sizeof(queue_name), "%s-stats",
dev_name(ocelot->dev));
ocelot->stats_queue = create_singlethread_workqueue(queue_name);
@@ -1812,16 +2180,43 @@ int ocelot_init(struct ocelot *ocelot)
INIT_DELAYED_WORK(&ocelot->stats_work, ocelot_check_stats_work);
queue_delayed_work(ocelot->stats_queue, &ocelot->stats_work,
OCELOT_STATS_CHECK_DELAY);
+
+ if (ocelot->ptp) {
+ ret = ocelot_init_timestamp(ocelot);
+ if (ret) {
+ dev_err(ocelot->dev,
+ "Timestamp initialization failed\n");
+ return ret;
+ }
+ }
+
return 0;
}
EXPORT_SYMBOL(ocelot_init);
void ocelot_deinit(struct ocelot *ocelot)
{
+ struct list_head *pos, *tmp;
+ struct ocelot_port *port;
+ struct ocelot_skb *entry;
+ int i;
+
cancel_delayed_work(&ocelot->stats_work);
destroy_workqueue(ocelot->stats_queue);
mutex_destroy(&ocelot->stats_lock);
ocelot_ace_deinit();
+
+ for (i = 0; i < ocelot->num_phys_ports; i++) {
+ port = ocelot->ports[i];
+
+ list_for_each_safe(pos, tmp, &port->skbs) {
+ entry = list_entry(pos, struct ocelot_skb, head);
+
+ list_del(pos);
+ dev_kfree_skb_any(entry->skb);
+ kfree(entry);
+ }
+ }
}
EXPORT_SYMBOL(ocelot_deinit);
diff --git a/drivers/net/ethernet/mscc/ocelot.h b/drivers/net/ethernet/mscc/ocelot.h
index f7eeb4806897..e40773c01a44 100644
--- a/drivers/net/ethernet/mscc/ocelot.h
+++ b/drivers/net/ethernet/mscc/ocelot.h
@@ -11,9 +11,11 @@
#include <linux/bitops.h>
#include <linux/etherdevice.h>
#include <linux/if_vlan.h>
+#include <linux/net_tstamp.h>
#include <linux/phy.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
+#include <linux/ptp_clock_kernel.h>
#include <linux/regmap.h>
#include "ocelot_ana.h"
@@ -23,6 +25,7 @@
#include "ocelot_sys.h"
#include "ocelot_qs.h"
#include "ocelot_tc.h"
+#include "ocelot_ptp.h"
#define PGID_AGGR 64
#define PGID_SRC 80
@@ -38,14 +41,17 @@
#define OCELOT_STATS_CHECK_DELAY (2 * HZ)
+#define OCELOT_PTP_QUEUE_SZ 128
+
#define IFH_LEN 4
struct frame_info {
u32 len;
u16 port;
u16 vid;
- u8 cpuq;
u8 tag_type;
+ u16 rew_op;
+ u32 timestamp; /* rew_val */
};
#define IFH_INJ_BYPASS BIT(31)
@@ -54,6 +60,12 @@ struct frame_info {
#define IFH_TAG_TYPE_C 0
#define IFH_TAG_TYPE_S 1
+#define IFH_REW_OP_NOOP 0x0
+#define IFH_REW_OP_DSCP 0x1
+#define IFH_REW_OP_ONE_STEP_PTP 0x2
+#define IFH_REW_OP_TWO_STEP_PTP 0x3
+#define IFH_REW_OP_ORIGIN_PTP 0x5
+
#define OCELOT_SPEED_2500 0
#define OCELOT_SPEED_1000 1
#define OCELOT_SPEED_100 2
@@ -71,6 +83,7 @@ enum ocelot_target {
SYS,
S2,
HSIO,
+ PTP,
TARGET_MAX,
};
@@ -343,6 +356,13 @@ enum ocelot_reg {
S2_CACHE_ACTION_DAT,
S2_CACHE_CNT_DAT,
S2_CACHE_TG_DAT,
+ PTP_PIN_CFG = PTP << TARGET_OFFSET,
+ PTP_PIN_TOD_SEC_MSB,
+ PTP_PIN_TOD_SEC_LSB,
+ PTP_PIN_TOD_NSEC,
+ PTP_CFG_MISC,
+ PTP_CLK_CFG_ADJ_CFG,
+ PTP_CLK_CFG_ADJ_FREQ,
};
enum ocelot_regfield {
@@ -393,6 +413,13 @@ enum ocelot_regfield {
REGFIELD_MAX
};
+enum ocelot_clk_pins {
+ ALT_PPS_PIN = 1,
+ EXT_CLK_PIN,
+ ALT_LDST_PIN,
+ TOD_ACC_PIN
+};
+
struct ocelot_multicast {
struct list_head list;
unsigned char addr[ETH_ALEN];
@@ -442,6 +469,13 @@ struct ocelot {
u64 *stats;
struct delayed_work stats_work;
struct workqueue_struct *stats_queue;
+
+ u8 ptp:1;
+ struct ptp_clock *ptp_clock;
+ struct ptp_clock_info ptp_info;
+ struct hwtstamp_config hwtstamp_config;
+ struct mutex ptp_lock; /* Protects the PTP interface state */
+ spinlock_t ptp_clock_lock; /* Protects the PTP clock */
};
struct ocelot_port {
@@ -465,6 +499,16 @@ struct ocelot_port {
struct phy *serdes;
struct ocelot_port_tc tc;
+
+ u8 ptp_cmd;
+ struct list_head skbs;
+ u8 ts_id;
+};
+
+struct ocelot_skb {
+ struct list_head head;
+ struct sk_buff *skb;
+ u8 id;
};
u32 __ocelot_read_ix(struct ocelot *ocelot, u32 reg, u32 offset);
@@ -509,4 +553,7 @@ extern struct notifier_block ocelot_netdevice_nb;
extern struct notifier_block ocelot_switchdev_nb;
extern struct notifier_block ocelot_switchdev_blocking_nb;
+int ocelot_ptp_gettime64(struct ptp_clock_info *ptp, struct timespec64 *ts);
+void ocelot_get_hwtimestamp(struct ocelot *ocelot, struct timespec64 *ts);
+
#endif
diff --git a/drivers/net/ethernet/mscc/ocelot_board.c b/drivers/net/ethernet/mscc/ocelot_board.c
index 2451d4a96490..b063eb78fa0c 100644
--- a/drivers/net/ethernet/mscc/ocelot_board.c
+++ b/drivers/net/ethernet/mscc/ocelot_board.c
@@ -16,24 +16,27 @@
#include "ocelot.h"
-static int ocelot_parse_ifh(u32 *ifh, struct frame_info *info)
+#define IFH_EXTRACT_BITFIELD64(x, o, w) (((x) >> (o)) & GENMASK_ULL((w) - 1, 0))
+
+static int ocelot_parse_ifh(u32 *_ifh, struct frame_info *info)
{
- int i;
u8 llen, wlen;
+ u64 ifh[2];
+
+ ifh[0] = be64_to_cpu(((__force __be64 *)_ifh)[0]);
+ ifh[1] = be64_to_cpu(((__force __be64 *)_ifh)[1]);
- /* The IFH is in network order, switch to CPU order */
- for (i = 0; i < IFH_LEN; i++)
- ifh[i] = ntohl((__force __be32)ifh[i]);
+ wlen = IFH_EXTRACT_BITFIELD64(ifh[0], 7, 8);
+ llen = IFH_EXTRACT_BITFIELD64(ifh[0], 15, 6);
- wlen = (ifh[1] >> 7) & 0xff;
- llen = (ifh[1] >> 15) & 0x3f;
info->len = OCELOT_BUFFER_CELL_SZ * wlen + llen - 80;
- info->port = (ifh[2] & GENMASK(14, 11)) >> 11;
+ info->timestamp = IFH_EXTRACT_BITFIELD64(ifh[0], 21, 32);
+
+ info->port = IFH_EXTRACT_BITFIELD64(ifh[1], 43, 4);
- info->cpuq = (ifh[3] & GENMASK(27, 20)) >> 20;
- info->tag_type = (ifh[3] & BIT(16)) >> 16;
- info->vid = ifh[3] & GENMASK(11, 0);
+ info->tag_type = IFH_EXTRACT_BITFIELD64(ifh[1], 16, 1);
+ info->vid = IFH_EXTRACT_BITFIELD64(ifh[1], 0, 12);
return 0;
}
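The rewritten parser treats the IFH as two big-endian 64-bit words and pulls fields out with an offset/width macro instead of hand-built shift-and-mask expressions. A user-space rendering of the same extraction, with GENMASK_ULL() restated locally; the bit positions match those used above, while the IFH word itself is fabricated:

#include <stdint.h>
#include <stdio.h>

/* User-space stand-in for the kernel's GENMASK_ULL(h, l). */
#define GENMASK_ULL(h, l) \
	((~0ULL << (l)) & (~0ULL >> (63 - (h))))
#define IFH_EXTRACT_BITFIELD64(x, o, w) \
	(((x) >> (o)) & GENMASK_ULL((w) - 1, 0))

int main(void)
{
	/* Fabricated IFH word: port 5 at bits [46:43], VID 100 at [11:0]. */
	uint64_t ifh1 = (5ULL << 43) | 100;

	printf("port = %llu\n",
	       (unsigned long long)IFH_EXTRACT_BITFIELD64(ifh1, 43, 4));
	printf("vid  = %llu\n",
	       (unsigned long long)IFH_EXTRACT_BITFIELD64(ifh1, 0, 12));
	return 0;
}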
@@ -91,13 +94,14 @@ static irqreturn_t ocelot_xtr_irq_handler(int irq, void *arg)
return IRQ_NONE;
do {
- struct sk_buff *skb;
+ struct skb_shared_hwtstamps *shhwtstamps;
+ u64 tod_in_ns, full_ts_in_ns;
+ struct frame_info info = {};
struct net_device *dev;
- u32 *buf;
+ u32 ifh[4], val, *buf;
+ struct timespec64 ts;
int sz, len, buf_len;
- u32 ifh[4];
- u32 val;
- struct frame_info info;
+ struct sk_buff *skb;
for (i = 0; i < IFH_LEN; i++) {
err = ocelot_rx_frame_word(ocelot, grp, true, &ifh[i]);
@@ -144,6 +148,22 @@ static irqreturn_t ocelot_xtr_irq_handler(int irq, void *arg)
break;
}
+ if (ocelot->ptp) {
+ ocelot_ptp_gettime64(&ocelot->ptp_info, &ts);
+
+ tod_in_ns = ktime_set(ts.tv_sec, ts.tv_nsec);
+ if ((tod_in_ns & 0xffffffff) < info.timestamp)
+ full_ts_in_ns = (((tod_in_ns >> 32) - 1) << 32) |
+ info.timestamp;
+ else
+ full_ts_in_ns = (tod_in_ns & GENMASK_ULL(63, 32)) |
+ info.timestamp;
+
+ shhwtstamps = skb_hwtstamps(skb);
+ memset(shhwtstamps, 0, sizeof(struct skb_shared_hwtstamps));
+ shhwtstamps->hwtstamp = full_ts_in_ns;
+ }
+
/* Everything we see on an interface that is in the HW bridge
* has already been forwarded.
*/
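The hardware delivers only the low 32 bits of the Rx timestamp in the IFH, so the handler splices them under the current time-of-day, borrowing from the upper word when the low word has already wrapped past the stamp. A sketch of that reconstruction; it is only valid while the frame is processed within ~4.29 s (2^32 ns) of being stamped:

#include <stdint.h>
#include <stdio.h>

/* Rebuild a full 64-bit timestamp from the frame's 32-bit stamp and the
 * current time-of-day, as the extraction IRQ handler does.
 */
static uint64_t full_ts(uint64_t tod_in_ns, uint32_t stamp)
{
	if ((tod_in_ns & 0xffffffffULL) < stamp)
		return (((tod_in_ns >> 32) - 1) << 32) | stamp;
	return (tod_in_ns & 0xffffffff00000000ULL) | stamp;
}

int main(void)
{
	/* TOD's low word already wrapped past the stamp: borrow from the
	 * upper word.
	 */
	uint64_t tod = (7ULL << 32) | 0x10;

	/* Prints 6fffffff0: upper word decremented, stamp restored. */
	printf("%llx\n", (unsigned long long)full_ts(tod, 0xfffffff0u));
	return 0;
}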
@@ -163,6 +183,66 @@ static irqreturn_t ocelot_xtr_irq_handler(int irq, void *arg)
return IRQ_HANDLED;
}
+static irqreturn_t ocelot_ptp_rdy_irq_handler(int irq, void *arg)
+{
+ int budget = OCELOT_PTP_QUEUE_SZ;
+ struct ocelot *ocelot = arg;
+
+ while (budget--) {
+ struct skb_shared_hwtstamps shhwtstamps;
+ struct list_head *pos, *tmp;
+ struct sk_buff *skb = NULL;
+ struct ocelot_skb *entry;
+ struct ocelot_port *port;
+ struct timespec64 ts;
+ u32 val, id, txport;
+
+ val = ocelot_read(ocelot, SYS_PTP_STATUS);
+
+ /* Check if a timestamp can be retrieved */
+ if (!(val & SYS_PTP_STATUS_PTP_MESS_VLD))
+ break;
+
+ WARN_ON(val & SYS_PTP_STATUS_PTP_OVFL);
+
+ /* Retrieve the ts ID and Tx port */
+ id = SYS_PTP_STATUS_PTP_MESS_ID_X(val);
+ txport = SYS_PTP_STATUS_PTP_MESS_TXPORT_X(val);
+
+ /* Retrieve its associated skb */
+ port = ocelot->ports[txport];
+
+ list_for_each_safe(pos, tmp, &port->skbs) {
+ entry = list_entry(pos, struct ocelot_skb, head);
+ if (entry->id != id)
+ continue;
+
+ skb = entry->skb;
+
+ list_del(pos);
+ kfree(entry);
+ }
+
+ /* Next ts */
+ ocelot_write(ocelot, SYS_PTP_NXT_PTP_NXT, SYS_PTP_NXT);
+
+ if (unlikely(!skb))
+ continue;
+
+ /* Get the h/w timestamp */
+ ocelot_get_hwtimestamp(ocelot, &ts);
+
+ /* Set the timestamp into the skb */
+ memset(&shhwtstamps, 0, sizeof(shhwtstamps));
+ shhwtstamps.hwtstamp = ktime_set(ts.tv_sec, ts.tv_nsec);
+ skb_tstamp_tx(skb, &shhwtstamps);
+
+ dev_kfree_skb_any(skb);
+ }
+
+ return IRQ_HANDLED;
+}
+
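Since ocelot_port_xmit() assigns timestamp ids as ts_id % 4, at most four two-step stamps can be outstanding per port, and the handler above pairs each completed (id, txport) with the matching queued skb. A toy model of that bookkeeping:

#include <stdio.h>

/* Toy model of the two-step Tx path: the xmit side queues each stamped
 * frame under id = ts_id % 4, and the PTP-ready handler later claims the
 * entry matching the hardware's id. Only four ids exist per port.
 */
struct pending { int used; unsigned int id; const char *name; };

static const char *claim(struct pending *q, int n, unsigned int id)
{
	for (int i = 0; i < n; i++) {
		if (q[i].used && q[i].id == id) {
			q[i].used = 0;
			return q[i].name;
		}
	}
	return NULL;
}

int main(void)
{
	struct pending q[4] = {
		{ 1, 0, "sync" }, { 1, 1, "delay_req" },
	};
	const char *name = claim(q, 4, 1);

	printf("id 1 -> %s\n", name ? name : "(none)");	/* delay_req */
	name = claim(q, 4, 3);
	printf("id 3 -> %s\n", name ? name : "(none)");	/* (none) */
	return 0;
}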
static const struct of_device_id mscc_ocelot_match[] = {
{ .compatible = "mscc,vsc7514-switch" },
{ }
@@ -171,17 +251,18 @@ MODULE_DEVICE_TABLE(of, mscc_ocelot_match);
static int mscc_ocelot_probe(struct platform_device *pdev)
{
- int err, irq;
- unsigned int i;
struct device_node *np = pdev->dev.of_node;
struct device_node *ports, *portnp;
+ int err, irq_xtr, irq_ptp_rdy;
struct ocelot *ocelot;
struct regmap *hsio;
+ unsigned int i;
u32 val;
struct {
enum ocelot_target id;
char *name;
+ u8 optional:1;
} res[] = {
{ SYS, "sys" },
{ REW, "rew" },
@@ -189,6 +270,7 @@ static int mscc_ocelot_probe(struct platform_device *pdev)
{ ANA, "ana" },
{ QS, "qs" },
{ S2, "s2" },
+ { PTP, "ptp", 1 },
};
if (!np && !pdev->dev.platform_data)
@@ -205,8 +287,14 @@ static int mscc_ocelot_probe(struct platform_device *pdev)
struct regmap *target;
target = ocelot_io_platform_init(ocelot, pdev, res[i].name);
- if (IS_ERR(target))
+ if (IS_ERR(target)) {
+ if (res[i].optional) {
+ ocelot->targets[res[i].id] = NULL;
+ continue;
+ }
+
return PTR_ERR(target);
+ }
ocelot->targets[res[i].id] = target;
}
@@ -223,16 +311,29 @@ static int mscc_ocelot_probe(struct platform_device *pdev)
if (err)
return err;
- irq = platform_get_irq_byname(pdev, "xtr");
- if (irq < 0)
+ irq_xtr = platform_get_irq_byname(pdev, "xtr");
+ if (irq_xtr < 0)
return -ENODEV;
- err = devm_request_threaded_irq(&pdev->dev, irq, NULL,
+ err = devm_request_threaded_irq(&pdev->dev, irq_xtr, NULL,
ocelot_xtr_irq_handler, IRQF_ONESHOT,
"frame extraction", ocelot);
if (err)
return err;
+ irq_ptp_rdy = platform_get_irq_byname(pdev, "ptp_rdy");
+ if (irq_ptp_rdy > 0 && ocelot->targets[PTP]) {
+ err = devm_request_threaded_irq(&pdev->dev, irq_ptp_rdy, NULL,
+ ocelot_ptp_rdy_irq_handler,
+ IRQF_ONESHOT, "ptp ready",
+ ocelot);
+ if (err)
+ return err;
+
+ /* Both the PTP interrupt and the PTP bank are available */
+ ocelot->ptp = 1;
+ }
+
regmap_field_write(ocelot->regfields[SYS_RESET_CFG_MEM_INIT], 1);
regmap_field_write(ocelot->regfields[SYS_RESET_CFG_MEM_ENA], 1);
diff --git a/drivers/net/ethernet/mscc/ocelot_ptp.h b/drivers/net/ethernet/mscc/ocelot_ptp.h
new file mode 100644
index 000000000000..9ede14a12573
--- /dev/null
+++ b/drivers/net/ethernet/mscc/ocelot_ptp.h
@@ -0,0 +1,41 @@
+/* SPDX-License-Identifier: (GPL-2.0 OR MIT) */
+/*
+ * Microsemi Ocelot Switch driver
+ *
+ * License: Dual MIT/GPL
+ * Copyright (c) 2017 Microsemi Corporation
+ */
+
+#ifndef _MSCC_OCELOT_PTP_H_
+#define _MSCC_OCELOT_PTP_H_
+
+#define PTP_PIN_CFG_RSZ 0x20
+#define PTP_PIN_TOD_SEC_MSB_RSZ PTP_PIN_CFG_RSZ
+#define PTP_PIN_TOD_SEC_LSB_RSZ PTP_PIN_CFG_RSZ
+#define PTP_PIN_TOD_NSEC_RSZ PTP_PIN_CFG_RSZ
+
+#define PTP_PIN_CFG_DOM BIT(0)
+#define PTP_PIN_CFG_SYNC BIT(2)
+#define PTP_PIN_CFG_ACTION(x) ((x) << 3)
+#define PTP_PIN_CFG_ACTION_MASK PTP_PIN_CFG_ACTION(0x7)
+
+enum {
+ PTP_PIN_ACTION_IDLE = 0,
+ PTP_PIN_ACTION_LOAD,
+ PTP_PIN_ACTION_SAVE,
+ PTP_PIN_ACTION_CLOCK,
+ PTP_PIN_ACTION_DELTA,
+ PTP_PIN_ACTION_NOSYNC,
+ PTP_PIN_ACTION_SYNC,
+};
+
+#define PTP_CFG_MISC_PTP_EN BIT(2)
+
+#define PSEC_PER_SEC 1000000000000LL
+
+#define PTP_CFG_CLK_ADJ_CFG_ENA BIT(0)
+#define PTP_CFG_CLK_ADJ_CFG_DIR BIT(1)
+
+#define PTP_CFG_CLK_ADJ_FREQ_NS BIT(30)
+
+#endif
diff --git a/drivers/net/ethernet/mscc/ocelot_regs.c b/drivers/net/ethernet/mscc/ocelot_regs.c
index 6c387f994ec5..e59977d20400 100644
--- a/drivers/net/ethernet/mscc/ocelot_regs.c
+++ b/drivers/net/ethernet/mscc/ocelot_regs.c
@@ -234,6 +234,16 @@ static const u32 ocelot_s2_regmap[] = {
REG(S2_CACHE_TG_DAT, 0x000388),
};
+static const u32 ocelot_ptp_regmap[] = {
+ REG(PTP_PIN_CFG, 0x000000),
+ REG(PTP_PIN_TOD_SEC_MSB, 0x000004),
+ REG(PTP_PIN_TOD_SEC_LSB, 0x000008),
+ REG(PTP_PIN_TOD_NSEC, 0x00000c),
+ REG(PTP_CFG_MISC, 0x0000a0),
+ REG(PTP_CLK_CFG_ADJ_CFG, 0x0000a4),
+ REG(PTP_CLK_CFG_ADJ_FREQ, 0x0000a8),
+};
+
static const u32 *ocelot_regmap[] = {
[ANA] = ocelot_ana_regmap,
[QS] = ocelot_qs_regmap,
@@ -241,6 +251,7 @@ static const u32 *ocelot_regmap[] = {
[REW] = ocelot_rew_regmap,
[SYS] = ocelot_sys_regmap,
[S2] = ocelot_s2_regmap,
+ [PTP] = ocelot_ptp_regmap,
};
static const struct reg_field ocelot_regfields[] = {
diff --git a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
index 337b0cbfd153..c979f38a2e0c 100644
--- a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
+++ b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
@@ -1286,7 +1286,7 @@ myri10ge_vlan_rx(struct net_device *dev, void *addr, struct sk_buff *skb)
{
u8 *va;
struct vlan_ethhdr *veh;
- struct skb_frag_struct *frag;
+ skb_frag_t *frag;
__wsum vsum;
va = addr;
@@ -1306,8 +1306,8 @@ myri10ge_vlan_rx(struct net_device *dev, void *addr, struct sk_buff *skb)
skb->len -= VLAN_HLEN;
skb->data_len -= VLAN_HLEN;
frag = skb_shinfo(skb)->frags;
- frag->page_offset += VLAN_HLEN;
- skb_frag_size_set(frag, skb_frag_size(frag) - VLAN_HLEN);
+ skb_frag_off_add(frag, VLAN_HLEN);
+ skb_frag_size_sub(frag, VLAN_HLEN);
}
}
@@ -1318,7 +1318,7 @@ myri10ge_rx_done(struct myri10ge_slice_state *ss, int len, __wsum csum)
{
struct myri10ge_priv *mgp = ss->mgp;
struct sk_buff *skb;
- struct skb_frag_struct *rx_frags;
+ skb_frag_t *rx_frags;
struct myri10ge_rx_buf *rx;
int i, idx, remainder, bytes;
struct pci_dev *pdev = mgp->pdev;
@@ -1351,7 +1351,7 @@ myri10ge_rx_done(struct myri10ge_slice_state *ss, int len, __wsum csum)
return 0;
}
rx_frags = skb_shinfo(skb)->frags;
- /* Fill skb_frag_struct(s) with data from our receive */
+ /* Fill skb_frag_t(s) with data from our receive */
for (i = 0, remainder = len; remainder > 0; i++) {
myri10ge_unmap_rx_page(pdev, &rx->info[idx], bytes);
skb_fill_page_desc(skb, i, rx->info[idx].page,
@@ -1364,8 +1364,8 @@ myri10ge_rx_done(struct myri10ge_slice_state *ss, int len, __wsum csum)
}
/* remove padding */
- rx_frags[0].page_offset += MXGEFW_PAD;
- rx_frags[0].size -= MXGEFW_PAD;
+ skb_frag_off_add(&rx_frags[0], MXGEFW_PAD);
+ skb_frag_size_sub(&rx_frags[0], MXGEFW_PAD);
len -= MXGEFW_PAD;
skb->len = len;
@@ -2628,7 +2628,7 @@ static netdev_tx_t myri10ge_xmit(struct sk_buff *skb,
struct myri10ge_slice_state *ss;
struct mcp_kreq_ether_send *req;
struct myri10ge_tx_buf *tx;
- struct skb_frag_struct *frag;
+ skb_frag_t *frag;
struct netdev_queue *netdev_queue;
dma_addr_t bus;
u32 low;
@@ -3037,7 +3037,6 @@ static int myri10ge_set_mac_address(struct net_device *dev, void *addr)
static int myri10ge_change_mtu(struct net_device *dev, int new_mtu)
{
struct myri10ge_priv *mgp = netdev_priv(dev);
- int error = 0;
netdev_info(dev, "changing mtu from %d to %d\n", dev->mtu, new_mtu);
if (mgp->running) {
@@ -3049,7 +3048,7 @@ static int myri10ge_change_mtu(struct net_device *dev, int new_mtu)
} else
dev->mtu = new_mtu;
- return error;
+ return 0;
}
/*
diff --git a/drivers/net/ethernet/netronome/nfp/flower/action.c b/drivers/net/ethernet/netronome/nfp/flower/action.c
index 5a54fe848de4..1b019fdfcd97 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/action.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/action.c
@@ -2,10 +2,12 @@
/* Copyright (C) 2017-2018 Netronome Systems, Inc. */
#include <linux/bitfield.h>
+#include <linux/mpls.h>
#include <net/pkt_cls.h>
#include <net/tc_act/tc_csum.h>
#include <net/tc_act/tc_gact.h>
#include <net/tc_act/tc_mirred.h>
+#include <net/tc_act/tc_mpls.h>
#include <net/tc_act/tc_pedit.h>
#include <net/tc_act/tc_vlan.h>
#include <net/tc_act/tc_tunnel_key.h>
@@ -25,6 +27,80 @@
NFP_FL_TUNNEL_KEY | \
NFP_FL_TUNNEL_GENEVE_OPT)
+static int
+nfp_fl_push_mpls(struct nfp_fl_push_mpls *push_mpls,
+ const struct flow_action_entry *act,
+ struct netlink_ext_ack *extack)
+{
+ size_t act_size = sizeof(struct nfp_fl_push_mpls);
+ u32 mpls_lse = 0;
+
+ push_mpls->head.jump_id = NFP_FL_ACTION_OPCODE_PUSH_MPLS;
+ push_mpls->head.len_lw = act_size >> NFP_FL_LW_SIZ;
+
+ /* BOS is optional in the TC action but required for offload. */
+ if (act->mpls_push.bos != ACT_MPLS_BOS_NOT_SET) {
+ mpls_lse |= act->mpls_push.bos << MPLS_LS_S_SHIFT;
+ } else {
+ NL_SET_ERR_MSG_MOD(extack, "unsupported offload: BOS field must explicitly be set for MPLS push");
+ return -EOPNOTSUPP;
+ }
+
+ /* Leave MPLS TC at its default value of 0 if not explicitly set. */
+ if (act->mpls_push.tc != ACT_MPLS_TC_NOT_SET)
+ mpls_lse |= act->mpls_push.tc << MPLS_LS_TC_SHIFT;
+
+ /* Proto, label and TTL are enforced and verified for MPLS push. */
+ mpls_lse |= act->mpls_push.label << MPLS_LS_LABEL_SHIFT;
+ mpls_lse |= act->mpls_push.ttl << MPLS_LS_TTL_SHIFT;
+ push_mpls->ethtype = act->mpls_push.proto;
+ push_mpls->lse = cpu_to_be32(mpls_lse);
+
+ return 0;
+}
+
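The label stack entry is assembled from the uapi/linux/mpls.h shifts: label in bits 31:12, TC in 11:9, bottom-of-stack in 8, TTL in 7:0. A stand-alone composition of one LSE using the same shifts; the field values are arbitrary:

#include <stdint.h>
#include <stdio.h>

/* Label stack entry layout, as in uapi/linux/mpls.h. */
#define MPLS_LS_LABEL_SHIFT	12
#define MPLS_LS_TC_SHIFT	9
#define MPLS_LS_S_SHIFT		8
#define MPLS_LS_TTL_SHIFT	0

int main(void)
{
	/* Compose the LSE the same way nfp_fl_push_mpls() does. */
	uint32_t lse = 0;

	lse |= 16000u << MPLS_LS_LABEL_SHIFT;	/* label */
	lse |= 5u << MPLS_LS_TC_SHIFT;		/* traffic class */
	lse |= 1u << MPLS_LS_S_SHIFT;		/* bottom of stack */
	lse |= 64u << MPLS_LS_TTL_SHIFT;	/* TTL */

	printf("lse = 0x%08x\n", lse);	/* 0x03e80b40 */
	return 0;
}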
+static void
+nfp_fl_pop_mpls(struct nfp_fl_pop_mpls *pop_mpls,
+ const struct flow_action_entry *act)
+{
+ size_t act_size = sizeof(struct nfp_fl_pop_mpls);
+
+ pop_mpls->head.jump_id = NFP_FL_ACTION_OPCODE_POP_MPLS;
+ pop_mpls->head.len_lw = act_size >> NFP_FL_LW_SIZ;
+ pop_mpls->ethtype = act->mpls_pop.proto;
+}
+
+static void
+nfp_fl_set_mpls(struct nfp_fl_set_mpls *set_mpls,
+ const struct flow_action_entry *act)
+{
+ size_t act_size = sizeof(struct nfp_fl_set_mpls);
+ u32 mpls_lse = 0, mpls_mask = 0;
+
+ set_mpls->head.jump_id = NFP_FL_ACTION_OPCODE_SET_MPLS;
+ set_mpls->head.len_lw = act_size >> NFP_FL_LW_SIZ;
+
+ if (act->mpls_mangle.label != ACT_MPLS_LABEL_NOT_SET) {
+ mpls_lse |= act->mpls_mangle.label << MPLS_LS_LABEL_SHIFT;
+ mpls_mask |= MPLS_LS_LABEL_MASK;
+ }
+ if (act->mpls_mangle.tc != ACT_MPLS_TC_NOT_SET) {
+ mpls_lse |= act->mpls_mangle.tc << MPLS_LS_TC_SHIFT;
+ mpls_mask |= MPLS_LS_TC_MASK;
+ }
+ if (act->mpls_mangle.bos != ACT_MPLS_BOS_NOT_SET) {
+ mpls_lse |= act->mpls_mangle.bos << MPLS_LS_S_SHIFT;
+ mpls_mask |= MPLS_LS_S_MASK;
+ }
+ if (act->mpls_mangle.ttl) {
+ mpls_lse |= act->mpls_mangle.ttl << MPLS_LS_TTL_SHIFT;
+ mpls_mask |= MPLS_LS_TTL_MASK;
+ }
+
+ set_mpls->lse = cpu_to_be32(mpls_lse);
+ set_mpls->lse_mask = cpu_to_be32(mpls_mask);
+}
+
static void nfp_fl_pop_vlan(struct nfp_fl_pop_vlan *pop_vlan)
{
size_t act_size = sizeof(struct nfp_fl_pop_vlan);
@@ -97,7 +173,7 @@ nfp_fl_output(struct nfp_app *app, struct nfp_fl_output *output,
struct nfp_fl_payload *nfp_flow,
bool last, struct net_device *in_dev,
enum nfp_flower_tun_type tun_type, int *tun_out_cnt,
- struct netlink_ext_ack *extack)
+ bool pkt_host, struct netlink_ext_ack *extack)
{
size_t act_size = sizeof(struct nfp_fl_output);
struct nfp_flower_priv *priv = app->priv;
@@ -142,6 +218,20 @@ nfp_fl_output(struct nfp_app *app, struct nfp_fl_output *output,
return gid;
}
output->port = cpu_to_be32(NFP_FL_LAG_OUT | gid);
+ } else if (nfp_flower_internal_port_can_offload(app, out_dev)) {
+ if (!(priv->flower_ext_feats & NFP_FL_FEATS_PRE_TUN_RULES)) {
+ NL_SET_ERR_MSG_MOD(extack, "unsupported offload: pre-tunnel rules not supported in loaded firmware");
+ return -EOPNOTSUPP;
+ }
+
+ if (nfp_flow->pre_tun_rule.dev || !pkt_host) {
+ NL_SET_ERR_MSG_MOD(extack, "unsupported offload: pre-tunnel rules require single egress dev and ptype HOST action");
+ return -EOPNOTSUPP;
+ }
+
+ nfp_flow->pre_tun_rule.dev = out_dev;
+
+ return 0;
} else {
/* Set action output parameters. */
output->flags = cpu_to_be16(tmp_flags);
@@ -809,7 +899,7 @@ nfp_flower_output_action(struct nfp_app *app,
struct nfp_fl_payload *nfp_fl, int *a_len,
struct net_device *netdev, bool last,
enum nfp_flower_tun_type *tun_type, int *tun_out_cnt,
- int *out_cnt, u32 *csum_updated,
+ int *out_cnt, u32 *csum_updated, bool pkt_host,
struct netlink_ext_ack *extack)
{
struct nfp_flower_priv *priv = app->priv;
@@ -831,7 +921,7 @@ nfp_flower_output_action(struct nfp_app *app,
output = (struct nfp_fl_output *)&nfp_fl->action_data[*a_len];
err = nfp_fl_output(app, output, act, nfp_fl, last, netdev, *tun_type,
- tun_out_cnt, extack);
+ tun_out_cnt, pkt_host, extack);
if (err)
return err;
@@ -863,30 +953,37 @@ nfp_flower_loop_action(struct nfp_app *app, const struct flow_action_entry *act,
struct net_device *netdev,
enum nfp_flower_tun_type *tun_type, int *tun_out_cnt,
int *out_cnt, u32 *csum_updated,
- struct nfp_flower_pedit_acts *set_act,
+ struct nfp_flower_pedit_acts *set_act, bool *pkt_host,
struct netlink_ext_ack *extack, int act_idx)
{
struct nfp_fl_set_ipv4_tun *set_tun;
struct nfp_fl_pre_tunnel *pre_tun;
struct nfp_fl_push_vlan *psh_v;
+ struct nfp_fl_push_mpls *psh_m;
struct nfp_fl_pop_vlan *pop_v;
+ struct nfp_fl_pop_mpls *pop_m;
+ struct nfp_fl_set_mpls *set_m;
int err;
switch (act->id) {
case FLOW_ACTION_DROP:
nfp_fl->meta.shortcut = cpu_to_be32(NFP_FL_SC_ACT_DROP);
break;
+ case FLOW_ACTION_REDIRECT_INGRESS:
case FLOW_ACTION_REDIRECT:
err = nfp_flower_output_action(app, act, nfp_fl, a_len, netdev,
true, tun_type, tun_out_cnt,
- out_cnt, csum_updated, extack);
+ out_cnt, csum_updated, *pkt_host,
+ extack);
if (err)
return err;
break;
+ case FLOW_ACTION_MIRRED_INGRESS:
case FLOW_ACTION_MIRRED:
err = nfp_flower_output_action(app, act, nfp_fl, a_len, netdev,
false, tun_type, tun_out_cnt,
- out_cnt, csum_updated, extack);
+ out_cnt, csum_updated, *pkt_host,
+ extack);
if (err)
return err;
break;
@@ -975,6 +1072,54 @@ nfp_flower_loop_action(struct nfp_app *app, const struct flow_action_entry *act,
*/
*csum_updated &= ~act->csum_flags;
break;
+ case FLOW_ACTION_MPLS_PUSH:
+ if (*a_len +
+ sizeof(struct nfp_fl_push_mpls) > NFP_FL_MAX_A_SIZ) {
+ NL_SET_ERR_MSG_MOD(extack, "unsupported offload: maximum allowed action list size exceeded at push MPLS");
+ return -EOPNOTSUPP;
+ }
+
+ psh_m = (struct nfp_fl_push_mpls *)&nfp_fl->action_data[*a_len];
+ nfp_fl->meta.shortcut = cpu_to_be32(NFP_FL_SC_ACT_NULL);
+
+ err = nfp_fl_push_mpls(psh_m, act, extack);
+ if (err)
+ return err;
+ *a_len += sizeof(struct nfp_fl_push_mpls);
+ break;
+ case FLOW_ACTION_MPLS_POP:
+ if (*a_len +
+ sizeof(struct nfp_fl_pop_mpls) > NFP_FL_MAX_A_SIZ) {
+ NL_SET_ERR_MSG_MOD(extack, "unsupported offload: maximum allowed action list size exceeded at pop MPLS");
+ return -EOPNOTSUPP;
+ }
+
+ pop_m = (struct nfp_fl_pop_mpls *)&nfp_fl->action_data[*a_len];
+ nfp_fl->meta.shortcut = cpu_to_be32(NFP_FL_SC_ACT_NULL);
+
+ nfp_fl_pop_mpls(pop_m, act);
+ *a_len += sizeof(struct nfp_fl_pop_mpls);
+ break;
+ case FLOW_ACTION_MPLS_MANGLE:
+ if (*a_len +
+ sizeof(struct nfp_fl_set_mpls) > NFP_FL_MAX_A_SIZ) {
+ NL_SET_ERR_MSG_MOD(extack, "unsupported offload: maximum allowed action list size exceeded at set MPLS");
+ return -EOPNOTSUPP;
+ }
+
+ set_m = (struct nfp_fl_set_mpls *)&nfp_fl->action_data[*a_len];
+ nfp_fl->meta.shortcut = cpu_to_be32(NFP_FL_SC_ACT_NULL);
+
+ nfp_fl_set_mpls(set_m, act);
+ *a_len += sizeof(struct nfp_fl_set_mpls);
+ break;
+ case FLOW_ACTION_PTYPE:
+ /* TC ptype skbedit sets PACKET_HOST for ingress redirect. */
+ if (act->ptype != PACKET_HOST)
+ return -EOPNOTSUPP;
+
+ *pkt_host = true;
+ break;
default:
/* Currently we do not handle any other actions. */
NL_SET_ERR_MSG_MOD(extack, "unsupported offload: unsupported action in action list");
@@ -1030,6 +1175,7 @@ int nfp_flower_compile_action(struct nfp_app *app,
struct nfp_flower_pedit_acts set_act;
enum nfp_flower_tun_type tun_type;
struct flow_action_entry *act;
+ bool pkt_host = false;
u32 csum_updated = 0;
memset(nfp_flow->action_data, 0, NFP_FL_MAX_A_SIZ);
@@ -1046,7 +1192,7 @@ int nfp_flower_compile_action(struct nfp_app *app,
err = nfp_flower_loop_action(app, act, flow, nfp_flow, &act_len,
netdev, &tun_type, &tun_out_cnt,
&out_cnt, &csum_updated,
- &set_act, extack, i);
+ &set_act, &pkt_host, extack, i);
if (err)
return err;
act_cnt++;
diff --git a/drivers/net/ethernet/netronome/nfp/flower/cmsg.h b/drivers/net/ethernet/netronome/nfp/flower/cmsg.h
index 0f1706ae5bfc..7eb2ec8969c3 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/cmsg.h
+++ b/drivers/net/ethernet/netronome/nfp/flower/cmsg.h
@@ -68,8 +68,11 @@
#define NFP_FL_ACTION_OPCODE_OUTPUT 0
#define NFP_FL_ACTION_OPCODE_PUSH_VLAN 1
#define NFP_FL_ACTION_OPCODE_POP_VLAN 2
+#define NFP_FL_ACTION_OPCODE_PUSH_MPLS 3
+#define NFP_FL_ACTION_OPCODE_POP_MPLS 4
#define NFP_FL_ACTION_OPCODE_SET_IPV4_TUNNEL 6
#define NFP_FL_ACTION_OPCODE_SET_ETHERNET 7
+#define NFP_FL_ACTION_OPCODE_SET_MPLS 8
#define NFP_FL_ACTION_OPCODE_SET_IPV4_ADDRS 9
#define NFP_FL_ACTION_OPCODE_SET_IPV4_TTL_TOS 10
#define NFP_FL_ACTION_OPCODE_SET_IPV6_SRC 11
@@ -217,7 +220,8 @@ struct nfp_fl_set_ipv4_tun {
__be16 tun_flags;
u8 ttl;
u8 tos;
- __be32 extra;
+ __be16 outer_vlan_tpid;
+ __be16 outer_vlan_tci;
u8 tun_len;
u8 res2;
__be16 tun_proto;
@@ -232,6 +236,24 @@ struct nfp_fl_push_geneve {
u8 opt_data[];
};
+struct nfp_fl_push_mpls {
+ struct nfp_fl_act_head head;
+ __be16 ethtype;
+ __be32 lse;
+};
+
+struct nfp_fl_pop_mpls {
+ struct nfp_fl_act_head head;
+ __be16 ethtype;
+};
+
+struct nfp_fl_set_mpls {
+ struct nfp_fl_act_head head;
+ __be16 reserved;
+ __be32 lse_mask;
+ __be32 lse;
+};
+
/* Metadata with L2 (1W/4B)
* ----------------------------------------------------------------
* 3 2 1
@@ -462,6 +484,7 @@ enum nfp_flower_cmsg_type_port {
NFP_FLOWER_CMSG_TYPE_QOS_MOD = 18,
NFP_FLOWER_CMSG_TYPE_QOS_DEL = 19,
NFP_FLOWER_CMSG_TYPE_QOS_STATS = 20,
+ NFP_FLOWER_CMSG_TYPE_PRE_TUN_RULE = 21,
NFP_FLOWER_CMSG_TYPE_MAX = 32,
};
diff --git a/drivers/net/ethernet/netronome/nfp/flower/main.c b/drivers/net/ethernet/netronome/nfp/flower/main.c
index eb846133943b..7a20447cca19 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/main.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/main.c
@@ -781,6 +781,7 @@ static int nfp_flower_init(struct nfp_app *app)
INIT_LIST_HEAD(&app_priv->indr_block_cb_priv);
INIT_LIST_HEAD(&app_priv->non_repr_priv);
+ app_priv->pre_tun_rule_cnt = 0;
return 0;
diff --git a/drivers/net/ethernet/netronome/nfp/flower/main.h b/drivers/net/ethernet/netronome/nfp/flower/main.h
index af9441d5787f..31d94592a7c0 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/main.h
+++ b/drivers/net/ethernet/netronome/nfp/flower/main.h
@@ -42,6 +42,7 @@ struct nfp_app;
#define NFP_FL_FEATS_VLAN_PCP BIT(3)
#define NFP_FL_FEATS_VF_RLIM BIT(4)
#define NFP_FL_FEATS_FLOW_MOD BIT(5)
+#define NFP_FL_FEATS_PRE_TUN_RULES BIT(6)
#define NFP_FL_FEATS_FLOW_MERGE BIT(30)
#define NFP_FL_FEATS_LAG BIT(31)
@@ -162,6 +163,7 @@ struct nfp_fl_internal_ports {
* @qos_stats_work: Workqueue for qos stats processing
* @qos_rate_limiters: Current active qos rate limiters
* @qos_stats_lock: Lock on qos stats updates
+ * @pre_tun_rule_cnt: Number of pre-tunnel rules offloaded
*/
struct nfp_flower_priv {
struct nfp_app *app;
@@ -193,6 +195,7 @@ struct nfp_flower_priv {
struct delayed_work qos_stats_work;
unsigned int qos_rate_limiters;
spinlock_t qos_stats_lock; /* Protect the qos stats */
+ int pre_tun_rule_cnt;
};
/**
@@ -218,6 +221,7 @@ struct nfp_fl_qos {
* @block_shared: Flag indicating if offload applies to shared blocks
* @mac_list: List entry of reprs that share the same offloaded MAC
* @qos_table: Stored info on filters implementing qos
+ * @on_bridge: Indicates if the repr is attached to a bridge
*/
struct nfp_flower_repr_priv {
struct nfp_repr *nfp_repr;
@@ -227,6 +231,7 @@ struct nfp_flower_repr_priv {
bool block_shared;
struct list_head mac_list;
struct nfp_fl_qos qos_table;
+ bool on_bridge;
};
/**
@@ -280,6 +285,11 @@ struct nfp_fl_payload {
char *action_data;
struct list_head linked_flows;
bool in_hw;
+ struct {
+ struct net_device *dev;
+ __be16 vlan_tci;
+ __be16 port_idx;
+ } pre_tun_rule;
};
struct nfp_fl_payload_link {
@@ -333,6 +343,11 @@ static inline bool nfp_flower_is_merge_flow(struct nfp_fl_payload *flow_pay)
return flow_pay->tc_flower_cookie == (unsigned long)flow_pay;
}
+static inline bool nfp_flower_is_supported_bridge(struct net_device *netdev)
+{
+ return netif_is_ovs_master(netdev);
+}
+
int nfp_flower_metadata_init(struct nfp_app *app, u64 host_ctx_count,
unsigned int host_ctx_split);
void nfp_flower_metadata_cleanup(struct nfp_app *app);
@@ -415,4 +430,8 @@ void
nfp_flower_non_repr_priv_put(struct nfp_app *app, struct net_device *netdev);
u32 nfp_flower_get_port_id_from_netdev(struct nfp_app *app,
struct net_device *netdev);
+int nfp_flower_xmit_pre_tun_flow(struct nfp_app *app,
+ struct nfp_fl_payload *flow);
+int nfp_flower_xmit_pre_tun_del_flow(struct nfp_app *app,
+ struct nfp_fl_payload *flow);
#endif
diff --git a/drivers/net/ethernet/netronome/nfp/flower/offload.c b/drivers/net/ethernet/netronome/nfp/flower/offload.c
index 457bdc60f3ee..987ae221f6be 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/offload.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/offload.c
@@ -61,6 +61,11 @@
NFP_FLOWER_LAYER_IPV4 | \
NFP_FLOWER_LAYER_IPV6)
+#define NFP_FLOWER_PRE_TUN_RULE_FIELDS \
+ (NFP_FLOWER_LAYER_PORT | \
+ NFP_FLOWER_LAYER_MAC | \
+ NFP_FLOWER_LAYER_IPV4)
+
struct nfp_flower_merge_check {
union {
struct {
@@ -489,6 +494,7 @@ nfp_flower_allocate_new(struct nfp_fl_key_ls *key_layer)
flow_pay->meta.flags = 0;
INIT_LIST_HEAD(&flow_pay->linked_flows);
flow_pay->in_hw = false;
+ flow_pay->pre_tun_rule.dev = NULL;
return flow_pay;
@@ -732,28 +738,62 @@ nfp_flower_copy_pre_actions(char *act_dst, char *act_src, int len,
return act_off;
}
-static int nfp_fl_verify_post_tun_acts(char *acts, int len)
+static int
+nfp_fl_verify_post_tun_acts(char *acts, int len, struct nfp_fl_push_vlan **vlan)
{
struct nfp_fl_act_head *a;
unsigned int act_off = 0;
while (act_off < len) {
a = (struct nfp_fl_act_head *)&acts[act_off];
- if (a->jump_id != NFP_FL_ACTION_OPCODE_OUTPUT)
+
+ if (a->jump_id == NFP_FL_ACTION_OPCODE_PUSH_VLAN && !act_off)
+ *vlan = (struct nfp_fl_push_vlan *)a;
+ else if (a->jump_id != NFP_FL_ACTION_OPCODE_OUTPUT)
return -EOPNOTSUPP;
act_off += a->len_lw << NFP_FL_LW_SIZ;
}
+ /* Ensure any VLAN push also has an egress action. */
+ if (*vlan && act_off <= sizeof(struct nfp_fl_push_vlan))
+ return -EOPNOTSUPP;
+
return 0;
}
static int
+nfp_fl_push_vlan_after_tun(char *acts, int len, struct nfp_fl_push_vlan *vlan)
+{
+ struct nfp_fl_set_ipv4_tun *tun;
+ struct nfp_fl_act_head *a;
+ unsigned int act_off = 0;
+
+ while (act_off < len) {
+ a = (struct nfp_fl_act_head *)&acts[act_off];
+
+ if (a->jump_id == NFP_FL_ACTION_OPCODE_SET_IPV4_TUNNEL) {
+ tun = (struct nfp_fl_set_ipv4_tun *)a;
+ tun->outer_vlan_tpid = vlan->vlan_tpid;
+ tun->outer_vlan_tci = vlan->vlan_tci;
+
+ return 0;
+ }
+
+ act_off += a->len_lw << NFP_FL_LW_SIZ;
+ }
+
+ /* Return error if no tunnel action is found. */
+ return -EOPNOTSUPP;
+}
+
+static int
nfp_flower_merge_action(struct nfp_fl_payload *sub_flow1,
struct nfp_fl_payload *sub_flow2,
struct nfp_fl_payload *merge_flow)
{
unsigned int sub1_act_len, sub2_act_len, pre_off1, pre_off2;
+ struct nfp_fl_push_vlan *post_tun_push_vlan = NULL;
bool tunnel_act = false;
char *merge_act;
int err;
@@ -790,18 +830,36 @@ nfp_flower_merge_action(struct nfp_fl_payload *sub_flow1,
sub2_act_len -= pre_off2;
/* FW does a tunnel push when egressing, therefore, if sub_flow 1 pushes
- * a tunnel, sub_flow 2 can only have output actions for a valid merge.
+ * a tunnel, there are restrictions on what sub_flow 2 actions lead to a
+ * valid merge.
*/
if (tunnel_act) {
char *post_tun_acts = &sub_flow2->action_data[pre_off2];
- err = nfp_fl_verify_post_tun_acts(post_tun_acts, sub2_act_len);
+ err = nfp_fl_verify_post_tun_acts(post_tun_acts, sub2_act_len,
+ &post_tun_push_vlan);
if (err)
return err;
+
+ if (post_tun_push_vlan) {
+ pre_off2 += sizeof(*post_tun_push_vlan);
+ sub2_act_len -= sizeof(*post_tun_push_vlan);
+ }
}
/* Copy remaining actions from sub_flows 1 and 2. */
memcpy(merge_act, sub_flow1->action_data + pre_off1, sub1_act_len);
+
+ if (post_tun_push_vlan) {
+ /* Update tunnel action in merge to include VLAN push. */
+ err = nfp_fl_push_vlan_after_tun(merge_act, sub1_act_len,
+ post_tun_push_vlan);
+ if (err)
+ return err;
+
+ merge_flow->meta.act_len -= sizeof(*post_tun_push_vlan);
+ }
+
merge_act += sub1_act_len;
memcpy(merge_act, sub_flow2->action_data + pre_off2, sub2_act_len);
@@ -945,6 +1003,106 @@ err_destroy_merge_flow:
}
/**
+ * nfp_flower_validate_pre_tun_rule() - Verifies the flow as a pre-tunnel rule.
+ * @app: Pointer to the APP handle
+ * @flow: Pointer to NFP flow representation of rule
+ * @extack: Netlink extended ACK report
+ *
+ * Return: negative value on error, 0 if verified.
+ */
+static int
+nfp_flower_validate_pre_tun_rule(struct nfp_app *app,
+ struct nfp_fl_payload *flow,
+ struct netlink_ext_ack *extack)
+{
+ struct nfp_flower_meta_tci *meta_tci;
+ struct nfp_flower_mac_mpls *mac;
+ struct nfp_fl_act_head *act;
+ u8 *mask = flow->mask_data;
+ bool vlan = false;
+ int act_offset;
+ u8 key_layer;
+
+ meta_tci = (struct nfp_flower_meta_tci *)flow->unmasked_data;
+ if (meta_tci->tci & cpu_to_be16(NFP_FLOWER_MASK_VLAN_PRESENT)) {
+ u16 vlan_tci = be16_to_cpu(meta_tci->tci);
+
+ vlan_tci &= ~NFP_FLOWER_MASK_VLAN_PRESENT;
+ flow->pre_tun_rule.vlan_tci = cpu_to_be16(vlan_tci);
+ vlan = true;
+ } else {
+ flow->pre_tun_rule.vlan_tci = cpu_to_be16(0xffff);
+ }
+
+ key_layer = meta_tci->nfp_flow_key_layer;
+ if (key_layer & ~NFP_FLOWER_PRE_TUN_RULE_FIELDS) {
+ NL_SET_ERR_MSG_MOD(extack, "unsupported pre-tunnel rule: too many match fields");
+ return -EOPNOTSUPP;
+ }
+
+ if (!(key_layer & NFP_FLOWER_LAYER_MAC)) {
+ NL_SET_ERR_MSG_MOD(extack, "unsupported pre-tunnel rule: MAC fields match required");
+ return -EOPNOTSUPP;
+ }
+
+ /* Skip fields known to exist. */
+ mask += sizeof(struct nfp_flower_meta_tci);
+ mask += sizeof(struct nfp_flower_in_port);
+
+ /* Ensure destination MAC address is fully matched. */
+ mac = (struct nfp_flower_mac_mpls *)mask;
+ if (!is_broadcast_ether_addr(&mac->mac_dst[0])) {
+ NL_SET_ERR_MSG_MOD(extack, "unsupported pre-tunnel rule: dest MAC field must not be masked");
+ return -EOPNOTSUPP;
+ }
+
+ if (key_layer & NFP_FLOWER_LAYER_IPV4) {
+ int ip_flags = offsetof(struct nfp_flower_ipv4, ip_ext.flags);
+ int ip_proto = offsetof(struct nfp_flower_ipv4, ip_ext.proto);
+ int i;
+
+ mask += sizeof(struct nfp_flower_mac_mpls);
+
+ /* Ensure proto and flags are the only IP layer fields. */
+ for (i = 0; i < sizeof(struct nfp_flower_ipv4); i++)
+ if (mask[i] && i != ip_flags && i != ip_proto) {
+ NL_SET_ERR_MSG_MOD(extack, "unsupported pre-tunnel rule: only flags and proto can be matched in ip header");
+ return -EOPNOTSUPP;
+ }
+ }
+
+ /* Action must be a single egress or pop_vlan and egress. */
+ act_offset = 0;
+ act = (struct nfp_fl_act_head *)&flow->action_data[act_offset];
+ if (vlan) {
+ if (act->jump_id != NFP_FL_ACTION_OPCODE_POP_VLAN) {
+ NL_SET_ERR_MSG_MOD(extack, "unsupported pre-tunnel rule: match on VLAN must have VLAN pop as first action");
+ return -EOPNOTSUPP;
+ }
+
+ act_offset += act->len_lw << NFP_FL_LW_SIZ;
+ act = (struct nfp_fl_act_head *)&flow->action_data[act_offset];
+ }
+
+ if (act->jump_id != NFP_FL_ACTION_OPCODE_OUTPUT) {
+ NL_SET_ERR_MSG_MOD(extack, "unsupported pre-tunnel rule: non egress action detected where egress was expected");
+ return -EOPNOTSUPP;
+ }
+
+ act_offset += act->len_lw << NFP_FL_LW_SIZ;
+
+ /* Ensure there are no more actions after egress. */
+ if (act_offset != flow->meta.act_len) {
+ NL_SET_ERR_MSG_MOD(extack, "unsupported pre-tunnel rule: egress is not the last action");
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+/**
* nfp_flower_add_offload() - Adds a new flow to hardware.
* @app: Pointer to the APP handle
* @netdev: netdev structure.
@@ -994,6 +1152,12 @@ nfp_flower_add_offload(struct nfp_app *app, struct net_device *netdev,
if (err)
goto err_destroy_flow;
+ if (flow_pay->pre_tun_rule.dev) {
+ err = nfp_flower_validate_pre_tun_rule(app, flow_pay, extack);
+ if (err)
+ goto err_destroy_flow;
+ }
+
err = nfp_compile_flow_metadata(app, flow, flow_pay, netdev, extack);
if (err)
goto err_destroy_flow;
@@ -1006,8 +1170,11 @@ nfp_flower_add_offload(struct nfp_app *app, struct net_device *netdev,
goto err_release_metadata;
}
- err = nfp_flower_xmit_flow(app, flow_pay,
- NFP_FLOWER_CMSG_TYPE_FLOW_ADD);
+ if (flow_pay->pre_tun_rule.dev)
+ err = nfp_flower_xmit_pre_tun_flow(app, flow_pay);
+ else
+ err = nfp_flower_xmit_flow(app, flow_pay,
+ NFP_FLOWER_CMSG_TYPE_FLOW_ADD);
if (err)
goto err_remove_rhash;
@@ -1149,8 +1316,11 @@ nfp_flower_del_offload(struct nfp_app *app, struct net_device *netdev,
goto err_free_merge_flow;
}
- err = nfp_flower_xmit_flow(app, nfp_flow,
- NFP_FLOWER_CMSG_TYPE_FLOW_DEL);
+ if (nfp_flow->pre_tun_rule.dev)
+ err = nfp_flower_xmit_pre_tun_del_flow(app, nfp_flow);
+ else
+ err = nfp_flower_xmit_flow(app, nfp_flow,
+ NFP_FLOWER_CMSG_TYPE_FLOW_DEL);
/* Fall through on error. */
err_free_merge_flow:
@@ -1487,16 +1657,17 @@ int nfp_flower_reg_indir_block_handler(struct nfp_app *app,
return NOTIFY_OK;
if (event == NETDEV_REGISTER) {
- err = __tc_indr_block_cb_register(netdev, app,
- nfp_flower_indr_setup_tc_cb,
- app);
+ err = __flow_indr_block_cb_register(netdev, app,
+ nfp_flower_indr_setup_tc_cb,
+ app);
if (err)
nfp_flower_cmsg_warn(app,
"Indirect block reg failed - %s\n",
netdev->name);
} else if (event == NETDEV_UNREGISTER) {
- __tc_indr_block_cb_unregister(netdev,
- nfp_flower_indr_setup_tc_cb, app);
+ __flow_indr_block_cb_unregister(netdev,
+ nfp_flower_indr_setup_tc_cb,
+ app);
}
return NOTIFY_OK;
diff --git a/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c b/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c
index f0ee982eb1b5..2600ce476d6b 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c
@@ -15,6 +15,24 @@
#define NFP_FL_MAX_ROUTES 32
+#define NFP_TUN_PRE_TUN_RULE_LIMIT 32
+#define NFP_TUN_PRE_TUN_RULE_DEL 0x1
+#define NFP_TUN_PRE_TUN_IDX_BIT 0x8
+
+/**
+ * struct nfp_tun_pre_tun_rule - rule matched before decap
+ * @flags: options for the rule offload
+ * @port_idx: index of destination MAC address for the rule
+ * @vlan_tci: VLAN info associated with MAC
+ * @host_ctx_id: stats context of rule to update
+ */
+struct nfp_tun_pre_tun_rule {
+ __be32 flags;
+ __be16 port_idx;
+ __be16 vlan_tci;
+ __be32 host_ctx_id;
+};
+
/**
* struct nfp_tun_active_tuns - periodic message of active tunnels
* @seq: sequence number of the message
@@ -124,11 +142,12 @@ enum nfp_flower_mac_offload_cmd {
/**
* struct nfp_tun_offloaded_mac - hashtable entry for an offloaded MAC
- * @ht_node: Hashtable entry
- * @addr: Offloaded MAC address
- * @index: Offloaded index for given MAC address
- * @ref_count: Number of devs using this MAC address
- * @repr_list: List of reprs sharing this MAC address
+ * @ht_node: Hashtable entry
+ * @addr: Offloaded MAC address
+ * @index: Offloaded index for given MAC address
+ * @ref_count: Number of devs using this MAC address
+ * @repr_list: List of reprs sharing this MAC address
+ * @bridge_count: Number of bridge/internal devs with MAC
*/
struct nfp_tun_offloaded_mac {
struct rhash_head ht_node;
@@ -136,6 +155,7 @@ struct nfp_tun_offloaded_mac {
u16 index;
int ref_count;
struct list_head repr_list;
+ int bridge_count;
};
static const struct rhashtable_params offloaded_macs_params = {
@@ -556,6 +576,8 @@ nfp_tunnel_offloaded_macs_inc_ref_and_link(struct nfp_tun_offloaded_mac *entry,
list_del(&repr_priv->mac_list);
list_add_tail(&repr_priv->mac_list, &entry->repr_list);
+ } else if (nfp_flower_is_supported_bridge(netdev)) {
+ entry->bridge_count++;
}
entry->ref_count++;
@@ -572,20 +594,35 @@ nfp_tunnel_add_shared_mac(struct nfp_app *app, struct net_device *netdev,
entry = nfp_tunnel_lookup_offloaded_macs(app, netdev->dev_addr);
if (entry && nfp_tunnel_is_mac_idx_global(entry->index)) {
- nfp_tunnel_offloaded_macs_inc_ref_and_link(entry, netdev, mod);
- return 0;
+ if (entry->bridge_count ||
+ !nfp_flower_is_supported_bridge(netdev)) {
+ nfp_tunnel_offloaded_macs_inc_ref_and_link(entry,
+ netdev, mod);
+ return 0;
+ }
+
+ /* MAC is global but matches need to go to pre_tun table. */
+ nfp_mac_idx = entry->index | NFP_TUN_PRE_TUN_IDX_BIT;
}
- /* Assign a global index if non-repr or MAC address is now shared. */
- if (entry || !port) {
- ida_idx = ida_simple_get(&priv->tun.mac_off_ids, 0,
- NFP_MAX_MAC_INDEX, GFP_KERNEL);
- if (ida_idx < 0)
- return ida_idx;
+ if (!nfp_mac_idx) {
+ /* Assign a global index if non-repr or MAC is now shared. */
+ if (entry || !port) {
+ ida_idx = ida_simple_get(&priv->tun.mac_off_ids, 0,
+ NFP_MAX_MAC_INDEX, GFP_KERNEL);
+ if (ida_idx < 0)
+ return ida_idx;
- nfp_mac_idx = nfp_tunnel_get_global_mac_idx_from_ida(ida_idx);
- } else {
- nfp_mac_idx = nfp_tunnel_get_mac_idx_from_phy_port_id(port);
+ nfp_mac_idx =
+ nfp_tunnel_get_global_mac_idx_from_ida(ida_idx);
+
+ if (nfp_flower_is_supported_bridge(netdev))
+ nfp_mac_idx |= NFP_TUN_PRE_TUN_IDX_BIT;
+
+ } else {
+ nfp_mac_idx =
+ nfp_tunnel_get_mac_idx_from_phy_port_id(port);
+ }
}
if (!entry) {
@@ -654,6 +691,25 @@ nfp_tunnel_del_shared_mac(struct nfp_app *app, struct net_device *netdev,
list_del(&repr_priv->mac_list);
}
+ if (nfp_flower_is_supported_bridge(netdev)) {
+ entry->bridge_count--;
+
+ if (!entry->bridge_count && entry->ref_count) {
+ u16 nfp_mac_idx;
+
+ nfp_mac_idx = entry->index & ~NFP_TUN_PRE_TUN_IDX_BIT;
+ if (__nfp_tunnel_offload_mac(app, mac, nfp_mac_idx,
+ false)) {
+ nfp_flower_cmsg_warn(app, "MAC offload index revert failed on %s.\n",
+ netdev_name(netdev));
+ return 0;
+ }
+
+ entry->index = nfp_mac_idx;
+ return 0;
+ }
+ }
+
/* If MAC is now used by 1 repr set the offloaded MAC index to port. */
if (entry->ref_count == 1 && list_is_singular(&entry->repr_list)) {
u16 nfp_mac_idx;
@@ -713,6 +769,9 @@ nfp_tunnel_offload_mac(struct nfp_app *app, struct net_device *netdev,
return 0;
repr_priv = repr->app_priv;
+ if (repr_priv->on_bridge)
+ return 0;
+
mac_offloaded = &repr_priv->mac_offloaded;
off_mac = &repr_priv->offloaded_mac_addr[0];
port = nfp_repr_get_port_id(netdev);
@@ -828,10 +887,119 @@ int nfp_tunnel_mac_event_handler(struct nfp_app *app,
if (err)
nfp_flower_cmsg_warn(app, "Failed to offload MAC change on %s.\n",
netdev_name(netdev));
+ } else if (event == NETDEV_CHANGEUPPER) {
+ /* If a repr is attached to a bridge then tunnel packets
+ * entering the physical port are directed through the bridge
+ * datapath and cannot be directly detunneled. Therefore,
+ * associated offloaded MACs and indexes should not be used
+ * by fw for detunneling.
+ */
+ struct netdev_notifier_changeupper_info *info = ptr;
+ struct net_device *upper = info->upper_dev;
+ struct nfp_flower_repr_priv *repr_priv;
+ struct nfp_repr *repr;
+
+ if (!nfp_netdev_is_nfp_repr(netdev) ||
+ !nfp_flower_is_supported_bridge(upper))
+ return NOTIFY_OK;
+
+ repr = netdev_priv(netdev);
+ if (repr->app != app)
+ return NOTIFY_OK;
+
+ repr_priv = repr->app_priv;
+
+ if (info->linking) {
+ if (nfp_tunnel_offload_mac(app, netdev,
+ NFP_TUNNEL_MAC_OFFLOAD_DEL))
+ nfp_flower_cmsg_warn(app, "Failed to delete offloaded MAC on %s.\n",
+ netdev_name(netdev));
+ repr_priv->on_bridge = true;
+ } else {
+ repr_priv->on_bridge = false;
+
+ if (!(netdev->flags & IFF_UP))
+ return NOTIFY_OK;
+
+ if (nfp_tunnel_offload_mac(app, netdev,
+ NFP_TUNNEL_MAC_OFFLOAD_ADD))
+ nfp_flower_cmsg_warn(app, "Failed to offload MAC on %s.\n",
+ netdev_name(netdev));
+ }
}
return NOTIFY_OK;
}
+int nfp_flower_xmit_pre_tun_flow(struct nfp_app *app,
+ struct nfp_fl_payload *flow)
+{
+ struct nfp_flower_priv *app_priv = app->priv;
+ struct nfp_tun_offloaded_mac *mac_entry;
+ struct nfp_tun_pre_tun_rule payload;
+ struct net_device *internal_dev;
+ int err;
+
+ if (app_priv->pre_tun_rule_cnt == NFP_TUN_PRE_TUN_RULE_LIMIT)
+ return -ENOSPC;
+
+ memset(&payload, 0, sizeof(struct nfp_tun_pre_tun_rule));
+
+ internal_dev = flow->pre_tun_rule.dev;
+ payload.vlan_tci = flow->pre_tun_rule.vlan_tci;
+ payload.host_ctx_id = flow->meta.host_ctx_id;
+
+ /* Lookup MAC index for the pre-tunnel rule egress device.
+ * Note that because the device is always an internal port, it will
+ * have a constant global index, so it does not need to be tracked.
+ */
+ mac_entry = nfp_tunnel_lookup_offloaded_macs(app,
+ internal_dev->dev_addr);
+ if (!mac_entry)
+ return -ENOENT;
+
+ payload.port_idx = cpu_to_be16(mac_entry->index);
+
+ /* Copy MAC index and VLAN info to flow - dev may not exist at delete time. */
+ flow->pre_tun_rule.vlan_tci = payload.vlan_tci;
+ flow->pre_tun_rule.port_idx = payload.port_idx;
+
+ err = nfp_flower_xmit_tun_conf(app, NFP_FLOWER_CMSG_TYPE_PRE_TUN_RULE,
+ sizeof(struct nfp_tun_pre_tun_rule),
+ (unsigned char *)&payload, GFP_KERNEL);
+ if (err)
+ return err;
+
+ app_priv->pre_tun_rule_cnt++;
+
+ return 0;
+}
+
+int nfp_flower_xmit_pre_tun_del_flow(struct nfp_app *app,
+ struct nfp_fl_payload *flow)
+{
+ struct nfp_flower_priv *app_priv = app->priv;
+ struct nfp_tun_pre_tun_rule payload;
+ u32 tmp_flags = 0;
+ int err;
+
+ memset(&payload, 0, sizeof(struct nfp_tun_pre_tun_rule));
+
+ tmp_flags |= NFP_TUN_PRE_TUN_RULE_DEL;
+ payload.flags = cpu_to_be32(tmp_flags);
+ payload.vlan_tci = flow->pre_tun_rule.vlan_tci;
+ payload.port_idx = flow->pre_tun_rule.port_idx;
+
+ err = nfp_flower_xmit_tun_conf(app, NFP_FLOWER_CMSG_TYPE_PRE_TUN_RULE,
+ sizeof(struct nfp_tun_pre_tun_rule),
+ (unsigned char *)&payload, GFP_KERNEL);
+ if (err)
+ return err;
+
+ app_priv->pre_tun_rule_cnt--;
+
+ return 0;
+}
+
int nfp_tunnel_config_start(struct nfp_app *app)
{
struct nfp_flower_priv *priv = app->priv;
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_main.c b/drivers/net/ethernet/netronome/nfp/nfp_main.c
index 60e57f08de80..81679647e842 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_main.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_main.c
@@ -815,6 +815,8 @@ static void __exit nfp_main_exit(void)
module_init(nfp_main_init);
module_exit(nfp_main_exit);
+MODULE_FIRMWARE("netronome/nic_AMDA0058-0011_2x40.nffw");
+MODULE_FIRMWARE("netronome/nic_AMDA0058-0012_2x40.nffw");
MODULE_FIRMWARE("netronome/nic_AMDA0081-0001_1x40.nffw");
MODULE_FIRMWARE("netronome/nic_AMDA0081-0001_4x10.nffw");
MODULE_FIRMWARE("netronome/nic_AMDA0096-0001_2x10.nffw");
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
index 9903805717da..6f97b554f7da 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
@@ -975,7 +975,7 @@ static int nfp_net_prep_tx_meta(struct sk_buff *skb, u64 tls_handle)
static int nfp_net_tx(struct sk_buff *skb, struct net_device *netdev)
{
struct nfp_net *nn = netdev_priv(netdev);
- const struct skb_frag_struct *frag;
+ const skb_frag_t *frag;
int f, nr_frags, wr_idx, md_bytes;
struct nfp_net_tx_ring *tx_ring;
struct nfp_net_r_vector *r_vec;
@@ -1155,7 +1155,7 @@ static void nfp_net_tx_complete(struct nfp_net_tx_ring *tx_ring, int budget)
todo = D_IDX(tx_ring, qcp_rd_p - tx_ring->qcp_rd_p);
while (todo--) {
- const struct skb_frag_struct *frag;
+ const skb_frag_t *frag;
struct nfp_net_tx_buf *tx_buf;
struct sk_buff *skb;
int fidx, nr_frags;
@@ -1270,7 +1270,7 @@ static bool nfp_net_xdp_complete(struct nfp_net_tx_ring *tx_ring)
static void
nfp_net_tx_ring_reset(struct nfp_net_dp *dp, struct nfp_net_tx_ring *tx_ring)
{
- const struct skb_frag_struct *frag;
+ const skb_frag_t *frag;
struct netdev_queue *nd_q;
while (!tx_ring->is_xdp && tx_ring->rd_p != tx_ring->wr_p) {
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_debugfs.c b/drivers/net/ethernet/netronome/nfp/nfp_net_debugfs.c
index ab7f2498e1c4..553c708694e8 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_debugfs.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_debugfs.c
@@ -159,19 +159,13 @@ void nfp_net_debugfs_vnic_add(struct nfp_net *nn, struct dentry *ddir)
else
strcpy(name, "ctrl-vnic");
nn->debugfs_dir = debugfs_create_dir(name, ddir);
- if (IS_ERR_OR_NULL(nn->debugfs_dir))
- return;
/* Create queue debugging sub-tree */
queues = debugfs_create_dir("queue", nn->debugfs_dir);
- if (IS_ERR_OR_NULL(queues))
- return;
rx = debugfs_create_dir("rx", queues);
tx = debugfs_create_dir("tx", queues);
xdp = debugfs_create_dir("xdp", queues);
- if (IS_ERR_OR_NULL(rx) || IS_ERR_OR_NULL(tx) || IS_ERR_OR_NULL(xdp))
- return;
for (i = 0; i < min(nn->max_rx_rings, nn->max_r_vecs); i++) {
sprintf(name, "%d", i);
@@ -190,16 +184,7 @@ void nfp_net_debugfs_vnic_add(struct nfp_net *nn, struct dentry *ddir)
struct dentry *nfp_net_debugfs_device_add(struct pci_dev *pdev)
{
- struct dentry *dev_dir;
-
- if (IS_ERR_OR_NULL(nfp_dir))
- return NULL;
-
- dev_dir = debugfs_create_dir(pci_name(pdev), nfp_dir);
- if (IS_ERR_OR_NULL(dev_dir))
- return NULL;
-
- return dev_dir;
+ return debugfs_create_dir(pci_name(pdev), nfp_dir);
}
void nfp_net_debugfs_dir_clean(struct dentry **dir)
diff --git a/drivers/net/ethernet/nuvoton/w90p910_ether.c b/drivers/net/ethernet/nuvoton/w90p910_ether.c
index 3d73970b3a2e..219b0b863c89 100644
--- a/drivers/net/ethernet/nuvoton/w90p910_ether.c
+++ b/drivers/net/ethernet/nuvoton/w90p910_ether.c
@@ -993,14 +993,12 @@ static int w90p910_ether_probe(struct platform_device *pdev)
ether->txirq = platform_get_irq(pdev, 0);
if (ether->txirq < 0) {
- dev_err(&pdev->dev, "failed to get ether tx irq\n");
error = -ENXIO;
goto failed_free_io;
}
ether->rxirq = platform_get_irq(pdev, 1);
if (ether->rxirq < 0) {
- dev_err(&pdev->dev, "failed to get ether rx irq\n");
error = -ENXIO;
goto failed_free_io;
}
diff --git a/drivers/net/ethernet/nvidia/forcedeth.c b/drivers/net/ethernet/nvidia/forcedeth.c
index b327b29f5d57..ecca794c55e2 100644
--- a/drivers/net/ethernet/nvidia/forcedeth.c
+++ b/drivers/net/ethernet/nvidia/forcedeth.c
@@ -6126,8 +6126,7 @@ static void nv_remove(struct pci_dev *pci_dev)
#ifdef CONFIG_PM_SLEEP
static int nv_suspend(struct device *device)
{
- struct pci_dev *pdev = to_pci_dev(device);
- struct net_device *dev = pci_get_drvdata(pdev);
+ struct net_device *dev = dev_get_drvdata(device);
struct fe_priv *np = netdev_priv(dev);
u8 __iomem *base = get_hwbase(dev);
int i;
diff --git a/drivers/net/ethernet/packetengines/yellowfin.c b/drivers/net/ethernet/packetengines/yellowfin.c
index 6f8d6584f809..5113ee647090 100644
--- a/drivers/net/ethernet/packetengines/yellowfin.c
+++ b/drivers/net/ethernet/packetengines/yellowfin.c
@@ -1258,8 +1258,7 @@ static int yellowfin_close(struct net_device *dev)
yp->rx_skbuff[i] = NULL;
}
for (i = 0; i < TX_RING_SIZE; i++) {
- if (yp->tx_skbuff[i])
- dev_kfree_skb(yp->tx_skbuff[i]);
+ dev_kfree_skb(yp->tx_skbuff[i]);
yp->tx_skbuff[i] = NULL;
}
diff --git a/drivers/net/ethernet/qlogic/Kconfig b/drivers/net/ethernet/qlogic/Kconfig
index a391cf6ee4b2..55a29ec76680 100644
--- a/drivers/net/ethernet/qlogic/Kconfig
+++ b/drivers/net/ethernet/qlogic/Kconfig
@@ -66,15 +66,6 @@ config QLCNIC_HWMON
This data is available via the hwmon sysfs interface.
-config QLGE
- tristate "QLogic QLGE 10Gb Ethernet Driver Support"
- depends on PCI
- ---help---
- This driver supports QLogic ISP8XXX 10Gb Ethernet cards.
-
- To compile this driver as a module, choose M here: the module
- will be called qlge.
-
config NETXEN_NIC
tristate "NetXen Multi port (1/10) Gigabit Ethernet NIC"
depends on PCI
diff --git a/drivers/net/ethernet/qlogic/Makefile b/drivers/net/ethernet/qlogic/Makefile
index 6cd2e333a5fc..1ae4a0743bd5 100644
--- a/drivers/net/ethernet/qlogic/Makefile
+++ b/drivers/net/ethernet/qlogic/Makefile
@@ -5,7 +5,6 @@
obj-$(CONFIG_QLA3XXX) += qla3xxx.o
obj-$(CONFIG_QLCNIC) += qlcnic/
-obj-$(CONFIG_QLGE) += qlge/
obj-$(CONFIG_NETXEN_NIC) += netxen/
obj-$(CONFIG_QED) += qed/
obj-$(CONFIG_QEDE)+= qede/
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
index 58e2eaf77014..c692a41e4548 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
@@ -1980,7 +1980,7 @@ netxen_map_tx_skb(struct pci_dev *pdev,
struct sk_buff *skb, struct netxen_cmd_buffer *pbuf)
{
struct netxen_skb_frag *nf;
- struct skb_frag_struct *frag;
+ skb_frag_t *frag;
int i, nr_frags;
dma_addr_t map;
@@ -2043,7 +2043,7 @@ netxen_nic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
struct pci_dev *pdev;
int i, k;
int delta = 0;
- struct skb_frag_struct *frag;
+ skb_frag_t *frag;
u32 producer;
int frag_count;
diff --git a/drivers/net/ethernet/qlogic/qed/qed_debug.c b/drivers/net/ethernet/qlogic/qed/qed_debug.c
index 5ea6c4fc6050..859caa6c1a1f 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_debug.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_debug.c
@@ -1756,6 +1756,15 @@ static u32 qed_read_unaligned_dword(u8 *buf)
return dword;
}
+/* Sets the value of the specified GRC param */
+static void qed_grc_set_param(struct qed_hwfn *p_hwfn,
+ enum dbg_grc_params grc_param, u32 val)
+{
+ struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
+
+ dev_data->grc.param_val[grc_param] = val;
+}
+
/* Returns the value of the specified GRC param */
static u32 qed_grc_get_param(struct qed_hwfn *p_hwfn,
enum dbg_grc_params grc_param)
@@ -5119,6 +5128,69 @@ bool qed_read_fw_info(struct qed_hwfn *p_hwfn,
return false;
}
+enum dbg_status qed_dbg_grc_config(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ enum dbg_grc_params grc_param, u32 val)
+{
+ enum dbg_status status;
+ int i;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_DEBUG,
+ "dbg_grc_config: paramId = %d, val = %d\n", grc_param, val);
+
+ status = qed_dbg_dev_init(p_hwfn, p_ptt);
+ if (status != DBG_STATUS_OK)
+ return status;
+
+ /* Initializes the GRC parameters (if not initialized). Needed in order
+ * to set the default parameter values for the first time.
+ */
+ qed_dbg_grc_init_params(p_hwfn);
+
+ if (grc_param >= MAX_DBG_GRC_PARAMS)
+ return DBG_STATUS_INVALID_ARGS;
+ if (val < s_grc_param_defs[grc_param].min ||
+ val > s_grc_param_defs[grc_param].max)
+ return DBG_STATUS_INVALID_ARGS;
+
+ if (s_grc_param_defs[grc_param].is_preset) {
+ /* Preset param */
+
+ /* Disabling a preset is not allowed. Call
+ * dbg_grc_set_params_default instead.
+ */
+ if (!val)
+ return DBG_STATUS_INVALID_ARGS;
+
+ /* Update all params with the preset values */
+ for (i = 0; i < MAX_DBG_GRC_PARAMS; i++) {
+ u32 preset_val;
+
+ /* Skip persistent params */
+ if (s_grc_param_defs[i].is_persistent)
+ continue;
+
+ /* Find preset value */
+ if (grc_param == DBG_GRC_PARAM_EXCLUDE_ALL)
+ preset_val =
+ s_grc_param_defs[i].exclude_all_preset_val;
+ else if (grc_param == DBG_GRC_PARAM_CRASH)
+ preset_val =
+ s_grc_param_defs[i].crash_preset_val;
+ else
+ return DBG_STATUS_INVALID_ARGS;
+
+ qed_grc_set_param(p_hwfn,
+ (enum dbg_grc_params)i, preset_val);
+ }
+ } else {
+ /* Regular param - set its value */
+ qed_grc_set_param(p_hwfn, grc_param, val);
+ }
+
+ return DBG_STATUS_OK;
+}
+
/* Assign default GRC param values */
void qed_dbg_grc_set_params_default(struct qed_hwfn *p_hwfn)
{
@@ -7997,9 +8069,16 @@ static u32 qed_calc_regdump_header(enum debug_print_features feature,
int qed_dbg_all_data(struct qed_dev *cdev, void *buffer)
{
u8 cur_engine, omit_engine = 0, org_engine;
+ struct qed_hwfn *p_hwfn =
+ &cdev->hwfns[cdev->dbg_params.engine_for_debug];
+ struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
+ int grc_params[MAX_DBG_GRC_PARAMS], i;
u32 offset = 0, feature_size;
int rc;
+ for (i = 0; i < MAX_DBG_GRC_PARAMS; i++)
+ grc_params[i] = dev_data->grc.param_val[i];
+
if (cdev->num_hwfns == 1)
omit_engine = 1;
@@ -8087,6 +8166,9 @@ int qed_dbg_all_data(struct qed_dev *cdev, void *buffer)
rc);
}
+ for (i = 0; i < MAX_DBG_GRC_PARAMS; i++)
+ dev_data->grc.param_val[i] = grc_params[i];
+
/* GRC dump - must be last because when mcp stuck it will
* clutter idle_chk, reg_fifo, ...
*/
diff --git a/drivers/net/ethernet/qlogic/qed/qed_hsi.h b/drivers/net/ethernet/qlogic/qed/qed_hsi.h
index e054f6c69e3a..cf3ceb62e397 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_hsi.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_hsi.h
@@ -3024,6 +3024,21 @@ void qed_read_regs(struct qed_hwfn *p_hwfn,
*/
bool qed_read_fw_info(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt, struct fw_info *fw_info);
+/**
+ * @brief qed_dbg_grc_config - Sets the value of a GRC parameter.
+ *
+ * @param p_hwfn - HW device data
+ * @param grc_param - GRC parameter
+ * @param val - Value to set.
+ *
+ * @return error if one of the following holds:
+ * - the version wasn't set
+ * - grc_param is invalid
+ * - val is outside the allowed boundaries
+ */
+enum dbg_status qed_dbg_grc_config(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ enum dbg_grc_params grc_param, u32 val);
/**
* @brief qed_dbg_grc_set_params_default - Reverts all GRC parameters to their
@@ -12580,6 +12595,8 @@ struct public_drv_mb {
#define DRV_MSG_CODE_BW_UPDATE_ACK 0x32000000
#define DRV_MSG_CODE_NIG_DRAIN 0x30000000
#define DRV_MSG_CODE_S_TAG_UPDATE_ACK 0x3b000000
+#define DRV_MSG_CODE_GET_NVM_CFG_OPTION 0x003e0000
+#define DRV_MSG_CODE_SET_NVM_CFG_OPTION 0x003f0000
#define DRV_MSG_CODE_INITIATE_PF_FLR 0x02010000
#define DRV_MSG_CODE_VF_DISABLED_DONE 0xc0000000
#define DRV_MSG_CODE_CFG_VF_MSIX 0xc0010000
@@ -12748,6 +12765,21 @@ struct public_drv_mb {
#define DRV_MB_PARAM_FEATURE_SUPPORT_PORT_EEE 0x00000002
#define DRV_MB_PARAM_FEATURE_SUPPORT_FUNC_VLINK 0x00010000
+#define DRV_MB_PARAM_NVM_CFG_OPTION_ID_SHIFT 0
+#define DRV_MB_PARAM_NVM_CFG_OPTION_ID_MASK 0x0000FFFF
+#define DRV_MB_PARAM_NVM_CFG_OPTION_ALL_SHIFT 16
+#define DRV_MB_PARAM_NVM_CFG_OPTION_ALL_MASK 0x00010000
+#define DRV_MB_PARAM_NVM_CFG_OPTION_INIT_SHIFT 17
+#define DRV_MB_PARAM_NVM_CFG_OPTION_INIT_MASK 0x00020000
+#define DRV_MB_PARAM_NVM_CFG_OPTION_COMMIT_SHIFT 18
+#define DRV_MB_PARAM_NVM_CFG_OPTION_COMMIT_MASK 0x00040000
+#define DRV_MB_PARAM_NVM_CFG_OPTION_FREE_SHIFT 19
+#define DRV_MB_PARAM_NVM_CFG_OPTION_FREE_MASK 0x00080000
+#define DRV_MB_PARAM_NVM_CFG_OPTION_ENTITY_SEL_SHIFT 20
+#define DRV_MB_PARAM_NVM_CFG_OPTION_ENTITY_SEL_MASK 0x00100000
+#define DRV_MB_PARAM_NVM_CFG_OPTION_ENTITY_ID_SHIFT 24
+#define DRV_MB_PARAM_NVM_CFG_OPTION_ENTITY_ID_MASK 0x0f000000
+
u32 fw_mb_header;
#define FW_MSG_CODE_MASK 0xffff0000
#define FW_MSG_CODE_UNSUPPORTED 0x00000000
diff --git a/drivers/net/ethernet/qlogic/qed/qed_l2.c b/drivers/net/ethernet/qlogic/qed/qed_l2.c
index 9f36e7948222..1a5fc2ae351c 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_l2.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_l2.c
@@ -1631,10 +1631,9 @@ static void __qed_get_vport_pstats_addrlen(struct qed_hwfn *p_hwfn,
}
}
-static void __qed_get_vport_pstats(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt,
- struct qed_eth_stats *p_stats,
- u16 statistics_bin)
+static noinline_for_stack void
+__qed_get_vport_pstats(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
+ struct qed_eth_stats *p_stats, u16 statistics_bin)
{
struct eth_pstorm_per_queue_stat pstats;
u32 pstats_addr = 0, pstats_len = 0;
@@ -1661,10 +1660,9 @@ static void __qed_get_vport_pstats(struct qed_hwfn *p_hwfn,
HILO_64_REGPAIR(pstats.error_drop_pkts);
}
-static void __qed_get_vport_tstats(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt,
- struct qed_eth_stats *p_stats,
- u16 statistics_bin)
+static noinline_for_stack void
+__qed_get_vport_tstats(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
+ struct qed_eth_stats *p_stats, u16 statistics_bin)
{
struct tstorm_per_port_stat tstats;
u32 tstats_addr, tstats_len;
@@ -1709,10 +1707,9 @@ static void __qed_get_vport_ustats_addrlen(struct qed_hwfn *p_hwfn,
}
}
-static void __qed_get_vport_ustats(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt,
- struct qed_eth_stats *p_stats,
- u16 statistics_bin)
+static noinline_for_stack void
+__qed_get_vport_ustats(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
+ struct qed_eth_stats *p_stats, u16 statistics_bin)
{
struct eth_ustorm_per_queue_stat ustats;
u32 ustats_addr = 0, ustats_len = 0;
@@ -1751,10 +1748,9 @@ static void __qed_get_vport_mstats_addrlen(struct qed_hwfn *p_hwfn,
}
}
-static void __qed_get_vport_mstats(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt,
- struct qed_eth_stats *p_stats,
- u16 statistics_bin)
+static noinline_for_stack void
+__qed_get_vport_mstats(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
+ struct qed_eth_stats *p_stats, u16 statistics_bin)
{
struct eth_mstorm_per_queue_stat mstats;
u32 mstats_addr = 0, mstats_len = 0;
@@ -1780,9 +1776,9 @@ static void __qed_get_vport_mstats(struct qed_hwfn *p_hwfn,
HILO_64_REGPAIR(mstats.tpa_coalesced_bytes);
}
-static void __qed_get_vport_port_stats(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt,
- struct qed_eth_stats *p_stats)
+static noinline_for_stack void
+__qed_get_vport_port_stats(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
+ struct qed_eth_stats *p_stats)
{
struct qed_eth_stats_common *p_common = &p_stats->common;
struct port_stats port_stats;
diff --git a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c
index 1efff7f68ef6..ac1511a834d8 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_main.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_main.c
@@ -67,6 +67,10 @@
#define QED_ROCE_QPS (8192)
#define QED_ROCE_DPIS (8)
#define QED_RDMA_SRQS QED_ROCE_QPS
+#define QED_NVM_CFG_SET_FLAGS 0xE
+#define QED_NVM_CFG_SET_PF_FLAGS 0x1E
+#define QED_NVM_CFG_GET_FLAGS 0xA
+#define QED_NVM_CFG_GET_PF_FLAGS 0x1A
static char version[] =
"QLogic FastLinQ 4xxxx Core Module qed " DRV_MODULE_VERSION "\n";
@@ -1690,6 +1694,7 @@ static void qed_fill_link_capability(struct qed_hwfn *hwfn,
switch (media_type) {
case MEDIA_DA_TWINAX:
+ *if_capability |= QED_LM_FIBRE_BIT;
if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_20G)
*if_capability |= QED_LM_20000baseKR2_Full_BIT;
/* For DAC media multiple speed capabilities are supported*/
@@ -1709,6 +1714,7 @@ static void qed_fill_link_capability(struct qed_hwfn *hwfn,
*if_capability |= QED_LM_100000baseCR4_Full_BIT;
break;
case MEDIA_BASE_T:
+ *if_capability |= QED_LM_TP_BIT;
if (board_cfg & NVM_CFG1_PORT_PORT_TYPE_EXT_PHY) {
if (capability &
NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G) {
@@ -1720,6 +1726,7 @@ static void qed_fill_link_capability(struct qed_hwfn *hwfn,
}
}
if (board_cfg & NVM_CFG1_PORT_PORT_TYPE_MODULE) {
+ *if_capability |= QED_LM_FIBRE_BIT;
if (tcvr_type == ETH_TRANSCEIVER_TYPE_1000BASET)
*if_capability |= QED_LM_1000baseT_Full_BIT;
if (tcvr_type == ETH_TRANSCEIVER_TYPE_10G_BASET)
@@ -1730,6 +1737,7 @@ static void qed_fill_link_capability(struct qed_hwfn *hwfn,
case MEDIA_SFPP_10G_FIBER:
case MEDIA_XFP_FIBER:
case MEDIA_MODULE_FIBER:
+ *if_capability |= QED_LM_FIBRE_BIT;
if (capability &
NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G) {
if ((tcvr_type == ETH_TRANSCEIVER_TYPE_1G_LX) ||
@@ -1772,6 +1780,7 @@ static void qed_fill_link_capability(struct qed_hwfn *hwfn,
break;
case MEDIA_KR:
+ *if_capability |= QED_LM_Backplane_BIT;
if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_20G)
*if_capability |= QED_LM_20000baseKR2_Full_BIT;
if (capability &
@@ -1823,7 +1832,6 @@ static void qed_fill_link(struct qed_hwfn *hwfn,
if_link->link_up = true;
/* TODO - at the moment assume supported and advertised speed equal */
- if_link->supported_caps = QED_LM_FIBRE_BIT;
if (link_caps.default_speed_autoneg)
if_link->supported_caps |= QED_LM_Autoneg_BIT;
if (params.pause.autoneg ||
@@ -2229,6 +2237,93 @@ static int qed_nvm_flash_image_validate(struct qed_dev *cdev,
return 0;
}
+/* Binary file format -
+ * /----------------------------------------------------------------------\
+ * 0B | 0x5 [command index] |
+ * 4B | Entity ID | Reserved | Number of config attributes |
+ * 8B | Config ID | Length | Value |
+ * | |
+ * \----------------------------------------------------------------------/
+ * There can be several cfg_id-Length-Value sets as specified by 'Number of...'.
+ * Entity ID - A non-zero entity value for which the config needs to be updated.
+ *
+ * The API parses config attributes from the user-provided buffer and flashes
+ * them to the respective NVM path using the Management FW interface.
+ */
+static int qed_nvm_flash_cfg_write(struct qed_dev *cdev, const u8 **data)
+{
+ struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
+ u8 entity_id, len, buf[32];
+ struct qed_ptt *ptt;
+ u16 cfg_id, count;
+ int rc = 0, i;
+ u32 flags;
+
+ ptt = qed_ptt_acquire(hwfn);
+ if (!ptt)
+ return -EAGAIN;
+
+ /* NVM CFG ID attribute header */
+ *data += 4;
+ entity_id = **data;
+ *data += 2;
+ count = *((u16 *)*data);
+ *data += 2;
+
+ DP_VERBOSE(cdev, NETIF_MSG_DRV,
+ "Read config ids: entity id %02x num _attrs = %0d\n",
+ entity_id, count);
+ /* NVM CFG ID attributes */
+ for (i = 0; i < count; i++) {
+ cfg_id = *((u16 *)*data);
+ *data += 2;
+ len = **data;
+ (*data)++;
+ memcpy(buf, *data, len);
+ *data += len;
+
+ flags = entity_id ? QED_NVM_CFG_SET_PF_FLAGS :
+ QED_NVM_CFG_SET_FLAGS;
+
+ DP_VERBOSE(cdev, NETIF_MSG_DRV,
+ "cfg_id = %d len = %d\n", cfg_id, len);
+ rc = qed_mcp_nvm_set_cfg(hwfn, ptt, cfg_id, entity_id, flags,
+ buf, len);
+ if (rc) {
+ DP_ERR(cdev, "Error %d configuring %d\n", rc, cfg_id);
+ break;
+ }
+ }
+
+ qed_ptt_release(hwfn, ptt);
+
+ return rc;
+}
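
For reference, a minimal userspace sketch of packing a blob in the layout
documented above. This is not part of the driver: the helper name is
hypothetical, the 0x5 command index is taken from the format comment, a
single attribute is assumed, and multi-byte fields are written in host
byte order to match the driver's raw pointer reads.

#include <stdint.h>
#include <string.h>

/* Pack one cfg_id/length/value attribute for entity 'entity_id'.
 * Layout: 4B command index (0x5), 1B entity id, 1B reserved,
 * 2B attribute count, then cfg_id (2B) / length (1B) / value bytes.
 */
static size_t nvm_cfg_pack(uint8_t *buf, uint8_t entity_id,
			   uint16_t cfg_id, const uint8_t *val, uint8_t len)
{
	uint32_t cmd = 0x5;	/* command index from the format comment */
	uint16_t count = 1;	/* single attribute in this sketch */
	size_t off = 0;

	memcpy(buf + off, &cmd, sizeof(cmd));
	off += sizeof(cmd);
	buf[off++] = entity_id;
	buf[off++] = 0;		/* reserved */
	memcpy(buf + off, &count, sizeof(count));
	off += sizeof(count);
	memcpy(buf + off, &cfg_id, sizeof(cfg_id));
	off += sizeof(cfg_id);
	buf[off++] = len;
	memcpy(buf + off, val, len);
	off += len;

	return off;		/* total bytes written */
}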
+
+static int qed_nvm_flash_cfg_read(struct qed_dev *cdev, u8 **data,
+ u32 cmd, u32 entity_id)
+{
+ struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
+ struct qed_ptt *ptt;
+ u32 flags, len;
+ int rc = 0;
+
+ ptt = qed_ptt_acquire(hwfn);
+ if (!ptt)
+ return -EAGAIN;
+
+ DP_VERBOSE(cdev, NETIF_MSG_DRV,
+ "Read config cmd = %d entity id %d\n", cmd, entity_id);
+ flags = entity_id ? QED_NVM_CFG_GET_PF_FLAGS : QED_NVM_CFG_GET_FLAGS;
+ rc = qed_mcp_nvm_get_cfg(hwfn, ptt, cmd, entity_id, flags, *data, &len);
+ if (rc)
+ DP_ERR(cdev, "Error %d reading %d\n", rc, cmd);
+
+ qed_ptt_release(hwfn, ptt);
+
+ return rc;
+}
+
static int qed_nvm_flash(struct qed_dev *cdev, const char *name)
{
const struct firmware *image;
@@ -2270,6 +2365,9 @@ static int qed_nvm_flash(struct qed_dev *cdev, const char *name)
rc = qed_nvm_flash_image_access(cdev, &data,
&check_resp);
break;
+ case QED_NVM_FLASH_CMD_NVM_CFG_ID:
+ rc = qed_nvm_flash_cfg_write(cdev, &data);
+ break;
default:
DP_ERR(cdev, "Unknown command %08x\n", cmd_type);
rc = -EINVAL;
@@ -2485,6 +2583,26 @@ static int qed_read_module_eeprom(struct qed_dev *cdev, char *buf,
return rc;
}
+static int qed_set_grc_config(struct qed_dev *cdev, u32 cfg_id, u32 val)
+{
+ struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
+ struct qed_ptt *ptt;
+ int rc = 0;
+
+ if (IS_VF(cdev))
+ return 0;
+
+ ptt = qed_ptt_acquire(hwfn);
+ if (!ptt)
+ return -EAGAIN;
+
+ rc = qed_dbg_grc_config(hwfn, ptt, cfg_id, val);
+
+ qed_ptt_release(hwfn, ptt);
+
+ return rc;
+}
+
static u8 qed_get_affin_hwfn_idx(struct qed_dev *cdev)
{
return QED_AFFIN_HWFN_IDX(cdev);
@@ -2538,6 +2656,8 @@ const struct qed_common_ops qed_common_ops_pass = {
.db_recovery_del = &qed_db_recovery_del,
.read_module_eeprom = &qed_read_module_eeprom,
.get_affin_hwfn_idx = &qed_get_affin_hwfn_idx,
+ .read_nvm_cfg = &qed_nvm_flash_cfg_read,
+ .set_grc_config = &qed_set_grc_config,
};
void qed_get_protocol_stats(struct qed_dev *cdev,
diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.c b/drivers/net/ethernet/qlogic/qed/qed_mcp.c
index 758702c1ce9c..36ddb89856a8 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_mcp.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.c
@@ -3750,3 +3750,64 @@ int qed_mcp_get_ppfid_bitmap(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
return 0;
}
+
+int qed_mcp_nvm_get_cfg(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
+ u16 option_id, u8 entity_id, u16 flags, u8 *p_buf,
+ u32 *p_len)
+{
+ u32 mb_param = 0, resp, param;
+ int rc;
+
+ QED_MFW_SET_FIELD(mb_param, DRV_MB_PARAM_NVM_CFG_OPTION_ID, option_id);
+ if (flags & QED_NVM_CFG_OPTION_INIT)
+ QED_MFW_SET_FIELD(mb_param,
+ DRV_MB_PARAM_NVM_CFG_OPTION_INIT, 1);
+ if (flags & QED_NVM_CFG_OPTION_FREE)
+ QED_MFW_SET_FIELD(mb_param,
+ DRV_MB_PARAM_NVM_CFG_OPTION_FREE, 1);
+ if (flags & QED_NVM_CFG_OPTION_ENTITY_SEL) {
+ QED_MFW_SET_FIELD(mb_param,
+ DRV_MB_PARAM_NVM_CFG_OPTION_ENTITY_SEL, 1);
+ QED_MFW_SET_FIELD(mb_param,
+ DRV_MB_PARAM_NVM_CFG_OPTION_ENTITY_ID,
+ entity_id);
+ }
+
+ rc = qed_mcp_nvm_rd_cmd(p_hwfn, p_ptt,
+ DRV_MSG_CODE_GET_NVM_CFG_OPTION,
+ mb_param, &resp, &param, p_len, (u32 *)p_buf);
+
+ return rc;
+}
+
+int qed_mcp_nvm_set_cfg(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
+ u16 option_id, u8 entity_id, u16 flags, u8 *p_buf,
+ u32 len)
+{
+ u32 mb_param = 0, resp, param;
+
+ QED_MFW_SET_FIELD(mb_param, DRV_MB_PARAM_NVM_CFG_OPTION_ID, option_id);
+ if (flags & QED_NVM_CFG_OPTION_ALL)
+ QED_MFW_SET_FIELD(mb_param,
+ DRV_MB_PARAM_NVM_CFG_OPTION_ALL, 1);
+ if (flags & QED_NVM_CFG_OPTION_INIT)
+ QED_MFW_SET_FIELD(mb_param,
+ DRV_MB_PARAM_NVM_CFG_OPTION_INIT, 1);
+ if (flags & QED_NVM_CFG_OPTION_COMMIT)
+ QED_MFW_SET_FIELD(mb_param,
+ DRV_MB_PARAM_NVM_CFG_OPTION_COMMIT, 1);
+ if (flags & QED_NVM_CFG_OPTION_FREE)
+ QED_MFW_SET_FIELD(mb_param,
+ DRV_MB_PARAM_NVM_CFG_OPTION_FREE, 1);
+ if (flags & QED_NVM_CFG_OPTION_ENTITY_SEL) {
+ QED_MFW_SET_FIELD(mb_param,
+ DRV_MB_PARAM_NVM_CFG_OPTION_ENTITY_SEL, 1);
+ QED_MFW_SET_FIELD(mb_param,
+ DRV_MB_PARAM_NVM_CFG_OPTION_ENTITY_ID,
+ entity_id);
+ }
+
+ return qed_mcp_nvm_wr_cmd(p_hwfn, p_ptt,
+ DRV_MSG_CODE_SET_NVM_CFG_OPTION,
+ mb_param, &resp, &param, len, (u32 *)p_buf);
+}
diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.h b/drivers/net/ethernet/qlogic/qed/qed_mcp.h
index e4f8fe4bd062..9c4c2763de8d 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_mcp.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.h
@@ -251,6 +251,12 @@ union qed_mfw_tlv_data {
struct qed_mfw_tlv_iscsi iscsi;
};
+#define QED_NVM_CFG_OPTION_ALL BIT(0)
+#define QED_NVM_CFG_OPTION_INIT BIT(1)
+#define QED_NVM_CFG_OPTION_COMMIT BIT(2)
+#define QED_NVM_CFG_OPTION_FREE BIT(3)
+#define QED_NVM_CFG_OPTION_ENTITY_SEL BIT(4)
+
/**
* @brief - returns the link params of the hw function
*
@@ -1202,4 +1208,33 @@ int qed_mcp_get_engine_config(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
*/
int qed_mcp_get_ppfid_bitmap(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
+/**
+ * @brief Get NVM config attribute value.
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param option_id
+ * @param entity_id
+ * @param flags
+ * @param p_buf
+ * @param p_len
+ */
+int qed_mcp_nvm_get_cfg(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
+ u16 option_id, u8 entity_id, u16 flags, u8 *p_buf,
+ u32 *p_len);
+
+/**
+ * @brief Set NVM config attribute value.
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param option_id
+ * @param entity_id
+ * @param flags
+ * @param p_buf
+ * @param len
+ */
+int qed_mcp_nvm_set_cfg(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
+ u16 option_id, u8 entity_id, u16 flags, u8 *p_buf,
+ u32 len);
#endif
diff --git a/drivers/net/ethernet/qlogic/qede/qede.h b/drivers/net/ethernet/qlogic/qede/qede.h
index 0e931c04fecf..c303a92d5b06 100644
--- a/drivers/net/ethernet/qlogic/qede/qede.h
+++ b/drivers/net/ethernet/qlogic/qede/qede.h
@@ -177,6 +177,20 @@ enum qede_flags_bit {
QEDE_FLAGS_TX_TIMESTAMPING_EN
};
+#define QEDE_DUMP_MAX_ARGS 4
+enum qede_dump_cmd {
+ QEDE_DUMP_CMD_NONE = 0,
+ QEDE_DUMP_CMD_NVM_CFG,
+ QEDE_DUMP_CMD_GRCDUMP,
+ QEDE_DUMP_CMD_MAX
+};
+
+struct qede_dump_info {
+ enum qede_dump_cmd cmd;
+ u8 num_args;
+ u32 args[QEDE_DUMP_MAX_ARGS];
+};
+
struct qede_dev {
struct qed_dev *cdev;
struct net_device *ndev;
@@ -262,6 +276,7 @@ struct qede_dev {
struct qede_rdma_dev rdma_info;
struct bpf_prog *xdp_prog;
+ struct qede_dump_info dump_info;
};
enum QEDE_STATE {
diff --git a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c
index e85f9fef930c..ec27a43230d7 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c
@@ -48,6 +48,9 @@
{QEDE_RQSTAT_OFFSET(stat_name), QEDE_RQSTAT_STRING(stat_name)}
#define QEDE_SELFTEST_POLL_COUNT 100
+#define QEDE_DUMP_VERSION 0x1
+#define QEDE_DUMP_NVM_BUF_LEN 32
+#define QEDE_DUMP_NVM_ARG_COUNT 2
static const struct {
u64 offset;
@@ -424,12 +427,13 @@ struct qede_link_mode_mapping {
};
static const struct qede_link_mode_mapping qed_lm_map[] = {
+ {QED_LM_FIBRE_BIT, ETHTOOL_LINK_MODE_FIBRE_BIT},
{QED_LM_Autoneg_BIT, ETHTOOL_LINK_MODE_Autoneg_BIT},
{QED_LM_Asym_Pause_BIT, ETHTOOL_LINK_MODE_Asym_Pause_BIT},
{QED_LM_Pause_BIT, ETHTOOL_LINK_MODE_Pause_BIT},
{QED_LM_1000baseT_Full_BIT, ETHTOOL_LINK_MODE_1000baseT_Full_BIT},
{QED_LM_10000baseT_Full_BIT, ETHTOOL_LINK_MODE_10000baseT_Full_BIT},
- {QED_LM_2500baseX_Full_BIT, ETHTOOL_LINK_MODE_2500baseX_Full_BIT},
+ {QED_LM_TP_BIT, ETHTOOL_LINK_MODE_TP_BIT},
{QED_LM_Backplane_BIT, ETHTOOL_LINK_MODE_Backplane_BIT},
{QED_LM_1000baseKX_Full_BIT, ETHTOOL_LINK_MODE_1000baseKX_Full_BIT},
{QED_LM_10000baseKX4_Full_BIT, ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT},
@@ -1972,6 +1976,114 @@ static int qede_get_module_eeprom(struct net_device *dev,
return rc;
}
+static int qede_set_dump(struct net_device *dev, struct ethtool_dump *val)
+{
+ struct qede_dev *edev = netdev_priv(dev);
+ int rc = 0;
+
+ if (edev->dump_info.cmd == QEDE_DUMP_CMD_NONE) {
+ if (val->flag > QEDE_DUMP_CMD_MAX) {
+ DP_ERR(edev, "Invalid command %d\n", val->flag);
+ return -EINVAL;
+ }
+ edev->dump_info.cmd = val->flag;
+ edev->dump_info.num_args = 0;
+ return 0;
+ }
+
+ if (edev->dump_info.num_args == QEDE_DUMP_MAX_ARGS) {
+ DP_ERR(edev, "Arg count = %d\n", edev->dump_info.num_args);
+ return -EINVAL;
+ }
+
+ switch (edev->dump_info.cmd) {
+ case QEDE_DUMP_CMD_NVM_CFG:
+ edev->dump_info.args[edev->dump_info.num_args] = val->flag;
+ edev->dump_info.num_args++;
+ break;
+ case QEDE_DUMP_CMD_GRCDUMP:
+ rc = edev->ops->common->set_grc_config(edev->cdev,
+ val->flag, 1);
+ break;
+ default:
+ break;
+ }
+
+ return rc;
+}
+
+static int qede_get_dump_flag(struct net_device *dev,
+ struct ethtool_dump *dump)
+{
+ struct qede_dev *edev = netdev_priv(dev);
+
+ if (!edev->ops || !edev->ops->common) {
+ DP_ERR(edev, "Edev ops not populated\n");
+ return -EINVAL;
+ }
+
+ dump->version = QEDE_DUMP_VERSION;
+ switch (edev->dump_info.cmd) {
+ case QEDE_DUMP_CMD_NVM_CFG:
+ dump->flag = QEDE_DUMP_CMD_NVM_CFG;
+ dump->len = QEDE_DUMP_NVM_BUF_LEN;
+ break;
+ case QEDE_DUMP_CMD_GRCDUMP:
+ dump->flag = QEDE_DUMP_CMD_GRCDUMP;
+ dump->len = edev->ops->common->dbg_all_data_size(edev->cdev);
+ break;
+ default:
+ DP_ERR(edev, "Invalid cmd = %d\n", edev->dump_info.cmd);
+ return -EINVAL;
+ }
+
+ DP_VERBOSE(edev, QED_MSG_DEBUG,
+ "dump->version = 0x%x dump->flag = %d dump->len = %d\n",
+ dump->version, dump->flag, dump->len);
+ return 0;
+}
+
+static int qede_get_dump_data(struct net_device *dev,
+ struct ethtool_dump *dump, void *buf)
+{
+ struct qede_dev *edev = netdev_priv(dev);
+ int rc = 0;
+
+ if (!edev->ops || !edev->ops->common) {
+ DP_ERR(edev, "Edev ops not populated\n");
+ edev->dump_info.cmd = QEDE_DUMP_CMD_NONE;
+ edev->dump_info.num_args = 0;
+ return -EINVAL;
+ }
+
+ switch (edev->dump_info.cmd) {
+ case QEDE_DUMP_CMD_NVM_CFG:
+ if (edev->dump_info.num_args != QEDE_DUMP_NVM_ARG_COUNT) {
+ DP_ERR(edev, "Arg count = %d required = %d\n",
+ edev->dump_info.num_args,
+ QEDE_DUMP_NVM_ARG_COUNT);
+ return -EINVAL;
+ }
+ rc = edev->ops->common->read_nvm_cfg(edev->cdev, (u8 **)&buf,
+ edev->dump_info.args[0],
+ edev->dump_info.args[1]);
+ break;
+ case QEDE_DUMP_CMD_GRCDUMP:
+ memset(buf, 0, dump->len);
+ rc = edev->ops->common->dbg_all_data(edev->cdev, buf);
+ break;
+ default:
+ DP_ERR(edev, "Invalid cmd = %d\n", edev->dump_info.cmd);
+ rc = -EINVAL;
+ break;
+ }
+
+ edev->dump_info.cmd = QEDE_DUMP_CMD_NONE;
+ edev->dump_info.num_args = 0;
+
+ return rc;
+}
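
Taken together, set_dump/get_dump_flag/get_dump_data implement a small
stateful protocol: the first set_dump call selects the command, further
set_dump calls push its arguments, and the flag/data pair then sizes and
retrieves the buffer. Below is a hedged sketch of driving the NVM_CFG
path over the SIOCETHTOOL ioctl; the interface name and the option/entity
argument values are placeholders and error checking is omitted.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

static int ethtool_ioctl(int fd, const char *ifname, void *data)
{
	struct ifreq ifr = { 0 };

	strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
	ifr.ifr_data = data;
	return ioctl(fd, SIOCETHTOOL, &ifr);
}

int main(void)
{
	struct ethtool_dump req = { 0 }, *dump;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	/* Select QEDE_DUMP_CMD_NVM_CFG (1), then push the two args the
	 * handler requires: NVM option id and entity id (values below
	 * are examples only).
	 */
	req.cmd = ETHTOOL_SET_DUMP;
	req.flag = 1;			/* QEDE_DUMP_CMD_NVM_CFG */
	ethtool_ioctl(fd, "eth0", &req);
	req.flag = 1;			/* arg[0]: option id */
	ethtool_ioctl(fd, "eth0", &req);
	req.flag = 0;			/* arg[1]: entity id */
	ethtool_ioctl(fd, "eth0", &req);

	/* Size the buffer, then fetch the data. */
	req.cmd = ETHTOOL_GET_DUMP_FLAG;
	ethtool_ioctl(fd, "eth0", &req);

	dump = calloc(1, sizeof(*dump) + req.len);
	dump->cmd = ETHTOOL_GET_DUMP_DATA;
	dump->len = req.len;
	ethtool_ioctl(fd, "eth0", dump);

	printf("read %u bytes\n", dump->len);
	free(dump);
	close(fd);
	return 0;
}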
+
static const struct ethtool_ops qede_ethtool_ops = {
.get_link_ksettings = qede_get_link_ksettings,
.set_link_ksettings = qede_set_link_ksettings,
@@ -2013,6 +2125,9 @@ static const struct ethtool_ops qede_ethtool_ops = {
.get_tunable = qede_get_tunable,
.set_tunable = qede_set_tunable,
.flash_device = qede_flash_device,
+ .get_dump_flag = qede_get_dump_flag,
+ .get_dump_data = qede_get_dump_data,
+ .set_dump = qede_set_dump,
};
static const struct ethtool_ops qede_vf_ethtool_ops = {
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
index 14f26bf3b388..ac61f614de37 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
@@ -581,7 +581,7 @@ static int qlcnic_map_tx_skb(struct pci_dev *pdev, struct sk_buff *skb,
struct qlcnic_cmd_buffer *pbuf)
{
struct qlcnic_skb_frag *nf;
- struct skb_frag_struct *frag;
+ skb_frag_t *frag;
int i, nr_frags;
dma_addr_t map;
diff --git a/drivers/net/ethernet/qualcomm/emac/emac-mac.c b/drivers/net/ethernet/qualcomm/emac/emac-mac.c
index 707665b62eb7..bebe38d74d66 100644
--- a/drivers/net/ethernet/qualcomm/emac/emac-mac.c
+++ b/drivers/net/ethernet/qualcomm/emac/emac-mac.c
@@ -1385,15 +1385,13 @@ static void emac_tx_fill_tpd(struct emac_adapter *adpt,
}
for (i = 0; i < nr_frags; i++) {
- struct skb_frag_struct *frag;
-
- frag = &skb_shinfo(skb)->frags[i];
+ skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
tpbuf = GET_TPD_BUFFER(tx_q, tx_q->tpd.produce_idx);
- tpbuf->length = frag->size;
- tpbuf->dma_addr = dma_map_page(adpt->netdev->dev.parent,
- frag->page.p, frag->page_offset,
- tpbuf->length, DMA_TO_DEVICE);
+ tpbuf->length = skb_frag_size(frag);
+ tpbuf->dma_addr = skb_frag_dma_map(adpt->netdev->dev.parent,
+ frag, 0, tpbuf->length,
+ DMA_TO_DEVICE);
ret = dma_mapping_error(adpt->netdev->dev.parent,
tpbuf->dma_addr);
if (ret)
diff --git a/drivers/net/ethernet/qualcomm/emac/emac.c b/drivers/net/ethernet/qualcomm/emac/emac.c
index 59c2349b59df..c84ab052ef26 100644
--- a/drivers/net/ethernet/qualcomm/emac/emac.c
+++ b/drivers/net/ethernet/qualcomm/emac/emac.c
@@ -544,7 +544,6 @@ static int emac_probe_resources(struct platform_device *pdev,
struct emac_adapter *adpt)
{
struct net_device *netdev = adpt->netdev;
- struct resource *res;
char maddr[ETH_ALEN];
int ret = 0;
@@ -556,22 +555,17 @@ static int emac_probe_resources(struct platform_device *pdev,
/* Core 0 interrupt */
ret = platform_get_irq(pdev, 0);
- if (ret < 0) {
- dev_err(&pdev->dev,
- "error: missing core0 irq resource (error=%i)\n", ret);
+ if (ret < 0)
return ret;
- }
adpt->irq.irq = ret;
/* base register address */
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- adpt->base = devm_ioremap_resource(&pdev->dev, res);
+ adpt->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(adpt->base))
return PTR_ERR(adpt->base);
/* CSR register address */
- res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
- adpt->csr = devm_ioremap_resource(&pdev->dev, res);
+ adpt->csr = devm_platform_ioremap_resource(pdev, 1);
if (IS_ERR(adpt->csr))
return PTR_ERR(adpt->csr);
diff --git a/drivers/net/ethernet/qualcomm/qca_debug.c b/drivers/net/ethernet/qualcomm/qca_debug.c
index bcb890b18a94..702aa217a27a 100644
--- a/drivers/net/ethernet/qualcomm/qca_debug.c
+++ b/drivers/net/ethernet/qualcomm/qca_debug.c
@@ -131,17 +131,10 @@ DEFINE_SHOW_ATTRIBUTE(qcaspi_info);
void
qcaspi_init_device_debugfs(struct qcaspi *qca)
{
- struct dentry *device_root;
+ qca->device_root = debugfs_create_dir(dev_name(&qca->net_dev->dev),
+ NULL);
- device_root = debugfs_create_dir(dev_name(&qca->net_dev->dev), NULL);
- qca->device_root = device_root;
-
- if (IS_ERR(device_root) || !device_root) {
- pr_warn("failed to create debugfs directory for %s\n",
- dev_name(&qca->net_dev->dev));
- return;
- }
- debugfs_create_file("info", S_IFREG | 0444, device_root, qca,
+ debugfs_create_file("info", S_IFREG | 0444, qca->device_root, qca,
&qcaspi_info_fops);
}
diff --git a/drivers/net/ethernet/qualcomm/qca_spi.c b/drivers/net/ethernet/qualcomm/qca_spi.c
index b28360bc2255..5ecf61df78bd 100644
--- a/drivers/net/ethernet/qualcomm/qca_spi.c
+++ b/drivers/net/ethernet/qualcomm/qca_spi.c
@@ -837,8 +837,7 @@ qcaspi_netdev_uninit(struct net_device *dev)
kfree(qca->rx_buffer);
qca->buffer_size = 0;
- if (qca->rx_skb)
- dev_kfree_skb(qca->rx_skb);
+ dev_kfree_skb(qca->rx_skb);
}
static const struct net_device_ops qcaspi_netdev_ops = {
diff --git a/drivers/net/ethernet/qualcomm/qca_uart.c b/drivers/net/ethernet/qualcomm/qca_uart.c
index 590616846cd1..0981068504fa 100644
--- a/drivers/net/ethernet/qualcomm/qca_uart.c
+++ b/drivers/net/ethernet/qualcomm/qca_uart.c
@@ -285,8 +285,7 @@ static void qcauart_netdev_uninit(struct net_device *dev)
{
struct qcauart *qca = netdev_priv(dev);
- if (qca->rx_skb)
- dev_kfree_skb(qca->rx_skb);
+ dev_kfree_skb(qca->rx_skb);
}
static const struct net_device_ops qcauart_netdev_ops = {
diff --git a/drivers/net/ethernet/realtek/Kconfig b/drivers/net/ethernet/realtek/Kconfig
index b18e7a91d5cd..5e0b9d2f14f7 100644
--- a/drivers/net/ethernet/realtek/Kconfig
+++ b/drivers/net/ethernet/realtek/Kconfig
@@ -96,14 +96,19 @@ config 8139_OLD_RX_RESET
old RX-reset behavior. If unsure, say N.
config R8169
- tristate "Realtek 8169 gigabit ethernet support"
+ tristate "Realtek 8169/8168/8101/8125 ethernet support"
depends on PCI
select FW_LOADER
select CRC32
select PHYLIB
select REALTEK_PHY
---help---
- Say Y here if you have a Realtek 8169 PCI Gigabit Ethernet adapter.
+ Say Y here if you have a Realtek Ethernet adapter belonging to
+ the following families:
+ RTL8169 Gigabit Ethernet
+ RTL8168 Gigabit Ethernet
+ RTL8101 Fast Ethernet
+ RTL8125 2.5GBit Ethernet
To compile this driver as a module, choose M here: the module
will be called r8169. This is recommended.
diff --git a/drivers/net/ethernet/realtek/r8169_main.c b/drivers/net/ethernet/realtek/r8169_main.c
index bae0074ab9aa..0ef01db1f8b8 100644
--- a/drivers/net/ethernet/realtek/r8169_main.c
+++ b/drivers/net/ethernet/realtek/r8169_main.c
@@ -55,13 +55,14 @@
#define FIRMWARE_8168H_2 "rtl_nic/rtl8168h-2.fw"
#define FIRMWARE_8107E_1 "rtl_nic/rtl8107e-1.fw"
#define FIRMWARE_8107E_2 "rtl_nic/rtl8107e-2.fw"
+#define FIRMWARE_8125A_3 "rtl_nic/rtl8125a-3.fw"
#define R8169_MSG_DEFAULT \
(NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_IFUP | NETIF_MSG_IFDOWN)
/* Maximum number of multicast addresses to filter (vs. Rx-all-multicast).
The RTL chips use a 64 element hash table based on the Ethernet CRC. */
-static const int multicast_filter_limit = 32;
+#define MC_FILTER_LIMIT 32
#define TX_DMA_BURST 7 /* Maximum PCI burst, '7' is unlimited */
#define InterFrameGap 0x03 /* 3 means InterFrameGap = the shortest one */
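
As background for the hash-table comment above: the driver's rx-mode path
(unchanged by this patch) derives each multicast filter bit from the top
six bits of the big-endian Ethernet CRC of the address. A standalone
sketch of that indexing, with ether_crc() reimplemented here purely for
illustration:

#include <stdint.h>

/* Bit-serial CRC-32 with the Ethernet polynomial, feeding each octet
 * LSB-first into an MSB-first register, as the kernel's ether_crc()
 * does.
 */
static uint32_t ether_crc(int length, const uint8_t *data)
{
	uint32_t crc = 0xffffffff;
	int bit;

	while (--length >= 0) {
		uint8_t octet = *data++;

		for (bit = 0; bit < 8; bit++, octet >>= 1)
			crc = (crc << 1) ^
			      (((crc >> 31) ^ (octet & 1)) ? 0x04c11db7 : 0);
	}
	return crc;
}

/* The top six CRC bits select one of 64 bits spread across two 32-bit
 * filter registers.
 */
static void mc_filter_set(uint32_t mc_filter[2], const uint8_t addr[6])
{
	int bit_nr = ether_crc(6, addr) >> 26;	/* 0..63 */

	mc_filter[bit_nr >> 5] |= 1u << (bit_nr & 31);
}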
@@ -135,6 +136,8 @@ enum mac_version {
RTL_GIGA_MAC_VER_49,
RTL_GIGA_MAC_VER_50,
RTL_GIGA_MAC_VER_51,
+ RTL_GIGA_MAC_VER_60,
+ RTL_GIGA_MAC_VER_61,
RTL_GIGA_MAC_NONE
};
@@ -200,6 +203,8 @@ static const struct {
[RTL_GIGA_MAC_VER_49] = {"RTL8168ep/8111ep" },
[RTL_GIGA_MAC_VER_50] = {"RTL8168ep/8111ep" },
[RTL_GIGA_MAC_VER_51] = {"RTL8168ep/8111ep" },
+ [RTL_GIGA_MAC_VER_60] = {"RTL8125" },
+ [RTL_GIGA_MAC_VER_61] = {"RTL8125", FIRMWARE_8125A_3},
};
static const struct pci_device_id rtl8169_pci_tbl[] = {
@@ -220,6 +225,8 @@ static const struct pci_device_id rtl8169_pci_tbl[] = {
{ PCI_VDEVICE(USR, 0x0116) },
{ PCI_VENDOR_ID_LINKSYS, 0x1032, PCI_ANY_ID, 0x0024 },
{ 0x0001, 0x8168, PCI_ANY_ID, 0x2410 },
+ { PCI_VDEVICE(REALTEK, 0x8125) },
+ { PCI_VDEVICE(REALTEK, 0x3000) },
{}
};
@@ -271,7 +278,6 @@ enum rtl_registers {
Config3 = 0x54,
Config4 = 0x55,
Config5 = 0x56,
- MultiIntr = 0x5c,
PHYAR = 0x60,
PHYstatus = 0x6c,
RxMaxSize = 0xda,
@@ -385,6 +391,19 @@ enum rtl8168_registers {
#define EARLY_TALLY_EN (1 << 16)
};
+enum rtl8125_registers {
+ IntrMask_8125 = 0x38,
+ IntrStatus_8125 = 0x3c,
+ TxPoll_8125 = 0x90,
+ MAC0_BKP = 0x19e0,
+};
+
+#define RX_VLAN_INNER_8125 BIT(22)
+#define RX_VLAN_OUTER_8125 BIT(23)
+#define RX_VLAN_8125 (RX_VLAN_INNER_8125 | RX_VLAN_OUTER_8125)
+
+#define RX_FETCH_DFLT_8125 (8 << 27)
+
enum rtl_register_content {
/* InterruptStatusBits */
SYSErr = 0x8000,
@@ -539,11 +558,11 @@ enum rtl_tx_desc_bit_1 {
TD1_GTSENV4 = (1 << 26), /* Giant Send for IPv4 */
TD1_GTSENV6 = (1 << 25), /* Giant Send for IPv6 */
#define GTTCPHO_SHIFT 18
-#define GTTCPHO_MAX 0x7fU
+#define GTTCPHO_MAX 0x7f
/* Second doubleword. */
#define TCPHO_SHIFT 18
-#define TCPHO_MAX 0x3ffU
+#define TCPHO_MAX 0x3ff
#define TD1_MSS_SHIFT 18 /* MSS position (11 bits) */
TD1_IPv6_CS = (1 << 28), /* Calculate IPv6 checksum */
TD1_IPv4_CS = (1 << 29), /* Calculate IPv4 checksum */
@@ -569,6 +588,11 @@ enum rtl_rx_desc_bit {
#define RsvdMask 0x3fffc000
+#define RTL_GSO_MAX_SIZE_V1 32000
+#define RTL_GSO_MAX_SEGS_V1 24
+#define RTL_GSO_MAX_SIZE_V2 64000
+#define RTL_GSO_MAX_SEGS_V2 64
+
struct TxDesc {
__le32 opts1;
__le32 opts2;
@@ -638,10 +662,10 @@ struct rtl8169_private {
struct RxDesc *RxDescArray; /* 256-aligned Rx descriptor ring */
dma_addr_t TxPhyAddr;
dma_addr_t RxPhyAddr;
- void *Rx_databuff[NUM_RX_DESC]; /* Rx data buffers */
+ struct page *Rx_databuff[NUM_RX_DESC]; /* Rx data buffers */
struct ring_info tx_skb[NUM_TX_DESC]; /* Tx data buffers */
u16 cp_cmd;
- u16 irq_mask;
+ u32 irq_mask;
struct clk *clk;
struct {
@@ -691,6 +715,7 @@ MODULE_FIRMWARE(FIRMWARE_8168H_1);
MODULE_FIRMWARE(FIRMWARE_8168H_2);
MODULE_FIRMWARE(FIRMWARE_8107E_1);
MODULE_FIRMWARE(FIRMWARE_8107E_2);
+MODULE_FIRMWARE(FIRMWARE_8125A_3);
static inline struct device *tp_to_dev(struct rtl8169_private *tp)
{
@@ -723,12 +748,33 @@ static void rtl_tx_performance_tweak(struct rtl8169_private *tp, u16 force)
PCI_EXP_DEVCTL_READRQ, force);
}
+static bool rtl_is_8125(struct rtl8169_private *tp)
+{
+ return tp->mac_version >= RTL_GIGA_MAC_VER_60;
+}
+
static bool rtl_is_8168evl_up(struct rtl8169_private *tp)
{
return tp->mac_version >= RTL_GIGA_MAC_VER_34 &&
+ tp->mac_version != RTL_GIGA_MAC_VER_39 &&
+ tp->mac_version <= RTL_GIGA_MAC_VER_51;
+}
+
+static bool rtl_supports_eee(struct rtl8169_private *tp)
+{
+ return tp->mac_version >= RTL_GIGA_MAC_VER_34 &&
+ tp->mac_version != RTL_GIGA_MAC_VER_37 &&
tp->mac_version != RTL_GIGA_MAC_VER_39;
}
+static void rtl_read_mac_from_reg(struct rtl8169_private *tp, u8 *mac, int reg)
+{
+ int i;
+
+ for (i = 0; i < ETH_ALEN; i++)
+ mac[i] = RTL_R8(tp, reg + i);
+}
+
struct rtl_cond {
bool (*check)(struct rtl8169_private *);
const char *msg;
@@ -846,6 +892,14 @@ static u16 r8168_mac_ocp_read(struct rtl8169_private *tp, u32 reg)
return RTL_R32(tp, OCPDR);
}
+static void r8168_mac_ocp_modify(struct rtl8169_private *tp, u32 reg, u16 mask,
+ u16 set)
+{
+ u16 data = r8168_mac_ocp_read(tp, reg);
+
+ r8168_mac_ocp_write(tp, reg, (data & ~mask) | set);
+}
+
#define OCP_STD_PHY_BASE 0xa400
static void r8168g_mdio_write(struct rtl8169_private *tp, int reg, int value)
@@ -995,7 +1049,7 @@ static void rtl_writephy(struct rtl8169_private *tp, int location, int val)
case RTL_GIGA_MAC_VER_31:
r8168dp_2_mdio_write(tp, location, val);
break;
- case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_51:
+ case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_61:
r8168g_mdio_write(tp, location, val);
break;
default:
@@ -1012,7 +1066,7 @@ static int rtl_readphy(struct rtl8169_private *tp, int location)
case RTL_GIGA_MAC_VER_28:
case RTL_GIGA_MAC_VER_31:
return r8168dp_2_mdio_read(tp, location);
- case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_51:
+ case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_61:
return r8168g_mdio_read(tp, location);
default:
return r8169_mdio_read(tp, location);
@@ -1294,14 +1348,28 @@ static u8 rtl8168d_efuse_read(struct rtl8169_private *tp, int reg_addr)
RTL_R32(tp, EFUSEAR) & EFUSEAR_DATA_MASK : ~0;
}
-static void rtl_ack_events(struct rtl8169_private *tp, u16 bits)
+static u32 rtl_get_events(struct rtl8169_private *tp)
+{
+ if (rtl_is_8125(tp))
+ return RTL_R32(tp, IntrStatus_8125);
+ else
+ return RTL_R16(tp, IntrStatus);
+}
+
+static void rtl_ack_events(struct rtl8169_private *tp, u32 bits)
{
- RTL_W16(tp, IntrStatus, bits);
+ if (rtl_is_8125(tp))
+ RTL_W32(tp, IntrStatus_8125, bits);
+ else
+ RTL_W16(tp, IntrStatus, bits);
}
static void rtl_irq_disable(struct rtl8169_private *tp)
{
- RTL_W16(tp, IntrMask, 0);
+ if (rtl_is_8125(tp))
+ RTL_W32(tp, IntrMask_8125, 0);
+ else
+ RTL_W16(tp, IntrMask, 0);
tp->irq_enabled = 0;
}
@@ -1312,13 +1380,16 @@ static void rtl_irq_disable(struct rtl8169_private *tp)
static void rtl_irq_enable(struct rtl8169_private *tp)
{
tp->irq_enabled = 1;
- RTL_W16(tp, IntrMask, tp->irq_mask);
+ if (rtl_is_8125(tp))
+ RTL_W32(tp, IntrMask_8125, tp->irq_mask);
+ else
+ RTL_W16(tp, IntrMask, tp->irq_mask);
}
static void rtl8169_irq_mask_and_ack(struct rtl8169_private *tp)
{
rtl_irq_disable(tp);
- rtl_ack_events(tp, 0xffff);
+ rtl_ack_events(tp, 0xffffffff);
/* PCI commit */
RTL_R8(tp, ChipCmd);
}
@@ -1377,7 +1448,6 @@ static void rtl8169_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
static void __rtl8169_set_wol(struct rtl8169_private *tp, u32 wolopts)
{
- unsigned int i, tmp;
static const struct {
u32 opt;
u16 reg;
@@ -1390,20 +1460,25 @@ static void __rtl8169_set_wol(struct rtl8169_private *tp, u32 wolopts)
{ WAKE_ANY, Config5, LanWake },
{ WAKE_MAGIC, Config3, MagicPacket }
};
+ unsigned int i, tmp = ARRAY_SIZE(cfg);
u8 options;
rtl_unlock_config_regs(tp);
if (rtl_is_8168evl_up(tp)) {
- tmp = ARRAY_SIZE(cfg) - 1;
+ tmp--;
if (wolopts & WAKE_MAGIC)
rtl_eri_set_bits(tp, 0x0dc, ERIAR_MASK_0100,
MagicPacket_v2);
else
rtl_eri_clear_bits(tp, 0x0dc, ERIAR_MASK_0100,
MagicPacket_v2);
- } else {
- tmp = ARRAY_SIZE(cfg);
+ } else if (rtl_is_8125(tp)) {
+ tmp--;
+ if (wolopts & WAKE_MAGIC)
+ r8168_mac_ocp_modify(tp, 0xc0b6, 0, BIT(0));
+ else
+ r8168_mac_ocp_modify(tp, 0xc0b6, BIT(0), 0);
}
for (i = 0; i < tmp; i++) {
@@ -1414,18 +1489,22 @@ static void __rtl8169_set_wol(struct rtl8169_private *tp, u32 wolopts)
}
switch (tp->mac_version) {
- case RTL_GIGA_MAC_VER_02 ... RTL_GIGA_MAC_VER_17:
+ case RTL_GIGA_MAC_VER_02 ... RTL_GIGA_MAC_VER_06:
options = RTL_R8(tp, Config1) & ~PMEnable;
if (wolopts)
options |= PMEnable;
RTL_W8(tp, Config1, options);
break;
- default:
+ case RTL_GIGA_MAC_VER_34:
+ case RTL_GIGA_MAC_VER_37:
+ case RTL_GIGA_MAC_VER_39 ... RTL_GIGA_MAC_VER_51:
options = RTL_R8(tp, Config2) & ~PME_SIGNAL;
if (wolopts)
options |= PME_SIGNAL;
RTL_W8(tp, Config2, options);
break;
+ default:
+ break;
}
rtl_lock_config_regs(tp);
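__rtl8169_set_wol() drives wake-on-LAN from a (wake-option, register, bit-mask) table; chips that signal magic packets elsewhere (8168e-vl via ERI, 8125 via an OCP bit) drop the table's trailing row by decrementing tmp before the walk. A sketch of such a table walk, assuming the loop body simply mirrors each requested option into its mask bit (helpers and values hypothetical):

    #include <stdint.h>

    #define WAKE_PHY   0x01                 /* option bits: illustrative only */
    #define WAKE_MAGIC 0x20

    struct wol_cfg { uint32_t opt; int reg; uint8_t mask; };

    static uint8_t regs[256];
    static uint8_t read_reg(int reg)           { return regs[reg]; }
    static void write_reg(int reg, uint8_t v)  { regs[reg] = v; }

    static void apply_wol(const struct wol_cfg *cfg, unsigned int n, uint32_t wolopts)
    {
            unsigned int i;

            for (i = 0; i < n; i++) {       /* n may already exclude trailing rows */
                    uint8_t options = read_reg(cfg[i].reg) & ~cfg[i].mask;

                    if (wolopts & cfg[i].opt)
                            options |= cfg[i].mask;
                    write_reg(cfg[i].reg, options);
            }
    }

    int main(void)
    {
            const struct wol_cfg cfg[] = { { WAKE_PHY, 0x10, 0x01 },
                                           { WAKE_MAGIC, 0x11, 0x20 } };

            apply_wol(cfg, 2, WAKE_MAGIC);  /* sets bit 5 of reg 0x11 only */
            return 0;
    }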
@@ -1505,6 +1584,13 @@ static int rtl8169_set_features(struct net_device *dev,
else
rx_config &= ~(AcceptErr | AcceptRunt);
+ if (rtl_is_8125(tp)) {
+ if (features & NETIF_F_HW_VLAN_CTAG_RX)
+ rx_config |= RX_VLAN_8125;
+ else
+ rx_config &= ~RX_VLAN_8125;
+ }
+
RTL_W32(tp, RxConfig, rx_config);
if (features & NETIF_F_RXCSUM)
@@ -1512,10 +1598,12 @@ static int rtl8169_set_features(struct net_device *dev,
else
tp->cp_cmd &= ~RxChkSum;
- if (features & NETIF_F_HW_VLAN_CTAG_RX)
- tp->cp_cmd |= RxVlan;
- else
- tp->cp_cmd &= ~RxVlan;
+ if (!rtl_is_8125(tp)) {
+ if (features & NETIF_F_HW_VLAN_CTAG_RX)
+ tp->cp_cmd |= RxVlan;
+ else
+ tp->cp_cmd &= ~RxVlan;
+ }
RTL_W16(tp, CPlusCmd, tp->cp_cmd);
RTL_R16(tp, CPlusCmd);
@@ -1814,6 +1902,9 @@ static int rtl_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
int i;
u16 w;
+ if (rtl_is_8125(tp))
+ return -EOPNOTSUPP;
+
memset(ec, 0, sizeof(*ec));
/* get rx/tx scale corresponding to current speed and CPlusCmd[0:1] */
@@ -1882,6 +1973,9 @@ static int rtl_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
u16 w = 0, cp01;
int i;
+ if (rtl_is_8125(tp))
+ return -EOPNOTSUPP;
+
scale = rtl_coalesce_choose_scale(dev,
max(p[0].usecs, p[1].usecs) * 1000, &cp01);
if (IS_ERR(scale))
@@ -1929,144 +2023,40 @@ static int rtl_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
return 0;
}
-static int rtl_get_eee_supp(struct rtl8169_private *tp)
-{
- struct phy_device *phydev = tp->phydev;
- int ret;
-
- switch (tp->mac_version) {
- case RTL_GIGA_MAC_VER_34:
- case RTL_GIGA_MAC_VER_35:
- case RTL_GIGA_MAC_VER_36:
- case RTL_GIGA_MAC_VER_38:
- ret = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_PCS_EEE_ABLE);
- break;
- case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_51:
- ret = phy_read_paged(phydev, 0x0a5c, 0x12);
- break;
- default:
- ret = -EPROTONOSUPPORT;
- break;
- }
-
- return ret;
-}
-
-static int rtl_get_eee_lpadv(struct rtl8169_private *tp)
-{
- struct phy_device *phydev = tp->phydev;
- int ret;
-
- switch (tp->mac_version) {
- case RTL_GIGA_MAC_VER_34:
- case RTL_GIGA_MAC_VER_35:
- case RTL_GIGA_MAC_VER_36:
- case RTL_GIGA_MAC_VER_38:
- ret = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_EEE_LPABLE);
- break;
- case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_51:
- ret = phy_read_paged(phydev, 0x0a5d, 0x11);
- break;
- default:
- ret = -EPROTONOSUPPORT;
- break;
- }
-
- return ret;
-}
-
-static int rtl_get_eee_adv(struct rtl8169_private *tp)
-{
- struct phy_device *phydev = tp->phydev;
- int ret;
-
- switch (tp->mac_version) {
- case RTL_GIGA_MAC_VER_34:
- case RTL_GIGA_MAC_VER_35:
- case RTL_GIGA_MAC_VER_36:
- case RTL_GIGA_MAC_VER_38:
- ret = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_EEE_ADV);
- break;
- case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_51:
- ret = phy_read_paged(phydev, 0x0a5d, 0x10);
- break;
- default:
- ret = -EPROTONOSUPPORT;
- break;
- }
-
- return ret;
-}
-
-static int rtl_set_eee_adv(struct rtl8169_private *tp, int val)
-{
- struct phy_device *phydev = tp->phydev;
- int ret = 0;
-
- switch (tp->mac_version) {
- case RTL_GIGA_MAC_VER_34:
- case RTL_GIGA_MAC_VER_35:
- case RTL_GIGA_MAC_VER_36:
- case RTL_GIGA_MAC_VER_38:
- ret = phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_EEE_ADV, val);
- break;
- case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_51:
- phy_write_paged(phydev, 0x0a5d, 0x10, val);
- break;
- default:
- ret = -EPROTONOSUPPORT;
- break;
- }
-
- return ret;
-}
-
static int rtl8169_get_eee(struct net_device *dev, struct ethtool_eee *data)
{
struct rtl8169_private *tp = netdev_priv(dev);
struct device *d = tp_to_dev(tp);
int ret;
+ if (!rtl_supports_eee(tp))
+ return -EOPNOTSUPP;
+
pm_runtime_get_noresume(d);
if (!pm_runtime_active(d)) {
ret = -EOPNOTSUPP;
- goto out;
+ } else {
+ ret = phy_ethtool_get_eee(tp->phydev, data);
}
- /* Get Supported EEE */
- ret = rtl_get_eee_supp(tp);
- if (ret < 0)
- goto out;
- data->supported = mmd_eee_cap_to_ethtool_sup_t(ret);
-
- /* Get advertisement EEE */
- ret = rtl_get_eee_adv(tp);
- if (ret < 0)
- goto out;
- data->advertised = mmd_eee_adv_to_ethtool_adv_t(ret);
- data->eee_enabled = !!data->advertised;
-
- /* Get LP advertisement EEE */
- ret = rtl_get_eee_lpadv(tp);
- if (ret < 0)
- goto out;
- data->lp_advertised = mmd_eee_adv_to_ethtool_adv_t(ret);
- data->eee_active = !!(data->advertised & data->lp_advertised);
-out:
pm_runtime_put_noidle(d);
- return ret < 0 ? ret : 0;
+
+ return ret;
}
static int rtl8169_set_eee(struct net_device *dev, struct ethtool_eee *data)
{
struct rtl8169_private *tp = netdev_priv(dev);
struct device *d = tp_to_dev(tp);
- int old_adv, adv = 0, cap, ret;
+ int ret;
+
+ if (!rtl_supports_eee(tp))
+ return -EOPNOTSUPP;
pm_runtime_get_noresume(d);
- if (!dev->phydev || !pm_runtime_active(d)) {
+ if (!pm_runtime_active(d)) {
ret = -EOPNOTSUPP;
goto out;
}
@@ -2077,38 +2067,10 @@ static int rtl8169_set_eee(struct net_device *dev, struct ethtool_eee *data)
goto out;
}
- /* Get Supported EEE */
- ret = rtl_get_eee_supp(tp);
- if (ret < 0)
- goto out;
- cap = ret;
-
- ret = rtl_get_eee_adv(tp);
- if (ret < 0)
- goto out;
- old_adv = ret;
-
- if (data->eee_enabled) {
- adv = !data->advertised ? cap :
- ethtool_adv_to_mmd_eee_adv_t(data->advertised) & cap;
- /* Mask prohibited EEE modes */
- adv &= ~dev->phydev->eee_broken_modes;
- }
-
- if (old_adv != adv) {
- ret = rtl_set_eee_adv(tp, adv);
- if (ret < 0)
- goto out;
-
- /* Restart autonegotiation so the new modes get sent to the
- * link partner.
- */
- ret = phy_restart_aneg(dev->phydev);
- }
-
+ ret = phy_ethtool_set_eee(tp->phydev, data);
out:
pm_runtime_put_noidle(d);
- return ret < 0 ? ret : 0;
+ return ret;
}
static const struct ethtool_ops rtl8169_ethtool_ops = {
@@ -2135,10 +2097,11 @@ static const struct ethtool_ops rtl8169_ethtool_ops = {
static void rtl_enable_eee(struct rtl8169_private *tp)
{
- int supported = rtl_get_eee_supp(tp);
+ struct phy_device *phydev = tp->phydev;
+ int supported = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_PCS_EEE_ABLE);
if (supported > 0)
- rtl_set_eee_adv(tp, supported);
+ phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_EEE_ADV, supported);
}
static void rtl8169_get_mac_version(struct rtl8169_private *tp)
@@ -2159,6 +2122,10 @@ static void rtl8169_get_mac_version(struct rtl8169_private *tp)
u16 val;
u16 mac_version;
} mac_info[] = {
+ /* 8125 family. */
+ { 0x7cf, 0x608, RTL_GIGA_MAC_VER_60 },
+ { 0x7c8, 0x608, RTL_GIGA_MAC_VER_61 },
+
/* 8168EP family. */
{ 0x7cf, 0x502, RTL_GIGA_MAC_VER_51 },
{ 0x7cf, 0x501, RTL_GIGA_MAC_VER_50 },
@@ -2304,6 +2271,12 @@ static void rtl8168_config_eee_mac(struct rtl8169_private *tp)
rtl_eri_set_bits(tp, 0x1b0, ERIAR_MASK_1111, 0x0003);
}
+static void rtl8125_config_eee_mac(struct rtl8169_private *tp)
+{
+ r8168_mac_ocp_modify(tp, 0xe040, 0, BIT(1) | BIT(0));
+ r8168_mac_ocp_modify(tp, 0xeb62, 0, BIT(2) | BIT(1));
+}
+
static void rtl8168f_config_eee_phy(struct rtl8169_private *tp)
{
struct phy_device *phydev = tp->phydev;
@@ -2324,6 +2297,26 @@ static void rtl8168g_config_eee_phy(struct rtl8169_private *tp)
phy_modify_paged(tp->phydev, 0x0a43, 0x11, 0, BIT(4));
}
+static void rtl8168h_config_eee_phy(struct rtl8169_private *tp)
+{
+ struct phy_device *phydev = tp->phydev;
+
+ rtl8168g_config_eee_phy(tp);
+
+ phy_modify_paged(phydev, 0xa4a, 0x11, 0x0000, 0x0200);
+ phy_modify_paged(phydev, 0xa42, 0x14, 0x0000, 0x0080);
+}
+
+static void rtl8125_config_eee_phy(struct rtl8169_private *tp)
+{
+ struct phy_device *phydev = tp->phydev;
+
+ rtl8168h_config_eee_phy(tp);
+
+ phy_modify_paged(phydev, 0xa6d, 0x12, 0x0001, 0x0000);
+ phy_modify_paged(phydev, 0xa6d, 0x14, 0x0010, 0x0000);
+}
+
static void rtl8169s_hw_phy_config(struct rtl8169_private *tp)
{
static const struct phy_reg phy_reg_init[] = {
@@ -3391,7 +3384,7 @@ static void rtl8168h_1_hw_phy_config(struct rtl8169_private *tp)
phy_modify_paged(tp->phydev, 0x0a44, 0x11, BIT(7), 0);
rtl8168g_disable_aldps(tp);
- rtl8168g_config_eee_phy(tp);
+ rtl8168h_config_eee_phy(tp);
rtl_enable_eee(tp);
}
@@ -3644,6 +3637,134 @@ static void rtl8106e_hw_phy_config(struct rtl8169_private *tp)
rtl_eri_write(tp, 0x1d0, ERIAR_MASK_0011, 0x0000);
}
+static void rtl8125_1_hw_phy_config(struct rtl8169_private *tp)
+{
+ struct phy_device *phydev = tp->phydev;
+
+ phy_modify_paged(phydev, 0xad4, 0x10, 0x03ff, 0x0084);
+ phy_modify_paged(phydev, 0xad4, 0x17, 0x0000, 0x0010);
+ phy_modify_paged(phydev, 0xad1, 0x13, 0x03ff, 0x0006);
+ phy_modify_paged(phydev, 0xad3, 0x11, 0x003f, 0x0006);
+ phy_modify_paged(phydev, 0xac0, 0x14, 0x0000, 0x1100);
+ phy_modify_paged(phydev, 0xac8, 0x15, 0xf000, 0x7000);
+ phy_modify_paged(phydev, 0xad1, 0x14, 0x0000, 0x0400);
+ phy_modify_paged(phydev, 0xad1, 0x15, 0x0000, 0x03ff);
+ phy_modify_paged(phydev, 0xad1, 0x16, 0x0000, 0x03ff);
+
+ phy_write(phydev, 0x1f, 0x0a43);
+ phy_write(phydev, 0x13, 0x80ea);
+ phy_modify(phydev, 0x14, 0xff00, 0xc400);
+ phy_write(phydev, 0x13, 0x80eb);
+ phy_modify(phydev, 0x14, 0x0700, 0x0300);
+ phy_write(phydev, 0x13, 0x80f8);
+ phy_modify(phydev, 0x14, 0xff00, 0x1c00);
+ phy_write(phydev, 0x13, 0x80f1);
+ phy_modify(phydev, 0x14, 0xff00, 0x3000);
+ phy_write(phydev, 0x13, 0x80fe);
+ phy_modify(phydev, 0x14, 0xff00, 0xa500);
+ phy_write(phydev, 0x13, 0x8102);
+ phy_modify(phydev, 0x14, 0xff00, 0x5000);
+ phy_write(phydev, 0x13, 0x8105);
+ phy_modify(phydev, 0x14, 0xff00, 0x3300);
+ phy_write(phydev, 0x13, 0x8100);
+ phy_modify(phydev, 0x14, 0xff00, 0x7000);
+ phy_write(phydev, 0x13, 0x8104);
+ phy_modify(phydev, 0x14, 0xff00, 0xf000);
+ phy_write(phydev, 0x13, 0x8106);
+ phy_modify(phydev, 0x14, 0xff00, 0x6500);
+ phy_write(phydev, 0x13, 0x80dc);
+ phy_modify(phydev, 0x14, 0xff00, 0xed00);
+ phy_write(phydev, 0x13, 0x80df);
+ phy_set_bits(phydev, 0x14, BIT(8));
+ phy_write(phydev, 0x13, 0x80e1);
+ phy_clear_bits(phydev, 0x14, BIT(8));
+ phy_write(phydev, 0x1f, 0x0000);
+
+ phy_modify_paged(phydev, 0xbf0, 0x13, 0x003f, 0x0038);
+ phy_write_paged(phydev, 0xa43, 0x13, 0x819f);
+ phy_write_paged(phydev, 0xa43, 0x14, 0xd0b6);
+
+ phy_write_paged(phydev, 0xbc3, 0x12, 0x5555);
+ phy_modify_paged(phydev, 0xbf0, 0x15, 0x0e00, 0x0a00);
+ phy_modify_paged(phydev, 0xa5c, 0x10, 0x0400, 0x0000);
+ phy_modify_paged(phydev, 0xa44, 0x11, 0x0000, 0x0800);
+
+ rtl8125_config_eee_phy(tp);
+ rtl_enable_eee(tp);
+}
+
+static void rtl8125_2_hw_phy_config(struct rtl8169_private *tp)
+{
+ struct phy_device *phydev = tp->phydev;
+ int i;
+
+ phy_modify_paged(phydev, 0xad4, 0x17, 0x0000, 0x0010);
+ phy_modify_paged(phydev, 0xad1, 0x13, 0x03ff, 0x03ff);
+ phy_modify_paged(phydev, 0xad3, 0x11, 0x003f, 0x0006);
+ phy_modify_paged(phydev, 0xac0, 0x14, 0x1100, 0x0000);
+ phy_modify_paged(phydev, 0xacc, 0x10, 0x0003, 0x0002);
+ phy_modify_paged(phydev, 0xad4, 0x10, 0x00e7, 0x0044);
+ phy_modify_paged(phydev, 0xac1, 0x12, 0x0080, 0x0000);
+ phy_modify_paged(phydev, 0xac8, 0x10, 0x0300, 0x0000);
+ phy_modify_paged(phydev, 0xac5, 0x17, 0x0007, 0x0002);
+ phy_write_paged(phydev, 0xad4, 0x16, 0x00a8);
+ phy_write_paged(phydev, 0xac5, 0x16, 0x01ff);
+ phy_modify_paged(phydev, 0xac8, 0x15, 0x00f0, 0x0030);
+
+ phy_write(phydev, 0x1f, 0x0b87);
+ phy_write(phydev, 0x16, 0x80a2);
+ phy_write(phydev, 0x17, 0x0153);
+ phy_write(phydev, 0x16, 0x809c);
+ phy_write(phydev, 0x17, 0x0153);
+ phy_write(phydev, 0x1f, 0x0000);
+
+ phy_write(phydev, 0x1f, 0x0a43);
+ phy_write(phydev, 0x13, 0x81B3);
+ phy_write(phydev, 0x14, 0x0043);
+ phy_write(phydev, 0x14, 0x00A7);
+ phy_write(phydev, 0x14, 0x00D6);
+ phy_write(phydev, 0x14, 0x00EC);
+ phy_write(phydev, 0x14, 0x00F6);
+ phy_write(phydev, 0x14, 0x00FB);
+ phy_write(phydev, 0x14, 0x00FD);
+ phy_write(phydev, 0x14, 0x00FF);
+ phy_write(phydev, 0x14, 0x00BB);
+ phy_write(phydev, 0x14, 0x0058);
+ phy_write(phydev, 0x14, 0x0029);
+ phy_write(phydev, 0x14, 0x0013);
+ phy_write(phydev, 0x14, 0x0009);
+ phy_write(phydev, 0x14, 0x0004);
+ phy_write(phydev, 0x14, 0x0002);
+ for (i = 0; i < 25; i++)
+ phy_write(phydev, 0x14, 0x0000);
+
+ phy_write(phydev, 0x13, 0x8257);
+ phy_write(phydev, 0x14, 0x020F);
+
+ phy_write(phydev, 0x13, 0x80EA);
+ phy_write(phydev, 0x14, 0x7843);
+ phy_write(phydev, 0x1f, 0x0000);
+
+ rtl_apply_firmware(tp);
+
+ phy_modify_paged(phydev, 0xd06, 0x14, 0x0000, 0x2000);
+
+ phy_write(phydev, 0x1f, 0x0a43);
+ phy_write(phydev, 0x13, 0x81a2);
+ phy_set_bits(phydev, 0x14, BIT(8));
+ phy_write(phydev, 0x1f, 0x0000);
+
+ phy_modify_paged(phydev, 0xb54, 0x16, 0xff00, 0xdb00);
+ phy_modify_paged(phydev, 0xa45, 0x12, 0x0001, 0x0000);
+ phy_modify_paged(phydev, 0xa5d, 0x12, 0x0000, 0x0020);
+ phy_modify_paged(phydev, 0xad4, 0x17, 0x0010, 0x0000);
+ phy_modify_paged(phydev, 0xa86, 0x15, 0x0001, 0x0000);
+ phy_modify_paged(phydev, 0xa44, 0x11, 0x0000, 0x0800);
+
+ rtl8125_config_eee_phy(tp);
+ rtl_enable_eee(tp);
+}
+
static void rtl_hw_phy_config(struct net_device *dev)
{
static const rtl_generic_fct phy_configs[] = {
@@ -3699,6 +3820,8 @@ static void rtl_hw_phy_config(struct net_device *dev)
[RTL_GIGA_MAC_VER_49] = rtl8168ep_1_hw_phy_config,
[RTL_GIGA_MAC_VER_50] = rtl8168ep_2_hw_phy_config,
[RTL_GIGA_MAC_VER_51] = rtl8168ep_2_hw_phy_config,
+ [RTL_GIGA_MAC_VER_60] = rtl8125_1_hw_phy_config,
+ [RTL_GIGA_MAC_VER_61] = rtl8125_2_hw_phy_config,
};
struct rtl8169_private *tp = netdev_priv(dev);
@@ -3826,6 +3949,8 @@ static void rtl_pll_power_down(struct rtl8169_private *tp)
case RTL_GIGA_MAC_VER_48:
case RTL_GIGA_MAC_VER_50:
case RTL_GIGA_MAC_VER_51:
+ case RTL_GIGA_MAC_VER_60:
+ case RTL_GIGA_MAC_VER_61:
RTL_W8(tp, PMCH, RTL_R8(tp, PMCH) & ~0x80);
break;
case RTL_GIGA_MAC_VER_40:
@@ -3855,6 +3980,8 @@ static void rtl_pll_power_up(struct rtl8169_private *tp)
case RTL_GIGA_MAC_VER_48:
case RTL_GIGA_MAC_VER_50:
case RTL_GIGA_MAC_VER_51:
+ case RTL_GIGA_MAC_VER_60:
+ case RTL_GIGA_MAC_VER_61:
RTL_W8(tp, PMCH, RTL_R8(tp, PMCH) | 0xc0);
break;
case RTL_GIGA_MAC_VER_40:
@@ -3887,6 +4014,10 @@ static void rtl_init_rxcfg(struct rtl8169_private *tp)
case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_51:
RTL_W32(tp, RxConfig, RX128_INT_EN | RX_MULTI_EN | RX_DMA_BURST | RX_EARLY_OFF);
break;
+ case RTL_GIGA_MAC_VER_60 ... RTL_GIGA_MAC_VER_61:
+ RTL_W32(tp, RxConfig, RX_FETCH_DFLT_8125 | RX_VLAN_8125 |
+ RX_DMA_BURST);
+ break;
default:
RTL_W32(tp, RxConfig, RX128_INT_EN | RX_DMA_BURST);
break;
@@ -4146,54 +4277,46 @@ static void rtl8169_set_magic_reg(struct rtl8169_private *tp, unsigned mac_versi
static void rtl_set_rx_mode(struct net_device *dev)
{
+ u32 rx_mode = AcceptBroadcast | AcceptMyPhys | AcceptMulticast;
+ /* Multicast hash filter */
+ u32 mc_filter[2] = { 0xffffffff, 0xffffffff };
struct rtl8169_private *tp = netdev_priv(dev);
- u32 mc_filter[2]; /* Multicast hash filter */
- int rx_mode;
- u32 tmp = 0;
+ u32 tmp;
if (dev->flags & IFF_PROMISC) {
/* Unconditionally log net taps. */
netif_notice(tp, link, dev, "Promiscuous mode enabled\n");
- rx_mode =
- AcceptBroadcast | AcceptMulticast | AcceptMyPhys |
- AcceptAllPhys;
- mc_filter[1] = mc_filter[0] = 0xffffffff;
- } else if ((netdev_mc_count(dev) > multicast_filter_limit) ||
- (dev->flags & IFF_ALLMULTI)) {
- /* Too many to filter perfectly -- accept all multicasts. */
- rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys;
- mc_filter[1] = mc_filter[0] = 0xffffffff;
+ rx_mode |= AcceptAllPhys;
+ } else if (netdev_mc_count(dev) > MC_FILTER_LIMIT ||
+ dev->flags & IFF_ALLMULTI ||
+ tp->mac_version == RTL_GIGA_MAC_VER_35) {
+ /* accept all multicasts */
+ } else if (netdev_mc_empty(dev)) {
+ rx_mode &= ~AcceptMulticast;
} else {
struct netdev_hw_addr *ha;
- rx_mode = AcceptBroadcast | AcceptMyPhys;
mc_filter[1] = mc_filter[0] = 0;
netdev_for_each_mc_addr(ha, dev) {
- int bit_nr = ether_crc(ETH_ALEN, ha->addr) >> 26;
- mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
- rx_mode |= AcceptMulticast;
+ u32 bit_nr = ether_crc(ETH_ALEN, ha->addr) >> 26;
+ mc_filter[bit_nr >> 5] |= BIT(bit_nr & 31);
+ }
+
+ if (tp->mac_version > RTL_GIGA_MAC_VER_06) {
+ tmp = mc_filter[0];
+ mc_filter[0] = swab32(mc_filter[1]);
+ mc_filter[1] = swab32(tmp);
}
}
if (dev->features & NETIF_F_RXALL)
rx_mode |= (AcceptErr | AcceptRunt);
- tmp = (RTL_R32(tp, RxConfig) & ~RX_CONFIG_ACCEPT_MASK) | rx_mode;
-
- if (tp->mac_version > RTL_GIGA_MAC_VER_06) {
- u32 data = mc_filter[0];
-
- mc_filter[0] = swab32(mc_filter[1]);
- mc_filter[1] = swab32(data);
- }
-
- if (tp->mac_version == RTL_GIGA_MAC_VER_35)
- mc_filter[1] = mc_filter[0] = 0xffffffff;
-
RTL_W32(tp, MAR0 + 4, mc_filter[1]);
RTL_W32(tp, MAR0 + 0, mc_filter[0]);
- RTL_W32(tp, RxConfig, tmp);
+ tmp = RTL_R32(tp, RxConfig);
+ RTL_W32(tp, RxConfig, (tmp & ~RX_CONFIG_ACCEPT_MASK) | rx_mode);
}
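The reworked rtl_set_rx_mode() starts from accept-all defaults (both filter words 0xffffffff) and only computes the 64-bit multicast hash when perfect filtering is feasible; the hash index is the top 6 bits of the Ethernet CRC of the address, and on chips newer than VER_06 the two filter words are swapped and byte-reversed. A self-contained model of the hash step (ether_crc() reimplemented here for illustration):

    #include <stdint.h>
    #include <stdio.h>

    /* MSB-first Ethernet CRC-32, matching what the kernel's ether_crc() yields */
    static uint32_t ether_crc(int len, const uint8_t *p)
    {
            uint32_t crc = 0xffffffff;

            while (len--) {
                    uint8_t byte = *p++;
                    int bit;

                    for (bit = 0; bit < 8; bit++, byte >>= 1)
                            crc = (crc << 1) ^
                                  ((((crc >> 31) ^ byte) & 1) ? 0x04c11db7 : 0);
            }
            return crc;
    }

    int main(void)
    {
            const uint8_t mc_addr[6] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 };
            uint32_t mc_filter[2] = { 0, 0 };
            uint32_t bit_nr = ether_crc(6, mc_addr) >> 26;  /* top 6 CRC bits */

            mc_filter[bit_nr >> 5] |= 1u << (bit_nr & 31);  /* two 32-bit words */
            printf("bit %u -> filter word %u\n",
                   (unsigned)bit_nr, (unsigned)(bit_nr >> 5));
            return 0;
    }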
DECLARE_RTL_COND(rtl_csiar_cond)
@@ -4407,7 +4530,7 @@ static void rtl_hw_start_8168c_2(struct rtl8169_private *tp)
{
static const struct ephy_info e_info_8168c_2[] = {
{ 0x01, 0, 0x0001 },
- { 0x03, 0x0400, 0x0220 }
+ { 0x03, 0x0400, 0x0020 }
};
rtl_set_def_aspm_entry_latency(tp);
@@ -4454,7 +4577,8 @@ static void rtl_hw_start_8168d_4(struct rtl8169_private *tp)
static const struct ephy_info e_info_8168d_4[] = {
{ 0x0b, 0x0000, 0x0048 },
{ 0x19, 0x0020, 0x0050 },
- { 0x0c, 0x0100, 0x0020 }
+ { 0x0c, 0x0100, 0x0020 },
+ { 0x10, 0x0004, 0x0000 },
};
rtl_set_def_aspm_entry_latency(tp);
@@ -4504,7 +4628,9 @@ static void rtl_hw_start_8168e_2(struct rtl8169_private *tp)
{
static const struct ephy_info e_info_8168e_2[] = {
{ 0x09, 0x0000, 0x0080 },
- { 0x19, 0x0000, 0x0224 }
+ { 0x19, 0x0000, 0x0224 },
+ { 0x00, 0x0000, 0x0004 },
+ { 0x0c, 0x3df0, 0x0200 },
};
rtl_set_def_aspm_entry_latency(tp);
@@ -4566,7 +4692,9 @@ static void rtl_hw_start_8168f_1(struct rtl8169_private *tp)
{ 0x06, 0x00c0, 0x0020 },
{ 0x08, 0x0001, 0x0002 },
{ 0x09, 0x0000, 0x0080 },
- { 0x19, 0x0000, 0x0224 }
+ { 0x19, 0x0000, 0x0224 },
+ { 0x00, 0x0000, 0x0004 },
+ { 0x0c, 0x3df0, 0x0200 },
};
rtl_hw_start_8168f(tp);
@@ -4581,8 +4709,9 @@ static void rtl_hw_start_8411(struct rtl8169_private *tp)
static const struct ephy_info e_info_8168f_1[] = {
{ 0x06, 0x00c0, 0x0020 },
{ 0x0f, 0xffff, 0x5200 },
- { 0x1e, 0x0000, 0x4000 },
- { 0x19, 0x0000, 0x0224 }
+ { 0x19, 0x0000, 0x0224 },
+ { 0x00, 0x0000, 0x0004 },
+ { 0x0c, 0x3df0, 0x0200 },
};
rtl_hw_start_8168f(tp);
@@ -4621,8 +4750,8 @@ static void rtl_hw_start_8168g(struct rtl8169_private *tp)
static void rtl_hw_start_8168g_1(struct rtl8169_private *tp)
{
static const struct ephy_info e_info_8168g_1[] = {
- { 0x00, 0x0000, 0x0008 },
- { 0x0c, 0x37d0, 0x0820 },
+ { 0x00, 0x0008, 0x0000 },
+ { 0x0c, 0x3ff0, 0x0820 },
{ 0x1e, 0x0000, 0x0001 },
{ 0x19, 0x8000, 0x0000 }
};
@@ -4638,10 +4767,15 @@ static void rtl_hw_start_8168g_1(struct rtl8169_private *tp)
static void rtl_hw_start_8168g_2(struct rtl8169_private *tp)
{
static const struct ephy_info e_info_8168g_2[] = {
- { 0x00, 0x0000, 0x0008 },
- { 0x0c, 0x3df0, 0x0200 },
- { 0x19, 0xffff, 0xfc00 },
- { 0x1e, 0xffff, 0x20eb }
+ { 0x00, 0x0008, 0x0000 },
+ { 0x0c, 0x3ff0, 0x0820 },
+ { 0x19, 0xffff, 0x7c00 },
+ { 0x1e, 0xffff, 0x20eb },
+ { 0x0d, 0xffff, 0x1666 },
+ { 0x00, 0xffff, 0x10a3 },
+ { 0x06, 0xffff, 0xf050 },
+ { 0x04, 0x0000, 0x0010 },
+ { 0x1d, 0x4000, 0x0000 },
};
rtl_hw_start_8168g(tp);
@@ -4655,11 +4789,16 @@ static void rtl_hw_start_8168g_2(struct rtl8169_private *tp)
static void rtl_hw_start_8411_2(struct rtl8169_private *tp)
{
static const struct ephy_info e_info_8411_2[] = {
- { 0x00, 0x0000, 0x0008 },
- { 0x0c, 0x3df0, 0x0200 },
- { 0x0f, 0xffff, 0x5200 },
- { 0x19, 0x0020, 0x0000 },
- { 0x1e, 0x0000, 0x2000 }
+ { 0x00, 0x0008, 0x0000 },
+ { 0x0c, 0x37d0, 0x0820 },
+ { 0x1e, 0x0000, 0x0001 },
+ { 0x19, 0x8021, 0x0000 },
+ { 0x1e, 0x0000, 0x2000 },
+ { 0x0d, 0x0100, 0x0200 },
+ { 0x00, 0x0000, 0x0080 },
+ { 0x06, 0x0000, 0x0010 },
+ { 0x04, 0x0000, 0x0010 },
+ { 0x1d, 0x0000, 0x4000 },
};
rtl_hw_start_8168g(tp);
@@ -4809,16 +4948,15 @@ static void rtl_hw_start_8411_2(struct rtl8169_private *tp)
static void rtl_hw_start_8168h_1(struct rtl8169_private *tp)
{
- int rg_saw_cnt;
- u32 data;
static const struct ephy_info e_info_8168h_1[] = {
{ 0x1e, 0x0800, 0x0001 },
{ 0x1d, 0x0000, 0x0800 },
{ 0x05, 0xffff, 0x2089 },
{ 0x06, 0xffff, 0x5881 },
- { 0x04, 0xffff, 0x154a },
+ { 0x04, 0xffff, 0x854a },
{ 0x01, 0xffff, 0x068b }
};
+ int rg_saw_cnt;
/* disable aspm and clock request before access ephy */
rtl_hw_aspm_clkreq_enable(tp, false);
@@ -4863,31 +5001,13 @@ static void rtl_hw_start_8168h_1(struct rtl8169_private *tp)
sw_cnt_1ms_ini = 16000000/rg_saw_cnt;
sw_cnt_1ms_ini &= 0x0fff;
- data = r8168_mac_ocp_read(tp, 0xd412);
- data &= ~0x0fff;
- data |= sw_cnt_1ms_ini;
- r8168_mac_ocp_write(tp, 0xd412, data);
+ r8168_mac_ocp_modify(tp, 0xd412, 0x0fff, sw_cnt_1ms_ini);
}
- data = r8168_mac_ocp_read(tp, 0xe056);
- data &= ~0xf0;
- data |= 0x70;
- r8168_mac_ocp_write(tp, 0xe056, data);
-
- data = r8168_mac_ocp_read(tp, 0xe052);
- data &= ~0x6000;
- data |= 0x8008;
- r8168_mac_ocp_write(tp, 0xe052, data);
-
- data = r8168_mac_ocp_read(tp, 0xe0d6);
- data &= ~0x01ff;
- data |= 0x017f;
- r8168_mac_ocp_write(tp, 0xe0d6, data);
-
- data = r8168_mac_ocp_read(tp, 0xd420);
- data &= ~0x0fff;
- data |= 0x047f;
- r8168_mac_ocp_write(tp, 0xd420, data);
+ r8168_mac_ocp_modify(tp, 0xe056, 0x00f0, 0x0070);
+ r8168_mac_ocp_modify(tp, 0xe052, 0x6000, 0x8008);
+ r8168_mac_ocp_modify(tp, 0xe0d6, 0x01ff, 0x017f);
+ r8168_mac_ocp_modify(tp, 0xd420, 0x0fff, 0x047f);
r8168_mac_ocp_write(tp, 0xe63e, 0x0001);
r8168_mac_ocp_write(tp, 0xe63e, 0x0000);
@@ -4969,12 +5089,11 @@ static void rtl_hw_start_8168ep_2(struct rtl8169_private *tp)
static void rtl_hw_start_8168ep_3(struct rtl8169_private *tp)
{
- u32 data;
static const struct ephy_info e_info_8168ep_3[] = {
- { 0x00, 0xffff, 0x10a3 },
- { 0x19, 0xffff, 0x7c00 },
- { 0x1e, 0xffff, 0x20eb },
- { 0x0d, 0xffff, 0x1666 }
+ { 0x00, 0x0000, 0x0080 },
+ { 0x0d, 0x0100, 0x0200 },
+ { 0x19, 0x8021, 0x0000 },
+ { 0x1e, 0x0000, 0x2000 },
};
/* disable aspm and clock request before access ephy */
@@ -4986,18 +5105,9 @@ static void rtl_hw_start_8168ep_3(struct rtl8169_private *tp)
RTL_W8(tp, DLLPR, RTL_R8(tp, DLLPR) & ~PFM_EN);
RTL_W8(tp, MISC_1, RTL_R8(tp, MISC_1) & ~PFM_D3COLD_EN);
- data = r8168_mac_ocp_read(tp, 0xd3e2);
- data &= 0xf000;
- data |= 0x0271;
- r8168_mac_ocp_write(tp, 0xd3e2, data);
-
- data = r8168_mac_ocp_read(tp, 0xd3e4);
- data &= 0xff00;
- r8168_mac_ocp_write(tp, 0xd3e4, data);
-
- data = r8168_mac_ocp_read(tp, 0xe860);
- data |= 0x0080;
- r8168_mac_ocp_write(tp, 0xe860, data);
+ r8168_mac_ocp_modify(tp, 0xd3e2, 0x0fff, 0x0271);
+ r8168_mac_ocp_modify(tp, 0xd3e4, 0x00ff, 0x0000);
+ r8168_mac_ocp_modify(tp, 0xe860, 0x0000, 0x0080);
rtl_hw_aspm_clkreq_enable(tp, true);
}
@@ -5125,6 +5235,128 @@ static void rtl_hw_start_8106(struct rtl8169_private *tp)
rtl_hw_aspm_clkreq_enable(tp, true);
}
+DECLARE_RTL_COND(rtl_mac_ocp_e00e_cond)
+{
+ return r8168_mac_ocp_read(tp, 0xe00e) & BIT(13);
+}
+
+static void rtl_hw_start_8125_common(struct rtl8169_private *tp)
+{
+ rtl_pcie_state_l2l3_disable(tp);
+
+ RTL_W16(tp, 0x382, 0x221b);
+ RTL_W8(tp, 0x4500, 0);
+ RTL_W16(tp, 0x4800, 0);
+
+ /* disable UPS */
+ r8168_mac_ocp_modify(tp, 0xd40a, 0x0010, 0x0000);
+
+ RTL_W8(tp, Config1, RTL_R8(tp, Config1) & ~0x10);
+
+ r8168_mac_ocp_write(tp, 0xc140, 0xffff);
+ r8168_mac_ocp_write(tp, 0xc142, 0xffff);
+
+ r8168_mac_ocp_modify(tp, 0xd3e2, 0x0fff, 0x03a9);
+ r8168_mac_ocp_modify(tp, 0xd3e4, 0x00ff, 0x0000);
+ r8168_mac_ocp_modify(tp, 0xe860, 0x0000, 0x0080);
+
+ /* disable new tx descriptor format */
+ r8168_mac_ocp_modify(tp, 0xeb58, 0x0001, 0x0000);
+
+ r8168_mac_ocp_modify(tp, 0xe614, 0x0700, 0x0400);
+ r8168_mac_ocp_modify(tp, 0xe63e, 0x0c30, 0x0020);
+ r8168_mac_ocp_modify(tp, 0xc0b4, 0x0000, 0x000c);
+ r8168_mac_ocp_modify(tp, 0xeb6a, 0x00ff, 0x0033);
+ r8168_mac_ocp_modify(tp, 0xeb50, 0x03e0, 0x0040);
+ r8168_mac_ocp_modify(tp, 0xe056, 0x00f0, 0x0030);
+ r8168_mac_ocp_modify(tp, 0xe040, 0x1000, 0x0000);
+ r8168_mac_ocp_modify(tp, 0xe0c0, 0x4f0f, 0x4403);
+ r8168_mac_ocp_modify(tp, 0xe052, 0x0080, 0x0067);
+ r8168_mac_ocp_modify(tp, 0xc0ac, 0x0080, 0x1f00);
+ r8168_mac_ocp_modify(tp, 0xd430, 0x0fff, 0x047f);
+ r8168_mac_ocp_modify(tp, 0xe84c, 0x0000, 0x00c0);
+ r8168_mac_ocp_modify(tp, 0xea1c, 0x0004, 0x0000);
+ r8168_mac_ocp_modify(tp, 0xeb54, 0x0000, 0x0001);
+ udelay(1);
+ r8168_mac_ocp_modify(tp, 0xeb54, 0x0001, 0x0000);
+ RTL_W16(tp, 0x1880, RTL_R16(tp, 0x1880) & ~0x0030);
+
+ r8168_mac_ocp_write(tp, 0xe098, 0xc302);
+
+ rtl_udelay_loop_wait_low(tp, &rtl_mac_ocp_e00e_cond, 1000, 10);
+
+ rtl8125_config_eee_mac(tp);
+
+ RTL_W32(tp, MISC, RTL_R32(tp, MISC) & ~RXDV_GATED_EN);
+ udelay(10);
+}
+
+static void rtl_hw_start_8125_1(struct rtl8169_private *tp)
+{
+ static const struct ephy_info e_info_8125_1[] = {
+ { 0x01, 0xffff, 0xa812 },
+ { 0x09, 0xffff, 0x520c },
+ { 0x04, 0xffff, 0xd000 },
+ { 0x0d, 0xffff, 0xf702 },
+ { 0x0a, 0xffff, 0x8653 },
+ { 0x06, 0xffff, 0x001e },
+ { 0x08, 0xffff, 0x3595 },
+ { 0x20, 0xffff, 0x9455 },
+ { 0x21, 0xffff, 0x99ff },
+ { 0x02, 0xffff, 0x6046 },
+ { 0x29, 0xffff, 0xfe00 },
+ { 0x23, 0xffff, 0xab62 },
+
+ { 0x41, 0xffff, 0xa80c },
+ { 0x49, 0xffff, 0x520c },
+ { 0x44, 0xffff, 0xd000 },
+ { 0x4d, 0xffff, 0xf702 },
+ { 0x4a, 0xffff, 0x8653 },
+ { 0x46, 0xffff, 0x001e },
+ { 0x48, 0xffff, 0x3595 },
+ { 0x60, 0xffff, 0x9455 },
+ { 0x61, 0xffff, 0x99ff },
+ { 0x42, 0xffff, 0x6046 },
+ { 0x69, 0xffff, 0xfe00 },
+ { 0x63, 0xffff, 0xab62 },
+ };
+
+ rtl_set_def_aspm_entry_latency(tp);
+
+ /* disable aspm and clock request before access ephy */
+ rtl_hw_aspm_clkreq_enable(tp, false);
+ rtl_ephy_init(tp, e_info_8125_1);
+
+ rtl_hw_start_8125_common(tp);
+}
+
+static void rtl_hw_start_8125_2(struct rtl8169_private *tp)
+{
+ static const struct ephy_info e_info_8125_2[] = {
+ { 0x04, 0xffff, 0xd000 },
+ { 0x0a, 0xffff, 0x8653 },
+ { 0x23, 0xffff, 0xab66 },
+ { 0x20, 0xffff, 0x9455 },
+ { 0x21, 0xffff, 0x99ff },
+ { 0x29, 0xffff, 0xfe04 },
+
+ { 0x44, 0xffff, 0xd000 },
+ { 0x4a, 0xffff, 0x8653 },
+ { 0x63, 0xffff, 0xab66 },
+ { 0x60, 0xffff, 0x9455 },
+ { 0x61, 0xffff, 0x99ff },
+ { 0x69, 0xffff, 0xfe04 },
+ };
+
+ rtl_set_def_aspm_entry_latency(tp);
+
+ /* disable aspm and clock request before access ephy */
+ rtl_hw_aspm_clkreq_enable(tp, false);
+ rtl_ephy_init(tp, e_info_8125_2);
+
+ rtl_hw_start_8125_common(tp);
+}
+
static void rtl_hw_config(struct rtl8169_private *tp)
{
static const rtl_generic_fct hw_configs[] = {
@@ -5173,12 +5405,25 @@ static void rtl_hw_config(struct rtl8169_private *tp)
[RTL_GIGA_MAC_VER_49] = rtl_hw_start_8168ep_1,
[RTL_GIGA_MAC_VER_50] = rtl_hw_start_8168ep_2,
[RTL_GIGA_MAC_VER_51] = rtl_hw_start_8168ep_3,
+ [RTL_GIGA_MAC_VER_60] = rtl_hw_start_8125_1,
+ [RTL_GIGA_MAC_VER_61] = rtl_hw_start_8125_2,
};
if (hw_configs[tp->mac_version])
hw_configs[tp->mac_version](tp);
}
+static void rtl_hw_start_8125(struct rtl8169_private *tp)
+{
+ int i;
+
+ /* disable interrupt coalescing */
+ for (i = 0xa00; i < 0xb00; i += 4)
+ RTL_W32(tp, i, 0);
+
+ rtl_hw_config(tp);
+}
+
static void rtl_hw_start_8168(struct rtl8169_private *tp)
{
if (tp->mac_version == RTL_GIGA_MAC_VER_13 ||
@@ -5192,6 +5437,9 @@ static void rtl_hw_start_8168(struct rtl8169_private *tp)
RTL_W8(tp, MaxTxPacketSize, TxPacketMax);
rtl_hw_config(tp);
+
+ /* disable interrupt coalescing */
+ RTL_W16(tp, IntrMitigate, 0x0000);
}
static void rtl_hw_start_8169(struct rtl8169_private *tp)
@@ -5215,6 +5463,9 @@ static void rtl_hw_start_8169(struct rtl8169_private *tp)
rtl8169_set_magic_reg(tp, tp->mac_version);
RTL_W32(tp, RxMissed, 0);
+
+ /* disable interrupt coalescing */
+ RTL_W16(tp, IntrMitigate, 0x0000);
}
static void rtl_hw_start(struct rtl8169_private *tp)
@@ -5226,6 +5477,8 @@ static void rtl_hw_start(struct rtl8169_private *tp)
if (tp->mac_version <= RTL_GIGA_MAC_VER_06)
rtl_hw_start_8169(tp);
+ else if (rtl_is_8125(tp))
+ rtl_hw_start_8125(tp);
else
rtl_hw_start_8168(tp);
@@ -5233,17 +5486,12 @@ static void rtl_hw_start(struct rtl8169_private *tp)
rtl_set_rx_tx_desc_registers(tp);
rtl_lock_config_regs(tp);
- /* disable interrupt coalescing */
- RTL_W16(tp, IntrMitigate, 0x0000);
/* Initially a 10 us delay. Turned it into a PCI commit. - FR */
- RTL_R8(tp, IntrMask);
+ RTL_R16(tp, CPlusCmd);
RTL_W8(tp, ChipCmd, CmdTxEnb | CmdRxEnb);
rtl_init_rxcfg(tp);
rtl_set_tx_config_registers(tp);
-
rtl_set_rx_mode(tp->dev);
- /* no early-rx interrupts */
- RTL_W16(tp, MultiIntr, RTL_R16(tp, MultiIntr) & 0xf000);
rtl_irq_enable(tp);
}
@@ -5268,17 +5516,6 @@ static inline void rtl8169_make_unusable_by_asic(struct RxDesc *desc)
desc->opts1 &= ~cpu_to_le32(DescOwn | RsvdMask);
}
-static void rtl8169_free_rx_databuff(struct rtl8169_private *tp,
- void **data_buff, struct RxDesc *desc)
-{
- dma_unmap_single(tp_to_dev(tp), le64_to_cpu(desc->addr),
- R8169_RX_BUF_SIZE, DMA_FROM_DEVICE);
-
- kfree(*data_buff);
- *data_buff = NULL;
- rtl8169_make_unusable_by_asic(desc);
-}
-
static inline void rtl8169_mark_to_asic(struct RxDesc *desc)
{
u32 eor = le32_to_cpu(desc->opts1) & RingEnd;
@@ -5289,49 +5526,43 @@ static inline void rtl8169_mark_to_asic(struct RxDesc *desc)
desc->opts1 = cpu_to_le32(DescOwn | eor | R8169_RX_BUF_SIZE);
}
-static struct sk_buff *rtl8169_alloc_rx_data(struct rtl8169_private *tp,
- struct RxDesc *desc)
+static struct page *rtl8169_alloc_rx_data(struct rtl8169_private *tp,
+ struct RxDesc *desc)
{
- void *data;
- dma_addr_t mapping;
struct device *d = tp_to_dev(tp);
int node = dev_to_node(d);
+ dma_addr_t mapping;
+ struct page *data;
- data = kmalloc_node(R8169_RX_BUF_SIZE, GFP_KERNEL, node);
+ data = alloc_pages_node(node, GFP_KERNEL, get_order(R8169_RX_BUF_SIZE));
if (!data)
return NULL;
- /* Memory should be properly aligned, but better check. */
- if (!IS_ALIGNED((unsigned long)data, 8)) {
- netdev_err_once(tp->dev, "RX buffer not 8-byte-aligned\n");
- goto err_out;
- }
-
- mapping = dma_map_single(d, data, R8169_RX_BUF_SIZE, DMA_FROM_DEVICE);
+ mapping = dma_map_page(d, data, 0, R8169_RX_BUF_SIZE, DMA_FROM_DEVICE);
if (unlikely(dma_mapping_error(d, mapping))) {
if (net_ratelimit())
netif_err(tp, drv, tp->dev, "Failed to map RX DMA!\n");
- goto err_out;
+ __free_pages(data, get_order(R8169_RX_BUF_SIZE));
+ return NULL;
}
desc->addr = cpu_to_le64(mapping);
rtl8169_mark_to_asic(desc);
- return data;
-err_out:
- kfree(data);
- return NULL;
+ return data;
}
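Rx buffers now come from the page allocator, so the allocation order is derived from the buffer size. get_order() rounds up to the smallest power-of-two number of pages covering the request; a user-space model (PAGE_SHIFT assumed 12, the buffer size shown is illustrative):

    #include <stdio.h>

    #define PAGE_SHIFT 12
    #define PAGE_SIZE  (1UL << PAGE_SHIFT)

    /* model of the kernel's get_order(): smallest n with PAGE_SIZE << n >= size */
    static int get_order(unsigned long size)
    {
            int order = 0;

            size = (size - 1) >> PAGE_SHIFT;
            while (size) {
                    order++;
                    size >>= 1;
            }
            return order;
    }

    int main(void)
    {
            printf("%d\n", get_order(16383));   /* 2: four 4 KiB pages cover it */
            return 0;
    }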
static void rtl8169_rx_clear(struct rtl8169_private *tp)
{
unsigned int i;
- for (i = 0; i < NUM_RX_DESC; i++) {
- if (tp->Rx_databuff[i]) {
- rtl8169_free_rx_databuff(tp, tp->Rx_databuff + i,
- tp->RxDescArray + i);
- }
+ for (i = 0; i < NUM_RX_DESC && tp->Rx_databuff[i]; i++) {
+ dma_unmap_page(tp_to_dev(tp),
+ le64_to_cpu(tp->RxDescArray[i].addr),
+ R8169_RX_BUF_SIZE, DMA_FROM_DEVICE);
+ __free_pages(tp->Rx_databuff[i], get_order(R8169_RX_BUF_SIZE));
+ tp->Rx_databuff[i] = NULL;
+ rtl8169_make_unusable_by_asic(tp->RxDescArray + i);
}
}
@@ -5345,7 +5576,7 @@ static int rtl8169_rx_fill(struct rtl8169_private *tp)
unsigned int i;
for (i = 0; i < NUM_RX_DESC; i++) {
- void *data;
+ struct page *data;
data = rtl8169_alloc_rx_data(tp, tp->RxDescArray + i);
if (!data) {
@@ -5507,44 +5738,6 @@ static bool rtl_test_hw_pad_bug(struct rtl8169_private *tp, struct sk_buff *skb)
return skb->len < ETH_ZLEN && tp->mac_version == RTL_GIGA_MAC_VER_34;
}
-static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb,
- struct net_device *dev);
-/* r8169_csum_workaround()
- * The hw limits the value of the transport offset. When the offset is out of
- * range, calculate the checksum in software.
- */
-static void r8169_csum_workaround(struct rtl8169_private *tp,
- struct sk_buff *skb)
-{
- if (skb_is_gso(skb)) {
- netdev_features_t features = tp->dev->features;
- struct sk_buff *segs, *nskb;
-
- features &= ~(NETIF_F_SG | NETIF_F_IPV6_CSUM | NETIF_F_TSO6);
- segs = skb_gso_segment(skb, features);
- if (IS_ERR(segs) || !segs)
- goto drop;
-
- do {
- nskb = segs;
- segs = segs->next;
- nskb->next = NULL;
- rtl8169_start_xmit(nskb, tp->dev);
- } while (segs);
-
- dev_consume_skb_any(skb);
- } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
- if (skb_checksum_help(skb) < 0)
- goto drop;
-
- rtl8169_start_xmit(skb, tp->dev);
- } else {
-drop:
- tp->dev->stats.tx_dropped++;
- dev_kfree_skb_any(skb);
- }
-}
-
/* msdn_giant_send_check()
* According to Microsoft's documentation, the TCP pseudo header excludes
* the packet length for IPv6 TCP large packets.
@@ -5594,13 +5787,6 @@ static bool rtl8169_tso_csum_v2(struct rtl8169_private *tp,
u32 mss = skb_shinfo(skb)->gso_size;
if (mss) {
- if (transport_offset > GTTCPHO_MAX) {
- netif_warn(tp, tx_err, tp->dev,
- "Invalid transport offset 0x%x for TSO\n",
- transport_offset);
- return false;
- }
-
switch (vlan_get_protocol(skb)) {
case htons(ETH_P_IP):
opts[0] |= TD1_GTSENV4;
@@ -5623,16 +5809,6 @@ static bool rtl8169_tso_csum_v2(struct rtl8169_private *tp,
} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
u8 ip_protocol;
- if (unlikely(rtl_test_hw_pad_bug(tp, skb)))
- return !(skb_checksum_help(skb) || eth_skb_pad(skb));
-
- if (transport_offset > TCPHO_MAX) {
- netif_warn(tp, tx_err, tp->dev,
- "Invalid transport offset 0x%x\n",
- transport_offset);
- return false;
- }
-
switch (vlan_get_protocol(skb)) {
case htons(ETH_P_IP):
opts[1] |= TD1_IPv4_CS;
@@ -5686,6 +5862,14 @@ static bool rtl_chip_supports_csum_v2(struct rtl8169_private *tp)
}
}
+static void rtl8169_doorbell(struct rtl8169_private *tp)
+{
+ if (rtl_is_8125(tp))
+ RTL_W16(tp, TxPoll_8125, BIT(0));
+ else
+ RTL_W8(tp, TxPoll, NPQ);
+}
+
static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb,
struct net_device *dev)
{
@@ -5695,6 +5879,8 @@ static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb,
struct device *d = tp_to_dev(tp);
dma_addr_t mapping;
u32 opts[2], len;
+ bool stop_queue;
+ bool door_bell;
int frags;
if (unlikely(!rtl_tx_slots_avail(tp, skb_shinfo(skb)->nr_frags))) {
@@ -5709,10 +5895,8 @@ static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb,
opts[0] = DescOwn;
if (rtl_chip_supports_csum_v2(tp)) {
- if (!rtl8169_tso_csum_v2(tp, skb, opts)) {
- r8169_csum_workaround(tp, skb);
- return NETDEV_TX_OK;
- }
+ if (!rtl8169_tso_csum_v2(tp, skb, opts))
+ goto err_dma_0;
} else {
rtl8169_tso_csum_v1(skb, opts);
}
@@ -5740,13 +5924,13 @@ static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb,
txd->opts2 = cpu_to_le32(opts[1]);
- netdev_sent_queue(dev, skb->len);
-
skb_tx_timestamp(skb);
/* Force memory writes to complete before releasing descriptor */
dma_wmb();
+ door_bell = __netdev_sent_queue(dev, skb->len, netdev_xmit_more());
+
txd->opts1 = rtl8169_get_txd_opts1(opts[0], len, entry);
/* Force all memory writes to complete before notifying device */
@@ -5754,14 +5938,20 @@ static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb,
tp->cur_tx += frags + 1;
- RTL_W8(tp, TxPoll, NPQ);
-
- if (!rtl_tx_slots_avail(tp, MAX_SKB_FRAGS)) {
+ stop_queue = !rtl_tx_slots_avail(tp, MAX_SKB_FRAGS);
+ if (unlikely(stop_queue)) {
/* Avoid wrongly optimistic queue wake-up: rtl_tx thread must
* not miss a ring update when it notices a stopped queue.
*/
smp_wmb();
netif_stop_queue(dev);
+ door_bell = true;
+ }
+
+ if (door_bell)
+ rtl8169_doorbell(tp);
+
+ if (unlikely(stop_queue)) {
/* Sync with rtl_tx:
* - publish queue status and cur_tx ring index (write barrier)
* - refresh dirty_tx ring index (read barrier).
@@ -5789,6 +5979,39 @@ err_stop_0:
return NETDEV_TX_BUSY;
}
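The transmit path now calls __netdev_sent_queue(), which combines BQL accounting with the netdev_xmit_more() hint: the doorbell MMIO write is deferred while the stack promises more packets and forced when the queue stops. A toy model of that batching decision (the real helper also returns true when BQL throttles the queue):

    #include <stdbool.h>
    #include <stdio.h>

    static int doorbell_writes;

    static void doorbell(void) { doorbell_writes++; }

    /* ring only on the last packet of a batch, or when stopping the queue */
    static void xmit_one(bool xmit_more, bool stop_queue)
    {
            bool door_bell = !xmit_more;

            if (stop_queue)
                    door_bell = true;
            if (door_bell)
                    doorbell();
    }

    int main(void)
    {
            xmit_one(true, false);          /* batch of three packets ...   */
            xmit_one(true, false);
            xmit_one(false, false);         /* ... one MMIO write at the end */
            printf("%d doorbell write(s)\n", doorbell_writes);      /* 1 */
            return 0;
    }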
+static netdev_features_t rtl8169_features_check(struct sk_buff *skb,
+ struct net_device *dev,
+ netdev_features_t features)
+{
+ int transport_offset = skb_transport_offset(skb);
+ struct rtl8169_private *tp = netdev_priv(dev);
+
+ if (skb_is_gso(skb)) {
+ if (transport_offset > GTTCPHO_MAX &&
+ rtl_chip_supports_csum_v2(tp))
+ features &= ~NETIF_F_ALL_TSO;
+ } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
+ if (skb->len < ETH_ZLEN) {
+ switch (tp->mac_version) {
+ case RTL_GIGA_MAC_VER_11:
+ case RTL_GIGA_MAC_VER_12:
+ case RTL_GIGA_MAC_VER_17:
+ case RTL_GIGA_MAC_VER_34:
+ features &= ~NETIF_F_CSUM_MASK;
+ break;
+ default:
+ break;
+ }
+ }
+
+ if (transport_offset > TCPHO_MAX &&
+ rtl_chip_supports_csum_v2(tp))
+ features &= ~NETIF_F_CSUM_MASK;
+ }
+
+ return vlan_features_check(skb, features);
+}
+
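rtl8169_features_check() supersedes the deleted r8169_csum_workaround(): rather than segmenting or checksumming inside the driver, it clears feature bits per skb whenever the transport-header offset exceeds what the Tx descriptor field can encode, and the networking core then falls back to software on its own. A sketch of the offset test (TCPHO_MAX from the defines above; the frame layout is hypothetical):

    #include <stdio.h>

    #define TCPHO_MAX 0x3ff     /* widest transport offset the descriptor encodes */

    int main(void)
    {
            /* Ethernet (14) + IPv4 (20) puts the TCP header at offset 34 */
            int transport_offset = 14 + 20;

            if (transport_offset > TCPHO_MAX)
                    printf("clear checksum features, let the stack do it\n");
            else
                    printf("offset %d fits, hw checksum stays on\n",
                           transport_offset);
            return 0;
    }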
static void rtl8169_pcierr_interrupt(struct net_device *dev)
{
struct rtl8169_private *tp = netdev_priv(dev);
@@ -5850,7 +6073,7 @@ static void rtl_tx(struct net_device *dev, struct rtl8169_private *tp,
rtl8169_unmap_tx_skb(tp_to_dev(tp), tx_skb,
tp->TxDescArray + entry);
- if (status & LastFrag) {
+ if (tx_skb->skb) {
pkts_compl++;
bytes_compl += tx_skb->skb->len;
napi_consume_skb(tx_skb->skb, budget);
@@ -5888,7 +6111,7 @@ static void rtl_tx(struct net_device *dev, struct rtl8169_private *tp,
* it is slow enough). -- FR
*/
if (tp->cur_tx != dirty_tx)
- RTL_W8(tp, TxPoll, NPQ);
+ rtl8169_doorbell(tp);
}
}
@@ -5908,24 +6131,6 @@ static inline void rtl8169_rx_csum(struct sk_buff *skb, u32 opts1)
skb_checksum_none_assert(skb);
}
-static struct sk_buff *rtl8169_try_rx_copy(void *data,
- struct rtl8169_private *tp,
- int pkt_size,
- dma_addr_t addr)
-{
- struct sk_buff *skb;
- struct device *d = tp_to_dev(tp);
-
- dma_sync_single_for_cpu(d, addr, pkt_size, DMA_FROM_DEVICE);
- prefetch(data);
- skb = napi_alloc_skb(&tp->napi, pkt_size);
- if (skb)
- skb_copy_to_linear_data(skb, data, pkt_size);
- dma_sync_single_for_device(d, addr, pkt_size, DMA_FROM_DEVICE);
-
- return skb;
-}
-
static int rtl_rx(struct net_device *dev, struct rtl8169_private *tp, u32 budget)
{
unsigned int cur_rx, rx_left;
@@ -5935,6 +6140,7 @@ static int rtl_rx(struct net_device *dev, struct rtl8169_private *tp, u32 budget
for (rx_left = min(budget, NUM_RX_DESC); rx_left > 0; rx_left--, cur_rx++) {
unsigned int entry = cur_rx % NUM_RX_DESC;
+ const void *rx_buf = page_address(tp->Rx_databuff[entry]);
struct RxDesc *desc = tp->RxDescArray + entry;
u32 status;
@@ -5961,17 +6167,13 @@ static int rtl_rx(struct net_device *dev, struct rtl8169_private *tp, u32 budget
goto process_pkt;
}
} else {
+ unsigned int pkt_size;
struct sk_buff *skb;
- dma_addr_t addr;
- int pkt_size;
process_pkt:
- addr = le64_to_cpu(desc->addr);
+ pkt_size = status & GENMASK(13, 0);
if (likely(!(dev->features & NETIF_F_RXFCS)))
- pkt_size = (status & 0x00003fff) - 4;
- else
- pkt_size = status & 0x00003fff;
-
+ pkt_size -= ETH_FCS_LEN;
/*
* The driver does not support incoming fragmented
* frames. They are seen as a symptom of over-mtu
@@ -5983,15 +6185,25 @@ process_pkt:
goto release_descriptor;
}
- skb = rtl8169_try_rx_copy(tp->Rx_databuff[entry],
- tp, pkt_size, addr);
- if (!skb) {
+ skb = napi_alloc_skb(&tp->napi, pkt_size);
+ if (unlikely(!skb)) {
dev->stats.rx_dropped++;
goto release_descriptor;
}
+ dma_sync_single_for_cpu(tp_to_dev(tp),
+ le64_to_cpu(desc->addr),
+ pkt_size, DMA_FROM_DEVICE);
+ prefetch(rx_buf);
+ skb_copy_to_linear_data(skb, rx_buf, pkt_size);
+ skb->tail += pkt_size;
+ skb->len = pkt_size;
+
+ dma_sync_single_for_device(tp_to_dev(tp),
+ le64_to_cpu(desc->addr),
+ pkt_size, DMA_FROM_DEVICE);
+
rtl8169_rx_csum(skb, status);
- skb_put(skb, pkt_size);
skb->protocol = eth_type_trans(skb, dev);
rtl8169_rx_vlan_tag(desc, skb);
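On the Rx side the packet length is now extracted with GENMASK(13, 0) (bits 13..0 of the descriptor status, i.e. 0x3fff) and the 4-byte FCS is trimmed via ETH_FCS_LEN instead of a bare -4. GENMASK for 32-bit values, modelled in one line:

    #include <stdio.h>

    /* set bits l..h inclusive (32-bit variant of the kernel macro) */
    #define GENMASK(h, l) (((~0u) << (l)) & (~0u >> (31 - (h))))

    int main(void)
    {
            printf("0x%04x\n", GENMASK(13, 0));     /* 0x3fff */
            return 0;
    }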
@@ -6020,9 +6232,10 @@ release_descriptor:
static irqreturn_t rtl8169_interrupt(int irq, void *dev_instance)
{
struct rtl8169_private *tp = dev_instance;
- u16 status = RTL_R16(tp, IntrStatus);
+ u32 status = rtl_get_events(tp);
- if (!tp->irq_enabled || status == 0xffff || !(status & tp->irq_mask))
+ if (!tp->irq_enabled || (status & 0xffff) == 0xffff ||
+ !(status & tp->irq_mask))
return IRQ_NONE;
if (unlikely(status & SYSErr)) {
@@ -6332,7 +6545,7 @@ rtl8169_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
stats->multicast = dev->stats.multicast;
/*
- * Fetch additonal counter values missing in stats collected by driver
+ * Fetch additional counter values missing in stats collected by driver
* from tally counters.
*/
if (pm_runtime_active(&pdev->dev))
@@ -6556,6 +6769,7 @@ static const struct net_device_ops rtl_netdev_ops = {
.ndo_stop = rtl8169_close,
.ndo_get_stats64 = rtl8169_get_stats64,
.ndo_start_xmit = rtl8169_start_xmit,
+ .ndo_features_check = rtl8169_features_check,
.ndo_tx_timeout = rtl8169_tx_timeout,
.ndo_validate_addr = eth_validate_addr,
.ndo_change_mtu = rtl8169_change_mtu,
@@ -6619,6 +6833,8 @@ static void rtl_read_mac_address(struct rtl8169_private *tp,
value = rtl_eri_read(tp, 0xe4);
mac_addr[4] = (value >> 0) & 0xff;
mac_addr[5] = (value >> 8) & 0xff;
+ } else if (rtl_is_8125(tp)) {
+ rtl_read_mac_from_reg(tp, mac_addr, MAC0_BKP);
}
}
@@ -6692,8 +6908,6 @@ static int r8169_mdio_register(struct rtl8169_private *tp)
static void rtl_hw_init_8168g(struct rtl8169_private *tp)
{
- u32 data;
-
tp->ocp_base = OCP_STD_PHY_BASE;
RTL_W32(tp, MISC, RTL_R32(tp, MISC) | RXDV_GATED_EN);
@@ -6708,16 +6922,37 @@ static void rtl_hw_init_8168g(struct rtl8169_private *tp)
msleep(1);
RTL_W8(tp, MCU, RTL_R8(tp, MCU) & ~NOW_IS_OOB);
- data = r8168_mac_ocp_read(tp, 0xe8de);
- data &= ~(1 << 14);
- r8168_mac_ocp_write(tp, 0xe8de, data);
+ r8168_mac_ocp_modify(tp, 0xe8de, BIT(14), 0);
if (!rtl_udelay_loop_wait_high(tp, &rtl_link_list_ready_cond, 100, 42))
return;
- data = r8168_mac_ocp_read(tp, 0xe8de);
- data |= (1 << 15);
- r8168_mac_ocp_write(tp, 0xe8de, data);
+ r8168_mac_ocp_modify(tp, 0xe8de, 0, BIT(15));
+
+ rtl_udelay_loop_wait_high(tp, &rtl_link_list_ready_cond, 100, 42);
+}
+
+static void rtl_hw_init_8125(struct rtl8169_private *tp)
+{
+ tp->ocp_base = OCP_STD_PHY_BASE;
+
+ RTL_W32(tp, MISC, RTL_R32(tp, MISC) | RXDV_GATED_EN);
+
+ if (!rtl_udelay_loop_wait_high(tp, &rtl_rxtx_empty_cond, 100, 42))
+ return;
+
+ RTL_W8(tp, ChipCmd, RTL_R8(tp, ChipCmd) & ~(CmdTxEnb | CmdRxEnb));
+ msleep(1);
+ RTL_W8(tp, MCU, RTL_R8(tp, MCU) & ~NOW_IS_OOB);
+
+ r8168_mac_ocp_modify(tp, 0xe8de, BIT(14), 0);
+
+ if (!rtl_udelay_loop_wait_high(tp, &rtl_link_list_ready_cond, 100, 42))
+ return;
+
+ r8168_mac_ocp_write(tp, 0xc0aa, 0x07d0);
+ r8168_mac_ocp_write(tp, 0xc0a6, 0x0150);
+ r8168_mac_ocp_write(tp, 0xc01e, 0x5555);
rtl_udelay_loop_wait_high(tp, &rtl_link_list_ready_cond, 100, 42);
}
@@ -6731,6 +6966,9 @@ static void rtl_hw_initialize(struct rtl8169_private *tp)
case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_48:
rtl_hw_init_8168g(tp);
break;
+ case RTL_GIGA_MAC_VER_60 ... RTL_GIGA_MAC_VER_61:
+ rtl_hw_init_8125(tp);
+ break;
default:
break;
}
@@ -6794,7 +7032,7 @@ static void rtl_init_mac_address(struct rtl8169_private *tp)
{
struct net_device *dev = tp->dev;
u8 *mac_addr = dev->dev_addr;
- int rc, i;
+ int rc;
rc = eth_platform_get_mac_address(tp_to_dev(tp), mac_addr);
if (!rc)
@@ -6804,8 +7042,7 @@ static void rtl_init_mac_address(struct rtl8169_private *tp)
if (is_valid_ether_addr(mac_addr))
goto done;
- for (i = 0; i < ETH_ALEN; i++)
- mac_addr[i] = RTL_R8(tp, MAC0 + i);
+ rtl_read_mac_from_reg(tp, mac_addr, MAC0);
if (is_valid_ether_addr(mac_addr))
goto done;
@@ -6917,11 +7154,9 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
netif_napi_add(dev, &tp->napi, rtl8169_poll, NAPI_POLL_WEIGHT);
- /* don't enable SG, IP_CSUM and TSO by default - it might not work
- * properly for all devices */
- dev->features |= NETIF_F_RXCSUM |
- NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
-
+ dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO |
+ NETIF_F_RXCSUM | NETIF_F_HW_VLAN_CTAG_TX |
+ NETIF_F_HW_VLAN_CTAG_RX;
dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO |
NETIF_F_RXCSUM | NETIF_F_HW_VLAN_CTAG_TX |
NETIF_F_HW_VLAN_CTAG_RX;
@@ -6929,8 +7164,10 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
NETIF_F_HIGHDMA;
dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
- tp->cp_cmd |= RxChkSum | RxVlan;
-
+ tp->cp_cmd |= RxChkSum;
+ /* RTL8125 uses register RxConfig for VLAN offloading config */
+ if (!rtl_is_8125(tp))
+ tp->cp_cmd |= RxVlan;
/*
* Pretend we are using VLANs; This bypasses a nasty bug where
* Interrupts stop flowing on high load on 8110SCd controllers.
@@ -6939,8 +7176,22 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
/* Disallow toggling */
dev->hw_features &= ~NETIF_F_HW_VLAN_CTAG_RX;
- if (rtl_chip_supports_csum_v2(tp))
+ if (rtl_chip_supports_csum_v2(tp)) {
dev->hw_features |= NETIF_F_IPV6_CSUM | NETIF_F_TSO6;
+ dev->features |= NETIF_F_IPV6_CSUM | NETIF_F_TSO6;
+ dev->gso_max_size = RTL_GSO_MAX_SIZE_V2;
+ dev->gso_max_segs = RTL_GSO_MAX_SEGS_V2;
+ } else {
+ dev->gso_max_size = RTL_GSO_MAX_SIZE_V1;
+ dev->gso_max_segs = RTL_GSO_MAX_SEGS_V1;
+ }
+
+ /* RTL8168e-vl has a HW issue with TSO */
+ if (tp->mac_version == RTL_GIGA_MAC_VER_34) {
+ dev->vlan_features &= ~(NETIF_F_ALL_TSO | NETIF_F_SG);
+ dev->hw_features &= ~(NETIF_F_ALL_TSO | NETIF_F_SG);
+ dev->features &= ~(NETIF_F_ALL_TSO | NETIF_F_SG);
+ }
dev->hw_features |= NETIF_F_RXALL;
dev->hw_features |= NETIF_F_RXFCS;
diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_platform.c b/drivers/net/ethernet/samsung/sxgbe/sxgbe_platform.c
index d2c48116f181..2412c87561e0 100644
--- a/drivers/net/ethernet/samsung/sxgbe/sxgbe_platform.c
+++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_platform.c
@@ -78,7 +78,6 @@ static int sxgbe_platform_probe(struct platform_device *pdev)
{
int ret;
int i, chan;
- struct resource *res;
struct device *dev = &pdev->dev;
void __iomem *addr;
struct sxgbe_priv_data *priv = NULL;
@@ -88,8 +87,7 @@ static int sxgbe_platform_probe(struct platform_device *pdev)
struct device_node *node = dev->of_node;
/* Get memory resource */
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- addr = devm_ioremap_resource(dev, res);
+ addr = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(addr))
return PTR_ERR(addr);
diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c
index 16d6952c312a..0ec13f520e90 100644
--- a/drivers/net/ethernet/sfc/ef10.c
+++ b/drivers/net/ethernet/sfc/ef10.c
@@ -508,7 +508,7 @@ static ssize_t efx_ef10_show_link_control_flag(struct device *dev,
struct device_attribute *attr,
char *buf)
{
- struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev));
+ struct efx_nic *efx = dev_get_drvdata(dev);
return sprintf(buf, "%d\n",
((efx->mcdi->fn_flags) &
@@ -520,7 +520,7 @@ static ssize_t efx_ef10_show_primary_flag(struct device *dev,
struct device_attribute *attr,
char *buf)
{
- struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev));
+ struct efx_nic *efx = dev_get_drvdata(dev);
return sprintf(buf, "%d\n",
((efx->mcdi->fn_flags) &
diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c
index ab58b837df47..2fef7402233e 100644
--- a/drivers/net/ethernet/sfc/efx.c
+++ b/drivers/net/ethernet/sfc/efx.c
@@ -2517,7 +2517,7 @@ static struct notifier_block efx_netdev_notifier = {
static ssize_t
show_phy_type(struct device *dev, struct device_attribute *attr, char *buf)
{
- struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev));
+ struct efx_nic *efx = dev_get_drvdata(dev);
return sprintf(buf, "%d\n", efx->phy_type);
}
static DEVICE_ATTR(phy_type, 0444, show_phy_type, NULL);
@@ -2526,7 +2526,7 @@ static DEVICE_ATTR(phy_type, 0444, show_phy_type, NULL);
static ssize_t show_mcdi_log(struct device *dev, struct device_attribute *attr,
char *buf)
{
- struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev));
+ struct efx_nic *efx = dev_get_drvdata(dev);
struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
return scnprintf(buf, PAGE_SIZE, "%d\n", mcdi->logging_enabled);
@@ -2534,7 +2534,7 @@ static ssize_t show_mcdi_log(struct device *dev, struct device_attribute *attr,
static ssize_t set_mcdi_log(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
- struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev));
+ struct efx_nic *efx = dev_get_drvdata(dev);
struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
bool enable = count > 0 && *buf != '0';
@@ -3654,7 +3654,7 @@ static int efx_pci_sriov_configure(struct pci_dev *dev, int num_vfs)
static int efx_pm_freeze(struct device *dev)
{
- struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev));
+ struct efx_nic *efx = dev_get_drvdata(dev);
rtnl_lock();
@@ -3675,7 +3675,7 @@ static int efx_pm_freeze(struct device *dev)
static int efx_pm_thaw(struct device *dev)
{
int rc;
- struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev));
+ struct efx_nic *efx = dev_get_drvdata(dev);
rtnl_lock();
diff --git a/drivers/net/ethernet/sfc/falcon/efx.c b/drivers/net/ethernet/sfc/falcon/efx.c
index 9b15c39ac670..eecc348b1c32 100644
--- a/drivers/net/ethernet/sfc/falcon/efx.c
+++ b/drivers/net/ethernet/sfc/falcon/efx.c
@@ -2256,7 +2256,7 @@ static struct notifier_block ef4_netdev_notifier = {
static ssize_t
show_phy_type(struct device *dev, struct device_attribute *attr, char *buf)
{
- struct ef4_nic *efx = pci_get_drvdata(to_pci_dev(dev));
+ struct ef4_nic *efx = dev_get_drvdata(dev);
return sprintf(buf, "%d\n", efx->phy_type);
}
static DEVICE_ATTR(phy_type, 0444, show_phy_type, NULL);
@@ -2999,7 +2999,7 @@ static int ef4_pci_probe(struct pci_dev *pci_dev,
static int ef4_pm_freeze(struct device *dev)
{
- struct ef4_nic *efx = pci_get_drvdata(to_pci_dev(dev));
+ struct ef4_nic *efx = dev_get_drvdata(dev);
rtnl_lock();
@@ -3020,7 +3020,7 @@ static int ef4_pm_freeze(struct device *dev)
static int ef4_pm_thaw(struct device *dev)
{
int rc;
- struct ef4_nic *efx = pci_get_drvdata(to_pci_dev(dev));
+ struct ef4_nic *efx = dev_get_drvdata(dev);
rtnl_lock();
diff --git a/drivers/net/ethernet/sfc/falcon/falcon_boards.c b/drivers/net/ethernet/sfc/falcon/falcon_boards.c
index 839189dab98e..605f486fa675 100644
--- a/drivers/net/ethernet/sfc/falcon/falcon_boards.c
+++ b/drivers/net/ethernet/sfc/falcon/falcon_boards.c
@@ -357,7 +357,7 @@ fail_on:
static ssize_t show_phy_flash_cfg(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct ef4_nic *efx = pci_get_drvdata(to_pci_dev(dev));
+ struct ef4_nic *efx = dev_get_drvdata(dev);
return sprintf(buf, "%d\n", !!(efx->phy_mode & PHY_MODE_SPECIAL));
}
@@ -365,7 +365,7 @@ static ssize_t set_phy_flash_cfg(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
- struct ef4_nic *efx = pci_get_drvdata(to_pci_dev(dev));
+ struct ef4_nic *efx = dev_get_drvdata(dev);
enum ef4_phy_mode old_mode, new_mode;
int err;
@@ -454,13 +454,13 @@ static int sfe4001_init(struct ef4_nic *efx)
#if IS_ENABLED(CONFIG_SENSORS_LM90)
board->hwmon_client =
- i2c_new_device(&board->i2c_adap, &sfe4001_hwmon_info);
+ i2c_new_client_device(&board->i2c_adap, &sfe4001_hwmon_info);
#else
board->hwmon_client =
- i2c_new_dummy(&board->i2c_adap, sfe4001_hwmon_info.addr);
+ i2c_new_dummy_device(&board->i2c_adap, sfe4001_hwmon_info.addr);
#endif
- if (!board->hwmon_client)
- return -EIO;
+ if (IS_ERR(board->hwmon_client))
+ return PTR_ERR(board->hwmon_client);
/* Raise board/PHY high limit from 85 to 90 degrees Celsius */
rc = i2c_smbus_write_byte_data(board->hwmon_client,
@@ -468,9 +468,9 @@ static int sfe4001_init(struct ef4_nic *efx)
if (rc)
goto fail_hwmon;
- board->ioexp_client = i2c_new_dummy(&board->i2c_adap, PCA9539);
- if (!board->ioexp_client) {
- rc = -EIO;
+ board->ioexp_client = i2c_new_dummy_device(&board->i2c_adap, PCA9539);
+ if (IS_ERR(board->ioexp_client)) {
+ rc = PTR_ERR(board->ioexp_client);
goto fail_hwmon;
}
diff --git a/drivers/net/ethernet/sfc/falcon/rx.c b/drivers/net/ethernet/sfc/falcon/rx.c
index fd850d3d8ec0..05ea3523890a 100644
--- a/drivers/net/ethernet/sfc/falcon/rx.c
+++ b/drivers/net/ethernet/sfc/falcon/rx.c
@@ -424,7 +424,6 @@ ef4_rx_packet_gro(struct ef4_channel *channel, struct ef4_rx_buffer *rx_buf,
unsigned int n_frags, u8 *eh)
{
struct napi_struct *napi = &channel->napi_str;
- gro_result_t gro_result;
struct ef4_nic *efx = channel->efx;
struct sk_buff *skb;
@@ -460,9 +459,7 @@ ef4_rx_packet_gro(struct ef4_channel *channel, struct ef4_rx_buffer *rx_buf,
skb_record_rx_queue(skb, channel->rx_queue.core_index);
- gro_result = napi_gro_frags(napi);
- if (gro_result != GRO_DROP)
- channel->irq_mod_score += 2;
+ napi_gro_frags(napi);
}
/* Allocate and construct an SKB around page fragments */
diff --git a/drivers/net/ethernet/sfc/rx.c b/drivers/net/ethernet/sfc/rx.c
index d5db045535d3..85ec07f5a674 100644
--- a/drivers/net/ethernet/sfc/rx.c
+++ b/drivers/net/ethernet/sfc/rx.c
@@ -412,7 +412,6 @@ efx_rx_packet_gro(struct efx_channel *channel, struct efx_rx_buffer *rx_buf,
unsigned int n_frags, u8 *eh)
{
struct napi_struct *napi = &channel->napi_str;
- gro_result_t gro_result;
struct efx_nic *efx = channel->efx;
struct sk_buff *skb;
@@ -449,9 +448,7 @@ efx_rx_packet_gro(struct efx_channel *channel, struct efx_rx_buffer *rx_buf,
skb_record_rx_queue(skb, channel->rx_queue.core_index);
- gro_result = napi_gro_frags(napi);
- if (gro_result != GRO_DROP)
- channel->irq_mod_score += 2;
+ napi_gro_frags(napi);
}
/* Allocate and construct an SKB around page fragments */
diff --git a/drivers/net/ethernet/sfc/tx.c b/drivers/net/ethernet/sfc/tx.c
index 31ec56091a5d..65e81ec1b314 100644
--- a/drivers/net/ethernet/sfc/tx.c
+++ b/drivers/net/ethernet/sfc/tx.c
@@ -274,7 +274,7 @@ static void efx_skb_copy_bits_to_pio(struct efx_nic *efx, struct sk_buff *skb,
vaddr = kmap_atomic(skb_frag_page(f));
- efx_memcpy_toio_aligned_cb(efx, piobuf, vaddr + f->page_offset,
+ efx_memcpy_toio_aligned_cb(efx, piobuf, vaddr + skb_frag_off(f),
skb_frag_size(f), copy_buf);
kunmap_atomic(vaddr);
}
diff --git a/drivers/net/ethernet/sgi/ioc3-eth.c b/drivers/net/ethernet/sgi/ioc3-eth.c
index 358e66b81926..deb636d653f3 100644
--- a/drivers/net/ethernet/sgi/ioc3-eth.c
+++ b/drivers/net/ethernet/sgi/ioc3-eth.c
@@ -1,9 +1,5 @@
-/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Driver for SGI's IOC3 based Ethernet cards as found in the PCI card.
+// SPDX-License-Identifier: GPL-2.0
+/* Driver for SGI's IOC3 based Ethernet cards as found in the PCI card.
*
* Copyright (C) 1999, 2000, 01, 03, 06 Ralf Baechle
* Copyright (C) 1995, 1999, 2000, 2001 by Silicon Graphics, Inc.
@@ -15,11 +11,8 @@
*
* To do:
*
- * o Handle allocation failures in ioc3_alloc_skb() more gracefully.
- * o Handle allocation failures in ioc3_init_rings().
* o Use prefetching for large packets. What is a good lower limit for
* prefetching?
- * o We're probably allocating a bit too much memory.
* o Use hardware checksums.
* o Convert to using an IOC3 meta driver.
* o Which PHYs might possibly be attached to the IOC3 in real life,
@@ -39,10 +32,10 @@
#include <linux/crc32.h>
#include <linux/mii.h>
#include <linux/in.h>
+#include <linux/io.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/udp.h>
-#include <linux/dma-mapping.h>
#include <linux/gfp.h>
#ifdef CONFIG_SERIAL_8250
@@ -55,32 +48,52 @@
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/skbuff.h>
+#include <linux/dma-direct.h>
+
#include <net/ip.h>
#include <asm/byteorder.h>
-#include <asm/io.h>
#include <asm/pgtable.h>
#include <linux/uaccess.h>
#include <asm/sn/types.h>
#include <asm/sn/ioc3.h>
#include <asm/pci/bridge.h>
-/*
- * 64 RX buffers. This is tunable in the range of 16 <= x < 512. The
- * value must be a power of two.
+/* Number of RX buffers. This is tunable in the range of 16 <= x < 512.
+ * The value must be a power of two.
*/
-#define RX_BUFFS 64
+#define RX_BUFFS 64
+#define RX_RING_ENTRIES 512 /* fixed in hardware */
+#define RX_RING_MASK (RX_RING_ENTRIES - 1)
+#define RX_RING_SIZE (RX_RING_ENTRIES * sizeof(u64))
+
+/* 128 TX buffers (not tunable) */
+#define TX_RING_ENTRIES 128
+#define TX_RING_MASK (TX_RING_ENTRIES - 1)
+#define TX_RING_SIZE (TX_RING_ENTRIES * sizeof(struct ioc3_etxd))
+
+/* IOC3 does dma transfers in 128 byte blocks */
+#define IOC3_DMA_XFER_LEN 128UL
+
+/* Every RX buffer starts with 8 byte descriptor data */
+#define RX_OFFSET (sizeof(struct ioc3_erxbuf) + NET_IP_ALIGN)
+#define RX_BUF_SIZE (13 * IOC3_DMA_XFER_LEN)
-#define ETCSR_FD ((17<<ETCSR_IPGR2_SHIFT) | (11<<ETCSR_IPGR1_SHIFT) | 21)
-#define ETCSR_HD ((21<<ETCSR_IPGR2_SHIFT) | (21<<ETCSR_IPGR1_SHIFT) | 21)
+#define ETCSR_FD ((17 << ETCSR_IPGR2_SHIFT) | (11 << ETCSR_IPGR1_SHIFT) | 21)
+#define ETCSR_HD ((21 << ETCSR_IPGR2_SHIFT) | (21 << ETCSR_IPGR1_SHIFT) | 21)
/* Private per NIC data of the driver. */
struct ioc3_private {
- struct ioc3 *regs;
+ struct ioc3_ethregs *regs;
+ struct ioc3 *all_regs;
+ struct device *dma_dev;
+ u32 *ssram;
unsigned long *rxr; /* pointer to receiver ring */
struct ioc3_etxd *txr;
- struct sk_buff *rx_skbs[512];
- struct sk_buff *tx_skbs[128];
+ dma_addr_t rxr_dma;
+ dma_addr_t txr_dma;
+ struct sk_buff *rx_skbs[RX_RING_ENTRIES];
+ struct sk_buff *tx_skbs[TX_RING_ENTRIES];
int rx_ci; /* RX consumer index */
int rx_pi; /* RX producer index */
int tx_ci; /* TX consumer index */
@@ -102,190 +115,138 @@ static void ioc3_set_multicast_list(struct net_device *dev);
static netdev_tx_t ioc3_start_xmit(struct sk_buff *skb, struct net_device *dev);
static void ioc3_timeout(struct net_device *dev);
static inline unsigned int ioc3_hash(const unsigned char *addr);
+static void ioc3_start(struct ioc3_private *ip);
static inline void ioc3_stop(struct ioc3_private *ip);
static void ioc3_init(struct net_device *dev);
+static int ioc3_alloc_rx_bufs(struct net_device *dev);
+static void ioc3_free_rx_bufs(struct ioc3_private *ip);
+static inline void ioc3_clean_tx_ring(struct ioc3_private *ip);
static const char ioc3_str[] = "IOC3 Ethernet";
static const struct ethtool_ops ioc3_ethtool_ops;
-/* We use this to acquire receive skb's that we can DMA directly into. */
-
-#define IOC3_CACHELINE 128UL
static inline unsigned long aligned_rx_skb_addr(unsigned long addr)
{
- return (~addr + 1) & (IOC3_CACHELINE - 1UL);
+ return (~addr + 1) & (IOC3_DMA_XFER_LEN - 1UL);
}
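A quick self-contained check of the alignment arithmetic above, assuming a
power-of-two length: (~addr + 1) is -addr, so the AND yields the padding up
to the next IOC3_DMA_XFER_LEN boundary (values here are illustrative):

	#include <assert.h>

	int main(void)
	{
		unsigned long addr = 0x1048, len = 128;	/* 0x80 */
		unsigned long pad = (~addr + 1) & (len - 1);

		assert(pad == 0x38);		/* 0x1048 + 0x38 == 0x1080 */
		assert(((addr + pad) & (len - 1)) == 0);
		return 0;
	}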
-static inline struct sk_buff * ioc3_alloc_skb(unsigned long length,
- unsigned int gfp_mask)
+static inline int ioc3_alloc_skb(struct ioc3_private *ip, struct sk_buff **skb,
+ struct ioc3_erxbuf **rxb, dma_addr_t *rxb_dma)
{
- struct sk_buff *skb;
+ struct sk_buff *new_skb;
+ dma_addr_t d;
+ int offset;
+
+ new_skb = alloc_skb(RX_BUF_SIZE + IOC3_DMA_XFER_LEN - 1, GFP_ATOMIC);
+ if (!new_skb)
+ return -ENOMEM;
+
+ /* ensure buffer is aligned to IOC3_DMA_XFER_LEN */
+ offset = aligned_rx_skb_addr((unsigned long)new_skb->data);
+ if (offset)
+ skb_reserve(new_skb, offset);
- skb = alloc_skb(length + IOC3_CACHELINE - 1, gfp_mask);
- if (likely(skb)) {
- int offset = aligned_rx_skb_addr((unsigned long) skb->data);
- if (offset)
- skb_reserve(skb, offset);
+ d = dma_map_single(ip->dma_dev, new_skb->data,
+ RX_BUF_SIZE, DMA_FROM_DEVICE);
+
+ if (dma_mapping_error(ip->dma_dev, d)) {
+ dev_kfree_skb_any(new_skb);
+ return -ENOMEM;
}
+ *rxb_dma = d;
+ *rxb = (struct ioc3_erxbuf *)new_skb->data;
+ skb_reserve(new_skb, RX_OFFSET);
+ *skb = new_skb;
- return skb;
+ return 0;
}
-static inline unsigned long ioc3_map(void *ptr, unsigned long vdev)
+#ifdef CONFIG_PCI_XTALK_BRIDGE
+static inline unsigned long ioc3_map(dma_addr_t addr, unsigned long attr)
{
-#ifdef CONFIG_SGI_IP27
- vdev <<= 57; /* Shift to PCI64_ATTR_VIRTUAL */
+ return (addr & ~PCI64_ATTR_BAR) | attr;
+}
- return vdev | (0xaUL << PCI64_ATTR_TARG_SHFT) | PCI64_ATTR_PREF |
- ((unsigned long)ptr & TO_PHYS_MASK);
+#define ERBAR_VAL (ERBAR_BARRIER_BIT << ERBAR_RXBARR_SHIFT)
#else
- return virt_to_bus(ptr);
-#endif
+static inline unsigned long ioc3_map(dma_addr_t addr, unsigned long attr)
+{
+ return addr;
}
-/* BEWARE: The IOC3 documentation documents the size of rx buffers as
- 1644 while it's actually 1664. This one was nasty to track down ... */
-#define RX_OFFSET 10
-#define RX_BUF_ALLOC_SIZE (1664 + RX_OFFSET + IOC3_CACHELINE)
-
-/* DMA barrier to separate cached and uncached accesses. */
-#define BARRIER() \
- __asm__("sync" ::: "memory")
-
+#define ERBAR_VAL 0
+#endif
#define IOC3_SIZE 0x100000
-/*
- * IOC3 is a big endian device
- *
- * Unorthodox but makes the users of these macros more readable - the pointer
- * to the IOC3's memory mapped registers is expected as struct ioc3 * ioc3
- * in the environment.
- */
-#define ioc3_r_mcr() be32_to_cpu(ioc3->mcr)
-#define ioc3_w_mcr(v) do { ioc3->mcr = cpu_to_be32(v); } while (0)
-#define ioc3_w_gpcr_s(v) do { ioc3->gpcr_s = cpu_to_be32(v); } while (0)
-#define ioc3_r_emcr() be32_to_cpu(ioc3->emcr)
-#define ioc3_w_emcr(v) do { ioc3->emcr = cpu_to_be32(v); } while (0)
-#define ioc3_r_eisr() be32_to_cpu(ioc3->eisr)
-#define ioc3_w_eisr(v) do { ioc3->eisr = cpu_to_be32(v); } while (0)
-#define ioc3_r_eier() be32_to_cpu(ioc3->eier)
-#define ioc3_w_eier(v) do { ioc3->eier = cpu_to_be32(v); } while (0)
-#define ioc3_r_ercsr() be32_to_cpu(ioc3->ercsr)
-#define ioc3_w_ercsr(v) do { ioc3->ercsr = cpu_to_be32(v); } while (0)
-#define ioc3_r_erbr_h() be32_to_cpu(ioc3->erbr_h)
-#define ioc3_w_erbr_h(v) do { ioc3->erbr_h = cpu_to_be32(v); } while (0)
-#define ioc3_r_erbr_l() be32_to_cpu(ioc3->erbr_l)
-#define ioc3_w_erbr_l(v) do { ioc3->erbr_l = cpu_to_be32(v); } while (0)
-#define ioc3_r_erbar() be32_to_cpu(ioc3->erbar)
-#define ioc3_w_erbar(v) do { ioc3->erbar = cpu_to_be32(v); } while (0)
-#define ioc3_r_ercir() be32_to_cpu(ioc3->ercir)
-#define ioc3_w_ercir(v) do { ioc3->ercir = cpu_to_be32(v); } while (0)
-#define ioc3_r_erpir() be32_to_cpu(ioc3->erpir)
-#define ioc3_w_erpir(v) do { ioc3->erpir = cpu_to_be32(v); } while (0)
-#define ioc3_r_ertr() be32_to_cpu(ioc3->ertr)
-#define ioc3_w_ertr(v) do { ioc3->ertr = cpu_to_be32(v); } while (0)
-#define ioc3_r_etcsr() be32_to_cpu(ioc3->etcsr)
-#define ioc3_w_etcsr(v) do { ioc3->etcsr = cpu_to_be32(v); } while (0)
-#define ioc3_r_ersr() be32_to_cpu(ioc3->ersr)
-#define ioc3_w_ersr(v) do { ioc3->ersr = cpu_to_be32(v); } while (0)
-#define ioc3_r_etcdc() be32_to_cpu(ioc3->etcdc)
-#define ioc3_w_etcdc(v) do { ioc3->etcdc = cpu_to_be32(v); } while (0)
-#define ioc3_r_ebir() be32_to_cpu(ioc3->ebir)
-#define ioc3_w_ebir(v) do { ioc3->ebir = cpu_to_be32(v); } while (0)
-#define ioc3_r_etbr_h() be32_to_cpu(ioc3->etbr_h)
-#define ioc3_w_etbr_h(v) do { ioc3->etbr_h = cpu_to_be32(v); } while (0)
-#define ioc3_r_etbr_l() be32_to_cpu(ioc3->etbr_l)
-#define ioc3_w_etbr_l(v) do { ioc3->etbr_l = cpu_to_be32(v); } while (0)
-#define ioc3_r_etcir() be32_to_cpu(ioc3->etcir)
-#define ioc3_w_etcir(v) do { ioc3->etcir = cpu_to_be32(v); } while (0)
-#define ioc3_r_etpir() be32_to_cpu(ioc3->etpir)
-#define ioc3_w_etpir(v) do { ioc3->etpir = cpu_to_be32(v); } while (0)
-#define ioc3_r_emar_h() be32_to_cpu(ioc3->emar_h)
-#define ioc3_w_emar_h(v) do { ioc3->emar_h = cpu_to_be32(v); } while (0)
-#define ioc3_r_emar_l() be32_to_cpu(ioc3->emar_l)
-#define ioc3_w_emar_l(v) do { ioc3->emar_l = cpu_to_be32(v); } while (0)
-#define ioc3_r_ehar_h() be32_to_cpu(ioc3->ehar_h)
-#define ioc3_w_ehar_h(v) do { ioc3->ehar_h = cpu_to_be32(v); } while (0)
-#define ioc3_r_ehar_l() be32_to_cpu(ioc3->ehar_l)
-#define ioc3_w_ehar_l(v) do { ioc3->ehar_l = cpu_to_be32(v); } while (0)
-#define ioc3_r_micr() be32_to_cpu(ioc3->micr)
-#define ioc3_w_micr(v) do { ioc3->micr = cpu_to_be32(v); } while (0)
-#define ioc3_r_midr_r() be32_to_cpu(ioc3->midr_r)
-#define ioc3_w_midr_r(v) do { ioc3->midr_r = cpu_to_be32(v); } while (0)
-#define ioc3_r_midr_w() be32_to_cpu(ioc3->midr_w)
-#define ioc3_w_midr_w(v) do { ioc3->midr_w = cpu_to_be32(v); } while (0)
-
static inline u32 mcr_pack(u32 pulse, u32 sample)
{
return (pulse << 10) | (sample << 2);
}
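mcr_pack() encodes the one-wire pulse and sample times into the MCR bit
fields used by the nic_*() helpers below. A worked example of the encoding
(the checked value is illustrative, not from this patch):

	#include <assert.h>
	#include <stdint.h>

	static uint32_t mcr_pack(uint32_t pulse, uint32_t sample)
	{
		return (pulse << 10) | (sample << 2);
	}

	int main(void)
	{
		/* the reset pulse nic_reset() issues: pulse 500, sample 65 */
		assert(mcr_pack(500, 65) == 0x7d104);
		return 0;
	}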
-static int nic_wait(struct ioc3 *ioc3)
+static int nic_wait(u32 __iomem *mcr)
{
- u32 mcr;
+ u32 m;
- do {
- mcr = ioc3_r_mcr();
- } while (!(mcr & 2));
+ do {
+ m = readl(mcr);
+ } while (!(m & 2));
- return mcr & 1;
+ return m & 1;
}
-static int nic_reset(struct ioc3 *ioc3)
+static int nic_reset(u32 __iomem *mcr)
{
- int presence;
+ int presence;
- ioc3_w_mcr(mcr_pack(500, 65));
- presence = nic_wait(ioc3);
+ writel(mcr_pack(500, 65), mcr);
+ presence = nic_wait(mcr);
- ioc3_w_mcr(mcr_pack(0, 500));
- nic_wait(ioc3);
+ writel(mcr_pack(0, 500), mcr);
+ nic_wait(mcr);
- return presence;
+ return presence;
}
-static inline int nic_read_bit(struct ioc3 *ioc3)
+static inline int nic_read_bit(u32 __iomem *mcr)
{
int result;
- ioc3_w_mcr(mcr_pack(6, 13));
- result = nic_wait(ioc3);
- ioc3_w_mcr(mcr_pack(0, 100));
- nic_wait(ioc3);
+ writel(mcr_pack(6, 13), mcr);
+ result = nic_wait(mcr);
+ writel(mcr_pack(0, 100), mcr);
+ nic_wait(mcr);
return result;
}
-static inline void nic_write_bit(struct ioc3 *ioc3, int bit)
+static inline void nic_write_bit(u32 __iomem *mcr, int bit)
{
if (bit)
- ioc3_w_mcr(mcr_pack(6, 110));
+ writel(mcr_pack(6, 110), mcr);
else
- ioc3_w_mcr(mcr_pack(80, 30));
+ writel(mcr_pack(80, 30), mcr);
- nic_wait(ioc3);
+ nic_wait(mcr);
}
-/*
- * Read a byte from an iButton device
+/* Read a byte from an iButton device
*/
-static u32 nic_read_byte(struct ioc3 *ioc3)
+static u32 nic_read_byte(u32 __iomem *mcr)
{
u32 result = 0;
int i;
for (i = 0; i < 8; i++)
- result = (result >> 1) | (nic_read_bit(ioc3) << 7);
+ result = (result >> 1) | (nic_read_bit(mcr) << 7);
return result;
}
-/*
- * Write a byte to an iButton device
+/* Write a byte to an iButton device
*/
-static void nic_write_byte(struct ioc3 *ioc3, int byte)
+static void nic_write_byte(u32 __iomem *mcr, int byte)
{
int i, bit;
@@ -293,26 +254,26 @@ static void nic_write_byte(struct ioc3 *ioc3, int byte)
bit = byte & 1;
byte >>= 1;
- nic_write_bit(ioc3, bit);
+ nic_write_bit(mcr, bit);
}
}
-static u64 nic_find(struct ioc3 *ioc3, int *last)
+static u64 nic_find(u32 __iomem *mcr, int *last)
{
int a, b, index, disc;
u64 address = 0;
- nic_reset(ioc3);
+ nic_reset(mcr);
/* Search ROM. */
- nic_write_byte(ioc3, 0xf0);
+ nic_write_byte(mcr, 0xf0);
/* Algorithm from ``Book of iButton Standards''. */
for (index = 0, disc = 0; index < 64; index++) {
- a = nic_read_bit(ioc3);
- b = nic_read_bit(ioc3);
+ a = nic_read_bit(mcr);
+ b = nic_read_bit(mcr);
if (a && b) {
- printk("NIC search failed (not fatal).\n");
+ pr_warn("NIC search failed (not fatal).\n");
*last = 0;
return 0;
}
@@ -323,16 +284,17 @@ static u64 nic_find(struct ioc3 *ioc3, int *last)
} else if (index > *last) {
address &= ~(1UL << index);
disc = index;
- } else if ((address & (1UL << index)) == 0)
+ } else if ((address & (1UL << index)) == 0) {
disc = index;
- nic_write_bit(ioc3, address & (1UL << index));
+ }
+ nic_write_bit(mcr, address & (1UL << index));
continue;
} else {
if (a)
address |= 1UL << index;
else
address &= ~(1UL << index);
- nic_write_bit(ioc3, a);
+ nic_write_bit(mcr, a);
continue;
}
}
@@ -342,7 +304,7 @@ static u64 nic_find(struct ioc3 *ioc3, int *last)
return address;
}
-static int nic_init(struct ioc3 *ioc3)
+static int nic_init(u32 __iomem *mcr)
{
const char *unknown = "unknown";
const char *type = unknown;
@@ -352,7 +314,8 @@ static int nic_init(struct ioc3 *ioc3)
while (1) {
u64 reg;
- reg = nic_find(ioc3, &save);
+
+ reg = nic_find(mcr, &save);
switch (reg & 0xff) {
case 0x91:
@@ -366,12 +329,12 @@ static int nic_init(struct ioc3 *ioc3)
continue;
}
- nic_reset(ioc3);
+ nic_reset(mcr);
/* Match ROM. */
- nic_write_byte(ioc3, 0x55);
+ nic_write_byte(mcr, 0x55);
for (i = 0; i < 8; i++)
- nic_write_byte(ioc3, (reg >> (i << 3)) & 0xff);
+ nic_write_byte(mcr, (reg >> (i << 3)) & 0xff);
reg >>= 8; /* Shift out type. */
for (i = 0; i < 6; i++) {
@@ -382,52 +345,50 @@ static int nic_init(struct ioc3 *ioc3)
break;
}
- printk("Found %s NIC", type);
+ pr_info("Found %s NIC", type);
if (type != unknown)
- printk (" registration number %pM, CRC %02x", serial, crc);
- printk(".\n");
+ pr_cont(" registration number %pM, CRC %02x", serial, crc);
+ pr_cont(".\n");
return 0;
}
-/*
- * Read the NIC (Number-In-a-Can) device used to store the MAC address on
+/* Read the NIC (Number-In-a-Can) device used to store the MAC address on
* SN0 / SN00 nodeboards and PCI cards.
*/
static void ioc3_get_eaddr_nic(struct ioc3_private *ip)
{
- struct ioc3 *ioc3 = ip->regs;
- u8 nic[14];
+ u32 __iomem *mcr = &ip->all_regs->mcr;
int tries = 2; /* There may be some problem with the battery? */
+ u8 nic[14];
int i;
- ioc3_w_gpcr_s(1 << 21);
+ writel(1 << 21, &ip->all_regs->gpcr_s);
while (tries--) {
- if (!nic_init(ioc3))
+ if (!nic_init(mcr))
break;
udelay(500);
}
if (tries < 0) {
- printk("Failed to read MAC address\n");
+ pr_err("Failed to read MAC address\n");
return;
}
/* Read Memory. */
- nic_write_byte(ioc3, 0xf0);
- nic_write_byte(ioc3, 0x00);
- nic_write_byte(ioc3, 0x00);
+ nic_write_byte(mcr, 0xf0);
+ nic_write_byte(mcr, 0x00);
+ nic_write_byte(mcr, 0x00);
for (i = 13; i >= 0; i--)
- nic[i] = nic_read_byte(ioc3);
+ nic[i] = nic_read_byte(mcr);
for (i = 2; i < 8; i++)
ip->dev->dev_addr[i - 2] = nic[i];
}
-/*
- * Ok, this is hosed by design. It's necessary to know what machine the
+/* Ok, this is hosed by design. It's necessary to know what machine the
* NIC is in in order to know how to read the NIC address. We also have
* to know if it's a PCI card or a NIC on the node board ...
*/
@@ -435,17 +396,21 @@ static void ioc3_get_eaddr(struct ioc3_private *ip)
{
ioc3_get_eaddr_nic(ip);
- printk("Ethernet address is %pM.\n", ip->dev->dev_addr);
+ pr_info("Ethernet address is %pM.\n", ip->dev->dev_addr);
}
static void __ioc3_set_mac_address(struct net_device *dev)
{
struct ioc3_private *ip = netdev_priv(dev);
- struct ioc3 *ioc3 = ip->regs;
- ioc3_w_emar_h((dev->dev_addr[5] << 8) | dev->dev_addr[4]);
- ioc3_w_emar_l((dev->dev_addr[3] << 24) | (dev->dev_addr[2] << 16) |
- (dev->dev_addr[1] << 8) | dev->dev_addr[0]);
+ writel((dev->dev_addr[5] << 8) |
+ dev->dev_addr[4],
+ &ip->regs->emar_h);
+ writel((dev->dev_addr[3] << 24) |
+ (dev->dev_addr[2] << 16) |
+ (dev->dev_addr[1] << 8) |
+ dev->dev_addr[0],
+ &ip->regs->emar_l);
}
static int ioc3_set_mac_address(struct net_device *dev, void *addr)
@@ -462,31 +427,35 @@ static int ioc3_set_mac_address(struct net_device *dev, void *addr)
return 0;
}
-/*
- * Caller must hold the ioc3_lock ever for MII readers. This is also
+/* Caller must hold the ioc3_lock even for MII readers. This is also
* used to protect the transmitter side but it's low contention.
*/
static int ioc3_mdio_read(struct net_device *dev, int phy, int reg)
{
struct ioc3_private *ip = netdev_priv(dev);
- struct ioc3 *ioc3 = ip->regs;
+ struct ioc3_ethregs *regs = ip->regs;
- while (ioc3_r_micr() & MICR_BUSY);
- ioc3_w_micr((phy << MICR_PHYADDR_SHIFT) | reg | MICR_READTRIG);
- while (ioc3_r_micr() & MICR_BUSY);
+ while (readl(&regs->micr) & MICR_BUSY)
+ ;
+ writel((phy << MICR_PHYADDR_SHIFT) | reg | MICR_READTRIG,
+ &regs->micr);
+ while (readl(&regs->micr) & MICR_BUSY)
+ ;
- return ioc3_r_midr_r() & MIDR_DATA_MASK;
+ return readl(&regs->midr_r) & MIDR_DATA_MASK;
}
static void ioc3_mdio_write(struct net_device *dev, int phy, int reg, int data)
{
struct ioc3_private *ip = netdev_priv(dev);
- struct ioc3 *ioc3 = ip->regs;
-
- while (ioc3_r_micr() & MICR_BUSY);
- ioc3_w_midr_w(data);
- ioc3_w_micr((phy << MICR_PHYADDR_SHIFT) | reg);
- while (ioc3_r_micr() & MICR_BUSY);
+ struct ioc3_ethregs *regs = ip->regs;
+
+ while (readl(&regs->micr) & MICR_BUSY)
+ ;
+ writel(data, &regs->midr_w);
+ writel((phy << MICR_PHYADDR_SHIFT) | reg, &regs->micr);
+ while (readl(&regs->micr) & MICR_BUSY)
+ ;
}
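Both MDIO helpers above spin on MICR_BUSY without a bound; if the PHY ever
wedges, they spin forever. A bounded variant is sketched here as a possible
hardening, with the timeout value and the demo_micr_wait() name being
assumptions, not part of this patch:

	static int demo_micr_wait(struct ioc3_ethregs *regs)
	{
		int tries = 10000;

		while (readl(&regs->micr) & MICR_BUSY) {
			if (--tries == 0)
				return -ETIMEDOUT;
			udelay(1);
		}
		return 0;
	}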
static int ioc3_mii_init(struct ioc3_private *ip);
@@ -494,23 +463,22 @@ static int ioc3_mii_init(struct ioc3_private *ip);
static struct net_device_stats *ioc3_get_stats(struct net_device *dev)
{
struct ioc3_private *ip = netdev_priv(dev);
- struct ioc3 *ioc3 = ip->regs;
+ struct ioc3_ethregs *regs = ip->regs;
- dev->stats.collisions += (ioc3_r_etcdc() & ETCDC_COLLCNT_MASK);
+ dev->stats.collisions += readl(&regs->etcdc) & ETCDC_COLLCNT_MASK;
return &dev->stats;
}
-static void ioc3_tcpudp_checksum(struct sk_buff *skb, uint32_t hwsum, int len)
+static void ioc3_tcpudp_checksum(struct sk_buff *skb, u32 hwsum, int len)
{
struct ethhdr *eh = eth_hdr(skb);
- uint32_t csum, ehsum;
unsigned int proto;
- struct iphdr *ih;
- uint16_t *ew;
unsigned char *cp;
+ struct iphdr *ih;
+ u32 csum, ehsum;
+ u16 *ew;
- /*
- * Did hardware handle the checksum at all? The cases we can handle
+ /* Did hardware handle the checksum at all? The cases we can handle
* are:
*
* - TCP and UDP checksums of IPv4 only.
@@ -526,7 +494,7 @@ static void ioc3_tcpudp_checksum(struct sk_buff *skb, uint32_t hwsum, int len)
if (eh->h_proto != htons(ETH_P_IP))
return;
- ih = (struct iphdr *) ((char *)eh + ETH_HLEN);
+ ih = (struct iphdr *)((char *)eh + ETH_HLEN);
if (ip_is_fragment(ih))
return;
@@ -537,12 +505,12 @@ static void ioc3_tcpudp_checksum(struct sk_buff *skb, uint32_t hwsum, int len)
/* Same as tx - compute csum of pseudo header */
csum = hwsum +
(ih->tot_len - (ih->ihl << 2)) +
- htons((uint16_t)ih->protocol) +
+ htons((u16)ih->protocol) +
(ih->saddr >> 16) + (ih->saddr & 0xffff) +
(ih->daddr >> 16) + (ih->daddr & 0xffff);
/* Sum up ethernet dest addr, src addr and protocol */
- ew = (uint16_t *) eh;
+ ew = (u16 *)eh;
ehsum = ew[0] + ew[1] + ew[2] + ew[3] + ew[4] + ew[5] + ew[6];
ehsum = (ehsum & 0xffff) + (ehsum >> 16);
@@ -551,14 +519,15 @@ static void ioc3_tcpudp_checksum(struct sk_buff *skb, uint32_t hwsum, int len)
csum += 0xffff ^ ehsum;
/* In the next step we also subtract the 1's complement
- checksum of the trailing ethernet CRC. */
+ * checksum of the trailing ethernet CRC.
+ */
cp = (char *)eh + len; /* points at trailing CRC */
if (len & 1) {
- csum += 0xffff ^ (uint16_t) ((cp[1] << 8) | cp[0]);
- csum += 0xffff ^ (uint16_t) ((cp[3] << 8) | cp[2]);
+ csum += 0xffff ^ (u16)((cp[1] << 8) | cp[0]);
+ csum += 0xffff ^ (u16)((cp[3] << 8) | cp[2]);
} else {
- csum += 0xffff ^ (uint16_t) ((cp[0] << 8) | cp[1]);
- csum += 0xffff ^ (uint16_t) ((cp[2] << 8) | cp[3]);
+ csum += 0xffff ^ (u16)((cp[0] << 8) | cp[1]);
+ csum += 0xffff ^ (u16)((cp[2] << 8) | cp[3]);
}
csum = (csum & 0xffff) + (csum >> 16);
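The two-step fold above reduces a 32-bit one's-complement accumulator to 16
bits; the first step can still produce a carry, the second cannot. A small
illustrative helper (not driver code):

	static u32 demo_fold16(u32 csum)
	{
		csum = (csum & 0xffff) + (csum >> 16);	/* may carry once more */
		csum = (csum & 0xffff) + (csum >> 16);	/* now fits in 16 bits */
		return csum;	/* e.g. demo_fold16(0x1abcd) == 0xabce */
	}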
@@ -572,10 +541,10 @@ static inline void ioc3_rx(struct net_device *dev)
{
struct ioc3_private *ip = netdev_priv(dev);
struct sk_buff *skb, *new_skb;
- struct ioc3 *ioc3 = ip->regs;
int rx_entry, n_entry, len;
struct ioc3_erxbuf *rxb;
unsigned long *rxr;
+ dma_addr_t d;
u32 w0, err;
rxr = ip->rxr; /* Ring base */
@@ -583,64 +552,67 @@ static inline void ioc3_rx(struct net_device *dev)
n_entry = ip->rx_pi;
skb = ip->rx_skbs[rx_entry];
- rxb = (struct ioc3_erxbuf *) (skb->data - RX_OFFSET);
+ rxb = (struct ioc3_erxbuf *)(skb->data - RX_OFFSET);
w0 = be32_to_cpu(rxb->w0);
while (w0 & ERXBUF_V) {
err = be32_to_cpu(rxb->err); /* It's valid ... */
if (err & ERXBUF_GOODPKT) {
len = ((w0 >> ERXBUF_BYTECNT_SHIFT) & 0x7ff) - 4;
- skb_trim(skb, len);
+ skb_put(skb, len);
skb->protocol = eth_type_trans(skb, dev);
- new_skb = ioc3_alloc_skb(RX_BUF_ALLOC_SIZE, GFP_ATOMIC);
- if (!new_skb) {
+ if (ioc3_alloc_skb(ip, &new_skb, &rxb, &d)) {
/* Ouch, drop the packet and just recycle the buffer
- to keep the ring filled. */
+ * to keep the ring filled.
+ */
dev->stats.rx_dropped++;
new_skb = skb;
+ d = rxr[rx_entry];
goto next;
}
if (likely(dev->features & NETIF_F_RXCSUM))
ioc3_tcpudp_checksum(skb,
- w0 & ERXBUF_IPCKSUM_MASK, len);
+ w0 & ERXBUF_IPCKSUM_MASK,
+ len);
+
+ dma_unmap_single(ip->dma_dev, rxr[rx_entry],
+ RX_BUF_SIZE, DMA_FROM_DEVICE);
netif_rx(skb);
ip->rx_skbs[rx_entry] = NULL; /* Poison */
- /* Because we reserve afterwards. */
- skb_put(new_skb, (1664 + RX_OFFSET));
- rxb = (struct ioc3_erxbuf *) new_skb->data;
- skb_reserve(new_skb, RX_OFFSET);
-
dev->stats.rx_packets++; /* Statistics */
dev->stats.rx_bytes += len;
} else {
/* The frame is invalid and the skb never
- reached the network layer so we can just
- recycle it. */
+ * reached the network layer so we can just
+ * recycle it.
+ */
new_skb = skb;
+ d = rxr[rx_entry];
dev->stats.rx_errors++;
}
if (err & ERXBUF_CRCERR) /* Statistics */
dev->stats.rx_crc_errors++;
if (err & ERXBUF_FRAMERR)
dev->stats.rx_frame_errors++;
+
next:
ip->rx_skbs[n_entry] = new_skb;
- rxr[n_entry] = cpu_to_be64(ioc3_map(rxb, 1));
+ rxr[n_entry] = cpu_to_be64(ioc3_map(d, PCI64_ATTR_BAR));
rxb->w0 = 0; /* Clear valid flag */
- n_entry = (n_entry + 1) & 511; /* Update erpir */
+ n_entry = (n_entry + 1) & RX_RING_MASK; /* Update erpir */
/* Now go on to the next ring entry. */
- rx_entry = (rx_entry + 1) & 511;
+ rx_entry = (rx_entry + 1) & RX_RING_MASK;
skb = ip->rx_skbs[rx_entry];
- rxb = (struct ioc3_erxbuf *) (skb->data - RX_OFFSET);
+ rxb = (struct ioc3_erxbuf *)(skb->data - RX_OFFSET);
w0 = be32_to_cpu(rxb->w0);
}
- ioc3_w_erpir((n_entry << 3) | ERPIR_ARM);
+ writel((n_entry << 3) | ERPIR_ARM, &ip->regs->erpir);
ip->rx_pi = n_entry;
ip->rx_ci = rx_entry;
}
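The RX_RING_MASK arithmetic above relies on the ring size being a power of
two, so the AND is equivalent to a modulo without the division. A minimal
check (illustrative, not driver code):

	#include <assert.h>

	#define RX_RING_ENTRIES	512
	#define RX_RING_MASK	(RX_RING_ENTRIES - 1)

	int main(void)
	{
		int i = 511;

		assert(((i + 1) & RX_RING_MASK) == 0);
		assert(((i + 1) % RX_RING_ENTRIES) == 0);
		return 0;
	}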
@@ -648,16 +620,16 @@ next:
static inline void ioc3_tx(struct net_device *dev)
{
struct ioc3_private *ip = netdev_priv(dev);
+ struct ioc3_ethregs *regs = ip->regs;
unsigned long packets, bytes;
- struct ioc3 *ioc3 = ip->regs;
int tx_entry, o_entry;
struct sk_buff *skb;
u32 etcir;
spin_lock(&ip->ioc3_lock);
- etcir = ioc3_r_etcir();
+ etcir = readl(&regs->etcir);
- tx_entry = (etcir >> 7) & 127;
+ tx_entry = (etcir >> 7) & TX_RING_MASK;
o_entry = ip->tx_ci;
packets = 0;
bytes = 0;
@@ -669,25 +641,24 @@ static inline void ioc3_tx(struct net_device *dev)
dev_consume_skb_irq(skb);
ip->tx_skbs[o_entry] = NULL;
- o_entry = (o_entry + 1) & 127; /* Next */
+ o_entry = (o_entry + 1) & TX_RING_MASK; /* Next */
- etcir = ioc3_r_etcir(); /* More pkts sent? */
- tx_entry = (etcir >> 7) & 127;
+ etcir = readl(&regs->etcir); /* More pkts sent? */
+ tx_entry = (etcir >> 7) & TX_RING_MASK;
}
dev->stats.tx_packets += packets;
dev->stats.tx_bytes += bytes;
ip->txqlen -= packets;
- if (ip->txqlen < 128)
+ if (netif_queue_stopped(dev) && ip->txqlen < TX_RING_ENTRIES)
netif_wake_queue(dev);
ip->tx_ci = o_entry;
spin_unlock(&ip->ioc3_lock);
}
-/*
- * Deal with fatal IOC3 errors. This condition might be caused by a hard or
+/* Deal with fatal IOC3 errors. This condition might be caused by hardware or
* software problems, so we should try to recover
* more gracefully if this ever happens. In theory we might be flooded
* with such error interrupts if something really goes wrong, so we might
@@ -696,25 +667,33 @@ static inline void ioc3_tx(struct net_device *dev)
static void ioc3_error(struct net_device *dev, u32 eisr)
{
struct ioc3_private *ip = netdev_priv(dev);
- unsigned char *iface = dev->name;
spin_lock(&ip->ioc3_lock);
if (eisr & EISR_RXOFLO)
- printk(KERN_ERR "%s: RX overflow.\n", iface);
+ net_err_ratelimited("%s: RX overflow.\n", dev->name);
if (eisr & EISR_RXBUFOFLO)
- printk(KERN_ERR "%s: RX buffer overflow.\n", iface);
+ net_err_ratelimited("%s: RX buffer overflow.\n", dev->name);
if (eisr & EISR_RXMEMERR)
- printk(KERN_ERR "%s: RX PCI error.\n", iface);
+ net_err_ratelimited("%s: RX PCI error.\n", dev->name);
if (eisr & EISR_RXPARERR)
- printk(KERN_ERR "%s: RX SSRAM parity error.\n", iface);
+ net_err_ratelimited("%s: RX SSRAM parity error.\n", dev->name);
if (eisr & EISR_TXBUFUFLO)
- printk(KERN_ERR "%s: TX buffer underflow.\n", iface);
+ net_err_ratelimited("%s: TX buffer underflow.\n", dev->name);
if (eisr & EISR_TXMEMERR)
- printk(KERN_ERR "%s: TX PCI error.\n", iface);
+ net_err_ratelimited("%s: TX PCI error.\n", dev->name);
ioc3_stop(ip);
+ ioc3_free_rx_bufs(ip);
+ ioc3_clean_tx_ring(ip);
+
ioc3_init(dev);
+ if (ioc3_alloc_rx_bufs(dev)) {
+ netdev_err(dev, "%s: rx buffer allocation failed\n", __func__);
+ spin_unlock(&ip->ioc3_lock);
+ return;
+ }
+ ioc3_start(ip);
ioc3_mii_init(ip);
netif_wake_queue(dev);
@@ -723,45 +702,45 @@ static void ioc3_error(struct net_device *dev, u32 eisr)
}
/* The interrupt handler does all of the Rx thread work and cleans up
- after the Tx thread. */
-static irqreturn_t ioc3_interrupt(int irq, void *_dev)
+ * after the Tx thread.
+ */
+static irqreturn_t ioc3_interrupt(int irq, void *dev_id)
{
- struct net_device *dev = (struct net_device *)_dev;
- struct ioc3_private *ip = netdev_priv(dev);
- struct ioc3 *ioc3 = ip->regs;
- const u32 enabled = EISR_RXTIMERINT | EISR_RXOFLO | EISR_RXBUFOFLO |
- EISR_RXMEMERR | EISR_RXPARERR | EISR_TXBUFUFLO |
- EISR_TXEXPLICIT | EISR_TXMEMERR;
+ struct ioc3_private *ip = netdev_priv(dev_id);
+ struct ioc3_ethregs *regs = ip->regs;
u32 eisr;
- eisr = ioc3_r_eisr() & enabled;
-
- ioc3_w_eisr(eisr);
- (void) ioc3_r_eisr(); /* Flush */
+ eisr = readl(&regs->eisr);
+ writel(eisr, &regs->eisr);
+ readl(&regs->eisr); /* Flush */
if (eisr & (EISR_RXOFLO | EISR_RXBUFOFLO | EISR_RXMEMERR |
- EISR_RXPARERR | EISR_TXBUFUFLO | EISR_TXMEMERR))
- ioc3_error(dev, eisr);
+ EISR_RXPARERR | EISR_TXBUFUFLO | EISR_TXMEMERR))
+ ioc3_error(dev_id, eisr);
if (eisr & EISR_RXTIMERINT)
- ioc3_rx(dev);
+ ioc3_rx(dev_id);
if (eisr & EISR_TXEXPLICIT)
- ioc3_tx(dev);
+ ioc3_tx(dev_id);
return IRQ_HANDLED;
}
static inline void ioc3_setup_duplex(struct ioc3_private *ip)
{
- struct ioc3 *ioc3 = ip->regs;
+ struct ioc3_ethregs *regs = ip->regs;
+
+ spin_lock_irq(&ip->ioc3_lock);
if (ip->mii.full_duplex) {
- ioc3_w_etcsr(ETCSR_FD);
+ writel(ETCSR_FD, &regs->etcsr);
ip->emcr |= EMCR_DUPLEX;
} else {
- ioc3_w_etcsr(ETCSR_HD);
+ writel(ETCSR_HD, &regs->etcsr);
ip->emcr &= ~EMCR_DUPLEX;
}
- ioc3_w_emcr(ip->emcr);
+ writel(ip->emcr, &regs->emcr);
+
+ spin_unlock_irq(&ip->ioc3_lock);
}
static void ioc3_timer(struct timer_list *t)
@@ -772,12 +751,11 @@ static void ioc3_timer(struct timer_list *t)
mii_check_media(&ip->mii, 1, 0);
ioc3_setup_duplex(ip);
- ip->ioc3_timer.expires = jiffies + ((12 * HZ)/10); /* 1.2s */
+ ip->ioc3_timer.expires = jiffies + ((12 * HZ) / 10); /* 1.2s */
add_timer(&ip->ioc3_timer);
}
-/*
- * Try to find a PHY. There is no apparent relation between the MII addresses
+/* Try to find a PHY. There is no apparent relation between the MII addresses
* in the SGI documentation and what we find in reality, so we simply probe
* for the PHY. It seems IOC3 PHYs usually live on address 31. One of my
* onboard IOC3s has the special oddity that probing doesn't seem to find it
@@ -786,8 +764,8 @@ static void ioc3_timer(struct timer_list *t)
*/
static int ioc3_mii_init(struct ioc3_private *ip)
{
- int i, found = 0, res = 0;
int ioc3_phy_workaround = 1;
+ int i, found = 0, res = 0;
u16 word;
for (i = 0; i < 32; i++) {
@@ -800,9 +778,9 @@ static int ioc3_mii_init(struct ioc3_private *ip)
}
if (!found) {
- if (ioc3_phy_workaround)
+ if (ioc3_phy_workaround) {
i = 31;
- else {
+ } else {
ip->mii.phy_id = -1;
res = -ENODEV;
goto out;
@@ -817,27 +795,27 @@ out:
static void ioc3_mii_start(struct ioc3_private *ip)
{
- ip->ioc3_timer.expires = jiffies + (12 * HZ)/10; /* 1.2 sec. */
+ ip->ioc3_timer.expires = jiffies + (12 * HZ) / 10; /* 1.2 sec. */
add_timer(&ip->ioc3_timer);
}
-static inline void ioc3_clean_rx_ring(struct ioc3_private *ip)
+static inline void ioc3_tx_unmap(struct ioc3_private *ip, int entry)
{
- struct sk_buff *skb;
- int i;
-
- for (i = ip->rx_ci; i & 15; i++) {
- ip->rx_skbs[ip->rx_pi] = ip->rx_skbs[ip->rx_ci];
- ip->rxr[ip->rx_pi++] = ip->rxr[ip->rx_ci++];
+ struct ioc3_etxd *desc;
+ u32 cmd, bufcnt, len;
+
+ desc = &ip->txr[entry];
+ cmd = be32_to_cpu(desc->cmd);
+ bufcnt = be32_to_cpu(desc->bufcnt);
+ if (cmd & ETXD_B1V) {
+ len = (bufcnt & ETXD_B1CNT_MASK) >> ETXD_B1CNT_SHIFT;
+ dma_unmap_single(ip->dma_dev, be64_to_cpu(desc->p1),
+ len, DMA_TO_DEVICE);
}
- ip->rx_pi &= 511;
- ip->rx_ci &= 511;
-
- for (i = ip->rx_ci; i != ip->rx_pi; i = (i+1) & 511) {
- struct ioc3_erxbuf *rxb;
- skb = ip->rx_skbs[i];
- rxb = (struct ioc3_erxbuf *) (skb->data - RX_OFFSET);
- rxb->w0 = 0;
+ if (cmd & ETXD_B2V) {
+ len = (bufcnt & ETXD_B2CNT_MASK) >> ETXD_B2CNT_SHIFT;
+ dma_unmap_single(ip->dma_dev, be64_to_cpu(desc->p2),
+ len, DMA_TO_DEVICE);
}
}
@@ -846,9 +824,10 @@ static inline void ioc3_clean_tx_ring(struct ioc3_private *ip)
struct sk_buff *skb;
int i;
- for (i=0; i < 128; i++) {
+ for (i = 0; i < TX_RING_ENTRIES; i++) {
skb = ip->tx_skbs[i];
if (skb) {
+ ioc3_tx_unmap(ip, i);
ip->tx_skbs[i] = NULL;
dev_kfree_skb_any(skb);
}
@@ -858,179 +837,137 @@ static inline void ioc3_clean_tx_ring(struct ioc3_private *ip)
ip->tx_ci = 0;
}
-static void ioc3_free_rings(struct ioc3_private *ip)
+static void ioc3_free_rx_bufs(struct ioc3_private *ip)
{
- struct sk_buff *skb;
int rx_entry, n_entry;
+ struct sk_buff *skb;
- if (ip->txr) {
- ioc3_clean_tx_ring(ip);
- free_pages((unsigned long)ip->txr, 2);
- ip->txr = NULL;
- }
-
- if (ip->rxr) {
- n_entry = ip->rx_ci;
- rx_entry = ip->rx_pi;
-
- while (n_entry != rx_entry) {
- skb = ip->rx_skbs[n_entry];
- if (skb)
- dev_kfree_skb_any(skb);
+ n_entry = ip->rx_ci;
+ rx_entry = ip->rx_pi;
- n_entry = (n_entry + 1) & 511;
+ while (n_entry != rx_entry) {
+ skb = ip->rx_skbs[n_entry];
+ if (skb) {
+ dma_unmap_single(ip->dma_dev,
+ be64_to_cpu(ip->rxr[n_entry]),
+ RX_BUF_SIZE, DMA_FROM_DEVICE);
+ dev_kfree_skb_any(skb);
}
- free_page((unsigned long)ip->rxr);
- ip->rxr = NULL;
+ n_entry = (n_entry + 1) & RX_RING_MASK;
}
}
-static void ioc3_alloc_rings(struct net_device *dev)
+static int ioc3_alloc_rx_bufs(struct net_device *dev)
{
struct ioc3_private *ip = netdev_priv(dev);
struct ioc3_erxbuf *rxb;
- unsigned long *rxr;
+ dma_addr_t d;
int i;
- if (ip->rxr == NULL) {
- /* Allocate and initialize rx ring. 4kb = 512 entries */
- ip->rxr = (unsigned long *) get_zeroed_page(GFP_ATOMIC);
- rxr = ip->rxr;
- if (!rxr)
- printk("ioc3_alloc_rings(): get_zeroed_page() failed!\n");
-
- /* Now the rx buffers. The RX ring may be larger but
- we only allocate 16 buffers for now. Need to tune
- this for performance and memory later. */
- for (i = 0; i < RX_BUFFS; i++) {
- struct sk_buff *skb;
-
- skb = ioc3_alloc_skb(RX_BUF_ALLOC_SIZE, GFP_ATOMIC);
- if (!skb) {
- show_free_areas(0, NULL);
- continue;
- }
-
- ip->rx_skbs[i] = skb;
-
- /* Because we reserve afterwards. */
- skb_put(skb, (1664 + RX_OFFSET));
- rxb = (struct ioc3_erxbuf *) skb->data;
- rxr[i] = cpu_to_be64(ioc3_map(rxb, 1));
- skb_reserve(skb, RX_OFFSET);
- }
- ip->rx_ci = 0;
- ip->rx_pi = RX_BUFFS;
- }
+ /* Now the rx buffers. The RX ring may be larger but
+ * we only allocate 16 buffers for now. Need to tune
+ * this for performance and memory later.
+ */
+ for (i = 0; i < RX_BUFFS; i++) {
+ if (ioc3_alloc_skb(ip, &ip->rx_skbs[i], &rxb, &d))
+ return -ENOMEM;
- if (ip->txr == NULL) {
- /* Allocate and initialize tx rings. 16kb = 128 bufs. */
- ip->txr = (struct ioc3_etxd *)__get_free_pages(GFP_KERNEL, 2);
- if (!ip->txr)
- printk("ioc3_alloc_rings(): __get_free_pages() failed!\n");
- ip->tx_pi = 0;
- ip->tx_ci = 0;
+ rxb->w0 = 0; /* Clear valid flag */
+ ip->rxr[i] = cpu_to_be64(ioc3_map(d, PCI64_ATTR_BAR));
}
-}
-
-static void ioc3_init_rings(struct net_device *dev)
-{
- struct ioc3_private *ip = netdev_priv(dev);
- struct ioc3 *ioc3 = ip->regs;
- unsigned long ring;
-
- ioc3_free_rings(ip);
- ioc3_alloc_rings(dev);
-
- ioc3_clean_rx_ring(ip);
- ioc3_clean_tx_ring(ip);
+ ip->rx_ci = 0;
+ ip->rx_pi = RX_BUFFS;
- /* Now the rx ring base, consume & produce registers. */
- ring = ioc3_map(ip->rxr, 0);
- ioc3_w_erbr_h(ring >> 32);
- ioc3_w_erbr_l(ring & 0xffffffff);
- ioc3_w_ercir(ip->rx_ci << 3);
- ioc3_w_erpir((ip->rx_pi << 3) | ERPIR_ARM);
-
- ring = ioc3_map(ip->txr, 0);
-
- ip->txqlen = 0; /* nothing queued */
-
- /* Now the tx ring base, consume & produce registers. */
- ioc3_w_etbr_h(ring >> 32);
- ioc3_w_etbr_l(ring & 0xffffffff);
- ioc3_w_etpir(ip->tx_pi << 7);
- ioc3_w_etcir(ip->tx_ci << 7);
- (void) ioc3_r_etcir(); /* Flush */
+ return 0;
}
static inline void ioc3_ssram_disc(struct ioc3_private *ip)
{
- struct ioc3 *ioc3 = ip->regs;
- volatile u32 *ssram0 = &ioc3->ssram[0x0000];
- volatile u32 *ssram1 = &ioc3->ssram[0x4000];
- unsigned int pattern = 0x5555;
+ struct ioc3_ethregs *regs = ip->regs;
+ u32 *ssram0 = &ip->ssram[0x0000];
+ u32 *ssram1 = &ip->ssram[0x4000];
+ u32 pattern = 0x5555;
/* Assume the larger size SSRAM and enable parity checking */
- ioc3_w_emcr(ioc3_r_emcr() | (EMCR_BUFSIZ | EMCR_RAMPAR));
+ writel(readl(&regs->emcr) | (EMCR_BUFSIZ | EMCR_RAMPAR), &regs->emcr);
+ readl(&regs->emcr); /* Flush */
- *ssram0 = pattern;
- *ssram1 = ~pattern & IOC3_SSRAM_DM;
+ writel(pattern, ssram0);
+ writel(~pattern & IOC3_SSRAM_DM, ssram1);
- if ((*ssram0 & IOC3_SSRAM_DM) != pattern ||
- (*ssram1 & IOC3_SSRAM_DM) != (~pattern & IOC3_SSRAM_DM)) {
+ if ((readl(ssram0) & IOC3_SSRAM_DM) != pattern ||
+ (readl(ssram1) & IOC3_SSRAM_DM) != (~pattern & IOC3_SSRAM_DM)) {
/* set ssram size to 64 KB */
- ip->emcr = EMCR_RAMPAR;
- ioc3_w_emcr(ioc3_r_emcr() & ~EMCR_BUFSIZ);
- } else
- ip->emcr = EMCR_BUFSIZ | EMCR_RAMPAR;
+ ip->emcr |= EMCR_RAMPAR;
+ writel(readl(&regs->emcr) & ~EMCR_BUFSIZ, &regs->emcr);
+ } else {
+ ip->emcr |= EMCR_BUFSIZ | EMCR_RAMPAR;
+ }
}
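ioc3_ssram_disc() sizes the SSRAM by writing distinct patterns at offset 0
and at the offset where a smaller part would alias, then reading back. A
simplified sketch of the idea; demo_ssram_is_large() only rechecks the
first location, whereas the driver checks both:

	static bool demo_ssram_is_large(u32 __iomem *ssram, u32 dm_mask)
	{
		writel(0x5555, &ssram[0x0000]);
		writel(~0x5555 & dm_mask, &ssram[0x4000]);

		/* on the small part the second write aliases the first */
		return (readl(&ssram[0x0000]) & dm_mask) == 0x5555;
	}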
static void ioc3_init(struct net_device *dev)
{
struct ioc3_private *ip = netdev_priv(dev);
- struct ioc3 *ioc3 = ip->regs;
+ struct ioc3_ethregs *regs = ip->regs;
del_timer_sync(&ip->ioc3_timer); /* Kill if running */
- ioc3_w_emcr(EMCR_RST); /* Reset */
- (void) ioc3_r_emcr(); /* Flush WB */
+ writel(EMCR_RST, &regs->emcr); /* Reset */
+ readl(&regs->emcr); /* Flush WB */
udelay(4); /* Give it time ... */
- ioc3_w_emcr(0);
- (void) ioc3_r_emcr();
+ writel(0, &regs->emcr);
+ readl(&regs->emcr);
/* Misc registers */
-#ifdef CONFIG_SGI_IP27
- ioc3_w_erbar(PCI64_ATTR_BAR >> 32); /* Barrier on last store */
-#else
- ioc3_w_erbar(0); /* Let PCI API get it right */
-#endif
- (void) ioc3_r_etcdc(); /* Clear on read */
- ioc3_w_ercsr(15); /* RX low watermark */
- ioc3_w_ertr(0); /* Interrupt immediately */
+ writel(ERBAR_VAL, &regs->erbar);
+ readl(&regs->etcdc); /* Clear on read */
+ writel(15, &regs->ercsr); /* RX low watermark */
+ writel(0, &regs->ertr); /* Interrupt immediately */
__ioc3_set_mac_address(dev);
- ioc3_w_ehar_h(ip->ehar_h);
- ioc3_w_ehar_l(ip->ehar_l);
- ioc3_w_ersr(42); /* XXX should be random */
+ writel(ip->ehar_h, &regs->ehar_h);
+ writel(ip->ehar_l, &regs->ehar_l);
+ writel(42, &regs->ersr); /* XXX should be random */
+}
- ioc3_init_rings(dev);
+static void ioc3_start(struct ioc3_private *ip)
+{
+ struct ioc3_ethregs *regs = ip->regs;
+ unsigned long ring;
+
+ /* Now the rx ring base, consume & produce registers. */
+ ring = ioc3_map(ip->rxr_dma, PCI64_ATTR_PREC);
+ writel(ring >> 32, &regs->erbr_h);
+ writel(ring & 0xffffffff, &regs->erbr_l);
+ writel(ip->rx_ci << 3, &regs->ercir);
+ writel((ip->rx_pi << 3) | ERPIR_ARM, &regs->erpir);
+
+ ring = ioc3_map(ip->txr_dma, PCI64_ATTR_PREC);
+
+ ip->txqlen = 0; /* nothing queued */
+
+ /* Now the tx ring base, consume & produce registers. */
+ writel(ring >> 32, &regs->etbr_h);
+ writel(ring & 0xffffffff, &regs->etbr_l);
+ writel(ip->tx_pi << 7, &regs->etpir);
+ writel(ip->tx_ci << 7, &regs->etcir);
+ readl(&regs->etcir); /* Flush */
ip->emcr |= ((RX_OFFSET / 2) << EMCR_RXOFF_SHIFT) | EMCR_TXDMAEN |
- EMCR_TXEN | EMCR_RXDMAEN | EMCR_RXEN | EMCR_PADEN;
- ioc3_w_emcr(ip->emcr);
- ioc3_w_eier(EISR_RXTIMERINT | EISR_RXOFLO | EISR_RXBUFOFLO |
- EISR_RXMEMERR | EISR_RXPARERR | EISR_TXBUFUFLO |
- EISR_TXEXPLICIT | EISR_TXMEMERR);
- (void) ioc3_r_eier();
+ EMCR_TXEN | EMCR_RXDMAEN | EMCR_RXEN | EMCR_PADEN;
+ writel(ip->emcr, &regs->emcr);
+ writel(EISR_RXTIMERINT | EISR_RXOFLO | EISR_RXBUFOFLO |
+ EISR_RXMEMERR | EISR_RXPARERR | EISR_TXBUFUFLO |
+ EISR_TXEXPLICIT | EISR_TXMEMERR, &regs->eier);
+ readl(&regs->eier);
}
static inline void ioc3_stop(struct ioc3_private *ip)
{
- struct ioc3 *ioc3 = ip->regs;
+ struct ioc3_ethregs *regs = ip->regs;
- ioc3_w_emcr(0); /* Shutup */
- ioc3_w_eier(0); /* Disable interrupts */
- (void) ioc3_r_eier(); /* Flush */
+ writel(0, &regs->emcr); /* Shut up */
+ writel(0, &regs->eier); /* Disable interrupts */
+ readl(&regs->eier); /* Flush */
}
static int ioc3_open(struct net_device *dev)
@@ -1038,14 +975,20 @@ static int ioc3_open(struct net_device *dev)
struct ioc3_private *ip = netdev_priv(dev);
if (request_irq(dev->irq, ioc3_interrupt, IRQF_SHARED, ioc3_str, dev)) {
- printk(KERN_ERR "%s: Can't get irq %d\n", dev->name, dev->irq);
+ netdev_err(dev, "Can't get irq %d\n", dev->irq);
return -EAGAIN;
}
ip->ehar_h = 0;
ip->ehar_l = 0;
+
ioc3_init(dev);
+ if (ioc3_alloc_rx_bufs(dev)) {
+ netdev_err(dev, "%s: rx buffer allocation failed\n", __func__);
+ return -ENOMEM;
+ }
+ ioc3_start(ip);
ioc3_mii_start(ip);
netif_start_queue(dev);
@@ -1063,12 +1006,13 @@ static int ioc3_close(struct net_device *dev)
ioc3_stop(ip);
free_irq(dev->irq, dev);
- ioc3_free_rings(ip);
+ ioc3_free_rx_bufs(ip);
+ ioc3_clean_tx_ring(ip);
+
return 0;
}
-/*
- * MENET cards have four IOC3 chips, which are attached to two sets of
+/* MENET cards have four IOC3 chips, which are attached to two sets of
* PCI slot resources each: the primary connections are on slots
* 0..3 and the secondaries are on 4..7
*
@@ -1085,7 +1029,7 @@ static int ioc3_adjacent_is_ioc3(struct pci_dev *pdev, int slot)
if (dev) {
if (dev->vendor == PCI_VENDOR_ID_SGI &&
- dev->device == PCI_DEVICE_ID_SGI_IOC3)
+ dev->device == PCI_DEVICE_ID_SGI_IOC3)
ret = 1;
pci_dev_put(dev);
}
@@ -1095,15 +1039,14 @@ static int ioc3_adjacent_is_ioc3(struct pci_dev *pdev, int slot)
static int ioc3_is_menet(struct pci_dev *pdev)
{
- return pdev->bus->parent == NULL &&
+ return !pdev->bus->parent &&
ioc3_adjacent_is_ioc3(pdev, 0) &&
ioc3_adjacent_is_ioc3(pdev, 1) &&
ioc3_adjacent_is_ioc3(pdev, 2);
}
#ifdef CONFIG_SERIAL_8250
-/*
- * Note about serial ports and consoles:
+/* Note about serial ports and consoles:
* For console output, everyone uses the IOC3 UARTA (offset 0x178)
* connected to the master node (look in ip27_setup_console() and
* ip27prom_console_write()).
@@ -1140,31 +1083,32 @@ static void ioc3_8250_register(struct ioc3_uartregs __iomem *uart)
#define COSMISC_CONSTANT 6
struct uart_8250_port port = {
- .port = {
+ .port = {
.irq = 0,
.flags = UPF_SKIP_TEST | UPF_BOOT_AUTOCONF,
.iotype = UPIO_MEM,
.regshift = 0,
.uartclk = (22000000 << 1) / COSMISC_CONSTANT,
- .membase = (unsigned char __iomem *) uart,
- .mapbase = (unsigned long) uart,
- }
+ .membase = (unsigned char __iomem *)uart,
+ .mapbase = (unsigned long)uart,
+ }
};
unsigned char lcr;
- lcr = uart->iu_lcr;
- uart->iu_lcr = lcr | UART_LCR_DLAB;
- uart->iu_scr = COSMISC_CONSTANT,
- uart->iu_lcr = lcr;
- uart->iu_lcr;
+ lcr = readb(&uart->iu_lcr);
+ writeb(lcr | UART_LCR_DLAB, &uart->iu_lcr);
+ writeb(COSMISC_CONSTANT, &uart->iu_scr);
+ writeb(lcr, &uart->iu_lcr);
+ readb(&uart->iu_lcr);
serial8250_register_8250_port(&port);
}
static void ioc3_serial_probe(struct pci_dev *pdev, struct ioc3 *ioc3)
{
- /*
- * We need to recognice and treat the fourth MENET serial as it
+ u32 sio_iec;
+
+ /* We need to recognize and treat the fourth MENET serial port as it
* does not have a SuperIO chip attached to it, therefore attempting
* to access it will result in bus errors. We call something an
* MENET if PCI slot 0, 1, 2 and 3 of a master PCI bus all have an IOC3
@@ -1175,33 +1119,34 @@ static void ioc3_serial_probe(struct pci_dev *pdev, struct ioc3 *ioc3)
if (ioc3_is_menet(pdev) && PCI_SLOT(pdev->devfn) == 3)
return;
- /*
- * Switch IOC3 to PIO mode. It probably already was but let's be
+ /* Switch IOC3 to PIO mode. It probably already was but let's be
* paranoid
*/
- ioc3->gpcr_s = GPCR_UARTA_MODESEL | GPCR_UARTB_MODESEL;
- ioc3->gpcr_s;
- ioc3->gppr_6 = 0;
- ioc3->gppr_6;
- ioc3->gppr_7 = 0;
- ioc3->gppr_7;
- ioc3->sscr_a = ioc3->sscr_a & ~SSCR_DMA_EN;
- ioc3->sscr_a;
- ioc3->sscr_b = ioc3->sscr_b & ~SSCR_DMA_EN;
- ioc3->sscr_b;
+ writel(GPCR_UARTA_MODESEL | GPCR_UARTB_MODESEL, &ioc3->gpcr_s);
+ readl(&ioc3->gpcr_s);
+ writel(0, &ioc3->gppr[6]);
+ readl(&ioc3->gppr[6]);
+ writel(0, &ioc3->gppr[7]);
+ readl(&ioc3->gppr[7]);
+ writel(readl(&ioc3->port_a.sscr) & ~SSCR_DMA_EN, &ioc3->port_a.sscr);
+ readl(&ioc3->port_a.sscr);
+ writel(readl(&ioc3->port_b.sscr) & ~SSCR_DMA_EN, &ioc3->port_b.sscr);
+ readl(&ioc3->port_b.sscr);
/* Disable all SA/B interrupts except for SA/B_INT in SIO_IEC. */
- ioc3->sio_iec &= ~ (SIO_IR_SA_TX_MT | SIO_IR_SA_RX_FULL |
- SIO_IR_SA_RX_HIGH | SIO_IR_SA_RX_TIMER |
- SIO_IR_SA_DELTA_DCD | SIO_IR_SA_DELTA_CTS |
- SIO_IR_SA_TX_EXPLICIT | SIO_IR_SA_MEMERR);
- ioc3->sio_iec |= SIO_IR_SA_INT;
- ioc3->sscr_a = 0;
- ioc3->sio_iec &= ~ (SIO_IR_SB_TX_MT | SIO_IR_SB_RX_FULL |
- SIO_IR_SB_RX_HIGH | SIO_IR_SB_RX_TIMER |
- SIO_IR_SB_DELTA_DCD | SIO_IR_SB_DELTA_CTS |
- SIO_IR_SB_TX_EXPLICIT | SIO_IR_SB_MEMERR);
- ioc3->sio_iec |= SIO_IR_SB_INT;
- ioc3->sscr_b = 0;
+ sio_iec = readl(&ioc3->sio_iec);
+ sio_iec &= ~(SIO_IR_SA_TX_MT | SIO_IR_SA_RX_FULL |
+ SIO_IR_SA_RX_HIGH | SIO_IR_SA_RX_TIMER |
+ SIO_IR_SA_DELTA_DCD | SIO_IR_SA_DELTA_CTS |
+ SIO_IR_SA_TX_EXPLICIT | SIO_IR_SA_MEMERR);
+ sio_iec |= SIO_IR_SA_INT;
+ sio_iec &= ~(SIO_IR_SB_TX_MT | SIO_IR_SB_RX_FULL |
+ SIO_IR_SB_RX_HIGH | SIO_IR_SB_RX_TIMER |
+ SIO_IR_SB_DELTA_DCD | SIO_IR_SB_DELTA_CTS |
+ SIO_IR_SB_TX_EXPLICIT | SIO_IR_SB_MEMERR);
+ sio_iec |= SIO_IR_SB_INT;
+ writel(sio_iec, &ioc3->sio_iec);
+ writel(0, &ioc3->port_a.sscr);
+ writel(0, &ioc3->port_b.sscr);
ioc3_8250_register(&ioc3->sregs.uarta);
ioc3_8250_register(&ioc3->sregs.uartb);
@@ -1236,15 +1181,15 @@ static int ioc3_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
pci_using_dac = 1;
err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
if (err < 0) {
- printk(KERN_ERR "%s: Unable to obtain 64 bit DMA "
- "for consistent allocations\n", pci_name(pdev));
+ pr_err("%s: Unable to obtain 64 bit DMA for consistent allocations\n",
+ pci_name(pdev));
goto out;
}
} else {
err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
if (err) {
- printk(KERN_ERR "%s: No usable DMA configuration, "
- "aborting.\n", pci_name(pdev));
+ pr_err("%s: No usable DMA configuration, aborting.\n",
+ pci_name(pdev));
goto out;
}
pci_using_dac = 0;
@@ -1270,19 +1215,22 @@ static int ioc3_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
ip = netdev_priv(dev);
ip->dev = dev;
+ ip->dma_dev = &pdev->dev;
dev->irq = pdev->irq;
ioc3_base = pci_resource_start(pdev, 0);
ioc3_size = pci_resource_len(pdev, 0);
- ioc3 = (struct ioc3 *) ioremap(ioc3_base, ioc3_size);
+ ioc3 = (struct ioc3 *)ioremap(ioc3_base, ioc3_size);
if (!ioc3) {
- printk(KERN_CRIT "ioc3eth(%s): ioremap failed, goodbye.\n",
+ pr_err("ioc3eth(%s): ioremap failed, goodbye.\n",
pci_name(pdev));
err = -ENOMEM;
goto out_res;
}
- ip->regs = ioc3;
+ ip->regs = &ioc3->eth;
+ ip->ssram = ioc3->ssram;
+ ip->all_regs = ioc3;
#ifdef CONFIG_SERIAL_8250
ioc3_serial_probe(pdev, ioc3);
@@ -1292,6 +1240,26 @@ static int ioc3_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
timer_setup(&ip->ioc3_timer, ioc3_timer, 0);
ioc3_stop(ip);
+
+ /* Allocate rx ring. 4kb = 512 entries, must be 4kb aligned */
+ ip->rxr = dma_direct_alloc_pages(ip->dma_dev, RX_RING_SIZE,
+ &ip->rxr_dma, GFP_ATOMIC, 0);
+ if (!ip->rxr) {
+ pr_err("ioc3-eth: rx ring allocation failed\n");
+ err = -ENOMEM;
+ goto out_stop;
+ }
+
+ /* Allocate tx rings. 16kb = 128 bufs, must be 16kb aligned */
+ ip->txr = dma_direct_alloc_pages(ip->dma_dev, TX_RING_SIZE,
+ &ip->txr_dma,
+ GFP_KERNEL | __GFP_ZERO, 0);
+ if (!ip->txr) {
+ pr_err("ioc3-eth: tx ring allocation failed\n");
+ err = -ENOMEM;
+ goto out_stop;
+ }
+
ioc3_init(dev);
ip->pdev = pdev;
@@ -1305,7 +1273,7 @@ static int ioc3_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
ioc3_mii_init(ip);
if (ip->mii.phy_id == -1) {
- printk(KERN_CRIT "ioc3-eth(%s): Didn't find a PHY, goodbye.\n",
+ pr_err("ioc3-eth(%s): Didn't find a PHY, goodbye.\n",
pci_name(pdev));
err = -ENODEV;
goto out_stop;
@@ -1335,24 +1303,27 @@ static int ioc3_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
vendor = (sw_physid1 << 12) | (sw_physid2 >> 4);
model = (sw_physid2 >> 4) & 0x3f;
rev = sw_physid2 & 0xf;
- printk(KERN_INFO "%s: Using PHY %d, vendor 0x%x, model %d, "
- "rev %d.\n", dev->name, ip->mii.phy_id, vendor, model, rev);
- printk(KERN_INFO "%s: IOC3 SSRAM has %d kbyte.\n", dev->name,
- ip->emcr & EMCR_BUFSIZ ? 128 : 64);
+ netdev_info(dev, "Using PHY %d, vendor 0x%x, model %d, rev %d.\n",
+ ip->mii.phy_id, vendor, model, rev);
+ netdev_info(dev, "IOC3 SSRAM has %d kbyte.\n",
+ ip->emcr & EMCR_BUFSIZ ? 128 : 64);
return 0;
out_stop:
- ioc3_stop(ip);
del_timer_sync(&ip->ioc3_timer);
- ioc3_free_rings(ip);
+ if (ip->rxr)
+ dma_direct_free_pages(ip->dma_dev, RX_RING_SIZE, ip->rxr,
+ ip->rxr_dma, 0);
+ if (ip->txr)
+ dma_direct_free_pages(ip->dma_dev, TX_RING_SIZE, ip->txr,
+ ip->txr_dma, 0);
out_res:
pci_release_regions(pdev);
out_free:
free_netdev(dev);
out_disable:
- /*
- * We should call pci_disable_device(pdev); here if the IOC3 wasn't
+ /* We should call pci_disable_device(pdev); here if the IOC3 wasn't
* such a weird device ...
*/
out:
@@ -1363,16 +1334,19 @@ static void ioc3_remove_one(struct pci_dev *pdev)
{
struct net_device *dev = pci_get_drvdata(pdev);
struct ioc3_private *ip = netdev_priv(dev);
- struct ioc3 *ioc3 = ip->regs;
+
+ dma_direct_free_pages(ip->dma_dev, RX_RING_SIZE, ip->rxr,
+ ip->rxr_dma, 0);
+ dma_direct_free_pages(ip->dma_dev, TX_RING_SIZE, ip->txr,
+ ip->txr_dma, 0);
unregister_netdev(dev);
del_timer_sync(&ip->ioc3_timer);
- iounmap(ioc3);
+ iounmap(ip->all_regs);
pci_release_regions(pdev);
free_netdev(dev);
- /*
- * We should call pci_disable_device(pdev); here if the IOC3 wasn't
+ /* We should call pci_disable_device(pdev); here if the IOC3 wasn't
* such a weird device ...
*/
}
@@ -1392,16 +1366,14 @@ static struct pci_driver ioc3_driver = {
static netdev_tx_t ioc3_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
- unsigned long data;
struct ioc3_private *ip = netdev_priv(dev);
- struct ioc3 *ioc3 = ip->regs;
- unsigned int len;
struct ioc3_etxd *desc;
- uint32_t w0 = 0;
+ unsigned long data;
+ unsigned int len;
int produce;
+ u32 w0 = 0;
- /*
- * IOC3 has a fairly simple minded checksumming hardware which simply
+ /* IOC3 has fairly simple-minded checksumming hardware which simply
* adds up the 1's complement checksum for the entire packet and
* inserts it at an offset which can be specified in the descriptor
* into the transmit packet. This means we have to compensate for the
@@ -1412,25 +1384,23 @@ static netdev_tx_t ioc3_start_xmit(struct sk_buff *skb, struct net_device *dev)
const struct iphdr *ih = ip_hdr(skb);
const int proto = ntohs(ih->protocol);
unsigned int csoff;
- uint32_t csum, ehsum;
- uint16_t *eh;
+ u32 csum, ehsum;
+ u16 *eh;
/* The MAC header. skb->mac seems the logical approach
- to find the MAC header - except it's a NULL pointer ... */
- eh = (uint16_t *) skb->data;
+ * to find the MAC header - except it's a NULL pointer ...
+ */
+ eh = (u16 *)skb->data;
/* Sum up dest addr, src addr and protocol */
ehsum = eh[0] + eh[1] + eh[2] + eh[3] + eh[4] + eh[5] + eh[6];
- /* Fold ehsum. can't use csum_fold which negates also ... */
- ehsum = (ehsum & 0xffff) + (ehsum >> 16);
- ehsum = (ehsum & 0xffff) + (ehsum >> 16);
-
/* Skip IP header; its sum is always zero and was
- already filled in by ip_output.c */
+ * already filled in by ip_output.c
+ */
csum = csum_tcpudp_nofold(ih->saddr, ih->daddr,
- ih->tot_len - (ih->ihl << 2),
- proto, 0xffff ^ ehsum);
+ ih->tot_len - (ih->ihl << 2),
+ proto, csum_fold(ehsum));
csum = (csum & 0xffff) + (csum >> 16); /* Fold again */
csum = (csum & 0xffff) + (csum >> 16);
@@ -1450,7 +1420,7 @@ static netdev_tx_t ioc3_start_xmit(struct sk_buff *skb, struct net_device *dev)
spin_lock_irq(&ip->ioc3_lock);
- data = (unsigned long) skb->data;
+ data = (unsigned long)skb->data;
len = skb->len;
produce = ip->tx_pi;
@@ -1470,47 +1440,78 @@ static netdev_tx_t ioc3_start_xmit(struct sk_buff *skb, struct net_device *dev)
unsigned long b2 = (data | 0x3fffUL) + 1UL;
unsigned long s1 = b2 - data;
unsigned long s2 = data + len - b2;
+ dma_addr_t d1, d2;
desc->cmd = cpu_to_be32(len | ETXD_INTWHENDONE |
- ETXD_B1V | ETXD_B2V | w0);
+ ETXD_B1V | ETXD_B2V | w0);
desc->bufcnt = cpu_to_be32((s1 << ETXD_B1CNT_SHIFT) |
- (s2 << ETXD_B2CNT_SHIFT));
- desc->p1 = cpu_to_be64(ioc3_map(skb->data, 1));
- desc->p2 = cpu_to_be64(ioc3_map((void *) b2, 1));
+ (s2 << ETXD_B2CNT_SHIFT));
+ d1 = dma_map_single(ip->dma_dev, skb->data, s1, DMA_TO_DEVICE);
+ if (dma_mapping_error(ip->dma_dev, d1))
+ goto drop_packet;
+ d2 = dma_map_single(ip->dma_dev, (void *)b2, s2, DMA_TO_DEVICE);
+ if (dma_mapping_error(ip->dma_dev, d2)) {
+ dma_unmap_single(ip->dma_dev, d1, s1, DMA_TO_DEVICE);
+ goto drop_packet;
+ }
+ desc->p1 = cpu_to_be64(ioc3_map(d1, PCI64_ATTR_PREF));
+ desc->p2 = cpu_to_be64(ioc3_map(d2, PCI64_ATTR_PREF));
} else {
+ dma_addr_t d;
+
/* Normal sized packet that doesn't cross a page boundary. */
desc->cmd = cpu_to_be32(len | ETXD_INTWHENDONE | ETXD_B1V | w0);
desc->bufcnt = cpu_to_be32(len << ETXD_B1CNT_SHIFT);
- desc->p1 = cpu_to_be64(ioc3_map(skb->data, 1));
+ d = dma_map_single(ip->dma_dev, skb->data, len, DMA_TO_DEVICE);
+ if (dma_mapping_error(ip->dma_dev, d))
+ goto drop_packet;
+ desc->p1 = cpu_to_be64(ioc3_map(d, PCI64_ATTR_PREF));
}
- BARRIER();
+ mb(); /* make sure all descriptor changes are visible */
ip->tx_skbs[produce] = skb; /* Remember skb */
- produce = (produce + 1) & 127;
+ produce = (produce + 1) & TX_RING_MASK;
ip->tx_pi = produce;
- ioc3_w_etpir(produce << 7); /* Fire ... */
+ writel(produce << 7, &ip->regs->etpir); /* Fire ... */
ip->txqlen++;
- if (ip->txqlen >= 127)
+ if (ip->txqlen >= (TX_RING_ENTRIES - 1))
netif_stop_queue(dev);
spin_unlock_irq(&ip->ioc3_lock);
return NETDEV_TX_OK;
+
+drop_packet:
+ dev_kfree_skb_any(skb);
+ dev->stats.tx_dropped++;
+
+ spin_unlock_irq(&ip->ioc3_lock);
+
+ return NETDEV_TX_OK;
}
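Worked numbers for the 16 KB-boundary split in ioc3_start_xmit() above, for
a hypothetical buffer at 0x3f00 of length 0x400: b2 is the first address in
the upper region, s1/s2 the two segment lengths (values illustrative):

	#include <assert.h>

	int main(void)
	{
		unsigned long data = 0x3f00, len = 0x400;
		unsigned long b2 = (data | 0x3fffUL) + 1UL;	/* 0x4000 */
		unsigned long s1 = b2 - data;			/* 0x100 */
		unsigned long s2 = data + len - b2;		/* 0x300 */

		/* the crossing test used by the driver */
		assert((data ^ (data + len - 1)) & 0x4000);
		assert(b2 == 0x4000 && s1 == 0x100 && s2 == 0x300);
		return 0;
	}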
static void ioc3_timeout(struct net_device *dev)
{
struct ioc3_private *ip = netdev_priv(dev);
- printk(KERN_ERR "%s: transmit timed out, resetting\n", dev->name);
+ netdev_err(dev, "transmit timed out, resetting\n");
spin_lock_irq(&ip->ioc3_lock);
ioc3_stop(ip);
+ ioc3_free_rx_bufs(ip);
+ ioc3_clean_tx_ring(ip);
+
ioc3_init(dev);
+ if (ioc3_alloc_rx_bufs(dev)) {
+ netdev_err(dev, "%s: rx buffer allocation failed\n", __func__);
+ spin_unlock_irq(&ip->ioc3_lock);
+ return;
+ }
+ ioc3_start(ip);
ioc3_mii_init(ip);
ioc3_mii_start(ip);
@@ -1519,16 +1520,14 @@ static void ioc3_timeout(struct net_device *dev)
netif_wake_queue(dev);
}
-/*
- * Given a multicast ethernet address, this routine calculates the
+/* Given a multicast ethernet address, this routine calculates the
* address's bit index in the logical address filter mask
*/
-
static inline unsigned int ioc3_hash(const unsigned char *addr)
{
unsigned int temp = 0;
- u32 crc;
int bits;
+ u32 crc;
crc = ether_crc_le(ETH_ALEN, addr);
@@ -1542,8 +1541,8 @@ static inline unsigned int ioc3_hash(const unsigned char *addr)
return temp;
}
-static void ioc3_get_drvinfo (struct net_device *dev,
- struct ethtool_drvinfo *info)
+static void ioc3_get_drvinfo(struct net_device *dev,
+ struct ethtool_drvinfo *info)
{
struct ioc3_private *ip = netdev_priv(dev);
@@ -1623,27 +1622,28 @@ static int ioc3_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
static void ioc3_set_multicast_list(struct net_device *dev)
{
- struct netdev_hw_addr *ha;
struct ioc3_private *ip = netdev_priv(dev);
- struct ioc3 *ioc3 = ip->regs;
+ struct ioc3_ethregs *regs = ip->regs;
+ struct netdev_hw_addr *ha;
u64 ehar = 0;
- netif_stop_queue(dev); /* Lock out others. */
+ spin_lock_irq(&ip->ioc3_lock);
if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */
ip->emcr |= EMCR_PROMISC;
- ioc3_w_emcr(ip->emcr);
- (void) ioc3_r_emcr();
+ writel(ip->emcr, &regs->emcr);
+ readl(&regs->emcr);
} else {
ip->emcr &= ~EMCR_PROMISC;
- ioc3_w_emcr(ip->emcr); /* Clear promiscuous. */
- (void) ioc3_r_emcr();
+ writel(ip->emcr, &regs->emcr); /* Clear promiscuous. */
+ readl(&regs->emcr);
if ((dev->flags & IFF_ALLMULTI) ||
(netdev_mc_count(dev) > 64)) {
/* Too many for hashing to make sense or we want all
- multicast packets anyway, so skip computing all the
- hashes and just accept all packets. */
+ * multicast packets anyway, so skip computing all the
+ * hashes and just accept all packets.
+ */
ip->ehar_h = 0xffffffff;
ip->ehar_l = 0xffffffff;
} else {
@@ -1653,11 +1653,11 @@ static void ioc3_set_multicast_list(struct net_device *dev)
ip->ehar_h = ehar >> 32;
ip->ehar_l = ehar & 0xffffffff;
}
- ioc3_w_ehar_h(ip->ehar_h);
- ioc3_w_ehar_l(ip->ehar_l);
+ writel(ip->ehar_h, &regs->ehar_h);
+ writel(ip->ehar_l, &regs->ehar_l);
}
- netif_wake_queue(dev); /* Let us get going again. */
+ spin_unlock_irq(&ip->ioc3_lock);
}
module_pci_driver(ioc3_driver);
diff --git a/drivers/net/ethernet/sgi/meth.c b/drivers/net/ethernet/sgi/meth.c
index 00660dd820e2..539bc5db989c 100644
--- a/drivers/net/ethernet/sgi/meth.c
+++ b/drivers/net/ethernet/sgi/meth.c
@@ -247,8 +247,7 @@ static void meth_free_tx_ring(struct meth_private *priv)
/* Remove any pending skb */
for (i = 0; i < TX_RING_ENTRIES; i++) {
- if (priv->tx_skbs[i])
- dev_kfree_skb(priv->tx_skbs[i]);
+ dev_kfree_skb(priv->tx_skbs[i]);
priv->tx_skbs[i] = NULL;
}
dma_free_coherent(&priv->pdev->dev, TX_RING_BUFFER_SIZE, priv->tx_ring,
diff --git a/drivers/net/ethernet/sis/sis900.c b/drivers/net/ethernet/sis/sis900.c
index 6e07f5ebacfc..85eaccbbbac1 100644
--- a/drivers/net/ethernet/sis/sis900.c
+++ b/drivers/net/ethernet/sis/sis900.c
@@ -191,6 +191,8 @@ struct sis900_private {
unsigned int tx_full; /* The Tx queue is full. */
u8 host_bridge_rev;
u8 chipset_rev;
+ /* EEPROM data */
+ int eeprom_size;
};
MODULE_AUTHOR("Jim Huang <cmhuang@sis.com.tw>, Ollie Lho <ollie@sis.com.tw>");
@@ -475,6 +477,8 @@ static int sis900_probe(struct pci_dev *pci_dev,
sis_priv->pci_dev = pci_dev;
spin_lock_init(&sis_priv->lock);
+ sis_priv->eeprom_size = 24;
+
pci_set_drvdata(pci_dev, net_dev);
ring_space = pci_alloc_consistent(pci_dev, TX_TOTAL_SIZE, &ring_dma);
@@ -2122,6 +2126,68 @@ static void sis900_get_wol(struct net_device *net_dev, struct ethtool_wolinfo *w
wol->supported = (WAKE_PHY | WAKE_MAGIC);
}
+static int sis900_get_eeprom_len(struct net_device *dev)
+{
+ struct sis900_private *sis_priv = netdev_priv(dev);
+
+ return sis_priv->eeprom_size;
+}
+
+static int sis900_read_eeprom(struct net_device *net_dev, u8 *buf)
+{
+ struct sis900_private *sis_priv = netdev_priv(net_dev);
+ void __iomem *ioaddr = sis_priv->ioaddr;
+ int wait, ret = -EAGAIN;
+ u16 signature;
+ u16 *ebuf = (u16 *)buf;
+ int i;
+
+ if (sis_priv->chipset_rev == SIS96x_900_REV) {
+ sw32(mear, EEREQ);
+ for (wait = 0; wait < 2000; wait++) {
+ if (sr32(mear) & EEGNT) {
+ /* read 16 bits, and index by 16 bits */
+ for (i = 0; i < sis_priv->eeprom_size / 2; i++)
+ ebuf[i] = (u16)read_eeprom(ioaddr, i);
+ ret = 0;
+ break;
+ }
+ udelay(1);
+ }
+ sw32(mear, EEDONE);
+ } else {
+ signature = (u16)read_eeprom(ioaddr, EEPROMSignature);
+ if (signature != 0xffff && signature != 0x0000) {
+ /* read 16 bits, and index by 16 bits */
+ for (i = 0; i < sis_priv->eeprom_size / 2; i++)
+ ebuf[i] = (u16)read_eeprom(ioaddr, i);
+ ret = 0;
+ }
+ }
+ return ret;
+}
+
+#define SIS900_EEPROM_MAGIC 0xBABE
+static int sis900_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
+{
+ struct sis900_private *sis_priv = netdev_priv(dev);
+ u8 *eebuf;
+ int res;
+
+ eebuf = kmalloc(sis_priv->eeprom_size, GFP_KERNEL);
+ if (!eebuf)
+ return -ENOMEM;
+
+ eeprom->magic = SIS900_EEPROM_MAGIC;
+ spin_lock_irq(&sis_priv->lock);
+ res = sis900_read_eeprom(dev, eebuf);
+ spin_unlock_irq(&sis_priv->lock);
+ if (!res)
+ memcpy(data, eebuf + eeprom->offset, eeprom->len);
+ kfree(eebuf);
+ return res;
+}
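The two new ethtool hooks are what `ethtool -e ethX' exercises from
userspace. A sketch of the equivalent ioctl path, assuming sock is an
AF_INET datagram socket and len is at most the reported EEPROM size; the
demo_read_eeprom() helper is illustrative, not part of this patch:

	#include <string.h>
	#include <sys/ioctl.h>
	#include <net/if.h>
	#include <linux/ethtool.h>
	#include <linux/sockios.h>

	static int demo_read_eeprom(int sock, const char *ifname,
				    unsigned char *buf, unsigned int len)
	{
		struct {
			struct ethtool_eeprom ee;
			unsigned char data[256];
		} req;
		struct ifreq ifr;

		memset(&req, 0, sizeof(req));
		req.ee.cmd = ETHTOOL_GEEPROM;
		req.ee.offset = 0;
		req.ee.len = len;

		memset(&ifr, 0, sizeof(ifr));
		strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
		ifr.ifr_data = (void *)&req;

		if (ioctl(sock, SIOCETHTOOL, &ifr) < 0)
			return -1;
		memcpy(buf, req.data, len);
		return 0;
	}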
+
static const struct ethtool_ops sis900_ethtool_ops = {
.get_drvinfo = sis900_get_drvinfo,
.get_msglevel = sis900_get_msglevel,
@@ -2132,6 +2198,8 @@ static const struct ethtool_ops sis900_ethtool_ops = {
.set_wol = sis900_set_wol,
.get_link_ksettings = sis900_get_link_ksettings,
.set_link_ksettings = sis900_set_link_ksettings,
+ .get_eeprom_len = sis900_get_eeprom_len,
+ .get_eeprom = sis900_get_eeprom,
};
/**
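The new sis900_get_eeprom() copies from eebuf + eeprom->offset without its own bounds check, which works because the ethtool core validates the request against get_eeprom_len() first. A minimal sketch of that calling convention (an assumption about the core's behaviour, not the in-tree code):

/* Hedged sketch of the expected ethtool core flow: the request is
 * clamped against get_eeprom_len() before get_eeprom() ever runs. */
static int sketch_ethtool_get_eeprom(struct net_device *dev,
				     struct ethtool_eeprom *eeprom, u8 *data)
{
	const struct ethtool_ops *ops = dev->ethtool_ops;
	int total = ops->get_eeprom_len(dev);

	if (eeprom->offset + eeprom->len > total)
		return -EINVAL;		/* would read past the EEPROM */

	return ops->get_eeprom(dev, eeprom, data);
}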
diff --git a/drivers/net/ethernet/smsc/smc91x.c b/drivers/net/ethernet/smsc/smc91x.c
index 601e76ad99a0..3a6761131f4c 100644
--- a/drivers/net/ethernet/smsc/smc91x.c
+++ b/drivers/net/ethernet/smsc/smc91x.c
@@ -378,8 +378,7 @@ static void smc_shutdown(struct net_device *dev)
pending_skb = lp->pending_tx_skb;
lp->pending_tx_skb = NULL;
spin_unlock_irq(&lp->lock);
- if (pending_skb)
- dev_kfree_skb(pending_skb);
+ dev_kfree_skb(pending_skb);
/* and tell the card to stay away from that nasty outside world */
SMC_SELECT_BANK(lp, 0);
diff --git a/drivers/net/ethernet/socionext/sni_ave.c b/drivers/net/ethernet/socionext/sni_ave.c
index 51a7b48db4bc..10d0c3e478ab 100644
--- a/drivers/net/ethernet/socionext/sni_ave.c
+++ b/drivers/net/ethernet/socionext/sni_ave.c
@@ -1553,7 +1553,6 @@ static int ave_probe(struct platform_device *pdev)
struct ave_private *priv;
struct net_device *ndev;
struct device_node *np;
- struct resource *res;
const void *mac_addr;
void __iomem *base;
const char *name;
@@ -1573,13 +1572,10 @@ static int ave_probe(struct platform_device *pdev)
}
irq = platform_get_irq(pdev, 0);
- if (irq < 0) {
- dev_err(dev, "IRQ not found\n");
+ if (irq < 0)
return irq;
- }
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- base = devm_ioremap_resource(dev, res);
+ base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(base))
return PTR_ERR(base);
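This conversion (repeated below for anarion, meson and meson8b) is safe because devm_platform_ioremap_resource() is just the two-call sequence folded into one helper; its body is essentially:

/* Effective body of the helper (paraphrased from drivers/base/platform.c). */
void __iomem *devm_platform_ioremap_resource(struct platform_device *pdev,
					     unsigned int index)
{
	struct resource *res;

	res = platform_get_resource(pdev, IORESOURCE_MEM, index);
	return devm_ioremap_resource(&pdev->dev, res);
}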
diff --git a/drivers/net/ethernet/stmicro/stmmac/Kconfig b/drivers/net/ethernet/stmicro/stmmac/Kconfig
index 2325b40dff6e..338e25a6374e 100644
--- a/drivers/net/ethernet/stmicro/stmmac/Kconfig
+++ b/drivers/net/ethernet/stmicro/stmmac/Kconfig
@@ -200,6 +200,7 @@ endif
config STMMAC_PCI
tristate "STMMAC PCI bus support"
depends on STMMAC_ETH && PCI
+ depends on COMMON_CLK
---help---
This selects the platform specific bus support for the stmmac driver.
This driver was tested on XLINX XC2V3000 FF1152AMT0221
diff --git a/drivers/net/ethernet/stmicro/stmmac/common.h b/drivers/net/ethernet/stmicro/stmmac/common.h
index ed872eed1cab..49aa56ca09cc 100644
--- a/drivers/net/ethernet/stmicro/stmmac/common.h
+++ b/drivers/net/ethernet/stmicro/stmmac/common.h
@@ -75,6 +75,7 @@ struct stmmac_extra_stats {
unsigned long rx_missed_cntr;
unsigned long rx_overflow_cntr;
unsigned long rx_vlan;
+ unsigned long rx_split_hdr_pkt_n;
/* Tx/Rx IRQ error info */
unsigned long tx_undeflow_irq;
unsigned long tx_process_stopped_irq;
@@ -354,6 +355,11 @@ struct dma_features {
unsigned int frpbs;
unsigned int frpes;
unsigned int addr64;
+ unsigned int rssen;
+ unsigned int vlhash;
+ unsigned int sphen;
+ unsigned int vlins;
+ unsigned int dvlan;
};
/* GMAC TX FIFO is 8K, Rx FIFO is 16K */
@@ -381,6 +387,16 @@ struct dma_features {
#define JUMBO_LEN 9000
+/* Receive Side Scaling */
+#define STMMAC_RSS_HASH_KEY_SIZE 40
+#define STMMAC_RSS_MAX_TABLE_SIZE 256
+
+/* VLAN */
+#define STMMAC_VLAN_NONE 0x0
+#define STMMAC_VLAN_REMOVE 0x1
+#define STMMAC_VLAN_INSERT 0x2
+#define STMMAC_VLAN_REPLACE 0x3
+
extern const struct stmmac_desc_ops enh_desc_ops;
extern const struct stmmac_desc_ops ndesc_ops;
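The two RSS sizes above imply a fixed-shape configuration object; a hypothetical sketch of it (field names are illustrative, not necessarily the in-tree struct):

/* Hypothetical shape implied by the sizes above. */
struct sketch_stmmac_rss {
	int enable;
	u8 key[STMMAC_RSS_HASH_KEY_SIZE];	/* 40-byte hash key */
	u32 table[STMMAC_RSS_MAX_TABLE_SIZE];	/* 256-entry indirection */
};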
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-anarion.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-anarion.c
index 6ce3a7fb41ab..527f93320a5a 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-anarion.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-anarion.c
@@ -62,12 +62,10 @@ static void anarion_gmac_exit(struct platform_device *pdev, void *priv)
static struct anarion_gmac *anarion_config_dt(struct platform_device *pdev)
{
int phy_mode;
- struct resource *res;
void __iomem *ctl_block;
struct anarion_gmac *gmac;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
- ctl_block = devm_ioremap_resource(&pdev->dev, res);
+ ctl_block = devm_platform_ioremap_resource(pdev, 1);
if (IS_ERR(ctl_block)) {
dev_err(&pdev->dev, "Cannot get reset region (%ld)!\n",
PTR_ERR(ctl_block));
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c
index 3a14cdd01f5f..dd9967aeda22 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c
@@ -333,6 +333,9 @@ static void *tegra_eqos_probe(struct platform_device *pdev,
usleep_range(2000, 4000);
gpiod_set_value(eqos->reset, 0);
+ /* MDIO bus was already reset just above */
+ data->mdio_bus_data->needs_reset = false;
+
eqos->rst = devm_reset_control_get(&pdev->dev, "eqos");
if (IS_ERR(eqos->rst)) {
err = PTR_ERR(eqos->rst);
@@ -415,7 +418,6 @@ static int dwc_eth_dwmac_probe(struct platform_device *pdev)
const struct dwc_eth_dwmac_data *data;
struct plat_stmmacenet_data *plat_dat;
struct stmmac_resources stmmac_res;
- struct resource *res;
void *priv;
int ret;
@@ -428,17 +430,11 @@ static int dwc_eth_dwmac_probe(struct platform_device *pdev)
* resource initialization is done in the glue logic.
*/
stmmac_res.irq = platform_get_irq(pdev, 0);
- if (stmmac_res.irq < 0) {
- if (stmmac_res.irq != -EPROBE_DEFER)
- dev_err(&pdev->dev,
- "IRQ configuration information not found\n");
-
+ if (stmmac_res.irq < 0)
return stmmac_res.irq;
- }
stmmac_res.wol_irq = stmmac_res.irq;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- stmmac_res.addr = devm_ioremap_resource(&pdev->dev, res);
+ stmmac_res.addr = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(stmmac_res.addr))
return PTR_ERR(stmmac_res.addr);
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-meson.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-meson.c
index 88eb16954627..bbc16b5a410a 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-meson.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-meson.c
@@ -46,7 +46,6 @@ static int meson6_dwmac_probe(struct platform_device *pdev)
struct plat_stmmacenet_data *plat_dat;
struct stmmac_resources stmmac_res;
struct meson_dwmac *dwmac;
- struct resource *res;
int ret;
ret = stmmac_get_platform_resources(pdev, &stmmac_res);
@@ -63,8 +62,7 @@ static int meson6_dwmac_probe(struct platform_device *pdev)
goto err_remove_config_dt;
}
- res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
- dwmac->reg = devm_ioremap_resource(&pdev->dev, res);
+ dwmac->reg = devm_platform_ioremap_resource(pdev, 1);
if (IS_ERR(dwmac->reg)) {
ret = PTR_ERR(dwmac->reg);
goto err_remove_config_dt;
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c
index 786ca4a7bf36..9cda29e4b89d 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c
@@ -308,7 +308,6 @@ static int meson8b_dwmac_probe(struct platform_device *pdev)
{
struct plat_stmmacenet_data *plat_dat;
struct stmmac_resources stmmac_res;
- struct resource *res;
struct meson8b_dwmac *dwmac;
int ret;
@@ -332,8 +331,7 @@ static int meson8b_dwmac_probe(struct platform_device *pdev)
ret = -EINVAL;
goto err_remove_config_dt;
}
- res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
- dwmac->regs = devm_ioremap_resource(&pdev->dev, res);
+ dwmac->regs = devm_platform_ioremap_resource(pdev, 1);
if (IS_ERR(dwmac->regs)) {
ret = PTR_ERR(dwmac->regs);
goto err_remove_config_dt;
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h
index 3174b701aa90..7357b8bdc128 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h
+++ b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h
@@ -32,6 +32,9 @@
#define XGMAC_CONFIG_ARPEN BIT(31)
#define XGMAC_CONFIG_GPSL GENMASK(29, 16)
#define XGMAC_CONFIG_GPSL_SHIFT 16
+#define XGMAC_CONFIG_HDSMS GENMASK(14, 12)
+#define XGMAC_CONFIG_HDSMS_SHIFT 12
+#define XGMAC_CONFIG_HDSMS_256 (0x2 << XGMAC_CONFIG_HDSMS_SHIFT)
#define XGMAC_CONFIG_S2KP BIT(11)
#define XGMAC_CONFIG_LM BIT(10)
#define XGMAC_CONFIG_IPC BIT(9)
@@ -44,6 +47,7 @@
#define XGMAC_CORE_INIT_RX 0
#define XGMAC_PACKET_FILTER 0x00000008
#define XGMAC_FILTER_RA BIT(31)
+#define XGMAC_FILTER_VTFE BIT(16)
#define XGMAC_FILTER_HPF BIT(10)
#define XGMAC_FILTER_PCF BIT(7)
#define XGMAC_FILTER_PM BIT(4)
@@ -51,6 +55,19 @@
#define XGMAC_FILTER_PR BIT(0)
#define XGMAC_HASH_TABLE(x) (0x00000010 + (x) * 4)
#define XGMAC_MAX_HASH_TABLE 8
+#define XGMAC_VLAN_TAG 0x00000050
+#define XGMAC_VLAN_EDVLP BIT(26)
+#define XGMAC_VLAN_VTHM BIT(25)
+#define XGMAC_VLAN_DOVLTC BIT(20)
+#define XGMAC_VLAN_ESVL BIT(18)
+#define XGMAC_VLAN_ETV BIT(16)
+#define XGMAC_VLAN_VID GENMASK(15, 0)
+#define XGMAC_VLAN_HASH_TABLE 0x00000058
+#define XGMAC_VLAN_INCL 0x00000060
+#define XGMAC_VLAN_VLTI BIT(20)
+#define XGMAC_VLAN_CSVL BIT(19)
+#define XGMAC_VLAN_VLC GENMASK(17, 16)
+#define XGMAC_VLAN_VLC_SHIFT 16
#define XGMAC_RXQ_CTRL0 0x000000a0
#define XGMAC_RXQEN(x) GENMASK((x) * 2 + 1, (x) * 2)
#define XGMAC_RXQEN_SHIFT(x) ((x) * 2)
@@ -59,6 +76,7 @@
#define XGMAC_PSRQ(x) GENMASK((x) * 8 + 7, (x) * 8)
#define XGMAC_PSRQ_SHIFT(x) ((x) * 8)
#define XGMAC_INT_STATUS 0x000000b0
+#define XGMAC_LPIIS BIT(5)
#define XGMAC_PMTIS BIT(4)
#define XGMAC_INT_EN 0x000000b4
#define XGMAC_TSIE BIT(12)
@@ -76,19 +94,34 @@
#define XGMAC_RWKPKTEN BIT(2)
#define XGMAC_MGKPKTEN BIT(1)
#define XGMAC_PWRDWN BIT(0)
+#define XGMAC_LPI_CTRL 0x000000d0
+#define XGMAC_TXCGE BIT(21)
+#define XGMAC_LPITXA BIT(19)
+#define XGMAC_PLS BIT(17)
+#define XGMAC_LPITXEN BIT(16)
+#define XGMAC_RLPIEX BIT(3)
+#define XGMAC_RLPIEN BIT(2)
+#define XGMAC_TLPIEX BIT(1)
+#define XGMAC_TLPIEN BIT(0)
+#define XGMAC_LPI_TIMER_CTRL 0x000000d4
#define XGMAC_HW_FEATURE0 0x0000011c
#define XGMAC_HWFEAT_SAVLANINS BIT(27)
#define XGMAC_HWFEAT_RXCOESEL BIT(16)
#define XGMAC_HWFEAT_TXCOESEL BIT(14)
+#define XGMAC_HWFEAT_EEESEL BIT(13)
#define XGMAC_HWFEAT_TSSEL BIT(12)
#define XGMAC_HWFEAT_AVSEL BIT(11)
#define XGMAC_HWFEAT_RAVSEL BIT(10)
#define XGMAC_HWFEAT_ARPOFFSEL BIT(9)
+#define XGMAC_HWFEAT_MMCSEL BIT(8)
#define XGMAC_HWFEAT_MGKSEL BIT(7)
#define XGMAC_HWFEAT_RWKSEL BIT(6)
+#define XGMAC_HWFEAT_VLHASH BIT(4)
#define XGMAC_HWFEAT_GMIISEL BIT(1)
#define XGMAC_HW_FEATURE1 0x00000120
+#define XGMAC_HWFEAT_RSSEN BIT(20)
#define XGMAC_HWFEAT_TSOEN BIT(18)
+#define XGMAC_HWFEAT_SPHEN BIT(17)
#define XGMAC_HWFEAT_ADDR64 GENMASK(15, 14)
#define XGMAC_HWFEAT_TXFIFOSIZE GENMASK(10, 6)
#define XGMAC_HWFEAT_RXFIFOSIZE GENMASK(4, 0)
@@ -98,6 +131,16 @@
#define XGMAC_HWFEAT_RXCHCNT GENMASK(15, 12)
#define XGMAC_HWFEAT_TXQCNT GENMASK(9, 6)
#define XGMAC_HWFEAT_RXQCNT GENMASK(3, 0)
+#define XGMAC_HW_FEATURE3 0x00000128
+#define XGMAC_HWFEAT_ASP GENMASK(15, 14)
+#define XGMAC_HWFEAT_DVLAN BIT(13)
+#define XGMAC_HWFEAT_FRPES GENMASK(12, 11)
+#define XGMAC_HWFEAT_FRPPB GENMASK(10, 9)
+#define XGMAC_HWFEAT_FRPSEL BIT(3)
+#define XGMAC_MAC_DPP_FSM_INT_STATUS 0x00000150
+#define XGMAC_MAC_FSM_CONTROL 0x00000158
+#define XGMAC_PRTYEN BIT(1)
+#define XGMAC_TMOUTEN BIT(0)
#define XGMAC_MDIO_ADDR 0x00000200
#define XGMAC_MDIO_DATA 0x00000204
#define XGMAC_MDIO_C22P 0x00000220
@@ -108,14 +151,45 @@
#define XGMAC_DCS_SHIFT 16
#define XGMAC_ADDRx_LOW(x) (0x00000304 + (x) * 0x8)
#define XGMAC_ARP_ADDR 0x00000c10
+#define XGMAC_RSS_CTRL 0x00000c80
+#define XGMAC_UDP4TE BIT(3)
+#define XGMAC_TCP4TE BIT(2)
+#define XGMAC_IP2TE BIT(1)
+#define XGMAC_RSSE BIT(0)
+#define XGMAC_RSS_ADDR 0x00000c88
+#define XGMAC_RSSIA_SHIFT 8
+#define XGMAC_ADDRT BIT(2)
+#define XGMAC_CT BIT(1)
+#define XGMAC_OB BIT(0)
+#define XGMAC_RSS_DATA 0x00000c8c
#define XGMAC_TIMESTAMP_STATUS 0x00000d20
#define XGMAC_TXTSC BIT(15)
#define XGMAC_TXTIMESTAMP_NSEC 0x00000d30
#define XGMAC_TXTSSTSLO GENMASK(30, 0)
#define XGMAC_TXTIMESTAMP_SEC 0x00000d34
+#define XGMAC_PPS_CONTROL 0x00000d70
+#define XGMAC_PPS_MAXIDX(x) ((((x) + 1) * 8) - 1)
+#define XGMAC_PPS_MINIDX(x) ((x) * 8)
+#define XGMAC_PPSx_MASK(x) \
+ GENMASK(XGMAC_PPS_MAXIDX(x), XGMAC_PPS_MINIDX(x))
+#define XGMAC_TRGTMODSELx(x, val) \
+ GENMASK(XGMAC_PPS_MAXIDX(x) - 1, XGMAC_PPS_MAXIDX(x) - 2) & \
+ ((val) << (XGMAC_PPS_MAXIDX(x) - 2))
+#define XGMAC_PPSCMDx(x, val) \
+ GENMASK(XGMAC_PPS_MINIDX(x) + 3, XGMAC_PPS_MINIDX(x)) & \
+ ((val) << XGMAC_PPS_MINIDX(x))
+#define XGMAC_PPSCMD_START 0x2
+#define XGMAC_PPSCMD_STOP 0x5
+#define XGMAC_PPSEN0 BIT(4)
+#define XGMAC_PPSx_TARGET_TIME_SEC(x) (0x00000d80 + (x) * 0x10)
+#define XGMAC_PPSx_TARGET_TIME_NSEC(x) (0x00000d84 + (x) * 0x10)
+#define XGMAC_TRGTBUSY0 BIT(31)
+#define XGMAC_PPSx_INTERVAL(x) (0x00000d88 + (x) * 0x10)
+#define XGMAC_PPSx_WIDTH(x) (0x00000d8c + (x) * 0x10)
/* MTL Registers */
#define XGMAC_MTL_OPMODE 0x00001000
+#define XGMAC_FRPE BIT(15)
#define XGMAC_ETSALG GENMASK(6, 5)
#define XGMAC_WRR (0x0 << 5)
#define XGMAC_WFQ (0x1 << 5)
@@ -124,8 +198,32 @@
#define XGMAC_MTL_INT_STATUS 0x00001020
#define XGMAC_MTL_RXQ_DMA_MAP0 0x00001030
#define XGMAC_MTL_RXQ_DMA_MAP1 0x00001034
-#define XGMAC_QxMDMACH(x) GENMASK((x) * 8 + 3, (x) * 8)
+#define XGMAC_QxMDMACH(x) GENMASK((x) * 8 + 7, (x) * 8)
#define XGMAC_QxMDMACH_SHIFT(x) ((x) * 8)
+#define XGMAC_QDDMACH BIT(7)
+#define XGMAC_TC_PRTY_MAP0 0x00001040
+#define XGMAC_TC_PRTY_MAP1 0x00001044
+#define XGMAC_PSTC(x) GENMASK((x) * 8 + 7, (x) * 8)
+#define XGMAC_PSTC_SHIFT(x) ((x) * 8)
+#define XGMAC_MTL_RXP_CONTROL_STATUS 0x000010a0
+#define XGMAC_RXPI BIT(31)
+#define XGMAC_NPE GENMASK(23, 16)
+#define XGMAC_NVE GENMASK(7, 0)
+#define XGMAC_MTL_RXP_IACC_CTRL_ST 0x000010b0
+#define XGMAC_STARTBUSY BIT(31)
+#define XGMAC_WRRDN BIT(16)
+#define XGMAC_ADDR GENMASK(9, 0)
+#define XGMAC_MTL_RXP_IACC_DATA 0x000010b4
+#define XGMAC_MTL_ECC_CONTROL 0x000010c0
+#define XGMAC_MTL_SAFETY_INT_STATUS 0x000010c4
+#define XGMAC_MEUIS BIT(1)
+#define XGMAC_MECIS BIT(0)
+#define XGMAC_MTL_ECC_INT_ENABLE 0x000010c8
+#define XGMAC_RPCEIE BIT(12)
+#define XGMAC_ECEIE BIT(8)
+#define XGMAC_RXCEIE BIT(4)
+#define XGMAC_TXCEIE BIT(0)
+#define XGMAC_MTL_ECC_INT_STATUS 0x000010cc
#define XGMAC_MTL_TXQ_OPMODE(x) (0x00001100 + (0x80 * (x)))
#define XGMAC_TQS GENMASK(25, 16)
#define XGMAC_TQS_SHIFT 16
@@ -164,6 +262,7 @@
#define XGMAC_RXOVFIS BIT(16)
#define XGMAC_ABPSIS BIT(1)
#define XGMAC_TXUNFIS BIT(0)
+#define XGMAC_MAC_REGSIZE (XGMAC_MTL_QINT_STATUS(15) / 4)
/* DMA Registers */
#define XGMAC_DMA_MODE 0x00003000
@@ -190,7 +289,18 @@
#define XGMAC_TDPS GENMASK(29, 0)
#define XGMAC_RX_EDMA_CTRL 0x00003044
#define XGMAC_RDPS GENMASK(29, 0)
+#define XGMAC_DMA_SAFETY_INT_STATUS 0x00003064
+#define XGMAC_MCSIS BIT(31)
+#define XGMAC_MSUIS BIT(29)
+#define XGMAC_MSCIS BIT(28)
+#define XGMAC_DEUIS BIT(1)
+#define XGMAC_DECIS BIT(0)
+#define XGMAC_DMA_ECC_INT_ENABLE 0x00003068
+#define XGMAC_DCEIE BIT(1)
+#define XGMAC_TCEIE BIT(0)
+#define XGMAC_DMA_ECC_INT_STATUS 0x0000306c
#define XGMAC_DMA_CH_CONTROL(x) (0x00003100 + (0x80 * (x)))
+#define XGMAC_SPH BIT(24)
#define XGMAC_PBLx8 BIT(16)
#define XGMAC_DMA_CH_TX_CONTROL(x) (0x00003104 + (0x80 * (x)))
#define XGMAC_TxPBL GENMASK(21, 16)
@@ -230,12 +340,17 @@
#define XGMAC_TBU BIT(2)
#define XGMAC_TPS BIT(1)
#define XGMAC_TI BIT(0)
+#define XGMAC_REGSIZE ((0x0000317c + (0x80 * 15)) / 4)
/* Descriptors */
+#define XGMAC_TDES2_IVT GENMASK(31, 16)
+#define XGMAC_TDES2_IVT_SHIFT 16
#define XGMAC_TDES2_IOC BIT(31)
#define XGMAC_TDES2_TTSE BIT(30)
#define XGMAC_TDES2_B2L GENMASK(29, 16)
#define XGMAC_TDES2_B2L_SHIFT 16
+#define XGMAC_TDES2_VTIR GENMASK(15, 14)
+#define XGMAC_TDES2_VTIR_SHIFT 14
#define XGMAC_TDES2_B1L GENMASK(13, 0)
#define XGMAC_TDES3_OWN BIT(31)
#define XGMAC_TDES3_CTXT BIT(30)
@@ -244,18 +359,33 @@
#define XGMAC_TDES3_CPC GENMASK(27, 26)
#define XGMAC_TDES3_CPC_SHIFT 26
#define XGMAC_TDES3_TCMSSV BIT(26)
+#define XGMAC_TDES3_SAIC GENMASK(25, 23)
+#define XGMAC_TDES3_SAIC_SHIFT 23
#define XGMAC_TDES3_THL GENMASK(22, 19)
#define XGMAC_TDES3_THL_SHIFT 19
+#define XGMAC_TDES3_IVTIR GENMASK(19, 18)
+#define XGMAC_TDES3_IVTIR_SHIFT 18
#define XGMAC_TDES3_TSE BIT(18)
+#define XGMAC_TDES3_IVLTV BIT(17)
#define XGMAC_TDES3_CIC GENMASK(17, 16)
#define XGMAC_TDES3_CIC_SHIFT 16
#define XGMAC_TDES3_TPL GENMASK(17, 0)
+#define XGMAC_TDES3_VLTV BIT(16)
+#define XGMAC_TDES3_VT GENMASK(15, 0)
#define XGMAC_TDES3_FL GENMASK(14, 0)
+#define XGMAC_RDES2_HL GENMASK(9, 0)
#define XGMAC_RDES3_OWN BIT(31)
#define XGMAC_RDES3_CTXT BIT(30)
#define XGMAC_RDES3_IOC BIT(30)
#define XGMAC_RDES3_LD BIT(28)
#define XGMAC_RDES3_CDA BIT(27)
+#define XGMAC_RDES3_RSV BIT(26)
+#define XGMAC_RDES3_L34T GENMASK(23, 20)
+#define XGMAC_RDES3_L34T_SHIFT 20
+#define XGMAC_L34T_IP4TCP 0x1
+#define XGMAC_L34T_IP4UDP 0x2
+#define XGMAC_L34T_IP6TCP 0x9
+#define XGMAC_L34T_IP6UDP 0xA
#define XGMAC_RDES3_ES BIT(15)
#define XGMAC_RDES3_PL GENMASK(13, 0)
#define XGMAC_RDES3_TSD BIT(6)
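Each flexible PPS instance owns one byte of XGMAC_PPS_CONTROL, which is what the MAXIDX/MINIDX arithmetic above encodes. A worked sketch for index 0 (ioaddr assumed in scope), mirroring what dwxgmac2_flex_pps_config() later in this series does:

/* Worked example, assuming PPS instance 0 and an ioaddr mapping. */
u32 val = readl(ioaddr + XGMAC_PPS_CONTROL);

val &= ~XGMAC_PPSx_MASK(0);			/* clear bits 7:0 */
val |= XGMAC_PPSCMDx(0, XGMAC_PPSCMD_START);	/* 0x2 into bits 3:0 */
val |= XGMAC_TRGTMODSELx(0, XGMAC_PPSCMD_START);	/* bits 6:5 */
val |= XGMAC_PPSEN0;				/* flexible PPS mode, bit 4 */
writel(val, ioaddr + XGMAC_PPS_CONTROL);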
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c
index 85c68b7ee8c6..e534a3aaf4a3 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c
@@ -6,7 +6,9 @@
#include <linux/bitrev.h>
#include <linux/crc32.h>
+#include <linux/iopoll.h>
#include "stmmac.h"
+#include "stmmac_ptp.h"
#include "dwxgmac2.h"
static void dwxgmac2_core_init(struct mac_device_info *hw,
@@ -118,6 +120,23 @@ static void dwxgmac2_rx_queue_prio(struct mac_device_info *hw, u32 prio,
writel(value, ioaddr + reg);
}
+static void dwxgmac2_tx_queue_prio(struct mac_device_info *hw, u32 prio,
+ u32 queue)
+{
+ void __iomem *ioaddr = hw->pcsr;
+ u32 value, reg;
+
+ reg = (queue < 4) ? XGMAC_TC_PRTY_MAP0 : XGMAC_TC_PRTY_MAP1;
+ if (queue >= 4)
+ queue -= 4;
+
+ value = readl(ioaddr + reg);
+ value &= ~XGMAC_PSTC(queue);
+ value |= (prio << XGMAC_PSTC_SHIFT(queue)) & XGMAC_PSTC(queue);
+
+ writel(value, ioaddr + reg);
+}
+
static void dwxgmac2_prog_mtl_rx_algorithms(struct mac_device_info *hw,
u32 rx_alg)
{
@@ -144,7 +163,9 @@ static void dwxgmac2_prog_mtl_tx_algorithms(struct mac_device_info *hw,
u32 tx_alg)
{
void __iomem *ioaddr = hw->pcsr;
+ bool ets = true;
u32 value;
+ int i;
value = readl(ioaddr + XGMAC_MTL_OPMODE);
value &= ~XGMAC_ETSALG;
@@ -160,10 +181,28 @@ static void dwxgmac2_prog_mtl_tx_algorithms(struct mac_device_info *hw,
value |= XGMAC_DWRR;
break;
default:
+ ets = false;
break;
}
writel(value, ioaddr + XGMAC_MTL_OPMODE);
+
+ /* Set ETS if desired */
+ for (i = 0; i < MTL_MAX_TX_QUEUES; i++) {
+ value = readl(ioaddr + XGMAC_MTL_TCx_ETS_CONTROL(i));
+ value &= ~XGMAC_TSA;
+ if (ets)
+ value |= XGMAC_ETS;
+ writel(value, ioaddr + XGMAC_MTL_TCx_ETS_CONTROL(i));
+ }
+}
+
+static void dwxgmac2_set_mtl_tx_queue_weight(struct mac_device_info *hw,
+ u32 weight, u32 queue)
+{
+ void __iomem *ioaddr = hw->pcsr;
+
+ writel(weight, ioaddr + XGMAC_MTL_TCx_QUANTUM_WEIGHT(queue));
}
static void dwxgmac2_map_mtl_to_dma(struct mac_device_info *hw, u32 queue,
@@ -200,11 +239,21 @@ static void dwxgmac2_config_cbs(struct mac_device_info *hw,
writel(value, ioaddr + XGMAC_MTL_TCx_ETS_CONTROL(queue));
}
+static void dwxgmac2_dump_regs(struct mac_device_info *hw, u32 *reg_space)
+{
+ void __iomem *ioaddr = hw->pcsr;
+ int i;
+
+ for (i = 0; i < XGMAC_MAC_REGSIZE; i++)
+ reg_space[i] = readl(ioaddr + i * 4);
+}
+
static int dwxgmac2_host_irq_status(struct mac_device_info *hw,
struct stmmac_extra_stats *x)
{
void __iomem *ioaddr = hw->pcsr;
u32 stat, en;
+ int ret = 0;
en = readl(ioaddr + XGMAC_INT_EN);
stat = readl(ioaddr + XGMAC_INT_STATUS);
@@ -216,7 +265,24 @@ static int dwxgmac2_host_irq_status(struct mac_device_info *hw,
readl(ioaddr + XGMAC_PMT);
}
- return 0;
+ if (stat & XGMAC_LPIIS) {
+ u32 lpi = readl(ioaddr + XGMAC_LPI_CTRL);
+
+ if (lpi & XGMAC_TLPIEN) {
+ ret |= CORE_IRQ_TX_PATH_IN_LPI_MODE;
+ x->irq_tx_path_in_lpi_mode_n++;
+ }
+ if (lpi & XGMAC_TLPIEX) {
+ ret |= CORE_IRQ_TX_PATH_EXIT_LPI_MODE;
+ x->irq_tx_path_exit_lpi_mode_n++;
+ }
+ if (lpi & XGMAC_RLPIEN)
+ x->irq_rx_path_in_lpi_mode_n++;
+ if (lpi & XGMAC_RLPIEX)
+ x->irq_rx_path_exit_lpi_mode_n++;
+ }
+
+ return ret;
}
static int dwxgmac2_host_mtl_irq_status(struct mac_device_info *hw, u32 chan)
@@ -309,6 +375,53 @@ static void dwxgmac2_get_umac_addr(struct mac_device_info *hw,
addr[5] = (hi_addr >> 8) & 0xff;
}
+static void dwxgmac2_set_eee_mode(struct mac_device_info *hw,
+ bool en_tx_lpi_clockgating)
+{
+ void __iomem *ioaddr = hw->pcsr;
+ u32 value;
+
+ value = readl(ioaddr + XGMAC_LPI_CTRL);
+
+ value |= XGMAC_LPITXEN | XGMAC_LPITXA;
+ if (en_tx_lpi_clockgating)
+ value |= XGMAC_TXCGE;
+
+ writel(value, ioaddr + XGMAC_LPI_CTRL);
+}
+
+static void dwxgmac2_reset_eee_mode(struct mac_device_info *hw)
+{
+ void __iomem *ioaddr = hw->pcsr;
+ u32 value;
+
+ value = readl(ioaddr + XGMAC_LPI_CTRL);
+ value &= ~(XGMAC_LPITXEN | XGMAC_LPITXA | XGMAC_TXCGE);
+ writel(value, ioaddr + XGMAC_LPI_CTRL);
+}
+
+static void dwxgmac2_set_eee_pls(struct mac_device_info *hw, int link)
+{
+ void __iomem *ioaddr = hw->pcsr;
+ u32 value;
+
+ value = readl(ioaddr + XGMAC_LPI_CTRL);
+ if (link)
+ value |= XGMAC_PLS;
+ else
+ value &= ~XGMAC_PLS;
+ writel(value, ioaddr + XGMAC_LPI_CTRL);
+}
+
+static void dwxgmac2_set_eee_timer(struct mac_device_info *hw, int ls, int tw)
+{
+ void __iomem *ioaddr = hw->pcsr;
+ u32 value;
+
+ value = (tw & 0xffff) | ((ls & 0x3ff) << 16);
+ writel(value, ioaddr + XGMAC_LPI_TIMER_CTRL);
+}
+
static void dwxgmac2_set_mchash(void __iomem *ioaddr, u32 *mcfilterbits,
int mcbitslog2)
{
@@ -402,36 +515,694 @@ static void dwxgmac2_set_mac_loopback(void __iomem *ioaddr, bool enable)
writel(value, ioaddr + XGMAC_RX_CONFIG);
}
+static int dwxgmac2_rss_write_reg(void __iomem *ioaddr, bool is_key, int idx,
+ u32 val)
+{
+ u32 ctrl = 0;
+
+ writel(val, ioaddr + XGMAC_RSS_DATA);
+ ctrl |= idx << XGMAC_RSSIA_SHIFT;
+ ctrl |= is_key ? XGMAC_ADDRT : 0x0;
+ ctrl |= XGMAC_OB;
+ writel(ctrl, ioaddr + XGMAC_RSS_ADDR);
+
+ return readl_poll_timeout(ioaddr + XGMAC_RSS_ADDR, ctrl,
+ !(ctrl & XGMAC_OB), 100, 10000);
+}
+
+static int dwxgmac2_rss_configure(struct mac_device_info *hw,
+ struct stmmac_rss *cfg, u32 num_rxq)
+{
+ void __iomem *ioaddr = hw->pcsr;
+ u32 *key = (u32 *)cfg->key;
+ int i, ret;
+ u32 value;
+
+ value = readl(ioaddr + XGMAC_RSS_CTRL);
+ if (!cfg->enable) {
+ value &= ~XGMAC_RSSE;
+ writel(value, ioaddr + XGMAC_RSS_CTRL);
+ return 0;
+ }
+
+ for (i = 0; i < (sizeof(cfg->key) / sizeof(u32)); i++) {
+ ret = dwxgmac2_rss_write_reg(ioaddr, true, i, *key++);
+ if (ret)
+ return ret;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(cfg->table); i++) {
+ ret = dwxgmac2_rss_write_reg(ioaddr, false, i, cfg->table[i]);
+ if (ret)
+ return ret;
+ }
+
+ for (i = 0; i < num_rxq; i++)
+ dwxgmac2_map_mtl_to_dma(hw, i, XGMAC_QDDMACH);
+
+ value |= XGMAC_UDP4TE | XGMAC_TCP4TE | XGMAC_IP2TE | XGMAC_RSSE;
+ writel(value, ioaddr + XGMAC_RSS_CTRL);
+ return 0;
+}
+
+static void dwxgmac2_update_vlan_hash(struct mac_device_info *hw, u32 hash,
+ bool is_double)
+{
+ void __iomem *ioaddr = hw->pcsr;
+
+ writel(hash, ioaddr + XGMAC_VLAN_HASH_TABLE);
+
+ if (hash) {
+ u32 value = readl(ioaddr + XGMAC_PACKET_FILTER);
+
+ value |= XGMAC_FILTER_VTFE;
+
+ writel(value, ioaddr + XGMAC_PACKET_FILTER);
+
+ value = XGMAC_VLAN_VTHM | XGMAC_VLAN_ETV;
+ if (is_double) {
+ value |= XGMAC_VLAN_EDVLP;
+ value |= XGMAC_VLAN_ESVL;
+ value |= XGMAC_VLAN_DOVLTC;
+ }
+
+ writel(value, ioaddr + XGMAC_VLAN_TAG);
+ } else {
+ u32 value = readl(ioaddr + XGMAC_PACKET_FILTER);
+
+ value &= ~XGMAC_FILTER_VTFE;
+
+ writel(value, ioaddr + XGMAC_PACKET_FILTER);
+
+ value = readl(ioaddr + XGMAC_VLAN_TAG);
+
+ value &= ~(XGMAC_VLAN_VTHM | XGMAC_VLAN_ETV);
+ value &= ~(XGMAC_VLAN_EDVLP | XGMAC_VLAN_ESVL);
+ value &= ~XGMAC_VLAN_DOVLTC;
+ value &= ~XGMAC_VLAN_VID;
+
+ writel(value, ioaddr + XGMAC_VLAN_TAG);
+ }
+}
+
+struct dwxgmac3_error_desc {
+ bool valid;
+ const char *desc;
+ const char *detailed_desc;
+};
+
+#define STAT_OFF(field) offsetof(struct stmmac_safety_stats, field)
+
+static void dwxgmac3_log_error(struct net_device *ndev, u32 value, bool corr,
+ const char *module_name,
+ const struct dwxgmac3_error_desc *desc,
+ unsigned long field_offset,
+ struct stmmac_safety_stats *stats)
+{
+ unsigned long loc, mask;
+ u8 *bptr = (u8 *)stats;
+ unsigned long *ptr;
+
+ ptr = (unsigned long *)(bptr + field_offset);
+
+ mask = value;
+ for_each_set_bit(loc, &mask, 32) {
+ netdev_err(ndev, "Found %s error in %s: '%s: %s'\n", corr ?
+ "correctable" : "uncorrectable", module_name,
+ desc[loc].desc, desc[loc].detailed_desc);
+
+ /* Update counters */
+ ptr[loc]++;
+ }
+}
+
+static const struct dwxgmac3_error_desc dwxgmac3_mac_errors[32] = {
+ { true, "ATPES", "Application Transmit Interface Parity Check Error" },
+ { true, "DPES", "Descriptor Cache Data Path Parity Check Error" },
+ { true, "TPES", "TSO Data Path Parity Check Error" },
+ { true, "TSOPES", "TSO Header Data Path Parity Check Error" },
+ { true, "MTPES", "MTL Data Path Parity Check Error" },
+ { true, "MTSPES", "MTL TX Status Data Path Parity Check Error" },
+ { true, "MTBUPES", "MAC TBU Data Path Parity Check Error" },
+ { true, "MTFCPES", "MAC TFC Data Path Parity Check Error" },
+ { true, "ARPES", "Application Receive Interface Data Path Parity Check Error" },
+ { true, "MRWCPES", "MTL RWC Data Path Parity Check Error" },
+ { true, "MRRCPES", "MTL RCC Data Path Parity Check Error" },
+ { true, "CWPES", "CSR Write Data Path Parity Check Error" },
+ { true, "ASRPES", "AXI Slave Read Data Path Parity Check Error" },
+ { true, "TTES", "TX FSM Timeout Error" },
+ { true, "RTES", "RX FSM Timeout Error" },
+ { true, "CTES", "CSR FSM Timeout Error" },
+ { true, "ATES", "APP FSM Timeout Error" },
+ { true, "PTES", "PTP FSM Timeout Error" },
+ { false, "UNKNOWN", "Unknown Error" }, /* 18 */
+ { false, "UNKNOWN", "Unknown Error" }, /* 19 */
+ { false, "UNKNOWN", "Unknown Error" }, /* 20 */
+ { true, "MSTTES", "Master Read/Write Timeout Error" },
+ { true, "SLVTES", "Slave Read/Write Timeout Error" },
+ { true, "ATITES", "Application Timeout on ATI Interface Error" },
+ { true, "ARITES", "Application Timeout on ARI Interface Error" },
+ { true, "FSMPES", "FSM State Parity Error" },
+ { false, "UNKNOWN", "Unknown Error" }, /* 26 */
+ { false, "UNKNOWN", "Unknown Error" }, /* 27 */
+ { false, "UNKNOWN", "Unknown Error" }, /* 28 */
+ { false, "UNKNOWN", "Unknown Error" }, /* 29 */
+ { false, "UNKNOWN", "Unknown Error" }, /* 30 */
+ { true, "CPI", "Control Register Parity Check Error" },
+};
+
+static void dwxgmac3_handle_mac_err(struct net_device *ndev,
+ void __iomem *ioaddr, bool correctable,
+ struct stmmac_safety_stats *stats)
+{
+ u32 value;
+
+ value = readl(ioaddr + XGMAC_MAC_DPP_FSM_INT_STATUS);
+ writel(value, ioaddr + XGMAC_MAC_DPP_FSM_INT_STATUS);
+
+ dwxgmac3_log_error(ndev, value, correctable, "MAC",
+ dwxgmac3_mac_errors, STAT_OFF(mac_errors), stats);
+}
+
+static const struct dwxgmac3_error_desc dwxgmac3_mtl_errors[32] = {
+ { true, "TXCES", "MTL TX Memory Error" },
+ { true, "TXAMS", "MTL TX Memory Address Mismatch Error" },
+ { true, "TXUES", "MTL TX Memory Error" },
+ { false, "UNKNOWN", "Unknown Error" }, /* 3 */
+ { true, "RXCES", "MTL RX Memory Error" },
+ { true, "RXAMS", "MTL RX Memory Address Mismatch Error" },
+ { true, "RXUES", "MTL RX Memory Error" },
+ { false, "UNKNOWN", "Unknown Error" }, /* 7 */
+ { true, "ECES", "MTL EST Memory Error" },
+ { true, "EAMS", "MTL EST Memory Address Mismatch Error" },
+ { true, "EUES", "MTL EST Memory Error" },
+ { false, "UNKNOWN", "Unknown Error" }, /* 11 */
+ { true, "RPCES", "MTL RX Parser Memory Error" },
+ { true, "RPAMS", "MTL RX Parser Memory Address Mismatch Error" },
+ { true, "RPUES", "MTL RX Parser Memory Error" },
+ { false, "UNKNOWN", "Unknown Error" }, /* 15 */
+ { false, "UNKNOWN", "Unknown Error" }, /* 16 */
+ { false, "UNKNOWN", "Unknown Error" }, /* 17 */
+ { false, "UNKNOWN", "Unknown Error" }, /* 18 */
+ { false, "UNKNOWN", "Unknown Error" }, /* 19 */
+ { false, "UNKNOWN", "Unknown Error" }, /* 20 */
+ { false, "UNKNOWN", "Unknown Error" }, /* 21 */
+ { false, "UNKNOWN", "Unknown Error" }, /* 22 */
+ { false, "UNKNOWN", "Unknown Error" }, /* 23 */
+ { false, "UNKNOWN", "Unknown Error" }, /* 24 */
+ { false, "UNKNOWN", "Unknown Error" }, /* 25 */
+ { false, "UNKNOWN", "Unknown Error" }, /* 26 */
+ { false, "UNKNOWN", "Unknown Error" }, /* 27 */
+ { false, "UNKNOWN", "Unknown Error" }, /* 28 */
+ { false, "UNKNOWN", "Unknown Error" }, /* 29 */
+ { false, "UNKNOWN", "Unknown Error" }, /* 30 */
+ { false, "UNKNOWN", "Unknown Error" }, /* 31 */
+};
+
+static void dwxgmac3_handle_mtl_err(struct net_device *ndev,
+ void __iomem *ioaddr, bool correctable,
+ struct stmmac_safety_stats *stats)
+{
+ u32 value;
+
+ value = readl(ioaddr + XGMAC_MTL_ECC_INT_STATUS);
+ writel(value, ioaddr + XGMAC_MTL_ECC_INT_STATUS);
+
+ dwxgmac3_log_error(ndev, value, correctable, "MTL",
+ dwxgmac3_mtl_errors, STAT_OFF(mtl_errors), stats);
+}
+
+static const struct dwxgmac3_error_desc dwxgmac3_dma_errors[32] = {
+ { true, "TCES", "DMA TSO Memory Error" },
+ { true, "TAMS", "DMA TSO Memory Address Mismatch Error" },
+ { true, "TUES", "DMA TSO Memory Error" },
+ { false, "UNKNOWN", "Unknown Error" }, /* 3 */
+ { true, "DCES", "DMA DCACHE Memory Error" },
+ { true, "DAMS", "DMA DCACHE Address Mismatch Error" },
+ { true, "DUES", "DMA DCACHE Memory Error" },
+ { false, "UNKNOWN", "Unknown Error" }, /* 7 */
+ { false, "UNKNOWN", "Unknown Error" }, /* 8 */
+ { false, "UNKNOWN", "Unknown Error" }, /* 9 */
+ { false, "UNKNOWN", "Unknown Error" }, /* 10 */
+ { false, "UNKNOWN", "Unknown Error" }, /* 11 */
+ { false, "UNKNOWN", "Unknown Error" }, /* 12 */
+ { false, "UNKNOWN", "Unknown Error" }, /* 13 */
+ { false, "UNKNOWN", "Unknown Error" }, /* 14 */
+ { false, "UNKNOWN", "Unknown Error" }, /* 15 */
+ { false, "UNKNOWN", "Unknown Error" }, /* 16 */
+ { false, "UNKNOWN", "Unknown Error" }, /* 17 */
+ { false, "UNKNOWN", "Unknown Error" }, /* 18 */
+ { false, "UNKNOWN", "Unknown Error" }, /* 19 */
+ { false, "UNKNOWN", "Unknown Error" }, /* 20 */
+ { false, "UNKNOWN", "Unknown Error" }, /* 21 */
+ { false, "UNKNOWN", "Unknown Error" }, /* 22 */
+ { false, "UNKNOWN", "Unknown Error" }, /* 23 */
+ { false, "UNKNOWN", "Unknown Error" }, /* 24 */
+ { false, "UNKNOWN", "Unknown Error" }, /* 25 */
+ { false, "UNKNOWN", "Unknown Error" }, /* 26 */
+ { false, "UNKNOWN", "Unknown Error" }, /* 27 */
+ { false, "UNKNOWN", "Unknown Error" }, /* 28 */
+ { false, "UNKNOWN", "Unknown Error" }, /* 29 */
+ { false, "UNKNOWN", "Unknown Error" }, /* 30 */
+ { false, "UNKNOWN", "Unknown Error" }, /* 31 */
+};
+
+static void dwxgmac3_handle_dma_err(struct net_device *ndev,
+ void __iomem *ioaddr, bool correctable,
+ struct stmmac_safety_stats *stats)
+{
+ u32 value;
+
+ value = readl(ioaddr + XGMAC_DMA_ECC_INT_STATUS);
+ writel(value, ioaddr + XGMAC_DMA_ECC_INT_STATUS);
+
+ dwxgmac3_log_error(ndev, value, correctable, "DMA",
+ dwxgmac3_dma_errors, STAT_OFF(dma_errors), stats);
+}
+
+static int dwxgmac3_safety_feat_config(void __iomem *ioaddr, unsigned int asp)
+{
+ u32 value;
+
+ if (!asp)
+ return -EINVAL;
+
+ /* 1. Enable Safety Features */
+ writel(0x0, ioaddr + XGMAC_MTL_ECC_CONTROL);
+
+ /* 2. Enable MTL Safety Interrupts */
+ value = readl(ioaddr + XGMAC_MTL_ECC_INT_ENABLE);
+ value |= XGMAC_RPCEIE; /* RX Parser Memory Correctable Error */
+ value |= XGMAC_ECEIE; /* EST Memory Correctable Error */
+ value |= XGMAC_RXCEIE; /* RX Memory Correctable Error */
+ value |= XGMAC_TXCEIE; /* TX Memory Correctable Error */
+ writel(value, ioaddr + XGMAC_MTL_ECC_INT_ENABLE);
+
+ /* 3. Enable DMA Safety Interrupts */
+ value = readl(ioaddr + XGMAC_DMA_ECC_INT_ENABLE);
+ value |= XGMAC_DCEIE; /* Descriptor Cache Memory Correctable Error */
+ value |= XGMAC_TCEIE; /* TSO Memory Correctable Error */
+ writel(value, ioaddr + XGMAC_DMA_ECC_INT_ENABLE);
+
+ /* Only ECC Protection for External Memory feature is selected */
+ if (asp <= 0x1)
+ return 0;
+
+ /* 4. Enable Parity and Timeout for FSM */
+ value = readl(ioaddr + XGMAC_MAC_FSM_CONTROL);
+ value |= XGMAC_PRTYEN; /* FSM Parity Feature */
+ value |= XGMAC_TMOUTEN; /* FSM Timeout Feature */
+ writel(value, ioaddr + XGMAC_MAC_FSM_CONTROL);
+
+ return 0;
+}
+
+static int dwxgmac3_safety_feat_irq_status(struct net_device *ndev,
+ void __iomem *ioaddr,
+ unsigned int asp,
+ struct stmmac_safety_stats *stats)
+{
+ bool err, corr;
+ u32 mtl, dma;
+ int ret = 0;
+
+ if (!asp)
+ return -EINVAL;
+
+ mtl = readl(ioaddr + XGMAC_MTL_SAFETY_INT_STATUS);
+ dma = readl(ioaddr + XGMAC_DMA_SAFETY_INT_STATUS);
+
+ err = (mtl & XGMAC_MCSIS) || (dma & XGMAC_MCSIS);
+ corr = false;
+ if (err) {
+ dwxgmac3_handle_mac_err(ndev, ioaddr, corr, stats);
+ ret |= !corr;
+ }
+
+ err = (mtl & (XGMAC_MEUIS | XGMAC_MECIS)) ||
+ (dma & (XGMAC_MSUIS | XGMAC_MSCIS));
+ corr = (mtl & XGMAC_MECIS) || (dma & XGMAC_MSCIS);
+ if (err) {
+ dwxgmac3_handle_mtl_err(ndev, ioaddr, corr, stats);
+ ret |= !corr;
+ }
+
+ err = dma & (XGMAC_DEUIS | XGMAC_DECIS);
+ corr = dma & XGMAC_DECIS;
+ if (err) {
+ dwxgmac3_handle_dma_err(ndev, ioaddr, corr, stats);
+ ret |= !corr;
+ }
+
+ return ret;
+}
+
+static const struct dwxgmac3_error {
+ const struct dwxgmac3_error_desc *desc;
+} dwxgmac3_all_errors[] = {
+ { dwxgmac3_mac_errors },
+ { dwxgmac3_mtl_errors },
+ { dwxgmac3_dma_errors },
+};
+
+static int dwxgmac3_safety_feat_dump(struct stmmac_safety_stats *stats,
+ int index, unsigned long *count,
+ const char **desc)
+{
+ int module = index / 32, offset = index % 32;
+ unsigned long *ptr = (unsigned long *)stats;
+
+ if (module >= ARRAY_SIZE(dwxgmac3_all_errors))
+ return -EINVAL;
+ if (!dwxgmac3_all_errors[module].desc[offset].valid)
+ return -EINVAL;
+ if (count)
+ *count = *(ptr + index);
+ if (desc)
+ *desc = dwxgmac3_all_errors[module].desc[offset].desc;
+ return 0;
+}
+
+static int dwxgmac3_rxp_disable(void __iomem *ioaddr)
+{
+ u32 val = readl(ioaddr + XGMAC_MTL_OPMODE);
+
+ val &= ~XGMAC_FRPE;
+ writel(val, ioaddr + XGMAC_MTL_OPMODE);
+
+ return 0;
+}
+
+static void dwxgmac3_rxp_enable(void __iomem *ioaddr)
+{
+ u32 val;
+
+ val = readl(ioaddr + XGMAC_MTL_OPMODE);
+ val |= XGMAC_FRPE;
+ writel(val, ioaddr + XGMAC_MTL_OPMODE);
+}
+
+static int dwxgmac3_rxp_update_single_entry(void __iomem *ioaddr,
+ struct stmmac_tc_entry *entry,
+ int pos)
+{
+ int ret, i;
+
+ for (i = 0; i < (sizeof(entry->val) / sizeof(u32)); i++) {
+ int real_pos = pos * (sizeof(entry->val) / sizeof(u32)) + i;
+ u32 val;
+
+ /* Wait for ready */
+ ret = readl_poll_timeout(ioaddr + XGMAC_MTL_RXP_IACC_CTRL_ST,
+ val, !(val & XGMAC_STARTBUSY), 1, 10000);
+ if (ret)
+ return ret;
+
+ /* Write data */
+ val = *((u32 *)&entry->val + i);
+ writel(val, ioaddr + XGMAC_MTL_RXP_IACC_DATA);
+
+ /* Write pos */
+ val = real_pos & XGMAC_ADDR;
+ writel(val, ioaddr + XGMAC_MTL_RXP_IACC_CTRL_ST);
+
+ /* Write OP */
+ val |= XGMAC_WRRDN;
+ writel(val, ioaddr + XGMAC_MTL_RXP_IACC_CTRL_ST);
+
+ /* Start Write */
+ val |= XGMAC_STARTBUSY;
+ writel(val, ioaddr + XGMAC_MTL_RXP_IACC_CTRL_ST);
+
+ /* Wait for done */
+ ret = readl_poll_timeout(ioaddr + XGMAC_MTL_RXP_IACC_CTRL_ST,
+ val, !(val & XGMAC_STARTBUSY), 1, 10000);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static struct stmmac_tc_entry *
+dwxgmac3_rxp_get_next_entry(struct stmmac_tc_entry *entries,
+ unsigned int count, u32 curr_prio)
+{
+ struct stmmac_tc_entry *entry;
+ u32 min_prio = ~0x0;
+ int i, min_prio_idx;
+ bool found = false;
+
+ for (i = count - 1; i >= 0; i--) {
+ entry = &entries[i];
+
+ /* Do not update unused entries */
+ if (!entry->in_use)
+ continue;
+ /* Do not update already updated entries (i.e. fragments) */
+ if (entry->in_hw)
+ continue;
+ /* Let last entry be updated last */
+ if (entry->is_last)
+ continue;
+ /* Do not return fragments */
+ if (entry->is_frag)
+ continue;
+ /* Check if we already checked this prio */
+ if (entry->prio < curr_prio)
+ continue;
+ /* Check if this is the minimum prio */
+ if (entry->prio < min_prio) {
+ min_prio = entry->prio;
+ min_prio_idx = i;
+ found = true;
+ }
+ }
+
+ if (found)
+ return &entries[min_prio_idx];
+ return NULL;
+}
+
+static int dwxgmac3_rxp_config(void __iomem *ioaddr,
+ struct stmmac_tc_entry *entries,
+ unsigned int count)
+{
+ struct stmmac_tc_entry *entry, *frag;
+ int i, ret, nve = 0;
+ u32 curr_prio = 0;
+ u32 old_val, val;
+
+ /* Force disable RX */
+ old_val = readl(ioaddr + XGMAC_RX_CONFIG);
+ val = old_val & ~XGMAC_CONFIG_RE;
+ writel(val, ioaddr + XGMAC_RX_CONFIG);
+
+ /* Disable RX Parser */
+ ret = dwxgmac3_rxp_disable(ioaddr);
+ if (ret)
+ goto re_enable;
+
+ /* Set all entries as NOT in HW */
+ for (i = 0; i < count; i++) {
+ entry = &entries[i];
+ entry->in_hw = false;
+ }
+
+ /* Update entries by reverse order */
+ while (1) {
+ entry = dwxgmac3_rxp_get_next_entry(entries, count, curr_prio);
+ if (!entry)
+ break;
+
+ curr_prio = entry->prio;
+ frag = entry->frag_ptr;
+
+ /* Set special fragment requirements */
+ if (frag) {
+ entry->val.af = 0;
+ entry->val.rf = 0;
+ entry->val.nc = 1;
+ entry->val.ok_index = nve + 2;
+ }
+
+ ret = dwxgmac3_rxp_update_single_entry(ioaddr, entry, nve);
+ if (ret)
+ goto re_enable;
+
+ entry->table_pos = nve++;
+ entry->in_hw = true;
+
+ if (frag && !frag->in_hw) {
+ ret = dwxgmac3_rxp_update_single_entry(ioaddr, frag, nve);
+ if (ret)
+ goto re_enable;
+ frag->table_pos = nve++;
+ frag->in_hw = true;
+ }
+ }
+
+ if (!nve)
+ goto re_enable;
+
+ /* Update all pass entry */
+ for (i = 0; i < count; i++) {
+ entry = &entries[i];
+ if (!entry->is_last)
+ continue;
+
+ ret = dwxgmac3_rxp_update_single_entry(ioaddr, entry, nve);
+ if (ret)
+ goto re_enable;
+
+ entry->table_pos = nve++;
+ }
+
+ /* Assume n. of parsable entries == n. of valid entries */
+ val = (nve << 16) & XGMAC_NPE;
+ val |= nve & XGMAC_NVE;
+ writel(val, ioaddr + XGMAC_MTL_RXP_CONTROL_STATUS);
+
+ /* Enable RX Parser */
+ dwxgmac3_rxp_enable(ioaddr);
+
+re_enable:
+ /* Re-enable RX */
+ writel(old_val, ioaddr + XGMAC_RX_CONFIG);
+ return ret;
+}
+
+static int dwxgmac2_get_mac_tx_timestamp(struct mac_device_info *hw, u64 *ts)
+{
+ void __iomem *ioaddr = hw->pcsr;
+ u32 value;
+
+ if (readl_poll_timeout_atomic(ioaddr + XGMAC_TIMESTAMP_STATUS,
+ value, value & XGMAC_TXTSC, 100, 10000))
+ return -EBUSY;
+
+ *ts = readl(ioaddr + XGMAC_TXTIMESTAMP_NSEC) & XGMAC_TXTSSTSLO;
+ *ts += readl(ioaddr + XGMAC_TXTIMESTAMP_SEC) * 1000000000ULL;
+ return 0;
+}
+
+static int dwxgmac2_flex_pps_config(void __iomem *ioaddr, int index,
+ struct stmmac_pps_cfg *cfg, bool enable,
+ u32 sub_second_inc, u32 systime_flags)
+{
+ u32 tnsec = readl(ioaddr + XGMAC_PPSx_TARGET_TIME_NSEC(index));
+ u32 val = readl(ioaddr + XGMAC_PPS_CONTROL);
+ u64 period;
+
+ if (!cfg->available)
+ return -EINVAL;
+ if (tnsec & XGMAC_TRGTBUSY0)
+ return -EBUSY;
+ if (!sub_second_inc || !systime_flags)
+ return -EINVAL;
+
+ val &= ~XGMAC_PPSx_MASK(index);
+
+ if (!enable) {
+ val |= XGMAC_PPSCMDx(index, XGMAC_PPSCMD_STOP);
+ writel(val, ioaddr + XGMAC_PPS_CONTROL);
+ return 0;
+ }
+
+ val |= XGMAC_PPSCMDx(index, XGMAC_PPSCMD_START);
+ val |= XGMAC_TRGTMODSELx(index, XGMAC_PPSCMD_START);
+ val |= XGMAC_PPSEN0;
+
+ writel(cfg->start.tv_sec, ioaddr + XGMAC_PPSx_TARGET_TIME_SEC(index));
+
+ if (!(systime_flags & PTP_TCR_TSCTRLSSR))
+ cfg->start.tv_nsec = (cfg->start.tv_nsec * 1000) / 465;
+ writel(cfg->start.tv_nsec, ioaddr + XGMAC_PPSx_TARGET_TIME_NSEC(index));
+
+ period = cfg->period.tv_sec * 1000000000;
+ period += cfg->period.tv_nsec;
+
+ do_div(period, sub_second_inc);
+
+ if (period <= 1)
+ return -EINVAL;
+
+ writel(period - 1, ioaddr + XGMAC_PPSx_INTERVAL(index));
+
+ period >>= 1;
+ if (period <= 1)
+ return -EINVAL;
+
+ writel(period - 1, ioaddr + XGMAC_PPSx_WIDTH(index));
+
+ /* Finally, activate it */
+ writel(val, ioaddr + XGMAC_PPS_CONTROL);
+ return 0;
+}
+
+static void dwxgmac2_sarc_configure(void __iomem *ioaddr, int val)
+{
+ u32 value = readl(ioaddr + XGMAC_TX_CONFIG);
+
+ value &= ~XGMAC_CONFIG_SARC;
+ value |= val << XGMAC_CONFIG_SARC_SHIFT;
+
+ writel(value, ioaddr + XGMAC_TX_CONFIG);
+}
+
+static void dwxgmac2_enable_vlan(struct mac_device_info *hw, u32 type)
+{
+ void __iomem *ioaddr = hw->pcsr;
+ u32 value;
+
+ value = readl(ioaddr + XGMAC_VLAN_INCL);
+ value |= XGMAC_VLAN_VLTI;
+ value |= XGMAC_VLAN_CSVL; /* Only use SVLAN */
+ value &= ~XGMAC_VLAN_VLC;
+ value |= (type << XGMAC_VLAN_VLC_SHIFT) & XGMAC_VLAN_VLC;
+ writel(value, ioaddr + XGMAC_VLAN_INCL);
+}
+
const struct stmmac_ops dwxgmac210_ops = {
.core_init = dwxgmac2_core_init,
.set_mac = dwxgmac2_set_mac,
.rx_ipc = dwxgmac2_rx_ipc,
.rx_queue_enable = dwxgmac2_rx_queue_enable,
.rx_queue_prio = dwxgmac2_rx_queue_prio,
- .tx_queue_prio = NULL,
+ .tx_queue_prio = dwxgmac2_tx_queue_prio,
.rx_queue_routing = NULL,
.prog_mtl_rx_algorithms = dwxgmac2_prog_mtl_rx_algorithms,
.prog_mtl_tx_algorithms = dwxgmac2_prog_mtl_tx_algorithms,
- .set_mtl_tx_queue_weight = NULL,
+ .set_mtl_tx_queue_weight = dwxgmac2_set_mtl_tx_queue_weight,
.map_mtl_to_dma = dwxgmac2_map_mtl_to_dma,
.config_cbs = dwxgmac2_config_cbs,
- .dump_regs = NULL,
+ .dump_regs = dwxgmac2_dump_regs,
.host_irq_status = dwxgmac2_host_irq_status,
.host_mtl_irq_status = dwxgmac2_host_mtl_irq_status,
.flow_ctrl = dwxgmac2_flow_ctrl,
.pmt = dwxgmac2_pmt,
.set_umac_addr = dwxgmac2_set_umac_addr,
.get_umac_addr = dwxgmac2_get_umac_addr,
- .set_eee_mode = NULL,
- .reset_eee_mode = NULL,
- .set_eee_timer = NULL,
- .set_eee_pls = NULL,
+ .set_eee_mode = dwxgmac2_set_eee_mode,
+ .reset_eee_mode = dwxgmac2_reset_eee_mode,
+ .set_eee_timer = dwxgmac2_set_eee_timer,
+ .set_eee_pls = dwxgmac2_set_eee_pls,
.pcs_ctrl_ane = NULL,
.pcs_rane = NULL,
.pcs_get_adv_lp = NULL,
.debug = NULL,
.set_filter = dwxgmac2_set_filter,
+ .safety_feat_config = dwxgmac3_safety_feat_config,
+ .safety_feat_irq_status = dwxgmac3_safety_feat_irq_status,
+ .safety_feat_dump = dwxgmac3_safety_feat_dump,
.set_mac_loopback = dwxgmac2_set_mac_loopback,
+ .rss_configure = dwxgmac2_rss_configure,
+ .update_vlan_hash = dwxgmac2_update_vlan_hash,
+ .rxp_config = dwxgmac3_rxp_config,
+ .get_mac_tx_timestamp = dwxgmac2_get_mac_tx_timestamp,
+ .flex_pps_config = dwxgmac2_flex_pps_config,
+ .sarc_configure = dwxgmac2_sarc_configure,
+ .enable_vlan = dwxgmac2_enable_vlan,
};
int dwxgmac2_setup(struct stmmac_priv *priv)
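dwxgmac3_log_error() above walks the latched status word one set bit at a time and bumps the matching counter. A toy demonstration of that for_each_set_bit() pattern (values made up for illustration):

/* Toy demo: iterate only the raised bits of a 32-bit status word. */
unsigned long mask = 0x8003;	/* bits 0, 1 and 15 set */
unsigned long loc;

for_each_set_bit(loc, &mask, 32)
	pr_info("error bit %lu raised\n", loc);	/* prints 0, 1, 15 */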
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_descs.c b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_descs.c
index c4c45402b8f8..ae48154f933c 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_descs.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_descs.c
@@ -26,16 +26,17 @@ static int dwxgmac2_get_rx_status(void *data, struct stmmac_extra_stats *x,
struct dma_desc *p)
{
unsigned int rdes3 = le32_to_cpu(p->des3);
- int ret = good_frame;
if (unlikely(rdes3 & XGMAC_RDES3_OWN))
return dma_own;
+ if (unlikely(rdes3 & XGMAC_RDES3_CTXT))
+ return discard_frame;
if (likely(!(rdes3 & XGMAC_RDES3_LD)))
+ return rx_not_ls;
+ if (unlikely((rdes3 & XGMAC_RDES3_ES) && (rdes3 & XGMAC_RDES3_LD)))
return discard_frame;
- if (unlikely(rdes3 & XGMAC_RDES3_ES))
- ret = discard_frame;
- return ret;
+ return good_frame;
}
static int dwxgmac2_get_tx_len(struct dma_desc *p)
@@ -55,7 +56,7 @@ static void dwxgmac2_set_tx_owner(struct dma_desc *p)
static void dwxgmac2_set_rx_owner(struct dma_desc *p, int disable_rx_ic)
{
- p->des3 = cpu_to_le32(XGMAC_RDES3_OWN);
+ p->des3 |= cpu_to_le32(XGMAC_RDES3_OWN);
if (!disable_rx_ic)
p->des3 |= cpu_to_le32(XGMAC_RDES3_IOC);
@@ -98,11 +99,17 @@ static int dwxgmac2_rx_check_timestamp(void *desc)
unsigned int rdes3 = le32_to_cpu(p->des3);
bool desc_valid, ts_valid;
+ dma_rmb();
+
desc_valid = !(rdes3 & XGMAC_RDES3_OWN) && (rdes3 & XGMAC_RDES3_CTXT);
ts_valid = !(rdes3 & XGMAC_RDES3_TSD) && (rdes3 & XGMAC_RDES3_TSA);
- if (likely(desc_valid && ts_valid))
+ if (likely(desc_valid && ts_valid)) {
+ if ((p->des0 == 0xffffffff) && (p->des1 == 0xffffffff))
+ return -EINVAL;
return 0;
+ }
+
return -EINVAL;
}
@@ -113,13 +120,10 @@ static int dwxgmac2_get_rx_timestamp_status(void *desc, void *next_desc,
unsigned int rdes3 = le32_to_cpu(p->des3);
int ret = -EBUSY;
- if (likely(rdes3 & XGMAC_RDES3_CDA)) {
+ if (likely(rdes3 & XGMAC_RDES3_CDA))
ret = dwxgmac2_rx_check_timestamp(next_desc);
- if (ret)
- return ret;
- }
- return ret;
+ return !ret;
}
static void dwxgmac2_init_rx_desc(struct dma_desc *p, int disable_rx_ic,
@@ -144,7 +148,7 @@ static void dwxgmac2_prepare_tx_desc(struct dma_desc *p, int is_fs, int len,
p->des2 |= cpu_to_le32(len & XGMAC_TDES2_B1L);
- tdes3 = tot_pkt_len & XGMAC_TDES3_FL;
+ tdes3 |= tot_pkt_len & XGMAC_TDES3_FL;
if (is_fs)
tdes3 |= XGMAC_TDES3_FD;
else
@@ -254,6 +258,86 @@ static void dwxgmac2_clear(struct dma_desc *p)
p->des3 = 0;
}
+static int dwxgmac2_get_rx_hash(struct dma_desc *p, u32 *hash,
+ enum pkt_hash_types *type)
+{
+ unsigned int rdes3 = le32_to_cpu(p->des3);
+ u32 ptype;
+
+ if (rdes3 & XGMAC_RDES3_RSV) {
+ ptype = (rdes3 & XGMAC_RDES3_L34T) >> XGMAC_RDES3_L34T_SHIFT;
+
+ switch (ptype) {
+ case XGMAC_L34T_IP4TCP:
+ case XGMAC_L34T_IP4UDP:
+ case XGMAC_L34T_IP6TCP:
+ case XGMAC_L34T_IP6UDP:
+ *type = PKT_HASH_TYPE_L4;
+ break;
+ default:
+ *type = PKT_HASH_TYPE_L3;
+ break;
+ }
+
+ *hash = le32_to_cpu(p->des1);
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static int dwxgmac2_get_rx_header_len(struct dma_desc *p, unsigned int *len)
+{
+ *len = le32_to_cpu(p->des2) & XGMAC_RDES2_HL;
+ return 0;
+}
+
+static void dwxgmac2_set_sec_addr(struct dma_desc *p, dma_addr_t addr)
+{
+ p->des2 = cpu_to_le32(lower_32_bits(addr));
+ p->des3 = cpu_to_le32(upper_32_bits(addr));
+}
+
+static void dwxgmac2_set_sarc(struct dma_desc *p, u32 sarc_type)
+{
+ sarc_type <<= XGMAC_TDES3_SAIC_SHIFT;
+
+ p->des3 |= cpu_to_le32(sarc_type & XGMAC_TDES3_SAIC);
+}
+
+static void dwxgmac2_set_vlan_tag(struct dma_desc *p, u16 tag, u16 inner_tag,
+ u32 inner_type)
+{
+ p->des0 = 0;
+ p->des1 = 0;
+ p->des2 = 0;
+ p->des3 = 0;
+
+ /* Inner VLAN */
+ if (inner_type) {
+ u32 des = inner_tag << XGMAC_TDES2_IVT_SHIFT;
+
+ des &= XGMAC_TDES2_IVT;
+ p->des2 = cpu_to_le32(des);
+
+ des = inner_type << XGMAC_TDES3_IVTIR_SHIFT;
+ des &= XGMAC_TDES3_IVTIR;
+ p->des3 = cpu_to_le32(des | XGMAC_TDES3_IVLTV);
+ }
+
+ /* Outer VLAN */
+ p->des3 |= cpu_to_le32(tag & XGMAC_TDES3_VT);
+ p->des3 |= cpu_to_le32(XGMAC_TDES3_VLTV);
+
+ p->des3 |= cpu_to_le32(XGMAC_TDES3_CTXT);
+}
+
+static void dwxgmac2_set_vlan(struct dma_desc *p, u32 type)
+{
+ type <<= XGMAC_TDES2_VTIR_SHIFT;
+ p->des2 |= cpu_to_le32(type & XGMAC_TDES2_VTIR);
+}
+
const struct stmmac_desc_ops dwxgmac210_desc_ops = {
.tx_status = dwxgmac2_get_tx_status,
.rx_status = dwxgmac2_get_rx_status,
@@ -277,4 +361,10 @@ const struct stmmac_desc_ops dwxgmac210_desc_ops = {
.get_addr = dwxgmac2_get_addr,
.set_addr = dwxgmac2_set_addr,
.clear = dwxgmac2_clear,
+ .get_rx_hash = dwxgmac2_get_rx_hash,
+ .get_rx_header_len = dwxgmac2_get_rx_header_len,
+ .set_sec_addr = dwxgmac2_set_sec_addr,
+ .set_sarc = dwxgmac2_set_sarc,
+ .set_vlan_tag = dwxgmac2_set_vlan_tag,
+ .set_vlan = dwxgmac2_set_vlan,
};
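A hedged sketch of how the rx path would consume the new get_rx_hash hook (priv, p and skb assumed from the surrounding rx loop; not necessarily the in-tree call site):

/* Sketch: feed the descriptor-provided hash into the skb, if valid. */
enum pkt_hash_types hash_type;
u32 hash;

if (!stmmac_get_rx_hash(priv, p, &hash, &hash_type))
	skb_set_hash(skb, hash, hash_type);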
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c
index a4f236e3593e..64956465c030 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c
@@ -128,6 +128,14 @@ static void dwxgmac2_dma_axi(void __iomem *ioaddr, struct stmmac_axi *axi)
writel(XGMAC_RDPS, ioaddr + XGMAC_RX_EDMA_CTRL);
}
+static void dwxgmac2_dma_dump_regs(void __iomem *ioaddr, u32 *reg_space)
+{
+ int i;
+
+ for (i = (XGMAC_DMA_MODE / 4); i < XGMAC_REGSIZE; i++)
+ reg_space[i] = readl(ioaddr + i * 4);
+}
+
static void dwxgmac2_dma_rx_mode(void __iomem *ioaddr, int mode,
u32 channel, int fifosz, u8 qmode)
{
@@ -351,18 +359,24 @@ static void dwxgmac2_get_hw_feature(void __iomem *ioaddr,
/* MAC HW feature 0 */
hw_cap = readl(ioaddr + XGMAC_HW_FEATURE0);
+ dma_cap->vlins = (hw_cap & XGMAC_HWFEAT_SAVLANINS) >> 27;
dma_cap->rx_coe = (hw_cap & XGMAC_HWFEAT_RXCOESEL) >> 16;
dma_cap->tx_coe = (hw_cap & XGMAC_HWFEAT_TXCOESEL) >> 14;
+ dma_cap->eee = (hw_cap & XGMAC_HWFEAT_EEESEL) >> 13;
dma_cap->atime_stamp = (hw_cap & XGMAC_HWFEAT_TSSEL) >> 12;
dma_cap->av = (hw_cap & XGMAC_HWFEAT_AVSEL) >> 11;
dma_cap->av &= (hw_cap & XGMAC_HWFEAT_RAVSEL) >> 10;
+ dma_cap->rmon = (hw_cap & XGMAC_HWFEAT_MMCSEL) >> 8;
dma_cap->pmt_magic_frame = (hw_cap & XGMAC_HWFEAT_MGKSEL) >> 7;
dma_cap->pmt_remote_wake_up = (hw_cap & XGMAC_HWFEAT_RWKSEL) >> 6;
+ dma_cap->vlhash = (hw_cap & XGMAC_HWFEAT_VLHASH) >> 4;
dma_cap->mbps_1000 = (hw_cap & XGMAC_HWFEAT_GMIISEL) >> 1;
/* MAC HW feature 1 */
hw_cap = readl(ioaddr + XGMAC_HW_FEATURE1);
+ dma_cap->rssen = (hw_cap & XGMAC_HWFEAT_RSSEN) >> 20;
dma_cap->tsoen = (hw_cap & XGMAC_HWFEAT_TSOEN) >> 18;
+ dma_cap->sphen = (hw_cap & XGMAC_HWFEAT_SPHEN) >> 17;
dma_cap->addr64 = (hw_cap & XGMAC_HWFEAT_ADDR64) >> 14;
switch (dma_cap->addr64) {
@@ -396,6 +410,14 @@ static void dwxgmac2_get_hw_feature(void __iomem *ioaddr,
((hw_cap & XGMAC_HWFEAT_TXQCNT) >> 6) + 1;
dma_cap->number_rx_queues =
((hw_cap & XGMAC_HWFEAT_RXQCNT) >> 0) + 1;
+
+ /* MAC HW feature 3 */
+ hw_cap = readl(ioaddr + XGMAC_HW_FEATURE3);
+ dma_cap->asp = (hw_cap & XGMAC_HWFEAT_ASP) >> 14;
+ dma_cap->dvlan = (hw_cap & XGMAC_HWFEAT_DVLAN) >> 13;
+ dma_cap->frpes = (hw_cap & XGMAC_HWFEAT_FRPES) >> 11;
+ dma_cap->frpbs = (hw_cap & XGMAC_HWFEAT_FRPPB) >> 9;
+ dma_cap->frpsel = (hw_cap & XGMAC_HWFEAT_FRPSEL) >> 3;
}
static void dwxgmac2_rx_watchdog(void __iomem *ioaddr, u32 riwt, u32 nchan)
@@ -462,6 +484,22 @@ static void dwxgmac2_set_bfsize(void __iomem *ioaddr, int bfsize, u32 chan)
writel(value, ioaddr + XGMAC_DMA_CH_RX_CONTROL(chan));
}
+static void dwxgmac2_enable_sph(void __iomem *ioaddr, bool en, u32 chan)
+{
+ u32 value = readl(ioaddr + XGMAC_RX_CONFIG);
+
+ value &= ~XGMAC_CONFIG_HDSMS;
+ value |= XGMAC_CONFIG_HDSMS_256; /* Segment max 256 bytes */
+ writel(value, ioaddr + XGMAC_RX_CONFIG);
+
+ value = readl(ioaddr + XGMAC_DMA_CH_CONTROL(chan));
+ if (en)
+ value |= XGMAC_SPH;
+ else
+ value &= ~XGMAC_SPH;
+ writel(value, ioaddr + XGMAC_DMA_CH_CONTROL(chan));
+}
+
const struct stmmac_dma_ops dwxgmac210_dma_ops = {
.reset = dwxgmac2_dma_reset,
.init = dwxgmac2_dma_init,
@@ -469,7 +507,7 @@ const struct stmmac_dma_ops dwxgmac210_dma_ops = {
.init_rx_chan = dwxgmac2_dma_init_rx_chan,
.init_tx_chan = dwxgmac2_dma_init_tx_chan,
.axi = dwxgmac2_dma_axi,
- .dump_regs = NULL,
+ .dump_regs = dwxgmac2_dma_dump_regs,
.dma_rx_mode = dwxgmac2_dma_rx_mode,
.dma_tx_mode = dwxgmac2_dma_tx_mode,
.enable_dma_irq = dwxgmac2_enable_dma_irq,
@@ -488,4 +526,5 @@ const struct stmmac_dma_ops dwxgmac210_dma_ops = {
.enable_tso = dwxgmac2_enable_tso,
.qmode = dwxgmac2_qmode,
.set_bfsize = dwxgmac2_set_bfsize,
+ .enable_sph = dwxgmac2_enable_sph,
};
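The get_hw_feature additions all follow the same mask-then-shift idiom; a worked example, assuming the controller returned 0x00140000 for HW_FEATURE1:

/* Worked example with an assumed register value. */
u32 hw_cap = 0x00140000;	/* bits 20 (RSSEN) and 18 (TSOEN) set */
unsigned int rssen = (hw_cap & XGMAC_HWFEAT_RSSEN) >> 20;	/* = 1 */
unsigned int tsoen = (hw_cap & XGMAC_HWFEAT_TSOEN) >> 18;	/* = 1 */
unsigned int sphen = (hw_cap & XGMAC_HWFEAT_SPHEN) >> 17;	/* = 0 */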
diff --git a/drivers/net/ethernet/stmicro/stmmac/hwif.c b/drivers/net/ethernet/stmicro/stmmac/hwif.c
index 6c61b753b55e..3af2e5015245 100644
--- a/drivers/net/ethernet/stmicro/stmmac/hwif.c
+++ b/drivers/net/ethernet/stmicro/stmmac/hwif.c
@@ -201,7 +201,7 @@ static const struct stmmac_hwif_entry {
.min_id = DWXGMAC_CORE_2_10,
.regs = {
.ptp_off = PTP_XGMAC_OFFSET,
- .mmc_off = 0,
+ .mmc_off = MMC_XGMAC_OFFSET,
},
.desc = &dwxgmac210_desc_ops,
.dma = &dwxgmac210_dma_ops,
@@ -209,7 +209,7 @@ static const struct stmmac_hwif_entry {
.hwtimestamp = &stmmac_ptp,
.mode = NULL,
.tc = &dwmac510_tc_ops,
- .mmc = NULL,
+ .mmc = &dwxgmac_mmc_ops,
.setup = dwxgmac2_setup,
.quirks = NULL,
},
diff --git a/drivers/net/ethernet/stmicro/stmmac/hwif.h b/drivers/net/ethernet/stmicro/stmmac/hwif.h
index 278c0dbec9d9..9435b312495d 100644
--- a/drivers/net/ethernet/stmicro/stmmac/hwif.h
+++ b/drivers/net/ethernet/stmicro/stmmac/hwif.h
@@ -86,6 +86,15 @@ struct stmmac_desc_ops {
void (*set_addr)(struct dma_desc *p, dma_addr_t addr);
/* clear descriptor */
void (*clear)(struct dma_desc *p);
+ /* RSS */
+ int (*get_rx_hash)(struct dma_desc *p, u32 *hash,
+ enum pkt_hash_types *type);
+ int (*get_rx_header_len)(struct dma_desc *p, unsigned int *len);
+ void (*set_sec_addr)(struct dma_desc *p, dma_addr_t addr);
+ void (*set_sarc)(struct dma_desc *p, u32 sarc_type);
+ void (*set_vlan_tag)(struct dma_desc *p, u16 tag, u16 inner_tag,
+ u32 inner_type);
+ void (*set_vlan)(struct dma_desc *p, u32 type);
};
#define stmmac_init_rx_desc(__priv, __args...) \
@@ -136,6 +145,18 @@ struct stmmac_desc_ops {
stmmac_do_void_callback(__priv, desc, set_addr, __args)
#define stmmac_clear_desc(__priv, __args...) \
stmmac_do_void_callback(__priv, desc, clear, __args)
+#define stmmac_get_rx_hash(__priv, __args...) \
+ stmmac_do_callback(__priv, desc, get_rx_hash, __args)
+#define stmmac_get_rx_header_len(__priv, __args...) \
+ stmmac_do_callback(__priv, desc, get_rx_header_len, __args)
+#define stmmac_set_desc_sec_addr(__priv, __args...) \
+ stmmac_do_void_callback(__priv, desc, set_sec_addr, __args)
+#define stmmac_set_desc_sarc(__priv, __args...) \
+ stmmac_do_void_callback(__priv, desc, set_sarc, __args)
+#define stmmac_set_desc_vlan_tag(__priv, __args...) \
+ stmmac_do_void_callback(__priv, desc, set_vlan_tag, __args)
+#define stmmac_set_desc_vlan(__priv, __args...) \
+ stmmac_do_void_callback(__priv, desc, set_vlan, __args)
struct stmmac_dma_cfg;
struct dma_features;
@@ -186,6 +207,7 @@ struct stmmac_dma_ops {
void (*enable_tso)(void __iomem *ioaddr, bool en, u32 chan);
void (*qmode)(void __iomem *ioaddr, u32 channel, u8 qmode);
void (*set_bfsize)(void __iomem *ioaddr, int bfsize, u32 chan);
+ void (*enable_sph)(void __iomem *ioaddr, bool en, u32 chan);
};
#define stmmac_reset(__priv, __args...) \
@@ -242,6 +264,8 @@ struct stmmac_dma_ops {
stmmac_do_void_callback(__priv, dma, qmode, __args)
#define stmmac_set_dma_bfsize(__priv, __args...) \
stmmac_do_void_callback(__priv, dma, set_bfsize, __args)
+#define stmmac_enable_sph(__priv, __args...) \
+ stmmac_do_void_callback(__priv, dma, enable_sph, __args)
struct mac_device_info;
struct net_device;
@@ -249,6 +273,7 @@ struct rgmii_adv;
struct stmmac_safety_stats;
struct stmmac_tc_entry;
struct stmmac_pps_cfg;
+struct stmmac_rss;
/* Helpers to program the MAC core */
struct stmmac_ops {
@@ -327,6 +352,17 @@ struct stmmac_ops {
u32 sub_second_inc, u32 systime_flags);
/* Loopback for selftests */
void (*set_mac_loopback)(void __iomem *ioaddr, bool enable);
+ /* RSS */
+ int (*rss_configure)(struct mac_device_info *hw,
+ struct stmmac_rss *cfg, u32 num_rxq);
+ /* VLAN */
+ void (*update_vlan_hash)(struct mac_device_info *hw, u32 hash,
+ bool is_double);
+ void (*enable_vlan)(struct mac_device_info *hw, u32 type);
+ /* TX Timestamp */
+ int (*get_mac_tx_timestamp)(struct mac_device_info *hw, u64 *ts);
+ /* Source Address Insertion / Replacement */
+ void (*sarc_configure)(void __iomem *ioaddr, int val);
};
#define stmmac_core_init(__priv, __args...) \
@@ -397,6 +433,16 @@ struct stmmac_ops {
stmmac_do_callback(__priv, mac, flex_pps_config, __args)
#define stmmac_set_mac_loopback(__priv, __args...) \
stmmac_do_void_callback(__priv, mac, set_mac_loopback, __args)
+#define stmmac_rss_configure(__priv, __args...) \
+ stmmac_do_callback(__priv, mac, rss_configure, __args)
+#define stmmac_update_vlan_hash(__priv, __args...) \
+ stmmac_do_void_callback(__priv, mac, update_vlan_hash, __args)
+#define stmmac_enable_vlan(__priv, __args...) \
+ stmmac_do_void_callback(__priv, mac, enable_vlan, __args)
+#define stmmac_get_mac_tx_timestamp(__priv, __args...) \
+ stmmac_do_callback(__priv, mac, get_mac_tx_timestamp, __args)
+#define stmmac_sarc_configure(__priv, __args...) \
+ stmmac_do_void_callback(__priv, mac, sarc_configure, __args)
/* PTP and HW Timer helpers */
struct stmmac_hwtimestamp {
@@ -503,6 +549,7 @@ extern const struct stmmac_ops dwxgmac210_ops;
extern const struct stmmac_dma_ops dwxgmac210_dma_ops;
extern const struct stmmac_desc_ops dwxgmac210_desc_ops;
extern const struct stmmac_mmc_ops dwmac_mmc_ops;
+extern const struct stmmac_mmc_ops dwxgmac_mmc_ops;
#define GMAC_VERSION 0x00000020 /* GMAC CORE Version */
#define GMAC4_VERSION 0x00000110 /* GMAC4+ CORE Version */
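All the stmmac_* wrappers above expand to NULL-safe dispatch through the ops tables; a simplified sketch of the underlying macro (the in-tree versions differ in detail):

/* Simplified sketch of the dispatch idiom behind the wrappers. */
#define sketch_do_callback(__priv, __module, __cname, __arg0, __args...) \
({ \
	int __result = -EINVAL; \
	if ((__priv)->hw->__module && (__priv)->hw->__module->__cname) \
		__result = (__priv)->hw->__module->__cname((__arg0), ##__args); \
	__result; \
})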
diff --git a/drivers/net/ethernet/stmicro/stmmac/mmc.h b/drivers/net/ethernet/stmicro/stmmac/mmc.h
index 3587ceb9faf5..a0c05925883e 100644
--- a/drivers/net/ethernet/stmicro/stmmac/mmc.h
+++ b/drivers/net/ethernet/stmicro/stmmac/mmc.h
@@ -24,6 +24,7 @@
#define MMC_GMAC4_OFFSET 0x700
#define MMC_GMAC3_X_OFFSET 0x100
+#define MMC_XGMAC_OFFSET 0x800
struct stmmac_counters {
unsigned int mmc_tx_octetcount_gb;
@@ -116,6 +117,14 @@ struct stmmac_counters {
unsigned int mmc_rx_tcp_err_octets;
unsigned int mmc_rx_icmp_gd_octets;
unsigned int mmc_rx_icmp_err_octets;
+
+ /* FPE */
+ unsigned int mmc_tx_fpe_fragment_cntr;
+ unsigned int mmc_tx_hold_req_cntr;
+ unsigned int mmc_rx_packet_assembly_err_cntr;
+ unsigned int mmc_rx_packet_smd_err_cntr;
+ unsigned int mmc_rx_packet_assembly_ok_cntr;
+ unsigned int mmc_rx_fpe_fragment_cntr;
};
#endif /* __MMC_H__ */
diff --git a/drivers/net/ethernet/stmicro/stmmac/mmc_core.c b/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
index a471db6d7b11..a223584f5f9a 100644
--- a/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
@@ -119,6 +119,64 @@
#define MMC_RX_ICMP_GD_OCTETS 0x180
#define MMC_RX_ICMP_ERR_OCTETS 0x184
+/* XGMAC MMC Registers */
+#define MMC_XGMAC_TX_OCTET_GB 0x14
+#define MMC_XGMAC_TX_PKT_GB 0x1c
+#define MMC_XGMAC_TX_BROAD_PKT_G 0x24
+#define MMC_XGMAC_TX_MULTI_PKT_G 0x2c
+#define MMC_XGMAC_TX_64OCT_GB 0x34
+#define MMC_XGMAC_TX_65OCT_GB 0x3c
+#define MMC_XGMAC_TX_128OCT_GB 0x44
+#define MMC_XGMAC_TX_256OCT_GB 0x4c
+#define MMC_XGMAC_TX_512OCT_GB 0x54
+#define MMC_XGMAC_TX_1024OCT_GB 0x5c
+#define MMC_XGMAC_TX_UNI_PKT_GB 0x64
+#define MMC_XGMAC_TX_MULTI_PKT_GB 0x6c
+#define MMC_XGMAC_TX_BROAD_PKT_GB 0x74
+#define MMC_XGMAC_TX_UNDER 0x7c
+#define MMC_XGMAC_TX_OCTET_G 0x84
+#define MMC_XGMAC_TX_PKT_G 0x8c
+#define MMC_XGMAC_TX_PAUSE 0x94
+#define MMC_XGMAC_TX_VLAN_PKT_G 0x9c
+#define MMC_XGMAC_TX_LPI_USEC 0xa4
+#define MMC_XGMAC_TX_LPI_TRAN 0xa8
+
+#define MMC_XGMAC_RX_PKT_GB 0x100
+#define MMC_XGMAC_RX_OCTET_GB 0x108
+#define MMC_XGMAC_RX_OCTET_G 0x110
+#define MMC_XGMAC_RX_BROAD_PKT_G 0x118
+#define MMC_XGMAC_RX_MULTI_PKT_G 0x120
+#define MMC_XGMAC_RX_CRC_ERR 0x128
+#define MMC_XGMAC_RX_RUNT_ERR 0x130
+#define MMC_XGMAC_RX_JABBER_ERR 0x134
+#define MMC_XGMAC_RX_UNDER 0x138
+#define MMC_XGMAC_RX_OVER 0x13c
+#define MMC_XGMAC_RX_64OCT_GB 0x140
+#define MMC_XGMAC_RX_65OCT_GB 0x148
+#define MMC_XGMAC_RX_128OCT_GB 0x150
+#define MMC_XGMAC_RX_256OCT_GB 0x158
+#define MMC_XGMAC_RX_512OCT_GB 0x160
+#define MMC_XGMAC_RX_1024OCT_GB 0x168
+#define MMC_XGMAC_RX_UNI_PKT_G 0x170
+#define MMC_XGMAC_RX_LENGTH_ERR 0x178
+#define MMC_XGMAC_RX_RANGE 0x180
+#define MMC_XGMAC_RX_PAUSE 0x188
+#define MMC_XGMAC_RX_FIFOOVER_PKT 0x190
+#define MMC_XGMAC_RX_VLAN_PKT_GB 0x198
+#define MMC_XGMAC_RX_WATCHDOG_ERR 0x1a0
+#define MMC_XGMAC_RX_LPI_USEC 0x1a4
+#define MMC_XGMAC_RX_LPI_TRAN 0x1a8
+#define MMC_XGMAC_RX_DISCARD_PKT_GB 0x1ac
+#define MMC_XGMAC_RX_DISCARD_OCT_GB 0x1b4
+#define MMC_XGMAC_RX_ALIGN_ERR_PKT 0x1bc
+
+#define MMC_XGMAC_TX_FPE_FRAG 0x208
+#define MMC_XGMAC_TX_HOLD_REQ 0x20c
+#define MMC_XGMAC_RX_PKT_ASSEMBLY_ERR 0x228
+#define MMC_XGMAC_RX_PKT_SMD_ERR 0x22c
+#define MMC_XGMAC_RX_PKT_ASSEMBLY_OK 0x230
+#define MMC_XGMAC_RX_FPE_FRAG 0x234
+
static void dwmac_mmc_ctrl(void __iomem *mmcaddr, unsigned int mode)
{
u32 value = readl(mmcaddr + MMC_CNTRL);
@@ -263,3 +321,137 @@ const struct stmmac_mmc_ops dwmac_mmc_ops = {
.intr_all_mask = dwmac_mmc_intr_all_mask,
.read = dwmac_mmc_read,
};
+
+static void dwxgmac_mmc_ctrl(void __iomem *mmcaddr, unsigned int mode)
+{
+ u32 value = readl(mmcaddr + MMC_CNTRL);
+
+ value |= (mode & 0x3F);
+
+ writel(value, mmcaddr + MMC_CNTRL);
+}
+
+static void dwxgmac_mmc_intr_all_mask(void __iomem *mmcaddr)
+{
+ writel(MMC_DEFAULT_MASK, mmcaddr + MMC_RX_INTR_MASK);
+ writel(MMC_DEFAULT_MASK, mmcaddr + MMC_TX_INTR_MASK);
+}
+
+static void dwxgmac_read_mmc_reg(void __iomem *addr, u32 reg, u32 *dest)
+{
+ u64 tmp = 0;
+
+ tmp += readl(addr + reg);
+ tmp += ((u64)readl(addr + reg + 0x4)) << 32;
+ if (tmp > GENMASK(31, 0))
+ *dest = ~0x0;
+ else
+ *dest = *dest + tmp;
+}
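+/* Illustrative example: the pair of 32-bit reads forms a 64-bit value
+ * that is saturated into the 32-bit destination:
+ *   hi = 0x1, lo = 0x10 -> tmp = 0x100000010 > GENMASK(31, 0), *dest = ~0x0
+ *   hi = 0x0, lo = 0x10 -> *dest += 0x10 (counters are reset-on-read)
+ */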
+
+/* This reads the MAC core counters (if actually supported).
+ * By default the MMC core is programmed to reset each
+ * counter after a read, so all the fields of the mmc struct
+ * have to be incremented.
+ */
+static void dwxgmac_mmc_read(void __iomem *mmcaddr, struct stmmac_counters *mmc)
+{
+ dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_TX_OCTET_GB,
+ &mmc->mmc_tx_octetcount_gb);
+ dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_TX_PKT_GB,
+ &mmc->mmc_tx_framecount_gb);
+ dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_TX_BROAD_PKT_G,
+ &mmc->mmc_tx_broadcastframe_g);
+ dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_TX_MULTI_PKT_G,
+ &mmc->mmc_tx_multicastframe_g);
+ dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_TX_64OCT_GB,
+ &mmc->mmc_tx_64_octets_gb);
+ dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_TX_65OCT_GB,
+ &mmc->mmc_tx_65_to_127_octets_gb);
+ dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_TX_128OCT_GB,
+ &mmc->mmc_tx_128_to_255_octets_gb);
+ dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_TX_256OCT_GB,
+ &mmc->mmc_tx_256_to_511_octets_gb);
+ dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_TX_512OCT_GB,
+ &mmc->mmc_tx_512_to_1023_octets_gb);
+ dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_TX_1024OCT_GB,
+ &mmc->mmc_tx_1024_to_max_octets_gb);
+ dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_TX_UNI_PKT_GB,
+ &mmc->mmc_tx_unicast_gb);
+ dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_TX_MULTI_PKT_GB,
+ &mmc->mmc_tx_multicast_gb);
+ dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_TX_BROAD_PKT_GB,
+ &mmc->mmc_tx_broadcast_gb);
+ dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_TX_UNDER,
+ &mmc->mmc_tx_underflow_error);
+ dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_TX_OCTET_G,
+ &mmc->mmc_tx_octetcount_g);
+ dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_TX_PKT_G,
+ &mmc->mmc_tx_framecount_g);
+ dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_TX_PAUSE,
+ &mmc->mmc_tx_pause_frame);
+ dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_TX_VLAN_PKT_G,
+ &mmc->mmc_tx_vlan_frame_g);
+
+ /* MMC RX counter registers */
+ dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_RX_PKT_GB,
+ &mmc->mmc_rx_framecount_gb);
+ dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_RX_OCTET_GB,
+ &mmc->mmc_rx_octetcount_gb);
+ dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_RX_OCTET_G,
+ &mmc->mmc_rx_octetcount_g);
+ dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_RX_BROAD_PKT_G,
+ &mmc->mmc_rx_broadcastframe_g);
+ dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_RX_MULTI_PKT_G,
+ &mmc->mmc_rx_multicastframe_g);
+ dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_RX_CRC_ERR,
+ &mmc->mmc_rx_crc_error);
+ mmc->mmc_rx_run_error += readl(mmcaddr + MMC_XGMAC_RX_RUNT_ERR);
+ mmc->mmc_rx_jabber_error += readl(mmcaddr + MMC_XGMAC_RX_JABBER_ERR);
+ mmc->mmc_rx_undersize_g += readl(mmcaddr + MMC_XGMAC_RX_UNDER);
+ mmc->mmc_rx_oversize_g += readl(mmcaddr + MMC_XGMAC_RX_OVER);
+ dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_RX_64OCT_GB,
+ &mmc->mmc_rx_64_octets_gb);
+ dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_RX_65OCT_GB,
+ &mmc->mmc_rx_65_to_127_octets_gb);
+ dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_RX_128OCT_GB,
+ &mmc->mmc_rx_128_to_255_octets_gb);
+ dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_RX_256OCT_GB,
+ &mmc->mmc_rx_256_to_511_octets_gb);
+ dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_RX_512OCT_GB,
+ &mmc->mmc_rx_512_to_1023_octets_gb);
+ dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_RX_1024OCT_GB,
+ &mmc->mmc_rx_1024_to_max_octets_gb);
+ dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_RX_UNI_PKT_G,
+ &mmc->mmc_rx_unicast_g);
+ dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_RX_LENGTH_ERR,
+ &mmc->mmc_rx_length_error);
+ dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_RX_RANGE,
+ &mmc->mmc_rx_autofrangetype);
+ dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_RX_PAUSE,
+ &mmc->mmc_rx_pause_frames);
+ dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_RX_FIFOOVER_PKT,
+ &mmc->mmc_rx_fifo_overflow);
+ dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_RX_VLAN_PKT_GB,
+ &mmc->mmc_rx_vlan_frames_gb);
+ mmc->mmc_rx_watchdog_error += readl(mmcaddr + MMC_XGMAC_RX_WATCHDOG_ERR);
+
+ mmc->mmc_tx_fpe_fragment_cntr += readl(mmcaddr + MMC_XGMAC_TX_FPE_FRAG);
+ mmc->mmc_tx_hold_req_cntr += readl(mmcaddr + MMC_XGMAC_TX_HOLD_REQ);
+ mmc->mmc_rx_packet_assembly_err_cntr +=
+ readl(mmcaddr + MMC_XGMAC_RX_PKT_ASSEMBLY_ERR);
+ mmc->mmc_rx_packet_smd_err_cntr +=
+ readl(mmcaddr + MMC_XGMAC_RX_PKT_SMD_ERR);
+ mmc->mmc_rx_packet_assembly_ok_cntr +=
+ readl(mmcaddr + MMC_XGMAC_RX_PKT_ASSEMBLY_OK);
+ mmc->mmc_rx_fpe_fragment_cntr +=
+ readl(mmcaddr + MMC_XGMAC_RX_FPE_FRAG);
+}
+
+const struct stmmac_mmc_ops dwxgmac_mmc_ops = {
+ .ctrl = dwxgmac_mmc_ctrl,
+ .intr_all_mask = dwxgmac_mmc_intr_all_mask,
+ .read = dwxgmac_mmc_read,
+};
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac.h b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
index 5cd966c154f3..dcb2e29a5717 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac.h
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
@@ -13,6 +13,7 @@
#define DRV_MODULE_VERSION "Jan_2016"
#include <linux/clk.h>
+#include <linux/if_vlan.h>
#include <linux/stmmac.h>
#include <linux/phylink.h>
#include <linux/pci.h>
@@ -57,7 +58,9 @@ struct stmmac_tx_queue {
struct stmmac_rx_buffer {
struct page *page;
+ struct page *sec_page;
dma_addr_t addr;
+ dma_addr_t sec_addr;
};
struct stmmac_rx_queue {
@@ -73,6 +76,12 @@ struct stmmac_rx_queue {
u32 rx_zeroc_thresh;
dma_addr_t dma_rx_phy;
u32 rx_tail_addr;
+ unsigned int state_saved;
+ struct {
+ struct sk_buff *skb;
+ unsigned int len;
+ unsigned int error;
+ } state;
};
struct stmmac_channel {
@@ -113,6 +122,12 @@ struct stmmac_pps_cfg {
struct timespec64 period;
};
+struct stmmac_rss {
+ int enable;
+ u8 key[STMMAC_RSS_HASH_KEY_SIZE];
+ u32 table[STMMAC_RSS_MAX_TABLE_SIZE];
+};
+
struct stmmac_priv {
/* Frequently used values are kept adjacent for cache effect */
u32 tx_coal_frames;
@@ -123,6 +138,8 @@ struct stmmac_priv {
int hwts_tx_en;
bool tx_path_in_lpi_mode;
bool tso;
+ int sph;
+ u32 sarc_type;
unsigned int dma_buf_sz;
unsigned int rx_copybreak;
@@ -185,11 +202,10 @@ struct stmmac_priv {
spinlock_t ptp_lock;
void __iomem *mmcaddr;
void __iomem *ptpaddr;
+ unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
#ifdef CONFIG_DEBUG_FS
struct dentry *dbgfs_dir;
- struct dentry *dbgfs_rings_status;
- struct dentry *dbgfs_dma_cap;
#endif
unsigned long state;
@@ -203,6 +219,9 @@ struct stmmac_priv {
/* Pulse Per Second output */
struct stmmac_pps_cfg pps[STMMAC_PPS_MAX];
+
+ /* Receive Side Scaling */
+ struct stmmac_rss rss;
};
enum stmmac_state {
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
index 6efb66820d4c..1c450105e5a6 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
@@ -18,10 +18,12 @@
#include "stmmac.h"
#include "dwmac_dma.h"
+#include "dwxgmac2.h"
#define REG_SPACE_SIZE 0x1060
#define MAC100_ETHTOOL_NAME "st_mac100"
#define GMAC_ETHTOOL_NAME "st_gmac"
+#define XGMAC_ETHTOOL_NAME "st_xgmac"
#define ETHTOOL_DMA_OFFSET 55
@@ -65,6 +67,7 @@ static const struct stmmac_stats stmmac_gstrings_stats[] = {
STMMAC_STAT(rx_missed_cntr),
STMMAC_STAT(rx_overflow_cntr),
STMMAC_STAT(rx_vlan),
+ STMMAC_STAT(rx_split_hdr_pkt_n),
/* Tx/Rx IRQ error info */
STMMAC_STAT(tx_undeflow_irq),
STMMAC_STAT(tx_process_stopped_irq),
@@ -243,6 +246,12 @@ static const struct stmmac_stats stmmac_mmc[] = {
STMMAC_MMC_STAT(mmc_rx_tcp_err_octets),
STMMAC_MMC_STAT(mmc_rx_icmp_gd_octets),
STMMAC_MMC_STAT(mmc_rx_icmp_err_octets),
+ STMMAC_MMC_STAT(mmc_tx_fpe_fragment_cntr),
+ STMMAC_MMC_STAT(mmc_tx_hold_req_cntr),
+ STMMAC_MMC_STAT(mmc_rx_packet_assembly_err_cntr),
+ STMMAC_MMC_STAT(mmc_rx_packet_smd_err_cntr),
+ STMMAC_MMC_STAT(mmc_rx_packet_assembly_ok_cntr),
+ STMMAC_MMC_STAT(mmc_rx_fpe_fragment_cntr),
};
#define STMMAC_MMC_STATS_LEN ARRAY_SIZE(stmmac_mmc)
@@ -253,6 +262,8 @@ static void stmmac_ethtool_getdrvinfo(struct net_device *dev,
if (priv->plat->has_gmac || priv->plat->has_gmac4)
strlcpy(info->driver, GMAC_ETHTOOL_NAME, sizeof(info->driver));
+ else if (priv->plat->has_xgmac)
+ strlcpy(info->driver, XGMAC_ETHTOOL_NAME, sizeof(info->driver));
else
strlcpy(info->driver, MAC100_ETHTOOL_NAME,
sizeof(info->driver));
@@ -398,23 +409,28 @@ static int stmmac_check_if_running(struct net_device *dev)
static int stmmac_ethtool_get_regs_len(struct net_device *dev)
{
+ struct stmmac_priv *priv = netdev_priv(dev);
+
+ if (priv->plat->has_xgmac)
+ return XGMAC_REGSIZE * 4;
return REG_SPACE_SIZE;
}
static void stmmac_ethtool_gregs(struct net_device *dev,
struct ethtool_regs *regs, void *space)
{
- u32 *reg_space = (u32 *) space;
-
struct stmmac_priv *priv = netdev_priv(dev);
-
- memset(reg_space, 0x0, REG_SPACE_SIZE);
+ u32 *reg_space = (u32 *) space;
stmmac_dump_mac_regs(priv, priv->hw, reg_space);
stmmac_dump_dma_regs(priv, priv->ioaddr, reg_space);
- /* Copy DMA registers to where ethtool expects them */
- memcpy(&reg_space[ETHTOOL_DMA_OFFSET], &reg_space[DMA_BUS_MODE / 4],
- NUM_DWMAC1000_DMA_REGS * 4);
+
+ if (!priv->plat->has_xgmac) {
+ /* Copy DMA registers to where ethtool expects them */
+ memcpy(&reg_space[ETHTOOL_DMA_OFFSET],
+ &reg_space[DMA_BUS_MODE / 4],
+ NUM_DWMAC1000_DMA_REGS * 4);
+ }
}
static int stmmac_nway_reset(struct net_device *dev)
@@ -758,6 +774,76 @@ static int stmmac_set_coalesce(struct net_device *dev,
return 0;
}
+static int stmmac_get_rxnfc(struct net_device *dev,
+ struct ethtool_rxnfc *rxnfc, u32 *rule_locs)
+{
+ struct stmmac_priv *priv = netdev_priv(dev);
+
+ switch (rxnfc->cmd) {
+ case ETHTOOL_GRXRINGS:
+ rxnfc->data = priv->plat->rx_queues_to_use;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static u32 stmmac_get_rxfh_key_size(struct net_device *dev)
+{
+ struct stmmac_priv *priv = netdev_priv(dev);
+
+ return sizeof(priv->rss.key);
+}
+
+static u32 stmmac_get_rxfh_indir_size(struct net_device *dev)
+{
+ struct stmmac_priv *priv = netdev_priv(dev);
+
+ return ARRAY_SIZE(priv->rss.table);
+}
+
+static int stmmac_get_rxfh(struct net_device *dev, u32 *indir, u8 *key,
+ u8 *hfunc)
+{
+ struct stmmac_priv *priv = netdev_priv(dev);
+ int i;
+
+ if (indir) {
+ for (i = 0; i < ARRAY_SIZE(priv->rss.table); i++)
+ indir[i] = priv->rss.table[i];
+ }
+
+ if (key)
+ memcpy(key, priv->rss.key, sizeof(priv->rss.key));
+ if (hfunc)
+ *hfunc = ETH_RSS_HASH_TOP;
+
+ return 0;
+}
+
+static int stmmac_set_rxfh(struct net_device *dev, const u32 *indir,
+ const u8 *key, const u8 hfunc)
+{
+ struct stmmac_priv *priv = netdev_priv(dev);
+ int i;
+
+ if ((hfunc != ETH_RSS_HASH_NO_CHANGE) && (hfunc != ETH_RSS_HASH_TOP))
+ return -EOPNOTSUPP;
+
+ if (indir) {
+ for (i = 0; i < ARRAY_SIZE(priv->rss.table); i++)
+ priv->rss.table[i] = indir[i];
+ }
+
+ if (key)
+ memcpy(priv->rss.key, key, sizeof(priv->rss.key));
+
+ return stmmac_rss_configure(priv, priv->hw, &priv->rss,
+ priv->plat->rx_queues_to_use);
+}
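+/* Usage sketch (illustrative; "ethN" is a placeholder name): these hooks
+ * back the standard ethtool RSS commands, e.g. "ethtool -x ethN" dumps the
+ * key and indirection table while "ethtool -X ethN equal 4" rewrites
+ * priv->rss.table before stmmac_rss_configure() pushes it to the MAC.
+ */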
+
static int stmmac_get_ts_info(struct net_device *dev,
struct ethtool_ts_info *info)
{
@@ -849,6 +935,11 @@ static const struct ethtool_ops stmmac_ethtool_ops = {
.get_eee = stmmac_ethtool_op_get_eee,
.set_eee = stmmac_ethtool_op_set_eee,
.get_sset_count = stmmac_get_sset_count,
+ .get_rxnfc = stmmac_get_rxnfc,
+ .get_rxfh_key_size = stmmac_get_rxfh_key_size,
+ .get_rxfh_indir_size = stmmac_get_rxfh_indir_size,
+ .get_rxfh = stmmac_get_rxfh,
+ .set_rxfh = stmmac_set_rxfh,
.get_ts_info = stmmac_get_ts_info,
.get_coalesce = stmmac_get_coalesce,
.set_coalesce = stmmac_set_coalesce,
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index fd54c7c87485..06ccd216ae90 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -105,7 +105,7 @@ MODULE_PARM_DESC(chain_mode, "To use chain instead of ring mode");
static irqreturn_t stmmac_interrupt(int irq, void *dev_id);
#ifdef CONFIG_DEBUG_FS
-static int stmmac_init_fs(struct net_device *dev);
+static void stmmac_init_fs(struct net_device *dev);
static void stmmac_exit_fs(struct net_device *dev);
#endif
@@ -432,6 +432,7 @@ static void stmmac_get_tx_hwtstamp(struct stmmac_priv *priv,
struct dma_desc *p, struct sk_buff *skb)
{
struct skb_shared_hwtstamps shhwtstamp;
+ bool found = false;
u64 ns = 0;
if (!priv->hwts_tx_en)
@@ -443,9 +444,13 @@ static void stmmac_get_tx_hwtstamp(struct stmmac_priv *priv,
/* check tx tstamp status */
if (stmmac_get_tx_timestamp_status(priv, p)) {
- /* get the valid tstamp */
stmmac_get_timestamp(priv, p, priv->adv_ts, &ns);
+ found = true;
+ } else if (!stmmac_get_mac_tx_timestamp(priv, priv->hw, &ns)) {
+ found = true;
+ }
+ if (found) {
memset(&shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
shhwtstamp.hwtstamp = ns_to_ktime(ns);
@@ -453,8 +458,6 @@ static void stmmac_get_tx_hwtstamp(struct stmmac_priv *priv,
/* pass tstamp to stack */
skb_tstamp_tx(skb, &shhwtstamp);
}
-
- return;
}
/* stmmac_get_rx_hwtstamp - get HW RX timestamps
@@ -1198,6 +1201,17 @@ static int stmmac_init_rx_buffers(struct stmmac_priv *priv, struct dma_desc *p,
if (!buf->page)
return -ENOMEM;
+ if (priv->sph) {
+ buf->sec_page = page_pool_dev_alloc_pages(rx_q->page_pool);
+ if (!buf->sec_page)
+ return -ENOMEM;
+
+ buf->sec_addr = page_pool_get_dma_addr(buf->sec_page);
+ stmmac_set_desc_sec_addr(priv, p, buf->sec_addr);
+ } else {
+ buf->sec_page = NULL;
+ }
+
buf->addr = page_pool_get_dma_addr(buf->page);
stmmac_set_desc_addr(priv, p, buf->addr);
if (priv->dma_buf_sz == BUF_SIZE_16KiB)
@@ -1220,6 +1234,10 @@ static void stmmac_free_rx_buffer(struct stmmac_priv *priv, u32 queue, int i)
if (buf->page)
page_pool_put_page(rx_q->page_pool, buf->page, false);
buf->page = NULL;
+
+ if (buf->sec_page)
+ page_pool_put_page(rx_q->page_pool, buf->sec_page, false);
+ buf->sec_page = NULL;
}
/**
@@ -2417,6 +2435,22 @@ static void stmmac_mac_config_rx_queues_routing(struct stmmac_priv *priv)
}
}
+static void stmmac_mac_config_rss(struct stmmac_priv *priv)
+{
+ if (!priv->dma_cap.rssen || !priv->plat->rss_en) {
+ priv->rss.enable = false;
+ return;
+ }
+
+ if (priv->dev->features & NETIF_F_RXHASH)
+ priv->rss.enable = true;
+ else
+ priv->rss.enable = false;
+
+ stmmac_rss_configure(priv, priv->hw, &priv->rss,
+ priv->plat->rx_queues_to_use);
+}
+
/**
* stmmac_mtl_configuration - Configure MTL
* @priv: driver private structure
@@ -2461,6 +2495,10 @@ static void stmmac_mtl_configuration(struct stmmac_priv *priv)
/* Set RX routing */
if (rx_queues_count > 1)
stmmac_mac_config_rx_queues_routing(priv);
+
+ /* Receive Side Scaling */
+ if (rx_queues_count > 1)
+ stmmac_mac_config_rss(priv);
}
static void stmmac_safety_feat_configuration(struct stmmac_priv *priv)
@@ -2573,6 +2611,16 @@ static int stmmac_hw_setup(struct net_device *dev, bool init_ptp)
stmmac_enable_tso(priv, priv->ioaddr, 1, chan);
}
+ /* Enable Split Header */
+ if (priv->sph && priv->hw->rx_csum) {
+ for (chan = 0; chan < rx_cnt; chan++)
+ stmmac_enable_sph(priv, priv->ioaddr, 1, chan);
+ }
+
+ /* VLAN Tag Insertion */
+ if (priv->dma_cap.vlins)
+ stmmac_enable_vlan(priv, priv->hw, STMMAC_VLAN_INSERT);
+
/* Start the ball rolling... */
stmmac_start_all_dma(priv);
@@ -2750,6 +2798,33 @@ static int stmmac_release(struct net_device *dev)
return 0;
}
+static bool stmmac_vlan_insert(struct stmmac_priv *priv, struct sk_buff *skb,
+ struct stmmac_tx_queue *tx_q)
+{
+ u16 tag = 0x0, inner_tag = 0x0;
+ u32 inner_type = 0x0;
+ struct dma_desc *p;
+
+ if (!priv->dma_cap.vlins)
+ return false;
+ if (!skb_vlan_tag_present(skb))
+ return false;
+ if (skb->vlan_proto == htons(ETH_P_8021AD)) {
+ inner_tag = skb_vlan_tag_get(skb);
+ inner_type = STMMAC_VLAN_INSERT;
+ }
+
+ tag = skb_vlan_tag_get(skb);
+
+ p = tx_q->dma_tx + tx_q->cur_tx;
+ if (stmmac_set_desc_vlan_tag(priv, p, tag, inner_tag, inner_type))
+ return false;
+
+ stmmac_set_tx_owner(priv, p);
+ tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, DMA_TX_SIZE);
+ return true;
+}
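+/* Sketch of the intended flow (assumption drawn from the callers below):
+ * when this returns true the tag went into a context descriptor via
+ * stmmac_set_desc_vlan_tag(), and the caller additionally marks the first
+ * data descriptor with STMMAC_VLAN_INSERT so the MAC emits the tag.
+ */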
+
/**
 * stmmac_tso_allocator - Allocate TSO descriptors
* @priv: driver private structure
@@ -2829,12 +2904,13 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
struct stmmac_priv *priv = netdev_priv(dev);
int nfrags = skb_shinfo(skb)->nr_frags;
u32 queue = skb_get_queue_mapping(skb);
- unsigned int first_entry;
struct stmmac_tx_queue *tx_q;
+ unsigned int first_entry;
int tmp_pay_len = 0;
u32 pay_len, mss;
u8 proto_hdr_len;
dma_addr_t des;
+ bool has_vlan;
int i;
tx_q = &priv->tx_queue[queue];
@@ -2876,12 +2952,18 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
skb->data_len);
}
+ /* Check if VLAN can be inserted by HW */
+ has_vlan = stmmac_vlan_insert(priv, skb, tx_q);
+
first_entry = tx_q->cur_tx;
WARN_ON(tx_q->tx_skbuff[first_entry]);
desc = tx_q->dma_tx + first_entry;
first = desc;
+ if (has_vlan)
+ stmmac_set_desc_vlan(priv, first, STMMAC_VLAN_INSERT);
+
/* first descriptor: fill Headers on Buf1 */
des = dma_map_single(priv->device, skb->data, skb_headlen(skb),
DMA_TO_DEVICE);
@@ -2960,6 +3042,9 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
priv->xstats.tx_set_ic_bit++;
}
+ if (priv->sarc_type)
+ stmmac_set_desc_sarc(priv, first, priv->sarc_type);
+
skb_tx_timestamp(skb);
if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
@@ -3038,6 +3123,7 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
unsigned int first_entry;
unsigned int enh_desc;
dma_addr_t des;
+ bool has_vlan;
int entry;
tx_q = &priv->tx_queue[queue];
@@ -3063,6 +3149,9 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
return NETDEV_TX_BUSY;
}
+ /* Check if VLAN can be inserted by HW */
+ has_vlan = stmmac_vlan_insert(priv, skb, tx_q);
+
entry = tx_q->cur_tx;
first_entry = entry;
WARN_ON(tx_q->tx_skbuff[first_entry]);
@@ -3076,6 +3165,9 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
first = desc;
+ if (has_vlan)
+ stmmac_set_desc_vlan(priv, first, STMMAC_VLAN_INSERT);
+
enh_desc = priv->plat->enh_desc;
/* To program the descriptors according to the size of the frame */
if (enh_desc)
@@ -3173,6 +3265,9 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
priv->xstats.tx_set_ic_bit++;
}
+ if (priv->sarc_type)
+ stmmac_set_desc_sarc(priv, first, priv->sarc_type);
+
skb_tx_timestamp(skb);
/* Ready to fill the first descriptor and set the OWN bit w/o any
@@ -3292,6 +3387,17 @@ static inline void stmmac_rx_refill(struct stmmac_priv *priv, u32 queue)
break;
}
+ if (priv->sph && !buf->sec_page) {
+ buf->sec_page = page_pool_dev_alloc_pages(rx_q->page_pool);
+ if (!buf->sec_page)
+ break;
+
+ buf->sec_addr = page_pool_get_dma_addr(buf->sec_page);
+
+ dma_sync_single_for_device(priv->device, buf->sec_addr,
+ len, DMA_FROM_DEVICE);
+ }
+
buf->addr = page_pool_get_dma_addr(buf->page);
/* Sync whole allocation to device. This will invalidate old
@@ -3301,6 +3407,7 @@ static inline void stmmac_rx_refill(struct stmmac_priv *priv, u32 queue)
DMA_FROM_DEVICE);
stmmac_set_desc_addr(priv, p, buf->addr);
+ stmmac_set_desc_sec_addr(priv, p, buf->sec_addr);
stmmac_refill_desc3(priv, rx_q, p);
rx_q->rx_count_frames++;
@@ -3330,9 +3437,10 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
{
struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
struct stmmac_channel *ch = &priv->channel[queue];
+ unsigned int count = 0, error = 0, len = 0;
+ int status = 0, coe = priv->hw->rx_csum;
unsigned int next_entry = rx_q->cur_rx;
- int coe = priv->hw->rx_csum;
- unsigned int count = 0;
+ struct sk_buff *skb = NULL;
if (netif_msg_rx_status(priv)) {
void *rx_head;
@@ -3346,10 +3454,30 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
stmmac_display_ring(priv, rx_head, DMA_RX_SIZE, true);
}
while (count < limit) {
+ unsigned int hlen = 0, prev_len = 0;
+ enum pkt_hash_types hash_type;
struct stmmac_rx_buffer *buf;
struct dma_desc *np, *p;
- int entry, status;
+ unsigned int sec_len;
+ int entry;
+ u32 hash;
+
+ if (!count && rx_q->state_saved) {
+ skb = rx_q->state.skb;
+ error = rx_q->state.error;
+ len = rx_q->state.len;
+ } else {
+ rx_q->state_saved = false;
+ skb = NULL;
+ error = 0;
+ len = 0;
+ }
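+ /* A frame may span several descriptors and the NAPI budget can
+ * run out mid-frame; the partial SKB is parked in rx_q->state at
+ * the bottom of this loop and resumed here on the next poll.
+ */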
+ if (count >= limit)
+ break;
+
+read_again:
+ sec_len = 0;
entry = next_entry;
buf = &rx_q->buf_pool[entry];
@@ -3376,6 +3504,7 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
np = rx_q->dma_rx + next_entry;
prefetch(np);
+ prefetch(page_address(buf->page));
if (priv->extend_desc)
stmmac_rx_extended_status(priv, &priv->dev->stats,
@@ -3384,26 +3513,23 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
page_pool_recycle_direct(rx_q->page_pool, buf->page);
priv->dev->stats.rx_errors++;
buf->page = NULL;
- } else {
- struct sk_buff *skb;
- int frame_len;
- unsigned int des;
+ error = 1;
+ }
- stmmac_get_desc_addr(priv, p, &des);
- frame_len = stmmac_get_rx_frame_len(priv, p, coe);
+ if (unlikely(error && (status & rx_not_ls)))
+ goto read_again;
+ if (unlikely(error)) {
+ dev_kfree_skb(skb);
+ continue;
+ }
- /* If frame length is greater than skb buffer size
- * (preallocated during init) then the packet is
- * ignored
- */
- if (frame_len > priv->dma_buf_sz) {
- if (net_ratelimit())
- netdev_err(priv->dev,
- "len %d larger than size (%d)\n",
- frame_len, priv->dma_buf_sz);
- priv->dev->stats.rx_length_errors++;
- continue;
- }
+ /* Buffer is good. Go on. */
+
+ if (likely(status & rx_not_ls)) {
+ len += priv->dma_buf_sz;
+ } else {
+ prev_len = len;
+ len = stmmac_get_rx_frame_len(priv, p, coe);
/* ACS is set; GMAC core strips PAD/FCS for IEEE 802.3
* Type frames (LLC/LLC-SNAP)
@@ -3414,53 +3540,97 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
*/
if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00) ||
unlikely(status != llc_snap))
- frame_len -= ETH_FCS_LEN;
+ len -= ETH_FCS_LEN;
+ }
+
+ if (!skb) {
+ int ret = stmmac_get_rx_header_len(priv, p, &hlen);
- if (netif_msg_rx_status(priv)) {
- netdev_dbg(priv->dev, "\tdesc: %p [entry %d] buff=0x%x\n",
- p, entry, des);
- netdev_dbg(priv->dev, "frame size %d, COE: %d\n",
- frame_len, status);
+ if (priv->sph && !ret && (hlen > 0)) {
+ sec_len = len;
+ if (!(status & rx_not_ls))
+ sec_len = sec_len - hlen;
+ len = hlen;
+
+ prefetch(page_address(buf->sec_page));
+ priv->xstats.rx_split_hdr_pkt_n++;
}
- skb = netdev_alloc_skb_ip_align(priv->dev, frame_len);
- if (unlikely(!skb)) {
+ skb = napi_alloc_skb(&ch->rx_napi, len);
+ if (!skb) {
priv->dev->stats.rx_dropped++;
continue;
}
- dma_sync_single_for_cpu(priv->device, buf->addr,
- frame_len, DMA_FROM_DEVICE);
+ dma_sync_single_for_cpu(priv->device, buf->addr, len,
+ DMA_FROM_DEVICE);
skb_copy_to_linear_data(skb, page_address(buf->page),
- frame_len);
- skb_put(skb, frame_len);
-
- if (netif_msg_pktdata(priv)) {
- netdev_dbg(priv->dev, "frame received (%dbytes)",
- frame_len);
- print_pkt(skb->data, frame_len);
- }
+ len);
+ skb_put(skb, len);
- stmmac_get_rx_hwtstamp(priv, p, np, skb);
+ /* Data payload copied into SKB, page ready for recycle */
+ page_pool_recycle_direct(rx_q->page_pool, buf->page);
+ buf->page = NULL;
+ } else {
+ unsigned int buf_len = len - prev_len;
- stmmac_rx_vlan(priv->dev, skb);
+ if (likely(status & rx_not_ls))
+ buf_len = priv->dma_buf_sz;
- skb->protocol = eth_type_trans(skb, priv->dev);
+ dma_sync_single_for_cpu(priv->device, buf->addr,
+ buf_len, DMA_FROM_DEVICE);
+ skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
+ buf->page, 0, buf_len,
+ priv->dma_buf_sz);
- if (unlikely(!coe))
- skb_checksum_none_assert(skb);
- else
- skb->ip_summed = CHECKSUM_UNNECESSARY;
+ /* Data payload appended into SKB */
+ page_pool_release_page(rx_q->page_pool, buf->page);
+ buf->page = NULL;
+ }
- napi_gro_receive(&ch->rx_napi, skb);
+ if (sec_len > 0) {
+ dma_sync_single_for_cpu(priv->device, buf->sec_addr,
+ sec_len, DMA_FROM_DEVICE);
+ skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
+ buf->sec_page, 0, sec_len,
+ priv->dma_buf_sz);
- /* Data payload copied into SKB, page ready for recycle */
- page_pool_recycle_direct(rx_q->page_pool, buf->page);
- buf->page = NULL;
+ len += sec_len;
- priv->dev->stats.rx_packets++;
- priv->dev->stats.rx_bytes += frame_len;
+ /* Data payload appended into SKB */
+ page_pool_release_page(rx_q->page_pool, buf->sec_page);
+ buf->sec_page = NULL;
}
+
+ if (likely(status & rx_not_ls))
+ goto read_again;
+
+ /* Got entire packet into SKB. Finish it. */
+
+ stmmac_get_rx_hwtstamp(priv, p, np, skb);
+ stmmac_rx_vlan(priv->dev, skb);
+ skb->protocol = eth_type_trans(skb, priv->dev);
+
+ if (unlikely(!coe))
+ skb_checksum_none_assert(skb);
+ else
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+
+ if (!stmmac_get_rx_hash(priv, p, &hash, &hash_type))
+ skb_set_hash(skb, hash, hash_type);
+
+ skb_record_rx_queue(skb, queue);
+ napi_gro_receive(&ch->rx_napi, skb);
+
+ priv->dev->stats.rx_packets++;
+ priv->dev->stats.rx_bytes += len;
+ }
+
+ if (status & rx_not_ls) {
+ rx_q->state_saved = true;
+ rx_q->state.skb = skb;
+ rx_q->state.error = error;
+ rx_q->state.len = len;
}
stmmac_rx_refill(priv, queue);
@@ -3606,6 +3776,8 @@ static int stmmac_set_features(struct net_device *netdev,
netdev_features_t features)
{
struct stmmac_priv *priv = netdev_priv(netdev);
+ bool sph_en;
+ u32 chan;
/* Keep the COE Type in case of csum is supporting */
if (features & NETIF_F_RXCSUM)
@@ -3617,6 +3789,10 @@ static int stmmac_set_features(struct net_device *netdev,
*/
stmmac_rx_ipc(priv, priv->hw);
+ sph_en = (priv->hw->rx_csum > 0) && priv->sph;
+ for (chan = 0; chan < priv->plat->rx_queues_to_use; chan++)
+ stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan);
+
return 0;
}
@@ -3962,54 +4138,102 @@ static int stmmac_dma_cap_show(struct seq_file *seq, void *v)
}
DEFINE_SHOW_ATTRIBUTE(stmmac_dma_cap);
-static int stmmac_init_fs(struct net_device *dev)
+static void stmmac_init_fs(struct net_device *dev)
{
struct stmmac_priv *priv = netdev_priv(dev);
/* Create per netdev entries */
priv->dbgfs_dir = debugfs_create_dir(dev->name, stmmac_fs_dir);
- if (!priv->dbgfs_dir || IS_ERR(priv->dbgfs_dir)) {
- netdev_err(priv->dev, "ERROR failed to create debugfs directory\n");
+ /* Entry to report DMA RX/TX rings */
+ debugfs_create_file("descriptors_status", 0444, priv->dbgfs_dir, dev,
+ &stmmac_rings_status_fops);
- return -ENOMEM;
+ /* Entry to report the DMA HW features */
+ debugfs_create_file("dma_cap", 0444, priv->dbgfs_dir, dev,
+ &stmmac_dma_cap_fops);
+}
+
+static void stmmac_exit_fs(struct net_device *dev)
+{
+ struct stmmac_priv *priv = netdev_priv(dev);
+
+ debugfs_remove_recursive(priv->dbgfs_dir);
+}
+#endif /* CONFIG_DEBUG_FS */
+
+static u32 stmmac_vid_crc32_le(__le16 vid_le)
+{
+ unsigned char *data = (unsigned char *)&vid_le;
+ unsigned char data_byte = 0;
+ u32 crc = ~0x0;
+ u32 temp = 0;
+ int i, bits;
+
+ bits = get_bitmask_order(VLAN_VID_MASK);
+ for (i = 0; i < bits; i++) {
+ if ((i % 8) == 0)
+ data_byte = data[i / 8];
+
+ temp = ((crc & 1) ^ data_byte) & 1;
+ crc >>= 1;
+ data_byte >>= 1;
+
+ if (temp)
+ crc ^= 0xedb88320;
}
- /* Entry to report DMA RX/TX rings */
- priv->dbgfs_rings_status =
- debugfs_create_file("descriptors_status", 0444,
- priv->dbgfs_dir, dev,
- &stmmac_rings_status_fops);
+ return crc;
+}
- if (!priv->dbgfs_rings_status || IS_ERR(priv->dbgfs_rings_status)) {
- netdev_err(priv->dev, "ERROR creating stmmac ring debugfs file\n");
- debugfs_remove_recursive(priv->dbgfs_dir);
+static int stmmac_vlan_update(struct stmmac_priv *priv, bool is_double)
+{
+ u32 crc, hash = 0;
+ u16 vid;
- return -ENOMEM;
+ for_each_set_bit(vid, priv->active_vlans, VLAN_N_VID) {
+ __le16 vid_le = cpu_to_le16(vid);
+ crc = bitrev32(~stmmac_vid_crc32_le(vid_le)) >> 28;
+ hash |= (1 << crc);
}
- /* Entry to report the DMA HW features */
- priv->dbgfs_dma_cap = debugfs_create_file("dma_cap", 0444,
- priv->dbgfs_dir,
- dev, &stmmac_dma_cap_fops);
+ return stmmac_update_vlan_hash(priv, priv->hw, hash, is_double);
+}
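+/* Example (illustrative): bitrev32(~crc) >> 28 keeps the upper 4 bits of
+ * the bit-reversed CRC, i.e. an index in 0..15, so "hash" ends up as the
+ * 16-bit hash-table bitmap the MAC matches received VLAN tags against.
+ */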
+
+static int stmmac_vlan_rx_add_vid(struct net_device *ndev, __be16 proto, u16 vid)
+{
+ struct stmmac_priv *priv = netdev_priv(ndev);
+ bool is_double = false;
+ int ret;
- if (!priv->dbgfs_dma_cap || IS_ERR(priv->dbgfs_dma_cap)) {
- netdev_err(priv->dev, "ERROR creating stmmac MMC debugfs file\n");
- debugfs_remove_recursive(priv->dbgfs_dir);
+ if (!priv->dma_cap.vlhash)
+ return -EOPNOTSUPP;
+ if (be16_to_cpu(proto) == ETH_P_8021AD)
+ is_double = true;
- return -ENOMEM;
+ set_bit(vid, priv->active_vlans);
+ ret = stmmac_vlan_update(priv, is_double);
+ if (ret) {
+ clear_bit(vid, priv->active_vlans);
+ return ret;
}
- return 0;
+ return ret;
}
-static void stmmac_exit_fs(struct net_device *dev)
+static int stmmac_vlan_rx_kill_vid(struct net_device *ndev, __be16 proto, u16 vid)
{
- struct stmmac_priv *priv = netdev_priv(dev);
+ struct stmmac_priv *priv = netdev_priv(ndev);
+ bool is_double = false;
- debugfs_remove_recursive(priv->dbgfs_dir);
+ if (!priv->dma_cap.vlhash)
+ return -EOPNOTSUPP;
+ if (be16_to_cpu(proto) == ETH_P_8021AD)
+ is_double = true;
+
+ clear_bit(vid, priv->active_vlans);
+ return stmmac_vlan_update(priv, is_double);
}
-#endif /* CONFIG_DEBUG_FS */
static const struct net_device_ops stmmac_netdev_ops = {
.ndo_open = stmmac_open,
@@ -4027,6 +4251,8 @@ static const struct net_device_ops stmmac_netdev_ops = {
.ndo_poll_controller = stmmac_poll_controller,
#endif
.ndo_set_mac_address = stmmac_set_mac_address,
+ .ndo_vlan_rx_add_vid = stmmac_vlan_rx_add_vid,
+ .ndo_vlan_rx_kill_vid = stmmac_vlan_rx_kill_vid,
};
static void stmmac_reset_subtask(struct stmmac_priv *priv)
@@ -4175,8 +4401,8 @@ int stmmac_dvr_probe(struct device *device,
{
struct net_device *ndev = NULL;
struct stmmac_priv *priv;
- u32 queue, maxq;
- int ret = 0;
+ u32 queue, rxq, maxq;
+ int i, ret = 0;
ndev = devm_alloc_etherdev_mqs(device, sizeof(struct stmmac_priv),
MTL_MAX_TX_QUEUES, MTL_MAX_RX_QUEUES);
@@ -4259,6 +4485,12 @@ int stmmac_dvr_probe(struct device *device,
dev_info(priv->device, "TSO feature enabled\n");
}
+ if (priv->dma_cap.sphen) {
+ ndev->hw_features |= NETIF_F_GRO;
+ priv->sph = true;
+ dev_info(priv->device, "SPH feature enabled\n");
+ }
+
if (priv->dma_cap.addr64) {
ret = dma_set_mask_and_coherent(device,
DMA_BIT_MASK(priv->dma_cap.addr64));
@@ -4281,9 +4513,27 @@ int stmmac_dvr_probe(struct device *device,
#ifdef STMMAC_VLAN_TAG_USED
/* Both mac100 and gmac support receive VLAN tag detection */
ndev->features |= NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX;
+ if (priv->dma_cap.vlhash) {
+ ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
+ ndev->features |= NETIF_F_HW_VLAN_STAG_FILTER;
+ }
+ if (priv->dma_cap.vlins) {
+ ndev->features |= NETIF_F_HW_VLAN_CTAG_TX;
+ if (priv->dma_cap.dvlan)
+ ndev->features |= NETIF_F_HW_VLAN_STAG_TX;
+ }
#endif
priv->msg_enable = netif_msg_init(debug, default_msg_level);
+ /* Initialize RSS */
+ rxq = priv->plat->rx_queues_to_use;
+ netdev_rss_key_fill(priv->rss.key, sizeof(priv->rss.key));
+ for (i = 0; i < ARRAY_SIZE(priv->rss.table); i++)
+ priv->rss.table[i] = ethtool_rxfh_indir_default(i, rxq);
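+ /* ethtool_rxfh_indir_default(i, rxq) fills the table round-robin,
+ * e.g. rxq = 4 yields 0, 1, 2, 3, 0, 1, ... so flows are spread
+ * evenly across the RX queues by default.
+ */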
+
+ if (priv->dma_cap.rssen && priv->plat->rss_en)
+ ndev->features |= NETIF_F_RXHASH;
+
/* MTU range: 46 - hw-specific max */
ndev->min_mtu = ETH_ZLEN - ETH_HLEN;
if ((priv->plat->enh_desc) || (priv->synopsys_id >= DWMAC_CORE_4_00))
@@ -4368,10 +4618,7 @@ int stmmac_dvr_probe(struct device *device,
}
#ifdef CONFIG_DEBUG_FS
- ret = stmmac_init_fs(ndev);
- if (ret < 0)
- netdev_warn(priv->dev, "%s: failed debugFS registration\n",
- __func__);
+ stmmac_init_fs(ndev);
#endif
return ret;
@@ -4617,16 +4864,8 @@ static int __init stmmac_init(void)
{
#ifdef CONFIG_DEBUG_FS
/* Create debugfs main directory if it doesn't exist yet */
- if (!stmmac_fs_dir) {
+ if (!stmmac_fs_dir)
stmmac_fs_dir = debugfs_create_dir(STMMAC_RESOURCE_NAME, NULL);
-
- if (!stmmac_fs_dir || IS_ERR(stmmac_fs_dir)) {
- pr_err("ERROR %s, debugfs create directory failed\n",
- STMMAC_RESOURCE_NAME);
-
- return -ENOMEM;
- }
- }
#endif
return 0;
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
index 4304c1abc5d1..40c42637ad75 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
@@ -348,7 +348,9 @@ int stmmac_mdio_register(struct net_device *ndev)
max_addr = PHY_MAX_ADDR;
}
- new_bus->reset = &stmmac_mdio_reset;
+ if (mdio_bus_data->needs_reset)
+ new_bus->reset = &stmmac_mdio_reset;
+
snprintf(new_bus->id, MII_BUS_ID_SIZE, "%s-%x",
new_bus->name, priv->plat->bus_id);
new_bus->priv = ndev;
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
index 86f9c07a38cf..20906287b6d4 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
@@ -9,6 +9,7 @@
Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
*******************************************************************************/
+#include <linux/clk-provider.h>
#include <linux/pci.h>
#include <linux/dmi.h>
@@ -63,6 +64,7 @@ static void common_default_data(struct plat_stmmacenet_data *plat)
plat->has_gmac = 1;
plat->force_sf_dma_mode = 1;
+ plat->mdio_bus_data->needs_reset = true;
plat->mdio_bus_data->phy_mask = 0;
/* Set default value for multicast hash bins */
@@ -107,6 +109,166 @@ static const struct stmmac_pci_info stmmac_pci_info = {
.setup = stmmac_default_data,
};
+static int intel_mgbe_common_data(struct pci_dev *pdev,
+ struct plat_stmmacenet_data *plat)
+{
+ int i;
+
+ plat->clk_csr = 5;
+ plat->has_gmac = 0;
+ plat->has_gmac4 = 1;
+ plat->force_sf_dma_mode = 0;
+ plat->tso_en = 1;
+
+ plat->rx_sched_algorithm = MTL_RX_ALGORITHM_SP;
+
+ for (i = 0; i < plat->rx_queues_to_use; i++) {
+ plat->rx_queues_cfg[i].mode_to_use = MTL_QUEUE_DCB;
+ plat->rx_queues_cfg[i].chan = i;
+
+ /* Disable Priority config by default */
+ plat->rx_queues_cfg[i].use_prio = false;
+
+ /* Disable RX queues routing by default */
+ plat->rx_queues_cfg[i].pkt_route = 0x0;
+ }
+
+ for (i = 0; i < plat->tx_queues_to_use; i++) {
+ plat->tx_queues_cfg[i].mode_to_use = MTL_QUEUE_DCB;
+
+ /* Disable Priority config by default */
+ plat->tx_queues_cfg[i].use_prio = false;
+ }
+
+ /* FIFO size is 4096 bytes per TX/RX queue */
+ plat->tx_fifo_size = plat->tx_queues_to_use * 4096;
+ plat->rx_fifo_size = plat->rx_queues_to_use * 4096;
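+ /* e.g. 8 queues -> 8 * 4096 = 32768 byte FIFOs per direction */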
+
+ plat->tx_sched_algorithm = MTL_TX_ALGORITHM_WRR;
+ plat->tx_queues_cfg[0].weight = 0x09;
+ plat->tx_queues_cfg[1].weight = 0x0A;
+ plat->tx_queues_cfg[2].weight = 0x0B;
+ plat->tx_queues_cfg[3].weight = 0x0C;
+ plat->tx_queues_cfg[4].weight = 0x0D;
+ plat->tx_queues_cfg[5].weight = 0x0E;
+ plat->tx_queues_cfg[6].weight = 0x0F;
+ plat->tx_queues_cfg[7].weight = 0x10;
+
+ plat->mdio_bus_data->phy_mask = 0;
+
+ plat->dma_cfg->pbl = 32;
+ plat->dma_cfg->pblx8 = true;
+ plat->dma_cfg->fixed_burst = 0;
+ plat->dma_cfg->mixed_burst = 0;
+ plat->dma_cfg->aal = 0;
+
+ plat->axi = devm_kzalloc(&pdev->dev, sizeof(*plat->axi),
+ GFP_KERNEL);
+ if (!plat->axi)
+ return -ENOMEM;
+
+ plat->axi->axi_lpi_en = 0;
+ plat->axi->axi_xit_frm = 0;
+ plat->axi->axi_wr_osr_lmt = 1;
+ plat->axi->axi_rd_osr_lmt = 1;
+ plat->axi->axi_blen[0] = 4;
+ plat->axi->axi_blen[1] = 8;
+ plat->axi->axi_blen[2] = 16;
+
+ plat->ptp_max_adj = plat->clk_ptp_rate;
+
+ /* Set system clock */
+ plat->stmmac_clk = clk_register_fixed_rate(&pdev->dev,
+ "stmmac-clk", NULL, 0,
+ plat->clk_ptp_rate);
+
+ if (IS_ERR(plat->stmmac_clk)) {
+ dev_warn(&pdev->dev, "Fail to register stmmac-clk\n");
+ plat->stmmac_clk = NULL;
+ }
+ clk_prepare_enable(plat->stmmac_clk);
+
+ /* Set default value for multicast hash bins */
+ plat->multicast_filter_bins = HASH_TABLE_SIZE;
+
+ /* Set default value for unicast filter entries */
+ plat->unicast_filter_entries = 1;
+
+ /* Set the maxmtu to a default of JUMBO_LEN */
+ plat->maxmtu = JUMBO_LEN;
+
+ return 0;
+}
+
+static int ehl_common_data(struct pci_dev *pdev,
+ struct plat_stmmacenet_data *plat)
+{
+ int ret;
+
+ plat->rx_queues_to_use = 8;
+ plat->tx_queues_to_use = 8;
+ plat->clk_ptp_rate = 200000000;
+ ret = intel_mgbe_common_data(pdev, plat);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int ehl_sgmii_data(struct pci_dev *pdev,
+ struct plat_stmmacenet_data *plat)
+{
+ plat->bus_id = 1;
+ plat->phy_addr = 0;
+ plat->interface = PHY_INTERFACE_MODE_SGMII;
+ return ehl_common_data(pdev, plat);
+}
+
+static struct stmmac_pci_info ehl_sgmii1g_pci_info = {
+ .setup = ehl_sgmii_data,
+};
+
+static int ehl_rgmii_data(struct pci_dev *pdev,
+ struct plat_stmmacenet_data *plat)
+{
+ plat->bus_id = 1;
+ plat->phy_addr = 0;
+ plat->interface = PHY_INTERFACE_MODE_RGMII;
+ return ehl_common_data(pdev, plat);
+}
+
+static struct stmmac_pci_info ehl_rgmii1g_pci_info = {
+ .setup = ehl_rgmii_data,
+};
+
+static int tgl_common_data(struct pci_dev *pdev,
+ struct plat_stmmacenet_data *plat)
+{
+ int ret;
+
+ plat->rx_queues_to_use = 6;
+ plat->tx_queues_to_use = 4;
+ plat->clk_ptp_rate = 200000000;
+ ret = intel_mgbe_common_data(pdev, plat);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int tgl_sgmii_data(struct pci_dev *pdev,
+ struct plat_stmmacenet_data *plat)
+{
+ plat->bus_id = 1;
+ plat->phy_addr = 0;
+ plat->interface = PHY_INTERFACE_MODE_SGMII;
+ return tgl_common_data(pdev, plat);
+}
+
+static struct stmmac_pci_info tgl_sgmii1g_pci_info = {
+ .setup = tgl_sgmii_data,
+};
+
static const struct stmmac_pci_func_data galileo_stmmac_func_data[] = {
{
.func = 6,
@@ -292,10 +454,15 @@ static int stmmac_pci_probe(struct pci_dev *pdev,
*/
static void stmmac_pci_remove(struct pci_dev *pdev)
{
+ struct net_device *ndev = dev_get_drvdata(&pdev->dev);
+ struct stmmac_priv *priv = netdev_priv(ndev);
int i;
stmmac_dvr_remove(&pdev->dev);
+ if (priv->plat->stmmac_clk)
+ clk_unregister_fixed_rate(priv->plat->stmmac_clk);
+
for (i = 0; i <= PCI_STD_RESOURCE_END; i++) {
if (pci_resource_len(pdev, i) == 0)
continue;
@@ -348,6 +515,9 @@ static SIMPLE_DEV_PM_OPS(stmmac_pm_ops, stmmac_pci_suspend, stmmac_pci_resume);
#define STMMAC_QUARK_ID 0x0937
#define STMMAC_DEVICE_ID 0x1108
+#define STMMAC_EHL_RGMII1G_ID 0x4b30
+#define STMMAC_EHL_SGMII1G_ID 0x4b31
+#define STMMAC_TGL_SGMII1G_ID 0xa0ac
#define STMMAC_DEVICE(vendor_id, dev_id, info) { \
PCI_VDEVICE(vendor_id, dev_id), \
@@ -358,6 +528,9 @@ static const struct pci_device_id stmmac_id_table[] = {
STMMAC_DEVICE(STMMAC, STMMAC_DEVICE_ID, stmmac_pci_info),
STMMAC_DEVICE(STMICRO, PCI_DEVICE_ID_STMICRO_MAC, stmmac_pci_info),
STMMAC_DEVICE(INTEL, STMMAC_QUARK_ID, quark_pci_info),
+ STMMAC_DEVICE(INTEL, STMMAC_EHL_RGMII1G_ID, ehl_rgmii1g_pci_info),
+ STMMAC_DEVICE(INTEL, STMMAC_EHL_SGMII1G_ID, ehl_sgmii1g_pci_info),
+ STMMAC_DEVICE(INTEL, STMMAC_TGL_SGMII1G_ID, tgl_sgmii1g_pci_info),
{}
};
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
index 154daf4d1072..eaf8f08f2e91 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
@@ -342,10 +342,16 @@ static int stmmac_dt_phy(struct plat_stmmacenet_data *plat,
mdio = true;
}
- if (mdio)
+ if (mdio) {
plat->mdio_bus_data =
devm_kzalloc(dev, sizeof(struct stmmac_mdio_bus_data),
GFP_KERNEL);
+ if (!plat->mdio_bus_data)
+ return -ENOMEM;
+
+ plat->mdio_bus_data->needs_reset = true;
+ }
+
return 0;
}
@@ -522,13 +528,15 @@ stmmac_probe_config_dt(struct platform_device *pdev, const char **mac)
}
/* clock setup */
- plat->stmmac_clk = devm_clk_get(&pdev->dev,
- STMMAC_RESOURCE_NAME);
- if (IS_ERR(plat->stmmac_clk)) {
- dev_warn(&pdev->dev, "Cannot get CSR clock\n");
- plat->stmmac_clk = NULL;
+ if (!of_device_is_compatible(np, "snps,dwc-qos-ethernet-4.10")) {
+ plat->stmmac_clk = devm_clk_get(&pdev->dev,
+ STMMAC_RESOURCE_NAME);
+ if (IS_ERR(plat->stmmac_clk)) {
+ dev_warn(&pdev->dev, "Cannot get CSR clock\n");
+ plat->stmmac_clk = NULL;
+ }
+ clk_prepare_enable(plat->stmmac_clk);
}
- clk_prepare_enable(plat->stmmac_clk);
plat->pclk = devm_clk_get(&pdev->dev, "pclk");
if (IS_ERR(plat->pclk)) {
@@ -609,13 +617,8 @@ int stmmac_get_platform_resources(struct platform_device *pdev,
* probe if needed before we went too far with resource allocation.
*/
stmmac_res->irq = platform_get_irq_byname(pdev, "macirq");
- if (stmmac_res->irq < 0) {
- if (stmmac_res->irq != -EPROBE_DEFER) {
- dev_err(&pdev->dev,
- "MAC IRQ configuration information not found\n");
- }
+ if (stmmac_res->irq < 0)
return stmmac_res->irq;
- }
/* On some platforms e.g. SPEAr the wake up irq differs from the mac irq
* The external wake up irq can be passed through the platform code
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c
index c48224973a37..173493db038c 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c
@@ -194,6 +194,9 @@ void stmmac_ptp_register(struct stmmac_priv *priv)
priv->pps[i].available = true;
}
+ if (priv->plat->ptp_max_adj)
+ stmmac_ptp_clock_ops.max_adj = priv->plat->ptp_max_adj;
+
stmmac_ptp_clock_ops.n_per_out = priv->dma_cap.pps_out_num;
spin_lock_init(&priv->ptp_lock);
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c
index a97b1ea76438..ecc8602c6799 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c
@@ -11,8 +11,10 @@
#include <linux/ip.h>
#include <linux/phy.h>
#include <linux/udp.h>
+#include <net/pkt_cls.h>
#include <net/tcp.h>
#include <net/udp.h>
+#include <net/tc_act/tc_gact.h>
#include "stmmac.h"
struct stmmachdr {
@@ -43,6 +45,7 @@ struct stmmac_packet_attrs {
int size;
int remove_sa;
u8 id;
+ int sarc;
};
static u8 stmmac_test_next_id;
@@ -228,8 +231,11 @@ static int stmmac_test_loopback_validate(struct sk_buff *skb,
if (!ether_addr_equal(ehdr->h_dest, tpriv->packet->dst))
goto out;
}
- if (tpriv->packet->src) {
- if (!ether_addr_equal(ehdr->h_source, orig_ndev->dev_addr))
+ if (tpriv->packet->sarc) {
+ if (!ether_addr_equal(ehdr->h_source, ehdr->h_dest))
+ goto out;
+ } else if (tpriv->packet->src) {
+ if (!ether_addr_equal(ehdr->h_source, tpriv->packet->src))
goto out;
}
@@ -290,7 +296,9 @@ static int __stmmac_test_loopback(struct stmmac_priv *priv,
tpriv->pt.dev = priv->dev;
tpriv->pt.af_packet_priv = tpriv;
tpriv->packet = attr;
- dev_add_pack(&tpriv->pt);
+
+ if (!attr->dont_wait)
+ dev_add_pack(&tpriv->pt);
skb = stmmac_test_get_udp_skb(priv, attr);
if (!skb) {
@@ -313,7 +321,8 @@ static int __stmmac_test_loopback(struct stmmac_priv *priv,
ret = !tpriv->ok;
cleanup:
- dev_remove_pack(&tpriv->pt);
+ if (!attr->dont_wait)
+ dev_remove_pack(&tpriv->pt);
kfree(tpriv);
return ret;
}
@@ -700,6 +709,465 @@ cleanup:
return ret;
}
+static int stmmac_test_rss(struct stmmac_priv *priv)
+{
+ struct stmmac_packet_attrs attr = { };
+
+ if (!priv->dma_cap.rssen || !priv->rss.enable)
+ return -EOPNOTSUPP;
+
+ attr.dst = priv->dev->dev_addr;
+ attr.exp_hash = true;
+ attr.sport = 0x321;
+ attr.dport = 0x123;
+
+ return __stmmac_test_loopback(priv, &attr);
+}
+
+static int stmmac_test_vlan_validate(struct sk_buff *skb,
+ struct net_device *ndev,
+ struct packet_type *pt,
+ struct net_device *orig_ndev)
+{
+ struct stmmac_test_priv *tpriv = pt->af_packet_priv;
+ struct stmmachdr *shdr;
+ struct ethhdr *ehdr;
+ struct udphdr *uhdr;
+ struct iphdr *ihdr;
+ u16 proto;
+
+ proto = tpriv->double_vlan ? ETH_P_8021AD : ETH_P_8021Q;
+
+ skb = skb_unshare(skb, GFP_ATOMIC);
+ if (!skb)
+ goto out;
+
+ if (skb_linearize(skb))
+ goto out;
+ if (skb_headlen(skb) < (STMMAC_TEST_PKT_SIZE - ETH_HLEN))
+ goto out;
+ if (tpriv->vlan_id) {
+ if (skb->vlan_proto != htons(proto))
+ goto out;
+ if (skb->vlan_tci != tpriv->vlan_id)
+ goto out;
+ }
+
+ ehdr = (struct ethhdr *)skb_mac_header(skb);
+ if (!ether_addr_equal(ehdr->h_dest, tpriv->packet->dst))
+ goto out;
+
+ ihdr = ip_hdr(skb);
+ if (tpriv->double_vlan)
+ ihdr = (struct iphdr *)(skb_network_header(skb) + 4);
+ if (ihdr->protocol != IPPROTO_UDP)
+ goto out;
+
+ uhdr = (struct udphdr *)((u8 *)ihdr + 4 * ihdr->ihl);
+ if (uhdr->dest != htons(tpriv->packet->dport))
+ goto out;
+
+ shdr = (struct stmmachdr *)((u8 *)uhdr + sizeof(*uhdr));
+ if (shdr->magic != cpu_to_be64(STMMAC_TEST_PKT_MAGIC))
+ goto out;
+
+ tpriv->ok = true;
+ complete(&tpriv->comp);
+
+out:
+ kfree_skb(skb);
+ return 0;
+}
+
+static int stmmac_test_vlanfilt(struct stmmac_priv *priv)
+{
+ struct stmmac_packet_attrs attr = { };
+ struct stmmac_test_priv *tpriv;
+ struct sk_buff *skb = NULL;
+ int ret = 0, i;
+
+ if (!priv->dma_cap.vlhash)
+ return -EOPNOTSUPP;
+
+ tpriv = kzalloc(sizeof(*tpriv), GFP_KERNEL);
+ if (!tpriv)
+ return -ENOMEM;
+
+ tpriv->ok = false;
+ init_completion(&tpriv->comp);
+
+ tpriv->pt.type = htons(ETH_P_IP);
+ tpriv->pt.func = stmmac_test_vlan_validate;
+ tpriv->pt.dev = priv->dev;
+ tpriv->pt.af_packet_priv = tpriv;
+ tpriv->packet = &attr;
+
+ /*
+ * As we use HASH filtering, false positives may appear. This is a
+ * specially chosen ID so that adjacent IDs (+4) have different
+ * HASH values.
+ */
+ tpriv->vlan_id = 0x123;
+ dev_add_pack(&tpriv->pt);
+
+ ret = vlan_vid_add(priv->dev, htons(ETH_P_8021Q), tpriv->vlan_id);
+ if (ret)
+ goto cleanup;
+
+ for (i = 0; i < 4; i++) {
+ attr.vlan = 1;
+ attr.vlan_id_out = tpriv->vlan_id + i;
+ attr.dst = priv->dev->dev_addr;
+ attr.sport = 9;
+ attr.dport = 9;
+
+ skb = stmmac_test_get_udp_skb(priv, &attr);
+ if (!skb) {
+ ret = -ENOMEM;
+ goto vlan_del;
+ }
+
+ skb_set_queue_mapping(skb, 0);
+ ret = dev_queue_xmit(skb);
+ if (ret)
+ goto vlan_del;
+
+ wait_for_completion_timeout(&tpriv->comp, STMMAC_LB_TIMEOUT);
+ ret = !tpriv->ok;
+ if (ret && !i) {
+ goto vlan_del;
+ } else if (!ret && i) {
+ ret = -1;
+ goto vlan_del;
+ } else {
+ ret = 0;
+ }
+
+ tpriv->ok = false;
+ }
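+ /* Iteration 0 sends the registered VID and must be received;
+ * iterations 1-3 send neighbouring VIDs that hash differently and
+ * must be dropped by the filter, hence the inverted checks above.
+ */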
+
+vlan_del:
+ vlan_vid_del(priv->dev, htons(ETH_P_8021Q), tpriv->vlan_id);
+cleanup:
+ dev_remove_pack(&tpriv->pt);
+ kfree(tpriv);
+ return ret;
+}
+
+static int stmmac_test_dvlanfilt(struct stmmac_priv *priv)
+{
+ struct stmmac_packet_attrs attr = { };
+ struct stmmac_test_priv *tpriv;
+ struct sk_buff *skb = NULL;
+ int ret = 0, i;
+
+ if (!priv->dma_cap.vlhash)
+ return -EOPNOTSUPP;
+
+ tpriv = kzalloc(sizeof(*tpriv), GFP_KERNEL);
+ if (!tpriv)
+ return -ENOMEM;
+
+ tpriv->ok = false;
+ tpriv->double_vlan = true;
+ init_completion(&tpriv->comp);
+
+ tpriv->pt.type = htons(ETH_P_8021Q);
+ tpriv->pt.func = stmmac_test_vlan_validate;
+ tpriv->pt.dev = priv->dev;
+ tpriv->pt.af_packet_priv = tpriv;
+ tpriv->packet = &attr;
+
+ /*
+ * As we use HASH filtering, false positives may appear. This is a
+ * specially chosen ID so that adjacent IDs (+4) have different
+ * HASH values.
+ */
+ tpriv->vlan_id = 0x123;
+ dev_add_pack(&tpriv->pt);
+
+ ret = vlan_vid_add(priv->dev, htons(ETH_P_8021AD), tpriv->vlan_id);
+ if (ret)
+ goto cleanup;
+
+ for (i = 0; i < 4; i++) {
+ attr.vlan = 2;
+ attr.vlan_id_out = tpriv->vlan_id + i;
+ attr.dst = priv->dev->dev_addr;
+ attr.sport = 9;
+ attr.dport = 9;
+
+ skb = stmmac_test_get_udp_skb(priv, &attr);
+ if (!skb) {
+ ret = -ENOMEM;
+ goto vlan_del;
+ }
+
+ skb_set_queue_mapping(skb, 0);
+ ret = dev_queue_xmit(skb);
+ if (ret)
+ goto vlan_del;
+
+ wait_for_completion_timeout(&tpriv->comp, STMMAC_LB_TIMEOUT);
+ ret = !tpriv->ok;
+ if (ret && !i) {
+ goto vlan_del;
+ } else if (!ret && i) {
+ ret = -1;
+ goto vlan_del;
+ } else {
+ ret = 0;
+ }
+
+ tpriv->ok = false;
+ }
+
+vlan_del:
+ vlan_vid_del(priv->dev, htons(ETH_P_8021AD), tpriv->vlan_id);
+cleanup:
+ dev_remove_pack(&tpriv->pt);
+ kfree(tpriv);
+ return ret;
+}
+
+#ifdef CONFIG_NET_CLS_ACT
+static int stmmac_test_rxp(struct stmmac_priv *priv)
+{
+ unsigned char addr[ETH_ALEN] = {0xde, 0xad, 0xbe, 0xef, 0x00, 0x00};
+ struct tc_cls_u32_offload cls_u32 = { };
+ struct stmmac_packet_attrs attr = { };
+ struct tc_action **actions, *act;
+ struct tc_u32_sel *sel;
+ struct tcf_exts *exts;
+ int ret, i, nk = 1;
+
+ if (!tc_can_offload(priv->dev))
+ return -EOPNOTSUPP;
+ if (!priv->dma_cap.frpsel)
+ return -EOPNOTSUPP;
+
+ sel = kzalloc(sizeof(*sel) + nk * sizeof(struct tc_u32_key), GFP_KERNEL);
+ if (!sel)
+ return -ENOMEM;
+
+ exts = kzalloc(sizeof(*exts), GFP_KERNEL);
+ if (!exts) {
+ ret = -ENOMEM;
+ goto cleanup_sel;
+ }
+
+ actions = kzalloc(nk * sizeof(*actions), GFP_KERNEL);
+ if (!actions) {
+ ret = -ENOMEM;
+ goto cleanup_exts;
+ }
+
+ act = kzalloc(nk * sizeof(*act), GFP_KERNEL);
+ if (!act) {
+ ret = -ENOMEM;
+ goto cleanup_actions;
+ }
+
+ cls_u32.command = TC_CLSU32_NEW_KNODE;
+ cls_u32.common.chain_index = 0;
+ cls_u32.common.protocol = htons(ETH_P_ALL);
+ cls_u32.knode.exts = exts;
+ cls_u32.knode.sel = sel;
+ cls_u32.knode.handle = 0x123;
+
+ exts->nr_actions = nk;
+ exts->actions = actions;
+ for (i = 0; i < nk; i++) {
+ struct tcf_gact *gact = to_gact(&act[i]);
+
+ actions[i] = &act[i];
+ gact->tcf_action = TC_ACT_SHOT;
+ }
+
+ sel->nkeys = nk;
+ sel->offshift = 0;
+ sel->keys[0].off = 6;
+ sel->keys[0].val = htonl(0xdeadbeef);
+ sel->keys[0].mask = ~0x0;
+
+ ret = stmmac_tc_setup_cls_u32(priv, priv, &cls_u32);
+ if (ret)
+ goto cleanup_act;
+
+ attr.dst = priv->dev->dev_addr;
+ attr.src = addr;
+
+ ret = __stmmac_test_loopback(priv, &attr);
+ ret = !ret; /* Shall NOT receive packet */
+
+ cls_u32.command = TC_CLSU32_DELETE_KNODE;
+ stmmac_tc_setup_cls_u32(priv, priv, &cls_u32);
+
+cleanup_act:
+ kfree(act);
+cleanup_actions:
+ kfree(actions);
+cleanup_exts:
+ kfree(exts);
+cleanup_sel:
+ kfree(sel);
+ return ret;
+}
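+/* The rule built above drops frames whose four bytes at offset 6 (the
+ * start of the source MAC address) match 0xdeadbeef, i.e. the test frame
+ * itself, so the test passes only when the loopback packet is NOT seen.
+ */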
+#else
+static int stmmac_test_rxp(struct stmmac_priv *priv)
+{
+ return -EOPNOTSUPP;
+}
+#endif
+
+static int stmmac_test_desc_sai(struct stmmac_priv *priv)
+{
+ unsigned char src[ETH_ALEN] = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00};
+ struct stmmac_packet_attrs attr = { };
+ int ret;
+
+ attr.remove_sa = true;
+ attr.sarc = true;
+ attr.src = src;
+ attr.dst = priv->dev->dev_addr;
+
+ priv->sarc_type = 0x1;
+
+ ret = __stmmac_test_loopback(priv, &attr);
+
+ priv->sarc_type = 0x0;
+ return ret;
+}
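+/* The sarc_type encodings are inferred from the four tests here: in the
+ * descriptor SARC field 0x1 inserts the source address and 0x2 replaces
+ * it, while the register-based variants below use 0x2 (insert) and
+ * 0x3 (replace) for the same pair of operations.
+ */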
+
+static int stmmac_test_desc_sar(struct stmmac_priv *priv)
+{
+ unsigned char src[ETH_ALEN] = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00};
+ struct stmmac_packet_attrs attr = { };
+ int ret;
+
+ attr.sarc = true;
+ attr.src = src;
+ attr.dst = priv->dev->dev_addr;
+
+ priv->sarc_type = 0x2;
+
+ ret = __stmmac_test_loopback(priv, &attr);
+
+ priv->sarc_type = 0x0;
+ return ret;
+}
+
+static int stmmac_test_reg_sai(struct stmmac_priv *priv)
+{
+ unsigned char src[ETH_ALEN] = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00};
+ struct stmmac_packet_attrs attr = { };
+ int ret;
+
+ attr.remove_sa = true;
+ attr.sarc = true;
+ attr.src = src;
+ attr.dst = priv->dev->dev_addr;
+
+ if (stmmac_sarc_configure(priv, priv->ioaddr, 0x2))
+ return -EOPNOTSUPP;
+
+ ret = __stmmac_test_loopback(priv, &attr);
+
+ stmmac_sarc_configure(priv, priv->ioaddr, 0x0);
+ return ret;
+}
+
+static int stmmac_test_reg_sar(struct stmmac_priv *priv)
+{
+ unsigned char src[ETH_ALEN] = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00};
+ struct stmmac_packet_attrs attr = { };
+ int ret;
+
+ attr.sarc = true;
+ attr.src = src;
+ attr.dst = priv->dev->dev_addr;
+
+ if (stmmac_sarc_configure(priv, priv->ioaddr, 0x3))
+ return -EOPNOTSUPP;
+
+ ret = __stmmac_test_loopback(priv, &attr);
+
+ stmmac_sarc_configure(priv, priv->ioaddr, 0x0);
+ return ret;
+}
+
+static int stmmac_test_vlanoff_common(struct stmmac_priv *priv, bool svlan)
+{
+ struct stmmac_packet_attrs attr = { };
+ struct stmmac_test_priv *tpriv;
+ struct sk_buff *skb = NULL;
+ int ret = 0;
+ u16 proto;
+
+ if (!priv->dma_cap.vlins)
+ return -EOPNOTSUPP;
+
+ tpriv = kzalloc(sizeof(*tpriv), GFP_KERNEL);
+ if (!tpriv)
+ return -ENOMEM;
+
+ proto = svlan ? ETH_P_8021AD : ETH_P_8021Q;
+
+ tpriv->ok = false;
+ tpriv->double_vlan = svlan;
+ init_completion(&tpriv->comp);
+
+ tpriv->pt.type = svlan ? htons(ETH_P_8021Q) : htons(ETH_P_IP);
+ tpriv->pt.func = stmmac_test_vlan_validate;
+ tpriv->pt.dev = priv->dev;
+ tpriv->pt.af_packet_priv = tpriv;
+ tpriv->packet = &attr;
+ tpriv->vlan_id = 0x123;
+ dev_add_pack(&tpriv->pt);
+
+ ret = vlan_vid_add(priv->dev, htons(proto), tpriv->vlan_id);
+ if (ret)
+ goto cleanup;
+
+ attr.dst = priv->dev->dev_addr;
+
+ skb = stmmac_test_get_udp_skb(priv, &attr);
+ if (!skb) {
+ ret = -ENOMEM;
+ goto vlan_del;
+ }
+
+ __vlan_hwaccel_put_tag(skb, htons(proto), tpriv->vlan_id);
+ skb->protocol = htons(proto);
+
+ skb_set_queue_mapping(skb, 0);
+ ret = dev_queue_xmit(skb);
+ if (ret)
+ goto vlan_del;
+
+ wait_for_completion_timeout(&tpriv->comp, STMMAC_LB_TIMEOUT);
+ ret = tpriv->ok ? 0 : -ETIMEDOUT;
+
+vlan_del:
+ vlan_vid_del(priv->dev, htons(proto), tpriv->vlan_id);
+cleanup:
+ dev_remove_pack(&tpriv->pt);
+ kfree(tpriv);
+ return ret;
+}
+
+static int stmmac_test_vlanoff(struct stmmac_priv *priv)
+{
+ return stmmac_test_vlanoff_common(priv, false);
+}
+
+static int stmmac_test_svlanoff(struct stmmac_priv *priv)
+{
+ if (!priv->dma_cap.dvlan)
+ return -EOPNOTSUPP;
+ return stmmac_test_vlanoff_common(priv, true);
+}
+
#define STMMAC_LOOPBACK_NONE 0
#define STMMAC_LOOPBACK_MAC 1
#define STMMAC_LOOPBACK_PHY 2
@@ -745,6 +1213,46 @@ static const struct stmmac_test {
.name = "Flow Control ",
.lb = STMMAC_LOOPBACK_PHY,
.fn = stmmac_test_flowctrl,
+ }, {
+ .name = "RSS ",
+ .lb = STMMAC_LOOPBACK_PHY,
+ .fn = stmmac_test_rss,
+ }, {
+ .name = "VLAN Filtering ",
+ .lb = STMMAC_LOOPBACK_PHY,
+ .fn = stmmac_test_vlanfilt,
+ }, {
+ .name = "Double VLAN Filtering",
+ .lb = STMMAC_LOOPBACK_PHY,
+ .fn = stmmac_test_dvlanfilt,
+ }, {
+ .name = "Flexible RX Parser ",
+ .lb = STMMAC_LOOPBACK_PHY,
+ .fn = stmmac_test_rxp,
+ }, {
+ .name = "SA Insertion (desc) ",
+ .lb = STMMAC_LOOPBACK_PHY,
+ .fn = stmmac_test_desc_sai,
+ }, {
+ .name = "SA Replacement (desc)",
+ .lb = STMMAC_LOOPBACK_PHY,
+ .fn = stmmac_test_desc_sar,
+ }, {
+ .name = "SA Insertion (reg) ",
+ .lb = STMMAC_LOOPBACK_PHY,
+ .fn = stmmac_test_reg_sai,
+ }, {
+ .name = "SA Replacement (reg)",
+ .lb = STMMAC_LOOPBACK_PHY,
+ .fn = stmmac_test_reg_sar,
+ }, {
+ .name = "VLAN TX Insertion ",
+ .lb = STMMAC_LOOPBACK_PHY,
+ .fn = stmmac_test_vlanoff,
+ }, {
+ .name = "SVLAN TX Insertion ",
+ .lb = STMMAC_LOOPBACK_PHY,
+ .fn = stmmac_test_svlanoff,
},
};
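Each of the new selftests above follows the same shape: enable a MAC feature, describe the frame to send in stmmac_packet_attrs, run a loopback, and restore the hardware state before returning. As a hedged sketch of how such a table is typically consumed from the ethtool self-test hook (illustrative only; the runner below and the array name stmmac_selftests are assumptions, not code from this patch):

	static void example_selftest_run(struct stmmac_priv *priv,
					 struct ethtool_test *etest, u64 *buf)
	{
		int i;

		for (i = 0; i < ARRAY_SIZE(stmmac_selftests); i++) {
			/* Each entry returns 0 on pass, -EOPNOTSUPP when the
			 * feature is absent, or a negative error on failure.
			 */
			buf[i] = stmmac_selftests[i].fn(priv);
			if (buf[i])
				etest->flags |= ETH_TEST_FL_FAILED;
		}
	}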
diff --git a/drivers/net/ethernet/sun/cassini.c b/drivers/net/ethernet/sun/cassini.c
index 6fc05c106afc..c91876f8c536 100644
--- a/drivers/net/ethernet/sun/cassini.c
+++ b/drivers/net/ethernet/sun/cassini.c
@@ -2034,7 +2034,7 @@ static int cas_rx_process_pkt(struct cas *cp, struct cas_rx_comp *rxc,
__skb_frag_set_page(frag, page->buffer);
__skb_frag_ref(frag);
- frag->page_offset = off;
+ skb_frag_off_set(frag, off);
skb_frag_size_set(frag, hlen - swivel);
/* any more data? */
@@ -2058,7 +2058,7 @@ static int cas_rx_process_pkt(struct cas *cp, struct cas_rx_comp *rxc,
__skb_frag_set_page(frag, page->buffer);
__skb_frag_ref(frag);
- frag->page_offset = 0;
+ skb_frag_off_set(frag, 0);
skb_frag_size_set(frag, hlen);
RX_USED_ADD(page, hlen + cp->crc_size);
}
@@ -2816,7 +2816,7 @@ static inline int cas_xmit_tx_ringN(struct cas *cp, int ring,
mapping = skb_frag_dma_map(&cp->pdev->dev, fragp, 0, len,
DMA_TO_DEVICE);
- tabort = cas_calc_tabort(cp, fragp->page_offset, len);
+ tabort = cas_calc_tabort(cp, skb_frag_off(fragp), len);
if (unlikely(tabort)) {
void *addr;
@@ -2827,7 +2827,7 @@ static inline int cas_xmit_tx_ringN(struct cas *cp, int ring,
addr = cas_page_map(skb_frag_page(fragp));
memcpy(tx_tiny_buf(cp, ring, entry),
- addr + fragp->page_offset + len - tabort,
+ addr + skb_frag_off(fragp) + len - tabort,
tabort);
cas_page_unmap(addr);
mapping = tx_tiny_map(cp, ring, entry, tentry);
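This and several of the following conversions are mechanical: direct reads and writes of skb_frag_t's page_offset field become skb_frag_off()/skb_frag_off_set() calls, so the core can later change the fragment representation without touching every driver again. For reference, a sketch of what the accessors amount to, modelled on include/linux/skbuff.h as of this series:

	static inline unsigned int skb_frag_off(const skb_frag_t *frag)
	{
		return frag->page_offset;
	}

	static inline void skb_frag_off_set(skb_frag_t *frag, unsigned int offset)
	{
		frag->page_offset = offset;
	}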
diff --git a/drivers/net/ethernet/sun/niu.c b/drivers/net/ethernet/sun/niu.c
index 0bc5863bffeb..f5fd1f3c07cc 100644
--- a/drivers/net/ethernet/sun/niu.c
+++ b/drivers/net/ethernet/sun/niu.c
@@ -6695,7 +6695,7 @@ static netdev_tx_t niu_start_xmit(struct sk_buff *skb,
len = skb_frag_size(frag);
mapping = np->ops->map_page(np->device, skb_frag_page(frag),
- frag->page_offset, len,
+ skb_frag_off(frag), len,
DMA_TO_DEVICE);
rp->tx_buffs[prod].skb = NULL;
diff --git a/drivers/net/ethernet/sun/sunvnet_common.c b/drivers/net/ethernet/sun/sunvnet_common.c
index baa3088b475c..8b94d9ad9e2b 100644
--- a/drivers/net/ethernet/sun/sunvnet_common.c
+++ b/drivers/net/ethernet/sun/sunvnet_common.c
@@ -1088,7 +1088,7 @@ static inline int vnet_skb_map(struct ldc_channel *lp, struct sk_buff *skb,
vaddr = kmap_atomic(skb_frag_page(f));
blen = skb_frag_size(f);
blen += 8 - (blen & 7);
- err = ldc_map_single(lp, vaddr + f->page_offset,
+ err = ldc_map_single(lp, vaddr + skb_frag_off(f),
blen, cookies + nc, ncookies - nc,
map_perm);
kunmap_atomic(vaddr);
@@ -1124,7 +1124,7 @@ static inline struct sk_buff *vnet_skb_shape(struct sk_buff *skb, int ncookies)
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
skb_frag_t *f = &skb_shinfo(skb)->frags[i];
- docopy |= f->page_offset & 7;
+ docopy |= skb_frag_off(f) & 7;
}
if (((unsigned long)skb->data & 7) != VNET_PACKET_SKIP ||
skb_tailroom(skb) < pad ||
@@ -1532,8 +1532,7 @@ out_dropped:
else if (port)
del_timer(&port->clean_timer);
rcu_read_unlock();
- if (skb)
- dev_kfree_skb(skb);
+ dev_kfree_skb(skb);
vnet_free_skbs(freeskbs);
dev->stats.tx_dropped++;
return NETDEV_TX_OK;
diff --git a/drivers/net/ethernet/synopsys/dwc-xlgmac-desc.c b/drivers/net/ethernet/synopsys/dwc-xlgmac-desc.c
index 031cf9c3435a..8c4195a9a2cc 100644
--- a/drivers/net/ethernet/synopsys/dwc-xlgmac-desc.c
+++ b/drivers/net/ethernet/synopsys/dwc-xlgmac-desc.c
@@ -503,7 +503,7 @@ static int xlgmac_map_tx_skb(struct xlgmac_channel *channel,
struct xlgmac_desc_data *desc_data;
unsigned int offset, datalen, len;
struct xlgmac_pkt_info *pkt_info;
- struct skb_frag_struct *frag;
+ skb_frag_t *frag;
unsigned int tso, vlan;
dma_addr_t skb_dma;
unsigned int i;
diff --git a/drivers/net/ethernet/synopsys/dwc-xlgmac-net.c b/drivers/net/ethernet/synopsys/dwc-xlgmac-net.c
index 1f8e9601592a..a1f5a1e61040 100644
--- a/drivers/net/ethernet/synopsys/dwc-xlgmac-net.c
+++ b/drivers/net/ethernet/synopsys/dwc-xlgmac-net.c
@@ -116,7 +116,7 @@ static void xlgmac_prep_tx_pkt(struct xlgmac_pdata *pdata,
struct sk_buff *skb,
struct xlgmac_pkt_info *pkt_info)
{
- struct skb_frag_struct *frag;
+ skb_frag_t *frag;
unsigned int context_desc;
unsigned int len;
unsigned int i;
diff --git a/drivers/net/ethernet/tehuti/tehuti.c b/drivers/net/ethernet/tehuti/tehuti.c
index 5d6960fe3309..0f8a924fc60c 100644
--- a/drivers/net/ethernet/tehuti/tehuti.c
+++ b/drivers/net/ethernet/tehuti/tehuti.c
@@ -1501,7 +1501,7 @@ bdx_tx_map_skb(struct bdx_priv *priv, struct sk_buff *skb,
bdx_tx_db_inc_wptr(db);
for (i = 0; i < nr_frags; i++) {
- const struct skb_frag_struct *frag;
+ const skb_frag_t *frag;
frag = &skb_shinfo(skb)->frags[i];
db->wptr->len = skb_frag_size(frag);
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
index a46b8b2e44e1..f298d714efd6 100644
--- a/drivers/net/ethernet/ti/cpsw.c
+++ b/drivers/net/ethernet/ti/cpsw.c
@@ -2764,7 +2764,7 @@ static int cpsw_probe(struct platform_device *pdev)
struct net_device *ndev;
struct cpsw_priv *priv;
void __iomem *ss_regs;
- struct resource *res, *ss_res;
+ struct resource *ss_res;
struct gpio_descs *mode;
const struct soc_device_attribute *soc;
struct cpsw_common *cpsw;
@@ -2799,8 +2799,7 @@ static int cpsw_probe(struct platform_device *pdev)
return PTR_ERR(ss_regs);
cpsw->regs = ss_regs;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
- cpsw->wr_regs = devm_ioremap_resource(dev, res);
+ cpsw->wr_regs = devm_platform_ioremap_resource(pdev, 1);
if (IS_ERR(cpsw->wr_regs))
return PTR_ERR(cpsw->wr_regs);
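devm_platform_ioremap_resource() simply folds the platform_get_resource() + devm_ioremap_resource() pair into one call; a sketch of the helper, modelled on drivers/base/platform.c:

	void __iomem *devm_platform_ioremap_resource(struct platform_device *pdev,
						     unsigned int index)
	{
		struct resource *res;

		res = platform_get_resource(pdev, IORESOURCE_MEM, index);
		return devm_ioremap_resource(&pdev->dev, res);
	}

devm_ioremap_resource() rejects a NULL resource with -EINVAL, so the callers' error handling is unchanged.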
diff --git a/drivers/net/ethernet/ti/netcp_core.c b/drivers/net/ethernet/ti/netcp_core.c
index 642843945031..1b2702f74455 100644
--- a/drivers/net/ethernet/ti/netcp_core.c
+++ b/drivers/net/ethernet/ti/netcp_core.c
@@ -1116,7 +1116,7 @@ netcp_tx_map_skb(struct sk_buff *skb, struct netcp_intf *netcp)
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
struct page *page = skb_frag_page(frag);
- u32 page_offset = frag->page_offset;
+ u32 page_offset = skb_frag_off(frag);
u32 buf_len = skb_frag_size(frag);
dma_addr_t desc_dma;
u32 desc_dma_32;
diff --git a/drivers/net/ethernet/toshiba/spider_net.c b/drivers/net/ethernet/toshiba/spider_net.c
index 0f346761a2b2..538e70810d3d 100644
--- a/drivers/net/ethernet/toshiba/spider_net.c
+++ b/drivers/net/ethernet/toshiba/spider_net.c
@@ -2311,11 +2311,9 @@ spider_net_alloc_card(void)
{
struct net_device *netdev;
struct spider_net_card *card;
- size_t alloc_size;
- alloc_size = sizeof(struct spider_net_card) +
- (tx_descriptors + rx_descriptors) * sizeof(struct spider_net_descr);
- netdev = alloc_etherdev(alloc_size);
+ netdev = alloc_etherdev(struct_size(card, darray,
+ tx_descriptors + rx_descriptors));
if (!netdev)
return NULL;
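struct_size(card, darray, n) computes sizeof(*card) plus n times the size of one darray[] element, but unlike the open-coded arithmetic it replaces, the helper saturates to SIZE_MAX on overflow so the allocation fails cleanly instead of being undersized. A hypothetical usage example:

	struct example {
		unsigned int n;
		u64 darray[];	/* flexible array member */
	};

	struct example *ex;

	/* sizeof(*ex) + 16 * sizeof(ex->darray[0]), saturating on overflow */
	ex = kzalloc(struct_size(ex, darray, 16), GFP_KERNEL);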
diff --git a/drivers/net/ethernet/via/via-rhine.c b/drivers/net/ethernet/via/via-rhine.c
index ab55416a10fa..ed12dbd156f0 100644
--- a/drivers/net/ethernet/via/via-rhine.c
+++ b/drivers/net/ethernet/via/via-rhine.c
@@ -1127,15 +1127,13 @@ static int rhine_init_one_platform(struct platform_device *pdev)
const struct of_device_id *match;
const u32 *quirks;
int irq;
- struct resource *res;
void __iomem *ioaddr;
match = of_match_device(rhine_of_tbl, &pdev->dev);
if (!match)
return -EINVAL;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- ioaddr = devm_ioremap_resource(&pdev->dev, res);
+ ioaddr = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(ioaddr))
return PTR_ERR(ioaddr);
diff --git a/drivers/net/hamradio/baycom_epp.c b/drivers/net/hamradio/baycom_epp.c
index 9303aeb2595f..4476491b58f9 100644
--- a/drivers/net/hamradio/baycom_epp.c
+++ b/drivers/net/hamradio/baycom_epp.c
@@ -961,8 +961,7 @@ static int epp_close(struct net_device *dev)
parport_write_control(pp, 0); /* reset the adapter */
parport_release(bc->pdev);
parport_unregister_device(bc->pdev);
- if (bc->skb)
- dev_kfree_skb(bc->skb);
+ dev_kfree_skb(bc->skb);
bc->skb = NULL;
printk(KERN_INFO "%s: close epp at iobase 0x%lx irq %u\n",
bc_drvname, dev->base_addr, dev->irq);
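The dropped NULL checks here and in the drivers that follow are redundant because the skb free path already bails out on NULL. dev_kfree_skb() resolves to consume_skb(), whose first step is a sketchable helper, modelled on skb_unref() in include/linux/skbuff.h:

	static inline bool skb_unref(struct sk_buff *skb)
	{
		if (unlikely(!skb))
			return false;	/* NULL: nothing to free */
		if (likely(refcount_read(&skb->users) == 1))
			smp_rmb();
		else if (likely(!refcount_dec_and_test(&skb->users)))
			return false;	/* other references remain */

		return true;		/* last reference; really free it */
	}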
diff --git a/drivers/net/hamradio/hdlcdrv.c b/drivers/net/hamradio/hdlcdrv.c
index c6f83e0df0a3..df495b5595f5 100644
--- a/drivers/net/hamradio/hdlcdrv.c
+++ b/drivers/net/hamradio/hdlcdrv.c
@@ -475,8 +475,7 @@ static int hdlcdrv_close(struct net_device *dev)
if (s->ops && s->ops->close)
i = s->ops->close(dev);
- if (s->skb)
- dev_kfree_skb(s->skb);
+ dev_kfree_skb(s->skb);
s->skb = NULL;
s->opened = 0;
return i;
diff --git a/drivers/net/hamradio/mkiss.c b/drivers/net/hamradio/mkiss.c
index 442018ccd65e..c5bfa19ddb93 100644
--- a/drivers/net/hamradio/mkiss.c
+++ b/drivers/net/hamradio/mkiss.c
@@ -25,6 +25,7 @@
#include <linux/skbuff.h>
#include <linux/if_arp.h>
#include <linux/jiffies.h>
+#include <linux/refcount.h>
#include <net/ax25.h>
@@ -70,7 +71,7 @@ struct mkiss {
#define CRC_MODE_FLEX_TEST 3
#define CRC_MODE_SMACK_TEST 4
- atomic_t refcnt;
+ refcount_t refcnt;
struct completion dead;
};
@@ -668,7 +669,7 @@ static struct mkiss *mkiss_get(struct tty_struct *tty)
read_lock(&disc_data_lock);
ax = tty->disc_data;
if (ax)
- atomic_inc(&ax->refcnt);
+ refcount_inc(&ax->refcnt);
read_unlock(&disc_data_lock);
return ax;
@@ -676,7 +677,7 @@ static struct mkiss *mkiss_get(struct tty_struct *tty)
static void mkiss_put(struct mkiss *ax)
{
- if (atomic_dec_and_test(&ax->refcnt))
+ if (refcount_dec_and_test(&ax->refcnt))
complete(&ax->dead);
}
@@ -704,7 +705,7 @@ static int mkiss_open(struct tty_struct *tty)
ax->dev = dev;
spin_lock_init(&ax->buflock);
- atomic_set(&ax->refcnt, 1);
+ refcount_set(&ax->refcnt, 1);
init_completion(&ax->dead);
ax->tty = tty;
@@ -784,7 +785,7 @@ static void mkiss_close(struct tty_struct *tty)
* We have now ensured that nobody can start using ap from now on, but
* we have to wait for all existing users to finish.
*/
- if (!atomic_dec_and_test(&ax->refcnt))
+ if (!refcount_dec_and_test(&ax->refcnt))
wait_for_completion(&ax->dead);
/*
* Halt the transmit queue so that a new transmit cannot scribble
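refcount_t is a better fit than atomic_t for lifetime counting: increments saturate instead of wrapping and underflows WARN, turning a potential use-after-free into a loud warning. The conversion leaves the logic untouched; the general pattern, reduced to a hypothetical object (illustrative):

	struct obj {
		refcount_t refcnt;
	};

	static void obj_init(struct obj *o)
	{
		refcount_set(&o->refcnt, 1);	/* creator holds one reference */
	}

	static void obj_get(struct obj *o)
	{
		refcount_inc(&o->refcnt);	/* WARNs and saturates on overflow */
	}

	static void obj_put(struct obj *o)
	{
		if (refcount_dec_and_test(&o->refcnt))
			kfree(o);		/* last reference dropped */
	}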
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
index e8fce6d715ef..0a6cd2f1111f 100644
--- a/drivers/net/hyperv/netvsc_drv.c
+++ b/drivers/net/hyperv/netvsc_drv.c
@@ -435,7 +435,7 @@ static u32 init_page_array(void *hdr, u32 len, struct sk_buff *skb,
skb_frag_t *frag = skb_shinfo(skb)->frags + i;
slots_used += fill_pg_buf(skb_frag_page(frag),
- frag->page_offset,
+ skb_frag_off(frag),
skb_frag_size(frag), &pb[slots_used]);
}
return slots_used;
@@ -449,7 +449,7 @@ static int count_skb_frag_slots(struct sk_buff *skb)
for (i = 0; i < frags; i++) {
skb_frag_t *frag = skb_shinfo(skb)->frags + i;
unsigned long size = skb_frag_size(frag);
- unsigned long offset = frag->page_offset;
+ unsigned long offset = skb_frag_off(frag);
/* Skip unused frames from start of page */
offset &= ~PAGE_MASK;
diff --git a/drivers/net/ieee802154/adf7242.c b/drivers/net/ieee802154/adf7242.c
index c9392d70e639..5a37514e4234 100644
--- a/drivers/net/ieee802154/adf7242.c
+++ b/drivers/net/ieee802154/adf7242.c
@@ -1158,23 +1158,16 @@ static int adf7242_stats_show(struct seq_file *file, void *offset)
return 0;
}
-static int adf7242_debugfs_init(struct adf7242_local *lp)
+static void adf7242_debugfs_init(struct adf7242_local *lp)
{
char debugfs_dir_name[DNAME_INLINE_LEN + 1] = "adf7242-";
- struct dentry *stats;
strncat(debugfs_dir_name, dev_name(&lp->spi->dev), DNAME_INLINE_LEN);
lp->debugfs_root = debugfs_create_dir(debugfs_dir_name, NULL);
- if (IS_ERR_OR_NULL(lp->debugfs_root))
- return PTR_ERR_OR_ZERO(lp->debugfs_root);
- stats = debugfs_create_devm_seqfile(&lp->spi->dev, "status",
- lp->debugfs_root,
- adf7242_stats_show);
- return PTR_ERR_OR_ZERO(stats);
-
- return 0;
+ debugfs_create_devm_seqfile(&lp->spi->dev, "status", lp->debugfs_root,
+ adf7242_stats_show);
}
static const s32 adf7242_powers[] = {
diff --git a/drivers/net/ieee802154/at86rf230.c b/drivers/net/ieee802154/at86rf230.c
index 595cf7e2a651..7d67f41387f5 100644
--- a/drivers/net/ieee802154/at86rf230.c
+++ b/drivers/net/ieee802154/at86rf230.c
@@ -1626,24 +1626,16 @@ static int at86rf230_stats_show(struct seq_file *file, void *offset)
}
DEFINE_SHOW_ATTRIBUTE(at86rf230_stats);
-static int at86rf230_debugfs_init(struct at86rf230_local *lp)
+static void at86rf230_debugfs_init(struct at86rf230_local *lp)
{
char debugfs_dir_name[DNAME_INLINE_LEN + 1] = "at86rf230-";
- struct dentry *stats;
strncat(debugfs_dir_name, dev_name(&lp->spi->dev), DNAME_INLINE_LEN);
at86rf230_debugfs_root = debugfs_create_dir(debugfs_dir_name, NULL);
- if (!at86rf230_debugfs_root)
- return -ENOMEM;
-
- stats = debugfs_create_file("trac_stats", 0444,
- at86rf230_debugfs_root, lp,
- &at86rf230_stats_fops);
- if (!stats)
- return -ENOMEM;
- return 0;
+ debugfs_create_file("trac_stats", 0444, at86rf230_debugfs_root, lp,
+ &at86rf230_stats_fops);
}
static void at86rf230_debugfs_remove(void)
@@ -1651,7 +1643,7 @@ static void at86rf230_debugfs_remove(void)
debugfs_remove_recursive(at86rf230_debugfs_root);
}
#else
-static int at86rf230_debugfs_init(struct at86rf230_local *lp) { return 0; }
+static void at86rf230_debugfs_init(struct at86rf230_local *lp) { }
static void at86rf230_debugfs_remove(void) { }
#endif
@@ -1751,9 +1743,7 @@ static int at86rf230_probe(struct spi_device *spi)
/* going into sleep by default */
at86rf230_sleep(lp);
- rc = at86rf230_debugfs_init(lp);
- if (rc)
- goto free_dev;
+ at86rf230_debugfs_init(lp);
rc = ieee802154_register_hw(lp->hw);
if (rc)
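The debugfs conversions in the surrounding IEEE 802.15.4 drivers rely on the current debugfs contract: creation functions return an ERR_PTR on failure, later calls accept that ERR_PTR as a parent and quietly do nothing, and debugfs failure is never supposed to be fatal to a driver, so callers should not check the return values at all. The resulting pattern, as a hypothetical sketch (example_fops assumed defined elsewhere):

	static void example_debugfs_init(struct dentry **root, void *priv)
	{
		/* No error checks: if this fails, the file creation below
		 * silently becomes a no-op, and the driver works regardless.
		 */
		*root = debugfs_create_dir("example", NULL);
		debugfs_create_file("stats", 0444, *root, priv, &example_fops);
	}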
diff --git a/drivers/net/ieee802154/ca8210.c b/drivers/net/ieee802154/ca8210.c
index b188fce3f641..11402dc347db 100644
--- a/drivers/net/ieee802154/ca8210.c
+++ b/drivers/net/ieee802154/ca8210.c
@@ -3019,14 +3019,7 @@ static int ca8210_test_interface_init(struct ca8210_priv *priv)
priv,
&test_int_fops
);
- if (IS_ERR(test->ca8210_dfs_spi_int)) {
- dev_err(
- &priv->spi->dev,
- "Error %ld when creating debugfs node\n",
- PTR_ERR(test->ca8210_dfs_spi_int)
- );
- return PTR_ERR(test->ca8210_dfs_spi_int);
- }
+
debugfs_create_symlink("ca8210", NULL, node_name);
init_waitqueue_head(&test->readq);
return kfifo_alloc(
diff --git a/drivers/net/ipvlan/ipvlan_main.c b/drivers/net/ipvlan/ipvlan_main.c
index 1c96bed5a7c4..887bbba4631e 100644
--- a/drivers/net/ipvlan/ipvlan_main.c
+++ b/drivers/net/ipvlan/ipvlan_main.c
@@ -126,6 +126,7 @@ static int ipvlan_init(struct net_device *dev)
(phy_dev->state & IPVLAN_STATE_MASK);
dev->features = phy_dev->features & IPVLAN_FEATURES;
dev->features |= NETIF_F_LLTX | NETIF_F_VLAN_CHALLENGED;
+ dev->hw_enc_features |= dev->features;
dev->gso_max_size = phy_dev->gso_max_size;
dev->gso_max_segs = phy_dev->gso_max_segs;
dev->hard_header_len = phy_dev->hard_header_len;
diff --git a/drivers/net/netdevsim/dev.c b/drivers/net/netdevsim/dev.c
index bcc40a236624..39cdb6c18ec0 100644
--- a/drivers/net/netdevsim/dev.c
+++ b/drivers/net/netdevsim/dev.c
@@ -17,16 +17,60 @@
#include <linux/debugfs.h>
#include <linux/device.h>
+#include <linux/etherdevice.h>
+#include <linux/inet.h>
+#include <linux/jiffies.h>
+#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/random.h>
#include <linux/rtnetlink.h>
+#include <linux/workqueue.h>
#include <net/devlink.h>
+#include <net/ip.h>
+#include <uapi/linux/devlink.h>
+#include <uapi/linux/ip.h>
+#include <uapi/linux/udp.h>
#include "netdevsim.h"
static struct dentry *nsim_dev_ddir;
+#define NSIM_DEV_DUMMY_REGION_SIZE (1024 * 32)
+
+static ssize_t nsim_dev_take_snapshot_write(struct file *file,
+ const char __user *data,
+ size_t count, loff_t *ppos)
+{
+ struct nsim_dev *nsim_dev = file->private_data;
+ void *dummy_data;
+ int err;
+ u32 id;
+
+ dummy_data = kmalloc(NSIM_DEV_DUMMY_REGION_SIZE, GFP_KERNEL);
+ if (!dummy_data)
+ return -ENOMEM;
+
+ get_random_bytes(dummy_data, NSIM_DEV_DUMMY_REGION_SIZE);
+
+ id = devlink_region_shapshot_id_get(priv_to_devlink(nsim_dev));
+ err = devlink_region_snapshot_create(nsim_dev->dummy_region,
+ dummy_data, id, kfree);
+ if (err) {
+ pr_err("Failed to create region snapshot\n");
+ kfree(dummy_data);
+ return err;
+ }
+
+ return count;
+}
+
+static const struct file_operations nsim_dev_take_snapshot_fops = {
+ .open = simple_open,
+ .write = nsim_dev_take_snapshot_write,
+ .llseek = generic_file_llseek,
+};
+
static int nsim_dev_debugfs_init(struct nsim_dev *nsim_dev)
{
char dev_ddir_name[16];
@@ -40,6 +84,12 @@ static int nsim_dev_debugfs_init(struct nsim_dev *nsim_dev)
return PTR_ERR_OR_ZERO(nsim_dev->ports_ddir) ?: -EINVAL;
debugfs_create_bool("fw_update_status", 0600, nsim_dev->ddir,
&nsim_dev->fw_update_status);
+ debugfs_create_u32("max_macs", 0600, nsim_dev->ddir,
+ &nsim_dev->max_macs);
+ debugfs_create_bool("test1", 0600, nsim_dev->ddir,
+ &nsim_dev->test1);
+ debugfs_create_file("take_snapshot", 0200, nsim_dev->ddir, nsim_dev,
+ &nsim_dev_take_snapshot_fops);
return 0;
}
@@ -193,6 +243,284 @@ out:
return err;
}
+enum nsim_devlink_param_id {
+ NSIM_DEVLINK_PARAM_ID_BASE = DEVLINK_PARAM_GENERIC_ID_MAX,
+ NSIM_DEVLINK_PARAM_ID_TEST1,
+};
+
+static const struct devlink_param nsim_devlink_params[] = {
+ DEVLINK_PARAM_GENERIC(MAX_MACS,
+ BIT(DEVLINK_PARAM_CMODE_DRIVERINIT),
+ NULL, NULL, NULL),
+ DEVLINK_PARAM_DRIVER(NSIM_DEVLINK_PARAM_ID_TEST1,
+ "test1", DEVLINK_PARAM_TYPE_BOOL,
+ BIT(DEVLINK_PARAM_CMODE_DRIVERINIT),
+ NULL, NULL, NULL),
+};
+
+static void nsim_devlink_set_params_init_values(struct nsim_dev *nsim_dev,
+ struct devlink *devlink)
+{
+ union devlink_param_value value;
+
+ value.vu32 = nsim_dev->max_macs;
+ devlink_param_driverinit_value_set(devlink,
+ DEVLINK_PARAM_GENERIC_ID_MAX_MACS,
+ value);
+ value.vbool = nsim_dev->test1;
+ devlink_param_driverinit_value_set(devlink,
+ NSIM_DEVLINK_PARAM_ID_TEST1,
+ value);
+}
+
+static void nsim_devlink_param_load_driverinit_values(struct devlink *devlink)
+{
+ struct nsim_dev *nsim_dev = devlink_priv(devlink);
+ union devlink_param_value saved_value;
+ int err;
+
+ err = devlink_param_driverinit_value_get(devlink,
+ DEVLINK_PARAM_GENERIC_ID_MAX_MACS,
+ &saved_value);
+ if (!err)
+ nsim_dev->max_macs = saved_value.vu32;
+ err = devlink_param_driverinit_value_get(devlink,
+ NSIM_DEVLINK_PARAM_ID_TEST1,
+ &saved_value);
+ if (!err)
+ nsim_dev->test1 = saved_value.vbool;
+}
+
+#define NSIM_DEV_DUMMY_REGION_SNAPSHOT_MAX 16
+
+static int nsim_dev_dummy_region_init(struct nsim_dev *nsim_dev,
+ struct devlink *devlink)
+{
+ nsim_dev->dummy_region =
+ devlink_region_create(devlink, "dummy",
+ NSIM_DEV_DUMMY_REGION_SNAPSHOT_MAX,
+ NSIM_DEV_DUMMY_REGION_SIZE);
+ return PTR_ERR_OR_ZERO(nsim_dev->dummy_region);
+}
+
+static void nsim_dev_dummy_region_exit(struct nsim_dev *nsim_dev)
+{
+ devlink_region_destroy(nsim_dev->dummy_region);
+}
+
+struct nsim_trap_item {
+ void *trap_ctx;
+ enum devlink_trap_action action;
+};
+
+struct nsim_trap_data {
+ struct delayed_work trap_report_dw;
+ struct nsim_trap_item *trap_items_arr;
+ struct nsim_dev *nsim_dev;
+ spinlock_t trap_lock; /* Protects trap_items_arr */
+};
+
+/* All driver-specific traps must be documented in
+ * Documentation/networking/devlink-trap-netdevsim.rst
+ */
+enum {
+ NSIM_TRAP_ID_BASE = DEVLINK_TRAP_GENERIC_ID_MAX,
+ NSIM_TRAP_ID_FID_MISS,
+};
+
+#define NSIM_TRAP_NAME_FID_MISS "fid_miss"
+
+#define NSIM_TRAP_METADATA DEVLINK_TRAP_METADATA_TYPE_F_IN_PORT
+
+#define NSIM_TRAP_DROP(_id, _group_id) \
+ DEVLINK_TRAP_GENERIC(DROP, DROP, _id, \
+ DEVLINK_TRAP_GROUP_GENERIC(_group_id), \
+ NSIM_TRAP_METADATA)
+#define NSIM_TRAP_EXCEPTION(_id, _group_id) \
+ DEVLINK_TRAP_GENERIC(EXCEPTION, TRAP, _id, \
+ DEVLINK_TRAP_GROUP_GENERIC(_group_id), \
+ NSIM_TRAP_METADATA)
+#define NSIM_TRAP_DRIVER_EXCEPTION(_id, _group_id) \
+ DEVLINK_TRAP_DRIVER(EXCEPTION, TRAP, NSIM_TRAP_ID_##_id, \
+ NSIM_TRAP_NAME_##_id, \
+ DEVLINK_TRAP_GROUP_GENERIC(_group_id), \
+ NSIM_TRAP_METADATA)
+
+static const struct devlink_trap nsim_traps_arr[] = {
+ NSIM_TRAP_DROP(SMAC_MC, L2_DROPS),
+ NSIM_TRAP_DROP(VLAN_TAG_MISMATCH, L2_DROPS),
+ NSIM_TRAP_DROP(INGRESS_VLAN_FILTER, L2_DROPS),
+ NSIM_TRAP_DROP(INGRESS_STP_FILTER, L2_DROPS),
+ NSIM_TRAP_DROP(EMPTY_TX_LIST, L2_DROPS),
+ NSIM_TRAP_DROP(PORT_LOOPBACK_FILTER, L2_DROPS),
+ NSIM_TRAP_DRIVER_EXCEPTION(FID_MISS, L2_DROPS),
+ NSIM_TRAP_DROP(BLACKHOLE_ROUTE, L3_DROPS),
+ NSIM_TRAP_EXCEPTION(TTL_ERROR, L3_DROPS),
+ NSIM_TRAP_DROP(TAIL_DROP, BUFFER_DROPS),
+};
+
+#define NSIM_TRAP_L4_DATA_LEN 100
+
+static struct sk_buff *nsim_dev_trap_skb_build(void)
+{
+ int tot_len, data_len = NSIM_TRAP_L4_DATA_LEN;
+ struct sk_buff *skb;
+ struct udphdr *udph;
+ struct ethhdr *eth;
+ struct iphdr *iph;
+
+ skb = alloc_skb(NLMSG_GOODSIZE, GFP_ATOMIC);
+ if (!skb)
+ return NULL;
+ tot_len = sizeof(struct iphdr) + sizeof(struct udphdr) + data_len;
+
+ eth = skb_put(skb, sizeof(struct ethhdr));
+ eth_random_addr(eth->h_dest);
+ eth_random_addr(eth->h_source);
+ eth->h_proto = htons(ETH_P_IP);
+ skb->protocol = htons(ETH_P_IP);
+
+ iph = skb_put(skb, sizeof(struct iphdr));
+ iph->protocol = IPPROTO_UDP;
+ iph->saddr = in_aton("192.0.2.1");
+ iph->daddr = in_aton("198.51.100.1");
+ iph->version = 0x4;
+ iph->frag_off = 0;
+ iph->ihl = 0x5;
+ iph->tot_len = htons(tot_len);
+ iph->ttl = 100;
+ iph->check = 0;
+ iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl);
+
+ udph = skb_put_zero(skb, sizeof(struct udphdr) + data_len);
+ get_random_bytes(&udph->source, sizeof(u16));
+ get_random_bytes(&udph->dest, sizeof(u16));
+ udph->len = htons(sizeof(struct udphdr) + data_len);
+
+ return skb;
+}
+
+static void nsim_dev_trap_report(struct nsim_dev_port *nsim_dev_port)
+{
+ struct nsim_dev *nsim_dev = nsim_dev_port->ns->nsim_dev;
+ struct devlink *devlink = priv_to_devlink(nsim_dev);
+ struct nsim_trap_data *nsim_trap_data;
+ int i;
+
+ nsim_trap_data = nsim_dev->trap_data;
+
+ spin_lock(&nsim_trap_data->trap_lock);
+ for (i = 0; i < ARRAY_SIZE(nsim_traps_arr); i++) {
+ struct nsim_trap_item *nsim_trap_item;
+ struct sk_buff *skb;
+
+ nsim_trap_item = &nsim_trap_data->trap_items_arr[i];
+ if (nsim_trap_item->action == DEVLINK_TRAP_ACTION_DROP)
+ continue;
+
+ skb = nsim_dev_trap_skb_build();
+ if (!skb)
+ continue;
+ skb->dev = nsim_dev_port->ns->netdev;
+
+ /* Trapped packets are usually passed to devlink in softIRQ,
+ * but in this case they are generated in a workqueue. Disable
+ * softIRQs to prevent lockdep from complaining about
+ * "incosistent lock state".
+ */
+ local_bh_disable();
+ devlink_trap_report(devlink, skb, nsim_trap_item->trap_ctx,
+ &nsim_dev_port->devlink_port);
+ local_bh_enable();
+ consume_skb(skb);
+ }
+ spin_unlock(&nsim_trap_data->trap_lock);
+}
+
+#define NSIM_TRAP_REPORT_INTERVAL_MS 100
+
+static void nsim_dev_trap_report_work(struct work_struct *work)
+{
+ struct nsim_trap_data *nsim_trap_data;
+ struct nsim_dev_port *nsim_dev_port;
+ struct nsim_dev *nsim_dev;
+
+ nsim_trap_data = container_of(work, struct nsim_trap_data,
+ trap_report_dw.work);
+ nsim_dev = nsim_trap_data->nsim_dev;
+
+ /* For each running port and enabled packet trap, generate a UDP
+ * packet with random source and destination ports and report it.
+ */
+ mutex_lock(&nsim_dev->port_list_lock);
+ list_for_each_entry(nsim_dev_port, &nsim_dev->port_list, list) {
+ if (!netif_running(nsim_dev_port->ns->netdev))
+ continue;
+
+ nsim_dev_trap_report(nsim_dev_port);
+ }
+ mutex_unlock(&nsim_dev->port_list_lock);
+
+ schedule_delayed_work(&nsim_dev->trap_data->trap_report_dw,
+ msecs_to_jiffies(NSIM_TRAP_REPORT_INTERVAL_MS));
+}
+
+static int nsim_dev_traps_init(struct devlink *devlink)
+{
+ struct nsim_dev *nsim_dev = devlink_priv(devlink);
+ struct nsim_trap_data *nsim_trap_data;
+ int err;
+
+ nsim_trap_data = kzalloc(sizeof(*nsim_trap_data), GFP_KERNEL);
+ if (!nsim_trap_data)
+ return -ENOMEM;
+
+ nsim_trap_data->trap_items_arr = kcalloc(ARRAY_SIZE(nsim_traps_arr),
+ sizeof(struct nsim_trap_item),
+ GFP_KERNEL);
+ if (!nsim_trap_data->trap_items_arr) {
+ err = -ENOMEM;
+ goto err_trap_data_free;
+ }
+
+ /* The lock is used to protect the action state of the registered
+ * traps. The value is written by user and read in delayed work when
+ * iterating over all the traps.
+ */
+ spin_lock_init(&nsim_trap_data->trap_lock);
+ nsim_trap_data->nsim_dev = nsim_dev;
+ nsim_dev->trap_data = nsim_trap_data;
+
+ err = devlink_traps_register(devlink, nsim_traps_arr,
+ ARRAY_SIZE(nsim_traps_arr), NULL);
+ if (err)
+ goto err_trap_items_free;
+
+ INIT_DELAYED_WORK(&nsim_dev->trap_data->trap_report_dw,
+ nsim_dev_trap_report_work);
+ schedule_delayed_work(&nsim_dev->trap_data->trap_report_dw,
+ msecs_to_jiffies(NSIM_TRAP_REPORT_INTERVAL_MS));
+
+ return 0;
+
+err_trap_items_free:
+ kfree(nsim_trap_data->trap_items_arr);
+err_trap_data_free:
+ kfree(nsim_trap_data);
+ return err;
+}
+
+static void nsim_dev_traps_exit(struct devlink *devlink)
+{
+ struct nsim_dev *nsim_dev = devlink_priv(devlink);
+
+ cancel_delayed_work_sync(&nsim_dev->trap_data->trap_report_dw);
+ devlink_traps_unregister(devlink, nsim_traps_arr,
+ ARRAY_SIZE(nsim_traps_arr));
+ kfree(nsim_dev->trap_data->trap_items_arr);
+ kfree(nsim_dev->trap_data);
+}
+
static int nsim_dev_reload(struct devlink *devlink,
struct netlink_ext_ack *extack)
{
@@ -214,6 +542,7 @@ static int nsim_dev_reload(struct devlink *devlink,
return err;
}
}
+ nsim_devlink_param_load_driverinit_values(devlink);
return 0;
}
@@ -258,11 +587,66 @@ static int nsim_dev_flash_update(struct devlink *devlink, const char *file_name,
return 0;
}
+static struct nsim_trap_item *
+nsim_dev_trap_item_lookup(struct nsim_dev *nsim_dev, u16 trap_id)
+{
+ struct nsim_trap_data *nsim_trap_data = nsim_dev->trap_data;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(nsim_traps_arr); i++) {
+ if (nsim_traps_arr[i].id == trap_id)
+ return &nsim_trap_data->trap_items_arr[i];
+ }
+
+ return NULL;
+}
+
+static int nsim_dev_devlink_trap_init(struct devlink *devlink,
+ const struct devlink_trap *trap,
+ void *trap_ctx)
+{
+ struct nsim_dev *nsim_dev = devlink_priv(devlink);
+ struct nsim_trap_item *nsim_trap_item;
+
+ nsim_trap_item = nsim_dev_trap_item_lookup(nsim_dev, trap->id);
+ if (WARN_ON(!nsim_trap_item))
+ return -ENOENT;
+
+ nsim_trap_item->trap_ctx = trap_ctx;
+ nsim_trap_item->action = trap->init_action;
+
+ return 0;
+}
+
+static int
+nsim_dev_devlink_trap_action_set(struct devlink *devlink,
+ const struct devlink_trap *trap,
+ enum devlink_trap_action action)
+{
+ struct nsim_dev *nsim_dev = devlink_priv(devlink);
+ struct nsim_trap_item *nsim_trap_item;
+
+ nsim_trap_item = nsim_dev_trap_item_lookup(nsim_dev, trap->id);
+ if (WARN_ON(!nsim_trap_item))
+ return -ENOENT;
+
+ spin_lock(&nsim_dev->trap_data->trap_lock);
+ nsim_trap_item->action = action;
+ spin_unlock(&nsim_dev->trap_data->trap_lock);
+
+ return 0;
+}
+
static const struct devlink_ops nsim_dev_devlink_ops = {
.reload = nsim_dev_reload,
.flash_update = nsim_dev_flash_update,
+ .trap_init = nsim_dev_devlink_trap_init,
+ .trap_action_set = nsim_dev_devlink_trap_action_set,
};
+#define NSIM_DEV_MAX_MACS_DEFAULT 32
+#define NSIM_DEV_TEST1_DEFAULT true
+
static struct nsim_dev *
nsim_dev_create(struct nsim_bus_dev *nsim_bus_dev, unsigned int port_count)
{
@@ -280,6 +664,8 @@ nsim_dev_create(struct nsim_bus_dev *nsim_bus_dev, unsigned int port_count)
INIT_LIST_HEAD(&nsim_dev->port_list);
mutex_init(&nsim_dev->port_list_lock);
nsim_dev->fw_update_status = true;
+ nsim_dev->max_macs = NSIM_DEV_MAX_MACS_DEFAULT;
+ nsim_dev->test1 = NSIM_DEV_TEST1_DEFAULT;
err = nsim_dev_resources_register(devlink);
if (err)
@@ -289,18 +675,40 @@ nsim_dev_create(struct nsim_bus_dev *nsim_bus_dev, unsigned int port_count)
if (err)
goto err_resources_unregister;
- err = nsim_dev_debugfs_init(nsim_dev);
+ err = devlink_params_register(devlink, nsim_devlink_params,
+ ARRAY_SIZE(nsim_devlink_params));
if (err)
goto err_dl_unregister;
+ nsim_devlink_set_params_init_values(nsim_dev, devlink);
+
+ err = nsim_dev_dummy_region_init(nsim_dev, devlink);
+ if (err)
+ goto err_params_unregister;
+
+ err = nsim_dev_traps_init(devlink);
+ if (err)
+ goto err_dummy_region_exit;
+
+ err = nsim_dev_debugfs_init(nsim_dev);
+ if (err)
+ goto err_traps_exit;
err = nsim_bpf_dev_init(nsim_dev);
if (err)
goto err_debugfs_exit;
+ devlink_params_publish(devlink);
return nsim_dev;
err_debugfs_exit:
nsim_dev_debugfs_exit(nsim_dev);
+err_traps_exit:
+ nsim_dev_traps_exit(devlink);
+err_dummy_region_exit:
+ nsim_dev_dummy_region_exit(nsim_dev);
+err_params_unregister:
+ devlink_params_unregister(devlink, nsim_devlink_params,
+ ARRAY_SIZE(nsim_devlink_params));
err_dl_unregister:
devlink_unregister(devlink);
err_resources_unregister:
@@ -316,6 +724,10 @@ static void nsim_dev_destroy(struct nsim_dev *nsim_dev)
nsim_bpf_dev_exit(nsim_dev);
nsim_dev_debugfs_exit(nsim_dev);
+ nsim_dev_traps_exit(devlink);
+ nsim_dev_dummy_region_exit(nsim_dev);
+ devlink_params_unregister(devlink, nsim_devlink_params,
+ ARRAY_SIZE(nsim_devlink_params));
devlink_unregister(devlink);
devlink_resources_unregister(devlink, NULL);
mutex_destroy(&nsim_dev->port_list_lock);
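The trap reporter above is a self-rearming delayed work: the work function schedules itself again, and nsim_dev_traps_exit() uses cancel_delayed_work_sync() so the handler cannot re-arm after teardown begins. The skeleton of that pattern, reduced to a hypothetical example:

	static struct delayed_work example_dw;

	static void example_work_fn(struct work_struct *work)
	{
		/* ... one round of periodic work ... */

		/* re-arm for the next round */
		schedule_delayed_work(&example_dw, msecs_to_jiffies(100));
	}

	static void example_start(void)
	{
		INIT_DELAYED_WORK(&example_dw, example_work_fn);
		schedule_delayed_work(&example_dw, msecs_to_jiffies(100));
	}

	static void example_stop(void)
	{
		/* cancel and wait, so the handler cannot re-arm afterwards */
		cancel_delayed_work_sync(&example_dw);
	}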
diff --git a/drivers/net/netdevsim/netdevsim.h b/drivers/net/netdevsim/netdevsim.h
index 9404637d34b7..66bf13765ad0 100644
--- a/drivers/net/netdevsim/netdevsim.h
+++ b/drivers/net/netdevsim/netdevsim.h
@@ -145,6 +145,7 @@ struct nsim_dev_port {
struct nsim_dev {
struct nsim_bus_dev *nsim_bus_dev;
struct nsim_fib_data *fib_data;
+ struct nsim_trap_data *trap_data;
struct dentry *ddir;
struct dentry *ports_ddir;
struct bpf_offload_dev *bpf_dev;
@@ -158,6 +159,9 @@ struct nsim_dev {
struct list_head port_list;
struct mutex port_list_lock; /* protects port list */
bool fw_update_status;
+ u32 max_macs;
+ bool test1;
+ struct devlink_region *dummy_region;
};
int nsim_dev_init(void);
diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig
index 20f14c5fbb7e..03be30cde552 100644
--- a/drivers/net/phy/Kconfig
+++ b/drivers/net/phy/Kconfig
@@ -21,6 +21,19 @@ config MDIO_BUS
if MDIO_BUS
+config MDIO_ASPEED
+ tristate "ASPEED MDIO bus controller"
+ depends on ARCH_ASPEED || COMPILE_TEST
+ depends on OF_MDIO && HAS_IOMEM
+ help
+ This module provides a driver for the independent MDIO bus
+ controllers found in the ASPEED AST2600 SoC. This is a driver for the
+ third revision of the ASPEED MDIO register interface - the first two
+ revisions are the "old" and "new" interfaces found in the AST2400 and
+ AST2500, embedded in the MAC. For legacy reasons, the FTGMAC100 driver
+ continues to drive the embedded MDIO controller for the AST2400 and
+ AST2500 SoCs, so say N if AST2600 support is not required.
+
config MDIO_BCM_IPROC
tristate "Broadcom iProc MDIO bus controller"
depends on ARCH_BCM_IPROC || COMPILE_TEST
@@ -159,8 +172,8 @@ config MDIO_MSCC_MIIM
config MDIO_OCTEON
tristate "Octeon and some ThunderX SOCs MDIO buses"
- depends on 64BIT
- depends on HAS_IOMEM && OF_MDIO
+ depends on (64BIT && OF_MDIO) || COMPILE_TEST
+ depends on HAS_IOMEM
select MDIO_CAVIUM
help
This module provides a driver for the Octeon and ThunderX MDIO
@@ -244,6 +257,15 @@ config SFP
depends on HWMON || HWMON=n
select MDIO_I2C
+config ADIN_PHY
+ tristate "Analog Devices Industrial Ethernet PHYs"
+ help
+ Adds support for the Analog Devices Industrial Ethernet PHYs.
+ Currently supports:
+ - ADIN1200 - Robust, Industrial, Low Power 10/100 Ethernet PHY
+ - ADIN1300 - Robust, Industrial, Low Latency 10/100/1000 Gigabit
+ Ethernet PHY
+
config AMD_PHY
tristate "AMD PHYs"
---help---
diff --git a/drivers/net/phy/Makefile b/drivers/net/phy/Makefile
index 839acb292c38..a03437e091f3 100644
--- a/drivers/net/phy/Makefile
+++ b/drivers/net/phy/Makefile
@@ -22,6 +22,7 @@ libphy-$(CONFIG_LED_TRIGGER_PHY) += phy_led_triggers.o
obj-$(CONFIG_PHYLINK) += phylink.o
obj-$(CONFIG_PHYLIB) += libphy.o
+obj-$(CONFIG_MDIO_ASPEED) += mdio-aspeed.o
obj-$(CONFIG_MDIO_BCM_IPROC) += mdio-bcm-iproc.o
obj-$(CONFIG_MDIO_BCM_UNIMAC) += mdio-bcm-unimac.o
obj-$(CONFIG_MDIO_BITBANG) += mdio-bitbang.o
@@ -46,6 +47,7 @@ obj-$(CONFIG_SFP) += sfp.o
sfp-obj-$(CONFIG_SFP) += sfp-bus.o
obj-y += $(sfp-obj-y) $(sfp-obj-m)
+obj-$(CONFIG_ADIN_PHY) += adin.o
obj-$(CONFIG_AMD_PHY) += amd.o
aquantia-objs += aquantia_main.o
ifdef CONFIG_HWMON
diff --git a/drivers/net/phy/adin.c b/drivers/net/phy/adin.c
new file mode 100644
index 000000000000..4dec83df048d
--- /dev/null
+++ b/drivers/net/phy/adin.c
@@ -0,0 +1,720 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Driver for Analog Devices Industrial Ethernet PHYs
+ *
+ * Copyright 2019 Analog Devices Inc.
+ */
+#include <linux/kernel.h>
+#include <linux/bitfield.h>
+#include <linux/delay.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/mii.h>
+#include <linux/phy.h>
+#include <linux/property.h>
+
+#define PHY_ID_ADIN1200 0x0283bc20
+#define PHY_ID_ADIN1300 0x0283bc30
+
+#define ADIN1300_MII_EXT_REG_PTR 0x0010
+#define ADIN1300_MII_EXT_REG_DATA 0x0011
+
+#define ADIN1300_PHY_CTRL1 0x0012
+#define ADIN1300_AUTO_MDI_EN BIT(10)
+#define ADIN1300_MAN_MDIX_EN BIT(9)
+
+#define ADIN1300_RX_ERR_CNT 0x0014
+
+#define ADIN1300_PHY_CTRL2 0x0016
+#define ADIN1300_DOWNSPEED_AN_100_EN BIT(11)
+#define ADIN1300_DOWNSPEED_AN_10_EN BIT(10)
+#define ADIN1300_GROUP_MDIO_EN BIT(6)
+#define ADIN1300_DOWNSPEEDS_EN \
+ (ADIN1300_DOWNSPEED_AN_100_EN | ADIN1300_DOWNSPEED_AN_10_EN)
+
+#define ADIN1300_PHY_CTRL3 0x0017
+#define ADIN1300_LINKING_EN BIT(13)
+#define ADIN1300_DOWNSPEED_RETRIES_MSK GENMASK(12, 10)
+
+#define ADIN1300_INT_MASK_REG 0x0018
+#define ADIN1300_INT_MDIO_SYNC_EN BIT(9)
+#define ADIN1300_INT_ANEG_STAT_CHNG_EN BIT(8)
+#define ADIN1300_INT_ANEG_PAGE_RX_EN BIT(6)
+#define ADIN1300_INT_IDLE_ERR_CNT_EN BIT(5)
+#define ADIN1300_INT_MAC_FIFO_OU_EN BIT(4)
+#define ADIN1300_INT_RX_STAT_CHNG_EN BIT(3)
+#define ADIN1300_INT_LINK_STAT_CHNG_EN BIT(2)
+#define ADIN1300_INT_SPEED_CHNG_EN BIT(1)
+#define ADIN1300_INT_HW_IRQ_EN BIT(0)
+#define ADIN1300_INT_MASK_EN \
+ (ADIN1300_INT_LINK_STAT_CHNG_EN | ADIN1300_INT_HW_IRQ_EN)
+#define ADIN1300_INT_STATUS_REG 0x0019
+
+#define ADIN1300_PHY_STATUS1 0x001a
+#define ADIN1300_PAIR_01_SWAP BIT(11)
+
+/* EEE register addresses, accessible via Clause 22 access using
+ * ADIN1300_MII_EXT_REG_PTR & ADIN1300_MII_EXT_REG_DATA.
+ * The bit-fields are the same as specified by IEEE for EEE.
+ */
+#define ADIN1300_EEE_CAP_REG 0x8000
+#define ADIN1300_EEE_ADV_REG 0x8001
+#define ADIN1300_EEE_LPABLE_REG 0x8002
+#define ADIN1300_CLOCK_STOP_REG 0x9400
+#define ADIN1300_LPI_WAKE_ERR_CNT_REG 0xa000
+
+#define ADIN1300_GE_SOFT_RESET_REG 0xff0c
+#define ADIN1300_GE_SOFT_RESET BIT(0)
+
+#define ADIN1300_GE_RGMII_CFG_REG 0xff23
+#define ADIN1300_GE_RGMII_RX_MSK GENMASK(8, 6)
+#define ADIN1300_GE_RGMII_RX_SEL(x) \
+ FIELD_PREP(ADIN1300_GE_RGMII_RX_MSK, x)
+#define ADIN1300_GE_RGMII_GTX_MSK GENMASK(5, 3)
+#define ADIN1300_GE_RGMII_GTX_SEL(x) \
+ FIELD_PREP(ADIN1300_GE_RGMII_GTX_MSK, x)
+#define ADIN1300_GE_RGMII_RXID_EN BIT(2)
+#define ADIN1300_GE_RGMII_TXID_EN BIT(1)
+#define ADIN1300_GE_RGMII_EN BIT(0)
+
+/* RGMII internal delay settings for rx and tx for ADIN1300 */
+#define ADIN1300_RGMII_1_60_NS 0x0001
+#define ADIN1300_RGMII_1_80_NS 0x0002
+#define ADIN1300_RGMII_2_00_NS 0x0000
+#define ADIN1300_RGMII_2_20_NS 0x0006
+#define ADIN1300_RGMII_2_40_NS 0x0007
+
+#define ADIN1300_GE_RMII_CFG_REG 0xff24
+#define ADIN1300_GE_RMII_FIFO_DEPTH_MSK GENMASK(6, 4)
+#define ADIN1300_GE_RMII_FIFO_DEPTH_SEL(x) \
+ FIELD_PREP(ADIN1300_GE_RMII_FIFO_DEPTH_MSK, x)
+#define ADIN1300_GE_RMII_EN BIT(0)
+
+/* RMII fifo depth values */
+#define ADIN1300_RMII_4_BITS 0x0000
+#define ADIN1300_RMII_8_BITS 0x0001
+#define ADIN1300_RMII_12_BITS 0x0002
+#define ADIN1300_RMII_16_BITS 0x0003
+#define ADIN1300_RMII_20_BITS 0x0004
+#define ADIN1300_RMII_24_BITS 0x0005
+
+/**
+ * struct adin_cfg_reg_map - map a config value to a register value
+ * @cfg: value in device configuration
+ * @reg: value in the register
+ */
+struct adin_cfg_reg_map {
+ int cfg;
+ int reg;
+};
+
+static const struct adin_cfg_reg_map adin_rgmii_delays[] = {
+ { 1600, ADIN1300_RGMII_1_60_NS },
+ { 1800, ADIN1300_RGMII_1_80_NS },
+ { 2000, ADIN1300_RGMII_2_00_NS },
+ { 2200, ADIN1300_RGMII_2_20_NS },
+ { 2400, ADIN1300_RGMII_2_40_NS },
+ { },
+};
+
+static const struct adin_cfg_reg_map adin_rmii_fifo_depths[] = {
+ { 4, ADIN1300_RMII_4_BITS },
+ { 8, ADIN1300_RMII_8_BITS },
+ { 12, ADIN1300_RMII_12_BITS },
+ { 16, ADIN1300_RMII_16_BITS },
+ { 20, ADIN1300_RMII_20_BITS },
+ { 24, ADIN1300_RMII_24_BITS },
+ { },
+};
+
+/**
+ * struct adin_clause45_mmd_map - map to convert Clause 45 regs to Clause 22
+ * @devad: device address used in Clause 45 access
+ * @cl45_regnum: register address defined by Clause 45
+ * @adin_regnum: equivalent register address accessible via Clause 22
+ */
+struct adin_clause45_mmd_map {
+ int devad;
+ u16 cl45_regnum;
+ u16 adin_regnum;
+};
+
+static struct adin_clause45_mmd_map adin_clause45_mmd_map[] = {
+ { MDIO_MMD_PCS, MDIO_PCS_EEE_ABLE, ADIN1300_EEE_CAP_REG },
+ { MDIO_MMD_AN, MDIO_AN_EEE_LPABLE, ADIN1300_EEE_LPABLE_REG },
+ { MDIO_MMD_AN, MDIO_AN_EEE_ADV, ADIN1300_EEE_ADV_REG },
+ { MDIO_MMD_PCS, MDIO_CTRL1, ADIN1300_CLOCK_STOP_REG },
+ { MDIO_MMD_PCS, MDIO_PCS_EEE_WK_ERR, ADIN1300_LPI_WAKE_ERR_CNT_REG },
+};
+
+struct adin_hw_stat {
+ const char *string;
+ u16 reg1;
+ u16 reg2;
+};
+
+static struct adin_hw_stat adin_hw_stats[] = {
+ { "total_frames_checked_count", 0x940A, 0x940B }, /* hi + lo */
+ { "length_error_frames_count", 0x940C },
+ { "alignment_error_frames_count", 0x940D },
+ { "symbol_error_count", 0x940E },
+ { "oversized_frames_count", 0x940F },
+ { "undersized_frames_count", 0x9410 },
+ { "odd_nibble_frames_count", 0x9411 },
+ { "odd_preamble_packet_count", 0x9412 },
+ { "dribble_bits_frames_count", 0x9413 },
+ { "false_carrier_events_count", 0x9414 },
+};
+
+/**
+ * struct adin_priv - ADIN PHY driver private data
+ * @stats: statistic counters for the PHY
+ */
+struct adin_priv {
+ u64 stats[ARRAY_SIZE(adin_hw_stats)];
+};
+
+static int adin_lookup_reg_value(const struct adin_cfg_reg_map *tbl, int cfg)
+{
+ size_t i;
+
+ for (i = 0; tbl[i].cfg; i++) {
+ if (tbl[i].cfg == cfg)
+ return tbl[i].reg;
+ }
+
+ return -EINVAL;
+}
+
+static u32 adin_get_reg_value(struct phy_device *phydev,
+ const char *prop_name,
+ const struct adin_cfg_reg_map *tbl,
+ u32 dflt)
+{
+ struct device *dev = &phydev->mdio.dev;
+ u32 val;
+ int rc;
+
+ if (device_property_read_u32(dev, prop_name, &val))
+ return dflt;
+
+ rc = adin_lookup_reg_value(tbl, val);
+ if (rc < 0) {
+ phydev_warn(phydev,
+ "Unsupported value %u for %s using default (%u)\n",
+ val, prop_name, dflt);
+ return dflt;
+ }
+
+ return rc;
+}
+
+static int adin_config_rgmii_mode(struct phy_device *phydev)
+{
+ u32 val;
+ int reg;
+
+ if (!phy_interface_is_rgmii(phydev))
+ return phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1,
+ ADIN1300_GE_RGMII_CFG_REG,
+ ADIN1300_GE_RGMII_EN);
+
+ reg = phy_read_mmd(phydev, MDIO_MMD_VEND1, ADIN1300_GE_RGMII_CFG_REG);
+ if (reg < 0)
+ return reg;
+
+ reg |= ADIN1300_GE_RGMII_EN;
+
+ if (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID ||
+ phydev->interface == PHY_INTERFACE_MODE_RGMII_RXID) {
+ reg |= ADIN1300_GE_RGMII_RXID_EN;
+
+ val = adin_get_reg_value(phydev, "adi,rx-internal-delay-ps",
+ adin_rgmii_delays,
+ ADIN1300_RGMII_2_00_NS);
+ reg &= ~ADIN1300_GE_RGMII_RX_MSK;
+ reg |= ADIN1300_GE_RGMII_RX_SEL(val);
+ } else {
+ reg &= ~ADIN1300_GE_RGMII_RXID_EN;
+ }
+
+ if (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID ||
+ phydev->interface == PHY_INTERFACE_MODE_RGMII_TXID) {
+ reg |= ADIN1300_GE_RGMII_TXID_EN;
+
+ val = adin_get_reg_value(phydev, "adi,tx-internal-delay-ps",
+ adin_rgmii_delays,
+ ADIN1300_RGMII_2_00_NS);
+ reg &= ~ADIN1300_GE_RGMII_GTX_MSK;
+ reg |= ADIN1300_GE_RGMII_GTX_SEL(val);
+ } else {
+ reg &= ~ADIN1300_GE_RGMII_TXID_EN;
+ }
+
+ return phy_write_mmd(phydev, MDIO_MMD_VEND1,
+ ADIN1300_GE_RGMII_CFG_REG, reg);
+}
+
+static int adin_config_rmii_mode(struct phy_device *phydev)
+{
+ u32 val;
+ int reg;
+
+ if (phydev->interface != PHY_INTERFACE_MODE_RMII)
+ return phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1,
+ ADIN1300_GE_RMII_CFG_REG,
+ ADIN1300_GE_RMII_EN);
+
+ reg = phy_read_mmd(phydev, MDIO_MMD_VEND1, ADIN1300_GE_RMII_CFG_REG);
+ if (reg < 0)
+ return reg;
+
+ reg |= ADIN1300_GE_RMII_EN;
+
+ val = adin_get_reg_value(phydev, "adi,fifo-depth-bits",
+ adin_rmii_fifo_depths,
+ ADIN1300_RMII_8_BITS);
+
+ reg &= ~ADIN1300_GE_RMII_FIFO_DEPTH_MSK;
+ reg |= ADIN1300_GE_RMII_FIFO_DEPTH_SEL(val);
+
+ return phy_write_mmd(phydev, MDIO_MMD_VEND1,
+ ADIN1300_GE_RMII_CFG_REG, reg);
+}
+
+static int adin_get_downshift(struct phy_device *phydev, u8 *data)
+{
+ int val, cnt, enable;
+
+ val = phy_read(phydev, ADIN1300_PHY_CTRL2);
+ if (val < 0)
+ return val;
+
+ cnt = phy_read(phydev, ADIN1300_PHY_CTRL3);
+ if (cnt < 0)
+ return cnt;
+
+ enable = FIELD_GET(ADIN1300_DOWNSPEEDS_EN, val);
+ cnt = FIELD_GET(ADIN1300_DOWNSPEED_RETRIES_MSK, cnt);
+
+ *data = (enable && cnt) ? cnt : DOWNSHIFT_DEV_DISABLE;
+
+ return 0;
+}
+
+static int adin_set_downshift(struct phy_device *phydev, u8 cnt)
+{
+ u16 val;
+ int rc;
+
+ if (cnt == DOWNSHIFT_DEV_DISABLE)
+ return phy_clear_bits(phydev, ADIN1300_PHY_CTRL2,
+ ADIN1300_DOWNSPEEDS_EN);
+
+ if (cnt > 7)
+ return -E2BIG;
+
+ val = FIELD_PREP(ADIN1300_DOWNSPEED_RETRIES_MSK, cnt);
+ val |= ADIN1300_LINKING_EN;
+
+ rc = phy_modify(phydev, ADIN1300_PHY_CTRL3,
+ ADIN1300_LINKING_EN | ADIN1300_DOWNSPEED_RETRIES_MSK,
+ val);
+ if (rc < 0)
+ return rc;
+
+ return phy_set_bits(phydev, ADIN1300_PHY_CTRL2,
+ ADIN1300_DOWNSPEEDS_EN);
+}
+
+static int adin_get_tunable(struct phy_device *phydev,
+ struct ethtool_tunable *tuna, void *data)
+{
+ switch (tuna->id) {
+ case ETHTOOL_PHY_DOWNSHIFT:
+ return adin_get_downshift(phydev, data);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int adin_set_tunable(struct phy_device *phydev,
+ struct ethtool_tunable *tuna, const void *data)
+{
+ switch (tuna->id) {
+ case ETHTOOL_PHY_DOWNSHIFT:
+ return adin_set_downshift(phydev, *(const u8 *)data);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int adin_config_init(struct phy_device *phydev)
+{
+ int rc;
+
+ phydev->mdix_ctrl = ETH_TP_MDI_AUTO;
+
+ rc = adin_config_rgmii_mode(phydev);
+ if (rc < 0)
+ return rc;
+
+ rc = adin_config_rmii_mode(phydev);
+ if (rc < 0)
+ return rc;
+
+ rc = adin_set_downshift(phydev, 4);
+ if (rc < 0)
+ return rc;
+
+ phydev_dbg(phydev, "PHY is using mode '%s'\n",
+ phy_modes(phydev->interface));
+
+ return 0;
+}
+
+static int adin_phy_ack_intr(struct phy_device *phydev)
+{
+ /* Clear pending interrupts */
+ int rc = phy_read(phydev, ADIN1300_INT_STATUS_REG);
+
+ return rc < 0 ? rc : 0;
+}
+
+static int adin_phy_config_intr(struct phy_device *phydev)
+{
+ if (phydev->interrupts == PHY_INTERRUPT_ENABLED)
+ return phy_set_bits(phydev, ADIN1300_INT_MASK_REG,
+ ADIN1300_INT_MASK_EN);
+
+ return phy_clear_bits(phydev, ADIN1300_INT_MASK_REG,
+ ADIN1300_INT_MASK_EN);
+}
+
+static int adin_cl45_to_adin_reg(struct phy_device *phydev, int devad,
+ u16 cl45_regnum)
+{
+ struct adin_clause45_mmd_map *m;
+ int i;
+
+ if (devad == MDIO_MMD_VEND1)
+ return cl45_regnum;
+
+ for (i = 0; i < ARRAY_SIZE(adin_clause45_mmd_map); i++) {
+ m = &adin_clause45_mmd_map[i];
+ if (m->devad == devad && m->cl45_regnum == cl45_regnum)
+ return m->adin_regnum;
+ }
+
+ phydev_err(phydev,
+ "No translation available for devad: %d reg: %04x\n",
+ devad, cl45_regnum);
+
+ return -EINVAL;
+}
+
+static int adin_read_mmd(struct phy_device *phydev, int devad, u16 regnum)
+{
+ struct mii_bus *bus = phydev->mdio.bus;
+ int phy_addr = phydev->mdio.addr;
+ int adin_regnum;
+ int err;
+
+ adin_regnum = adin_cl45_to_adin_reg(phydev, devad, regnum);
+ if (adin_regnum < 0)
+ return adin_regnum;
+
+ err = __mdiobus_write(bus, phy_addr, ADIN1300_MII_EXT_REG_PTR,
+ adin_regnum);
+ if (err)
+ return err;
+
+ return __mdiobus_read(bus, phy_addr, ADIN1300_MII_EXT_REG_DATA);
+}
+
+static int adin_write_mmd(struct phy_device *phydev, int devad, u16 regnum,
+ u16 val)
+{
+ struct mii_bus *bus = phydev->mdio.bus;
+ int phy_addr = phydev->mdio.addr;
+ int adin_regnum;
+ int err;
+
+ adin_regnum = adin_cl45_to_adin_reg(phydev, devad, regnum);
+ if (adin_regnum < 0)
+ return adin_regnum;
+
+ err = __mdiobus_write(bus, phy_addr, ADIN1300_MII_EXT_REG_PTR,
+ adin_regnum);
+ if (err)
+ return err;
+
+ return __mdiobus_write(bus, phy_addr, ADIN1300_MII_EXT_REG_DATA, val);
+}
+
+static int adin_config_mdix(struct phy_device *phydev)
+{
+ bool auto_en, mdix_en;
+ int reg;
+
+ mdix_en = false;
+ auto_en = false;
+ switch (phydev->mdix_ctrl) {
+ case ETH_TP_MDI:
+ break;
+ case ETH_TP_MDI_X:
+ mdix_en = true;
+ break;
+ case ETH_TP_MDI_AUTO:
+ auto_en = true;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ reg = phy_read(phydev, ADIN1300_PHY_CTRL1);
+ if (reg < 0)
+ return reg;
+
+ if (mdix_en)
+ reg |= ADIN1300_MAN_MDIX_EN;
+ else
+ reg &= ~ADIN1300_MAN_MDIX_EN;
+
+ if (auto_en)
+ reg |= ADIN1300_AUTO_MDI_EN;
+ else
+ reg &= ~ADIN1300_AUTO_MDI_EN;
+
+ return phy_write(phydev, ADIN1300_PHY_CTRL1, reg);
+}
+
+static int adin_config_aneg(struct phy_device *phydev)
+{
+ int ret;
+
+ ret = adin_config_mdix(phydev);
+ if (ret)
+ return ret;
+
+ return genphy_config_aneg(phydev);
+}
+
+static int adin_mdix_update(struct phy_device *phydev)
+{
+ bool auto_en, mdix_en;
+ bool swapped;
+ int reg;
+
+ reg = phy_read(phydev, ADIN1300_PHY_CTRL1);
+ if (reg < 0)
+ return reg;
+
+ auto_en = !!(reg & ADIN1300_AUTO_MDI_EN);
+ mdix_en = !!(reg & ADIN1300_MAN_MDIX_EN);
+
+ /* If MDI/MDIX is forced, just read it from the control reg */
+ if (!auto_en) {
+ if (mdix_en)
+ phydev->mdix = ETH_TP_MDI_X;
+ else
+ phydev->mdix = ETH_TP_MDI;
+ return 0;
+ }
+
+ /* Otherwise, we need to deduce it from the PHY status 1 reg.
+ * When Auto-MDI is enabled, the ADIN1300_MAN_MDIX_EN bit implies
+ * a preference for MDIX when it is set.
+ */
+ reg = phy_read(phydev, ADIN1300_PHY_STATUS1);
+ if (reg < 0)
+ return reg;
+
+ swapped = !!(reg & ADIN1300_PAIR_01_SWAP);
+
+ if (mdix_en != swapped)
+ phydev->mdix = ETH_TP_MDI_X;
+ else
+ phydev->mdix = ETH_TP_MDI;
+
+ return 0;
+}
+
+static int adin_read_status(struct phy_device *phydev)
+{
+ int ret;
+
+ ret = adin_mdix_update(phydev);
+ if (ret < 0)
+ return ret;
+
+ return genphy_read_status(phydev);
+}
+
+static int adin_soft_reset(struct phy_device *phydev)
+{
+ int rc;
+
+ /* The reset bit is self-clearing, set it and wait */
+ rc = phy_set_bits_mmd(phydev, MDIO_MMD_VEND1,
+ ADIN1300_GE_SOFT_RESET_REG,
+ ADIN1300_GE_SOFT_RESET);
+ if (rc < 0)
+ return rc;
+
+ msleep(10);
+
+ /* If we get a read error something may be wrong */
+ rc = phy_read_mmd(phydev, MDIO_MMD_VEND1,
+ ADIN1300_GE_SOFT_RESET_REG);
+
+ return rc < 0 ? rc : 0;
+}
+
+static int adin_get_sset_count(struct phy_device *phydev)
+{
+ return ARRAY_SIZE(adin_hw_stats);
+}
+
+static void adin_get_strings(struct phy_device *phydev, u8 *data)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(adin_hw_stats); i++) {
+ strlcpy(&data[i * ETH_GSTRING_LEN],
+ adin_hw_stats[i].string, ETH_GSTRING_LEN);
+ }
+}
+
+static int adin_read_mmd_stat_regs(struct phy_device *phydev,
+ struct adin_hw_stat *stat,
+ u32 *val)
+{
+ int ret;
+
+ ret = phy_read_mmd(phydev, MDIO_MMD_VEND1, stat->reg1);
+ if (ret < 0)
+ return ret;
+
+ *val = (ret & 0xffff);
+
+ if (stat->reg2 == 0)
+ return 0;
+
+ ret = phy_read_mmd(phydev, MDIO_MMD_VEND1, stat->reg2);
+ if (ret < 0)
+ return ret;
+
+ *val <<= 16;
+ *val |= (ret & 0xffff);
+
+ return 0;
+}
+
+static u64 adin_get_stat(struct phy_device *phydev, int i)
+{
+ struct adin_hw_stat *stat = &adin_hw_stats[i];
+ struct adin_priv *priv = phydev->priv;
+ u32 val;
+ int ret;
+
+ if (stat->reg1 > 0x1f) {
+ ret = adin_read_mmd_stat_regs(phydev, stat, &val);
+ if (ret < 0)
+ return (u64)(~0);
+ } else {
+ ret = phy_read(phydev, stat->reg1);
+ if (ret < 0)
+ return (u64)(~0);
+ val = (ret & 0xffff);
+ }
+
+ priv->stats[i] += val;
+
+ return priv->stats[i];
+}
+
+static void adin_get_stats(struct phy_device *phydev,
+ struct ethtool_stats *stats, u64 *data)
+{
+ int i, rc;
+
+ /* latch copies of all the frame-checker counters */
+ rc = phy_read(phydev, ADIN1300_RX_ERR_CNT);
+ if (rc < 0)
+ return;
+
+ for (i = 0; i < ARRAY_SIZE(adin_hw_stats); i++)
+ data[i] = adin_get_stat(phydev, i);
+}
+
+static int adin_probe(struct phy_device *phydev)
+{
+ struct device *dev = &phydev->mdio.dev;
+ struct adin_priv *priv;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ phydev->priv = priv;
+
+ return 0;
+}
+
+static struct phy_driver adin_driver[] = {
+ {
+ PHY_ID_MATCH_MODEL(PHY_ID_ADIN1200),
+ .name = "ADIN1200",
+ .probe = adin_probe,
+ .config_init = adin_config_init,
+ .soft_reset = adin_soft_reset,
+ .config_aneg = adin_config_aneg,
+ .read_status = adin_read_status,
+ .get_tunable = adin_get_tunable,
+ .set_tunable = adin_set_tunable,
+ .ack_interrupt = adin_phy_ack_intr,
+ .config_intr = adin_phy_config_intr,
+ .get_sset_count = adin_get_sset_count,
+ .get_strings = adin_get_strings,
+ .get_stats = adin_get_stats,
+ .resume = genphy_resume,
+ .suspend = genphy_suspend,
+ .read_mmd = adin_read_mmd,
+ .write_mmd = adin_write_mmd,
+ },
+ {
+ PHY_ID_MATCH_MODEL(PHY_ID_ADIN1300),
+ .name = "ADIN1300",
+ .probe = adin_probe,
+ .config_init = adin_config_init,
+ .soft_reset = adin_soft_reset,
+ .config_aneg = adin_config_aneg,
+ .read_status = adin_read_status,
+ .get_tunable = adin_get_tunable,
+ .set_tunable = adin_set_tunable,
+ .ack_interrupt = adin_phy_ack_intr,
+ .config_intr = adin_phy_config_intr,
+ .get_sset_count = adin_get_sset_count,
+ .get_strings = adin_get_strings,
+ .get_stats = adin_get_stats,
+ .resume = genphy_resume,
+ .suspend = genphy_suspend,
+ .read_mmd = adin_read_mmd,
+ .write_mmd = adin_write_mmd,
+ },
+};
+
+module_phy_driver(adin_driver);
+
+static struct mdio_device_id __maybe_unused adin_tbl[] = {
+ { PHY_ID_MATCH_MODEL(PHY_ID_ADIN1200) },
+ { PHY_ID_MATCH_MODEL(PHY_ID_ADIN1300) },
+ { }
+};
+
+MODULE_DEVICE_TABLE(mdio, adin_tbl);
+MODULE_DESCRIPTION("Analog Devices Industrial Ethernet PHY driver");
+MODULE_LICENSE("GPL");
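Note that adin_read_mmd()/adin_write_mmd() use the unlocked __mdiobus_*() accessors: phylib takes the MDIO bus lock before dispatching to a driver's .read_mmd/.write_mmd hook, which is what keeps the pointer-write/data-access pair atomic with respect to other bus users. A sketch of the calling side, modelled on phy_read_mmd() in drivers/net/phy/phy-core.c:

	int phy_read_mmd(struct phy_device *phydev, int devad, u32 regnum)
	{
		int val;

		mutex_lock(&phydev->mdio.bus->mdio_lock);
		val = __phy_read_mmd(phydev, devad, regnum);	/* dispatches to adin_read_mmd() */
		mutex_unlock(&phydev->mdio.bus->mdio_lock);

		return val;
	}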
diff --git a/drivers/net/phy/at803x.c b/drivers/net/phy/at803x.c
index 6ad8b1c63c34..2aa7b2e60046 100644
--- a/drivers/net/phy/at803x.c
+++ b/drivers/net/phy/at803x.c
@@ -249,10 +249,6 @@ static int at803x_config_init(struct phy_device *phydev)
{
int ret;
- ret = genphy_config_init(phydev);
- if (ret < 0)
- return ret;
-
/* The RX and TX delay default is:
* after HW reset: RX delay enabled and TX delay disabled
* after SW reset: RX delay enabled, while TX delay retains the
diff --git a/drivers/net/phy/dp83822.c b/drivers/net/phy/dp83822.c
index 7ed4760fb155..8a4b1d167ce2 100644
--- a/drivers/net/phy/dp83822.c
+++ b/drivers/net/phy/dp83822.c
@@ -254,13 +254,8 @@ static int dp83822_config_intr(struct phy_device *phydev)
static int dp83822_config_init(struct phy_device *phydev)
{
- int err;
int value;
- err = genphy_config_init(phydev);
- if (err < 0)
- return err;
-
value = DP83822_WOL_MAGIC_EN | DP83822_WOL_SECURE_ON | DP83822_WOL_EN;
return phy_write_mmd(phydev, DP83822_DEVADDR, MII_DP83822_WOL_CFG,
diff --git a/drivers/net/phy/dp83848.c b/drivers/net/phy/dp83848.c
index 6f9bc7d91f17..54c7c1b44e4d 100644
--- a/drivers/net/phy/dp83848.c
+++ b/drivers/net/phy/dp83848.c
@@ -68,13 +68,8 @@ static int dp83848_config_intr(struct phy_device *phydev)
static int dp83848_config_init(struct phy_device *phydev)
{
- int err;
int val;
- err = genphy_config_init(phydev);
- if (err < 0)
- return err;
-
/* DP83620 always reports Auto Negotiation Ability on BMSR. Instead,
* we check initial value of BMCR Auto negotiation enable bit
*/
@@ -113,13 +108,13 @@ MODULE_DEVICE_TABLE(mdio, dp83848_tbl);
static struct phy_driver dp83848_driver[] = {
DP83848_PHY_DRIVER(TI_DP83848C_PHY_ID, "TI DP83848C 10/100 Mbps PHY",
- genphy_config_init),
+ NULL),
DP83848_PHY_DRIVER(NS_DP83848C_PHY_ID, "NS DP83848C 10/100 Mbps PHY",
- genphy_config_init),
+ NULL),
DP83848_PHY_DRIVER(TI_DP83620_PHY_ID, "TI DP83620 10/100 Mbps PHY",
dp83848_config_init),
DP83848_PHY_DRIVER(TLK10X_PHY_ID, "TI TLK10X 10/100 Mbps PHY",
- genphy_config_init),
+ NULL),
};
module_phy_driver(dp83848_driver);
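This cluster of removals (at803x, dp83822, dp83848, dp83tc811) drops calls to genphy_config_init(), whose feature clamping became redundant once supported features were determined at probe time; where nothing else remains, the .config_init hook is simply left NULL, which phylib treats as nothing to do. A reduced sketch of the relevant core behaviour, modelled on phy_init_hw() in drivers/net/phy/phy_device.c:

	int example_phy_init_hw(struct phy_device *phydev)
	{
		int ret = 0;

		if (phydev->drv->config_init)	/* hook is optional */
			ret = phydev->drv->config_init(phydev);

		return ret;
	}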
diff --git a/drivers/net/phy/dp83tc811.c b/drivers/net/phy/dp83tc811.c
index ac27da16824d..06f08832ebcd 100644
--- a/drivers/net/phy/dp83tc811.c
+++ b/drivers/net/phy/dp83tc811.c
@@ -277,10 +277,6 @@ static int dp83811_config_init(struct phy_device *phydev)
{
int value, err;
- err = genphy_config_init(phydev);
- if (err < 0)
- return err;
-
value = phy_read(phydev, MII_DP83811_SGMII_CTRL);
if (phydev->interface == PHY_INTERFACE_MODE_SGMII) {
err = phy_write(phydev, MII_DP83811_SGMII_CTRL,
diff --git a/drivers/net/phy/mdio-aspeed.c b/drivers/net/phy/mdio-aspeed.c
new file mode 100644
index 000000000000..cad820568f75
--- /dev/null
+++ b/drivers/net/phy/mdio-aspeed.c
@@ -0,0 +1,157 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/* Copyright (C) 2019 IBM Corp. */
+
+#include <linux/bitfield.h>
+#include <linux/delay.h>
+#include <linux/iopoll.h>
+#include <linux/mdio.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_mdio.h>
+#include <linux/phy.h>
+#include <linux/platform_device.h>
+
+#define DRV_NAME "mdio-aspeed"
+
+#define ASPEED_MDIO_CTRL 0x0
+#define ASPEED_MDIO_CTRL_FIRE BIT(31)
+#define ASPEED_MDIO_CTRL_ST BIT(28)
+#define ASPEED_MDIO_CTRL_ST_C45 0
+#define ASPEED_MDIO_CTRL_ST_C22 1
+#define ASPEED_MDIO_CTRL_OP GENMASK(27, 26)
+#define MDIO_C22_OP_WRITE 0b01
+#define MDIO_C22_OP_READ 0b10
+#define ASPEED_MDIO_CTRL_PHYAD GENMASK(25, 21)
+#define ASPEED_MDIO_CTRL_REGAD GENMASK(20, 16)
+#define ASPEED_MDIO_CTRL_MIIWDATA GENMASK(15, 0)
+
+#define ASPEED_MDIO_DATA 0x4
+#define ASPEED_MDIO_DATA_MDC_THRES GENMASK(31, 24)
+#define ASPEED_MDIO_DATA_MDIO_EDGE BIT(23)
+#define ASPEED_MDIO_DATA_MDIO_LATCH GENMASK(22, 20)
+#define ASPEED_MDIO_DATA_IDLE BIT(16)
+#define ASPEED_MDIO_DATA_MIIRDATA GENMASK(15, 0)
+
+#define ASPEED_MDIO_INTERVAL_US 100
+#define ASPEED_MDIO_TIMEOUT_US (ASPEED_MDIO_INTERVAL_US * 10)
+
+struct aspeed_mdio {
+ void __iomem *base;
+};
+
+static int aspeed_mdio_read(struct mii_bus *bus, int addr, int regnum)
+{
+ struct aspeed_mdio *ctx = bus->priv;
+ u32 ctrl;
+ u32 data;
+ int rc;
+
+ dev_dbg(&bus->dev, "%s: addr: %d, regnum: %d\n", __func__, addr,
+ regnum);
+
+ /* Just clause 22 for the moment */
+ if (regnum & MII_ADDR_C45)
+ return -EOPNOTSUPP;
+
+ ctrl = ASPEED_MDIO_CTRL_FIRE
+ | FIELD_PREP(ASPEED_MDIO_CTRL_ST, ASPEED_MDIO_CTRL_ST_C22)
+ | FIELD_PREP(ASPEED_MDIO_CTRL_OP, MDIO_C22_OP_READ)
+ | FIELD_PREP(ASPEED_MDIO_CTRL_PHYAD, addr)
+ | FIELD_PREP(ASPEED_MDIO_CTRL_REGAD, regnum);
+
+ iowrite32(ctrl, ctx->base + ASPEED_MDIO_CTRL);
+
+ rc = readl_poll_timeout(ctx->base + ASPEED_MDIO_DATA, data,
+ data & ASPEED_MDIO_DATA_IDLE,
+ ASPEED_MDIO_INTERVAL_US,
+ ASPEED_MDIO_TIMEOUT_US);
+ if (rc < 0)
+ return rc;
+
+ return FIELD_GET(ASPEED_MDIO_DATA_MIIRDATA, data);
+}
+
+static int aspeed_mdio_write(struct mii_bus *bus, int addr, int regnum, u16 val)
+{
+ struct aspeed_mdio *ctx = bus->priv;
+ u32 ctrl;
+
+ dev_dbg(&bus->dev, "%s: addr: %d, regnum: %d, val: 0x%x\n",
+ __func__, addr, regnum, val);
+
+ /* Just clause 22 for the moment */
+ if (regnum & MII_ADDR_C45)
+ return -EOPNOTSUPP;
+
+ ctrl = ASPEED_MDIO_CTRL_FIRE
+ | FIELD_PREP(ASPEED_MDIO_CTRL_ST, ASPEED_MDIO_CTRL_ST_C22)
+ | FIELD_PREP(ASPEED_MDIO_CTRL_OP, MDIO_C22_OP_WRITE)
+ | FIELD_PREP(ASPEED_MDIO_CTRL_PHYAD, addr)
+ | FIELD_PREP(ASPEED_MDIO_CTRL_REGAD, regnum)
+ | FIELD_PREP(ASPEED_MDIO_CTRL_MIIWDATA, val);
+
+ iowrite32(ctrl, ctx->base + ASPEED_MDIO_CTRL);
+
+ return readl_poll_timeout(ctx->base + ASPEED_MDIO_CTRL, ctrl,
+ !(ctrl & ASPEED_MDIO_CTRL_FIRE),
+ ASPEED_MDIO_INTERVAL_US,
+ ASPEED_MDIO_TIMEOUT_US);
+}
+
+static int aspeed_mdio_probe(struct platform_device *pdev)
+{
+ struct aspeed_mdio *ctx;
+ struct mii_bus *bus;
+ int rc;
+
+ bus = devm_mdiobus_alloc_size(&pdev->dev, sizeof(*ctx));
+ if (!bus)
+ return -ENOMEM;
+
+ ctx = bus->priv;
+ ctx->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(ctx->base))
+ return PTR_ERR(ctx->base);
+
+ bus->name = DRV_NAME;
+ snprintf(bus->id, MII_BUS_ID_SIZE, "%s%d", pdev->name, pdev->id);
+ bus->parent = &pdev->dev;
+ bus->read = aspeed_mdio_read;
+ bus->write = aspeed_mdio_write;
+
+ rc = of_mdiobus_register(bus, pdev->dev.of_node);
+ if (rc) {
+ dev_err(&pdev->dev, "Cannot register MDIO bus!\n");
+ return rc;
+ }
+
+ platform_set_drvdata(pdev, bus);
+
+ return 0;
+}
+
+static int aspeed_mdio_remove(struct platform_device *pdev)
+{
+ mdiobus_unregister(platform_get_drvdata(pdev));
+
+ return 0;
+}
+
+static const struct of_device_id aspeed_mdio_of_match[] = {
+ { .compatible = "aspeed,ast2600-mdio", },
+ { },
+};
+
+static struct platform_driver aspeed_mdio_driver = {
+ .driver = {
+ .name = DRV_NAME,
+ .of_match_table = aspeed_mdio_of_match,
+ },
+ .probe = aspeed_mdio_probe,
+ .remove = aspeed_mdio_remove,
+};
+
+module_platform_driver(aspeed_mdio_driver);
+
+MODULE_AUTHOR("Andrew Jeffery <andrew@aj.id.au>");
+MODULE_LICENSE("GPL");
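A note on the register helpers used above: the driver packs its CTRL word
with FIELD_PREP() over GENMASK() fields and extracts the read data with
FIELD_GET(). A minimal standalone sketch of that pack/unpack pattern
(hypothetical field layout, not the driver's actual register map):

#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/types.h>

#define EX_CTRL_FIRE	BIT(31)		/* hypothetical: start the cycle */
#define EX_CTRL_PHYAD	GENMASK(25, 21)	/* hypothetical: PHY address */
#define EX_CTRL_REGAD	GENMASK(20, 16)	/* hypothetical: register address */

static u32 example_pack_ctrl(u8 phyad, u8 regad)
{
	/* FIELD_PREP() shifts each value into its mask's position */
	return EX_CTRL_FIRE | FIELD_PREP(EX_CTRL_PHYAD, phyad) |
	       FIELD_PREP(EX_CTRL_REGAD, regad);
}

static u8 example_unpack_phyad(u32 ctrl)
{
	/* FIELD_GET() extracts and right-justifies the field */
	return FIELD_GET(EX_CTRL_PHYAD, ctrl);
}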
diff --git a/drivers/net/phy/mdio-bcm-iproc.c b/drivers/net/phy/mdio-bcm-iproc.c
index 7d0f388d8db8..7e9975d25066 100644
--- a/drivers/net/phy/mdio-bcm-iproc.c
+++ b/drivers/net/phy/mdio-bcm-iproc.c
@@ -123,15 +123,13 @@ static int iproc_mdio_probe(struct platform_device *pdev)
{
struct iproc_mdio_priv *priv;
struct mii_bus *bus;
- struct resource *res;
int rc;
priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- priv->base = devm_ioremap_resource(&pdev->dev, res);
+ priv->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(priv->base)) {
dev_err(&pdev->dev, "failed to ioremap register\n");
return PTR_ERR(priv->base);
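The ioremap conversions in this and the following MDIO drivers are all the
same substitution: devm_platform_ioremap_resource(pdev, 0) folds the
platform_get_resource() + devm_ioremap_resource() pair into one call. A
simplified sketch of what the helper does internally, per its kernel
implementation:

static void __iomem *example_ioremap(struct platform_device *pdev,
				     unsigned int index)
{
	struct resource *res;

	/* the helper wraps exactly this two-step lookup and mapping */
	res = platform_get_resource(pdev, IORESOURCE_MEM, index);
	return devm_ioremap_resource(&pdev->dev, res);
}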
diff --git a/drivers/net/phy/mdio-cavium.h b/drivers/net/phy/mdio-cavium.h
index ed5f9bb5448d..b7f89ad27465 100644
--- a/drivers/net/phy/mdio-cavium.h
+++ b/drivers/net/phy/mdio-cavium.h
@@ -108,6 +108,8 @@ static inline u64 oct_mdio_readq(u64 addr)
return cvmx_read_csr(addr);
}
#else
+#include <linux/io-64-nonatomic-lo-hi.h>
+
#define oct_mdio_writeq(val, addr) writeq(val, (void *)addr)
#define oct_mdio_readq(addr) readq((void *)addr)
#endif
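The new include matters for 32-bit builds: <linux/io-64-nonatomic-lo-hi.h>
supplies writeq()/readq() as two 32-bit accesses (low word first) on
architectures without native 64-bit MMIO. A simplified sketch of the
fallback it provides, per the lo-hi variant's documented semantics:

/* Simplified: the two halves are not atomic w.r.t. the device */
static inline void example_lo_hi_writeq(u64 val, void __iomem *addr)
{
	writel((u32)val, addr);			/* low word first */
	writel((u32)(val >> 32), addr + 4);	/* then high word */
}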
diff --git a/drivers/net/phy/mdio-hisi-femac.c b/drivers/net/phy/mdio-hisi-femac.c
index 287f3ccf1da1..f231c2fbb1de 100644
--- a/drivers/net/phy/mdio-hisi-femac.c
+++ b/drivers/net/phy/mdio-hisi-femac.c
@@ -74,7 +74,6 @@ static int hisi_femac_mdio_probe(struct platform_device *pdev)
struct device_node *np = pdev->dev.of_node;
struct mii_bus *bus;
struct hisi_femac_mdio_data *data;
- struct resource *res;
int ret;
bus = mdiobus_alloc_size(sizeof(*data));
@@ -88,8 +87,7 @@ static int hisi_femac_mdio_probe(struct platform_device *pdev)
bus->parent = &pdev->dev;
data = bus->priv;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- data->membase = devm_ioremap_resource(&pdev->dev, res);
+ data->membase = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(data->membase)) {
ret = PTR_ERR(data->membase);
goto err_out_free_mdiobus;
diff --git a/drivers/net/phy/mdio-moxart.c b/drivers/net/phy/mdio-moxart.c
index af3910fe8ec7..2d16fc4173c1 100644
--- a/drivers/net/phy/mdio-moxart.c
+++ b/drivers/net/phy/mdio-moxart.c
@@ -113,7 +113,6 @@ static int moxart_mdio_probe(struct platform_device *pdev)
struct device_node *np = pdev->dev.of_node;
struct mii_bus *bus;
struct moxart_mdio_data *data;
- struct resource *res;
int ret, i;
bus = mdiobus_alloc_size(sizeof(*data));
@@ -138,8 +137,7 @@ static int moxart_mdio_probe(struct platform_device *pdev)
bus->irq[i] = PHY_IGNORE_INTERRUPT;
data = bus->priv;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- data->base = devm_ioremap_resource(&pdev->dev, res);
+ data->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(data->base)) {
ret = PTR_ERR(data->base);
goto err_out_free_mdiobus;
diff --git a/drivers/net/phy/mdio-mux-meson-g12a.c b/drivers/net/phy/mdio-mux-meson-g12a.c
index 6644762ff2ab..7a9ad54582e1 100644
--- a/drivers/net/phy/mdio-mux-meson-g12a.c
+++ b/drivers/net/phy/mdio-mux-meson-g12a.c
@@ -302,7 +302,6 @@ static int g12a_mdio_mux_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct g12a_mdio_mux *priv;
- struct resource *res;
int ret;
priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
@@ -311,8 +310,7 @@ static int g12a_mdio_mux_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, priv);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- priv->regs = devm_ioremap_resource(dev, res);
+ priv->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(priv->regs))
return PTR_ERR(priv->regs);
diff --git a/drivers/net/phy/mdio-sun4i.c b/drivers/net/phy/mdio-sun4i.c
index 20ffd8fb79ce..58d6504495e0 100644
--- a/drivers/net/phy/mdio-sun4i.c
+++ b/drivers/net/phy/mdio-sun4i.c
@@ -92,7 +92,6 @@ static int sun4i_mdio_probe(struct platform_device *pdev)
struct device_node *np = pdev->dev.of_node;
struct mii_bus *bus;
struct sun4i_mdio_data *data;
- struct resource *res;
int ret;
bus = mdiobus_alloc_size(sizeof(*data));
@@ -106,8 +105,7 @@ static int sun4i_mdio_probe(struct platform_device *pdev)
bus->parent = &pdev->dev;
data = bus->priv;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- data->membase = devm_ioremap_resource(&pdev->dev, res);
+ data->membase = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(data->membase)) {
ret = PTR_ERR(data->membase);
goto err_out_free_mdiobus;
diff --git a/drivers/net/phy/mdio-xgene.c b/drivers/net/phy/mdio-xgene.c
index 717cc2a056e8..34990eaa3298 100644
--- a/drivers/net/phy/mdio-xgene.c
+++ b/drivers/net/phy/mdio-xgene.c
@@ -328,7 +328,6 @@ static int xgene_mdio_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct mii_bus *mdio_bus;
const struct of_device_id *of_id;
- struct resource *res;
struct xgene_mdio_pdata *pdata;
void __iomem *csr_base;
int mdio_id = 0, ret = 0;
@@ -355,8 +354,7 @@ static int xgene_mdio_probe(struct platform_device *pdev)
pdata->mdio_id = mdio_id;
pdata->dev = dev;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- csr_base = devm_ioremap_resource(dev, res);
+ csr_base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(csr_base))
return PTR_ERR(csr_base);
pdata->mac_csr_addr = csr_base;
diff --git a/drivers/net/phy/meson-gxl.c b/drivers/net/phy/meson-gxl.c
index fa80d6dce8ee..e8f2ca625837 100644
--- a/drivers/net/phy/meson-gxl.c
+++ b/drivers/net/phy/meson-gxl.c
@@ -136,7 +136,7 @@ static int meson_gxl_config_init(struct phy_device *phydev)
if (ret)
return ret;
- return genphy_config_init(phydev);
+ return 0;
}
/* This function is provided to cope with the possible failures of this phy
diff --git a/drivers/net/phy/microchip.c b/drivers/net/phy/microchip.c
index eb1b3287fe08..a644e8e5071c 100644
--- a/drivers/net/phy/microchip.c
+++ b/drivers/net/phy/microchip.c
@@ -305,7 +305,6 @@ static int lan88xx_config_init(struct phy_device *phydev)
{
int val;
- genphy_config_init(phydev);
/*Zerodetect delay enable */
val = phy_read_mmd(phydev, MDIO_MMD_PCS,
PHY_ARDENNES_MMD_DEV_3_PHY_CFG);
diff --git a/drivers/net/phy/microchip_t1.c b/drivers/net/phy/microchip_t1.c
index 3d09b471632c..001def4509c2 100644
--- a/drivers/net/phy/microchip_t1.c
+++ b/drivers/net/phy/microchip_t1.c
@@ -48,7 +48,6 @@ static struct phy_driver microchip_t1_phy_driver[] = {
.features = PHY_BASIC_T1_FEATURES,
- .config_init = genphy_config_init,
.config_aneg = genphy_config_aneg,
.ack_interrupt = lan87xx_phy_ack_interrupt,
diff --git a/drivers/net/phy/mscc.c b/drivers/net/phy/mscc.c
index 645d354ffb48..7ada1fd9ca71 100644
--- a/drivers/net/phy/mscc.c
+++ b/drivers/net/phy/mscc.c
@@ -1725,7 +1725,7 @@ static int vsc8584_config_init(struct phy_device *phydev)
return ret;
}
- return genphy_config_init(phydev);
+ return 0;
err:
mutex_unlock(&phydev->mdio.bus->mdio_lock);
@@ -1767,7 +1767,7 @@ static int vsc85xx_config_init(struct phy_device *phydev)
return rc;
}
- return genphy_config_init(phydev);
+ return 0;
}
static int vsc8584_did_interrupt(struct phy_device *phydev)
diff --git a/drivers/net/phy/phy-core.c b/drivers/net/phy/phy-core.c
index 16667fbac8bf..369903d9b6ec 100644
--- a/drivers/net/phy/phy-core.c
+++ b/drivers/net/phy/phy-core.c
@@ -207,14 +207,14 @@ size_t phy_speeds(unsigned int *speeds, size_t size,
return count;
}
-static int __set_phy_supported(struct phy_device *phydev, u32 max_speed)
+static int __set_linkmode_max_speed(u32 max_speed, unsigned long *addr)
{
const struct phy_setting *p;
int i;
for (i = 0, p = settings; i < ARRAY_SIZE(settings); i++, p++) {
if (p->speed > max_speed)
- linkmode_clear_bit(p->bit, phydev->supported);
+ linkmode_clear_bit(p->bit, addr);
else
break;
}
@@ -222,6 +222,11 @@ static int __set_phy_supported(struct phy_device *phydev, u32 max_speed)
return 0;
}
+static int __set_phy_supported(struct phy_device *phydev, u32 max_speed)
+{
+ return __set_linkmode_max_speed(max_speed, phydev->supported);
+}
+
int phy_set_max_speed(struct phy_device *phydev, u32 max_speed)
{
int err;
@@ -310,6 +315,34 @@ void phy_resolve_aneg_linkmode(struct phy_device *phydev)
}
EXPORT_SYMBOL_GPL(phy_resolve_aneg_linkmode);
+static int phy_resolve_min_speed(struct phy_device *phydev, bool fdx_only)
+{
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(common);
+ int i = ARRAY_SIZE(settings);
+
+ linkmode_and(common, phydev->lp_advertising, phydev->advertising);
+
+ while (--i >= 0) {
+ if (test_bit(settings[i].bit, common)) {
+ if (fdx_only && settings[i].duplex != DUPLEX_FULL)
+ continue;
+ return settings[i].speed;
+ }
+ }
+
+ return SPEED_UNKNOWN;
+}
+
+int phy_speed_down_core(struct phy_device *phydev)
+{
+ int min_common_speed = phy_resolve_min_speed(phydev, true);
+
+ if (min_common_speed == SPEED_UNKNOWN)
+ return -EINVAL;
+
+ return __set_linkmode_max_speed(min_common_speed, phydev->advertising);
+}
+
static void mmd_phy_indirect(struct mii_bus *bus, int phy_addr, int devad,
u16 regnum)
{
@@ -783,24 +816,43 @@ int phy_write_paged(struct phy_device *phydev, int page, u32 regnum, u16 val)
EXPORT_SYMBOL(phy_write_paged);
/**
- * phy_modify_paged() - Convenience function for modifying a paged register
+ * phy_modify_paged_changed() - Function for modifying a paged register
* @phydev: a pointer to a &struct phy_device
* @page: the page for the phy
* @regnum: register number
* @mask: bit mask of bits to clear
* @set: bit mask of bits to set
*
- * Same rules as for phy_read() and phy_write().
+ * Returns negative errno, 0 if there was no change, and 1 in case of change
*/
-int phy_modify_paged(struct phy_device *phydev, int page, u32 regnum,
- u16 mask, u16 set)
+int phy_modify_paged_changed(struct phy_device *phydev, int page, u32 regnum,
+ u16 mask, u16 set)
{
int ret = 0, oldpage;
oldpage = phy_select_page(phydev, page);
if (oldpage >= 0)
- ret = __phy_modify(phydev, regnum, mask, set);
+ ret = __phy_modify_changed(phydev, regnum, mask, set);
return phy_restore_page(phydev, oldpage, ret);
}
+EXPORT_SYMBOL(phy_modify_paged_changed);
+
+/**
+ * phy_modify_paged() - Convenience function for modifying a paged register
+ * @phydev: a pointer to a &struct phy_device
+ * @page: the page for the phy
+ * @regnum: register number
+ * @mask: bit mask of bits to clear
+ * @set: bit mask of bits to set
+ *
+ * Same rules as for phy_read() and phy_write().
+ */
+int phy_modify_paged(struct phy_device *phydev, int page, u32 regnum,
+ u16 mask, u16 set)
+{
+ int ret = phy_modify_paged_changed(phydev, page, regnum, mask, set);
+
+ return ret < 0 ? ret : 0;
+}
EXPORT_SYMBOL(phy_modify_paged);
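Callers that need to know whether a paged write actually changed the
register can now use the _changed variant directly. A minimal sketch with
hypothetical page and register numbers (not from any real PHY):

static int example_set_feature(struct phy_device *phydev)
{
	/* returns <0 on error, 0 if unchanged, 1 if changed */
	int ret = phy_modify_paged_changed(phydev, 0xa42 /* hypothetical */,
					   0x11 /* hypothetical */,
					   BIT(3), BIT(3));
	if (ret < 0)
		return ret;

	/* only restart aneg when something really changed */
	return ret ? genphy_restart_aneg(phydev) : 0;
}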
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
index 6b0f89369b46..35d29a823af8 100644
--- a/drivers/net/phy/phy.c
+++ b/drivers/net/phy/phy.c
@@ -608,38 +608,21 @@ static int phy_poll_aneg_done(struct phy_device *phydev)
*/
int phy_speed_down(struct phy_device *phydev, bool sync)
{
- __ETHTOOL_DECLARE_LINK_MODE_MASK(adv_old);
- __ETHTOOL_DECLARE_LINK_MODE_MASK(adv);
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(adv_tmp);
int ret;
if (phydev->autoneg != AUTONEG_ENABLE)
return 0;
- linkmode_copy(adv_old, phydev->advertising);
- linkmode_copy(adv, phydev->lp_advertising);
- linkmode_and(adv, adv, phydev->supported);
-
- if (linkmode_test_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT, adv) ||
- linkmode_test_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT, adv)) {
- linkmode_clear_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT,
- phydev->advertising);
- linkmode_clear_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT,
- phydev->advertising);
- linkmode_clear_bit(ETHTOOL_LINK_MODE_1000baseT_Half_BIT,
- phydev->advertising);
- linkmode_clear_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
- phydev->advertising);
- } else if (linkmode_test_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT,
- adv) ||
- linkmode_test_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT,
- adv)) {
- linkmode_clear_bit(ETHTOOL_LINK_MODE_1000baseT_Half_BIT,
- phydev->advertising);
- linkmode_clear_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
- phydev->advertising);
- }
+ linkmode_copy(adv_tmp, phydev->advertising);
+
+ ret = phy_speed_down_core(phydev);
+ if (ret)
+ return ret;
- if (linkmode_equal(phydev->advertising, adv_old))
+ linkmode_copy(phydev->adv_old, adv_tmp);
+
+ if (linkmode_equal(phydev->advertising, adv_tmp))
return 0;
ret = phy_config_aneg(phydev);
@@ -658,30 +641,19 @@ EXPORT_SYMBOL_GPL(phy_speed_down);
*/
int phy_speed_up(struct phy_device *phydev)
{
- __ETHTOOL_DECLARE_LINK_MODE_MASK(all_speeds) = { 0, };
- __ETHTOOL_DECLARE_LINK_MODE_MASK(not_speeds);
- __ETHTOOL_DECLARE_LINK_MODE_MASK(supported);
- __ETHTOOL_DECLARE_LINK_MODE_MASK(adv_old);
- __ETHTOOL_DECLARE_LINK_MODE_MASK(speeds);
-
- linkmode_copy(adv_old, phydev->advertising);
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(adv_tmp);
if (phydev->autoneg != AUTONEG_ENABLE)
return 0;
- linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT, all_speeds);
- linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT, all_speeds);
- linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT, all_speeds);
- linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT, all_speeds);
- linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Half_BIT, all_speeds);
- linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT, all_speeds);
+ if (linkmode_empty(phydev->adv_old))
+ return 0;
- linkmode_andnot(not_speeds, adv_old, all_speeds);
- linkmode_copy(supported, phydev->supported);
- linkmode_and(speeds, supported, all_speeds);
- linkmode_or(phydev->advertising, not_speeds, speeds);
+ linkmode_copy(adv_tmp, phydev->advertising);
+ linkmode_copy(phydev->advertising, phydev->adv_old);
+ linkmode_zero(phydev->adv_old);
- if (linkmode_equal(phydev->advertising, adv_old))
+ if (linkmode_equal(phydev->advertising, adv_tmp))
return 0;
return phy_config_aneg(phydev);
@@ -939,8 +911,8 @@ void phy_state_machine(struct work_struct *work)
if (phydev->link) {
phydev->link = 0;
phy_link_down(phydev, true);
- do_suspend = true;
}
+ do_suspend = true;
break;
}
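With adv_old stored in struct phy_device, the pair is now symmetric:
phy_speed_down() saves the advertisement and advertises only the lowest
speed both link partners support, and phy_speed_up() restores the saved
mask. A sketch of the intended caller pattern in a MAC driver's
suspend/resume path (hypothetical driver, assuming ndev->phydev is
attached):

static int example_suspend(struct net_device *ndev)
{
	/* drop to the slowest common speed, e.g. for WoL power savings */
	return phy_speed_down(ndev->phydev, false);
}

static int example_resume(struct net_device *ndev)
{
	/* restore the advertisement that phy_speed_down() saved */
	return phy_speed_up(ndev->phydev);
}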
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
index 27ebc2c6c2d0..d347ddcac45b 100644
--- a/drivers/net/phy/phy_device.c
+++ b/drivers/net/phy/phy_device.c
@@ -1564,24 +1564,20 @@ EXPORT_SYMBOL(phy_reset_after_clk_enable);
*/
static int genphy_config_advert(struct phy_device *phydev)
{
- u32 advertise;
- int bmsr, adv;
- int err, changed = 0;
+ int err, bmsr, changed = 0;
+ u32 adv;
/* Only allow advertising what this PHY supports */
linkmode_and(phydev->advertising, phydev->advertising,
phydev->supported);
- if (!ethtool_convert_link_mode_to_legacy_u32(&advertise,
- phydev->advertising))
- phydev_warn(phydev, "PHY advertising (%*pb) more modes than genphy supports, some modes not advertised.\n",
- __ETHTOOL_LINK_MODE_MASK_NBITS,
- phydev->advertising);
+
+ adv = linkmode_adv_to_mii_adv_t(phydev->advertising);
/* Setup standard advertisement */
err = phy_modify_changed(phydev, MII_ADVERTISE,
ADVERTISE_ALL | ADVERTISE_100BASE4 |
ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM,
- ethtool_adv_to_mii_adv_t(advertise));
+ adv);
if (err < 0)
return err;
if (err > 0)
@@ -1598,13 +1594,7 @@ static int genphy_config_advert(struct phy_device *phydev)
if (!(bmsr & BMSR_ESTATEN))
return changed;
- /* Configure gigabit if it's supported */
- adv = 0;
- if (linkmode_test_bit(ETHTOOL_LINK_MODE_1000baseT_Half_BIT,
- phydev->supported) ||
- linkmode_test_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
- phydev->supported))
- adv = ethtool_adv_to_mii_ctrl1000_t(advertise);
+ adv = linkmode_adv_to_mii_ctrl1000_t(phydev->advertising);
err = phy_modify_changed(phydev, MII_CTRL1000,
ADVERTISE_1000FULL | ADVERTISE_1000HALF,
@@ -1681,18 +1671,20 @@ int genphy_restart_aneg(struct phy_device *phydev)
EXPORT_SYMBOL(genphy_restart_aneg);
/**
- * genphy_config_aneg - restart auto-negotiation or write BMCR
+ * __genphy_config_aneg - restart auto-negotiation or write BMCR
* @phydev: target phy_device struct
+ * @changed: whether autoneg is requested
*
* Description: If auto-negotiation is enabled, we configure the
* advertising, and then restart auto-negotiation. If it is not
* enabled, then we write the BMCR.
*/
-int genphy_config_aneg(struct phy_device *phydev)
+int __genphy_config_aneg(struct phy_device *phydev, bool changed)
{
- int err, changed;
+ int err;
- changed = genphy_config_eee_advert(phydev);
+ if (genphy_config_eee_advert(phydev))
+ changed = true;
if (AUTONEG_ENABLE != phydev->autoneg)
return genphy_setup_forced(phydev);
@@ -1700,10 +1692,10 @@ int genphy_config_aneg(struct phy_device *phydev)
err = genphy_config_advert(phydev);
if (err < 0) /* error */
return err;
+ else if (err)
+ changed = true;
- changed |= err;
-
- if (changed == 0) {
+ if (!changed) {
/* Advertisement hasn't changed, but maybe aneg was never on to
* begin with? Or maybe phy was isolated?
*/
@@ -1713,18 +1705,15 @@ int genphy_config_aneg(struct phy_device *phydev)
return ctl;
if (!(ctl & BMCR_ANENABLE) || (ctl & BMCR_ISOLATE))
- changed = 1; /* do restart aneg */
+ changed = true; /* do restart aneg */
}
/* Only restart aneg if we are advertising something different
* than we were before.
*/
- if (changed > 0)
- return genphy_restart_aneg(phydev);
-
- return 0;
+ return changed ? genphy_restart_aneg(phydev) : 0;
}
-EXPORT_SYMBOL(genphy_config_aneg);
+EXPORT_SYMBOL(__genphy_config_aneg);
/**
* genphy_aneg_done - return auto-negotiation status
@@ -1805,7 +1794,7 @@ EXPORT_SYMBOL(genphy_update_link);
*/
int genphy_read_status(struct phy_device *phydev)
{
- int adv, lpa, lpagb, err, old_link = phydev->link;
+ int lpa, lpagb, err, old_link = phydev->link;
/* Update the link, but return if there was an error */
err = genphy_update_link(phydev);
@@ -1821,19 +1810,18 @@ int genphy_read_status(struct phy_device *phydev)
phydev->pause = 0;
phydev->asym_pause = 0;
- linkmode_zero(phydev->lp_advertising);
-
if (phydev->autoneg == AUTONEG_ENABLE && phydev->autoneg_complete) {
if (phydev->is_gigabit_capable) {
lpagb = phy_read(phydev, MII_STAT1000);
if (lpagb < 0)
return lpagb;
- adv = phy_read(phydev, MII_CTRL1000);
- if (adv < 0)
- return adv;
-
if (lpagb & LPA_1000MSFAIL) {
+ int adv = phy_read(phydev, MII_CTRL1000);
+
+ if (adv < 0)
+ return adv;
+
if (adv & CTL1000_ENABLE_MASTER)
phydev_err(phydev, "Master/Slave resolution failed, maybe conflicting manual settings?\n");
else
@@ -1907,57 +1895,6 @@ int genphy_soft_reset(struct phy_device *phydev)
}
EXPORT_SYMBOL(genphy_soft_reset);
-int genphy_config_init(struct phy_device *phydev)
-{
- int val;
- __ETHTOOL_DECLARE_LINK_MODE_MASK(features) = { 0, };
-
- linkmode_set_bit_array(phy_basic_ports_array,
- ARRAY_SIZE(phy_basic_ports_array),
- features);
- linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, features);
- linkmode_set_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, features);
-
- /* Do we support autonegotiation? */
- val = phy_read(phydev, MII_BMSR);
- if (val < 0)
- return val;
-
- if (val & BMSR_ANEGCAPABLE)
- linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, features);
-
- if (val & BMSR_100FULL)
- linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT, features);
- if (val & BMSR_100HALF)
- linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT, features);
- if (val & BMSR_10FULL)
- linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT, features);
- if (val & BMSR_10HALF)
- linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT, features);
-
- if (val & BMSR_ESTATEN) {
- val = phy_read(phydev, MII_ESTATUS);
- if (val < 0)
- return val;
-
- if (val & ESTATUS_1000_TFULL)
- linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
- features);
- if (val & ESTATUS_1000_THALF)
- linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Half_BIT,
- features);
- if (val & ESTATUS_1000_XFULL)
- linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseX_Full_BIT,
- features);
- }
-
- linkmode_and(phydev->supported, phydev->supported, features);
- linkmode_and(phydev->advertising, phydev->advertising, features);
-
- return 0;
-}
-EXPORT_SYMBOL(genphy_config_init);
-
/**
* genphy_read_abilities - read PHY abilities from Clause 22 registers
* @phydev: target phy_device struct
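genphy_config_init() could be dropped because genphy_read_abilities()
already derives phydev->supported from BMSR/ESTATUS at probe time, so a
.config_init that merely re-masked the feature set was redundant. A sketch
of a genphy-based driver entry after this change (hypothetical device ID):

static struct phy_driver example_drv[] = { {
	PHY_ID_MATCH_EXACT(0x00112233),	/* hypothetical ID */
	.name		= "Example PHY",
	/* no .config_init: abilities come from genphy_read_abilities() */
	.suspend	= genphy_suspend,
	.resume		= genphy_resume,
} };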
diff --git a/drivers/net/phy/realtek.c b/drivers/net/phy/realtek.c
index a669945eb829..677c45985338 100644
--- a/drivers/net/phy/realtek.c
+++ b/drivers/net/phy/realtek.c
@@ -39,6 +39,16 @@
#define RTL8366RB_POWER_SAVE 0x15
#define RTL8366RB_POWER_SAVE_ON BIT(12)
+#define RTL_SUPPORTS_5000FULL BIT(14)
+#define RTL_SUPPORTS_2500FULL BIT(13)
+#define RTL_SUPPORTS_10000FULL BIT(0)
+#define RTL_ADV_2500FULL BIT(7)
+#define RTL_LPADV_10000FULL BIT(11)
+#define RTL_LPADV_5000FULL BIT(6)
+#define RTL_LPADV_2500FULL BIT(5)
+
+#define RTL_GENERIC_PHYID 0x001cc800
+
MODULE_DESCRIPTION("Realtek PHY driver");
MODULE_AUTHOR("Johnson Leung");
MODULE_LICENSE("GPL");
@@ -256,6 +266,166 @@ static int rtl8366rb_config_init(struct phy_device *phydev)
return ret;
}
+static int rtlgen_read_mmd(struct phy_device *phydev, int devnum, u16 regnum)
+{
+ int ret;
+
+ if (devnum == MDIO_MMD_PCS && regnum == MDIO_PCS_EEE_ABLE) {
+ rtl821x_write_page(phydev, 0xa5c);
+ ret = __phy_read(phydev, 0x12);
+ rtl821x_write_page(phydev, 0);
+ } else if (devnum == MDIO_MMD_AN && regnum == MDIO_AN_EEE_ADV) {
+ rtl821x_write_page(phydev, 0xa5d);
+ ret = __phy_read(phydev, 0x10);
+ rtl821x_write_page(phydev, 0);
+ } else if (devnum == MDIO_MMD_AN && regnum == MDIO_AN_EEE_LPABLE) {
+ rtl821x_write_page(phydev, 0xa5d);
+ ret = __phy_read(phydev, 0x11);
+ rtl821x_write_page(phydev, 0);
+ } else {
+ ret = -EOPNOTSUPP;
+ }
+
+ return ret;
+}
+
+static int rtlgen_write_mmd(struct phy_device *phydev, int devnum, u16 regnum,
+ u16 val)
+{
+ int ret;
+
+ if (devnum == MDIO_MMD_AN && regnum == MDIO_AN_EEE_ADV) {
+ rtl821x_write_page(phydev, 0xa5d);
+ ret = __phy_write(phydev, 0x10, val);
+ rtl821x_write_page(phydev, 0);
+ } else {
+ ret = -EOPNOTSUPP;
+ }
+
+ return ret;
+}
+
+static int rtl8125_read_mmd(struct phy_device *phydev, int devnum, u16 regnum)
+{
+ int ret = rtlgen_read_mmd(phydev, devnum, regnum);
+
+ if (ret != -EOPNOTSUPP)
+ return ret;
+
+ if (devnum == MDIO_MMD_PCS && regnum == MDIO_PCS_EEE_ABLE2) {
+ rtl821x_write_page(phydev, 0xa6e);
+ ret = __phy_read(phydev, 0x16);
+ rtl821x_write_page(phydev, 0);
+ } else if (devnum == MDIO_MMD_AN && regnum == MDIO_AN_EEE_ADV2) {
+ rtl821x_write_page(phydev, 0xa6d);
+ ret = __phy_read(phydev, 0x12);
+ rtl821x_write_page(phydev, 0);
+ } else if (devnum == MDIO_MMD_AN && regnum == MDIO_AN_EEE_LPABLE2) {
+ rtl821x_write_page(phydev, 0xa6d);
+ ret = __phy_read(phydev, 0x10);
+ rtl821x_write_page(phydev, 0);
+ }
+
+ return ret;
+}
+
+static int rtl8125_write_mmd(struct phy_device *phydev, int devnum, u16 regnum,
+ u16 val)
+{
+ int ret = rtlgen_write_mmd(phydev, devnum, regnum, val);
+
+ if (ret != -EOPNOTSUPP)
+ return ret;
+
+ if (devnum == MDIO_MMD_AN && regnum == MDIO_AN_EEE_ADV2) {
+ rtl821x_write_page(phydev, 0xa6d);
+ ret = __phy_write(phydev, 0x12, val);
+ rtl821x_write_page(phydev, 0);
+ }
+
+ return ret;
+}
+
+static int rtl8125_get_features(struct phy_device *phydev)
+{
+ int val;
+
+ val = phy_read_paged(phydev, 0xa61, 0x13);
+ if (val < 0)
+ return val;
+
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_2500baseT_Full_BIT,
+ phydev->supported, val & RTL_SUPPORTS_2500FULL);
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_5000baseT_Full_BIT,
+ phydev->supported, val & RTL_SUPPORTS_5000FULL);
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_10000baseT_Full_BIT,
+ phydev->supported, val & RTL_SUPPORTS_10000FULL);
+
+ return genphy_read_abilities(phydev);
+}
+
+static int rtl8125_config_aneg(struct phy_device *phydev)
+{
+ int ret = 0;
+
+ if (phydev->autoneg == AUTONEG_ENABLE) {
+ u16 adv2500 = 0;
+
+ if (linkmode_test_bit(ETHTOOL_LINK_MODE_2500baseT_Full_BIT,
+ phydev->advertising))
+ adv2500 = RTL_ADV_2500FULL;
+
+ ret = phy_modify_paged_changed(phydev, 0xa5d, 0x12,
+ RTL_ADV_2500FULL, adv2500);
+ if (ret < 0)
+ return ret;
+ }
+
+ return __genphy_config_aneg(phydev, ret);
+}
+
+static int rtl8125_read_status(struct phy_device *phydev)
+{
+ if (phydev->autoneg == AUTONEG_ENABLE) {
+ int lpadv = phy_read_paged(phydev, 0xa5d, 0x13);
+
+ if (lpadv < 0)
+ return lpadv;
+
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_10000baseT_Full_BIT,
+ phydev->lp_advertising, lpadv & RTL_LPADV_10000FULL);
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_5000baseT_Full_BIT,
+ phydev->lp_advertising, lpadv & RTL_LPADV_5000FULL);
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_2500baseT_Full_BIT,
+ phydev->lp_advertising, lpadv & RTL_LPADV_2500FULL);
+ }
+
+ return genphy_read_status(phydev);
+}
+
+static bool rtlgen_supports_2_5gbps(struct phy_device *phydev)
+{
+ int val;
+
+ phy_write(phydev, RTL821x_PAGE_SELECT, 0xa61);
+ val = phy_read(phydev, 0x13);
+ phy_write(phydev, RTL821x_PAGE_SELECT, 0);
+
+ return val >= 0 && val & RTL_SUPPORTS_2500FULL;
+}
+
+static int rtlgen_match_phy_device(struct phy_device *phydev)
+{
+ return phydev->phy_id == RTL_GENERIC_PHYID &&
+ !rtlgen_supports_2_5gbps(phydev);
+}
+
+static int rtl8125_match_phy_device(struct phy_device *phydev)
+{
+ return phydev->phy_id == RTL_GENERIC_PHYID &&
+ rtlgen_supports_2_5gbps(phydev);
+}
+
static struct phy_driver realtek_drvs[] = {
{
PHY_ID_MATCH_EXACT(0x00008201),
@@ -326,12 +496,26 @@ static struct phy_driver realtek_drvs[] = {
.read_page = rtl821x_read_page,
.write_page = rtl821x_write_page,
}, {
- PHY_ID_MATCH_EXACT(0x001cc800),
- .name = "Generic Realtek PHY",
+ .name = "Generic FE-GE Realtek PHY",
+ .match_phy_device = rtlgen_match_phy_device,
+ .suspend = genphy_suspend,
+ .resume = genphy_resume,
+ .read_page = rtl821x_read_page,
+ .write_page = rtl821x_write_page,
+ .read_mmd = rtlgen_read_mmd,
+ .write_mmd = rtlgen_write_mmd,
+ }, {
+ .name = "RTL8125 2.5Gbps internal",
+ .match_phy_device = rtl8125_match_phy_device,
+ .get_features = rtl8125_get_features,
+ .config_aneg = rtl8125_config_aneg,
+ .read_status = rtl8125_read_status,
.suspend = genphy_suspend,
.resume = genphy_resume,
.read_page = rtl821x_read_page,
.write_page = rtl821x_write_page,
+ .read_mmd = rtl8125_read_mmd,
+ .write_mmd = rtl8125_write_mmd,
}, {
PHY_ID_MATCH_EXACT(0x001cc961),
.name = "RTL8366RB Gigabit Ethernet",
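Because the RTL8125 shares phy_id 0x001cc800 with the plain FE/GE parts,
the ID alone cannot pick a driver; the two .match_phy_device callbacks
above disambiguate by probing the 2.5G ability bit. A simplified sketch of
the dispatch the phy core applies, under the assumption that a
match_phy_device callback takes precedence over ID comparison:

static bool example_match(struct phy_device *phydev, struct phy_driver *drv)
{
	if (drv->match_phy_device)
		return drv->match_phy_device(phydev);

	return (phydev->phy_id & drv->phy_id_mask) ==
	       (drv->phy_id & drv->phy_id_mask);
}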
diff --git a/drivers/net/phy/sfp.c b/drivers/net/phy/sfp.c
index e36c04c26866..272d5773573e 100644
--- a/drivers/net/phy/sfp.c
+++ b/drivers/net/phy/sfp.c
@@ -429,6 +429,7 @@ static umode_t sfp_hwmon_is_visible(const void *data,
return 0;
/* fall through */
case hwmon_temp_input:
+ case hwmon_temp_label:
return 0444;
default:
return 0;
@@ -447,6 +448,7 @@ static umode_t sfp_hwmon_is_visible(const void *data,
return 0;
/* fall through */
case hwmon_in_input:
+ case hwmon_in_label:
return 0444;
default:
return 0;
@@ -465,6 +467,7 @@ static umode_t sfp_hwmon_is_visible(const void *data,
return 0;
/* fall through */
case hwmon_curr_input:
+ case hwmon_curr_label:
return 0444;
default:
return 0;
@@ -492,6 +495,7 @@ static umode_t sfp_hwmon_is_visible(const void *data,
return 0;
/* fall through */
case hwmon_power_input:
+ case hwmon_power_label:
return 0444;
default:
return 0;
@@ -987,9 +991,63 @@ static int sfp_hwmon_read(struct device *dev, enum hwmon_sensor_types type,
}
}
+static const char *const sfp_hwmon_power_labels[] = {
+ "TX_power",
+ "RX_power",
+};
+
+static int sfp_hwmon_read_string(struct device *dev,
+ enum hwmon_sensor_types type,
+ u32 attr, int channel, const char **str)
+{
+ switch (type) {
+ case hwmon_curr:
+ switch (attr) {
+ case hwmon_curr_label:
+ *str = "bias";
+ return 0;
+ default:
+ return -EOPNOTSUPP;
+ }
+ break;
+ case hwmon_temp:
+ switch (attr) {
+ case hwmon_temp_label:
+ *str = "temperature";
+ return 0;
+ default:
+ return -EOPNOTSUPP;
+ }
+ break;
+ case hwmon_in:
+ switch (attr) {
+ case hwmon_in_label:
+ *str = "VCC";
+ return 0;
+ default:
+ return -EOPNOTSUPP;
+ }
+ break;
+ case hwmon_power:
+ switch (attr) {
+ case hwmon_power_label:
+ *str = sfp_hwmon_power_labels[channel];
+ return 0;
+ default:
+ return -EOPNOTSUPP;
+ }
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return -EOPNOTSUPP;
+}
+
static const struct hwmon_ops sfp_hwmon_ops = {
.is_visible = sfp_hwmon_is_visible,
.read = sfp_hwmon_read,
+ .read_string = sfp_hwmon_read_string,
};
static u32 sfp_hwmon_chip_config[] = {
@@ -1007,7 +1065,8 @@ static u32 sfp_hwmon_temp_config[] = {
HWMON_T_MAX | HWMON_T_MIN |
HWMON_T_MAX_ALARM | HWMON_T_MIN_ALARM |
HWMON_T_CRIT | HWMON_T_LCRIT |
- HWMON_T_CRIT_ALARM | HWMON_T_LCRIT_ALARM,
+ HWMON_T_CRIT_ALARM | HWMON_T_LCRIT_ALARM |
+ HWMON_T_LABEL,
0,
};
@@ -1021,7 +1080,8 @@ static u32 sfp_hwmon_vcc_config[] = {
HWMON_I_MAX | HWMON_I_MIN |
HWMON_I_MAX_ALARM | HWMON_I_MIN_ALARM |
HWMON_I_CRIT | HWMON_I_LCRIT |
- HWMON_I_CRIT_ALARM | HWMON_I_LCRIT_ALARM,
+ HWMON_I_CRIT_ALARM | HWMON_I_LCRIT_ALARM |
+ HWMON_I_LABEL,
0,
};
@@ -1035,7 +1095,8 @@ static u32 sfp_hwmon_bias_config[] = {
HWMON_C_MAX | HWMON_C_MIN |
HWMON_C_MAX_ALARM | HWMON_C_MIN_ALARM |
HWMON_C_CRIT | HWMON_C_LCRIT |
- HWMON_C_CRIT_ALARM | HWMON_C_LCRIT_ALARM,
+ HWMON_C_CRIT_ALARM | HWMON_C_LCRIT_ALARM |
+ HWMON_C_LABEL,
0,
};
@@ -1050,13 +1111,15 @@ static u32 sfp_hwmon_power_config[] = {
HWMON_P_MAX | HWMON_P_MIN |
HWMON_P_MAX_ALARM | HWMON_P_MIN_ALARM |
HWMON_P_CRIT | HWMON_P_LCRIT |
- HWMON_P_CRIT_ALARM | HWMON_P_LCRIT_ALARM,
+ HWMON_P_CRIT_ALARM | HWMON_P_LCRIT_ALARM |
+ HWMON_P_LABEL,
/* Receive power */
HWMON_P_INPUT |
HWMON_P_MAX | HWMON_P_MIN |
HWMON_P_MAX_ALARM | HWMON_P_MIN_ALARM |
HWMON_P_CRIT | HWMON_P_LCRIT |
- HWMON_P_CRIT_ALARM | HWMON_P_LCRIT_ALARM,
+ HWMON_P_CRIT_ALARM | HWMON_P_LCRIT_ALARM |
+ HWMON_P_LABEL,
0,
};
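The hwmon core builds the new *_label attributes from two pieces: the
HWMON_*_LABEL bits in the channel config arrays above, and the
.read_string callback that returns the per-channel name. The pattern,
distilled to the power channels alone:

static int example_read_string(struct device *dev,
			       enum hwmon_sensor_types type,
			       u32 attr, int channel, const char **str)
{
	/* channel 0 = TX power, channel 1 = RX power, as in the patch */
	static const char *const labels[] = { "TX_power", "RX_power" };

	if (type == hwmon_power && attr == hwmon_power_label) {
		*str = labels[channel];
		return 0;
	}

	return -EOPNOTSUPP;
}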
diff --git a/drivers/net/phy/swphy.c b/drivers/net/phy/swphy.c
index dad22481d9c1..53c214a22b95 100644
--- a/drivers/net/phy/swphy.c
+++ b/drivers/net/phy/swphy.c
@@ -22,6 +22,7 @@ struct swmii_regs {
u16 bmsr;
u16 lpa;
u16 lpagb;
+ u16 estat;
};
enum {
@@ -48,6 +49,7 @@ static const struct swmii_regs speed[] = {
[SWMII_SPEED_1000] = {
.bmsr = BMSR_ESTATEN,
.lpagb = LPA_1000FULL | LPA_1000HALF,
+ .estat = ESTATUS_1000_TFULL | ESTATUS_1000_THALF,
},
};
@@ -56,11 +58,13 @@ static const struct swmii_regs duplex[] = {
.bmsr = BMSR_ESTATEN | BMSR_100HALF,
.lpa = LPA_10HALF | LPA_100HALF,
.lpagb = LPA_1000HALF,
+ .estat = ESTATUS_1000_THALF,
},
[SWMII_DUPLEX_FULL] = {
.bmsr = BMSR_ESTATEN | BMSR_100FULL,
.lpa = LPA_10FULL | LPA_100FULL,
.lpagb = LPA_1000FULL,
+ .estat = ESTATUS_1000_TFULL,
},
};
@@ -112,6 +116,7 @@ int swphy_read_reg(int reg, const struct fixed_phy_status *state)
{
int speed_index, duplex_index;
u16 bmsr = BMSR_ANEGCAPABLE;
+ u16 estat = 0;
u16 lpagb = 0;
u16 lpa = 0;
@@ -125,6 +130,7 @@ int swphy_read_reg(int reg, const struct fixed_phy_status *state)
duplex_index = state->duplex ? SWMII_DUPLEX_FULL : SWMII_DUPLEX_HALF;
bmsr |= speed[speed_index].bmsr & duplex[duplex_index].bmsr;
+ estat |= speed[speed_index].estat & duplex[duplex_index].estat;
if (state->link) {
bmsr |= BMSR_LSTATUS | BMSR_ANEGCOMPLETE;
@@ -151,6 +157,8 @@ int swphy_read_reg(int reg, const struct fixed_phy_status *state)
return lpa;
case MII_STAT1000:
return lpagb;
+ case MII_ESTATUS:
+ return estat;
/*
* We do not support emulating Clause 45 over Clause 22 register
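Emulating MII_ESTATUS matters because genphy_read_abilities() checks
BMSR_ESTATEN and then reads the extended status register to detect gigabit
ability; before this change a 1000 Mb/s fixed link advertised BMSR_ESTATEN
but returned nothing for MII_ESTATUS. A sketch of the read path, assuming
access to the driver-local swphy.h:

static int example_check_estatus(void)
{
	const struct fixed_phy_status status = {
		.link	= 1,
		.speed	= SPEED_1000,
		.duplex	= DUPLEX_FULL,
	};

	/* with this patch, the result has ESTATUS_1000_TFULL set */
	return swphy_read_reg(MII_ESTATUS, &status);
}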
diff --git a/drivers/net/phy/vitesse.c b/drivers/net/phy/vitesse.c
index 43691b1acfd9..bb680352708a 100644
--- a/drivers/net/phy/vitesse.c
+++ b/drivers/net/phy/vitesse.c
@@ -197,7 +197,7 @@ static int vsc738x_config_init(struct phy_device *phydev)
vsc73xx_config_init(phydev);
- return genphy_config_init(phydev);
+ return 0;
}
static int vsc739x_config_init(struct phy_device *phydev)
@@ -229,7 +229,7 @@ static int vsc739x_config_init(struct phy_device *phydev)
vsc73xx_config_init(phydev);
- return genphy_config_init(phydev);
+ return 0;
}
static int vsc73xx_config_aneg(struct phy_device *phydev)
@@ -267,7 +267,7 @@ static int vsc8601_config_init(struct phy_device *phydev)
if (ret < 0)
return ret;
- return genphy_config_init(phydev);
+ return 0;
}
static int vsc824x_ack_interrupt(struct phy_device *phydev)
diff --git a/drivers/net/slip/slhc.c b/drivers/net/slip/slhc.c
index ea90db3c7705..58a69f830d29 100644
--- a/drivers/net/slip/slhc.c
+++ b/drivers/net/slip/slhc.c
@@ -91,8 +91,8 @@ static unsigned short pull16(unsigned char **cpp);
struct slcompress *
slhc_init(int rslots, int tslots)
{
- register short i;
- register struct cstate *ts;
+ short i;
+ struct cstate *ts;
struct slcompress *comp;
if (rslots < 0 || rslots > 255 || tslots < 0 || tslots > 255)
@@ -206,7 +206,7 @@ pull16(unsigned char **cpp)
static long
decode(unsigned char **cpp)
{
- register int x;
+ int x;
x = *(*cpp)++;
if(x == 0){
@@ -227,14 +227,14 @@ int
slhc_compress(struct slcompress *comp, unsigned char *icp, int isize,
unsigned char *ocp, unsigned char **cpp, int compress_cid)
{
- register struct cstate *ocs = &(comp->tstate[comp->xmit_oldest]);
- register struct cstate *lcs = ocs;
- register struct cstate *cs = lcs->next;
- register unsigned long deltaS, deltaA;
- register short changes = 0;
+ struct cstate *ocs = &(comp->tstate[comp->xmit_oldest]);
+ struct cstate *lcs = ocs;
+ struct cstate *cs = lcs->next;
+ unsigned long deltaS, deltaA;
+ short changes = 0;
int hlen;
unsigned char new_seq[16];
- register unsigned char *cp = new_seq;
+ unsigned char *cp = new_seq;
struct iphdr *ip;
struct tcphdr *th, *oth;
__sum16 csum;
@@ -486,11 +486,11 @@ uncompressed:
int
slhc_uncompress(struct slcompress *comp, unsigned char *icp, int isize)
{
- register int changes;
+ int changes;
long x;
- register struct tcphdr *thp;
- register struct iphdr *ip;
- register struct cstate *cs;
+ struct tcphdr *thp;
+ struct iphdr *ip;
+ struct cstate *cs;
int len, hdrlen;
unsigned char *cp = icp;
@@ -543,7 +543,7 @@ slhc_uncompress(struct slcompress *comp, unsigned char *icp, int isize)
switch(changes & SPECIALS_MASK){
case SPECIAL_I: /* Echoed terminal traffic */
{
- register short i;
+ short i;
i = ntohs(ip->tot_len) - hdrlen;
thp->ack_seq = htonl( ntohl(thp->ack_seq) + i);
thp->seq = htonl( ntohl(thp->seq) + i);
@@ -637,7 +637,7 @@ bad:
int
slhc_remember(struct slcompress *comp, unsigned char *icp, int isize)
{
- register struct cstate *cs;
+ struct cstate *cs;
unsigned ihl;
unsigned char index;
diff --git a/drivers/net/thunderbolt.c b/drivers/net/thunderbolt.c
index fcf31335a8b6..dacb4f680fd4 100644
--- a/drivers/net/thunderbolt.c
+++ b/drivers/net/thunderbolt.c
@@ -1005,7 +1005,7 @@ static void *tbnet_kmap_frag(struct sk_buff *skb, unsigned int frag_num,
const skb_frag_t *frag = &skb_shinfo(skb)->frags[frag_num];
*len = skb_frag_size(frag);
- return kmap_atomic(skb_frag_page(frag)) + frag->page_offset;
+ return kmap_atomic(skb_frag_page(frag)) + skb_frag_off(frag);
}
static netdev_tx_t tbnet_start_xmit(struct sk_buff *skb,
diff --git a/drivers/net/usb/asix_common.c b/drivers/net/usb/asix_common.c
index b39ee714fb01..e39f41efda3e 100644
--- a/drivers/net/usb/asix_common.c
+++ b/drivers/net/usb/asix_common.c
@@ -221,6 +221,7 @@ struct sk_buff *asix_tx_fixup(struct usbnet *dev, struct sk_buff *skb,
int tailroom = skb_tailroom(skb);
u32 packet_len;
u32 padbytes = 0xffff0000;
+ void *ptr;
padlen = ((skb->len + 4) & (dev->maxpacket - 1)) ? 0 : 4;
@@ -256,13 +257,11 @@ struct sk_buff *asix_tx_fixup(struct usbnet *dev, struct sk_buff *skb,
}
packet_len = ((skb->len ^ 0x0000ffff) << 16) + skb->len;
- skb_push(skb, 4);
- cpu_to_le32s(&packet_len);
- skb_copy_to_linear_data(skb, &packet_len, sizeof(packet_len));
+ ptr = skb_push(skb, 4);
+ put_unaligned_le32(packet_len, ptr);
if (padlen) {
- cpu_to_le32s(&padbytes);
- memcpy(skb_tail_pointer(skb), &padbytes, sizeof(padbytes));
+ put_unaligned_le32(padbytes, skb_tail_pointer(skb));
skb_put(skb, sizeof(padbytes));
}
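This and the following USB driver conversions all replace the
swap-in-place-then-memcpy idiom with put_unaligned_le32() and
get_unaligned_le32(), which perform one endian-aware, alignment-safe store
or load. A minimal before/after sketch for a 4-byte header:

#include <asm/unaligned.h>

static void example_write_hdr(void *ptr, u32 packet_len)
{
	/* old idiom:
	 *	cpu_to_le32s(&packet_len);
	 *	memcpy(ptr, &packet_len, 4);
	 * new idiom, one call:
	 */
	put_unaligned_le32(packet_len, ptr);
}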
diff --git a/drivers/net/usb/ax88179_178a.c b/drivers/net/usb/ax88179_178a.c
index 0bc457ba8574..daa54486ab09 100644
--- a/drivers/net/usb/ax88179_178a.c
+++ b/drivers/net/usb/ax88179_178a.c
@@ -1366,8 +1366,7 @@ static int ax88179_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
return 0;
skb_trim(skb, skb->len - 4);
- memcpy(&rx_hdr, skb_tail_pointer(skb), 4);
- le32_to_cpus(&rx_hdr);
+ rx_hdr = get_unaligned_le32(skb_tail_pointer(skb));
pkt_cnt = (u16)rx_hdr;
hdr_off = (u16)(rx_hdr >> 16);
@@ -1422,6 +1421,7 @@ ax88179_tx_fixup(struct usbnet *dev, struct sk_buff *skb, gfp_t flags)
int frame_size = dev->maxpacket;
int mss = skb_shinfo(skb)->gso_size;
int headroom;
+ void *ptr;
tx_hdr1 = skb->len;
tx_hdr2 = mss;
@@ -1436,13 +1436,9 @@ ax88179_tx_fixup(struct usbnet *dev, struct sk_buff *skb, gfp_t flags)
return NULL;
}
- skb_push(skb, 4);
- cpu_to_le32s(&tx_hdr2);
- skb_copy_to_linear_data(skb, &tx_hdr2, 4);
-
- skb_push(skb, 4);
- cpu_to_le32s(&tx_hdr1);
- skb_copy_to_linear_data(skb, &tx_hdr1, 4);
+ ptr = skb_push(skb, 8);
+ put_unaligned_le32(tx_hdr1, ptr);
+ put_unaligned_le32(tx_hdr2, ptr + 4);
return skb;
}
diff --git a/drivers/net/usb/lan78xx.c b/drivers/net/usb/lan78xx.c
index f033fee225a1..58f5a219fb65 100644
--- a/drivers/net/usb/lan78xx.c
+++ b/drivers/net/usb/lan78xx.c
@@ -1258,8 +1258,7 @@ static void lan78xx_status(struct lan78xx_net *dev, struct urb *urb)
return;
}
- memcpy(&intdata, urb->transfer_buffer, 4);
- le32_to_cpus(&intdata);
+ intdata = get_unaligned_le32(urb->transfer_buffer);
if (intdata & INT_ENP_PHY_INT) {
netif_dbg(dev, link, dev->net, "PHY INTR: 0x%08x\n", intdata);
@@ -2730,6 +2729,7 @@ static struct sk_buff *lan78xx_tx_prep(struct lan78xx_net *dev,
struct sk_buff *skb, gfp_t flags)
{
u32 tx_cmd_a, tx_cmd_b;
+ void *ptr;
if (skb_cow_head(skb, TX_OVERHEAD)) {
dev_kfree_skb_any(skb);
@@ -2758,13 +2758,9 @@ static struct sk_buff *lan78xx_tx_prep(struct lan78xx_net *dev,
tx_cmd_b |= skb_vlan_tag_get(skb) & TX_CMD_B_VTAG_MASK_;
}
- skb_push(skb, 4);
- cpu_to_le32s(&tx_cmd_b);
- memcpy(skb->data, &tx_cmd_b, 4);
-
- skb_push(skb, 4);
- cpu_to_le32s(&tx_cmd_a);
- memcpy(skb->data, &tx_cmd_a, 4);
+ ptr = skb_push(skb, 8);
+ put_unaligned_le32(tx_cmd_a, ptr);
+ put_unaligned_le32(tx_cmd_b, ptr + 4);
return skb;
}
@@ -3105,16 +3101,13 @@ static int lan78xx_rx(struct lan78xx_net *dev, struct sk_buff *skb)
struct sk_buff *skb2;
unsigned char *packet;
- memcpy(&rx_cmd_a, skb->data, sizeof(rx_cmd_a));
- le32_to_cpus(&rx_cmd_a);
+ rx_cmd_a = get_unaligned_le32(skb->data);
skb_pull(skb, sizeof(rx_cmd_a));
- memcpy(&rx_cmd_b, skb->data, sizeof(rx_cmd_b));
- le32_to_cpus(&rx_cmd_b);
+ rx_cmd_b = get_unaligned_le32(skb->data);
skb_pull(skb, sizeof(rx_cmd_b));
- memcpy(&rx_cmd_c, skb->data, sizeof(rx_cmd_c));
- le16_to_cpus(&rx_cmd_c);
+ rx_cmd_c = get_unaligned_le16(skb->data);
skb_pull(skb, sizeof(rx_cmd_c));
packet = skb->data;
diff --git a/drivers/net/usb/lg-vl600.c b/drivers/net/usb/lg-vl600.c
index 6c2b3e368efe..217a2d8fa47b 100644
--- a/drivers/net/usb/lg-vl600.c
+++ b/drivers/net/usb/lg-vl600.c
@@ -87,9 +87,7 @@ static void vl600_unbind(struct usbnet *dev, struct usb_interface *intf)
{
struct vl600_state *s = dev->driver_priv;
- if (s->current_rx_buf)
- dev_kfree_skb(s->current_rx_buf);
-
+ dev_kfree_skb(s->current_rx_buf);
kfree(s);
return usbnet_cdc_unbind(dev, intf);
diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
index 04137ac373b0..778d27d1fb15 100644
--- a/drivers/net/usb/r8152.c
+++ b/drivers/net/usb/r8152.c
@@ -22,10 +22,11 @@
#include <linux/mdio.h>
#include <linux/usb/cdc.h>
#include <linux/suspend.h>
+#include <linux/atomic.h>
#include <linux/acpi.h>
/* Information for net-next */
-#define NETNEXT_VERSION "09"
+#define NETNEXT_VERSION "10"
/* Information for net */
#define NET_VERSION "10"
@@ -583,6 +584,9 @@ enum rtl_register_content {
#define TX_ALIGN 4
#define RX_ALIGN 8
+#define RTL8152_RX_MAX_PENDING 4096
+#define RTL8152_RXFG_HEADSZ 256
+
#define INTR_LINK 0x0004
#define RTL8152_REQT_READ 0xc0
@@ -615,7 +619,7 @@ enum rtl8152_flags {
RTL8152_LINK_CHG,
SELECTIVE_SUSPEND,
PHY_RESET,
- SCHEDULE_NAPI,
+ SCHEDULE_TASKLET,
GREEN_ETHERNET,
DELL_TB_RX_AGG_BUG,
};
@@ -694,11 +698,11 @@ struct tx_desc {
struct r8152;
struct rx_agg {
- struct list_head list;
+ struct list_head list, info_list;
struct urb *urb;
struct r8152 *context;
+ struct page *page;
void *buffer;
- void *head;
};
struct tx_agg {
@@ -719,7 +723,7 @@ struct r8152 {
struct net_device *netdev;
struct urb *intr_urb;
struct tx_agg tx_info[RTL8152_MAX_TX];
- struct rx_agg rx_info[RTL8152_MAX_RX];
+ struct list_head rx_info, rx_used;
struct list_head rx_done, tx_free;
struct sk_buff_head tx_queue, rx_queue;
spinlock_t rx_lock, tx_lock;
@@ -729,6 +733,7 @@ struct r8152 {
#ifdef CONFIG_PM_SLEEP
struct notifier_block pm_notifier;
#endif
+ struct tasklet_struct tx_tl;
struct rtl_ops {
void (*init)(struct r8152 *);
@@ -744,13 +749,21 @@ struct r8152 {
void (*autosuspend_en)(struct r8152 *tp, bool enable);
} rtl_ops;
+ atomic_t rx_count;
+
+ bool eee_en;
int intr_interval;
u32 saved_wolopts;
u32 msg_enable;
u32 tx_qlen;
u32 coalesce;
+ u32 rx_buf_sz;
+ u32 rx_copybreak;
+ u32 rx_pending;
+
u16 ocp_base;
u16 speed;
+ u16 eee_adv;
u8 *intr_buff;
u8 version;
u8 duplex;
@@ -1394,7 +1407,7 @@ static void write_bulk_callback(struct urb *urb)
return;
if (!skb_queue_empty(&tp->tx_queue))
- napi_schedule(&tp->napi);
+ tasklet_schedule(&tp->tx_tl);
}
static void intr_callback(struct urb *urb)
@@ -1470,18 +1483,72 @@ static inline void *tx_agg_align(void *data)
return (void *)ALIGN((uintptr_t)data, TX_ALIGN);
}
+static void free_rx_agg(struct r8152 *tp, struct rx_agg *agg)
+{
+ list_del(&agg->info_list);
+
+ usb_free_urb(agg->urb);
+ put_page(agg->page);
+ kfree(agg);
+
+ atomic_dec(&tp->rx_count);
+}
+
+static struct rx_agg *alloc_rx_agg(struct r8152 *tp, gfp_t mflags)
+{
+ struct net_device *netdev = tp->netdev;
+ int node = netdev->dev.parent ? dev_to_node(netdev->dev.parent) : -1;
+ unsigned int order = get_order(tp->rx_buf_sz);
+ struct rx_agg *rx_agg;
+ unsigned long flags;
+
+ rx_agg = kmalloc_node(sizeof(*rx_agg), mflags, node);
+ if (!rx_agg)
+ return NULL;
+
+ rx_agg->page = alloc_pages(mflags | __GFP_COMP, order);
+ if (!rx_agg->page)
+ goto free_rx;
+
+ rx_agg->buffer = page_address(rx_agg->page);
+
+ rx_agg->urb = usb_alloc_urb(0, mflags);
+ if (!rx_agg->urb)
+ goto free_buf;
+
+ rx_agg->context = tp;
+
+ INIT_LIST_HEAD(&rx_agg->list);
+ INIT_LIST_HEAD(&rx_agg->info_list);
+ spin_lock_irqsave(&tp->rx_lock, flags);
+ list_add_tail(&rx_agg->info_list, &tp->rx_info);
+ spin_unlock_irqrestore(&tp->rx_lock, flags);
+
+ atomic_inc(&tp->rx_count);
+
+ return rx_agg;
+
+free_buf:
+ __free_pages(rx_agg->page, order);
+free_rx:
+ kfree(rx_agg);
+ return NULL;
+}
+
static void free_all_mem(struct r8152 *tp)
{
+ struct rx_agg *agg, *agg_next;
+ unsigned long flags;
int i;
- for (i = 0; i < RTL8152_MAX_RX; i++) {
- usb_free_urb(tp->rx_info[i].urb);
- tp->rx_info[i].urb = NULL;
+ spin_lock_irqsave(&tp->rx_lock, flags);
- kfree(tp->rx_info[i].buffer);
- tp->rx_info[i].buffer = NULL;
- tp->rx_info[i].head = NULL;
- }
+ list_for_each_entry_safe(agg, agg_next, &tp->rx_info, info_list)
+ free_rx_agg(tp, agg);
+
+ spin_unlock_irqrestore(&tp->rx_lock, flags);
+
+ WARN_ON(atomic_read(&tp->rx_count));
for (i = 0; i < RTL8152_MAX_TX; i++) {
usb_free_urb(tp->tx_info[i].urb);
@@ -1505,46 +1572,28 @@ static int alloc_all_mem(struct r8152 *tp)
struct usb_interface *intf = tp->intf;
struct usb_host_interface *alt = intf->cur_altsetting;
struct usb_host_endpoint *ep_intr = alt->endpoint + 2;
- struct urb *urb;
int node, i;
- u8 *buf;
node = netdev->dev.parent ? dev_to_node(netdev->dev.parent) : -1;
spin_lock_init(&tp->rx_lock);
spin_lock_init(&tp->tx_lock);
+ INIT_LIST_HEAD(&tp->rx_info);
INIT_LIST_HEAD(&tp->tx_free);
INIT_LIST_HEAD(&tp->rx_done);
skb_queue_head_init(&tp->tx_queue);
skb_queue_head_init(&tp->rx_queue);
+ atomic_set(&tp->rx_count, 0);
for (i = 0; i < RTL8152_MAX_RX; i++) {
- buf = kmalloc_node(agg_buf_sz, GFP_KERNEL, node);
- if (!buf)
+ if (!alloc_rx_agg(tp, GFP_KERNEL))
goto err1;
-
- if (buf != rx_agg_align(buf)) {
- kfree(buf);
- buf = kmalloc_node(agg_buf_sz + RX_ALIGN, GFP_KERNEL,
- node);
- if (!buf)
- goto err1;
- }
-
- urb = usb_alloc_urb(0, GFP_KERNEL);
- if (!urb) {
- kfree(buf);
- goto err1;
- }
-
- INIT_LIST_HEAD(&tp->rx_info[i].list);
- tp->rx_info[i].context = tp;
- tp->rx_info[i].urb = urb;
- tp->rx_info[i].buffer = buf;
- tp->rx_info[i].head = rx_agg_align(buf);
}
for (i = 0; i < RTL8152_MAX_TX; i++) {
+ struct urb *urb;
+ u8 *buf;
+
buf = kmalloc_node(agg_buf_sz, GFP_KERNEL, node);
if (!buf)
goto err1;
@@ -1910,6 +1959,46 @@ return_result:
return checksum;
}
+static inline bool rx_count_exceed(struct r8152 *tp)
+{
+ return atomic_read(&tp->rx_count) > RTL8152_MAX_RX;
+}
+
+static inline int agg_offset(struct rx_agg *agg, void *addr)
+{
+ return (int)(addr - agg->buffer);
+}
+
+static struct rx_agg *rtl_get_free_rx(struct r8152 *tp, gfp_t mflags)
+{
+ struct rx_agg *agg, *agg_next, *agg_free = NULL;
+ unsigned long flags;
+
+ spin_lock_irqsave(&tp->rx_lock, flags);
+
+ list_for_each_entry_safe(agg, agg_next, &tp->rx_used, list) {
+ if (page_count(agg->page) == 1) {
+ if (!agg_free) {
+ list_del_init(&agg->list);
+ agg_free = agg;
+ continue;
+ }
+ if (rx_count_exceed(tp)) {
+ list_del_init(&agg->list);
+ free_rx_agg(tp, agg);
+ }
+ break;
+ }
+ }
+
+ spin_unlock_irqrestore(&tp->rx_lock, flags);
+
+ if (!agg_free && atomic_read(&tp->rx_count) < tp->rx_pending)
+ agg_free = alloc_rx_agg(tp, mflags);
+
+ return agg_free;
+}
+
static int rx_bottom(struct r8152 *tp, int budget)
{
unsigned long flags;
@@ -1945,7 +2034,7 @@ static int rx_bottom(struct r8152 *tp, int budget)
list_for_each_safe(cursor, next, &rx_queue) {
struct rx_desc *rx_desc;
- struct rx_agg *agg;
+ struct rx_agg *agg, *agg_free;
int len_used = 0;
struct urb *urb;
u8 *rx_data;
@@ -1957,14 +2046,16 @@ static int rx_bottom(struct r8152 *tp, int budget)
if (urb->actual_length < ETH_ZLEN)
goto submit;
- rx_desc = agg->head;
- rx_data = agg->head;
+ agg_free = rtl_get_free_rx(tp, GFP_ATOMIC);
+
+ rx_desc = agg->buffer;
+ rx_data = agg->buffer;
len_used += sizeof(struct rx_desc);
while (urb->actual_length > len_used) {
struct net_device *netdev = tp->netdev;
struct net_device_stats *stats = &netdev->stats;
- unsigned int pkt_len;
+ unsigned int pkt_len, rx_frag_head_sz;
struct sk_buff *skb;
			/* limit the number of skbs on rx_queue */
@@ -1982,22 +2073,37 @@ static int rx_bottom(struct r8152 *tp, int budget)
pkt_len -= ETH_FCS_LEN;
rx_data += sizeof(struct rx_desc);
- skb = napi_alloc_skb(napi, pkt_len);
+ if (!agg_free || tp->rx_copybreak > pkt_len)
+ rx_frag_head_sz = pkt_len;
+ else
+ rx_frag_head_sz = tp->rx_copybreak;
+
+ skb = napi_alloc_skb(napi, rx_frag_head_sz);
if (!skb) {
stats->rx_dropped++;
goto find_next_rx;
}
skb->ip_summed = r8152_rx_csum(tp, rx_desc);
- memcpy(skb->data, rx_data, pkt_len);
- skb_put(skb, pkt_len);
+ memcpy(skb->data, rx_data, rx_frag_head_sz);
+ skb_put(skb, rx_frag_head_sz);
+ pkt_len -= rx_frag_head_sz;
+ rx_data += rx_frag_head_sz;
+ if (pkt_len) {
+ skb_add_rx_frag(skb, 0, agg->page,
+ agg_offset(agg, rx_data),
+ pkt_len,
+ SKB_DATA_ALIGN(pkt_len));
+ get_page(agg->page);
+ }
+
skb->protocol = eth_type_trans(skb, netdev);
rtl_rx_vlan_tag(rx_desc, skb);
if (work_done < budget) {
- napi_gro_receive(napi, skb);
work_done++;
stats->rx_packets++;
- stats->rx_bytes += pkt_len;
+ stats->rx_bytes += skb->len;
+ napi_gro_receive(napi, skb);
} else {
__skb_queue_tail(&tp->rx_queue, skb);
}
@@ -2005,10 +2111,24 @@ static int rx_bottom(struct r8152 *tp, int budget)
find_next_rx:
rx_data = rx_agg_align(rx_data + pkt_len + ETH_FCS_LEN);
rx_desc = (struct rx_desc *)rx_data;
- len_used = (int)(rx_data - (u8 *)agg->head);
+ len_used = agg_offset(agg, rx_data);
len_used += sizeof(struct rx_desc);
}
+ WARN_ON(!agg_free && page_count(agg->page) > 1);
+
+ if (agg_free) {
+ spin_lock_irqsave(&tp->rx_lock, flags);
+ if (page_count(agg->page) == 1) {
+ list_add(&agg_free->list, &tp->rx_used);
+ } else {
+ list_add_tail(&agg->list, &tp->rx_used);
+ agg = agg_free;
+ urb = agg->urb;
+ }
+ spin_unlock_irqrestore(&tp->rx_lock, flags);
+ }
+
submit:
if (!ret) {
ret = r8152_submit_rx(tp, agg, GFP_ATOMIC);
@@ -2065,8 +2185,12 @@ static void tx_bottom(struct r8152 *tp)
} while (res == 0);
}
-static void bottom_half(struct r8152 *tp)
+static void bottom_half(unsigned long data)
{
+ struct r8152 *tp;
+
+ tp = (struct r8152 *)data;
+
if (test_bit(RTL8152_UNPLUG, &tp->flags))
return;
@@ -2078,7 +2202,7 @@ static void bottom_half(struct r8152 *tp)
if (!netif_carrier_ok(tp->netdev))
return;
- clear_bit(SCHEDULE_NAPI, &tp->flags);
+ clear_bit(SCHEDULE_TASKLET, &tp->flags);
tx_bottom(tp);
}
@@ -2089,16 +2213,12 @@ static int r8152_poll(struct napi_struct *napi, int budget)
int work_done;
work_done = rx_bottom(tp, budget);
- bottom_half(tp);
if (work_done < budget) {
if (!napi_complete_done(napi, work_done))
goto out;
if (!list_empty(&tp->rx_done))
napi_schedule(napi);
- else if (!skb_queue_empty(&tp->tx_queue) &&
- !list_empty(&tp->tx_free))
- napi_schedule(napi);
}
out:
@@ -2116,7 +2236,7 @@ int r8152_submit_rx(struct r8152 *tp, struct rx_agg *agg, gfp_t mem_flags)
return 0;
usb_fill_bulk_urb(agg->urb, tp->udev, usb_rcvbulkpipe(tp->udev, 1),
- agg->head, agg_buf_sz,
+ agg->buffer, tp->rx_buf_sz,
(usb_complete_t)read_bulk_callback, agg);
ret = usb_submit_urb(agg->urb, mem_flags);
@@ -2252,11 +2372,11 @@ static netdev_tx_t rtl8152_start_xmit(struct sk_buff *skb,
if (!list_empty(&tp->tx_free)) {
if (test_bit(SELECTIVE_SUSPEND, &tp->flags)) {
- set_bit(SCHEDULE_NAPI, &tp->flags);
+ set_bit(SCHEDULE_TASKLET, &tp->flags);
schedule_delayed_work(&tp->schedule, 0);
} else {
usb_mark_last_busy(tp->udev);
- napi_schedule(&tp->napi);
+ tasklet_schedule(&tp->tx_tl);
}
} else if (skb_queue_len(&tp->tx_queue) > tp->tx_qlen) {
netif_stop_queue(netdev);
@@ -2333,44 +2453,80 @@ static void rxdy_gated_en(struct r8152 *tp, bool enable)
static int rtl_start_rx(struct r8152 *tp)
{
- int i, ret = 0;
+ struct rx_agg *agg, *agg_next;
+ struct list_head tmp_list;
+ unsigned long flags;
+ int ret = 0, i = 0;
- INIT_LIST_HEAD(&tp->rx_done);
- for (i = 0; i < RTL8152_MAX_RX; i++) {
- INIT_LIST_HEAD(&tp->rx_info[i].list);
- ret = r8152_submit_rx(tp, &tp->rx_info[i], GFP_KERNEL);
- if (ret)
- break;
- }
+ INIT_LIST_HEAD(&tmp_list);
- if (ret && ++i < RTL8152_MAX_RX) {
- struct list_head rx_queue;
- unsigned long flags;
+ spin_lock_irqsave(&tp->rx_lock, flags);
- INIT_LIST_HEAD(&rx_queue);
+ INIT_LIST_HEAD(&tp->rx_done);
+ INIT_LIST_HEAD(&tp->rx_used);
- do {
- struct rx_agg *agg = &tp->rx_info[i++];
- struct urb *urb = agg->urb;
+ list_splice_init(&tp->rx_info, &tmp_list);
- urb->actual_length = 0;
- list_add_tail(&agg->list, &rx_queue);
- } while (i < RTL8152_MAX_RX);
+ spin_unlock_irqrestore(&tp->rx_lock, flags);
- spin_lock_irqsave(&tp->rx_lock, flags);
- list_splice_tail(&rx_queue, &tp->rx_done);
- spin_unlock_irqrestore(&tp->rx_lock, flags);
+ list_for_each_entry_safe(agg, agg_next, &tmp_list, info_list) {
+ INIT_LIST_HEAD(&agg->list);
+
+		/* Only RTL8152_MAX_RX rx_agg entries need to be submitted. */
+ if (++i > RTL8152_MAX_RX) {
+ spin_lock_irqsave(&tp->rx_lock, flags);
+ list_add_tail(&agg->list, &tp->rx_used);
+ spin_unlock_irqrestore(&tp->rx_lock, flags);
+ } else if (unlikely(ret < 0)) {
+ spin_lock_irqsave(&tp->rx_lock, flags);
+ list_add_tail(&agg->list, &tp->rx_done);
+ spin_unlock_irqrestore(&tp->rx_lock, flags);
+ } else {
+ ret = r8152_submit_rx(tp, agg, GFP_KERNEL);
+ }
}
+ spin_lock_irqsave(&tp->rx_lock, flags);
+ WARN_ON(!list_empty(&tp->rx_info));
+ list_splice(&tmp_list, &tp->rx_info);
+ spin_unlock_irqrestore(&tp->rx_lock, flags);
+
return ret;
}
static int rtl_stop_rx(struct r8152 *tp)
{
- int i;
+ struct rx_agg *agg, *agg_next;
+ struct list_head tmp_list;
+ unsigned long flags;
+
+ INIT_LIST_HEAD(&tmp_list);
+
+	/* usb_kill_urb() can't be used in atomic context.
+	 * Therefore, move the rx_info list onto a temporary one;
+	 * list_for_each_entry_safe() can then walk it without
+	 * holding the spin lock.
+	 */
+
+ spin_lock_irqsave(&tp->rx_lock, flags);
+ list_splice_init(&tp->rx_info, &tmp_list);
+ spin_unlock_irqrestore(&tp->rx_lock, flags);
+
+ list_for_each_entry_safe(agg, agg_next, &tmp_list, info_list) {
+		/* At least RTL8152_MAX_RX rx_agg entries have a page_count
+		 * of 1, so the remaining ones can be freed safely.
+		 */
+ if (page_count(agg->page) > 1)
+ free_rx_agg(tp, agg);
+ else
+ usb_kill_urb(agg->urb);
+ }
- for (i = 0; i < RTL8152_MAX_RX; i++)
- usb_kill_urb(tp->rx_info[i].urb);
+	/* Move the temporary list back to rx_info */
+ spin_lock_irqsave(&tp->rx_lock, flags);
+ WARN_ON(!list_empty(&tp->rx_info));
+ list_splice(&tmp_list, &tp->rx_info);
+ spin_unlock_irqrestore(&tp->rx_lock, flags);
while (!skb_queue_empty(&tp->rx_queue))
dev_kfree_skb(__skb_dequeue(&tp->rx_queue));
@@ -2450,7 +2606,7 @@ static void r8153_set_rx_early_timeout(struct r8152 *tp)
static void r8153_set_rx_early_size(struct r8152 *tp)
{
- u32 ocp_data = agg_buf_sz - rx_reserved_size(tp->netdev->mtu);
+ u32 ocp_data = tp->rx_buf_sz - rx_reserved_size(tp->netdev->mtu);
switch (tp->version) {
case RTL_VER_03:
@@ -3049,10 +3205,76 @@ static void r8152_eee_en(struct r8152 *tp, bool enable)
ocp_reg_write(tp, OCP_EEE_CONFIG3, config3);
}
-static void r8152b_enable_eee(struct r8152 *tp)
+static void r8153_eee_en(struct r8152 *tp, bool enable)
{
- r8152_eee_en(tp, true);
- r8152_mmd_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, MDIO_EEE_100TX);
+ u32 ocp_data;
+ u16 config;
+
+ ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_EEE_CR);
+ config = ocp_reg_read(tp, OCP_EEE_CFG);
+
+ if (enable) {
+ ocp_data |= EEE_RX_EN | EEE_TX_EN;
+ config |= EEE10_EN;
+ } else {
+ ocp_data &= ~(EEE_RX_EN | EEE_TX_EN);
+ config &= ~EEE10_EN;
+ }
+
+ ocp_write_word(tp, MCU_TYPE_PLA, PLA_EEE_CR, ocp_data);
+ ocp_reg_write(tp, OCP_EEE_CFG, config);
+}
+
+static void r8153b_eee_en(struct r8152 *tp, bool enable)
+{
+ r8153_eee_en(tp, enable);
+
+ if (enable)
+ r8153b_ups_flags_w1w0(tp, UPS_FLAGS_EN_EEE, 0);
+ else
+ r8153b_ups_flags_w1w0(tp, 0, UPS_FLAGS_EN_EEE);
+}
+
+static void rtl_eee_enable(struct r8152 *tp, bool enable)
+{
+ switch (tp->version) {
+ case RTL_VER_01:
+ case RTL_VER_02:
+ case RTL_VER_07:
+ if (enable) {
+ r8152_eee_en(tp, true);
+ r8152_mmd_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV,
+ tp->eee_adv);
+ } else {
+ r8152_eee_en(tp, false);
+ r8152_mmd_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, 0);
+ }
+ break;
+ case RTL_VER_03:
+ case RTL_VER_04:
+ case RTL_VER_05:
+ case RTL_VER_06:
+ if (enable) {
+ r8153_eee_en(tp, true);
+ ocp_reg_write(tp, OCP_EEE_ADV, tp->eee_adv);
+ } else {
+ r8153_eee_en(tp, false);
+ ocp_reg_write(tp, OCP_EEE_ADV, 0);
+ }
+ break;
+ case RTL_VER_08:
+ case RTL_VER_09:
+ if (enable) {
+ r8153b_eee_en(tp, true);
+ ocp_reg_write(tp, OCP_EEE_ADV, tp->eee_adv);
+ } else {
+ r8153b_eee_en(tp, false);
+ ocp_reg_write(tp, OCP_EEE_ADV, 0);
+ }
+ break;
+ default:
+ break;
+ }
}
static void r8152b_enable_fc(struct r8152 *tp)
@@ -3073,7 +3295,7 @@ static void rtl8152_disable(struct r8152 *tp)
static void r8152b_hw_phy_cfg(struct r8152 *tp)
{
- r8152b_enable_eee(tp);
+ rtl_eee_enable(tp, tp->eee_en);
r8152_aldps_en(tp, true);
r8152b_enable_fc(tp);
@@ -3267,36 +3489,6 @@ static void r8153b_aldps_en(struct r8152 *tp, bool enable)
r8153b_ups_flags_w1w0(tp, 0, UPS_FLAGS_EN_ALDPS);
}
-static void r8153_eee_en(struct r8152 *tp, bool enable)
-{
- u32 ocp_data;
- u16 config;
-
- ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_EEE_CR);
- config = ocp_reg_read(tp, OCP_EEE_CFG);
-
- if (enable) {
- ocp_data |= EEE_RX_EN | EEE_TX_EN;
- config |= EEE10_EN;
- } else {
- ocp_data &= ~(EEE_RX_EN | EEE_TX_EN);
- config &= ~EEE10_EN;
- }
-
- ocp_write_word(tp, MCU_TYPE_PLA, PLA_EEE_CR, ocp_data);
- ocp_reg_write(tp, OCP_EEE_CFG, config);
-}
-
-static void r8153b_eee_en(struct r8152 *tp, bool enable)
-{
- r8153_eee_en(tp, enable);
-
- if (enable)
- r8153b_ups_flags_w1w0(tp, UPS_FLAGS_EN_EEE, 0);
- else
- r8153b_ups_flags_w1w0(tp, 0, UPS_FLAGS_EN_EEE);
-}
-
static void r8153b_enable_fc(struct r8152 *tp)
{
r8152b_enable_fc(tp);
@@ -3312,8 +3504,7 @@ static void r8153_hw_phy_cfg(struct r8152 *tp)
r8153_aldps_en(tp, false);
/* disable EEE before updating the PHY parameters */
- r8153_eee_en(tp, false);
- ocp_reg_write(tp, OCP_EEE_ADV, 0);
+ rtl_eee_enable(tp, false);
if (tp->version == RTL_VER_03) {
data = ocp_reg_read(tp, OCP_EEE_CFG);
@@ -3344,8 +3535,8 @@ static void r8153_hw_phy_cfg(struct r8152 *tp)
sram_write(tp, SRAM_10M_AMP1, 0x00af);
sram_write(tp, SRAM_10M_AMP2, 0x0208);
- r8153_eee_en(tp, true);
- ocp_reg_write(tp, OCP_EEE_ADV, MDIO_EEE_1000T | MDIO_EEE_100TX);
+ if (tp->eee_en)
+ rtl_eee_enable(tp, true);
r8153_aldps_en(tp, true);
r8152b_enable_fc(tp);
@@ -3385,8 +3576,7 @@ static void r8153b_hw_phy_cfg(struct r8152 *tp)
r8153b_aldps_en(tp, false);
/* disable EEE before updating the PHY parameters */
- r8153b_eee_en(tp, false);
- ocp_reg_write(tp, OCP_EEE_ADV, 0);
+ rtl_eee_enable(tp, false);
r8153b_green_en(tp, test_bit(GREEN_ETHERNET, &tp->flags));
@@ -3448,8 +3638,8 @@ static void r8153b_hw_phy_cfg(struct r8152 *tp)
r8153b_ups_flags_w1w0(tp, ups_flags, 0);
- r8153b_eee_en(tp, true);
- ocp_reg_write(tp, OCP_EEE_ADV, MDIO_EEE_1000T | MDIO_EEE_100TX);
+ if (tp->eee_en)
+ rtl_eee_enable(tp, true);
r8153b_aldps_en(tp, true);
r8153b_enable_fc(tp);
@@ -3870,9 +4060,11 @@ static void set_carrier(struct r8152 *tp)
} else {
if (netif_carrier_ok(netdev)) {
netif_carrier_off(netdev);
+ tasklet_disable(&tp->tx_tl);
napi_disable(napi);
tp->rtl_ops.disable(tp);
napi_enable(napi);
+ tasklet_enable(&tp->tx_tl);
netif_info(tp, link, netdev, "carrier off\n");
}
}
@@ -3905,10 +4097,10 @@ static void rtl_work_func_t(struct work_struct *work)
if (test_and_clear_bit(RTL8152_SET_RX_MODE, &tp->flags))
_rtl8152_set_rx_mode(tp->netdev);
- /* don't schedule napi before linking */
- if (test_and_clear_bit(SCHEDULE_NAPI, &tp->flags) &&
+ /* don't schedule tasklet before linking */
+ if (test_and_clear_bit(SCHEDULE_TASKLET, &tp->flags) &&
netif_carrier_ok(tp->netdev))
- napi_schedule(&tp->napi);
+ tasklet_schedule(&tp->tx_tl);
mutex_unlock(&tp->control);
@@ -3994,6 +4186,7 @@ static int rtl8152_open(struct net_device *netdev)
goto out_unlock;
}
napi_enable(&tp->napi);
+ tasklet_enable(&tp->tx_tl);
mutex_unlock(&tp->control);
@@ -4021,6 +4214,7 @@ static int rtl8152_close(struct net_device *netdev)
#ifdef CONFIG_PM_SLEEP
unregister_pm_notifier(&tp->pm_notifier);
#endif
+ tasklet_disable(&tp->tx_tl);
napi_disable(&tp->napi);
clear_bit(WORK_ENABLE, &tp->flags);
usb_kill_urb(tp->intr_urb);
@@ -4289,6 +4483,7 @@ static int rtl8152_pre_reset(struct usb_interface *intf)
return 0;
netif_stop_queue(netdev);
+ tasklet_disable(&tp->tx_tl);
napi_disable(&tp->napi);
clear_bit(WORK_ENABLE, &tp->flags);
usb_kill_urb(tp->intr_urb);
@@ -4332,6 +4527,7 @@ static int rtl8152_post_reset(struct usb_interface *intf)
}
napi_enable(&tp->napi);
+ tasklet_enable(&tp->tx_tl);
netif_wake_queue(netdev);
usb_submit_urb(tp->intr_urb, GFP_KERNEL);
@@ -4485,10 +4681,12 @@ static int rtl8152_system_suspend(struct r8152 *tp)
clear_bit(WORK_ENABLE, &tp->flags);
usb_kill_urb(tp->intr_urb);
+ tasklet_disable(&tp->tx_tl);
napi_disable(napi);
cancel_delayed_work_sync(&tp->schedule);
tp->rtl_ops.down(tp);
napi_enable(napi);
+ tasklet_enable(&tp->tx_tl);
}
return 0;
@@ -4731,7 +4929,7 @@ static void rtl8152_get_strings(struct net_device *dev, u32 stringset, u8 *data)
static int r8152_get_eee(struct r8152 *tp, struct ethtool_eee *eee)
{
- u32 ocp_data, lp, adv, supported = 0;
+ u32 lp, adv, supported = 0;
u16 val;
val = r8152_mmd_read(tp, MDIO_MMD_PCS, MDIO_PCS_EEE_ABLE);
@@ -4743,13 +4941,10 @@ static int r8152_get_eee(struct r8152 *tp, struct ethtool_eee *eee)
val = r8152_mmd_read(tp, MDIO_MMD_AN, MDIO_AN_EEE_LPABLE);
lp = mmd_eee_adv_to_ethtool_adv_t(val);
- ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_EEE_CR);
- ocp_data &= EEE_RX_EN | EEE_TX_EN;
-
- eee->eee_enabled = !!ocp_data;
+ eee->eee_enabled = tp->eee_en;
eee->eee_active = !!(supported & adv & lp);
eee->supported = supported;
- eee->advertised = adv;
+ eee->advertised = tp->eee_adv;
eee->lp_advertised = lp;
return 0;
@@ -4759,19 +4954,17 @@ static int r8152_set_eee(struct r8152 *tp, struct ethtool_eee *eee)
{
u16 val = ethtool_adv_to_mmd_eee_adv_t(eee->advertised);
- r8152_eee_en(tp, eee->eee_enabled);
+ tp->eee_en = eee->eee_enabled;
+ tp->eee_adv = val;
- if (!eee->eee_enabled)
- val = 0;
-
- r8152_mmd_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, val);
+ rtl_eee_enable(tp, tp->eee_en);
return 0;
}
static int r8153_get_eee(struct r8152 *tp, struct ethtool_eee *eee)
{
- u32 ocp_data, lp, adv, supported = 0;
+ u32 lp, adv, supported = 0;
u16 val;
val = ocp_reg_read(tp, OCP_EEE_ABLE);
@@ -4783,46 +4976,15 @@ static int r8153_get_eee(struct r8152 *tp, struct ethtool_eee *eee)
val = ocp_reg_read(tp, OCP_EEE_LPABLE);
lp = mmd_eee_adv_to_ethtool_adv_t(val);
- ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_EEE_CR);
- ocp_data &= EEE_RX_EN | EEE_TX_EN;
-
- eee->eee_enabled = !!ocp_data;
+ eee->eee_enabled = tp->eee_en;
eee->eee_active = !!(supported & adv & lp);
eee->supported = supported;
- eee->advertised = adv;
+ eee->advertised = tp->eee_adv;
eee->lp_advertised = lp;
return 0;
}
-static int r8153_set_eee(struct r8152 *tp, struct ethtool_eee *eee)
-{
- u16 val = ethtool_adv_to_mmd_eee_adv_t(eee->advertised);
-
- r8153_eee_en(tp, eee->eee_enabled);
-
- if (!eee->eee_enabled)
- val = 0;
-
- ocp_reg_write(tp, OCP_EEE_ADV, val);
-
- return 0;
-}
-
-static int r8153b_set_eee(struct r8152 *tp, struct ethtool_eee *eee)
-{
- u16 val = ethtool_adv_to_mmd_eee_adv_t(eee->advertised);
-
- r8153b_eee_en(tp, eee->eee_enabled);
-
- if (!eee->eee_enabled)
- val = 0;
-
- ocp_reg_write(tp, OCP_EEE_ADV, val);
-
- return 0;
-}
-
static int
rtl_ethtool_get_eee(struct net_device *net, struct ethtool_eee *edata)
{
@@ -4956,6 +5118,77 @@ static int rtl8152_set_coalesce(struct net_device *netdev,
return ret;
}
+static int rtl8152_get_tunable(struct net_device *netdev,
+ const struct ethtool_tunable *tunable, void *d)
+{
+ struct r8152 *tp = netdev_priv(netdev);
+
+ switch (tunable->id) {
+ case ETHTOOL_RX_COPYBREAK:
+ *(u32 *)d = tp->rx_copybreak;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static int rtl8152_set_tunable(struct net_device *netdev,
+ const struct ethtool_tunable *tunable,
+ const void *d)
+{
+ struct r8152 *tp = netdev_priv(netdev);
+ u32 val;
+
+ switch (tunable->id) {
+ case ETHTOOL_RX_COPYBREAK:
+ val = *(u32 *)d;
+ if (val < ETH_ZLEN) {
+ netif_err(tp, rx_err, netdev,
+ "Invalid rx copy break value\n");
+ return -EINVAL;
+ }
+
+ if (tp->rx_copybreak != val) {
+ napi_disable(&tp->napi);
+ tp->rx_copybreak = val;
+ napi_enable(&tp->napi);
+ }
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static void rtl8152_get_ringparam(struct net_device *netdev,
+ struct ethtool_ringparam *ring)
+{
+ struct r8152 *tp = netdev_priv(netdev);
+
+ ring->rx_max_pending = RTL8152_RX_MAX_PENDING;
+ ring->rx_pending = tp->rx_pending;
+}
+
+static int rtl8152_set_ringparam(struct net_device *netdev,
+ struct ethtool_ringparam *ring)
+{
+ struct r8152 *tp = netdev_priv(netdev);
+
+ if (ring->rx_pending < (RTL8152_MAX_RX * 2))
+ return -EINVAL;
+
+ if (tp->rx_pending != ring->rx_pending) {
+ napi_disable(&tp->napi);
+ tp->rx_pending = ring->rx_pending;
+ napi_enable(&tp->napi);
+ }
+
+ return 0;
+}
+
static const struct ethtool_ops ops = {
.get_drvinfo = rtl8152_get_drvinfo,
.get_link = ethtool_op_get_link,
@@ -4973,6 +5206,10 @@ static const struct ethtool_ops ops = {
.set_eee = rtl_ethtool_set_eee,
.get_link_ksettings = rtl8152_get_link_ksettings,
.set_link_ksettings = rtl8152_set_link_ksettings,
+ .get_tunable = rtl8152_get_tunable,
+ .set_tunable = rtl8152_set_tunable,
+ .get_ringparam = rtl8152_get_ringparam,
+ .set_ringparam = rtl8152_set_ringparam,
};
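
The tunable and ringparam hooks registered above are driven from userspace via ethtool; a usage sketch, assuming an ethtool build with tunable support (device name illustrative, not part of the patch):

	/*
	 * Usage sketch -- not from this patch:
	 *
	 *   ethtool --set-tunable eth0 rx-copybreak 1024
	 *   ethtool --get-tunable eth0 rx-copybreak
	 *   ethtool -G eth0 rx 512
	 *
	 * rtl8152_set_tunable() rejects values below ETH_ZLEN (60) with
	 * -EINVAL, and both setters wrap the update in napi_disable()/
	 * napi_enable() so the RX path never observes a half-applied value.
	 */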
static int rtl8152_ioctl(struct net_device *netdev, struct ifreq *rq, int cmd)
@@ -5117,6 +5354,9 @@ static int rtl_ops_init(struct r8152 *tp)
ops->in_nway = rtl8152_in_nway;
ops->hw_phy_cfg = r8152b_hw_phy_cfg;
ops->autosuspend_en = rtl_runtime_suspend_enable;
+ tp->rx_buf_sz = 16 * 1024;
+ tp->eee_en = true;
+ tp->eee_adv = MDIO_EEE_100TX;
break;
case RTL_VER_03:
@@ -5130,10 +5370,13 @@ static int rtl_ops_init(struct r8152 *tp)
ops->down = rtl8153_down;
ops->unload = rtl8153_unload;
ops->eee_get = r8153_get_eee;
- ops->eee_set = r8153_set_eee;
+ ops->eee_set = r8152_set_eee;
ops->in_nway = rtl8153_in_nway;
ops->hw_phy_cfg = r8153_hw_phy_cfg;
ops->autosuspend_en = rtl8153_runtime_enable;
+ tp->rx_buf_sz = 32 * 1024;
+ tp->eee_en = true;
+ tp->eee_adv = MDIO_EEE_1000T | MDIO_EEE_100TX;
break;
case RTL_VER_08:
@@ -5145,10 +5388,13 @@ static int rtl_ops_init(struct r8152 *tp)
ops->down = rtl8153b_down;
ops->unload = rtl8153b_unload;
ops->eee_get = r8153_get_eee;
- ops->eee_set = r8153b_set_eee;
+ ops->eee_set = r8152_set_eee;
ops->in_nway = rtl8153_in_nway;
ops->hw_phy_cfg = r8153b_hw_phy_cfg;
ops->autosuspend_en = rtl8153b_runtime_enable;
+ tp->rx_buf_sz = 32 * 1024;
+ tp->eee_en = true;
+ tp->eee_adv = MDIO_EEE_1000T | MDIO_EEE_100TX;
break;
default:
@@ -5270,6 +5516,8 @@ static int rtl8152_probe(struct usb_interface *intf,
mutex_init(&tp->control);
INIT_DELAYED_WORK(&tp->schedule, rtl_work_func_t);
INIT_DELAYED_WORK(&tp->hw_phy_work, rtl_hw_phy_work_func_t);
+ tasklet_init(&tp->tx_tl, bottom_half, (unsigned long)tp);
+ tasklet_disable(&tp->tx_tl);
netdev->netdev_ops = &rtl8152_netdev_ops;
netdev->watchdog_timeo = RTL8152_TX_TIMEOUT;
@@ -5323,6 +5571,9 @@ static int rtl8152_probe(struct usb_interface *intf,
tp->speed = tp->mii.supports_gmii ? SPEED_1000 : SPEED_100;
tp->duplex = DUPLEX_FULL;
+ tp->rx_copybreak = RTL8152_RXFG_HEADSZ;
+ tp->rx_pending = 10 * RTL8152_MAX_RX;
+
intf->needs_remote_wakeup = 1;
tp->rtl_ops.init(tp);
@@ -5352,6 +5603,7 @@ static int rtl8152_probe(struct usb_interface *intf,
return 0;
out1:
+ tasklet_kill(&tp->tx_tl);
usb_set_intfdata(intf, NULL);
out:
free_netdev(netdev);
@@ -5367,6 +5619,7 @@ static void rtl8152_disconnect(struct usb_interface *intf)
rtl_set_unplug(tp);
unregister_netdev(tp->netdev);
+ tasklet_kill(&tp->tx_tl);
cancel_delayed_work_sync(&tp->hw_phy_work);
tp->rtl_ops.unload(tp);
free_netdev(tp->netdev);
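
The r8152 hunks above move TX completion out of NAPI into a dedicated tasklet, and its lifecycle is deliberate: created disabled in probe, enabled only while the netdev is open, disabled again ahead of NAPI on every teardown path, and killed on disconnect. A minimal sketch of that pattern, using the pre-5.9 tasklet_init() API as this driver does (all names below are illustrative):

	#include <linux/interrupt.h>

	struct foo {
		struct tasklet_struct tx_tl;
	};

	static void foo_tx_bottom_half(unsigned long data)
	{
		struct foo *f = (struct foo *)data;

		(void)f;	/* reclaim completed TX buffers here */
	}

	static void foo_probe(struct foo *f)
	{
		tasklet_init(&f->tx_tl, foo_tx_bottom_half, (unsigned long)f);
		tasklet_disable(&f->tx_tl);	/* stay dormant until open */
	}

	static void foo_open(struct foo *f)   { tasklet_enable(&f->tx_tl); }
	static void foo_close(struct foo *f)  { tasklet_disable(&f->tx_tl); }
	static void foo_remove(struct foo *f) { tasklet_kill(&f->tx_tl); }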
diff --git a/drivers/net/usb/rtl8150.c b/drivers/net/usb/rtl8150.c
index 98f33e270af1..13e51ccf0214 100644
--- a/drivers/net/usb/rtl8150.c
+++ b/drivers/net/usb/rtl8150.c
@@ -586,8 +586,7 @@ static void free_skb_pool(rtl8150_t *dev)
int i;
for (i = 0; i < RX_SKB_POOL_SIZE; i++)
- if (dev->rx_skb_pool[i])
- dev_kfree_skb(dev->rx_skb_pool[i]);
+ dev_kfree_skb(dev->rx_skb_pool[i]);
}
static void rx_fixup(unsigned long data)
@@ -946,8 +945,7 @@ static void rtl8150_disconnect(struct usb_interface *intf)
unlink_all_urbs(dev);
free_all_urbs(dev);
free_skb_pool(dev);
- if (dev->rx_skb)
- dev_kfree_skb(dev->rx_skb);
+ dev_kfree_skb(dev->rx_skb);
kfree(dev->intr_buff);
free_netdev(dev->netdev);
}
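
The rtl8150 cleanup leans on dev_kfree_skb() being NULL-tolerant (kfree_skb() returns early for a NULL pointer), so the removed if-guards were redundant:

	struct sk_buff *skb = NULL;

	dev_kfree_skb(skb);	/* no-op on NULL -- no guard needed */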
diff --git a/drivers/net/usb/smsc75xx.c b/drivers/net/usb/smsc75xx.c
index 1417a22962a1..9556d431885f 100644
--- a/drivers/net/usb/smsc75xx.c
+++ b/drivers/net/usb/smsc75xx.c
@@ -661,8 +661,7 @@ static void smsc75xx_status(struct usbnet *dev, struct urb *urb)
return;
}
- memcpy(&intdata, urb->transfer_buffer, 4);
- le32_to_cpus(&intdata);
+ intdata = get_unaligned_le32(urb->transfer_buffer);
netif_dbg(dev, link, dev->net, "intdata: 0x%08X\n", intdata);
@@ -2181,12 +2180,10 @@ static int smsc75xx_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
struct sk_buff *ax_skb;
unsigned char *packet;
- memcpy(&rx_cmd_a, skb->data, sizeof(rx_cmd_a));
- le32_to_cpus(&rx_cmd_a);
+ rx_cmd_a = get_unaligned_le32(skb->data);
skb_pull(skb, 4);
- memcpy(&rx_cmd_b, skb->data, sizeof(rx_cmd_b));
- le32_to_cpus(&rx_cmd_b);
+ rx_cmd_b = get_unaligned_le32(skb->data);
skb_pull(skb, 4 + RXW_PADDING);
packet = skb->data;
@@ -2258,6 +2255,7 @@ static struct sk_buff *smsc75xx_tx_fixup(struct usbnet *dev,
struct sk_buff *skb, gfp_t flags)
{
u32 tx_cmd_a, tx_cmd_b;
+ void *ptr;
if (skb_cow_head(skb, SMSC75XX_TX_OVERHEAD)) {
dev_kfree_skb_any(skb);
@@ -2278,13 +2276,9 @@ static struct sk_buff *smsc75xx_tx_fixup(struct usbnet *dev,
tx_cmd_b = 0;
}
- skb_push(skb, 4);
- cpu_to_le32s(&tx_cmd_b);
- memcpy(skb->data, &tx_cmd_b, 4);
-
- skb_push(skb, 4);
- cpu_to_le32s(&tx_cmd_a);
- memcpy(skb->data, &tx_cmd_a, 4);
+ ptr = skb_push(skb, 8);
+ put_unaligned_le32(tx_cmd_a, ptr);
+ put_unaligned_le32(tx_cmd_b, ptr + 4);
return skb;
}
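
The smsc75xx conversions replace memcpy() plus an in-place le32_to_cpus()/cpu_to_le32s() with the unaligned little-endian accessors, which perform the byte swap and tolerate any buffer alignment in a single call. A self-contained sketch (helper names are illustrative):

	#include <linux/types.h>
	#include <asm/unaligned.h>

	static u32 read_le32_field(const void *buf)
	{
		/* replaces: memcpy(&v, buf, 4); le32_to_cpus(&v); */
		return get_unaligned_le32(buf);
	}

	static void write_le32_field(void *buf, u32 v)
	{
		/* replaces: cpu_to_le32s(&v); memcpy(buf, &v, 4); */
		put_unaligned_le32(v, buf);
	}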
diff --git a/drivers/net/usb/sr9800.c b/drivers/net/usb/sr9800.c
index 35f39f23d881..c5d4a0060124 100644
--- a/drivers/net/usb/sr9800.c
+++ b/drivers/net/usb/sr9800.c
@@ -115,6 +115,7 @@ static struct sk_buff *sr_tx_fixup(struct usbnet *dev, struct sk_buff *skb,
u32 padbytes = 0xffff0000;
u32 packet_len;
int padlen;
+ void *ptr;
padlen = ((skb->len + 4) % (dev->maxpacket - 1)) ? 0 : 4;
@@ -133,14 +134,12 @@ static struct sk_buff *sr_tx_fixup(struct usbnet *dev, struct sk_buff *skb,
return NULL;
}
- skb_push(skb, 4);
+ ptr = skb_push(skb, 4);
packet_len = (((skb->len - 4) ^ 0x0000ffff) << 16) + (skb->len - 4);
- cpu_to_le32s(&packet_len);
- skb_copy_to_linear_data(skb, &packet_len, sizeof(packet_len));
+ put_unaligned_le32(packet_len, ptr);
if (padlen) {
- cpu_to_le32s(&padbytes);
- memcpy(skb_tail_pointer(skb), &padbytes, sizeof(padbytes));
+ put_unaligned_le32(padbytes, skb_tail_pointer(skb));
skb_put(skb, sizeof(padbytes));
}
diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c
index 72514c46b478..58952a79b05f 100644
--- a/drivers/net/usb/usbnet.c
+++ b/drivers/net/usb/usbnet.c
@@ -1324,11 +1324,11 @@ static int build_dma_sg(const struct sk_buff *skb, struct urb *urb)
total_len += skb_headlen(skb);
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
- struct skb_frag_struct *f = &skb_shinfo(skb)->frags[i];
+ skb_frag_t *f = &skb_shinfo(skb)->frags[i];
total_len += skb_frag_size(f);
- sg_set_page(&urb->sg[i + s], f->page.p, f->size,
- f->page_offset);
+ sg_set_page(&urb->sg[i + s], skb_frag_page(f), skb_frag_size(f),
+ skb_frag_off(f));
}
urb->transfer_buffer_length = total_len;
diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c
index 2a1918f25e47..216acf37ca7c 100644
--- a/drivers/net/vmxnet3/vmxnet3_drv.c
+++ b/drivers/net/vmxnet3/vmxnet3_drv.c
@@ -657,13 +657,12 @@ static void
vmxnet3_append_frag(struct sk_buff *skb, struct Vmxnet3_RxCompDesc *rcd,
struct vmxnet3_rx_buf_info *rbi)
{
- struct skb_frag_struct *frag = skb_shinfo(skb)->frags +
- skb_shinfo(skb)->nr_frags;
+ skb_frag_t *frag = skb_shinfo(skb)->frags + skb_shinfo(skb)->nr_frags;
BUG_ON(skb_shinfo(skb)->nr_frags >= MAX_SKB_FRAGS);
__skb_frag_set_page(frag, rbi->page);
- frag->page_offset = 0;
+ skb_frag_off_set(frag, 0);
skb_frag_size_set(frag, rcd->len);
skb->data_len += rcd->len;
skb->truesize += PAGE_SIZE;
@@ -755,7 +754,7 @@ vmxnet3_map_pkt(struct sk_buff *skb, struct vmxnet3_tx_ctx *ctx,
}
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
- const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
+ const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
u32 buf_size;
buf_offset = 0;
@@ -956,7 +955,7 @@ static int txd_estimate(const struct sk_buff *skb)
int i;
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
- const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
+ const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
count += VMXNET3_TXD_NEEDED(skb_frag_size(frag));
}
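
The usbnet and vmxnet3 hunks are part of the tree-wide move to treat skb_frag_t as opaque: fragments are read through skb_frag_page(), skb_frag_size() and skb_frag_off() rather than by poking ->page.p, ->size and ->page_offset directly. A minimal sketch of the accessor style (function name illustrative):

	#include <linux/skbuff.h>

	static unsigned int count_frag_bytes(const struct sk_buff *skb)
	{
		unsigned int i, total = 0;

		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

			total += skb_frag_size(frag);	/* not frag->size */
		}
		return total;
	}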
diff --git a/drivers/net/wimax/i2400m/debugfs.c b/drivers/net/wimax/i2400m/debugfs.c
index 6544ac9df047..73f5892ce6c1 100644
--- a/drivers/net/wimax/i2400m/debugfs.c
+++ b/drivers/net/wimax/i2400m/debugfs.c
@@ -30,15 +30,6 @@ DEFINE_SIMPLE_ATTRIBUTE(fops_netdev_queue_stopped,
debugfs_netdev_queue_stopped_get,
NULL, "%llu\n");
-
-static
-struct dentry *debugfs_create_netdev_queue_stopped(
- const char *name, struct dentry *parent, struct i2400m *i2400m)
-{
- return debugfs_create_file(name, 0400, parent, i2400m,
- &fops_netdev_queue_stopped);
-}
-
/*
* We don't allow partial reads of this file, as then the reader would
* get weirdly confused data as it is updated.
@@ -167,15 +158,6 @@ DEFINE_SIMPLE_ATTRIBUTE(fops_i2400m_suspend,
NULL, debugfs_i2400m_suspend_set,
"%llu\n");
-static
-struct dentry *debugfs_create_i2400m_suspend(
- const char *name, struct dentry *parent, struct i2400m *i2400m)
-{
- return debugfs_create_file(name, 0200, parent, i2400m,
- &fops_i2400m_suspend);
-}
-
-
/*
* Reset the device
*
@@ -205,73 +187,25 @@ DEFINE_SIMPLE_ATTRIBUTE(fops_i2400m_reset,
NULL, debugfs_i2400m_reset_set,
"%llu\n");
-static
-struct dentry *debugfs_create_i2400m_reset(
- const char *name, struct dentry *parent, struct i2400m *i2400m)
+void i2400m_debugfs_add(struct i2400m *i2400m)
{
- return debugfs_create_file(name, 0200, parent, i2400m,
- &fops_i2400m_reset);
-}
-
-
-#define __debugfs_register(prefix, name, parent) \
-do { \
- result = d_level_register_debugfs(prefix, name, parent); \
- if (result < 0) \
- goto error; \
-} while (0)
-
-
-int i2400m_debugfs_add(struct i2400m *i2400m)
-{
- int result;
- struct device *dev = i2400m_dev(i2400m);
struct dentry *dentry = i2400m->wimax_dev.debugfs_dentry;
- struct dentry *fd;
dentry = debugfs_create_dir("i2400m", dentry);
- result = PTR_ERR(dentry);
- if (IS_ERR(dentry)) {
- if (result == -ENODEV)
- result = 0; /* No debugfs support */
- goto error;
- }
i2400m->debugfs_dentry = dentry;
- __debugfs_register("dl_", control, dentry);
- __debugfs_register("dl_", driver, dentry);
- __debugfs_register("dl_", debugfs, dentry);
- __debugfs_register("dl_", fw, dentry);
- __debugfs_register("dl_", netdev, dentry);
- __debugfs_register("dl_", rfkill, dentry);
- __debugfs_register("dl_", rx, dentry);
- __debugfs_register("dl_", tx, dentry);
-
- fd = debugfs_create_size_t("tx_in", 0400, dentry,
- &i2400m->tx_in);
- result = PTR_ERR(fd);
- if (IS_ERR(fd) && result != -ENODEV) {
- dev_err(dev, "Can't create debugfs entry "
- "tx_in: %d\n", result);
- goto error;
- }
- fd = debugfs_create_size_t("tx_out", 0400, dentry,
- &i2400m->tx_out);
- result = PTR_ERR(fd);
- if (IS_ERR(fd) && result != -ENODEV) {
- dev_err(dev, "Can't create debugfs entry "
- "tx_out: %d\n", result);
- goto error;
- }
+ d_level_register_debugfs("dl_", control, dentry);
+ d_level_register_debugfs("dl_", driver, dentry);
+ d_level_register_debugfs("dl_", debugfs, dentry);
+ d_level_register_debugfs("dl_", fw, dentry);
+ d_level_register_debugfs("dl_", netdev, dentry);
+ d_level_register_debugfs("dl_", rfkill, dentry);
+ d_level_register_debugfs("dl_", rx, dentry);
+ d_level_register_debugfs("dl_", tx, dentry);
- fd = debugfs_create_u32("state", 0600, dentry,
- &i2400m->state);
- result = PTR_ERR(fd);
- if (IS_ERR(fd) && result != -ENODEV) {
- dev_err(dev, "Can't create debugfs entry "
- "state: %d\n", result);
- goto error;
- }
+ debugfs_create_size_t("tx_in", 0400, dentry, &i2400m->tx_in);
+ debugfs_create_size_t("tx_out", 0400, dentry, &i2400m->tx_out);
+ debugfs_create_u32("state", 0600, dentry, &i2400m->state);
/*
* Trace received messages from user space
@@ -295,60 +229,22 @@ int i2400m_debugfs_add(struct i2400m *i2400m)
* It is not really very atomic, but it is also not too
* critical.
*/
- fd = debugfs_create_u8("trace_msg_from_user", 0600, dentry,
- &i2400m->trace_msg_from_user);
- result = PTR_ERR(fd);
- if (IS_ERR(fd) && result != -ENODEV) {
- dev_err(dev, "Can't create debugfs entry "
- "trace_msg_from_user: %d\n", result);
- goto error;
- }
+ debugfs_create_u8("trace_msg_from_user", 0600, dentry,
+ &i2400m->trace_msg_from_user);
- fd = debugfs_create_netdev_queue_stopped("netdev_queue_stopped",
- dentry, i2400m);
- result = PTR_ERR(fd);
- if (IS_ERR(fd) && result != -ENODEV) {
- dev_err(dev, "Can't create debugfs entry "
- "netdev_queue_stopped: %d\n", result);
- goto error;
- }
+ debugfs_create_file("netdev_queue_stopped", 0400, dentry, i2400m,
+ &fops_netdev_queue_stopped);
- fd = debugfs_create_file("rx_stats", 0600, dentry, i2400m,
- &i2400m_rx_stats_fops);
- result = PTR_ERR(fd);
- if (IS_ERR(fd) && result != -ENODEV) {
- dev_err(dev, "Can't create debugfs entry "
- "rx_stats: %d\n", result);
- goto error;
- }
+ debugfs_create_file("rx_stats", 0600, dentry, i2400m,
+ &i2400m_rx_stats_fops);
- fd = debugfs_create_file("tx_stats", 0600, dentry, i2400m,
- &i2400m_tx_stats_fops);
- result = PTR_ERR(fd);
- if (IS_ERR(fd) && result != -ENODEV) {
- dev_err(dev, "Can't create debugfs entry "
- "tx_stats: %d\n", result);
- goto error;
- }
+ debugfs_create_file("tx_stats", 0600, dentry, i2400m,
+ &i2400m_tx_stats_fops);
- fd = debugfs_create_i2400m_suspend("suspend", dentry, i2400m);
- result = PTR_ERR(fd);
- if (IS_ERR(fd) && result != -ENODEV) {
- dev_err(dev, "Can't create debugfs entry suspend: %d\n",
- result);
- goto error;
- }
+ debugfs_create_file("suspend", 0200, dentry, i2400m,
+ &fops_i2400m_suspend);
- fd = debugfs_create_i2400m_reset("reset", dentry, i2400m);
- result = PTR_ERR(fd);
- if (IS_ERR(fd) && result != -ENODEV) {
- dev_err(dev, "Can't create debugfs entry reset: %d\n", result);
- goto error;
- }
-
- result = 0;
-error:
- return result;
+ debugfs_create_file("reset", 0200, dentry, i2400m, &fops_i2400m_reset);
}
void i2400m_debugfs_rm(struct i2400m *i2400m)
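
The i2400m conversion follows the kernel-wide debugfs rule: creation calls are best-effort, callers must not check their return values, and a missing or failing debugfs must never fail device setup. The resulting idiom is short (names illustrative):

	#include <linux/debugfs.h>

	static struct dentry *foo_dir;
	static u32 foo_state;

	static void foo_debugfs_add(void)
	{
		foo_dir = debugfs_create_dir("foo", NULL);
		/* no IS_ERR() checks -- files are created best-effort */
		debugfs_create_u32("state", 0600, foo_dir, &foo_state);
	}

	static void foo_debugfs_rm(void)
	{
		debugfs_remove_recursive(foo_dir);	/* NULL/ERR_PTR safe */
	}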
diff --git a/drivers/net/wimax/i2400m/driver.c b/drivers/net/wimax/i2400m/driver.c
index 0a29222a1bf9..f66c0f8f6f4a 100644
--- a/drivers/net/wimax/i2400m/driver.c
+++ b/drivers/net/wimax/i2400m/driver.c
@@ -905,11 +905,7 @@ int i2400m_setup(struct i2400m *i2400m, enum i2400m_bri bm_flags)
goto error_sysfs_setup;
}
- result = i2400m_debugfs_add(i2400m);
- if (result < 0) {
- dev_err(dev, "cannot setup i2400m's debugfs: %d\n", result);
- goto error_debugfs_setup;
- }
+ i2400m_debugfs_add(i2400m);
result = i2400m_dev_start(i2400m, bm_flags);
if (result < 0)
@@ -919,7 +915,6 @@ int i2400m_setup(struct i2400m *i2400m, enum i2400m_bri bm_flags)
error_dev_start:
i2400m_debugfs_rm(i2400m);
-error_debugfs_setup:
sysfs_remove_group(&i2400m->wimax_dev.net_dev->dev.kobj,
&i2400m_dev_attr_group);
error_sysfs_setup:
diff --git a/drivers/net/wimax/i2400m/fw.c b/drivers/net/wimax/i2400m/fw.c
index 489cba9b284d..6c9a41bff2e0 100644
--- a/drivers/net/wimax/i2400m/fw.c
+++ b/drivers/net/wimax/i2400m/fw.c
@@ -397,14 +397,9 @@ int i2400m_is_boot_barker(struct i2400m *i2400m,
/* Short circuit if we have already discovered the barker
* associated with the device. */
- if (i2400m->barker
- && !memcmp(buf, i2400m->barker, sizeof(i2400m->barker->data))) {
- unsigned index = (i2400m->barker - i2400m_barker_db)
- / sizeof(*i2400m->barker);
- d_printf(2, dev, "boot barker cache-confirmed #%u/%08x\n",
- index, le32_to_cpu(i2400m->barker->data[0]));
+ if (i2400m->barker &&
+ !memcmp(buf, i2400m->barker, sizeof(i2400m->barker->data)))
return 0;
- }
for (i = 0; i < i2400m_barker_db_used; i++) {
barker = &i2400m_barker_db[i];
diff --git a/drivers/net/wimax/i2400m/i2400m.h b/drivers/net/wimax/i2400m/i2400m.h
index 5a34e72bab9a..a3733a6d14f5 100644
--- a/drivers/net/wimax/i2400m/i2400m.h
+++ b/drivers/net/wimax/i2400m/i2400m.h
@@ -812,13 +812,10 @@ enum i2400m_pt;
int i2400m_tx(struct i2400m *, const void *, size_t, enum i2400m_pt);
#ifdef CONFIG_DEBUG_FS
-int i2400m_debugfs_add(struct i2400m *);
+void i2400m_debugfs_add(struct i2400m *);
void i2400m_debugfs_rm(struct i2400m *);
#else
-static inline int i2400m_debugfs_add(struct i2400m *i2400m)
-{
- return 0;
-}
+static inline void i2400m_debugfs_add(struct i2400m *i2400m) {}
static inline void i2400m_debugfs_rm(struct i2400m *i2400m) {}
#endif
diff --git a/drivers/net/wimax/i2400m/rx.c b/drivers/net/wimax/i2400m/rx.c
index d28b96d06919..c9fb619a9e01 100644
--- a/drivers/net/wimax/i2400m/rx.c
+++ b/drivers/net/wimax/i2400m/rx.c
@@ -1253,7 +1253,6 @@ int i2400m_rx(struct i2400m *i2400m, struct sk_buff *skb)
skb_len = skb->len;
d_fnstart(4, dev, "(i2400m %p skb %p [size %u])\n",
i2400m, skb, skb_len);
- result = -EIO;
msg_hdr = (void *) skb->data;
result = i2400m_rx_msg_hdr_check(i2400m, msg_hdr, skb_len);
if (result < 0)
diff --git a/drivers/net/wimax/i2400m/usb.c b/drivers/net/wimax/i2400m/usb.c
index 2075e7b1fff6..6953f904232f 100644
--- a/drivers/net/wimax/i2400m/usb.c
+++ b/drivers/net/wimax/i2400m/usb.c
@@ -366,61 +366,25 @@ struct d_level D_LEVEL[] = {
};
size_t D_LEVEL_SIZE = ARRAY_SIZE(D_LEVEL);
-
-#define __debugfs_register(prefix, name, parent) \
-do { \
- result = d_level_register_debugfs(prefix, name, parent); \
- if (result < 0) \
- goto error; \
-} while (0)
-
-
static
-int i2400mu_debugfs_add(struct i2400mu *i2400mu)
+void i2400mu_debugfs_add(struct i2400mu *i2400mu)
{
- int result;
- struct device *dev = &i2400mu->usb_iface->dev;
struct dentry *dentry = i2400mu->i2400m.wimax_dev.debugfs_dentry;
- struct dentry *fd;
dentry = debugfs_create_dir("i2400m-usb", dentry);
- result = PTR_ERR(dentry);
- if (IS_ERR(dentry)) {
- if (result == -ENODEV)
- result = 0; /* No debugfs support */
- goto error;
- }
i2400mu->debugfs_dentry = dentry;
- __debugfs_register("dl_", usb, dentry);
- __debugfs_register("dl_", fw, dentry);
- __debugfs_register("dl_", notif, dentry);
- __debugfs_register("dl_", rx, dentry);
- __debugfs_register("dl_", tx, dentry);
- /* Don't touch these if you don't know what you are doing */
- fd = debugfs_create_u8("rx_size_auto_shrink", 0600, dentry,
- &i2400mu->rx_size_auto_shrink);
- result = PTR_ERR(fd);
- if (IS_ERR(fd) && result != -ENODEV) {
- dev_err(dev, "Can't create debugfs entry "
- "rx_size_auto_shrink: %d\n", result);
- goto error;
- }
+ d_level_register_debugfs("dl_", usb, dentry);
+ d_level_register_debugfs("dl_", fw, dentry);
+ d_level_register_debugfs("dl_", notif, dentry);
+ d_level_register_debugfs("dl_", rx, dentry);
+ d_level_register_debugfs("dl_", tx, dentry);
- fd = debugfs_create_size_t("rx_size", 0600, dentry,
- &i2400mu->rx_size);
- result = PTR_ERR(fd);
- if (IS_ERR(fd) && result != -ENODEV) {
- dev_err(dev, "Can't create debugfs entry "
- "rx_size: %d\n", result);
- goto error;
- }
-
- return 0;
+ /* Don't touch these if you don't know what you are doing */
+ debugfs_create_u8("rx_size_auto_shrink", 0600, dentry,
+ &i2400mu->rx_size_auto_shrink);
-error:
- debugfs_remove_recursive(i2400mu->debugfs_dentry);
- return result;
+ debugfs_create_size_t("rx_size", 0600, dentry, &i2400mu->rx_size);
}
@@ -534,15 +498,9 @@ int i2400mu_probe(struct usb_interface *iface,
dev_err(dev, "cannot setup device: %d\n", result);
goto error_setup;
}
- result = i2400mu_debugfs_add(i2400mu);
- if (result < 0) {
- dev_err(dev, "Can't register i2400mu's debugfs: %d\n", result);
- goto error_debugfs_add;
- }
+ i2400mu_debugfs_add(i2400mu);
return 0;
-error_debugfs_add:
- i2400m_release(i2400m);
error_setup:
usb_set_intfdata(iface, NULL);
usb_put_dev(i2400mu->usb_dev);
diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c
index 0606416dc971..12dad659bf68 100644
--- a/drivers/net/wireless/ath/ath10k/mac.c
+++ b/drivers/net/wireless/ath/ath10k/mac.c
@@ -6970,7 +6970,8 @@ exit:
return ret;
}
-static int ath10k_cancel_remain_on_channel(struct ieee80211_hw *hw)
+static int ath10k_cancel_remain_on_channel(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
{
struct ath10k *ar = hw->priv;
diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
index f23cb2f3d296..34121fbf32e3 100644
--- a/drivers/net/wireless/ath/ath9k/main.c
+++ b/drivers/net/wireless/ath/ath9k/main.c
@@ -2392,7 +2392,8 @@ out:
return ret;
}
-static int ath9k_cancel_remain_on_channel(struct ieee80211_hw *hw)
+static int ath9k_cancel_remain_on_channel(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
{
struct ath_softc *sc = hw->priv;
struct ath_common *common = ath9k_hw_common(sc->sc_ah);
diff --git a/drivers/net/wireless/ath/wil6210/cfg80211.c b/drivers/net/wireless/ath/wil6210/cfg80211.c
index 2fb4258941a5..2414f574bf69 100644
--- a/drivers/net/wireless/ath/wil6210/cfg80211.c
+++ b/drivers/net/wireless/ath/wil6210/cfg80211.c
@@ -351,7 +351,7 @@ int wil_cid_fill_sinfo(struct wil6210_vif *vif, int cid,
BIT_ULL(NL80211_STA_INFO_RX_DROP_MISC) |
BIT_ULL(NL80211_STA_INFO_TX_FAILED);
- sinfo->txrate.flags = RATE_INFO_FLAGS_60G;
+ sinfo->txrate.flags = RATE_INFO_FLAGS_DMG;
sinfo->txrate.mcs = le16_to_cpu(reply.evt.bf_mcs);
sinfo->rxrate.mcs = stats->last_mcs_rx;
sinfo->rx_bytes = stats->rx_bytes;
diff --git a/drivers/net/wireless/ath/wil6210/debugfs.c b/drivers/net/wireless/ath/wil6210/debugfs.c
index 74834131cf7c..fd3b2b3d1b5c 100644
--- a/drivers/net/wireless/ath/wil6210/debugfs.c
+++ b/drivers/net/wireless/ath/wil6210/debugfs.c
@@ -1052,8 +1052,7 @@ static void wil_seq_print_skb(struct seq_file *s, struct sk_buff *skb)
if (nr_frags) {
seq_printf(s, " nr_frags = %d\n", nr_frags);
for (i = 0; i < nr_frags; i++) {
- const struct skb_frag_struct *frag =
- &skb_shinfo(skb)->frags[i];
+ const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
len = skb_frag_size(frag);
p = skb_frag_address_safe(frag);
diff --git a/drivers/net/wireless/ath/wil6210/txrx.c b/drivers/net/wireless/ath/wil6210/txrx.c
index eae00aafaa88..8b01ef8269da 100644
--- a/drivers/net/wireless/ath/wil6210/txrx.c
+++ b/drivers/net/wireless/ath/wil6210/txrx.c
@@ -1657,7 +1657,7 @@ static int __wil_tx_vring_tso(struct wil6210_priv *wil, struct wil6210_vif *vif,
len);
} else {
frag = &skb_shinfo(skb)->frags[f];
- len = frag->size;
+ len = skb_frag_size(frag);
wil_dbg_txrx(wil, "TSO: frag[%d]: len %u\n", f, len);
}
@@ -1678,8 +1678,8 @@ static int __wil_tx_vring_tso(struct wil6210_priv *wil, struct wil6210_vif *vif,
if (!headlen) {
pa = skb_frag_dma_map(dev, frag,
- frag->size - len, lenmss,
- DMA_TO_DEVICE);
+ skb_frag_size(frag) - len,
+ lenmss, DMA_TO_DEVICE);
vring->ctx[i].mapped_as = wil_mapped_as_page;
} else {
pa = dma_map_single(dev,
@@ -1900,8 +1900,7 @@ static int __wil_tx_ring(struct wil6210_priv *wil, struct wil6210_vif *vif,
/* middle segments */
for (; f < nr_frags; f++) {
- const struct skb_frag_struct *frag =
- &skb_shinfo(skb)->frags[f];
+ const skb_frag_t *frag = &skb_shinfo(skb)->frags[f];
int len = skb_frag_size(frag);
*_d = *d;
diff --git a/drivers/net/wireless/ath/wil6210/txrx_edma.c b/drivers/net/wireless/ath/wil6210/txrx_edma.c
index dc040cd4ab06..71b7ad4b6454 100644
--- a/drivers/net/wireless/ath/wil6210/txrx_edma.c
+++ b/drivers/net/wireless/ath/wil6210/txrx_edma.c
@@ -1471,7 +1471,7 @@ static int __wil_tx_ring_tso_edma(struct wil6210_priv *wil,
/* Rest of the descriptors are from the SKB fragments */
for (f = 0; f < nr_frags; f++) {
skb_frag_t *frag = &skb_shinfo(skb)->frags[f];
- int len = frag->size;
+ int len = skb_frag_size(frag);
wil_dbg_txrx(wil, "TSO: frag[%d]: len %u, descs_used %d\n", f,
len, descs_used);
diff --git a/drivers/net/wireless/broadcom/b43legacy/phy.c b/drivers/net/wireless/broadcom/b43legacy/phy.c
index add7a0ff75b8..a659259bc51a 100644
--- a/drivers/net/wireless/broadcom/b43legacy/phy.c
+++ b/drivers/net/wireless/broadcom/b43legacy/phy.c
@@ -69,17 +69,6 @@ static const s8 b43legacy_tssi2dbm_g_table[] = {
static void b43legacy_phy_initg(struct b43legacy_wldev *dev);
-
-static inline
-void b43legacy_voluntary_preempt(void)
-{
- B43legacy_BUG_ON(!(!in_atomic() && !in_irq() &&
- !in_interrupt() && !irqs_disabled()));
-#ifndef CONFIG_PREEMPT
- cond_resched();
-#endif /* CONFIG_PREEMPT */
-}
-
/* Lock the PHY registers against concurrent access from the microcode.
* This lock is nonrecursive. */
void b43legacy_phy_lock(struct b43legacy_wldev *dev)
@@ -1124,7 +1113,7 @@ static u16 b43legacy_phy_lo_b_r15_loop(struct b43legacy_wldev *dev)
ret += b43legacy_phy_read(dev, 0x002C);
}
local_irq_restore(flags);
- b43legacy_voluntary_preempt();
+ cond_resched();
return ret;
}
@@ -1253,7 +1242,7 @@ u16 b43legacy_phy_lo_g_deviation_subval(struct b43legacy_wldev *dev,
}
ret = b43legacy_phy_read(dev, 0x002D);
local_irq_restore(flags);
- b43legacy_voluntary_preempt();
+ cond_resched();
return ret;
}
@@ -1591,7 +1580,7 @@ void b43legacy_phy_lo_g_measure(struct b43legacy_wldev *dev)
b43legacy_radio_write16(dev, 0x43, i);
b43legacy_radio_write16(dev, 0x52, phy->txctl2);
udelay(10);
- b43legacy_voluntary_preempt();
+ cond_resched();
b43legacy_phy_set_baseband_attenuation(dev, j * 2);
@@ -1642,7 +1631,7 @@ void b43legacy_phy_lo_g_measure(struct b43legacy_wldev *dev)
phy->txctl2
| (3/*txctl1*/ << 4));
udelay(10);
- b43legacy_voluntary_preempt();
+ cond_resched();
b43legacy_phy_set_baseband_attenuation(dev, j * 2);
@@ -1665,7 +1654,7 @@ void b43legacy_phy_lo_g_measure(struct b43legacy_wldev *dev)
b43legacy_phy_write(dev, 0x0812, (r27 << 8) | 0xA2);
udelay(2);
b43legacy_phy_write(dev, 0x0812, (r27 << 8) | 0xA3);
- b43legacy_voluntary_preempt();
+ cond_resched();
} else
b43legacy_phy_write(dev, 0x0015, r27 | 0xEFA0);
b43legacy_phy_lo_adjust(dev, is_initializing);
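
Dropping b43legacy_voluntary_preempt() in favour of plain cond_resched() appears to lose nothing: cond_resched() already provides the voluntary preemption point on non-preemptible kernels, compiles down to little more than a debug check when the kernel preempts on its own, and its might_sleep-style atomic-context checking effectively subsumes the hand-rolled BUG_ON.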
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcdc.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcdc.c
index 322e913ca7aa..2c95a08a5871 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcdc.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcdc.c
@@ -479,18 +479,11 @@ fail:
return -ENOMEM;
}
-void brcmf_proto_bcdc_detach_pre_delif(struct brcmf_pub *drvr)
-{
- struct brcmf_bcdc *bcdc = drvr->proto->pd;
-
- brcmf_fws_detach_pre_delif(bcdc->fws);
-}
-
-void brcmf_proto_bcdc_detach_post_delif(struct brcmf_pub *drvr)
+void brcmf_proto_bcdc_detach(struct brcmf_pub *drvr)
{
struct brcmf_bcdc *bcdc = drvr->proto->pd;
drvr->proto->pd = NULL;
- brcmf_fws_detach_post_delif(bcdc->fws);
+ brcmf_fws_detach(bcdc->fws);
kfree(bcdc);
}
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcdc.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcdc.h
index 102e6938905c..b051d2860cd1 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcdc.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcdc.h
@@ -7,16 +7,14 @@
#ifdef CONFIG_BRCMFMAC_PROTO_BCDC
int brcmf_proto_bcdc_attach(struct brcmf_pub *drvr);
-void brcmf_proto_bcdc_detach_pre_delif(struct brcmf_pub *drvr);
-void brcmf_proto_bcdc_detach_post_delif(struct brcmf_pub *drvr);
+void brcmf_proto_bcdc_detach(struct brcmf_pub *drvr);
void brcmf_proto_bcdc_txflowblock(struct device *dev, bool state);
void brcmf_proto_bcdc_txcomplete(struct device *dev, struct sk_buff *txp,
bool success);
struct brcmf_fws_info *drvr_to_fws(struct brcmf_pub *drvr);
#else
static inline int brcmf_proto_bcdc_attach(struct brcmf_pub *drvr) { return 0; }
-static void brcmf_proto_bcdc_detach_pre_delif(struct brcmf_pub *drvr) {};
-static inline void brcmf_proto_bcdc_detach_post_delif(struct brcmf_pub *drvr) {}
+static inline void brcmf_proto_bcdc_detach(struct brcmf_pub *drvr) {}
#endif
#endif /* BRCMFMAC_BCDC_H */
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
index b6d0df354b36..581d0013f33e 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
@@ -189,9 +189,9 @@ static const struct ieee80211_regdomain brcmf_regdom = {
*/
REG_RULE(2484-10, 2484+10, 20, 6, 20, 0),
/* IEEE 802.11a, channel 36..64 */
- REG_RULE(5150-10, 5350+10, 80, 6, 20, 0),
+ REG_RULE(5150-10, 5350+10, 160, 6, 20, 0),
/* IEEE 802.11a, channel 100..165 */
- REG_RULE(5470-10, 5850+10, 80, 6, 20, 0), }
+ REG_RULE(5470-10, 5850+10, 160, 6, 20, 0), }
};
/* Note: brcmf_cipher_suites is an array of int defining which cipher suites
@@ -276,8 +276,26 @@ static u16 chandef_to_chanspec(struct brcmu_d11inf *d11inf,
else
ch_inf.sb = BRCMU_CHAN_SB_UU;
break;
- case NL80211_CHAN_WIDTH_80P80:
case NL80211_CHAN_WIDTH_160:
+ ch_inf.bw = BRCMU_CHAN_BW_160;
+ if (primary_offset == -70)
+ ch_inf.sb = BRCMU_CHAN_SB_LLL;
+ else if (primary_offset == -50)
+ ch_inf.sb = BRCMU_CHAN_SB_LLU;
+ else if (primary_offset == -30)
+ ch_inf.sb = BRCMU_CHAN_SB_LUL;
+ else if (primary_offset == -10)
+ ch_inf.sb = BRCMU_CHAN_SB_LUU;
+ else if (primary_offset == 10)
+ ch_inf.sb = BRCMU_CHAN_SB_ULL;
+ else if (primary_offset == 30)
+ ch_inf.sb = BRCMU_CHAN_SB_ULU;
+ else if (primary_offset == 50)
+ ch_inf.sb = BRCMU_CHAN_SB_UUL;
+ else
+ ch_inf.sb = BRCMU_CHAN_SB_UUU;
+ break;
+ case NL80211_CHAN_WIDTH_80P80:
case NL80211_CHAN_WIDTH_5:
case NL80211_CHAN_WIDTH_10:
default:
@@ -296,6 +314,7 @@ static u16 chandef_to_chanspec(struct brcmu_d11inf *d11inf,
}
d11inf->encchspec(&ch_inf);
+ brcmf_dbg(TRACE, "chanspec: 0x%x\n", ch_inf.chspec);
return ch_inf.chspec;
}
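
In the 160 MHz branch added above, primary_offset is the signed distance in MHz between the primary 20 MHz channel centre and the 160 MHz block centre, so only the eight values -70, -50, -30, -10, 10, 30, 50, 70 can occur, one per 20 MHz sub-band from LLL (lowest) to UUU (highest). The if/else ladder is therefore equivalent to an index computation; a hypothetical helper, not in the patch:

	static u8 sb_index_160(int primary_offset)
	{
		/* -70,-50,...,+70 -> 0..7, i.e. LLL..UUU in order */
		return (u8)((primary_offset + 70) / 20);
	}

Mapping that index back onto the BRCMU_CHAN_SB_* constants would still depend on their exact values in brcmu_d11.h, so the explicit ladder in the patch is arguably the safer form.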
@@ -1267,17 +1286,21 @@ static void brcmf_link_down(struct brcmf_cfg80211_vif *vif, u16 reason)
{
struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(vif->wdev.wiphy);
struct brcmf_pub *drvr = cfg->pub;
+ bool bus_up = drvr->bus_if->state == BRCMF_BUS_UP;
s32 err = 0;
brcmf_dbg(TRACE, "Enter\n");
if (test_and_clear_bit(BRCMF_VIF_STATUS_CONNECTED, &vif->sme_state)) {
- brcmf_dbg(INFO, "Call WLC_DISASSOC to stop excess roaming\n");
- err = brcmf_fil_cmd_data_set(vif->ifp,
- BRCMF_C_DISASSOC, NULL, 0);
- if (err) {
- bphy_err(drvr, "WLC_DISASSOC failed (%d)\n", err);
+ if (bus_up) {
+ brcmf_dbg(INFO, "Call WLC_DISASSOC to stop excess roaming\n");
+ err = brcmf_fil_cmd_data_set(vif->ifp,
+ BRCMF_C_DISASSOC, NULL, 0);
+ if (err)
+ bphy_err(drvr, "WLC_DISASSOC failed (%d)\n",
+ err);
}
+
if ((vif->wdev.iftype == NL80211_IFTYPE_STATION) ||
(vif->wdev.iftype == NL80211_IFTYPE_P2P_CLIENT))
cfg80211_disconnected(vif->wdev.netdev, reason, NULL, 0,
@@ -1287,7 +1310,8 @@ static void brcmf_link_down(struct brcmf_cfg80211_vif *vif, u16 reason)
clear_bit(BRCMF_SCAN_STATUS_SUPPRESS, &cfg->scan_status);
brcmf_btcoex_set_mode(vif, BRCMF_BTCOEX_ENABLED, 0);
if (vif->profile.use_fwsup != BRCMF_PROFILE_FWSUP_NONE) {
- brcmf_set_pmk(vif->ifp, NULL, 0);
+ if (bus_up)
+ brcmf_set_pmk(vif->ifp, NULL, 0);
vif->profile.use_fwsup = BRCMF_PROFILE_FWSUP_NONE;
}
brcmf_dbg(TRACE, "Exit\n");
@@ -2958,8 +2982,6 @@ static s32 brcmf_update_bss_info(struct brcmf_cfg80211_info *cfg,
struct brcmf_pub *drvr = cfg->pub;
struct brcmf_bss_info_le *bi;
const struct brcmf_tlv *tim;
- u16 beacon_interval;
- u8 dtim_period;
size_t ie_len;
u8 *ie;
s32 err = 0;
@@ -2983,12 +3005,9 @@ static s32 brcmf_update_bss_info(struct brcmf_cfg80211_info *cfg,
ie = ((u8 *)bi) + le16_to_cpu(bi->ie_offset);
ie_len = le32_to_cpu(bi->ie_length);
- beacon_interval = le16_to_cpu(bi->beacon_period);
tim = brcmf_parse_tlvs(ie, ie_len, WLAN_EID_TIM);
- if (tim)
- dtim_period = tim->data[1];
- else {
+ if (!tim) {
/*
* active scan was done so we could not get dtim
* information out of probe response.
@@ -3000,7 +3019,6 @@ static s32 brcmf_update_bss_info(struct brcmf_cfg80211_info *cfg,
bphy_err(drvr, "wl dtim_assoc failed (%d)\n", err);
goto update_bss_info_out;
}
- dtim_period = (u8)var;
}
update_bss_info_out:
@@ -4985,18 +5003,16 @@ static int brcmf_cfg80211_get_channel(struct wiphy *wiphy,
struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
struct net_device *ndev = wdev->netdev;
struct brcmf_pub *drvr = cfg->pub;
- struct brcmf_if *ifp;
struct brcmu_chan ch;
enum nl80211_band band = 0;
enum nl80211_chan_width width = 0;
u32 chanspec;
int freq, err;
- if (!ndev)
+ if (!ndev || drvr->bus_if->state != BRCMF_BUS_UP)
return -ENODEV;
- ifp = netdev_priv(ndev);
- err = brcmf_fil_iovar_int_get(ifp, "chanspec", &chanspec);
+ err = brcmf_fil_iovar_int_get(netdev_priv(ndev), "chanspec", &chanspec);
if (err) {
bphy_err(drvr, "chanspec failed (%d)\n", err);
return err;
@@ -6714,6 +6730,11 @@ static int brcmf_setup_wiphy(struct wiphy *wiphy, struct brcmf_if *ifp)
}
}
+ if (wiphy->bands[NL80211_BAND_5GHZ] &&
+ brcmf_feat_is_enabled(ifp, BRCMF_FEAT_DOT11H))
+ wiphy_ext_feature_set(wiphy,
+ NL80211_EXT_FEATURE_DFS_OFFLOAD);
+
wiphy_read_of_freq_limits(wiphy);
return 0;
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c
index aa89d620ee5d..dec25e415619 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c
@@ -258,7 +258,6 @@ int brcmf_c_preinit_dcmds(struct brcmf_if *ifp)
/* query for 'ver' to get version info from firmware */
memset(buf, 0, sizeof(buf));
- strlcpy(buf, "ver", sizeof(buf));
err = brcmf_fil_iovar_data_get(ifp, "ver", buf, sizeof(buf));
if (err < 0) {
bphy_err(drvr, "Retrieving version information failed, %d\n",
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c
index bf18491a33a5..705b8cc53c3e 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c
@@ -579,7 +579,8 @@ static int brcmf_netdev_stop(struct net_device *ndev)
brcmf_cfg80211_down(ndev);
- brcmf_fil_iovar_data_set(ifp, "arp_hostip_clear", NULL, 0);
+ if (ifp->drvr->bus_if->state == BRCMF_BUS_UP)
+ brcmf_fil_iovar_data_set(ifp, "arp_hostip_clear", NULL, 0);
brcmf_net_setcarrier(ifp, false);
@@ -1307,27 +1308,26 @@ void brcmf_detach(struct device *dev)
unregister_inet6addr_notifier(&drvr->inet6addr_notifier);
#endif
- /* stop firmware event handling */
- brcmf_fweh_detach(drvr);
- if (drvr->config)
- brcmf_p2p_detach(&drvr->config->p2p);
-
brcmf_bus_change_state(bus_if, BRCMF_BUS_DOWN);
+ brcmf_bus_stop(drvr->bus_if);
- brcmf_proto_detach_pre_delif(drvr);
+ brcmf_fweh_detach(drvr);
+ brcmf_proto_detach(drvr);
/* make sure primary interface removed last */
- for (i = BRCMF_MAX_IFS-1; i > -1; i--)
- brcmf_remove_interface(drvr->iflist[i], false);
-
- brcmf_cfg80211_detach(drvr->config);
- drvr->config = NULL;
-
- brcmf_bus_stop(drvr->bus_if);
+ for (i = BRCMF_MAX_IFS - 1; i > -1; i--) {
+ if (drvr->iflist[i])
+ brcmf_del_if(drvr, drvr->iflist[i]->bsscfgidx, false);
+ }
- brcmf_proto_detach_post_delif(drvr);
+ if (drvr->config) {
+ brcmf_p2p_detach(&drvr->config->p2p);
+ brcmf_cfg80211_detach(drvr->config);
+ drvr->config = NULL;
+ }
bus_if->drvr = NULL;
+
wiphy_free(drvr->wiphy);
}
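
The reordered brcmf_detach() is the heart of this series: the bus is marked DOWN and stopped first, so firmware commands issued during the remaining teardown short-circuit instead of timing out; event handling and the protocol layer are then detached, interfaces are deleted from the highest bsscfg index down so the primary interface goes last, and only after that is the cfg80211/p2p state freed. This is also why the earlier pre_delif/post_delif split of the proto and fwsignal detach paths can collapse back into single functions.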
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c
index 73aff4e4039d..2c3526aeca6f 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c
@@ -39,6 +39,7 @@ static const struct brcmf_feat_fwcap brcmf_fwcap_map[] = {
{ BRCMF_FEAT_P2P, "p2p" },
{ BRCMF_FEAT_MONITOR, "monitor" },
{ BRCMF_FEAT_MONITOR_FMT_RADIOTAP, "rtap" },
+ { BRCMF_FEAT_DOT11H, "802.11h" }
};
#ifdef DEBUG
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.h
index f127eb2030a6..736a8179f62f 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.h
@@ -25,6 +25,7 @@
* MONITOR: firmware can pass monitor packets to host.
* MONITOR_FMT_RADIOTAP: firmware provides monitor packets with radiotap header
* MONITOR_FMT_HW_RX_HDR: firmware provides monitor packets with hw/ucode header
+ * DOT11H: firmware supports 802.11h
*/
#define BRCMF_FEAT_LIST \
BRCMF_FEAT_DEF(MBSS) \
@@ -43,7 +44,8 @@
BRCMF_FEAT_DEF(FWSUP) \
BRCMF_FEAT_DEF(MONITOR) \
BRCMF_FEAT_DEF(MONITOR_FMT_RADIOTAP) \
- BRCMF_FEAT_DEF(MONITOR_FMT_HW_RX_HDR)
+ BRCMF_FEAT_DEF(MONITOR_FMT_HW_RX_HDR) \
+ BRCMF_FEAT_DEF(DOT11H)
/*
* Quirks:
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.c
index adedd4fac10b..79c8a858b6d6 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.c
@@ -303,16 +303,7 @@ void brcmf_fweh_attach(struct brcmf_pub *drvr)
void brcmf_fweh_detach(struct brcmf_pub *drvr)
{
struct brcmf_fweh_info *fweh = &drvr->fweh;
- struct brcmf_if *ifp = brcmf_get_ifp(drvr, 0);
- s8 eventmask[BRCMF_EVENTING_MASK_LEN];
- if (ifp) {
- /* clear all events */
- memset(eventmask, 0, BRCMF_EVENTING_MASK_LEN);
- (void)brcmf_fil_iovar_data_set(ifp, "event_msgs",
- eventmask,
- BRCMF_EVENTING_MASK_LEN);
- }
/* cancel the worker */
cancel_work_sync(&fweh->event_work);
WARN_ON(!list_empty(&fweh->event_q));
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c
index b8452cb46297..2bd892df83cc 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c
@@ -2432,25 +2432,17 @@ struct brcmf_fws_info *brcmf_fws_attach(struct brcmf_pub *drvr)
return fws;
fail:
- brcmf_fws_detach_pre_delif(fws);
- brcmf_fws_detach_post_delif(fws);
+ brcmf_fws_detach(fws);
return ERR_PTR(rc);
}
-void brcmf_fws_detach_pre_delif(struct brcmf_fws_info *fws)
+void brcmf_fws_detach(struct brcmf_fws_info *fws)
{
if (!fws)
return;
- if (fws->fws_wq) {
- destroy_workqueue(fws->fws_wq);
- fws->fws_wq = NULL;
- }
-}
-void brcmf_fws_detach_post_delif(struct brcmf_fws_info *fws)
-{
- if (!fws)
- return;
+ if (fws->fws_wq)
+ destroy_workqueue(fws->fws_wq);
/* cleanup */
brcmf_fws_lock(fws);
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.h
index 10184eeaad94..b486d578ec96 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.h
@@ -7,8 +7,7 @@
#define FWSIGNAL_H_
struct brcmf_fws_info *brcmf_fws_attach(struct brcmf_pub *drvr);
-void brcmf_fws_detach_pre_delif(struct brcmf_fws_info *fws);
-void brcmf_fws_detach_post_delif(struct brcmf_fws_info *fws);
+void brcmf_fws_detach(struct brcmf_fws_info *fws);
void brcmf_fws_debugfs_create(struct brcmf_pub *drvr);
bool brcmf_fws_queue_skbs(struct brcmf_fws_info *fws);
bool brcmf_fws_fc_active(struct brcmf_fws_info *fws);
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c
index 241747bd5cb2..8428be8b8d43 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c
@@ -1398,6 +1398,13 @@ void brcmf_msgbuf_delete_flowring(struct brcmf_pub *drvr, u16 flowid)
u8 ifidx;
int err;
+ /* no need to submit if firmware cannot be reached */

+ if (drvr->bus_if->state != BRCMF_BUS_UP) {
+ brcmf_dbg(MSGBUF, "bus down, flowring will be removed\n");
+ brcmf_msgbuf_remove_flowring(msgbuf, flowid);
+ return;
+ }
+
commonring = msgbuf->commonrings[BRCMF_H2D_MSGRING_CONTROL_SUBMIT];
brcmf_commonring_lock(commonring);
ret_ptr = brcmf_commonring_reserve_for_write(commonring);
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c
index 4ea5401c4d6b..8d0e74416643 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c
@@ -794,7 +794,8 @@ static void brcmf_pcie_bus_console_read(struct brcmf_pciedev_info *devinfo,
if (ch == '\n') {
console->log_str[console->log_idx] = 0;
if (error)
- brcmf_err(bus, "CONSOLE: %s", console->log_str);
+ __brcmf_err(bus, __func__, "CONSOLE: %s",
+ console->log_str);
else
pr_debug("CONSOLE: %s", console->log_str);
console->log_idx = 0;
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/proto.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/proto.c
index e3d1b075044b..2e911d4874af 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/proto.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/proto.c
@@ -56,22 +56,16 @@ fail:
return -ENOMEM;
}
-void brcmf_proto_detach_post_delif(struct brcmf_pub *drvr)
+void brcmf_proto_detach(struct brcmf_pub *drvr)
{
brcmf_dbg(TRACE, "Enter\n");
if (drvr->proto) {
if (drvr->bus_if->proto_type == BRCMF_PROTO_BCDC)
- brcmf_proto_bcdc_detach_post_delif(drvr);
+ brcmf_proto_bcdc_detach(drvr);
else if (drvr->bus_if->proto_type == BRCMF_PROTO_MSGBUF)
brcmf_proto_msgbuf_detach(drvr);
kfree(drvr->proto);
drvr->proto = NULL;
}
}
-
-void brcmf_proto_detach_pre_delif(struct brcmf_pub *drvr)
-{
- if (drvr->proto && drvr->bus_if->proto_type == BRCMF_PROTO_BCDC)
- brcmf_proto_bcdc_detach_pre_delif(drvr);
-}
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/proto.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/proto.h
index 8d55fad531d0..bd08d3aaa8f4 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/proto.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/proto.h
@@ -43,8 +43,7 @@ struct brcmf_proto {
int brcmf_proto_attach(struct brcmf_pub *drvr);
-void brcmf_proto_detach_pre_delif(struct brcmf_pub *drvr);
-void brcmf_proto_detach_post_delif(struct brcmf_pub *drvr);
+void brcmf_proto_detach(struct brcmf_pub *drvr);
static inline int brcmf_proto_hdrpull(struct brcmf_pub *drvr, bool do_fws,
struct sk_buff *skb,
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/main.c b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/main.c
index 7d4e8f589fdc..080e829da9b3 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/main.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/main.c
@@ -5248,15 +5248,7 @@ int brcms_c_set_gmode(struct brcms_c_info *wlc, u8 gmode, bool config)
/* Default to 54g Auto */
/* Advertise and use shortslot (-1/0/1 Auto/Off/On) */
s8 shortslot = BRCMS_SHORTSLOT_AUTO;
- bool shortslot_restrict = false; /* Restrict association to stations
- * that support shortslot
- */
bool ofdm_basic = false; /* Make 6, 12, and 24 basic rates */
- /* Advertise and use short preambles (-1/0/1 Auto/Off/On) */
- int preamble = BRCMS_PLCP_LONG;
- bool preamble_restrict = false; /* Restrict association to stations
- * that support short preambles
- */
struct brcms_band *band;
/* if N-support is enabled, allow Gmode set as long as requested
@@ -5297,16 +5289,11 @@ int brcms_c_set_gmode(struct brcms_c_info *wlc, u8 gmode, bool config)
case GMODE_ONLY:
ofdm_basic = true;
- preamble = BRCMS_PLCP_SHORT;
- preamble_restrict = true;
break;
case GMODE_PERFORMANCE:
shortslot = BRCMS_SHORTSLOT_ON;
- shortslot_restrict = true;
ofdm_basic = true;
- preamble = BRCMS_PLCP_SHORT;
- preamble_restrict = true;
break;
default:
diff --git a/drivers/net/wireless/intel/ipw2x00/ipw2100.c b/drivers/net/wireless/intel/ipw2x00/ipw2100.c
index 75c0c29d81f0..8dfbaff2d1fe 100644
--- a/drivers/net/wireless/intel/ipw2x00/ipw2100.c
+++ b/drivers/net/wireless/intel/ipw2x00/ipw2100.c
@@ -4413,7 +4413,7 @@ static void ipw2100_kill_works(struct ipw2100_priv *priv)
static int ipw2100_tx_allocate(struct ipw2100_priv *priv)
{
- int i, j, err = -EINVAL;
+ int i, j, err;
void *v;
dma_addr_t p;
diff --git a/drivers/net/wireless/intel/iwlegacy/common.c b/drivers/net/wireless/intel/iwlegacy/common.c
index 4a88e35d58d7..73f7bbf742bc 100644
--- a/drivers/net/wireless/intel/iwlegacy/common.c
+++ b/drivers/net/wireless/intel/iwlegacy/common.c
@@ -4942,8 +4942,7 @@ EXPORT_SYMBOL(il_add_beacon_time);
static int
il_pci_suspend(struct device *device)
{
- struct pci_dev *pdev = to_pci_dev(device);
- struct il_priv *il = pci_get_drvdata(pdev);
+ struct il_priv *il = dev_get_drvdata(device);
/*
* This function is called when system goes into suspend state
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/rs.c b/drivers/net/wireless/intel/iwlwifi/dvm/rs.c
index b1e5d64ca60d..74229fcb63a9 100644
--- a/drivers/net/wireless/intel/iwlwifi/dvm/rs.c
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/rs.c
@@ -3256,28 +3256,16 @@ static void rs_add_debugfs(void *priv, void *priv_sta,
struct dentry *dir)
{
struct iwl_lq_sta *lq_sta = priv_sta;
- lq_sta->rs_sta_dbgfs_scale_table_file =
- debugfs_create_file("rate_scale_table", 0600, dir,
- lq_sta, &rs_sta_dbgfs_scale_table_ops);
- lq_sta->rs_sta_dbgfs_stats_table_file =
- debugfs_create_file("rate_stats_table", 0400, dir,
- lq_sta, &rs_sta_dbgfs_stats_table_ops);
- lq_sta->rs_sta_dbgfs_rate_scale_data_file =
- debugfs_create_file("rate_scale_data", 0400, dir,
- lq_sta, &rs_sta_dbgfs_rate_scale_data_ops);
- lq_sta->rs_sta_dbgfs_tx_agg_tid_en_file =
- debugfs_create_u8("tx_agg_tid_enable", 0600, dir,
- &lq_sta->tx_agg_tid_en);
-}
+ debugfs_create_file("rate_scale_table", 0600, dir, lq_sta,
+ &rs_sta_dbgfs_scale_table_ops);
+ debugfs_create_file("rate_stats_table", 0400, dir, lq_sta,
+ &rs_sta_dbgfs_stats_table_ops);
+ debugfs_create_file("rate_scale_data", 0400, dir, lq_sta,
+ &rs_sta_dbgfs_rate_scale_data_ops);
+ debugfs_create_u8("tx_agg_tid_enable", 0600, dir,
+ &lq_sta->tx_agg_tid_en);
-static void rs_remove_debugfs(void *priv, void *priv_sta)
-{
- struct iwl_lq_sta *lq_sta = priv_sta;
- debugfs_remove(lq_sta->rs_sta_dbgfs_scale_table_file);
- debugfs_remove(lq_sta->rs_sta_dbgfs_stats_table_file);
- debugfs_remove(lq_sta->rs_sta_dbgfs_rate_scale_data_file);
- debugfs_remove(lq_sta->rs_sta_dbgfs_tx_agg_tid_en_file);
}
#endif
@@ -3303,7 +3291,6 @@ static const struct rate_control_ops rs_ops = {
.free_sta = rs_free_sta,
#ifdef CONFIG_MAC80211_DEBUGFS
.add_sta_debugfs = rs_add_debugfs,
- .remove_sta_debugfs = rs_remove_debugfs,
#endif
};
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/rs.h b/drivers/net/wireless/intel/iwlwifi/dvm/rs.h
index b7a1854cd202..68a840d739e8 100644
--- a/drivers/net/wireless/intel/iwlwifi/dvm/rs.h
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/rs.h
@@ -356,10 +356,6 @@ struct iwl_lq_sta {
struct iwl_traffic_load load[IWL_MAX_TID_COUNT];
u8 tx_agg_tid_en;
#ifdef CONFIG_MAC80211_DEBUGFS
- struct dentry *rs_sta_dbgfs_scale_table_file;
- struct dentry *rs_sta_dbgfs_stats_table_file;
- struct dentry *rs_sta_dbgfs_rate_scale_data_file;
- struct dentry *rs_sta_dbgfs_tx_agg_tid_en_file;
u32 dbg_fixed_rate;
#endif
struct iwl_priv *drv;
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
index a7bc00d1296f..d6499763f0dd 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
@@ -4045,7 +4045,8 @@ out_unlock:
return ret;
}
-static int iwl_mvm_cancel_roc(struct ieee80211_hw *hw)
+static int iwl_mvm_cancel_roc(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
{
struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rs.c b/drivers/net/wireless/intel/iwlwifi/mvm/rs.c
index d3f04acfbacb..e4415e58fa78 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/rs.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rs.c
@@ -4127,10 +4127,6 @@ static void rs_drv_add_sta_debugfs(void *mvm, void *priv_sta,
MVM_DEBUGFS_ADD_FILE_RS(ss_force, dir, 0600);
}
-
-void rs_remove_sta_debugfs(void *mvm, void *mvm_sta)
-{
-}
#endif
/*
@@ -4158,7 +4154,6 @@ static const struct rate_control_ops rs_mvm_ops_drv = {
.rate_update = rs_drv_rate_update,
#ifdef CONFIG_MAC80211_DEBUGFS
.add_sta_debugfs = rs_drv_add_sta_debugfs,
- .remove_sta_debugfs = rs_remove_sta_debugfs,
#endif
.capa = RATE_CTRL_CAPA_VHT_EXT_NSS_BW,
};
diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
index 772e54f0696f..f86c2891310a 100644
--- a/drivers/net/wireless/mac80211_hwsim.c
+++ b/drivers/net/wireless/mac80211_hwsim.c
@@ -2216,7 +2216,8 @@ static int mac80211_hwsim_roc(struct ieee80211_hw *hw,
return 0;
}
-static int mac80211_hwsim_croc(struct ieee80211_hw *hw)
+static int mac80211_hwsim_croc(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
{
struct mac80211_hwsim_data *hwsim = hw->priv;
@@ -2594,7 +2595,7 @@ static const struct ieee80211_sband_iftype_data he_capa_5ghz = {
},
};
-static void mac80211_hswim_he_capab(struct ieee80211_supported_band *sband)
+static void mac80211_hwsim_he_capab(struct ieee80211_supported_band *sband)
{
if (sband->band == NL80211_BAND_2GHZ)
sband->iftype_data =
@@ -2805,12 +2806,6 @@ static int mac80211_hwsim_new_radio(struct genl_info *info,
ieee80211_hw_set(hw, SIGNAL_DBM);
ieee80211_hw_set(hw, SUPPORTS_PS);
ieee80211_hw_set(hw, TDLS_WIDER_BW);
-
- /* We only have SW crypto and only implement the A-MPDU API
- * (but don't really build A-MPDUs) so can have extended key
- * support
- */
- ieee80211_hw_set(hw, EXT_KEY_ID_NATIVE);
if (rctbl)
ieee80211_hw_set(hw, SUPPORTS_RC_TABLE);
ieee80211_hw_set(hw, SUPPORTS_MULTI_BSSID);
@@ -2897,7 +2892,7 @@ static int mac80211_hwsim_new_radio(struct genl_info *info,
sband->ht_cap.mcs.rx_mask[1] = 0xff;
sband->ht_cap.mcs.tx_params = IEEE80211_HT_MCS_TX_DEFINED;
- mac80211_hswim_he_capab(sband);
+ mac80211_hwsim_he_capab(sband);
hw->wiphy->bands[band] = sband;
}
@@ -3233,6 +3228,7 @@ static int hwsim_cloned_frame_received_nl(struct sk_buff *skb_2,
{
struct mac80211_hwsim_data *data2;
struct ieee80211_rx_status rx_status;
+ struct ieee80211_hdr *hdr;
const u8 *dst;
int frame_data_len;
void *frame_data;
@@ -3299,6 +3295,12 @@ static int hwsim_cloned_frame_received_nl(struct sk_buff *skb_2,
rx_status.rate_idx = nla_get_u32(info->attrs[HWSIM_ATTR_RX_RATE]);
rx_status.signal = nla_get_u32(info->attrs[HWSIM_ATTR_SIGNAL]);
+ hdr = (void *)skb->data;
+
+ if (ieee80211_is_beacon(hdr->frame_control) ||
+ ieee80211_is_probe_resp(hdr->frame_control))
+ rx_status.boottime_ns = ktime_get_boottime_ns();
+
memcpy(IEEE80211_SKB_RXCB(skb), &rx_status, sizeof(rx_status));
data2->rx_pkts++;
data2->rx_bytes += skb->len;
diff --git a/drivers/net/wireless/marvell/libertas/if_spi.c b/drivers/net/wireless/marvell/libertas/if_spi.c
index 27067e79e83f..d07fe82c557e 100644
--- a/drivers/net/wireless/marvell/libertas/if_spi.c
+++ b/drivers/net/wireless/marvell/libertas/if_spi.c
@@ -766,19 +766,15 @@ static int if_spi_c2h_data(struct if_spi_card *card)
/* Read the data from the WLAN module into our skb... */
err = spu_read(card, IF_SPI_DATA_RDWRPORT_REG, data, ALIGN(len, 4));
- if (err)
- goto free_skb;
+ if (err) {
+ dev_kfree_skb(skb);
+ goto out;
+ }
/* pass the SKB to libertas */
err = lbs_process_rxed_packet(card->priv, skb);
- if (err)
- goto free_skb;
-
- /* success */
- goto out;
+ /* lbs_process_rxed_packet() consumes the skb */
-free_skb:
- dev_kfree_skb(skb);
out:
if (err)
netdev_err(priv->dev, "%s: err=%d\n", __func__, err);
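
The if_spi change is an skb-ownership fix: lbs_process_rxed_packet() consumes the skb whether it succeeds or fails, so freeing it again on that error path was a double free. The handoff, annotated:

	/*
	 * Ownership handoff illustrated by the hunk above:
	 *
	 *   skb = dev_alloc_skb(...);        caller owns the skb
	 *   spu_read(...) fails          ->  still the caller's: free it
	 *   lbs_process_rxed_packet()    ->  consumed on success *and*
	 *                                    failure: never free it again
	 */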
diff --git a/drivers/net/wireless/marvell/libertas/if_usb.c b/drivers/net/wireless/marvell/libertas/if_usb.c
index afac2481909b..20436a289d5c 100644
--- a/drivers/net/wireless/marvell/libertas/if_usb.c
+++ b/drivers/net/wireless/marvell/libertas/if_usb.c
@@ -50,7 +50,8 @@ static const struct lbs_fw_table fw_table[] = {
{ MODEL_8388, "libertas/usb8388_v5.bin", NULL },
{ MODEL_8388, "libertas/usb8388.bin", NULL },
{ MODEL_8388, "usb8388.bin", NULL },
- { MODEL_8682, "libertas/usb8682.bin", NULL }
+ { MODEL_8682, "libertas/usb8682.bin", NULL },
+ { 0, NULL, NULL }
};
static const struct usb_device_id if_usb_table[] = {
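The new terminator entry exists because code walking fw_table stops at a NULL firmware name instead of carrying a length around; without the sentinel the walk runs off the end of the array. The idiom, sketched with a hypothetical table layout (the real struct lbs_fw_table fields may differ):

struct demo_fw_entry {
	int model;
	const char *fwname;
};

static const struct demo_fw_entry demo_table[] = {
	{ 1, "libertas/usb8388.bin" },
	{ 2, "libertas/usb8682.bin" },
	{ 0, NULL }		/* sentinel: the walk below stops here */
};

static const char *demo_pick_fw(int model)
{
	const struct demo_fw_entry *e;

	for (e = demo_table; e->fwname; e++)
		if (e->model == model)
			return e->fwname;
	return NULL;
}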
diff --git a/drivers/net/wireless/marvell/libertas/main.c b/drivers/net/wireless/marvell/libertas/main.c
index 5968852b65a7..2233b59cdf44 100644
--- a/drivers/net/wireless/marvell/libertas/main.c
+++ b/drivers/net/wireless/marvell/libertas/main.c
@@ -1046,7 +1046,7 @@ int lbs_rtap_supported(struct lbs_private *priv)
int lbs_start_card(struct lbs_private *priv)
{
struct net_device *dev = priv->dev;
- int ret = -1;
+ int ret;
/* poke the firmware */
ret = lbs_setup_firmware(priv);
diff --git a/drivers/net/wireless/marvell/libertas_tf/cmd.c b/drivers/net/wireless/marvell/libertas_tf/cmd.c
index 1eacca0d079b..a0b4c9debc11 100644
--- a/drivers/net/wireless/marvell/libertas_tf/cmd.c
+++ b/drivers/net/wireless/marvell/libertas_tf/cmd.c
@@ -65,7 +65,7 @@ static void lbtf_geo_init(struct lbtf_private *priv)
break;
}
- for (ch = priv->range.start; ch < priv->range.end; ch++)
+ for (ch = range->start; ch < range->end; ch++)
priv->channels[CHAN_TO_IDX(ch)].flags = 0;
}
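The one-line lbtf_geo_init() fix is a classic shadowing bug: the switch above selects a region-specific table into the local `range`, yet the loop iterated priv->range anyway, clearing flags for the wrong channel set. Sketched in isolation (types and indexing hypothetical):

struct demo_range { u8 start, end; };

/* Sketch: honour the range the caller just selected, not a default
 * cached elsewhere in the private struct. */
static void demo_clear_channel_flags(u32 *flags,
				     const struct demo_range *range)
{
	u8 ch;

	for (ch = range->start; ch < range->end; ch++)
		flags[ch] = 0;
}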
diff --git a/drivers/net/wireless/marvell/mwifiex/init.c b/drivers/net/wireless/marvell/mwifiex/init.c
index 6c0e52eb8794..1aa93e7e9835 100644
--- a/drivers/net/wireless/marvell/mwifiex/init.c
+++ b/drivers/net/wireless/marvell/mwifiex/init.c
@@ -59,7 +59,7 @@ static void wakeup_timer_fn(struct timer_list *t)
adapter->hw_status = MWIFIEX_HW_STATUS_RESET;
mwifiex_cancel_all_pending_cmd(adapter);
- if (adapter->if_ops.card_reset && !adapter->hs_activated)
+ if (adapter->if_ops.card_reset)
adapter->if_ops.card_reset(adapter);
}
diff --git a/drivers/net/wireless/marvell/mwifiex/pcie.c b/drivers/net/wireless/marvell/mwifiex/pcie.c
index b54f73e3d508..eff06d59e9df 100644
--- a/drivers/net/wireless/marvell/mwifiex/pcie.c
+++ b/drivers/net/wireless/marvell/mwifiex/pcie.c
@@ -150,10 +150,8 @@ static bool mwifiex_pcie_ok_to_access_hw(struct mwifiex_adapter *adapter)
static int mwifiex_pcie_suspend(struct device *dev)
{
struct mwifiex_adapter *adapter;
- struct pcie_service_card *card;
- struct pci_dev *pdev = to_pci_dev(dev);
+ struct pcie_service_card *card = dev_get_drvdata(dev);
- card = pci_get_drvdata(pdev);
/* Might still be loading firmware */
wait_for_completion(&card->fw_done);
@@ -195,10 +193,8 @@ static int mwifiex_pcie_suspend(struct device *dev)
static int mwifiex_pcie_resume(struct device *dev)
{
struct mwifiex_adapter *adapter;
- struct pcie_service_card *card;
- struct pci_dev *pdev = to_pci_dev(dev);
+ struct pcie_service_card *card = dev_get_drvdata(dev);
- card = pci_get_drvdata(pdev);
if (!card->adapter) {
dev_err(dev, "adapter structure is not valid\n");
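Both mwifiex PM callbacks (and the qtnfmac and rtlwifi ones later in the series) drop the to_pci_dev()/pci_get_drvdata() detour: pci_set_drvdata() stores into the embedded struct device, so dev_get_drvdata(dev) yields the same pointer. A minimal sketch of the shortened callback, with hypothetical driver names:

struct demo_card { int state; };

static int demo_suspend(struct device *dev)
{
	/* Same pointer pci_set_drvdata() stored at probe time. */
	struct demo_card *card = dev_get_drvdata(dev);

	card->state = 0;	/* ... quiesce the hardware ... */
	return 0;
}

static SIMPLE_DEV_PM_OPS(demo_pm_ops, demo_suspend, NULL);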
diff --git a/drivers/net/wireless/marvell/mwifiex/scan.c b/drivers/net/wireless/marvell/mwifiex/scan.c
index 21dda385f6c6..593c594982cb 100644
--- a/drivers/net/wireless/marvell/mwifiex/scan.c
+++ b/drivers/net/wireless/marvell/mwifiex/scan.c
@@ -1244,7 +1244,7 @@ int mwifiex_update_bss_desc_with_ie(struct mwifiex_adapter *adapter,
mwifiex_dbg(adapter, ERROR,
"err: InterpretIE: in processing\t"
"IE, bytes left < IE length\n");
- return -1;
+ return -EINVAL;
}
switch (element_id) {
case WLAN_EID_SSID:
diff --git a/drivers/net/wireless/marvell/mwifiex/tdls.c b/drivers/net/wireless/marvell/mwifiex/tdls.c
index 18e654dc34c6..09313047beed 100644
--- a/drivers/net/wireless/marvell/mwifiex/tdls.c
+++ b/drivers/net/wireless/marvell/mwifiex/tdls.c
@@ -731,7 +731,6 @@ mwifiex_construct_tdls_action_frame(struct mwifiex_private *priv,
u16 status_code, struct sk_buff *skb)
{
struct ieee80211_mgmt *mgmt;
- u8 bc_addr[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
int ret;
u16 capab;
struct ieee80211_ht_cap *ht_cap;
@@ -765,7 +764,7 @@ mwifiex_construct_tdls_action_frame(struct mwifiex_private *priv,
memmove(pos + ETH_ALEN, &mgmt->u.action.category,
sizeof(mgmt->u.action.u.tdls_discover_resp));
/* init address 4 */
- memcpy(pos, bc_addr, ETH_ALEN);
+ eth_broadcast_addr(pos);
ret = mwifiex_tdls_append_rates_ie(priv, skb);
if (ret) {
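eth_broadcast_addr() from <linux/etherdevice.h> replaces the on-stack six-0xff array: it is simply a memset of ETH_ALEN bytes to 0xff, so both the local variable and the memcpy go away. Usage in isolation:

#include <linux/etherdevice.h>

/* Sketch: fill a frame's address 4 with ff:ff:ff:ff:ff:ff. */
static void demo_fill_addr4(u8 *pos)
{
	eth_broadcast_addr(pos);
}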
diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/soc.c b/drivers/net/wireless/mediatek/mt76/mt7603/soc.c
index b920be1f5718..c6c1ce69bcbc 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7603/soc.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7603/soc.c
@@ -17,10 +17,8 @@ mt76_wmac_probe(struct platform_device *pdev)
int ret;
irq = platform_get_irq(pdev, 0);
- if (irq < 0) {
- dev_err(&pdev->dev, "Failed to get device IRQ\n");
+ if (irq < 0)
return irq;
- }
mem_base = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(mem_base)) {
diff --git a/drivers/net/wireless/mediatek/mt7601u/init.c b/drivers/net/wireless/mediatek/mt7601u/init.c
index 9bfac9f1d47f..cada48800928 100644
--- a/drivers/net/wireless/mediatek/mt7601u/init.c
+++ b/drivers/net/wireless/mediatek/mt7601u/init.c
@@ -557,6 +557,9 @@ mt76_init_sband_2g(struct mt7601u_dev *dev)
{
dev->sband_2g = devm_kzalloc(dev->dev, sizeof(*dev->sband_2g),
GFP_KERNEL);
+ if (!dev->sband_2g)
+ return -ENOMEM;
+
dev->hw->wiphy->bands[NL80211_BAND_2GHZ] = dev->sband_2g;
WARN_ON(dev->ee->reg.start - 1 + dev->ee->reg.num >
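The added check closes a NULL dereference: devm_kzalloc() fails like any allocator, and the very next statement stores the pointer into the wiphy band table. The devm allocate-and-check idiom, sketched with a hypothetical wrapper:

/* Sketch: devm_* memory is released automatically on detach, but a
 * failed allocation still has to be caught before first use. */
static int demo_alloc_sband(struct device *dev,
			    struct ieee80211_supported_band **sband)
{
	*sband = devm_kzalloc(dev, sizeof(**sband), GFP_KERNEL);
	if (!*sband)
		return -ENOMEM;
	return 0;
}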
diff --git a/drivers/net/wireless/mediatek/mt7601u/main.c b/drivers/net/wireless/mediatek/mt7601u/main.c
index 89a7b1234ffb..72e608cc53af 100644
--- a/drivers/net/wireless/mediatek/mt7601u/main.c
+++ b/drivers/net/wireless/mediatek/mt7601u/main.c
@@ -351,7 +351,7 @@ mt76_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
struct ieee80211_sta *sta = params->sta;
enum ieee80211_ampdu_mlme_action action = params->action;
u16 tid = params->tid;
- u16 *ssn = &params->ssn;
+ u16 ssn = params->ssn;
struct mt76_sta *msta = (struct mt76_sta *) sta->drv_priv;
WARN_ON(msta->wcid.idx > GROUP_WCID(0));
@@ -371,7 +371,7 @@ mt76_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT:
break;
case IEEE80211_AMPDU_TX_START:
- msta->agg_ssn[tid] = *ssn << 4;
+ msta->agg_ssn[tid] = ssn << 4;
ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
break;
case IEEE80211_AMPDU_TX_STOP_CONT:
diff --git a/drivers/net/wireless/quantenna/qtnfmac/pcie/pcie.c b/drivers/net/wireless/quantenna/qtnfmac/pcie/pcie.c
index e4e9344b6982..8ae318b5fe54 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/pcie/pcie.c
+++ b/drivers/net/wireless/quantenna/qtnfmac/pcie/pcie.c
@@ -430,7 +430,7 @@ static int qtnf_pcie_suspend(struct device *dev)
struct qtnf_pcie_bus_priv *priv;
struct qtnf_bus *bus;
- bus = pci_get_drvdata(to_pci_dev(dev));
+ bus = dev_get_drvdata(dev);
if (!bus)
return -EFAULT;
@@ -443,7 +443,7 @@ static int qtnf_pcie_resume(struct device *dev)
struct qtnf_pcie_bus_priv *priv;
struct qtnf_bus *bus;
- bus = pci_get_drvdata(to_pci_dev(dev));
+ bus = dev_get_drvdata(dev);
if (!bus)
return -EFAULT;
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2800usb.c b/drivers/net/wireless/ralink/rt2x00/rt2800usb.c
index fdf0504b5f1d..0dfb55c69b73 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2800usb.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt2800usb.c
@@ -1086,6 +1086,7 @@ static const struct usb_device_id rt2800usb_device_table[] = {
{ USB_DEVICE(0x0846, 0x9013) },
{ USB_DEVICE(0x0846, 0x9019) },
/* Planex */
+ { USB_DEVICE(0x2019, 0xed14) },
{ USB_DEVICE(0x2019, 0xed19) },
/* Ralink */
{ USB_DEVICE(0x148f, 0x3573) },
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00debug.c b/drivers/net/wireless/ralink/rt2x00/rt2x00debug.c
index ef5f51512212..4d4e3888ef20 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2x00debug.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt2x00debug.c
@@ -65,26 +65,6 @@ struct rt2x00debug_intf {
* - crypto stats file
*/
struct dentry *driver_folder;
- struct dentry *driver_entry;
- struct dentry *chipset_entry;
- struct dentry *dev_flags;
- struct dentry *cap_flags;
- struct dentry *restart_hw;
- struct dentry *register_folder;
- struct dentry *csr_off_entry;
- struct dentry *csr_val_entry;
- struct dentry *eeprom_off_entry;
- struct dentry *eeprom_val_entry;
- struct dentry *bbp_off_entry;
- struct dentry *bbp_val_entry;
- struct dentry *rf_off_entry;
- struct dentry *rf_val_entry;
- struct dentry *rfcsr_off_entry;
- struct dentry *rfcsr_val_entry;
- struct dentry *queue_folder;
- struct dentry *queue_frame_dump_entry;
- struct dentry *queue_stats_entry;
- struct dentry *crypto_stats_entry;
/*
* The frame dump file only allows a single reader,
@@ -596,39 +576,34 @@ static const struct file_operations rt2x00debug_restart_hw = {
.llseek = generic_file_llseek,
};
-static struct dentry *rt2x00debug_create_file_driver(const char *name,
- struct rt2x00debug_intf
- *intf,
- struct debugfs_blob_wrapper
- *blob)
+static void rt2x00debug_create_file_driver(const char *name,
+ struct rt2x00debug_intf *intf,
+ struct debugfs_blob_wrapper *blob)
{
char *data;
data = kzalloc(3 * MAX_LINE_LENGTH, GFP_KERNEL);
if (!data)
- return NULL;
+ return;
blob->data = data;
data += sprintf(data, "driver:\t%s\n", intf->rt2x00dev->ops->name);
data += sprintf(data, "version:\t%s\n", DRV_VERSION);
blob->size = strlen(blob->data);
- return debugfs_create_blob(name, 0400, intf->driver_folder, blob);
+ debugfs_create_blob(name, 0400, intf->driver_folder, blob);
}
-static struct dentry *rt2x00debug_create_file_chipset(const char *name,
- struct rt2x00debug_intf
- *intf,
- struct
- debugfs_blob_wrapper
- *blob)
+static void rt2x00debug_create_file_chipset(const char *name,
+ struct rt2x00debug_intf *intf,
+ struct debugfs_blob_wrapper *blob)
{
const struct rt2x00debug *debug = intf->debug;
char *data;
data = kzalloc(9 * MAX_LINE_LENGTH, GFP_KERNEL);
if (!data)
- return NULL;
+ return;
blob->data = data;
data += sprintf(data, "rt chip:\t%04x\n", intf->rt2x00dev->chip.rt);
@@ -654,13 +629,15 @@ static struct dentry *rt2x00debug_create_file_chipset(const char *name,
blob->size = strlen(blob->data);
- return debugfs_create_blob(name, 0400, intf->driver_folder, blob);
+ debugfs_create_blob(name, 0400, intf->driver_folder, blob);
}
void rt2x00debug_register(struct rt2x00_dev *rt2x00dev)
{
const struct rt2x00debug *debug = rt2x00dev->ops->debugfs;
struct rt2x00debug_intf *intf;
+ struct dentry *queue_folder;
+ struct dentry *register_folder;
intf = kzalloc(sizeof(struct rt2x00debug_intf), GFP_KERNEL);
if (!intf) {
@@ -676,43 +653,27 @@ void rt2x00debug_register(struct rt2x00_dev *rt2x00dev)
debugfs_create_dir(intf->rt2x00dev->ops->name,
rt2x00dev->hw->wiphy->debugfsdir);
- intf->driver_entry =
- rt2x00debug_create_file_driver("driver", intf, &intf->driver_blob);
+ rt2x00debug_create_file_driver("driver", intf, &intf->driver_blob);
+ rt2x00debug_create_file_chipset("chipset", intf, &intf->chipset_blob);
+ debugfs_create_file("dev_flags", 0400, intf->driver_folder, intf,
+ &rt2x00debug_fop_dev_flags);
+ debugfs_create_file("cap_flags", 0400, intf->driver_folder, intf,
+ &rt2x00debug_fop_cap_flags);
+ debugfs_create_file("restart_hw", 0200, intf->driver_folder, intf,
+ &rt2x00debug_restart_hw);
- intf->chipset_entry =
- rt2x00debug_create_file_chipset("chipset",
- intf, &intf->chipset_blob);
-
- intf->dev_flags = debugfs_create_file("dev_flags", 0400,
- intf->driver_folder, intf,
- &rt2x00debug_fop_dev_flags);
-
- intf->cap_flags = debugfs_create_file("cap_flags", 0400,
- intf->driver_folder, intf,
- &rt2x00debug_fop_cap_flags);
-
- intf->restart_hw = debugfs_create_file("restart_hw", 0200,
- intf->driver_folder, intf,
- &rt2x00debug_restart_hw);
-
- intf->register_folder =
- debugfs_create_dir("register", intf->driver_folder);
+ register_folder = debugfs_create_dir("register", intf->driver_folder);
#define RT2X00DEBUGFS_CREATE_REGISTER_ENTRY(__intf, __name) \
({ \
if (debug->__name.read) { \
- (__intf)->__name##_off_entry = \
- debugfs_create_u32(__stringify(__name) "_offset", \
- 0600, \
- (__intf)->register_folder, \
- &(__intf)->offset_##__name); \
+ debugfs_create_u32(__stringify(__name) "_offset", 0600, \
+ register_folder, \
+ &(__intf)->offset_##__name); \
\
- (__intf)->__name##_val_entry = \
- debugfs_create_file(__stringify(__name) "_value", \
- 0600, \
- (__intf)->register_folder, \
- (__intf), \
- &rt2x00debug_fop_##__name); \
+ debugfs_create_file(__stringify(__name) "_value", 0600, \
+ register_folder, (__intf), \
+ &rt2x00debug_fop_##__name); \
} \
})
@@ -724,26 +685,21 @@ void rt2x00debug_register(struct rt2x00_dev *rt2x00dev)
#undef RT2X00DEBUGFS_CREATE_REGISTER_ENTRY
- intf->queue_folder =
- debugfs_create_dir("queue", intf->driver_folder);
+ queue_folder = debugfs_create_dir("queue", intf->driver_folder);
- intf->queue_frame_dump_entry =
- debugfs_create_file("dump", 0400, intf->queue_folder,
- intf, &rt2x00debug_fop_queue_dump);
+ debugfs_create_file("dump", 0400, queue_folder, intf,
+ &rt2x00debug_fop_queue_dump);
skb_queue_head_init(&intf->frame_dump_skbqueue);
init_waitqueue_head(&intf->frame_dump_waitqueue);
- intf->queue_stats_entry =
- debugfs_create_file("queue", 0400, intf->queue_folder,
- intf, &rt2x00debug_fop_queue_stats);
+ debugfs_create_file("queue", 0400, queue_folder, intf,
+ &rt2x00debug_fop_queue_stats);
#ifdef CONFIG_RT2X00_LIB_CRYPTO
if (rt2x00_has_cap_hw_crypto(rt2x00dev))
- intf->crypto_stats_entry =
- debugfs_create_file("crypto", 0444, intf->queue_folder,
- intf,
- &rt2x00debug_fop_crypto_stats);
+ debugfs_create_file("crypto", 0444, queue_folder, intf,
+ &rt2x00debug_fop_crypto_stats);
#endif
return;
@@ -758,29 +714,7 @@ void rt2x00debug_deregister(struct rt2x00_dev *rt2x00dev)
skb_queue_purge(&intf->frame_dump_skbqueue);
-#ifdef CONFIG_RT2X00_LIB_CRYPTO
- debugfs_remove(intf->crypto_stats_entry);
-#endif
- debugfs_remove(intf->queue_stats_entry);
- debugfs_remove(intf->queue_frame_dump_entry);
- debugfs_remove(intf->queue_folder);
- debugfs_remove(intf->rfcsr_val_entry);
- debugfs_remove(intf->rfcsr_off_entry);
- debugfs_remove(intf->rf_val_entry);
- debugfs_remove(intf->rf_off_entry);
- debugfs_remove(intf->bbp_val_entry);
- debugfs_remove(intf->bbp_off_entry);
- debugfs_remove(intf->eeprom_val_entry);
- debugfs_remove(intf->eeprom_off_entry);
- debugfs_remove(intf->csr_val_entry);
- debugfs_remove(intf->csr_off_entry);
- debugfs_remove(intf->register_folder);
- debugfs_remove(intf->dev_flags);
- debugfs_remove(intf->restart_hw);
- debugfs_remove(intf->cap_flags);
- debugfs_remove(intf->chipset_entry);
- debugfs_remove(intf->driver_entry);
- debugfs_remove(intf->driver_folder);
+ debugfs_remove_recursive(intf->driver_folder);
kfree(intf->chipset_blob.data);
kfree(intf->driver_blob.data);
kfree(intf);
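The whole rt2x00debug diet follows current debugfs guidance: creation calls are not error-checked, child dentries are not stored, and teardown is a single debugfs_remove_recursive() on the root directory, which is why the twenty-odd dentry members and the matching debugfs_remove() ladder could go. The resulting pattern, sketched with hypothetical names:

#include <linux/debugfs.h>

static u32 demo_counter;
static struct dentry *demo_root;

static void demo_debugfs_init(void)
{
	demo_root = debugfs_create_dir("demo", NULL);
	/* Children need no saved dentries and no error checks. */
	debugfs_create_u32("counter", 0600, demo_root, &demo_counter);
}

static void demo_debugfs_exit(void)
{
	/* One call tears down the root and everything under it. */
	debugfs_remove_recursive(demo_root);
}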
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00usb.c b/drivers/net/wireless/ralink/rt2x00/rt2x00usb.c
index 7e3a621b9c0d..bc2dfef0de22 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2x00usb.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt2x00usb.c
@@ -349,8 +349,7 @@ static void rt2x00usb_work_rxdone(struct work_struct *work)
while (!rt2x00queue_empty(rt2x00dev->rx)) {
entry = rt2x00queue_get_entry(rt2x00dev->rx, Q_INDEX_DONE);
- if (test_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags) ||
- !test_bit(ENTRY_DATA_STATUS_PENDING, &entry->flags))
+ if (test_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags))
break;
/*
@@ -389,8 +388,7 @@ static void rt2x00usb_interrupt_rxdone(struct urb *urb)
rt2x00lib_dmadone(entry);
/*
- * Schedule the delayed work for reading the RX status
- * from the device.
+ * Schedule the delayed work for processing RX data
*/
queue_work(rt2x00dev->workqueue, &rt2x00dev->rxdone_work);
}
@@ -402,8 +400,7 @@ static bool rt2x00usb_kick_rx_entry(struct queue_entry *entry, void *data)
struct queue_entry_priv_usb *entry_priv = entry->priv_data;
int status;
- if (test_and_set_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags) ||
- test_bit(ENTRY_DATA_STATUS_PENDING, &entry->flags))
+ if (test_and_set_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags))
return false;
rt2x00lib_dmastart(entry);
diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8723b.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8723b.c
index 3adb1d3d47ac..ceffe05bd65b 100644
--- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8723b.c
+++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8723b.c
@@ -1525,7 +1525,7 @@ static void rtl8723b_enable_rf(struct rtl8xxxu_priv *priv)
/*
* WLAN action by PTA
*/
- rtl8xxxu_write8(priv, REG_WLAN_ACT_CONTROL_8723B, 0x04);
+ rtl8xxxu_write8(priv, REG_WLAN_ACT_CONTROL_8723B, 0x0c);
/*
* BT select S0/S1 controlled by WiFi
@@ -1568,9 +1568,14 @@ static void rtl8723b_enable_rf(struct rtl8xxxu_priv *priv)
rtl8xxxu_gen2_h2c_cmd(priv, &h2c, sizeof(h2c.ant_sel_rsv));
/*
- * 0x280, 0x00, 0x200, 0x80 - not clear
+ * Different settings for different antenna positions.
+ * Antenna Position: | Normal Inverse
+ * --------------------------------------------------
+ * Antenna switch to BT: | 0x280, 0x00
+ * Antenna switch to WiFi: | 0x0, 0x280
+ * Antenna switch to PTA: | 0x200, 0x80
*/
- rtl8xxxu_write32(priv, REG_S0S1_PATH_SWITCH, 0x00);
+ rtl8xxxu_write32(priv, REG_S0S1_PATH_SWITCH, 0x80);
/*
* Software control, antenna at WiFi side
diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c
index 8136e268b4e6..c6c41fb962ff 100644
--- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c
+++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c
@@ -3891,12 +3891,13 @@ static int rtl8xxxu_init_device(struct ieee80211_hw *hw)
/* Check if MAC is already powered on */
val8 = rtl8xxxu_read8(priv, REG_CR);
+ val16 = rtl8xxxu_read16(priv, REG_SYS_CLKR);
/*
* Fix 92DU-VC S3 hang with the reason is that secondary mac is not
* initialized. First MAC returns 0xea, second MAC returns 0x00
*/
- if (val8 == 0xea)
+ if (val8 == 0xea || !(val16 & SYS_CLK_MAC_CLK_ENABLE))
macpower = false;
else
macpower = true;
diff --git a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.c b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.c
index 152242ac0aa5..191dafd03189 100644
--- a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.c
+++ b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.c
@@ -509,13 +509,7 @@ static u32 halbtc_get_wifi_link_status(struct btc_coexist *btcoexist)
static s32 halbtc_get_wifi_rssi(struct rtl_priv *rtlpriv)
{
- int undec_sm_pwdb = 0;
-
- if (rtlpriv->mac80211.link_state >= MAC80211_LINKED)
- undec_sm_pwdb = rtlpriv->dm.undec_sm_pwdb;
- else /* associated entry pwdb */
- undec_sm_pwdb = rtlpriv->dm.undec_sm_pwdb;
- return undec_sm_pwdb;
+ return rtlpriv->dm.undec_sm_pwdb;
}
static bool halbtc_get(void *void_btcoexist, u8 get_type, void *out_buf)
diff --git a/drivers/net/wireless/realtek/rtlwifi/pci.c b/drivers/net/wireless/realtek/rtlwifi/pci.c
index 4055e0ab75ba..7d96fe5f1a44 100644
--- a/drivers/net/wireless/realtek/rtlwifi/pci.c
+++ b/drivers/net/wireless/realtek/rtlwifi/pci.c
@@ -2409,8 +2409,7 @@ EXPORT_SYMBOL(rtl_pci_disconnect);
****************************************/
int rtl_pci_suspend(struct device *dev)
{
- struct pci_dev *pdev = to_pci_dev(dev);
- struct ieee80211_hw *hw = pci_get_drvdata(pdev);
+ struct ieee80211_hw *hw = dev_get_drvdata(dev);
struct rtl_priv *rtlpriv = rtl_priv(hw);
rtlpriv->cfg->ops->hw_suspend(hw);
@@ -2422,8 +2421,7 @@ EXPORT_SYMBOL(rtl_pci_suspend);
int rtl_pci_resume(struct device *dev)
{
- struct pci_dev *pdev = to_pci_dev(dev);
- struct ieee80211_hw *hw = pci_get_drvdata(pdev);
+ struct ieee80211_hw *hw = dev_get_drvdata(dev);
struct rtl_priv *rtlpriv = rtl_priv(hw);
rtlpriv->cfg->ops->hw_resume(hw);
diff --git a/drivers/net/wireless/realtek/rtlwifi/regd.c b/drivers/net/wireless/realtek/rtlwifi/regd.c
index 6ccb5b93a595..c10432cd703e 100644
--- a/drivers/net/wireless/realtek/rtlwifi/regd.c
+++ b/drivers/net/wireless/realtek/rtlwifi/regd.c
@@ -276,22 +276,6 @@ static void _rtl_reg_apply_world_flags(struct wiphy *wiphy,
return;
}
-static void _rtl_dump_channel_map(struct wiphy *wiphy)
-{
- enum nl80211_band band;
- struct ieee80211_supported_band *sband;
- struct ieee80211_channel *ch;
- unsigned int i;
-
- for (band = 0; band < NUM_NL80211_BANDS; band++) {
- if (!wiphy->bands[band])
- continue;
- sband = wiphy->bands[band];
- for (i = 0; i < sband->n_channels; i++)
- ch = &sband->channels[i];
- }
-}
-
static int _rtl_reg_notifier_apply(struct wiphy *wiphy,
struct regulatory_request *request,
struct rtl_regulatory *reg)
@@ -309,8 +293,6 @@ static int _rtl_reg_notifier_apply(struct wiphy *wiphy,
break;
}
- _rtl_dump_channel_map(wiphy);
-
return 0;
}
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/dm.c b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/dm.c
index 85360353f557..333e355c9281 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/dm.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/dm.c
@@ -1411,12 +1411,13 @@ void rtl88e_dm_set_tx_ant_by_tx_info(struct ieee80211_hw *hw,
struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
struct rtl_dm *rtldm = rtl_dm(rtl_priv(hw));
struct fast_ant_training *pfat_table = &rtldm->fat_table;
+ __le32 *pdesc32 = (__le32 *)pdesc;
if ((rtlefuse->antenna_div_type == CG_TRX_HW_ANTDIV) ||
(rtlefuse->antenna_div_type == CG_TRX_SMART_ANTDIV)) {
- SET_TX_DESC_ANTSEL_A(pdesc, pfat_table->antsel_a[mac_id]);
- SET_TX_DESC_ANTSEL_B(pdesc, pfat_table->antsel_b[mac_id]);
- SET_TX_DESC_ANTSEL_C(pdesc, pfat_table->antsel_c[mac_id]);
+ set_tx_desc_antsel_a(pdesc32, pfat_table->antsel_a[mac_id]);
+ set_tx_desc_antsel_b(pdesc32, pfat_table->antsel_b[mac_id]);
+ set_tx_desc_antsel_c(pdesc32, pfat_table->antsel_c[mac_id]);
}
}
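These lower-case setters are the series-wide replacement for the SET_BITS_TO_LE_4BYTE() macros: typed inline functions over __le32 descriptors built on le32p_replace_bits(), so sparse can verify endianness. One such helper sketched against the field layout visible in the trx.h hunk further below, where ANTSEL_A sat at byte offset 8, bit 24, width 1:

#include <linux/bitfield.h>

/* Sketch: dword 2 (byte offset 8), bit 24 — the same field the removed
 * SET_TX_DESC_ANTSEL_A macro wrote via SET_BITS_TO_LE_4BYTE(__pdesc+8,
 * 24, 1, __val). */
static inline void demo_set_tx_desc_antsel_a(__le32 *pdesc, u32 val)
{
	le32p_replace_bits(pdesc + 2, val, BIT(24));
}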
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/sw.c
index eab48fed61ed..a0eda51e833c 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/sw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/sw.c
@@ -115,10 +115,6 @@ int rtl88e_init_sw_vars(struct ieee80211_hw *hw)
rtlpriv->psc.swctrl_lps = rtlpriv->cfg->mod_params->swctrl_lps;
rtlpriv->psc.fwctrl_lps = rtlpriv->cfg->mod_params->fwctrl_lps;
rtlpci->msi_support = rtlpriv->cfg->mod_params->msi_support;
- rtlpriv->cfg->mod_params->sw_crypto =
- rtlpriv->cfg->mod_params->sw_crypto;
- rtlpriv->cfg->mod_params->disable_watchdog =
- rtlpriv->cfg->mod_params->disable_watchdog;
if (rtlpriv->cfg->mod_params->disable_watchdog)
pr_info("watchdog disabled\n");
if (!rtlpriv->psc.inactiveps)
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/trx.c b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/trx.c
index 483dc8bdc555..aa2e9e88be53 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/trx.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/trx.c
@@ -25,7 +25,7 @@ static u8 _rtl88ee_map_hwqueue_to_fwqueue(struct sk_buff *skb, u8 hw_queue)
}
static void _rtl88ee_query_rxphystatus(struct ieee80211_hw *hw,
- struct rtl_stats *pstatus, u8 *pdesc,
+ struct rtl_stats *pstatus, __le32 *pdesc,
struct rx_fwinfo_88e *p_drvinfo,
bool bpacket_match_bssid,
bool bpacket_toself, bool packet_beacon)
@@ -271,7 +271,7 @@ static void _rtl88ee_smart_antenna(struct ieee80211_hw *hw,
static void _rtl88ee_translate_rx_signal_stuff(struct ieee80211_hw *hw,
struct sk_buff *skb,
struct rtl_stats *pstatus,
- u8 *pdesc,
+ __le32 *pdesc,
struct rx_fwinfo_88e *p_drvinfo)
{
struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
@@ -313,13 +313,13 @@ static void _rtl88ee_translate_rx_signal_stuff(struct ieee80211_hw *hw,
rtl_process_phyinfo(hw, tmp_buf, pstatus);
}
-static void _rtl88ee_insert_emcontent(struct rtl_tcb_desc *ptcb_desc,
- u8 *virtualaddress)
+static void rtl88ee_insert_emcontent(struct rtl_tcb_desc *ptcb_desc,
+ __le32 *virtualaddress)
{
u32 dwtmp = 0;
memset(virtualaddress, 0, 8);
- SET_EARLYMODE_PKTNUM(virtualaddress, ptcb_desc->empkt_num);
+ set_earlymode_pktnum(virtualaddress, ptcb_desc->empkt_num);
if (ptcb_desc->empkt_num == 1) {
dwtmp = ptcb_desc->empkt_len[0];
} else {
@@ -327,7 +327,7 @@ static void _rtl88ee_insert_emcontent(struct rtl_tcb_desc *ptcb_desc,
dwtmp += ((dwtmp%4) ? (4-dwtmp%4) : 0)+4;
dwtmp += ptcb_desc->empkt_len[1];
}
- SET_EARLYMODE_LEN0(virtualaddress, dwtmp);
+ set_earlymode_len0(virtualaddress, dwtmp);
if (ptcb_desc->empkt_num <= 3) {
dwtmp = ptcb_desc->empkt_len[2];
@@ -336,7 +336,7 @@ static void _rtl88ee_insert_emcontent(struct rtl_tcb_desc *ptcb_desc,
dwtmp += ((dwtmp%4) ? (4-dwtmp%4) : 0)+4;
dwtmp += ptcb_desc->empkt_len[3];
}
- SET_EARLYMODE_LEN1(virtualaddress, dwtmp);
+ set_earlymode_len1(virtualaddress, dwtmp);
if (ptcb_desc->empkt_num <= 5) {
dwtmp = ptcb_desc->empkt_len[4];
} else {
@@ -344,8 +344,8 @@ static void _rtl88ee_insert_emcontent(struct rtl_tcb_desc *ptcb_desc,
dwtmp += ((dwtmp%4) ? (4-dwtmp%4) : 0)+4;
dwtmp += ptcb_desc->empkt_len[5];
}
- SET_EARLYMODE_LEN2_1(virtualaddress, dwtmp & 0xF);
- SET_EARLYMODE_LEN2_2(virtualaddress, dwtmp >> 4);
+ set_earlymode_len2_1(virtualaddress, dwtmp & 0xF);
+ set_earlymode_len2_2(virtualaddress, dwtmp >> 4);
if (ptcb_desc->empkt_num <= 7) {
dwtmp = ptcb_desc->empkt_len[6];
} else {
@@ -353,7 +353,7 @@ static void _rtl88ee_insert_emcontent(struct rtl_tcb_desc *ptcb_desc,
dwtmp += ((dwtmp%4) ? (4-dwtmp%4) : 0)+4;
dwtmp += ptcb_desc->empkt_len[7];
}
- SET_EARLYMODE_LEN3(virtualaddress, dwtmp);
+ set_earlymode_len3(virtualaddress, dwtmp);
if (ptcb_desc->empkt_num <= 9) {
dwtmp = ptcb_desc->empkt_len[8];
} else {
@@ -361,50 +361,51 @@ static void _rtl88ee_insert_emcontent(struct rtl_tcb_desc *ptcb_desc,
dwtmp += ((dwtmp%4) ? (4-dwtmp%4) : 0)+4;
dwtmp += ptcb_desc->empkt_len[9];
}
- SET_EARLYMODE_LEN4(virtualaddress, dwtmp);
+ set_earlymode_len4(virtualaddress, dwtmp);
}
bool rtl88ee_rx_query_desc(struct ieee80211_hw *hw,
struct rtl_stats *status,
struct ieee80211_rx_status *rx_status,
- u8 *pdesc, struct sk_buff *skb)
+ u8 *pdesc8, struct sk_buff *skb)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rx_fwinfo_88e *p_drvinfo;
struct ieee80211_hdr *hdr;
u8 wake_match;
- u32 phystatus = GET_RX_DESC_PHYST(pdesc);
+ __le32 *pdesc = (__le32 *)pdesc8;
+ u32 phystatus = get_rx_desc_physt(pdesc);
- status->packet_report_type = (u8)GET_RX_STATUS_DESC_RPT_SEL(pdesc);
+ status->packet_report_type = (u8)get_rx_status_desc_rpt_sel(pdesc);
if (status->packet_report_type == TX_REPORT2)
- status->length = (u16)GET_RX_RPT2_DESC_PKT_LEN(pdesc);
+ status->length = (u16)get_rx_rpt2_desc_pkt_len(pdesc);
else
- status->length = (u16)GET_RX_DESC_PKT_LEN(pdesc);
- status->rx_drvinfo_size = (u8)GET_RX_DESC_DRV_INFO_SIZE(pdesc) *
+ status->length = (u16)get_rx_desc_pkt_len(pdesc);
+ status->rx_drvinfo_size = (u8)get_rx_desc_drv_info_size(pdesc) *
RX_DRV_INFO_SIZE_UNIT;
- status->rx_bufshift = (u8)(GET_RX_DESC_SHIFT(pdesc) & 0x03);
- status->icv = (u16)GET_RX_DESC_ICV(pdesc);
- status->crc = (u16)GET_RX_DESC_CRC32(pdesc);
+ status->rx_bufshift = (u8)(get_rx_desc_shift(pdesc) & 0x03);
+ status->icv = (u16)get_rx_desc_icv(pdesc);
+ status->crc = (u16)get_rx_desc_crc32(pdesc);
status->hwerror = (status->crc | status->icv);
- status->decrypted = !GET_RX_DESC_SWDEC(pdesc);
- status->rate = (u8)GET_RX_DESC_RXMCS(pdesc);
- status->shortpreamble = (u16)GET_RX_DESC_SPLCP(pdesc);
- status->isampdu = (bool) (GET_RX_DESC_PAGGR(pdesc) == 1);
- status->isfirst_ampdu = (bool)((GET_RX_DESC_PAGGR(pdesc) == 1) &&
- (GET_RX_DESC_FAGGR(pdesc) == 1));
+ status->decrypted = !get_rx_desc_swdec(pdesc);
+ status->rate = (u8)get_rx_desc_rxmcs(pdesc);
+ status->shortpreamble = (u16)get_rx_desc_splcp(pdesc);
+ status->isampdu = (bool) (get_rx_desc_paggr(pdesc) == 1);
+ status->isfirst_ampdu = (bool)((get_rx_desc_paggr(pdesc) == 1) &&
+ (get_rx_desc_faggr(pdesc) == 1));
if (status->packet_report_type == NORMAL_RX)
- status->timestamp_low = GET_RX_DESC_TSFL(pdesc);
- status->rx_is40mhzpacket = (bool)GET_RX_DESC_BW(pdesc);
- status->is_ht = (bool)GET_RX_DESC_RXHT(pdesc);
+ status->timestamp_low = get_rx_desc_tsfl(pdesc);
+ status->rx_is40mhzpacket = (bool)get_rx_desc_bw(pdesc);
+ status->is_ht = (bool)get_rx_desc_rxht(pdesc);
status->is_cck = RTL8188_RX_HAL_IS_CCK_RATE(status->rate);
- status->macid = GET_RX_DESC_MACID(pdesc);
- if (GET_RX_STATUS_DESC_PATTERN_MATCH(pdesc))
+ status->macid = get_rx_desc_macid(pdesc);
+ if (get_rx_status_desc_pattern_match(pdesc))
wake_match = BIT(2);
- else if (GET_RX_STATUS_DESC_MAGIC_MATCH(pdesc))
+ else if (get_rx_status_desc_magic_match(pdesc))
wake_match = BIT(1);
- else if (GET_RX_STATUS_DESC_UNICAST_MATCH(pdesc))
+ else if (get_rx_status_desc_unicast_match(pdesc))
wake_match = BIT(0);
else
wake_match = 0;
@@ -465,15 +466,15 @@ bool rtl88ee_rx_query_desc(struct ieee80211_hw *hw,
rx_status->signal = status->recvsignalpower + 10;
if (status->packet_report_type == TX_REPORT2) {
status->macid_valid_entry[0] =
- GET_RX_RPT2_DESC_MACID_VALID_1(pdesc);
+ get_rx_rpt2_desc_macid_valid_1(pdesc);
status->macid_valid_entry[1] =
- GET_RX_RPT2_DESC_MACID_VALID_2(pdesc);
+ get_rx_rpt2_desc_macid_valid_2(pdesc);
}
return true;
}
void rtl88ee_tx_fill_desc(struct ieee80211_hw *hw,
- struct ieee80211_hdr *hdr, u8 *pdesc_tx,
+ struct ieee80211_hdr *hdr, u8 *pdesc8,
u8 *txbd, struct ieee80211_tx_info *info,
struct ieee80211_sta *sta,
struct sk_buff *skb,
@@ -484,7 +485,6 @@ void rtl88ee_tx_fill_desc(struct ieee80211_hw *hw,
struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
- u8 *pdesc = (u8 *)pdesc_tx;
u16 seq_number;
__le16 fc = hdr->frame_control;
unsigned int buf_len = 0;
@@ -497,6 +497,7 @@ void rtl88ee_tx_fill_desc(struct ieee80211_hw *hw,
dma_addr_t mapping;
u8 bw_40 = 0;
u8 short_gi = 0;
+ __le32 *pdesc = (__le32 *)pdesc8;
if (mac->opmode == NL80211_IFTYPE_STATION) {
bw_40 = mac->bw_40;
@@ -521,77 +522,77 @@ void rtl88ee_tx_fill_desc(struct ieee80211_hw *hw,
"DMA mapping error\n");
return;
}
- CLEAR_PCI_TX_DESC_CONTENT(pdesc, sizeof(struct tx_desc_88e));
+ clear_pci_tx_desc_content(pdesc, sizeof(struct tx_desc_88e));
if (ieee80211_is_nullfunc(fc) || ieee80211_is_ctl(fc)) {
firstseg = true;
lastseg = true;
}
if (firstseg) {
if (rtlhal->earlymode_enable) {
- SET_TX_DESC_PKT_OFFSET(pdesc, 1);
- SET_TX_DESC_OFFSET(pdesc, USB_HWDESC_HEADER_LEN +
+ set_tx_desc_pkt_offset(pdesc, 1);
+ set_tx_desc_offset(pdesc, USB_HWDESC_HEADER_LEN +
EM_HDR_LEN);
if (ptcb_desc->empkt_num) {
RT_TRACE(rtlpriv, COMP_SEND, DBG_TRACE,
"Insert 8 byte.pTcb->EMPktNum:%d\n",
ptcb_desc->empkt_num);
- _rtl88ee_insert_emcontent(ptcb_desc,
- (u8 *)(skb->data));
+ rtl88ee_insert_emcontent(ptcb_desc,
+ (__le32 *)(skb->data));
}
} else {
- SET_TX_DESC_OFFSET(pdesc, USB_HWDESC_HEADER_LEN);
+ set_tx_desc_offset(pdesc, USB_HWDESC_HEADER_LEN);
}
ptcb_desc->use_driver_rate = true;
- SET_TX_DESC_TX_RATE(pdesc, ptcb_desc->hw_rate);
+ set_tx_desc_tx_rate(pdesc, ptcb_desc->hw_rate);
if (ptcb_desc->hw_rate > DESC92C_RATEMCS0)
short_gi = (ptcb_desc->use_shortgi) ? 1 : 0;
else
short_gi = (ptcb_desc->use_shortpreamble) ? 1 : 0;
- SET_TX_DESC_DATA_SHORTGI(pdesc, short_gi);
+ set_tx_desc_data_shortgi(pdesc, short_gi);
if (info->flags & IEEE80211_TX_CTL_AMPDU) {
- SET_TX_DESC_AGG_ENABLE(pdesc, 1);
- SET_TX_DESC_MAX_AGG_NUM(pdesc, 0x14);
+ set_tx_desc_agg_enable(pdesc, 1);
+ set_tx_desc_max_agg_num(pdesc, 0x14);
}
- SET_TX_DESC_SEQ(pdesc, seq_number);
- SET_TX_DESC_RTS_ENABLE(pdesc, ((ptcb_desc->rts_enable &&
+ set_tx_desc_seq(pdesc, seq_number);
+ set_tx_desc_rts_enable(pdesc, ((ptcb_desc->rts_enable &&
!ptcb_desc->cts_enable) ? 1 : 0));
- SET_TX_DESC_HW_RTS_ENABLE(pdesc, 0);
- SET_TX_DESC_CTS2SELF(pdesc, ((ptcb_desc->cts_enable) ? 1 : 0));
- SET_TX_DESC_RTS_STBC(pdesc, ((ptcb_desc->rts_stbc) ? 1 : 0));
-
- SET_TX_DESC_RTS_RATE(pdesc, ptcb_desc->rts_rate);
- SET_TX_DESC_RTS_BW(pdesc, 0);
- SET_TX_DESC_RTS_SC(pdesc, ptcb_desc->rts_sc);
- SET_TX_DESC_RTS_SHORT(pdesc,
+ set_tx_desc_hw_rts_enable(pdesc, 0);
+ set_tx_desc_cts2self(pdesc, ((ptcb_desc->cts_enable) ? 1 : 0));
+ set_tx_desc_rts_stbc(pdesc, ((ptcb_desc->rts_stbc) ? 1 : 0));
+
+ set_tx_desc_rts_rate(pdesc, ptcb_desc->rts_rate);
+ set_tx_desc_rts_bw(pdesc, 0);
+ set_tx_desc_rts_sc(pdesc, ptcb_desc->rts_sc);
+ set_tx_desc_rts_short(pdesc,
((ptcb_desc->rts_rate <= DESC92C_RATE54M) ?
(ptcb_desc->rts_use_shortpreamble ? 1 : 0) :
(ptcb_desc->rts_use_shortgi ? 1 : 0)));
if (ptcb_desc->tx_enable_sw_calc_duration)
- SET_TX_DESC_NAV_USE_HDR(pdesc, 1);
+ set_tx_desc_nav_use_hdr(pdesc, 1);
if (bw_40) {
if (ptcb_desc->packet_bw == HT_CHANNEL_WIDTH_20_40) {
- SET_TX_DESC_DATA_BW(pdesc, 1);
- SET_TX_DESC_TX_SUB_CARRIER(pdesc, 3);
+ set_tx_desc_data_bw(pdesc, 1);
+ set_tx_desc_tx_sub_carrier(pdesc, 3);
} else {
- SET_TX_DESC_DATA_BW(pdesc, 0);
- SET_TX_DESC_TX_SUB_CARRIER(pdesc,
+ set_tx_desc_data_bw(pdesc, 0);
+ set_tx_desc_tx_sub_carrier(pdesc,
mac->cur_40_prime_sc);
}
} else {
- SET_TX_DESC_DATA_BW(pdesc, 0);
- SET_TX_DESC_TX_SUB_CARRIER(pdesc, 0);
+ set_tx_desc_data_bw(pdesc, 0);
+ set_tx_desc_tx_sub_carrier(pdesc, 0);
}
- SET_TX_DESC_LINIP(pdesc, 0);
- SET_TX_DESC_PKT_SIZE(pdesc, (u16)skb_len);
+ set_tx_desc_linip(pdesc, 0);
+ set_tx_desc_pkt_size(pdesc, (u16)skb_len);
if (sta) {
u8 ampdu_density = sta->ht_cap.ampdu_density;
- SET_TX_DESC_AMPDU_DENSITY(pdesc, ampdu_density);
+ set_tx_desc_ampdu_density(pdesc, ampdu_density);
}
if (info->control.hw_key) {
struct ieee80211_key_conf *keyconf;
@@ -601,76 +602,77 @@ void rtl88ee_tx_fill_desc(struct ieee80211_hw *hw,
case WLAN_CIPHER_SUITE_WEP40:
case WLAN_CIPHER_SUITE_WEP104:
case WLAN_CIPHER_SUITE_TKIP:
- SET_TX_DESC_SEC_TYPE(pdesc, 0x1);
+ set_tx_desc_sec_type(pdesc, 0x1);
break;
case WLAN_CIPHER_SUITE_CCMP:
- SET_TX_DESC_SEC_TYPE(pdesc, 0x3);
+ set_tx_desc_sec_type(pdesc, 0x3);
break;
default:
- SET_TX_DESC_SEC_TYPE(pdesc, 0x0);
+ set_tx_desc_sec_type(pdesc, 0x0);
break;
}
}
- SET_TX_DESC_QUEUE_SEL(pdesc, fw_qsel);
- SET_TX_DESC_DATA_RATE_FB_LIMIT(pdesc, 0x1F);
- SET_TX_DESC_RTS_RATE_FB_LIMIT(pdesc, 0xF);
- SET_TX_DESC_DISABLE_FB(pdesc, ptcb_desc->disable_ratefallback ?
+ set_tx_desc_queue_sel(pdesc, fw_qsel);
+ set_tx_desc_data_rate_fb_limit(pdesc, 0x1F);
+ set_tx_desc_rts_rate_fb_limit(pdesc, 0xF);
+ set_tx_desc_disable_fb(pdesc, ptcb_desc->disable_ratefallback ?
1 : 0);
- SET_TX_DESC_USE_RATE(pdesc, ptcb_desc->use_driver_rate ? 1 : 0);
+ set_tx_desc_use_rate(pdesc, ptcb_desc->use_driver_rate ? 1 : 0);
- /*SET_TX_DESC_PWR_STATUS(pdesc, pwr_status);*/
+ /*set_tx_desc_pwr_status(pdesc, pwr_status);*/
/* Set TxRate and RTSRate in TxDesc */
/* This prevent Tx initial rate of new-coming packets */
/* from being overwritten by retried packet rate.*/
if (!ptcb_desc->use_driver_rate) {
- /*SET_TX_DESC_RTS_RATE(pdesc, 0x08); */
- /* SET_TX_DESC_TX_RATE(pdesc, 0x0b); */
+ /*set_tx_desc_rts_rate(pdesc, 0x08); */
+ /* set_tx_desc_tx_rate(pdesc, 0x0b); */
}
if (ieee80211_is_data_qos(fc)) {
if (mac->rdg_en) {
RT_TRACE(rtlpriv, COMP_SEND, DBG_TRACE,
"Enable RDG function.\n");
- SET_TX_DESC_RDG_ENABLE(pdesc, 1);
- SET_TX_DESC_HTC(pdesc, 1);
+ set_tx_desc_rdg_enable(pdesc, 1);
+ set_tx_desc_htc(pdesc, 1);
}
}
}
- SET_TX_DESC_FIRST_SEG(pdesc, (firstseg ? 1 : 0));
- SET_TX_DESC_LAST_SEG(pdesc, (lastseg ? 1 : 0));
- SET_TX_DESC_TX_BUFFER_SIZE(pdesc, (u16)buf_len);
- SET_TX_DESC_TX_BUFFER_ADDRESS(pdesc, mapping);
+ set_tx_desc_first_seg(pdesc, (firstseg ? 1 : 0));
+ set_tx_desc_last_seg(pdesc, (lastseg ? 1 : 0));
+ set_tx_desc_tx_buffer_size(pdesc, (u16)buf_len);
+ set_tx_desc_tx_buffer_address(pdesc, mapping);
if (rtlpriv->dm.useramask) {
- SET_TX_DESC_RATE_ID(pdesc, ptcb_desc->ratr_index);
- SET_TX_DESC_MACID(pdesc, ptcb_desc->mac_id);
+ set_tx_desc_rate_id(pdesc, ptcb_desc->ratr_index);
+ set_tx_desc_macid(pdesc, ptcb_desc->mac_id);
} else {
- SET_TX_DESC_RATE_ID(pdesc, 0xC + ptcb_desc->ratr_index);
- SET_TX_DESC_MACID(pdesc, ptcb_desc->ratr_index);
+ set_tx_desc_rate_id(pdesc, 0xC + ptcb_desc->ratr_index);
+ set_tx_desc_macid(pdesc, ptcb_desc->ratr_index);
}
if (ieee80211_is_data_qos(fc))
- SET_TX_DESC_QOS(pdesc, 1);
+ set_tx_desc_qos(pdesc, 1);
if (!ieee80211_is_data_qos(fc))
- SET_TX_DESC_HWSEQ_EN(pdesc, 1);
- SET_TX_DESC_MORE_FRAG(pdesc, (lastseg ? 0 : 1));
+ set_tx_desc_hwseq_en(pdesc, 1);
+ set_tx_desc_more_frag(pdesc, (lastseg ? 0 : 1));
if (is_multicast_ether_addr(ieee80211_get_DA(hdr)) ||
is_broadcast_ether_addr(ieee80211_get_DA(hdr))) {
- SET_TX_DESC_BMC(pdesc, 1);
+ set_tx_desc_bmc(pdesc, 1);
}
- rtl88e_dm_set_tx_ant_by_tx_info(hw, pdesc, ptcb_desc->mac_id);
+ rtl88e_dm_set_tx_ant_by_tx_info(hw, pdesc8, ptcb_desc->mac_id);
RT_TRACE(rtlpriv, COMP_SEND, DBG_TRACE, "\n");
}
void rtl88ee_tx_fill_cmddesc(struct ieee80211_hw *hw,
- u8 *pdesc, bool firstseg,
+ u8 *pdesc8, bool firstseg,
bool lastseg, struct sk_buff *skb)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
u8 fw_queue = QSLT_BEACON;
+ __le32 *pdesc = (__le32 *)pdesc8;
dma_addr_t mapping = pci_map_single(rtlpci->pdev,
skb->data, skb->len,
@@ -684,58 +686,60 @@ void rtl88ee_tx_fill_cmddesc(struct ieee80211_hw *hw,
"DMA mapping error\n");
return;
}
- CLEAR_PCI_TX_DESC_CONTENT(pdesc, TX_DESC_SIZE);
+ clear_pci_tx_desc_content(pdesc, TX_DESC_SIZE);
if (firstseg)
- SET_TX_DESC_OFFSET(pdesc, USB_HWDESC_HEADER_LEN);
+ set_tx_desc_offset(pdesc, USB_HWDESC_HEADER_LEN);
- SET_TX_DESC_TX_RATE(pdesc, DESC92C_RATE1M);
+ set_tx_desc_tx_rate(pdesc, DESC92C_RATE1M);
- SET_TX_DESC_SEQ(pdesc, 0);
+ set_tx_desc_seq(pdesc, 0);
- SET_TX_DESC_LINIP(pdesc, 0);
+ set_tx_desc_linip(pdesc, 0);
- SET_TX_DESC_QUEUE_SEL(pdesc, fw_queue);
+ set_tx_desc_queue_sel(pdesc, fw_queue);
- SET_TX_DESC_FIRST_SEG(pdesc, 1);
- SET_TX_DESC_LAST_SEG(pdesc, 1);
+ set_tx_desc_first_seg(pdesc, 1);
+ set_tx_desc_last_seg(pdesc, 1);
- SET_TX_DESC_TX_BUFFER_SIZE(pdesc, (u16)(skb->len));
+ set_tx_desc_tx_buffer_size(pdesc, (u16)(skb->len));
- SET_TX_DESC_TX_BUFFER_ADDRESS(pdesc, mapping);
+ set_tx_desc_tx_buffer_address(pdesc, mapping);
- SET_TX_DESC_RATE_ID(pdesc, 7);
- SET_TX_DESC_MACID(pdesc, 0);
+ set_tx_desc_rate_id(pdesc, 7);
+ set_tx_desc_macid(pdesc, 0);
- SET_TX_DESC_OWN(pdesc, 1);
+ set_tx_desc_own(pdesc, 1);
- SET_TX_DESC_PKT_SIZE(pdesc, (u16)(skb->len));
+ set_tx_desc_pkt_size(pdesc, (u16)(skb->len));
- SET_TX_DESC_FIRST_SEG(pdesc, 1);
- SET_TX_DESC_LAST_SEG(pdesc, 1);
+ set_tx_desc_first_seg(pdesc, 1);
+ set_tx_desc_last_seg(pdesc, 1);
- SET_TX_DESC_OFFSET(pdesc, 0x20);
+ set_tx_desc_offset(pdesc, 0x20);
- SET_TX_DESC_USE_RATE(pdesc, 1);
+ set_tx_desc_use_rate(pdesc, 1);
if (!ieee80211_is_data_qos(fc))
- SET_TX_DESC_HWSEQ_EN(pdesc, 1);
+ set_tx_desc_hwseq_en(pdesc, 1);
RT_PRINT_DATA(rtlpriv, COMP_CMD, DBG_LOUD,
"H2C Tx Cmd Content\n",
pdesc, TX_DESC_SIZE);
}
-void rtl88ee_set_desc(struct ieee80211_hw *hw, u8 *pdesc,
+void rtl88ee_set_desc(struct ieee80211_hw *hw, u8 *pdesc8,
bool istx, u8 desc_name, u8 *val)
{
+ __le32 *pdesc = (__le32 *)pdesc8;
+
if (istx == true) {
switch (desc_name) {
case HW_DESC_OWN:
- SET_TX_DESC_OWN(pdesc, 1);
+ set_tx_desc_own(pdesc, 1);
break;
case HW_DESC_TX_NEXTDESC_ADDR:
- SET_TX_DESC_NEXT_DESC_ADDRESS(pdesc, *(u32 *)val);
+ set_tx_desc_next_desc_address(pdesc, *(u32 *)val);
break;
default:
WARN_ONCE(true, "rtl8188ee: ERR txdesc :%d not processed\n",
@@ -745,16 +749,16 @@ void rtl88ee_set_desc(struct ieee80211_hw *hw, u8 *pdesc,
} else {
switch (desc_name) {
case HW_DESC_RXOWN:
- SET_RX_DESC_OWN(pdesc, 1);
+ set_rx_desc_own(pdesc, 1);
break;
case HW_DESC_RXBUFF_ADDR:
- SET_RX_DESC_BUFF_ADDR(pdesc, *(u32 *)val);
+ set_rx_desc_buff_addr(pdesc, *(u32 *)val);
break;
case HW_DESC_RXPKT_LEN:
- SET_RX_DESC_PKT_LEN(pdesc, *(u32 *)val);
+ set_rx_desc_pkt_len(pdesc, *(u32 *)val);
break;
case HW_DESC_RXERO:
- SET_RX_DESC_EOR(pdesc, 1);
+ set_rx_desc_eor(pdesc, 1);
break;
default:
WARN_ONCE(true, "rtl8188ee: ERR rxdesc :%d not processed\n",
@@ -765,17 +769,18 @@ void rtl88ee_set_desc(struct ieee80211_hw *hw, u8 *pdesc,
}
u64 rtl88ee_get_desc(struct ieee80211_hw *hw,
- u8 *pdesc, bool istx, u8 desc_name)
+ u8 *pdesc8, bool istx, u8 desc_name)
{
u32 ret = 0;
+ __le32 *pdesc = (__le32 *)pdesc8;
if (istx == true) {
switch (desc_name) {
case HW_DESC_OWN:
- ret = GET_TX_DESC_OWN(pdesc);
+ ret = get_tx_desc_own(pdesc);
break;
case HW_DESC_TXBUFF_ADDR:
- ret = GET_TX_DESC_TX_BUFFER_ADDRESS(pdesc);
+ ret = get_tx_desc_tx_buffer_address(pdesc);
break;
default:
WARN_ONCE(true, "rtl8188ee: ERR txdesc :%d not processed\n",
@@ -785,13 +790,13 @@ u64 rtl88ee_get_desc(struct ieee80211_hw *hw,
} else {
switch (desc_name) {
case HW_DESC_OWN:
- ret = GET_RX_DESC_OWN(pdesc);
+ ret = get_rx_desc_own(pdesc);
break;
case HW_DESC_RXPKT_LEN:
- ret = GET_RX_DESC_PKT_LEN(pdesc);
+ ret = get_rx_desc_pkt_len(pdesc);
break;
case HW_DESC_RXBUFF_ADDR:
- ret = GET_RX_DESC_BUFF_ADDR(pdesc);
+ ret = get_rx_desc_buff_addr(pdesc);
break;
default:
WARN_ONCE(true, "rtl8188ee: ERR rxdesc :%d not processed\n",
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/trx.h b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/trx.h
index c29d9bfa5bd4..bd862732d6ae 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/trx.h
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/trx.h
@@ -14,505 +14,545 @@
#define USB_HWDESC_HEADER_LEN 32
#define CRCLENGTH 4
-#define SET_TX_DESC_PKT_SIZE(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE(__pdesc, 0, 16, __val)
-#define SET_TX_DESC_OFFSET(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE(__pdesc, 16, 8, __val)
-#define SET_TX_DESC_BMC(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE(__pdesc, 24, 1, __val)
-#define SET_TX_DESC_HTC(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE(__pdesc, 25, 1, __val)
-#define SET_TX_DESC_LAST_SEG(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE(__pdesc, 26, 1, __val)
-#define SET_TX_DESC_FIRST_SEG(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE(__pdesc, 27, 1, __val)
-#define SET_TX_DESC_LINIP(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE(__pdesc, 28, 1, __val)
-#define SET_TX_DESC_NO_ACM(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE(__pdesc, 29, 1, __val)
-#define SET_TX_DESC_GF(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE(__pdesc, 30, 1, __val)
-#define SET_TX_DESC_OWN(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE(__pdesc, 31, 1, __val)
-
-#define GET_TX_DESC_PKT_SIZE(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc, 0, 16)
-#define GET_TX_DESC_OFFSET(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc, 16, 8)
-#define GET_TX_DESC_BMC(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc, 24, 1)
-#define GET_TX_DESC_HTC(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc, 25, 1)
-#define GET_TX_DESC_LAST_SEG(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc, 26, 1)
-#define GET_TX_DESC_FIRST_SEG(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc, 27, 1)
-#define GET_TX_DESC_LINIP(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc, 28, 1)
-#define GET_TX_DESC_NO_ACM(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc, 29, 1)
-#define GET_TX_DESC_GF(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc, 30, 1)
-#define GET_TX_DESC_OWN(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc, 31, 1)
-
-#define SET_TX_DESC_MACID(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE(__pdesc+4, 0, 6, __val)
-#define SET_TX_DESC_QUEUE_SEL(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE(__pdesc+4, 8, 5, __val)
-#define SET_TX_DESC_RDG_NAV_EXT(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE(__pdesc+4, 13, 1, __val)
-#define SET_TX_DESC_LSIG_TXOP_EN(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE(__pdesc+4, 14, 1, __val)
-#define SET_TX_DESC_PIFS(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE(__pdesc+4, 15, 1, __val)
-#define SET_TX_DESC_RATE_ID(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE(__pdesc+4, 16, 4, __val)
-#define SET_TX_DESC_NAV_USE_HDR(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE(__pdesc+4, 20, 1, __val)
-#define SET_TX_DESC_EN_DESC_ID(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE(__pdesc+4, 21, 1, __val)
-#define SET_TX_DESC_SEC_TYPE(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE(__pdesc+4, 22, 2, __val)
-#define SET_TX_DESC_PKT_OFFSET(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE(__pdesc+4, 26, 5, __val)
-#define SET_TX_DESC_PADDING_LEN(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE(__pdesc+4, 24, 8, __val)
-
-#define GET_TX_DESC_MACID(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc+4, 0, 5)
-#define GET_TX_DESC_AGG_ENABLE(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc+4, 5, 1)
-#define GET_TX_DESC_AGG_BREAK(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc+4, 6, 1)
-#define GET_TX_DESC_RDG_ENABLE(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc+4, 7, 1)
-#define GET_TX_DESC_QUEUE_SEL(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc+4, 8, 5)
-#define GET_TX_DESC_RDG_NAV_EXT(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc+4, 13, 1)
-#define GET_TX_DESC_LSIG_TXOP_EN(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc+4, 14, 1)
-#define GET_TX_DESC_PIFS(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc+4, 15, 1)
-#define GET_TX_DESC_RATE_ID(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc+4, 16, 4)
-#define GET_TX_DESC_NAV_USE_HDR(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc+4, 20, 1)
-#define GET_TX_DESC_EN_DESC_ID(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc+4, 21, 1)
-#define GET_TX_DESC_SEC_TYPE(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc+4, 22, 2)
-#define GET_TX_DESC_PKT_OFFSET(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc+4, 24, 8)
-
-#define SET_TX_DESC_RTS_RC(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE(__pdesc+8, 0, 6, __val)
-#define SET_TX_DESC_DATA_RC(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE(__pdesc+8, 6, 6, __val)
-#define SET_TX_DESC_AGG_ENABLE(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE(__pdesc+8, 12, 1, __val)
-#define SET_TX_DESC_RDG_ENABLE(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE(__pdesc+8, 13, 1, __val)
-#define SET_TX_DESC_BAR_RTY_TH(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE(__pdesc+8, 14, 2, __val)
-#define SET_TX_DESC_AGG_BREAK(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE(__pdesc+8, 16, 1, __val)
-#define SET_TX_DESC_MORE_FRAG(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE(__pdesc+8, 17, 1, __val)
-#define SET_TX_DESC_RAW(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE(__pdesc+8, 18, 1, __val)
-#define SET_TX_DESC_CCX(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE(__pdesc+8, 19, 1, __val)
-#define SET_TX_DESC_AMPDU_DENSITY(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE(__pdesc+8, 20, 3, __val)
-#define SET_TX_DESC_BT_INT(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE(__pdesc+8, 23, 1, __val)
-#define SET_TX_DESC_ANTSEL_A(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE(__pdesc+8, 24, 1, __val)
-#define SET_TX_DESC_ANTSEL_B(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE(__pdesc+8, 25, 1, __val)
-#define SET_TX_DESC_TX_ANT_CCK(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE(__pdesc+8, 26, 2, __val)
-#define SET_TX_DESC_TX_ANTL(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE(__pdesc+8, 28, 2, __val)
-#define SET_TX_DESC_TX_ANT_HT(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE(__pdesc+8, 30, 2, __val)
-
-#define GET_TX_DESC_RTS_RC(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc+8, 0, 6)
-#define GET_TX_DESC_DATA_RC(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc+8, 6, 6)
-#define GET_TX_DESC_BAR_RTY_TH(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc+8, 14, 2)
-#define GET_TX_DESC_MORE_FRAG(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc+8, 17, 1)
-#define GET_TX_DESC_RAW(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc+8, 18, 1)
-#define GET_TX_DESC_CCX(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc+8, 19, 1)
-#define GET_TX_DESC_AMPDU_DENSITY(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc+8, 20, 3)
-#define GET_TX_DESC_ANTSEL_A(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc+8, 24, 1)
-#define GET_TX_DESC_ANTSEL_B(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc+8, 25, 1)
-#define GET_TX_DESC_TX_ANT_CCK(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc+8, 26, 2)
-#define GET_TX_DESC_TX_ANTL(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc+8, 28, 2)
-#define GET_TX_DESC_TX_ANT_HT(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc+8, 30, 2)
-
-#define SET_TX_DESC_NEXT_HEAP_PAGE(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE(__pdesc+12, 0, 8, __val)
-#define SET_TX_DESC_TAIL_PAGE(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE(__pdesc+12, 8, 8, __val)
-#define SET_TX_DESC_SEQ(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE(__pdesc+12, 16, 12, __val)
-#define SET_TX_DESC_CPU_HANDLE(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE(__pdesc+12, 28, 1, __val)
-#define SET_TX_DESC_TAG1(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE(__pdesc+12, 29, 1, __val)
-#define SET_TX_DESC_TRIGGER_INT(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE(__pdesc+12, 30, 1, __val)
-#define SET_TX_DESC_HWSEQ_EN(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE(__pdesc+12, 31, 1, __val)
-
-#define GET_TX_DESC_NEXT_HEAP_PAGE(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc+12, 0, 8)
-#define GET_TX_DESC_TAIL_PAGE(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc+12, 8, 8)
-#define GET_TX_DESC_SEQ(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc+12, 16, 12)
-
-#define SET_TX_DESC_RTS_RATE(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE(__pdesc+16, 0, 5, __val)
-#define SET_TX_DESC_AP_DCFE(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE(__pdesc+16, 5, 1, __val)
-#define SET_TX_DESC_QOS(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE(__pdesc+16, 6, 1, __val)
-#define SET_TX_DESC_HWSEQ_SSN(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE(__pdesc+16, 7, 1, __val)
-#define SET_TX_DESC_USE_RATE(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE(__pdesc+16, 8, 1, __val)
-#define SET_TX_DESC_DISABLE_RTS_FB(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE(__pdesc+16, 9, 1, __val)
-#define SET_TX_DESC_DISABLE_FB(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE(__pdesc+16, 10, 1, __val)
-#define SET_TX_DESC_CTS2SELF(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE(__pdesc+16, 11, 1, __val)
-#define SET_TX_DESC_RTS_ENABLE(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE(__pdesc+16, 12, 1, __val)
-#define SET_TX_DESC_HW_RTS_ENABLE(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE(__pdesc+16, 13, 1, __val)
-#define SET_TX_DESC_PORT_ID(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE(__pdesc+16, 14, 1, __val)
-#define SET_TX_DESC_PWR_STATUS(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE(__pdesc+16, 15, 3, __val)
-#define SET_TX_DESC_WAIT_DCTS(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE(__pdesc+16, 18, 1, __val)
-#define SET_TX_DESC_CTS2AP_EN(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE(__pdesc+16, 19, 1, __val)
-#define SET_TX_DESC_TX_SUB_CARRIER(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE(__pdesc+16, 20, 2, __val)
-#define SET_TX_DESC_TX_STBC(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE(__pdesc+16, 22, 2, __val)
-#define SET_TX_DESC_DATA_SHORT(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE(__pdesc+16, 24, 1, __val)
-#define SET_TX_DESC_DATA_BW(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE(__pdesc+16, 25, 1, __val)
-#define SET_TX_DESC_RTS_SHORT(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE(__pdesc+16, 26, 1, __val)
-#define SET_TX_DESC_RTS_BW(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE(__pdesc+16, 27, 1, __val)
-#define SET_TX_DESC_RTS_SC(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE(__pdesc+16, 28, 2, __val)
-#define SET_TX_DESC_RTS_STBC(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE(__pdesc+16, 30, 2, __val)
-
-#define GET_TX_DESC_RTS_RATE(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc+16, 0, 5)
-#define GET_TX_DESC_AP_DCFE(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc+16, 5, 1)
-#define GET_TX_DESC_QOS(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc+16, 6, 1)
-#define GET_TX_DESC_HWSEQ_EN(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc+16, 7, 1)
-#define GET_TX_DESC_USE_RATE(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc+16, 8, 1)
-#define GET_TX_DESC_DISABLE_RTS_FB(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc+16, 9, 1)
-#define GET_TX_DESC_DISABLE_FB(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc+16, 10, 1)
-#define GET_TX_DESC_CTS2SELF(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc+16, 11, 1)
-#define GET_TX_DESC_RTS_ENABLE(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc+16, 12, 1)
-#define GET_TX_DESC_HW_RTS_ENABLE(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc+16, 13, 1)
-#define GET_TX_DESC_PORT_ID(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc+16, 14, 1)
-#define GET_TX_DESC_WAIT_DCTS(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc+16, 18, 1)
-#define GET_TX_DESC_CTS2AP_EN(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc+16, 19, 1)
-#define GET_TX_DESC_TX_SUB_CARRIER(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc+16, 20, 2)
-#define GET_TX_DESC_TX_STBC(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc+16, 22, 2)
-#define GET_TX_DESC_DATA_SHORT(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc+16, 24, 1)
-#define GET_TX_DESC_DATA_BW(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc+16, 25, 1)
-#define GET_TX_DESC_RTS_SHORT(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc+16, 26, 1)
-#define GET_TX_DESC_RTS_BW(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc+16, 27, 1)
-#define GET_TX_DESC_RTS_SC(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc+16, 28, 2)
-#define GET_TX_DESC_RTS_STBC(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc+16, 30, 2)
-
-#define SET_TX_DESC_TX_RATE(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE(__pdesc+20, 0, 6, __val)
-#define SET_TX_DESC_DATA_SHORTGI(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE(__pdesc+20, 6, 1, __val)
-#define SET_TX_DESC_CCX_TAG(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE(__pdesc+20, 7, 1, __val)
-#define SET_TX_DESC_DATA_RATE_FB_LIMIT(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE(__pdesc+20, 8, 5, __val)
-#define SET_TX_DESC_RTS_RATE_FB_LIMIT(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE(__pdesc+20, 13, 4, __val)
-#define SET_TX_DESC_RETRY_LIMIT_ENABLE(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE(__pdesc+20, 17, 1, __val)
-#define SET_TX_DESC_DATA_RETRY_LIMIT(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE(__pdesc+20, 18, 6, __val)
-#define SET_TX_DESC_USB_TXAGG_NUM(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE(__pdesc+20, 24, 8, __val)
-
-#define GET_TX_DESC_TX_RATE(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc+20, 0, 6)
-#define GET_TX_DESC_DATA_SHORTGI(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc+20, 6, 1)
-#define GET_TX_DESC_CCX_TAG(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc+20, 7, 1)
-#define GET_TX_DESC_DATA_RATE_FB_LIMIT(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc+20, 8, 5)
-#define GET_TX_DESC_RTS_RATE_FB_LIMIT(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc+20, 13, 4)
-#define GET_TX_DESC_RETRY_LIMIT_ENABLE(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc+20, 17, 1)
-#define GET_TX_DESC_DATA_RETRY_LIMIT(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc+20, 18, 6)
-#define GET_TX_DESC_USB_TXAGG_NUM(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc+20, 24, 8)
-
-#define SET_TX_DESC_TXAGC_A(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE(__pdesc+24, 0, 5, __val)
-#define SET_TX_DESC_TXAGC_B(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE(__pdesc+24, 5, 5, __val)
-#define SET_TX_DESC_USE_MAX_LEN(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE(__pdesc+24, 10, 1, __val)
-#define SET_TX_DESC_MAX_AGG_NUM(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE(__pdesc+24, 11, 5, __val)
-#define SET_TX_DESC_MCSG1_MAX_LEN(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE(__pdesc+24, 16, 4, __val)
-#define SET_TX_DESC_MCSG2_MAX_LEN(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE(__pdesc+24, 20, 4, __val)
-#define SET_TX_DESC_MCSG3_MAX_LEN(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE(__pdesc+24, 24, 4, __val)
-#define SET_TX_DESC_MCS7_SGI_MAX_LEN(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE(__pdesc+24, 28, 4, __val)
-
-#define GET_TX_DESC_TXAGC_A(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc+24, 0, 5)
-#define GET_TX_DESC_TXAGC_B(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc+24, 5, 5)
-#define GET_TX_DESC_USE_MAX_LEN(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc+24, 10, 1)
-#define GET_TX_DESC_MAX_AGG_NUM(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc+24, 11, 5)
-#define GET_TX_DESC_MCSG1_MAX_LEN(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc+24, 16, 4)
-#define GET_TX_DESC_MCSG2_MAX_LEN(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc+24, 20, 4)
-#define GET_TX_DESC_MCSG3_MAX_LEN(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc+24, 24, 4)
-#define GET_TX_DESC_MCS7_SGI_MAX_LEN(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc+24, 28, 4)
-
-#define SET_TX_DESC_TX_BUFFER_SIZE(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE(__pdesc+28, 0, 16, __val)
-#define SET_TX_DESC_SW_OFFSET30(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE(__pdesc+28, 16, 8, __val)
-#define SET_TX_DESC_SW_OFFSET31(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE(__pdesc+28, 24, 4, __val)
-#define SET_TX_DESC_ANTSEL_C(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE(__pdesc+28, 29, 1, __val)
-#define SET_TX_DESC_NULL_0(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE(__pdesc+28, 30, 1, __val)
-#define SET_TX_DESC_NULL_1(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE(__pdesc+28, 30, 1, __val)
-
-#define GET_TX_DESC_TX_BUFFER_SIZE(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc+28, 0, 16)
-
-#define SET_TX_DESC_TX_BUFFER_ADDRESS(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE(__pdesc+32, 0, 32, __val)
-#define SET_TX_DESC_TX_BUFFER_ADDRESS64(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE(__pdesc+36, 0, 32, __val)
-
-#define GET_TX_DESC_TX_BUFFER_ADDRESS(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc+32, 0, 32)
-#define GET_TX_DESC_TX_BUFFER_ADDRESS64(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc+36, 0, 32)
-
-#define SET_TX_DESC_NEXT_DESC_ADDRESS(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE(__pdesc+40, 0, 32, __val)
-#define SET_TX_DESC_NEXT_DESC_ADDRESS64(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE(__pdesc+44, 0, 32, __val)
-
-#define GET_TX_DESC_NEXT_DESC_ADDRESS(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc+40, 0, 32)
-#define GET_TX_DESC_NEXT_DESC_ADDRESS64(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc+44, 0, 32)
-
-#define GET_RX_DESC_PKT_LEN(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc, 0, 14)
-#define GET_RX_DESC_CRC32(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc, 14, 1)
-#define GET_RX_DESC_ICV(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc, 15, 1)
-#define GET_RX_DESC_DRV_INFO_SIZE(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc, 16, 4)
-#define GET_RX_DESC_SECURITY(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc, 20, 3)
-#define GET_RX_DESC_QOS(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc, 23, 1)
-#define GET_RX_DESC_SHIFT(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc, 24, 2)
-#define GET_RX_DESC_PHYST(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc, 26, 1)
-#define GET_RX_DESC_SWDEC(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc, 27, 1)
-#define GET_RX_DESC_LS(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc, 28, 1)
-#define GET_RX_DESC_FS(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc, 29, 1)
-#define GET_RX_DESC_EOR(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc, 30, 1)
-#define GET_RX_DESC_OWN(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc, 31, 1)
-
-#define SET_RX_DESC_PKT_LEN(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE(__pdesc, 0, 14, __val)
-#define SET_RX_DESC_EOR(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE(__pdesc, 30, 1, __val)
-#define SET_RX_DESC_OWN(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE(__pdesc, 31, 1, __val)
-
-#define GET_RX_DESC_MACID(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc+4, 0, 6)
-#define GET_RX_DESC_PAGGR(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc+4, 14, 1)
-#define GET_RX_DESC_FAGGR(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc+4, 15, 1)
-#define GET_RX_DESC_A1_FIT(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc+4, 16, 4)
-#define GET_RX_DESC_A2_FIT(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc+4, 20, 4)
-#define GET_RX_DESC_PAM(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc+4, 24, 1)
-#define GET_RX_DESC_PWR(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc+4, 25, 1)
-#define GET_RX_DESC_MD(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc+4, 26, 1)
-#define GET_RX_DESC_MF(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc+4, 27, 1)
-#define GET_RX_DESC_TYPE(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc+4, 28, 2)
-#define GET_RX_DESC_MC(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc+4, 30, 1)
-#define GET_RX_DESC_BC(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc+4, 31, 1)
-#define GET_RX_DESC_SEQ(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc+8, 0, 12)
-#define GET_RX_DESC_FRAG(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc+8, 12, 4)
-
-#define GET_RX_DESC_RXMCS(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc+12, 0, 6)
-#define GET_RX_DESC_RXHT(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc+12, 6, 1)
-#define GET_RX_STATUS_DESC_RX_GF(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc+12, 7, 1)
-#define GET_RX_DESC_SPLCP(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc+12, 8, 1)
-#define GET_RX_DESC_BW(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc+12, 9, 1)
-#define GET_RX_DESC_HTC(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc+12, 10, 1)
-#define GET_RX_STATUS_DESC_EOSP(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc+12, 11, 1)
-#define GET_RX_STATUS_DESC_BSSID_FIT(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc+12, 12, 2)
-#define GET_RX_STATUS_DESC_RPT_SEL(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc+12, 14, 2)
-
-#define GET_RX_STATUS_DESC_PATTERN_MATCH(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc+12, 29, 1)
-#define GET_RX_STATUS_DESC_UNICAST_MATCH(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc+12, 30, 1)
-#define GET_RX_STATUS_DESC_MAGIC_MATCH(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc+12, 31, 1)
-
-#define GET_RX_DESC_IV1(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc+16, 0, 32)
-#define GET_RX_DESC_TSFL(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc+20, 0, 32)
-
-#define GET_RX_DESC_BUFF_ADDR(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc+24, 0, 32)
-#define GET_RX_DESC_BUFF_ADDR64(__pdesc) \
- LE_BITS_TO_4BYTE(__pdesc+28, 0, 32)
-
-#define SET_RX_DESC_BUFF_ADDR(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE(__pdesc+24, 0, 32, __val)
-#define SET_RX_DESC_BUFF_ADDR64(__pdesc, __val) \
- SET_BITS_TO_LE_4BYTE(__pdesc+28, 0, 32, __val)
+static inline void set_tx_desc_pkt_size(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits(__pdesc, __val, GENMASK(15, 0));
+}
+
+static inline void set_tx_desc_offset(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits(__pdesc, __val, GENMASK(23, 16));
+}
+
+static inline void set_tx_desc_bmc(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits(__pdesc, __val, BIT(24));
+}
+
+static inline void set_tx_desc_htc(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits(__pdesc, __val, BIT(25));
+}
+
+static inline void set_tx_desc_last_seg(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits(__pdesc, __val, BIT(26));
+}
+
+static inline void set_tx_desc_first_seg(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits(__pdesc, __val, BIT(27));
+}
+
+static inline void set_tx_desc_linip(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits(__pdesc, __val, BIT(28));
+}
+
+static inline void set_tx_desc_own(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits(__pdesc, __val, BIT(31));
+}
+
+static inline int get_tx_desc_own(__le32 *__pdesc)
+{
+ return le32_get_bits(*(__pdesc), BIT(31));
+}
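Every accessor conversion in this patch follows one mechanical rule: the old (byte offset, bit shift, bit length) triple becomes a dword index (byte offset / 4) plus GENMASK(shift + length - 1, shift). The following standalone userspace sketch demonstrates the equivalence; GENMASK and both helpers are re-implemented locally for illustration only (in-kernel they come from <linux/bits.h> and <linux/bitfield.h>):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define GENMASK(h, l)	((~0u << (l)) & (~0u >> (31 - (h))))

/* stand-ins for le32_to_cpu()/cpu_to_le32() on a raw descriptor buffer */
static uint32_t le32_load(const uint8_t *p)
{
	return p[0] | p[1] << 8 | (uint32_t)p[2] << 16 | (uint32_t)p[3] << 24;
}

static void le32_store(uint8_t *p, uint32_t v)
{
	p[0] = v; p[1] = v >> 8; p[2] = v >> 16; p[3] = v >> 24;
}

/* old spelling: SET_BITS_TO_LE_4BYTE(desc + off, shift, len, val) */
static void set_bits_old(uint8_t *desc, int off, int shift, int len, uint32_t val)
{
	uint32_t w = le32_load(desc + off);

	w &= ~GENMASK(shift + len - 1, shift);
	w |= (val & GENMASK(len - 1, 0)) << shift;
	le32_store(desc + off, w);
}

/* new spelling: le32p_replace_bits(desc + off / 4, val, mask) */
static void replace_bits_new(uint8_t *desc, int dword, uint32_t mask, uint32_t val)
{
	uint32_t w = le32_load(desc + 4 * dword);

	w = (w & ~mask) | ((val << __builtin_ctz(mask)) & mask);
	le32_store(desc + 4 * dword, w);
}

int main(void)
{
	uint8_t a[8] = { 0 }, b[8] = { 0 };

	/* set_tx_desc_offset(desc, 0x20): old (+0, 16, 8) <-> new GENMASK(23, 16) */
	set_bits_old(a, 0, 16, 8, 0x20);
	replace_bits_new(b, 0, GENMASK(23, 16), 0x20);
	assert(le32_load(a) == le32_load(b));
	printf("dword0 = 0x%08x\n", le32_load(b));
	return 0;
}

Both spellings leave identical little-endian bytes in the descriptor, which is what makes the conversion safe to apply mechanically across every field below.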
+
+static inline void set_tx_desc_macid(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits(__pdesc + 1, __val, GENMASK(5, 0));
+}
+
+static inline void set_tx_desc_queue_sel(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits(__pdesc + 1, __val, GENMASK(12, 8));
+}
+
+static inline void set_tx_desc_rate_id(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits(__pdesc + 1, __val, GENMASK(19, 16));
+}
+
+static inline void set_tx_desc_nav_use_hdr(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits(__pdesc + 1, __val, BIT(20));
+}
+
+static inline void set_tx_desc_sec_type(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits(__pdesc + 1, __val, GENMASK(23, 22));
+}
+
+static inline void set_tx_desc_pkt_offset(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits(__pdesc + 1, __val, GENMASK(30, 26));
+}
+
+static inline void set_tx_desc_agg_enable(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits(__pdesc + 2, __val, BIT(12));
+}
+
+static inline void set_tx_desc_rdg_enable(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits(__pdesc + 2, __val, BIT(13));
+}
+
+static inline void set_tx_desc_more_frag(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits(__pdesc + 2, __val, BIT(17));
+}
+
+static inline void set_tx_desc_ampdu_density(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits(__pdesc + 2, __val, GENMASK(22, 20));
+}
+
+static inline void set_tx_desc_antsel_a(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits(__pdesc + 2, __val, BIT(24));
+}
+
+static inline void set_tx_desc_antsel_b(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits(__pdesc + 2, __val, BIT(25));
+}
+
+static inline void set_tx_desc_seq(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits(__pdesc + 3, __val, GENMASK(27, 16));
+}
+
+static inline void set_tx_desc_hwseq_en(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits(__pdesc + 3, __val, BIT(31));
+}
+
+static inline void set_tx_desc_rts_rate(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits(__pdesc + 4, __val, GENMASK(4, 0));
+}
+
+static inline void set_tx_desc_qos(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits(__pdesc + 4, __val, BIT(6));
+}
+
+static inline void set_tx_desc_use_rate(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits(__pdesc + 4, __val, BIT(8));
+}
+
+static inline void set_tx_desc_disable_fb(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits(__pdesc + 4, __val, BIT(10));
+}
+
+static inline void set_tx_desc_cts2self(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits(__pdesc + 4, __val, BIT(11));
+}
+
+static inline void set_tx_desc_rts_enable(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits(__pdesc + 4, __val, BIT(12));
+}
+
+static inline void set_tx_desc_hw_rts_enable(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits(__pdesc + 4, __val, BIT(13));
+}
+
+static inline void set_tx_desc_tx_sub_carrier(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits(__pdesc + 4, __val, GENMASK(21, 20));
+}
+
+static inline void set_tx_desc_tx_stbc(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits(__pdesc + 4, __val, GENMASK(23, 22));
+}
+
+static inline void set_tx_desc_data_bw(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits(__pdesc + 4, __val, BIT(25));
+}
+
+static inline void set_tx_desc_rts_short(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits(__pdesc + 4, __val, BIT(26));
+}
+
+static inline void set_tx_desc_rts_bw(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits(__pdesc + 4, __val, BIT(27));
+}
+
+static inline void set_tx_desc_rts_sc(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits(__pdesc + 4, __val, GENMASK(29, 28));
+}
+
+static inline void set_tx_desc_rts_stbc(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits(__pdesc + 4, __val, GENMASK(31, 30));
+}
+
+static inline void set_tx_desc_tx_rate(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits(__pdesc + 5, __val, GENMASK(5, 0));
+}
+
+static inline void set_tx_desc_data_shortgi(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits(__pdesc + 5, __val, BIT(6));
+}
+
+static inline void set_tx_desc_data_rate_fb_limit(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits(__pdesc + 5, __val, GENMASK(12, 8));
+}
+
+static inline void set_tx_desc_rts_rate_fb_limit(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits(__pdesc + 5, __val, GENMASK(16, 13));
+}
+
+static inline void set_tx_desc_max_agg_num(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits(__pdesc + 6, __val, GENMASK(15, 11));
+}
+
+static inline void set_tx_desc_antsel_c(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits(__pdesc + 7, __val, BIT(29));
+}
+
+static inline void set_tx_desc_tx_buffer_size(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits(__pdesc + 7, __val, GENMASK(15, 0));
+}
+
+static inline int get_tx_desc_tx_buffer_size(__le32 *__pdesc)
+{
+ return le32_get_bits(*(__pdesc + 7), GENMASK(15, 0));
+}
+
+static inline void set_tx_desc_tx_buffer_address(__le32 *__pdesc, u32 __val)
+{
+ *(__pdesc + 8) = cpu_to_le32(__val);
+}
+
+static inline int get_tx_desc_tx_buffer_address(__le32 *__pdesc)
+{
+ return le32_to_cpu(*(__pdesc + 8));
+}
+
+static inline void set_tx_desc_next_desc_address(__le32 *__pdesc, u32 __val)
+{
+ *(__pdesc + 10) = cpu_to_le32(__val);
+}
+
+static inline int get_rx_desc_pkt_len(__le32 *__pdesc)
+{
+ return le32_get_bits(*(__pdesc), GENMASK(13, 0));
+}
+
+static inline int get_rx_desc_crc32(__le32 *__pdesc)
+{
+ return le32_get_bits(*(__pdesc), BIT(14));
+}
+
+static inline int get_rx_desc_icv(__le32 *__pdesc)
+{
+ return le32_get_bits(*(__pdesc), BIT(15));
+}
+
+static inline int get_rx_desc_drv_info_size(__le32 *__pdesc)
+{
+ return le32_get_bits(*(__pdesc), GENMASK(19, 16));
+}
+
+static inline int get_rx_desc_security(__le32 *__pdesc)
+{
+ return le32_get_bits(*(__pdesc), GENMASK(22, 20));
+}
+
+static inline int get_rx_desc_qos(__le32 *__pdesc)
+{
+ return le32_get_bits(*(__pdesc), BIT(23));
+}
+
+static inline int get_rx_desc_shift(__le32 *__pdesc)
+{
+ return le32_get_bits(*(__pdesc), GENMASK(25, 24));
+}
+
+static inline int get_rx_desc_physt(__le32 *__pdesc)
+{
+ return le32_get_bits(*(__pdesc), BIT(26));
+}
+
+static inline int get_rx_desc_swdec(__le32 *__pdesc)
+{
+ return le32_get_bits(*(__pdesc), BIT(27));
+}
+
+static inline int get_rx_desc_ls(__le32 *__pdesc)
+{
+ return le32_get_bits(*(__pdesc), BIT(28));
+}
+
+static inline int get_rx_desc_fs(__le32 *__pdesc)
+{
+ return le32_get_bits(*(__pdesc), BIT(29));
+}
+
+static inline int get_rx_desc_eor(__le32 *__pdesc)
+{
+ return le32_get_bits(*(__pdesc), BIT(30));
+}
+
+static inline int get_rx_desc_own(__le32 *__pdesc)
+{
+ return le32_get_bits(*(__pdesc), BIT(31));
+}
+
+static inline void set_rx_desc_pkt_len(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits(__pdesc, __val, GENMASK(13, 0));
+}
+
+static inline void set_rx_desc_eor(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits(__pdesc, __val, BIT(30));
+}
+
+static inline void set_rx_desc_own(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits(__pdesc, __val, BIT(31));
+}
+
+static inline int get_rx_desc_macid(__le32 *__pdesc)
+{
+ return le32_get_bits(*(__pdesc + 1), GENMASK(5, 0));
+}
+
+static inline int get_rx_desc_paggr(__le32 *__pdesc)
+{
+ return le32_get_bits(*(__pdesc + 1), BIT(14));
+}
+
+static inline int get_rx_desc_faggr(__le32 *__pdesc)
+{
+ return le32_get_bits(*(__pdesc + 1), BIT(15));
+}
+
+static inline int get_rx_desc_a1_fit(__le32 *__pdesc)
+{
+ return le32_get_bits(*(__pdesc + 1), GENMASK(19, 16));
+}
+
+static inline int get_rx_desc_a2_fit(__le32 *__pdesc)
+{
+ return le32_get_bits(*(__pdesc + 1), GENMASK(23, 20));
+}
+
+static inline int get_rx_desc_pam(__le32 *__pdesc)
+{
+ return le32_get_bits(*(__pdesc + 1), BIT(24));
+}
+
+static inline int get_rx_desc_pwr(__le32 *__pdesc)
+{
+ return le32_get_bits(*(__pdesc + 1), BIT(25));
+}
+
+static inline int get_rx_desc_md(__le32 *__pdesc)
+{
+ return le32_get_bits(*(__pdesc + 1), BIT(26));
+}
+
+static inline int get_rx_desc_mf(__le32 *__pdesc)
+{
+ return le32_get_bits(*(__pdesc + 1), BIT(27));
+}
+
+static inline int get_rx_desc_type(__le32 *__pdesc)
+{
+ return le32_get_bits(*(__pdesc + 1), GENMASK(29, 28));
+}
+
+static inline int get_rx_desc_mc(__le32 *__pdesc)
+{
+ return le32_get_bits(*(__pdesc + 1), BIT(30));
+}
+
+static inline int get_rx_desc_bc(__le32 *__pdesc)
+{
+ return le32_get_bits(*(__pdesc + 1), BIT(31));
+}
+
+static inline int get_rx_desc_seq(__le32 *__pdesc)
+{
+ return le32_get_bits(*(__pdesc + 2), GENMASK(11, 0));
+}
+
+static inline int get_rx_desc_frag(__le32 *__pdesc)
+{
+ return le32_get_bits(*(__pdesc + 2), GENMASK(15, 12));
+}
+
+static inline int get_rx_desc_rxmcs(__le32 *__pdesc)
+{
+ return le32_get_bits(*(__pdesc + 3), GENMASK(5, 0));
+}
+
+static inline int get_rx_desc_rxht(__le32 *__pdesc)
+{
+ return le32_get_bits(*(__pdesc + 3), BIT(6));
+}
+
+static inline int get_rx_status_desc_rx_gf(__le32 *__pdesc)
+{
+ return le32_get_bits(*(__pdesc + 3), BIT(7));
+}
+
+static inline int get_rx_desc_splcp(__le32 *__pdesc)
+{
+ return le32_get_bits(*(__pdesc + 3), BIT(8));
+}
+
+static inline int get_rx_desc_bw(__le32 *__pdesc)
+{
+ return le32_get_bits(*(__pdesc + 3), BIT(9));
+}
+
+static inline int get_rx_desc_htc(__le32 *__pdesc)
+{
+ return le32_get_bits(*(__pdesc + 3), BIT(10));
+}
+
+static inline int get_rx_status_desc_eosp(__le32 *__pdesc)
+{
+ return le32_get_bits(*(__pdesc + 3), BIT(11));
+}
+
+static inline int get_rx_status_desc_bssid_fit(__le32 *__pdesc)
+{
+ return le32_get_bits(*(__pdesc + 3), GENMASK(13, 12));
+}
+
+static inline int get_rx_status_desc_rpt_sel(__le32 *__pdesc)
+{
+ return le32_get_bits(*(__pdesc + 3), GENMASK(15, 14));
+}
+
+static inline int get_rx_status_desc_pattern_match(__le32 *__pdesc)
+{
+ return le32_get_bits(*(__pdesc + 3), BIT(29));
+}
+
+static inline int get_rx_status_desc_unicast_match(__le32 *__pdesc)
+{
+ return le32_get_bits(*(__pdesc + 3), BIT(30));
+}
+
+static inline int get_rx_status_desc_magic_match(__le32 *__pdesc)
+{
+ return le32_get_bits(*(__pdesc + 3), BIT(31));
+}
+
+static inline int get_rx_desc_iv1(__le32 *__pdesc)
+{
+ return le32_to_cpu(*(__pdesc + 4));
+}
+
+static inline int get_rx_desc_tsfl(__le32 *__pdesc)
+{
+ return le32_to_cpu(*(__pdesc + 5));
+}
+
+static inline int get_rx_desc_buff_addr(__le32 *__pdesc)
+{
+ return le32_to_cpu(*(__pdesc + 6));
+}
+
+static inline int get_rx_desc_buff_addr64(__le32 *__pdesc)
+{
+ return le32_to_cpu(*(__pdesc + 7));
+}
+
+static inline void set_rx_desc_buff_addr(__le32 *__pdesc, u32 __val)
+{
+ *(__pdesc + 6) = cpu_to_le32(__val);
+}
+
+static inline void set_rx_desc_buff_addr64(__le32 *__pdesc, u32 __val)
+{
+ *(__pdesc + 7) = cpu_to_le32(__val);
+}
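Full 32-bit fields (DMA addresses, IV, TSF) need no mask at all, so their conversion is a plain endian swap of the right dword. A sketch with hypothetical helper names, assuming the kernel's byteorder helpers:

static inline u32 desc_dword_read(const __le32 *pdesc, int n)
{
	return le32_to_cpu(pdesc[n]);	/* e.g. get_rx_desc_tsfl() reads dword 5 */
}

static inline void desc_dword_write(__le32 *pdesc, int n, u32 val)
{
	pdesc[n] = cpu_to_le32(val);	/* e.g. set_rx_desc_buff_addr() writes dword 6 */
}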
/* TX report 2 format in Rx desc*/
-#define GET_RX_RPT2_DESC_PKT_LEN(__status) \
- LE_BITS_TO_4BYTE(__status, 0, 9)
-#define GET_RX_RPT2_DESC_MACID_VALID_1(__status) \
- LE_BITS_TO_4BYTE(__status+16, 0, 32)
-#define GET_RX_RPT2_DESC_MACID_VALID_2(__status) \
- LE_BITS_TO_4BYTE(__status+20, 0, 32)
-
-#define SET_EARLYMODE_PKTNUM(__paddr, __value) \
- SET_BITS_TO_LE_4BYTE(__paddr, 0, 4, __value)
-#define SET_EARLYMODE_LEN0(__paddr, __value) \
- SET_BITS_TO_LE_4BYTE(__paddr, 4, 12, __value)
-#define SET_EARLYMODE_LEN1(__paddr, __value) \
- SET_BITS_TO_LE_4BYTE(__paddr, 16, 12, __value)
-#define SET_EARLYMODE_LEN2_1(__paddr, __value) \
- SET_BITS_TO_LE_4BYTE(__paddr, 28, 4, __value)
-#define SET_EARLYMODE_LEN2_2(__paddr, __value) \
- SET_BITS_TO_LE_4BYTE(__paddr+4, 0, 8, __value)
-#define SET_EARLYMODE_LEN3(__paddr, __value) \
- SET_BITS_TO_LE_4BYTE(__paddr+4, 8, 12, __value)
-#define SET_EARLYMODE_LEN4(__paddr, __value) \
- SET_BITS_TO_LE_4BYTE(__paddr+4, 20, 12, __value)
-
-#define CLEAR_PCI_TX_DESC_CONTENT(__pdesc, _size) \
-do { \
- if (_size > TX_DESC_NEXT_DESC_OFFSET) \
- memset(__pdesc, 0, TX_DESC_NEXT_DESC_OFFSET); \
- else \
- memset(__pdesc, 0, _size); \
-} while (0)
+static inline int get_rx_rpt2_desc_pkt_len(__le32 *__status)
+{
+ return le32_get_bits(*(__status), GENMASK(8, 0));
+}
+
+static inline int get_rx_rpt2_desc_macid_valid_1(__le32 *__status)
+{
+ return le32_to_cpu(*(__status + 4));
+}
+
+static inline int get_rx_rpt2_desc_macid_valid_2(__le32 *__status)
+{
+ return le32_to_cpu(*(__status + 5));
+}
+
+static inline void set_earlymode_pktnum(__le32 *__paddr, u32 __value)
+{
+ le32p_replace_bits(__paddr, __value, GENMASK(3, 0));
+}
+
+static inline void set_earlymode_len0(__le32 *__paddr, u32 __value)
+{
+ le32p_replace_bits(__paddr, __value, GENMASK(15, 4));
+}
+
+static inline void set_earlymode_len1(__le32 *__paddr, u32 __value)
+{
+ le32p_replace_bits(__paddr, __value, GENMASK(27, 16));
+}
+
+static inline void set_earlymode_len2_1(__le32 *__paddr, u32 __value)
+{
+ le32p_replace_bits(__paddr, __value, GENMASK(31, 28));
+}
+
+static inline void set_earlymode_len2_2(__le32 *__paddr, u32 __value)
+{
+ le32p_replace_bits(__paddr + 1, __value, GENMASK(7, 0));
+}
+
+static inline void set_earlymode_len3(__le32 *__paddr, u32 __value)
+{
+ le32p_replace_bits(__paddr + 1, __value, GENMASK(19, 8));
+}
+
+static inline void set_earlymode_len4(__le32 *__paddr, u32 __value)
+{
+ le32p_replace_bits(__paddr + 1, __value, GENMASK(31, 20));
+}
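LEN2 is the one 12-bit early-mode length that straddles a dword boundary, which is why it is split into the len2_1/len2_2 pair above. A combined setter might look like this (hypothetical helper; the low-4/high-8 split follows the convention used by rtlwifi's early-mode fill code, an assumption worth double-checking against the caller):

static inline void set_earlymode_len2(__le32 *__paddr, u32 __value)
{
	set_earlymode_len2_1(__paddr, __value & 0xf);	/* bits [3:0]  -> dword0[31:28] */
	set_earlymode_len2_2(__paddr, __value >> 4);	/* bits [11:4] -> dword1[7:0]  */
}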
+
+static inline void clear_pci_tx_desc_content(__le32 *__pdesc, int _size)
+{
+ if (_size > TX_DESC_NEXT_DESC_OFFSET)
+ memset(__pdesc, 0, TX_DESC_NEXT_DESC_OFFSET);
+ else
+ memset(__pdesc, 0, _size);
+}
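The if/else above preserves the removed macro byte-for-byte; the CLEAR_PCI_TX_DESC_CONTENT macro removed from rtl8192ce's trx.h further below had already collapsed the same guard to one line. A sketch of that form, assuming min_t() from <linux/kernel.h> and a non-negative size:

static inline void clear_pci_tx_desc_content(__le32 *__pdesc, int _size)
{
	/* never wipe past the next-descriptor pointer */
	memset(__pdesc, 0, min_t(size_t, _size, TX_DESC_NEXT_DESC_OFFSET));
}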
#define RTL8188_RX_HAL_IS_CCK_RATE(rxmcs)\
(rxmcs == DESC92C_RATE1M ||\
@@ -520,17 +560,7 @@ do { \
rxmcs == DESC92C_RATE5_5M ||\
rxmcs == DESC92C_RATE11M)
-#define IS_LITTLE_ENDIAN 1
-
-struct phy_rx_agc_info_t {
- #if IS_LITTLE_ENDIAN
- u8 gain:7, trsw:1;
- #else
- u8 trsw:1, gain:7;
- #endif
-};
struct phy_status_rpt {
- struct phy_rx_agc_info_t path_agc[2];
u8 ch_corr[2];
u8 cck_sig_qual_ofdm_pwdb_all;
u8 cck_agc_rpt_ofdm_cfosho_a;
@@ -547,7 +577,7 @@ struct phy_status_rpt {
u8 stream_target_csi[2];
u8 sig_evm;
u8 rsvd_3;
-#if IS_LITTLE_ENDIAN
+#if defined(__LITTLE_ENDIAN)
u8 antsel_rx_keep_2:1; /*ex_intf_flg:1;*/
u8 sgi_en:1;
u8 rxsc:2;
@@ -555,7 +585,7 @@ struct phy_status_rpt {
u8 r_ant_train_en:1;
u8 ant_sel_b:1;
u8 ant_sel:1;
-#else /* _BIG_ENDIAN_ */
+#else /* __BIG_ENDIAN */
u8 ant_sel:1;
u8 ant_sel_b:1;
u8 r_ant_train_en:1;
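The endianness guard is the substantive fix in this hunk: IS_LITTLE_ENDIAN was hard-coded to 1, so the #else branch could never be selected and big-endian hosts parsed the PHY status report with the wrong bit order. __LITTLE_ENDIAN/__BIG_ENDIAN come from <asm/byteorder.h> and make the layout follow the host. A sketch of the portable pattern, reusing the removed struct's field names purely for illustration:

#include <asm/byteorder.h>

struct agc_info_example {	/* hypothetical name */
#if defined(__LITTLE_ENDIAN)
	u8 gain:7, trsw:1;
#else	/* __BIG_ENDIAN */
	u8 trsw:1, gain:7;
#endif
};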
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/sw.c
index a9c0111444bc..900788e4018c 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/sw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/sw.c
@@ -113,8 +113,6 @@ int rtl92c_init_sw_vars(struct ieee80211_hw *hw)
rtlpriv->psc.inactiveps = rtlpriv->cfg->mod_params->inactiveps;
rtlpriv->psc.swctrl_lps = rtlpriv->cfg->mod_params->swctrl_lps;
rtlpriv->psc.fwctrl_lps = rtlpriv->cfg->mod_params->fwctrl_lps;
- rtlpriv->cfg->mod_params->sw_crypto =
- rtlpriv->cfg->mod_params->sw_crypto;
if (!rtlpriv->psc.inactiveps)
pr_info("rtl8192ce: Power Save off (module option)\n");
if (!rtlpriv->psc.fwctrl_lps)
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/trx.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/trx.c
index 18a0ab59631a..123dbf0903a1 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/trx.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/trx.c
@@ -251,8 +251,8 @@ static void _rtl92ce_query_rxphystatus(struct ieee80211_hw *hw,
*/
if (i == 0)
pstats->signalquality =
- (u8) (evm & 0xff);
- pstats->rx_mimo_sig_qual[i] = (u8) (evm & 0xff);
+ (u8)(evm & 0xff);
+ pstats->rx_mimo_sig_qual[i] = (u8)(evm & 0xff);
}
}
}
@@ -262,10 +262,10 @@ static void _rtl92ce_query_rxphystatus(struct ieee80211_hw *hw,
*/
if (is_cck_rate)
pstats->signalstrength =
- (u8) (_rtl92ce_signal_scale_mapping(hw, pwdb_all));
+ (u8)(_rtl92ce_signal_scale_mapping(hw, pwdb_all));
else if (rf_rx_num != 0)
pstats->signalstrength =
- (u8) (_rtl92ce_signal_scale_mapping
+ (u8)(_rtl92ce_signal_scale_mapping
(hw, total_rssi /= rf_rx_num));
}
@@ -317,29 +317,30 @@ static void _rtl92ce_translate_rx_signal_stuff(struct ieee80211_hw *hw,
bool rtl92ce_rx_query_desc(struct ieee80211_hw *hw,
struct rtl_stats *stats,
struct ieee80211_rx_status *rx_status,
- u8 *p_desc, struct sk_buff *skb)
+ u8 *p_desc8, struct sk_buff *skb)
{
struct rx_fwinfo_92c *p_drvinfo;
- struct rx_desc_92c *pdesc = (struct rx_desc_92c *)p_desc;
+ struct rx_desc_92c *pdesc = (struct rx_desc_92c *)p_desc8;
struct ieee80211_hdr *hdr;
- u32 phystatus = GET_RX_DESC_PHYST(pdesc);
+ __le32 *p_desc = (__le32 *)p_desc8;
+ u32 phystatus = get_rx_desc_physt(p_desc);
- stats->length = (u16) GET_RX_DESC_PKT_LEN(pdesc);
- stats->rx_drvinfo_size = (u8) GET_RX_DESC_DRV_INFO_SIZE(pdesc) *
+ stats->length = (u16)get_rx_desc_pkt_len(p_desc);
+ stats->rx_drvinfo_size = (u8)get_rx_desc_drv_info_size(p_desc) *
RX_DRV_INFO_SIZE_UNIT;
- stats->rx_bufshift = (u8) (GET_RX_DESC_SHIFT(pdesc) & 0x03);
- stats->icv = (u16) GET_RX_DESC_ICV(pdesc);
- stats->crc = (u16) GET_RX_DESC_CRC32(pdesc);
+ stats->rx_bufshift = (u8)(get_rx_desc_shift(p_desc) & 0x03);
+ stats->icv = (u16)get_rx_desc_icv(p_desc);
+ stats->crc = (u16)get_rx_desc_crc32(p_desc);
stats->hwerror = (stats->crc | stats->icv);
- stats->decrypted = !GET_RX_DESC_SWDEC(pdesc);
- stats->rate = (u8) GET_RX_DESC_RXMCS(pdesc);
- stats->shortpreamble = (u16) GET_RX_DESC_SPLCP(pdesc);
- stats->isampdu = (bool) (GET_RX_DESC_PAGGR(pdesc) == 1);
- stats->isfirst_ampdu = (bool) ((GET_RX_DESC_PAGGR(pdesc) == 1)
- && (GET_RX_DESC_FAGGR(pdesc) == 1));
- stats->timestamp_low = GET_RX_DESC_TSFL(pdesc);
- stats->rx_is40mhzpacket = (bool)GET_RX_DESC_BW(pdesc);
- stats->is_ht = (bool)GET_RX_DESC_RXHT(pdesc);
+ stats->decrypted = !get_rx_desc_swdec(p_desc);
+ stats->rate = (u8)get_rx_desc_rxmcs(p_desc);
+ stats->shortpreamble = (u16)get_rx_desc_splcp(p_desc);
+ stats->isampdu = (bool)(get_rx_desc_paggr(p_desc) == 1);
+ stats->isfirst_ampdu = (bool)((get_rx_desc_paggr(p_desc) == 1) &&
+ (get_rx_desc_faggr(p_desc) == 1));
+ stats->timestamp_low = get_rx_desc_tsfl(p_desc);
+ stats->rx_is40mhzpacket = (bool)get_rx_desc_bw(p_desc);
+ stats->is_ht = (bool)get_rx_desc_rxht(p_desc);
stats->is_cck = RX_HAL_IS_CCK_RATE(pdesc->rxmcs);
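The hunk above also shows the conversion pattern used throughout this file: the rtlwifi core still hands descriptors around as u8 *, so each function renames its parameter to p_desc8 and immediately builds a typed alias, after which every access goes through the __le32 bitwise type that sparse can check. A minimal sketch of the idiom:

static void example_use(u8 *pdesc8)	/* hypothetical helper */
{
	__le32 *pdesc = (__le32 *)pdesc8;

	set_rx_desc_own(pdesc, 1);	/* typed, endian-checked from here on */
}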
@@ -400,7 +401,7 @@ bool rtl92ce_rx_query_desc(struct ieee80211_hw *hw,
}
void rtl92ce_tx_fill_desc(struct ieee80211_hw *hw,
- struct ieee80211_hdr *hdr, u8 *pdesc_tx,
+ struct ieee80211_hdr *hdr, u8 *pdesc8,
u8 *pbd_desc_tx, struct ieee80211_tx_info *info,
struct ieee80211_sta *sta,
struct sk_buff *skb,
@@ -411,7 +412,7 @@ void rtl92ce_tx_fill_desc(struct ieee80211_hw *hw,
struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
bool defaultadapter = true;
- u8 *pdesc = pdesc_tx;
+ __le32 *pdesc = (__le32 *)pdesc8;
u16 seq_number;
__le16 fc = hdr->frame_control;
u8 fw_qsel = _rtl92ce_map_hwqueue_to_fwqueue(skb, hw_queue);
@@ -447,64 +448,64 @@ void rtl92ce_tx_fill_desc(struct ieee80211_hw *hw,
rtl_get_tcb_desc(hw, info, sta, skb, tcb_desc);
- CLEAR_PCI_TX_DESC_CONTENT(pdesc, sizeof(struct tx_desc_92c));
+ clear_pci_tx_desc_content(pdesc, sizeof(struct tx_desc_92c));
if (ieee80211_is_nullfunc(fc) || ieee80211_is_ctl(fc)) {
firstseg = true;
lastseg = true;
}
if (firstseg) {
- SET_TX_DESC_OFFSET(pdesc, USB_HWDESC_HEADER_LEN);
+ set_tx_desc_offset(pdesc, USB_HWDESC_HEADER_LEN);
- SET_TX_DESC_TX_RATE(pdesc, tcb_desc->hw_rate);
+ set_tx_desc_tx_rate(pdesc, tcb_desc->hw_rate);
if (tcb_desc->use_shortgi || tcb_desc->use_shortpreamble)
- SET_TX_DESC_DATA_SHORTGI(pdesc, 1);
+ set_tx_desc_data_shortgi(pdesc, 1);
if (info->flags & IEEE80211_TX_CTL_AMPDU) {
- SET_TX_DESC_AGG_BREAK(pdesc, 1);
- SET_TX_DESC_MAX_AGG_NUM(pdesc, 0x14);
+ set_tx_desc_agg_break(pdesc, 1);
+ set_tx_desc_max_agg_num(pdesc, 0x14);
}
- SET_TX_DESC_SEQ(pdesc, seq_number);
+ set_tx_desc_seq(pdesc, seq_number);
- SET_TX_DESC_RTS_ENABLE(pdesc, ((tcb_desc->rts_enable &&
+ set_tx_desc_rts_enable(pdesc, ((tcb_desc->rts_enable &&
!tcb_desc->
cts_enable) ? 1 : 0));
- SET_TX_DESC_HW_RTS_ENABLE(pdesc,
+ set_tx_desc_hw_rts_enable(pdesc,
((tcb_desc->rts_enable
|| tcb_desc->cts_enable) ? 1 : 0));
- SET_TX_DESC_CTS2SELF(pdesc, ((tcb_desc->cts_enable) ? 1 : 0));
- SET_TX_DESC_RTS_STBC(pdesc, ((tcb_desc->rts_stbc) ? 1 : 0));
+ set_tx_desc_cts2self(pdesc, ((tcb_desc->cts_enable) ? 1 : 0));
+ set_tx_desc_rts_stbc(pdesc, ((tcb_desc->rts_stbc) ? 1 : 0));
- SET_TX_DESC_RTS_RATE(pdesc, tcb_desc->rts_rate);
- SET_TX_DESC_RTS_BW(pdesc, 0);
- SET_TX_DESC_RTS_SC(pdesc, tcb_desc->rts_sc);
- SET_TX_DESC_RTS_SHORT(pdesc,
+ set_tx_desc_rts_rate(pdesc, tcb_desc->rts_rate);
+ set_tx_desc_rts_bw(pdesc, 0);
+ set_tx_desc_rts_sc(pdesc, tcb_desc->rts_sc);
+ set_tx_desc_rts_short(pdesc,
((tcb_desc->rts_rate <= DESC_RATE54M) ?
(tcb_desc->rts_use_shortpreamble ? 1 : 0)
: (tcb_desc->rts_use_shortgi ? 1 : 0)));
if (bw_40) {
if (tcb_desc->packet_bw) {
- SET_TX_DESC_DATA_BW(pdesc, 1);
- SET_TX_DESC_TX_SUB_CARRIER(pdesc, 3);
+ set_tx_desc_data_bw(pdesc, 1);
+ set_tx_desc_tx_sub_carrier(pdesc, 3);
} else {
- SET_TX_DESC_DATA_BW(pdesc, 0);
- SET_TX_DESC_TX_SUB_CARRIER(pdesc,
+ set_tx_desc_data_bw(pdesc, 0);
+ set_tx_desc_tx_sub_carrier(pdesc,
mac->cur_40_prime_sc);
}
} else {
- SET_TX_DESC_DATA_BW(pdesc, 0);
- SET_TX_DESC_TX_SUB_CARRIER(pdesc, 0);
+ set_tx_desc_data_bw(pdesc, 0);
+ set_tx_desc_tx_sub_carrier(pdesc, 0);
}
- SET_TX_DESC_LINIP(pdesc, 0);
- SET_TX_DESC_PKT_SIZE(pdesc, (u16) skb->len);
+ set_tx_desc_linip(pdesc, 0);
+ set_tx_desc_pkt_size(pdesc, (u16)skb->len);
if (sta) {
u8 ampdu_density = sta->ht_cap.ampdu_density;
- SET_TX_DESC_AMPDU_DENSITY(pdesc, ampdu_density);
+ set_tx_desc_ampdu_density(pdesc, ampdu_density);
}
if (info->control.hw_key) {
@@ -515,77 +516,78 @@ void rtl92ce_tx_fill_desc(struct ieee80211_hw *hw,
case WLAN_CIPHER_SUITE_WEP40:
case WLAN_CIPHER_SUITE_WEP104:
case WLAN_CIPHER_SUITE_TKIP:
- SET_TX_DESC_SEC_TYPE(pdesc, 0x1);
+ set_tx_desc_sec_type(pdesc, 0x1);
break;
case WLAN_CIPHER_SUITE_CCMP:
- SET_TX_DESC_SEC_TYPE(pdesc, 0x3);
+ set_tx_desc_sec_type(pdesc, 0x3);
break;
default:
- SET_TX_DESC_SEC_TYPE(pdesc, 0x0);
+ set_tx_desc_sec_type(pdesc, 0x0);
break;
}
}
- SET_TX_DESC_PKT_ID(pdesc, 0);
- SET_TX_DESC_QUEUE_SEL(pdesc, fw_qsel);
+ set_tx_desc_pkt_id(pdesc, 0);
+ set_tx_desc_queue_sel(pdesc, fw_qsel);
- SET_TX_DESC_DATA_RATE_FB_LIMIT(pdesc, 0x1F);
- SET_TX_DESC_RTS_RATE_FB_LIMIT(pdesc, 0xF);
- SET_TX_DESC_DISABLE_FB(pdesc, 0);
- SET_TX_DESC_USE_RATE(pdesc, tcb_desc->use_driver_rate ? 1 : 0);
+ set_tx_desc_data_rate_fb_limit(pdesc, 0x1F);
+ set_tx_desc_rts_rate_fb_limit(pdesc, 0xF);
+ set_tx_desc_disable_fb(pdesc, 0);
+ set_tx_desc_use_rate(pdesc, tcb_desc->use_driver_rate ? 1 : 0);
if (ieee80211_is_data_qos(fc)) {
if (mac->rdg_en) {
RT_TRACE(rtlpriv, COMP_SEND, DBG_TRACE,
"Enable RDG function\n");
- SET_TX_DESC_RDG_ENABLE(pdesc, 1);
- SET_TX_DESC_HTC(pdesc, 1);
+ set_tx_desc_rdg_enable(pdesc, 1);
+ set_tx_desc_htc(pdesc, 1);
}
}
}
rcu_read_unlock();
- SET_TX_DESC_FIRST_SEG(pdesc, (firstseg ? 1 : 0));
- SET_TX_DESC_LAST_SEG(pdesc, (lastseg ? 1 : 0));
+ set_tx_desc_first_seg(pdesc, (firstseg ? 1 : 0));
+ set_tx_desc_last_seg(pdesc, (lastseg ? 1 : 0));
- SET_TX_DESC_TX_BUFFER_SIZE(pdesc, (u16) skb->len);
+ set_tx_desc_tx_buffer_size(pdesc, (u16)skb->len);
- SET_TX_DESC_TX_BUFFER_ADDRESS(pdesc, mapping);
+ set_tx_desc_tx_buffer_address(pdesc, mapping);
if (rtlpriv->dm.useramask) {
- SET_TX_DESC_RATE_ID(pdesc, tcb_desc->ratr_index);
- SET_TX_DESC_MACID(pdesc, tcb_desc->mac_id);
+ set_tx_desc_rate_id(pdesc, tcb_desc->ratr_index);
+ set_tx_desc_macid(pdesc, tcb_desc->mac_id);
} else {
- SET_TX_DESC_RATE_ID(pdesc, 0xC + tcb_desc->ratr_index);
- SET_TX_DESC_MACID(pdesc, tcb_desc->ratr_index);
+ set_tx_desc_rate_id(pdesc, 0xC + tcb_desc->ratr_index);
+ set_tx_desc_macid(pdesc, tcb_desc->ratr_index);
}
if ((!ieee80211_is_data_qos(fc)) && ppsc->fwctrl_lps) {
- SET_TX_DESC_HWSEQ_EN(pdesc, 1);
- SET_TX_DESC_PKT_ID(pdesc, 8);
+ set_tx_desc_hwseq_en(pdesc, 1);
+ set_tx_desc_pkt_id(pdesc, 8);
if (!defaultadapter)
- SET_TX_DESC_QOS(pdesc, 1);
+ set_tx_desc_qos(pdesc, 1);
}
- SET_TX_DESC_MORE_FRAG(pdesc, (lastseg ? 0 : 1));
+ set_tx_desc_more_frag(pdesc, (lastseg ? 0 : 1));
if (is_multicast_ether_addr(ieee80211_get_DA(hdr)) ||
is_broadcast_ether_addr(ieee80211_get_DA(hdr))) {
- SET_TX_DESC_BMC(pdesc, 1);
+ set_tx_desc_bmc(pdesc, 1);
}
RT_TRACE(rtlpriv, COMP_SEND, DBG_TRACE, "\n");
}
void rtl92ce_tx_fill_cmddesc(struct ieee80211_hw *hw,
- u8 *pdesc, bool firstseg,
+ u8 *pdesc8, bool firstseg,
bool lastseg, struct sk_buff *skb)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
u8 fw_queue = QSLT_BEACON;
+ __le32 *pdesc = (__le32 *)pdesc8;
dma_addr_t mapping = pci_map_single(rtlpci->pdev,
skb->data, skb->len,
@@ -599,60 +601,62 @@ void rtl92ce_tx_fill_cmddesc(struct ieee80211_hw *hw,
"DMA mapping error\n");
return;
}
- CLEAR_PCI_TX_DESC_CONTENT(pdesc, TX_DESC_SIZE);
+ clear_pci_tx_desc_content(pdesc, TX_DESC_SIZE);
if (firstseg)
- SET_TX_DESC_OFFSET(pdesc, USB_HWDESC_HEADER_LEN);
+ set_tx_desc_offset(pdesc, USB_HWDESC_HEADER_LEN);
- SET_TX_DESC_TX_RATE(pdesc, DESC_RATE1M);
+ set_tx_desc_tx_rate(pdesc, DESC_RATE1M);
- SET_TX_DESC_SEQ(pdesc, 0);
+ set_tx_desc_seq(pdesc, 0);
- SET_TX_DESC_LINIP(pdesc, 0);
+ set_tx_desc_linip(pdesc, 0);
- SET_TX_DESC_QUEUE_SEL(pdesc, fw_queue);
+ set_tx_desc_queue_sel(pdesc, fw_queue);
- SET_TX_DESC_FIRST_SEG(pdesc, 1);
- SET_TX_DESC_LAST_SEG(pdesc, 1);
+ set_tx_desc_first_seg(pdesc, 1);
+ set_tx_desc_last_seg(pdesc, 1);
- SET_TX_DESC_TX_BUFFER_SIZE(pdesc, (u16) (skb->len));
+ set_tx_desc_tx_buffer_size(pdesc, (u16)(skb->len));
- SET_TX_DESC_TX_BUFFER_ADDRESS(pdesc, mapping);
+ set_tx_desc_tx_buffer_address(pdesc, mapping);
- SET_TX_DESC_RATE_ID(pdesc, 7);
- SET_TX_DESC_MACID(pdesc, 0);
+ set_tx_desc_rate_id(pdesc, 7);
+ set_tx_desc_macid(pdesc, 0);
- SET_TX_DESC_OWN(pdesc, 1);
+ set_tx_desc_own(pdesc, 1);
- SET_TX_DESC_PKT_SIZE(pdesc, (u16) (skb->len));
+ set_tx_desc_pkt_size(pdesc, (u16)(skb->len));
- SET_TX_DESC_FIRST_SEG(pdesc, 1);
- SET_TX_DESC_LAST_SEG(pdesc, 1);
+ set_tx_desc_first_seg(pdesc, 1);
+ set_tx_desc_last_seg(pdesc, 1);
- SET_TX_DESC_OFFSET(pdesc, 0x20);
+ set_tx_desc_offset(pdesc, 0x20);
- SET_TX_DESC_USE_RATE(pdesc, 1);
+ set_tx_desc_use_rate(pdesc, 1);
if (!ieee80211_is_data_qos(fc)) {
- SET_TX_DESC_HWSEQ_EN(pdesc, 1);
- SET_TX_DESC_PKT_ID(pdesc, 8);
+ set_tx_desc_hwseq_en(pdesc, 1);
+ set_tx_desc_pkt_id(pdesc, 8);
}
RT_PRINT_DATA(rtlpriv, COMP_CMD, DBG_LOUD,
"H2C Tx Cmd Content", pdesc, TX_DESC_SIZE);
}
-void rtl92ce_set_desc(struct ieee80211_hw *hw, u8 *pdesc, bool istx,
+void rtl92ce_set_desc(struct ieee80211_hw *hw, u8 *pdesc8, bool istx,
u8 desc_name, u8 *val)
{
+ __le32 *pdesc = (__le32 *)pdesc8;
+
if (istx) {
switch (desc_name) {
case HW_DESC_OWN:
wmb();
- SET_TX_DESC_OWN(pdesc, 1);
+ set_tx_desc_own(pdesc, 1);
break;
case HW_DESC_TX_NEXTDESC_ADDR:
- SET_TX_DESC_NEXT_DESC_ADDRESS(pdesc, *(u32 *) val);
+ set_tx_desc_next_desc_address(pdesc, *(u32 *)val);
break;
default:
WARN_ONCE(true, "rtl8192ce: ERR txdesc :%d not processed\n",
@@ -663,16 +667,16 @@ void rtl92ce_set_desc(struct ieee80211_hw *hw, u8 *pdesc, bool istx,
switch (desc_name) {
case HW_DESC_RXOWN:
wmb();
- SET_RX_DESC_OWN(pdesc, 1);
+ set_rx_desc_own(pdesc, 1);
break;
case HW_DESC_RXBUFF_ADDR:
- SET_RX_DESC_BUFF_ADDR(pdesc, *(u32 *) val);
+ set_rx_desc_buff_addr(pdesc, *(u32 *)val);
break;
case HW_DESC_RXPKT_LEN:
- SET_RX_DESC_PKT_LEN(pdesc, *(u32 *) val);
+ set_rx_desc_pkt_len(pdesc, *(u32 *)val);
break;
case HW_DESC_RXERO:
- SET_RX_DESC_EOR(pdesc, 1);
+ set_rx_desc_eor(pdesc, 1);
break;
default:
WARN_ONCE(true, "rtl8192ce: ERR rxdesc :%d not processed\n",
@@ -682,18 +686,19 @@ void rtl92ce_set_desc(struct ieee80211_hw *hw, u8 *pdesc, bool istx,
}
}
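Note that the HW_DESC_OWN case keeps the wmb() that preceded the old SET_TX_DESC_OWN macro, and the ordering is essential: every descriptor field must be visible in memory before the OWN bit hands the descriptor to the NIC, or the device may DMA a half-written descriptor. A minimal sketch of the required sequence (mapping and len as filled in by the caller):

	set_tx_desc_tx_buffer_address(pdesc, mapping);	/* fill all fields ...   */
	set_tx_desc_tx_buffer_size(pdesc, len);
	wmb();						/* ... make them visible */
	set_tx_desc_own(pdesc, 1);			/* ... then hand off     */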
-u64 rtl92ce_get_desc(struct ieee80211_hw *hw, u8 *p_desc,
+u64 rtl92ce_get_desc(struct ieee80211_hw *hw, u8 *p_desc8,
bool istx, u8 desc_name)
{
u32 ret = 0;
+ __le32 *p_desc = (__le32 *)p_desc8;
if (istx) {
switch (desc_name) {
case HW_DESC_OWN:
- ret = GET_TX_DESC_OWN(p_desc);
+ ret = get_tx_desc_own(p_desc);
break;
case HW_DESC_TXBUFF_ADDR:
- ret = GET_TX_DESC_TX_BUFFER_ADDRESS(p_desc);
+ ret = get_tx_desc_tx_buffer_address(p_desc);
break;
default:
WARN_ONCE(true, "rtl8192ce: ERR txdesc :%d not processed\n",
@@ -703,13 +708,13 @@ u64 rtl92ce_get_desc(struct ieee80211_hw *hw, u8 *p_desc,
} else {
switch (desc_name) {
case HW_DESC_OWN:
- ret = GET_RX_DESC_OWN(p_desc);
+ ret = get_rx_desc_own(p_desc);
break;
case HW_DESC_RXPKT_LEN:
- ret = GET_RX_DESC_PKT_LEN(p_desc);
+ ret = get_rx_desc_pkt_len(p_desc);
break;
case HW_DESC_RXBUFF_ADDR:
- ret = GET_RX_DESC_BUFF_ADDR(p_desc);
+ ret = get_rx_desc_buff_addr(p_desc);
break;
default:
WARN_ONCE(true, "rtl8192ce: ERR rxdesc :%d not processed\n",
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/trx.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/trx.h
index fb1d4444a52f..709dcac9d84b 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/trx.h
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/trx.h
@@ -14,497 +14,322 @@
#define USB_HWDESC_HEADER_LEN 32
#define CRCLENGTH 4
-/* Define a macro that takes a le32 word, converts it to host ordering,
- * right shifts by a specified count, creates a mask of the specified
- * bit count, and extracts that number of bits.
- */
-
-#define SHIFT_AND_MASK_LE(__pdesc, __shift, __mask) \
- ((le32_to_cpu(*(((__le32 *)(__pdesc)))) >> (__shift)) & \
- BIT_LEN_MASK_32(__mask))
-
-/* Define a macro that clears a bit field in an le32 word and
- * sets the specified value into that bit field. The resulting
- * value remains in le32 ordering; however, it is properly converted
- * to host ordering for the clear and set operations before conversion
- * back to le32.
- */
-
-#define SET_BITS_OFFSET_LE(__pdesc, __shift, __len, __val) \
- (*(__le32 *)(__pdesc) = \
- (cpu_to_le32((le32_to_cpu(*((__le32 *)(__pdesc))) & \
- (~(BIT_OFFSET_LEN_MASK_32((__shift), __len)))) | \
- (((u32)(__val) & BIT_LEN_MASK_32(__len)) << (__shift)))));
-
/* macros to read/write various fields in RX or TX descriptors */
-#define SET_TX_DESC_PKT_SIZE(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc, 0, 16, __val)
-#define SET_TX_DESC_OFFSET(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc, 16, 8, __val)
-#define SET_TX_DESC_BMC(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc, 24, 1, __val)
-#define SET_TX_DESC_HTC(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc, 25, 1, __val)
-#define SET_TX_DESC_LAST_SEG(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc, 26, 1, __val)
-#define SET_TX_DESC_FIRST_SEG(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc, 27, 1, __val)
-#define SET_TX_DESC_LINIP(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc, 28, 1, __val)
-#define SET_TX_DESC_NO_ACM(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc, 29, 1, __val)
-#define SET_TX_DESC_GF(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc, 30, 1, __val)
-#define SET_TX_DESC_OWN(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc, 31, 1, __val)
-
-#define GET_TX_DESC_PKT_SIZE(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc, 0, 16)
-#define GET_TX_DESC_OFFSET(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc, 16, 8)
-#define GET_TX_DESC_BMC(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc, 24, 1)
-#define GET_TX_DESC_HTC(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc, 25, 1)
-#define GET_TX_DESC_LAST_SEG(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc, 26, 1)
-#define GET_TX_DESC_FIRST_SEG(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc, 27, 1)
-#define GET_TX_DESC_LINIP(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc, 28, 1)
-#define GET_TX_DESC_NO_ACM(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc, 29, 1)
-#define GET_TX_DESC_GF(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc, 30, 1)
-#define GET_TX_DESC_OWN(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc, 31, 1)
-
-#define SET_TX_DESC_MACID(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc+4, 0, 5, __val)
-#define SET_TX_DESC_AGG_BREAK(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc+4, 5, 1, __val)
-#define SET_TX_DESC_BK(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc+4, 6, 1, __val)
-#define SET_TX_DESC_RDG_ENABLE(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc+4, 7, 1, __val)
-#define SET_TX_DESC_QUEUE_SEL(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc+4, 8, 5, __val)
-#define SET_TX_DESC_RDG_NAV_EXT(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc+4, 13, 1, __val)
-#define SET_TX_DESC_LSIG_TXOP_EN(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc+4, 14, 1, __val)
-#define SET_TX_DESC_PIFS(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc+4, 15, 1, __val)
-#define SET_TX_DESC_RATE_ID(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc+4, 16, 4, __val)
-#define SET_TX_DESC_NAV_USE_HDR(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc+4, 20, 1, __val)
-#define SET_TX_DESC_EN_DESC_ID(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc+4, 21, 1, __val)
-#define SET_TX_DESC_SEC_TYPE(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc+4, 22, 2, __val)
-#define SET_TX_DESC_PKT_OFFSET(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc+4, 24, 8, __val)
-
-#define GET_TX_DESC_MACID(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+4, 0, 5)
-#define GET_TX_DESC_AGG_ENABLE(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+4, 5, 1)
-#define GET_TX_DESC_AGG_BREAK(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+4, 6, 1)
-#define GET_TX_DESC_RDG_ENABLE(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+4, 7, 1)
-#define GET_TX_DESC_QUEUE_SEL(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+4, 8, 5)
-#define GET_TX_DESC_RDG_NAV_EXT(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+4, 13, 1)
-#define GET_TX_DESC_LSIG_TXOP_EN(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+4, 14, 1)
-#define GET_TX_DESC_PIFS(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+4, 15, 1)
-#define GET_TX_DESC_RATE_ID(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+4, 16, 4)
-#define GET_TX_DESC_NAV_USE_HDR(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+4, 20, 1)
-#define GET_TX_DESC_EN_DESC_ID(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+4, 21, 1)
-#define GET_TX_DESC_SEC_TYPE(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+4, 22, 2)
-#define GET_TX_DESC_PKT_OFFSET(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+4, 24, 8)
-
-#define SET_TX_DESC_RTS_RC(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc+8, 0, 6, __val)
-#define SET_TX_DESC_DATA_RC(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc+8, 6, 6, __val)
-#define SET_TX_DESC_BAR_RTY_TH(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc+8, 14, 2, __val)
-#define SET_TX_DESC_MORE_FRAG(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc+8, 17, 1, __val)
-#define SET_TX_DESC_RAW(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc+8, 18, 1, __val)
-#define SET_TX_DESC_CCX(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc+8, 19, 1, __val)
-#define SET_TX_DESC_AMPDU_DENSITY(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc+8, 20, 3, __val)
-#define SET_TX_DESC_ANTSEL_A(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc+8, 24, 1, __val)
-#define SET_TX_DESC_ANTSEL_B(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc+8, 25, 1, __val)
-#define SET_TX_DESC_TX_ANT_CCK(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc+8, 26, 2, __val)
-#define SET_TX_DESC_TX_ANTL(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc+8, 28, 2, __val)
-#define SET_TX_DESC_TX_ANT_HT(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc+8, 30, 2, __val)
-
-#define GET_TX_DESC_RTS_RC(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+8, 0, 6)
-#define GET_TX_DESC_DATA_RC(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+8, 6, 6)
-#define GET_TX_DESC_BAR_RTY_TH(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+8, 14, 2)
-#define GET_TX_DESC_MORE_FRAG(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+8, 17, 1)
-#define GET_TX_DESC_RAW(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+8, 18, 1)
-#define GET_TX_DESC_CCX(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+8, 19, 1)
-#define GET_TX_DESC_AMPDU_DENSITY(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+8, 20, 3)
-#define GET_TX_DESC_ANTSEL_A(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+8, 24, 1)
-#define GET_TX_DESC_ANTSEL_B(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+8, 25, 1)
-#define GET_TX_DESC_TX_ANT_CCK(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+8, 26, 2)
-#define GET_TX_DESC_TX_ANTL(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+8, 28, 2)
-#define GET_TX_DESC_TX_ANT_HT(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+8, 30, 2)
-
-#define SET_TX_DESC_NEXT_HEAP_PAGE(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc+12, 0, 8, __val)
-#define SET_TX_DESC_TAIL_PAGE(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc+12, 8, 8, __val)
-#define SET_TX_DESC_SEQ(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc+12, 16, 12, __val)
-#define SET_TX_DESC_PKT_ID(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc+12, 28, 4, __val)
-
-#define GET_TX_DESC_NEXT_HEAP_PAGE(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+12, 0, 8)
-#define GET_TX_DESC_TAIL_PAGE(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+12, 8, 8)
-#define GET_TX_DESC_SEQ(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+12, 16, 12)
-#define GET_TX_DESC_PKT_ID(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+12, 28, 4)
-
-#define SET_TX_DESC_RTS_RATE(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc+16, 0, 5, __val)
-#define SET_TX_DESC_AP_DCFE(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc+16, 5, 1, __val)
-#define SET_TX_DESC_QOS(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc+16, 6, 1, __val)
-#define SET_TX_DESC_HWSEQ_EN(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc+16, 7, 1, __val)
-#define SET_TX_DESC_USE_RATE(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc+16, 8, 1, __val)
-#define SET_TX_DESC_DISABLE_RTS_FB(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc+16, 9, 1, __val)
-#define SET_TX_DESC_DISABLE_FB(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc+16, 10, 1, __val)
-#define SET_TX_DESC_CTS2SELF(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc+16, 11, 1, __val)
-#define SET_TX_DESC_RTS_ENABLE(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc+16, 12, 1, __val)
-#define SET_TX_DESC_HW_RTS_ENABLE(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc+16, 13, 1, __val)
-#define SET_TX_DESC_PORT_ID(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc+16, 14, 1, __val)
-#define SET_TX_DESC_WAIT_DCTS(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc+16, 18, 1, __val)
-#define SET_TX_DESC_CTS2AP_EN(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc+16, 19, 1, __val)
-#define SET_TX_DESC_TX_SUB_CARRIER(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc+16, 20, 2, __val)
-#define SET_TX_DESC_TX_STBC(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc+16, 22, 2, __val)
-#define SET_TX_DESC_DATA_SHORT(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc+16, 24, 1, __val)
-#define SET_TX_DESC_DATA_BW(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc+16, 25, 1, __val)
-#define SET_TX_DESC_RTS_SHORT(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc+16, 26, 1, __val)
-#define SET_TX_DESC_RTS_BW(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc+16, 27, 1, __val)
-#define SET_TX_DESC_RTS_SC(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc+16, 28, 2, __val)
-#define SET_TX_DESC_RTS_STBC(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc+16, 30, 2, __val)
-
-#define GET_TX_DESC_RTS_RATE(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+16, 0, 5)
-#define GET_TX_DESC_AP_DCFE(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+16, 5, 1)
-#define GET_TX_DESC_QOS(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+16, 6, 1)
-#define GET_TX_DESC_HWSEQ_EN(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+16, 7, 1)
-#define GET_TX_DESC_USE_RATE(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+16, 8, 1)
-#define GET_TX_DESC_DISABLE_RTS_FB(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+16, 9, 1)
-#define GET_TX_DESC_DISABLE_FB(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+16, 10, 1)
-#define GET_TX_DESC_CTS2SELF(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+16, 11, 1)
-#define GET_TX_DESC_RTS_ENABLE(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+16, 12, 1)
-#define GET_TX_DESC_HW_RTS_ENABLE(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+16, 13, 1)
-#define GET_TX_DESC_PORT_ID(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+16, 14, 1)
-#define GET_TX_DESC_WAIT_DCTS(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+16, 18, 1)
-#define GET_TX_DESC_CTS2AP_EN(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+16, 19, 1)
-#define GET_TX_DESC_TX_SUB_CARRIER(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+16, 20, 2)
-#define GET_TX_DESC_TX_STBC(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+16, 22, 2)
-#define GET_TX_DESC_DATA_SHORT(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+16, 24, 1)
-#define GET_TX_DESC_DATA_BW(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+16, 25, 1)
-#define GET_TX_DESC_RTS_SHORT(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+16, 26, 1)
-#define GET_TX_DESC_RTS_BW(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+16, 27, 1)
-#define GET_TX_DESC_RTS_SC(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+16, 28, 2)
-#define GET_TX_DESC_RTS_STBC(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+16, 30, 2)
-
-#define SET_TX_DESC_TX_RATE(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc+20, 0, 6, __val)
-#define SET_TX_DESC_DATA_SHORTGI(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc+20, 6, 1, __val)
-#define SET_TX_DESC_CCX_TAG(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc+20, 7, 1, __val)
-#define SET_TX_DESC_DATA_RATE_FB_LIMIT(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc+20, 8, 5, __val)
-#define SET_TX_DESC_RTS_RATE_FB_LIMIT(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc+20, 13, 4, __val)
-#define SET_TX_DESC_RETRY_LIMIT_ENABLE(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc+20, 17, 1, __val)
-#define SET_TX_DESC_DATA_RETRY_LIMIT(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc+20, 18, 6, __val)
-#define SET_TX_DESC_USB_TXAGG_NUM(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc+20, 24, 8, __val)
-
-#define GET_TX_DESC_TX_RATE(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+20, 0, 6)
-#define GET_TX_DESC_DATA_SHORTGI(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+20, 6, 1)
-#define GET_TX_DESC_CCX_TAG(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+20, 7, 1)
-#define GET_TX_DESC_DATA_RATE_FB_LIMIT(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+20, 8, 5)
-#define GET_TX_DESC_RTS_RATE_FB_LIMIT(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+20, 13, 4)
-#define GET_TX_DESC_RETRY_LIMIT_ENABLE(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+20, 17, 1)
-#define GET_TX_DESC_DATA_RETRY_LIMIT(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+20, 18, 6)
-#define GET_TX_DESC_USB_TXAGG_NUM(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+20, 24, 8)
-
-#define SET_TX_DESC_TXAGC_A(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc+24, 0, 5, __val)
-#define SET_TX_DESC_TXAGC_B(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc+24, 5, 5, __val)
-#define SET_TX_DESC_USE_MAX_LEN(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc+24, 10, 1, __val)
-#define SET_TX_DESC_MAX_AGG_NUM(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc+24, 11, 5, __val)
-#define SET_TX_DESC_MCSG1_MAX_LEN(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc+24, 16, 4, __val)
-#define SET_TX_DESC_MCSG2_MAX_LEN(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc+24, 20, 4, __val)
-#define SET_TX_DESC_MCSG3_MAX_LEN(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc+24, 24, 4, __val)
-#define SET_TX_DESC_MCS7_SGI_MAX_LEN(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc+24, 28, 4, __val)
-
-#define GET_TX_DESC_TXAGC_A(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+24, 0, 5)
-#define GET_TX_DESC_TXAGC_B(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+24, 5, 5)
-#define GET_TX_DESC_USE_MAX_LEN(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+24, 10, 1)
-#define GET_TX_DESC_MAX_AGG_NUM(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+24, 11, 5)
-#define GET_TX_DESC_MCSG1_MAX_LEN(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+24, 16, 4)
-#define GET_TX_DESC_MCSG2_MAX_LEN(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+24, 20, 4)
-#define GET_TX_DESC_MCSG3_MAX_LEN(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+24, 24, 4)
-#define GET_TX_DESC_MCS7_SGI_MAX_LEN(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+24, 28, 4)
-
-#define SET_TX_DESC_TX_BUFFER_SIZE(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc+28, 0, 16, __val)
-#define SET_TX_DESC_MCSG4_MAX_LEN(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc+28, 16, 4, __val)
-#define SET_TX_DESC_MCSG5_MAX_LEN(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc+28, 20, 4, __val)
-#define SET_TX_DESC_MCSG6_MAX_LEN(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc+28, 24, 4, __val)
-#define SET_TX_DESC_MCS15_SGI_MAX_LEN(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc+28, 28, 4, __val)
-
-#define GET_TX_DESC_TX_BUFFER_SIZE(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+28, 0, 16)
-#define GET_TX_DESC_MCSG4_MAX_LEN(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+28, 16, 4)
-#define GET_TX_DESC_MCSG5_MAX_LEN(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+28, 20, 4)
-#define GET_TX_DESC_MCSG6_MAX_LEN(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+28, 24, 4)
-#define GET_TX_DESC_MCS15_SGI_MAX_LEN(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+28, 28, 4)
-
-#define SET_TX_DESC_TX_BUFFER_ADDRESS(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc+32, 0, 32, __val)
-#define SET_TX_DESC_TX_BUFFER_ADDRESS64(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc+36, 0, 32, __val)
-
-#define GET_TX_DESC_TX_BUFFER_ADDRESS(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+32, 0, 32)
-#define GET_TX_DESC_TX_BUFFER_ADDRESS64(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+36, 0, 32)
-
-#define SET_TX_DESC_NEXT_DESC_ADDRESS(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc+40, 0, 32, __val)
-#define SET_TX_DESC_NEXT_DESC_ADDRESS64(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc+44, 0, 32, __val)
-
-#define GET_TX_DESC_NEXT_DESC_ADDRESS(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+40, 0, 32)
-#define GET_TX_DESC_NEXT_DESC_ADDRESS64(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+44, 0, 32)
-
-#define GET_RX_DESC_PKT_LEN(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc, 0, 14)
-#define GET_RX_DESC_CRC32(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc, 14, 1)
-#define GET_RX_DESC_ICV(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc, 15, 1)
-#define GET_RX_DESC_DRV_INFO_SIZE(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc, 16, 4)
-#define GET_RX_DESC_SECURITY(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc, 20, 3)
-#define GET_RX_DESC_QOS(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc, 23, 1)
-#define GET_RX_DESC_SHIFT(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc, 24, 2)
-#define GET_RX_DESC_PHYST(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc, 26, 1)
-#define GET_RX_DESC_SWDEC(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc, 27, 1)
-#define GET_RX_DESC_LS(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc, 28, 1)
-#define GET_RX_DESC_FS(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc, 29, 1)
-#define GET_RX_DESC_EOR(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc, 30, 1)
-#define GET_RX_DESC_OWN(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc, 31, 1)
-
-#define SET_RX_DESC_PKT_LEN(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc, 0, 14, __val)
-#define SET_RX_DESC_EOR(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc, 30, 1, __val)
-#define SET_RX_DESC_OWN(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc, 31, 1, __val)
-
-#define GET_RX_DESC_MACID(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+4, 0, 5)
-#define GET_RX_DESC_TID(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+4, 5, 4)
-#define GET_RX_DESC_HWRSVD(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+4, 9, 5)
-#define GET_RX_DESC_PAGGR(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+4, 14, 1)
-#define GET_RX_DESC_FAGGR(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+4, 15, 1)
-#define GET_RX_DESC_A1_FIT(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+4, 16, 4)
-#define GET_RX_DESC_A2_FIT(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+4, 20, 4)
-#define GET_RX_DESC_PAM(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+4, 24, 1)
-#define GET_RX_DESC_PWR(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+4, 25, 1)
-#define GET_RX_DESC_MD(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+4, 26, 1)
-#define GET_RX_DESC_MF(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+4, 27, 1)
-#define GET_RX_DESC_TYPE(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+4, 28, 2)
-#define GET_RX_DESC_MC(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+4, 30, 1)
-#define GET_RX_DESC_BC(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+4, 31, 1)
-#define GET_RX_DESC_SEQ(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+8, 0, 12)
-#define GET_RX_DESC_FRAG(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+8, 12, 4)
-#define GET_RX_DESC_NEXT_PKT_LEN(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+8, 16, 14)
-#define GET_RX_DESC_NEXT_IND(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+8, 30, 1)
-#define GET_RX_DESC_RSVD(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+8, 31, 1)
-
-#define GET_RX_DESC_RXMCS(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+12, 0, 6)
-#define GET_RX_DESC_RXHT(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+12, 6, 1)
-#define GET_RX_DESC_SPLCP(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+12, 8, 1)
-#define GET_RX_DESC_BW(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+12, 9, 1)
-#define GET_RX_DESC_HTC(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+12, 10, 1)
-#define GET_RX_DESC_HWPC_ERR(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+12, 14, 1)
-#define GET_RX_DESC_HWPC_IND(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+12, 15, 1)
-#define GET_RX_DESC_IV0(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+12, 16, 16)
-
-#define GET_RX_DESC_IV1(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+16, 0, 32)
-#define GET_RX_DESC_TSFL(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+20, 0, 32)
-
-#define GET_RX_DESC_BUFF_ADDR(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+24, 0, 32)
-#define GET_RX_DESC_BUFF_ADDR64(__pdesc) \
- SHIFT_AND_MASK_LE(__pdesc+28, 0, 32)
-
-#define SET_RX_DESC_BUFF_ADDR(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc+24, 0, 32, __val)
-#define SET_RX_DESC_BUFF_ADDR64(__pdesc, __val) \
- SET_BITS_OFFSET_LE(__pdesc+28, 0, 32, __val)
-
-#define CLEAR_PCI_TX_DESC_CONTENT(__pdesc, _size) \
- memset(__pdesc, 0, min_t(size_t, _size, TX_DESC_NEXT_DESC_OFFSET))
+static inline void set_tx_desc_pkt_size(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits(__pdesc, __val, GENMASK(15, 0));
+}
+
+static inline void set_tx_desc_offset(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits(__pdesc, __val, GENMASK(23, 16));
+}
+
+static inline void set_tx_desc_bmc(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits(__pdesc, __val, BIT(24));
+}
+
+static inline void set_tx_desc_htc(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits(__pdesc, __val, BIT(25));
+}
+
+static inline void set_tx_desc_last_seg(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits(__pdesc, __val, BIT(26));
+}
+
+static inline void set_tx_desc_first_seg(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits(__pdesc, __val, BIT(27));
+}
+
+static inline void set_tx_desc_linip(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits(__pdesc, __val, BIT(28));
+}
+
+static inline void set_tx_desc_own(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits(__pdesc, __val, BIT(31));
+}
+
+static inline int get_tx_desc_own(__le32 *__pdesc)
+{
+ return le32_get_bits(*(__pdesc), BIT(31));
+}
+
+static inline void set_tx_desc_macid(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits((__pdesc + 1), __val, GENMASK(4, 0));
+}
+
+static inline void set_tx_desc_agg_break(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits((__pdesc + 1), __val, BIT(5));
+}
+
+static inline void set_tx_desc_rdg_enable(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits((__pdesc + 1), __val, BIT(7));
+}
+
+static inline void set_tx_desc_queue_sel(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits((__pdesc + 1), __val, GENMASK(12, 8));
+}
+
+static inline void set_tx_desc_rate_id(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits((__pdesc + 1), __val, GENMASK(19, 16));
+}
+
+static inline void set_tx_desc_sec_type(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits((__pdesc + 1), __val, GENMASK(23, 22));
+}
+
+static inline void set_tx_desc_more_frag(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits((__pdesc + 2), __val, BIT(17));
+}
+
+static inline void set_tx_desc_ampdu_density(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits((__pdesc + 2), __val, GENMASK(22, 20));
+}
+
+static inline void set_tx_desc_seq(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits((__pdesc + 3), __val, GENMASK(27, 16));
+}
+
+static inline void set_tx_desc_pkt_id(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits((__pdesc + 3), __val, GENMASK(31, 28));
+}
+
+static inline void set_tx_desc_rts_rate(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits((__pdesc + 4), __val, GENMASK(4, 0));
+}
+
+static inline void set_tx_desc_qos(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits((__pdesc + 4), __val, BIT(6));
+}
+
+static inline void set_tx_desc_hwseq_en(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits((__pdesc + 4), __val, BIT(7));
+}
+
+static inline void set_tx_desc_use_rate(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits((__pdesc + 4), __val, BIT(8));
+}
+
+static inline void set_tx_desc_disable_fb(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits((__pdesc + 4), __val, BIT(10));
+}
+
+static inline void set_tx_desc_cts2self(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits((__pdesc + 4), __val, BIT(11));
+}
+
+static inline void set_tx_desc_rts_enable(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits((__pdesc + 4), __val, BIT(12));
+}
+
+static inline void set_tx_desc_hw_rts_enable(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits((__pdesc + 4), __val, BIT(13));
+}
+
+static inline void set_tx_desc_tx_sub_carrier(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits((__pdesc + 4), __val, GENMASK(21, 20));
+}
+
+static inline void set_tx_desc_data_bw(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits((__pdesc + 4), __val, BIT(25));
+}
+
+static inline void set_tx_desc_rts_short(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits((__pdesc + 4), __val, BIT(26));
+}
+
+static inline void set_tx_desc_rts_bw(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits((__pdesc + 4), __val, BIT(27));
+}
+
+static inline void set_tx_desc_rts_sc(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits((__pdesc + 4), __val, GENMASK(29, 28));
+}
+
+static inline void set_tx_desc_rts_stbc(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits((__pdesc + 4), __val, GENMASK(31, 30));
+}
+
+static inline void set_tx_desc_tx_rate(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits((__pdesc + 5), __val, GENMASK(5, 0));
+}
+
+static inline void set_tx_desc_data_shortgi(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits((__pdesc + 5), __val, BIT(6));
+}
+
+static inline void set_tx_desc_data_rate_fb_limit(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits((__pdesc + 5), __val, GENMASK(12, 8));
+}
+
+static inline void set_tx_desc_rts_rate_fb_limit(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits((__pdesc + 5), __val, GENMASK(16, 13));
+}
+
+static inline void set_tx_desc_max_agg_num(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits((__pdesc + 6), __val, GENMASK(15, 11));
+}
+
+static inline void set_tx_desc_tx_buffer_size(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits((__pdesc + 7), __val, GENMASK(15, 0));
+}
+
+static inline void set_tx_desc_tx_buffer_address(__le32 *__pdesc, u32 __val)
+{
+ *(__pdesc + 8) = cpu_to_le32(__val);
+}
+
+static inline u32 get_tx_desc_tx_buffer_address(__le32 *__pdesc)
+{
+ return le32_to_cpu(*(__pdesc + 8));
+}
+
+static inline void set_tx_desc_next_desc_address(__le32 *__pdesc, u32 __val)
+{
+ *(__pdesc + 10) = cpu_to_le32(__val);
+}
+
+static inline int get_rx_desc_pkt_len(__le32 *__pdesc)
+{
+ return le32_get_bits(*(__pdesc), GENMASK(13, 0));
+}
+
+static inline int get_rx_desc_crc32(__le32 *__pdesc)
+{
+ return le32_get_bits(*(__pdesc), BIT(14));
+}
+
+static inline int get_rx_desc_icv(__le32 *__pdesc)
+{
+ return le32_get_bits(*(__pdesc), BIT(15));
+}
+
+static inline int get_rx_desc_drv_info_size(__le32 *__pdesc)
+{
+ return le32_get_bits(*(__pdesc), GENMASK(19, 16));
+}
+
+static inline int get_rx_desc_shift(__le32 *__pdesc)
+{
+ return le32_get_bits(*(__pdesc), GENMASK(25, 24));
+}
+
+static inline int get_rx_desc_physt(__le32 *__pdesc)
+{
+ return le32_get_bits(*(__pdesc), BIT(26));
+}
+
+static inline int get_rx_desc_swdec(__le32 *__pdesc)
+{
+ return le32_get_bits(*(__pdesc), BIT(27));
+}
+
+static inline int get_rx_desc_own(__le32 *__pdesc)
+{
+ return le32_get_bits(*(__pdesc), BIT(31));
+}
+
+static inline void set_rx_desc_pkt_len(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits(__pdesc, __val, GENMASK(13, 0));
+}
+
+static inline void set_rx_desc_eor(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits(__pdesc, __val, BIT(30));
+}
+
+static inline void set_rx_desc_own(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits(__pdesc, __val, BIT(31));
+}
+
+static inline int get_rx_desc_paggr(__le32 *__pdesc)
+{
+ return le32_get_bits(*(__pdesc + 1), BIT(14));
+}
+
+static inline int get_rx_desc_faggr(__le32 *__pdesc)
+{
+ return le32_get_bits(*(__pdesc + 1), BIT(15));
+}
+
+static inline int get_rx_desc_rxmcs(__le32 *__pdesc)
+{
+ return le32_get_bits(*(__pdesc + 3), GENMASK(5, 0));
+}
+
+static inline int get_rx_desc_rxht(__le32 *__pdesc)
+{
+ return le32_get_bits(*(__pdesc + 3), BIT(6));
+}
+
+static inline int get_rx_desc_splcp(__le32 *__pdesc)
+{
+ return le32_get_bits(*(__pdesc + 3), BIT(8));
+}
+
+static inline int get_rx_desc_bw(__le32 *__pdesc)
+{
+ return le32_get_bits(*(__pdesc + 3), BIT(9));
+}
+
+static inline u32 get_rx_desc_tsfl(__le32 *__pdesc)
+{
+ return le32_to_cpu(*(__pdesc + 5));
+}
+
+static inline u32 get_rx_desc_buff_addr(__le32 *__pdesc)
+{
+ return le32_to_cpu(*(__pdesc + 6));
+}
+
+static inline void set_rx_desc_buff_addr(__le32 *__pdesc, u32 __val)
+{
+ *(__pdesc + 6) = cpu_to_le32(__val);
+}
+
+static inline void clear_pci_tx_desc_content(__le32 *__pdesc, int _size)
+{
+ memset(__pdesc, 0, min_t(size_t, _size, TX_DESC_NEXT_DESC_OFFSET));
+}
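+
+/* Usage sketch (illustrative only): each set_tx_desc_*() helper
+ * read-modify-writes one field of the little-endian descriptor in
+ * place, so filling e.g. a 16-dword descriptor looks like:
+ *
+ *   __le32 txdesc[16] = {};
+ *
+ *   set_tx_desc_first_seg(txdesc, 1);
+ *   set_tx_desc_last_seg(txdesc, 1);
+ *   set_tx_desc_own(txdesc, 1);
+ *
+ * The OWN bit (bit 31 of dword 0) hands the descriptor to the DMA
+ * engine, so it is set last; get_tx_desc_own() polls it to learn when
+ * the hardware has released the descriptor again.
+ */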
struct rx_fwinfo_92c {
u8 gain_trsw[4];
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/sw.c
index c1c34dca39d2..ab3e4aebad39 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/sw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/sw.c
@@ -39,8 +39,6 @@ static int rtl92cu_init_sw_vars(struct ieee80211_hw *hw)
rtlpriv->dm.dm_flag = 0;
rtlpriv->dm.disable_framebursting = false;
rtlpriv->dm.thermalvalue = 0;
- rtlpriv->cfg->mod_params->sw_crypto =
- rtlpriv->cfg->mod_params->sw_crypto;
/* for firmware buf */
rtlpriv->rtlhal.pfirmware = vzalloc(0x4000);
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/sw.c
index d1d84e7d47a4..1c7ee569f4bf 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/sw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/sw.c
@@ -161,8 +161,6 @@ static int rtl92s_init_sw_vars(struct ieee80211_hw *hw)
rtlpriv->psc.inactiveps = rtlpriv->cfg->mod_params->inactiveps;
rtlpriv->psc.swctrl_lps = rtlpriv->cfg->mod_params->swctrl_lps;
rtlpriv->psc.fwctrl_lps = rtlpriv->cfg->mod_params->fwctrl_lps;
- rtlpriv->cfg->mod_params->sw_crypto =
- rtlpriv->cfg->mod_params->sw_crypto;
if (!rtlpriv->psc.inactiveps)
pr_info("Power Save off (module option)\n");
if (!rtlpriv->psc.fwctrl_lps)
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/sw.c
index 4b370410c83c..5702ac6deebf 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/sw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/sw.c
@@ -129,10 +129,6 @@ int rtl8723e_init_sw_vars(struct ieee80211_hw *hw)
rtlpriv->psc.swctrl_lps = rtlpriv->cfg->mod_params->swctrl_lps;
rtlpriv->psc.fwctrl_lps = rtlpriv->cfg->mod_params->fwctrl_lps;
rtlpci->msi_support = rtlpriv->cfg->mod_params->msi_support;
- rtlpriv->cfg->mod_params->sw_crypto =
- rtlpriv->cfg->mod_params->sw_crypto;
- rtlpriv->cfg->mod_params->disable_watchdog =
- rtlpriv->cfg->mod_params->disable_watchdog;
if (rtlpriv->cfg->mod_params->disable_watchdog)
pr_info("watchdog disabled\n");
rtlpriv->psc.reg_fwctrl_lps = 3;
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/sw.c
index 00e6254bf82b..3c8528f0ecb3 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/sw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/sw.c
@@ -128,10 +128,6 @@ int rtl8723be_init_sw_vars(struct ieee80211_hw *hw)
rtlpriv->psc.swctrl_lps = rtlpriv->cfg->mod_params->swctrl_lps;
rtlpriv->psc.fwctrl_lps = rtlpriv->cfg->mod_params->fwctrl_lps;
rtlpci->msi_support = rtlpriv->cfg->mod_params->msi_support;
- rtlpriv->cfg->mod_params->sw_crypto =
- rtlpriv->cfg->mod_params->sw_crypto;
- rtlpriv->cfg->mod_params->disable_watchdog =
- rtlpriv->cfg->mod_params->disable_watchdog;
if (rtlpriv->cfg->mod_params->disable_watchdog)
pr_info("watchdog disabled\n");
rtlpriv->psc.reg_fwctrl_lps = 2;
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/sw.c
index eec7c4ecf3ad..3def6a2b3450 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/sw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/sw.c
@@ -145,10 +145,6 @@ int rtl8821ae_init_sw_vars(struct ieee80211_hw *hw)
rtlpriv->psc.fwctrl_lps = rtlpriv->cfg->mod_params->fwctrl_lps;
rtlpci->msi_support = rtlpriv->cfg->mod_params->msi_support;
rtlpci->int_clear = rtlpriv->cfg->mod_params->int_clear;
- rtlpriv->cfg->mod_params->sw_crypto =
- rtlpriv->cfg->mod_params->sw_crypto;
- rtlpriv->cfg->mod_params->disable_watchdog =
- rtlpriv->cfg->mod_params->disable_watchdog;
if (rtlpriv->cfg->mod_params->disable_watchdog)
pr_info("watchdog disabled\n");
rtlpriv->psc.reg_fwctrl_lps = 2;
diff --git a/drivers/net/wireless/realtek/rtw88/Makefile b/drivers/net/wireless/realtek/rtw88/Makefile
index e0bfefd154af..77edee2df8b8 100644
--- a/drivers/net/wireless/realtek/rtw88/Makefile
+++ b/drivers/net/wireless/realtek/rtw88/Makefile
@@ -9,6 +9,7 @@ rtw88-y += main.o \
rx.o \
mac.o \
phy.o \
+ coex.o \
efuse.o \
fw.o \
ps.o \
diff --git a/drivers/net/wireless/realtek/rtw88/coex.c b/drivers/net/wireless/realtek/rtw88/coex.c
new file mode 100644
index 000000000000..4577fceddc5e
--- /dev/null
+++ b/drivers/net/wireless/realtek/rtw88/coex.c
@@ -0,0 +1,2507 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/* Copyright(c) 2018-2019 Realtek Corporation
+ */
+
+#include "main.h"
+#include "coex.h"
+#include "fw.h"
+#include "ps.h"
+#include "debug.h"
+#include "reg.h"
+
+static u8 rtw_coex_next_rssi_state(struct rtw_dev *rtwdev, u8 pre_state,
+ u8 rssi, u8 rssi_thresh)
+{
+ struct rtw_chip_info *chip = rtwdev->chip;
+ u8 tol = chip->rssi_tolerance;
+ u8 next_state;
+
+ if (pre_state == COEX_RSSI_STATE_LOW ||
+ pre_state == COEX_RSSI_STATE_STAY_LOW) {
+ if (rssi >= (rssi_thresh + tol))
+ next_state = COEX_RSSI_STATE_HIGH;
+ else
+ next_state = COEX_RSSI_STATE_STAY_LOW;
+ } else {
+ if (rssi < rssi_thresh)
+ next_state = COEX_RSSI_STATE_LOW;
+ else
+ next_state = COEX_RSSI_STATE_STAY_HIGH;
+ }
+
+ return next_state;
+}
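+
+/* Worked example of the hysteresis above: with rssi_thresh = 30 and
+ * chip->rssi_tolerance = 2, a LOW link only moves to HIGH once
+ * rssi >= 32, while a HIGH link only drops back to LOW once
+ * rssi < 30.  Readings of 30 or 31 keep the previous state
+ * (STAY_LOW/STAY_HIGH), so a single noisy RSSI sample cannot flap
+ * the coexistence strategy.
+ */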
+
+static void rtw_coex_limited_tx(struct rtw_dev *rtwdev,
+ bool tx_limit_en, bool ampdu_limit_en)
+{
+ struct rtw_chip_info *chip = rtwdev->chip;
+ struct rtw_coex *coex = &rtwdev->coex;
+ struct rtw_coex_stat *coex_stat = &coex->stat;
+ bool wifi_under_b_mode = false;
+
+ if (!chip->scbd_support)
+ return;
+
+ /* nothing to do if both limits already match the request */
+ if (coex_stat->wl_tx_limit_en == tx_limit_en &&
+ coex_stat->wl_ampdu_limit_en == ampdu_limit_en)
+ return;
+
+ if (!coex_stat->wl_tx_limit_en) {
+ coex_stat->darfrc = rtw_read32(rtwdev, REG_DARFRC);
+ coex_stat->darfrch = rtw_read32(rtwdev, REG_DARFRCH);
+ coex_stat->retry_limit = rtw_read16(rtwdev, REG_RETRY_LIMIT);
+ }
+
+ if (!coex_stat->wl_ampdu_limit_en)
+ coex_stat->ampdu_max_time =
+ rtw_read8(rtwdev, REG_AMPDU_MAX_TIME_V1);
+
+ coex_stat->wl_tx_limit_en = tx_limit_en;
+ coex_stat->wl_ampdu_limit_en = ampdu_limit_en;
+
+ if (tx_limit_en) {
+ /* mark packets as BT-polluted for tx rate adaptation, so that
+ * tx retries broken by the PTA are not counted
+ */
+ rtw_write8_set(rtwdev, REG_TX_HANG_CTRL, BIT_EN_GNT_BT_AWAKE);
+
+ /* set queue life time to avoid the case where the tx retry
+ * limit can't be reached because tx is always broken by GNT_BT
+ */
+ rtw_write8_set(rtwdev, REG_LIFETIME_EN, 0xf);
+ rtw_write16(rtwdev, REG_RETRY_LIMIT, 0x0808); /* max tx retry = 8 */
+
+ /* auto rate fallback step within 8 retries */
+ if (wifi_under_b_mode) {
+ rtw_write32(rtwdev, REG_DARFRC, 0x1000000);
+ rtw_write32(rtwdev, REG_DARFRCH, 0x1010101);
+ } else {
+ rtw_write32(rtwdev, REG_DARFRC, 0x1000000);
+ rtw_write32(rtwdev, REG_DARFRCH, 0x4030201);
+ }
+ } else {
+ rtw_write8_clr(rtwdev, REG_TX_HANG_CTRL, BIT_EN_GNT_BT_AWAKE);
+ rtw_write8_clr(rtwdev, REG_LIFETIME_EN, 0xf);
+
+ rtw_write16(rtwdev, REG_RETRY_LIMIT, coex_stat->retry_limit);
+ rtw_write32(rtwdev, REG_DARFRC, coex_stat->darfrc);
+ rtw_write32(rtwdev, REG_DARFRCH, coex_stat->darfrch);
+ }
+
+ if (ampdu_limit_en)
+ rtw_write8(rtwdev, REG_AMPDU_MAX_TIME_V1, 0x20);
+ else
+ rtw_write8(rtwdev, REG_AMPDU_MAX_TIME_V1,
+ coex_stat->ampdu_max_time);
+}
+
+static void rtw_coex_limited_wl(struct rtw_dev *rtwdev)
+{
+ struct rtw_coex *coex = &rtwdev->coex;
+ struct rtw_coex_dm *coex_dm = &coex->dm;
+ struct rtw_coex_stat *coex_stat = &coex->stat;
+ bool tx_limit = false;
+ bool tx_agg_ctrl = false;
+
+ if (coex->under_5g ||
+ coex_dm->bt_status == COEX_BTSTATUS_NCON_IDLE) {
+ /* no need to limit tx */
+ } else {
+ tx_limit = true;
+ if (coex_stat->bt_hid_exist || coex_stat->bt_hfp_exist ||
+ coex_stat->bt_hid_pair_num > 0)
+ tx_agg_ctrl = true;
+ }
+
+ rtw_coex_limited_tx(rtwdev, tx_limit, tx_agg_ctrl);
+}
+
+static void rtw_coex_wl_ccklock_action(struct rtw_dev *rtwdev)
+{
+ struct rtw_coex *coex = &rtwdev->coex;
+ struct rtw_coex_stat *coex_stat = &coex->stat;
+ u8 para[6] = {0};
+
+ if (coex->stop_dm)
+ return;
+
+ para[0] = COEX_H2C69_WL_LEAKAP;
+
+ if (coex_stat->tdma_timer_base == 3 && coex_stat->wl_slot_extend) {
+ para[1] = PARA1_H2C69_DIS_5MS; /* disable 5ms extend */
+ rtw_fw_bt_wifi_control(rtwdev, para[0], &para[1]);
+ coex_stat->wl_slot_extend = false;
+ coex_stat->cnt_wl[COEX_CNT_WL_5MS_NOEXTEND] = 0;
+ return;
+ }
+
+ if (coex_stat->wl_slot_extend && coex_stat->wl_force_lps_ctrl &&
+ !coex_stat->wl_cck_lock_ever) {
+ if (coex_stat->wl_fw_dbg_info[7] <= 5)
+ coex_stat->cnt_wl[COEX_CNT_WL_5MS_NOEXTEND]++;
+ else
+ coex_stat->cnt_wl[COEX_CNT_WL_5MS_NOEXTEND] = 0;
+
+ if (coex_stat->cnt_wl[COEX_CNT_WL_5MS_NOEXTEND] == 7) {
+ para[1] = 0x1; /* disable 5ms extend */
+ rtw_fw_bt_wifi_control(rtwdev, para[0], &para[1]);
+ coex_stat->wl_slot_extend = false;
+ coex_stat->cnt_wl[COEX_CNT_WL_5MS_NOEXTEND] = 0;
+ }
+ } else if (!coex_stat->wl_slot_extend && coex_stat->wl_cck_lock) {
+ para[1] = 0x0; /* enable 5ms extend */
+ rtw_fw_bt_wifi_control(rtwdev, para[0], &para[1]);
+ coex_stat->wl_slot_extend = true;
+ }
+}
+
+static void rtw_coex_wl_ccklock_detect(struct rtw_dev *rtwdev)
+{
+ struct rtw_coex *coex = &rtwdev->coex;
+ struct rtw_coex_stat *coex_stat = &coex->stat;
+
+ /* TODO: wait for rx_rate_change_notify implement */
+ coex_stat->wl_cck_lock = false;
+ coex_stat->wl_cck_lock_pre = false;
+ coex_stat->wl_cck_lock_ever = false;
+}
+
+static void rtw_coex_wl_noisy_detect(struct rtw_dev *rtwdev)
+{
+ struct rtw_coex *coex = &rtwdev->coex;
+ struct rtw_coex_stat *coex_stat = &coex->stat;
+ struct rtw_dm_info *dm_info = &rtwdev->dm_info;
+ u32 cnt_cck;
+
+ /* wifi noisy environment identification */
+ cnt_cck = dm_info->cck_ok_cnt + dm_info->cck_err_cnt;
+
+ if (!coex_stat->wl_gl_busy) {
+ if (cnt_cck > 250) {
+ if (coex_stat->cnt_wl[COEX_CNT_WL_NOISY2] < 5)
+ coex_stat->cnt_wl[COEX_CNT_WL_NOISY2]++;
+
+ if (coex_stat->cnt_wl[COEX_CNT_WL_NOISY2] == 5) {
+ coex_stat->cnt_wl[COEX_CNT_WL_NOISY0] = 0;
+ coex_stat->cnt_wl[COEX_CNT_WL_NOISY1] = 0;
+ }
+ } else if (cnt_cck < 100) {
+ if (coex_stat->cnt_wl[COEX_CNT_WL_NOISY0] < 5)
+ coex_stat->cnt_wl[COEX_CNT_WL_NOISY0]++;
+
+ if (coex_stat->cnt_wl[COEX_CNT_WL_NOISY0] == 5) {
+ coex_stat->cnt_wl[COEX_CNT_WL_NOISY1] = 0;
+ coex_stat->cnt_wl[COEX_CNT_WL_NOISY2] = 0;
+ }
+ } else {
+ if (coex_stat->cnt_wl[COEX_CNT_WL_NOISY1] < 5)
+ coex_stat->cnt_wl[COEX_CNT_WL_NOISY1]++;
+
+ if (coex_stat->cnt_wl[COEX_CNT_WL_NOISY1] == 5) {
+ coex_stat->cnt_wl[COEX_CNT_WL_NOISY0] = 0;
+ coex_stat->cnt_wl[COEX_CNT_WL_NOISY2] = 0;
+ }
+ }
+
+ if (coex_stat->cnt_wl[COEX_CNT_WL_NOISY2] == 5)
+ coex_stat->wl_noisy_level = 2;
+ else if (coex_stat->cnt_wl[COEX_CNT_WL_NOISY1] == 5)
+ coex_stat->wl_noisy_level = 1;
+ else
+ coex_stat->wl_noisy_level = 0;
+ }
+}
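+
+/* Example of the voting above (illustrative): five consecutive
+ * not-busy samples with more than 250 CCK frames (ok + err) saturate
+ * the NOISY2 counter and clear the other two, so wl_noisy_level
+ * becomes 2; a later run of five quiet samples (fewer than 100 CCK
+ * frames) saturates NOISY0, clears NOISY1/NOISY2 and drops the level
+ * back to 0.  Nothing is updated while wl_gl_busy is set.
+ */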
+
+static void rtw_coex_tdma_timer_base(struct rtw_dev *rtwdev, u8 type)
+{
+ struct rtw_coex *coex = &rtwdev->coex;
+ struct rtw_coex_stat *coex_stat = &coex->stat;
+ u8 para[2] = {0};
+
+ if (coex_stat->tdma_timer_base == type)
+ return;
+
+ coex_stat->tdma_timer_base = type;
+
+ para[0] = COEX_H2C69_TDMA_SLOT;
+
+ if (type == 3) /* 4-slot */
+ para[1] = PARA1_H2C69_TDMA_4SLOT;
+ else /* 2-slot */
+ para[1] = PARA1_H2C69_TDMA_2SLOT;
+
+ rtw_fw_bt_wifi_control(rtwdev, para[0], &para[1]);
+
+ /* no 5ms_wl_slot_extend for 4-slot mode */
+ if (coex_stat->tdma_timer_base == 3)
+ rtw_coex_wl_ccklock_action(rtwdev);
+}
+
+static void rtw_coex_set_wl_pri_mask(struct rtw_dev *rtwdev, u8 bitmap,
+ u8 data)
+{
+ u32 addr;
+
+ addr = REG_BT_COEX_TABLE_H + (bitmap / 8);
+ bitmap = bitmap % 8;
+
+ rtw_write8_mask(rtwdev, addr, BIT(bitmap), data);
+}
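+
+/* Worked example: rtw_coex_set_wl_pri_mask(rtwdev, 9, 1) resolves to
+ * addr = REG_BT_COEX_TABLE_H + 1 and BIT(1): the bitmap argument is a
+ * flat bit index across the consecutive one-byte priority registers.
+ */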
+
+void rtw_coex_write_scbd(struct rtw_dev *rtwdev, u16 bitpos, bool set)
+{
+ struct rtw_chip_info *chip = rtwdev->chip;
+ struct rtw_coex *coex = &rtwdev->coex;
+ struct rtw_coex_stat *coex_stat = &coex->stat;
+ u16 val = 0x2;
+
+ if (!chip->scbd_support)
+ return;
+
+ val |= coex_stat->score_board;
+
+ /* scbd[10] has chip-specific meaning: on 8822b it is "CQDDR on",
+ * on 8822c it is "no fix 2M"
+ */
+ if (!chip->new_scbd10_def && (bitpos & COEX_SCBD_FIX2M)) {
+ if (set)
+ val &= ~COEX_SCBD_FIX2M;
+ else
+ val |= COEX_SCBD_FIX2M;
+ } else {
+ if (set)
+ val |= bitpos;
+ else
+ val &= ~bitpos;
+ }
+
+ if (val != coex_stat->score_board) {
+ coex_stat->score_board = val;
+ val |= BIT_BT_INT_EN;
+ rtw_write16(rtwdev, REG_WIFI_BT_INFO, val);
+ }
+}
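+
+/* Usage sketch: the scoreboard is a 16-bit mailbox shared with the BT
+ * firmware.  A call such as
+ *
+ *   rtw_coex_write_scbd(rtwdev, COEX_SCBD_TDMA, true);
+ *
+ * merges the bit into the cached copy and, only when the value really
+ * changed, writes it back with BIT_BT_INT_EN set so the BT side is
+ * notified to re-read it.
+ */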
+
+static u16 rtw_coex_read_scbd(struct rtw_dev *rtwdev)
+{
+ struct rtw_chip_info *chip = rtwdev->chip;
+
+ if (!chip->scbd_support)
+ return 0;
+
+ return rtw_read16(rtwdev, REG_WIFI_BT_INFO) & ~BIT_BT_INT_EN;
+}
+
+static void rtw_coex_check_rfk(struct rtw_dev *rtwdev)
+{
+ struct rtw_chip_info *chip = rtwdev->chip;
+ struct rtw_coex *coex = &rtwdev->coex;
+ struct rtw_coex_stat *coex_stat = &coex->stat;
+ struct rtw_coex_rfe *coex_rfe = &coex->rfe;
+ u8 cnt = 0;
+ u32 wait_cnt;
+ bool btk, wlk;
+
+ if (coex_rfe->wlg_at_btg && chip->scbd_support &&
+ coex_stat->bt_iqk_state != 0xff) {
+ wait_cnt = COEX_RFK_TIMEOUT / COEX_MIN_DELAY;
+ do {
+ /* BT RFK */
+ btk = !!(rtw_coex_read_scbd(rtwdev) & COEX_SCBD_BT_RFK);
+
+ /* WL RFK */
+ wlk = !!(rtw_read8(rtwdev, REG_ARFR4) & BIT_WL_RFK);
+
+ if (!btk && !wlk)
+ break;
+
+ mdelay(COEX_MIN_DELAY);
+ } while (++cnt < wait_cnt);
+
+ if (cnt >= wait_cnt)
+ coex_stat->bt_iqk_state = 0xff;
+ }
+}
+
+static void rtw_coex_query_bt_info(struct rtw_dev *rtwdev)
+{
+ struct rtw_coex *coex = &rtwdev->coex;
+ struct rtw_coex_stat *coex_stat = &coex->stat;
+
+ if (coex_stat->bt_disabled)
+ return;
+
+ rtw_fw_query_bt_info(rtwdev);
+}
+
+static void rtw_coex_monitor_bt_enable(struct rtw_dev *rtwdev)
+{
+ struct rtw_chip_info *chip = rtwdev->chip;
+ struct rtw_coex *coex = &rtwdev->coex;
+ struct rtw_coex_stat *coex_stat = &coex->stat;
+ struct rtw_coex_dm *coex_dm = &coex->dm;
+ bool bt_disabled = false;
+ u16 score_board;
+
+ if (chip->scbd_support) {
+ score_board = rtw_coex_read_scbd(rtwdev);
+ bt_disabled = !(score_board & COEX_SCBD_ONOFF);
+ }
+
+ if (coex_stat->bt_disabled != bt_disabled) {
+ rtw_dbg(rtwdev, RTW_DBG_COEX, "coex: BT state changed (%d) -> (%d)\n",
+ coex_stat->bt_disabled, bt_disabled);
+
+ coex_stat->bt_disabled = bt_disabled;
+ coex_stat->bt_ble_scan_type = 0;
+ coex_dm->cur_bt_lna_lvl = 0;
+ }
+
+ if (!coex_stat->bt_disabled) {
+ coex_stat->bt_reenable = true;
+ ieee80211_queue_delayed_work(rtwdev->hw,
+ &coex->bt_reenable_work, 15 * HZ);
+ } else {
+ coex_stat->bt_mailbox_reply = false;
+ coex_stat->bt_reenable = false;
+ }
+}
+
+static void rtw_coex_update_wl_link_info(struct rtw_dev *rtwdev, u8 reason)
+{
+ struct rtw_coex *coex = &rtwdev->coex;
+ struct rtw_coex_stat *coex_stat = &coex->stat;
+ struct rtw_coex_dm *coex_dm = &coex->dm;
+ struct rtw_chip_info *chip = rtwdev->chip;
+ struct rtw_traffic_stats *stats = &rtwdev->stats;
+ bool is_5G = false;
+ bool scan = false, link = false;
+ int i;
+ u8 rssi_state;
+ u8 rssi_step;
+ u8 rssi;
+
+ scan = rtw_flag_check(rtwdev, RTW_FLAG_SCANNING);
+ coex_stat->wl_connected = !!rtwdev->sta_cnt;
+ coex_stat->wl_gl_busy = rtw_flag_check(rtwdev, RTW_FLAG_BUSY_TRAFFIC);
+
+ if (stats->tx_throughput > stats->rx_throughput)
+ coex_stat->wl_tput_dir = COEX_WL_TPUT_TX;
+ else
+ coex_stat->wl_tput_dir = COEX_WL_TPUT_RX;
+
+ if (scan || link || reason == COEX_RSN_2GCONSTART ||
+ reason == COEX_RSN_2GSCANSTART || reason == COEX_RSN_2GSWITCHBAND)
+ coex_stat->wl_linkscan_proc = true;
+ else
+ coex_stat->wl_linkscan_proc = false;
+
+ rtw_coex_wl_noisy_detect(rtwdev);
+
+ for (i = 0; i < 4; i++) {
+ rssi_state = coex_dm->wl_rssi_state[i];
+ rssi_step = chip->wl_rssi_step[i];
+ rssi = rtwdev->dm_info.min_rssi;
+ rssi_state = rtw_coex_next_rssi_state(rtwdev, rssi_state,
+ rssi, rssi_step);
+ coex_dm->wl_rssi_state[i] = rssi_state;
+ }
+
+ switch (reason) {
+ case COEX_RSN_5GSCANSTART:
+ case COEX_RSN_5GSWITCHBAND:
+ case COEX_RSN_5GCONSTART:
+ is_5G = true;
+ break;
+ case COEX_RSN_2GSCANSTART:
+ case COEX_RSN_2GSWITCHBAND:
+ case COEX_RSN_2GCONSTART:
+ is_5G = false;
+ break;
+ default:
+ is_5G = rtwdev->hal.current_band_type == RTW_BAND_5G;
+ break;
+ }
+
+ coex->under_5g = is_5G;
+}
+
+static inline u8 *get_payload_from_coex_resp(struct sk_buff *resp)
+{
+ struct rtw_c2h_cmd *c2h;
+ u32 pkt_offset;
+
+ pkt_offset = *((u32 *)resp->cb);
+ c2h = (struct rtw_c2h_cmd *)(resp->data + pkt_offset);
+
+ return c2h->payload;
+}
+
+void rtw_coex_info_response(struct rtw_dev *rtwdev, struct sk_buff *skb)
+{
+ struct rtw_coex *coex = &rtwdev->coex;
+ u8 *payload = get_payload_from_coex_resp(skb);
+
+ if (payload[0] != COEX_RESP_ACK_BY_WL_FW)
+ return;
+
+ skb_queue_tail(&coex->queue, skb);
+ wake_up(&coex->wait);
+}
+
+static struct sk_buff *rtw_coex_info_request(struct rtw_dev *rtwdev,
+ struct rtw_coex_info_req *req)
+{
+ struct rtw_coex *coex = &rtwdev->coex;
+ struct sk_buff *skb_resp = NULL;
+
+ mutex_lock(&coex->mutex);
+
+ rtw_fw_query_bt_mp_info(rtwdev, req);
+
+ if (!wait_event_timeout(coex->wait, !skb_queue_empty(&coex->queue),
+ COEX_REQUEST_TIMEOUT)) {
+ rtw_err(rtwdev, "coex request time out\n");
+ goto out;
+ }
+
+ skb_resp = skb_dequeue(&coex->queue);
+ if (!skb_resp) {
+ rtw_err(rtwdev, "failed to get coex info response\n");
+ goto out;
+ }
+
+out:
+ mutex_unlock(&coex->mutex);
+ return skb_resp;
+}
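+
+/* Call pattern (rtw_coex_get_bt_scan_type() below is a real user):
+ * fill a struct rtw_coex_info_req with an op code, call
+ * rtw_coex_info_request(), and on a non-NULL return parse the payload
+ * via get_payload_from_coex_resp() and free the skb with
+ * dev_kfree_skb_any().  A NULL return means the firmware did not
+ * answer within COEX_REQUEST_TIMEOUT.
+ */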
+
+static bool rtw_coex_get_bt_scan_type(struct rtw_dev *rtwdev, u8 *scan_type)
+{
+ struct rtw_coex_info_req req = {0};
+ struct sk_buff *skb;
+ u8 *payload;
+ bool ret = false;
+
+ req.op_code = BT_MP_INFO_OP_SCAN_TYPE;
+ skb = rtw_coex_info_request(rtwdev, &req);
+ if (!skb)
+ goto out;
+
+ payload = get_payload_from_coex_resp(skb);
+ *scan_type = GET_COEX_RESP_BT_SCAN_TYPE(payload);
+ dev_kfree_skb_any(skb);
+ ret = true;
+
+out:
+ return ret;
+}
+
+static bool rtw_coex_set_lna_constrain_level(struct rtw_dev *rtwdev,
+ u8 lna_constrain_level)
+{
+ struct rtw_coex_info_req req = {0};
+ struct sk_buff *skb;
+ bool ret = false;
+
+ req.op_code = BT_MP_INFO_OP_LNA_CONSTRAINT;
+ req.para1 = lna_constrain_level;
+ skb = rtw_coex_info_request(rtwdev, &req);
+ if (!skb)
+ goto out;
+
+ dev_kfree_skb_any(skb);
+ ret = true;
+
+out:
+ return ret;
+}
+
+static void rtw_coex_update_bt_link_info(struct rtw_dev *rtwdev)
+{
+ struct rtw_coex *coex = &rtwdev->coex;
+ struct rtw_coex_stat *coex_stat = &coex->stat;
+ struct rtw_coex_dm *coex_dm = &coex->dm;
+ struct rtw_chip_info *chip = rtwdev->chip;
+ u8 i;
+ u8 rssi_state;
+ u8 rssi_step;
+ u8 rssi;
+
+ /* update wl/bt rssi by btinfo */
+ for (i = 0; i < COEX_RSSI_STEP; i++) {
+ rssi_state = coex_dm->bt_rssi_state[i];
+ rssi_step = chip->bt_rssi_step[i];
+ rssi = coex_stat->bt_rssi;
+ rssi_state = rtw_coex_next_rssi_state(rtwdev, rssi_state,
+ rssi, rssi_step);
+ coex_dm->bt_rssi_state[i] = rssi_state;
+ }
+
+ for (i = 0; i < COEX_RSSI_STEP; i++) {
+ rssi_state = coex_dm->wl_rssi_state[i];
+ rssi_step = chip->wl_rssi_step[i];
+ rssi = rtwdev->dm_info.min_rssi;
+ rssi_state = rtw_coex_next_rssi_state(rtwdev, rssi_state,
+ rssi, rssi_step);
+ coex_dm->wl_rssi_state[i] = rssi_state;
+ }
+
+ if (coex_stat->bt_ble_scan_en &&
+ coex_stat->cnt_bt[COEX_CNT_BT_INFOUPDATE] % 3 == 0) {
+ u8 scan_type;
+
+ if (rtw_coex_get_bt_scan_type(rtwdev, &scan_type)) {
+ coex_stat->bt_ble_scan_type = scan_type;
+ if ((coex_stat->bt_ble_scan_type & 0x1) == 0x1)
+ coex_stat->bt_init_scan = true;
+ else
+ coex_stat->bt_init_scan = false;
+ }
+ }
+
+ coex_stat->bt_profile_num = 0;
+
+ /* set link exist status */
+ if (!(coex_stat->bt_info_lb2 & COEX_INFO_CONNECTION)) {
+ coex_stat->bt_link_exist = false;
+ coex_stat->bt_pan_exist = false;
+ coex_stat->bt_a2dp_exist = false;
+ coex_stat->bt_hid_exist = false;
+ coex_stat->bt_hfp_exist = false;
+ } else {
+ /* connection exists */
+ coex_stat->bt_link_exist = true;
+ if (coex_stat->bt_info_lb2 & COEX_INFO_FTP) {
+ coex_stat->bt_pan_exist = true;
+ coex_stat->bt_profile_num++;
+ } else {
+ coex_stat->bt_pan_exist = false;
+ }
+
+ if (coex_stat->bt_info_lb2 & COEX_INFO_A2DP) {
+ coex_stat->bt_a2dp_exist = true;
+ coex_stat->bt_profile_num++;
+ } else {
+ coex_stat->bt_a2dp_exist = false;
+ }
+
+ if (coex_stat->bt_info_lb2 & COEX_INFO_HID) {
+ coex_stat->bt_hid_exist = true;
+ coex_stat->bt_profile_num++;
+ } else {
+ coex_stat->bt_hid_exist = false;
+ }
+
+ if (coex_stat->bt_info_lb2 & COEX_INFO_SCO_ESCO) {
+ coex_stat->bt_hfp_exist = true;
+ coex_stat->bt_profile_num++;
+ } else {
+ coex_stat->bt_hfp_exist = false;
+ }
+ }
+
+ if (coex_stat->bt_info_lb2 & COEX_INFO_INQ_PAGE) {
+ coex_dm->bt_status = COEX_BTSTATUS_INQ_PAGE;
+ } else if (!(coex_stat->bt_info_lb2 & COEX_INFO_CONNECTION)) {
+ coex_dm->bt_status = COEX_BTSTATUS_NCON_IDLE;
+ } else if (coex_stat->bt_info_lb2 == COEX_INFO_CONNECTION) {
+ coex_dm->bt_status = COEX_BTSTATUS_CON_IDLE;
+ } else if ((coex_stat->bt_info_lb2 & COEX_INFO_SCO_ESCO) ||
+ (coex_stat->bt_info_lb2 & COEX_INFO_SCO_BUSY)) {
+ if (coex_stat->bt_info_lb2 & COEX_INFO_ACL_BUSY)
+ coex_dm->bt_status = COEX_BTSTATUS_ACL_SCO_BUSY;
+ else
+ coex_dm->bt_status = COEX_BTSTATUS_SCO_BUSY;
+ } else if (coex_stat->bt_info_lb2 & COEX_INFO_ACL_BUSY) {
+ coex_dm->bt_status = COEX_BTSTATUS_ACL_BUSY;
+ } else {
+ coex_dm->bt_status = COEX_BTSTATUS_MAX;
+ }
+
+ coex_stat->cnt_bt[COEX_CNT_BT_INFOUPDATE]++;
+
+ rtw_dbg(rtwdev, RTW_DBG_COEX, "coex: bt status(%d)\n", coex_dm->bt_status);
+}
+
+static void rtw_coex_update_wl_ch_info(struct rtw_dev *rtwdev, u8 type)
+{
+ struct rtw_chip_info *chip = rtwdev->chip;
+ struct rtw_coex_dm *coex_dm = &rtwdev->coex.dm;
+ struct rtw_efuse *efuse = &rtwdev->efuse;
+ u8 link = 0;
+ u8 center_chan = 0;
+ u8 bw;
+ int i;
+
+ bw = rtwdev->hal.current_band_width;
+
+ if (type != COEX_MEDIA_DISCONNECT)
+ center_chan = rtwdev->hal.current_channel;
+
+ if (center_chan == 0 || (efuse->share_ant && center_chan <= 14)) {
+ link = 0;
+ } else if (center_chan <= 14) {
+ link = 0x1;
+
+ if (bw == RTW_CHANNEL_WIDTH_40)
+ bw = chip->bt_afh_span_bw40;
+ else
+ bw = chip->bt_afh_span_bw20;
+ } else if (chip->afh_5g_num > 1) {
+ for (i = 0; i < chip->afh_5g_num; i++) {
+ if (center_chan == chip->afh_5g[i].wl_5g_ch) {
+ link = 0x3;
+ center_chan = chip->afh_5g[i].bt_skip_ch;
+ bw = chip->afh_5g[i].bt_skip_span;
+ break;
+ }
+ }
+ }
+
+ coex_dm->wl_ch_info[0] = link;
+ coex_dm->wl_ch_info[1] = center_chan;
+ coex_dm->wl_ch_info[2] = bw;
+
+ rtw_fw_wl_ch_info(rtwdev, link, center_chan, bw);
+}
+
+static void rtw_coex_set_bt_tx_power(struct rtw_dev *rtwdev, u8 bt_pwr_dec_lvl)
+{
+ struct rtw_coex *coex = &rtwdev->coex;
+ struct rtw_coex_dm *coex_dm = &coex->dm;
+
+ if (bt_pwr_dec_lvl == coex_dm->cur_bt_pwr_lvl)
+ return;
+
+ coex_dm->cur_bt_pwr_lvl = bt_pwr_dec_lvl;
+
+ rtw_fw_force_bt_tx_power(rtwdev, bt_pwr_dec_lvl);
+}
+
+static void rtw_coex_set_bt_rx_gain(struct rtw_dev *rtwdev, u8 bt_lna_lvl)
+{
+ struct rtw_coex *coex = &rtwdev->coex;
+ struct rtw_coex_dm *coex_dm = &coex->dm;
+
+ if (bt_lna_lvl == coex_dm->cur_bt_lna_lvl)
+ return;
+
+ coex_dm->cur_bt_lna_lvl = bt_lna_lvl;
+
+ /* notify BT rx gain table changed */
+ if (bt_lna_lvl < 7) {
+ rtw_coex_set_lna_constrain_level(rtwdev, bt_lna_lvl);
+ rtw_coex_write_scbd(rtwdev, COEX_SCBD_RXGAIN, true);
+ } else {
+ rtw_coex_write_scbd(rtwdev, COEX_SCBD_RXGAIN, false);
+ }
+}
+
+static void rtw_coex_set_rf_para(struct rtw_dev *rtwdev,
+ struct coex_rf_para para)
+{
+ struct rtw_coex *coex = &rtwdev->coex;
+ struct rtw_coex_stat *coex_stat = &coex->stat;
+ u8 offset = 0;
+
+ if (coex->freerun && coex_stat->wl_noisy_level <= 1)
+ offset = 3;
+
+ rtw_coex_set_wl_tx_power(rtwdev, para.wl_pwr_dec_lvl);
+ rtw_coex_set_bt_tx_power(rtwdev, para.bt_pwr_dec_lvl + offset);
+ rtw_coex_set_wl_rx_gain(rtwdev, para.wl_low_gain_en);
+ rtw_coex_set_bt_rx_gain(rtwdev, para.bt_lna_lvl);
+}
+
+static u32 rtw_coex_read_indirect_reg(struct rtw_dev *rtwdev, u16 addr)
+{
+ u32 val;
+
+ if (!ltecoex_read_reg(rtwdev, addr, &val)) {
+ rtw_err(rtwdev, "failed to read indirect register\n");
+ return 0;
+ }
+
+ return val;
+}
+
+void rtw_coex_write_indirect_reg(struct rtw_dev *rtwdev, u16 addr,
+ u32 mask, u32 val)
+{
+ u32 shift = __ffs(mask);
+ u32 tmp;
+
+ tmp = rtw_coex_read_indirect_reg(rtwdev, addr);
+ tmp = (tmp & (~mask)) | ((val << shift) & mask);
+
+ if (!ltecoex_reg_write(rtwdev, addr, tmp))
+ rtw_err(rtwdev, "failed to write indirect register\n");
+}
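+
+/* Worked example: rtw_coex_write_indirect_reg(rtwdev, 0x38, 0xc000, 3)
+ * computes shift = __ffs(0xc000) = 14, so the read-modify-write
+ * clears bits 15:14 of LTE-coex register 0x38 and writes 0b11 into
+ * them, leaving all other bits untouched.  The GNT_BT/GNT_WL setters
+ * below rely on exactly this.
+ */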
+
+static void rtw_coex_coex_ctrl_owner(struct rtw_dev *rtwdev, bool wifi_control)
+{
+ if (wifi_control)
+ rtw_write32_set(rtwdev, REG_SYS_SDIO_CTRL, BIT_LTE_MUX_CTRL_PATH);
+ else
+ rtw_write32_clr(rtwdev, REG_SYS_SDIO_CTRL, BIT_LTE_MUX_CTRL_PATH);
+}
+
+static void rtw_coex_set_gnt_bt(struct rtw_dev *rtwdev, u8 state)
+{
+ rtw_coex_write_indirect_reg(rtwdev, 0x38, 0xc000, state);
+ rtw_coex_write_indirect_reg(rtwdev, 0x38, 0x0c00, state);
+}
+
+static void rtw_coex_set_gnt_wl(struct rtw_dev *rtwdev, u8 state)
+{
+ rtw_coex_write_indirect_reg(rtwdev, 0x38, 0x3000, state);
+ rtw_coex_write_indirect_reg(rtwdev, 0x38, 0x0300, state);
+}
+
+static void rtw_coex_set_table(struct rtw_dev *rtwdev, u32 table0, u32 table1)
+{
+#define DEF_BRK_TABLE_VAL 0xf0ffffff
+ rtw_write32(rtwdev, REG_BT_COEX_TABLE0, table0);
+ rtw_write32(rtwdev, REG_BT_COEX_TABLE1, table1);
+ rtw_write32(rtwdev, REG_BT_COEX_BRK_TABLE, DEF_BRK_TABLE_VAL);
+}
+
+static void rtw_coex_table(struct rtw_dev *rtwdev, u8 type)
+{
+ struct rtw_coex *coex = &rtwdev->coex;
+ struct rtw_coex_dm *coex_dm = &coex->dm;
+ struct rtw_chip_info *chip = rtwdev->chip;
+ struct rtw_efuse *efuse = &rtwdev->efuse;
+
+ coex_dm->cur_table = type;
+
+ if (efuse->share_ant) {
+ if (type < chip->table_sant_num)
+ rtw_coex_set_table(rtwdev,
+ chip->table_sant[type].bt,
+ chip->table_sant[type].wl);
+ } else {
+ type = type - 100;
+ if (type < chip->table_nsant_num)
+ rtw_coex_set_table(rtwdev,
+ chip->table_nsant[type].bt,
+ chip->table_nsant[type].wl);
+ }
+}
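+
+/* Numbering convention used by the table/TDMA case ids in this file:
+ * ids below 100 index the shared-antenna arrays (chip->table_sant[] /
+ * chip->tdma_sant[]); ids of 100 and up are offset by 100 and index
+ * the non-shared-antenna arrays, e.g. table_case = 112 selects
+ * chip->table_nsant[12].
+ */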
+
+static void rtw_coex_ignore_wlan_act(struct rtw_dev *rtwdev, bool enable)
+{
+ struct rtw_coex *coex = &rtwdev->coex;
+
+ if (coex->stop_dm)
+ return;
+
+ rtw_fw_bt_ignore_wlan_action(rtwdev, enable);
+}
+
+static void rtw_coex_power_save_state(struct rtw_dev *rtwdev, u8 ps_type,
+ u8 lps_val, u8 rpwm_val)
+{
+ struct rtw_lps_conf *lps_conf = &rtwdev->lps_conf;
+ struct rtw_vif *rtwvif;
+ struct rtw_coex *coex = &rtwdev->coex;
+ struct rtw_coex_stat *coex_stat = &coex->stat;
+ u8 lps_mode = 0x0;
+
+ lps_mode = rtwdev->lps_conf.mode;
+
+ switch (ps_type) {
+ case COEX_PS_WIFI_NATIVE:
+ /* recover to original 32k low power setting */
+ coex_stat->wl_force_lps_ctrl = false;
+
+ rtwvif = lps_conf->rtwvif;
+ if (rtwvif && rtw_in_lps(rtwdev))
+ rtw_leave_lps(rtwdev, rtwvif);
+ break;
+ case COEX_PS_LPS_OFF:
+ coex_stat->wl_force_lps_ctrl = true;
+ if (lps_mode)
+ rtw_fw_coex_tdma_type(rtwdev, 0x8, 0, 0, 0, 0);
+
+ rtwvif = lps_conf->rtwvif;
+ if (rtwvif && rtw_in_lps(rtwdev))
+ rtw_leave_lps(rtwdev, rtwvif);
+ break;
+ default:
+ break;
+ }
+}
+
+static void rtw_coex_set_tdma(struct rtw_dev *rtwdev, u8 byte1, u8 byte2,
+ u8 byte3, u8 byte4, u8 byte5)
+{
+ struct rtw_coex *coex = &rtwdev->coex;
+ struct rtw_coex_dm *coex_dm = &coex->dm;
+ struct rtw_chip_info *chip = rtwdev->chip;
+ u8 ps_type = COEX_PS_WIFI_NATIVE;
+ bool ap_enable = false;
+
+ if (ap_enable && (byte1 & BIT(4) && !(byte1 & BIT(5)))) {
+ byte1 &= ~BIT(4);
+ byte1 |= BIT(5);
+
+ byte5 |= BIT(5);
+ byte5 &= ~BIT(6);
+
+ ps_type = COEX_PS_WIFI_NATIVE;
+ rtw_coex_power_save_state(rtwdev, ps_type, 0x0, 0x0);
+ } else if (byte1 & BIT(4) && !(byte1 & BIT(5))) {
+ if (chip->pstdma_type == COEX_PSTDMA_FORCE_LPSOFF)
+ ps_type = COEX_PS_LPS_OFF;
+ else
+ ps_type = COEX_PS_LPS_ON;
+ rtw_coex_power_save_state(rtwdev, ps_type, 0x50, 0x4);
+ } else {
+ ps_type = COEX_PS_WIFI_NATIVE;
+ rtw_coex_power_save_state(rtwdev, ps_type, 0x0, 0x0);
+ }
+
+ coex_dm->ps_tdma_para[0] = byte1;
+ coex_dm->ps_tdma_para[1] = byte2;
+ coex_dm->ps_tdma_para[2] = byte3;
+ coex_dm->ps_tdma_para[3] = byte4;
+ coex_dm->ps_tdma_para[4] = byte5;
+
+ rtw_fw_coex_tdma_type(rtwdev, byte1, byte2, byte3, byte4, byte5);
+}
+
+static void rtw_coex_tdma(struct rtw_dev *rtwdev, bool force, u32 tcase)
+{
+ struct rtw_coex *coex = &rtwdev->coex;
+ struct rtw_coex_dm *coex_dm = &coex->dm;
+ struct rtw_chip_info *chip = rtwdev->chip;
+ struct rtw_efuse *efuse = &rtwdev->efuse;
+ u8 n, type;
+ bool turn_on;
+
+ if (tcase & TDMA_4SLOT) /* 4-slot (50ms) mode */
+ rtw_coex_tdma_timer_base(rtwdev, 3);
+ else
+ rtw_coex_tdma_timer_base(rtwdev, 0);
+
+ type = (u8)(tcase & 0xff);
+
+ turn_on = (type != 0 && type != 100);
+
+ if (!force) {
+ if (turn_on == coex_dm->cur_ps_tdma_on &&
+ type == coex_dm->cur_ps_tdma) {
+ return;
+ }
+ }
+
+ if (turn_on) {
+ /* enable TBTT interrupt */
+ rtw_write8_set(rtwdev, REG_BCN_CTRL, BIT_EN_BCN_FUNCTION);
+ rtw_coex_write_scbd(rtwdev, COEX_SCBD_TDMA, true);
+ } else {
+ rtw_coex_write_scbd(rtwdev, COEX_SCBD_TDMA, false);
+ }
+
+ if (efuse->share_ant) {
+ if (type < chip->tdma_sant_num)
+ rtw_coex_set_tdma(rtwdev,
+ chip->tdma_sant[type].para[0],
+ chip->tdma_sant[type].para[1],
+ chip->tdma_sant[type].para[2],
+ chip->tdma_sant[type].para[3],
+ chip->tdma_sant[type].para[4]);
+ } else {
+ n = type - 100;
+ if (n < chip->tdma_nsant_num)
+ rtw_coex_set_tdma(rtwdev,
+ chip->tdma_nsant[n].para[0],
+ chip->tdma_nsant[n].para[1],
+ chip->tdma_nsant[n].para[2],
+ chip->tdma_nsant[n].para[3],
+ chip->tdma_nsant[n].para[4]);
+ }
+
+ /* update pre state */
+ coex_dm->cur_ps_tdma_on = turn_on;
+ coex_dm->cur_ps_tdma = type;
+
+ rtw_dbg(rtwdev, RTW_DBG_COEX, "coex: coex tdma type (%d)\n", type);
+}
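+
+/* Example of the tcase encoding: the low byte picks the TDMA
+ * parameter set and TDMA_4SLOT selects the 4-slot (50ms) timer base,
+ * so a caller such as
+ *
+ *   rtw_coex_tdma(rtwdev, false, 13 | TDMA_4SLOT);
+ *
+ * programs shared-antenna TDMA case 13 on the 4-slot timer; type 0
+ * (or 100 for non-shared antenna) turns TDMA off.
+ */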
+
+static void rtw_coex_set_ant_path(struct rtw_dev *rtwdev, bool force, u8 phase)
+{
+ struct rtw_coex *coex = &rtwdev->coex;
+ struct rtw_coex_stat *coex_stat = &coex->stat;
+ struct rtw_coex_dm *coex_dm = &coex->dm;
+ u8 ctrl_type = COEX_SWITCH_CTRL_MAX;
+ u8 pos_type = COEX_SWITCH_TO_MAX;
+
+ if (!force && coex_dm->cur_ant_pos_type == phase)
+ return;
+
+ coex_dm->cur_ant_pos_type = phase;
+
+ /* avoid switch coex_ctrl_owner during BT IQK */
+ rtw_coex_check_rfk(rtwdev);
+
+ switch (phase) {
+ case COEX_SET_ANT_POWERON:
+ /* set path control owner to BT at power-on */
+ if (coex_stat->bt_disabled)
+ rtw_coex_coex_ctrl_owner(rtwdev, true);
+ else
+ rtw_coex_coex_ctrl_owner(rtwdev, false);
+
+ ctrl_type = COEX_SWITCH_CTRL_BY_BBSW;
+ pos_type = COEX_SWITCH_TO_BT;
+ break;
+ case COEX_SET_ANT_INIT:
+ if (coex_stat->bt_disabled) {
+ /* set GNT_BT to SW low */
+ rtw_coex_set_gnt_bt(rtwdev, COEX_GNT_SET_SW_LOW);
+
+ /* set GNT_WL to SW high */
+ rtw_coex_set_gnt_wl(rtwdev, COEX_GNT_SET_SW_HIGH);
+ } else {
+ /* set GNT_BT to SW high */
+ rtw_coex_set_gnt_bt(rtwdev, COEX_GNT_SET_SW_HIGH);
+
+ /* set GNT_WL to SW low */
+ rtw_coex_set_gnt_wl(rtwdev, COEX_GNT_SET_SW_LOW);
+ }
+
+ /* set path control owner to wl at initial step */
+ rtw_coex_coex_ctrl_owner(rtwdev, true);
+
+ ctrl_type = COEX_SWITCH_CTRL_BY_BBSW;
+ pos_type = COEX_SWITCH_TO_BT;
+ break;
+ case COEX_SET_ANT_WONLY:
+ /* set GNT_BT to SW Low */
+ rtw_coex_set_gnt_bt(rtwdev, COEX_GNT_SET_SW_LOW);
+
+ /* Set GNT_WL to SW high */
+ rtw_coex_set_gnt_wl(rtwdev, COEX_GNT_SET_SW_HIGH);
+
+ /* set path control owner to wl at initial step */
+ rtw_coex_coex_ctrl_owner(rtwdev, true);
+
+ ctrl_type = COEX_SWITCH_CTRL_BY_BBSW;
+ pos_type = COEX_SWITCH_TO_WLG;
+ break;
+ case COEX_SET_ANT_WOFF:
+ /* set path control owner to BT */
+ rtw_coex_coex_ctrl_owner(rtwdev, false);
+
+ ctrl_type = COEX_SWITCH_CTRL_BY_BT;
+ pos_type = COEX_SWITCH_TO_NOCARE;
+ break;
+ case COEX_SET_ANT_2G:
+ /* set GNT_BT to PTA */
+ rtw_coex_set_gnt_bt(rtwdev, COEX_GNT_SET_HW_PTA);
+
+ /* set GNT_WL to PTA */
+ rtw_coex_set_gnt_wl(rtwdev, COEX_GNT_SET_HW_PTA);
+
+ /* set path control owner to wl at runtime step */
+ rtw_coex_coex_ctrl_owner(rtwdev, true);
+
+ ctrl_type = COEX_SWITCH_CTRL_BY_PTA;
+ pos_type = COEX_SWITCH_TO_NOCARE;
+ break;
+ case COEX_SET_ANT_5G:
+ /* set GNT_BT to PTA */
+ rtw_coex_set_gnt_bt(rtwdev, COEX_GNT_SET_SW_HIGH);
+
+ /* set GNT_WL to SW high */
+ rtw_coex_set_gnt_wl(rtwdev, COEX_GNT_SET_SW_HIGH);
+
+ /* set path control owner to wl at runtime step */
+ rtw_coex_coex_ctrl_owner(rtwdev, true);
+
+ ctrl_type = COEX_SWITCH_CTRL_BY_BBSW;
+ pos_type = COEX_SWITCH_TO_WLA;
+ break;
+ case COEX_SET_ANT_2G_FREERUN:
+ /* set GNT_BT to SW high */
+ rtw_coex_set_gnt_bt(rtwdev, COEX_GNT_SET_SW_HIGH);
+
+ /* Set GNT_WL to SW high */
+ rtw_coex_set_gnt_wl(rtwdev, COEX_GNT_SET_SW_HIGH);
+
+ /* set path control owner to wl at runtime step */
+ rtw_coex_coex_ctrl_owner(rtwdev, true);
+
+ ctrl_type = COEX_SWITCH_CTRL_BY_BBSW;
+ pos_type = COEX_SWITCH_TO_WLG_BT;
+ break;
+ case COEX_SET_ANT_2G_WLBT:
+ /* set GNT_BT to SW high */
+ rtw_coex_set_gnt_bt(rtwdev, COEX_GNT_SET_HW_PTA);
+
+ /* Set GNT_WL to SW high */
+ rtw_coex_set_gnt_wl(rtwdev, COEX_GNT_SET_HW_PTA);
+
+ /* set path control owner to wl at runtime step */
+ rtw_coex_coex_ctrl_owner(rtwdev, true);
+
+ ctrl_type = COEX_SWITCH_CTRL_BY_BBSW;
+ pos_type = COEX_SWITCH_TO_WLG_BT;
+ break;
+ default:
+ WARN(1, "unknown phase when setting antenna path\n");
+ return;
+ }
+
+ if (ctrl_type < COEX_SWITCH_CTRL_MAX && pos_type < COEX_SWITCH_TO_MAX)
+ rtw_coex_set_ant_switch(rtwdev, ctrl_type, pos_type);
+}
+
+static u8 rtw_coex_algorithm(struct rtw_dev *rtwdev)
+{
+ struct rtw_coex *coex = &rtwdev->coex;
+ struct rtw_coex_stat *coex_stat = &coex->stat;
+ u8 algorithm = COEX_ALGO_NOPROFILE;
+ u8 profile_map = 0;
+
+ if (coex_stat->bt_hfp_exist)
+ profile_map |= BPM_HFP;
+ if (coex_stat->bt_hid_exist)
+ profile_map |= BPM_HID;
+ if (coex_stat->bt_a2dp_exist)
+ profile_map |= BPM_A2DP;
+ if (coex_stat->bt_pan_exist)
+ profile_map |= BPM_PAN;
+
+ switch (profile_map) {
+ case BPM_HFP:
+ algorithm = COEX_ALGO_HFP;
+ break;
+ case BPM_HID:
+ case BPM_HFP + BPM_HID:
+ algorithm = COEX_ALGO_HID;
+ break;
+ case BPM_HFP + BPM_A2DP:
+ case BPM_HID + BPM_A2DP:
+ case BPM_HFP + BPM_HID + BPM_A2DP:
+ algorithm = COEX_ALGO_A2DP_HID;
+ break;
+ case BPM_HFP + BPM_PAN:
+ case BPM_HID + BPM_PAN:
+ case BPM_HFP + BPM_HID + BPM_PAN:
+ algorithm = COEX_ALGO_PAN_HID;
+ break;
+ case BPM_HFP + BPM_A2DP + BPM_PAN:
+ case BPM_HID + BPM_A2DP + BPM_PAN:
+ case BPM_HFP + BPM_HID + BPM_A2DP + BPM_PAN:
+ algorithm = COEX_ALGO_A2DP_PAN_HID;
+ break;
+ case BPM_PAN:
+ algorithm = COEX_ALGO_PAN;
+ break;
+ case BPM_A2DP + BPM_PAN:
+ algorithm = COEX_ALGO_A2DP_PAN;
+ break;
+ case BPM_A2DP:
+ if (coex_stat->bt_multi_link) {
+ if (coex_stat->bt_hid_pair_num > 0)
+ algorithm = COEX_ALGO_A2DP_HID;
+ else
+ algorithm = COEX_ALGO_A2DP_PAN;
+ } else {
+ algorithm = COEX_ALGO_A2DP;
+ }
+ break;
+ default:
+ algorithm = COEX_ALGO_NOPROFILE;
+ break;
+ }
+
+ return algorithm;
+}
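+
+/* Worked example: an HID mouse plus an A2DP headset gives
+ * profile_map = BPM_HID | BPM_A2DP, which matches the
+ * "BPM_HID + BPM_A2DP" label above and selects COEX_ALGO_A2DP_HID.
+ * (The labels add BPM_* values; since each profile flag is a distinct
+ * bit, addition and OR give the same result.)
+ */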
+
+static void rtw_coex_action_coex_all_off(struct rtw_dev *rtwdev)
+{
+ struct rtw_efuse *efuse = &rtwdev->efuse;
+ struct rtw_chip_info *chip = rtwdev->chip;
+ u8 table_case, tdma_case;
+
+ if (efuse->share_ant) {
+ /* Shared-Ant */
+ table_case = 2;
+ tdma_case = 0;
+ } else {
+ /* Non-Shared-Ant */
+ table_case = 100;
+ tdma_case = 100;
+ }
+
+ rtw_coex_set_rf_para(rtwdev, chip->wl_rf_para_rx[0]);
+ rtw_coex_table(rtwdev, table_case);
+ rtw_coex_tdma(rtwdev, false, tdma_case);
+}
+
+static void rtw_coex_action_freerun(struct rtw_dev *rtwdev)
+{
+ struct rtw_coex *coex = &rtwdev->coex;
+ struct rtw_coex_stat *coex_stat = &coex->stat;
+ struct rtw_coex_dm *coex_dm = &coex->dm;
+ struct rtw_efuse *efuse = &rtwdev->efuse;
+ struct rtw_chip_info *chip = rtwdev->chip;
+ u8 level = 0;
+
+ if (efuse->share_ant)
+ return;
+
+ coex->freerun = true;
+
+ if (coex_stat->wl_connected)
+ rtw_coex_update_wl_ch_info(rtwdev, COEX_MEDIA_CONNECT);
+
+ rtw_coex_set_ant_path(rtwdev, false, COEX_SET_ANT_2G_FREERUN);
+
+ rtw_coex_write_scbd(rtwdev, COEX_SCBD_FIX2M, false);
+
+ if (COEX_RSSI_HIGH(coex_dm->wl_rssi_state[0]))
+ level = 2;
+ else if (COEX_RSSI_HIGH(coex_dm->wl_rssi_state[1]))
+ level = 3;
+ else if (COEX_RSSI_HIGH(coex_dm->wl_rssi_state[2]))
+ level = 4;
+ else
+ level = 5;
+
+ if (level > chip->wl_rf_para_num - 1)
+ level = chip->wl_rf_para_num - 1;
+
+ if (coex_stat->wl_tput_dir == COEX_WL_TPUT_TX)
+ rtw_coex_set_rf_para(rtwdev, chip->wl_rf_para_tx[level]);
+ else
+ rtw_coex_set_rf_para(rtwdev, chip->wl_rf_para_rx[level]);
+
+ rtw_coex_table(rtwdev, 100);
+ rtw_coex_tdma(rtwdev, false, 100);
+}
+
+static void rtw_coex_action_bt_whql_test(struct rtw_dev *rtwdev)
+{
+ struct rtw_efuse *efuse = &rtwdev->efuse;
+ struct rtw_chip_info *chip = rtwdev->chip;
+ u8 table_case, tdma_case;
+
+ if (efuse->share_ant) {
+ /* Shared-Ant */
+ table_case = 2;
+ tdma_case = 0;
+ } else {
+ /* Non-Shared-Ant */
+ table_case = 100;
+ tdma_case = 100;
+ }
+
+ rtw_coex_set_ant_path(rtwdev, false, COEX_SET_ANT_2G);
+ rtw_coex_set_rf_para(rtwdev, chip->wl_rf_para_rx[0]);
+ rtw_coex_table(rtwdev, table_case);
+ rtw_coex_tdma(rtwdev, false, tdma_case);
+}
+
+static void rtw_coex_action_bt_relink(struct rtw_dev *rtwdev)
+{
+ struct rtw_efuse *efuse = &rtwdev->efuse;
+ struct rtw_chip_info *chip = rtwdev->chip;
+ u8 table_case, tdma_case;
+
+ if (efuse->share_ant) {
+ /* Shared-Ant */
+ table_case = 1;
+ tdma_case = 0;
+ } else {
+ /* Non-Shared-Ant */
+ table_case = 100;
+ tdma_case = 100;
+ }
+
+ rtw_coex_set_ant_path(rtwdev, false, COEX_SET_ANT_2G);
+ rtw_coex_set_rf_para(rtwdev, chip->wl_rf_para_rx[0]);
+ rtw_coex_table(rtwdev, table_case);
+ rtw_coex_tdma(rtwdev, false, tdma_case);
+}
+
+static void rtw_coex_action_bt_idle(struct rtw_dev *rtwdev)
+{
+ struct rtw_coex *coex = &rtwdev->coex;
+ struct rtw_coex_stat *coex_stat = &coex->stat;
+ struct rtw_coex_dm *coex_dm = &coex->dm;
+ struct rtw_efuse *efuse = &rtwdev->efuse;
+ struct rtw_chip_info *chip = rtwdev->chip;
+ struct rtw_coex_rfe *coex_rfe = &coex->rfe;
+ u8 table_case = 0xff, tdma_case = 0xff;
+
+ if (coex_rfe->ant_switch_with_bt &&
+ coex_dm->bt_status == COEX_BTSTATUS_NCON_IDLE) {
+ if (efuse->share_ant &&
+ COEX_RSSI_HIGH(coex_dm->wl_rssi_state[1])) {
+ table_case = 0;
+ tdma_case = 0;
+ } else if (!efuse->share_ant) {
+ table_case = 100;
+ tdma_case = 100;
+ }
+ }
+
+ if (table_case != 0xff && tdma_case != 0xff) {
+ rtw_coex_set_ant_path(rtwdev, false, COEX_SET_ANT_2G_FREERUN);
+ rtw_coex_table(rtwdev, table_case);
+ rtw_coex_tdma(rtwdev, false, tdma_case);
+ return;
+ }
+
+ rtw_coex_set_ant_path(rtwdev, false, COEX_SET_ANT_2G);
+
+ if (efuse->share_ant) {
+ /* Shared-Ant */
+ if (!coex_stat->wl_gl_busy) {
+ table_case = 10;
+ tdma_case = 3;
+ } else if (coex_dm->bt_status == COEX_BTSTATUS_NCON_IDLE) {
+ table_case = 6;
+ tdma_case = 7;
+ } else {
+ table_case = 12;
+ tdma_case = 7;
+ }
+ } else {
+ /* Non-Shared-Ant */
+ if (!coex_stat->wl_gl_busy) {
+ table_case = 112;
+ tdma_case = 104;
+ } else if ((coex_stat->bt_ble_scan_type & 0x2) &&
+ coex_dm->bt_status == COEX_BTSTATUS_NCON_IDLE) {
+ table_case = 114;
+ tdma_case = 103;
+ } else {
+ table_case = 112;
+ tdma_case = 103;
+ }
+ }
+
+ rtw_coex_set_rf_para(rtwdev, chip->wl_rf_para_rx[0]);
+ rtw_coex_table(rtwdev, table_case);
+ rtw_coex_tdma(rtwdev, false, tdma_case);
+}
+
+static void rtw_coex_action_bt_inquiry(struct rtw_dev *rtwdev)
+{
+ struct rtw_coex *coex = &rtwdev->coex;
+ struct rtw_coex_stat *coex_stat = &coex->stat;
+ struct rtw_efuse *efuse = &rtwdev->efuse;
+ struct rtw_chip_info *chip = rtwdev->chip;
+ bool wl_hi_pri = false;
+ u8 table_case, tdma_case;
+
+ if (coex_stat->wl_linkscan_proc || coex_stat->wl_hi_pri_task1 ||
+ coex_stat->wl_hi_pri_task2)
+ wl_hi_pri = true;
+
+ if (efuse->share_ant) {
+ /* Shared-Ant */
+ if (wl_hi_pri) {
+ table_case = 15;
+ if (coex_stat->bt_a2dp_exist &&
+ !coex_stat->bt_pan_exist)
+ tdma_case = 11;
+ else if (coex_stat->wl_hi_pri_task1)
+ tdma_case = 6;
+ else if (!coex_stat->bt_page)
+ tdma_case = 8;
+ else
+ tdma_case = 9;
+ } else if (coex_stat->wl_connected) {
+ table_case = 10;
+ tdma_case = 10;
+ } else {
+ table_case = 1;
+ tdma_case = 0;
+ }
+ } else {
+ /* Non-Shared-Ant */
+ if (wl_hi_pri) {
+ table_case = 113;
+ if (coex_stat->bt_a2dp_exist &&
+ !coex_stat->bt_pan_exist)
+ tdma_case = 111;
+ else if (coex_stat->wl_hi_pri_task1)
+ tdma_case = 106;
+ else if (!coex_stat->bt_page)
+ tdma_case = 108;
+ else
+ tdma_case = 109;
+ } else if (coex_stat->wl_connected) {
+ table_case = 101;
+ tdma_case = 110;
+ } else {
+ table_case = 100;
+ tdma_case = 100;
+ }
+ }
+
+ rtw_dbg(rtwdev, RTW_DBG_COEX, "coex: wifi hi(%d), bt page(%d)\n",
+ wl_hi_pri, coex_stat->bt_page);
+
+ rtw_coex_set_ant_path(rtwdev, false, COEX_SET_ANT_2G);
+ rtw_coex_set_rf_para(rtwdev, chip->wl_rf_para_rx[0]);
+ rtw_coex_table(rtwdev, table_case);
+ rtw_coex_tdma(rtwdev, false, tdma_case);
+}
+
+static void rtw_coex_action_bt_hfp(struct rtw_dev *rtwdev)
+{
+ struct rtw_coex *coex = &rtwdev->coex;
+ struct rtw_coex_stat *coex_stat = &coex->stat;
+ struct rtw_efuse *efuse = &rtwdev->efuse;
+ struct rtw_chip_info *chip = rtwdev->chip;
+ u8 table_case, tdma_case;
+
+ if (efuse->share_ant) {
+ /* Shared-Ant */
+ if (coex_stat->bt_multi_link) {
+ table_case = 10;
+ tdma_case = 17;
+ } else {
+ table_case = 10;
+ tdma_case = 5;
+ }
+ } else {
+ /* Non-Shared-Ant */
+ if (coex_stat->bt_multi_link) {
+ table_case = 112;
+ tdma_case = 117;
+ } else {
+ table_case = 105;
+ tdma_case = 100;
+ }
+ }
+
+ rtw_coex_set_ant_path(rtwdev, false, COEX_SET_ANT_2G);
+ rtw_coex_set_rf_para(rtwdev, chip->wl_rf_para_rx[0]);
+ rtw_coex_table(rtwdev, table_case);
+ rtw_coex_tdma(rtwdev, false, tdma_case);
+}
+
+static void rtw_coex_action_bt_hid(struct rtw_dev *rtwdev)
+{
+ struct rtw_coex *coex = &rtwdev->coex;
+ struct rtw_coex_stat *coex_stat = &coex->stat;
+ struct rtw_efuse *efuse = &rtwdev->efuse;
+ struct rtw_chip_info *chip = rtwdev->chip;
+ u8 table_case, tdma_case;
+ u32 wl_bw;
+
+ wl_bw = rtwdev->hal.current_band_width;
+
+ if (efuse->share_ant) {
+ /* Shared-Ant */
+ if (coex_stat->bt_ble_exist) {
+ /* BLE remote control unit (RCU) */
+ if (!coex_stat->wl_gl_busy)
+ table_case = 14;
+ else
+ table_case = 15;
+
+ if (coex_stat->bt_a2dp_active || wl_bw == 0)
+ tdma_case = 18;
+ else if (coex_stat->wl_gl_busy)
+ tdma_case = 8;
+ else
+ tdma_case = 4;
+ } else {
+ if (coex_stat->bt_a2dp_active || wl_bw == 0) {
+ table_case = 8;
+ tdma_case = 4;
+ } else {
+ /* for 4/18 HID */
+ if (coex_stat->bt_418_hid_exist &&
+ coex_stat->wl_gl_busy)
+ table_case = 12;
+ else
+ table_case = 10;
+ tdma_case = 4;
+ }
+ }
+ } else {
+ /* Non-Shared-Ant */
+ if (coex_stat->bt_a2dp_active) {
+ table_case = 113;
+ tdma_case = 118;
+ } else if (coex_stat->bt_ble_exist) {
+ /* BLE */
+ table_case = 113;
+
+ if (coex_stat->wl_gl_busy)
+ tdma_case = 106;
+ else
+ tdma_case = 104;
+ } else {
+ table_case = 113;
+ tdma_case = 104;
+ }
+ }
+
+ rtw_coex_set_ant_path(rtwdev, false, COEX_SET_ANT_2G);
+ rtw_coex_set_rf_para(rtwdev, chip->wl_rf_para_rx[0]);
+ rtw_coex_table(rtwdev, table_case);
+ rtw_coex_tdma(rtwdev, false, tdma_case);
+}
+
+static void rtw_coex_action_bt_a2dp(struct rtw_dev *rtwdev)
+{
+ struct rtw_coex *coex = &rtwdev->coex;
+ struct rtw_coex_stat *coex_stat = &coex->stat;
+ struct rtw_coex_dm *coex_dm = &coex->dm;
+ struct rtw_efuse *efuse = &rtwdev->efuse;
+ struct rtw_chip_info *chip = rtwdev->chip;
+ u8 table_case, tdma_case;
+ u32 slot_type = 0;
+
+ if (efuse->share_ant) {
+ /* Shared-Ant */
+ if (coex_stat->wl_gl_busy && coex_stat->wl_noisy_level == 0)
+ table_case = 10;
+ else
+ table_case = 9;
+
+ slot_type = TDMA_4SLOT;
+
+ if (coex_stat->wl_gl_busy)
+ tdma_case = 13;
+ else
+ tdma_case = 14;
+ } else {
+ /* Non-Shared-Ant */
+ table_case = 112;
+
+ if (COEX_RSSI_HIGH(coex_dm->wl_rssi_state[1]))
+ tdma_case = 112;
+ else
+ tdma_case = 113;
+ }
+
+ rtw_coex_set_ant_path(rtwdev, false, COEX_SET_ANT_2G);
+ rtw_coex_set_rf_para(rtwdev, chip->wl_rf_para_rx[0]);
+ rtw_coex_table(rtwdev, table_case);
+ rtw_coex_tdma(rtwdev, false, tdma_case | slot_type);
+}
+
+static void rtw_coex_action_bt_a2dpsink(struct rtw_dev *rtwdev)
+{
+ struct rtw_coex *coex = &rtwdev->coex;
+ struct rtw_coex_stat *coex_stat = &coex->stat;
+ struct rtw_efuse *efuse = &rtwdev->efuse;
+ struct rtw_chip_info *chip = rtwdev->chip;
+ u8 table_case, tdma_case;
+ bool ap_enable = false;
+
+ if (efuse->share_ant) { /* Shared-Ant */
+ if (ap_enable) {
+ table_case = 2;
+ tdma_case = 0;
+ } else if (coex_stat->wl_gl_busy) {
+ table_case = 28;
+ tdma_case = 20;
+ } else {
+ table_case = 28;
+ tdma_case = 26;
+ }
+ } else { /* Non-Shared-Ant */
+ if (ap_enable) {
+ table_case = 100;
+ tdma_case = 100;
+ } else {
+ table_case = 119;
+ tdma_case = 120;
+ }
+ }
+
+ rtw_coex_set_ant_path(rtwdev, false, COEX_SET_ANT_2G);
+ rtw_coex_set_rf_para(rtwdev, chip->wl_rf_para_rx[0]);
+ rtw_coex_table(rtwdev, table_case);
+ rtw_coex_tdma(rtwdev, false, tdma_case);
+}
+
+static void rtw_coex_action_bt_pan(struct rtw_dev *rtwdev)
+{
+ struct rtw_coex *coex = &rtwdev->coex;
+ struct rtw_coex_stat *coex_stat = &coex->stat;
+ struct rtw_efuse *efuse = &rtwdev->efuse;
+ struct rtw_chip_info *chip = rtwdev->chip;
+ u8 table_case, tdma_case;
+
+ if (efuse->share_ant) {
+ /* Shared-Ant */
+ if (coex_stat->wl_gl_busy && coex_stat->wl_noisy_level == 0)
+ table_case = 14;
+ else
+ table_case = 10;
+
+ if (coex_stat->wl_gl_busy)
+ tdma_case = 17;
+ else
+ tdma_case = 19;
+ } else {
+ /* Non-Shared-Ant */
+ table_case = 112;
+
+ if (coex_stat->wl_gl_busy)
+ tdma_case = 117;
+ else
+ tdma_case = 119;
+ }
+
+ rtw_coex_set_ant_path(rtwdev, false, COEX_SET_ANT_2G);
+ rtw_coex_set_rf_para(rtwdev, chip->wl_rf_para_rx[0]);
+ rtw_coex_table(rtwdev, table_case);
+ rtw_coex_tdma(rtwdev, false, tdma_case);
+}
+
+static void rtw_coex_action_bt_a2dp_hid(struct rtw_dev *rtwdev)
+{
+ struct rtw_coex *coex = &rtwdev->coex;
+ struct rtw_coex_stat *coex_stat = &coex->stat;
+ struct rtw_coex_dm *coex_dm = &coex->dm;
+ struct rtw_efuse *efuse = &rtwdev->efuse;
+ struct rtw_chip_info *chip = rtwdev->chip;
+ u8 table_case, tdma_case;
+ u32 slot_type = 0;
+
+ if (efuse->share_ant) {
+ /* Shared-Ant */
+ if (coex_stat->bt_ble_exist)
+ table_case = 26;
+ else
+ table_case = 9;
+
+ if (coex_stat->wl_gl_busy) {
+ slot_type = TDMA_4SLOT;
+ tdma_case = 13;
+ } else {
+ tdma_case = 14;
+ }
+ } else {
+ /* Non-Shared-Ant */
+ if (coex_stat->bt_ble_exist)
+ table_case = 121;
+ else
+ table_case = 113;
+
+ if (COEX_RSSI_HIGH(coex_dm->wl_rssi_state[1]))
+ tdma_case = 112;
+ else
+ tdma_case = 113;
+ }
+
+ rtw_coex_set_ant_path(rtwdev, false, COEX_SET_ANT_2G);
+ rtw_coex_set_rf_para(rtwdev, chip->wl_rf_para_rx[0]);
+ rtw_coex_table(rtwdev, table_case);
+ rtw_coex_tdma(rtwdev, false, tdma_case | slot_type);
+}
+
+static void rtw_coex_action_bt_a2dp_pan(struct rtw_dev *rtwdev)
+{
+ struct rtw_coex *coex = &rtwdev->coex;
+ struct rtw_coex_stat *coex_stat = &coex->stat;
+ struct rtw_efuse *efuse = &rtwdev->efuse;
+ struct rtw_chip_info *chip = rtwdev->chip;
+ u8 table_case, tdma_case;
+
+ if (efuse->share_ant) {
+ /* Shared-Ant */
+ if (coex_stat->wl_gl_busy &&
+ coex_stat->wl_noisy_level == 0)
+ table_case = 14;
+ else
+ table_case = 10;
+
+ if (coex_stat->wl_gl_busy)
+ tdma_case = 15;
+ else
+ tdma_case = 20;
+ } else {
+ /* Non-Shared-Ant */
+ table_case = 112;
+
+ if (coex_stat->wl_gl_busy)
+ tdma_case = 115;
+ else
+ tdma_case = 120;
+ }
+
+ rtw_coex_set_ant_path(rtwdev, false, COEX_SET_ANT_2G);
+ rtw_coex_set_rf_para(rtwdev, chip->wl_rf_para_rx[0]);
+ rtw_coex_table(rtwdev, table_case);
+ rtw_coex_tdma(rtwdev, false, tdma_case);
+}
+
+static void rtw_coex_action_bt_pan_hid(struct rtw_dev *rtwdev)
+{
+ struct rtw_coex *coex = &rtwdev->coex;
+ struct rtw_coex_stat *coex_stat = &coex->stat;
+ struct rtw_efuse *efuse = &rtwdev->efuse;
+ struct rtw_chip_info *chip = rtwdev->chip;
+ u8 table_case, tdma_case;
+
+ if (efuse->share_ant) {
+ /* Shared-Ant */
+ table_case = 9;
+
+ if (coex_stat->wl_gl_busy)
+ tdma_case = 18;
+ else
+ tdma_case = 19;
+ } else {
+ /* Non-Shared-Ant */
+ table_case = 113;
+
+ if (coex_stat->wl_gl_busy)
+ tdma_case = 117;
+ else
+ tdma_case = 119;
+ }
+
+ rtw_coex_set_ant_path(rtwdev, false, COEX_SET_ANT_2G);
+ rtw_coex_set_rf_para(rtwdev, chip->wl_rf_para_rx[0]);
+ rtw_coex_table(rtwdev, table_case);
+ rtw_coex_tdma(rtwdev, false, tdma_case);
+}
+
+static void rtw_coex_action_bt_a2dp_pan_hid(struct rtw_dev *rtwdev)
+{
+ struct rtw_coex *coex = &rtwdev->coex;
+ struct rtw_coex_stat *coex_stat = &coex->stat;
+ struct rtw_efuse *efuse = &rtwdev->efuse;
+ struct rtw_chip_info *chip = rtwdev->chip;
+ u8 table_case, tdma_case;
+
+ if (efuse->share_ant) {
+ /* Shared-Ant */
+ table_case = 10;
+
+ if (coex_stat->wl_gl_busy)
+ tdma_case = 15;
+ else
+ tdma_case = 20;
+ } else {
+ /* Non-Shared-Ant */
+ table_case = 113;
+
+ if (coex_stat->wl_gl_busy)
+ tdma_case = 115;
+ else
+ tdma_case = 120;
+ }
+
+ rtw_coex_set_ant_path(rtwdev, false, COEX_SET_ANT_2G);
+ rtw_coex_set_rf_para(rtwdev, chip->wl_rf_para_rx[0]);
+ rtw_coex_table(rtwdev, table_case);
+ rtw_coex_tdma(rtwdev, false, tdma_case);
+}
+
+static void rtw_coex_action_wl_under5g(struct rtw_dev *rtwdev)
+{
+ struct rtw_efuse *efuse = &rtwdev->efuse;
+ struct rtw_chip_info *chip = rtwdev->chip;
+ u8 table_case, tdma_case;
+
+ rtw_coex_write_scbd(rtwdev, COEX_SCBD_FIX2M, false);
+
+ if (efuse->share_ant) {
+ /* Shared-Ant */
+ table_case = 0;
+ tdma_case = 0;
+ } else {
+ /* Non-Shared-Ant */
+ table_case = 100;
+ tdma_case = 100;
+ }
+
+ rtw_coex_set_ant_path(rtwdev, true, COEX_SET_ANT_5G);
+ rtw_coex_set_rf_para(rtwdev, chip->wl_rf_para_rx[0]);
+ rtw_coex_table(rtwdev, table_case);
+ rtw_coex_tdma(rtwdev, false, tdma_case);
+}
+
+static void rtw_coex_action_wl_only(struct rtw_dev *rtwdev)
+{
+ struct rtw_efuse *efuse = &rtwdev->efuse;
+ struct rtw_chip_info *chip = rtwdev->chip;
+ u8 table_case, tdma_case;
+
+ if (efuse->share_ant) {
+ /* Shared-Ant */
+ table_case = 2;
+ tdma_case = 0;
+ } else {
+ /* Non-Shared-Ant */
+ table_case = 100;
+ tdma_case = 100;
+ }
+
+ rtw_coex_set_ant_path(rtwdev, true, COEX_SET_ANT_2G);
+ rtw_coex_set_rf_para(rtwdev, chip->wl_rf_para_rx[0]);
+ rtw_coex_table(rtwdev, table_case);
+ rtw_coex_tdma(rtwdev, false, tdma_case);
+}
+
+static void rtw_coex_action_wl_native_lps(struct rtw_dev *rtwdev)
+{
+ struct rtw_coex *coex = &rtwdev->coex;
+ struct rtw_efuse *efuse = &rtwdev->efuse;
+ struct rtw_chip_info *chip = rtwdev->chip;
+ u8 table_case, tdma_case;
+
+ if (coex->under_5g)
+ return;
+
+ if (efuse->share_ant) {
+ /* Shared-Ant */
+ table_case = 28;
+ tdma_case = 0;
+ } else {
+ /* Non-Shared-Ant */
+ table_case = 100;
+ tdma_case = 100;
+ }
+
+ rtw_coex_set_ant_path(rtwdev, true, COEX_SET_ANT_2G);
+ rtw_coex_set_rf_para(rtwdev, chip->wl_rf_para_rx[0]);
+ rtw_coex_table(rtwdev, table_case);
+ rtw_coex_tdma(rtwdev, false, tdma_case);
+}
+
+static void rtw_coex_action_wl_linkscan(struct rtw_dev *rtwdev)
+{
+ struct rtw_coex *coex = &rtwdev->coex;
+ struct rtw_coex_stat *coex_stat = &coex->stat;
+ struct rtw_efuse *efuse = &rtwdev->efuse;
+ struct rtw_chip_info *chip = rtwdev->chip;
+ u8 table_case, tdma_case;
+
+ if (efuse->share_ant) {
+ /* Shared-Ant */
+ if (coex_stat->bt_a2dp_exist) {
+ table_case = 9;
+ tdma_case = 11;
+ } else {
+ table_case = 9;
+ tdma_case = 7;
+ }
+ } else {
+ /* Non-Shared-Ant */
+ if (coex_stat->bt_a2dp_exist) {
+ table_case = 112;
+ tdma_case = 111;
+ } else {
+ table_case = 112;
+ tdma_case = 107;
+ }
+ }
+
+ rtw_coex_set_ant_path(rtwdev, true, COEX_SET_ANT_2G);
+ rtw_coex_set_rf_para(rtwdev, chip->wl_rf_para_rx[0]);
+ rtw_coex_table(rtwdev, table_case);
+ rtw_coex_tdma(rtwdev, false, tdma_case);
+}
+
+static void rtw_coex_action_wl_not_connected(struct rtw_dev *rtwdev)
+{
+ struct rtw_efuse *efuse = &rtwdev->efuse;
+ struct rtw_chip_info *chip = rtwdev->chip;
+ u8 table_case, tdma_case;
+
+ if (efuse->share_ant) {
+ /* Shared-Ant */
+ table_case = 1;
+ tdma_case = 0;
+ } else {
+ /* Non-Shared-Ant */
+ table_case = 100;
+ tdma_case = 100;
+ }
+
+ rtw_coex_set_ant_path(rtwdev, true, COEX_SET_ANT_2G);
+ rtw_coex_set_rf_para(rtwdev, chip->wl_rf_para_rx[0]);
+ rtw_coex_table(rtwdev, table_case);
+ rtw_coex_tdma(rtwdev, false, tdma_case);
+}
+
+static void rtw_coex_action_wl_connected(struct rtw_dev *rtwdev)
+{
+ struct rtw_coex *coex = &rtwdev->coex;
+ struct rtw_coex_stat *coex_stat = &coex->stat;
+ struct rtw_coex_dm *coex_dm = &coex->dm;
+ struct rtw_efuse *efuse = &rtwdev->efuse;
+ u8 algorithm;
+
+ /* Non-Shared-Ant */
+ if (!efuse->share_ant && coex_stat->wl_gl_busy &&
+ COEX_RSSI_HIGH(coex_dm->wl_rssi_state[3]) &&
+ COEX_RSSI_HIGH(coex_dm->bt_rssi_state[0])) {
+ rtw_coex_action_freerun(rtwdev);
+ return;
+ }
+
+ algorithm = rtw_coex_algorithm(rtwdev);
+
+ switch (algorithm) {
+ case COEX_ALGO_HFP:
+ rtw_coex_action_bt_hfp(rtwdev);
+ break;
+ case COEX_ALGO_HID:
+ rtw_coex_action_bt_hid(rtwdev);
+ break;
+ case COEX_ALGO_A2DP:
+ if (coex_stat->bt_a2dp_sink)
+ rtw_coex_action_bt_a2dpsink(rtwdev);
+ else
+ rtw_coex_action_bt_a2dp(rtwdev);
+ break;
+ case COEX_ALGO_PAN:
+ rtw_coex_action_bt_pan(rtwdev);
+ break;
+ case COEX_ALGO_A2DP_HID:
+ rtw_coex_action_bt_a2dp_hid(rtwdev);
+ break;
+ case COEX_ALGO_A2DP_PAN:
+ rtw_coex_action_bt_a2dp_pan(rtwdev);
+ break;
+ case COEX_ALGO_PAN_HID:
+ rtw_coex_action_bt_pan_hid(rtwdev);
+ break;
+ case COEX_ALGO_A2DP_PAN_HID:
+ rtw_coex_action_bt_a2dp_pan_hid(rtwdev);
+ break;
+ default:
+ case COEX_ALGO_NOPROFILE:
+ rtw_coex_action_bt_idle(rtwdev);
+ break;
+ }
+}
+
+static void rtw_coex_run_coex(struct rtw_dev *rtwdev, u8 reason)
+{
+ struct rtw_coex *coex = &rtwdev->coex;
+ struct rtw_coex_dm *coex_dm = &coex->dm;
+ struct rtw_coex_stat *coex_stat = &coex->stat;
+
+ lockdep_assert_held(&rtwdev->mutex);
+
+ coex_dm->reason = reason;
+
+ /* update the wifi link info */
+ rtw_coex_update_wl_link_info(rtwdev, reason);
+
+ rtw_coex_monitor_bt_enable(rtwdev);
+
+ if (coex->stop_dm)
+ return;
+
+ if (coex_stat->wl_under_ips)
+ return;
+
+ if (coex->freeze && !coex_stat->bt_setup_link)
+ return;
+
+ coex_stat->cnt_wl[COEX_CNT_WL_COEXRUN]++;
+ coex->freerun = false;
+
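+ /* from here on, exactly one coex action runs, checked in priority order */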
+ /* Pure-5G Coex Process */
+ if (coex->under_5g) {
+ coex_stat->wl_coex_mode = COEX_WLINK_5G;
+ rtw_coex_action_wl_under5g(rtwdev);
+ goto exit;
+ }
+
+ coex_stat->wl_coex_mode = COEX_WLINK_2G1PORT;
+ rtw_coex_write_scbd(rtwdev, COEX_SCBD_FIX2M, false);
+ if (coex_stat->bt_disabled) {
+ rtw_coex_action_wl_only(rtwdev);
+ goto exit;
+ }
+
+ if (coex_stat->wl_under_lps && !coex_stat->wl_force_lps_ctrl) {
+ rtw_coex_action_wl_native_lps(rtwdev);
+ goto exit;
+ }
+
+ if (coex_stat->bt_whck_test) {
+ rtw_coex_action_bt_whql_test(rtwdev);
+ goto exit;
+ }
+
+ if (coex_stat->bt_setup_link) {
+ rtw_coex_action_bt_relink(rtwdev);
+ goto exit;
+ }
+
+ if (coex_stat->bt_inq_page) {
+ rtw_coex_action_bt_inquiry(rtwdev);
+ goto exit;
+ }
+
+ if ((coex_dm->bt_status == COEX_BTSTATUS_NCON_IDLE ||
+ coex_dm->bt_status == COEX_BTSTATUS_CON_IDLE) &&
+ coex_stat->wl_connected) {
+ rtw_coex_action_bt_idle(rtwdev);
+ goto exit;
+ }
+
+ if (coex_stat->wl_linkscan_proc) {
+ rtw_coex_action_wl_linkscan(rtwdev);
+ goto exit;
+ }
+
+ if (coex_stat->wl_connected)
+ rtw_coex_action_wl_connected(rtwdev);
+ else
+ rtw_coex_action_wl_not_connected(rtwdev);
+
+exit:
+ rtw_coex_set_gnt_fix(rtwdev);
+ rtw_coex_limited_wl(rtwdev);
+}
+
+static void rtw_coex_init_coex_var(struct rtw_dev *rtwdev)
+{
+ struct rtw_coex *coex = &rtwdev->coex;
+ struct rtw_coex_stat *coex_stat = &coex->stat;
+ struct rtw_coex_dm *coex_dm = &coex->dm;
+ u8 i;
+
+ memset(coex_dm, 0, sizeof(*coex_dm));
+ memset(coex_stat, 0, sizeof(*coex_stat));
+
+ for (i = 0; i < COEX_CNT_WL_MAX; i++)
+ coex_stat->cnt_wl[i] = 0;
+
+ for (i = 0; i < COEX_CNT_BT_MAX; i++)
+ coex_stat->cnt_bt[i] = 0;
+
+ for (i = 0; i < ARRAY_SIZE(coex_dm->bt_rssi_state); i++)
+ coex_dm->bt_rssi_state[i] = COEX_RSSI_STATE_LOW;
+
+ for (i = 0; i < ARRAY_SIZE(coex_dm->wl_rssi_state); i++)
+ coex_dm->wl_rssi_state[i] = COEX_RSSI_STATE_LOW;
+
+ coex_stat->wl_coex_mode = COEX_WLINK_MAX;
+}
+
+static void __rtw_coex_init_hw_config(struct rtw_dev *rtwdev, bool wifi_only)
+{
+ struct rtw_coex *coex = &rtwdev->coex;
+
+ rtw_coex_init_coex_var(rtwdev);
+ rtw_coex_monitor_bt_enable(rtwdev);
+ rtw_coex_set_rfe_type(rtwdev);
+ rtw_coex_set_init(rtwdev);
+
+ /* set Tx response = Hi-Pri (e.g. transmitting ACK, BA, CTS) */
+ rtw_coex_set_wl_pri_mask(rtwdev, COEX_WLPRI_TX_RSP, 1);
+
+ /* set Tx beacon = Hi-Pri */
+ rtw_coex_set_wl_pri_mask(rtwdev, COEX_WLPRI_TX_BEACON, 1);
+
+ /* set Tx beacon queue = Hi-Pri */
+ rtw_coex_set_wl_pri_mask(rtwdev, COEX_WLPRI_TX_BEACONQ, 1);
+
+ /* antenna config */
+ if (coex->wl_rf_off) {
+ rtw_coex_set_ant_path(rtwdev, true, COEX_SET_ANT_WOFF);
+ rtw_coex_write_scbd(rtwdev, COEX_SCBD_ALL, false);
+ coex->stop_dm = true;
+ } else if (wifi_only) {
+ rtw_coex_set_ant_path(rtwdev, true, COEX_SET_ANT_WONLY);
+ rtw_coex_write_scbd(rtwdev, COEX_SCBD_ACTIVE | COEX_SCBD_SCAN,
+ true);
+ coex->stop_dm = true;
+ } else {
+ rtw_coex_set_ant_path(rtwdev, true, COEX_SET_ANT_INIT);
+ rtw_coex_write_scbd(rtwdev, COEX_SCBD_ACTIVE | COEX_SCBD_SCAN,
+ true);
+ coex->stop_dm = false;
+ coex->freeze = true;
+ }
+
+ /* PTA parameter */
+ rtw_coex_table(rtwdev, 0);
+ rtw_coex_tdma(rtwdev, true, 0);
+ rtw_coex_query_bt_info(rtwdev);
+}
+
+void rtw_coex_power_on_setting(struct rtw_dev *rtwdev)
+{
+ struct rtw_coex *coex = &rtwdev->coex;
+
+ coex->stop_dm = true;
+ coex->wl_rf_off = false;
+
+ /* enable the BB block so that register 0x948 can be written */
+ rtw_write8_set(rtwdev, REG_SYS_FUNC_EN, BIT(0) | BIT(1));
+
+ rtw_coex_monitor_bt_enable(rtwdev);
+ rtw_coex_set_rfe_type(rtwdev);
+
+ /* set antenna path to BT */
+ rtw_coex_set_ant_path(rtwdev, true, COEX_SET_ANT_POWERON);
+
+ /* red x issue */
+ rtw_write8(rtwdev, 0xff1a, 0x0);
+}
+
+void rtw_coex_init_hw_config(struct rtw_dev *rtwdev, bool wifi_only)
+{
+ __rtw_coex_init_hw_config(rtwdev, wifi_only);
+}
+
+void rtw_coex_ips_notify(struct rtw_dev *rtwdev, u8 type)
+{
+ struct rtw_coex *coex = &rtwdev->coex;
+ struct rtw_coex_stat *coex_stat = &coex->stat;
+
+ if (coex->stop_dm)
+ return;
+
+ if (type == COEX_IPS_ENTER) {
+ coex_stat->wl_under_ips = true;
+
+ /* for lps off */
+ rtw_coex_write_scbd(rtwdev, COEX_SCBD_ALL, false);
+
+ rtw_coex_set_ant_path(rtwdev, true, COEX_SET_ANT_WOFF);
+ rtw_coex_action_coex_all_off(rtwdev);
+ } else if (type == COEX_IPS_LEAVE) {
+ rtw_coex_write_scbd(rtwdev, COEX_SCBD_ACTIVE | COEX_SCBD_ONOFF, true);
+
+ /* run the init hw config again (not wifi-only mode) */
+ __rtw_coex_init_hw_config(rtwdev, false);
+ /* sw all off */
+
+ coex_stat->wl_under_ips = false;
+ }
+}
+
+void rtw_coex_lps_notify(struct rtw_dev *rtwdev, u8 type)
+{
+ struct rtw_coex *coex = &rtwdev->coex;
+ struct rtw_coex_stat *coex_stat = &coex->stat;
+
+ if (coex->stop_dm)
+ return;
+
+ if (type == COEX_LPS_ENABLE) {
+ coex_stat->wl_under_lps = true;
+
+ if (coex_stat->wl_force_lps_ctrl) {
+ /* for ps-tdma */
+ rtw_coex_write_scbd(rtwdev, COEX_SCBD_ACTIVE, true);
+ } else {
+ /* for native ps */
+ rtw_coex_write_scbd(rtwdev, COEX_SCBD_ACTIVE, false);
+
+ rtw_coex_run_coex(rtwdev, COEX_RSN_LPS);
+ }
+ } else if (type == COEX_LPS_DISABLE) {
+ coex_stat->wl_under_lps = false;
+
+ /* for lps off */
+ rtw_coex_write_scbd(rtwdev, COEX_SCBD_ACTIVE, true);
+
+ if (!coex_stat->wl_force_lps_ctrl)
+ rtw_coex_query_bt_info(rtwdev);
+ }
+}
+
+void rtw_coex_scan_notify(struct rtw_dev *rtwdev, u8 type)
+{
+ struct rtw_coex *coex = &rtwdev->coex;
+ struct rtw_coex_stat *coex_stat = &coex->stat;
+
+ if (coex->stop_dm)
+ return;
+
+ coex->freeze = false;
+
+ if (type != COEX_SCAN_FINISH)
+ rtw_coex_write_scbd(rtwdev, COEX_SCBD_ACTIVE | COEX_SCBD_SCAN |
+ COEX_SCBD_ONOFF, true);
+
+ if (type == COEX_SCAN_START_5G) {
+ rtw_coex_set_ant_path(rtwdev, true, COEX_SET_ANT_5G);
+ rtw_coex_run_coex(rtwdev, COEX_RSN_5GSCANSTART);
+ } else if ((type == COEX_SCAN_START_2G) || (type == COEX_SCAN_START)) {
+ coex_stat->wl_hi_pri_task2 = true;
+
+ /* Force antenna setup for no scan result issue */
+ rtw_coex_set_ant_path(rtwdev, true, COEX_SET_ANT_2G);
+ rtw_coex_run_coex(rtwdev, COEX_RSN_2GSCANSTART);
+ } else {
+ coex_stat->wl_hi_pri_task2 = false;
+ rtw_coex_run_coex(rtwdev, COEX_RSN_SCANFINISH);
+ }
+}
+
+void rtw_coex_switchband_notify(struct rtw_dev *rtwdev, u8 type)
+{
+ struct rtw_coex *coex = &rtwdev->coex;
+
+ if (coex->stop_dm)
+ return;
+
+ if (type == COEX_SWITCH_TO_5G)
+ rtw_coex_run_coex(rtwdev, COEX_RSN_5GSWITCHBAND);
+ else if (type == COEX_SWITCH_TO_24G_NOFORSCAN)
+ rtw_coex_run_coex(rtwdev, COEX_RSN_2GSWITCHBAND);
+ else
+ rtw_coex_scan_notify(rtwdev, COEX_SCAN_START_2G);
+}
+
+void rtw_coex_connect_notify(struct rtw_dev *rtwdev, u8 type)
+{
+ struct rtw_coex *coex = &rtwdev->coex;
+ struct rtw_coex_stat *coex_stat = &coex->stat;
+
+ if (coex->stop_dm)
+ return;
+
+ rtw_coex_write_scbd(rtwdev, COEX_SCBD_ACTIVE | COEX_SCBD_SCAN |
+ COEX_SCBD_ONOFF, true);
+
+ if (type == COEX_ASSOCIATE_5G_START) {
+ rtw_coex_set_ant_path(rtwdev, true, COEX_SET_ANT_5G);
+ rtw_coex_run_coex(rtwdev, COEX_RSN_5GCONSTART);
+ } else if (type == COEX_ASSOCIATE_5G_FINISH) {
+ rtw_coex_set_ant_path(rtwdev, true, COEX_SET_ANT_5G);
+ rtw_coex_run_coex(rtwdev, COEX_RSN_5GCONFINISH);
+ } else if (type == COEX_ASSOCIATE_START) {
+ coex_stat->wl_hi_pri_task1 = true;
+ coex_stat->cnt_wl[COEX_CNT_WL_CONNPKT] = 2;
+
+ /* Force antenna setup for no scan result issue */
+ rtw_coex_set_ant_path(rtwdev, true, COEX_SET_ANT_2G);
+
+ rtw_coex_run_coex(rtwdev, COEX_RSN_2GCONSTART);
+
+ /* keep the TDMA case during the connect process to avoid
+ * it being changed by BT info or the run-coex mechanism
+ */
+ coex->freeze = true;
+ ieee80211_queue_delayed_work(rtwdev->hw, &coex->defreeze_work,
+ 5 * HZ);
+ } else {
+ coex_stat->wl_hi_pri_task1 = false;
+ coex->freeze = false;
+
+ rtw_coex_run_coex(rtwdev, COEX_RSN_2GCONFINISH);
+ }
+}
+
+void rtw_coex_media_status_notify(struct rtw_dev *rtwdev, u8 type)
+{
+ struct rtw_coex *coex = &rtwdev->coex;
+ struct rtw_coex_stat *coex_stat = &coex->stat;
+ u8 para[6] = {0};
+
+ if (coex->stop_dm)
+ return;
+
+ if (type == COEX_MEDIA_CONNECT_5G) {
+ rtw_coex_write_scbd(rtwdev, COEX_SCBD_ACTIVE, true);
+
+ rtw_coex_set_ant_path(rtwdev, true, COEX_SET_ANT_5G);
+ rtw_coex_run_coex(rtwdev, COEX_RSN_5GMEDIA);
+ } else if (type == COEX_MEDIA_CONNECT) {
+ rtw_coex_write_scbd(rtwdev, COEX_SCBD_ACTIVE, true);
+
+ /* Force antenna setup for no scan result issue */
+ rtw_coex_set_ant_path(rtwdev, true, COEX_SET_ANT_2G);
+
+ /* Set CCK Rx high Pri */
+ rtw_coex_set_wl_pri_mask(rtwdev, COEX_WLPRI_RX_CCK, 1);
+
+ /* always enable the 5 ms slot extension when connected */
+ para[0] = COEX_H2C69_WL_LEAKAP;
+ para[1] = PARA1_H2C69_EN_5MS; /* enable 5ms extend */
+ rtw_fw_bt_wifi_control(rtwdev, para[0], &para[1]);
+ coex_stat->wl_slot_extend = true;
+ rtw_coex_run_coex(rtwdev, COEX_RSN_2GMEDIA);
+ } else {
+ rtw_coex_write_scbd(rtwdev, COEX_SCBD_ACTIVE, false);
+
+ rtw_coex_set_wl_pri_mask(rtwdev, COEX_WLPRI_RX_CCK, 0);
+
+ rtw_coex_run_coex(rtwdev, COEX_RSN_MEDIADISCON);
+ }
+
+ rtw_coex_update_wl_ch_info(rtwdev, type);
+}
+
+void rtw_coex_bt_info_notify(struct rtw_dev *rtwdev, u8 *buf, u8 length)
+{
+ struct rtw_coex *coex = &rtwdev->coex;
+ struct rtw_coex_stat *coex_stat = &coex->stat;
+ struct rtw_chip_info *chip = rtwdev->chip;
+ unsigned long bt_relink_time;
+ u8 i, rsp_source = 0, type;
+
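+ /* the low nibble of the first byte identifies the C2H response source */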
+ rsp_source = buf[0] & 0xf;
+ if (rsp_source >= COEX_BTINFO_SRC_MAX)
+ rsp_source = COEX_BTINFO_SRC_WL_FW;
+
+ if (rsp_source == COEX_BTINFO_SRC_BT_IQK) {
+ coex_stat->bt_iqk_state = buf[1];
+ if (coex_stat->bt_iqk_state == 1)
+ coex_stat->cnt_bt[COEX_CNT_BT_IQK]++;
+ else if (coex_stat->bt_iqk_state == 2)
+ coex_stat->cnt_bt[COEX_CNT_BT_IQKFAIL]++;
+
+ return;
+ }
+
+ if (rsp_source == COEX_BTINFO_SRC_BT_SCBD) {
+ rtw_coex_monitor_bt_enable(rtwdev);
+ if (coex_stat->bt_disabled != coex_stat->bt_disabled_pre) {
+ coex_stat->bt_disabled_pre = coex_stat->bt_disabled;
+ rtw_coex_run_coex(rtwdev, COEX_RSN_BTINFO);
+ }
+ return;
+ }
+
+ if (rsp_source == COEX_BTINFO_SRC_BT_RSP ||
+ rsp_source == COEX_BTINFO_SRC_BT_ACT) {
+ if (coex_stat->bt_disabled) {
+ coex_stat->bt_disabled = false;
+ coex_stat->bt_reenable = true;
+ ieee80211_queue_delayed_work(rtwdev->hw,
+ &coex->bt_reenable_work,
+ 15 * HZ);
+ }
+ }
+
+ for (i = 0; i < length && i < COEX_BTINFO_LENGTH_MAX; i++)
+ coex_stat->bt_info_c2h[rsp_source][i] = buf[i];
+
+ if (rsp_source == COEX_BTINFO_SRC_WL_FW) {
+ rtw_coex_update_bt_link_info(rtwdev);
+ rtw_coex_run_coex(rtwdev, COEX_RSN_BTINFO);
+ return;
+ }
+
+ /* skip if BT reported the same info as last time */
+ if (coex_stat->bt_info_c2h[rsp_source][1] == coex_stat->bt_info_lb2 &&
+ coex_stat->bt_info_c2h[rsp_source][2] == coex_stat->bt_info_lb3 &&
+ coex_stat->bt_info_c2h[rsp_source][3] == coex_stat->bt_info_hb0 &&
+ coex_stat->bt_info_c2h[rsp_source][4] == coex_stat->bt_info_hb1 &&
+ coex_stat->bt_info_c2h[rsp_source][5] == coex_stat->bt_info_hb2 &&
+ coex_stat->bt_info_c2h[rsp_source][6] == coex_stat->bt_info_hb3)
+ return;
+
+ coex_stat->bt_info_lb2 = coex_stat->bt_info_c2h[rsp_source][1];
+ coex_stat->bt_info_lb3 = coex_stat->bt_info_c2h[rsp_source][2];
+ coex_stat->bt_info_hb0 = coex_stat->bt_info_c2h[rsp_source][3];
+ coex_stat->bt_info_hb1 = coex_stat->bt_info_c2h[rsp_source][4];
+ coex_stat->bt_info_hb2 = coex_stat->bt_info_c2h[rsp_source][5];
+ coex_stat->bt_info_hb3 = coex_stat->bt_info_c2h[rsp_source][6];
+
+ /* 0xff means BT is under WHCK test */
+ coex_stat->bt_whck_test = (coex_stat->bt_info_lb2 == 0xff);
+ coex_stat->bt_inq_page = ((coex_stat->bt_info_lb2 & BIT(2)) == BIT(2));
+ coex_stat->bt_acl_busy = ((coex_stat->bt_info_lb2 & BIT(3)) == BIT(3));
+ coex_stat->cnt_bt[COEX_CNT_BT_RETRY] = coex_stat->bt_info_lb3 & 0xf;
+ if (coex_stat->cnt_bt[COEX_CNT_BT_RETRY] >= 1)
+ coex_stat->cnt_bt[COEX_CNT_BT_POPEVENT]++;
+
+ coex_stat->bt_fix_2M = ((coex_stat->bt_info_lb3 & BIT(4)) == BIT(4));
+ coex_stat->bt_inq = ((coex_stat->bt_info_lb3 & BIT(5)) == BIT(5));
+ if (coex_stat->bt_inq)
+ coex_stat->cnt_bt[COEX_CNT_BT_INQ]++;
+
+ coex_stat->bt_page = ((coex_stat->bt_info_lb3 & BIT(7)) == BIT(7));
+ if (coex_stat->bt_page) {
+ coex_stat->cnt_bt[COEX_CNT_BT_PAGE]++;
+ if (coex_stat->wl_linkscan_proc ||
+ coex_stat->wl_hi_pri_task1 ||
+ coex_stat->wl_hi_pri_task2 || coex_stat->wl_gl_busy)
+ rtw_coex_write_scbd(rtwdev, COEX_SCBD_SCAN, true);
+ else
+ rtw_coex_write_scbd(rtwdev, COEX_SCBD_SCAN, false);
+ } else {
+ rtw_coex_write_scbd(rtwdev, COEX_SCBD_SCAN, false);
+ }
+
+ /* unit: % (subtract 100 to translate to dBm in the coex info) */
+ if (chip->bt_rssi_type == COEX_BTRSSI_RATIO) {
+ coex_stat->bt_rssi = coex_stat->bt_info_hb0 * 2 + 10;
+ } else { /* original unit: dBm -> % here; value-100 in coex info gives dBm */
+ if (coex_stat->bt_info_hb0 <= 127)
+ coex_stat->bt_rssi = 100;
+ else if (256 - coex_stat->bt_info_hb0 <= 100)
+ coex_stat->bt_rssi = 100 - (256 - coex_stat->bt_info_hb0);
+ else
+ coex_stat->bt_rssi = 0;
+ }
+
+ coex_stat->bt_ble_exist = ((coex_stat->bt_info_hb1 & BIT(0)) == BIT(0));
+ if (coex_stat->bt_info_hb1 & BIT(1))
+ coex_stat->cnt_bt[COEX_CNT_BT_REINIT]++;
+
+ if (coex_stat->bt_info_hb1 & BIT(2)) {
+ coex_stat->cnt_bt[COEX_CNT_BT_SETUPLINK]++;
+ coex_stat->bt_setup_link = true;
+ if (coex_stat->bt_reenable)
+ bt_relink_time = 6 * HZ;
+ else
+ bt_relink_time = 2 * HZ;
+
+ ieee80211_queue_delayed_work(rtwdev->hw,
+ &coex->bt_relink_work,
+ bt_relink_time);
+ }
+
+ if (coex_stat->bt_info_hb1 & BIT(3))
+ coex_stat->cnt_bt[COEX_CNT_BT_IGNWLANACT]++;
+
+ coex_stat->bt_ble_voice = ((coex_stat->bt_info_hb1 & BIT(4)) == BIT(4));
+ coex_stat->bt_ble_scan_en = ((coex_stat->bt_info_hb1 & BIT(5)) == BIT(5));
+ if (coex_stat->bt_info_hb1 & BIT(6))
+ coex_stat->cnt_bt[COEX_CNT_BT_ROLESWITCH]++;
+
+ coex_stat->bt_multi_link = ((coex_stat->bt_info_hb1 & BIT(7)) == BIT(7));
+ /* resend wifi info to BT, since BT was reset and lost it */
+ if ((coex_stat->bt_info_hb1 & BIT(1))) {
+ if (coex_stat->wl_connected)
+ type = COEX_MEDIA_CONNECT;
+ else
+ type = COEX_MEDIA_DISCONNECT;
+ rtw_coex_update_wl_ch_info(rtwdev, type);
+ }
+
+ /* if ignore_wlan_act && not set_up_link */
+ if ((coex_stat->bt_info_hb1 & BIT(3)) &&
+ (!(coex_stat->bt_info_hb1 & BIT(2))))
+ rtw_coex_ignore_wlan_act(rtwdev, false);
+
+ coex_stat->bt_opp_exist = ((coex_stat->bt_info_hb2 & BIT(0)) == BIT(0));
+ if (coex_stat->bt_info_hb2 & BIT(1))
+ coex_stat->cnt_bt[COEX_CNT_BT_AFHUPDATE]++;
+
+ coex_stat->bt_a2dp_active = (coex_stat->bt_info_hb2 & BIT(2)) == BIT(2);
+ coex_stat->bt_slave = ((coex_stat->bt_info_hb2 & BIT(3)) == BIT(3));
+ coex_stat->bt_hid_slot = (coex_stat->bt_info_hb2 & 0x30) >> 4;
+ coex_stat->bt_hid_pair_num = (coex_stat->bt_info_hb2 & 0xc0) >> 6;
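+ /* a paired HID occupying two or more slots is flagged as a 4.18 HID */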
+ if (coex_stat->bt_hid_pair_num > 0 && coex_stat->bt_hid_slot >= 2)
+ coex_stat->bt_418_hid_exist = true;
+ else if (coex_stat->bt_hid_pair_num == 0)
+ coex_stat->bt_418_hid_exist = false;
+
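+ /* 0x49 = connection + ACL-busy + A2DP bits; bitpool is only valid then */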
+ if ((coex_stat->bt_info_lb2 & 0x49) == 0x49)
+ coex_stat->bt_a2dp_bitpool = (coex_stat->bt_info_hb3 & 0x7f);
+ else
+ coex_stat->bt_a2dp_bitpool = 0;
+
+ coex_stat->bt_a2dp_sink = ((coex_stat->bt_info_hb3 & BIT(7)) == BIT(7));
+
+ rtw_coex_update_bt_link_info(rtwdev);
+ rtw_coex_run_coex(rtwdev, COEX_RSN_BTINFO);
+}
+
+void rtw_coex_wl_fwdbginfo_notify(struct rtw_dev *rtwdev, u8 *buf, u8 length)
+{
+ struct rtw_coex *coex = &rtwdev->coex;
+ struct rtw_coex_stat *coex_stat = &coex->stat;
+ u8 val;
+ int i;
+
+ if (WARN(length < 8, "invalid wl info c2h length\n"))
+ return;
+
+ if (buf[0] != 0x08)
+ return;
+
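+ /* store the absolute delta of each firmware debug counter since the
+ * last report
+ */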
+ for (i = 1; i < 8; i++) {
+ val = coex_stat->wl_fw_dbg_info_pre[i];
+ if (buf[i] >= val)
+ coex_stat->wl_fw_dbg_info[i] = buf[i] - val;
+ else
+ coex_stat->wl_fw_dbg_info[i] = val - buf[i];
+
+ coex_stat->wl_fw_dbg_info_pre[i] = buf[i];
+ }
+
+ coex_stat->cnt_wl[COEX_CNT_WL_FW_NOTIFY]++;
+ rtw_coex_wl_ccklock_action(rtwdev);
+ rtw_coex_wl_ccklock_detect(rtwdev);
+}
+
+void rtw_coex_coex_dm_reset(struct rtw_dev *rtwdev)
+{
+ __rtw_coex_init_hw_config(rtwdev, false);
+}
+
+void rtw_coex_wl_status_change_notify(struct rtw_dev *rtwdev)
+{
+ struct rtw_coex *coex = &rtwdev->coex;
+
+ if (coex->stop_dm)
+ return;
+
+ rtw_coex_run_coex(rtwdev, COEX_RSN_WLSTATUS);
+}
+
+void rtw_coex_bt_relink_work(struct work_struct *work)
+{
+ struct rtw_dev *rtwdev = container_of(work, struct rtw_dev,
+ coex.bt_relink_work.work);
+ struct rtw_coex_stat *coex_stat = &rtwdev->coex.stat;
+
+ mutex_lock(&rtwdev->mutex);
+ coex_stat->bt_setup_link = false;
+ rtw_coex_run_coex(rtwdev, COEX_RSN_WLSTATUS);
+ mutex_unlock(&rtwdev->mutex);
+}
+
+void rtw_coex_bt_reenable_work(struct work_struct *work)
+{
+ struct rtw_dev *rtwdev = container_of(work, struct rtw_dev,
+ coex.bt_reenable_work.work);
+ struct rtw_coex_stat *coex_stat = &rtwdev->coex.stat;
+
+ mutex_lock(&rtwdev->mutex);
+ coex_stat->bt_reenable = false;
+ mutex_unlock(&rtwdev->mutex);
+}
+
+void rtw_coex_defreeze_work(struct work_struct *work)
+{
+ struct rtw_dev *rtwdev = container_of(work, struct rtw_dev,
+ coex.defreeze_work.work);
+ struct rtw_coex *coex = &rtwdev->coex;
+ struct rtw_coex_stat *coex_stat = &rtwdev->coex.stat;
+
+ mutex_lock(&rtwdev->mutex);
+ coex->freeze = false;
+ coex_stat->wl_hi_pri_task1 = false;
+ rtw_coex_run_coex(rtwdev, COEX_RSN_WLSTATUS);
+ mutex_unlock(&rtwdev->mutex);
+}
diff --git a/drivers/net/wireless/realtek/rtw88/coex.h b/drivers/net/wireless/realtek/rtw88/coex.h
new file mode 100644
index 000000000000..56e871b2d6c2
--- /dev/null
+++ b/drivers/net/wireless/realtek/rtw88/coex.h
@@ -0,0 +1,369 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/* Copyright(c) 2018-2019 Realtek Corporation
+ */
+
+#ifndef __RTW_COEX_H__
+#define __RTW_COEX_H__
+
+/* BT profile map bit definition */
+#define BPM_HFP BIT(0)
+#define BPM_HID BIT(1)
+#define BPM_A2DP BIT(2)
+#define BPM_PAN BIT(3)
+
+#define COEX_RESP_ACK_BY_WL_FW 0x1
+#define COEX_REQUEST_TIMEOUT msecs_to_jiffies(10)
+
+#define COEX_MIN_DELAY 10 /* delay unit in ms */
+#define COEX_RFK_TIMEOUT 600 /* RFK timeout in ms */
+
+#define COEX_RF_OFF 0x0
+#define COEX_RF_ON 0x1
+
+#define COEX_H2C69_WL_LEAKAP 0xc
+#define PARA1_H2C69_DIS_5MS 0x1
+#define PARA1_H2C69_EN_5MS 0x0
+
+#define COEX_H2C69_TDMA_SLOT 0xb
+#define PARA1_H2C69_TDMA_4SLOT 0xc1
+#define PARA1_H2C69_TDMA_2SLOT 0x1
+
+#define TDMA_4SLOT BIT(8)
+
+#define COEX_RSSI_STEP 4
+#define COEX_RSSI_HIGH(rssi) \
+ ({ typeof(rssi) __rssi__ = rssi; \
+ (__rssi__ == COEX_RSSI_STATE_HIGH || \
+ __rssi__ == COEX_RSSI_STATE_STAY_HIGH); })
+
+#define COEX_RSSI_MEDIUM(rssi) \
+ ({ typeof(rssi) __rssi__ = rssi; \
+ (__rssi__ == COEX_RSSI_STATE_MEDIUM || \
+ __rssi__ == COEX_RSSI_STATE_STAY_MEDIUM); })
+
+#define COEX_RSSI_LOW(rssi) \
+ ({ typeof(rssi) __rssi__ = rssi; \
+ (__rssi__ == COEX_RSSI_STATE_LOW || \
+ __rssi__ == COEX_RSSI_STATE_STAY_LOW); })
+
+#define GET_COEX_RESP_BT_SCAN_TYPE(payload) \
+ le64_get_bits(*((__le64 *)(payload)), GENMASK(31, 24))
+
+enum coex_mp_info_op {
+ BT_MP_INFO_OP_PATCH_VER = 0x00,
+ BT_MP_INFO_OP_READ_REG = 0x11,
+ BT_MP_INFO_OP_SUPP_FEAT = 0x2a,
+ BT_MP_INFO_OP_SUPP_VER = 0x2b,
+ BT_MP_INFO_OP_SCAN_TYPE = 0x2d,
+ BT_MP_INFO_OP_LNA_CONSTRAINT = 0x32,
+};
+
+enum coex_set_ant_phase {
+ COEX_SET_ANT_INIT,
+ COEX_SET_ANT_WONLY,
+ COEX_SET_ANT_WOFF,
+ COEX_SET_ANT_2G,
+ COEX_SET_ANT_5G,
+ COEX_SET_ANT_POWERON,
+ COEX_SET_ANT_2G_WLBT,
+ COEX_SET_ANT_2G_FREERUN,
+
+ COEX_SET_ANT_MAX
+};
+
+enum coex_runreason {
+ COEX_RSN_2GSCANSTART = 0,
+ COEX_RSN_5GSCANSTART = 1,
+ COEX_RSN_SCANFINISH = 2,
+ COEX_RSN_2GSWITCHBAND = 3,
+ COEX_RSN_5GSWITCHBAND = 4,
+ COEX_RSN_2GCONSTART = 5,
+ COEX_RSN_5GCONSTART = 6,
+ COEX_RSN_2GCONFINISH = 7,
+ COEX_RSN_5GCONFINISH = 8,
+ COEX_RSN_2GMEDIA = 9,
+ COEX_RSN_5GMEDIA = 10,
+ COEX_RSN_MEDIADISCON = 11,
+ COEX_RSN_BTINFO = 12,
+ COEX_RSN_LPS = 13,
+ COEX_RSN_WLSTATUS = 14,
+
+ COEX_RSN_MAX
+};
+
+enum coex_lte_coex_table_type {
+ COEX_CTT_WL_VS_LTE,
+ COEX_CTT_BT_VS_LTE,
+};
+
+enum coex_gnt_setup_state {
+ COEX_GNT_SET_HW_PTA = 0x0,
+ COEX_GNT_SET_SW_LOW = 0x1,
+ COEX_GNT_SET_SW_HIGH = 0x3,
+};
+
+enum coex_ext_ant_switch_pos_type {
+ COEX_SWITCH_TO_BT,
+ COEX_SWITCH_TO_WLG,
+ COEX_SWITCH_TO_WLA,
+ COEX_SWITCH_TO_NOCARE,
+ COEX_SWITCH_TO_WLG_BT,
+
+ COEX_SWITCH_TO_MAX
+};
+
+enum coex_ext_ant_switch_ctrl_type {
+ COEX_SWITCH_CTRL_BY_BBSW,
+ COEX_SWITCH_CTRL_BY_PTA,
+ COEX_SWITCH_CTRL_BY_ANTDIV,
+ COEX_SWITCH_CTRL_BY_MAC,
+ COEX_SWITCH_CTRL_BY_BT,
+ COEX_SWITCH_CTRL_BY_FW,
+
+ COEX_SWITCH_CTRL_MAX
+};
+
+enum coex_algorithm {
+ COEX_ALGO_NOPROFILE = 0,
+ COEX_ALGO_HFP = 1,
+ COEX_ALGO_HID = 2,
+ COEX_ALGO_A2DP = 3,
+ COEX_ALGO_PAN = 4,
+ COEX_ALGO_A2DP_HID = 5,
+ COEX_ALGO_A2DP_PAN = 6,
+ COEX_ALGO_PAN_HID = 7,
+ COEX_ALGO_A2DP_PAN_HID = 8,
+
+ COEX_ALGO_MAX
+};
+
+enum coex_wl_link_mode {
+ COEX_WLINK_2G1PORT = 0x0,
+ COEX_WLINK_5G = 0x3,
+ COEX_WLINK_MAX
+};
+
+enum coex_wl2bt_scoreboard {
+ COEX_SCBD_ACTIVE = BIT(0),
+ COEX_SCBD_ONOFF = BIT(1),
+ COEX_SCBD_SCAN = BIT(2),
+ COEX_SCBD_UNDERTEST = BIT(3),
+ COEX_SCBD_RXGAIN = BIT(4),
+ COEX_SCBD_BT_RFK = BIT(5),
+ COEX_SCBD_WLBUSY = BIT(6),
+ COEX_SCBD_EXTFEM = BIT(8),
+ COEX_SCBD_TDMA = BIT(9),
+ COEX_SCBD_FIX2M = BIT(10),
+ COEX_SCBD_ALL = GENMASK(15, 0),
+};
+
+enum coex_power_save_type {
+ COEX_PS_WIFI_NATIVE = 0,
+ COEX_PS_LPS_ON = 1,
+ COEX_PS_LPS_OFF = 2,
+};
+
+enum coex_rssi_state {
+ COEX_RSSI_STATE_HIGH,
+ COEX_RSSI_STATE_MEDIUM,
+ COEX_RSSI_STATE_LOW,
+ COEX_RSSI_STATE_STAY_HIGH,
+ COEX_RSSI_STATE_STAY_MEDIUM,
+ COEX_RSSI_STATE_STAY_LOW,
+};
+
+enum coex_notify_type_ips {
+ COEX_IPS_LEAVE = 0x0,
+ COEX_IPS_ENTER = 0x1,
+};
+
+enum coex_notify_type_lps {
+ COEX_LPS_DISABLE = 0x0,
+ COEX_LPS_ENABLE = 0x1,
+};
+
+enum coex_notify_type_scan {
+ COEX_SCAN_FINISH,
+ COEX_SCAN_START,
+ COEX_SCAN_START_2G,
+ COEX_SCAN_START_5G,
+};
+
+enum coex_notify_type_switchband {
+ COEX_NOT_SWITCH,
+ COEX_SWITCH_TO_24G,
+ COEX_SWITCH_TO_5G,
+ COEX_SWITCH_TO_24G_NOFORSCAN,
+};
+
+enum coex_notify_type_associate {
+ COEX_ASSOCIATE_FINISH,
+ COEX_ASSOCIATE_START,
+ COEX_ASSOCIATE_5G_FINISH,
+ COEX_ASSOCIATE_5G_START,
+};
+
+enum coex_notify_type_media_status {
+ COEX_MEDIA_DISCONNECT,
+ COEX_MEDIA_CONNECT,
+ COEX_MEDIA_CONNECT_5G,
+};
+
+enum coex_bt_status {
+ COEX_BTSTATUS_NCON_IDLE = 0,
+ COEX_BTSTATUS_CON_IDLE = 1,
+ COEX_BTSTATUS_INQ_PAGE = 2,
+ COEX_BTSTATUS_ACL_BUSY = 3,
+ COEX_BTSTATUS_SCO_BUSY = 4,
+ COEX_BTSTATUS_ACL_SCO_BUSY = 5,
+
+ COEX_BTSTATUS_MAX
+};
+
+enum coex_wl_tput_dir {
+ COEX_WL_TPUT_TX = 0x0,
+ COEX_WL_TPUT_RX = 0x1,
+ COEX_WL_TPUT_MAX
+};
+
+enum coex_wl_priority_mask {
+ COEX_WLPRI_RX_RSP = 2,
+ COEX_WLPRI_TX_RSP = 3,
+ COEX_WLPRI_TX_BEACON = 4,
+ COEX_WLPRI_TX_OFDM = 11,
+ COEX_WLPRI_TX_CCK = 12,
+ COEX_WLPRI_TX_BEACONQ = 27,
+ COEX_WLPRI_RX_CCK = 28,
+ COEX_WLPRI_RX_OFDM = 29,
+ COEX_WLPRI_MAX
+};
+
+enum coex_commom_chip_setup {
+ COEX_CSETUP_INIT_HW = 0x0,
+ COEX_CSETUP_ANT_SWITCH = 0x1,
+ COEX_CSETUP_GNT_FIX = 0x2,
+ COEX_CSETUP_GNT_DEBUG = 0x3,
+ COEX_CSETUP_RFE_TYPE = 0x4,
+ COEX_CSETUP_COEXINFO_HW = 0x5,
+ COEX_CSETUP_WL_TX_POWER = 0x6,
+ COEX_CSETUP_WL_RX_GAIN = 0x7,
+ COEX_CSETUP_WLAN_ACT_IPS = 0x8,
+ COEX_CSETUP_MAX
+};
+
+enum coex_indirect_reg_type {
+ COEX_INDIRECT_1700 = 0x0,
+ COEX_INDIRECT_7C0 = 0x1,
+ COEX_INDIRECT_MAX
+};
+
+enum coex_pstdma_type {
+ COEX_PSTDMA_FORCE_LPSOFF = 0x0,
+ COEX_PSTDMA_FORCE_LPSON = 0x1,
+ COEX_PSTDMA_MAX
+};
+
+enum coex_btrssi_type {
+ COEX_BTRSSI_RATIO = 0x0,
+ COEX_BTRSSI_DBM = 0x1,
+ COEX_BTRSSI_MAX
+};
+
+struct coex_table_para {
+ u32 bt;
+ u32 wl;
+};
+
+struct coex_tdma_para {
+ u8 para[5];
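+ /* rewrite the buffer descriptor so the device can DMA into this
+ * buffer again
+ */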
+};
+
+struct coex_5g_afh_map {
+ u32 wl_5g_ch;
+ u8 bt_skip_ch;
+ u8 bt_skip_span;
+};
+
+struct coex_rf_para {
+ u8 wl_pwr_dec_lvl;
+ u8 bt_pwr_dec_lvl;
+ bool wl_low_gain_en;
+ u8 bt_lna_lvl;
+};
+
+static inline void rtw_coex_set_init(struct rtw_dev *rtwdev)
+{
+ struct rtw_chip_info *chip = rtwdev->chip;
+
+ chip->ops->coex_set_init(rtwdev);
+}
+
+static inline
+void rtw_coex_set_ant_switch(struct rtw_dev *rtwdev, u8 ctrl_type, u8 pos_type)
+{
+ struct rtw_chip_info *chip = rtwdev->chip;
+
+ if (!chip->ops->coex_set_ant_switch)
+ return;
+
+ chip->ops->coex_set_ant_switch(rtwdev, ctrl_type, pos_type);
+}
+
+static inline void rtw_coex_set_gnt_fix(struct rtw_dev *rtwdev)
+{
+ struct rtw_chip_info *chip = rtwdev->chip;
+
+ chip->ops->coex_set_gnt_fix(rtwdev);
+}
+
+static inline void rtw_coex_set_gnt_debug(struct rtw_dev *rtwdev)
+{
+ struct rtw_chip_info *chip = rtwdev->chip;
+
+ chip->ops->coex_set_gnt_debug(rtwdev);
+}
+
+static inline void rtw_coex_set_rfe_type(struct rtw_dev *rtwdev)
+{
+ struct rtw_chip_info *chip = rtwdev->chip;
+
+ chip->ops->coex_set_rfe_type(rtwdev);
+}
+
+static inline void rtw_coex_set_wl_tx_power(struct rtw_dev *rtwdev, u8 wl_pwr)
+{
+ struct rtw_chip_info *chip = rtwdev->chip;
+
+ chip->ops->coex_set_wl_tx_power(rtwdev, wl_pwr);
+}
+
+static inline
+void rtw_coex_set_wl_rx_gain(struct rtw_dev *rtwdev, bool low_gain)
+{
+ struct rtw_chip_info *chip = rtwdev->chip;
+
+ chip->ops->coex_set_wl_rx_gain(rtwdev, low_gain);
+}
+
+void rtw_coex_info_response(struct rtw_dev *rtwdev, struct sk_buff *skb);
+void rtw_coex_write_indirect_reg(struct rtw_dev *rtwdev, u16 addr,
+ u32 mask, u32 val);
+void rtw_coex_write_scbd(struct rtw_dev *rtwdev, u16 bitpos, bool set);
+
+void rtw_coex_bt_relink_work(struct work_struct *work);
+void rtw_coex_bt_reenable_work(struct work_struct *work);
+void rtw_coex_defreeze_work(struct work_struct *work);
+
+void rtw_coex_power_on_setting(struct rtw_dev *rtwdev);
+void rtw_coex_init_hw_config(struct rtw_dev *rtwdev, bool wifi_only);
+void rtw_coex_ips_notify(struct rtw_dev *rtwdev, u8 type);
+void rtw_coex_lps_notify(struct rtw_dev *rtwdev, u8 type);
+void rtw_coex_scan_notify(struct rtw_dev *rtwdev, u8 type);
+void rtw_coex_connect_notify(struct rtw_dev *rtwdev, u8 action);
+void rtw_coex_media_status_notify(struct rtw_dev *rtwdev, u8 status);
+void rtw_coex_bt_info_notify(struct rtw_dev *rtwdev, u8 *buf, u8 len);
+void rtw_coex_wl_fwdbginfo_notify(struct rtw_dev *rtwdev, u8 *buf, u8 length);
+void rtw_coex_switchband_notify(struct rtw_dev *rtwdev, u8 type);
+void rtw_coex_wl_status_change_notify(struct rtw_dev *rtwdev);
+
+#endif
diff --git a/drivers/net/wireless/realtek/rtw88/debug.c b/drivers/net/wireless/realtek/rtw88/debug.c
index f0ae26018f97..383b04c16703 100644
--- a/drivers/net/wireless/realtek/rtw88/debug.c
+++ b/drivers/net/wireless/realtek/rtw88/debug.c
@@ -8,6 +8,7 @@
#include "sec.h"
#include "fw.h"
#include "debug.h"
+#include "phy.h"
#ifdef CONFIG_RTW88_DEBUGFS
@@ -460,6 +461,112 @@ static int rtw_debug_get_rf_dump(struct seq_file *m, void *v)
return 0;
}
+static void rtw_print_cck_rate_txt(struct seq_file *m, u8 rate)
+{
+ static const char * const
+ cck_rate[] = {"1M", "2M", "5.5M", "11M"};
+ u8 idx = rate - DESC_RATE1M;
+
+ seq_printf(m, " CCK_%-5s", cck_rate[idx]);
+}
+
+static void rtw_print_ofdm_rate_txt(struct seq_file *m, u8 rate)
+{
+ static const char * const
+ ofdm_rate[] = {"6M", "9M", "12M", "18M", "24M", "36M", "48M", "54M"};
+ u8 idx = rate - DESC_RATE6M;
+
+ seq_printf(m, " OFDM_%-4s", ofdm_rate[idx]);
+}
+
+static void rtw_print_ht_rate_txt(struct seq_file *m, u8 rate)
+{
+ u8 mcs_n = rate - DESC_RATEMCS0;
+
+ seq_printf(m, " MCS%-6u", mcs_n);
+}
+
+static void rtw_print_vht_rate_txt(struct seq_file *m, u8 rate)
+{
+ u8 idx = rate - DESC_RATEVHT1SS_MCS0;
+ u8 n_ss, mcs_n;
+
+ /* ten MCS indexes per spatial stream */
+ n_ss = 1 + idx / 10;
+ mcs_n = idx % 10;
+ seq_printf(m, " VHT%uSMCS%u", n_ss, mcs_n);
+}
+
+static int rtw_debugfs_get_tx_pwr_tbl(struct seq_file *m, void *v)
+{
+ struct rtw_debugfs_priv *debugfs_priv = m->private;
+ struct rtw_dev *rtwdev = debugfs_priv->rtwdev;
+ struct rtw_hal *hal = &rtwdev->hal;
+ void (*print_rate)(struct seq_file *, u8) = NULL;
+ u8 path, rate;
+ struct rtw_power_params pwr_param = {0};
+ u8 bw = hal->current_band_width;
+ u8 ch = hal->current_channel;
+ u8 regd = rtwdev->regd.txpwr_regd;
+
+ seq_printf(m, "%-4s %-10s %-3s%6s %-4s %4s (%-4s %-4s)\n",
+ "path", "rate", "pwr", "", "base", "", "byr", "lmt");
+
+ mutex_lock(&hal->tx_power_mutex);
+ for (path = RF_PATH_A; path <= RF_PATH_B; path++) {
+ /* there is no CCK rates used in 5G */
+ if (hal->current_band_type == RTW_BAND_5G)
+ rate = DESC_RATE6M;
+ else
+ rate = DESC_RATE1M;
+
+ /* VHT 3SS and 4SS are not supported yet */
+ for (; rate <= DESC_RATEVHT2SS_MCS9; rate++) {
+ /* HT 3SS and 4SS are not supported yet */
+ if (rate > DESC_RATEMCS15 &&
+ rate < DESC_RATEVHT1SS_MCS0)
+ continue;
+
+ switch (rate) {
+ case DESC_RATE1M...DESC_RATE11M:
+ print_rate = rtw_print_cck_rate_txt;
+ break;
+ case DESC_RATE6M...DESC_RATE54M:
+ print_rate = rtw_print_ofdm_rate_txt;
+ break;
+ case DESC_RATEMCS0...DESC_RATEMCS15:
+ print_rate = rtw_print_ht_rate_txt;
+ break;
+ case DESC_RATEVHT1SS_MCS0...DESC_RATEVHT2SS_MCS9:
+ print_rate = rtw_print_vht_rate_txt;
+ break;
+ default:
+ print_rate = NULL;
+ break;
+ }
+
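+ /* fetch base power, by-rate offset and regulatory limit for this rate */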
+ rtw_get_tx_power_params(rtwdev, path, rate, bw,
+ ch, regd, &pwr_param);
+
+ seq_printf(m, "%4c ", path + 'A');
+ if (print_rate)
+ print_rate(m, rate);
+ seq_printf(m, " %3u(0x%02x) %4u %4d (%4d %4d)\n",
+ hal->tx_pwr_tbl[path][rate],
+ hal->tx_pwr_tbl[path][rate],
+ pwr_param.pwr_base,
+ min_t(s8, pwr_param.pwr_offset,
+ pwr_param.pwr_limit),
+ pwr_param.pwr_offset, pwr_param.pwr_limit);
+ }
+ }
+
+ mutex_unlock(&hal->tx_power_mutex);
+
+ return 0;
+}
+
#define rtw_debug_impl_mac(page, addr) \
static struct rtw_debugfs_priv rtw_debug_priv_mac_ ##page = { \
.cb_read = rtw_debug_get_mac_page, \
@@ -514,6 +621,10 @@ static struct rtw_debugfs_priv rtw_debug_priv_rf_dump = {
.cb_read = rtw_debug_get_rf_dump,
};
+static struct rtw_debugfs_priv rtw_debug_priv_tx_pwr_tbl = {
+ .cb_read = rtw_debugfs_get_tx_pwr_tbl,
+};
+
static struct rtw_debugfs_priv rtw_debug_priv_write_reg = {
.cb_write = rtw_debugfs_set_write_reg,
};
@@ -610,6 +721,7 @@ void rtw_debugfs_init(struct rtw_dev *rtwdev)
rtw_debugfs_add_r(bb_41);
}
rtw_debugfs_add_r(rf_dump);
+ rtw_debugfs_add_r(tx_pwr_tbl);
}
#endif /* CONFIG_RTW88_DEBUGFS */
diff --git a/drivers/net/wireless/realtek/rtw88/fw.c b/drivers/net/wireless/realtek/rtw88/fw.c
index 628477971213..b082e2cc95f5 100644
--- a/drivers/net/wireless/realtek/rtw88/fw.c
+++ b/drivers/net/wireless/realtek/rtw88/fw.c
@@ -3,6 +3,7 @@
*/
#include "main.h"
+#include "coex.h"
#include "fw.h"
#include "tx.h"
#include "reg.h"
@@ -36,17 +37,51 @@ void rtw_fw_c2h_cmd_handle(struct rtw_dev *rtwdev, struct sk_buff *skb)
c2h = (struct rtw_c2h_cmd *)(skb->data + pkt_offset);
len = skb->len - pkt_offset - 2;
- rtw_dbg(rtwdev, RTW_DBG_FW, "recv C2H, id=0x%02x, seq=0x%02x, len=%d\n",
- c2h->id, c2h->seq, len);
+ mutex_lock(&rtwdev->mutex);
switch (c2h->id) {
+ case C2H_BT_INFO:
+ rtw_coex_bt_info_notify(rtwdev, c2h->payload, len);
+ break;
+ case C2H_WLAN_INFO:
+ rtw_coex_wl_fwdbginfo_notify(rtwdev, c2h->payload, len);
+ break;
case C2H_HALMAC:
rtw_fw_c2h_cmd_handle_ext(rtwdev, skb);
break;
default:
break;
}
+
+ mutex_unlock(&rtwdev->mutex);
+}
+
+void rtw_fw_c2h_cmd_rx_irqsafe(struct rtw_dev *rtwdev, u32 pkt_offset,
+ struct sk_buff *skb)
+{
+ struct rtw_c2h_cmd *c2h;
+ u8 len;
+
+ c2h = (struct rtw_c2h_cmd *)(skb->data + pkt_offset);
+ len = skb->len - pkt_offset - 2;
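+ /* stash the offset in cb; C2H handlers read it back to find the payload */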
+ *((u32 *)skb->cb) = pkt_offset;
+
+ rtw_dbg(rtwdev, RTW_DBG_FW, "recv C2H, id=0x%02x, seq=0x%02x, len=%d\n",
+ c2h->id, c2h->seq, len);
+
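+ /* BT MP info responses are handled in place so the waiting coex
+ * info requester is woken promptly; other C2Hs go to the c2h work
+ */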
+ switch (c2h->id) {
+ case C2H_BT_MP_INFO:
+ rtw_coex_info_response(rtwdev, skb);
+ break;
+ default:
+ skb_queue_tail(&rtwdev->c2h_queue, skb);
+ ieee80211_queue_work(rtwdev->hw, &rtwdev->c2h_work);
+ break;
+ }
}
+EXPORT_SYMBOL(rtw_fw_c2h_cmd_rx_irqsafe);
static void rtw_fw_send_h2c_command(struct rtw_dev *rtwdev,
u8 *h2c)
@@ -181,6 +216,102 @@ void rtw_fw_do_iqk(struct rtw_dev *rtwdev, struct rtw_iqk_para *para)
rtw_fw_send_h2c_packet(rtwdev, h2c_pkt);
}
+void rtw_fw_query_bt_info(struct rtw_dev *rtwdev)
+{
+ u8 h2c_pkt[H2C_PKT_SIZE] = {0};
+
+ SET_H2C_CMD_ID_CLASS(h2c_pkt, H2C_CMD_QUERY_BT_INFO);
+
+ SET_QUERY_BT_INFO(h2c_pkt, true);
+
+ rtw_fw_send_h2c_command(rtwdev, h2c_pkt);
+}
+
+void rtw_fw_wl_ch_info(struct rtw_dev *rtwdev, u8 link, u8 ch, u8 bw)
+{
+ u8 h2c_pkt[H2C_PKT_SIZE] = {0};
+
+ SET_H2C_CMD_ID_CLASS(h2c_pkt, H2C_CMD_WL_CH_INFO);
+
+ SET_WL_CH_INFO_LINK(h2c_pkt, link);
+ SET_WL_CH_INFO_CHNL(h2c_pkt, ch);
+ SET_WL_CH_INFO_BW(h2c_pkt, bw);
+
+ rtw_fw_send_h2c_command(rtwdev, h2c_pkt);
+}
+
+void rtw_fw_query_bt_mp_info(struct rtw_dev *rtwdev,
+ struct rtw_coex_info_req *req)
+{
+ u8 h2c_pkt[H2C_PKT_SIZE] = {0};
+
+ SET_H2C_CMD_ID_CLASS(h2c_pkt, H2C_CMD_QUERY_BT_MP_INFO);
+
+ SET_BT_MP_INFO_SEQ(h2c_pkt, req->seq);
+ SET_BT_MP_INFO_OP_CODE(h2c_pkt, req->op_code);
+ SET_BT_MP_INFO_PARA1(h2c_pkt, req->para1);
+ SET_BT_MP_INFO_PARA2(h2c_pkt, req->para2);
+ SET_BT_MP_INFO_PARA3(h2c_pkt, req->para3);
+
+ rtw_fw_send_h2c_command(rtwdev, h2c_pkt);
+}
+
+void rtw_fw_force_bt_tx_power(struct rtw_dev *rtwdev, u8 bt_pwr_dec_lvl)
+{
+ u8 h2c_pkt[H2C_PKT_SIZE] = {0};
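+ /* the firmware takes the decrement as a negated power index */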
+ u8 index = 0 - bt_pwr_dec_lvl;
+
+ SET_H2C_CMD_ID_CLASS(h2c_pkt, H2C_CMD_FORCE_BT_TX_POWER);
+
+ SET_BT_TX_POWER_INDEX(h2c_pkt, index);
+
+ rtw_fw_send_h2c_command(rtwdev, h2c_pkt);
+}
+
+void rtw_fw_bt_ignore_wlan_action(struct rtw_dev *rtwdev, bool enable)
+{
+ u8 h2c_pkt[H2C_PKT_SIZE] = {0};
+
+ SET_H2C_CMD_ID_CLASS(h2c_pkt, H2C_CMD_IGNORE_WLAN_ACTION);
+
+ SET_IGNORE_WLAN_ACTION_EN(h2c_pkt, enable);
+
+ rtw_fw_send_h2c_command(rtwdev, h2c_pkt);
+}
+
+void rtw_fw_coex_tdma_type(struct rtw_dev *rtwdev,
+ u8 para1, u8 para2, u8 para3, u8 para4, u8 para5)
+{
+ u8 h2c_pkt[H2C_PKT_SIZE] = {0};
+
+ SET_H2C_CMD_ID_CLASS(h2c_pkt, H2C_CMD_COEX_TDMA_TYPE);
+
+ SET_COEX_TDMA_TYPE_PARA1(h2c_pkt, para1);
+ SET_COEX_TDMA_TYPE_PARA2(h2c_pkt, para2);
+ SET_COEX_TDMA_TYPE_PARA3(h2c_pkt, para3);
+ SET_COEX_TDMA_TYPE_PARA4(h2c_pkt, para4);
+ SET_COEX_TDMA_TYPE_PARA5(h2c_pkt, para5);
+
+ rtw_fw_send_h2c_command(rtwdev, h2c_pkt);
+}
+
+void rtw_fw_bt_wifi_control(struct rtw_dev *rtwdev, u8 op_code, u8 *data)
+{
+ u8 h2c_pkt[H2C_PKT_SIZE] = {0};
+
+ SET_H2C_CMD_ID_CLASS(h2c_pkt, H2C_CMD_BT_WIFI_CONTROL);
+
+ SET_BT_WIFI_CONTROL_OP_CODE(h2c_pkt, op_code);
+
+ SET_BT_WIFI_CONTROL_DATA1(h2c_pkt, *data);
+ SET_BT_WIFI_CONTROL_DATA2(h2c_pkt, *(data + 1));
+ SET_BT_WIFI_CONTROL_DATA3(h2c_pkt, *(data + 2));
+ SET_BT_WIFI_CONTROL_DATA4(h2c_pkt, *(data + 3));
+ SET_BT_WIFI_CONTROL_DATA5(h2c_pkt, *(data + 4));
+
+ rtw_fw_send_h2c_command(rtwdev, h2c_pkt);
+}
+
void rtw_fw_send_rssi_info(struct rtw_dev *rtwdev, struct rtw_sta_info *si)
{
u8 h2c_pkt[H2C_PKT_SIZE] = {0};
diff --git a/drivers/net/wireless/realtek/rtw88/fw.h b/drivers/net/wireless/realtek/rtw88/fw.h
index 703466393ecb..e95d85bd097f 100644
--- a/drivers/net/wireless/realtek/rtw88/fw.h
+++ b/drivers/net/wireless/realtek/rtw88/fw.h
@@ -35,7 +35,9 @@
enum rtw_c2h_cmd_id {
C2H_BT_INFO = 0x09,
+ C2H_BT_MP_INFO = 0x0b,
C2H_HW_FEATURE_REPORT = 0x19,
+ C2H_WLAN_INFO = 0x27,
C2H_HW_FEATURE_DUMP = 0xfd,
C2H_HALMAC = 0xff,
};
@@ -71,6 +73,14 @@ enum rtw_fw_rf_type {
FW_RF_MAX_TYPE = 0xF,
};
+struct rtw_coex_info_req {
+ u8 seq;
+ u8 op_code;
+ u8 para1;
+ u8 para2;
+ u8 para3;
+};
+
struct rtw_iqk_para {
u8 clear;
u8 segment_iqk;
@@ -139,6 +149,14 @@ static inline void rtw_h2c_pkt_set_header(u8 *h2c_pkt, u8 sub_id)
#define H2C_CMD_RA_INFO 0x40
#define H2C_CMD_RSSI_MONITOR 0x42
+#define H2C_CMD_COEX_TDMA_TYPE 0x60
+#define H2C_CMD_QUERY_BT_INFO 0x61
+#define H2C_CMD_FORCE_BT_TX_POWER 0x62
+#define H2C_CMD_IGNORE_WLAN_ACTION 0x63
+#define H2C_CMD_WL_CH_INFO 0x66
+#define H2C_CMD_QUERY_BT_MP_INFO 0x67
+#define H2C_CMD_BT_WIFI_CONTROL 0x69
+
#define SET_H2C_CMD_ID_CLASS(h2c_pkt, value) \
le32p_replace_bits((__le32 *)(h2c_pkt) + 0x00, value, GENMASK(7, 0))
@@ -191,6 +209,50 @@ static inline void rtw_h2c_pkt_set_header(u8 *h2c_pkt, u8 sub_id)
le32p_replace_bits((__le32 *)(h2c_pkt) + 0x01, value, GENMASK(23, 16))
#define SET_RA_INFO_RA_MASK3(h2c_pkt, value) \
le32p_replace_bits((__le32 *)(h2c_pkt) + 0x01, value, GENMASK(31, 24))
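+
+/* each SET_* helper below packs one coex H2C field into the
+ * little-endian 32-bit command words at the given offset and mask
+ */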
+#define SET_QUERY_BT_INFO(h2c_pkt, value) \
+ le32p_replace_bits((__le32 *)(h2c_pkt) + 0x00, value, BIT(8))
+#define SET_WL_CH_INFO_LINK(h2c_pkt, value) \
+ le32p_replace_bits((__le32 *)(h2c_pkt) + 0x00, value, GENMASK(15, 8))
+#define SET_WL_CH_INFO_CHNL(h2c_pkt, value) \
+ le32p_replace_bits((__le32 *)(h2c_pkt) + 0x00, value, GENMASK(23, 16))
+#define SET_WL_CH_INFO_BW(h2c_pkt, value) \
+ le32p_replace_bits((__le32 *)(h2c_pkt) + 0x00, value, GENMASK(31, 24))
+#define SET_BT_MP_INFO_SEQ(h2c_pkt, value) \
+ le32p_replace_bits((__le32 *)(h2c_pkt) + 0x00, value, GENMASK(15, 12))
+#define SET_BT_MP_INFO_OP_CODE(h2c_pkt, value) \
+ le32p_replace_bits((__le32 *)(h2c_pkt) + 0x00, value, GENMASK(23, 16))
+#define SET_BT_MP_INFO_PARA1(h2c_pkt, value) \
+ le32p_replace_bits((__le32 *)(h2c_pkt) + 0x00, value, GENMASK(31, 24))
+#define SET_BT_MP_INFO_PARA2(h2c_pkt, value) \
+ le32p_replace_bits((__le32 *)(h2c_pkt) + 0x01, value, GENMASK(7, 0))
+#define SET_BT_MP_INFO_PARA3(h2c_pkt, value) \
+ le32p_replace_bits((__le32 *)(h2c_pkt) + 0x01, value, GENMASK(15, 8))
+#define SET_BT_TX_POWER_INDEX(h2c_pkt, value) \
+ le32p_replace_bits((__le32 *)(h2c_pkt) + 0x00, value, GENMASK(15, 8))
+#define SET_IGNORE_WLAN_ACTION_EN(h2c_pkt, value) \
+ le32p_replace_bits((__le32 *)(h2c_pkt) + 0x00, value, BIT(8))
+#define SET_COEX_TDMA_TYPE_PARA1(h2c_pkt, value) \
+ le32p_replace_bits((__le32 *)(h2c_pkt) + 0x00, value, GENMASK(15, 8))
+#define SET_COEX_TDMA_TYPE_PARA2(h2c_pkt, value) \
+ le32p_replace_bits((__le32 *)(h2c_pkt) + 0x00, value, GENMASK(23, 16))
+#define SET_COEX_TDMA_TYPE_PARA3(h2c_pkt, value) \
+ le32p_replace_bits((__le32 *)(h2c_pkt) + 0x00, value, GENMASK(31, 24))
+#define SET_COEX_TDMA_TYPE_PARA4(h2c_pkt, value) \
+ le32p_replace_bits((__le32 *)(h2c_pkt) + 0x01, value, GENMASK(7, 0))
+#define SET_COEX_TDMA_TYPE_PARA5(h2c_pkt, value) \
+ le32p_replace_bits((__le32 *)(h2c_pkt) + 0x01, value, GENMASK(15, 8))
+#define SET_BT_WIFI_CONTROL_OP_CODE(h2c_pkt, value) \
+ le32p_replace_bits((__le32 *)(h2c_pkt) + 0x00, value, GENMASK(15, 8))
+#define SET_BT_WIFI_CONTROL_DATA1(h2c_pkt, value) \
+ le32p_replace_bits((__le32 *)(h2c_pkt) + 0x00, value, GENMASK(23, 16))
+#define SET_BT_WIFI_CONTROL_DATA2(h2c_pkt, value) \
+ le32p_replace_bits((__le32 *)(h2c_pkt) + 0x00, value, GENMASK(31, 24))
+#define SET_BT_WIFI_CONTROL_DATA3(h2c_pkt, value) \
+ le32p_replace_bits((__le32 *)(h2c_pkt) + 0x01, value, GENMASK(7, 0))
+#define SET_BT_WIFI_CONTROL_DATA4(h2c_pkt, value) \
+ le32p_replace_bits((__le32 *)(h2c_pkt) + 0x01, value, GENMASK(15, 8))
+#define SET_BT_WIFI_CONTROL_DATA5(h2c_pkt, value) \
+ le32p_replace_bits((__le32 *)(h2c_pkt) + 0x01, value, GENMASK(23, 16))
static inline struct rtw_c2h_cmd *get_c2h_from_skb(struct sk_buff *skb)
{
@@ -200,12 +262,23 @@ static inline struct rtw_c2h_cmd *get_c2h_from_skb(struct sk_buff *skb)
return (struct rtw_c2h_cmd *)(skb->data + pkt_offset);
}
+void rtw_fw_c2h_cmd_rx_irqsafe(struct rtw_dev *rtwdev, u32 pkt_offset,
+ struct sk_buff *skb);
void rtw_fw_c2h_cmd_handle(struct rtw_dev *rtwdev, struct sk_buff *skb);
void rtw_fw_send_general_info(struct rtw_dev *rtwdev);
void rtw_fw_send_phydm_info(struct rtw_dev *rtwdev);
void rtw_fw_do_iqk(struct rtw_dev *rtwdev, struct rtw_iqk_para *para);
void rtw_fw_set_pwr_mode(struct rtw_dev *rtwdev);
+void rtw_fw_query_bt_info(struct rtw_dev *rtwdev);
+void rtw_fw_wl_ch_info(struct rtw_dev *rtwdev, u8 link, u8 ch, u8 bw);
+void rtw_fw_query_bt_mp_info(struct rtw_dev *rtwdev,
+ struct rtw_coex_info_req *req);
+void rtw_fw_force_bt_tx_power(struct rtw_dev *rtwdev, u8 bt_pwr_dec_lvl);
+void rtw_fw_bt_ignore_wlan_action(struct rtw_dev *rtwdev, bool enable);
+void rtw_fw_coex_tdma_type(struct rtw_dev *rtwdev,
+ u8 para1, u8 para2, u8 para3, u8 para4, u8 para5);
+void rtw_fw_bt_wifi_control(struct rtw_dev *rtwdev, u8 op_code, u8 *data);
void rtw_fw_send_rssi_info(struct rtw_dev *rtwdev, struct rtw_sta_info *si);
void rtw_fw_send_ra_info(struct rtw_dev *rtwdev, struct rtw_sta_info *si);
void rtw_fw_media_status_report(struct rtw_dev *rtwdev, u8 mac_id, bool conn);
diff --git a/drivers/net/wireless/realtek/rtw88/mac80211.c b/drivers/net/wireless/realtek/rtw88/mac80211.c
index abe6a148673b..fedea28c7a97 100644
--- a/drivers/net/wireless/realtek/rtw88/mac80211.c
+++ b/drivers/net/wireless/realtek/rtw88/mac80211.c
@@ -7,6 +7,7 @@
#include "tx.h"
#include "fw.h"
#include "mac.h"
+#include "coex.h"
#include "ps.h"
#include "reg.h"
#include "debug.h"
@@ -253,6 +254,7 @@ static void rtw_ops_bss_info_changed(struct ieee80211_hw *hw,
enum rtw_net_type net_type;
if (conf->assoc) {
+ rtw_coex_connect_notify(rtwdev, COEX_ASSOCIATE_FINISH);
net_type = RTW_NET_MGD_LINKED;
chip->ops->do_iqk(rtwdev);
@@ -262,6 +264,7 @@ static void rtw_ops_bss_info_changed(struct ieee80211_hw *hw,
rtw_add_rsvd_page(rtwdev, RSVD_NULL, true);
rtw_fw_download_rsvd_page(rtwdev, vif);
rtw_send_rsvd_page_h2c(rtwdev);
+ rtw_coex_media_status_notify(rtwdev, conf->assoc);
} else {
net_type = RTW_NET_NO_LINK;
rtwvif->aid = 0;
@@ -469,6 +472,8 @@ static void rtw_ops_sw_scan_start(struct ieee80211_hw *hw,
config |= PORT_SET_MAC_ADDR;
rtw_vif_port_config(rtwdev, rtwvif, config);
+ rtw_coex_scan_notify(rtwdev, COEX_SCAN_START);
+
rtw_flag_set(rtwdev, RTW_FLAG_DIG_DISABLE);
rtw_flag_set(rtwdev, RTW_FLAG_SCANNING);
@@ -491,6 +496,19 @@ static void rtw_ops_sw_scan_complete(struct ieee80211_hw *hw,
config |= PORT_SET_MAC_ADDR;
rtw_vif_port_config(rtwdev, rtwvif, config);
+ rtw_coex_scan_notify(rtwdev, COEX_SCAN_FINISH);
+
+ mutex_unlock(&rtwdev->mutex);
+}
+
+static void rtw_ops_mgd_prepare_tx(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ u16 duration)
+{
+ struct rtw_dev *rtwdev = hw->priv;
+
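+ /* called by mac80211 before transmitting association frames; tell
+ * the coex mechanism that a connection attempt is starting
+ */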
+ mutex_lock(&rtwdev->mutex);
+ rtw_coex_connect_notify(rtwdev, COEX_ASSOCIATE_START);
mutex_unlock(&rtwdev->mutex);
}
@@ -509,5 +527,6 @@ const struct ieee80211_ops rtw_ops = {
.ampdu_action = rtw_ops_ampdu_action,
.sw_scan_start = rtw_ops_sw_scan_start,
.sw_scan_complete = rtw_ops_sw_scan_complete,
+ .mgd_prepare_tx = rtw_ops_mgd_prepare_tx,
};
EXPORT_SYMBOL(rtw_ops);
diff --git a/drivers/net/wireless/realtek/rtw88/main.c b/drivers/net/wireless/realtek/rtw88/main.c
index 5a2c06267d07..e5a6bc094808 100644
--- a/drivers/net/wireless/realtek/rtw88/main.c
+++ b/drivers/net/wireless/realtek/rtw88/main.c
@@ -8,6 +8,7 @@
#include "ps.h"
#include "sec.h"
#include "mac.h"
+#include "coex.h"
#include "phy.h"
#include "reg.h"
#include "efuse.h"
@@ -149,6 +150,7 @@ static void rtw_watch_dog_work(struct work_struct *work)
struct rtw_dev *rtwdev = container_of(work, struct rtw_dev,
watch_dog_work.work);
struct rtw_watch_dog_iter_data data = {};
+ bool busy_traffic = rtw_flag_check(rtwdev, RTW_FLAG_BUSY_TRAFFIC);
if (!rtw_flag_check(rtwdev, RTW_FLAG_RUNNING))
return;
@@ -156,6 +158,14 @@ static void rtw_watch_dog_work(struct work_struct *work)
ieee80211_queue_delayed_work(rtwdev->hw, &rtwdev->watch_dog_work,
RTW_WATCH_DOG_DELAY_TIME);
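+ /* traffic is considered busy when more than 100 frames were
+ * sent or received during the last watchdog period
+ */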
+ if (rtwdev->stats.tx_cnt > 100 || rtwdev->stats.rx_cnt > 100)
+ rtw_flag_set(rtwdev, RTW_FLAG_BUSY_TRAFFIC);
+ else
+ rtw_flag_clear(rtwdev, RTW_FLAG_BUSY_TRAFFIC);
+
+ if (busy_traffic != rtw_flag_check(rtwdev, RTW_FLAG_BUSY_TRAFFIC))
+ rtw_coex_wl_status_change_notify(rtwdev);
+
/* reset tx/rx statistics */
rtwdev->stats.tx_unicast = 0;
rtwdev->stats.rx_unicast = 0;
@@ -298,6 +308,15 @@ void rtw_set_channel(struct rtw_dev *rtwdev)
chip->ops->set_channel(rtwdev, center_chan, bandwidth, primary_chan_idx);
+ if (hal->current_band_type == RTW_BAND_5G) {
+ rtw_coex_switchband_notify(rtwdev, COEX_SWITCH_TO_5G);
+ } else {
+ if (rtw_flag_check(rtwdev, RTW_FLAG_SCANNING))
+ rtw_coex_switchband_notify(rtwdev, COEX_SWITCH_TO_24G);
+ else
+ rtw_coex_switchband_notify(rtwdev, COEX_SWITCH_TO_24G_NOFORSCAN);
+ }
+
rtw_phy_set_tx_power_level(rtwdev, center_chan);
}
@@ -641,6 +660,7 @@ static int rtw_power_on(struct rtw_dev *rtwdev)
{
struct rtw_chip_info *chip = rtwdev->chip;
struct rtw_fw_state *fw = &rtwdev->fw;
+ bool wifi_only;
int ret;
ret = rtw_hci_setup(rtwdev);
@@ -684,6 +704,10 @@ static int rtw_power_on(struct rtw_dev *rtwdev)
goto err_off;
}
+ wifi_only = !rtwdev->efuse.btcoex;
+ rtw_coex_power_on_setting(rtwdev);
+ rtw_coex_init_hw_config(rtwdev, wifi_only);
+
return 0;
err_off:
@@ -722,10 +746,15 @@ static void rtw_power_off(struct rtw_dev *rtwdev)
void rtw_core_stop(struct rtw_dev *rtwdev)
{
+ struct rtw_coex *coex = &rtwdev->coex;
+
rtw_flag_clear(rtwdev, RTW_FLAG_RUNNING);
rtw_flag_clear(rtwdev, RTW_FLAG_FW_RUNNING);
cancel_delayed_work_sync(&rtwdev->watch_dog_work);
+ cancel_delayed_work_sync(&coex->bt_relink_work);
+ cancel_delayed_work_sync(&coex->bt_reenable_work);
+ cancel_delayed_work_sync(&coex->defreeze_work);
rtw_power_off(rtwdev);
}
@@ -876,7 +905,6 @@ static int rtw_chip_parameter_setup(struct rtw_dev *rtwdev)
struct rtw_chip_info *chip = rtwdev->chip;
struct rtw_hal *hal = &rtwdev->hal;
struct rtw_efuse *efuse = &rtwdev->efuse;
- u32 wl_bt_pwr_ctrl;
int ret = 0;
switch (rtw_hci_type(rtwdev)) {
@@ -888,9 +916,6 @@ static int rtw_chip_parameter_setup(struct rtw_dev *rtwdev)
return -EINVAL;
}
- wl_bt_pwr_ctrl = rtw_read32(rtwdev, REG_WL_BT_PWR_CTRL);
- if (wl_bt_pwr_ctrl & BIT_BT_FUNC_EN)
- rtwdev->efuse.btcoex = true;
hal->chip_version = rtw_read32(rtwdev, REG_SYS_CFG1);
hal->fab_version = BIT_GET_VENDOR_ID(hal->chip_version) >> 2;
hal->cut_version = BIT_GET_CHIP_VER(hal->chip_version);
@@ -1044,11 +1069,14 @@ static int rtw_chip_efuse_info_setup(struct rtw_dev *rtwdev)
efuse->lna_type_5g = 0;
if (efuse->channel_plan == 0xff)
efuse->channel_plan = 0x7f;
+ if (efuse->rf_board_option == 0xff)
+ efuse->rf_board_option = 0;
if (efuse->bt_setting & BIT(0))
efuse->share_ant = true;
if (efuse->regd == 0xff)
efuse->regd = 0;
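+ /* rf_board_option bits [7:5] equal to 1 indicate BT coex support */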
+ efuse->btcoex = (efuse->rf_board_option & 0xe0) == 0x20;
efuse->ext_pa_2g = efuse->pa_type_2g & BIT(4) ? 1 : 0;
efuse->ext_lna_2g = efuse->lna_type_2g & BIT(3) ? 1 : 0;
efuse->ext_pa_5g = efuse->pa_type_5g & BIT(0) ? 1 : 0;
@@ -1111,6 +1139,7 @@ EXPORT_SYMBOL(rtw_chip_info_setup);
int rtw_core_init(struct rtw_dev *rtwdev)
{
+ struct rtw_coex *coex = &rtwdev->coex;
int ret;
INIT_LIST_HEAD(&rtwdev->rsvd_page_list);
@@ -1120,8 +1149,12 @@ int rtw_core_init(struct rtw_dev *rtwdev)
INIT_DELAYED_WORK(&rtwdev->watch_dog_work, rtw_watch_dog_work);
INIT_DELAYED_WORK(&rtwdev->lps_work, rtw_lps_work);
+ INIT_DELAYED_WORK(&coex->bt_relink_work, rtw_coex_bt_relink_work);
+ INIT_DELAYED_WORK(&coex->bt_reenable_work, rtw_coex_bt_reenable_work);
+ INIT_DELAYED_WORK(&coex->defreeze_work, rtw_coex_defreeze_work);
INIT_WORK(&rtwdev->c2h_work, rtw_c2h_work);
skb_queue_head_init(&rtwdev->c2h_queue);
+ skb_queue_head_init(&rtwdev->coex.queue);
skb_queue_head_init(&rtwdev->tx_report.queue);
spin_lock_init(&rtwdev->dm_lock);
@@ -1130,8 +1163,11 @@ int rtw_core_init(struct rtw_dev *rtwdev)
spin_lock_init(&rtwdev->tx_report.q_lock);
mutex_init(&rtwdev->mutex);
+ mutex_init(&rtwdev->coex.mutex);
mutex_init(&rtwdev->hal.tx_power_mutex);
+ init_waitqueue_head(&rtwdev->coex.wait);
+
rtwdev->sec.total_cam_num = 32;
rtwdev->hal.current_channel = 1;
set_bit(RTW_BC_MC_MACID, rtwdev->mac_id_map);
@@ -1174,6 +1210,7 @@ void rtw_core_deinit(struct rtw_dev *rtwdev)
}
mutex_destroy(&rtwdev->mutex);
+ mutex_destroy(&rtwdev->coex.mutex);
mutex_destroy(&rtwdev->hal.tx_power_mutex);
}
EXPORT_SYMBOL(rtw_core_deinit);
diff --git a/drivers/net/wireless/realtek/rtw88/main.h b/drivers/net/wireless/realtek/rtw88/main.h
index 8fa05751836b..9208b9ce5513 100644
--- a/drivers/net/wireless/realtek/rtw88/main.h
+++ b/drivers/net/wireless/realtek/rtw88/main.h
@@ -310,6 +310,7 @@ enum rtw_flags {
RTW_FLAG_INACTIVE_PS,
RTW_FLAG_LEISURE_PS,
RTW_FLAG_DIG_DISABLE,
+ RTW_FLAG_BUSY_TRAFFIC,
NUM_OF_RTW_FLAGS,
};
@@ -640,6 +641,16 @@ struct rtw_chip_ops {
void (*cfg_ldo25)(struct rtw_dev *rtwdev, bool enable);
void (*false_alarm_statistics)(struct rtw_dev *rtwdev);
void (*do_iqk)(struct rtw_dev *rtwdev);
+
+ /* for coex */
+ void (*coex_set_init)(struct rtw_dev *rtwdev);
+ void (*coex_set_ant_switch)(struct rtw_dev *rtwdev,
+ u8 ctrl_type, u8 pos_type);
+ void (*coex_set_gnt_fix)(struct rtw_dev *rtwdev);
+ void (*coex_set_gnt_debug)(struct rtw_dev *rtwdev);
+ void (*coex_set_rfe_type)(struct rtw_dev *rtwdev);
+ void (*coex_set_wl_tx_power)(struct rtw_dev *rtwdev, u8 wl_pwr);
+ void (*coex_set_wl_rx_gain)(struct rtw_dev *rtwdev, bool low_gain);
};
#define RTW_PWR_POLLING_CNT 20000
@@ -852,6 +863,216 @@ struct rtw_chip_info {
const struct rtw_rfe_def *rfe_defs;
u32 rfe_defs_size;
+
+ /* coex paras */
+ u32 coex_para_ver;
+ u8 bt_desired_ver;
+ bool scbd_support;
+ bool new_scbd10_def; /* true: fix 2M (8822c) */
+ u8 pstdma_type; /* 0: LPSoff, 1:LPSon */
+ u8 bt_rssi_type;
+ u8 ant_isolation;
+ u8 rssi_tolerance;
+ u8 table_sant_num;
+ u8 table_nsant_num;
+ u8 tdma_sant_num;
+ u8 tdma_nsant_num;
+ u8 bt_afh_span_bw20;
+ u8 bt_afh_span_bw40;
+ u8 afh_5g_num;
+ u8 wl_rf_para_num;
+ const u8 *bt_rssi_step;
+ const u8 *wl_rssi_step;
+ const struct coex_table_para *table_nsant;
+ const struct coex_table_para *table_sant;
+ const struct coex_tdma_para *tdma_sant;
+ const struct coex_tdma_para *tdma_nsant;
+ const struct coex_rf_para *wl_rf_para_tx;
+ const struct coex_rf_para *wl_rf_para_rx;
+ const struct coex_5g_afh_map *afh_5g;
+};
+
+enum rtw_coex_bt_state_cnt {
+ COEX_CNT_BT_RETRY,
+ COEX_CNT_BT_REINIT,
+ COEX_CNT_BT_REENABLE,
+ COEX_CNT_BT_POPEVENT,
+ COEX_CNT_BT_SETUPLINK,
+ COEX_CNT_BT_IGNWLANACT,
+ COEX_CNT_BT_INQ,
+ COEX_CNT_BT_PAGE,
+ COEX_CNT_BT_ROLESWITCH,
+ COEX_CNT_BT_AFHUPDATE,
+ COEX_CNT_BT_INFOUPDATE,
+ COEX_CNT_BT_IQK,
+ COEX_CNT_BT_IQKFAIL,
+
+ COEX_CNT_BT_MAX
+};
+
+enum rtw_coex_wl_state_cnt {
+ COEX_CNT_WL_CONNPKT,
+ COEX_CNT_WL_COEXRUN,
+ COEX_CNT_WL_NOISY0,
+ COEX_CNT_WL_NOISY1,
+ COEX_CNT_WL_NOISY2,
+ COEX_CNT_WL_5MS_NOEXTEND,
+ COEX_CNT_WL_FW_NOTIFY,
+
+ COEX_CNT_WL_MAX
+};
+
+struct rtw_coex_rfe {
+ bool ant_switch_exist;
+ bool ant_switch_diversity;
+ bool ant_switch_with_bt;
+ u8 rfe_module_type;
+ u8 ant_switch_polarity;
+
+ /* true if the WLG path is at BTG, otherwise at WLAG */
+ bool wlg_at_btg;
+};
+
+struct rtw_coex_dm {
+ bool cur_ps_tdma_on;
+ bool cur_wl_rx_low_gain_en;
+
+ u8 reason;
+ u8 bt_rssi_state[4];
+ u8 wl_rssi_state[4];
+ u8 wl_ch_info[3];
+ u8 cur_ps_tdma;
+ u8 cur_table;
+ u8 ps_tdma_para[5];
+ u8 cur_bt_pwr_lvl;
+ u8 cur_bt_lna_lvl;
+ u8 cur_wl_pwr_lvl;
+ u8 bt_status;
+ u32 cur_ant_pos_type;
+ u32 cur_switch_status;
+ u32 setting_tdma;
+};
+
+#define COEX_BTINFO_SRC_WL_FW 0x0
+#define COEX_BTINFO_SRC_BT_RSP 0x1
+#define COEX_BTINFO_SRC_BT_ACT 0x2
+#define COEX_BTINFO_SRC_BT_IQK 0x3
+#define COEX_BTINFO_SRC_BT_SCBD 0x4
+#define COEX_BTINFO_SRC_MAX 0x5
+
+#define COEX_INFO_FTP BIT(7)
+#define COEX_INFO_A2DP BIT(6)
+#define COEX_INFO_HID BIT(5)
+#define COEX_INFO_SCO_BUSY BIT(4)
+#define COEX_INFO_ACL_BUSY BIT(3)
+#define COEX_INFO_INQ_PAGE BIT(2)
+#define COEX_INFO_SCO_ESCO BIT(1)
+#define COEX_INFO_CONNECTION BIT(0)
+#define COEX_BTINFO_LENGTH_MAX 10
+
+struct rtw_coex_stat {
+ bool bt_disabled;
+ bool bt_disabled_pre;
+ bool bt_link_exist;
+ bool bt_whck_test;
+ bool bt_inq_page;
+ bool bt_inq;
+ bool bt_page;
+ bool bt_ble_voice;
+ bool bt_ble_exist;
+ bool bt_hfp_exist;
+ bool bt_a2dp_exist;
+ bool bt_hid_exist;
+ bool bt_pan_exist; /* PAN or OPP */
+ bool bt_opp_exist; /* OPP only */
+ bool bt_acl_busy;
+ bool bt_fix_2M;
+ bool bt_setup_link;
+ bool bt_multi_link;
+ bool bt_a2dp_sink;
+ bool bt_a2dp_active;
+ bool bt_reenable;
+ bool bt_ble_scan_en;
+ bool bt_init_scan;
+ bool bt_slave;
+ bool bt_418_hid_exist;
+ bool bt_mailbox_reply;
+
+ bool wl_under_lps;
+ bool wl_under_ips;
+ bool wl_hi_pri_task1;
+ bool wl_hi_pri_task2;
+ bool wl_force_lps_ctrl;
+ bool wl_gl_busy;
+ bool wl_linkscan_proc;
+ bool wl_ps_state_fail;
+ bool wl_tx_limit_en;
+ bool wl_ampdu_limit_en;
+ bool wl_connected;
+ bool wl_slot_extend;
+ bool wl_cck_lock;
+ bool wl_cck_lock_pre;
+ bool wl_cck_lock_ever;
+
+ u32 bt_supported_version;
+ u32 bt_supported_feature;
+ s8 bt_rssi;
+ u8 kt_ver;
+ u8 gnt_workaround_state;
+ u8 tdma_timer_base;
+ u8 bt_profile_num;
+ u8 bt_info_c2h[COEX_BTINFO_SRC_MAX][COEX_BTINFO_LENGTH_MAX];
+ u8 bt_info_lb2;
+ u8 bt_info_lb3;
+ u8 bt_info_hb0;
+ u8 bt_info_hb1;
+ u8 bt_info_hb2;
+ u8 bt_info_hb3;
+ u8 bt_ble_scan_type;
+ u8 bt_hid_pair_num;
+ u8 bt_hid_slot;
+ u8 bt_a2dp_bitpool;
+ u8 bt_iqk_state;
+
+ u8 wl_noisy_level;
+ u8 wl_fw_dbg_info[10];
+ u8 wl_fw_dbg_info_pre[10];
+ u8 wl_coex_mode;
+ u8 ampdu_max_time;
+ u8 wl_tput_dir;
+
+ u16 score_board;
+ u16 retry_limit;
+
+ /* counters to record bt states */
+ u32 cnt_bt[COEX_CNT_BT_MAX];
+
+ /* counters to record wifi states */
+ u32 cnt_wl[COEX_CNT_WL_MAX];
+
+ u32 darfrc;
+ u32 darfrch;
+};
+
+struct rtw_coex {
+ /* protects coex info request section */
+ struct mutex mutex;
+ struct sk_buff_head queue;
+ wait_queue_head_t wait;
+
+ bool under_5g;
+ bool stop_dm;
+ bool freeze;
+ bool freerun;
+ bool wl_rf_off;
+
+ struct rtw_coex_stat stat;
+ struct rtw_coex_dm dm;
+ struct rtw_coex_rfe rfe;
+
+ struct delayed_work bt_relink_work;
+ struct delayed_work bt_reenable_work;
+ struct delayed_work defreeze_work;
};
#define DACK_MSBK_BACKUP_NUM 0xf
@@ -861,6 +1082,16 @@ struct rtw_dm_info {
u32 cck_fa_cnt;
u32 ofdm_fa_cnt;
u32 total_fa_cnt;
+
+ u32 cck_ok_cnt;
+ u32 cck_err_cnt;
+ u32 ofdm_ok_cnt;
+ u32 ofdm_err_cnt;
+ u32 ht_ok_cnt;
+ u32 ht_err_cnt;
+ u32 vht_ok_cnt;
+ u32 vht_err_cnt;
+
u8 min_rssi;
u8 pre_min_rssi;
u16 fa_history[4];
@@ -888,6 +1119,7 @@ struct rtw_efuse {
u8 addr[ETH_ALEN];
u8 channel_plan;
u8 country_code[2];
+ u8 rf_board_option;
u8 rfe_option;
u8 thermal_meter;
u8 crystal_cap;
@@ -1047,6 +1279,7 @@ struct rtw_dev {
struct rtw_regulatory regd;
struct rtw_dm_info dm_info;
+ struct rtw_coex coex;
/* ensures exclusive access from mac80211 callbacks */
struct mutex mutex;
diff --git a/drivers/net/wireless/realtek/rtw88/pci.c b/drivers/net/wireless/realtek/rtw88/pci.c
index 353871c27779..00ef229552d5 100644
--- a/drivers/net/wireless/realtek/rtw88/pci.c
+++ b/drivers/net/wireless/realtek/rtw88/pci.c
@@ -8,6 +8,7 @@
#include "pci.h"
#include "tx.h"
#include "rx.h"
+#include "fw.h"
#include "debug.h"
static u32 rtw_pci_tx_queue_idx_addr[] = {
@@ -206,6 +207,23 @@ static int rtw_pci_reset_rx_desc(struct rtw_dev *rtwdev, struct sk_buff *skb,
return 0;
}
+static void rtw_pci_sync_rx_desc_device(struct rtw_dev *rtwdev, dma_addr_t dma,
+ struct rtw_pci_rx_ring *rx_ring,
+ u32 idx, u32 desc_sz)
+{
+ struct device *dev = rtwdev->dev;
+ struct rtw_pci_rx_buffer_desc *buf_desc;
+ int buf_sz = RTK_PCI_RX_BUF_SIZE;
+
+ dma_sync_single_for_device(dev, dma, buf_sz, DMA_FROM_DEVICE);
+
+ buf_desc = (struct rtw_pci_rx_buffer_desc *)(rx_ring->r.head +
+ idx * desc_sz);
+ memset(buf_desc, 0, sizeof(*buf_desc));
+ buf_desc->buf_size = cpu_to_le16(RTK_PCI_RX_BUF_SIZE);
+ buf_desc->dma = cpu_to_le32(dma);
+}
+
static int rtw_pci_init_rx_ring(struct rtw_dev *rtwdev,
struct rtw_pci_rx_ring *rx_ring,
u8 desc_size, u32 len)
@@ -765,6 +783,7 @@ static void rtw_pci_rx_isr(struct rtw_dev *rtwdev, struct rtw_pci *rtwpci,
u32 pkt_offset;
u32 pkt_desc_sz = chip->rx_pkt_desc_sz;
u32 buf_desc_sz = chip->rx_buf_desc_sz;
+ u32 new_len;
u8 *rx_desc;
dma_addr_t dma;
@@ -783,8 +802,8 @@ static void rtw_pci_rx_isr(struct rtw_dev *rtwdev, struct rtw_pci *rtwpci,
rtw_pci_dma_check(rtwdev, ring, cur_rp);
skb = ring->buf[cur_rp];
dma = *((dma_addr_t *)skb->cb);
- pci_unmap_single(rtwpci->pdev, dma, RTK_PCI_RX_BUF_SIZE,
- PCI_DMA_FROMDEVICE);
+ dma_sync_single_for_cpu(rtwdev->dev, dma, RTK_PCI_RX_BUF_SIZE,
+ DMA_FROM_DEVICE);
rx_desc = skb->data;
chip->ops->query_rx_desc(rtwdev, rx_desc, &pkt_stat, &rx_status);
@@ -792,40 +811,32 @@ static void rtw_pci_rx_isr(struct rtw_dev *rtwdev, struct rtw_pci *rtwpci,
pkt_offset = pkt_desc_sz + pkt_stat.drv_info_sz +
pkt_stat.shift;
- if (pkt_stat.is_c2h) {
- /* keep rx_desc, halmac needs it */
- skb_put(skb, pkt_stat.pkt_len + pkt_offset);
+ /* allocate a new skb for this frame,
+ * discard the frame if none available
+ */
+ new_len = pkt_stat.pkt_len + pkt_offset;
+ new = dev_alloc_skb(new_len);
+ if (WARN_ONCE(!new, "rx routine starvation\n"))
+ goto next_rp;
- /* pass offset for further operation */
- *((u32 *)skb->cb) = pkt_offset;
- skb_queue_tail(&rtwdev->c2h_queue, skb);
- ieee80211_queue_work(rtwdev->hw, &rtwdev->c2h_work);
+ /* copy the DMA data, including the rx_desc, into the new skb */
+ skb_put_data(new, skb->data, new_len);
+
+ if (pkt_stat.is_c2h) {
+ rtw_fw_c2h_cmd_rx_irqsafe(rtwdev, pkt_offset, new);
} else {
- /* remove rx_desc, maybe use skb_pull? */
- skb_put(skb, pkt_stat.pkt_len);
- skb_reserve(skb, pkt_offset);
-
- /* alloc a smaller skb to mac80211 */
- new = dev_alloc_skb(pkt_stat.pkt_len);
- if (!new) {
- new = skb;
- } else {
- skb_put_data(new, skb->data, skb->len);
- dev_kfree_skb_any(skb);
- }
- /* TODO: merge into rx.c */
- rtw_rx_stats(rtwdev, pkt_stat.vif, skb);
+ /* remove rx_desc */
+ skb_pull(new, pkt_offset);
+
+ rtw_rx_stats(rtwdev, pkt_stat.vif, new);
memcpy(new->cb, &rx_status, sizeof(rx_status));
ieee80211_rx_irqsafe(rtwdev->hw, new);
}
- /* skb delivered to mac80211, alloc a new one in rx ring */
- new = dev_alloc_skb(RTK_PCI_RX_BUF_SIZE);
- if (WARN(!new, "rx routine starvation\n"))
- return;
-
- ring->buf[cur_rp] = new;
- rtw_pci_reset_rx_desc(rtwdev, new, ring, cur_rp, buf_desc_sz);
+next_rp:
+ /* new skb delivered to mac80211, re-enable original skb DMA */
+ rtw_pci_sync_rx_desc_device(rtwdev, dma, ring, cur_rp,
+ buf_desc_sz);
/* host read next element in ring */
if (++cur_rp >= ring->r.len)
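The reworked RX path above never unmaps the ring buffer: each frame is synced to the CPU, copied into a freshly allocated skb, and the original buffer is handed straight back to the device by rtw_pci_sync_rx_desc_device(). A minimal sketch of the pattern, with illustrative names (RX_BUF_SIZE and deliver() are stand-ins, not driver API):

	static void rx_copy_and_recycle(struct device *dev, dma_addr_t dma,
					struct sk_buff *ring_skb, u32 frame_len)
	{
		struct sk_buff *new;

		/* hand the persistently mapped buffer to the CPU */
		dma_sync_single_for_cpu(dev, dma, RX_BUF_SIZE, DMA_FROM_DEVICE);

		new = dev_alloc_skb(frame_len);
		if (new) {
			/* copy the frame; ring_skb never leaves the ring */
			skb_put_data(new, ring_skb->data, frame_len);
			deliver(new);
		}

		/* re-arm: the same buffer goes back to the device */
		dma_sync_single_for_device(dev, dma, RX_BUF_SIZE, DMA_FROM_DEVICE);
	}

Compared with the old pci_unmap_single()/remap cycle, this trades one memcpy per frame for the cost of map/unmap, and an allocation failure now drops a single frame instead of stalling the ring.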
@@ -977,7 +988,6 @@ static void rtw_pci_phy_cfg(struct rtw_dev *rtwdev)
u16 cut;
u16 value;
u16 offset;
- u16 ip_sel;
int i;
cut = BIT(0) << rtwdev->hal.cut_version;
@@ -990,7 +1000,6 @@ static void rtw_pci_phy_cfg(struct rtw_dev *rtwdev)
break;
offset = para->offset;
value = para->value;
- ip_sel = para->ip_sel;
if (para->ip_sel == RTW_IP_SEL_PHY)
rtw_mdio_write(rtwdev, offset, value, true);
else
@@ -1005,7 +1014,6 @@ static void rtw_pci_phy_cfg(struct rtw_dev *rtwdev)
break;
offset = para->offset;
value = para->value;
- ip_sel = para->ip_sel;
if (para->ip_sel == RTW_IP_SEL_PHY)
rtw_mdio_write(rtwdev, offset, value, false);
else
diff --git a/drivers/net/wireless/realtek/rtw88/phy.c b/drivers/net/wireless/realtek/rtw88/phy.c
index 4ec8dcf17361..528ee1ee2fd2 100644
--- a/drivers/net/wireless/realtek/rtw88/phy.c
+++ b/drivers/net/wireless/realtek/rtw88/phy.c
@@ -29,15 +29,6 @@ struct phy_pg_cfg_pair {
u32 data;
};
-struct txpwr_lmt_cfg_pair {
- u8 regd;
- u8 band;
- u8 bw;
- u8 rs;
- u8 ch;
- s8 txpwr_lmt;
-};
-
static const u32 db_invert_table[12][8] = {
{10, 13, 16, 20,
25, 32, 40, 50},
@@ -1267,10 +1258,8 @@ static void rtw_xref_txpwr_lmt(struct rtw_dev *rtwdev)
void rtw_parse_tbl_txpwr_lmt(struct rtw_dev *rtwdev,
const struct rtw_table *tbl)
{
- const struct txpwr_lmt_cfg_pair *p = tbl->data;
- const struct txpwr_lmt_cfg_pair *end = p + tbl->size / 6;
-
- BUILD_BUG_ON(sizeof(struct txpwr_lmt_cfg_pair) != sizeof(u8) * 6);
+ const struct rtw_txpwr_lmt_cfg_pair *p = tbl->data;
+ const struct rtw_txpwr_lmt_cfg_pair *end = p + tbl->size;
for (; p < end; p++) {
rtw_phy_set_tx_power_limit(rtwdev, p->regd, p->band,
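With the table now made of typed rtw_txpwr_lmt_cfg_pair entries, tbl->size is evidently an entry count rather than a byte count (the old code divided a byte size by the 6-byte stride, which the removed BUILD_BUG_ON guarded). A sketch of how such a table is declared and walked, assuming set_limit() stands in for rtw_phy_set_tx_power_limit():

	static const struct rtw_txpwr_lmt_cfg_pair example_lmt[] = {
		/* regd, band, bw, rs, ch, txpwr_lmt */
		{ 0, 0, 0, 0, 1, 32 },
		{ 2, 0, 0, 0, 1, 28 },
	};
	const struct rtw_txpwr_lmt_cfg_pair *p;

	for (p = example_lmt; p < example_lmt + ARRAY_SIZE(example_lmt); p++)
		set_limit(p->regd, p->band, p->bw, p->rs, p->ch, p->txpwr_lmt);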
diff --git a/drivers/net/wireless/realtek/rtw88/phy.h b/drivers/net/wireless/realtek/rtw88/phy.h
index 7c8eb732b13c..cc87b157f23e 100644
--- a/drivers/net/wireless/realtek/rtw88/phy.h
+++ b/drivers/net/wireless/realtek/rtw88/phy.h
@@ -45,6 +45,15 @@ void rtw_phy_set_tx_power_level(struct rtw_dev *rtwdev, u8 channel);
void rtw_phy_tx_power_by_rate_config(struct rtw_hal *hal);
void rtw_phy_tx_power_limit_config(struct rtw_hal *hal);
+struct rtw_txpwr_lmt_cfg_pair {
+ u8 regd;
+ u8 band;
+ u8 bw;
+ u8 rs;
+ u8 ch;
+ s8 txpwr_lmt;
+};
+
#define RTW_DECL_TABLE_PHY_COND_CORE(name, cfg, path) \
const struct rtw_table name ## _tbl = { \
.data = name, \
diff --git a/drivers/net/wireless/realtek/rtw88/ps.c b/drivers/net/wireless/realtek/rtw88/ps.c
index 607bfa4317d9..9ecd14feb76b 100644
--- a/drivers/net/wireless/realtek/rtw88/ps.c
+++ b/drivers/net/wireless/realtek/rtw88/ps.c
@@ -6,6 +6,7 @@
#include "fw.h"
#include "ps.h"
#include "mac.h"
+#include "coex.h"
#include "debug.h"
static int rtw_ips_pwr_up(struct rtw_dev *rtwdev)
@@ -26,6 +27,8 @@ int rtw_enter_ips(struct rtw_dev *rtwdev)
{
rtw_flag_set(rtwdev, RTW_FLAG_INACTIVE_PS);
+ rtw_coex_ips_notify(rtwdev, COEX_IPS_ENTER);
+
rtw_core_stop(rtwdev);
return 0;
@@ -53,6 +56,8 @@ int rtw_leave_ips(struct rtw_dev *rtwdev)
rtw_iterate_vifs_atomic(rtwdev, rtw_restore_port_cfg_iter, rtwdev);
+ rtw_coex_ips_notify(rtwdev, COEX_IPS_LEAVE);
+
return 0;
}
@@ -67,6 +72,8 @@ static void rtw_leave_lps_core(struct rtw_dev *rtwdev)
rtw_fw_set_pwr_mode(rtwdev);
rtw_flag_clear(rtwdev, RTW_FLAG_LEISURE_PS);
+
+ rtw_coex_lps_notify(rtwdev, COEX_LPS_DISABLE);
}
static void rtw_enter_lps_core(struct rtw_dev *rtwdev)
@@ -78,6 +85,8 @@ static void rtw_enter_lps_core(struct rtw_dev *rtwdev)
conf->rlbm = 1;
conf->smart_ps = 2;
+ rtw_coex_lps_notify(rtwdev, COEX_LPS_ENABLE);
+
rtw_fw_set_pwr_mode(rtwdev);
rtw_flag_set(rtwdev, RTW_FLAG_LEISURE_PS);
}
diff --git a/drivers/net/wireless/realtek/rtw88/reg.h b/drivers/net/wireless/realtek/rtw88/reg.h
index e2628f05812c..0bd0717baa8b 100644
--- a/drivers/net/wireless/realtek/rtw88/reg.h
+++ b/drivers/net/wireless/realtek/rtw88/reg.h
@@ -37,17 +37,28 @@
#define REG_GPIO_MUXCFG 0x0040
#define BIT_FSPI_EN BIT(19)
+#define BIT_BT_AOD_GPIO3 BIT(9)
+#define BIT_BT_PTA_EN BIT(5)
#define BIT_WLRFE_4_5_EN BIT(2)
#define REG_LED_CFG 0x004C
#define BIT_LNAON_SEL_EN BIT(26)
#define BIT_PAPE_SEL_EN BIT(25)
+#define BIT_DPDT_WL_SEL BIT(24)
+#define BIT_DPDT_SEL_EN BIT(23)
#define REG_PAD_CTRL1 0x0064
#define BIT_PAPE_WLBT_SEL BIT(29)
#define BIT_LNAON_WLBT_SEL BIT(28)
+#define BIT_BTGP_JTAG_EN BIT(24)
+#define BIT_BTGP_SPI_EN BIT(20)
+#define BIT_LED1DIS BIT(15)
+#define BIT_SW_DPDT_SEL_DATA BIT(0)
#define REG_WL_BT_PWR_CTRL 0x0068
#define BIT_BT_FUNC_EN BIT(18)
#define BIT_BT_DIG_CLK_EN BIT(8)
+#define REG_SYS_SDIO_CTRL 0x0070
+#define BIT_DBG_GNT_WL_BT BIT(27)
+#define BIT_LTE_MUX_CTRL_PATH BIT(26)
#define REG_HCI_OPT_CTRL 0x0074
#define REG_MCUFW_CTRL 0x0080
@@ -70,6 +81,8 @@
#define FW_READY_MASK 0xffff
#define REG_WLRF1 0x00EC
+#define REG_WIFI_BT_INFO 0x00AA
+#define BIT_BT_INT_EN BIT(15)
#define REG_SYS_CFG1 0x00F0
#define BIT_RTL_ID BIT(23)
#define BIT_RF_TYPE_ID BIT(27)
@@ -187,6 +200,7 @@
#define REG_LIFETIME_EN 0x0426
#define BIT_BA_PARSER_EN BIT(5)
#define REG_SPEC_SIFS 0x0428
+#define REG_RETRY_LIMIT 0x042a
#define REG_DARFRC 0x0430
#define REG_DARFRCH 0x0434
#define REG_RARFRCH 0x043C
@@ -199,18 +213,25 @@
#define REG_AMPDU_MAX_TIME_V1 0x0455
#define REG_BCNQ1_BDNY_V1 0x0456
#define REG_TX_HANG_CTRL 0x045E
+#define BIT_EN_GNT_BT_AWAKE BIT(3)
#define BIT_EN_EOF_V1 BIT(2)
#define REG_DATA_SC 0x0483
#define REG_ARFR4 0x049C
+#define BIT_WL_RFK BIT(0)
#define REG_ARFRH4 0x04A0
#define REG_ARFR5 0x04A4
#define REG_ARFRH5 0x04A8
#define REG_SW_AMPDU_BURST_MODE_CTRL 0x04BC
#define BIT_PRE_TX_CMD BIT(6)
+#define REG_QUEUE_CTRL 0x04C6
+#define BIT_PTA_WL_TX_EN BIT(4)
+#define BIT_PTA_EDCCA_EN BIT(5)
#define REG_PROT_MODE_CTRL 0x04C8
#define REG_BAR_MODE_CTRL 0x04CC
#define REG_PRECNT_CTRL 0x04E5
+#define BIT_BTCCA_CTRL (BIT(0) | BIT(1))
#define BIT_EN_PRECNT BIT(11)
+#define REG_DUMMY_PAGE4_V1 0x04FC
#define REG_EDCA_VO_PARAM 0x0500
#define REG_EDCA_VI_PARAM 0x0504
@@ -297,11 +318,34 @@
#define REG_RXFLTMAP0 0x06A0
#define REG_RXFLTMAP1 0x06A2
#define REG_RXFLTMAP2 0x06A4
+#define REG_BT_COEX_TABLE0 0x06C0
+#define REG_BT_COEX_TABLE1 0x06C4
+#define REG_BT_COEX_BRK_TABLE 0x06C8
+#define REG_BT_COEX_TABLE_H 0x06CC
+#define REG_BT_COEX_TABLE_H1 0x06CD
+#define REG_BT_COEX_TABLE_H2 0x06CE
+#define REG_BT_COEX_TABLE_H3 0x06CF
#define REG_BBPSF_CTRL 0x06DC
+#define REG_BT_COEX_V2 0x0763
+#define BIT_GNT_BT_POLARITY BIT(4)
+#define BIT_LTE_COEX_EN BIT(7)
+#define REG_BT_STAT_CTRL 0x0778
+#define REG_BT_TDMA_TIME 0x0790
#define REG_WMAC_OPTION_FUNCTION 0x07D0
#define REG_WMAC_OPTION_FUNCTION_1 0x07D4
+#define REG_RX_GAIN_EN 0x081c
+
+#define REG_RFE_CTRL_E 0x0974
+
+#define REG_RFE_CTRL8 0x0cb4
+#define BIT_MASK_RFE_SEL89 GENMASK(7, 0)
+#define REG_RFE_INV8 0x0cbd
+#define BIT_MASK_RFE_INV89 GENMASK(1, 0)
+#define REG_RFE_INV16 0x0cbe
+#define BIT_RFE_BUF_EN BIT(3)
+
#define REG_ANAPAR_XTAL_0 0x1040
#define REG_CPU_DMEM_CON 0x1080
#define BIT_WL_PLATFORM_RST BIT(16)
@@ -407,15 +451,33 @@
#define LTECOEX_WRITE_DATA REG_WL2LTECOEX_INDIRECT_ACCESS_WRITE_DATA_V1
#define LTECOEX_READ_DATA REG_WL2LTECOEX_INDIRECT_ACCESS_READ_DATA_V1
+#define REG_IGN_GNT_BT1 0x1860
+
+#define REG_RFESEL_CTRL 0x1990
+
+#define REG_NOMASK_TXBT 0x1ca7
+#define REG_ANAPAR 0x1c30
+#define BIT_ANAPAR_BTPS BIT(22)
+#define REG_RSTB_SEL 0x1c38
+
+#define REG_IGN_GNTBT4 0x4160
+
+#define RF_MODOPT 0x01
#define RF_DTXLOK 0x08
#define RF_CFGCH 0x18
+#define RF_RCK 0x1d
#define RF_LUTWA 0x33
#define RF_LUTWD1 0x3e
#define RF_LUTWD0 0x3f
#define RF_XTALX2 0xb8
#define RF_MALSEL 0xbe
+#define RF_RCKD 0xde
#define RF_LUTDBG 0xdf
#define RF_LUTWE2 0xee
#define RF_LUTWE 0xef
+#define LTE_COEX_CTRL 0x38
+#define LTE_WL_TRX_CTRL 0xa0
+#define LTE_BT_TRX_CTRL 0xa4
+
#endif
diff --git a/drivers/net/wireless/realtek/rtw88/rtw8822b.c b/drivers/net/wireless/realtek/rtw88/rtw8822b.c
index 1172f6c0605b..568033afb024 100644
--- a/drivers/net/wireless/realtek/rtw88/rtw8822b.c
+++ b/drivers/net/wireless/realtek/rtw88/rtw8822b.c
@@ -3,6 +3,7 @@
*/
#include "main.h"
+#include "coex.h"
#include "fw.h"
#include "tx.h"
#include "rx.h"
@@ -31,6 +32,7 @@ static int rtw8822b_read_efuse(struct rtw_dev *rtwdev, u8 *log_map)
map = (struct rtw8822b_efuse *)log_map;
efuse->rfe_option = map->rfe_option;
+ efuse->rf_board_option = map->rf_board_option;
efuse->crystal_cap = map->xtal_k;
efuse->pa_type_2g = map->pa_type;
efuse->pa_type_5g = map->pa_type;
@@ -104,24 +106,6 @@ static void rtw8822b_phy_set_param(struct rtw_dev *rtwdev)
rtw_phy_init(rtwdev);
rtw8822b_phy_rfe_init(rtwdev);
-
- /* wifi path controller */
- rtw_write32_mask(rtwdev, 0x70, 0x4000000, 1);
- /* BB control */
- rtw_write32_mask(rtwdev, 0x4c, 0x01800000, 0x2);
- /* antenna mux switch */
- rtw_write8(rtwdev, 0x974, 0xff);
- rtw_write32_mask(rtwdev, 0x1990, 0x300, 0);
- rtw_write32_mask(rtwdev, 0xcbc, 0x80000, 0x0);
- /* SW control */
- rtw_write8(rtwdev, 0xcb4, 0x77);
- /* switch to WL side controller and gnt_wl gnt_bt debug signal */
- rtw_write32_mask(rtwdev, 0x70, 0xff000000, 0x0e);
- /* gnt_wl = 1, gnt_bt = 0 */
- rtw_write32(rtwdev, 0x1704, 0x7700);
- rtw_write32(rtwdev, 0x1700, 0xc00f0038);
- /* switch for WL 2G */
- rtw_write8(rtwdev, 0xcbd, 0x2);
}
#define WLAN_SLOT_TIME 0x09
@@ -960,6 +944,7 @@ static void rtw8822b_false_alarm_statistics(struct rtw_dev *rtwdev)
u32 cck_enable;
u32 cck_fa_cnt;
u32 ofdm_fa_cnt;
+ u32 crc32_cnt;
cck_enable = rtw_read32(rtwdev, 0x808) & BIT(28);
cck_fa_cnt = rtw_read16(rtwdev, 0xa5c);
@@ -970,6 +955,19 @@ static void rtw8822b_false_alarm_statistics(struct rtw_dev *rtwdev)
dm_info->total_fa_cnt = ofdm_fa_cnt;
dm_info->total_fa_cnt += cck_enable ? cck_fa_cnt : 0;
+ crc32_cnt = rtw_read32(rtwdev, 0xf04);
+ dm_info->cck_ok_cnt = crc32_cnt & 0xffff;
+ dm_info->cck_err_cnt = (crc32_cnt & 0xffff0000) >> 16;
+ crc32_cnt = rtw_read32(rtwdev, 0xf14);
+ dm_info->ofdm_ok_cnt = crc32_cnt & 0xffff;
+ dm_info->ofdm_err_cnt = (crc32_cnt & 0xffff0000) >> 16;
+ crc32_cnt = rtw_read32(rtwdev, 0xf10);
+ dm_info->ht_ok_cnt = crc32_cnt & 0xffff;
+ dm_info->ht_err_cnt = (crc32_cnt & 0xffff0000) >> 16;
+ crc32_cnt = rtw_read32(rtwdev, 0xf0c);
+ dm_info->vht_ok_cnt = crc32_cnt & 0xffff;
+ dm_info->vht_err_cnt = (crc32_cnt & 0xffff0000) >> 16;
+
rtw_write32_set(rtwdev, 0x9a4, BIT(17));
rtw_write32_clr(rtwdev, 0x9a4, BIT(17));
rtw_write32_clr(rtwdev, 0xa2c, BIT(15));
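Each CRC32 report register packs the OK count in the low half-word and the error count in the high half-word; the four reads above repeat the same split. A hypothetical helper capturing the pattern (the driver open-codes it per rate type):

	static void crc32_cnt_split(u32 crc32_cnt, u32 *ok_cnt, u32 *err_cnt)
	{
		*ok_cnt  = crc32_cnt & 0xffff;		/* bits 15..0  */
		*err_cnt = (crc32_cnt >> 16) & 0xffff;	/* bits 31..16 */
	}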
@@ -997,12 +995,260 @@ static void rtw8822b_do_iqk(struct rtw_dev *rtwdev)
rtw_write_rf(rtwdev, RF_PATH_A, RF_DTXLOK, RFREG_MASK, 0x0);
reload = !!rtw_read32_mask(rtwdev, REG_IQKFAILMSK, BIT(16));
- iqk_fail_mask = rtw_read32_mask(rtwdev, REG_IQKFAILMSK, GENMASK(0, 7));
+ iqk_fail_mask = rtw_read32_mask(rtwdev, REG_IQKFAILMSK, GENMASK(7, 0));
rtw_dbg(rtwdev, RTW_DBG_PHY,
"iqk counter=%d reload=%d do_iqk_cnt=%d n_iqk_fail(mask)=0x%02x\n",
counter, reload, ++do_iqk_cnt, iqk_fail_mask);
}
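The hunk above also fixes a latent bug: GENMASK(h, l) builds a contiguous mask from bit h down to bit l, so the high bit must come first. For example:

	u32 ok  = GENMASK(7, 0);	/* 0x000000ff: bits 7..0, as intended */
	u32 bad = GENMASK(0, 7);	/* h < l: bogus (empty) mask */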
+static void rtw8822b_coex_cfg_init(struct rtw_dev *rtwdev)
+{
+ /* enable TBTT nterrupt */
+ rtw_write8_set(rtwdev, REG_BCN_CTRL, BIT_EN_BCN_FUNCTION);
+
+ /* BT report packet sample rate */
+ /* 0x790[5:0]=0x5 */
+ rtw_write8_set(rtwdev, REG_BT_TDMA_TIME, 0x05);
+
+ /* enable BT counter statistics */
+ rtw_write8(rtwdev, REG_BT_STAT_CTRL, 0x1);
+
+ /* enable PTA (3-wire function from BT side) */
+ rtw_write32_set(rtwdev, REG_GPIO_MUXCFG, BIT_BT_PTA_EN);
+ rtw_write32_set(rtwdev, REG_GPIO_MUXCFG, BIT_BT_AOD_GPIO3);
+
+ /* enable PTA (tx/rx signal from WiFi side) */
+ rtw_write8_set(rtwdev, REG_QUEUE_CTRL, BIT_PTA_WL_TX_EN);
+ /* WL Tx signal to PTA does not include EDCCA */
+ rtw_write8_clr(rtwdev, REG_QUEUE_CTRL, BIT_PTA_EDCCA_EN);
+ /* GNT_BT = 1 when both are selected */
+ rtw_write8_set(rtwdev, REG_BT_COEX_V2, BIT_GNT_BT_POLARITY);
+}
+
+static void rtw8822b_coex_cfg_ant_switch(struct rtw_dev *rtwdev,
+ u8 ctrl_type, u8 pos_type)
+{
+ struct rtw_coex *coex = &rtwdev->coex;
+ struct rtw_coex_dm *coex_dm = &coex->dm;
+ struct rtw_coex_rfe *coex_rfe = &coex->rfe;
+ bool polarity_inverse;
+ u8 regval = 0;
+
+ if (((ctrl_type << 8) + pos_type) == coex_dm->cur_switch_status)
+ return;
+
+ coex_dm->cur_switch_status = (ctrl_type << 8) + pos_type;
+
+ if (coex_rfe->ant_switch_diversity &&
+ ctrl_type == COEX_SWITCH_CTRL_BY_BBSW)
+ ctrl_type = COEX_SWITCH_CTRL_BY_ANTDIV;
+
+ polarity_inverse = (coex_rfe->ant_switch_polarity == 1);
+
+ switch (ctrl_type) {
+ default:
+ case COEX_SWITCH_CTRL_BY_BBSW:
+ /* 0x4c[23] = 0 */
+ rtw_write8_mask(rtwdev, REG_LED_CFG + 2, BIT_DPDT_SEL_EN >> 16, 0x0);
+ /* 0x4c[24] = 1 */
+ rtw_write8_mask(rtwdev, REG_LED_CFG + 3, BIT_DPDT_WL_SEL >> 24, 0x1);
+ /* BB SW, DPDT use RFE_ctrl8 and RFE_ctrl9 as ctrl pin */
+ rtw_write8_mask(rtwdev, REG_RFE_CTRL8, BIT_MASK_RFE_SEL89, 0x77);
+
+ if (pos_type == COEX_SWITCH_TO_WLG_BT) {
+ if (coex_rfe->rfe_module_type != 0x4 &&
+ coex_rfe->rfe_module_type != 0x2)
+ regval = 0x3;
+ else
+ regval = (!polarity_inverse ? 0x2 : 0x1);
+ } else if (pos_type == COEX_SWITCH_TO_WLG) {
+ regval = (!polarity_inverse ? 0x2 : 0x1);
+ } else {
+ regval = (!polarity_inverse ? 0x1 : 0x2);
+ }
+
+ rtw_write8_mask(rtwdev, REG_RFE_INV8, BIT_MASK_RFE_INV89, regval);
+ break;
+ case COEX_SWITCH_CTRL_BY_PTA:
+ /* 0x4c[23] = 0 */
+ rtw_write8_mask(rtwdev, REG_LED_CFG + 2, BIT_DPDT_SEL_EN >> 16, 0x0);
+ /* 0x4c[24] = 1 */
+ rtw_write8_mask(rtwdev, REG_LED_CFG + 3, BIT_DPDT_WL_SEL >> 24, 0x1);
+ /* PTA, DPDT use RFE_ctrl8 and RFE_ctrl9 as ctrl pin */
+ rtw_write8_mask(rtwdev, REG_RFE_CTRL8, BIT_MASK_RFE_SEL89, 0x66);
+
+ regval = (!polarity_inverse ? 0x2 : 0x1);
+ rtw_write8_mask(rtwdev, REG_RFE_INV8, BIT_MASK_RFE_INV89, regval);
+ break;
+ case COEX_SWITCH_CTRL_BY_ANTDIV:
+ /* 0x4c[23] = 0 */
+ rtw_write8_mask(rtwdev, REG_LED_CFG + 2, BIT_DPDT_SEL_EN >> 16, 0x0);
+ /* 0x4c[24] = 1 */
+ rtw_write8_mask(rtwdev, REG_LED_CFG + 3, BIT_DPDT_WL_SEL >> 24, 0x1);
+ rtw_write8_mask(rtwdev, REG_RFE_CTRL8, BIT_MASK_RFE_SEL89, 0x88);
+ break;
+ case COEX_SWITCH_CTRL_BY_MAC:
+ /* 0x4c[23] = 1 */
+ rtw_write8_mask(rtwdev, REG_LED_CFG + 2, BIT_DPDT_SEL_EN >> 16, 0x1);
+
+ regval = (!polarity_inverse ? 0x0 : 0x1);
+ rtw_write8_mask(rtwdev, REG_PAD_CTRL1, BIT_SW_DPDT_SEL_DATA, regval);
+ break;
+ case COEX_SWITCH_CTRL_BY_FW:
+ /* 0x4c[23] = 0 */
+ rtw_write8_mask(rtwdev, REG_LED_CFG + 2, BIT_DPDT_SEL_EN >> 16, 0x0);
+ /* 0x4c[24] = 1 */
+ rtw_write8_mask(rtwdev, REG_LED_CFG + 3, BIT_DPDT_WL_SEL >> 24, 0x1);
+ break;
+ case COEX_SWITCH_CTRL_BY_BT:
+ /* 0x4c[23] = 0 */
+ rtw_write8_mask(rtwdev, REG_LED_CFG + 2, BIT_DPDT_SEL_EN >> 16, 0x0);
+ /* 0x4c[24] = 0 */
+ rtw_write8_mask(rtwdev, REG_LED_CFG + 3, BIT_DPDT_WL_SEL >> 24, 0x0);
+ break;
+ }
+}
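Throughout the switch cases the 32-bit bit definitions are written through 8-bit accesses, so each BIT_* constant is shifted down to the byte that contains it (e.g. BIT_DPDT_SEL_EN is bit 23 of register 0x4c, reached as bit 7 of byte offset 2). A hypothetical wrapper for the pattern, illustrative only:

	/* write one 32-bit-defined bit via a byte access */
	static void write8_bit_of_reg32(struct rtw_dev *rtwdev, u32 reg,
					u32 bit, u8 val)
	{
		u32 byte = (ffs(bit) - 1) / 8;	/* byte holding the bit */

		rtw_write8_mask(rtwdev, reg + byte, bit >> (byte * 8), val);
	}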
+
+static void rtw8822b_coex_cfg_gnt_fix(struct rtw_dev *rtwdev)
+{
+}
+
+static void rtw8822b_coex_cfg_gnt_debug(struct rtw_dev *rtwdev)
+{
+ rtw_write8_mask(rtwdev, REG_PAD_CTRL1 + 2, BIT_BTGP_SPI_EN >> 16, 0);
+ rtw_write8_mask(rtwdev, REG_PAD_CTRL1 + 3, BIT_BTGP_JTAG_EN >> 24, 0);
+ rtw_write8_mask(rtwdev, REG_GPIO_MUXCFG + 2, BIT_FSPI_EN >> 16, 0);
+ rtw_write8_mask(rtwdev, REG_PAD_CTRL1 + 1, BIT_LED1DIS >> 8, 0);
+ rtw_write8_mask(rtwdev, REG_SYS_SDIO_CTRL + 3, BIT_DBG_GNT_WL_BT >> 24, 0);
+}
+
+static void rtw8822b_coex_cfg_rfe_type(struct rtw_dev *rtwdev)
+{
+ struct rtw_coex *coex = &rtwdev->coex;
+ struct rtw_coex_rfe *coex_rfe = &coex->rfe;
+ struct rtw_efuse *efuse = &rtwdev->efuse;
+ bool is_ext_fem = false;
+
+ coex_rfe->rfe_module_type = efuse->rfe_option;
+ coex_rfe->ant_switch_polarity = 0;
+ coex_rfe->ant_switch_diversity = false;
+ if (coex_rfe->rfe_module_type == 0x12 ||
+ coex_rfe->rfe_module_type == 0x15 ||
+ coex_rfe->rfe_module_type == 0x16)
+ coex_rfe->ant_switch_exist = false;
+ else
+ coex_rfe->ant_switch_exist = true;
+
+ if (coex_rfe->rfe_module_type == 2 ||
+ coex_rfe->rfe_module_type == 4) {
+ rtw_coex_write_scbd(rtwdev, COEX_SCBD_EXTFEM, true);
+ is_ext_fem = true;
+ } else {
+ rtw_coex_write_scbd(rtwdev, COEX_SCBD_EXTFEM, false);
+ }
+
+ coex_rfe->wlg_at_btg = false;
+
+ if (efuse->share_ant &&
+ coex_rfe->ant_switch_exist && !is_ext_fem)
+ coex_rfe->ant_switch_with_bt = true;
+ else
+ coex_rfe->ant_switch_with_bt = false;
+
+ /* Ext switch buffer mux */
+ rtw_write8(rtwdev, REG_RFE_CTRL_E, 0xff);
+ rtw_write8_mask(rtwdev, REG_RFESEL_CTRL + 1, 0x3, 0x0);
+ rtw_write8_mask(rtwdev, REG_RFE_INV16, BIT_RFE_BUF_EN, 0x0);
+
+ /* Disable LTE coex function on the WiFi side */
+ rtw_coex_write_indirect_reg(rtwdev, LTE_COEX_CTRL, BIT_LTE_COEX_EN, 0);
+
+ /* BTC_CTT_WL_VS_LTE */
+ rtw_coex_write_indirect_reg(rtwdev, LTE_WL_TRX_CTRL, MASKLWORD, 0xffff);
+
+ /* BTC_CTT_BT_VS_LTE */
+ rtw_coex_write_indirect_reg(rtwdev, LTE_BT_TRX_CTRL, MASKLWORD, 0xffff);
+}
+
+static void rtw8822b_coex_cfg_wl_tx_power(struct rtw_dev *rtwdev, u8 wl_pwr)
+{
+ struct rtw_coex *coex = &rtwdev->coex;
+ struct rtw_coex_dm *coex_dm = &coex->dm;
+ static const u16 reg_addr[] = {0xc58, 0xe58};
+ static const u8 wl_tx_power[] = {0xd8, 0xd4, 0xd0, 0xcc, 0xc8};
+ u8 i, pwr;
+
+ if (wl_pwr == coex_dm->cur_wl_pwr_lvl)
+ return;
+
+ coex_dm->cur_wl_pwr_lvl = wl_pwr;
+
+ if (coex_dm->cur_wl_pwr_lvl >= ARRAY_SIZE(wl_tx_power))
+ coex_dm->cur_wl_pwr_lvl = ARRAY_SIZE(wl_tx_power) - 1;
+
+ pwr = wl_tx_power[coex_dm->cur_wl_pwr_lvl];
+
+ for (i = 0; i < ARRAY_SIZE(reg_addr); i++)
+ rtw_write8_mask(rtwdev, reg_addr[i], 0xff, pwr);
+}
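The coex power level indexes a fixed five-entry table of TX power codes and is clamped to the last entry, so any out-of-range level from the coex core degrades gracefully. For example, per the table above:

	rtw8822b_coex_cfg_wl_tx_power(rtwdev, 2);	/* writes 0xd0 to 0xc58/0xe58 */
	rtw8822b_coex_cfg_wl_tx_power(rtwdev, 99);	/* clamped: writes 0xc8 */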
+
+static void rtw8822b_coex_cfg_wl_rx_gain(struct rtw_dev *rtwdev, bool low_gain)
+{
+ struct rtw_coex *coex = &rtwdev->coex;
+ struct rtw_coex_dm *coex_dm = &coex->dm;
+ /* WL Rx Low gain on */
+ static const u32 wl_rx_low_gain_on[] = {
+ 0xff000003, 0xbd120003, 0xbe100003, 0xbf080003, 0xbf060003,
+ 0xbf050003, 0xbc140003, 0xbb160003, 0xba180003, 0xb91a0003,
+ 0xb81c0003, 0xb71e0003, 0xb4200003, 0xb5220003, 0xb4240003,
+ 0xb3260003, 0xb2280003, 0xb12a0003, 0xb02c0003, 0xaf2e0003,
+ 0xae300003, 0xad320003, 0xac340003, 0xab360003, 0x8d380003,
+ 0x8c3a0003, 0x8b3c0003, 0x8a3e0003, 0x6e400003, 0x6d420003,
+ 0x6c440003, 0x6b460003, 0x6a480003, 0x694a0003, 0x684c0003,
+ 0x674e0003, 0x66500003, 0x65520003, 0x64540003, 0x64560003,
+ 0x007e0403
+ };
+
+ /* WL Rx Low gain off */
+ static const u32 wl_rx_low_gain_off[] = {
+ 0xff000003, 0xf4120003, 0xf5100003, 0xf60e0003, 0xf70c0003,
+ 0xf80a0003, 0xf3140003, 0xf2160003, 0xf1180003, 0xf01a0003,
+ 0xef1c0003, 0xee1e0003, 0xed200003, 0xec220003, 0xeb240003,
+ 0xea260003, 0xe9280003, 0xe82a0003, 0xe72c0003, 0xe62e0003,
+ 0xe5300003, 0xc8320003, 0xc7340003, 0xc6360003, 0xc5380003,
+ 0xc43a0003, 0xc33c0003, 0xc23e0003, 0xc1400003, 0xc0420003,
+ 0xa5440003, 0xa4460003, 0xa3480003, 0xa24a0003, 0xa14c0003,
+ 0x834e0003, 0x82500003, 0x81520003, 0x80540003, 0x65560003,
+ 0x007e0403
+ };
+ u8 i;
+
+ if (low_gain == coex_dm->cur_wl_rx_low_gain_en)
+ return;
+
+ coex_dm->cur_wl_rx_low_gain_en = low_gain;
+
+ if (coex_dm->cur_wl_rx_low_gain_en) {
+ for (i = 0; i < ARRAY_SIZE(wl_rx_low_gain_on); i++)
+ rtw_write32(rtwdev, REG_RX_GAIN_EN, wl_rx_low_gain_on[i]);
+
+ /* set Rx filter corner RCK offset */
+ rtw_write_rf(rtwdev, RF_PATH_A, RF_RCKD, 0x2, 0x1);
+ rtw_write_rf(rtwdev, RF_PATH_A, RF_RCK, 0x3f, 0x3f);
+ rtw_write_rf(rtwdev, RF_PATH_B, RF_RCKD, 0x2, 0x1);
+ rtw_write_rf(rtwdev, RF_PATH_B, RF_RCK, 0x3f, 0x3f);
+ } else {
+ for (i = 0; i < ARRAY_SIZE(wl_rx_low_gain_off); i++)
+ rtw_write32(rtwdev, REG_RX_GAIN_EN, wl_rx_low_gain_off[i]);
+
+ /* set Rx filter corner RCK offset */
+ rtw_write_rf(rtwdev, RF_PATH_A, RF_RCK, 0x3f, 0x4);
+ rtw_write_rf(rtwdev, RF_PATH_A, RF_RCKD, 0x2, 0x0);
+ rtw_write_rf(rtwdev, RF_PATH_B, RF_RCK, 0x3f, 0x4);
+ rtw_write_rf(rtwdev, RF_PATH_B, RF_RCKD, 0x2, 0x0);
+ }
+}
+
static struct rtw_pwr_seq_cmd trans_carddis_to_cardemu_8822b[] = {
{0x0086,
RTW_PWR_CUT_ALL_MSK,
@@ -1549,8 +1795,160 @@ static struct rtw_chip_ops rtw8822b_ops = {
.cfg_ldo25 = rtw8822b_cfg_ldo25,
.false_alarm_statistics = rtw8822b_false_alarm_statistics,
.do_iqk = rtw8822b_do_iqk,
+
+ .coex_set_init = rtw8822b_coex_cfg_init,
+ .coex_set_ant_switch = rtw8822b_coex_cfg_ant_switch,
+ .coex_set_gnt_fix = rtw8822b_coex_cfg_gnt_fix,
+ .coex_set_gnt_debug = rtw8822b_coex_cfg_gnt_debug,
+ .coex_set_rfe_type = rtw8822b_coex_cfg_rfe_type,
+ .coex_set_wl_tx_power = rtw8822b_coex_cfg_wl_tx_power,
+ .coex_set_wl_rx_gain = rtw8822b_coex_cfg_wl_rx_gain,
+};
+
+/* Shared-Antenna Coex Table */
+static const struct coex_table_para table_sant_8822b[] = {
+ {0xffffffff, 0xffffffff}, /* case-0 */
+ {0x55555555, 0x55555555},
+ {0x66555555, 0x66555555},
+ {0xaaaaaaaa, 0xaaaaaaaa},
+ {0x5a5a5a5a, 0x5a5a5a5a},
+ {0xfafafafa, 0xfafafafa}, /* case-5 */
+ {0x6a5a6a5a, 0xaaaaaaaa},
+ {0x6a5a56aa, 0x6a5a56aa},
+ {0x6a5a5a5a, 0x6a5a5a5a},
+ {0x66555555, 0x5a5a5a5a},
+ {0x66555555, 0x6a5a5a5a}, /* case-10 */
+ {0x66555555, 0xfafafafa},
+ {0x66555555, 0x6a5a5aaa},
+ {0x66555555, 0x5aaa5aaa},
+ {0x66555555, 0xaaaa5aaa},
+ {0x66555555, 0xaaaaaaaa}, /* case-15 */
+ {0xffff55ff, 0xfafafafa},
+ {0xffff55ff, 0x6afa5afa},
+ {0xaaffffaa, 0xfafafafa},
+ {0xaa5555aa, 0x5a5a5a5a},
+ {0xaa5555aa, 0x6a5a5a5a}, /* case-20 */
+ {0xaa5555aa, 0xaaaaaaaa},
+ {0xffffffff, 0x5a5a5a5a},
+ {0xffffffff, 0x6a5a5a5a},
+ {0xffffffff, 0x55555555},
+ {0xffffffff, 0x6a5a5aaa}, /* case-25 */
+ {0x55555555, 0x5a5a5a5a},
+ {0x55555555, 0xaaaaaaaa},
+ {0x55555555, 0x6a5a6a5a},
+ {0x66556655, 0x66556655}
+};
+
+/* Non-Shared-Antenna Coex Table */
+static const struct coex_table_para table_nsant_8822b[] = {
+ {0xffffffff, 0xffffffff}, /* case-100 */
+ {0x55555555, 0x55555555},
+ {0x66555555, 0x66555555},
+ {0xaaaaaaaa, 0xaaaaaaaa},
+ {0x5a5a5a5a, 0x5a5a5a5a},
+ {0xfafafafa, 0xfafafafa}, /* case-105 */
+ {0x5afa5afa, 0x5afa5afa},
+ {0x55555555, 0xfafafafa},
+ {0x66555555, 0xfafafafa},
+ {0x66555555, 0x5a5a5a5a},
+ {0x66555555, 0x6a5a5a5a}, /* case-110 */
+ {0x66555555, 0xaaaaaaaa},
+ {0xffff55ff, 0xfafafafa},
+ {0xffff55ff, 0x5afa5afa},
+ {0xffff55ff, 0xaaaaaaaa},
+ {0xaaffffaa, 0xfafafafa}, /* case-115 */
+ {0xaaffffaa, 0x5afa5afa},
+ {0xaaffffaa, 0xaaaaaaaa},
+ {0xffffffff, 0xfafafafa},
+ {0xffffffff, 0x5afa5afa},
+ {0xffffffff, 0xaaaaaaaa}, /* case-120 */
+ {0x55ff55ff, 0x5afa5afa},
+ {0x55ff55ff, 0xaaaaaaaa},
+ {0x55ff55ff, 0x55ff55ff}
};
+/* Shared-Antenna TDMA */
+static const struct coex_tdma_para tdma_sant_8822b[] = {
+ { {0x00, 0x00, 0x00, 0x00, 0x00} }, /* case-0 */
+ { {0x61, 0x45, 0x03, 0x11, 0x11} },
+ { {0x61, 0x3a, 0x03, 0x11, 0x11} },
+ { {0x61, 0x30, 0x03, 0x11, 0x11} },
+ { {0x61, 0x20, 0x03, 0x11, 0x11} },
+ { {0x61, 0x10, 0x03, 0x11, 0x11} }, /* case-5 */
+ { {0x61, 0x45, 0x03, 0x11, 0x10} },
+ { {0x61, 0x3a, 0x03, 0x11, 0x10} },
+ { {0x61, 0x30, 0x03, 0x11, 0x10} },
+ { {0x61, 0x20, 0x03, 0x11, 0x10} },
+ { {0x61, 0x10, 0x03, 0x11, 0x10} }, /* case-10 */
+ { {0x61, 0x08, 0x03, 0x11, 0x14} },
+ { {0x61, 0x08, 0x03, 0x10, 0x14} },
+ { {0x51, 0x08, 0x03, 0x10, 0x54} },
+ { {0x51, 0x08, 0x03, 0x10, 0x55} },
+ { {0x51, 0x08, 0x07, 0x10, 0x54} }, /* case-15 */
+ { {0x51, 0x45, 0x03, 0x10, 0x10} },
+ { {0x51, 0x3a, 0x03, 0x10, 0x50} },
+ { {0x51, 0x30, 0x03, 0x10, 0x50} },
+ { {0x51, 0x20, 0x03, 0x10, 0x50} },
+ { {0x51, 0x10, 0x03, 0x10, 0x50} }, /* case-20 */
+ { {0x51, 0x4a, 0x03, 0x10, 0x50} },
+ { {0x51, 0x0c, 0x03, 0x10, 0x54} },
+ { {0x55, 0x08, 0x03, 0x10, 0x54} },
+ { {0x65, 0x10, 0x03, 0x11, 0x11} },
+ { {0x51, 0x10, 0x03, 0x10, 0x51} }, /* case-25 */
+ { {0x51, 0x08, 0x03, 0x10, 0x50} }
+};
+
+/* Non-Shared-Antenna TDMA */
+static const struct coex_tdma_para tdma_nsant_8822b[] = {
+ { {0x00, 0x00, 0x00, 0x00, 0x00} }, /* case-100 */
+ { {0x61, 0x45, 0x03, 0x11, 0x11} },
+ { {0x61, 0x3a, 0x03, 0x11, 0x11} },
+ { {0x61, 0x30, 0x03, 0x11, 0x11} },
+ { {0x61, 0x20, 0x03, 0x11, 0x11} },
+ { {0x61, 0x10, 0x03, 0x11, 0x11} }, /* case-105 */
+ { {0x61, 0x45, 0x03, 0x11, 0x10} },
+ { {0x61, 0x3a, 0x03, 0x11, 0x10} },
+ { {0x61, 0x30, 0x03, 0x11, 0x10} },
+ { {0x61, 0x20, 0x03, 0x11, 0x10} },
+ { {0x61, 0x10, 0x03, 0x11, 0x10} }, /* case-110 */
+ { {0x61, 0x08, 0x03, 0x11, 0x14} },
+ { {0x61, 0x08, 0x03, 0x10, 0x14} },
+ { {0x51, 0x08, 0x03, 0x10, 0x54} },
+ { {0x51, 0x08, 0x03, 0x10, 0x55} },
+ { {0x51, 0x08, 0x07, 0x10, 0x54} }, /* case-115 */
+ { {0x51, 0x45, 0x03, 0x10, 0x50} },
+ { {0x51, 0x3a, 0x03, 0x10, 0x50} },
+ { {0x51, 0x30, 0x03, 0x10, 0x50} },
+ { {0x51, 0x20, 0x03, 0x10, 0x50} },
+ { {0x51, 0x10, 0x03, 0x10, 0x50} } /* case-120 */
+};
+
+/* RSSI in percent (dBm = percent - 100) */
+static const u8 wl_rssi_step_8822b[] = {60, 50, 44, 30};
+static const u8 bt_rssi_step_8822b[] = {30, 30, 30, 30};
+static const struct coex_5g_afh_map afh_5g_8822b[] = { {0, 0, 0} };
+
+/* wl_tx_dec_power, bt_tx_dec_power, wl_rx_gain, bt_rx_lna_constrain */
+static const struct coex_rf_para rf_para_tx_8822b[] = {
+ {0, 0, false, 7}, /* for normal */
+ {0, 16, false, 7}, /* for WL-CPT */
+ {4, 0, true, 1},
+ {3, 6, true, 1},
+ {2, 9, true, 1},
+ {1, 13, true, 1}
+};
+
+static const struct coex_rf_para rf_para_rx_8822b[] = {
+ {0, 0, false, 7}, /* for normal */
+ {0, 16, false, 7}, /* for WL-CPT */
+ {4, 0, true, 1},
+ {3, 6, true, 1},
+ {2, 9, true, 1},
+ {1, 13, true, 1}
+};
+
+static_assert(ARRAY_SIZE(rf_para_tx_8822b) == ARRAY_SIZE(rf_para_rx_8822b));
+
struct rtw_chip_info rtw8822b_hw_spec = {
.ops = &rtw8822b_ops,
.id = RTW_CHIP_TYPE_8822B,
@@ -1588,6 +1986,32 @@ struct rtw_chip_info rtw8822b_hw_spec = {
.rf_tbl = {&rtw8822b_rf_a_tbl, &rtw8822b_rf_b_tbl},
.rfe_defs = rtw8822b_rfe_defs,
.rfe_defs_size = ARRAY_SIZE(rtw8822b_rfe_defs),
+
+ .coex_para_ver = 0x19062706,
+ .bt_desired_ver = 0x6,
+ .scbd_support = true,
+ .new_scbd10_def = false,
+ .pstdma_type = COEX_PSTDMA_FORCE_LPSOFF,
+ .bt_rssi_type = COEX_BTRSSI_RATIO,
+ .ant_isolation = 15,
+ .rssi_tolerance = 2,
+ .wl_rssi_step = wl_rssi_step_8822b,
+ .bt_rssi_step = bt_rssi_step_8822b,
+ .table_sant_num = ARRAY_SIZE(table_sant_8822b),
+ .table_sant = table_sant_8822b,
+ .table_nsant_num = ARRAY_SIZE(table_nsant_8822b),
+ .table_nsant = table_nsant_8822b,
+ .tdma_sant_num = ARRAY_SIZE(tdma_sant_8822b),
+ .tdma_sant = tdma_sant_8822b,
+ .tdma_nsant_num = ARRAY_SIZE(tdma_nsant_8822b),
+ .tdma_nsant = tdma_nsant_8822b,
+ .wl_rf_para_num = ARRAY_SIZE(rf_para_tx_8822b),
+ .wl_rf_para_tx = rf_para_tx_8822b,
+ .wl_rf_para_rx = rf_para_rx_8822b,
+ .bt_afh_span_bw20 = 0x24,
+ .bt_afh_span_bw40 = 0x36,
+ .afh_5g_num = ARRAY_SIZE(afh_5g_8822b),
+ .afh_5g = afh_5g_8822b,
};
EXPORT_SYMBOL(rtw8822b_hw_spec);
diff --git a/drivers/net/wireless/realtek/rtw88/rtw8822b_table.c b/drivers/net/wireless/realtek/rtw88/rtw8822b_table.c
index 2d2dfb495ce1..465f58411cab 100644
--- a/drivers/net/wireless/realtek/rtw88/rtw8822b_table.c
+++ b/drivers/net/wireless/realtek/rtw88/rtw8822b_table.c
@@ -20382,402 +20382,1182 @@ static const u32 rtw8822b_rf_b[] = {
RTW_DECL_TABLE_RF_RADIO(rtw8822b_rf_b, B);
-static const u8 rtw8822b_txpwr_lmt_type2[] = {
- 0, 0, 0, 0, 1, 32, 2, 0, 0, 0, 1, 28, 1, 0, 0, 0, 1, 30,
- 0, 0, 0, 0, 2, 32, 2, 0, 0, 0, 2, 28, 1, 0, 0, 0, 2, 30,
- 0, 0, 0, 0, 3, 32, 2, 0, 0, 0, 3, 28, 1, 0, 0, 0, 3, 30,
- 0, 0, 0, 0, 4, 32, 2, 0, 0, 0, 4, 28, 1, 0, 0, 0, 4, 30,
- 0, 0, 0, 0, 5, 32, 2, 0, 0, 0, 5, 28, 1, 0, 0, 0, 5, 30,
- 0, 0, 0, 0, 6, 32, 2, 0, 0, 0, 6, 28, 1, 0, 0, 0, 6, 30,
- 0, 0, 0, 0, 7, 32, 2, 0, 0, 0, 7, 28, 1, 0, 0, 0, 7, 30,
- 0, 0, 0, 0, 8, 32, 2, 0, 0, 0, 8, 28, 1, 0, 0, 0, 8, 30,
- 0, 0, 0, 0, 9, 32, 2, 0, 0, 0, 9, 28, 1, 0, 0, 0, 9, 30,
- 0, 0, 0, 0, 10, 32, 2, 0, 0, 0, 10, 28, 1, 0, 0, 0, 10, 30,
- 0, 0, 0, 0, 11, 32, 2, 0, 0, 0, 11, 28, 1, 0, 0, 0, 11, 30,
- 0, 0, 0, 0, 12, 26, 2, 0, 0, 0, 12, 28, 1, 0, 0, 0, 12, 30,
- 0, 0, 0, 0, 13, 20, 2, 0, 0, 0, 13, 28, 1, 0, 0, 0, 13, 28,
- 0, 0, 0, 0, 14, 63, 2, 0, 0, 0, 14, 63, 1, 0, 0, 0, 14, 32,
- 0, 0, 0, 1, 1, 26, 2, 0, 0, 1, 1, 30, 1, 0, 0, 1, 1, 34,
- 0, 0, 0, 1, 2, 30, 2, 0, 0, 1, 2, 30, 1, 0, 0, 1, 2, 34,
- 0, 0, 0, 1, 3, 32, 2, 0, 0, 1, 3, 30, 1, 0, 0, 1, 3, 34,
- 0, 0, 0, 1, 4, 34, 2, 0, 0, 1, 4, 30, 1, 0, 0, 1, 4, 34,
- 0, 0, 0, 1, 5, 34, 2, 0, 0, 1, 5, 30, 1, 0, 0, 1, 5, 34,
- 0, 0, 0, 1, 6, 34, 2, 0, 0, 1, 6, 30, 1, 0, 0, 1, 6, 34,
- 0, 0, 0, 1, 7, 34, 2, 0, 0, 1, 7, 30, 1, 0, 0, 1, 7, 34,
- 0, 0, 0, 1, 8, 34, 2, 0, 0, 1, 8, 30, 1, 0, 0, 1, 8, 34,
- 0, 0, 0, 1, 9, 32, 2, 0, 0, 1, 9, 30, 1, 0, 0, 1, 9, 34,
- 0, 0, 0, 1, 10, 30, 2, 0, 0, 1, 10, 30, 1, 0, 0, 1, 10, 34,
- 0, 0, 0, 1, 11, 28, 2, 0, 0, 1, 11, 30, 1, 0, 0, 1, 11, 34,
- 0, 0, 0, 1, 12, 22, 2, 0, 0, 1, 12, 30, 1, 0, 0, 1, 12, 34,
- 0, 0, 0, 1, 13, 14, 2, 0, 0, 1, 13, 30, 1, 0, 0, 1, 13, 34,
- 0, 0, 0, 1, 14, 63, 2, 0, 0, 1, 14, 63, 1, 0, 0, 1, 14, 63,
- 0, 0, 0, 2, 1, 26, 2, 0, 0, 2, 1, 30, 1, 0, 0, 2, 1, 34,
- 0, 0, 0, 2, 2, 30, 2, 0, 0, 2, 2, 30, 1, 0, 0, 2, 2, 34,
- 0, 0, 0, 2, 3, 32, 2, 0, 0, 2, 3, 30, 1, 0, 0, 2, 3, 34,
- 0, 0, 0, 2, 4, 34, 2, 0, 0, 2, 4, 30, 1, 0, 0, 2, 4, 34,
- 0, 0, 0, 2, 5, 34, 2, 0, 0, 2, 5, 30, 1, 0, 0, 2, 5, 34,
- 0, 0, 0, 2, 6, 34, 2, 0, 0, 2, 6, 30, 1, 0, 0, 2, 6, 34,
- 0, 0, 0, 2, 7, 34, 2, 0, 0, 2, 7, 30, 1, 0, 0, 2, 7, 34,
- 0, 0, 0, 2, 8, 34, 2, 0, 0, 2, 8, 30, 1, 0, 0, 2, 8, 34,
- 0, 0, 0, 2, 9, 32, 2, 0, 0, 2, 9, 30, 1, 0, 0, 2, 9, 34,
- 0, 0, 0, 2, 10, 30, 2, 0, 0, 2, 10, 30, 1, 0, 0, 2, 10, 34,
- 0, 0, 0, 2, 11, 26, 2, 0, 0, 2, 11, 30, 1, 0, 0, 2, 11, 34,
- 0, 0, 0, 2, 12, 20, 2, 0, 0, 2, 12, 30, 1, 0, 0, 2, 12, 34,
- 0, 0, 0, 2, 13, 14, 2, 0, 0, 2, 13, 30, 1, 0, 0, 2, 13, 34,
- 0, 0, 0, 2, 14, 63, 2, 0, 0, 2, 14, 63, 1, 0, 0, 2, 14, 63,
- 0, 0, 0, 3, 1, 26, 2, 0, 0, 3, 1, 18, 1, 0, 0, 3, 1, 30,
- 0, 0, 0, 3, 2, 28, 2, 0, 0, 3, 2, 18, 1, 0, 0, 3, 2, 30,
- 0, 0, 0, 3, 3, 30, 2, 0, 0, 3, 3, 18, 1, 0, 0, 3, 3, 30,
- 0, 0, 0, 3, 4, 30, 2, 0, 0, 3, 4, 18, 1, 0, 0, 3, 4, 30,
- 0, 0, 0, 3, 5, 32, 2, 0, 0, 3, 5, 18, 1, 0, 0, 3, 5, 30,
- 0, 0, 0, 3, 6, 32, 2, 0, 0, 3, 6, 18, 1, 0, 0, 3, 6, 30,
- 0, 0, 0, 3, 7, 32, 2, 0, 0, 3, 7, 18, 1, 0, 0, 3, 7, 30,
- 0, 0, 0, 3, 8, 30, 2, 0, 0, 3, 8, 18, 1, 0, 0, 3, 8, 30,
- 0, 0, 0, 3, 9, 30, 2, 0, 0, 3, 9, 18, 1, 0, 0, 3, 9, 30,
- 0, 0, 0, 3, 10, 28, 2, 0, 0, 3, 10, 18, 1, 0, 0, 3, 10, 30,
- 0, 0, 0, 3, 11, 26, 2, 0, 0, 3, 11, 18, 1, 0, 0, 3, 11, 30,
- 0, 0, 0, 3, 12, 20, 2, 0, 0, 3, 12, 18, 1, 0, 0, 3, 12, 30,
- 0, 0, 0, 3, 13, 14, 2, 0, 0, 3, 13, 18, 1, 0, 0, 3, 13, 30,
- 0, 0, 0, 3, 14, 63, 2, 0, 0, 3, 14, 63, 1, 0, 0, 3, 14, 63,
- 0, 0, 1, 2, 1, 63, 2, 0, 1, 2, 1, 63, 1, 0, 1, 2, 1, 63,
- 0, 0, 1, 2, 2, 63, 2, 0, 1, 2, 2, 63, 1, 0, 1, 2, 2, 63,
- 0, 0, 1, 2, 3, 26, 2, 0, 1, 2, 3, 30, 1, 0, 1, 2, 3, 34,
- 0, 0, 1, 2, 4, 26, 2, 0, 1, 2, 4, 30, 1, 0, 1, 2, 4, 34,
- 0, 0, 1, 2, 5, 30, 2, 0, 1, 2, 5, 30, 1, 0, 1, 2, 5, 34,
- 0, 0, 1, 2, 6, 32, 2, 0, 1, 2, 6, 30, 1, 0, 1, 2, 6, 34,
- 0, 0, 1, 2, 7, 30, 2, 0, 1, 2, 7, 30, 1, 0, 1, 2, 7, 34,
- 0, 0, 1, 2, 8, 26, 2, 0, 1, 2, 8, 30, 1, 0, 1, 2, 8, 34,
- 0, 0, 1, 2, 9, 26, 2, 0, 1, 2, 9, 30, 1, 0, 1, 2, 9, 34,
- 0, 0, 1, 2, 10, 20, 2, 0, 1, 2, 10, 30, 1, 0, 1, 2, 10, 34,
- 0, 0, 1, 2, 11, 14, 2, 0, 1, 2, 11, 30, 1, 0, 1, 2, 11, 34,
- 0, 0, 1, 2, 12, 63, 2, 0, 1, 2, 12, 63, 1, 0, 1, 2, 12, 63,
- 0, 0, 1, 2, 13, 63, 2, 0, 1, 2, 13, 63, 1, 0, 1, 2, 13, 63,
- 0, 0, 1, 2, 14, 63, 2, 0, 1, 2, 14, 63, 1, 0, 1, 2, 14, 63,
- 0, 0, 1, 3, 1, 63, 2, 0, 1, 3, 1, 63, 1, 0, 1, 3, 1, 63,
- 0, 0, 1, 3, 2, 63, 2, 0, 1, 3, 2, 63, 1, 0, 1, 3, 2, 63,
- 0, 0, 1, 3, 3, 24, 2, 0, 1, 3, 3, 18, 1, 0, 1, 3, 3, 30,
- 0, 0, 1, 3, 4, 24, 2, 0, 1, 3, 4, 18, 1, 0, 1, 3, 4, 30,
- 0, 0, 1, 3, 5, 26, 2, 0, 1, 3, 5, 18, 1, 0, 1, 3, 5, 30,
- 0, 0, 1, 3, 6, 28, 2, 0, 1, 3, 6, 18, 1, 0, 1, 3, 6, 30,
- 0, 0, 1, 3, 7, 26, 2, 0, 1, 3, 7, 18, 1, 0, 1, 3, 7, 30,
- 0, 0, 1, 3, 8, 26, 2, 0, 1, 3, 8, 18, 1, 0, 1, 3, 8, 30,
- 0, 0, 1, 3, 9, 26, 2, 0, 1, 3, 9, 18, 1, 0, 1, 3, 9, 30,
- 0, 0, 1, 3, 10, 20, 2, 0, 1, 3, 10, 18, 1, 0, 1, 3, 10, 30,
- 0, 0, 1, 3, 11, 14, 2, 0, 1, 3, 11, 18, 1, 0, 1, 3, 11, 30,
- 0, 0, 1, 3, 12, 63, 2, 0, 1, 3, 12, 63, 1, 0, 1, 3, 12, 63,
- 0, 0, 1, 3, 13, 63, 2, 0, 1, 3, 13, 63, 1, 0, 1, 3, 13, 63,
- 0, 0, 1, 3, 14, 63, 2, 0, 1, 3, 14, 63, 1, 0, 1, 3, 14, 63,
- 0, 1, 0, 1, 36, 36, 2, 1, 0, 1, 36, 32, 1, 1, 0, 1, 36, 30,
- 0, 1, 0, 1, 40, 38, 2, 1, 0, 1, 40, 32, 1, 1, 0, 1, 40, 30,
- 0, 1, 0, 1, 44, 38, 2, 1, 0, 1, 44, 32, 1, 1, 0, 1, 44, 30,
- 0, 1, 0, 1, 48, 38, 2, 1, 0, 1, 48, 32, 1, 1, 0, 1, 48, 30,
- 0, 1, 0, 1, 52, 38, 2, 1, 0, 1, 52, 32, 1, 1, 0, 1, 52, 28,
- 0, 1, 0, 1, 56, 38, 2, 1, 0, 1, 56, 32, 1, 1, 0, 1, 56, 28,
- 0, 1, 0, 1, 60, 38, 2, 1, 0, 1, 60, 32, 1, 1, 0, 1, 60, 28,
- 0, 1, 0, 1, 64, 34, 2, 1, 0, 1, 64, 32, 1, 1, 0, 1, 64, 28,
- 0, 1, 0, 1, 100, 32, 2, 1, 0, 1, 100, 32, 1, 1, 0, 1, 100, 32,
- 0, 1, 0, 1, 104, 38, 2, 1, 0, 1, 104, 32, 1, 1, 0, 1, 104, 32,
- 0, 1, 0, 1, 108, 38, 2, 1, 0, 1, 108, 32, 1, 1, 0, 1, 108, 32,
- 0, 1, 0, 1, 112, 38, 2, 1, 0, 1, 112, 32, 1, 1, 0, 1, 112, 32,
- 0, 1, 0, 1, 116, 38, 2, 1, 0, 1, 116, 32, 1, 1, 0, 1, 116, 32,
- 0, 1, 0, 1, 120, 38, 2, 1, 0, 1, 120, 32, 1, 1, 0, 1, 120, 32,
- 0, 1, 0, 1, 124, 38, 2, 1, 0, 1, 124, 32, 1, 1, 0, 1, 124, 32,
- 0, 1, 0, 1, 128, 38, 2, 1, 0, 1, 128, 32, 1, 1, 0, 1, 128, 32,
- 0, 1, 0, 1, 132, 38, 2, 1, 0, 1, 132, 32, 1, 1, 0, 1, 132, 32,
- 0, 1, 0, 1, 136, 38, 2, 1, 0, 1, 136, 32, 1, 1, 0, 1, 136, 32,
- 0, 1, 0, 1, 140, 34, 2, 1, 0, 1, 140, 32, 1, 1, 0, 1, 140, 32,
- 0, 1, 0, 1, 144, 34, 2, 1, 0, 1, 144, 32, 1, 1, 0, 1, 144, 63,
- 0, 1, 0, 1, 149, 38, 2, 1, 0, 1, 149, 63, 1, 1, 0, 1, 149, 63,
- 0, 1, 0, 1, 153, 38, 2, 1, 0, 1, 153, 63, 1, 1, 0, 1, 153, 63,
- 0, 1, 0, 1, 157, 38, 2, 1, 0, 1, 157, 63, 1, 1, 0, 1, 157, 63,
- 0, 1, 0, 1, 161, 38, 2, 1, 0, 1, 161, 63, 1, 1, 0, 1, 161, 63,
- 0, 1, 0, 1, 165, 38, 2, 1, 0, 1, 165, 63, 1, 1, 0, 1, 165, 63,
- 0, 1, 0, 2, 36, 36, 2, 1, 0, 2, 36, 32, 1, 1, 0, 2, 36, 28,
- 0, 1, 0, 2, 40, 38, 2, 1, 0, 2, 40, 32, 1, 1, 0, 2, 40, 28,
- 0, 1, 0, 2, 44, 38, 2, 1, 0, 2, 44, 32, 1, 1, 0, 2, 44, 28,
- 0, 1, 0, 2, 48, 38, 2, 1, 0, 2, 48, 32, 1, 1, 0, 2, 48, 28,
- 0, 1, 0, 2, 52, 38, 2, 1, 0, 2, 52, 32, 1, 1, 0, 2, 52, 28,
- 0, 1, 0, 2, 56, 38, 2, 1, 0, 2, 56, 32, 1, 1, 0, 2, 56, 28,
- 0, 1, 0, 2, 60, 38, 2, 1, 0, 2, 60, 32, 1, 1, 0, 2, 60, 28,
- 0, 1, 0, 2, 64, 34, 2, 1, 0, 2, 64, 32, 1, 1, 0, 2, 64, 28,
- 0, 1, 0, 2, 100, 32, 2, 1, 0, 2, 100, 32, 1, 1, 0, 2, 100, 32,
- 0, 1, 0, 2, 104, 38, 2, 1, 0, 2, 104, 32, 1, 1, 0, 2, 104, 32,
- 0, 1, 0, 2, 108, 38, 2, 1, 0, 2, 108, 32, 1, 1, 0, 2, 108, 32,
- 0, 1, 0, 2, 112, 38, 2, 1, 0, 2, 112, 32, 1, 1, 0, 2, 112, 32,
- 0, 1, 0, 2, 116, 38, 2, 1, 0, 2, 116, 32, 1, 1, 0, 2, 116, 32,
- 0, 1, 0, 2, 120, 38, 2, 1, 0, 2, 120, 32, 1, 1, 0, 2, 120, 32,
- 0, 1, 0, 2, 124, 38, 2, 1, 0, 2, 124, 32, 1, 1, 0, 2, 124, 32,
- 0, 1, 0, 2, 128, 38, 2, 1, 0, 2, 128, 32, 1, 1, 0, 2, 128, 32,
- 0, 1, 0, 2, 132, 38, 2, 1, 0, 2, 132, 32, 1, 1, 0, 2, 132, 32,
- 0, 1, 0, 2, 136, 38, 2, 1, 0, 2, 136, 32, 1, 1, 0, 2, 136, 32,
- 0, 1, 0, 2, 140, 32, 2, 1, 0, 2, 140, 32, 1, 1, 0, 2, 140, 32,
- 0, 1, 0, 2, 144, 26, 2, 1, 0, 2, 144, 63, 1, 1, 0, 2, 144, 63,
- 0, 1, 0, 2, 149, 38, 2, 1, 0, 2, 149, 63, 1, 1, 0, 2, 149, 63,
- 0, 1, 0, 2, 153, 38, 2, 1, 0, 2, 153, 63, 1, 1, 0, 2, 153, 63,
- 0, 1, 0, 2, 157, 38, 2, 1, 0, 2, 157, 63, 1, 1, 0, 2, 157, 63,
- 0, 1, 0, 2, 161, 38, 2, 1, 0, 2, 161, 63, 1, 1, 0, 2, 161, 63,
- 0, 1, 0, 2, 165, 38, 2, 1, 0, 2, 165, 63, 1, 1, 0, 2, 165, 63,
- 0, 1, 0, 3, 36, 34, 2, 1, 0, 3, 36, 20, 1, 1, 0, 3, 36, 22,
- 0, 1, 0, 3, 40, 36, 2, 1, 0, 3, 40, 20, 1, 1, 0, 3, 40, 22,
- 0, 1, 0, 3, 44, 36, 2, 1, 0, 3, 44, 20, 1, 1, 0, 3, 44, 22,
- 0, 1, 0, 3, 48, 36, 2, 1, 0, 3, 48, 20, 1, 1, 0, 3, 48, 22,
- 0, 1, 0, 3, 52, 36, 2, 1, 0, 3, 52, 20, 1, 1, 0, 3, 52, 22,
- 0, 1, 0, 3, 56, 36, 2, 1, 0, 3, 56, 20, 1, 1, 0, 3, 56, 22,
- 0, 1, 0, 3, 60, 36, 2, 1, 0, 3, 60, 20, 1, 1, 0, 3, 60, 22,
- 0, 1, 0, 3, 64, 34, 2, 1, 0, 3, 64, 20, 1, 1, 0, 3, 64, 22,
- 0, 1, 0, 3, 100, 32, 2, 1, 0, 3, 100, 20, 1, 1, 0, 3, 100, 30,
- 0, 1, 0, 3, 104, 36, 2, 1, 0, 3, 104, 20, 1, 1, 0, 3, 104, 30,
- 0, 1, 0, 3, 108, 38, 2, 1, 0, 3, 108, 20, 1, 1, 0, 3, 108, 30,
- 0, 1, 0, 3, 112, 38, 2, 1, 0, 3, 112, 20, 1, 1, 0, 3, 112, 30,
- 0, 1, 0, 3, 116, 38, 2, 1, 0, 3, 116, 20, 1, 1, 0, 3, 116, 30,
- 0, 1, 0, 3, 120, 38, 2, 1, 0, 3, 120, 20, 1, 1, 0, 3, 120, 30,
- 0, 1, 0, 3, 124, 38, 2, 1, 0, 3, 124, 20, 1, 1, 0, 3, 124, 30,
- 0, 1, 0, 3, 128, 38, 2, 1, 0, 3, 128, 20, 1, 1, 0, 3, 128, 30,
- 0, 1, 0, 3, 132, 38, 2, 1, 0, 3, 132, 20, 1, 1, 0, 3, 132, 30,
- 0, 1, 0, 3, 136, 36, 2, 1, 0, 3, 136, 20, 1, 1, 0, 3, 136, 30,
- 0, 1, 0, 3, 140, 32, 2, 1, 0, 3, 140, 20, 1, 1, 0, 3, 140, 30,
- 0, 1, 0, 3, 144, 26, 2, 1, 0, 3, 144, 63, 1, 1, 0, 3, 144, 63,
- 0, 1, 0, 3, 149, 38, 2, 1, 0, 3, 149, 63, 1, 1, 0, 3, 149, 63,
- 0, 1, 0, 3, 153, 38, 2, 1, 0, 3, 153, 63, 1, 1, 0, 3, 153, 63,
- 0, 1, 0, 3, 157, 38, 2, 1, 0, 3, 157, 63, 1, 1, 0, 3, 157, 63,
- 0, 1, 0, 3, 161, 38, 2, 1, 0, 3, 161, 63, 1, 1, 0, 3, 161, 63,
- 0, 1, 0, 3, 165, 38, 2, 1, 0, 3, 165, 63, 1, 1, 0, 3, 165, 63,
- 0, 1, 1, 2, 38, 28, 2, 1, 1, 2, 38, 30, 1, 1, 1, 2, 38, 30,
- 0, 1, 1, 2, 46, 36, 2, 1, 1, 2, 46, 30, 1, 1, 1, 2, 46, 30,
- 0, 1, 1, 2, 54, 36, 2, 1, 1, 2, 54, 30, 1, 1, 1, 2, 54, 30,
- 0, 1, 1, 2, 62, 30, 2, 1, 1, 2, 62, 30, 1, 1, 1, 2, 62, 30,
- 0, 1, 1, 2, 102, 30, 2, 1, 1, 2, 102, 30, 1, 1, 1, 2, 102, 30,
- 0, 1, 1, 2, 110, 36, 2, 1, 1, 2, 110, 30, 1, 1, 1, 2, 110, 30,
- 0, 1, 1, 2, 118, 36, 2, 1, 1, 2, 118, 30, 1, 1, 1, 2, 118, 30,
- 0, 1, 1, 2, 126, 36, 2, 1, 1, 2, 126, 30, 1, 1, 1, 2, 126, 30,
- 0, 1, 1, 2, 134, 36, 2, 1, 1, 2, 134, 30, 1, 1, 1, 2, 134, 30,
- 0, 1, 1, 2, 142, 30, 2, 1, 1, 2, 142, 63, 1, 1, 1, 2, 142, 63,
- 0, 1, 1, 2, 151, 36, 2, 1, 1, 2, 151, 63, 1, 1, 1, 2, 151, 63,
- 0, 1, 1, 2, 159, 36, 2, 1, 1, 2, 159, 63, 1, 1, 1, 2, 159, 63,
- 0, 1, 1, 3, 38, 26, 2, 1, 1, 3, 38, 20, 1, 1, 1, 3, 38, 22,
- 0, 1, 1, 3, 46, 36, 2, 1, 1, 3, 46, 20, 1, 1, 1, 3, 46, 22,
- 0, 1, 1, 3, 54, 36, 2, 1, 1, 3, 54, 20, 1, 1, 1, 3, 54, 22,
- 0, 1, 1, 3, 62, 28, 2, 1, 1, 3, 62, 20, 1, 1, 1, 3, 62, 22,
- 0, 1, 1, 3, 102, 28, 2, 1, 1, 3, 102, 20, 1, 1, 1, 3, 102, 30,
- 0, 1, 1, 3, 110, 36, 2, 1, 1, 3, 110, 20, 1, 1, 1, 3, 110, 30,
- 0, 1, 1, 3, 118, 36, 2, 1, 1, 3, 118, 20, 1, 1, 1, 3, 118, 30,
- 0, 1, 1, 3, 126, 36, 2, 1, 1, 3, 126, 20, 1, 1, 1, 3, 126, 30,
- 0, 1, 1, 3, 134, 36, 2, 1, 1, 3, 134, 20, 1, 1, 1, 3, 134, 30,
- 0, 1, 1, 3, 142, 30, 2, 1, 1, 3, 142, 63, 1, 1, 1, 3, 142, 63,
- 0, 1, 1, 3, 151, 36, 2, 1, 1, 3, 151, 63, 1, 1, 1, 3, 151, 63,
- 0, 1, 1, 3, 159, 36, 2, 1, 1, 3, 159, 63, 1, 1, 1, 3, 159, 63,
- 0, 1, 2, 4, 42, 26, 2, 1, 2, 4, 42, 30, 1, 1, 2, 4, 42, 28,
- 0, 1, 2, 4, 58, 26, 2, 1, 2, 4, 58, 30, 1, 1, 2, 4, 58, 28,
- 0, 1, 2, 4, 106, 26, 2, 1, 2, 4, 106, 30, 1, 1, 2, 4, 106, 30,
- 0, 1, 2, 4, 122, 36, 2, 1, 2, 4, 122, 30, 1, 1, 2, 4, 122, 30,
- 0, 1, 2, 4, 138, 36, 2, 1, 2, 4, 138, 63, 1, 1, 2, 4, 138, 63,
- 0, 1, 2, 4, 155, 36, 2, 1, 2, 4, 155, 63, 1, 1, 2, 4, 155, 63,
- 0, 1, 2, 5, 42, 24, 2, 1, 2, 5, 42, 20, 1, 1, 2, 5, 42, 22,
- 0, 1, 2, 5, 58, 24, 2, 1, 2, 5, 58, 20, 1, 1, 2, 5, 58, 22,
- 0, 1, 2, 5, 106, 26, 2, 1, 2, 5, 106, 20, 1, 1, 2, 5, 106, 30,
- 0, 1, 2, 5, 122, 36, 2, 1, 2, 5, 122, 20, 1, 1, 2, 5, 122, 30,
- 0, 1, 2, 5, 138, 36, 2, 1, 2, 5, 138, 63, 1, 1, 2, 5, 138, 63,
- 0, 1, 2, 5, 155, 36, 2, 1, 2, 5, 155, 63, 1, 1, 2, 5, 155, 63
+static const struct rtw_txpwr_lmt_cfg_pair rtw8822b_txpwr_lmt_type2[] = {
+ { 0, 0, 0, 0, 1, 32, },
+ { 2, 0, 0, 0, 1, 28, },
+ { 1, 0, 0, 0, 1, 30, },
+ { 0, 0, 0, 0, 2, 32, },
+ { 2, 0, 0, 0, 2, 28, },
+ { 1, 0, 0, 0, 2, 30, },
+ { 0, 0, 0, 0, 3, 32, },
+ { 2, 0, 0, 0, 3, 28, },
+ { 1, 0, 0, 0, 3, 30, },
+ { 0, 0, 0, 0, 4, 32, },
+ { 2, 0, 0, 0, 4, 28, },
+ { 1, 0, 0, 0, 4, 30, },
+ { 0, 0, 0, 0, 5, 32, },
+ { 2, 0, 0, 0, 5, 28, },
+ { 1, 0, 0, 0, 5, 30, },
+ { 0, 0, 0, 0, 6, 32, },
+ { 2, 0, 0, 0, 6, 28, },
+ { 1, 0, 0, 0, 6, 30, },
+ { 0, 0, 0, 0, 7, 32, },
+ { 2, 0, 0, 0, 7, 28, },
+ { 1, 0, 0, 0, 7, 30, },
+ { 0, 0, 0, 0, 8, 32, },
+ { 2, 0, 0, 0, 8, 28, },
+ { 1, 0, 0, 0, 8, 30, },
+ { 0, 0, 0, 0, 9, 32, },
+ { 2, 0, 0, 0, 9, 28, },
+ { 1, 0, 0, 0, 9, 30, },
+ { 0, 0, 0, 0, 10, 32, },
+ { 2, 0, 0, 0, 10, 28, },
+ { 1, 0, 0, 0, 10, 30, },
+ { 0, 0, 0, 0, 11, 32, },
+ { 2, 0, 0, 0, 11, 28, },
+ { 1, 0, 0, 0, 11, 30, },
+ { 0, 0, 0, 0, 12, 26, },
+ { 2, 0, 0, 0, 12, 28, },
+ { 1, 0, 0, 0, 12, 30, },
+ { 0, 0, 0, 0, 13, 20, },
+ { 2, 0, 0, 0, 13, 28, },
+ { 1, 0, 0, 0, 13, 28, },
+ { 0, 0, 0, 0, 14, 63, },
+ { 2, 0, 0, 0, 14, 63, },
+ { 1, 0, 0, 0, 14, 32, },
+ { 0, 0, 0, 1, 1, 26, },
+ { 2, 0, 0, 1, 1, 30, },
+ { 1, 0, 0, 1, 1, 34, },
+ { 0, 0, 0, 1, 2, 30, },
+ { 2, 0, 0, 1, 2, 30, },
+ { 1, 0, 0, 1, 2, 34, },
+ { 0, 0, 0, 1, 3, 32, },
+ { 2, 0, 0, 1, 3, 30, },
+ { 1, 0, 0, 1, 3, 34, },
+ { 0, 0, 0, 1, 4, 34, },
+ { 2, 0, 0, 1, 4, 30, },
+ { 1, 0, 0, 1, 4, 34, },
+ { 0, 0, 0, 1, 5, 34, },
+ { 2, 0, 0, 1, 5, 30, },
+ { 1, 0, 0, 1, 5, 34, },
+ { 0, 0, 0, 1, 6, 34, },
+ { 2, 0, 0, 1, 6, 30, },
+ { 1, 0, 0, 1, 6, 34, },
+ { 0, 0, 0, 1, 7, 34, },
+ { 2, 0, 0, 1, 7, 30, },
+ { 1, 0, 0, 1, 7, 34, },
+ { 0, 0, 0, 1, 8, 34, },
+ { 2, 0, 0, 1, 8, 30, },
+ { 1, 0, 0, 1, 8, 34, },
+ { 0, 0, 0, 1, 9, 32, },
+ { 2, 0, 0, 1, 9, 30, },
+ { 1, 0, 0, 1, 9, 34, },
+ { 0, 0, 0, 1, 10, 30, },
+ { 2, 0, 0, 1, 10, 30, },
+ { 1, 0, 0, 1, 10, 34, },
+ { 0, 0, 0, 1, 11, 28, },
+ { 2, 0, 0, 1, 11, 30, },
+ { 1, 0, 0, 1, 11, 34, },
+ { 0, 0, 0, 1, 12, 22, },
+ { 2, 0, 0, 1, 12, 30, },
+ { 1, 0, 0, 1, 12, 34, },
+ { 0, 0, 0, 1, 13, 14, },
+ { 2, 0, 0, 1, 13, 30, },
+ { 1, 0, 0, 1, 13, 34, },
+ { 0, 0, 0, 1, 14, 63, },
+ { 2, 0, 0, 1, 14, 63, },
+ { 1, 0, 0, 1, 14, 63, },
+ { 0, 0, 0, 2, 1, 26, },
+ { 2, 0, 0, 2, 1, 30, },
+ { 1, 0, 0, 2, 1, 34, },
+ { 0, 0, 0, 2, 2, 30, },
+ { 2, 0, 0, 2, 2, 30, },
+ { 1, 0, 0, 2, 2, 34, },
+ { 0, 0, 0, 2, 3, 32, },
+ { 2, 0, 0, 2, 3, 30, },
+ { 1, 0, 0, 2, 3, 34, },
+ { 0, 0, 0, 2, 4, 34, },
+ { 2, 0, 0, 2, 4, 30, },
+ { 1, 0, 0, 2, 4, 34, },
+ { 0, 0, 0, 2, 5, 34, },
+ { 2, 0, 0, 2, 5, 30, },
+ { 1, 0, 0, 2, 5, 34, },
+ { 0, 0, 0, 2, 6, 34, },
+ { 2, 0, 0, 2, 6, 30, },
+ { 1, 0, 0, 2, 6, 34, },
+ { 0, 0, 0, 2, 7, 34, },
+ { 2, 0, 0, 2, 7, 30, },
+ { 1, 0, 0, 2, 7, 34, },
+ { 0, 0, 0, 2, 8, 34, },
+ { 2, 0, 0, 2, 8, 30, },
+ { 1, 0, 0, 2, 8, 34, },
+ { 0, 0, 0, 2, 9, 32, },
+ { 2, 0, 0, 2, 9, 30, },
+ { 1, 0, 0, 2, 9, 34, },
+ { 0, 0, 0, 2, 10, 30, },
+ { 2, 0, 0, 2, 10, 30, },
+ { 1, 0, 0, 2, 10, 34, },
+ { 0, 0, 0, 2, 11, 26, },
+ { 2, 0, 0, 2, 11, 30, },
+ { 1, 0, 0, 2, 11, 34, },
+ { 0, 0, 0, 2, 12, 20, },
+ { 2, 0, 0, 2, 12, 30, },
+ { 1, 0, 0, 2, 12, 34, },
+ { 0, 0, 0, 2, 13, 14, },
+ { 2, 0, 0, 2, 13, 30, },
+ { 1, 0, 0, 2, 13, 34, },
+ { 0, 0, 0, 2, 14, 63, },
+ { 2, 0, 0, 2, 14, 63, },
+ { 1, 0, 0, 2, 14, 63, },
+ { 0, 0, 0, 3, 1, 26, },
+ { 2, 0, 0, 3, 1, 18, },
+ { 1, 0, 0, 3, 1, 30, },
+ { 0, 0, 0, 3, 2, 28, },
+ { 2, 0, 0, 3, 2, 18, },
+ { 1, 0, 0, 3, 2, 30, },
+ { 0, 0, 0, 3, 3, 30, },
+ { 2, 0, 0, 3, 3, 18, },
+ { 1, 0, 0, 3, 3, 30, },
+ { 0, 0, 0, 3, 4, 30, },
+ { 2, 0, 0, 3, 4, 18, },
+ { 1, 0, 0, 3, 4, 30, },
+ { 0, 0, 0, 3, 5, 32, },
+ { 2, 0, 0, 3, 5, 18, },
+ { 1, 0, 0, 3, 5, 30, },
+ { 0, 0, 0, 3, 6, 32, },
+ { 2, 0, 0, 3, 6, 18, },
+ { 1, 0, 0, 3, 6, 30, },
+ { 0, 0, 0, 3, 7, 32, },
+ { 2, 0, 0, 3, 7, 18, },
+ { 1, 0, 0, 3, 7, 30, },
+ { 0, 0, 0, 3, 8, 30, },
+ { 2, 0, 0, 3, 8, 18, },
+ { 1, 0, 0, 3, 8, 30, },
+ { 0, 0, 0, 3, 9, 30, },
+ { 2, 0, 0, 3, 9, 18, },
+ { 1, 0, 0, 3, 9, 30, },
+ { 0, 0, 0, 3, 10, 28, },
+ { 2, 0, 0, 3, 10, 18, },
+ { 1, 0, 0, 3, 10, 30, },
+ { 0, 0, 0, 3, 11, 26, },
+ { 2, 0, 0, 3, 11, 18, },
+ { 1, 0, 0, 3, 11, 30, },
+ { 0, 0, 0, 3, 12, 20, },
+ { 2, 0, 0, 3, 12, 18, },
+ { 1, 0, 0, 3, 12, 30, },
+ { 0, 0, 0, 3, 13, 14, },
+ { 2, 0, 0, 3, 13, 18, },
+ { 1, 0, 0, 3, 13, 30, },
+ { 0, 0, 0, 3, 14, 63, },
+ { 2, 0, 0, 3, 14, 63, },
+ { 1, 0, 0, 3, 14, 63, },
+ { 0, 0, 1, 2, 1, 63, },
+ { 2, 0, 1, 2, 1, 63, },
+ { 1, 0, 1, 2, 1, 63, },
+ { 0, 0, 1, 2, 2, 63, },
+ { 2, 0, 1, 2, 2, 63, },
+ { 1, 0, 1, 2, 2, 63, },
+ { 0, 0, 1, 2, 3, 26, },
+ { 2, 0, 1, 2, 3, 30, },
+ { 1, 0, 1, 2, 3, 34, },
+ { 0, 0, 1, 2, 4, 26, },
+ { 2, 0, 1, 2, 4, 30, },
+ { 1, 0, 1, 2, 4, 34, },
+ { 0, 0, 1, 2, 5, 30, },
+ { 2, 0, 1, 2, 5, 30, },
+ { 1, 0, 1, 2, 5, 34, },
+ { 0, 0, 1, 2, 6, 32, },
+ { 2, 0, 1, 2, 6, 30, },
+ { 1, 0, 1, 2, 6, 34, },
+ { 0, 0, 1, 2, 7, 30, },
+ { 2, 0, 1, 2, 7, 30, },
+ { 1, 0, 1, 2, 7, 34, },
+ { 0, 0, 1, 2, 8, 26, },
+ { 2, 0, 1, 2, 8, 30, },
+ { 1, 0, 1, 2, 8, 34, },
+ { 0, 0, 1, 2, 9, 26, },
+ { 2, 0, 1, 2, 9, 30, },
+ { 1, 0, 1, 2, 9, 34, },
+ { 0, 0, 1, 2, 10, 20, },
+ { 2, 0, 1, 2, 10, 30, },
+ { 1, 0, 1, 2, 10, 34, },
+ { 0, 0, 1, 2, 11, 14, },
+ { 2, 0, 1, 2, 11, 30, },
+ { 1, 0, 1, 2, 11, 34, },
+ { 0, 0, 1, 2, 12, 63, },
+ { 2, 0, 1, 2, 12, 63, },
+ { 1, 0, 1, 2, 12, 63, },
+ { 0, 0, 1, 2, 13, 63, },
+ { 2, 0, 1, 2, 13, 63, },
+ { 1, 0, 1, 2, 13, 63, },
+ { 0, 0, 1, 2, 14, 63, },
+ { 2, 0, 1, 2, 14, 63, },
+ { 1, 0, 1, 2, 14, 63, },
+ { 0, 0, 1, 3, 1, 63, },
+ { 2, 0, 1, 3, 1, 63, },
+ { 1, 0, 1, 3, 1, 63, },
+ { 0, 0, 1, 3, 2, 63, },
+ { 2, 0, 1, 3, 2, 63, },
+ { 1, 0, 1, 3, 2, 63, },
+ { 0, 0, 1, 3, 3, 24, },
+ { 2, 0, 1, 3, 3, 18, },
+ { 1, 0, 1, 3, 3, 30, },
+ { 0, 0, 1, 3, 4, 24, },
+ { 2, 0, 1, 3, 4, 18, },
+ { 1, 0, 1, 3, 4, 30, },
+ { 0, 0, 1, 3, 5, 26, },
+ { 2, 0, 1, 3, 5, 18, },
+ { 1, 0, 1, 3, 5, 30, },
+ { 0, 0, 1, 3, 6, 28, },
+ { 2, 0, 1, 3, 6, 18, },
+ { 1, 0, 1, 3, 6, 30, },
+ { 0, 0, 1, 3, 7, 26, },
+ { 2, 0, 1, 3, 7, 18, },
+ { 1, 0, 1, 3, 7, 30, },
+ { 0, 0, 1, 3, 8, 26, },
+ { 2, 0, 1, 3, 8, 18, },
+ { 1, 0, 1, 3, 8, 30, },
+ { 0, 0, 1, 3, 9, 26, },
+ { 2, 0, 1, 3, 9, 18, },
+ { 1, 0, 1, 3, 9, 30, },
+ { 0, 0, 1, 3, 10, 20, },
+ { 2, 0, 1, 3, 10, 18, },
+ { 1, 0, 1, 3, 10, 30, },
+ { 0, 0, 1, 3, 11, 14, },
+ { 2, 0, 1, 3, 11, 18, },
+ { 1, 0, 1, 3, 11, 30, },
+ { 0, 0, 1, 3, 12, 63, },
+ { 2, 0, 1, 3, 12, 63, },
+ { 1, 0, 1, 3, 12, 63, },
+ { 0, 0, 1, 3, 13, 63, },
+ { 2, 0, 1, 3, 13, 63, },
+ { 1, 0, 1, 3, 13, 63, },
+ { 0, 0, 1, 3, 14, 63, },
+ { 2, 0, 1, 3, 14, 63, },
+ { 1, 0, 1, 3, 14, 63, },
+ { 0, 1, 0, 1, 36, 36, },
+ { 2, 1, 0, 1, 36, 32, },
+ { 1, 1, 0, 1, 36, 30, },
+ { 0, 1, 0, 1, 40, 38, },
+ { 2, 1, 0, 1, 40, 32, },
+ { 1, 1, 0, 1, 40, 30, },
+ { 0, 1, 0, 1, 44, 38, },
+ { 2, 1, 0, 1, 44, 32, },
+ { 1, 1, 0, 1, 44, 30, },
+ { 0, 1, 0, 1, 48, 38, },
+ { 2, 1, 0, 1, 48, 32, },
+ { 1, 1, 0, 1, 48, 30, },
+ { 0, 1, 0, 1, 52, 38, },
+ { 2, 1, 0, 1, 52, 32, },
+ { 1, 1, 0, 1, 52, 28, },
+ { 0, 1, 0, 1, 56, 38, },
+ { 2, 1, 0, 1, 56, 32, },
+ { 1, 1, 0, 1, 56, 28, },
+ { 0, 1, 0, 1, 60, 38, },
+ { 2, 1, 0, 1, 60, 32, },
+ { 1, 1, 0, 1, 60, 28, },
+ { 0, 1, 0, 1, 64, 34, },
+ { 2, 1, 0, 1, 64, 32, },
+ { 1, 1, 0, 1, 64, 28, },
+ { 0, 1, 0, 1, 100, 32, },
+ { 2, 1, 0, 1, 100, 32, },
+ { 1, 1, 0, 1, 100, 32, },
+ { 0, 1, 0, 1, 104, 38, },
+ { 2, 1, 0, 1, 104, 32, },
+ { 1, 1, 0, 1, 104, 32, },
+ { 0, 1, 0, 1, 108, 38, },
+ { 2, 1, 0, 1, 108, 32, },
+ { 1, 1, 0, 1, 108, 32, },
+ { 0, 1, 0, 1, 112, 38, },
+ { 2, 1, 0, 1, 112, 32, },
+ { 1, 1, 0, 1, 112, 32, },
+ { 0, 1, 0, 1, 116, 38, },
+ { 2, 1, 0, 1, 116, 32, },
+ { 1, 1, 0, 1, 116, 32, },
+ { 0, 1, 0, 1, 120, 38, },
+ { 2, 1, 0, 1, 120, 32, },
+ { 1, 1, 0, 1, 120, 32, },
+ { 0, 1, 0, 1, 124, 38, },
+ { 2, 1, 0, 1, 124, 32, },
+ { 1, 1, 0, 1, 124, 32, },
+ { 0, 1, 0, 1, 128, 38, },
+ { 2, 1, 0, 1, 128, 32, },
+ { 1, 1, 0, 1, 128, 32, },
+ { 0, 1, 0, 1, 132, 38, },
+ { 2, 1, 0, 1, 132, 32, },
+ { 1, 1, 0, 1, 132, 32, },
+ { 0, 1, 0, 1, 136, 38, },
+ { 2, 1, 0, 1, 136, 32, },
+ { 1, 1, 0, 1, 136, 32, },
+ { 0, 1, 0, 1, 140, 34, },
+ { 2, 1, 0, 1, 140, 32, },
+ { 1, 1, 0, 1, 140, 32, },
+ { 0, 1, 0, 1, 144, 34, },
+ { 2, 1, 0, 1, 144, 32, },
+ { 1, 1, 0, 1, 144, 63, },
+ { 0, 1, 0, 1, 149, 38, },
+ { 2, 1, 0, 1, 149, 63, },
+ { 1, 1, 0, 1, 149, 63, },
+ { 0, 1, 0, 1, 153, 38, },
+ { 2, 1, 0, 1, 153, 63, },
+ { 1, 1, 0, 1, 153, 63, },
+ { 0, 1, 0, 1, 157, 38, },
+ { 2, 1, 0, 1, 157, 63, },
+ { 1, 1, 0, 1, 157, 63, },
+ { 0, 1, 0, 1, 161, 38, },
+ { 2, 1, 0, 1, 161, 63, },
+ { 1, 1, 0, 1, 161, 63, },
+ { 0, 1, 0, 1, 165, 38, },
+ { 2, 1, 0, 1, 165, 63, },
+ { 1, 1, 0, 1, 165, 63, },
+ { 0, 1, 0, 2, 36, 36, },
+ { 2, 1, 0, 2, 36, 32, },
+ { 1, 1, 0, 2, 36, 28, },
+ { 0, 1, 0, 2, 40, 38, },
+ { 2, 1, 0, 2, 40, 32, },
+ { 1, 1, 0, 2, 40, 28, },
+ { 0, 1, 0, 2, 44, 38, },
+ { 2, 1, 0, 2, 44, 32, },
+ { 1, 1, 0, 2, 44, 28, },
+ { 0, 1, 0, 2, 48, 38, },
+ { 2, 1, 0, 2, 48, 32, },
+ { 1, 1, 0, 2, 48, 28, },
+ { 0, 1, 0, 2, 52, 38, },
+ { 2, 1, 0, 2, 52, 32, },
+ { 1, 1, 0, 2, 52, 28, },
+ { 0, 1, 0, 2, 56, 38, },
+ { 2, 1, 0, 2, 56, 32, },
+ { 1, 1, 0, 2, 56, 28, },
+ { 0, 1, 0, 2, 60, 38, },
+ { 2, 1, 0, 2, 60, 32, },
+ { 1, 1, 0, 2, 60, 28, },
+ { 0, 1, 0, 2, 64, 34, },
+ { 2, 1, 0, 2, 64, 32, },
+ { 1, 1, 0, 2, 64, 28, },
+ { 0, 1, 0, 2, 100, 32, },
+ { 2, 1, 0, 2, 100, 32, },
+ { 1, 1, 0, 2, 100, 32, },
+ { 0, 1, 0, 2, 104, 38, },
+ { 2, 1, 0, 2, 104, 32, },
+ { 1, 1, 0, 2, 104, 32, },
+ { 0, 1, 0, 2, 108, 38, },
+ { 2, 1, 0, 2, 108, 32, },
+ { 1, 1, 0, 2, 108, 32, },
+ { 0, 1, 0, 2, 112, 38, },
+ { 2, 1, 0, 2, 112, 32, },
+ { 1, 1, 0, 2, 112, 32, },
+ { 0, 1, 0, 2, 116, 38, },
+ { 2, 1, 0, 2, 116, 32, },
+ { 1, 1, 0, 2, 116, 32, },
+ { 0, 1, 0, 2, 120, 38, },
+ { 2, 1, 0, 2, 120, 32, },
+ { 1, 1, 0, 2, 120, 32, },
+ { 0, 1, 0, 2, 124, 38, },
+ { 2, 1, 0, 2, 124, 32, },
+ { 1, 1, 0, 2, 124, 32, },
+ { 0, 1, 0, 2, 128, 38, },
+ { 2, 1, 0, 2, 128, 32, },
+ { 1, 1, 0, 2, 128, 32, },
+ { 0, 1, 0, 2, 132, 38, },
+ { 2, 1, 0, 2, 132, 32, },
+ { 1, 1, 0, 2, 132, 32, },
+ { 0, 1, 0, 2, 136, 38, },
+ { 2, 1, 0, 2, 136, 32, },
+ { 1, 1, 0, 2, 136, 32, },
+ { 0, 1, 0, 2, 140, 32, },
+ { 2, 1, 0, 2, 140, 32, },
+ { 1, 1, 0, 2, 140, 32, },
+ { 0, 1, 0, 2, 144, 26, },
+ { 2, 1, 0, 2, 144, 63, },
+ { 1, 1, 0, 2, 144, 63, },
+ { 0, 1, 0, 2, 149, 38, },
+ { 2, 1, 0, 2, 149, 63, },
+ { 1, 1, 0, 2, 149, 63, },
+ { 0, 1, 0, 2, 153, 38, },
+ { 2, 1, 0, 2, 153, 63, },
+ { 1, 1, 0, 2, 153, 63, },
+ { 0, 1, 0, 2, 157, 38, },
+ { 2, 1, 0, 2, 157, 63, },
+ { 1, 1, 0, 2, 157, 63, },
+ { 0, 1, 0, 2, 161, 38, },
+ { 2, 1, 0, 2, 161, 63, },
+ { 1, 1, 0, 2, 161, 63, },
+ { 0, 1, 0, 2, 165, 38, },
+ { 2, 1, 0, 2, 165, 63, },
+ { 1, 1, 0, 2, 165, 63, },
+ { 0, 1, 0, 3, 36, 34, },
+ { 2, 1, 0, 3, 36, 20, },
+ { 1, 1, 0, 3, 36, 22, },
+ { 0, 1, 0, 3, 40, 36, },
+ { 2, 1, 0, 3, 40, 20, },
+ { 1, 1, 0, 3, 40, 22, },
+ { 0, 1, 0, 3, 44, 36, },
+ { 2, 1, 0, 3, 44, 20, },
+ { 1, 1, 0, 3, 44, 22, },
+ { 0, 1, 0, 3, 48, 36, },
+ { 2, 1, 0, 3, 48, 20, },
+ { 1, 1, 0, 3, 48, 22, },
+ { 0, 1, 0, 3, 52, 36, },
+ { 2, 1, 0, 3, 52, 20, },
+ { 1, 1, 0, 3, 52, 22, },
+ { 0, 1, 0, 3, 56, 36, },
+ { 2, 1, 0, 3, 56, 20, },
+ { 1, 1, 0, 3, 56, 22, },
+ { 0, 1, 0, 3, 60, 36, },
+ { 2, 1, 0, 3, 60, 20, },
+ { 1, 1, 0, 3, 60, 22, },
+ { 0, 1, 0, 3, 64, 34, },
+ { 2, 1, 0, 3, 64, 20, },
+ { 1, 1, 0, 3, 64, 22, },
+ { 0, 1, 0, 3, 100, 32, },
+ { 2, 1, 0, 3, 100, 20, },
+ { 1, 1, 0, 3, 100, 30, },
+ { 0, 1, 0, 3, 104, 36, },
+ { 2, 1, 0, 3, 104, 20, },
+ { 1, 1, 0, 3, 104, 30, },
+ { 0, 1, 0, 3, 108, 38, },
+ { 2, 1, 0, 3, 108, 20, },
+ { 1, 1, 0, 3, 108, 30, },
+ { 0, 1, 0, 3, 112, 38, },
+ { 2, 1, 0, 3, 112, 20, },
+ { 1, 1, 0, 3, 112, 30, },
+ { 0, 1, 0, 3, 116, 38, },
+ { 2, 1, 0, 3, 116, 20, },
+ { 1, 1, 0, 3, 116, 30, },
+ { 0, 1, 0, 3, 120, 38, },
+ { 2, 1, 0, 3, 120, 20, },
+ { 1, 1, 0, 3, 120, 30, },
+ { 0, 1, 0, 3, 124, 38, },
+ { 2, 1, 0, 3, 124, 20, },
+ { 1, 1, 0, 3, 124, 30, },
+ { 0, 1, 0, 3, 128, 38, },
+ { 2, 1, 0, 3, 128, 20, },
+ { 1, 1, 0, 3, 128, 30, },
+ { 0, 1, 0, 3, 132, 38, },
+ { 2, 1, 0, 3, 132, 20, },
+ { 1, 1, 0, 3, 132, 30, },
+ { 0, 1, 0, 3, 136, 36, },
+ { 2, 1, 0, 3, 136, 20, },
+ { 1, 1, 0, 3, 136, 30, },
+ { 0, 1, 0, 3, 140, 32, },
+ { 2, 1, 0, 3, 140, 20, },
+ { 1, 1, 0, 3, 140, 30, },
+ { 0, 1, 0, 3, 144, 26, },
+ { 2, 1, 0, 3, 144, 63, },
+ { 1, 1, 0, 3, 144, 63, },
+ { 0, 1, 0, 3, 149, 38, },
+ { 2, 1, 0, 3, 149, 63, },
+ { 1, 1, 0, 3, 149, 63, },
+ { 0, 1, 0, 3, 153, 38, },
+ { 2, 1, 0, 3, 153, 63, },
+ { 1, 1, 0, 3, 153, 63, },
+ { 0, 1, 0, 3, 157, 38, },
+ { 2, 1, 0, 3, 157, 63, },
+ { 1, 1, 0, 3, 157, 63, },
+ { 0, 1, 0, 3, 161, 38, },
+ { 2, 1, 0, 3, 161, 63, },
+ { 1, 1, 0, 3, 161, 63, },
+ { 0, 1, 0, 3, 165, 38, },
+ { 2, 1, 0, 3, 165, 63, },
+ { 1, 1, 0, 3, 165, 63, },
+ { 0, 1, 1, 2, 38, 28, },
+ { 2, 1, 1, 2, 38, 30, },
+ { 1, 1, 1, 2, 38, 30, },
+ { 0, 1, 1, 2, 46, 36, },
+ { 2, 1, 1, 2, 46, 30, },
+ { 1, 1, 1, 2, 46, 30, },
+ { 0, 1, 1, 2, 54, 36, },
+ { 2, 1, 1, 2, 54, 30, },
+ { 1, 1, 1, 2, 54, 30, },
+ { 0, 1, 1, 2, 62, 30, },
+ { 2, 1, 1, 2, 62, 30, },
+ { 1, 1, 1, 2, 62, 30, },
+ { 0, 1, 1, 2, 102, 30, },
+ { 2, 1, 1, 2, 102, 30, },
+ { 1, 1, 1, 2, 102, 30, },
+ { 0, 1, 1, 2, 110, 36, },
+ { 2, 1, 1, 2, 110, 30, },
+ { 1, 1, 1, 2, 110, 30, },
+ { 0, 1, 1, 2, 118, 36, },
+ { 2, 1, 1, 2, 118, 30, },
+ { 1, 1, 1, 2, 118, 30, },
+ { 0, 1, 1, 2, 126, 36, },
+ { 2, 1, 1, 2, 126, 30, },
+ { 1, 1, 1, 2, 126, 30, },
+ { 0, 1, 1, 2, 134, 36, },
+ { 2, 1, 1, 2, 134, 30, },
+ { 1, 1, 1, 2, 134, 30, },
+ { 0, 1, 1, 2, 142, 30, },
+ { 2, 1, 1, 2, 142, 63, },
+ { 1, 1, 1, 2, 142, 63, },
+ { 0, 1, 1, 2, 151, 36, },
+ { 2, 1, 1, 2, 151, 63, },
+ { 1, 1, 1, 2, 151, 63, },
+ { 0, 1, 1, 2, 159, 36, },
+ { 2, 1, 1, 2, 159, 63, },
+ { 1, 1, 1, 2, 159, 63, },
+ { 0, 1, 1, 3, 38, 26, },
+ { 2, 1, 1, 3, 38, 20, },
+ { 1, 1, 1, 3, 38, 22, },
+ { 0, 1, 1, 3, 46, 36, },
+ { 2, 1, 1, 3, 46, 20, },
+ { 1, 1, 1, 3, 46, 22, },
+ { 0, 1, 1, 3, 54, 36, },
+ { 2, 1, 1, 3, 54, 20, },
+ { 1, 1, 1, 3, 54, 22, },
+ { 0, 1, 1, 3, 62, 28, },
+ { 2, 1, 1, 3, 62, 20, },
+ { 1, 1, 1, 3, 62, 22, },
+ { 0, 1, 1, 3, 102, 28, },
+ { 2, 1, 1, 3, 102, 20, },
+ { 1, 1, 1, 3, 102, 30, },
+ { 0, 1, 1, 3, 110, 36, },
+ { 2, 1, 1, 3, 110, 20, },
+ { 1, 1, 1, 3, 110, 30, },
+ { 0, 1, 1, 3, 118, 36, },
+ { 2, 1, 1, 3, 118, 20, },
+ { 1, 1, 1, 3, 118, 30, },
+ { 0, 1, 1, 3, 126, 36, },
+ { 2, 1, 1, 3, 126, 20, },
+ { 1, 1, 1, 3, 126, 30, },
+ { 0, 1, 1, 3, 134, 36, },
+ { 2, 1, 1, 3, 134, 20, },
+ { 1, 1, 1, 3, 134, 30, },
+ { 0, 1, 1, 3, 142, 30, },
+ { 2, 1, 1, 3, 142, 63, },
+ { 1, 1, 1, 3, 142, 63, },
+ { 0, 1, 1, 3, 151, 36, },
+ { 2, 1, 1, 3, 151, 63, },
+ { 1, 1, 1, 3, 151, 63, },
+ { 0, 1, 1, 3, 159, 36, },
+ { 2, 1, 1, 3, 159, 63, },
+ { 1, 1, 1, 3, 159, 63, },
+ { 0, 1, 2, 4, 42, 26, },
+ { 2, 1, 2, 4, 42, 30, },
+ { 1, 1, 2, 4, 42, 28, },
+ { 0, 1, 2, 4, 58, 26, },
+ { 2, 1, 2, 4, 58, 30, },
+ { 1, 1, 2, 4, 58, 28, },
+ { 0, 1, 2, 4, 106, 26, },
+ { 2, 1, 2, 4, 106, 30, },
+ { 1, 1, 2, 4, 106, 30, },
+ { 0, 1, 2, 4, 122, 36, },
+ { 2, 1, 2, 4, 122, 30, },
+ { 1, 1, 2, 4, 122, 30, },
+ { 0, 1, 2, 4, 138, 36, },
+ { 2, 1, 2, 4, 138, 63, },
+ { 1, 1, 2, 4, 138, 63, },
+ { 0, 1, 2, 4, 155, 36, },
+ { 2, 1, 2, 4, 155, 63, },
+ { 1, 1, 2, 4, 155, 63, },
+ { 0, 1, 2, 5, 42, 24, },
+ { 2, 1, 2, 5, 42, 20, },
+ { 1, 1, 2, 5, 42, 22, },
+ { 0, 1, 2, 5, 58, 24, },
+ { 2, 1, 2, 5, 58, 20, },
+ { 1, 1, 2, 5, 58, 22, },
+ { 0, 1, 2, 5, 106, 26, },
+ { 2, 1, 2, 5, 106, 20, },
+ { 1, 1, 2, 5, 106, 30, },
+ { 0, 1, 2, 5, 122, 36, },
+ { 2, 1, 2, 5, 122, 20, },
+ { 1, 1, 2, 5, 122, 30, },
+ { 0, 1, 2, 5, 138, 36, },
+ { 2, 1, 2, 5, 138, 63, },
+ { 1, 1, 2, 5, 138, 63, },
+ { 0, 1, 2, 5, 155, 36, },
+ { 2, 1, 2, 5, 155, 63, },
+ { 1, 1, 2, 5, 155, 63 },
};
RTW_DECL_TABLE_TXPWR_LMT(rtw8822b_txpwr_lmt_type2);
-static const u8 rtw8822b_txpwr_lmt_type5[] = {
- 0, 0, 0, 0, 1, 32, 2, 0, 0, 0, 1, 28, 1, 0, 0, 0, 1, 30,
- 0, 0, 0, 0, 2, 32, 2, 0, 0, 0, 2, 28, 1, 0, 0, 0, 2, 30,
- 0, 0, 0, 0, 3, 32, 2, 0, 0, 0, 3, 28, 1, 0, 0, 0, 3, 30,
- 0, 0, 0, 0, 4, 32, 2, 0, 0, 0, 4, 28, 1, 0, 0, 0, 4, 30,
- 0, 0, 0, 0, 5, 32, 2, 0, 0, 0, 5, 28, 1, 0, 0, 0, 5, 30,
- 0, 0, 0, 0, 6, 32, 2, 0, 0, 0, 6, 28, 1, 0, 0, 0, 6, 30,
- 0, 0, 0, 0, 7, 32, 2, 0, 0, 0, 7, 28, 1, 0, 0, 0, 7, 30,
- 0, 0, 0, 0, 8, 32, 2, 0, 0, 0, 8, 28, 1, 0, 0, 0, 8, 30,
- 0, 0, 0, 0, 9, 32, 2, 0, 0, 0, 9, 28, 1, 0, 0, 0, 9, 30,
- 0, 0, 0, 0, 10, 32, 2, 0, 0, 0, 10, 28, 1, 0, 0, 0, 10, 30,
- 0, 0, 0, 0, 11, 32, 2, 0, 0, 0, 11, 28, 1, 0, 0, 0, 11, 30,
- 0, 0, 0, 0, 12, 26, 2, 0, 0, 0, 12, 28, 1, 0, 0, 0, 12, 30,
- 0, 0, 0, 0, 13, 20, 2, 0, 0, 0, 13, 28, 1, 0, 0, 0, 13, 28,
- 0, 0, 0, 0, 14, 63, 2, 0, 0, 0, 14, 63, 1, 0, 0, 0, 14, 32,
- 0, 0, 0, 1, 1, 26, 2, 0, 0, 1, 1, 30, 1, 0, 0, 1, 1, 34,
- 0, 0, 0, 1, 2, 30, 2, 0, 0, 1, 2, 30, 1, 0, 0, 1, 2, 34,
- 0, 0, 0, 1, 3, 32, 2, 0, 0, 1, 3, 30, 1, 0, 0, 1, 3, 34,
- 0, 0, 0, 1, 4, 34, 2, 0, 0, 1, 4, 30, 1, 0, 0, 1, 4, 34,
- 0, 0, 0, 1, 5, 34, 2, 0, 0, 1, 5, 30, 1, 0, 0, 1, 5, 34,
- 0, 0, 0, 1, 6, 34, 2, 0, 0, 1, 6, 30, 1, 0, 0, 1, 6, 34,
- 0, 0, 0, 1, 7, 34, 2, 0, 0, 1, 7, 30, 1, 0, 0, 1, 7, 34,
- 0, 0, 0, 1, 8, 34, 2, 0, 0, 1, 8, 30, 1, 0, 0, 1, 8, 34,
- 0, 0, 0, 1, 9, 32, 2, 0, 0, 1, 9, 30, 1, 0, 0, 1, 9, 34,
- 0, 0, 0, 1, 10, 30, 2, 0, 0, 1, 10, 30, 1, 0, 0, 1, 10, 34,
- 0, 0, 0, 1, 11, 28, 2, 0, 0, 1, 11, 30, 1, 0, 0, 1, 11, 34,
- 0, 0, 0, 1, 12, 22, 2, 0, 0, 1, 12, 30, 1, 0, 0, 1, 12, 34,
- 0, 0, 0, 1, 13, 14, 2, 0, 0, 1, 13, 30, 1, 0, 0, 1, 13, 34,
- 0, 0, 0, 1, 14, 63, 2, 0, 0, 1, 14, 63, 1, 0, 0, 1, 14, 63,
- 0, 0, 0, 2, 1, 26, 2, 0, 0, 2, 1, 30, 1, 0, 0, 2, 1, 34,
- 0, 0, 0, 2, 2, 30, 2, 0, 0, 2, 2, 30, 1, 0, 0, 2, 2, 34,
- 0, 0, 0, 2, 3, 32, 2, 0, 0, 2, 3, 30, 1, 0, 0, 2, 3, 34,
- 0, 0, 0, 2, 4, 34, 2, 0, 0, 2, 4, 30, 1, 0, 0, 2, 4, 34,
- 0, 0, 0, 2, 5, 34, 2, 0, 0, 2, 5, 30, 1, 0, 0, 2, 5, 34,
- 0, 0, 0, 2, 6, 34, 2, 0, 0, 2, 6, 30, 1, 0, 0, 2, 6, 34,
- 0, 0, 0, 2, 7, 34, 2, 0, 0, 2, 7, 30, 1, 0, 0, 2, 7, 34,
- 0, 0, 0, 2, 8, 34, 2, 0, 0, 2, 8, 30, 1, 0, 0, 2, 8, 34,
- 0, 0, 0, 2, 9, 32, 2, 0, 0, 2, 9, 30, 1, 0, 0, 2, 9, 34,
- 0, 0, 0, 2, 10, 30, 2, 0, 0, 2, 10, 30, 1, 0, 0, 2, 10, 34,
- 0, 0, 0, 2, 11, 26, 2, 0, 0, 2, 11, 30, 1, 0, 0, 2, 11, 34,
- 0, 0, 0, 2, 12, 20, 2, 0, 0, 2, 12, 30, 1, 0, 0, 2, 12, 34,
- 0, 0, 0, 2, 13, 14, 2, 0, 0, 2, 13, 30, 1, 0, 0, 2, 13, 34,
- 0, 0, 0, 2, 14, 63, 2, 0, 0, 2, 14, 63, 1, 0, 0, 2, 14, 63,
- 0, 0, 0, 3, 1, 26, 2, 0, 0, 3, 1, 18, 1, 0, 0, 3, 1, 30,
- 0, 0, 0, 3, 2, 28, 2, 0, 0, 3, 2, 18, 1, 0, 0, 3, 2, 30,
- 0, 0, 0, 3, 3, 30, 2, 0, 0, 3, 3, 18, 1, 0, 0, 3, 3, 30,
- 0, 0, 0, 3, 4, 30, 2, 0, 0, 3, 4, 18, 1, 0, 0, 3, 4, 30,
- 0, 0, 0, 3, 5, 32, 2, 0, 0, 3, 5, 18, 1, 0, 0, 3, 5, 30,
- 0, 0, 0, 3, 6, 32, 2, 0, 0, 3, 6, 18, 1, 0, 0, 3, 6, 30,
- 0, 0, 0, 3, 7, 32, 2, 0, 0, 3, 7, 18, 1, 0, 0, 3, 7, 30,
- 0, 0, 0, 3, 8, 30, 2, 0, 0, 3, 8, 18, 1, 0, 0, 3, 8, 30,
- 0, 0, 0, 3, 9, 30, 2, 0, 0, 3, 9, 18, 1, 0, 0, 3, 9, 30,
- 0, 0, 0, 3, 10, 28, 2, 0, 0, 3, 10, 18, 1, 0, 0, 3, 10, 30,
- 0, 0, 0, 3, 11, 26, 2, 0, 0, 3, 11, 18, 1, 0, 0, 3, 11, 30,
- 0, 0, 0, 3, 12, 20, 2, 0, 0, 3, 12, 18, 1, 0, 0, 3, 12, 30,
- 0, 0, 0, 3, 13, 14, 2, 0, 0, 3, 13, 18, 1, 0, 0, 3, 13, 30,
- 0, 0, 0, 3, 14, 63, 2, 0, 0, 3, 14, 63, 1, 0, 0, 3, 14, 63,
- 0, 0, 1, 2, 1, 63, 2, 0, 1, 2, 1, 63, 1, 0, 1, 2, 1, 63,
- 0, 0, 1, 2, 2, 63, 2, 0, 1, 2, 2, 63, 1, 0, 1, 2, 2, 63,
- 0, 0, 1, 2, 3, 26, 2, 0, 1, 2, 3, 30, 1, 0, 1, 2, 3, 34,
- 0, 0, 1, 2, 4, 26, 2, 0, 1, 2, 4, 30, 1, 0, 1, 2, 4, 34,
- 0, 0, 1, 2, 5, 30, 2, 0, 1, 2, 5, 30, 1, 0, 1, 2, 5, 34,
- 0, 0, 1, 2, 6, 32, 2, 0, 1, 2, 6, 30, 1, 0, 1, 2, 6, 34,
- 0, 0, 1, 2, 7, 30, 2, 0, 1, 2, 7, 30, 1, 0, 1, 2, 7, 34,
- 0, 0, 1, 2, 8, 26, 2, 0, 1, 2, 8, 30, 1, 0, 1, 2, 8, 34,
- 0, 0, 1, 2, 9, 26, 2, 0, 1, 2, 9, 30, 1, 0, 1, 2, 9, 34,
- 0, 0, 1, 2, 10, 20, 2, 0, 1, 2, 10, 30, 1, 0, 1, 2, 10, 34,
- 0, 0, 1, 2, 11, 14, 2, 0, 1, 2, 11, 30, 1, 0, 1, 2, 11, 34,
- 0, 0, 1, 2, 12, 63, 2, 0, 1, 2, 12, 63, 1, 0, 1, 2, 12, 63,
- 0, 0, 1, 2, 13, 63, 2, 0, 1, 2, 13, 63, 1, 0, 1, 2, 13, 63,
- 0, 0, 1, 2, 14, 63, 2, 0, 1, 2, 14, 63, 1, 0, 1, 2, 14, 63,
- 0, 0, 1, 3, 1, 63, 2, 0, 1, 3, 1, 63, 1, 0, 1, 3, 1, 63,
- 0, 0, 1, 3, 2, 63, 2, 0, 1, 3, 2, 63, 1, 0, 1, 3, 2, 63,
- 0, 0, 1, 3, 3, 24, 2, 0, 1, 3, 3, 18, 1, 0, 1, 3, 3, 30,
- 0, 0, 1, 3, 4, 24, 2, 0, 1, 3, 4, 18, 1, 0, 1, 3, 4, 30,
- 0, 0, 1, 3, 5, 26, 2, 0, 1, 3, 5, 18, 1, 0, 1, 3, 5, 30,
- 0, 0, 1, 3, 6, 28, 2, 0, 1, 3, 6, 18, 1, 0, 1, 3, 6, 30,
- 0, 0, 1, 3, 7, 26, 2, 0, 1, 3, 7, 18, 1, 0, 1, 3, 7, 30,
- 0, 0, 1, 3, 8, 26, 2, 0, 1, 3, 8, 18, 1, 0, 1, 3, 8, 30,
- 0, 0, 1, 3, 9, 26, 2, 0, 1, 3, 9, 18, 1, 0, 1, 3, 9, 30,
- 0, 0, 1, 3, 10, 20, 2, 0, 1, 3, 10, 18, 1, 0, 1, 3, 10, 30,
- 0, 0, 1, 3, 11, 14, 2, 0, 1, 3, 11, 18, 1, 0, 1, 3, 11, 30,
- 0, 0, 1, 3, 12, 63, 2, 0, 1, 3, 12, 63, 1, 0, 1, 3, 12, 63,
- 0, 0, 1, 3, 13, 63, 2, 0, 1, 3, 13, 63, 1, 0, 1, 3, 13, 63,
- 0, 0, 1, 3, 14, 63, 2, 0, 1, 3, 14, 63, 1, 0, 1, 3, 14, 63,
- 0, 1, 0, 1, 36, 30, 2, 1, 0, 1, 36, 32, 1, 1, 0, 1, 36, 30,
- 0, 1, 0, 1, 40, 32, 2, 1, 0, 1, 40, 32, 1, 1, 0, 1, 40, 30,
- 0, 1, 0, 1, 44, 32, 2, 1, 0, 1, 44, 32, 1, 1, 0, 1, 44, 30,
- 0, 1, 0, 1, 48, 32, 2, 1, 0, 1, 48, 32, 1, 1, 0, 1, 48, 30,
- 0, 1, 0, 1, 52, 32, 2, 1, 0, 1, 52, 32, 1, 1, 0, 1, 52, 28,
- 0, 1, 0, 1, 56, 32, 2, 1, 0, 1, 56, 32, 1, 1, 0, 1, 56, 28,
- 0, 1, 0, 1, 60, 32, 2, 1, 0, 1, 60, 32, 1, 1, 0, 1, 60, 28,
- 0, 1, 0, 1, 64, 28, 2, 1, 0, 1, 64, 32, 1, 1, 0, 1, 64, 28,
- 0, 1, 0, 1, 100, 26, 2, 1, 0, 1, 100, 32, 1, 1, 0, 1, 100, 32,
- 0, 1, 0, 1, 104, 32, 2, 1, 0, 1, 104, 32, 1, 1, 0, 1, 104, 32,
- 0, 1, 0, 1, 108, 32, 2, 1, 0, 1, 108, 32, 1, 1, 0, 1, 108, 32,
- 0, 1, 0, 1, 112, 32, 2, 1, 0, 1, 112, 32, 1, 1, 0, 1, 112, 32,
- 0, 1, 0, 1, 116, 32, 2, 1, 0, 1, 116, 32, 1, 1, 0, 1, 116, 32,
- 0, 1, 0, 1, 120, 32, 2, 1, 0, 1, 120, 32, 1, 1, 0, 1, 120, 32,
- 0, 1, 0, 1, 124, 32, 2, 1, 0, 1, 124, 32, 1, 1, 0, 1, 124, 32,
- 0, 1, 0, 1, 128, 32, 2, 1, 0, 1, 128, 32, 1, 1, 0, 1, 128, 32,
- 0, 1, 0, 1, 132, 32, 2, 1, 0, 1, 132, 32, 1, 1, 0, 1, 132, 32,
- 0, 1, 0, 1, 136, 32, 2, 1, 0, 1, 136, 32, 1, 1, 0, 1, 136, 32,
- 0, 1, 0, 1, 140, 28, 2, 1, 0, 1, 140, 32, 1, 1, 0, 1, 140, 32,
- 0, 1, 0, 1, 144, 28, 2, 1, 0, 1, 144, 63, 1, 1, 0, 1, 144, 63,
- 0, 1, 0, 1, 149, 32, 2, 1, 0, 1, 149, 63, 1, 1, 0, 1, 149, 63,
- 0, 1, 0, 1, 153, 32, 2, 1, 0, 1, 153, 63, 1, 1, 0, 1, 153, 63,
- 0, 1, 0, 1, 157, 32, 2, 1, 0, 1, 157, 63, 1, 1, 0, 1, 157, 63,
- 0, 1, 0, 1, 161, 32, 2, 1, 0, 1, 161, 63, 1, 1, 0, 1, 161, 63,
- 0, 1, 0, 1, 165, 32, 2, 1, 0, 1, 165, 63, 1, 1, 0, 1, 165, 63,
- 0, 1, 0, 2, 36, 30, 2, 1, 0, 2, 36, 32, 1, 1, 0, 2, 36, 28,
- 0, 1, 0, 2, 40, 32, 2, 1, 0, 2, 40, 32, 1, 1, 0, 2, 40, 28,
- 0, 1, 0, 2, 44, 32, 2, 1, 0, 2, 44, 32, 1, 1, 0, 2, 44, 28,
- 0, 1, 0, 2, 48, 32, 2, 1, 0, 2, 48, 32, 1, 1, 0, 2, 48, 28,
- 0, 1, 0, 2, 52, 32, 2, 1, 0, 2, 52, 32, 1, 1, 0, 2, 52, 28,
- 0, 1, 0, 2, 56, 32, 2, 1, 0, 2, 56, 32, 1, 1, 0, 2, 56, 28,
- 0, 1, 0, 2, 60, 32, 2, 1, 0, 2, 60, 32, 1, 1, 0, 2, 60, 28,
- 0, 1, 0, 2, 64, 28, 2, 1, 0, 2, 64, 32, 1, 1, 0, 2, 64, 28,
- 0, 1, 0, 2, 100, 26, 2, 1, 0, 2, 100, 32, 1, 1, 0, 2, 100, 32,
- 0, 1, 0, 2, 104, 32, 2, 1, 0, 2, 104, 32, 1, 1, 0, 2, 104, 32,
- 0, 1, 0, 2, 108, 32, 2, 1, 0, 2, 108, 32, 1, 1, 0, 2, 108, 32,
- 0, 1, 0, 2, 112, 32, 2, 1, 0, 2, 112, 32, 1, 1, 0, 2, 112, 32,
- 0, 1, 0, 2, 116, 32, 2, 1, 0, 2, 116, 32, 1, 1, 0, 2, 116, 32,
- 0, 1, 0, 2, 120, 32, 2, 1, 0, 2, 120, 32, 1, 1, 0, 2, 120, 32,
- 0, 1, 0, 2, 124, 32, 2, 1, 0, 2, 124, 32, 1, 1, 0, 2, 124, 32,
- 0, 1, 0, 2, 128, 32, 2, 1, 0, 2, 128, 32, 1, 1, 0, 2, 128, 32,
- 0, 1, 0, 2, 132, 32, 2, 1, 0, 2, 132, 32, 1, 1, 0, 2, 132, 32,
- 0, 1, 0, 2, 136, 32, 2, 1, 0, 2, 136, 32, 1, 1, 0, 2, 136, 32,
- 0, 1, 0, 2, 140, 26, 2, 1, 0, 2, 140, 32, 1, 1, 0, 2, 140, 32,
- 0, 1, 0, 2, 144, 26, 2, 1, 0, 2, 144, 63, 1, 1, 0, 2, 144, 63,
- 0, 1, 0, 2, 149, 32, 2, 1, 0, 2, 149, 63, 1, 1, 0, 2, 149, 63,
- 0, 1, 0, 2, 153, 32, 2, 1, 0, 2, 153, 63, 1, 1, 0, 2, 153, 63,
- 0, 1, 0, 2, 157, 32, 2, 1, 0, 2, 157, 63, 1, 1, 0, 2, 157, 63,
- 0, 1, 0, 2, 161, 32, 2, 1, 0, 2, 161, 63, 1, 1, 0, 2, 161, 63,
- 0, 1, 0, 2, 165, 32, 2, 1, 0, 2, 165, 63, 1, 1, 0, 2, 165, 63,
- 0, 1, 0, 3, 36, 28, 2, 1, 0, 3, 36, 20, 1, 1, 0, 3, 36, 22,
- 0, 1, 0, 3, 40, 30, 2, 1, 0, 3, 40, 20, 1, 1, 0, 3, 40, 22,
- 0, 1, 0, 3, 44, 30, 2, 1, 0, 3, 44, 20, 1, 1, 0, 3, 44, 22,
- 0, 1, 0, 3, 48, 30, 2, 1, 0, 3, 48, 20, 1, 1, 0, 3, 48, 22,
- 0, 1, 0, 3, 52, 30, 2, 1, 0, 3, 52, 20, 1, 1, 0, 3, 52, 22,
- 0, 1, 0, 3, 56, 30, 2, 1, 0, 3, 56, 20, 1, 1, 0, 3, 56, 22,
- 0, 1, 0, 3, 60, 30, 2, 1, 0, 3, 60, 20, 1, 1, 0, 3, 60, 22,
- 0, 1, 0, 3, 64, 28, 2, 1, 0, 3, 64, 20, 1, 1, 0, 3, 64, 22,
- 0, 1, 0, 3, 100, 26, 2, 1, 0, 3, 100, 20, 1, 1, 0, 3, 100, 30,
- 0, 1, 0, 3, 104, 30, 2, 1, 0, 3, 104, 20, 1, 1, 0, 3, 104, 30,
- 0, 1, 0, 3, 108, 32, 2, 1, 0, 3, 108, 20, 1, 1, 0, 3, 108, 30,
- 0, 1, 0, 3, 112, 32, 2, 1, 0, 3, 112, 20, 1, 1, 0, 3, 112, 30,
- 0, 1, 0, 3, 116, 32, 2, 1, 0, 3, 116, 20, 1, 1, 0, 3, 116, 30,
- 0, 1, 0, 3, 120, 32, 2, 1, 0, 3, 120, 20, 1, 1, 0, 3, 120, 30,
- 0, 1, 0, 3, 124, 32, 2, 1, 0, 3, 124, 20, 1, 1, 0, 3, 124, 30,
- 0, 1, 0, 3, 128, 32, 2, 1, 0, 3, 128, 20, 1, 1, 0, 3, 128, 30,
- 0, 1, 0, 3, 132, 32, 2, 1, 0, 3, 132, 20, 1, 1, 0, 3, 132, 30,
- 0, 1, 0, 3, 136, 30, 2, 1, 0, 3, 136, 20, 1, 1, 0, 3, 136, 30,
- 0, 1, 0, 3, 140, 26, 2, 1, 0, 3, 140, 20, 1, 1, 0, 3, 140, 30,
- 0, 1, 0, 3, 144, 26, 2, 1, 0, 3, 144, 63, 1, 1, 0, 3, 144, 63,
- 0, 1, 0, 3, 149, 32, 2, 1, 0, 3, 149, 63, 1, 1, 0, 3, 149, 63,
- 0, 1, 0, 3, 153, 32, 2, 1, 0, 3, 153, 63, 1, 1, 0, 3, 153, 63,
- 0, 1, 0, 3, 157, 32, 2, 1, 0, 3, 157, 63, 1, 1, 0, 3, 157, 63,
- 0, 1, 0, 3, 161, 32, 2, 1, 0, 3, 161, 63, 1, 1, 0, 3, 161, 63,
- 0, 1, 0, 3, 165, 32, 2, 1, 0, 3, 165, 63, 1, 1, 0, 3, 165, 63,
- 0, 1, 1, 2, 38, 22, 2, 1, 1, 2, 38, 30, 1, 1, 1, 2, 38, 30,
- 0, 1, 1, 2, 46, 30, 2, 1, 1, 2, 46, 30, 1, 1, 1, 2, 46, 30,
- 0, 1, 1, 2, 54, 30, 2, 1, 1, 2, 54, 30, 1, 1, 1, 2, 54, 30,
- 0, 1, 1, 2, 62, 24, 2, 1, 1, 2, 62, 30, 1, 1, 1, 2, 62, 30,
- 0, 1, 1, 2, 102, 24, 2, 1, 1, 2, 102, 30, 1, 1, 1, 2, 102, 30,
- 0, 1, 1, 2, 110, 30, 2, 1, 1, 2, 110, 30, 1, 1, 1, 2, 110, 30,
- 0, 1, 1, 2, 118, 30, 2, 1, 1, 2, 118, 30, 1, 1, 1, 2, 118, 30,
- 0, 1, 1, 2, 126, 30, 2, 1, 1, 2, 126, 30, 1, 1, 1, 2, 126, 30,
- 0, 1, 1, 2, 134, 30, 2, 1, 1, 2, 134, 30, 1, 1, 1, 2, 134, 30,
- 0, 1, 1, 2, 142, 30, 2, 1, 1, 2, 142, 63, 1, 1, 1, 2, 142, 63,
- 0, 1, 1, 2, 151, 30, 2, 1, 1, 2, 151, 63, 1, 1, 1, 2, 151, 63,
- 0, 1, 1, 2, 159, 30, 2, 1, 1, 2, 159, 63, 1, 1, 1, 2, 159, 63,
- 0, 1, 1, 3, 38, 20, 2, 1, 1, 3, 38, 20, 1, 1, 1, 3, 38, 22,
- 0, 1, 1, 3, 46, 30, 2, 1, 1, 3, 46, 20, 1, 1, 1, 3, 46, 22,
- 0, 1, 1, 3, 54, 30, 2, 1, 1, 3, 54, 20, 1, 1, 1, 3, 54, 22,
- 0, 1, 1, 3, 62, 22, 2, 1, 1, 3, 62, 20, 1, 1, 1, 3, 62, 22,
- 0, 1, 1, 3, 102, 22, 2, 1, 1, 3, 102, 20, 1, 1, 1, 3, 102, 30,
- 0, 1, 1, 3, 110, 30, 2, 1, 1, 3, 110, 20, 1, 1, 1, 3, 110, 30,
- 0, 1, 1, 3, 118, 30, 2, 1, 1, 3, 118, 20, 1, 1, 1, 3, 118, 30,
- 0, 1, 1, 3, 126, 30, 2, 1, 1, 3, 126, 20, 1, 1, 1, 3, 126, 30,
- 0, 1, 1, 3, 134, 30, 2, 1, 1, 3, 134, 20, 1, 1, 1, 3, 134, 30,
- 0, 1, 1, 3, 142, 30, 2, 1, 1, 3, 142, 63, 1, 1, 1, 3, 142, 63,
- 0, 1, 1, 3, 151, 30, 2, 1, 1, 3, 151, 63, 1, 1, 1, 3, 151, 63,
- 0, 1, 1, 3, 159, 30, 2, 1, 1, 3, 159, 63, 1, 1, 1, 3, 159, 63,
- 0, 1, 2, 4, 42, 20, 2, 1, 2, 4, 42, 30, 1, 1, 2, 4, 42, 28,
- 0, 1, 2, 4, 58, 20, 2, 1, 2, 4, 58, 30, 1, 1, 2, 4, 58, 28,
- 0, 1, 2, 4, 106, 20, 2, 1, 2, 4, 106, 30, 1, 1, 2, 4, 106, 30,
- 0, 1, 2, 4, 122, 30, 2, 1, 2, 4, 122, 30, 1, 1, 2, 4, 122, 30,
- 0, 1, 2, 4, 138, 30, 2, 1, 2, 4, 138, 63, 1, 1, 2, 4, 138, 63,
- 0, 1, 2, 4, 155, 30, 2, 1, 2, 4, 155, 63, 1, 1, 2, 4, 155, 63,
- 0, 1, 2, 5, 42, 18, 2, 1, 2, 5, 42, 20, 1, 1, 2, 5, 42, 22,
- 0, 1, 2, 5, 58, 18, 2, 1, 2, 5, 58, 20, 1, 1, 2, 5, 58, 22,
- 0, 1, 2, 5, 106, 20, 2, 1, 2, 5, 106, 20, 1, 1, 2, 5, 106, 30,
- 0, 1, 2, 5, 122, 30, 2, 1, 2, 5, 122, 20, 1, 1, 2, 5, 122, 30,
- 0, 1, 2, 5, 138, 30, 2, 1, 2, 5, 138, 63, 1, 1, 2, 5, 138, 63,
- 0, 1, 2, 5, 155, 30, 2, 1, 2, 5, 155, 63, 1, 1, 2, 5, 155, 63,
+static const struct rtw_txpwr_lmt_cfg_pair rtw8822b_txpwr_lmt_type5[] = {
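+ /* Reading aid (assumed from the struct name, not stated in this patch):
+ * each row below is one struct rtw_txpwr_lmt_cfg_pair, i.e.
+ * { regd, band, bw, rs, ch, txpwr_lmt }; a limit value of 63 appears to
+ * act as a placeholder for channels where no limit applies.
+ */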
+ { 0, 0, 0, 0, 1, 32, },
+ { 2, 0, 0, 0, 1, 28, },
+ { 1, 0, 0, 0, 1, 30, },
+ { 0, 0, 0, 0, 2, 32, },
+ { 2, 0, 0, 0, 2, 28, },
+ { 1, 0, 0, 0, 2, 30, },
+ { 0, 0, 0, 0, 3, 32, },
+ { 2, 0, 0, 0, 3, 28, },
+ { 1, 0, 0, 0, 3, 30, },
+ { 0, 0, 0, 0, 4, 32, },
+ { 2, 0, 0, 0, 4, 28, },
+ { 1, 0, 0, 0, 4, 30, },
+ { 0, 0, 0, 0, 5, 32, },
+ { 2, 0, 0, 0, 5, 28, },
+ { 1, 0, 0, 0, 5, 30, },
+ { 0, 0, 0, 0, 6, 32, },
+ { 2, 0, 0, 0, 6, 28, },
+ { 1, 0, 0, 0, 6, 30, },
+ { 0, 0, 0, 0, 7, 32, },
+ { 2, 0, 0, 0, 7, 28, },
+ { 1, 0, 0, 0, 7, 30, },
+ { 0, 0, 0, 0, 8, 32, },
+ { 2, 0, 0, 0, 8, 28, },
+ { 1, 0, 0, 0, 8, 30, },
+ { 0, 0, 0, 0, 9, 32, },
+ { 2, 0, 0, 0, 9, 28, },
+ { 1, 0, 0, 0, 9, 30, },
+ { 0, 0, 0, 0, 10, 32, },
+ { 2, 0, 0, 0, 10, 28, },
+ { 1, 0, 0, 0, 10, 30, },
+ { 0, 0, 0, 0, 11, 32, },
+ { 2, 0, 0, 0, 11, 28, },
+ { 1, 0, 0, 0, 11, 30, },
+ { 0, 0, 0, 0, 12, 26, },
+ { 2, 0, 0, 0, 12, 28, },
+ { 1, 0, 0, 0, 12, 30, },
+ { 0, 0, 0, 0, 13, 20, },
+ { 2, 0, 0, 0, 13, 28, },
+ { 1, 0, 0, 0, 13, 28, },
+ { 0, 0, 0, 0, 14, 63, },
+ { 2, 0, 0, 0, 14, 63, },
+ { 1, 0, 0, 0, 14, 32, },
+ { 0, 0, 0, 1, 1, 26, },
+ { 2, 0, 0, 1, 1, 30, },
+ { 1, 0, 0, 1, 1, 34, },
+ { 0, 0, 0, 1, 2, 30, },
+ { 2, 0, 0, 1, 2, 30, },
+ { 1, 0, 0, 1, 2, 34, },
+ { 0, 0, 0, 1, 3, 32, },
+ { 2, 0, 0, 1, 3, 30, },
+ { 1, 0, 0, 1, 3, 34, },
+ { 0, 0, 0, 1, 4, 34, },
+ { 2, 0, 0, 1, 4, 30, },
+ { 1, 0, 0, 1, 4, 34, },
+ { 0, 0, 0, 1, 5, 34, },
+ { 2, 0, 0, 1, 5, 30, },
+ { 1, 0, 0, 1, 5, 34, },
+ { 0, 0, 0, 1, 6, 34, },
+ { 2, 0, 0, 1, 6, 30, },
+ { 1, 0, 0, 1, 6, 34, },
+ { 0, 0, 0, 1, 7, 34, },
+ { 2, 0, 0, 1, 7, 30, },
+ { 1, 0, 0, 1, 7, 34, },
+ { 0, 0, 0, 1, 8, 34, },
+ { 2, 0, 0, 1, 8, 30, },
+ { 1, 0, 0, 1, 8, 34, },
+ { 0, 0, 0, 1, 9, 32, },
+ { 2, 0, 0, 1, 9, 30, },
+ { 1, 0, 0, 1, 9, 34, },
+ { 0, 0, 0, 1, 10, 30, },
+ { 2, 0, 0, 1, 10, 30, },
+ { 1, 0, 0, 1, 10, 34, },
+ { 0, 0, 0, 1, 11, 28, },
+ { 2, 0, 0, 1, 11, 30, },
+ { 1, 0, 0, 1, 11, 34, },
+ { 0, 0, 0, 1, 12, 22, },
+ { 2, 0, 0, 1, 12, 30, },
+ { 1, 0, 0, 1, 12, 34, },
+ { 0, 0, 0, 1, 13, 14, },
+ { 2, 0, 0, 1, 13, 30, },
+ { 1, 0, 0, 1, 13, 34, },
+ { 0, 0, 0, 1, 14, 63, },
+ { 2, 0, 0, 1, 14, 63, },
+ { 1, 0, 0, 1, 14, 63, },
+ { 0, 0, 0, 2, 1, 26, },
+ { 2, 0, 0, 2, 1, 30, },
+ { 1, 0, 0, 2, 1, 34, },
+ { 0, 0, 0, 2, 2, 30, },
+ { 2, 0, 0, 2, 2, 30, },
+ { 1, 0, 0, 2, 2, 34, },
+ { 0, 0, 0, 2, 3, 32, },
+ { 2, 0, 0, 2, 3, 30, },
+ { 1, 0, 0, 2, 3, 34, },
+ { 0, 0, 0, 2, 4, 34, },
+ { 2, 0, 0, 2, 4, 30, },
+ { 1, 0, 0, 2, 4, 34, },
+ { 0, 0, 0, 2, 5, 34, },
+ { 2, 0, 0, 2, 5, 30, },
+ { 1, 0, 0, 2, 5, 34, },
+ { 0, 0, 0, 2, 6, 34, },
+ { 2, 0, 0, 2, 6, 30, },
+ { 1, 0, 0, 2, 6, 34, },
+ { 0, 0, 0, 2, 7, 34, },
+ { 2, 0, 0, 2, 7, 30, },
+ { 1, 0, 0, 2, 7, 34, },
+ { 0, 0, 0, 2, 8, 34, },
+ { 2, 0, 0, 2, 8, 30, },
+ { 1, 0, 0, 2, 8, 34, },
+ { 0, 0, 0, 2, 9, 32, },
+ { 2, 0, 0, 2, 9, 30, },
+ { 1, 0, 0, 2, 9, 34, },
+ { 0, 0, 0, 2, 10, 30, },
+ { 2, 0, 0, 2, 10, 30, },
+ { 1, 0, 0, 2, 10, 34, },
+ { 0, 0, 0, 2, 11, 26, },
+ { 2, 0, 0, 2, 11, 30, },
+ { 1, 0, 0, 2, 11, 34, },
+ { 0, 0, 0, 2, 12, 20, },
+ { 2, 0, 0, 2, 12, 30, },
+ { 1, 0, 0, 2, 12, 34, },
+ { 0, 0, 0, 2, 13, 14, },
+ { 2, 0, 0, 2, 13, 30, },
+ { 1, 0, 0, 2, 13, 34, },
+ { 0, 0, 0, 2, 14, 63, },
+ { 2, 0, 0, 2, 14, 63, },
+ { 1, 0, 0, 2, 14, 63, },
+ { 0, 0, 0, 3, 1, 26, },
+ { 2, 0, 0, 3, 1, 18, },
+ { 1, 0, 0, 3, 1, 30, },
+ { 0, 0, 0, 3, 2, 28, },
+ { 2, 0, 0, 3, 2, 18, },
+ { 1, 0, 0, 3, 2, 30, },
+ { 0, 0, 0, 3, 3, 30, },
+ { 2, 0, 0, 3, 3, 18, },
+ { 1, 0, 0, 3, 3, 30, },
+ { 0, 0, 0, 3, 4, 30, },
+ { 2, 0, 0, 3, 4, 18, },
+ { 1, 0, 0, 3, 4, 30, },
+ { 0, 0, 0, 3, 5, 32, },
+ { 2, 0, 0, 3, 5, 18, },
+ { 1, 0, 0, 3, 5, 30, },
+ { 0, 0, 0, 3, 6, 32, },
+ { 2, 0, 0, 3, 6, 18, },
+ { 1, 0, 0, 3, 6, 30, },
+ { 0, 0, 0, 3, 7, 32, },
+ { 2, 0, 0, 3, 7, 18, },
+ { 1, 0, 0, 3, 7, 30, },
+ { 0, 0, 0, 3, 8, 30, },
+ { 2, 0, 0, 3, 8, 18, },
+ { 1, 0, 0, 3, 8, 30, },
+ { 0, 0, 0, 3, 9, 30, },
+ { 2, 0, 0, 3, 9, 18, },
+ { 1, 0, 0, 3, 9, 30, },
+ { 0, 0, 0, 3, 10, 28, },
+ { 2, 0, 0, 3, 10, 18, },
+ { 1, 0, 0, 3, 10, 30, },
+ { 0, 0, 0, 3, 11, 26, },
+ { 2, 0, 0, 3, 11, 18, },
+ { 1, 0, 0, 3, 11, 30, },
+ { 0, 0, 0, 3, 12, 20, },
+ { 2, 0, 0, 3, 12, 18, },
+ { 1, 0, 0, 3, 12, 30, },
+ { 0, 0, 0, 3, 13, 14, },
+ { 2, 0, 0, 3, 13, 18, },
+ { 1, 0, 0, 3, 13, 30, },
+ { 0, 0, 0, 3, 14, 63, },
+ { 2, 0, 0, 3, 14, 63, },
+ { 1, 0, 0, 3, 14, 63, },
+ { 0, 0, 1, 2, 1, 63, },
+ { 2, 0, 1, 2, 1, 63, },
+ { 1, 0, 1, 2, 1, 63, },
+ { 0, 0, 1, 2, 2, 63, },
+ { 2, 0, 1, 2, 2, 63, },
+ { 1, 0, 1, 2, 2, 63, },
+ { 0, 0, 1, 2, 3, 26, },
+ { 2, 0, 1, 2, 3, 30, },
+ { 1, 0, 1, 2, 3, 34, },
+ { 0, 0, 1, 2, 4, 26, },
+ { 2, 0, 1, 2, 4, 30, },
+ { 1, 0, 1, 2, 4, 34, },
+ { 0, 0, 1, 2, 5, 30, },
+ { 2, 0, 1, 2, 5, 30, },
+ { 1, 0, 1, 2, 5, 34, },
+ { 0, 0, 1, 2, 6, 32, },
+ { 2, 0, 1, 2, 6, 30, },
+ { 1, 0, 1, 2, 6, 34, },
+ { 0, 0, 1, 2, 7, 30, },
+ { 2, 0, 1, 2, 7, 30, },
+ { 1, 0, 1, 2, 7, 34, },
+ { 0, 0, 1, 2, 8, 26, },
+ { 2, 0, 1, 2, 8, 30, },
+ { 1, 0, 1, 2, 8, 34, },
+ { 0, 0, 1, 2, 9, 26, },
+ { 2, 0, 1, 2, 9, 30, },
+ { 1, 0, 1, 2, 9, 34, },
+ { 0, 0, 1, 2, 10, 20, },
+ { 2, 0, 1, 2, 10, 30, },
+ { 1, 0, 1, 2, 10, 34, },
+ { 0, 0, 1, 2, 11, 14, },
+ { 2, 0, 1, 2, 11, 30, },
+ { 1, 0, 1, 2, 11, 34, },
+ { 0, 0, 1, 2, 12, 63, },
+ { 2, 0, 1, 2, 12, 63, },
+ { 1, 0, 1, 2, 12, 63, },
+ { 0, 0, 1, 2, 13, 63, },
+ { 2, 0, 1, 2, 13, 63, },
+ { 1, 0, 1, 2, 13, 63, },
+ { 0, 0, 1, 2, 14, 63, },
+ { 2, 0, 1, 2, 14, 63, },
+ { 1, 0, 1, 2, 14, 63, },
+ { 0, 0, 1, 3, 1, 63, },
+ { 2, 0, 1, 3, 1, 63, },
+ { 1, 0, 1, 3, 1, 63, },
+ { 0, 0, 1, 3, 2, 63, },
+ { 2, 0, 1, 3, 2, 63, },
+ { 1, 0, 1, 3, 2, 63, },
+ { 0, 0, 1, 3, 3, 24, },
+ { 2, 0, 1, 3, 3, 18, },
+ { 1, 0, 1, 3, 3, 30, },
+ { 0, 0, 1, 3, 4, 24, },
+ { 2, 0, 1, 3, 4, 18, },
+ { 1, 0, 1, 3, 4, 30, },
+ { 0, 0, 1, 3, 5, 26, },
+ { 2, 0, 1, 3, 5, 18, },
+ { 1, 0, 1, 3, 5, 30, },
+ { 0, 0, 1, 3, 6, 28, },
+ { 2, 0, 1, 3, 6, 18, },
+ { 1, 0, 1, 3, 6, 30, },
+ { 0, 0, 1, 3, 7, 26, },
+ { 2, 0, 1, 3, 7, 18, },
+ { 1, 0, 1, 3, 7, 30, },
+ { 0, 0, 1, 3, 8, 26, },
+ { 2, 0, 1, 3, 8, 18, },
+ { 1, 0, 1, 3, 8, 30, },
+ { 0, 0, 1, 3, 9, 26, },
+ { 2, 0, 1, 3, 9, 18, },
+ { 1, 0, 1, 3, 9, 30, },
+ { 0, 0, 1, 3, 10, 20, },
+ { 2, 0, 1, 3, 10, 18, },
+ { 1, 0, 1, 3, 10, 30, },
+ { 0, 0, 1, 3, 11, 14, },
+ { 2, 0, 1, 3, 11, 18, },
+ { 1, 0, 1, 3, 11, 30, },
+ { 0, 0, 1, 3, 12, 63, },
+ { 2, 0, 1, 3, 12, 63, },
+ { 1, 0, 1, 3, 12, 63, },
+ { 0, 0, 1, 3, 13, 63, },
+ { 2, 0, 1, 3, 13, 63, },
+ { 1, 0, 1, 3, 13, 63, },
+ { 0, 0, 1, 3, 14, 63, },
+ { 2, 0, 1, 3, 14, 63, },
+ { 1, 0, 1, 3, 14, 63, },
+ { 0, 1, 0, 1, 36, 30, },
+ { 2, 1, 0, 1, 36, 32, },
+ { 1, 1, 0, 1, 36, 30, },
+ { 0, 1, 0, 1, 40, 32, },
+ { 2, 1, 0, 1, 40, 32, },
+ { 1, 1, 0, 1, 40, 30, },
+ { 0, 1, 0, 1, 44, 32, },
+ { 2, 1, 0, 1, 44, 32, },
+ { 1, 1, 0, 1, 44, 30, },
+ { 0, 1, 0, 1, 48, 32, },
+ { 2, 1, 0, 1, 48, 32, },
+ { 1, 1, 0, 1, 48, 30, },
+ { 0, 1, 0, 1, 52, 32, },
+ { 2, 1, 0, 1, 52, 32, },
+ { 1, 1, 0, 1, 52, 28, },
+ { 0, 1, 0, 1, 56, 32, },
+ { 2, 1, 0, 1, 56, 32, },
+ { 1, 1, 0, 1, 56, 28, },
+ { 0, 1, 0, 1, 60, 32, },
+ { 2, 1, 0, 1, 60, 32, },
+ { 1, 1, 0, 1, 60, 28, },
+ { 0, 1, 0, 1, 64, 28, },
+ { 2, 1, 0, 1, 64, 32, },
+ { 1, 1, 0, 1, 64, 28, },
+ { 0, 1, 0, 1, 100, 26, },
+ { 2, 1, 0, 1, 100, 32, },
+ { 1, 1, 0, 1, 100, 32, },
+ { 0, 1, 0, 1, 104, 32, },
+ { 2, 1, 0, 1, 104, 32, },
+ { 1, 1, 0, 1, 104, 32, },
+ { 0, 1, 0, 1, 108, 32, },
+ { 2, 1, 0, 1, 108, 32, },
+ { 1, 1, 0, 1, 108, 32, },
+ { 0, 1, 0, 1, 112, 32, },
+ { 2, 1, 0, 1, 112, 32, },
+ { 1, 1, 0, 1, 112, 32, },
+ { 0, 1, 0, 1, 116, 32, },
+ { 2, 1, 0, 1, 116, 32, },
+ { 1, 1, 0, 1, 116, 32, },
+ { 0, 1, 0, 1, 120, 32, },
+ { 2, 1, 0, 1, 120, 32, },
+ { 1, 1, 0, 1, 120, 32, },
+ { 0, 1, 0, 1, 124, 32, },
+ { 2, 1, 0, 1, 124, 32, },
+ { 1, 1, 0, 1, 124, 32, },
+ { 0, 1, 0, 1, 128, 32, },
+ { 2, 1, 0, 1, 128, 32, },
+ { 1, 1, 0, 1, 128, 32, },
+ { 0, 1, 0, 1, 132, 32, },
+ { 2, 1, 0, 1, 132, 32, },
+ { 1, 1, 0, 1, 132, 32, },
+ { 0, 1, 0, 1, 136, 32, },
+ { 2, 1, 0, 1, 136, 32, },
+ { 1, 1, 0, 1, 136, 32, },
+ { 0, 1, 0, 1, 140, 28, },
+ { 2, 1, 0, 1, 140, 32, },
+ { 1, 1, 0, 1, 140, 32, },
+ { 0, 1, 0, 1, 144, 28, },
+ { 2, 1, 0, 1, 144, 63, },
+ { 1, 1, 0, 1, 144, 63, },
+ { 0, 1, 0, 1, 149, 32, },
+ { 2, 1, 0, 1, 149, 63, },
+ { 1, 1, 0, 1, 149, 63, },
+ { 0, 1, 0, 1, 153, 32, },
+ { 2, 1, 0, 1, 153, 63, },
+ { 1, 1, 0, 1, 153, 63, },
+ { 0, 1, 0, 1, 157, 32, },
+ { 2, 1, 0, 1, 157, 63, },
+ { 1, 1, 0, 1, 157, 63, },
+ { 0, 1, 0, 1, 161, 32, },
+ { 2, 1, 0, 1, 161, 63, },
+ { 1, 1, 0, 1, 161, 63, },
+ { 0, 1, 0, 1, 165, 32, },
+ { 2, 1, 0, 1, 165, 63, },
+ { 1, 1, 0, 1, 165, 63, },
+ { 0, 1, 0, 2, 36, 30, },
+ { 2, 1, 0, 2, 36, 32, },
+ { 1, 1, 0, 2, 36, 28, },
+ { 0, 1, 0, 2, 40, 32, },
+ { 2, 1, 0, 2, 40, 32, },
+ { 1, 1, 0, 2, 40, 28, },
+ { 0, 1, 0, 2, 44, 32, },
+ { 2, 1, 0, 2, 44, 32, },
+ { 1, 1, 0, 2, 44, 28, },
+ { 0, 1, 0, 2, 48, 32, },
+ { 2, 1, 0, 2, 48, 32, },
+ { 1, 1, 0, 2, 48, 28, },
+ { 0, 1, 0, 2, 52, 32, },
+ { 2, 1, 0, 2, 52, 32, },
+ { 1, 1, 0, 2, 52, 28, },
+ { 0, 1, 0, 2, 56, 32, },
+ { 2, 1, 0, 2, 56, 32, },
+ { 1, 1, 0, 2, 56, 28, },
+ { 0, 1, 0, 2, 60, 32, },
+ { 2, 1, 0, 2, 60, 32, },
+ { 1, 1, 0, 2, 60, 28, },
+ { 0, 1, 0, 2, 64, 28, },
+ { 2, 1, 0, 2, 64, 32, },
+ { 1, 1, 0, 2, 64, 28, },
+ { 0, 1, 0, 2, 100, 26, },
+ { 2, 1, 0, 2, 100, 32, },
+ { 1, 1, 0, 2, 100, 32, },
+ { 0, 1, 0, 2, 104, 32, },
+ { 2, 1, 0, 2, 104, 32, },
+ { 1, 1, 0, 2, 104, 32, },
+ { 0, 1, 0, 2, 108, 32, },
+ { 2, 1, 0, 2, 108, 32, },
+ { 1, 1, 0, 2, 108, 32, },
+ { 0, 1, 0, 2, 112, 32, },
+ { 2, 1, 0, 2, 112, 32, },
+ { 1, 1, 0, 2, 112, 32, },
+ { 0, 1, 0, 2, 116, 32, },
+ { 2, 1, 0, 2, 116, 32, },
+ { 1, 1, 0, 2, 116, 32, },
+ { 0, 1, 0, 2, 120, 32, },
+ { 2, 1, 0, 2, 120, 32, },
+ { 1, 1, 0, 2, 120, 32, },
+ { 0, 1, 0, 2, 124, 32, },
+ { 2, 1, 0, 2, 124, 32, },
+ { 1, 1, 0, 2, 124, 32, },
+ { 0, 1, 0, 2, 128, 32, },
+ { 2, 1, 0, 2, 128, 32, },
+ { 1, 1, 0, 2, 128, 32, },
+ { 0, 1, 0, 2, 132, 32, },
+ { 2, 1, 0, 2, 132, 32, },
+ { 1, 1, 0, 2, 132, 32, },
+ { 0, 1, 0, 2, 136, 32, },
+ { 2, 1, 0, 2, 136, 32, },
+ { 1, 1, 0, 2, 136, 32, },
+ { 0, 1, 0, 2, 140, 26, },
+ { 2, 1, 0, 2, 140, 32, },
+ { 1, 1, 0, 2, 140, 32, },
+ { 0, 1, 0, 2, 144, 26, },
+ { 2, 1, 0, 2, 144, 63, },
+ { 1, 1, 0, 2, 144, 63, },
+ { 0, 1, 0, 2, 149, 32, },
+ { 2, 1, 0, 2, 149, 63, },
+ { 1, 1, 0, 2, 149, 63, },
+ { 0, 1, 0, 2, 153, 32, },
+ { 2, 1, 0, 2, 153, 63, },
+ { 1, 1, 0, 2, 153, 63, },
+ { 0, 1, 0, 2, 157, 32, },
+ { 2, 1, 0, 2, 157, 63, },
+ { 1, 1, 0, 2, 157, 63, },
+ { 0, 1, 0, 2, 161, 32, },
+ { 2, 1, 0, 2, 161, 63, },
+ { 1, 1, 0, 2, 161, 63, },
+ { 0, 1, 0, 2, 165, 32, },
+ { 2, 1, 0, 2, 165, 63, },
+ { 1, 1, 0, 2, 165, 63, },
+ { 0, 1, 0, 3, 36, 28, },
+ { 2, 1, 0, 3, 36, 20, },
+ { 1, 1, 0, 3, 36, 22, },
+ { 0, 1, 0, 3, 40, 30, },
+ { 2, 1, 0, 3, 40, 20, },
+ { 1, 1, 0, 3, 40, 22, },
+ { 0, 1, 0, 3, 44, 30, },
+ { 2, 1, 0, 3, 44, 20, },
+ { 1, 1, 0, 3, 44, 22, },
+ { 0, 1, 0, 3, 48, 30, },
+ { 2, 1, 0, 3, 48, 20, },
+ { 1, 1, 0, 3, 48, 22, },
+ { 0, 1, 0, 3, 52, 30, },
+ { 2, 1, 0, 3, 52, 20, },
+ { 1, 1, 0, 3, 52, 22, },
+ { 0, 1, 0, 3, 56, 30, },
+ { 2, 1, 0, 3, 56, 20, },
+ { 1, 1, 0, 3, 56, 22, },
+ { 0, 1, 0, 3, 60, 30, },
+ { 2, 1, 0, 3, 60, 20, },
+ { 1, 1, 0, 3, 60, 22, },
+ { 0, 1, 0, 3, 64, 28, },
+ { 2, 1, 0, 3, 64, 20, },
+ { 1, 1, 0, 3, 64, 22, },
+ { 0, 1, 0, 3, 100, 26, },
+ { 2, 1, 0, 3, 100, 20, },
+ { 1, 1, 0, 3, 100, 30, },
+ { 0, 1, 0, 3, 104, 30, },
+ { 2, 1, 0, 3, 104, 20, },
+ { 1, 1, 0, 3, 104, 30, },
+ { 0, 1, 0, 3, 108, 32, },
+ { 2, 1, 0, 3, 108, 20, },
+ { 1, 1, 0, 3, 108, 30, },
+ { 0, 1, 0, 3, 112, 32, },
+ { 2, 1, 0, 3, 112, 20, },
+ { 1, 1, 0, 3, 112, 30, },
+ { 0, 1, 0, 3, 116, 32, },
+ { 2, 1, 0, 3, 116, 20, },
+ { 1, 1, 0, 3, 116, 30, },
+ { 0, 1, 0, 3, 120, 32, },
+ { 2, 1, 0, 3, 120, 20, },
+ { 1, 1, 0, 3, 120, 30, },
+ { 0, 1, 0, 3, 124, 32, },
+ { 2, 1, 0, 3, 124, 20, },
+ { 1, 1, 0, 3, 124, 30, },
+ { 0, 1, 0, 3, 128, 32, },
+ { 2, 1, 0, 3, 128, 20, },
+ { 1, 1, 0, 3, 128, 30, },
+ { 0, 1, 0, 3, 132, 32, },
+ { 2, 1, 0, 3, 132, 20, },
+ { 1, 1, 0, 3, 132, 30, },
+ { 0, 1, 0, 3, 136, 30, },
+ { 2, 1, 0, 3, 136, 20, },
+ { 1, 1, 0, 3, 136, 30, },
+ { 0, 1, 0, 3, 140, 26, },
+ { 2, 1, 0, 3, 140, 20, },
+ { 1, 1, 0, 3, 140, 30, },
+ { 0, 1, 0, 3, 144, 26, },
+ { 2, 1, 0, 3, 144, 63, },
+ { 1, 1, 0, 3, 144, 63, },
+ { 0, 1, 0, 3, 149, 32, },
+ { 2, 1, 0, 3, 149, 63, },
+ { 1, 1, 0, 3, 149, 63, },
+ { 0, 1, 0, 3, 153, 32, },
+ { 2, 1, 0, 3, 153, 63, },
+ { 1, 1, 0, 3, 153, 63, },
+ { 0, 1, 0, 3, 157, 32, },
+ { 2, 1, 0, 3, 157, 63, },
+ { 1, 1, 0, 3, 157, 63, },
+ { 0, 1, 0, 3, 161, 32, },
+ { 2, 1, 0, 3, 161, 63, },
+ { 1, 1, 0, 3, 161, 63, },
+ { 0, 1, 0, 3, 165, 32, },
+ { 2, 1, 0, 3, 165, 63, },
+ { 1, 1, 0, 3, 165, 63, },
+ { 0, 1, 1, 2, 38, 22, },
+ { 2, 1, 1, 2, 38, 30, },
+ { 1, 1, 1, 2, 38, 30, },
+ { 0, 1, 1, 2, 46, 30, },
+ { 2, 1, 1, 2, 46, 30, },
+ { 1, 1, 1, 2, 46, 30, },
+ { 0, 1, 1, 2, 54, 30, },
+ { 2, 1, 1, 2, 54, 30, },
+ { 1, 1, 1, 2, 54, 30, },
+ { 0, 1, 1, 2, 62, 24, },
+ { 2, 1, 1, 2, 62, 30, },
+ { 1, 1, 1, 2, 62, 30, },
+ { 0, 1, 1, 2, 102, 24, },
+ { 2, 1, 1, 2, 102, 30, },
+ { 1, 1, 1, 2, 102, 30, },
+ { 0, 1, 1, 2, 110, 30, },
+ { 2, 1, 1, 2, 110, 30, },
+ { 1, 1, 1, 2, 110, 30, },
+ { 0, 1, 1, 2, 118, 30, },
+ { 2, 1, 1, 2, 118, 30, },
+ { 1, 1, 1, 2, 118, 30, },
+ { 0, 1, 1, 2, 126, 30, },
+ { 2, 1, 1, 2, 126, 30, },
+ { 1, 1, 1, 2, 126, 30, },
+ { 0, 1, 1, 2, 134, 30, },
+ { 2, 1, 1, 2, 134, 30, },
+ { 1, 1, 1, 2, 134, 30, },
+ { 0, 1, 1, 2, 142, 30, },
+ { 2, 1, 1, 2, 142, 63, },
+ { 1, 1, 1, 2, 142, 63, },
+ { 0, 1, 1, 2, 151, 30, },
+ { 2, 1, 1, 2, 151, 63, },
+ { 1, 1, 1, 2, 151, 63, },
+ { 0, 1, 1, 2, 159, 30, },
+ { 2, 1, 1, 2, 159, 63, },
+ { 1, 1, 1, 2, 159, 63, },
+ { 0, 1, 1, 3, 38, 20, },
+ { 2, 1, 1, 3, 38, 20, },
+ { 1, 1, 1, 3, 38, 22, },
+ { 0, 1, 1, 3, 46, 30, },
+ { 2, 1, 1, 3, 46, 20, },
+ { 1, 1, 1, 3, 46, 22, },
+ { 0, 1, 1, 3, 54, 30, },
+ { 2, 1, 1, 3, 54, 20, },
+ { 1, 1, 1, 3, 54, 22, },
+ { 0, 1, 1, 3, 62, 22, },
+ { 2, 1, 1, 3, 62, 20, },
+ { 1, 1, 1, 3, 62, 22, },
+ { 0, 1, 1, 3, 102, 22, },
+ { 2, 1, 1, 3, 102, 20, },
+ { 1, 1, 1, 3, 102, 30, },
+ { 0, 1, 1, 3, 110, 30, },
+ { 2, 1, 1, 3, 110, 20, },
+ { 1, 1, 1, 3, 110, 30, },
+ { 0, 1, 1, 3, 118, 30, },
+ { 2, 1, 1, 3, 118, 20, },
+ { 1, 1, 1, 3, 118, 30, },
+ { 0, 1, 1, 3, 126, 30, },
+ { 2, 1, 1, 3, 126, 20, },
+ { 1, 1, 1, 3, 126, 30, },
+ { 0, 1, 1, 3, 134, 30, },
+ { 2, 1, 1, 3, 134, 20, },
+ { 1, 1, 1, 3, 134, 30, },
+ { 0, 1, 1, 3, 142, 30, },
+ { 2, 1, 1, 3, 142, 63, },
+ { 1, 1, 1, 3, 142, 63, },
+ { 0, 1, 1, 3, 151, 30, },
+ { 2, 1, 1, 3, 151, 63, },
+ { 1, 1, 1, 3, 151, 63, },
+ { 0, 1, 1, 3, 159, 30, },
+ { 2, 1, 1, 3, 159, 63, },
+ { 1, 1, 1, 3, 159, 63, },
+ { 0, 1, 2, 4, 42, 20, },
+ { 2, 1, 2, 4, 42, 30, },
+ { 1, 1, 2, 4, 42, 28, },
+ { 0, 1, 2, 4, 58, 20, },
+ { 2, 1, 2, 4, 58, 30, },
+ { 1, 1, 2, 4, 58, 28, },
+ { 0, 1, 2, 4, 106, 20, },
+ { 2, 1, 2, 4, 106, 30, },
+ { 1, 1, 2, 4, 106, 30, },
+ { 0, 1, 2, 4, 122, 30, },
+ { 2, 1, 2, 4, 122, 30, },
+ { 1, 1, 2, 4, 122, 30, },
+ { 0, 1, 2, 4, 138, 30, },
+ { 2, 1, 2, 4, 138, 63, },
+ { 1, 1, 2, 4, 138, 63, },
+ { 0, 1, 2, 4, 155, 30, },
+ { 2, 1, 2, 4, 155, 63, },
+ { 1, 1, 2, 4, 155, 63, },
+ { 0, 1, 2, 5, 42, 18, },
+ { 2, 1, 2, 5, 42, 20, },
+ { 1, 1, 2, 5, 42, 22, },
+ { 0, 1, 2, 5, 58, 18, },
+ { 2, 1, 2, 5, 58, 20, },
+ { 1, 1, 2, 5, 58, 22, },
+ { 0, 1, 2, 5, 106, 20, },
+ { 2, 1, 2, 5, 106, 20, },
+ { 1, 1, 2, 5, 106, 30, },
+ { 0, 1, 2, 5, 122, 30, },
+ { 2, 1, 2, 5, 122, 20, },
+ { 1, 1, 2, 5, 122, 30, },
+ { 0, 1, 2, 5, 138, 30, },
+ { 2, 1, 2, 5, 138, 63, },
+ { 1, 1, 2, 5, 138, 63, },
+ { 0, 1, 2, 5, 155, 30, },
+ { 2, 1, 2, 5, 155, 63, },
+ { 1, 1, 2, 5, 155, 63, },
};
RTW_DECL_TABLE_TXPWR_LMT(rtw8822b_txpwr_lmt_type5);
diff --git a/drivers/net/wireless/realtek/rtw88/rtw8822c.c b/drivers/net/wireless/realtek/rtw88/rtw8822c.c
index f6214ff20337..207f64cc3e55 100644
--- a/drivers/net/wireless/realtek/rtw88/rtw8822c.c
+++ b/drivers/net/wireless/realtek/rtw88/rtw8822c.c
@@ -3,6 +3,7 @@
*/
#include "main.h"
+#include "coex.h"
#include "fw.h"
#include "tx.h"
#include "rx.h"
@@ -31,6 +32,7 @@ static int rtw8822c_read_efuse(struct rtw_dev *rtwdev, u8 *log_map)
map = (struct rtw8822c_efuse *)log_map;
efuse->rfe_option = map->rfe_option;
+ efuse->rf_board_option = map->rf_board_option;
efuse->crystal_cap = map->xtal_k;
efuse->channel_plan = map->channel_plan;
efuse->country_code[0] = map->country_code[0];
@@ -1041,12 +1043,6 @@ static void rtw8822c_phy_set_param(struct rtw_dev *rtwdev)
dm_info->cck_gi_l_bnd = ((cck_gi_l_bnd_msb << 4) | (cck_gi_l_bnd_lsb));
rtw8822c_rf_init(rtwdev);
- /* wifi path controller */
- rtw_write32_mask(rtwdev, 0x70, 0xff000000, 0x0e);
- rtw_write32_mask(rtwdev, 0x1704, 0xffffffff, 0x7700);
- rtw_write32_mask(rtwdev, 0x1700, 0xffffffff, 0xc00f0038);
- rtw_write32_mask(rtwdev, 0x6c0, 0xffffffff, 0xaaaaaaaa);
- rtw_write32_mask(rtwdev, 0x6c4, 0xffffffff, 0xaaaaaaaa);
}
#define WLAN_TXQ_RPT_EN 0x1F
@@ -1817,6 +1813,7 @@ static void rtw8822c_false_alarm_statistics(struct rtw_dev *rtwdev)
struct rtw_dm_info *dm_info = &rtwdev->dm_info;
u32 cck_enable;
u32 cck_fa_cnt;
+ u32 crc32_cnt;
u32 ofdm_fa_cnt;
u32 ofdm_fa_cnt1, ofdm_fa_cnt2, ofdm_fa_cnt3, ofdm_fa_cnt4, ofdm_fa_cnt5;
u16 parity_fail, rate_illegal, crc8_fail, mcs_fail, sb_search_fail,
@@ -1848,6 +1845,19 @@ static void rtw8822c_false_alarm_statistics(struct rtw_dev *rtwdev)
dm_info->total_fa_cnt = ofdm_fa_cnt;
dm_info->total_fa_cnt += cck_enable ? cck_fa_cnt : 0;
+ crc32_cnt = rtw_read32(rtwdev, 0x2c04);
+ dm_info->cck_ok_cnt = crc32_cnt & 0xffff;
+ dm_info->cck_err_cnt = (crc32_cnt & 0xffff0000) >> 16;
+ crc32_cnt = rtw_read32(rtwdev, 0x2c14);
+ dm_info->ofdm_ok_cnt = crc32_cnt & 0xffff;
+ dm_info->ofdm_err_cnt = (crc32_cnt & 0xffff0000) >> 16;
+ crc32_cnt = rtw_read32(rtwdev, 0x2c10);
+ dm_info->ht_ok_cnt = crc32_cnt & 0xffff;
+ dm_info->ht_err_cnt = (crc32_cnt & 0xffff0000) >> 16;
+ crc32_cnt = rtw_read32(rtwdev, 0x2c0c);
+ dm_info->vht_ok_cnt = crc32_cnt & 0xffff;
+ dm_info->vht_err_cnt = (crc32_cnt & 0xffff0000) >> 16;
+
rtw_write32_mask(rtwdev, REG_CCANRX, BIT_CCK_FA_RST, 0);
rtw_write32_mask(rtwdev, REG_CCANRX, BIT_CCK_FA_RST, 2);
rtw_write32_mask(rtwdev, REG_CCANRX, BIT_OFDM_FA_RST, 0);
@@ -1864,6 +1874,161 @@ static void rtw8822c_do_iqk(struct rtw_dev *rtwdev)
{
}
+/* for coex */
+static void rtw8822c_coex_cfg_init(struct rtw_dev *rtwdev)
+{
+ /* enable TBTT interrupt */
+ rtw_write8_set(rtwdev, REG_BCN_CTRL, BIT_EN_BCN_FUNCTION);
+
+ /* BT report packet sample rate */
+ /* 0x790[5:0]=0x5 */
+ rtw_write8_set(rtwdev, REG_BT_TDMA_TIME, 0x05);
+
+ /* enable BT counter statistics */
+ rtw_write8(rtwdev, REG_BT_STAT_CTRL, 0x1);
+
+ /* enable PTA (3-wire function from BT side) */
+ rtw_write32_set(rtwdev, REG_GPIO_MUXCFG, BIT_BT_PTA_EN);
+ rtw_write32_set(rtwdev, REG_GPIO_MUXCFG, BIT_BT_AOD_GPIO3);
+
+ /* enable PTA (tx/rx signal from WiFi side) */
+ rtw_write8_set(rtwdev, REG_QUEUE_CTRL, BIT_PTA_WL_TX_EN);
+ /* wl tx signal to PTA not gated by EDCCA */
+ rtw_write8_clr(rtwdev, REG_QUEUE_CTRL, BIT_PTA_EDCCA_EN);
+ /* GNT_BT = 1 while both are selected */
+ rtw_write8_set(rtwdev, REG_BT_COEX_V2, BIT_GNT_BT_POLARITY);
+ /* BT_CCA = ~GNT_WL_BB (do not OR with GNT_BT_BB, LTE_Rx) */
+ rtw_write8_clr(rtwdev, REG_DUMMY_PAGE4_V1, BIT_BTCCA_CTRL);
+
+ /* to avoid RF parameter error */
+ rtw_write_rf(rtwdev, RF_PATH_B, 0x1, 0xfffff, 0x40000);
+}
+
+static void rtw8822c_coex_cfg_gnt_fix(struct rtw_dev *rtwdev)
+{
+ struct rtw_coex *coex = &rtwdev->coex;
+ struct rtw_coex_stat *coex_stat = &coex->stat;
+ struct rtw_efuse *efuse = &rtwdev->efuse;
+ u32 rf_0x1;
+
+ if (coex_stat->gnt_workaround_state == coex_stat->wl_coex_mode)
+ return;
+
+ coex_stat->gnt_workaround_state = coex_stat->wl_coex_mode;
+
+ if ((coex_stat->kt_ver == 0 && coex->under_5g) || coex->freerun)
+ rf_0x1 = 0x40021;
+ else
+ rf_0x1 = 0x40000;
+
+ /* BT at S1 for Shared-Ant */
+ if (efuse->share_ant)
+ rf_0x1 |= BIT(13);
+
+ rtw_write_rf(rtwdev, RF_PATH_B, 0x1, 0xfffff, rf_0x1);
+
+ /* WL-S0 2G RF TRX cannot be masked by GNT_BT
+ * enable "WLS0 BB change RF mode if GNT_BT = 1" for shared-antenna type
+ * disable: 0x1860[3] = 1, enable: 0x1860[3] = 0
+ *
+ * enable "DAC off if GNT_WL = 0" for non-shared-antenna
+ * disable: 0x1c30[22] = 0,
+ * enable: 0x1c30[22] = 1, 0x1c38[12] = 0, 0x1c38[28] = 1
+ *
+ * disable WL-S1 BB change RF mode if GNT_BT
+ * since RF TRx mask can do it
+ */
+ rtw_write8_mask(rtwdev, 0x1c32, BIT(6), 1);
+ rtw_write8_mask(rtwdev, 0x1c39, BIT(4), 0);
+ rtw_write8_mask(rtwdev, 0x1c3b, BIT(4), 1);
+ rtw_write8_mask(rtwdev, 0x4160, BIT(3), 1);
+
+ /* disable WL-S0 BB change RF mode if wifi is at 5G,
+ * or antenna path is separated
+ */
+ if (coex_stat->wl_coex_mode == COEX_WLINK_5G ||
+ coex->under_5g || !efuse->share_ant) {
+ if (coex_stat->kt_ver >= 3) {
+ rtw_write8_mask(rtwdev, 0x1860, BIT(3), 0);
+ rtw_write8_mask(rtwdev, 0x1ca7, BIT(3), 1);
+ } else {
+ rtw_write8_mask(rtwdev, 0x1860, BIT(3), 1);
+ }
+ } else {
+ /* shared-antenna */
+ rtw_write8_mask(rtwdev, 0x1860, BIT(3), 0);
+ if (coex_stat->kt_ver >= 3)
+ rtw_write8_mask(rtwdev, 0x1ca7, BIT(3), 0);
+ }
+}
+
+static void rtw8822c_coex_cfg_gnt_debug(struct rtw_dev *rtwdev)
+{
+ rtw_write8_mask(rtwdev, 0x66, BIT(4), 0);
+ rtw_write8_mask(rtwdev, 0x67, BIT(0), 0);
+ rtw_write8_mask(rtwdev, 0x42, BIT(3), 0);
+ rtw_write8_mask(rtwdev, 0x65, BIT(7), 0);
+ rtw_write8_mask(rtwdev, 0x73, BIT(3), 0);
+}
+
+static void rtw8822c_coex_cfg_rfe_type(struct rtw_dev *rtwdev)
+{
+ struct rtw_coex *coex = &rtwdev->coex;
+ struct rtw_coex_rfe *coex_rfe = &coex->rfe;
+ struct rtw_efuse *efuse = &rtwdev->efuse;
+
+ coex_rfe->rfe_module_type = rtwdev->efuse.rfe_option;
+ coex_rfe->ant_switch_polarity = 0;
+ coex_rfe->ant_switch_exist = false;
+ coex_rfe->ant_switch_with_bt = false;
+ coex_rfe->ant_switch_diversity = false;
+
+ if (efuse->share_ant)
+ coex_rfe->wlg_at_btg = true;
+ else
+ coex_rfe->wlg_at_btg = false;
+
+ /* disable LTE coex in wifi side */
+ rtw_coex_write_indirect_reg(rtwdev, 0x38, BIT_LTE_COEX_EN, 0x0);
+ rtw_coex_write_indirect_reg(rtwdev, 0xa0, MASKLWORD, 0xffff);
+ rtw_coex_write_indirect_reg(rtwdev, 0xa4, MASKLWORD, 0xffff);
+}
+
+static void rtw8822c_coex_cfg_wl_tx_power(struct rtw_dev *rtwdev, u8 wl_pwr)
+{
+ struct rtw_coex *coex = &rtwdev->coex;
+ struct rtw_coex_dm *coex_dm = &coex->dm;
+
+ if (wl_pwr == coex_dm->cur_wl_pwr_lvl)
+ return;
+
+ coex_dm->cur_wl_pwr_lvl = wl_pwr;
+}
+
+static void rtw8822c_coex_cfg_wl_rx_gain(struct rtw_dev *rtwdev, bool low_gain)
+{
+ struct rtw_coex *coex = &rtwdev->coex;
+ struct rtw_coex_dm *coex_dm = &coex->dm;
+
+ if (low_gain == coex_dm->cur_wl_rx_low_gain_en)
+ return;
+
+ coex_dm->cur_wl_rx_low_gain_en = low_gain;
+
+ if (coex_dm->cur_wl_rx_low_gain_en) {
+ /* set Rx filter corner RCK offset */
+ rtw_write_rf(rtwdev, RF_PATH_A, 0xde, 0xfffff, 0x22);
+ rtw_write_rf(rtwdev, RF_PATH_A, 0x1d, 0xfffff, 0x36);
+ rtw_write_rf(rtwdev, RF_PATH_B, 0xde, 0xfffff, 0x22);
+ rtw_write_rf(rtwdev, RF_PATH_B, 0x1d, 0xfffff, 0x36);
+ } else {
+ /* set Rx filter corner RCK offset */
+ rtw_write_rf(rtwdev, RF_PATH_A, 0xde, 0xfffff, 0x20);
+ rtw_write_rf(rtwdev, RF_PATH_A, 0x1d, 0xfffff, 0x0);
+ rtw_write_rf(rtwdev, RF_PATH_B, 0x1d, 0xfffff, 0x0);
+ }
+}
+
static struct rtw_pwr_seq_cmd trans_carddis_to_cardemu_8822c[] = {
{0x0086,
RTW_PWR_CUT_ALL_MSK,
@@ -2232,8 +2397,160 @@ static struct rtw_chip_ops rtw8822c_ops = {
.cfg_ldo25 = rtw8822c_cfg_ldo25,
.false_alarm_statistics = rtw8822c_false_alarm_statistics,
.do_iqk = rtw8822c_do_iqk,
+
+ .coex_set_init = rtw8822c_coex_cfg_init,
+ .coex_set_ant_switch = NULL,
+ .coex_set_gnt_fix = rtw8822c_coex_cfg_gnt_fix,
+ .coex_set_gnt_debug = rtw8822c_coex_cfg_gnt_debug,
+ .coex_set_rfe_type = rtw8822c_coex_cfg_rfe_type,
+ .coex_set_wl_tx_power = rtw8822c_coex_cfg_wl_tx_power,
+ .coex_set_wl_rx_gain = rtw8822c_coex_cfg_wl_rx_gain,
+};
+
+/* Shared-Antenna Coex Table */
+static const struct coex_table_para table_sant_8822c[] = {
+ {0xffffffff, 0xffffffff}, /* case-0 */
+ {0x55555555, 0x55555555},
+ {0x66555555, 0x66555555},
+ {0xaaaaaaaa, 0xaaaaaaaa},
+ {0x5a5a5a5a, 0x5a5a5a5a},
+ {0xfafafafa, 0xfafafafa}, /* case-5 */
+ {0x6a5a6a5a, 0xaaaaaaaa},
+ {0x6a5a56aa, 0x6a5a56aa},
+ {0x6a5a5a5a, 0x6a5a5a5a},
+ {0x66555555, 0x5a5a5a5a},
+ {0x66555555, 0x6a5a5a5a}, /* case-10 */
+ {0x66555555, 0xfafafafa},
+ {0x66555555, 0x6a5a5aaa},
+ {0x66555555, 0x5aaa5aaa},
+ {0x66555555, 0xaaaa5aaa},
+ {0x66555555, 0xaaaaaaaa}, /* case-15 */
+ {0xffff55ff, 0xfafafafa},
+ {0xffff55ff, 0x6afa5afa},
+ {0xaaffffaa, 0xfafafafa},
+ {0xaa5555aa, 0x5a5a5a5a},
+ {0xaa5555aa, 0x6a5a5a5a}, /* case-20 */
+ {0xaa5555aa, 0xaaaaaaaa},
+ {0xffffffff, 0x5a5a5a5a},
+ {0xffffffff, 0x6a5a5a5a},
+ {0xffffffff, 0x55555555},
+ {0xffffffff, 0x6a5a5aaa}, /* case-25 */
+ {0x55555555, 0x5a5a5a5a},
+ {0x55555555, 0xaaaaaaaa},
+ {0x55555555, 0x6a5a6a5a},
+ {0x66556655, 0x66556655}
+};
+
+/* Non-Shared-Antenna Coex Table */
+static const struct coex_table_para table_nsant_8822c[] = {
+ {0xffffffff, 0xffffffff}, /* case-100 */
+ {0x55555555, 0x55555555},
+ {0x66555555, 0x66555555},
+ {0xaaaaaaaa, 0xaaaaaaaa},
+ {0x5a5a5a5a, 0x5a5a5a5a},
+ {0xfafafafa, 0xfafafafa}, /* case-105 */
+ {0x5afa5afa, 0x5afa5afa},
+ {0x55555555, 0xfafafafa},
+ {0x66555555, 0xfafafafa},
+ {0x66555555, 0x5a5a5a5a},
+ {0x66555555, 0x6a5a5a5a}, /* case-110 */
+ {0x66555555, 0xaaaaaaaa},
+ {0xffff55ff, 0xfafafafa},
+ {0xffff55ff, 0x5afa5afa},
+ {0xffff55ff, 0xaaaaaaaa},
+ {0xaaffffaa, 0xfafafafa}, /* case-115 */
+ {0xaaffffaa, 0x5afa5afa},
+ {0xaaffffaa, 0xaaaaaaaa},
+ {0xffffffff, 0xfafafafa},
+ {0xffffffff, 0x5afa5afa},
+ {0xffffffff, 0xaaaaaaaa}, /* case-120 */
+ {0x55ff55ff, 0x5afa5afa},
+ {0x55ff55ff, 0xaaaaaaaa},
+ {0x55ff55ff, 0x55ff55ff}
+};
+
+/* Shared-Antenna TDMA */
+static const struct coex_tdma_para tdma_sant_8822c[] = {
+ { {0x00, 0x00, 0x00, 0x00, 0x00} }, /* case-0 */
+ { {0x61, 0x45, 0x03, 0x11, 0x11} },
+ { {0x61, 0x3a, 0x03, 0x11, 0x11} },
+ { {0x61, 0x30, 0x03, 0x11, 0x11} },
+ { {0x61, 0x20, 0x03, 0x11, 0x11} },
+ { {0x61, 0x10, 0x03, 0x11, 0x11} }, /* case-5 */
+ { {0x61, 0x45, 0x03, 0x11, 0x10} },
+ { {0x61, 0x3a, 0x03, 0x11, 0x10} },
+ { {0x61, 0x30, 0x03, 0x11, 0x10} },
+ { {0x61, 0x20, 0x03, 0x11, 0x10} },
+ { {0x61, 0x10, 0x03, 0x11, 0x10} }, /* case-10 */
+ { {0x61, 0x08, 0x03, 0x11, 0x14} },
+ { {0x61, 0x08, 0x03, 0x10, 0x14} },
+ { {0x51, 0x08, 0x03, 0x10, 0x54} },
+ { {0x51, 0x08, 0x03, 0x10, 0x55} },
+ { {0x51, 0x08, 0x07, 0x10, 0x54} }, /* case-15 */
+ { {0x51, 0x45, 0x03, 0x10, 0x10} },
+ { {0x51, 0x3a, 0x03, 0x10, 0x50} },
+ { {0x51, 0x30, 0x03, 0x10, 0x50} },
+ { {0x51, 0x20, 0x03, 0x10, 0x50} },
+ { {0x51, 0x10, 0x03, 0x10, 0x50} }, /* case-20 */
+ { {0x51, 0x4a, 0x03, 0x10, 0x50} },
+ { {0x51, 0x0c, 0x03, 0x10, 0x54} },
+ { {0x55, 0x08, 0x03, 0x10, 0x54} },
+ { {0x65, 0x10, 0x03, 0x11, 0x11} },
+ { {0x51, 0x10, 0x03, 0x10, 0x51} }, /* case-25 */
+ { {0x51, 0x08, 0x03, 0x10, 0x50} }
};
+/* Non-Shared-Antenna TDMA */
+static const struct coex_tdma_para tdma_nsant_8822c[] = {
+ { {0x00, 0x00, 0x00, 0x00, 0x00} }, /* case-100 */
+ { {0x61, 0x45, 0x03, 0x11, 0x11} },
+ { {0x61, 0x3a, 0x03, 0x11, 0x11} },
+ { {0x61, 0x30, 0x03, 0x11, 0x11} },
+ { {0x61, 0x20, 0x03, 0x11, 0x11} },
+ { {0x61, 0x10, 0x03, 0x11, 0x11} }, /* case-105 */
+ { {0x61, 0x45, 0x03, 0x11, 0x10} },
+ { {0x61, 0x3a, 0x03, 0x11, 0x10} },
+ { {0x61, 0x30, 0x03, 0x11, 0x10} },
+ { {0x61, 0x20, 0x03, 0x11, 0x10} },
+ { {0x61, 0x10, 0x03, 0x11, 0x10} }, /* case-110 */
+ { {0x61, 0x08, 0x03, 0x11, 0x14} },
+ { {0x61, 0x08, 0x03, 0x10, 0x14} },
+ { {0x51, 0x08, 0x03, 0x10, 0x54} },
+ { {0x51, 0x08, 0x03, 0x10, 0x55} },
+ { {0x51, 0x08, 0x07, 0x10, 0x54} }, /* case-115 */
+ { {0x51, 0x45, 0x03, 0x10, 0x50} },
+ { {0x51, 0x3a, 0x03, 0x10, 0x50} },
+ { {0x51, 0x30, 0x03, 0x10, 0x50} },
+ { {0x51, 0x20, 0x03, 0x10, 0x50} },
+ { {0x51, 0x10, 0x03, 0x10, 0x50} } /* case-120 */
+};
+
+/* rssi in percent (dBm = % - 100) */
+static const u8 wl_rssi_step_8822c[] = {60, 50, 44, 30};
+static const u8 bt_rssi_step_8822c[] = {8, 15, 20, 25};
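+/* worked example of the mapping above: a WL rssi step of 60 (%) is 60 - 100 = -40 dBm */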
+static const struct coex_5g_afh_map afh_5g_8822c[] = { {0, 0, 0} };
+
+/* wl_tx_dec_power, bt_tx_dec_power, wl_rx_gain, bt_rx_lna_constrain */
+static const struct coex_rf_para rf_para_tx_8822c[] = {
+ {0, 0, false, 7}, /* for normal */
+ {0, 16, false, 7}, /* for WL-CPT */
+ {8, 17, true, 4},
+ {7, 18, true, 4},
+ {6, 19, true, 4},
+ {5, 20, true, 4}
+};
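+
+/* e.g. the third row above, {8, 17, true, 4}, reads per the column
+ * comment: WL TX power decreased by 8, BT TX power decreased by 17, WL
+ * low RX gain enabled, BT RX LNA constrained to level 4 (units are not
+ * given in this patch).
+ */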
+
+static const struct coex_rf_para rf_para_rx_8822c[] = {
+ {0, 0, false, 7}, /* for normal */
+ {0, 16, false, 7}, /* for WL-CPT */
+ {3, 24, true, 5},
+ {2, 26, true, 5},
+ {1, 27, true, 5},
+ {0, 28, true, 5}
+};
+
+static_assert(ARRAY_SIZE(rf_para_tx_8822c) == ARRAY_SIZE(rf_para_rx_8822c));
+
struct rtw_chip_info rtw8822c_hw_spec = {
.ops = &rtw8822c_ops,
.id = RTW_CHIP_TYPE_8822C,
@@ -2272,6 +2589,32 @@ struct rtw_chip_info rtw8822c_hw_spec = {
.rf_tbl = {&rtw8822c_rf_a_tbl, &rtw8822c_rf_b_tbl},
.rfe_defs = rtw8822c_rfe_defs,
.rfe_defs_size = ARRAY_SIZE(rtw8822c_rfe_defs),
+
+ .coex_para_ver = 0x19062706,
+ .bt_desired_ver = 0x6,
+ .scbd_support = true,
+ .new_scbd10_def = true,
+ .pstdma_type = COEX_PSTDMA_FORCE_LPSOFF,
+ .bt_rssi_type = COEX_BTRSSI_DBM,
+ .ant_isolation = 15,
+ .rssi_tolerance = 2,
+ .wl_rssi_step = wl_rssi_step_8822c,
+ .bt_rssi_step = bt_rssi_step_8822c,
+ .table_sant_num = ARRAY_SIZE(table_sant_8822c),
+ .table_sant = table_sant_8822c,
+ .table_nsant_num = ARRAY_SIZE(table_nsant_8822c),
+ .table_nsant = table_nsant_8822c,
+ .tdma_sant_num = ARRAY_SIZE(tdma_sant_8822c),
+ .tdma_sant = tdma_sant_8822c,
+ .tdma_nsant_num = ARRAY_SIZE(tdma_nsant_8822c),
+ .tdma_nsant = tdma_nsant_8822c,
+ .wl_rf_para_num = ARRAY_SIZE(rf_para_tx_8822c),
+ .wl_rf_para_tx = rf_para_tx_8822c,
+ .wl_rf_para_rx = rf_para_rx_8822c,
+ .bt_afh_span_bw20 = 0x24,
+ .bt_afh_span_bw40 = 0x36,
+ .afh_5g_num = ARRAY_SIZE(afh_5g_8822c),
+ .afh_5g = afh_5g_8822c,
};
EXPORT_SYMBOL(rtw8822c_hw_spec);
diff --git a/drivers/net/wireless/realtek/rtw88/rtw8822c_table.c b/drivers/net/wireless/realtek/rtw88/rtw8822c_table.c
index 18e609a69829..6c7eaa75b98b 100644
--- a/drivers/net/wireless/realtek/rtw88/rtw8822c_table.c
+++ b/drivers/net/wireless/realtek/rtw88/rtw8822c_table.c
@@ -9403,885 +9403,1762 @@ static const u32 rtw8822c_rf_b[] = {
RTW_DECL_TABLE_RF_RADIO(rtw8822c_rf_b, B);
-static const u8 rtw8822c_txpwr_lmt_type0[] = {
- 0, 0, 0, 0, 1, 72, 2, 0, 0, 0, 1, 60,
- 0, 0, 0, 0, 2, 72, 2, 0, 0, 0, 2, 60,
- 0, 0, 0, 0, 3, 76, 2, 0, 0, 0, 3, 60,
- 0, 0, 0, 0, 4, 76, 2, 0, 0, 0, 4, 60,
- 0, 0, 0, 0, 5, 76, 2, 0, 0, 0, 5, 60,
- 0, 0, 0, 0, 6, 76, 2, 0, 0, 0, 6, 60,
- 0, 0, 0, 0, 7, 76, 2, 0, 0, 0, 7, 60,
- 0, 0, 0, 0, 8, 76, 2, 0, 0, 0, 8, 60,
- 0, 0, 0, 0, 9, 76, 2, 0, 0, 0, 9, 60,
- 0, 0, 0, 0, 10, 72, 2, 0, 0, 0, 10, 60,
- 0, 0, 0, 0, 11, 72, 2, 0, 0, 0, 11, 60,
- 0, 0, 0, 0, 12, 52, 2, 0, 0, 0, 12, 60,
- 0, 0, 0, 0, 13, 48, 2, 0, 0, 0, 13, 60,
- 0, 0, 0, 0, 14, 127, 2, 0, 0, 0, 14, 127,
- 0, 0, 0, 1, 1, 52, 2, 0, 0, 1, 1, 60,
- 0, 0, 0, 1, 2, 60, 2, 0, 0, 1, 2, 60,
- 0, 0, 0, 1, 3, 64, 2, 0, 0, 1, 3, 60,
- 0, 0, 0, 1, 4, 68, 2, 0, 0, 1, 4, 60,
- 0, 0, 0, 1, 5, 76, 2, 0, 0, 1, 5, 60,
- 0, 0, 0, 1, 6, 76, 2, 0, 0, 1, 6, 60,
- 0, 0, 0, 1, 7, 76, 2, 0, 0, 1, 7, 60,
- 0, 0, 0, 1, 8, 68, 2, 0, 0, 1, 8, 60,
- 0, 0, 0, 1, 9, 64, 2, 0, 0, 1, 9, 60,
- 0, 0, 0, 1, 10, 60, 2, 0, 0, 1, 10, 60,
- 0, 0, 0, 1, 11, 52, 2, 0, 0, 1, 11, 60,
- 0, 0, 0, 1, 12, 40, 2, 0, 0, 1, 12, 60,
- 0, 0, 0, 1, 13, 28, 2, 0, 0, 1, 13, 60,
- 0, 0, 0, 1, 14, 127, 2, 0, 0, 1, 14, 127,
- 0, 0, 0, 2, 1, 52, 2, 0, 0, 2, 1, 60,
- 0, 0, 0, 2, 2, 60, 2, 0, 0, 2, 2, 60,
- 0, 0, 0, 2, 3, 64, 2, 0, 0, 2, 3, 60,
- 0, 0, 0, 2, 4, 68, 2, 0, 0, 2, 4, 60,
- 0, 0, 0, 2, 5, 76, 2, 0, 0, 2, 5, 60,
- 0, 0, 0, 2, 6, 76, 2, 0, 0, 2, 6, 60,
- 0, 0, 0, 2, 7, 76, 2, 0, 0, 2, 7, 60,
- 0, 0, 0, 2, 8, 68, 2, 0, 0, 2, 8, 60,
- 0, 0, 0, 2, 9, 64, 2, 0, 0, 2, 9, 60,
- 0, 0, 0, 2, 10, 60, 2, 0, 0, 2, 10, 60,
- 0, 0, 0, 2, 11, 52, 2, 0, 0, 2, 11, 60,
- 0, 0, 0, 2, 12, 40, 2, 0, 0, 2, 12, 60,
- 0, 0, 0, 2, 13, 28, 2, 0, 0, 2, 13, 60,
- 0, 0, 0, 2, 14, 127, 2, 0, 0, 2, 14, 127,
- 0, 0, 0, 3, 1, 52, 2, 0, 0, 3, 1, 36,
- 0, 0, 0, 3, 2, 60, 2, 0, 0, 3, 2, 36,
- 0, 0, 0, 3, 3, 64, 2, 0, 0, 3, 3, 36,
- 0, 0, 0, 3, 4, 68, 2, 0, 0, 3, 4, 36,
- 0, 0, 0, 3, 5, 76, 2, 0, 0, 3, 5, 36,
- 0, 0, 0, 3, 6, 76, 2, 0, 0, 3, 6, 36,
- 0, 0, 0, 3, 7, 76, 2, 0, 0, 3, 7, 36,
- 0, 0, 0, 3, 8, 68, 2, 0, 0, 3, 8, 36,
- 0, 0, 0, 3, 9, 64, 2, 0, 0, 3, 9, 36,
- 0, 0, 0, 3, 10, 60, 2, 0, 0, 3, 10, 36,
- 0, 0, 0, 3, 11, 52, 2, 0, 0, 3, 11, 36,
- 0, 0, 0, 3, 12, 40, 2, 0, 0, 3, 12, 36,
- 0, 0, 0, 3, 13, 28, 2, 0, 0, 3, 13, 36,
- 0, 0, 0, 3, 14, 127, 2, 0, 0, 3, 14, 127,
- 0, 0, 1, 2, 1, 127, 2, 0, 1, 2, 1, 127,
- 0, 0, 1, 2, 2, 127, 2, 0, 1, 2, 2, 127,
- 0, 0, 1, 2, 3, 52, 2, 0, 1, 2, 3, 60,
- 0, 0, 1, 2, 4, 52, 2, 0, 1, 2, 4, 60,
- 0, 0, 1, 2, 5, 60, 2, 0, 1, 2, 5, 60,
- 0, 0, 1, 2, 6, 64, 2, 0, 1, 2, 6, 60,
- 0, 0, 1, 2, 7, 60, 2, 0, 1, 2, 7, 60,
- 0, 0, 1, 2, 8, 52, 2, 0, 1, 2, 8, 60,
- 0, 0, 1, 2, 9, 52, 2, 0, 1, 2, 9, 60,
- 0, 0, 1, 2, 10, 40, 2, 0, 1, 2, 10, 60,
- 0, 0, 1, 2, 11, 28, 2, 0, 1, 2, 11, 60,
- 0, 0, 1, 2, 12, 127, 2, 0, 1, 2, 12, 127,
- 0, 0, 1, 2, 13, 127, 2, 0, 1, 2, 13, 127,
- 0, 0, 1, 2, 14, 127, 2, 0, 1, 2, 14, 127,
- 0, 0, 1, 3, 1, 127, 2, 0, 1, 3, 1, 127,
- 0, 0, 1, 3, 2, 127, 2, 0, 1, 3, 2, 127,
- 0, 0, 1, 3, 3, 48, 2, 0, 1, 3, 3, 36,
- 0, 0, 1, 3, 4, 48, 2, 0, 1, 3, 4, 36,
- 0, 0, 1, 3, 5, 60, 2, 0, 1, 3, 5, 36,
- 0, 0, 1, 3, 6, 64, 2, 0, 1, 3, 6, 36,
- 0, 0, 1, 3, 7, 60, 2, 0, 1, 3, 7, 36,
- 0, 0, 1, 3, 8, 52, 2, 0, 1, 3, 8, 36,
- 0, 0, 1, 3, 9, 52, 2, 0, 1, 3, 9, 36,
- 0, 0, 1, 3, 10, 40, 2, 0, 1, 3, 10, 36,
- 0, 0, 1, 3, 11, 26, 2, 0, 1, 3, 11, 36,
- 0, 0, 1, 3, 12, 127, 2, 0, 1, 3, 12, 127,
- 0, 0, 1, 3, 13, 127, 2, 0, 1, 3, 13, 127,
- 0, 0, 1, 3, 14, 127, 2, 0, 1, 3, 14, 127,
- 0, 1, 0, 1, 36, 74, 2, 1, 0, 1, 36, 62,
- 0, 1, 0, 1, 40, 76, 2, 1, 0, 1, 40, 62,
- 0, 1, 0, 1, 44, 76, 2, 1, 0, 1, 44, 62,
- 0, 1, 0, 1, 48, 76, 2, 1, 0, 1, 48, 62,
- 0, 1, 0, 1, 52, 76, 2, 1, 0, 1, 52, 62,
- 0, 1, 0, 1, 56, 76, 2, 1, 0, 1, 56, 62,
- 0, 1, 0, 1, 60, 76, 2, 1, 0, 1, 60, 62,
- 0, 1, 0, 1, 64, 74, 2, 1, 0, 1, 64, 62,
- 0, 1, 0, 1, 100, 72, 2, 1, 0, 1, 100, 62,
- 0, 1, 0, 1, 104, 76, 2, 1, 0, 1, 104, 62,
- 0, 1, 0, 1, 108, 76, 2, 1, 0, 1, 108, 62,
- 0, 1, 0, 1, 112, 76, 2, 1, 0, 1, 112, 62,
- 0, 1, 0, 1, 116, 76, 2, 1, 0, 1, 116, 62,
- 0, 1, 0, 1, 120, 76, 2, 1, 0, 1, 120, 62,
- 0, 1, 0, 1, 124, 76, 2, 1, 0, 1, 124, 62,
- 0, 1, 0, 1, 128, 76, 2, 1, 0, 1, 128, 62,
- 0, 1, 0, 1, 132, 76, 2, 1, 0, 1, 132, 62,
- 0, 1, 0, 1, 136, 76, 2, 1, 0, 1, 136, 62,
- 0, 1, 0, 1, 140, 72, 2, 1, 0, 1, 140, 62,
- 0, 1, 0, 1, 144, 76, 2, 1, 0, 1, 144, 127,
- 0, 1, 0, 1, 149, 76, 2, 1, 0, 1, 149, -128,
- 0, 1, 0, 1, 153, 76, 2, 1, 0, 1, 153, -128,
- 0, 1, 0, 1, 157, 76, 2, 1, 0, 1, 157, -128,
- 0, 1, 0, 1, 161, 76, 2, 1, 0, 1, 161, -128,
- 0, 1, 0, 1, 165, 76, 2, 1, 0, 1, 165, -128,
- 0, 1, 0, 2, 36, 72, 2, 1, 0, 2, 36, 62,
- 0, 1, 0, 2, 40, 76, 2, 1, 0, 2, 40, 62,
- 0, 1, 0, 2, 44, 76, 2, 1, 0, 2, 44, 62,
- 0, 1, 0, 2, 48, 76, 2, 1, 0, 2, 48, 62,
- 0, 1, 0, 2, 52, 76, 2, 1, 0, 2, 52, 62,
- 0, 1, 0, 2, 56, 76, 2, 1, 0, 2, 56, 62,
- 0, 1, 0, 2, 60, 76, 2, 1, 0, 2, 60, 62,
- 0, 1, 0, 2, 64, 74, 2, 1, 0, 2, 64, 62,
- 0, 1, 0, 2, 100, 70, 2, 1, 0, 2, 100, 62,
- 0, 1, 0, 2, 104, 76, 2, 1, 0, 2, 104, 62,
- 0, 1, 0, 2, 108, 76, 2, 1, 0, 2, 108, 62,
- 0, 1, 0, 2, 112, 76, 2, 1, 0, 2, 112, 62,
- 0, 1, 0, 2, 116, 76, 2, 1, 0, 2, 116, 62,
- 0, 1, 0, 2, 120, 76, 2, 1, 0, 2, 120, 62,
- 0, 1, 0, 2, 124, 76, 2, 1, 0, 2, 124, 62,
- 0, 1, 0, 2, 128, 76, 2, 1, 0, 2, 128, 62,
- 0, 1, 0, 2, 132, 76, 2, 1, 0, 2, 132, 62,
- 0, 1, 0, 2, 136, 76, 2, 1, 0, 2, 136, 62,
- 0, 1, 0, 2, 140, 70, 2, 1, 0, 2, 140, 62,
- 0, 1, 0, 2, 144, 76, 2, 1, 0, 2, 144, 127,
- 0, 1, 0, 2, 149, 76, 2, 1, 0, 2, 149, -128,
- 0, 1, 0, 2, 153, 76, 2, 1, 0, 2, 153, -128,
- 0, 1, 0, 2, 157, 76, 2, 1, 0, 2, 157, -128,
- 0, 1, 0, 2, 161, 76, 2, 1, 0, 2, 161, -128,
- 0, 1, 0, 2, 165, 76, 2, 1, 0, 2, 165, -128,
- 0, 1, 0, 3, 36, 68, 2, 1, 0, 3, 36, 38,
- 0, 1, 0, 3, 40, 68, 2, 1, 0, 3, 40, 38,
- 0, 1, 0, 3, 44, 68, 2, 1, 0, 3, 44, 38,
- 0, 1, 0, 3, 48, 68, 2, 1, 0, 3, 48, 38,
- 0, 1, 0, 3, 52, 68, 2, 1, 0, 3, 52, 38,
- 0, 1, 0, 3, 56, 68, 2, 1, 0, 3, 56, 38,
- 0, 1, 0, 3, 60, 66, 2, 1, 0, 3, 60, 38,
- 0, 1, 0, 3, 64, 68, 2, 1, 0, 3, 64, 38,
- 0, 1, 0, 3, 100, 60, 2, 1, 0, 3, 100, 38,
- 0, 1, 0, 3, 104, 68, 2, 1, 0, 3, 104, 38,
- 0, 1, 0, 3, 108, 68, 2, 1, 0, 3, 108, 38,
- 0, 1, 0, 3, 112, 68, 2, 1, 0, 3, 112, 38,
- 0, 1, 0, 3, 116, 68, 2, 1, 0, 3, 116, 38,
- 0, 1, 0, 3, 120, 68, 2, 1, 0, 3, 120, 38,
- 0, 1, 0, 3, 124, 68, 2, 1, 0, 3, 124, 38,
- 0, 1, 0, 3, 128, 68, 2, 1, 0, 3, 128, 38,
- 0, 1, 0, 3, 132, 68, 2, 1, 0, 3, 132, 38,
- 0, 1, 0, 3, 136, 68, 2, 1, 0, 3, 136, 38,
- 0, 1, 0, 3, 140, 60, 2, 1, 0, 3, 140, 38,
- 0, 1, 0, 3, 144, 68, 2, 1, 0, 3, 144, 127,
- 0, 1, 0, 3, 149, 76, 2, 1, 0, 3, 149, -128,
- 0, 1, 0, 3, 153, 76, 2, 1, 0, 3, 153, -128,
- 0, 1, 0, 3, 157, 76, 2, 1, 0, 3, 157, -128,
- 0, 1, 0, 3, 161, 76, 2, 1, 0, 3, 161, -128,
- 0, 1, 0, 3, 165, 76, 2, 1, 0, 3, 165, -128,
- 0, 1, 1, 2, 38, 66, 2, 1, 1, 2, 38, 64,
- 0, 1, 1, 2, 46, 72, 2, 1, 1, 2, 46, 64,
- 0, 1, 1, 2, 54, 72, 2, 1, 1, 2, 54, 64,
- 0, 1, 1, 2, 62, 64, 2, 1, 1, 2, 62, 64,
- 0, 1, 1, 2, 102, 58, 2, 1, 1, 2, 102, 64,
- 0, 1, 1, 2, 110, 72, 2, 1, 1, 2, 110, 64,
- 0, 1, 1, 2, 118, 72, 2, 1, 1, 2, 118, 64,
- 0, 1, 1, 2, 126, 72, 2, 1, 1, 2, 126, 64,
- 0, 1, 1, 2, 134, 72, 2, 1, 1, 2, 134, 64,
- 0, 1, 1, 2, 142, 72, 2, 1, 1, 2, 142, 127,
- 0, 1, 1, 2, 151, 72, 2, 1, 1, 2, 151, -128,
- 0, 1, 1, 2, 159, 72, 2, 1, 1, 2, 159, -128,
- 0, 1, 1, 3, 38, 60, 2, 1, 1, 3, 38, 40,
- 0, 1, 1, 3, 46, 68, 2, 1, 1, 3, 46, 40,
- 0, 1, 1, 3, 54, 68, 2, 1, 1, 3, 54, 40,
- 0, 1, 1, 3, 62, 58, 2, 1, 1, 3, 62, 40,
- 0, 1, 1, 3, 102, 54, 2, 1, 1, 3, 102, 40,
- 0, 1, 1, 3, 110, 68, 2, 1, 1, 3, 110, 40,
- 0, 1, 1, 3, 118, 68, 2, 1, 1, 3, 118, 40,
- 0, 1, 1, 3, 126, 68, 2, 1, 1, 3, 126, 40,
- 0, 1, 1, 3, 134, 68, 2, 1, 1, 3, 134, 40,
- 0, 1, 1, 3, 142, 68, 2, 1, 1, 3, 142, 127,
- 0, 1, 1, 3, 151, 72, 2, 1, 1, 3, 151, -128,
- 0, 1, 1, 3, 159, 72, 2, 1, 1, 3, 159, -128,
- 0, 1, 2, 4, 42, 64, 2, 1, 2, 4, 42, 64,
- 0, 1, 2, 4, 58, 62, 2, 1, 2, 4, 58, 64,
- 0, 1, 2, 4, 106, 58, 2, 1, 2, 4, 106, 64,
- 0, 1, 2, 4, 122, 72, 2, 1, 2, 4, 122, 64,
- 0, 1, 2, 4, 138, 72, 2, 1, 2, 4, 138, 127,
- 0, 1, 2, 4, 155, 72, 2, 1, 2, 4, 155, -128,
- 0, 1, 2, 5, 42, 54, 2, 1, 2, 5, 42, 40,
- 0, 1, 2, 5, 58, 52, 2, 1, 2, 5, 58, 40,
- 0, 1, 2, 5, 106, 50, 2, 1, 2, 5, 106, 40,
- 0, 1, 2, 5, 122, 66, 2, 1, 2, 5, 122, 40,
- 0, 1, 2, 5, 138, 66, 2, 1, 2, 5, 138, 127,
- 0, 1, 2, 5, 155, 62, 2, 1, 2, 5, 155, -128,
- 1, 0, 0, 0, 1, 68, 3, 0, 0, 0, 1, 72,
- 4, 0, 0, 0, 1, 76, 5, 0, 0, 0, 1, 60,
- 6, 0, 0, 0, 1, 72, 7, 0, 0, 0, 1, 60,
- 8, 0, 0, 0, 1, 72, 1, 0, 0, 0, 2, 68,
- 3, 0, 0, 0, 2, 72, 4, 0, 0, 0, 2, 76,
- 5, 0, 0, 0, 2, 60, 6, 0, 0, 0, 2, 72,
- 7, 0, 0, 0, 2, 60, 8, 0, 0, 0, 2, 72,
- 1, 0, 0, 0, 3, 68, 3, 0, 0, 0, 3, 76,
- 4, 0, 0, 0, 3, 76, 5, 0, 0, 0, 3, 60,
- 6, 0, 0, 0, 3, 76, 7, 0, 0, 0, 3, 60,
- 8, 0, 0, 0, 3, 76, 1, 0, 0, 0, 4, 68,
- 3, 0, 0, 0, 4, 76, 4, 0, 0, 0, 4, 76,
- 5, 0, 0, 0, 4, 60, 6, 0, 0, 0, 4, 76,
- 7, 0, 0, 0, 4, 60, 8, 0, 0, 0, 4, 76,
- 1, 0, 0, 0, 5, 68, 3, 0, 0, 0, 5, 76,
- 4, 0, 0, 0, 5, 76, 5, 0, 0, 0, 5, 60,
- 6, 0, 0, 0, 5, 76, 7, 0, 0, 0, 5, 60,
- 8, 0, 0, 0, 5, 76, 1, 0, 0, 0, 6, 68,
- 3, 0, 0, 0, 6, 76, 4, 0, 0, 0, 6, 76,
- 5, 0, 0, 0, 6, 60, 6, 0, 0, 0, 6, 76,
- 7, 0, 0, 0, 6, 60, 8, 0, 0, 0, 6, 76,
- 1, 0, 0, 0, 7, 68, 3, 0, 0, 0, 7, 76,
- 4, 0, 0, 0, 7, 76, 5, 0, 0, 0, 7, 60,
- 6, 0, 0, 0, 7, 76, 7, 0, 0, 0, 7, 60,
- 8, 0, 0, 0, 7, 76, 1, 0, 0, 0, 8, 68,
- 3, 0, 0, 0, 8, 76, 4, 0, 0, 0, 8, 76,
- 5, 0, 0, 0, 8, 60, 6, 0, 0, 0, 8, 76,
- 7, 0, 0, 0, 8, 60, 8, 0, 0, 0, 8, 76,
- 1, 0, 0, 0, 9, 68, 3, 0, 0, 0, 9, 76,
- 4, 0, 0, 0, 9, 76, 5, 0, 0, 0, 9, 60,
- 6, 0, 0, 0, 9, 76, 7, 0, 0, 0, 9, 60,
- 8, 0, 0, 0, 9, 76, 1, 0, 0, 0, 10, 68,
- 3, 0, 0, 0, 10, 72, 4, 0, 0, 0, 10, 76,
- 5, 0, 0, 0, 10, 60, 6, 0, 0, 0, 10, 72,
- 7, 0, 0, 0, 10, 60, 8, 0, 0, 0, 10, 72,
- 1, 0, 0, 0, 11, 68, 3, 0, 0, 0, 11, 72,
- 4, 0, 0, 0, 11, 76, 5, 0, 0, 0, 11, 60,
- 6, 0, 0, 0, 11, 72, 7, 0, 0, 0, 11, 60,
- 8, 0, 0, 0, 11, 72, 1, 0, 0, 0, 12, 68,
- 3, 0, 0, 0, 12, 52, 4, 0, 0, 0, 12, 76,
- 5, 0, 0, 0, 12, 60, 6, 0, 0, 0, 12, 52,
- 7, 0, 0, 0, 12, 60, 8, 0, 0, 0, 12, 52,
- 1, 0, 0, 0, 13, 68, 3, 0, 0, 0, 13, 48,
- 4, 0, 0, 0, 13, 76, 5, 0, 0, 0, 13, 60,
- 6, 0, 0, 0, 13, 48, 7, 0, 0, 0, 13, 60,
- 8, 0, 0, 0, 13, 48, 1, 0, 0, 0, 14, 68,
- 3, 0, 0, 0, 14, 127, 4, 0, 0, 0, 14, 127,
- 5, 0, 0, 0, 14, 127, 6, 0, 0, 0, 14, 127,
- 7, 0, 0, 0, 14, 127, 8, 0, 0, 0, 14, 127,
- 1, 0, 0, 1, 1, 76, 3, 0, 0, 1, 1, 52,
- 4, 0, 0, 1, 1, 76, 5, 0, 0, 1, 1, 60,
- 6, 0, 0, 1, 1, 52, 7, 0, 0, 1, 1, 60,
- 8, 0, 0, 1, 1, 52, 1, 0, 0, 1, 2, 76,
- 3, 0, 0, 1, 2, 60, 4, 0, 0, 1, 2, 76,
- 5, 0, 0, 1, 2, 60, 6, 0, 0, 1, 2, 60,
- 7, 0, 0, 1, 2, 60, 8, 0, 0, 1, 2, 60,
- 1, 0, 0, 1, 3, 76, 3, 0, 0, 1, 3, 64,
- 4, 0, 0, 1, 3, 76, 5, 0, 0, 1, 3, 60,
- 6, 0, 0, 1, 3, 64, 7, 0, 0, 1, 3, 60,
- 8, 0, 0, 1, 3, 64, 1, 0, 0, 1, 4, 76,
- 3, 0, 0, 1, 4, 68, 4, 0, 0, 1, 4, 76,
- 5, 0, 0, 1, 4, 60, 6, 0, 0, 1, 4, 68,
- 7, 0, 0, 1, 4, 60, 8, 0, 0, 1, 4, 68,
- 1, 0, 0, 1, 5, 76, 3, 0, 0, 1, 5, 76,
- 4, 0, 0, 1, 5, 76, 5, 0, 0, 1, 5, 60,
- 6, 0, 0, 1, 5, 76, 7, 0, 0, 1, 5, 60,
- 8, 0, 0, 1, 5, 76, 1, 0, 0, 1, 6, 76,
- 3, 0, 0, 1, 6, 76, 4, 0, 0, 1, 6, 76,
- 5, 0, 0, 1, 6, 60, 6, 0, 0, 1, 6, 76,
- 7, 0, 0, 1, 6, 60, 8, 0, 0, 1, 6, 76,
- 1, 0, 0, 1, 7, 76, 3, 0, 0, 1, 7, 76,
- 4, 0, 0, 1, 7, 76, 5, 0, 0, 1, 7, 60,
- 6, 0, 0, 1, 7, 76, 7, 0, 0, 1, 7, 60,
- 8, 0, 0, 1, 7, 76, 1, 0, 0, 1, 8, 76,
- 3, 0, 0, 1, 8, 68, 4, 0, 0, 1, 8, 76,
- 5, 0, 0, 1, 8, 60, 6, 0, 0, 1, 8, 68,
- 7, 0, 0, 1, 8, 60, 8, 0, 0, 1, 8, 68,
- 1, 0, 0, 1, 9, 76, 3, 0, 0, 1, 9, 64,
- 4, 0, 0, 1, 9, 76, 5, 0, 0, 1, 9, 60,
- 6, 0, 0, 1, 9, 64, 7, 0, 0, 1, 9, 60,
- 8, 0, 0, 1, 9, 64, 1, 0, 0, 1, 10, 76,
- 3, 0, 0, 1, 10, 60, 4, 0, 0, 1, 10, 76,
- 5, 0, 0, 1, 10, 60, 6, 0, 0, 1, 10, 60,
- 7, 0, 0, 1, 10, 60, 8, 0, 0, 1, 10, 60,
- 1, 0, 0, 1, 11, 76, 3, 0, 0, 1, 11, 52,
- 4, 0, 0, 1, 11, 76, 5, 0, 0, 1, 11, 60,
- 6, 0, 0, 1, 11, 52, 7, 0, 0, 1, 11, 60,
- 8, 0, 0, 1, 11, 52, 1, 0, 0, 1, 12, 76,
- 3, 0, 0, 1, 12, 40, 4, 0, 0, 1, 12, 76,
- 5, 0, 0, 1, 12, 60, 6, 0, 0, 1, 12, 40,
- 7, 0, 0, 1, 12, 60, 8, 0, 0, 1, 12, 40,
- 1, 0, 0, 1, 13, 76, 3, 0, 0, 1, 13, 28,
- 4, 0, 0, 1, 13, 70, 5, 0, 0, 1, 13, 60,
- 6, 0, 0, 1, 13, 28, 7, 0, 0, 1, 13, 60,
- 8, 0, 0, 1, 13, 28, 1, 0, 0, 1, 14, 127,
- 3, 0, 0, 1, 14, 127, 4, 0, 0, 1, 14, 127,
- 5, 0, 0, 1, 14, 127, 6, 0, 0, 1, 14, 127,
- 7, 0, 0, 1, 14, 127, 8, 0, 0, 1, 14, 127,
- 1, 0, 0, 2, 1, 76, 3, 0, 0, 2, 1, 52,
- 4, 0, 0, 2, 1, 76, 5, 0, 0, 2, 1, 60,
- 6, 0, 0, 2, 1, 52, 7, 0, 0, 2, 1, 60,
- 8, 0, 0, 2, 1, 52, 1, 0, 0, 2, 2, 76,
- 3, 0, 0, 2, 2, 60, 4, 0, 0, 2, 2, 76,
- 5, 0, 0, 2, 2, 60, 6, 0, 0, 2, 2, 60,
- 7, 0, 0, 2, 2, 60, 8, 0, 0, 2, 2, 60,
- 1, 0, 0, 2, 3, 76, 3, 0, 0, 2, 3, 64,
- 4, 0, 0, 2, 3, 76, 5, 0, 0, 2, 3, 60,
- 6, 0, 0, 2, 3, 64, 7, 0, 0, 2, 3, 60,
- 8, 0, 0, 2, 3, 64, 1, 0, 0, 2, 4, 76,
- 3, 0, 0, 2, 4, 68, 4, 0, 0, 2, 4, 76,
- 5, 0, 0, 2, 4, 60, 6, 0, 0, 2, 4, 68,
- 7, 0, 0, 2, 4, 60, 8, 0, 0, 2, 4, 68,
- 1, 0, 0, 2, 5, 76, 3, 0, 0, 2, 5, 76,
- 4, 0, 0, 2, 5, 76, 5, 0, 0, 2, 5, 60,
- 6, 0, 0, 2, 5, 76, 7, 0, 0, 2, 5, 60,
- 8, 0, 0, 2, 5, 76, 1, 0, 0, 2, 6, 76,
- 3, 0, 0, 2, 6, 76, 4, 0, 0, 2, 6, 76,
- 5, 0, 0, 2, 6, 60, 6, 0, 0, 2, 6, 76,
- 7, 0, 0, 2, 6, 60, 8, 0, 0, 2, 6, 76,
- 1, 0, 0, 2, 7, 76, 3, 0, 0, 2, 7, 76,
- 4, 0, 0, 2, 7, 76, 5, 0, 0, 2, 7, 60,
- 6, 0, 0, 2, 7, 76, 7, 0, 0, 2, 7, 60,
- 8, 0, 0, 2, 7, 76, 1, 0, 0, 2, 8, 76,
- 3, 0, 0, 2, 8, 68, 4, 0, 0, 2, 8, 76,
- 5, 0, 0, 2, 8, 60, 6, 0, 0, 2, 8, 68,
- 7, 0, 0, 2, 8, 60, 8, 0, 0, 2, 8, 68,
- 1, 0, 0, 2, 9, 76, 3, 0, 0, 2, 9, 64,
- 4, 0, 0, 2, 9, 76, 5, 0, 0, 2, 9, 60,
- 6, 0, 0, 2, 9, 64, 7, 0, 0, 2, 9, 60,
- 8, 0, 0, 2, 9, 64, 1, 0, 0, 2, 10, 76,
- 3, 0, 0, 2, 10, 60, 4, 0, 0, 2, 10, 76,
- 5, 0, 0, 2, 10, 60, 6, 0, 0, 2, 10, 60,
- 7, 0, 0, 2, 10, 60, 8, 0, 0, 2, 10, 60,
- 1, 0, 0, 2, 11, 76, 3, 0, 0, 2, 11, 52,
- 4, 0, 0, 2, 11, 76, 5, 0, 0, 2, 11, 60,
- 6, 0, 0, 2, 11, 52, 7, 0, 0, 2, 11, 60,
- 8, 0, 0, 2, 11, 52, 1, 0, 0, 2, 12, 76,
- 3, 0, 0, 2, 12, 40, 4, 0, 0, 2, 12, 76,
- 5, 0, 0, 2, 12, 60, 6, 0, 0, 2, 12, 40,
- 7, 0, 0, 2, 12, 60, 8, 0, 0, 2, 12, 40,
- 1, 0, 0, 2, 13, 76, 3, 0, 0, 2, 13, 28,
- 4, 0, 0, 2, 13, 72, 5, 0, 0, 2, 13, 60,
- 6, 0, 0, 2, 13, 28, 7, 0, 0, 2, 13, 60,
- 8, 0, 0, 2, 13, 28, 1, 0, 0, 2, 14, 127,
- 3, 0, 0, 2, 14, 127, 4, 0, 0, 2, 14, 127,
- 5, 0, 0, 2, 14, 127, 6, 0, 0, 2, 14, 127,
- 7, 0, 0, 2, 14, 127, 8, 0, 0, 2, 14, 127,
- 1, 0, 0, 3, 1, 66, 3, 0, 0, 3, 1, 52,
- 4, 0, 0, 3, 1, 68, 5, 0, 0, 3, 1, 36,
- 6, 0, 0, 3, 1, 52, 7, 0, 0, 3, 1, 36,
- 8, 0, 0, 3, 1, 52, 1, 0, 0, 3, 2, 66,
- 3, 0, 0, 3, 2, 60, 4, 0, 0, 3, 2, 70,
- 5, 0, 0, 3, 2, 36, 6, 0, 0, 3, 2, 60,
- 7, 0, 0, 3, 2, 36, 8, 0, 0, 3, 2, 60,
- 1, 0, 0, 3, 3, 66, 3, 0, 0, 3, 3, 64,
- 4, 0, 0, 3, 3, 70, 5, 0, 0, 3, 3, 36,
- 6, 0, 0, 3, 3, 64, 7, 0, 0, 3, 3, 36,
- 8, 0, 0, 3, 3, 64, 1, 0, 0, 3, 4, 66,
- 3, 0, 0, 3, 4, 68, 4, 0, 0, 3, 4, 70,
- 5, 0, 0, 3, 4, 36, 6, 0, 0, 3, 4, 68,
- 7, 0, 0, 3, 4, 36, 8, 0, 0, 3, 4, 68,
- 1, 0, 0, 3, 5, 66, 3, 0, 0, 3, 5, 76,
- 4, 0, 0, 3, 5, 70, 5, 0, 0, 3, 5, 36,
- 6, 0, 0, 3, 5, 76, 7, 0, 0, 3, 5, 36,
- 8, 0, 0, 3, 5, 76, 1, 0, 0, 3, 6, 66,
- 3, 0, 0, 3, 6, 76, 4, 0, 0, 3, 6, 70,
- 5, 0, 0, 3, 6, 36, 6, 0, 0, 3, 6, 76,
- 7, 0, 0, 3, 6, 36, 8, 0, 0, 3, 6, 76,
- 1, 0, 0, 3, 7, 66, 3, 0, 0, 3, 7, 76,
- 4, 0, 0, 3, 7, 70, 5, 0, 0, 3, 7, 36,
- 6, 0, 0, 3, 7, 76, 7, 0, 0, 3, 7, 36,
- 8, 0, 0, 3, 7, 76, 1, 0, 0, 3, 8, 66,
- 3, 0, 0, 3, 8, 68, 4, 0, 0, 3, 8, 70,
- 5, 0, 0, 3, 8, 36, 6, 0, 0, 3, 8, 68,
- 7, 0, 0, 3, 8, 36, 8, 0, 0, 3, 8, 68,
- 1, 0, 0, 3, 9, 66, 3, 0, 0, 3, 9, 64,
- 4, 0, 0, 3, 9, 70, 5, 0, 0, 3, 9, 36,
- 6, 0, 0, 3, 9, 64, 7, 0, 0, 3, 9, 36,
- 8, 0, 0, 3, 9, 64, 1, 0, 0, 3, 10, 66,
- 3, 0, 0, 3, 10, 60, 4, 0, 0, 3, 10, 70,
- 5, 0, 0, 3, 10, 36, 6, 0, 0, 3, 10, 60,
- 7, 0, 0, 3, 10, 36, 8, 0, 0, 3, 10, 60,
- 1, 0, 0, 3, 11, 66, 3, 0, 0, 3, 11, 52,
- 4, 0, 0, 3, 11, 70, 5, 0, 0, 3, 11, 36,
- 6, 0, 0, 3, 11, 52, 7, 0, 0, 3, 11, 36,
- 8, 0, 0, 3, 11, 52, 1, 0, 0, 3, 12, 66,
- 3, 0, 0, 3, 12, 40, 4, 0, 0, 3, 12, 70,
- 5, 0, 0, 3, 12, 36, 6, 0, 0, 3, 12, 40,
- 7, 0, 0, 3, 12, 36, 8, 0, 0, 3, 12, 40,
- 1, 0, 0, 3, 13, 66, 3, 0, 0, 3, 13, 28,
- 4, 0, 0, 3, 13, 62, 5, 0, 0, 3, 13, 36,
- 6, 0, 0, 3, 13, 28, 7, 0, 0, 3, 13, 36,
- 8, 0, 0, 3, 13, 28, 1, 0, 0, 3, 14, 127,
- 3, 0, 0, 3, 14, 127, 4, 0, 0, 3, 14, 127,
- 5, 0, 0, 3, 14, 127, 6, 0, 0, 3, 14, 127,
- 7, 0, 0, 3, 14, 127, 8, 0, 0, 3, 14, 127,
- 1, 0, 1, 2, 1, 127, 3, 0, 1, 2, 1, 127,
- 4, 0, 1, 2, 1, 127, 5, 0, 1, 2, 1, 127,
- 6, 0, 1, 2, 1, 127, 7, 0, 1, 2, 1, 127,
- 8, 0, 1, 2, 1, 127, 1, 0, 1, 2, 2, 127,
- 3, 0, 1, 2, 2, 127, 4, 0, 1, 2, 2, 127,
- 5, 0, 1, 2, 2, 127, 6, 0, 1, 2, 2, 127,
- 7, 0, 1, 2, 2, 127, 8, 0, 1, 2, 2, 127,
- 1, 0, 1, 2, 3, 72, 3, 0, 1, 2, 3, 52,
- 4, 0, 1, 2, 3, 72, 5, 0, 1, 2, 3, 60,
- 6, 0, 1, 2, 3, 52, 7, 0, 1, 2, 3, 60,
- 8, 0, 1, 2, 3, 52, 1, 0, 1, 2, 4, 72,
- 3, 0, 1, 2, 4, 52, 4, 0, 1, 2, 4, 72,
- 5, 0, 1, 2, 4, 60, 6, 0, 1, 2, 4, 52,
- 7, 0, 1, 2, 4, 60, 8, 0, 1, 2, 4, 52,
- 1, 0, 1, 2, 5, 72, 3, 0, 1, 2, 5, 60,
- 4, 0, 1, 2, 5, 72, 5, 0, 1, 2, 5, 60,
- 6, 0, 1, 2, 5, 60, 7, 0, 1, 2, 5, 60,
- 8, 0, 1, 2, 5, 60, 1, 0, 1, 2, 6, 72,
- 3, 0, 1, 2, 6, 64, 4, 0, 1, 2, 6, 72,
- 5, 0, 1, 2, 6, 60, 6, 0, 1, 2, 6, 64,
- 7, 0, 1, 2, 6, 60, 8, 0, 1, 2, 6, 64,
- 1, 0, 1, 2, 7, 72, 3, 0, 1, 2, 7, 60,
- 4, 0, 1, 2, 7, 72, 5, 0, 1, 2, 7, 60,
- 6, 0, 1, 2, 7, 60, 7, 0, 1, 2, 7, 60,
- 8, 0, 1, 2, 7, 60, 1, 0, 1, 2, 8, 72,
- 3, 0, 1, 2, 8, 52, 4, 0, 1, 2, 8, 72,
- 5, 0, 1, 2, 8, 60, 6, 0, 1, 2, 8, 52,
- 7, 0, 1, 2, 8, 60, 8, 0, 1, 2, 8, 52,
- 1, 0, 1, 2, 9, 72, 3, 0, 1, 2, 9, 52,
- 4, 0, 1, 2, 9, 72, 5, 0, 1, 2, 9, 60,
- 6, 0, 1, 2, 9, 52, 7, 0, 1, 2, 9, 60,
- 8, 0, 1, 2, 9, 52, 1, 0, 1, 2, 10, 72,
- 3, 0, 1, 2, 10, 40, 4, 0, 1, 2, 10, 72,
- 5, 0, 1, 2, 10, 60, 6, 0, 1, 2, 10, 40,
- 7, 0, 1, 2, 10, 60, 8, 0, 1, 2, 10, 40,
- 1, 0, 1, 2, 11, 72, 3, 0, 1, 2, 11, 28,
- 4, 0, 1, 2, 11, 70, 5, 0, 1, 2, 11, 60,
- 6, 0, 1, 2, 11, 28, 7, 0, 1, 2, 11, 60,
- 8, 0, 1, 2, 11, 28, 1, 0, 1, 2, 12, 127,
- 3, 0, 1, 2, 12, 127, 4, 0, 1, 2, 12, 127,
- 5, 0, 1, 2, 12, 127, 6, 0, 1, 2, 12, 127,
- 7, 0, 1, 2, 12, 127, 8, 0, 1, 2, 12, 127,
- 1, 0, 1, 2, 13, 127, 3, 0, 1, 2, 13, 127,
- 4, 0, 1, 2, 13, 127, 5, 0, 1, 2, 13, 127,
- 6, 0, 1, 2, 13, 127, 7, 0, 1, 2, 13, 127,
- 8, 0, 1, 2, 13, 127, 1, 0, 1, 2, 14, 127,
- 3, 0, 1, 2, 14, 127, 4, 0, 1, 2, 14, 127,
- 5, 0, 1, 2, 14, 127, 6, 0, 1, 2, 14, 127,
- 7, 0, 1, 2, 14, 127, 8, 0, 1, 2, 14, 127,
- 1, 0, 1, 3, 1, 127, 3, 0, 1, 3, 1, 127,
- 4, 0, 1, 3, 1, 127, 5, 0, 1, 3, 1, 127,
- 6, 0, 1, 3, 1, 127, 7, 0, 1, 3, 1, 127,
- 8, 0, 1, 3, 1, 127, 1, 0, 1, 3, 2, 127,
- 3, 0, 1, 3, 2, 127, 4, 0, 1, 3, 2, 127,
- 5, 0, 1, 3, 2, 127, 6, 0, 1, 3, 2, 127,
- 7, 0, 1, 3, 2, 127, 8, 0, 1, 3, 2, 127,
- 1, 0, 1, 3, 3, 66, 3, 0, 1, 3, 3, 48,
- 4, 0, 1, 3, 3, 66, 5, 0, 1, 3, 3, 36,
- 6, 0, 1, 3, 3, 48, 7, 0, 1, 3, 3, 36,
- 8, 0, 1, 3, 3, 48, 1, 0, 1, 3, 4, 66,
- 3, 0, 1, 3, 4, 48, 4, 0, 1, 3, 4, 70,
- 5, 0, 1, 3, 4, 36, 6, 0, 1, 3, 4, 48,
- 7, 0, 1, 3, 4, 36, 8, 0, 1, 3, 4, 48,
- 1, 0, 1, 3, 5, 66, 3, 0, 1, 3, 5, 60,
- 4, 0, 1, 3, 5, 70, 5, 0, 1, 3, 5, 36,
- 6, 0, 1, 3, 5, 60, 7, 0, 1, 3, 5, 36,
- 8, 0, 1, 3, 5, 60, 1, 0, 1, 3, 6, 66,
- 3, 0, 1, 3, 6, 64, 4, 0, 1, 3, 6, 70,
- 5, 0, 1, 3, 6, 36, 6, 0, 1, 3, 6, 64,
- 7, 0, 1, 3, 6, 36, 8, 0, 1, 3, 6, 64,
- 1, 0, 1, 3, 7, 66, 3, 0, 1, 3, 7, 60,
- 4, 0, 1, 3, 7, 70, 5, 0, 1, 3, 7, 36,
- 6, 0, 1, 3, 7, 60, 7, 0, 1, 3, 7, 36,
- 8, 0, 1, 3, 7, 60, 1, 0, 1, 3, 8, 66,
- 3, 0, 1, 3, 8, 52, 4, 0, 1, 3, 8, 70,
- 5, 0, 1, 3, 8, 36, 6, 0, 1, 3, 8, 52,
- 7, 0, 1, 3, 8, 36, 8, 0, 1, 3, 8, 52,
- 1, 0, 1, 3, 9, 66, 3, 0, 1, 3, 9, 52,
- 4, 0, 1, 3, 9, 70, 5, 0, 1, 3, 9, 36,
- 6, 0, 1, 3, 9, 52, 7, 0, 1, 3, 9, 36,
- 8, 0, 1, 3, 9, 52, 1, 0, 1, 3, 10, 66,
- 3, 0, 1, 3, 10, 40, 4, 0, 1, 3, 10, 70,
- 5, 0, 1, 3, 10, 36, 6, 0, 1, 3, 10, 40,
- 7, 0, 1, 3, 10, 36, 8, 0, 1, 3, 10, 40,
- 1, 0, 1, 3, 11, 66, 3, 0, 1, 3, 11, 26,
- 4, 0, 1, 3, 11, 66, 5, 0, 1, 3, 11, 36,
- 6, 0, 1, 3, 11, 26, 7, 0, 1, 3, 11, 36,
- 8, 0, 1, 3, 11, 26, 1, 0, 1, 3, 12, 127,
- 3, 0, 1, 3, 12, 127, 4, 0, 1, 3, 12, 127,
- 5, 0, 1, 3, 12, 127, 6, 0, 1, 3, 12, 127,
- 7, 0, 1, 3, 12, 127, 8, 0, 1, 3, 12, 127,
- 1, 0, 1, 3, 13, 127, 3, 0, 1, 3, 13, 127,
- 4, 0, 1, 3, 13, 127, 5, 0, 1, 3, 13, 127,
- 6, 0, 1, 3, 13, 127, 7, 0, 1, 3, 13, 127,
- 8, 0, 1, 3, 13, 127, 1, 0, 1, 3, 14, 127,
- 3, 0, 1, 3, 14, 127, 4, 0, 1, 3, 14, 127,
- 5, 0, 1, 3, 14, 127, 6, 0, 1, 3, 14, 127,
- 7, 0, 1, 3, 14, 127, 8, 0, 1, 3, 14, 127,
- 1, 1, 0, 1, 36, 60, 3, 1, 0, 1, 36, 62,
- 4, 1, 0, 1, 36, 76, 5, 1, 0, 1, 36, 62,
- 6, 1, 0, 1, 36, 64, 7, 1, 0, 1, 36, 54,
- 8, 1, 0, 1, 36, 62, 1, 1, 0, 1, 40, 62,
- 3, 1, 0, 1, 40, 62, 4, 1, 0, 1, 40, 76,
- 5, 1, 0, 1, 40, 62, 6, 1, 0, 1, 40, 64,
- 7, 1, 0, 1, 40, 54, 8, 1, 0, 1, 40, 62,
- 1, 1, 0, 1, 44, 62, 3, 1, 0, 1, 44, 62,
- 4, 1, 0, 1, 44, 76, 5, 1, 0, 1, 44, 62,
- 6, 1, 0, 1, 44, 64, 7, 1, 0, 1, 44, 54,
- 8, 1, 0, 1, 44, 62, 1, 1, 0, 1, 48, 62,
- 3, 1, 0, 1, 48, 62, 4, 1, 0, 1, 48, 76,
- 5, 1, 0, 1, 48, 62, 6, 1, 0, 1, 48, 64,
- 7, 1, 0, 1, 48, 54, 8, 1, 0, 1, 48, 62,
- 1, 1, 0, 1, 52, 62, 3, 1, 0, 1, 52, 64,
- 4, 1, 0, 1, 52, 76, 5, 1, 0, 1, 52, 62,
- 6, 1, 0, 1, 52, 76, 7, 1, 0, 1, 52, 54,
- 8, 1, 0, 1, 52, 76, 1, 1, 0, 1, 56, 62,
- 3, 1, 0, 1, 56, 64, 4, 1, 0, 1, 56, 76,
- 5, 1, 0, 1, 56, 62, 6, 1, 0, 1, 56, 76,
- 7, 1, 0, 1, 56, 54, 8, 1, 0, 1, 56, 76,
- 1, 1, 0, 1, 60, 62, 3, 1, 0, 1, 60, 64,
- 4, 1, 0, 1, 60, 76, 5, 1, 0, 1, 60, 62,
- 6, 1, 0, 1, 60, 76, 7, 1, 0, 1, 60, 54,
- 8, 1, 0, 1, 60, 76, 1, 1, 0, 1, 64, 60,
- 3, 1, 0, 1, 64, 64, 4, 1, 0, 1, 64, 76,
- 5, 1, 0, 1, 64, 62, 6, 1, 0, 1, 64, 74,
- 7, 1, 0, 1, 64, 54, 8, 1, 0, 1, 64, 74,
- 1, 1, 0, 1, 100, 76, 3, 1, 0, 1, 100, 72,
- 4, 1, 0, 1, 100, 76, 5, 1, 0, 1, 100, 62,
- 6, 1, 0, 1, 100, 72, 7, 1, 0, 1, 100, 54,
- 8, 1, 0, 1, 100, 72, 1, 1, 0, 1, 104, 76,
- 3, 1, 0, 1, 104, 76, 4, 1, 0, 1, 104, 76,
- 5, 1, 0, 1, 104, 62, 6, 1, 0, 1, 104, 76,
- 7, 1, 0, 1, 104, 54, 8, 1, 0, 1, 104, 76,
- 1, 1, 0, 1, 108, 76, 3, 1, 0, 1, 108, 76,
- 4, 1, 0, 1, 108, 76, 5, 1, 0, 1, 108, 62,
- 6, 1, 0, 1, 108, 76, 7, 1, 0, 1, 108, 54,
- 8, 1, 0, 1, 108, 76, 1, 1, 0, 1, 112, 76,
- 3, 1, 0, 1, 112, 76, 4, 1, 0, 1, 112, 76,
- 5, 1, 0, 1, 112, 62, 6, 1, 0, 1, 112, 76,
- 7, 1, 0, 1, 112, 54, 8, 1, 0, 1, 112, 76,
- 1, 1, 0, 1, 116, 76, 3, 1, 0, 1, 116, 76,
- 4, 1, 0, 1, 116, 76, 5, 1, 0, 1, 116, 62,
- 6, 1, 0, 1, 116, 76, 7, 1, 0, 1, 116, 54,
- 8, 1, 0, 1, 116, 76, 1, 1, 0, 1, 120, 76,
- 3, 1, 0, 1, 120, 127, 4, 1, 0, 1, 120, 76,
- 5, 1, 0, 1, 120, 127, 6, 1, 0, 1, 120, 76,
- 7, 1, 0, 1, 120, 54, 8, 1, 0, 1, 120, 76,
- 1, 1, 0, 1, 124, 76, 3, 1, 0, 1, 124, 127,
- 4, 1, 0, 1, 124, 76, 5, 1, 0, 1, 124, 127,
- 6, 1, 0, 1, 124, 76, 7, 1, 0, 1, 124, 54,
- 8, 1, 0, 1, 124, 76, 1, 1, 0, 1, 128, 76,
- 3, 1, 0, 1, 128, 127, 4, 1, 0, 1, 128, 76,
- 5, 1, 0, 1, 128, 127, 6, 1, 0, 1, 128, 76,
- 7, 1, 0, 1, 128, 54, 8, 1, 0, 1, 128, 76,
- 1, 1, 0, 1, 132, 76, 3, 1, 0, 1, 132, 76,
- 4, 1, 0, 1, 132, 76, 5, 1, 0, 1, 132, 62,
- 6, 1, 0, 1, 132, 76, 7, 1, 0, 1, 132, 54,
- 8, 1, 0, 1, 132, 76, 1, 1, 0, 1, 136, 76,
- 3, 1, 0, 1, 136, 76, 4, 1, 0, 1, 136, 76,
- 5, 1, 0, 1, 136, 62, 6, 1, 0, 1, 136, 76,
- 7, 1, 0, 1, 136, 127, 8, 1, 0, 1, 136, 76,
- 1, 1, 0, 1, 140, 76, 3, 1, 0, 1, 140, 72,
- 4, 1, 0, 1, 140, 76, 5, 1, 0, 1, 140, 62,
- 6, 1, 0, 1, 140, 72, 7, 1, 0, 1, 140, 127,
- 8, 1, 0, 1, 140, 72, 1, 1, 0, 1, 144, 127,
- 3, 1, 0, 1, 144, 76, 4, 1, 0, 1, 144, 76,
- 5, 1, 0, 1, 144, 127, 6, 1, 0, 1, 144, 76,
- 7, 1, 0, 1, 144, 127, 8, 1, 0, 1, 144, 76,
- 1, 1, 0, 1, 149, 127, 3, 1, 0, 1, 149, 76,
- 4, 1, 0, 1, 149, 74, 5, 1, 0, 1, 149, 76,
- 6, 1, 0, 1, 149, 76, 7, 1, 0, 1, 149, 54,
- 8, 1, 0, 1, 149, 76, 1, 1, 0, 1, 153, 127,
- 3, 1, 0, 1, 153, 76, 4, 1, 0, 1, 153, 74,
- 5, 1, 0, 1, 153, 76, 6, 1, 0, 1, 153, 76,
- 7, 1, 0, 1, 153, 54, 8, 1, 0, 1, 153, 76,
- 1, 1, 0, 1, 157, 127, 3, 1, 0, 1, 157, 76,
- 4, 1, 0, 1, 157, 74, 5, 1, 0, 1, 157, 76,
- 6, 1, 0, 1, 157, 76, 7, 1, 0, 1, 157, 54,
- 8, 1, 0, 1, 157, 76, 1, 1, 0, 1, 161, 127,
- 3, 1, 0, 1, 161, 76, 4, 1, 0, 1, 161, 74,
- 5, 1, 0, 1, 161, 76, 6, 1, 0, 1, 161, 76,
- 7, 1, 0, 1, 161, 54, 8, 1, 0, 1, 161, 76,
- 1, 1, 0, 1, 165, 127, 3, 1, 0, 1, 165, 76,
- 4, 1, 0, 1, 165, 74, 5, 1, 0, 1, 165, 76,
- 6, 1, 0, 1, 165, 76, 7, 1, 0, 1, 165, 54,
- 8, 1, 0, 1, 165, 76, 1, 1, 0, 2, 36, 62,
- 3, 1, 0, 2, 36, 62, 4, 1, 0, 2, 36, 76,
- 5, 1, 0, 2, 36, 62, 6, 1, 0, 2, 36, 64,
- 7, 1, 0, 2, 36, 54, 8, 1, 0, 2, 36, 62,
- 1, 1, 0, 2, 40, 62, 3, 1, 0, 2, 40, 62,
- 4, 1, 0, 2, 40, 76, 5, 1, 0, 2, 40, 62,
- 6, 1, 0, 2, 40, 64, 7, 1, 0, 2, 40, 54,
- 8, 1, 0, 2, 40, 62, 1, 1, 0, 2, 44, 62,
- 3, 1, 0, 2, 44, 62, 4, 1, 0, 2, 44, 76,
- 5, 1, 0, 2, 44, 62, 6, 1, 0, 2, 44, 64,
- 7, 1, 0, 2, 44, 54, 8, 1, 0, 2, 44, 62,
- 1, 1, 0, 2, 48, 62, 3, 1, 0, 2, 48, 62,
- 4, 1, 0, 2, 48, 76, 5, 1, 0, 2, 48, 62,
- 6, 1, 0, 2, 48, 64, 7, 1, 0, 2, 48, 54,
- 8, 1, 0, 2, 48, 62, 1, 1, 0, 2, 52, 62,
- 3, 1, 0, 2, 52, 64, 4, 1, 0, 2, 52, 76,
- 5, 1, 0, 2, 52, 62, 6, 1, 0, 2, 52, 76,
- 7, 1, 0, 2, 52, 54, 8, 1, 0, 2, 52, 76,
- 1, 1, 0, 2, 56, 62, 3, 1, 0, 2, 56, 64,
- 4, 1, 0, 2, 56, 76, 5, 1, 0, 2, 56, 62,
- 6, 1, 0, 2, 56, 76, 7, 1, 0, 2, 56, 54,
- 8, 1, 0, 2, 56, 76, 1, 1, 0, 2, 60, 62,
- 3, 1, 0, 2, 60, 64, 4, 1, 0, 2, 60, 76,
- 5, 1, 0, 2, 60, 62, 6, 1, 0, 2, 60, 76,
- 7, 1, 0, 2, 60, 54, 8, 1, 0, 2, 60, 76,
- 1, 1, 0, 2, 64, 60, 3, 1, 0, 2, 64, 64,
- 4, 1, 0, 2, 64, 74, 5, 1, 0, 2, 64, 62,
- 6, 1, 0, 2, 64, 74, 7, 1, 0, 2, 64, 54,
- 8, 1, 0, 2, 64, 74, 1, 1, 0, 2, 100, 76,
- 3, 1, 0, 2, 100, 70, 4, 1, 0, 2, 100, 76,
- 5, 1, 0, 2, 100, 62, 6, 1, 0, 2, 100, 70,
- 7, 1, 0, 2, 100, 54, 8, 1, 0, 2, 100, 70,
- 1, 1, 0, 2, 104, 76, 3, 1, 0, 2, 104, 76,
- 4, 1, 0, 2, 104, 76, 5, 1, 0, 2, 104, 62,
- 6, 1, 0, 2, 104, 76, 7, 1, 0, 2, 104, 54,
- 8, 1, 0, 2, 104, 76, 1, 1, 0, 2, 108, 76,
- 3, 1, 0, 2, 108, 76, 4, 1, 0, 2, 108, 76,
- 5, 1, 0, 2, 108, 62, 6, 1, 0, 2, 108, 76,
- 7, 1, 0, 2, 108, 54, 8, 1, 0, 2, 108, 76,
- 1, 1, 0, 2, 112, 76, 3, 1, 0, 2, 112, 76,
- 4, 1, 0, 2, 112, 76, 5, 1, 0, 2, 112, 62,
- 6, 1, 0, 2, 112, 76, 7, 1, 0, 2, 112, 54,
- 8, 1, 0, 2, 112, 76, 1, 1, 0, 2, 116, 76,
- 3, 1, 0, 2, 116, 76, 4, 1, 0, 2, 116, 76,
- 5, 1, 0, 2, 116, 62, 6, 1, 0, 2, 116, 76,
- 7, 1, 0, 2, 116, 54, 8, 1, 0, 2, 116, 76,
- 1, 1, 0, 2, 120, 76, 3, 1, 0, 2, 120, 127,
- 4, 1, 0, 2, 120, 76, 5, 1, 0, 2, 120, 127,
- 6, 1, 0, 2, 120, 76, 7, 1, 0, 2, 120, 54,
- 8, 1, 0, 2, 120, 76, 1, 1, 0, 2, 124, 76,
- 3, 1, 0, 2, 124, 127, 4, 1, 0, 2, 124, 76,
- 5, 1, 0, 2, 124, 127, 6, 1, 0, 2, 124, 76,
- 7, 1, 0, 2, 124, 54, 8, 1, 0, 2, 124, 76,
- 1, 1, 0, 2, 128, 76, 3, 1, 0, 2, 128, 127,
- 4, 1, 0, 2, 128, 76, 5, 1, 0, 2, 128, 127,
- 6, 1, 0, 2, 128, 76, 7, 1, 0, 2, 128, 54,
- 8, 1, 0, 2, 128, 76, 1, 1, 0, 2, 132, 76,
- 3, 1, 0, 2, 132, 76, 4, 1, 0, 2, 132, 76,
- 5, 1, 0, 2, 132, 62, 6, 1, 0, 2, 132, 76,
- 7, 1, 0, 2, 132, 54, 8, 1, 0, 2, 132, 76,
- 1, 1, 0, 2, 136, 76, 3, 1, 0, 2, 136, 76,
- 4, 1, 0, 2, 136, 76, 5, 1, 0, 2, 136, 62,
- 6, 1, 0, 2, 136, 76, 7, 1, 0, 2, 136, 127,
- 8, 1, 0, 2, 136, 76, 1, 1, 0, 2, 140, 76,
- 3, 1, 0, 2, 140, 70, 4, 1, 0, 2, 140, 76,
- 5, 1, 0, 2, 140, 62, 6, 1, 0, 2, 140, 70,
- 7, 1, 0, 2, 140, 127, 8, 1, 0, 2, 140, 70,
- 1, 1, 0, 2, 144, 127, 3, 1, 0, 2, 144, 76,
- 4, 1, 0, 2, 144, 76, 5, 1, 0, 2, 144, 127,
- 6, 1, 0, 2, 144, 76, 7, 1, 0, 2, 144, 127,
- 8, 1, 0, 2, 144, 76, 1, 1, 0, 2, 149, 127,
- 3, 1, 0, 2, 149, 76, 4, 1, 0, 2, 149, 74,
- 5, 1, 0, 2, 149, 76, 6, 1, 0, 2, 149, 76,
- 7, 1, 0, 2, 149, 54, 8, 1, 0, 2, 149, 76,
- 1, 1, 0, 2, 153, 127, 3, 1, 0, 2, 153, 76,
- 4, 1, 0, 2, 153, 74, 5, 1, 0, 2, 153, 76,
- 6, 1, 0, 2, 153, 76, 7, 1, 0, 2, 153, 54,
- 8, 1, 0, 2, 153, 76, 1, 1, 0, 2, 157, 127,
- 3, 1, 0, 2, 157, 76, 4, 1, 0, 2, 157, 74,
- 5, 1, 0, 2, 157, 76, 6, 1, 0, 2, 157, 76,
- 7, 1, 0, 2, 157, 54, 8, 1, 0, 2, 157, 76,
- 1, 1, 0, 2, 161, 127, 3, 1, 0, 2, 161, 76,
- 4, 1, 0, 2, 161, 74, 5, 1, 0, 2, 161, 76,
- 6, 1, 0, 2, 161, 76, 7, 1, 0, 2, 161, 54,
- 8, 1, 0, 2, 161, 76, 1, 1, 0, 2, 165, 127,
- 3, 1, 0, 2, 165, 76, 4, 1, 0, 2, 165, 74,
- 5, 1, 0, 2, 165, 76, 6, 1, 0, 2, 165, 76,
- 7, 1, 0, 2, 165, 54, 8, 1, 0, 2, 165, 76,
- 1, 1, 0, 3, 36, 50, 3, 1, 0, 3, 36, 38,
- 4, 1, 0, 3, 36, 66, 5, 1, 0, 3, 36, 38,
- 6, 1, 0, 3, 36, 52, 7, 1, 0, 3, 36, 30,
- 8, 1, 0, 3, 36, 50, 1, 1, 0, 3, 40, 50,
- 3, 1, 0, 3, 40, 38, 4, 1, 0, 3, 40, 66,
- 5, 1, 0, 3, 40, 38, 6, 1, 0, 3, 40, 52,
- 7, 1, 0, 3, 40, 30, 8, 1, 0, 3, 40, 50,
- 1, 1, 0, 3, 44, 50, 3, 1, 0, 3, 44, 38,
- 4, 1, 0, 3, 44, 66, 5, 1, 0, 3, 44, 38,
- 6, 1, 0, 3, 44, 52, 7, 1, 0, 3, 44, 30,
- 8, 1, 0, 3, 44, 50, 1, 1, 0, 3, 48, 50,
- 3, 1, 0, 3, 48, 38, 4, 1, 0, 3, 48, 66,
- 5, 1, 0, 3, 48, 38, 6, 1, 0, 3, 48, 52,
- 7, 1, 0, 3, 48, 30, 8, 1, 0, 3, 48, 50,
- 1, 1, 0, 3, 52, 50, 3, 1, 0, 3, 52, 40,
- 4, 1, 0, 3, 52, 66, 5, 1, 0, 3, 52, 38,
- 6, 1, 0, 3, 52, 68, 7, 1, 0, 3, 52, 30,
- 8, 1, 0, 3, 52, 68, 1, 1, 0, 3, 56, 50,
- 3, 1, 0, 3, 56, 40, 4, 1, 0, 3, 56, 66,
- 5, 1, 0, 3, 56, 38, 6, 1, 0, 3, 56, 68,
- 7, 1, 0, 3, 56, 30, 8, 1, 0, 3, 56, 68,
- 1, 1, 0, 3, 60, 50, 3, 1, 0, 3, 60, 40,
- 4, 1, 0, 3, 60, 66, 5, 1, 0, 3, 60, 38,
- 6, 1, 0, 3, 60, 66, 7, 1, 0, 3, 60, 30,
- 8, 1, 0, 3, 60, 66, 1, 1, 0, 3, 64, 50,
- 3, 1, 0, 3, 64, 40, 4, 1, 0, 3, 64, 66,
- 5, 1, 0, 3, 64, 38, 6, 1, 0, 3, 64, 68,
- 7, 1, 0, 3, 64, 30, 8, 1, 0, 3, 64, 68,
- 1, 1, 0, 3, 100, 70, 3, 1, 0, 3, 100, 60,
- 4, 1, 0, 3, 100, 64, 5, 1, 0, 3, 100, 38,
- 6, 1, 0, 3, 100, 60, 7, 1, 0, 3, 100, 30,
- 8, 1, 0, 3, 100, 60, 1, 1, 0, 3, 104, 70,
- 3, 1, 0, 3, 104, 68, 4, 1, 0, 3, 104, 64,
- 5, 1, 0, 3, 104, 38, 6, 1, 0, 3, 104, 68,
- 7, 1, 0, 3, 104, 30, 8, 1, 0, 3, 104, 68,
- 1, 1, 0, 3, 108, 70, 3, 1, 0, 3, 108, 68,
- 4, 1, 0, 3, 108, 64, 5, 1, 0, 3, 108, 38,
- 6, 1, 0, 3, 108, 68, 7, 1, 0, 3, 108, 30,
- 8, 1, 0, 3, 108, 68, 1, 1, 0, 3, 112, 70,
- 3, 1, 0, 3, 112, 68, 4, 1, 0, 3, 112, 64,
- 5, 1, 0, 3, 112, 38, 6, 1, 0, 3, 112, 68,
- 7, 1, 0, 3, 112, 30, 8, 1, 0, 3, 112, 68,
- 1, 1, 0, 3, 116, 70, 3, 1, 0, 3, 116, 68,
- 4, 1, 0, 3, 116, 64, 5, 1, 0, 3, 116, 38,
- 6, 1, 0, 3, 116, 68, 7, 1, 0, 3, 116, 30,
- 8, 1, 0, 3, 116, 68, 1, 1, 0, 3, 120, 70,
- 3, 1, 0, 3, 120, 127, 4, 1, 0, 3, 120, 64,
- 5, 1, 0, 3, 120, 127, 6, 1, 0, 3, 120, 68,
- 7, 1, 0, 3, 120, 30, 8, 1, 0, 3, 120, 68,
- 1, 1, 0, 3, 124, 70, 3, 1, 0, 3, 124, 127,
- 4, 1, 0, 3, 124, 64, 5, 1, 0, 3, 124, 127,
- 6, 1, 0, 3, 124, 68, 7, 1, 0, 3, 124, 30,
- 8, 1, 0, 3, 124, 68, 1, 1, 0, 3, 128, 70,
- 3, 1, 0, 3, 128, 127, 4, 1, 0, 3, 128, 64,
- 5, 1, 0, 3, 128, 127, 6, 1, 0, 3, 128, 68,
- 7, 1, 0, 3, 128, 30, 8, 1, 0, 3, 128, 68,
- 1, 1, 0, 3, 132, 70, 3, 1, 0, 3, 132, 68,
- 4, 1, 0, 3, 132, 64, 5, 1, 0, 3, 132, 38,
- 6, 1, 0, 3, 132, 68, 7, 1, 0, 3, 132, 30,
- 8, 1, 0, 3, 132, 68, 1, 1, 0, 3, 136, 70,
- 3, 1, 0, 3, 136, 68, 4, 1, 0, 3, 136, 64,
- 5, 1, 0, 3, 136, 38, 6, 1, 0, 3, 136, 68,
- 7, 1, 0, 3, 136, 127, 8, 1, 0, 3, 136, 68,
- 1, 1, 0, 3, 140, 70, 3, 1, 0, 3, 140, 60,
- 4, 1, 0, 3, 140, 64, 5, 1, 0, 3, 140, 38,
- 6, 1, 0, 3, 140, 60, 7, 1, 0, 3, 140, 127,
- 8, 1, 0, 3, 140, 60, 1, 1, 0, 3, 144, 127,
- 3, 1, 0, 3, 144, 68, 4, 1, 0, 3, 144, 64,
- 5, 1, 0, 3, 144, 127, 6, 1, 0, 3, 144, 68,
- 7, 1, 0, 3, 144, 127, 8, 1, 0, 3, 144, 68,
- 1, 1, 0, 3, 149, 127, 3, 1, 0, 3, 149, 76,
- 4, 1, 0, 3, 149, 60, 5, 1, 0, 3, 149, 76,
- 6, 1, 0, 3, 149, 76, 7, 1, 0, 3, 149, 30,
- 8, 1, 0, 3, 149, 72, 1, 1, 0, 3, 153, 127,
- 3, 1, 0, 3, 153, 76, 4, 1, 0, 3, 153, 60,
- 5, 1, 0, 3, 153, 76, 6, 1, 0, 3, 153, 76,
- 7, 1, 0, 3, 153, 30, 8, 1, 0, 3, 153, 76,
- 1, 1, 0, 3, 157, 127, 3, 1, 0, 3, 157, 76,
- 4, 1, 0, 3, 157, 60, 5, 1, 0, 3, 157, 76,
- 6, 1, 0, 3, 157, 76, 7, 1, 0, 3, 157, 30,
- 8, 1, 0, 3, 157, 76, 1, 1, 0, 3, 161, 127,
- 3, 1, 0, 3, 161, 76, 4, 1, 0, 3, 161, 60,
- 5, 1, 0, 3, 161, 76, 6, 1, 0, 3, 161, 76,
- 7, 1, 0, 3, 161, 30, 8, 1, 0, 3, 161, 76,
- 1, 1, 0, 3, 165, 127, 3, 1, 0, 3, 165, 76,
- 4, 1, 0, 3, 165, 60, 5, 1, 0, 3, 165, 76,
- 6, 1, 0, 3, 165, 76, 7, 1, 0, 3, 165, 30,
- 8, 1, 0, 3, 165, 76, 1, 1, 1, 2, 38, 62,
- 3, 1, 1, 2, 38, 64, 4, 1, 1, 2, 38, 72,
- 5, 1, 1, 2, 38, 64, 6, 1, 1, 2, 38, 64,
- 7, 1, 1, 2, 38, 54, 8, 1, 1, 2, 38, 62,
- 1, 1, 1, 2, 46, 62, 3, 1, 1, 2, 46, 64,
- 4, 1, 1, 2, 46, 72, 5, 1, 1, 2, 46, 64,
- 6, 1, 1, 2, 46, 64, 7, 1, 1, 2, 46, 54,
- 8, 1, 1, 2, 46, 62, 1, 1, 1, 2, 54, 62,
- 3, 1, 1, 2, 54, 64, 4, 1, 1, 2, 54, 72,
- 5, 1, 1, 2, 54, 64, 6, 1, 1, 2, 54, 72,
- 7, 1, 1, 2, 54, 54, 8, 1, 1, 2, 54, 72,
- 1, 1, 1, 2, 62, 62, 3, 1, 1, 2, 62, 64,
- 4, 1, 1, 2, 62, 70, 5, 1, 1, 2, 62, 64,
- 6, 1, 1, 2, 62, 64, 7, 1, 1, 2, 62, 54,
- 8, 1, 1, 2, 62, 64, 1, 1, 1, 2, 102, 72,
- 3, 1, 1, 2, 102, 58, 4, 1, 1, 2, 102, 72,
- 5, 1, 1, 2, 102, 64, 6, 1, 1, 2, 102, 58,
- 7, 1, 1, 2, 102, 54, 8, 1, 1, 2, 102, 58,
- 1, 1, 1, 2, 110, 72, 3, 1, 1, 2, 110, 72,
- 4, 1, 1, 2, 110, 72, 5, 1, 1, 2, 110, 64,
- 6, 1, 1, 2, 110, 72, 7, 1, 1, 2, 110, 54,
- 8, 1, 1, 2, 110, 72, 1, 1, 1, 2, 118, 72,
- 3, 1, 1, 2, 118, 127, 4, 1, 1, 2, 118, 72,
- 5, 1, 1, 2, 118, 127, 6, 1, 1, 2, 118, 72,
- 7, 1, 1, 2, 118, 54, 8, 1, 1, 2, 118, 72,
- 1, 1, 1, 2, 126, 72, 3, 1, 1, 2, 126, 127,
- 4, 1, 1, 2, 126, 72, 5, 1, 1, 2, 126, 127,
- 6, 1, 1, 2, 126, 72, 7, 1, 1, 2, 126, 54,
- 8, 1, 1, 2, 126, 72, 1, 1, 1, 2, 134, 72,
- 3, 1, 1, 2, 134, 72, 4, 1, 1, 2, 134, 72,
- 5, 1, 1, 2, 134, 64, 6, 1, 1, 2, 134, 72,
- 7, 1, 1, 2, 134, 127, 8, 1, 1, 2, 134, 72,
- 1, 1, 1, 2, 142, 127, 3, 1, 1, 2, 142, 72,
- 4, 1, 1, 2, 142, 72, 5, 1, 1, 2, 142, 127,
- 6, 1, 1, 2, 142, 72, 7, 1, 1, 2, 142, 127,
- 8, 1, 1, 2, 142, 72, 1, 1, 1, 2, 151, 127,
- 3, 1, 1, 2, 151, 72, 4, 1, 1, 2, 151, 72,
- 5, 1, 1, 2, 151, 72, 6, 1, 1, 2, 151, 72,
- 7, 1, 1, 2, 151, 54, 8, 1, 1, 2, 151, 72,
- 1, 1, 1, 2, 159, 127, 3, 1, 1, 2, 159, 72,
- 4, 1, 1, 2, 159, 72, 5, 1, 1, 2, 159, 72,
- 6, 1, 1, 2, 159, 72, 7, 1, 1, 2, 159, 54,
- 8, 1, 1, 2, 159, 72, 1, 1, 1, 3, 38, 50,
- 3, 1, 1, 3, 38, 40, 4, 1, 1, 3, 38, 62,
- 5, 1, 1, 3, 38, 40, 6, 1, 1, 3, 38, 52,
- 7, 1, 1, 3, 38, 30, 8, 1, 1, 3, 38, 50,
- 1, 1, 1, 3, 46, 50, 3, 1, 1, 3, 46, 40,
- 4, 1, 1, 3, 46, 62, 5, 1, 1, 3, 46, 40,
- 6, 1, 1, 3, 46, 52, 7, 1, 1, 3, 46, 30,
- 8, 1, 1, 3, 46, 50, 1, 1, 1, 3, 54, 50,
- 3, 1, 1, 3, 54, 40, 4, 1, 1, 3, 54, 62,
- 5, 1, 1, 3, 54, 40, 6, 1, 1, 3, 54, 68,
- 7, 1, 1, 3, 54, 30, 8, 1, 1, 3, 54, 68,
- 1, 1, 1, 3, 62, 48, 3, 1, 1, 3, 62, 40,
- 4, 1, 1, 3, 62, 58, 5, 1, 1, 3, 62, 40,
- 6, 1, 1, 3, 62, 58, 7, 1, 1, 3, 62, 30,
- 8, 1, 1, 3, 62, 58, 1, 1, 1, 3, 102, 70,
- 3, 1, 1, 3, 102, 54, 4, 1, 1, 3, 102, 64,
- 5, 1, 1, 3, 102, 40, 6, 1, 1, 3, 102, 54,
- 7, 1, 1, 3, 102, 30, 8, 1, 1, 3, 102, 54,
- 1, 1, 1, 3, 110, 70, 3, 1, 1, 3, 110, 68,
- 4, 1, 1, 3, 110, 64, 5, 1, 1, 3, 110, 40,
- 6, 1, 1, 3, 110, 68, 7, 1, 1, 3, 110, 30,
- 8, 1, 1, 3, 110, 68, 1, 1, 1, 3, 118, 70,
- 3, 1, 1, 3, 118, 127, 4, 1, 1, 3, 118, 64,
- 5, 1, 1, 3, 118, 127, 6, 1, 1, 3, 118, 68,
- 7, 1, 1, 3, 118, 30, 8, 1, 1, 3, 118, 68,
- 1, 1, 1, 3, 126, 70, 3, 1, 1, 3, 126, 127,
- 4, 1, 1, 3, 126, 64, 5, 1, 1, 3, 126, 127,
- 6, 1, 1, 3, 126, 68, 7, 1, 1, 3, 126, 30,
- 8, 1, 1, 3, 126, 68, 1, 1, 1, 3, 134, 70,
- 3, 1, 1, 3, 134, 68, 4, 1, 1, 3, 134, 64,
- 5, 1, 1, 3, 134, 40, 6, 1, 1, 3, 134, 68,
- 7, 1, 1, 3, 134, 127, 8, 1, 1, 3, 134, 68,
- 1, 1, 1, 3, 142, 127, 3, 1, 1, 3, 142, 68,
- 4, 1, 1, 3, 142, 64, 5, 1, 1, 3, 142, 127,
- 6, 1, 1, 3, 142, 68, 7, 1, 1, 3, 142, 127,
- 8, 1, 1, 3, 142, 68, 1, 1, 1, 3, 151, 127,
- 3, 1, 1, 3, 151, 72, 4, 1, 1, 3, 151, 66,
- 5, 1, 1, 3, 151, 72, 6, 1, 1, 3, 151, 72,
- 7, 1, 1, 3, 151, 30, 8, 1, 1, 3, 151, 68,
- 1, 1, 1, 3, 159, 127, 3, 1, 1, 3, 159, 72,
- 4, 1, 1, 3, 159, 66, 5, 1, 1, 3, 159, 72,
- 6, 1, 1, 3, 159, 72, 7, 1, 1, 3, 159, 30,
- 8, 1, 1, 3, 159, 72, 1, 1, 2, 4, 42, 64,
- 3, 1, 2, 4, 42, 64, 4, 1, 2, 4, 42, 68,
- 5, 1, 2, 4, 42, 64, 6, 1, 2, 4, 42, 64,
- 7, 1, 2, 4, 42, 54, 8, 1, 2, 4, 42, 62,
- 1, 1, 2, 4, 58, 64, 3, 1, 2, 4, 58, 62,
- 4, 1, 2, 4, 58, 64, 5, 1, 2, 4, 58, 64,
- 6, 1, 2, 4, 58, 62, 7, 1, 2, 4, 58, 54,
- 8, 1, 2, 4, 58, 62, 1, 1, 2, 4, 106, 72,
- 3, 1, 2, 4, 106, 58, 4, 1, 2, 4, 106, 66,
- 5, 1, 2, 4, 106, 64, 6, 1, 2, 4, 106, 58,
- 7, 1, 2, 4, 106, 54, 8, 1, 2, 4, 106, 58,
- 1, 1, 2, 4, 122, 72, 3, 1, 2, 4, 122, 127,
- 4, 1, 2, 4, 122, 68, 5, 1, 2, 4, 122, 127,
- 6, 1, 2, 4, 122, 72, 7, 1, 2, 4, 122, 54,
- 8, 1, 2, 4, 122, 72, 1, 1, 2, 4, 138, 127,
- 3, 1, 2, 4, 138, 72, 4, 1, 2, 4, 138, 68,
- 5, 1, 2, 4, 138, 127, 6, 1, 2, 4, 138, 72,
- 7, 1, 2, 4, 138, 127, 8, 1, 2, 4, 138, 72,
- 1, 1, 2, 4, 155, 127, 3, 1, 2, 4, 155, 72,
- 4, 1, 2, 4, 155, 68, 5, 1, 2, 4, 155, 72,
- 6, 1, 2, 4, 155, 72, 7, 1, 2, 4, 155, 54,
- 8, 1, 2, 4, 155, 68, 1, 1, 2, 5, 42, 50,
- 3, 1, 2, 5, 42, 40, 4, 1, 2, 5, 42, 58,
- 5, 1, 2, 5, 42, 40, 6, 1, 2, 5, 42, 52,
- 7, 1, 2, 5, 42, 30, 8, 1, 2, 5, 42, 50,
- 1, 1, 2, 5, 58, 50, 3, 1, 2, 5, 58, 40,
- 4, 1, 2, 5, 58, 56, 5, 1, 2, 5, 58, 40,
- 6, 1, 2, 5, 58, 52, 7, 1, 2, 5, 58, 30,
- 8, 1, 2, 5, 58, 52, 1, 1, 2, 5, 106, 72,
- 3, 1, 2, 5, 106, 50, 4, 1, 2, 5, 106, 56,
- 5, 1, 2, 5, 106, 40, 6, 1, 2, 5, 106, 50,
- 7, 1, 2, 5, 106, 30, 8, 1, 2, 5, 106, 50,
- 1, 1, 2, 5, 122, 72, 3, 1, 2, 5, 122, 127,
- 4, 1, 2, 5, 122, 56, 5, 1, 2, 5, 122, 127,
- 6, 1, 2, 5, 122, 66, 7, 1, 2, 5, 122, 30,
- 8, 1, 2, 5, 122, 66, 1, 1, 2, 5, 138, 127,
- 3, 1, 2, 5, 138, 66, 4, 1, 2, 5, 138, 58,
- 5, 1, 2, 5, 138, 127, 6, 1, 2, 5, 138, 66,
- 7, 1, 2, 5, 138, 127, 8, 1, 2, 5, 138, 66,
- 1, 1, 2, 5, 155, 127, 3, 1, 2, 5, 155, 62,
- 4, 1, 2, 5, 155, 58, 5, 1, 2, 5, 155, 72,
- 6, 1, 2, 5, 155, 62, 7, 1, 2, 5, 155, 30,
- 8, 1, 2, 5, 155, 62
+static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
+ { 0, 0, 0, 0, 1, 72, },
+ { 2, 0, 0, 0, 1, 60, },
+ { 0, 0, 0, 0, 2, 72, },
+ { 2, 0, 0, 0, 2, 60, },
+ { 0, 0, 0, 0, 3, 76, },
+ { 2, 0, 0, 0, 3, 60, },
+ { 0, 0, 0, 0, 4, 76, },
+ { 2, 0, 0, 0, 4, 60, },
+ { 0, 0, 0, 0, 5, 76, },
+ { 2, 0, 0, 0, 5, 60, },
+ { 0, 0, 0, 0, 6, 76, },
+ { 2, 0, 0, 0, 6, 60, },
+ { 0, 0, 0, 0, 7, 76, },
+ { 2, 0, 0, 0, 7, 60, },
+ { 0, 0, 0, 0, 8, 76, },
+ { 2, 0, 0, 0, 8, 60, },
+ { 0, 0, 0, 0, 9, 76, },
+ { 2, 0, 0, 0, 9, 60, },
+ { 0, 0, 0, 0, 10, 72, },
+ { 2, 0, 0, 0, 10, 60, },
+ { 0, 0, 0, 0, 11, 72, },
+ { 2, 0, 0, 0, 11, 60, },
+ { 0, 0, 0, 0, 12, 52, },
+ { 2, 0, 0, 0, 12, 60, },
+ { 0, 0, 0, 0, 13, 48, },
+ { 2, 0, 0, 0, 13, 60, },
+ { 0, 0, 0, 0, 14, 127, },
+ { 2, 0, 0, 0, 14, 127, },
+ { 0, 0, 0, 1, 1, 52, },
+ { 2, 0, 0, 1, 1, 60, },
+ { 0, 0, 0, 1, 2, 60, },
+ { 2, 0, 0, 1, 2, 60, },
+ { 0, 0, 0, 1, 3, 64, },
+ { 2, 0, 0, 1, 3, 60, },
+ { 0, 0, 0, 1, 4, 68, },
+ { 2, 0, 0, 1, 4, 60, },
+ { 0, 0, 0, 1, 5, 76, },
+ { 2, 0, 0, 1, 5, 60, },
+ { 0, 0, 0, 1, 6, 76, },
+ { 2, 0, 0, 1, 6, 60, },
+ { 0, 0, 0, 1, 7, 76, },
+ { 2, 0, 0, 1, 7, 60, },
+ { 0, 0, 0, 1, 8, 68, },
+ { 2, 0, 0, 1, 8, 60, },
+ { 0, 0, 0, 1, 9, 64, },
+ { 2, 0, 0, 1, 9, 60, },
+ { 0, 0, 0, 1, 10, 60, },
+ { 2, 0, 0, 1, 10, 60, },
+ { 0, 0, 0, 1, 11, 52, },
+ { 2, 0, 0, 1, 11, 60, },
+ { 0, 0, 0, 1, 12, 40, },
+ { 2, 0, 0, 1, 12, 60, },
+ { 0, 0, 0, 1, 13, 28, },
+ { 2, 0, 0, 1, 13, 60, },
+ { 0, 0, 0, 1, 14, 127, },
+ { 2, 0, 0, 1, 14, 127, },
+ { 0, 0, 0, 2, 1, 52, },
+ { 2, 0, 0, 2, 1, 60, },
+ { 0, 0, 0, 2, 2, 60, },
+ { 2, 0, 0, 2, 2, 60, },
+ { 0, 0, 0, 2, 3, 64, },
+ { 2, 0, 0, 2, 3, 60, },
+ { 0, 0, 0, 2, 4, 68, },
+ { 2, 0, 0, 2, 4, 60, },
+ { 0, 0, 0, 2, 5, 76, },
+ { 2, 0, 0, 2, 5, 60, },
+ { 0, 0, 0, 2, 6, 76, },
+ { 2, 0, 0, 2, 6, 60, },
+ { 0, 0, 0, 2, 7, 76, },
+ { 2, 0, 0, 2, 7, 60, },
+ { 0, 0, 0, 2, 8, 68, },
+ { 2, 0, 0, 2, 8, 60, },
+ { 0, 0, 0, 2, 9, 64, },
+ { 2, 0, 0, 2, 9, 60, },
+ { 0, 0, 0, 2, 10, 60, },
+ { 2, 0, 0, 2, 10, 60, },
+ { 0, 0, 0, 2, 11, 52, },
+ { 2, 0, 0, 2, 11, 60, },
+ { 0, 0, 0, 2, 12, 40, },
+ { 2, 0, 0, 2, 12, 60, },
+ { 0, 0, 0, 2, 13, 28, },
+ { 2, 0, 0, 2, 13, 60, },
+ { 0, 0, 0, 2, 14, 127, },
+ { 2, 0, 0, 2, 14, 127, },
+ { 0, 0, 0, 3, 1, 52, },
+ { 2, 0, 0, 3, 1, 36, },
+ { 0, 0, 0, 3, 2, 60, },
+ { 2, 0, 0, 3, 2, 36, },
+ { 0, 0, 0, 3, 3, 64, },
+ { 2, 0, 0, 3, 3, 36, },
+ { 0, 0, 0, 3, 4, 68, },
+ { 2, 0, 0, 3, 4, 36, },
+ { 0, 0, 0, 3, 5, 76, },
+ { 2, 0, 0, 3, 5, 36, },
+ { 0, 0, 0, 3, 6, 76, },
+ { 2, 0, 0, 3, 6, 36, },
+ { 0, 0, 0, 3, 7, 76, },
+ { 2, 0, 0, 3, 7, 36, },
+ { 0, 0, 0, 3, 8, 68, },
+ { 2, 0, 0, 3, 8, 36, },
+ { 0, 0, 0, 3, 9, 64, },
+ { 2, 0, 0, 3, 9, 36, },
+ { 0, 0, 0, 3, 10, 60, },
+ { 2, 0, 0, 3, 10, 36, },
+ { 0, 0, 0, 3, 11, 52, },
+ { 2, 0, 0, 3, 11, 36, },
+ { 0, 0, 0, 3, 12, 40, },
+ { 2, 0, 0, 3, 12, 36, },
+ { 0, 0, 0, 3, 13, 28, },
+ { 2, 0, 0, 3, 13, 36, },
+ { 0, 0, 0, 3, 14, 127, },
+ { 2, 0, 0, 3, 14, 127, },
+ { 0, 0, 1, 2, 1, 127, },
+ { 2, 0, 1, 2, 1, 127, },
+ { 0, 0, 1, 2, 2, 127, },
+ { 2, 0, 1, 2, 2, 127, },
+ { 0, 0, 1, 2, 3, 52, },
+ { 2, 0, 1, 2, 3, 60, },
+ { 0, 0, 1, 2, 4, 52, },
+ { 2, 0, 1, 2, 4, 60, },
+ { 0, 0, 1, 2, 5, 60, },
+ { 2, 0, 1, 2, 5, 60, },
+ { 0, 0, 1, 2, 6, 64, },
+ { 2, 0, 1, 2, 6, 60, },
+ { 0, 0, 1, 2, 7, 60, },
+ { 2, 0, 1, 2, 7, 60, },
+ { 0, 0, 1, 2, 8, 52, },
+ { 2, 0, 1, 2, 8, 60, },
+ { 0, 0, 1, 2, 9, 52, },
+ { 2, 0, 1, 2, 9, 60, },
+ { 0, 0, 1, 2, 10, 40, },
+ { 2, 0, 1, 2, 10, 60, },
+ { 0, 0, 1, 2, 11, 28, },
+ { 2, 0, 1, 2, 11, 60, },
+ { 0, 0, 1, 2, 12, 127, },
+ { 2, 0, 1, 2, 12, 127, },
+ { 0, 0, 1, 2, 13, 127, },
+ { 2, 0, 1, 2, 13, 127, },
+ { 0, 0, 1, 2, 14, 127, },
+ { 2, 0, 1, 2, 14, 127, },
+ { 0, 0, 1, 3, 1, 127, },
+ { 2, 0, 1, 3, 1, 127, },
+ { 0, 0, 1, 3, 2, 127, },
+ { 2, 0, 1, 3, 2, 127, },
+ { 0, 0, 1, 3, 3, 48, },
+ { 2, 0, 1, 3, 3, 36, },
+ { 0, 0, 1, 3, 4, 48, },
+ { 2, 0, 1, 3, 4, 36, },
+ { 0, 0, 1, 3, 5, 60, },
+ { 2, 0, 1, 3, 5, 36, },
+ { 0, 0, 1, 3, 6, 64, },
+ { 2, 0, 1, 3, 6, 36, },
+ { 0, 0, 1, 3, 7, 60, },
+ { 2, 0, 1, 3, 7, 36, },
+ { 0, 0, 1, 3, 8, 52, },
+ { 2, 0, 1, 3, 8, 36, },
+ { 0, 0, 1, 3, 9, 52, },
+ { 2, 0, 1, 3, 9, 36, },
+ { 0, 0, 1, 3, 10, 40, },
+ { 2, 0, 1, 3, 10, 36, },
+ { 0, 0, 1, 3, 11, 26, },
+ { 2, 0, 1, 3, 11, 36, },
+ { 0, 0, 1, 3, 12, 127, },
+ { 2, 0, 1, 3, 12, 127, },
+ { 0, 0, 1, 3, 13, 127, },
+ { 2, 0, 1, 3, 13, 127, },
+ { 0, 0, 1, 3, 14, 127, },
+ { 2, 0, 1, 3, 14, 127, },
+ { 0, 1, 0, 1, 36, 74, },
+ { 2, 1, 0, 1, 36, 62, },
+ { 0, 1, 0, 1, 40, 76, },
+ { 2, 1, 0, 1, 40, 62, },
+ { 0, 1, 0, 1, 44, 76, },
+ { 2, 1, 0, 1, 44, 62, },
+ { 0, 1, 0, 1, 48, 76, },
+ { 2, 1, 0, 1, 48, 62, },
+ { 0, 1, 0, 1, 52, 76, },
+ { 2, 1, 0, 1, 52, 62, },
+ { 0, 1, 0, 1, 56, 76, },
+ { 2, 1, 0, 1, 56, 62, },
+ { 0, 1, 0, 1, 60, 76, },
+ { 2, 1, 0, 1, 60, 62, },
+ { 0, 1, 0, 1, 64, 74, },
+ { 2, 1, 0, 1, 64, 62, },
+ { 0, 1, 0, 1, 100, 72, },
+ { 2, 1, 0, 1, 100, 62, },
+ { 0, 1, 0, 1, 104, 76, },
+ { 2, 1, 0, 1, 104, 62, },
+ { 0, 1, 0, 1, 108, 76, },
+ { 2, 1, 0, 1, 108, 62, },
+ { 0, 1, 0, 1, 112, 76, },
+ { 2, 1, 0, 1, 112, 62, },
+ { 0, 1, 0, 1, 116, 76, },
+ { 2, 1, 0, 1, 116, 62, },
+ { 0, 1, 0, 1, 120, 76, },
+ { 2, 1, 0, 1, 120, 62, },
+ { 0, 1, 0, 1, 124, 76, },
+ { 2, 1, 0, 1, 124, 62, },
+ { 0, 1, 0, 1, 128, 76, },
+ { 2, 1, 0, 1, 128, 62, },
+ { 0, 1, 0, 1, 132, 76, },
+ { 2, 1, 0, 1, 132, 62, },
+ { 0, 1, 0, 1, 136, 76, },
+ { 2, 1, 0, 1, 136, 62, },
+ { 0, 1, 0, 1, 140, 72, },
+ { 2, 1, 0, 1, 140, 62, },
+ { 0, 1, 0, 1, 144, 76, },
+ { 2, 1, 0, 1, 144, 127, },
+ { 0, 1, 0, 1, 149, 76, },
+ { 2, 1, 0, 1, 149, -128, },
+ { 0, 1, 0, 1, 153, 76, },
+ { 2, 1, 0, 1, 153, -128, },
+ { 0, 1, 0, 1, 157, 76, },
+ { 2, 1, 0, 1, 157, -128, },
+ { 0, 1, 0, 1, 161, 76, },
+ { 2, 1, 0, 1, 161, -128, },
+ { 0, 1, 0, 1, 165, 76, },
+ { 2, 1, 0, 1, 165, -128, },
+ { 0, 1, 0, 2, 36, 72, },
+ { 2, 1, 0, 2, 36, 62, },
+ { 0, 1, 0, 2, 40, 76, },
+ { 2, 1, 0, 2, 40, 62, },
+ { 0, 1, 0, 2, 44, 76, },
+ { 2, 1, 0, 2, 44, 62, },
+ { 0, 1, 0, 2, 48, 76, },
+ { 2, 1, 0, 2, 48, 62, },
+ { 0, 1, 0, 2, 52, 76, },
+ { 2, 1, 0, 2, 52, 62, },
+ { 0, 1, 0, 2, 56, 76, },
+ { 2, 1, 0, 2, 56, 62, },
+ { 0, 1, 0, 2, 60, 76, },
+ { 2, 1, 0, 2, 60, 62, },
+ { 0, 1, 0, 2, 64, 74, },
+ { 2, 1, 0, 2, 64, 62, },
+ { 0, 1, 0, 2, 100, 70, },
+ { 2, 1, 0, 2, 100, 62, },
+ { 0, 1, 0, 2, 104, 76, },
+ { 2, 1, 0, 2, 104, 62, },
+ { 0, 1, 0, 2, 108, 76, },
+ { 2, 1, 0, 2, 108, 62, },
+ { 0, 1, 0, 2, 112, 76, },
+ { 2, 1, 0, 2, 112, 62, },
+ { 0, 1, 0, 2, 116, 76, },
+ { 2, 1, 0, 2, 116, 62, },
+ { 0, 1, 0, 2, 120, 76, },
+ { 2, 1, 0, 2, 120, 62, },
+ { 0, 1, 0, 2, 124, 76, },
+ { 2, 1, 0, 2, 124, 62, },
+ { 0, 1, 0, 2, 128, 76, },
+ { 2, 1, 0, 2, 128, 62, },
+ { 0, 1, 0, 2, 132, 76, },
+ { 2, 1, 0, 2, 132, 62, },
+ { 0, 1, 0, 2, 136, 76, },
+ { 2, 1, 0, 2, 136, 62, },
+ { 0, 1, 0, 2, 140, 70, },
+ { 2, 1, 0, 2, 140, 62, },
+ { 0, 1, 0, 2, 144, 76, },
+ { 2, 1, 0, 2, 144, 127, },
+ { 0, 1, 0, 2, 149, 76, },
+ { 2, 1, 0, 2, 149, -128, },
+ { 0, 1, 0, 2, 153, 76, },
+ { 2, 1, 0, 2, 153, -128, },
+ { 0, 1, 0, 2, 157, 76, },
+ { 2, 1, 0, 2, 157, -128, },
+ { 0, 1, 0, 2, 161, 76, },
+ { 2, 1, 0, 2, 161, -128, },
+ { 0, 1, 0, 2, 165, 76, },
+ { 2, 1, 0, 2, 165, -128, },
+ { 0, 1, 0, 3, 36, 68, },
+ { 2, 1, 0, 3, 36, 38, },
+ { 0, 1, 0, 3, 40, 68, },
+ { 2, 1, 0, 3, 40, 38, },
+ { 0, 1, 0, 3, 44, 68, },
+ { 2, 1, 0, 3, 44, 38, },
+ { 0, 1, 0, 3, 48, 68, },
+ { 2, 1, 0, 3, 48, 38, },
+ { 0, 1, 0, 3, 52, 68, },
+ { 2, 1, 0, 3, 52, 38, },
+ { 0, 1, 0, 3, 56, 68, },
+ { 2, 1, 0, 3, 56, 38, },
+ { 0, 1, 0, 3, 60, 66, },
+ { 2, 1, 0, 3, 60, 38, },
+ { 0, 1, 0, 3, 64, 68, },
+ { 2, 1, 0, 3, 64, 38, },
+ { 0, 1, 0, 3, 100, 60, },
+ { 2, 1, 0, 3, 100, 38, },
+ { 0, 1, 0, 3, 104, 68, },
+ { 2, 1, 0, 3, 104, 38, },
+ { 0, 1, 0, 3, 108, 68, },
+ { 2, 1, 0, 3, 108, 38, },
+ { 0, 1, 0, 3, 112, 68, },
+ { 2, 1, 0, 3, 112, 38, },
+ { 0, 1, 0, 3, 116, 68, },
+ { 2, 1, 0, 3, 116, 38, },
+ { 0, 1, 0, 3, 120, 68, },
+ { 2, 1, 0, 3, 120, 38, },
+ { 0, 1, 0, 3, 124, 68, },
+ { 2, 1, 0, 3, 124, 38, },
+ { 0, 1, 0, 3, 128, 68, },
+ { 2, 1, 0, 3, 128, 38, },
+ { 0, 1, 0, 3, 132, 68, },
+ { 2, 1, 0, 3, 132, 38, },
+ { 0, 1, 0, 3, 136, 68, },
+ { 2, 1, 0, 3, 136, 38, },
+ { 0, 1, 0, 3, 140, 60, },
+ { 2, 1, 0, 3, 140, 38, },
+ { 0, 1, 0, 3, 144, 68, },
+ { 2, 1, 0, 3, 144, 127, },
+ { 0, 1, 0, 3, 149, 76, },
+ { 2, 1, 0, 3, 149, -128, },
+ { 0, 1, 0, 3, 153, 76, },
+ { 2, 1, 0, 3, 153, -128, },
+ { 0, 1, 0, 3, 157, 76, },
+ { 2, 1, 0, 3, 157, -128, },
+ { 0, 1, 0, 3, 161, 76, },
+ { 2, 1, 0, 3, 161, -128, },
+ { 0, 1, 0, 3, 165, 76, },
+ { 2, 1, 0, 3, 165, -128, },
+ { 0, 1, 1, 2, 38, 66, },
+ { 2, 1, 1, 2, 38, 64, },
+ { 0, 1, 1, 2, 46, 72, },
+ { 2, 1, 1, 2, 46, 64, },
+ { 0, 1, 1, 2, 54, 72, },
+ { 2, 1, 1, 2, 54, 64, },
+ { 0, 1, 1, 2, 62, 64, },
+ { 2, 1, 1, 2, 62, 64, },
+ { 0, 1, 1, 2, 102, 58, },
+ { 2, 1, 1, 2, 102, 64, },
+ { 0, 1, 1, 2, 110, 72, },
+ { 2, 1, 1, 2, 110, 64, },
+ { 0, 1, 1, 2, 118, 72, },
+ { 2, 1, 1, 2, 118, 64, },
+ { 0, 1, 1, 2, 126, 72, },
+ { 2, 1, 1, 2, 126, 64, },
+ { 0, 1, 1, 2, 134, 72, },
+ { 2, 1, 1, 2, 134, 64, },
+ { 0, 1, 1, 2, 142, 72, },
+ { 2, 1, 1, 2, 142, 127, },
+ { 0, 1, 1, 2, 151, 72, },
+ { 2, 1, 1, 2, 151, -128, },
+ { 0, 1, 1, 2, 159, 72, },
+ { 2, 1, 1, 2, 159, -128, },
+ { 0, 1, 1, 3, 38, 60, },
+ { 2, 1, 1, 3, 38, 40, },
+ { 0, 1, 1, 3, 46, 68, },
+ { 2, 1, 1, 3, 46, 40, },
+ { 0, 1, 1, 3, 54, 68, },
+ { 2, 1, 1, 3, 54, 40, },
+ { 0, 1, 1, 3, 62, 58, },
+ { 2, 1, 1, 3, 62, 40, },
+ { 0, 1, 1, 3, 102, 54, },
+ { 2, 1, 1, 3, 102, 40, },
+ { 0, 1, 1, 3, 110, 68, },
+ { 2, 1, 1, 3, 110, 40, },
+ { 0, 1, 1, 3, 118, 68, },
+ { 2, 1, 1, 3, 118, 40, },
+ { 0, 1, 1, 3, 126, 68, },
+ { 2, 1, 1, 3, 126, 40, },
+ { 0, 1, 1, 3, 134, 68, },
+ { 2, 1, 1, 3, 134, 40, },
+ { 0, 1, 1, 3, 142, 68, },
+ { 2, 1, 1, 3, 142, 127, },
+ { 0, 1, 1, 3, 151, 72, },
+ { 2, 1, 1, 3, 151, -128, },
+ { 0, 1, 1, 3, 159, 72, },
+ { 2, 1, 1, 3, 159, -128, },
+ { 0, 1, 2, 4, 42, 64, },
+ { 2, 1, 2, 4, 42, 64, },
+ { 0, 1, 2, 4, 58, 62, },
+ { 2, 1, 2, 4, 58, 64, },
+ { 0, 1, 2, 4, 106, 58, },
+ { 2, 1, 2, 4, 106, 64, },
+ { 0, 1, 2, 4, 122, 72, },
+ { 2, 1, 2, 4, 122, 64, },
+ { 0, 1, 2, 4, 138, 72, },
+ { 2, 1, 2, 4, 138, 127, },
+ { 0, 1, 2, 4, 155, 72, },
+ { 2, 1, 2, 4, 155, -128, },
+ { 0, 1, 2, 5, 42, 54, },
+ { 2, 1, 2, 5, 42, 40, },
+ { 0, 1, 2, 5, 58, 52, },
+ { 2, 1, 2, 5, 58, 40, },
+ { 0, 1, 2, 5, 106, 50, },
+ { 2, 1, 2, 5, 106, 40, },
+ { 0, 1, 2, 5, 122, 66, },
+ { 2, 1, 2, 5, 122, 40, },
+ { 0, 1, 2, 5, 138, 66, },
+ { 2, 1, 2, 5, 138, 127, },
+ { 0, 1, 2, 5, 155, 62, },
+ { 2, 1, 2, 5, 155, -128, },
+ { 1, 0, 0, 0, 1, 68, },
+ { 3, 0, 0, 0, 1, 72, },
+ { 4, 0, 0, 0, 1, 76, },
+ { 5, 0, 0, 0, 1, 60, },
+ { 6, 0, 0, 0, 1, 72, },
+ { 7, 0, 0, 0, 1, 60, },
+ { 8, 0, 0, 0, 1, 72, },
+ { 1, 0, 0, 0, 2, 68, },
+ { 3, 0, 0, 0, 2, 72, },
+ { 4, 0, 0, 0, 2, 76, },
+ { 5, 0, 0, 0, 2, 60, },
+ { 6, 0, 0, 0, 2, 72, },
+ { 7, 0, 0, 0, 2, 60, },
+ { 8, 0, 0, 0, 2, 72, },
+ { 1, 0, 0, 0, 3, 68, },
+ { 3, 0, 0, 0, 3, 76, },
+ { 4, 0, 0, 0, 3, 76, },
+ { 5, 0, 0, 0, 3, 60, },
+ { 6, 0, 0, 0, 3, 76, },
+ { 7, 0, 0, 0, 3, 60, },
+ { 8, 0, 0, 0, 3, 76, },
+ { 1, 0, 0, 0, 4, 68, },
+ { 3, 0, 0, 0, 4, 76, },
+ { 4, 0, 0, 0, 4, 76, },
+ { 5, 0, 0, 0, 4, 60, },
+ { 6, 0, 0, 0, 4, 76, },
+ { 7, 0, 0, 0, 4, 60, },
+ { 8, 0, 0, 0, 4, 76, },
+ { 1, 0, 0, 0, 5, 68, },
+ { 3, 0, 0, 0, 5, 76, },
+ { 4, 0, 0, 0, 5, 76, },
+ { 5, 0, 0, 0, 5, 60, },
+ { 6, 0, 0, 0, 5, 76, },
+ { 7, 0, 0, 0, 5, 60, },
+ { 8, 0, 0, 0, 5, 76, },
+ { 1, 0, 0, 0, 6, 68, },
+ { 3, 0, 0, 0, 6, 76, },
+ { 4, 0, 0, 0, 6, 76, },
+ { 5, 0, 0, 0, 6, 60, },
+ { 6, 0, 0, 0, 6, 76, },
+ { 7, 0, 0, 0, 6, 60, },
+ { 8, 0, 0, 0, 6, 76, },
+ { 1, 0, 0, 0, 7, 68, },
+ { 3, 0, 0, 0, 7, 76, },
+ { 4, 0, 0, 0, 7, 76, },
+ { 5, 0, 0, 0, 7, 60, },
+ { 6, 0, 0, 0, 7, 76, },
+ { 7, 0, 0, 0, 7, 60, },
+ { 8, 0, 0, 0, 7, 76, },
+ { 1, 0, 0, 0, 8, 68, },
+ { 3, 0, 0, 0, 8, 76, },
+ { 4, 0, 0, 0, 8, 76, },
+ { 5, 0, 0, 0, 8, 60, },
+ { 6, 0, 0, 0, 8, 76, },
+ { 7, 0, 0, 0, 8, 60, },
+ { 8, 0, 0, 0, 8, 76, },
+ { 1, 0, 0, 0, 9, 68, },
+ { 3, 0, 0, 0, 9, 76, },
+ { 4, 0, 0, 0, 9, 76, },
+ { 5, 0, 0, 0, 9, 60, },
+ { 6, 0, 0, 0, 9, 76, },
+ { 7, 0, 0, 0, 9, 60, },
+ { 8, 0, 0, 0, 9, 76, },
+ { 1, 0, 0, 0, 10, 68, },
+ { 3, 0, 0, 0, 10, 72, },
+ { 4, 0, 0, 0, 10, 76, },
+ { 5, 0, 0, 0, 10, 60, },
+ { 6, 0, 0, 0, 10, 72, },
+ { 7, 0, 0, 0, 10, 60, },
+ { 8, 0, 0, 0, 10, 72, },
+ { 1, 0, 0, 0, 11, 68, },
+ { 3, 0, 0, 0, 11, 72, },
+ { 4, 0, 0, 0, 11, 76, },
+ { 5, 0, 0, 0, 11, 60, },
+ { 6, 0, 0, 0, 11, 72, },
+ { 7, 0, 0, 0, 11, 60, },
+ { 8, 0, 0, 0, 11, 72, },
+ { 1, 0, 0, 0, 12, 68, },
+ { 3, 0, 0, 0, 12, 52, },
+ { 4, 0, 0, 0, 12, 76, },
+ { 5, 0, 0, 0, 12, 60, },
+ { 6, 0, 0, 0, 12, 52, },
+ { 7, 0, 0, 0, 12, 60, },
+ { 8, 0, 0, 0, 12, 52, },
+ { 1, 0, 0, 0, 13, 68, },
+ { 3, 0, 0, 0, 13, 48, },
+ { 4, 0, 0, 0, 13, 76, },
+ { 5, 0, 0, 0, 13, 60, },
+ { 6, 0, 0, 0, 13, 48, },
+ { 7, 0, 0, 0, 13, 60, },
+ { 8, 0, 0, 0, 13, 48, },
+ { 1, 0, 0, 0, 14, 68, },
+ { 3, 0, 0, 0, 14, 127, },
+ { 4, 0, 0, 0, 14, 127, },
+ { 5, 0, 0, 0, 14, 127, },
+ { 6, 0, 0, 0, 14, 127, },
+ { 7, 0, 0, 0, 14, 127, },
+ { 8, 0, 0, 0, 14, 127, },
+ { 1, 0, 0, 1, 1, 76, },
+ { 3, 0, 0, 1, 1, 52, },
+ { 4, 0, 0, 1, 1, 76, },
+ { 5, 0, 0, 1, 1, 60, },
+ { 6, 0, 0, 1, 1, 52, },
+ { 7, 0, 0, 1, 1, 60, },
+ { 8, 0, 0, 1, 1, 52, },
+ { 1, 0, 0, 1, 2, 76, },
+ { 3, 0, 0, 1, 2, 60, },
+ { 4, 0, 0, 1, 2, 76, },
+ { 5, 0, 0, 1, 2, 60, },
+ { 6, 0, 0, 1, 2, 60, },
+ { 7, 0, 0, 1, 2, 60, },
+ { 8, 0, 0, 1, 2, 60, },
+ { 1, 0, 0, 1, 3, 76, },
+ { 3, 0, 0, 1, 3, 64, },
+ { 4, 0, 0, 1, 3, 76, },
+ { 5, 0, 0, 1, 3, 60, },
+ { 6, 0, 0, 1, 3, 64, },
+ { 7, 0, 0, 1, 3, 60, },
+ { 8, 0, 0, 1, 3, 64, },
+ { 1, 0, 0, 1, 4, 76, },
+ { 3, 0, 0, 1, 4, 68, },
+ { 4, 0, 0, 1, 4, 76, },
+ { 5, 0, 0, 1, 4, 60, },
+ { 6, 0, 0, 1, 4, 68, },
+ { 7, 0, 0, 1, 4, 60, },
+ { 8, 0, 0, 1, 4, 68, },
+ { 1, 0, 0, 1, 5, 76, },
+ { 3, 0, 0, 1, 5, 76, },
+ { 4, 0, 0, 1, 5, 76, },
+ { 5, 0, 0, 1, 5, 60, },
+ { 6, 0, 0, 1, 5, 76, },
+ { 7, 0, 0, 1, 5, 60, },
+ { 8, 0, 0, 1, 5, 76, },
+ { 1, 0, 0, 1, 6, 76, },
+ { 3, 0, 0, 1, 6, 76, },
+ { 4, 0, 0, 1, 6, 76, },
+ { 5, 0, 0, 1, 6, 60, },
+ { 6, 0, 0, 1, 6, 76, },
+ { 7, 0, 0, 1, 6, 60, },
+ { 8, 0, 0, 1, 6, 76, },
+ { 1, 0, 0, 1, 7, 76, },
+ { 3, 0, 0, 1, 7, 76, },
+ { 4, 0, 0, 1, 7, 76, },
+ { 5, 0, 0, 1, 7, 60, },
+ { 6, 0, 0, 1, 7, 76, },
+ { 7, 0, 0, 1, 7, 60, },
+ { 8, 0, 0, 1, 7, 76, },
+ { 1, 0, 0, 1, 8, 76, },
+ { 3, 0, 0, 1, 8, 68, },
+ { 4, 0, 0, 1, 8, 76, },
+ { 5, 0, 0, 1, 8, 60, },
+ { 6, 0, 0, 1, 8, 68, },
+ { 7, 0, 0, 1, 8, 60, },
+ { 8, 0, 0, 1, 8, 68, },
+ { 1, 0, 0, 1, 9, 76, },
+ { 3, 0, 0, 1, 9, 64, },
+ { 4, 0, 0, 1, 9, 76, },
+ { 5, 0, 0, 1, 9, 60, },
+ { 6, 0, 0, 1, 9, 64, },
+ { 7, 0, 0, 1, 9, 60, },
+ { 8, 0, 0, 1, 9, 64, },
+ { 1, 0, 0, 1, 10, 76, },
+ { 3, 0, 0, 1, 10, 60, },
+ { 4, 0, 0, 1, 10, 76, },
+ { 5, 0, 0, 1, 10, 60, },
+ { 6, 0, 0, 1, 10, 60, },
+ { 7, 0, 0, 1, 10, 60, },
+ { 8, 0, 0, 1, 10, 60, },
+ { 1, 0, 0, 1, 11, 76, },
+ { 3, 0, 0, 1, 11, 52, },
+ { 4, 0, 0, 1, 11, 76, },
+ { 5, 0, 0, 1, 11, 60, },
+ { 6, 0, 0, 1, 11, 52, },
+ { 7, 0, 0, 1, 11, 60, },
+ { 8, 0, 0, 1, 11, 52, },
+ { 1, 0, 0, 1, 12, 76, },
+ { 3, 0, 0, 1, 12, 40, },
+ { 4, 0, 0, 1, 12, 76, },
+ { 5, 0, 0, 1, 12, 60, },
+ { 6, 0, 0, 1, 12, 40, },
+ { 7, 0, 0, 1, 12, 60, },
+ { 8, 0, 0, 1, 12, 40, },
+ { 1, 0, 0, 1, 13, 76, },
+ { 3, 0, 0, 1, 13, 28, },
+ { 4, 0, 0, 1, 13, 70, },
+ { 5, 0, 0, 1, 13, 60, },
+ { 6, 0, 0, 1, 13, 28, },
+ { 7, 0, 0, 1, 13, 60, },
+ { 8, 0, 0, 1, 13, 28, },
+ { 1, 0, 0, 1, 14, 127, },
+ { 3, 0, 0, 1, 14, 127, },
+ { 4, 0, 0, 1, 14, 127, },
+ { 5, 0, 0, 1, 14, 127, },
+ { 6, 0, 0, 1, 14, 127, },
+ { 7, 0, 0, 1, 14, 127, },
+ { 8, 0, 0, 1, 14, 127, },
+ { 1, 0, 0, 2, 1, 76, },
+ { 3, 0, 0, 2, 1, 52, },
+ { 4, 0, 0, 2, 1, 76, },
+ { 5, 0, 0, 2, 1, 60, },
+ { 6, 0, 0, 2, 1, 52, },
+ { 7, 0, 0, 2, 1, 60, },
+ { 8, 0, 0, 2, 1, 52, },
+ { 1, 0, 0, 2, 2, 76, },
+ { 3, 0, 0, 2, 2, 60, },
+ { 4, 0, 0, 2, 2, 76, },
+ { 5, 0, 0, 2, 2, 60, },
+ { 6, 0, 0, 2, 2, 60, },
+ { 7, 0, 0, 2, 2, 60, },
+ { 8, 0, 0, 2, 2, 60, },
+ { 1, 0, 0, 2, 3, 76, },
+ { 3, 0, 0, 2, 3, 64, },
+ { 4, 0, 0, 2, 3, 76, },
+ { 5, 0, 0, 2, 3, 60, },
+ { 6, 0, 0, 2, 3, 64, },
+ { 7, 0, 0, 2, 3, 60, },
+ { 8, 0, 0, 2, 3, 64, },
+ { 1, 0, 0, 2, 4, 76, },
+ { 3, 0, 0, 2, 4, 68, },
+ { 4, 0, 0, 2, 4, 76, },
+ { 5, 0, 0, 2, 4, 60, },
+ { 6, 0, 0, 2, 4, 68, },
+ { 7, 0, 0, 2, 4, 60, },
+ { 8, 0, 0, 2, 4, 68, },
+ { 1, 0, 0, 2, 5, 76, },
+ { 3, 0, 0, 2, 5, 76, },
+ { 4, 0, 0, 2, 5, 76, },
+ { 5, 0, 0, 2, 5, 60, },
+ { 6, 0, 0, 2, 5, 76, },
+ { 7, 0, 0, 2, 5, 60, },
+ { 8, 0, 0, 2, 5, 76, },
+ { 1, 0, 0, 2, 6, 76, },
+ { 3, 0, 0, 2, 6, 76, },
+ { 4, 0, 0, 2, 6, 76, },
+ { 5, 0, 0, 2, 6, 60, },
+ { 6, 0, 0, 2, 6, 76, },
+ { 7, 0, 0, 2, 6, 60, },
+ { 8, 0, 0, 2, 6, 76, },
+ { 1, 0, 0, 2, 7, 76, },
+ { 3, 0, 0, 2, 7, 76, },
+ { 4, 0, 0, 2, 7, 76, },
+ { 5, 0, 0, 2, 7, 60, },
+ { 6, 0, 0, 2, 7, 76, },
+ { 7, 0, 0, 2, 7, 60, },
+ { 8, 0, 0, 2, 7, 76, },
+ { 1, 0, 0, 2, 8, 76, },
+ { 3, 0, 0, 2, 8, 68, },
+ { 4, 0, 0, 2, 8, 76, },
+ { 5, 0, 0, 2, 8, 60, },
+ { 6, 0, 0, 2, 8, 68, },
+ { 7, 0, 0, 2, 8, 60, },
+ { 8, 0, 0, 2, 8, 68, },
+ { 1, 0, 0, 2, 9, 76, },
+ { 3, 0, 0, 2, 9, 64, },
+ { 4, 0, 0, 2, 9, 76, },
+ { 5, 0, 0, 2, 9, 60, },
+ { 6, 0, 0, 2, 9, 64, },
+ { 7, 0, 0, 2, 9, 60, },
+ { 8, 0, 0, 2, 9, 64, },
+ { 1, 0, 0, 2, 10, 76, },
+ { 3, 0, 0, 2, 10, 60, },
+ { 4, 0, 0, 2, 10, 76, },
+ { 5, 0, 0, 2, 10, 60, },
+ { 6, 0, 0, 2, 10, 60, },
+ { 7, 0, 0, 2, 10, 60, },
+ { 8, 0, 0, 2, 10, 60, },
+ { 1, 0, 0, 2, 11, 76, },
+ { 3, 0, 0, 2, 11, 52, },
+ { 4, 0, 0, 2, 11, 76, },
+ { 5, 0, 0, 2, 11, 60, },
+ { 6, 0, 0, 2, 11, 52, },
+ { 7, 0, 0, 2, 11, 60, },
+ { 8, 0, 0, 2, 11, 52, },
+ { 1, 0, 0, 2, 12, 76, },
+ { 3, 0, 0, 2, 12, 40, },
+ { 4, 0, 0, 2, 12, 76, },
+ { 5, 0, 0, 2, 12, 60, },
+ { 6, 0, 0, 2, 12, 40, },
+ { 7, 0, 0, 2, 12, 60, },
+ { 8, 0, 0, 2, 12, 40, },
+ { 1, 0, 0, 2, 13, 76, },
+ { 3, 0, 0, 2, 13, 28, },
+ { 4, 0, 0, 2, 13, 72, },
+ { 5, 0, 0, 2, 13, 60, },
+ { 6, 0, 0, 2, 13, 28, },
+ { 7, 0, 0, 2, 13, 60, },
+ { 8, 0, 0, 2, 13, 28, },
+ { 1, 0, 0, 2, 14, 127, },
+ { 3, 0, 0, 2, 14, 127, },
+ { 4, 0, 0, 2, 14, 127, },
+ { 5, 0, 0, 2, 14, 127, },
+ { 6, 0, 0, 2, 14, 127, },
+ { 7, 0, 0, 2, 14, 127, },
+ { 8, 0, 0, 2, 14, 127, },
+ { 1, 0, 0, 3, 1, 66, },
+ { 3, 0, 0, 3, 1, 52, },
+ { 4, 0, 0, 3, 1, 68, },
+ { 5, 0, 0, 3, 1, 36, },
+ { 6, 0, 0, 3, 1, 52, },
+ { 7, 0, 0, 3, 1, 36, },
+ { 8, 0, 0, 3, 1, 52, },
+ { 1, 0, 0, 3, 2, 66, },
+ { 3, 0, 0, 3, 2, 60, },
+ { 4, 0, 0, 3, 2, 70, },
+ { 5, 0, 0, 3, 2, 36, },
+ { 6, 0, 0, 3, 2, 60, },
+ { 7, 0, 0, 3, 2, 36, },
+ { 8, 0, 0, 3, 2, 60, },
+ { 1, 0, 0, 3, 3, 66, },
+ { 3, 0, 0, 3, 3, 64, },
+ { 4, 0, 0, 3, 3, 70, },
+ { 5, 0, 0, 3, 3, 36, },
+ { 6, 0, 0, 3, 3, 64, },
+ { 7, 0, 0, 3, 3, 36, },
+ { 8, 0, 0, 3, 3, 64, },
+ { 1, 0, 0, 3, 4, 66, },
+ { 3, 0, 0, 3, 4, 68, },
+ { 4, 0, 0, 3, 4, 70, },
+ { 5, 0, 0, 3, 4, 36, },
+ { 6, 0, 0, 3, 4, 68, },
+ { 7, 0, 0, 3, 4, 36, },
+ { 8, 0, 0, 3, 4, 68, },
+ { 1, 0, 0, 3, 5, 66, },
+ { 3, 0, 0, 3, 5, 76, },
+ { 4, 0, 0, 3, 5, 70, },
+ { 5, 0, 0, 3, 5, 36, },
+ { 6, 0, 0, 3, 5, 76, },
+ { 7, 0, 0, 3, 5, 36, },
+ { 8, 0, 0, 3, 5, 76, },
+ { 1, 0, 0, 3, 6, 66, },
+ { 3, 0, 0, 3, 6, 76, },
+ { 4, 0, 0, 3, 6, 70, },
+ { 5, 0, 0, 3, 6, 36, },
+ { 6, 0, 0, 3, 6, 76, },
+ { 7, 0, 0, 3, 6, 36, },
+ { 8, 0, 0, 3, 6, 76, },
+ { 1, 0, 0, 3, 7, 66, },
+ { 3, 0, 0, 3, 7, 76, },
+ { 4, 0, 0, 3, 7, 70, },
+ { 5, 0, 0, 3, 7, 36, },
+ { 6, 0, 0, 3, 7, 76, },
+ { 7, 0, 0, 3, 7, 36, },
+ { 8, 0, 0, 3, 7, 76, },
+ { 1, 0, 0, 3, 8, 66, },
+ { 3, 0, 0, 3, 8, 68, },
+ { 4, 0, 0, 3, 8, 70, },
+ { 5, 0, 0, 3, 8, 36, },
+ { 6, 0, 0, 3, 8, 68, },
+ { 7, 0, 0, 3, 8, 36, },
+ { 8, 0, 0, 3, 8, 68, },
+ { 1, 0, 0, 3, 9, 66, },
+ { 3, 0, 0, 3, 9, 64, },
+ { 4, 0, 0, 3, 9, 70, },
+ { 5, 0, 0, 3, 9, 36, },
+ { 6, 0, 0, 3, 9, 64, },
+ { 7, 0, 0, 3, 9, 36, },
+ { 8, 0, 0, 3, 9, 64, },
+ { 1, 0, 0, 3, 10, 66, },
+ { 3, 0, 0, 3, 10, 60, },
+ { 4, 0, 0, 3, 10, 70, },
+ { 5, 0, 0, 3, 10, 36, },
+ { 6, 0, 0, 3, 10, 60, },
+ { 7, 0, 0, 3, 10, 36, },
+ { 8, 0, 0, 3, 10, 60, },
+ { 1, 0, 0, 3, 11, 66, },
+ { 3, 0, 0, 3, 11, 52, },
+ { 4, 0, 0, 3, 11, 70, },
+ { 5, 0, 0, 3, 11, 36, },
+ { 6, 0, 0, 3, 11, 52, },
+ { 7, 0, 0, 3, 11, 36, },
+ { 8, 0, 0, 3, 11, 52, },
+ { 1, 0, 0, 3, 12, 66, },
+ { 3, 0, 0, 3, 12, 40, },
+ { 4, 0, 0, 3, 12, 70, },
+ { 5, 0, 0, 3, 12, 36, },
+ { 6, 0, 0, 3, 12, 40, },
+ { 7, 0, 0, 3, 12, 36, },
+ { 8, 0, 0, 3, 12, 40, },
+ { 1, 0, 0, 3, 13, 66, },
+ { 3, 0, 0, 3, 13, 28, },
+ { 4, 0, 0, 3, 13, 62, },
+ { 5, 0, 0, 3, 13, 36, },
+ { 6, 0, 0, 3, 13, 28, },
+ { 7, 0, 0, 3, 13, 36, },
+ { 8, 0, 0, 3, 13, 28, },
+ { 1, 0, 0, 3, 14, 127, },
+ { 3, 0, 0, 3, 14, 127, },
+ { 4, 0, 0, 3, 14, 127, },
+ { 5, 0, 0, 3, 14, 127, },
+ { 6, 0, 0, 3, 14, 127, },
+ { 7, 0, 0, 3, 14, 127, },
+ { 8, 0, 0, 3, 14, 127, },
+ { 1, 0, 1, 2, 1, 127, },
+ { 3, 0, 1, 2, 1, 127, },
+ { 4, 0, 1, 2, 1, 127, },
+ { 5, 0, 1, 2, 1, 127, },
+ { 6, 0, 1, 2, 1, 127, },
+ { 7, 0, 1, 2, 1, 127, },
+ { 8, 0, 1, 2, 1, 127, },
+ { 1, 0, 1, 2, 2, 127, },
+ { 3, 0, 1, 2, 2, 127, },
+ { 4, 0, 1, 2, 2, 127, },
+ { 5, 0, 1, 2, 2, 127, },
+ { 6, 0, 1, 2, 2, 127, },
+ { 7, 0, 1, 2, 2, 127, },
+ { 8, 0, 1, 2, 2, 127, },
+ { 1, 0, 1, 2, 3, 72, },
+ { 3, 0, 1, 2, 3, 52, },
+ { 4, 0, 1, 2, 3, 72, },
+ { 5, 0, 1, 2, 3, 60, },
+ { 6, 0, 1, 2, 3, 52, },
+ { 7, 0, 1, 2, 3, 60, },
+ { 8, 0, 1, 2, 3, 52, },
+ { 1, 0, 1, 2, 4, 72, },
+ { 3, 0, 1, 2, 4, 52, },
+ { 4, 0, 1, 2, 4, 72, },
+ { 5, 0, 1, 2, 4, 60, },
+ { 6, 0, 1, 2, 4, 52, },
+ { 7, 0, 1, 2, 4, 60, },
+ { 8, 0, 1, 2, 4, 52, },
+ { 1, 0, 1, 2, 5, 72, },
+ { 3, 0, 1, 2, 5, 60, },
+ { 4, 0, 1, 2, 5, 72, },
+ { 5, 0, 1, 2, 5, 60, },
+ { 6, 0, 1, 2, 5, 60, },
+ { 7, 0, 1, 2, 5, 60, },
+ { 8, 0, 1, 2, 5, 60, },
+ { 1, 0, 1, 2, 6, 72, },
+ { 3, 0, 1, 2, 6, 64, },
+ { 4, 0, 1, 2, 6, 72, },
+ { 5, 0, 1, 2, 6, 60, },
+ { 6, 0, 1, 2, 6, 64, },
+ { 7, 0, 1, 2, 6, 60, },
+ { 8, 0, 1, 2, 6, 64, },
+ { 1, 0, 1, 2, 7, 72, },
+ { 3, 0, 1, 2, 7, 60, },
+ { 4, 0, 1, 2, 7, 72, },
+ { 5, 0, 1, 2, 7, 60, },
+ { 6, 0, 1, 2, 7, 60, },
+ { 7, 0, 1, 2, 7, 60, },
+ { 8, 0, 1, 2, 7, 60, },
+ { 1, 0, 1, 2, 8, 72, },
+ { 3, 0, 1, 2, 8, 52, },
+ { 4, 0, 1, 2, 8, 72, },
+ { 5, 0, 1, 2, 8, 60, },
+ { 6, 0, 1, 2, 8, 52, },
+ { 7, 0, 1, 2, 8, 60, },
+ { 8, 0, 1, 2, 8, 52, },
+ { 1, 0, 1, 2, 9, 72, },
+ { 3, 0, 1, 2, 9, 52, },
+ { 4, 0, 1, 2, 9, 72, },
+ { 5, 0, 1, 2, 9, 60, },
+ { 6, 0, 1, 2, 9, 52, },
+ { 7, 0, 1, 2, 9, 60, },
+ { 8, 0, 1, 2, 9, 52, },
+ { 1, 0, 1, 2, 10, 72, },
+ { 3, 0, 1, 2, 10, 40, },
+ { 4, 0, 1, 2, 10, 72, },
+ { 5, 0, 1, 2, 10, 60, },
+ { 6, 0, 1, 2, 10, 40, },
+ { 7, 0, 1, 2, 10, 60, },
+ { 8, 0, 1, 2, 10, 40, },
+ { 1, 0, 1, 2, 11, 72, },
+ { 3, 0, 1, 2, 11, 28, },
+ { 4, 0, 1, 2, 11, 70, },
+ { 5, 0, 1, 2, 11, 60, },
+ { 6, 0, 1, 2, 11, 28, },
+ { 7, 0, 1, 2, 11, 60, },
+ { 8, 0, 1, 2, 11, 28, },
+ { 1, 0, 1, 2, 12, 127, },
+ { 3, 0, 1, 2, 12, 127, },
+ { 4, 0, 1, 2, 12, 127, },
+ { 5, 0, 1, 2, 12, 127, },
+ { 6, 0, 1, 2, 12, 127, },
+ { 7, 0, 1, 2, 12, 127, },
+ { 8, 0, 1, 2, 12, 127, },
+ { 1, 0, 1, 2, 13, 127, },
+ { 3, 0, 1, 2, 13, 127, },
+ { 4, 0, 1, 2, 13, 127, },
+ { 5, 0, 1, 2, 13, 127, },
+ { 6, 0, 1, 2, 13, 127, },
+ { 7, 0, 1, 2, 13, 127, },
+ { 8, 0, 1, 2, 13, 127, },
+ { 1, 0, 1, 2, 14, 127, },
+ { 3, 0, 1, 2, 14, 127, },
+ { 4, 0, 1, 2, 14, 127, },
+ { 5, 0, 1, 2, 14, 127, },
+ { 6, 0, 1, 2, 14, 127, },
+ { 7, 0, 1, 2, 14, 127, },
+ { 8, 0, 1, 2, 14, 127, },
+ { 1, 0, 1, 3, 1, 127, },
+ { 3, 0, 1, 3, 1, 127, },
+ { 4, 0, 1, 3, 1, 127, },
+ { 5, 0, 1, 3, 1, 127, },
+ { 6, 0, 1, 3, 1, 127, },
+ { 7, 0, 1, 3, 1, 127, },
+ { 8, 0, 1, 3, 1, 127, },
+ { 1, 0, 1, 3, 2, 127, },
+ { 3, 0, 1, 3, 2, 127, },
+ { 4, 0, 1, 3, 2, 127, },
+ { 5, 0, 1, 3, 2, 127, },
+ { 6, 0, 1, 3, 2, 127, },
+ { 7, 0, 1, 3, 2, 127, },
+ { 8, 0, 1, 3, 2, 127, },
+ { 1, 0, 1, 3, 3, 66, },
+ { 3, 0, 1, 3, 3, 48, },
+ { 4, 0, 1, 3, 3, 66, },
+ { 5, 0, 1, 3, 3, 36, },
+ { 6, 0, 1, 3, 3, 48, },
+ { 7, 0, 1, 3, 3, 36, },
+ { 8, 0, 1, 3, 3, 48, },
+ { 1, 0, 1, 3, 4, 66, },
+ { 3, 0, 1, 3, 4, 48, },
+ { 4, 0, 1, 3, 4, 70, },
+ { 5, 0, 1, 3, 4, 36, },
+ { 6, 0, 1, 3, 4, 48, },
+ { 7, 0, 1, 3, 4, 36, },
+ { 8, 0, 1, 3, 4, 48, },
+ { 1, 0, 1, 3, 5, 66, },
+ { 3, 0, 1, 3, 5, 60, },
+ { 4, 0, 1, 3, 5, 70, },
+ { 5, 0, 1, 3, 5, 36, },
+ { 6, 0, 1, 3, 5, 60, },
+ { 7, 0, 1, 3, 5, 36, },
+ { 8, 0, 1, 3, 5, 60, },
+ { 1, 0, 1, 3, 6, 66, },
+ { 3, 0, 1, 3, 6, 64, },
+ { 4, 0, 1, 3, 6, 70, },
+ { 5, 0, 1, 3, 6, 36, },
+ { 6, 0, 1, 3, 6, 64, },
+ { 7, 0, 1, 3, 6, 36, },
+ { 8, 0, 1, 3, 6, 64, },
+ { 1, 0, 1, 3, 7, 66, },
+ { 3, 0, 1, 3, 7, 60, },
+ { 4, 0, 1, 3, 7, 70, },
+ { 5, 0, 1, 3, 7, 36, },
+ { 6, 0, 1, 3, 7, 60, },
+ { 7, 0, 1, 3, 7, 36, },
+ { 8, 0, 1, 3, 7, 60, },
+ { 1, 0, 1, 3, 8, 66, },
+ { 3, 0, 1, 3, 8, 52, },
+ { 4, 0, 1, 3, 8, 70, },
+ { 5, 0, 1, 3, 8, 36, },
+ { 6, 0, 1, 3, 8, 52, },
+ { 7, 0, 1, 3, 8, 36, },
+ { 8, 0, 1, 3, 8, 52, },
+ { 1, 0, 1, 3, 9, 66, },
+ { 3, 0, 1, 3, 9, 52, },
+ { 4, 0, 1, 3, 9, 70, },
+ { 5, 0, 1, 3, 9, 36, },
+ { 6, 0, 1, 3, 9, 52, },
+ { 7, 0, 1, 3, 9, 36, },
+ { 8, 0, 1, 3, 9, 52, },
+ { 1, 0, 1, 3, 10, 66, },
+ { 3, 0, 1, 3, 10, 40, },
+ { 4, 0, 1, 3, 10, 70, },
+ { 5, 0, 1, 3, 10, 36, },
+ { 6, 0, 1, 3, 10, 40, },
+ { 7, 0, 1, 3, 10, 36, },
+ { 8, 0, 1, 3, 10, 40, },
+ { 1, 0, 1, 3, 11, 66, },
+ { 3, 0, 1, 3, 11, 26, },
+ { 4, 0, 1, 3, 11, 66, },
+ { 5, 0, 1, 3, 11, 36, },
+ { 6, 0, 1, 3, 11, 26, },
+ { 7, 0, 1, 3, 11, 36, },
+ { 8, 0, 1, 3, 11, 26, },
+ { 1, 0, 1, 3, 12, 127, },
+ { 3, 0, 1, 3, 12, 127, },
+ { 4, 0, 1, 3, 12, 127, },
+ { 5, 0, 1, 3, 12, 127, },
+ { 6, 0, 1, 3, 12, 127, },
+ { 7, 0, 1, 3, 12, 127, },
+ { 8, 0, 1, 3, 12, 127, },
+ { 1, 0, 1, 3, 13, 127, },
+ { 3, 0, 1, 3, 13, 127, },
+ { 4, 0, 1, 3, 13, 127, },
+ { 5, 0, 1, 3, 13, 127, },
+ { 6, 0, 1, 3, 13, 127, },
+ { 7, 0, 1, 3, 13, 127, },
+ { 8, 0, 1, 3, 13, 127, },
+ { 1, 0, 1, 3, 14, 127, },
+ { 3, 0, 1, 3, 14, 127, },
+ { 4, 0, 1, 3, 14, 127, },
+ { 5, 0, 1, 3, 14, 127, },
+ { 6, 0, 1, 3, 14, 127, },
+ { 7, 0, 1, 3, 14, 127, },
+ { 8, 0, 1, 3, 14, 127, },
+ { 1, 1, 0, 1, 36, 60, },
+ { 3, 1, 0, 1, 36, 62, },
+ { 4, 1, 0, 1, 36, 76, },
+ { 5, 1, 0, 1, 36, 62, },
+ { 6, 1, 0, 1, 36, 64, },
+ { 7, 1, 0, 1, 36, 54, },
+ { 8, 1, 0, 1, 36, 62, },
+ { 1, 1, 0, 1, 40, 62, },
+ { 3, 1, 0, 1, 40, 62, },
+ { 4, 1, 0, 1, 40, 76, },
+ { 5, 1, 0, 1, 40, 62, },
+ { 6, 1, 0, 1, 40, 64, },
+ { 7, 1, 0, 1, 40, 54, },
+ { 8, 1, 0, 1, 40, 62, },
+ { 1, 1, 0, 1, 44, 62, },
+ { 3, 1, 0, 1, 44, 62, },
+ { 4, 1, 0, 1, 44, 76, },
+ { 5, 1, 0, 1, 44, 62, },
+ { 6, 1, 0, 1, 44, 64, },
+ { 7, 1, 0, 1, 44, 54, },
+ { 8, 1, 0, 1, 44, 62, },
+ { 1, 1, 0, 1, 48, 62, },
+ { 3, 1, 0, 1, 48, 62, },
+ { 4, 1, 0, 1, 48, 76, },
+ { 5, 1, 0, 1, 48, 62, },
+ { 6, 1, 0, 1, 48, 64, },
+ { 7, 1, 0, 1, 48, 54, },
+ { 8, 1, 0, 1, 48, 62, },
+ { 1, 1, 0, 1, 52, 62, },
+ { 3, 1, 0, 1, 52, 64, },
+ { 4, 1, 0, 1, 52, 76, },
+ { 5, 1, 0, 1, 52, 62, },
+ { 6, 1, 0, 1, 52, 76, },
+ { 7, 1, 0, 1, 52, 54, },
+ { 8, 1, 0, 1, 52, 76, },
+ { 1, 1, 0, 1, 56, 62, },
+ { 3, 1, 0, 1, 56, 64, },
+ { 4, 1, 0, 1, 56, 76, },
+ { 5, 1, 0, 1, 56, 62, },
+ { 6, 1, 0, 1, 56, 76, },
+ { 7, 1, 0, 1, 56, 54, },
+ { 8, 1, 0, 1, 56, 76, },
+ { 1, 1, 0, 1, 60, 62, },
+ { 3, 1, 0, 1, 60, 64, },
+ { 4, 1, 0, 1, 60, 76, },
+ { 5, 1, 0, 1, 60, 62, },
+ { 6, 1, 0, 1, 60, 76, },
+ { 7, 1, 0, 1, 60, 54, },
+ { 8, 1, 0, 1, 60, 76, },
+ { 1, 1, 0, 1, 64, 60, },
+ { 3, 1, 0, 1, 64, 64, },
+ { 4, 1, 0, 1, 64, 76, },
+ { 5, 1, 0, 1, 64, 62, },
+ { 6, 1, 0, 1, 64, 74, },
+ { 7, 1, 0, 1, 64, 54, },
+ { 8, 1, 0, 1, 64, 74, },
+ { 1, 1, 0, 1, 100, 76, },
+ { 3, 1, 0, 1, 100, 72, },
+ { 4, 1, 0, 1, 100, 76, },
+ { 5, 1, 0, 1, 100, 62, },
+ { 6, 1, 0, 1, 100, 72, },
+ { 7, 1, 0, 1, 100, 54, },
+ { 8, 1, 0, 1, 100, 72, },
+ { 1, 1, 0, 1, 104, 76, },
+ { 3, 1, 0, 1, 104, 76, },
+ { 4, 1, 0, 1, 104, 76, },
+ { 5, 1, 0, 1, 104, 62, },
+ { 6, 1, 0, 1, 104, 76, },
+ { 7, 1, 0, 1, 104, 54, },
+ { 8, 1, 0, 1, 104, 76, },
+ { 1, 1, 0, 1, 108, 76, },
+ { 3, 1, 0, 1, 108, 76, },
+ { 4, 1, 0, 1, 108, 76, },
+ { 5, 1, 0, 1, 108, 62, },
+ { 6, 1, 0, 1, 108, 76, },
+ { 7, 1, 0, 1, 108, 54, },
+ { 8, 1, 0, 1, 108, 76, },
+ { 1, 1, 0, 1, 112, 76, },
+ { 3, 1, 0, 1, 112, 76, },
+ { 4, 1, 0, 1, 112, 76, },
+ { 5, 1, 0, 1, 112, 62, },
+ { 6, 1, 0, 1, 112, 76, },
+ { 7, 1, 0, 1, 112, 54, },
+ { 8, 1, 0, 1, 112, 76, },
+ { 1, 1, 0, 1, 116, 76, },
+ { 3, 1, 0, 1, 116, 76, },
+ { 4, 1, 0, 1, 116, 76, },
+ { 5, 1, 0, 1, 116, 62, },
+ { 6, 1, 0, 1, 116, 76, },
+ { 7, 1, 0, 1, 116, 54, },
+ { 8, 1, 0, 1, 116, 76, },
+ { 1, 1, 0, 1, 120, 76, },
+ { 3, 1, 0, 1, 120, 127, },
+ { 4, 1, 0, 1, 120, 76, },
+ { 5, 1, 0, 1, 120, 127, },
+ { 6, 1, 0, 1, 120, 76, },
+ { 7, 1, 0, 1, 120, 54, },
+ { 8, 1, 0, 1, 120, 76, },
+ { 1, 1, 0, 1, 124, 76, },
+ { 3, 1, 0, 1, 124, 127, },
+ { 4, 1, 0, 1, 124, 76, },
+ { 5, 1, 0, 1, 124, 127, },
+ { 6, 1, 0, 1, 124, 76, },
+ { 7, 1, 0, 1, 124, 54, },
+ { 8, 1, 0, 1, 124, 76, },
+ { 1, 1, 0, 1, 128, 76, },
+ { 3, 1, 0, 1, 128, 127, },
+ { 4, 1, 0, 1, 128, 76, },
+ { 5, 1, 0, 1, 128, 127, },
+ { 6, 1, 0, 1, 128, 76, },
+ { 7, 1, 0, 1, 128, 54, },
+ { 8, 1, 0, 1, 128, 76, },
+ { 1, 1, 0, 1, 132, 76, },
+ { 3, 1, 0, 1, 132, 76, },
+ { 4, 1, 0, 1, 132, 76, },
+ { 5, 1, 0, 1, 132, 62, },
+ { 6, 1, 0, 1, 132, 76, },
+ { 7, 1, 0, 1, 132, 54, },
+ { 8, 1, 0, 1, 132, 76, },
+ { 1, 1, 0, 1, 136, 76, },
+ { 3, 1, 0, 1, 136, 76, },
+ { 4, 1, 0, 1, 136, 76, },
+ { 5, 1, 0, 1, 136, 62, },
+ { 6, 1, 0, 1, 136, 76, },
+ { 7, 1, 0, 1, 136, 127, },
+ { 8, 1, 0, 1, 136, 76, },
+ { 1, 1, 0, 1, 140, 76, },
+ { 3, 1, 0, 1, 140, 72, },
+ { 4, 1, 0, 1, 140, 76, },
+ { 5, 1, 0, 1, 140, 62, },
+ { 6, 1, 0, 1, 140, 72, },
+ { 7, 1, 0, 1, 140, 127, },
+ { 8, 1, 0, 1, 140, 72, },
+ { 1, 1, 0, 1, 144, 127, },
+ { 3, 1, 0, 1, 144, 76, },
+ { 4, 1, 0, 1, 144, 76, },
+ { 5, 1, 0, 1, 144, 127, },
+ { 6, 1, 0, 1, 144, 76, },
+ { 7, 1, 0, 1, 144, 127, },
+ { 8, 1, 0, 1, 144, 76, },
+ { 1, 1, 0, 1, 149, 127, },
+ { 3, 1, 0, 1, 149, 76, },
+ { 4, 1, 0, 1, 149, 74, },
+ { 5, 1, 0, 1, 149, 76, },
+ { 6, 1, 0, 1, 149, 76, },
+ { 7, 1, 0, 1, 149, 54, },
+ { 8, 1, 0, 1, 149, 76, },
+ { 1, 1, 0, 1, 153, 127, },
+ { 3, 1, 0, 1, 153, 76, },
+ { 4, 1, 0, 1, 153, 74, },
+ { 5, 1, 0, 1, 153, 76, },
+ { 6, 1, 0, 1, 153, 76, },
+ { 7, 1, 0, 1, 153, 54, },
+ { 8, 1, 0, 1, 153, 76, },
+ { 1, 1, 0, 1, 157, 127, },
+ { 3, 1, 0, 1, 157, 76, },
+ { 4, 1, 0, 1, 157, 74, },
+ { 5, 1, 0, 1, 157, 76, },
+ { 6, 1, 0, 1, 157, 76, },
+ { 7, 1, 0, 1, 157, 54, },
+ { 8, 1, 0, 1, 157, 76, },
+ { 1, 1, 0, 1, 161, 127, },
+ { 3, 1, 0, 1, 161, 76, },
+ { 4, 1, 0, 1, 161, 74, },
+ { 5, 1, 0, 1, 161, 76, },
+ { 6, 1, 0, 1, 161, 76, },
+ { 7, 1, 0, 1, 161, 54, },
+ { 8, 1, 0, 1, 161, 76, },
+ { 1, 1, 0, 1, 165, 127, },
+ { 3, 1, 0, 1, 165, 76, },
+ { 4, 1, 0, 1, 165, 74, },
+ { 5, 1, 0, 1, 165, 76, },
+ { 6, 1, 0, 1, 165, 76, },
+ { 7, 1, 0, 1, 165, 54, },
+ { 8, 1, 0, 1, 165, 76, },
+ { 1, 1, 0, 2, 36, 62, },
+ { 3, 1, 0, 2, 36, 62, },
+ { 4, 1, 0, 2, 36, 76, },
+ { 5, 1, 0, 2, 36, 62, },
+ { 6, 1, 0, 2, 36, 64, },
+ { 7, 1, 0, 2, 36, 54, },
+ { 8, 1, 0, 2, 36, 62, },
+ { 1, 1, 0, 2, 40, 62, },
+ { 3, 1, 0, 2, 40, 62, },
+ { 4, 1, 0, 2, 40, 76, },
+ { 5, 1, 0, 2, 40, 62, },
+ { 6, 1, 0, 2, 40, 64, },
+ { 7, 1, 0, 2, 40, 54, },
+ { 8, 1, 0, 2, 40, 62, },
+ { 1, 1, 0, 2, 44, 62, },
+ { 3, 1, 0, 2, 44, 62, },
+ { 4, 1, 0, 2, 44, 76, },
+ { 5, 1, 0, 2, 44, 62, },
+ { 6, 1, 0, 2, 44, 64, },
+ { 7, 1, 0, 2, 44, 54, },
+ { 8, 1, 0, 2, 44, 62, },
+ { 1, 1, 0, 2, 48, 62, },
+ { 3, 1, 0, 2, 48, 62, },
+ { 4, 1, 0, 2, 48, 76, },
+ { 5, 1, 0, 2, 48, 62, },
+ { 6, 1, 0, 2, 48, 64, },
+ { 7, 1, 0, 2, 48, 54, },
+ { 8, 1, 0, 2, 48, 62, },
+ { 1, 1, 0, 2, 52, 62, },
+ { 3, 1, 0, 2, 52, 64, },
+ { 4, 1, 0, 2, 52, 76, },
+ { 5, 1, 0, 2, 52, 62, },
+ { 6, 1, 0, 2, 52, 76, },
+ { 7, 1, 0, 2, 52, 54, },
+ { 8, 1, 0, 2, 52, 76, },
+ { 1, 1, 0, 2, 56, 62, },
+ { 3, 1, 0, 2, 56, 64, },
+ { 4, 1, 0, 2, 56, 76, },
+ { 5, 1, 0, 2, 56, 62, },
+ { 6, 1, 0, 2, 56, 76, },
+ { 7, 1, 0, 2, 56, 54, },
+ { 8, 1, 0, 2, 56, 76, },
+ { 1, 1, 0, 2, 60, 62, },
+ { 3, 1, 0, 2, 60, 64, },
+ { 4, 1, 0, 2, 60, 76, },
+ { 5, 1, 0, 2, 60, 62, },
+ { 6, 1, 0, 2, 60, 76, },
+ { 7, 1, 0, 2, 60, 54, },
+ { 8, 1, 0, 2, 60, 76, },
+ { 1, 1, 0, 2, 64, 60, },
+ { 3, 1, 0, 2, 64, 64, },
+ { 4, 1, 0, 2, 64, 74, },
+ { 5, 1, 0, 2, 64, 62, },
+ { 6, 1, 0, 2, 64, 74, },
+ { 7, 1, 0, 2, 64, 54, },
+ { 8, 1, 0, 2, 64, 74, },
+ { 1, 1, 0, 2, 100, 76, },
+ { 3, 1, 0, 2, 100, 70, },
+ { 4, 1, 0, 2, 100, 76, },
+ { 5, 1, 0, 2, 100, 62, },
+ { 6, 1, 0, 2, 100, 70, },
+ { 7, 1, 0, 2, 100, 54, },
+ { 8, 1, 0, 2, 100, 70, },
+ { 1, 1, 0, 2, 104, 76, },
+ { 3, 1, 0, 2, 104, 76, },
+ { 4, 1, 0, 2, 104, 76, },
+ { 5, 1, 0, 2, 104, 62, },
+ { 6, 1, 0, 2, 104, 76, },
+ { 7, 1, 0, 2, 104, 54, },
+ { 8, 1, 0, 2, 104, 76, },
+ { 1, 1, 0, 2, 108, 76, },
+ { 3, 1, 0, 2, 108, 76, },
+ { 4, 1, 0, 2, 108, 76, },
+ { 5, 1, 0, 2, 108, 62, },
+ { 6, 1, 0, 2, 108, 76, },
+ { 7, 1, 0, 2, 108, 54, },
+ { 8, 1, 0, 2, 108, 76, },
+ { 1, 1, 0, 2, 112, 76, },
+ { 3, 1, 0, 2, 112, 76, },
+ { 4, 1, 0, 2, 112, 76, },
+ { 5, 1, 0, 2, 112, 62, },
+ { 6, 1, 0, 2, 112, 76, },
+ { 7, 1, 0, 2, 112, 54, },
+ { 8, 1, 0, 2, 112, 76, },
+ { 1, 1, 0, 2, 116, 76, },
+ { 3, 1, 0, 2, 116, 76, },
+ { 4, 1, 0, 2, 116, 76, },
+ { 5, 1, 0, 2, 116, 62, },
+ { 6, 1, 0, 2, 116, 76, },
+ { 7, 1, 0, 2, 116, 54, },
+ { 8, 1, 0, 2, 116, 76, },
+ { 1, 1, 0, 2, 120, 76, },
+ { 3, 1, 0, 2, 120, 127, },
+ { 4, 1, 0, 2, 120, 76, },
+ { 5, 1, 0, 2, 120, 127, },
+ { 6, 1, 0, 2, 120, 76, },
+ { 7, 1, 0, 2, 120, 54, },
+ { 8, 1, 0, 2, 120, 76, },
+ { 1, 1, 0, 2, 124, 76, },
+ { 3, 1, 0, 2, 124, 127, },
+ { 4, 1, 0, 2, 124, 76, },
+ { 5, 1, 0, 2, 124, 127, },
+ { 6, 1, 0, 2, 124, 76, },
+ { 7, 1, 0, 2, 124, 54, },
+ { 8, 1, 0, 2, 124, 76, },
+ { 1, 1, 0, 2, 128, 76, },
+ { 3, 1, 0, 2, 128, 127, },
+ { 4, 1, 0, 2, 128, 76, },
+ { 5, 1, 0, 2, 128, 127, },
+ { 6, 1, 0, 2, 128, 76, },
+ { 7, 1, 0, 2, 128, 54, },
+ { 8, 1, 0, 2, 128, 76, },
+ { 1, 1, 0, 2, 132, 76, },
+ { 3, 1, 0, 2, 132, 76, },
+ { 4, 1, 0, 2, 132, 76, },
+ { 5, 1, 0, 2, 132, 62, },
+ { 6, 1, 0, 2, 132, 76, },
+ { 7, 1, 0, 2, 132, 54, },
+ { 8, 1, 0, 2, 132, 76, },
+ { 1, 1, 0, 2, 136, 76, },
+ { 3, 1, 0, 2, 136, 76, },
+ { 4, 1, 0, 2, 136, 76, },
+ { 5, 1, 0, 2, 136, 62, },
+ { 6, 1, 0, 2, 136, 76, },
+ { 7, 1, 0, 2, 136, 127, },
+ { 8, 1, 0, 2, 136, 76, },
+ { 1, 1, 0, 2, 140, 76, },
+ { 3, 1, 0, 2, 140, 70, },
+ { 4, 1, 0, 2, 140, 76, },
+ { 5, 1, 0, 2, 140, 62, },
+ { 6, 1, 0, 2, 140, 70, },
+ { 7, 1, 0, 2, 140, 127, },
+ { 8, 1, 0, 2, 140, 70, },
+ { 1, 1, 0, 2, 144, 127, },
+ { 3, 1, 0, 2, 144, 76, },
+ { 4, 1, 0, 2, 144, 76, },
+ { 5, 1, 0, 2, 144, 127, },
+ { 6, 1, 0, 2, 144, 76, },
+ { 7, 1, 0, 2, 144, 127, },
+ { 8, 1, 0, 2, 144, 76, },
+ { 1, 1, 0, 2, 149, 127, },
+ { 3, 1, 0, 2, 149, 76, },
+ { 4, 1, 0, 2, 149, 74, },
+ { 5, 1, 0, 2, 149, 76, },
+ { 6, 1, 0, 2, 149, 76, },
+ { 7, 1, 0, 2, 149, 54, },
+ { 8, 1, 0, 2, 149, 76, },
+ { 1, 1, 0, 2, 153, 127, },
+ { 3, 1, 0, 2, 153, 76, },
+ { 4, 1, 0, 2, 153, 74, },
+ { 5, 1, 0, 2, 153, 76, },
+ { 6, 1, 0, 2, 153, 76, },
+ { 7, 1, 0, 2, 153, 54, },
+ { 8, 1, 0, 2, 153, 76, },
+ { 1, 1, 0, 2, 157, 127, },
+ { 3, 1, 0, 2, 157, 76, },
+ { 4, 1, 0, 2, 157, 74, },
+ { 5, 1, 0, 2, 157, 76, },
+ { 6, 1, 0, 2, 157, 76, },
+ { 7, 1, 0, 2, 157, 54, },
+ { 8, 1, 0, 2, 157, 76, },
+ { 1, 1, 0, 2, 161, 127, },
+ { 3, 1, 0, 2, 161, 76, },
+ { 4, 1, 0, 2, 161, 74, },
+ { 5, 1, 0, 2, 161, 76, },
+ { 6, 1, 0, 2, 161, 76, },
+ { 7, 1, 0, 2, 161, 54, },
+ { 8, 1, 0, 2, 161, 76, },
+ { 1, 1, 0, 2, 165, 127, },
+ { 3, 1, 0, 2, 165, 76, },
+ { 4, 1, 0, 2, 165, 74, },
+ { 5, 1, 0, 2, 165, 76, },
+ { 6, 1, 0, 2, 165, 76, },
+ { 7, 1, 0, 2, 165, 54, },
+ { 8, 1, 0, 2, 165, 76, },
+ { 1, 1, 0, 3, 36, 50, },
+ { 3, 1, 0, 3, 36, 38, },
+ { 4, 1, 0, 3, 36, 66, },
+ { 5, 1, 0, 3, 36, 38, },
+ { 6, 1, 0, 3, 36, 52, },
+ { 7, 1, 0, 3, 36, 30, },
+ { 8, 1, 0, 3, 36, 50, },
+ { 1, 1, 0, 3, 40, 50, },
+ { 3, 1, 0, 3, 40, 38, },
+ { 4, 1, 0, 3, 40, 66, },
+ { 5, 1, 0, 3, 40, 38, },
+ { 6, 1, 0, 3, 40, 52, },
+ { 7, 1, 0, 3, 40, 30, },
+ { 8, 1, 0, 3, 40, 50, },
+ { 1, 1, 0, 3, 44, 50, },
+ { 3, 1, 0, 3, 44, 38, },
+ { 4, 1, 0, 3, 44, 66, },
+ { 5, 1, 0, 3, 44, 38, },
+ { 6, 1, 0, 3, 44, 52, },
+ { 7, 1, 0, 3, 44, 30, },
+ { 8, 1, 0, 3, 44, 50, },
+ { 1, 1, 0, 3, 48, 50, },
+ { 3, 1, 0, 3, 48, 38, },
+ { 4, 1, 0, 3, 48, 66, },
+ { 5, 1, 0, 3, 48, 38, },
+ { 6, 1, 0, 3, 48, 52, },
+ { 7, 1, 0, 3, 48, 30, },
+ { 8, 1, 0, 3, 48, 50, },
+ { 1, 1, 0, 3, 52, 50, },
+ { 3, 1, 0, 3, 52, 40, },
+ { 4, 1, 0, 3, 52, 66, },
+ { 5, 1, 0, 3, 52, 38, },
+ { 6, 1, 0, 3, 52, 68, },
+ { 7, 1, 0, 3, 52, 30, },
+ { 8, 1, 0, 3, 52, 68, },
+ { 1, 1, 0, 3, 56, 50, },
+ { 3, 1, 0, 3, 56, 40, },
+ { 4, 1, 0, 3, 56, 66, },
+ { 5, 1, 0, 3, 56, 38, },
+ { 6, 1, 0, 3, 56, 68, },
+ { 7, 1, 0, 3, 56, 30, },
+ { 8, 1, 0, 3, 56, 68, },
+ { 1, 1, 0, 3, 60, 50, },
+ { 3, 1, 0, 3, 60, 40, },
+ { 4, 1, 0, 3, 60, 66, },
+ { 5, 1, 0, 3, 60, 38, },
+ { 6, 1, 0, 3, 60, 66, },
+ { 7, 1, 0, 3, 60, 30, },
+ { 8, 1, 0, 3, 60, 66, },
+ { 1, 1, 0, 3, 64, 50, },
+ { 3, 1, 0, 3, 64, 40, },
+ { 4, 1, 0, 3, 64, 66, },
+ { 5, 1, 0, 3, 64, 38, },
+ { 6, 1, 0, 3, 64, 68, },
+ { 7, 1, 0, 3, 64, 30, },
+ { 8, 1, 0, 3, 64, 68, },
+ { 1, 1, 0, 3, 100, 70, },
+ { 3, 1, 0, 3, 100, 60, },
+ { 4, 1, 0, 3, 100, 64, },
+ { 5, 1, 0, 3, 100, 38, },
+ { 6, 1, 0, 3, 100, 60, },
+ { 7, 1, 0, 3, 100, 30, },
+ { 8, 1, 0, 3, 100, 60, },
+ { 1, 1, 0, 3, 104, 70, },
+ { 3, 1, 0, 3, 104, 68, },
+ { 4, 1, 0, 3, 104, 64, },
+ { 5, 1, 0, 3, 104, 38, },
+ { 6, 1, 0, 3, 104, 68, },
+ { 7, 1, 0, 3, 104, 30, },
+ { 8, 1, 0, 3, 104, 68, },
+ { 1, 1, 0, 3, 108, 70, },
+ { 3, 1, 0, 3, 108, 68, },
+ { 4, 1, 0, 3, 108, 64, },
+ { 5, 1, 0, 3, 108, 38, },
+ { 6, 1, 0, 3, 108, 68, },
+ { 7, 1, 0, 3, 108, 30, },
+ { 8, 1, 0, 3, 108, 68, },
+ { 1, 1, 0, 3, 112, 70, },
+ { 3, 1, 0, 3, 112, 68, },
+ { 4, 1, 0, 3, 112, 64, },
+ { 5, 1, 0, 3, 112, 38, },
+ { 6, 1, 0, 3, 112, 68, },
+ { 7, 1, 0, 3, 112, 30, },
+ { 8, 1, 0, 3, 112, 68, },
+ { 1, 1, 0, 3, 116, 70, },
+ { 3, 1, 0, 3, 116, 68, },
+ { 4, 1, 0, 3, 116, 64, },
+ { 5, 1, 0, 3, 116, 38, },
+ { 6, 1, 0, 3, 116, 68, },
+ { 7, 1, 0, 3, 116, 30, },
+ { 8, 1, 0, 3, 116, 68, },
+ { 1, 1, 0, 3, 120, 70, },
+ { 3, 1, 0, 3, 120, 127, },
+ { 4, 1, 0, 3, 120, 64, },
+ { 5, 1, 0, 3, 120, 127, },
+ { 6, 1, 0, 3, 120, 68, },
+ { 7, 1, 0, 3, 120, 30, },
+ { 8, 1, 0, 3, 120, 68, },
+ { 1, 1, 0, 3, 124, 70, },
+ { 3, 1, 0, 3, 124, 127, },
+ { 4, 1, 0, 3, 124, 64, },
+ { 5, 1, 0, 3, 124, 127, },
+ { 6, 1, 0, 3, 124, 68, },
+ { 7, 1, 0, 3, 124, 30, },
+ { 8, 1, 0, 3, 124, 68, },
+ { 1, 1, 0, 3, 128, 70, },
+ { 3, 1, 0, 3, 128, 127, },
+ { 4, 1, 0, 3, 128, 64, },
+ { 5, 1, 0, 3, 128, 127, },
+ { 6, 1, 0, 3, 128, 68, },
+ { 7, 1, 0, 3, 128, 30, },
+ { 8, 1, 0, 3, 128, 68, },
+ { 1, 1, 0, 3, 132, 70, },
+ { 3, 1, 0, 3, 132, 68, },
+ { 4, 1, 0, 3, 132, 64, },
+ { 5, 1, 0, 3, 132, 38, },
+ { 6, 1, 0, 3, 132, 68, },
+ { 7, 1, 0, 3, 132, 30, },
+ { 8, 1, 0, 3, 132, 68, },
+ { 1, 1, 0, 3, 136, 70, },
+ { 3, 1, 0, 3, 136, 68, },
+ { 4, 1, 0, 3, 136, 64, },
+ { 5, 1, 0, 3, 136, 38, },
+ { 6, 1, 0, 3, 136, 68, },
+ { 7, 1, 0, 3, 136, 127, },
+ { 8, 1, 0, 3, 136, 68, },
+ { 1, 1, 0, 3, 140, 70, },
+ { 3, 1, 0, 3, 140, 60, },
+ { 4, 1, 0, 3, 140, 64, },
+ { 5, 1, 0, 3, 140, 38, },
+ { 6, 1, 0, 3, 140, 60, },
+ { 7, 1, 0, 3, 140, 127, },
+ { 8, 1, 0, 3, 140, 60, },
+ { 1, 1, 0, 3, 144, 127, },
+ { 3, 1, 0, 3, 144, 68, },
+ { 4, 1, 0, 3, 144, 64, },
+ { 5, 1, 0, 3, 144, 127, },
+ { 6, 1, 0, 3, 144, 68, },
+ { 7, 1, 0, 3, 144, 127, },
+ { 8, 1, 0, 3, 144, 68, },
+ { 1, 1, 0, 3, 149, 127, },
+ { 3, 1, 0, 3, 149, 76, },
+ { 4, 1, 0, 3, 149, 60, },
+ { 5, 1, 0, 3, 149, 76, },
+ { 6, 1, 0, 3, 149, 76, },
+ { 7, 1, 0, 3, 149, 30, },
+ { 8, 1, 0, 3, 149, 72, },
+ { 1, 1, 0, 3, 153, 127, },
+ { 3, 1, 0, 3, 153, 76, },
+ { 4, 1, 0, 3, 153, 60, },
+ { 5, 1, 0, 3, 153, 76, },
+ { 6, 1, 0, 3, 153, 76, },
+ { 7, 1, 0, 3, 153, 30, },
+ { 8, 1, 0, 3, 153, 76, },
+ { 1, 1, 0, 3, 157, 127, },
+ { 3, 1, 0, 3, 157, 76, },
+ { 4, 1, 0, 3, 157, 60, },
+ { 5, 1, 0, 3, 157, 76, },
+ { 6, 1, 0, 3, 157, 76, },
+ { 7, 1, 0, 3, 157, 30, },
+ { 8, 1, 0, 3, 157, 76, },
+ { 1, 1, 0, 3, 161, 127, },
+ { 3, 1, 0, 3, 161, 76, },
+ { 4, 1, 0, 3, 161, 60, },
+ { 5, 1, 0, 3, 161, 76, },
+ { 6, 1, 0, 3, 161, 76, },
+ { 7, 1, 0, 3, 161, 30, },
+ { 8, 1, 0, 3, 161, 76, },
+ { 1, 1, 0, 3, 165, 127, },
+ { 3, 1, 0, 3, 165, 76, },
+ { 4, 1, 0, 3, 165, 60, },
+ { 5, 1, 0, 3, 165, 76, },
+ { 6, 1, 0, 3, 165, 76, },
+ { 7, 1, 0, 3, 165, 30, },
+ { 8, 1, 0, 3, 165, 76, },
+ { 1, 1, 1, 2, 38, 62, },
+ { 3, 1, 1, 2, 38, 64, },
+ { 4, 1, 1, 2, 38, 72, },
+ { 5, 1, 1, 2, 38, 64, },
+ { 6, 1, 1, 2, 38, 64, },
+ { 7, 1, 1, 2, 38, 54, },
+ { 8, 1, 1, 2, 38, 62, },
+ { 1, 1, 1, 2, 46, 62, },
+ { 3, 1, 1, 2, 46, 64, },
+ { 4, 1, 1, 2, 46, 72, },
+ { 5, 1, 1, 2, 46, 64, },
+ { 6, 1, 1, 2, 46, 64, },
+ { 7, 1, 1, 2, 46, 54, },
+ { 8, 1, 1, 2, 46, 62, },
+ { 1, 1, 1, 2, 54, 62, },
+ { 3, 1, 1, 2, 54, 64, },
+ { 4, 1, 1, 2, 54, 72, },
+ { 5, 1, 1, 2, 54, 64, },
+ { 6, 1, 1, 2, 54, 72, },
+ { 7, 1, 1, 2, 54, 54, },
+ { 8, 1, 1, 2, 54, 72, },
+ { 1, 1, 1, 2, 62, 62, },
+ { 3, 1, 1, 2, 62, 64, },
+ { 4, 1, 1, 2, 62, 70, },
+ { 5, 1, 1, 2, 62, 64, },
+ { 6, 1, 1, 2, 62, 64, },
+ { 7, 1, 1, 2, 62, 54, },
+ { 8, 1, 1, 2, 62, 64, },
+ { 1, 1, 1, 2, 102, 72, },
+ { 3, 1, 1, 2, 102, 58, },
+ { 4, 1, 1, 2, 102, 72, },
+ { 5, 1, 1, 2, 102, 64, },
+ { 6, 1, 1, 2, 102, 58, },
+ { 7, 1, 1, 2, 102, 54, },
+ { 8, 1, 1, 2, 102, 58, },
+ { 1, 1, 1, 2, 110, 72, },
+ { 3, 1, 1, 2, 110, 72, },
+ { 4, 1, 1, 2, 110, 72, },
+ { 5, 1, 1, 2, 110, 64, },
+ { 6, 1, 1, 2, 110, 72, },
+ { 7, 1, 1, 2, 110, 54, },
+ { 8, 1, 1, 2, 110, 72, },
+ { 1, 1, 1, 2, 118, 72, },
+ { 3, 1, 1, 2, 118, 127, },
+ { 4, 1, 1, 2, 118, 72, },
+ { 5, 1, 1, 2, 118, 127, },
+ { 6, 1, 1, 2, 118, 72, },
+ { 7, 1, 1, 2, 118, 54, },
+ { 8, 1, 1, 2, 118, 72, },
+ { 1, 1, 1, 2, 126, 72, },
+ { 3, 1, 1, 2, 126, 127, },
+ { 4, 1, 1, 2, 126, 72, },
+ { 5, 1, 1, 2, 126, 127, },
+ { 6, 1, 1, 2, 126, 72, },
+ { 7, 1, 1, 2, 126, 54, },
+ { 8, 1, 1, 2, 126, 72, },
+ { 1, 1, 1, 2, 134, 72, },
+ { 3, 1, 1, 2, 134, 72, },
+ { 4, 1, 1, 2, 134, 72, },
+ { 5, 1, 1, 2, 134, 64, },
+ { 6, 1, 1, 2, 134, 72, },
+ { 7, 1, 1, 2, 134, 127, },
+ { 8, 1, 1, 2, 134, 72, },
+ { 1, 1, 1, 2, 142, 127, },
+ { 3, 1, 1, 2, 142, 72, },
+ { 4, 1, 1, 2, 142, 72, },
+ { 5, 1, 1, 2, 142, 127, },
+ { 6, 1, 1, 2, 142, 72, },
+ { 7, 1, 1, 2, 142, 127, },
+ { 8, 1, 1, 2, 142, 72, },
+ { 1, 1, 1, 2, 151, 127, },
+ { 3, 1, 1, 2, 151, 72, },
+ { 4, 1, 1, 2, 151, 72, },
+ { 5, 1, 1, 2, 151, 72, },
+ { 6, 1, 1, 2, 151, 72, },
+ { 7, 1, 1, 2, 151, 54, },
+ { 8, 1, 1, 2, 151, 72, },
+ { 1, 1, 1, 2, 159, 127, },
+ { 3, 1, 1, 2, 159, 72, },
+ { 4, 1, 1, 2, 159, 72, },
+ { 5, 1, 1, 2, 159, 72, },
+ { 6, 1, 1, 2, 159, 72, },
+ { 7, 1, 1, 2, 159, 54, },
+ { 8, 1, 1, 2, 159, 72, },
+ { 1, 1, 1, 3, 38, 50, },
+ { 3, 1, 1, 3, 38, 40, },
+ { 4, 1, 1, 3, 38, 62, },
+ { 5, 1, 1, 3, 38, 40, },
+ { 6, 1, 1, 3, 38, 52, },
+ { 7, 1, 1, 3, 38, 30, },
+ { 8, 1, 1, 3, 38, 50, },
+ { 1, 1, 1, 3, 46, 50, },
+ { 3, 1, 1, 3, 46, 40, },
+ { 4, 1, 1, 3, 46, 62, },
+ { 5, 1, 1, 3, 46, 40, },
+ { 6, 1, 1, 3, 46, 52, },
+ { 7, 1, 1, 3, 46, 30, },
+ { 8, 1, 1, 3, 46, 50, },
+ { 1, 1, 1, 3, 54, 50, },
+ { 3, 1, 1, 3, 54, 40, },
+ { 4, 1, 1, 3, 54, 62, },
+ { 5, 1, 1, 3, 54, 40, },
+ { 6, 1, 1, 3, 54, 68, },
+ { 7, 1, 1, 3, 54, 30, },
+ { 8, 1, 1, 3, 54, 68, },
+ { 1, 1, 1, 3, 62, 48, },
+ { 3, 1, 1, 3, 62, 40, },
+ { 4, 1, 1, 3, 62, 58, },
+ { 5, 1, 1, 3, 62, 40, },
+ { 6, 1, 1, 3, 62, 58, },
+ { 7, 1, 1, 3, 62, 30, },
+ { 8, 1, 1, 3, 62, 58, },
+ { 1, 1, 1, 3, 102, 70, },
+ { 3, 1, 1, 3, 102, 54, },
+ { 4, 1, 1, 3, 102, 64, },
+ { 5, 1, 1, 3, 102, 40, },
+ { 6, 1, 1, 3, 102, 54, },
+ { 7, 1, 1, 3, 102, 30, },
+ { 8, 1, 1, 3, 102, 54, },
+ { 1, 1, 1, 3, 110, 70, },
+ { 3, 1, 1, 3, 110, 68, },
+ { 4, 1, 1, 3, 110, 64, },
+ { 5, 1, 1, 3, 110, 40, },
+ { 6, 1, 1, 3, 110, 68, },
+ { 7, 1, 1, 3, 110, 30, },
+ { 8, 1, 1, 3, 110, 68, },
+ { 1, 1, 1, 3, 118, 70, },
+ { 3, 1, 1, 3, 118, 127, },
+ { 4, 1, 1, 3, 118, 64, },
+ { 5, 1, 1, 3, 118, 127, },
+ { 6, 1, 1, 3, 118, 68, },
+ { 7, 1, 1, 3, 118, 30, },
+ { 8, 1, 1, 3, 118, 68, },
+ { 1, 1, 1, 3, 126, 70, },
+ { 3, 1, 1, 3, 126, 127, },
+ { 4, 1, 1, 3, 126, 64, },
+ { 5, 1, 1, 3, 126, 127, },
+ { 6, 1, 1, 3, 126, 68, },
+ { 7, 1, 1, 3, 126, 30, },
+ { 8, 1, 1, 3, 126, 68, },
+ { 1, 1, 1, 3, 134, 70, },
+ { 3, 1, 1, 3, 134, 68, },
+ { 4, 1, 1, 3, 134, 64, },
+ { 5, 1, 1, 3, 134, 40, },
+ { 6, 1, 1, 3, 134, 68, },
+ { 7, 1, 1, 3, 134, 127, },
+ { 8, 1, 1, 3, 134, 68, },
+ { 1, 1, 1, 3, 142, 127, },
+ { 3, 1, 1, 3, 142, 68, },
+ { 4, 1, 1, 3, 142, 64, },
+ { 5, 1, 1, 3, 142, 127, },
+ { 6, 1, 1, 3, 142, 68, },
+ { 7, 1, 1, 3, 142, 127, },
+ { 8, 1, 1, 3, 142, 68, },
+ { 1, 1, 1, 3, 151, 127, },
+ { 3, 1, 1, 3, 151, 72, },
+ { 4, 1, 1, 3, 151, 66, },
+ { 5, 1, 1, 3, 151, 72, },
+ { 6, 1, 1, 3, 151, 72, },
+ { 7, 1, 1, 3, 151, 30, },
+ { 8, 1, 1, 3, 151, 68, },
+ { 1, 1, 1, 3, 159, 127, },
+ { 3, 1, 1, 3, 159, 72, },
+ { 4, 1, 1, 3, 159, 66, },
+ { 5, 1, 1, 3, 159, 72, },
+ { 6, 1, 1, 3, 159, 72, },
+ { 7, 1, 1, 3, 159, 30, },
+ { 8, 1, 1, 3, 159, 72, },
+ { 1, 1, 2, 4, 42, 64, },
+ { 3, 1, 2, 4, 42, 64, },
+ { 4, 1, 2, 4, 42, 68, },
+ { 5, 1, 2, 4, 42, 64, },
+ { 6, 1, 2, 4, 42, 64, },
+ { 7, 1, 2, 4, 42, 54, },
+ { 8, 1, 2, 4, 42, 62, },
+ { 1, 1, 2, 4, 58, 64, },
+ { 3, 1, 2, 4, 58, 62, },
+ { 4, 1, 2, 4, 58, 64, },
+ { 5, 1, 2, 4, 58, 64, },
+ { 6, 1, 2, 4, 58, 62, },
+ { 7, 1, 2, 4, 58, 54, },
+ { 8, 1, 2, 4, 58, 62, },
+ { 1, 1, 2, 4, 106, 72, },
+ { 3, 1, 2, 4, 106, 58, },
+ { 4, 1, 2, 4, 106, 66, },
+ { 5, 1, 2, 4, 106, 64, },
+ { 6, 1, 2, 4, 106, 58, },
+ { 7, 1, 2, 4, 106, 54, },
+ { 8, 1, 2, 4, 106, 58, },
+ { 1, 1, 2, 4, 122, 72, },
+ { 3, 1, 2, 4, 122, 127, },
+ { 4, 1, 2, 4, 122, 68, },
+ { 5, 1, 2, 4, 122, 127, },
+ { 6, 1, 2, 4, 122, 72, },
+ { 7, 1, 2, 4, 122, 54, },
+ { 8, 1, 2, 4, 122, 72, },
+ { 1, 1, 2, 4, 138, 127, },
+ { 3, 1, 2, 4, 138, 72, },
+ { 4, 1, 2, 4, 138, 68, },
+ { 5, 1, 2, 4, 138, 127, },
+ { 6, 1, 2, 4, 138, 72, },
+ { 7, 1, 2, 4, 138, 127, },
+ { 8, 1, 2, 4, 138, 72, },
+ { 1, 1, 2, 4, 155, 127, },
+ { 3, 1, 2, 4, 155, 72, },
+ { 4, 1, 2, 4, 155, 68, },
+ { 5, 1, 2, 4, 155, 72, },
+ { 6, 1, 2, 4, 155, 72, },
+ { 7, 1, 2, 4, 155, 54, },
+ { 8, 1, 2, 4, 155, 68, },
+ { 1, 1, 2, 5, 42, 50, },
+ { 3, 1, 2, 5, 42, 40, },
+ { 4, 1, 2, 5, 42, 58, },
+ { 5, 1, 2, 5, 42, 40, },
+ { 6, 1, 2, 5, 42, 52, },
+ { 7, 1, 2, 5, 42, 30, },
+ { 8, 1, 2, 5, 42, 50, },
+ { 1, 1, 2, 5, 58, 50, },
+ { 3, 1, 2, 5, 58, 40, },
+ { 4, 1, 2, 5, 58, 56, },
+ { 5, 1, 2, 5, 58, 40, },
+ { 6, 1, 2, 5, 58, 52, },
+ { 7, 1, 2, 5, 58, 30, },
+ { 8, 1, 2, 5, 58, 52, },
+ { 1, 1, 2, 5, 106, 72, },
+ { 3, 1, 2, 5, 106, 50, },
+ { 4, 1, 2, 5, 106, 56, },
+ { 5, 1, 2, 5, 106, 40, },
+ { 6, 1, 2, 5, 106, 50, },
+ { 7, 1, 2, 5, 106, 30, },
+ { 8, 1, 2, 5, 106, 50, },
+ { 1, 1, 2, 5, 122, 72, },
+ { 3, 1, 2, 5, 122, 127, },
+ { 4, 1, 2, 5, 122, 56, },
+ { 5, 1, 2, 5, 122, 127, },
+ { 6, 1, 2, 5, 122, 66, },
+ { 7, 1, 2, 5, 122, 30, },
+ { 8, 1, 2, 5, 122, 66, },
+ { 1, 1, 2, 5, 138, 127, },
+ { 3, 1, 2, 5, 138, 66, },
+ { 4, 1, 2, 5, 138, 58, },
+ { 5, 1, 2, 5, 138, 127, },
+ { 6, 1, 2, 5, 138, 66, },
+ { 7, 1, 2, 5, 138, 127, },
+ { 8, 1, 2, 5, 138, 66, },
+ { 1, 1, 2, 5, 155, 127, },
+ { 3, 1, 2, 5, 155, 62, },
+ { 4, 1, 2, 5, 155, 58, },
+ { 5, 1, 2, 5, 155, 72, },
+ { 6, 1, 2, 5, 155, 62, },
+ { 7, 1, 2, 5, 155, 30, },
+ { 8, 1, 2, 5, 155, 62, },
};
RTW_DECL_TABLE_TXPWR_LMT(rtw8822c_txpwr_lmt_type0);
diff --git a/drivers/net/wireless/rsi/rsi_91x_mac80211.c b/drivers/net/wireless/rsi/rsi_91x_mac80211.c
index 49df3bb08d41..ce5e92d82efc 100644
--- a/drivers/net/wireless/rsi/rsi_91x_mac80211.c
+++ b/drivers/net/wireless/rsi/rsi_91x_mac80211.c
@@ -1818,7 +1818,8 @@ out:
return status;
}
-static int rsi_mac80211_cancel_roc(struct ieee80211_hw *hw)
+static int rsi_mac80211_cancel_roc(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
{
struct rsi_hw *adapter = hw->priv;
struct rsi_common *common = adapter->priv;
diff --git a/drivers/net/wireless/rsi/rsi_91x_sdio.c b/drivers/net/wireless/rsi/rsi_91x_sdio.c
index b42cd50b837e..1bebba4e8527 100644
--- a/drivers/net/wireless/rsi/rsi_91x_sdio.c
+++ b/drivers/net/wireless/rsi/rsi_91x_sdio.c
@@ -230,19 +230,16 @@ static void rsi_reset_card(struct sdio_func *pfunction)
rsi_dbg(ERR_ZONE, "%s: CMD0 failed : %d\n", __func__, err);
/* Issue CMD5, arg = 0 */
- if (!host->ocr_avail) {
- err = rsi_issue_sdiocommand(pfunction, SD_IO_SEND_OP_COND, 0,
- (MMC_RSP_R4 | MMC_CMD_BCR), &resp);
- if (err)
- rsi_dbg(ERR_ZONE, "%s: CMD5 failed : %d\n",
- __func__, err);
-
- host->ocr_avail = resp;
- }
+ err = rsi_issue_sdiocommand(pfunction, SD_IO_SEND_OP_COND, 0,
+ (MMC_RSP_R4 | MMC_CMD_BCR), &resp);
+ if (err)
+ rsi_dbg(ERR_ZONE, "%s: CMD5 failed : %d\n",
+ __func__, err);
+ card->ocr = resp;
/* Issue CMD5, arg = ocr. Wait till card is ready */
for (i = 0; i < 100; i++) {
err = rsi_issue_sdiocommand(pfunction, SD_IO_SEND_OP_COND,
- host->ocr_avail,
+ card->ocr,
(MMC_RSP_R4 | MMC_CMD_BCR), &resp);
if (err) {
rsi_dbg(ERR_ZONE, "%s: CMD5 failed : %d\n",
@@ -844,11 +841,11 @@ static int rsi_init_sdio_interface(struct rsi_hw *adapter,
struct sdio_func *pfunction)
{
struct rsi_91x_sdiodev *rsi_91x_dev;
- int status = -ENOMEM;
+ int status;
rsi_91x_dev = kzalloc(sizeof(*rsi_91x_dev), GFP_KERNEL);
if (!rsi_91x_dev)
- return status;
+ return -ENOMEM;
adapter->rsi_dev = rsi_91x_dev;
@@ -890,7 +887,7 @@ static int rsi_init_sdio_interface(struct rsi_hw *adapter,
#ifdef CONFIG_RSI_DEBUGFS
adapter->num_debugfs_entries = MAX_DEBUGFS_ENTRIES;
#endif
- return status;
+ return 0;
fail:
sdio_disable_func(pfunction);
sdio_release_host(pfunction);
@@ -944,7 +941,7 @@ static int rsi_sdio_ta_reset(struct rsi_hw *adapter)
put_unaligned_le32(TA_HOLD_THREAD_VALUE, data);
addr = TA_HOLD_THREAD_REG | RSI_SD_REQUEST_MASTER;
status = rsi_sdio_write_register_multiple(adapter, addr,
- (u8 *)&data,
+ (u8 *)data,
RSI_9116_REG_SIZE);
if (status < 0) {
rsi_dbg(ERR_ZONE, "Unable to hold TA threads\n");
@@ -954,7 +951,7 @@ static int rsi_sdio_ta_reset(struct rsi_hw *adapter)
put_unaligned_le32(TA_SOFT_RST_CLR, data);
addr = TA_SOFT_RESET_REG | RSI_SD_REQUEST_MASTER;
status = rsi_sdio_write_register_multiple(adapter, addr,
- (u8 *)&data,
+ (u8 *)data,
RSI_9116_REG_SIZE);
if (status < 0) {
rsi_dbg(ERR_ZONE, "Unable to get TA out of reset\n");
@@ -964,7 +961,7 @@ static int rsi_sdio_ta_reset(struct rsi_hw *adapter)
put_unaligned_le32(TA_PC_ZERO, data);
addr = TA_TH0_PC_REG | RSI_SD_REQUEST_MASTER;
status = rsi_sdio_write_register_multiple(adapter, addr,
- (u8 *)&data,
+ (u8 *)data,
RSI_9116_REG_SIZE);
if (status < 0) {
rsi_dbg(ERR_ZONE, "Unable to Reset TA PC value\n");
@@ -975,7 +972,7 @@ static int rsi_sdio_ta_reset(struct rsi_hw *adapter)
put_unaligned_le32(TA_RELEASE_THREAD_VALUE, data);
addr = TA_RELEASE_THREAD_REG | RSI_SD_REQUEST_MASTER;
status = rsi_sdio_write_register_multiple(adapter, addr,
- (u8 *)&data,
+ (u8 *)data,
RSI_9116_REG_SIZE);
if (status < 0) {
rsi_dbg(ERR_ZONE, "Unable to release TA threads\n");
diff --git a/drivers/net/wireless/ti/wlcore/main.c b/drivers/net/wireless/ti/wlcore/main.c
index b74dc8bc9755..547ad538d8b6 100644
--- a/drivers/net/wireless/ti/wlcore/main.c
+++ b/drivers/net/wireless/ti/wlcore/main.c
@@ -5749,7 +5749,8 @@ static void wlcore_roc_complete_work(struct work_struct *work)
ieee80211_remain_on_channel_expired(wl->hw);
}
-static int wlcore_op_cancel_remain_on_channel(struct ieee80211_hw *hw)
+static int wlcore_op_cancel_remain_on_channel(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
{
struct wl1271 *wl = hw->priv;
diff --git a/drivers/net/wireless/wl3501_cs.c b/drivers/net/wireless/wl3501_cs.c
index a25b17932edb..007bf6803293 100644
--- a/drivers/net/wireless/wl3501_cs.c
+++ b/drivers/net/wireless/wl3501_cs.c
@@ -1226,7 +1226,6 @@ fail:
static int wl3501_close(struct net_device *dev)
{
struct wl3501_card *this = netdev_priv(dev);
- int rc = -ENODEV;
unsigned long flags;
struct pcmcia_device *link;
link = this->p_dev;
@@ -1241,10 +1240,9 @@ static int wl3501_close(struct net_device *dev)
/* Mask interrupts from the SUTRO */
wl3501_block_interrupt(this);
- rc = 0;
printk(KERN_INFO "%s: WL3501 closed\n", dev->name);
spin_unlock_irqrestore(&this->lock, flags);
- return rc;
+ return 0;
}
/**
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
index c9262ffeefe4..0020b2e8c279 100644
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@ -136,12 +136,12 @@ static inline struct xenvif_queue *ubuf_to_queue(const struct ubuf_info *ubuf)
static u16 frag_get_pending_idx(skb_frag_t *frag)
{
- return (u16)frag->page_offset;
+ return (u16)skb_frag_off(frag);
}
static void frag_set_pending_idx(skb_frag_t *frag, u16 pending_idx)
{
- frag->page_offset = pending_idx;
+ skb_frag_off_set(frag, pending_idx);
}
static inline pending_ring_idx_t pending_index(unsigned i)
@@ -1057,7 +1057,7 @@ static int xenvif_handle_frag_list(struct xenvif_queue *queue, struct sk_buff *s
int j;
skb->truesize += skb->data_len;
for (j = 0; j < i; j++)
- put_page(frags[j].page.p);
+ put_page(skb_frag_page(&frags[j]));
return -ENOMEM;
}
@@ -1069,8 +1069,8 @@ static int xenvif_handle_frag_list(struct xenvif_queue *queue, struct sk_buff *s
BUG();
offset += len;
- frags[i].page.p = page;
- frags[i].page_offset = 0;
+ __skb_frag_set_page(&frags[i], page);
+ skb_frag_off_set(&frags[i], 0);
skb_frag_size_set(&frags[i], len);
}
@@ -1655,9 +1655,6 @@ static int __init netback_init(void)
#ifdef CONFIG_DEBUG_FS
xen_netback_dbg_root = debugfs_create_dir("xen-netback", NULL);
- if (IS_ERR_OR_NULL(xen_netback_dbg_root))
- pr_warn("Init of debugfs returned %ld!\n",
- PTR_ERR(xen_netback_dbg_root));
#endif /* CONFIG_DEBUG_FS */
return 0;
diff --git a/drivers/net/xen-netback/xenbus.c b/drivers/net/xen-netback/xenbus.c
index 41034264bd34..f533b7372d59 100644
--- a/drivers/net/xen-netback/xenbus.c
+++ b/drivers/net/xen-netback/xenbus.c
@@ -170,50 +170,26 @@ DEFINE_SHOW_ATTRIBUTE(xenvif_ctrl);
static void xenvif_debugfs_addif(struct xenvif *vif)
{
- struct dentry *pfile;
int i;
- if (IS_ERR_OR_NULL(xen_netback_dbg_root))
- return;
-
vif->xenvif_dbg_root = debugfs_create_dir(vif->dev->name,
xen_netback_dbg_root);
- if (!IS_ERR_OR_NULL(vif->xenvif_dbg_root)) {
- for (i = 0; i < vif->num_queues; ++i) {
- char filename[sizeof("io_ring_q") + 4];
-
- snprintf(filename, sizeof(filename), "io_ring_q%d", i);
- pfile = debugfs_create_file(filename,
- 0600,
- vif->xenvif_dbg_root,
- &vif->queues[i],
- &xenvif_dbg_io_ring_ops_fops);
- if (IS_ERR_OR_NULL(pfile))
- pr_warn("Creation of io_ring file returned %ld!\n",
- PTR_ERR(pfile));
- }
+ for (i = 0; i < vif->num_queues; ++i) {
+ char filename[sizeof("io_ring_q") + 4];
- if (vif->ctrl_irq) {
- pfile = debugfs_create_file("ctrl",
- 0400,
- vif->xenvif_dbg_root,
- vif,
- &xenvif_ctrl_fops);
- if (IS_ERR_OR_NULL(pfile))
- pr_warn("Creation of ctrl file returned %ld!\n",
- PTR_ERR(pfile));
- }
- } else
- netdev_warn(vif->dev,
- "Creation of vif debugfs dir returned %ld!\n",
- PTR_ERR(vif->xenvif_dbg_root));
+ snprintf(filename, sizeof(filename), "io_ring_q%d", i);
+ debugfs_create_file(filename, 0600, vif->xenvif_dbg_root,
+ &vif->queues[i],
+ &xenvif_dbg_io_ring_ops_fops);
+ }
+
+ if (vif->ctrl_irq)
+ debugfs_create_file("ctrl", 0400, vif->xenvif_dbg_root, vif,
+ &xenvif_ctrl_fops);
}
static void xenvif_debugfs_delif(struct xenvif *vif)
{
- if (IS_ERR_OR_NULL(xen_netback_dbg_root))
- return;
-
debugfs_remove_recursive(vif->xenvif_dbg_root);
vif->xenvif_dbg_root = NULL;
}
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index 8d33970a2950..b930d5f95222 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -531,7 +531,7 @@ static int xennet_count_skb_slots(struct sk_buff *skb)
for (i = 0; i < frags; i++) {
skb_frag_t *frag = skb_shinfo(skb)->frags + i;
unsigned long size = skb_frag_size(frag);
- unsigned long offset = frag->page_offset;
+ unsigned long offset = skb_frag_off(frag);
/* Skip unused frames from start of page */
offset &= ~PAGE_MASK;
@@ -674,8 +674,8 @@ static netdev_tx_t xennet_start_xmit(struct sk_buff *skb, struct net_device *dev
/* Requests for all the frags. */
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
- tx = xennet_make_txreqs(queue, tx, skb,
- skb_frag_page(frag), frag->page_offset,
+ tx = xennet_make_txreqs(queue, tx, skb, skb_frag_page(frag),
+ skb_frag_off(frag),
skb_frag_size(frag));
}
@@ -1040,7 +1040,7 @@ err:
if (NETFRONT_SKB_CB(skb)->pull_to > RX_COPY_THRESHOLD)
NETFRONT_SKB_CB(skb)->pull_to = RX_COPY_THRESHOLD;
- skb_shinfo(skb)->frags[0].page_offset = rx->offset;
+ skb_frag_off_set(&skb_shinfo(skb)->frags[0], rx->offset);
skb_frag_size_set(&skb_shinfo(skb)->frags[0], rx->status);
skb->data_len = rx->status;
skb->len += rx->status;
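The xen-netback and xen-netfront hunks above are a mechanical conversion from poking skb_frag_t fields directly to the accessor helpers in <linux/skbuff.h>. A minimal sketch of the pattern, assuming the 5.4-era accessors this series introduces (the function and variable names below are illustrative, not taken from the patch):

#include <linux/skbuff.h>

/* Illustrative only: shows the accessor pattern the hunks above apply. */
static void example_frag_usage(skb_frag_t *frag, struct page *page)
{
	unsigned int off;

	/* Was: frag->page.p = page; frag->page_offset = 0;
	 * The accessors keep callers independent of the skb_frag_t
	 * layout, which is the point of the conversion. */
	__skb_frag_set_page(frag, page);
	skb_frag_off_set(frag, 0);

	/* Was: off = frag->page_offset; */
	off = skb_frag_off(frag);
	(void)off;
}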
diff --git a/drivers/nfc/nxp-nci/Kconfig b/drivers/nfc/nxp-nci/Kconfig
index 12df2c8cc51d..e1f71deab6fc 100644
--- a/drivers/nfc/nxp-nci/Kconfig
+++ b/drivers/nfc/nxp-nci/Kconfig
@@ -2,10 +2,9 @@
config NFC_NXP_NCI
tristate "NXP-NCI NFC driver"
depends on NFC_NCI
- default n
---help---
- Generic core driver for NXP NCI chips such as the NPC100
- or PN7150 families.
+ Generic core driver for NXP NCI chips such as the NPC100 (PN547),
+ NPC300 (PN548) or PN7150 families.
This is a driver based on the NCI NFC kernel layers and
will thus not work with NXP libnfc library.
@@ -23,4 +22,4 @@ config NFC_NXP_NCI_I2C
To compile this driver as a module, choose m here. The module will
be called nxp_nci_i2c.
- Say Y if unsure.
+ Say N if unsure.
diff --git a/drivers/nfc/nxp-nci/core.c b/drivers/nfc/nxp-nci/core.c
index 8dafc696719f..a0ce95a287c5 100644
--- a/drivers/nfc/nxp-nci/core.c
+++ b/drivers/nfc/nxp-nci/core.c
@@ -11,10 +11,8 @@
*/
#include <linux/delay.h>
-#include <linux/gpio.h>
#include <linux/module.h>
#include <linux/nfc.h>
-#include <linux/platform_data/nxp-nci.h>
#include <net/nfc/nci_core.h>
diff --git a/drivers/nfc/nxp-nci/i2c.c b/drivers/nfc/nxp-nci/i2c.c
index 4aeb3861b409..307bd2afbe05 100644
--- a/drivers/nfc/nxp-nci/i2c.c
+++ b/drivers/nfc/nxp-nci/i2c.c
@@ -12,8 +12,6 @@
* Copyright (C) 2012 Intel Corporation. All rights reserved.
*/
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
#include <linux/acpi.h>
#include <linux/delay.h>
#include <linux/i2c.h>
@@ -21,9 +19,6 @@
#include <linux/module.h>
#include <linux/nfc.h>
#include <linux/gpio/consumer.h>
-#include <linux/of_gpio.h>
-#include <linux/of_irq.h>
-#include <linux/platform_data/nxp-nci.h>
#include <asm/unaligned.h>
#include <net/nfc/nfc.h>
@@ -38,8 +33,8 @@ struct nxp_nci_i2c_phy {
struct i2c_client *i2c_dev;
struct nci_dev *ndev;
- unsigned int gpio_en;
- unsigned int gpio_fw;
+ struct gpio_desc *gpiod_en;
+ struct gpio_desc *gpiod_fw;
int hard_fault; /*
* < 0 if hardware error occurred (e.g. i2c err)
@@ -52,8 +47,8 @@ static int nxp_nci_i2c_set_mode(void *phy_id,
{
struct nxp_nci_i2c_phy *phy = (struct nxp_nci_i2c_phy *) phy_id;
- gpio_set_value(phy->gpio_fw, (mode == NXP_NCI_MODE_FW) ? 1 : 0);
- gpio_set_value(phy->gpio_en, (mode != NXP_NCI_MODE_COLD) ? 1 : 0);
+ gpiod_set_value(phy->gpiod_fw, (mode == NXP_NCI_MODE_FW) ? 1 : 0);
+ gpiod_set_value(phy->gpiod_en, (mode != NXP_NCI_MODE_COLD) ? 1 : 0);
usleep_range(10000, 15000);
if (mode == NXP_NCI_MODE_COLD)
@@ -250,116 +245,55 @@ exit_irq_none:
return IRQ_NONE;
}
-static int nxp_nci_i2c_parse_devtree(struct i2c_client *client)
-{
- struct nxp_nci_i2c_phy *phy = i2c_get_clientdata(client);
- struct device_node *pp;
- int r;
-
- pp = client->dev.of_node;
- if (!pp)
- return -ENODEV;
-
- r = of_get_named_gpio(pp, "enable-gpios", 0);
- if (r == -EPROBE_DEFER)
- r = of_get_named_gpio(pp, "enable-gpios", 0);
- if (r < 0) {
- nfc_err(&client->dev, "Failed to get EN gpio, error: %d\n", r);
- return r;
- }
- phy->gpio_en = r;
-
- r = of_get_named_gpio(pp, "firmware-gpios", 0);
- if (r == -EPROBE_DEFER)
- r = of_get_named_gpio(pp, "firmware-gpios", 0);
- if (r < 0) {
- nfc_err(&client->dev, "Failed to get FW gpio, error: %d\n", r);
- return r;
- }
- phy->gpio_fw = r;
-
- return 0;
-}
-
-static int nxp_nci_i2c_acpi_config(struct nxp_nci_i2c_phy *phy)
-{
- struct i2c_client *client = phy->i2c_dev;
- struct gpio_desc *gpiod_en, *gpiod_fw;
+static const struct acpi_gpio_params firmware_gpios = { 1, 0, false };
+static const struct acpi_gpio_params enable_gpios = { 2, 0, false };
- gpiod_en = devm_gpiod_get_index(&client->dev, NULL, 2, GPIOD_OUT_LOW);
- gpiod_fw = devm_gpiod_get_index(&client->dev, NULL, 1, GPIOD_OUT_LOW);
-
- if (IS_ERR(gpiod_en) || IS_ERR(gpiod_fw)) {
- nfc_err(&client->dev, "No GPIOs\n");
- return -EINVAL;
- }
-
- phy->gpio_en = desc_to_gpio(gpiod_en);
- phy->gpio_fw = desc_to_gpio(gpiod_fw);
-
- return 0;
-}
+static const struct acpi_gpio_mapping acpi_nxp_nci_gpios[] = {
+ { "enable-gpios", &enable_gpios, 1 },
+ { "firmware-gpios", &firmware_gpios, 1 },
+ { }
+};
static int nxp_nci_i2c_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
+ struct device *dev = &client->dev;
struct nxp_nci_i2c_phy *phy;
- struct nxp_nci_nfc_platform_data *pdata;
int r;
if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
nfc_err(&client->dev, "Need I2C_FUNC_I2C\n");
- r = -ENODEV;
- goto probe_exit;
+ return -ENODEV;
}
phy = devm_kzalloc(&client->dev, sizeof(struct nxp_nci_i2c_phy),
GFP_KERNEL);
- if (!phy) {
- r = -ENOMEM;
- goto probe_exit;
- }
+ if (!phy)
+ return -ENOMEM;
phy->i2c_dev = client;
i2c_set_clientdata(client, phy);
- pdata = client->dev.platform_data;
-
- if (!pdata && client->dev.of_node) {
- r = nxp_nci_i2c_parse_devtree(client);
- if (r < 0) {
- nfc_err(&client->dev, "Failed to get DT data\n");
- goto probe_exit;
- }
- } else if (pdata) {
- phy->gpio_en = pdata->gpio_en;
- phy->gpio_fw = pdata->gpio_fw;
- } else if (ACPI_HANDLE(&client->dev)) {
- r = nxp_nci_i2c_acpi_config(phy);
- if (r < 0)
- goto probe_exit;
- goto nci_probe;
- } else {
- nfc_err(&client->dev, "No platform data\n");
- r = -EINVAL;
- goto probe_exit;
- }
+ r = devm_acpi_dev_add_driver_gpios(dev, acpi_nxp_nci_gpios);
+ if (r)
+ return r;
- r = devm_gpio_request_one(&phy->i2c_dev->dev, phy->gpio_en,
- GPIOF_OUT_INIT_LOW, "nxp_nci_en");
- if (r < 0)
- goto probe_exit;
+ phy->gpiod_en = devm_gpiod_get(dev, "enable", GPIOD_OUT_LOW);
+ if (IS_ERR(phy->gpiod_en)) {
+ nfc_err(dev, "Failed to get EN gpio\n");
+ return PTR_ERR(phy->gpiod_en);
+ }
- r = devm_gpio_request_one(&phy->i2c_dev->dev, phy->gpio_fw,
- GPIOF_OUT_INIT_LOW, "nxp_nci_fw");
- if (r < 0)
- goto probe_exit;
+ phy->gpiod_fw = devm_gpiod_get(dev, "firmware", GPIOD_OUT_LOW);
+ if (IS_ERR(phy->gpiod_fw)) {
+ nfc_err(dev, "Failed to get FW gpio\n");
+ return PTR_ERR(phy->gpiod_fw);
+ }
-nci_probe:
r = nxp_nci_probe(phy, &client->dev, &i2c_phy_ops,
NXP_NCI_I2C_MAX_PAYLOAD, &phy->ndev);
if (r < 0)
- goto probe_exit;
+ return r;
r = request_threaded_irq(client->irq, NULL,
nxp_nci_i2c_irq_thread_fn,
@@ -368,7 +302,6 @@ nci_probe:
if (r < 0)
nfc_err(&client->dev, "Unable to register IRQ handler\n");
-probe_exit:
return r;
}
@@ -390,14 +323,15 @@ MODULE_DEVICE_TABLE(i2c, nxp_nci_i2c_id_table);
static const struct of_device_id of_nxp_nci_i2c_match[] = {
{ .compatible = "nxp,nxp-nci-i2c", },
- {},
+ {}
};
MODULE_DEVICE_TABLE(of, of_nxp_nci_i2c_match);
#ifdef CONFIG_ACPI
-static struct acpi_device_id acpi_id[] = {
+static const struct acpi_device_id acpi_id[] = {
+ { "NXP1001" },
{ "NXP7471" },
- { },
+ { }
};
MODULE_DEVICE_TABLE(acpi, acpi_id);
#endif
@@ -406,7 +340,7 @@ static struct i2c_driver nxp_nci_i2c_driver = {
.driver = {
.name = NXP_NCI_I2C_DRIVER_NAME,
.acpi_match_table = ACPI_PTR(acpi_id),
- .of_match_table = of_match_ptr(of_nxp_nci_i2c_match),
+ .of_match_table = of_nxp_nci_i2c_match,
},
.probe = nxp_nci_i2c_probe,
.id_table = nxp_nci_i2c_id_table,
diff --git a/drivers/nfc/nxp-nci/nxp-nci.h b/drivers/nfc/nxp-nci/nxp-nci.h
index 6fe7c45544bf..ae3fb2735a4e 100644
--- a/drivers/nfc/nxp-nci/nxp-nci.h
+++ b/drivers/nfc/nxp-nci/nxp-nci.h
@@ -14,7 +14,6 @@
#include <linux/completion.h>
#include <linux/firmware.h>
#include <linux/nfc.h>
-#include <linux/platform_data/nxp-nci.h>
#include <net/nfc/nci_core.h>
diff --git a/drivers/pci/Kconfig b/drivers/pci/Kconfig
index 2ab92409210a..c313de96a357 100644
--- a/drivers/pci/Kconfig
+++ b/drivers/pci/Kconfig
@@ -182,6 +182,7 @@ config PCI_LABEL
config PCI_HYPERV
tristate "Hyper-V PCI Frontend"
depends on X86 && HYPERV && PCI_MSI && PCI_MSI_IRQ_DOMAIN && X86_64
+ select PCI_HYPERV_INTERFACE
help
The PCI device frontend driver allows the kernel to import arbitrary
PCI devices from a PCI backend to support PCI driver domains.
diff --git a/drivers/pci/controller/Kconfig b/drivers/pci/controller/Kconfig
index fe9f9f13ce11..70e078238899 100644
--- a/drivers/pci/controller/Kconfig
+++ b/drivers/pci/controller/Kconfig
@@ -281,5 +281,12 @@ config VMD
To compile this driver as a module, choose M here: the
module will be called vmd.
+config PCI_HYPERV_INTERFACE
+ tristate "Hyper-V PCI Interface"
+ depends on X86 && HYPERV && PCI_MSI && PCI_MSI_IRQ_DOMAIN && X86_64
+ help
+ The Hyper-V PCI Interface is a helper driver that allows other drivers to
+ have a common interface with the Hyper-V PCI frontend driver.
+
source "drivers/pci/controller/dwc/Kconfig"
endmenu
diff --git a/drivers/pci/controller/Makefile b/drivers/pci/controller/Makefile
index d56a507495c5..a2a22c9d91af 100644
--- a/drivers/pci/controller/Makefile
+++ b/drivers/pci/controller/Makefile
@@ -4,6 +4,7 @@ obj-$(CONFIG_PCIE_CADENCE_HOST) += pcie-cadence-host.o
obj-$(CONFIG_PCIE_CADENCE_EP) += pcie-cadence-ep.o
obj-$(CONFIG_PCI_FTPCI100) += pci-ftpci100.o
obj-$(CONFIG_PCI_HYPERV) += pci-hyperv.o
+obj-$(CONFIG_PCI_HYPERV_INTERFACE) += pci-hyperv-intf.o
obj-$(CONFIG_PCI_MVEBU) += pci-mvebu.o
obj-$(CONFIG_PCI_AARDVARK) += pci-aardvark.o
obj-$(CONFIG_PCI_TEGRA) += pci-tegra.o
diff --git a/drivers/pci/controller/pci-hyperv-intf.c b/drivers/pci/controller/pci-hyperv-intf.c
new file mode 100644
index 000000000000..cc96be450360
--- /dev/null
+++ b/drivers/pci/controller/pci-hyperv-intf.c
@@ -0,0 +1,67 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) Microsoft Corporation.
+ *
+ * Author:
+ * Haiyang Zhang <haiyangz@microsoft.com>
+ *
+ * This small module is a helper driver that allows other drivers to
+ * have a common interface with the Hyper-V PCI frontend driver.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/hyperv.h>
+
+struct hyperv_pci_block_ops hvpci_block_ops;
+EXPORT_SYMBOL_GPL(hvpci_block_ops);
+
+int hyperv_read_cfg_blk(struct pci_dev *dev, void *buf, unsigned int buf_len,
+ unsigned int block_id, unsigned int *bytes_returned)
+{
+ if (!hvpci_block_ops.read_block)
+ return -EOPNOTSUPP;
+
+ return hvpci_block_ops.read_block(dev, buf, buf_len, block_id,
+ bytes_returned);
+}
+EXPORT_SYMBOL_GPL(hyperv_read_cfg_blk);
+
+int hyperv_write_cfg_blk(struct pci_dev *dev, void *buf, unsigned int len,
+ unsigned int block_id)
+{
+ if (!hvpci_block_ops.write_block)
+ return -EOPNOTSUPP;
+
+ return hvpci_block_ops.write_block(dev, buf, len, block_id);
+}
+EXPORT_SYMBOL_GPL(hyperv_write_cfg_blk);
+
+int hyperv_reg_block_invalidate(struct pci_dev *dev, void *context,
+ void (*block_invalidate)(void *context,
+ u64 block_mask))
+{
+ if (!hvpci_block_ops.reg_blk_invalidate)
+ return -EOPNOTSUPP;
+
+ return hvpci_block_ops.reg_blk_invalidate(dev, context,
+ block_invalidate);
+}
+EXPORT_SYMBOL_GPL(hyperv_reg_block_invalidate);
+
+static void __exit exit_hv_pci_intf(void)
+{
+}
+
+static int __init init_hv_pci_intf(void)
+{
+ return 0;
+}
+
+module_init(init_hv_pci_intf);
+module_exit(exit_hv_pci_intf);
+
+MODULE_DESCRIPTION("Hyper-V PCI Interface");
+MODULE_LICENSE("GPL v2");
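As a rough usage sketch of the interface this new file exports (the caller, buffer size, and block id below are assumptions for illustration, not part of the patch), a VF-aware driver calls the exported wrappers rather than touching hvpci_block_ops directly:

#include <linux/hyperv.h>
#include <linux/pci.h>

static int example_read_then_write(struct pci_dev *pdev)
{
	u8 buf[64];		/* <= HV_CONFIG_BLOCK_SIZE_MAX (128 bytes) */
	unsigned int returned = 0;
	int ret;

	/* Block id 0 is only an example; the PF/VF driver pair defines
	 * what each block means, no protocol is implied. */
	ret = hyperv_read_cfg_blk(pdev, buf, sizeof(buf), 0, &returned);
	if (ret)
		return ret;	/* -EOPNOTSUPP if pci-hyperv isn't loaded */

	return hyperv_write_cfg_blk(pdev, buf, returned, 0);
}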
diff --git a/drivers/pci/controller/pci-hyperv.c b/drivers/pci/controller/pci-hyperv.c
index 40b625458afa..9c93ac2215b7 100644
--- a/drivers/pci/controller/pci-hyperv.c
+++ b/drivers/pci/controller/pci-hyperv.c
@@ -365,6 +365,39 @@ struct pci_delete_interrupt {
struct tran_int_desc int_desc;
} __packed;
+/*
+ * Note: the VM must pass a valid block id, wslot and bytes_requested.
+ */
+struct pci_read_block {
+ struct pci_message message_type;
+ u32 block_id;
+ union win_slot_encoding wslot;
+ u32 bytes_requested;
+} __packed;
+
+struct pci_read_block_response {
+ struct vmpacket_descriptor hdr;
+ u32 status;
+ u8 bytes[HV_CONFIG_BLOCK_SIZE_MAX];
+} __packed;
+
+/*
+ * Note: the VM must pass a valid block id, wslot and byte_count.
+ */
+struct pci_write_block {
+ struct pci_message message_type;
+ u32 block_id;
+ union win_slot_encoding wslot;
+ u32 byte_count;
+ u8 bytes[HV_CONFIG_BLOCK_SIZE_MAX];
+} __packed;
+
+struct pci_dev_inval_block {
+ struct pci_incoming_message incoming;
+ union win_slot_encoding wslot;
+ u64 block_mask;
+} __packed;
+
struct pci_dev_incoming {
struct pci_incoming_message incoming;
union win_slot_encoding wslot;
@@ -499,6 +532,9 @@ struct hv_pci_dev {
struct hv_pcibus_device *hbus;
struct work_struct wrk;
+ void (*block_invalidate)(void *context, u64 block_mask);
+ void *invalidate_context;
+
/*
* What would be observed if one wrote 0xFFFFFFFF to a BAR and then
* read it back, for each of the BAR offsets within config space.
@@ -817,6 +853,253 @@ static struct pci_ops hv_pcifront_ops = {
.write = hv_pcifront_write_config,
};
+/*
+ * Paravirtual backchannel
+ *
+ * Hyper-V SR-IOV provides a backchannel mechanism in software for
+ * communication between a VF driver and a PF driver. These
+ * "configuration blocks" are similar in concept to PCI configuration space,
+ * but instead of doing reads and writes in 32-bit chunks through a very slow
+ * path, packets of up to 128 bytes can be sent or received asynchronously.
+ *
+ * Nearly every SR-IOV device contains just such a communications channel in
+ * hardware, so using this one in software is usually optional. Using the
+ * software channel, however, allows driver implementers to leverage software
+ * tools that fuzz the communications channel looking for vulnerabilities.
+ *
+ * The usage model for these packets puts the responsibility for reading or
+ * writing on the VF driver. The VF driver sends a read or a write packet,
+ * indicating which "block" is being referred to by number.
+ *
+ * If the PF driver wishes to initiate communication, it can "invalidate" one or
+ * more of the first 64 blocks. This invalidation is delivered via a callback
+ * supplied by the VF driver by this driver.
+ *
+ * No protocol is implied, except that supplied by the PF and VF drivers.
+ */
+
+struct hv_read_config_compl {
+ struct hv_pci_compl comp_pkt;
+ void *buf;
+ unsigned int len;
+ unsigned int bytes_returned;
+};
+
+/**
+ * hv_pci_read_config_compl() - Invoked when a response packet
+ * for a read config block operation arrives.
+ * @context: Identifies the read config operation
+ * @resp: The response packet itself
+ * @resp_packet_size: Size in bytes of the response packet
+ */
+static void hv_pci_read_config_compl(void *context, struct pci_response *resp,
+ int resp_packet_size)
+{
+ struct hv_read_config_compl *comp = context;
+ struct pci_read_block_response *read_resp =
+ (struct pci_read_block_response *)resp;
+ unsigned int data_len, hdr_len;
+
+ hdr_len = offsetof(struct pci_read_block_response, bytes);
+ if (resp_packet_size < hdr_len) {
+ comp->comp_pkt.completion_status = -1;
+ goto out;
+ }
+
+ data_len = resp_packet_size - hdr_len;
+ if (data_len > 0 && read_resp->status == 0) {
+ comp->bytes_returned = min(comp->len, data_len);
+ memcpy(comp->buf, read_resp->bytes, comp->bytes_returned);
+ } else {
+ comp->bytes_returned = 0;
+ }
+
+ comp->comp_pkt.completion_status = read_resp->status;
+out:
+ complete(&comp->comp_pkt.host_event);
+}
+
+/**
+ * hv_read_config_block() - Sends a read config block request to
+ * the back-end driver running in the Hyper-V parent partition.
+ * @pdev: The PCI driver's representation for this device.
+ * @buf: Buffer into which the config block will be copied.
+ * @len: Size in bytes of buf.
+ * @block_id: Identifies the config block which has been requested.
+ * @bytes_returned: Size which came back from the back-end driver.
+ *
+ * Return: 0 on success, -errno on failure
+ */
+int hv_read_config_block(struct pci_dev *pdev, void *buf, unsigned int len,
+ unsigned int block_id, unsigned int *bytes_returned)
+{
+ struct hv_pcibus_device *hbus =
+ container_of(pdev->bus->sysdata, struct hv_pcibus_device,
+ sysdata);
+ struct {
+ struct pci_packet pkt;
+ char buf[sizeof(struct pci_read_block)];
+ } pkt;
+ struct hv_read_config_compl comp_pkt;
+ struct pci_read_block *read_blk;
+ int ret;
+
+ if (len == 0 || len > HV_CONFIG_BLOCK_SIZE_MAX)
+ return -EINVAL;
+
+ init_completion(&comp_pkt.comp_pkt.host_event);
+ comp_pkt.buf = buf;
+ comp_pkt.len = len;
+
+ memset(&pkt, 0, sizeof(pkt));
+ pkt.pkt.completion_func = hv_pci_read_config_compl;
+ pkt.pkt.compl_ctxt = &comp_pkt;
+ read_blk = (struct pci_read_block *)&pkt.pkt.message;
+ read_blk->message_type.type = PCI_READ_BLOCK;
+ read_blk->wslot.slot = devfn_to_wslot(pdev->devfn);
+ read_blk->block_id = block_id;
+ read_blk->bytes_requested = len;
+
+ ret = vmbus_sendpacket(hbus->hdev->channel, read_blk,
+ sizeof(*read_blk), (unsigned long)&pkt.pkt,
+ VM_PKT_DATA_INBAND,
+ VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
+ if (ret)
+ return ret;
+
+ ret = wait_for_response(hbus->hdev, &comp_pkt.comp_pkt.host_event);
+ if (ret)
+ return ret;
+
+ if (comp_pkt.comp_pkt.completion_status != 0 ||
+ comp_pkt.bytes_returned == 0) {
+ dev_err(&hbus->hdev->device,
+ "Read Config Block failed: 0x%x, bytes_returned=%d\n",
+ comp_pkt.comp_pkt.completion_status,
+ comp_pkt.bytes_returned);
+ return -EIO;
+ }
+
+ *bytes_returned = comp_pkt.bytes_returned;
+ return 0;
+}
+
+/**
+ * hv_pci_write_config_compl() - Invoked when a response packet for a write
+ * config block operation arrives.
+ * @context: Identifies the write config operation
+ * @resp: The response packet itself
+ * @resp_packet_size: Size in bytes of the response packet
+ */
+static void hv_pci_write_config_compl(void *context, struct pci_response *resp,
+ int resp_packet_size)
+{
+ struct hv_pci_compl *comp_pkt = context;
+
+ comp_pkt->completion_status = resp->status;
+ complete(&comp_pkt->host_event);
+}
+
+/**
+ * hv_write_config_block() - Sends a write config block request to the
+ * back-end driver running in the Hyper-V parent partition.
+ * @pdev: The PCI driver's representation for this device.
+ * @buf: Buffer from which the config block will be copied.
+ * @len: Size in bytes of buf.
+ * @block_id: Identifies the config block which is being written.
+ *
+ * Return: 0 on success, -errno on failure
+ */
+int hv_write_config_block(struct pci_dev *pdev, void *buf, unsigned int len,
+ unsigned int block_id)
+{
+ struct hv_pcibus_device *hbus =
+ container_of(pdev->bus->sysdata, struct hv_pcibus_device,
+ sysdata);
+ struct {
+ struct pci_packet pkt;
+ char buf[sizeof(struct pci_write_block)];
+ u32 reserved;
+ } pkt;
+ struct hv_pci_compl comp_pkt;
+ struct pci_write_block *write_blk;
+ u32 pkt_size;
+ int ret;
+
+ if (len == 0 || len > HV_CONFIG_BLOCK_SIZE_MAX)
+ return -EINVAL;
+
+ init_completion(&comp_pkt.host_event);
+
+ memset(&pkt, 0, sizeof(pkt));
+ pkt.pkt.completion_func = hv_pci_write_config_compl;
+ pkt.pkt.compl_ctxt = &comp_pkt;
+ write_blk = (struct pci_write_block *)&pkt.pkt.message;
+ write_blk->message_type.type = PCI_WRITE_BLOCK;
+ write_blk->wslot.slot = devfn_to_wslot(pdev->devfn);
+ write_blk->block_id = block_id;
+ write_blk->byte_count = len;
+ memcpy(write_blk->bytes, buf, len);
+ pkt_size = offsetof(struct pci_write_block, bytes) + len;
+ /*
+ * This quirk is required on some hosts shipped around 2018, because
+ * these hosts don't check the pkt_size correctly (new hosts have been
+ * fixed since early 2019). The quirk is also safe on very old hosts
+ * and new hosts, because, on them, what really matters is the length
+ * specified in write_blk->byte_count.
+ */
+ pkt_size += sizeof(pkt.reserved);
+
+ ret = vmbus_sendpacket(hbus->hdev->channel, write_blk, pkt_size,
+ (unsigned long)&pkt.pkt, VM_PKT_DATA_INBAND,
+ VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
+ if (ret)
+ return ret;
+
+ ret = wait_for_response(hbus->hdev, &comp_pkt.host_event);
+ if (ret)
+ return ret;
+
+ if (comp_pkt.completion_status != 0) {
+ dev_err(&hbus->hdev->device,
+ "Write Config Block failed: 0x%x\n",
+ comp_pkt.completion_status);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+/**
+ * hv_register_block_invalidate() - Register the callback invoked when a
+ * config block invalidation arrives from the back-end driver.
+ * @pdev: The PCI driver's representation for this device.
+ * @context: Identifies the device.
+ * @block_invalidate: Identifies all of the blocks being invalidated.
+ *
+ * Return: 0 on success, -errno on failure
+ */
+int hv_register_block_invalidate(struct pci_dev *pdev, void *context,
+ void (*block_invalidate)(void *context,
+ u64 block_mask))
+{
+ struct hv_pcibus_device *hbus =
+ container_of(pdev->bus->sysdata, struct hv_pcibus_device,
+ sysdata);
+ struct hv_pci_dev *hpdev;
+
+ hpdev = get_pcichild_wslot(hbus, devfn_to_wslot(pdev->devfn));
+ if (!hpdev)
+ return -ENODEV;
+
+ hpdev->block_invalidate = block_invalidate;
+ hpdev->invalidate_context = context;
+
+ put_pcichild(hpdev);
+ return 0;
+}
+
/* Interrupt management hooks */
static void hv_int_desc_free(struct hv_pci_dev *hpdev,
struct tran_int_desc *int_desc)
@@ -1968,6 +2251,7 @@ static void hv_pci_onchannelcallback(void *context)
struct pci_response *response;
struct pci_incoming_message *new_message;
struct pci_bus_relations *bus_rel;
+ struct pci_dev_inval_block *inval;
struct pci_dev_incoming *dev_message;
struct hv_pci_dev *hpdev;
@@ -2045,6 +2329,21 @@ static void hv_pci_onchannelcallback(void *context)
}
break;
+ case PCI_INVALIDATE_BLOCK:
+
+ inval = (struct pci_dev_inval_block *)buffer;
+ hpdev = get_pcichild_wslot(hbus,
+ inval->wslot.slot);
+ if (hpdev) {
+ if (hpdev->block_invalidate) {
+ hpdev->block_invalidate(
+ hpdev->invalidate_context,
+ inval->block_mask);
+ }
+ put_pcichild(hpdev);
+ }
+ break;
+
default:
dev_warn(&hbus->hdev->device,
"Unimplemented protocol message %x\n",
@@ -2743,10 +3042,19 @@ static struct hv_driver hv_pci_drv = {
static void __exit exit_hv_pci_drv(void)
{
vmbus_driver_unregister(&hv_pci_drv);
+
+ hvpci_block_ops.read_block = NULL;
+ hvpci_block_ops.write_block = NULL;
+ hvpci_block_ops.reg_blk_invalidate = NULL;
}
static int __init init_hv_pci_drv(void)
{
+ /* Initialize PCI block r/w interface */
+ hvpci_block_ops.read_block = hv_read_config_block;
+ hvpci_block_ops.write_block = hv_write_config_block;
+ hvpci_block_ops.reg_blk_invalidate = hv_register_block_invalidate;
+
return vmbus_driver_register(&hv_pci_drv);
}
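Tying the two halves together: the paravirtual-backchannel comment above says the PF driver can "invalidate" any of the first 64 blocks, and that the invalidation is delivered through a callback the VF driver registers. A minimal sketch of that registration, assuming a hypothetical VF driver (the struct, work item, and function names are illustrative):

#include <linux/hyperv.h>
#include <linux/pci.h>
#include <linux/workqueue.h>

struct example_dev {			/* hypothetical VF driver state */
	u64 invalid_blocks;
	struct work_struct cfg_refresh_work;	/* INIT_WORK'd at probe */
};

static void example_blk_invalidate(void *context, u64 block_mask)
{
	struct example_dev *edev = context;

	/* Bit n set means the PF invalidated block n; this runs from
	 * the channel callback path, so defer the actual re-read. */
	edev->invalid_blocks |= block_mask;
	schedule_work(&edev->cfg_refresh_work);
}

static int example_setup(struct pci_dev *pdev, struct example_dev *edev)
{
	return hyperv_reg_block_invalidate(pdev, edev,
					   example_blk_invalidate);
}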
diff --git a/drivers/ptp/ptp_dte.c b/drivers/ptp/ptp_dte.c
index 5b6393e3ea27..0dcfdc806f57 100644
--- a/drivers/ptp/ptp_dte.c
+++ b/drivers/ptp/ptp_dte.c
@@ -248,11 +248,8 @@ static int ptp_dte_probe(struct platform_device *pdev)
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
ptp_dte->regs = devm_ioremap_resource(dev, res);
- if (IS_ERR(ptp_dte->regs)) {
- dev_err(dev,
- "%s: io remap failed\n", __func__);
+ if (IS_ERR(ptp_dte->regs))
return PTR_ERR(ptp_dte->regs);
- }
spin_lock_init(&ptp_dte->lock);
diff --git a/drivers/s390/cio/qdio.h b/drivers/s390/cio/qdio.h
index a06944399865..a58b45df95d7 100644
--- a/drivers/s390/cio/qdio.h
+++ b/drivers/s390/cio/qdio.h
@@ -206,8 +206,6 @@ struct qdio_output_q {
struct qdio_outbuf_state *sbal_state;
/* timer to check for more outbound work */
struct timer_list timer;
- /* used SBALs before tasklet schedule */
- int scan_threshold;
};
/*
@@ -295,6 +293,7 @@ struct qdio_irq {
struct qdio_ssqd_desc ssqd_desc;
void (*orig_handler) (struct ccw_device *, unsigned long, struct irb *);
+ unsigned int scan_threshold; /* used SBALs before tasklet schedule */
int perf_stat_enabled;
struct qdr *qdr;
diff --git a/drivers/s390/cio/qdio_main.c b/drivers/s390/cio/qdio_main.c
index 4142c85e77d8..5b63c505a2f7 100644
--- a/drivers/s390/cio/qdio_main.c
+++ b/drivers/s390/cio/qdio_main.c
@@ -647,8 +647,6 @@ static void qdio_kick_handler(struct qdio_q *q, unsigned int count)
qperf_inc(q, outbound_handler);
DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "koh: s:%02x c:%02x",
start, count);
- if (q->u.out.use_cq)
- qdio_handle_aobs(q, start, count);
}
q->handler(q->irq_ptr->cdev, q->qdio_error, q->nr, start, count,
@@ -774,8 +772,11 @@ static inline int qdio_outbound_q_moved(struct qdio_q *q, unsigned int start)
count = get_outbound_buffer_frontier(q, start);
- if (count)
+ if (count) {
DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "out moved:%1d", q->nr);
+ if (q->u.out.use_cq)
+ qdio_handle_aobs(q, start, count);
+ }
return count;
}
@@ -879,7 +880,7 @@ static inline void qdio_check_outbound_pci_queues(struct qdio_irq *irq)
struct qdio_q *out;
int i;
- if (!pci_out_supported(irq))
+ if (!pci_out_supported(irq) || !irq->scan_threshold)
return;
for_each_output_queue(irq, out, i)
@@ -972,7 +973,7 @@ static void qdio_int_handler_pci(struct qdio_irq *irq_ptr)
}
}
- if (!pci_out_supported(irq_ptr))
+ if (!pci_out_supported(irq_ptr) || !irq_ptr->scan_threshold)
return;
for_each_output_queue(irq_ptr, q, i) {
@@ -1527,6 +1528,7 @@ set:
static int handle_outbound(struct qdio_q *q, unsigned int callflags,
int bufnr, int count)
{
+ const unsigned int scan_threshold = q->irq_ptr->scan_threshold;
unsigned char state = 0;
int used, rc = 0;
@@ -1565,8 +1567,12 @@ static int handle_outbound(struct qdio_q *q, unsigned int callflags,
rc = qdio_kick_outbound_q(q, 0);
}
+ /* Let drivers implement their own completion scanning: */
+ if (!scan_threshold)
+ return rc;
+
/* in case of SIGA errors we must process the error immediately */
- if (used >= q->u.out.scan_threshold || rc)
+ if (used >= scan_threshold || rc)
qdio_tasklet_schedule(q);
else
/* free the SBALs in case of no further traffic */
@@ -1655,6 +1661,44 @@ rescan:
}
EXPORT_SYMBOL(qdio_start_irq);
+static int __qdio_inspect_queue(struct qdio_q *q, unsigned int *bufnr,
+ unsigned int *error)
+{
+ unsigned int start = q->first_to_check;
+ int count;
+
+ count = q->is_input_q ? qdio_inbound_q_moved(q, start) :
+ qdio_outbound_q_moved(q, start);
+ if (count == 0)
+ return 0;
+
+ *bufnr = start;
+ *error = q->qdio_error;
+
+ /* for the next time */
+ q->first_to_check = add_buf(start, count);
+ q->qdio_error = 0;
+
+ return count;
+}
+
+int qdio_inspect_queue(struct ccw_device *cdev, unsigned int nr, bool is_input,
+ unsigned int *bufnr, unsigned int *error)
+{
+ struct qdio_irq *irq_ptr = cdev->private->qdio_data;
+ struct qdio_q *q;
+
+ if (!irq_ptr)
+ return -ENODEV;
+ q = is_input ? irq_ptr->input_qs[nr] : irq_ptr->output_qs[nr];
+
+ if (need_siga_sync(q))
+ qdio_siga_sync_q(q);
+
+ return __qdio_inspect_queue(q, bufnr, error);
+}
+EXPORT_SYMBOL_GPL(qdio_inspect_queue);
+
/**
* qdio_get_next_buffers - process input buffers
* @cdev: associated ccw_device for the qdio subchannel
@@ -1672,13 +1716,10 @@ int qdio_get_next_buffers(struct ccw_device *cdev, int nr, int *bufnr,
{
struct qdio_q *q;
struct qdio_irq *irq_ptr = cdev->private->qdio_data;
- unsigned int start;
- int count;
if (!irq_ptr)
return -ENODEV;
q = irq_ptr->input_qs[nr];
- start = q->first_to_check;
/*
* Cannot rely on automatic sync after interrupt since queues may
@@ -1689,25 +1730,11 @@ int qdio_get_next_buffers(struct ccw_device *cdev, int nr, int *bufnr,
qdio_check_outbound_pci_queues(irq_ptr);
- count = qdio_inbound_q_moved(q, start);
- if (count == 0)
- return 0;
-
- start = add_buf(start, count);
- q->first_to_check = start;
-
/* Note: upper-layer MUST stop processing immediately here ... */
if (unlikely(q->irq_ptr->state != QDIO_IRQ_STATE_ACTIVE))
return -EIO;
- *bufnr = q->first_to_kick;
- *error = q->qdio_error;
-
- /* for the next time */
- q->first_to_kick = add_buf(q->first_to_kick, count);
- q->qdio_error = 0;
-
- return count;
+ return __qdio_inspect_queue(q, bufnr, error);
}
EXPORT_SYMBOL(qdio_get_next_buffers);
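The new qdio_inspect_queue() export pairs with the scan_threshold handling above: a driver that passes scan_threshold == 0 in its qdio_initialize data opts out of qdio's own tasklet scanning ("Let drivers implement their own completion scanning") and must poll completions itself. A rough sketch of such a poll, assuming output queue 0 and a hypothetical buffer-recycling helper:

#include <asm/qdio.h>
#include <asm/ccwdev.h>

/* Hypothetical helper, stands in for the driver's buffer recycling. */
static void example_release_buffers(unsigned int bufnr, int count,
				    unsigned int error);

static void example_scan_tx(struct ccw_device *cdev)
{
	unsigned int bufnr = 0, error = 0;
	int completed;

	/* With scan_threshold == 0, qdio no longer schedules its
	 * tasklet, so the driver drives completion scanning itself. */
	completed = qdio_inspect_queue(cdev, 0, false, &bufnr, &error);
	if (completed <= 0)
		return;

	/* SBALs bufnr .. bufnr + completed - 1 are done; 'error'
	 * carries any SIGA error state for the scanned range. */
	example_release_buffers(bufnr, completed, error);
}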
diff --git a/drivers/s390/cio/qdio_setup.c b/drivers/s390/cio/qdio_setup.c
index d4101cecdc8d..f4ca1d29d61b 100644
--- a/drivers/s390/cio/qdio_setup.c
+++ b/drivers/s390/cio/qdio_setup.c
@@ -248,7 +248,6 @@ static void setup_queues(struct qdio_irq *irq_ptr,
output_sbal_state_array += QDIO_MAX_BUFFERS_PER_Q;
q->is_input_q = 0;
- q->u.out.scan_threshold = qdio_init->scan_threshold;
setup_storage_lists(q, irq_ptr, output_sbal_array, i);
output_sbal_array += QDIO_MAX_BUFFERS_PER_Q;
@@ -474,6 +473,7 @@ int qdio_setup_irq(struct qdio_initialize *init_data)
irq_ptr->nr_input_qs = init_data->no_input_qs;
irq_ptr->nr_output_qs = init_data->no_output_qs;
irq_ptr->cdev = init_data->cdev;
+ irq_ptr->scan_threshold = init_data->scan_threshold;
ccw_device_get_schid(irq_ptr->cdev, &irq_ptr->schid);
setup_queues(irq_ptr, init_data);
diff --git a/drivers/s390/net/ctcm_fsms.c b/drivers/s390/net/ctcm_fsms.c
index 4a8a5373cb35..3ce99e4db44d 100644
--- a/drivers/s390/net/ctcm_fsms.c
+++ b/drivers/s390/net/ctcm_fsms.c
@@ -307,8 +307,7 @@ static void chx_txdone(fsm_instance *fi, int event, void *arg)
ch->ccw[1].count = ch->trans_skb->len;
fsm_addtimer(&ch->timer, CTCM_TIME_5_SEC, CTC_EVENT_TIMER, ch);
ch->prof.send_stamp = jiffies;
- rc = ccw_device_start(ch->cdev, &ch->ccw[0],
- (unsigned long)ch, 0xff, 0);
+ rc = ccw_device_start(ch->cdev, &ch->ccw[0], 0, 0xff, 0);
ch->prof.doios_multi++;
if (rc != 0) {
priv->stats.tx_dropped += i;
@@ -417,8 +416,7 @@ static void chx_rx(fsm_instance *fi, int event, void *arg)
if (ctcm_checkalloc_buffer(ch))
return;
ch->ccw[1].count = ch->max_bufsize;
- rc = ccw_device_start(ch->cdev, &ch->ccw[0],
- (unsigned long)ch, 0xff, 0);
+ rc = ccw_device_start(ch->cdev, &ch->ccw[0], 0, 0xff, 0);
if (rc != 0)
ctcm_ccw_check_rc(ch, rc, "normal RX");
}
@@ -478,8 +476,7 @@ static void chx_firstio(fsm_instance *fi, int event, void *arg)
fsm_newstate(fi, (CHANNEL_DIRECTION(ch->flags) == CTCM_READ)
? CTC_STATE_RXINIT : CTC_STATE_TXINIT);
- rc = ccw_device_start(ch->cdev, &ch->ccw[0],
- (unsigned long)ch, 0xff, 0);
+ rc = ccw_device_start(ch->cdev, &ch->ccw[0], 0, 0xff, 0);
if (rc != 0) {
fsm_deltimer(&ch->timer);
fsm_newstate(fi, CTC_STATE_SETUPWAIT);
@@ -527,8 +524,7 @@ static void chx_rxidle(fsm_instance *fi, int event, void *arg)
return;
ch->ccw[1].count = ch->max_bufsize;
fsm_newstate(fi, CTC_STATE_RXIDLE);
- rc = ccw_device_start(ch->cdev, &ch->ccw[0],
- (unsigned long)ch, 0xff, 0);
+ rc = ccw_device_start(ch->cdev, &ch->ccw[0], 0, 0xff, 0);
if (rc != 0) {
fsm_newstate(fi, CTC_STATE_RXINIT);
ctcm_ccw_check_rc(ch, rc, "initial RX");
@@ -571,8 +567,7 @@ static void ctcm_chx_setmode(fsm_instance *fi, int event, void *arg)
/* Such conditional locking is undeterministic in
* static view. => ignore sparse warnings here. */
- rc = ccw_device_start(ch->cdev, &ch->ccw[6],
- (unsigned long)ch, 0xff, 0);
+ rc = ccw_device_start(ch->cdev, &ch->ccw[6], 0, 0xff, 0);
if (event == CTC_EVENT_TIMER) /* see above comments */
spin_unlock_irqrestore(get_ccwdev_lock(ch->cdev), saveflags);
if (rc != 0) {
@@ -637,7 +632,7 @@ static void ctcm_chx_start(fsm_instance *fi, int event, void *arg)
fsm_newstate(fi, CTC_STATE_STARTWAIT);
fsm_addtimer(&ch->timer, 1000, CTC_EVENT_TIMER, ch);
spin_lock_irqsave(get_ccwdev_lock(ch->cdev), saveflags);
- rc = ccw_device_halt(ch->cdev, (unsigned long)ch);
+ rc = ccw_device_halt(ch->cdev, 0);
spin_unlock_irqrestore(get_ccwdev_lock(ch->cdev), saveflags);
if (rc != 0) {
if (rc != -EBUSY)
@@ -672,7 +667,7 @@ static void ctcm_chx_haltio(fsm_instance *fi, int event, void *arg)
* static view. => ignore sparse warnings here. */
oldstate = fsm_getstate(fi);
fsm_newstate(fi, CTC_STATE_TERM);
- rc = ccw_device_halt(ch->cdev, (unsigned long)ch);
+ rc = ccw_device_halt(ch->cdev, 0);
if (event == CTC_EVENT_STOP)
spin_unlock_irqrestore(get_ccwdev_lock(ch->cdev), saveflags);
@@ -799,7 +794,7 @@ static void ctcm_chx_setuperr(fsm_instance *fi, int event, void *arg)
fsm_addtimer(&ch->timer, CTCM_TIME_5_SEC, CTC_EVENT_TIMER, ch);
if (!IS_MPC(ch) &&
(CHANNEL_DIRECTION(ch->flags) == CTCM_READ)) {
- int rc = ccw_device_halt(ch->cdev, (unsigned long)ch);
+ int rc = ccw_device_halt(ch->cdev, 0);
if (rc != 0)
ctcm_ccw_check_rc(ch, rc,
"HaltIO in chx_setuperr");
@@ -851,7 +846,7 @@ static void ctcm_chx_restart(fsm_instance *fi, int event, void *arg)
/* Such conditional locking is a known problem for
* sparse because its undeterministic in static view.
* Warnings should be ignored here. */
- rc = ccw_device_halt(ch->cdev, (unsigned long)ch);
+ rc = ccw_device_halt(ch->cdev, 0);
if (event == CTC_EVENT_TIMER)
spin_unlock_irqrestore(get_ccwdev_lock(ch->cdev), saveflags);
if (rc != 0) {
@@ -947,8 +942,8 @@ static void ctcm_chx_rxdisc(fsm_instance *fi, int event, void *arg)
ch2 = priv->channel[CTCM_WRITE];
fsm_newstate(ch2->fsm, CTC_STATE_DTERM);
- ccw_device_halt(ch->cdev, (unsigned long)ch);
- ccw_device_halt(ch2->cdev, (unsigned long)ch2);
+ ccw_device_halt(ch->cdev, 0);
+ ccw_device_halt(ch2->cdev, 0);
}
/**
@@ -1041,8 +1036,7 @@ static void ctcm_chx_txretry(fsm_instance *fi, int event, void *arg)
ctcmpc_dumpit((char *)&ch->ccw[3],
sizeof(struct ccw1) * 3);
- rc = ccw_device_start(ch->cdev, &ch->ccw[3],
- (unsigned long)ch, 0xff, 0);
+ rc = ccw_device_start(ch->cdev, &ch->ccw[3], 0, 0xff, 0);
if (event == CTC_EVENT_TIMER)
spin_unlock_irqrestore(get_ccwdev_lock(ch->cdev),
saveflags);
@@ -1361,8 +1355,7 @@ static void ctcmpc_chx_txdone(fsm_instance *fi, int event, void *arg)
ch->prof.send_stamp = jiffies;
if (do_debug_ccw)
ctcmpc_dumpit((char *)&ch->ccw[0], sizeof(struct ccw1) * 3);
- rc = ccw_device_start(ch->cdev, &ch->ccw[0],
- (unsigned long)ch, 0xff, 0);
+ rc = ccw_device_start(ch->cdev, &ch->ccw[0], 0, 0xff, 0);
ch->prof.doios_multi++;
if (rc != 0) {
priv->stats.tx_dropped += i;
@@ -1462,8 +1455,7 @@ again:
if (dolock)
spin_lock_irqsave(
get_ccwdev_lock(ch->cdev), saveflags);
- rc = ccw_device_start(ch->cdev, &ch->ccw[0],
- (unsigned long)ch, 0xff, 0);
+ rc = ccw_device_start(ch->cdev, &ch->ccw[0], 0, 0xff, 0);
if (dolock) /* see remark about conditional locking */
spin_unlock_irqrestore(
get_ccwdev_lock(ch->cdev), saveflags);
@@ -1569,8 +1561,7 @@ void ctcmpc_chx_rxidle(fsm_instance *fi, int event, void *arg)
if (event == CTC_EVENT_START)
/* see remark about conditional locking */
spin_lock_irqsave(get_ccwdev_lock(ch->cdev), saveflags);
- rc = ccw_device_start(ch->cdev, &ch->ccw[0],
- (unsigned long)ch, 0xff, 0);
+ rc = ccw_device_start(ch->cdev, &ch->ccw[0], 0, 0xff, 0);
if (event == CTC_EVENT_START)
spin_unlock_irqrestore(
get_ccwdev_lock(ch->cdev), saveflags);
@@ -1825,8 +1816,7 @@ static void ctcmpc_chx_send_sweep(fsm_instance *fsm, int event, void *arg)
spin_lock_irqsave(get_ccwdev_lock(wch->cdev), saveflags);
wch->prof.send_stamp = jiffies;
- rc = ccw_device_start(wch->cdev, &wch->ccw[3],
- (unsigned long) wch, 0xff, 0);
+ rc = ccw_device_start(wch->cdev, &wch->ccw[3], 0, 0xff, 0);
spin_unlock_irqrestore(get_ccwdev_lock(wch->cdev), saveflags);
if ((grp->sweep_req_pend_num == 0) &&
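
Illustrative aside, not part of the patch: with the intparm of ccw_device_start()/ccw_device_halt() now passed as 0 throughout, the interrupt handler can no longer recover the channel from intparm and must derive it from the ccw_device itself. A minimal sketch of that pattern, assuming the driver stored its per-channel data with dev_set_drvdata() (struct example_channel and the handler name are hypothetical):

static void example_irq_handler(struct ccw_device *cdev,
                                unsigned long intparm, struct irb *irb)
{
    /* intparm is 0 for every request this driver starts now: */
    struct example_channel *ch = dev_get_drvdata(&cdev->dev);

    if (!ch)
        return;
    /* inspect irb->scsw and feed the channel FSM from here */
}
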
diff --git a/drivers/s390/net/ctcm_main.c b/drivers/s390/net/ctcm_main.c
index f63c5c871d3d..2117870ed855 100644
--- a/drivers/s390/net/ctcm_main.c
+++ b/drivers/s390/net/ctcm_main.c
@@ -569,8 +569,7 @@ static int ctcm_transmit_skb(struct channel *ch, struct sk_buff *skb)
fsm_addtimer(&ch->timer, CTCM_TIME_5_SEC, CTC_EVENT_TIMER, ch);
spin_lock_irqsave(get_ccwdev_lock(ch->cdev), saveflags);
ch->prof.send_stamp = jiffies;
- rc = ccw_device_start(ch->cdev, &ch->ccw[ccw_idx],
- (unsigned long)ch, 0xff, 0);
+ rc = ccw_device_start(ch->cdev, &ch->ccw[ccw_idx], 0, 0xff, 0);
spin_unlock_irqrestore(get_ccwdev_lock(ch->cdev), saveflags);
if (ccw_idx == 3)
ch->prof.doios_single++;
@@ -833,8 +832,7 @@ static int ctcmpc_transmit_skb(struct channel *ch, struct sk_buff *skb)
spin_lock_irqsave(get_ccwdev_lock(ch->cdev), saveflags);
ch->prof.send_stamp = jiffies;
- rc = ccw_device_start(ch->cdev, &ch->ccw[ccw_idx],
- (unsigned long)ch, 0xff, 0);
+ rc = ccw_device_start(ch->cdev, &ch->ccw[ccw_idx], 0, 0xff, 0);
spin_unlock_irqrestore(get_ccwdev_lock(ch->cdev), saveflags);
if (ccw_idx == 3)
ch->prof.doios_single++;
diff --git a/drivers/s390/net/ctcm_mpc.c b/drivers/s390/net/ctcm_mpc.c
index 1534420a0243..ab316baa8284 100644
--- a/drivers/s390/net/ctcm_mpc.c
+++ b/drivers/s390/net/ctcm_mpc.c
@@ -1523,8 +1523,7 @@ void mpc_action_send_discontact(unsigned long thischan)
unsigned long saveflags = 0;
spin_lock_irqsave(get_ccwdev_lock(ch->cdev), saveflags);
- rc = ccw_device_start(ch->cdev, &ch->ccw[15],
- (unsigned long)ch, 0xff, 0);
+ rc = ccw_device_start(ch->cdev, &ch->ccw[15], 0, 0xff, 0);
spin_unlock_irqrestore(get_ccwdev_lock(ch->cdev), saveflags);
if (rc != 0) {
@@ -1797,8 +1796,7 @@ static void mpc_action_side_xid(fsm_instance *fsm, void *arg, int side)
}
fsm_addtimer(&ch->timer, 5000 , CTC_EVENT_TIMER, ch);
- rc = ccw_device_start(ch->cdev, &ch->ccw[8],
- (unsigned long)ch, 0xff, 0);
+ rc = ccw_device_start(ch->cdev, &ch->ccw[8], 0, 0xff, 0);
if (gotlock) /* see remark above about conditional locking */
spin_unlock_irqrestore(get_ccwdev_lock(ch->cdev), saveflags);
diff --git a/drivers/s390/net/lcs.c b/drivers/s390/net/lcs.c
index 2d9fe7e4ee40..8f08b0a2917c 100644
--- a/drivers/s390/net/lcs.c
+++ b/drivers/s390/net/lcs.c
@@ -504,7 +504,7 @@ lcs_clear_channel(struct lcs_channel *channel)
LCS_DBF_TEXT(4,trace,"clearch");
LCS_DBF_TEXT_(4, trace, "%s", dev_name(&channel->ccwdev->dev));
spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags);
- rc = ccw_device_clear(channel->ccwdev, (addr_t) channel);
+ rc = ccw_device_clear(channel->ccwdev, 0);
spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags);
if (rc) {
LCS_DBF_TEXT_(4, trace, "ecsc%s",
@@ -532,7 +532,7 @@ lcs_stop_channel(struct lcs_channel *channel)
LCS_DBF_TEXT_(4, trace, "%s", dev_name(&channel->ccwdev->dev));
channel->state = LCS_CH_STATE_INIT;
spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags);
- rc = ccw_device_halt(channel->ccwdev, (addr_t) channel);
+ rc = ccw_device_halt(channel->ccwdev, 0);
spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags);
if (rc) {
LCS_DBF_TEXT_(4, trace, "ehsc%s",
@@ -1427,7 +1427,7 @@ lcs_irq(struct ccw_device *cdev, unsigned long intparm, struct irb *irb)
channel->state = LCS_CH_STATE_SUSPENDED;
if (irb->scsw.cmd.fctl & SCSW_FCTL_HALT_FUNC) {
if (irb->scsw.cmd.cc != 0) {
- ccw_device_halt(channel->ccwdev, (addr_t) channel);
+ ccw_device_halt(channel->ccwdev, 0);
return;
}
/* The channel has been stopped by halt_IO. */
diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h
index 28db887d38ed..e4b55f9aa062 100644
--- a/drivers/s390/net/qeth_core.h
+++ b/drivers/s390/net/qeth_core.h
@@ -22,6 +22,7 @@
#include <linux/hashtable.h>
#include <linux/ip.h>
#include <linux/refcount.h>
+#include <linux/timer.h>
#include <linux/wait.h>
#include <linux/workqueue.h>
@@ -30,6 +31,7 @@
#include <net/ipv6.h>
#include <net/if_inet6.h>
#include <net/addrconf.h>
+#include <net/sch_generic.h>
#include <net/tcp.h>
#include <asm/debug.h>
@@ -376,6 +378,28 @@ enum qeth_header_ids {
#define QETH_HDR_EXT_CSUM_TRANSP_REQ 0x20
#define QETH_HDR_EXT_UDP 0x40 /*bit off for TCP*/
+static inline bool qeth_l2_same_vlan(struct qeth_hdr_layer2 *h1,
+ struct qeth_hdr_layer2 *h2)
+{
+ return !((h1->flags[2] ^ h2->flags[2]) & QETH_LAYER2_FLAG_VLAN) &&
+ h1->vlan_id == h2->vlan_id;
+}
+
+static inline bool qeth_l3_iqd_same_vlan(struct qeth_hdr_layer3 *h1,
+ struct qeth_hdr_layer3 *h2)
+{
+ return !((h1->ext_flags ^ h2->ext_flags) & QETH_HDR_EXT_VLAN_FRAME) &&
+ h1->vlan_id == h2->vlan_id;
+}
+
+static inline bool qeth_l3_same_next_hop(struct qeth_hdr_layer3 *h1,
+ struct qeth_hdr_layer3 *h2)
+{
+ return !((h1->flags ^ h2->flags) & QETH_HDR_IPV6) &&
+ ipv6_addr_equal(&h1->next_hop.ipv6_addr,
+ &h2->next_hop.ipv6_addr);
+}
+
enum qeth_qdio_info_states {
QETH_QDIO_UNINITIALIZED,
QETH_QDIO_ALLOCATED,
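
Illustrative aside, not part of the patch: the three helpers above share one XOR-mask idiom. XOR-ing the two flag fields is non-zero exactly in the bits where the headers differ, so masking with the VLAN bit tests "same VLAN flag state" in a single expression. A standalone, userspace-compilable sketch with hypothetical types:

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

#define EX_FLAG_VLAN 0x04    /* stand-in for QETH_LAYER2_FLAG_VLAN */

struct ex_hdr {
    uint8_t flags;
    uint16_t vlan_id;
};

static bool ex_same_vlan(const struct ex_hdr *h1, const struct ex_hdr *h2)
{
    /* the XOR is non-zero iff the VLAN flag bit disagrees: */
    return !((h1->flags ^ h2->flags) & EX_FLAG_VLAN) &&
           h1->vlan_id == h2->vlan_id;
}

int main(void)
{
    struct ex_hdr a = { .flags = EX_FLAG_VLAN, .vlan_id = 100 };
    struct ex_hdr b = { .flags = EX_FLAG_VLAN, .vlan_id = 100 };
    struct ex_hdr c = { .flags = 0, .vlan_id = 100 };

    assert(ex_same_vlan(&a, &b));
    assert(!ex_same_vlan(&a, &c));
    return 0;
}
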
@@ -424,6 +448,7 @@ struct qeth_qdio_out_buffer {
struct qdio_buffer *buffer;
atomic_t state;
int next_element_to_fill;
+ unsigned int bytes;
struct sk_buff_head skb_list;
int is_header[QDIO_MAX_ELEMENTS_PER_BUFFER];
@@ -473,6 +498,8 @@ struct qeth_out_q_stats {
u64 tso_bytes;
u64 packing_mode_switch;
u64 stopped;
+ u64 completion_yield;
+ u64 completion_timer;
/* rtnl_link_stats64 */
u64 tx_packets;
@@ -481,6 +508,8 @@ struct qeth_out_q_stats {
u64 tx_dropped;
};
+#define QETH_TX_TIMER_USECS 500
+
struct qeth_qdio_out_q {
struct qdio_buffer *qdio_bufs[QDIO_MAX_BUFFERS_PER_Q];
struct qeth_qdio_out_buffer *bufs[QDIO_MAX_BUFFERS_PER_Q];
@@ -499,13 +528,36 @@ struct qeth_qdio_out_q {
atomic_t used_buffers;
/* indicates whether PCI flag must be set (or if one is outstanding) */
atomic_t set_pci_flags_count;
+ struct napi_struct napi;
+ struct timer_list timer;
+ struct qeth_hdr *prev_hdr;
+ u8 bulk_start;
};
+#define qeth_for_each_output_queue(card, q, i) \
+ for (i = 0; i < card->qdio.no_out_queues && \
+ (q = card->qdio.out_qs[i]); i++)
+
+#define qeth_napi_to_out_queue(n) container_of(n, struct qeth_qdio_out_q, napi)
+
+static inline void qeth_tx_arm_timer(struct qeth_qdio_out_q *queue)
+{
+ if (timer_pending(&queue->timer))
+ return;
+ mod_timer(&queue->timer, usecs_to_jiffies(QETH_TX_TIMER_USECS) +
+ jiffies);
+}
+
static inline bool qeth_out_queue_is_full(struct qeth_qdio_out_q *queue)
{
return atomic_read(&queue->used_buffers) >= QDIO_MAX_BUFFERS_PER_Q;
}
+static inline bool qeth_out_queue_is_empty(struct qeth_qdio_out_q *queue)
+{
+ return atomic_read(&queue->used_buffers) == 0;
+}
+
struct qeth_qdio_info {
atomic_t state;
/* input */
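
The timer_pending() guard in qeth_tx_arm_timer() above means a busy TX path arms the timer once and then leaves the expiry alone, so the wait for a missing completion interrupt stays bounded by roughly QETH_TX_TIMER_USECS. Usage sketch, consistent with the poll code later in this patch (an assumption, not a quote):

    /* in a completion path that found no finished buffers yet: */
    if (!qeth_out_queue_is_empty(queue))
        qeth_tx_arm_timer(queue);
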
@@ -572,15 +624,26 @@ struct qeth_channel {
atomic_t irq_pending;
};
+struct qeth_reply {
+ int (*callback)(struct qeth_card *card, struct qeth_reply *reply,
+ unsigned long data);
+ void *param;
+};
+
struct qeth_cmd_buffer {
+ struct list_head list;
+ struct completion done;
+ spinlock_t lock;
unsigned int length;
refcount_t ref_count;
struct qeth_channel *channel;
- struct qeth_reply *reply;
+ struct qeth_reply reply;
long timeout;
unsigned char *data;
void (*finalize)(struct qeth_card *card, struct qeth_cmd_buffer *iob);
- void (*callback)(struct qeth_card *card, struct qeth_cmd_buffer *iob);
+ void (*callback)(struct qeth_card *card, struct qeth_cmd_buffer *iob,
+ unsigned int data_length);
+ int rc;
};
static inline void qeth_get_cmd(struct qeth_cmd_buffer *iob)
@@ -626,19 +689,6 @@ struct qeth_seqno {
__u16 ipa;
};
-struct qeth_reply {
- struct list_head list;
- struct completion received;
- spinlock_t lock;
- int (*callback)(struct qeth_card *, struct qeth_reply *,
- unsigned long);
- u32 seqno;
- unsigned long offset;
- int rc;
- void *param;
- refcount_t refcnt;
-};
-
struct qeth_card_blkt {
int time_total;
int inter_packet;
@@ -651,10 +701,11 @@ struct qeth_card_blkt {
struct qeth_card_info {
unsigned short unit_addr2;
unsigned short cula;
- unsigned short chpid;
+ u8 chpid;
__u16 func_level;
char mcl_level[QETH_MCL_LENGTH + 1];
u8 open_when_online:1;
+ u8 promisc_mode:1;
u8 use_v1_blkt:1;
u8 is_vm_nic:1;
int mac_bits;
@@ -664,7 +715,6 @@ struct qeth_card_info {
int unique_id;
bool layer_enforced;
struct qeth_card_blkt blkt;
- enum qeth_ipa_promisc_modes promisc_mode;
__u32 diagass_support;
__u32 hwtrap;
};
@@ -994,6 +1044,7 @@ struct qeth_cmd_buffer *qeth_get_setassparms_cmd(struct qeth_card *card,
struct qeth_cmd_buffer *qeth_get_diag_cmd(struct qeth_card *card,
enum qeth_diags_cmds sub_cmd,
unsigned int data_length);
+void qeth_notify_cmd(struct qeth_cmd_buffer *iob, int reason);
void qeth_put_cmd(struct qeth_cmd_buffer *iob);
struct sk_buff *qeth_core_get_next_skb(struct qeth_card *,
@@ -1005,10 +1056,9 @@ void qeth_clear_ipacmd_list(struct qeth_card *);
int qeth_qdio_clear_card(struct qeth_card *, int);
void qeth_clear_working_pool_list(struct qeth_card *);
void qeth_drain_output_queues(struct qeth_card *card);
-void qeth_setadp_promisc_mode(struct qeth_card *);
+void qeth_setadp_promisc_mode(struct qeth_card *card, bool enable);
int qeth_setadpparms_change_macaddr(struct qeth_card *);
void qeth_tx_timeout(struct net_device *);
-void qeth_notify_reply(struct qeth_reply *reply, int reason);
void qeth_prepare_ipa_cmd(struct qeth_card *card, struct qeth_cmd_buffer *iob,
u16 cmd_length);
int qeth_query_switch_attributes(struct qeth_card *card,
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
index 6502b148541e..a7868c8133ee 100644
--- a/drivers/s390/net/qeth_core_main.c
+++ b/drivers/s390/net/qeth_core_main.c
@@ -63,14 +63,16 @@ static struct device *qeth_core_root_dev;
static struct lock_class_key qdio_out_skb_queue_key;
static void qeth_issue_next_read_cb(struct qeth_card *card,
- struct qeth_cmd_buffer *iob);
+ struct qeth_cmd_buffer *iob,
+ unsigned int data_length);
static void qeth_free_buffer_pool(struct qeth_card *);
static int qeth_qdio_establish(struct qeth_card *);
static void qeth_free_qdio_queues(struct qeth_card *card);
static void qeth_notify_skbs(struct qeth_qdio_out_q *queue,
struct qeth_qdio_out_buffer *buf,
enum iucv_tx_notify notification);
-static void qeth_release_skbs(struct qeth_qdio_out_buffer *buf);
+static void qeth_tx_complete_buf(struct qeth_qdio_out_buffer *buf, bool error,
+ int budget);
static int qeth_init_qdio_out_buf(struct qeth_qdio_out_q *, int);
static void qeth_close_dev_handler(struct work_struct *work)
@@ -410,7 +412,7 @@ static void qeth_cleanup_handled_pending(struct qeth_qdio_out_q *q, int bidx,
/* release here to avoid interleaving between
outbound tasklet and inbound tasklet
regarding notifications and lifecycle */
- qeth_release_skbs(c);
+ qeth_tx_complete_buf(c, forced_cleanup, 0);
c = f->next_pending;
WARN_ON_ONCE(head->next_pending != f);
@@ -536,50 +538,28 @@ static int qeth_issue_next_read(struct qeth_card *card)
return ret;
}
-static struct qeth_reply *qeth_alloc_reply(struct qeth_card *card)
-{
- struct qeth_reply *reply;
-
- reply = kzalloc(sizeof(*reply), GFP_KERNEL);
- if (reply) {
- refcount_set(&reply->refcnt, 1);
- init_completion(&reply->received);
- spin_lock_init(&reply->lock);
- }
- return reply;
-}
-
-static void qeth_get_reply(struct qeth_reply *reply)
-{
- refcount_inc(&reply->refcnt);
-}
-
-static void qeth_put_reply(struct qeth_reply *reply)
-{
- if (refcount_dec_and_test(&reply->refcnt))
- kfree(reply);
-}
-
-static void qeth_enqueue_reply(struct qeth_card *card, struct qeth_reply *reply)
+static void qeth_enqueue_cmd(struct qeth_card *card,
+ struct qeth_cmd_buffer *iob)
{
spin_lock_irq(&card->lock);
- list_add_tail(&reply->list, &card->cmd_waiter_list);
+ list_add_tail(&iob->list, &card->cmd_waiter_list);
spin_unlock_irq(&card->lock);
}
-static void qeth_dequeue_reply(struct qeth_card *card, struct qeth_reply *reply)
+static void qeth_dequeue_cmd(struct qeth_card *card,
+ struct qeth_cmd_buffer *iob)
{
spin_lock_irq(&card->lock);
- list_del(&reply->list);
+ list_del(&iob->list);
spin_unlock_irq(&card->lock);
}
-void qeth_notify_reply(struct qeth_reply *reply, int reason)
+void qeth_notify_cmd(struct qeth_cmd_buffer *iob, int reason)
{
- reply->rc = reason;
- complete(&reply->received);
+ iob->rc = reason;
+ complete(&iob->done);
}
-EXPORT_SYMBOL_GPL(qeth_notify_reply);
+EXPORT_SYMBOL_GPL(qeth_notify_cmd);
static void qeth_issue_ipa_msg(struct qeth_ipa_cmd *cmd, int rc,
struct qeth_card *card)
@@ -657,14 +637,14 @@ static struct qeth_ipa_cmd *qeth_check_ipa_data(struct qeth_card *card,
void qeth_clear_ipacmd_list(struct qeth_card *card)
{
- struct qeth_reply *reply;
+ struct qeth_cmd_buffer *iob;
unsigned long flags;
QETH_CARD_TEXT(card, 4, "clipalst");
spin_lock_irqsave(&card->lock, flags);
- list_for_each_entry(reply, &card->cmd_waiter_list, list)
- qeth_notify_reply(reply, -EIO);
+ list_for_each_entry(iob, &card->cmd_waiter_list, list)
+ qeth_notify_cmd(iob, -EIO);
spin_unlock_irqrestore(&card->lock, flags);
}
EXPORT_SYMBOL_GPL(qeth_clear_ipacmd_list);
@@ -693,8 +673,6 @@ static int qeth_check_idx_response(struct qeth_card *card,
void qeth_put_cmd(struct qeth_cmd_buffer *iob)
{
if (refcount_dec_and_test(&iob->ref_count)) {
- if (iob->reply)
- qeth_put_reply(iob->reply);
kfree(iob->data);
kfree(iob);
}
@@ -702,17 +680,15 @@ void qeth_put_cmd(struct qeth_cmd_buffer *iob)
EXPORT_SYMBOL_GPL(qeth_put_cmd);
static void qeth_release_buffer_cb(struct qeth_card *card,
- struct qeth_cmd_buffer *iob)
+ struct qeth_cmd_buffer *iob,
+ unsigned int data_length)
{
qeth_put_cmd(iob);
}
static void qeth_cancel_cmd(struct qeth_cmd_buffer *iob, int rc)
{
- struct qeth_reply *reply = iob->reply;
-
- if (reply)
- qeth_notify_reply(reply, rc);
+ qeth_notify_cmd(iob, rc);
qeth_put_cmd(iob);
}
@@ -736,6 +712,9 @@ struct qeth_cmd_buffer *qeth_alloc_cmd(struct qeth_channel *channel,
return NULL;
}
+ init_completion(&iob->done);
+ spin_lock_init(&iob->lock);
+ INIT_LIST_HEAD(&iob->list);
refcount_set(&iob->ref_count, 1);
iob->channel = channel;
iob->timeout = timeout;
@@ -745,11 +724,13 @@ struct qeth_cmd_buffer *qeth_alloc_cmd(struct qeth_channel *channel,
EXPORT_SYMBOL_GPL(qeth_alloc_cmd);
static void qeth_issue_next_read_cb(struct qeth_card *card,
- struct qeth_cmd_buffer *iob)
+ struct qeth_cmd_buffer *iob,
+ unsigned int data_length)
{
+ struct qeth_cmd_buffer *request = NULL;
struct qeth_ipa_cmd *cmd = NULL;
struct qeth_reply *reply = NULL;
- struct qeth_reply *r;
+ struct qeth_cmd_buffer *tmp;
unsigned long flags;
int rc = 0;
@@ -784,44 +765,39 @@ static void qeth_issue_next_read_cb(struct qeth_card *card,
/* match against pending cmd requests */
spin_lock_irqsave(&card->lock, flags);
- list_for_each_entry(r, &card->cmd_waiter_list, list) {
- if ((r->seqno == QETH_IDX_COMMAND_SEQNO) ||
- (cmd && (r->seqno == cmd->hdr.seqno))) {
- reply = r;
+ list_for_each_entry(tmp, &card->cmd_waiter_list, list) {
+ if (!IS_IPA(tmp->data) ||
+ __ipa_cmd(tmp)->hdr.seqno == cmd->hdr.seqno) {
+ request = tmp;
/* take the object outside the lock */
- qeth_get_reply(reply);
+ qeth_get_cmd(request);
break;
}
}
spin_unlock_irqrestore(&card->lock, flags);
- if (!reply)
+ if (!request)
goto out;
+ reply = &request->reply;
if (!reply->callback) {
rc = 0;
goto no_callback;
}
- spin_lock_irqsave(&reply->lock, flags);
- if (reply->rc) {
+ spin_lock_irqsave(&request->lock, flags);
+ if (request->rc)
/* Bail out when the requestor has already left: */
- rc = reply->rc;
- } else {
- if (cmd) {
- reply->offset = (u16)((char *)cmd - (char *)iob->data);
- rc = reply->callback(card, reply, (unsigned long)cmd);
- } else {
- rc = reply->callback(card, reply, (unsigned long)iob);
- }
- }
- spin_unlock_irqrestore(&reply->lock, flags);
+ rc = request->rc;
+ else
+ rc = reply->callback(card, reply, cmd ? (unsigned long)cmd :
+ (unsigned long)iob);
+ spin_unlock_irqrestore(&request->lock, flags);
no_callback:
if (rc <= 0)
- qeth_notify_reply(reply, rc);
- qeth_put_reply(reply);
-
+ qeth_notify_cmd(request, rc);
+ qeth_put_cmd(request);
out:
memcpy(&card->seqno.pdu_hdr_ack,
QETH_PDU_HEADER_SEQ_NO(iob->data),
@@ -1072,8 +1048,16 @@ static void qeth_irq(struct ccw_device *cdev, unsigned long intparm,
}
}
- if (iob && iob->callback)
- iob->callback(card, iob);
+ if (iob) {
+ /* sanity check: */
+ if (irb->scsw.cmd.count > iob->length) {
+ qeth_cancel_cmd(iob, -EIO);
+ goto out;
+ }
+ if (iob->callback)
+ iob->callback(card, iob,
+ iob->length - irb->scsw.cmd.count);
+ }
out:
wake_up(&card->wait_q);
@@ -1094,22 +1078,52 @@ static void qeth_notify_skbs(struct qeth_qdio_out_q *q,
}
}
-static void qeth_release_skbs(struct qeth_qdio_out_buffer *buf)
+static void qeth_tx_complete_buf(struct qeth_qdio_out_buffer *buf, bool error,
+ int budget)
{
+ struct qeth_qdio_out_q *queue = buf->q;
struct sk_buff *skb;
/* release may never happen from within CQ tasklet scope */
WARN_ON_ONCE(atomic_read(&buf->state) == QETH_QDIO_BUF_IN_CQ);
if (atomic_read(&buf->state) == QETH_QDIO_BUF_PENDING)
- qeth_notify_skbs(buf->q, buf, TX_NOTIFY_GENERALERROR);
+ qeth_notify_skbs(queue, buf, TX_NOTIFY_GENERALERROR);
- while ((skb = __skb_dequeue(&buf->skb_list)) != NULL)
- consume_skb(skb);
+ /* Empty buffer? */
+ if (buf->next_element_to_fill == 0)
+ return;
+
+ QETH_TXQ_STAT_INC(queue, bufs);
+ QETH_TXQ_STAT_ADD(queue, buf_elements, buf->next_element_to_fill);
+ while ((skb = __skb_dequeue(&buf->skb_list)) != NULL) {
+ unsigned int bytes = qdisc_pkt_len(skb);
+ bool is_tso = skb_is_gso(skb);
+ unsigned int packets;
+
+ packets = is_tso ? skb_shinfo(skb)->gso_segs : 1;
+ if (error) {
+ QETH_TXQ_STAT_ADD(queue, tx_errors, packets);
+ } else {
+ QETH_TXQ_STAT_ADD(queue, tx_packets, packets);
+ QETH_TXQ_STAT_ADD(queue, tx_bytes, bytes);
+ if (skb->ip_summed == CHECKSUM_PARTIAL)
+ QETH_TXQ_STAT_ADD(queue, skbs_csum, packets);
+ if (skb_is_nonlinear(skb))
+ QETH_TXQ_STAT_INC(queue, skbs_sg);
+ if (is_tso) {
+ QETH_TXQ_STAT_INC(queue, skbs_tso);
+ QETH_TXQ_STAT_ADD(queue, tso_bytes, bytes);
+ }
+ }
+
+ napi_consume_skb(skb, budget);
+ }
}
static void qeth_clear_output_buffer(struct qeth_qdio_out_q *queue,
- struct qeth_qdio_out_buffer *buf)
+ struct qeth_qdio_out_buffer *buf,
+ bool error, int budget)
{
int i;
@@ -1117,7 +1131,7 @@ static void qeth_clear_output_buffer(struct qeth_qdio_out_q *queue,
if (buf->buffer->element[0].sflags & SBAL_SFLAGS0_PCI_REQ)
atomic_dec(&queue->set_pci_flags_count);
- qeth_release_skbs(buf);
+ qeth_tx_complete_buf(buf, error, budget);
for (i = 0; i < queue->max_elements; ++i) {
if (buf->buffer->element[i].addr && buf->is_header[i])
@@ -1128,6 +1142,7 @@ static void qeth_clear_output_buffer(struct qeth_qdio_out_q *queue,
qeth_scrub_qdio_buffer(buf->buffer, queue->max_elements);
buf->next_element_to_fill = 0;
+ buf->bytes = 0;
atomic_set(&buf->state, QETH_QDIO_BUF_EMPTY);
}
@@ -1139,7 +1154,7 @@ static void qeth_drain_output_queue(struct qeth_qdio_out_q *q, bool free)
if (!q->bufs[j])
continue;
qeth_cleanup_handled_pending(q, j, 1);
- qeth_clear_output_buffer(q, q->bufs[j]);
+ qeth_clear_output_buffer(q, q->bufs[j], true, 0);
if (free) {
kmem_cache_free(qeth_qdio_outbuf_cache, q->bufs[j]);
q->bufs[j] = NULL;
@@ -1652,7 +1667,6 @@ static void qeth_mpc_finalize_cmd(struct qeth_card *card,
memcpy(QETH_PDU_HEADER_ACK_SEQ_NO(iob->data),
&card->seqno.pdu_hdr_ack, QETH_SEQ_NO_LENGTH);
- iob->reply->seqno = QETH_IDX_COMMAND_SEQNO;
iob->callback = qeth_release_buffer_cb;
}
@@ -1703,29 +1717,19 @@ static int qeth_send_control_data(struct qeth_card *card,
void *reply_param)
{
struct qeth_channel *channel = iob->channel;
+ struct qeth_reply *reply = &iob->reply;
long timeout = iob->timeout;
int rc;
- struct qeth_reply *reply = NULL;
QETH_CARD_TEXT(card, 2, "sendctl");
- reply = qeth_alloc_reply(card);
- if (!reply) {
- qeth_put_cmd(iob);
- return -ENOMEM;
- }
reply->callback = reply_cb;
reply->param = reply_param;
- /* pairs with qeth_put_cmd(): */
- qeth_get_reply(reply);
- iob->reply = reply;
-
timeout = wait_event_interruptible_timeout(card->wait_q,
qeth_trylock_channel(channel),
timeout);
if (timeout <= 0) {
- qeth_put_reply(reply);
qeth_put_cmd(iob);
return (timeout == -ERESTARTSYS) ? -EINTR : -ETIME;
}
@@ -1734,7 +1738,10 @@ static int qeth_send_control_data(struct qeth_card *card,
iob->finalize(card, iob);
QETH_DBF_HEX(CTRL, 2, iob->data, min(iob->length, QETH_DBF_CTRL_LEN));
- qeth_enqueue_reply(card, reply);
+ qeth_enqueue_cmd(card, iob);
+
+ /* This pairs with iob->callback, and keeps the iob alive after IO: */
+ qeth_get_cmd(iob);
QETH_CARD_TEXT(card, 6, "noirqpnd");
spin_lock_irq(get_ccwdev_lock(channel->ccwdev));
@@ -1745,51 +1752,74 @@ static int qeth_send_control_data(struct qeth_card *card,
QETH_DBF_MESSAGE(2, "qeth_send_control_data on device %x: ccw_device_start rc = %i\n",
CARD_DEVID(card), rc);
QETH_CARD_TEXT_(card, 2, " err%d", rc);
- qeth_dequeue_reply(card, reply);
- qeth_put_reply(reply);
+ qeth_dequeue_cmd(card, iob);
qeth_put_cmd(iob);
atomic_set(&channel->irq_pending, 0);
wake_up(&card->wait_q);
- return rc;
+ goto out;
}
- timeout = wait_for_completion_interruptible_timeout(&reply->received,
+ timeout = wait_for_completion_interruptible_timeout(&iob->done,
timeout);
if (timeout <= 0)
rc = (timeout == -ERESTARTSYS) ? -EINTR : -ETIME;
- qeth_dequeue_reply(card, reply);
+ qeth_dequeue_cmd(card, iob);
if (reply_cb) {
/* Wait until the callback for a late reply has completed: */
- spin_lock_irq(&reply->lock);
+ spin_lock_irq(&iob->lock);
if (rc)
/* Zap any callback that's still pending: */
- reply->rc = rc;
- spin_unlock_irq(&reply->lock);
+ iob->rc = rc;
+ spin_unlock_irq(&iob->lock);
}
if (!rc)
- rc = reply->rc;
- qeth_put_reply(reply);
+ rc = iob->rc;
+
+out:
+ qeth_put_cmd(iob);
return rc;
}
+struct qeth_node_desc {
+ struct node_descriptor nd1;
+ struct node_descriptor nd2;
+ struct node_descriptor nd3;
+};
+
static void qeth_read_conf_data_cb(struct qeth_card *card,
- struct qeth_cmd_buffer *iob)
+ struct qeth_cmd_buffer *iob,
+ unsigned int data_length)
{
- unsigned char *prcd = iob->data;
+ struct qeth_node_desc *nd = (struct qeth_node_desc *) iob->data;
+ int rc = 0;
+ u8 *tag;
QETH_CARD_TEXT(card, 2, "cfgunit");
- card->info.chpid = prcd[30];
- card->info.unit_addr2 = prcd[31];
- card->info.cula = prcd[63];
- card->info.is_vm_nic = ((prcd[0x10] == _ascebc['V']) &&
- (prcd[0x11] == _ascebc['M']));
- card->info.use_v1_blkt = prcd[74] == 0xF0 && prcd[75] == 0xF0 &&
- prcd[76] >= 0xF1 && prcd[76] <= 0xF4;
-
- qeth_notify_reply(iob->reply, 0);
+
+ if (data_length < sizeof(*nd)) {
+ rc = -EINVAL;
+ goto out;
+ }
+
+ card->info.is_vm_nic = nd->nd1.plant[0] == _ascebc['V'] &&
+ nd->nd1.plant[1] == _ascebc['M'];
+ tag = (u8 *)&nd->nd1.tag;
+ card->info.chpid = tag[0];
+ card->info.unit_addr2 = tag[1];
+
+ tag = (u8 *)&nd->nd2.tag;
+ card->info.cula = tag[1];
+
+ card->info.use_v1_blkt = nd->nd3.model[0] == 0xF0 &&
+ nd->nd3.model[1] == 0xF0 &&
+ nd->nd3.model[2] >= 0xF1 &&
+ nd->nd3.model[2] <= 0xF4;
+
+out:
+ qeth_notify_cmd(iob, rc);
qeth_put_cmd(iob);
}
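
The rework above folds the old refcounted struct qeth_reply into the command buffer itself: the request object now carries its own completion and result code. A minimal sketch of that pattern, assuming only the kernel completion API (the ex_* names are illustrative, not driver functions):

#include <linux/completion.h>
#include <linux/errno.h>

struct ex_cmd {
    struct completion done;
    int rc;
};

/* response side: record the outcome and wake the waiter */
static void ex_cmd_notify(struct ex_cmd *cmd, int reason)
{
    cmd->rc = reason;
    complete(&cmd->done);
}

/* request side: init before starting IO, then sleep until notified */
static int ex_cmd_wait(struct ex_cmd *cmd, long timeout)
{
    init_completion(&cmd->done);
    /* ... start the IO that eventually calls ex_cmd_notify() ... */
    timeout = wait_for_completion_interruptible_timeout(&cmd->done,
                                                        timeout);
    if (timeout <= 0)
        return (timeout == -ERESTARTSYS) ? -EINTR : -ETIME;
    return cmd->rc;
}
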
@@ -1803,6 +1833,8 @@ static int qeth_read_conf_data(struct qeth_card *card)
ciw = ccw_device_get_ciw(channel->ccwdev, CIW_TYPE_RCD);
if (!ciw || ciw->cmd == 0)
return -EOPNOTSUPP;
+ if (ciw->count < sizeof(struct qeth_node_desc))
+ return -EINVAL;
iob = qeth_alloc_cmd(channel, ciw->count, 1, QETH_RCD_TIMEOUT);
if (!iob)
@@ -1850,7 +1882,8 @@ static int qeth_idx_check_activate_response(struct qeth_card *card,
}
static void qeth_idx_activate_read_channel_cb(struct qeth_card *card,
- struct qeth_cmd_buffer *iob)
+ struct qeth_cmd_buffer *iob,
+ unsigned int data_length)
{
struct qeth_channel *channel = iob->channel;
u16 peer_level;
@@ -1878,12 +1911,13 @@ static void qeth_idx_activate_read_channel_cb(struct qeth_card *card,
QETH_IDX_REPLY_LEVEL(iob->data), QETH_MCL_LENGTH);
out:
- qeth_notify_reply(iob->reply, rc);
+ qeth_notify_cmd(iob, rc);
qeth_put_cmd(iob);
}
static void qeth_idx_activate_write_channel_cb(struct qeth_card *card,
- struct qeth_cmd_buffer *iob)
+ struct qeth_cmd_buffer *iob,
+ unsigned int data_length)
{
struct qeth_channel *channel = iob->channel;
u16 peer_level;
@@ -1905,7 +1939,7 @@ static void qeth_idx_activate_write_channel_cb(struct qeth_card *card,
}
out:
- qeth_notify_reply(iob->reply, rc);
+ qeth_notify_cmd(iob, rc);
qeth_put_cmd(iob);
}
@@ -2253,6 +2287,14 @@ static struct qeth_qdio_out_q *qeth_alloc_output_queue(void)
return q;
}
+static void qeth_tx_completion_timer(struct timer_list *timer)
+{
+ struct qeth_qdio_out_q *queue = from_timer(queue, timer, timer);
+
+ napi_schedule(&queue->napi);
+ QETH_TXQ_STAT_INC(queue, completion_timer);
+}
+
static int qeth_alloc_qdio_queues(struct qeth_card *card)
{
int i, j;
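
qeth_tx_completion_timer() above recovers its queue with from_timer(), which is container_of() specialized for timers embedded in a larger struct; the pairing only works because timer_setup() (added to qeth_alloc_qdio_queues() below) registered the callback against that embedded timer. In outline:

    timer_setup(&queue->timer, qeth_tx_completion_timer, 0);
    /* and in the callback, from_timer(queue, timer, timer) expands to: */
    queue = container_of(timer, struct qeth_qdio_out_q, timer);
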
@@ -2274,17 +2316,22 @@ static int qeth_alloc_qdio_queues(struct qeth_card *card)
/* outbound */
for (i = 0; i < card->qdio.no_out_queues; ++i) {
- card->qdio.out_qs[i] = qeth_alloc_output_queue();
- if (!card->qdio.out_qs[i])
+ struct qeth_qdio_out_q *queue;
+
+ queue = qeth_alloc_output_queue();
+ if (!queue)
goto out_freeoutq;
QETH_CARD_TEXT_(card, 2, "outq %i", i);
- QETH_CARD_HEX(card, 2, &card->qdio.out_qs[i], sizeof(void *));
- card->qdio.out_qs[i]->card = card;
- card->qdio.out_qs[i]->queue_no = i;
+ QETH_CARD_HEX(card, 2, &queue, sizeof(void *));
+ card->qdio.out_qs[i] = queue;
+ queue->card = card;
+ queue->queue_no = i;
+ timer_setup(&queue->timer, qeth_tx_completion_timer, 0);
+
/* give outbound qeth_qdio_buffers their qdio_buffers */
for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j) {
- WARN_ON(card->qdio.out_qs[i]->bufs[j] != NULL);
- if (qeth_init_qdio_out_buf(card->qdio.out_qs[i], j))
+ WARN_ON(queue->bufs[j]);
+ if (qeth_init_qdio_out_buf(queue, j))
goto out_freeoutqbufs;
}
}
@@ -2624,9 +2671,12 @@ int qeth_init_qdio_queues(struct qeth_card *card)
queue->max_elements = QETH_MAX_BUFFER_ELEMENTS(card);
queue->next_buf_to_fill = 0;
queue->do_pack = 0;
+ queue->prev_hdr = NULL;
+ queue->bulk_start = 0;
atomic_set(&queue->used_buffers, 0);
atomic_set(&queue->set_pci_flags_count, 0);
atomic_set(&queue->state, QETH_OUT_Q_UNLOCKED);
+ netdev_tx_reset_queue(netdev_get_tx_queue(card->dev, i));
}
return 0;
}
@@ -2638,8 +2688,7 @@ static void qeth_ipa_finalize_cmd(struct qeth_card *card,
qeth_mpc_finalize_cmd(card, iob);
/* override with IPA-specific values: */
- __ipa_cmd(iob)->hdr.seqno = card->seqno.ipa;
- iob->reply->seqno = card->seqno.ipa++;
+ __ipa_cmd(iob)->hdr.seqno = card->seqno.ipa++;
}
void qeth_prepare_ipa_cmd(struct qeth_card *card, struct qeth_cmd_buffer *iob,
@@ -3196,6 +3245,7 @@ static int qeth_switch_to_nonpacking_if_needed(struct qeth_qdio_out_q *queue)
static void qeth_flush_buffers(struct qeth_qdio_out_q *queue, int index,
int count)
{
+ struct qeth_card *card = queue->card;
struct qeth_qdio_out_buffer *buf;
int rc;
int i;
@@ -3239,14 +3289,17 @@ static void qeth_flush_buffers(struct qeth_qdio_out_q *queue, int index,
}
}
- QETH_TXQ_STAT_ADD(queue, bufs, count);
qdio_flags = QDIO_FLAG_SYNC_OUTPUT;
if (atomic_read(&queue->set_pci_flags_count))
qdio_flags |= QDIO_FLAG_PCI_OUT;
rc = do_QDIO(CARD_DDEV(queue->card), qdio_flags,
queue->queue_no, index, count);
+
+ /* Fake the TX completion interrupt: */
+ if (IS_IQD(card))
+ napi_schedule(&queue->napi);
+
if (rc) {
- QETH_TXQ_STAT_ADD(queue, tx_errors, count);
/* ignore temporary SIGA errors without busy condition */
if (rc == -ENOBUFS)
return;
@@ -3263,6 +3316,14 @@ static void qeth_flush_buffers(struct qeth_qdio_out_q *queue, int index,
}
}
+static void qeth_flush_queue(struct qeth_qdio_out_q *queue)
+{
+ qeth_flush_buffers(queue, queue->bulk_start, 1);
+
+ queue->bulk_start = QDIO_BUFNR(queue->bulk_start + 1);
+ queue->prev_hdr = NULL;
+}
+
static void qeth_check_outbound_queue(struct qeth_qdio_out_q *queue)
{
int index;
@@ -3424,48 +3485,12 @@ static void qeth_qdio_output_handler(struct ccw_device *ccwdev,
int bidx = i % QDIO_MAX_BUFFERS_PER_Q;
buffer = queue->bufs[bidx];
qeth_handle_send_error(card, buffer, qdio_error);
-
- if (queue->bufstates &&
- (queue->bufstates[bidx].flags &
- QDIO_OUTBUF_STATE_FLAG_PENDING) != 0) {
- WARN_ON_ONCE(card->options.cq != QETH_CQ_ENABLED);
-
- if (atomic_cmpxchg(&buffer->state,
- QETH_QDIO_BUF_PRIMED,
- QETH_QDIO_BUF_PENDING) ==
- QETH_QDIO_BUF_PRIMED) {
- qeth_notify_skbs(queue, buffer,
- TX_NOTIFY_PENDING);
- }
- QETH_CARD_TEXT_(queue->card, 5, "pel%d", bidx);
-
- /* prepare the queue slot for re-use: */
- qeth_scrub_qdio_buffer(buffer->buffer,
- queue->max_elements);
- if (qeth_init_qdio_out_buf(queue, bidx)) {
- QETH_CARD_TEXT(card, 2, "outofbuf");
- qeth_schedule_recovery(card);
- }
- } else {
- if (card->options.cq == QETH_CQ_ENABLED) {
- enum iucv_tx_notify n;
-
- n = qeth_compute_cq_notification(
- buffer->buffer->element[15].sflags, 0);
- qeth_notify_skbs(queue, buffer, n);
- }
-
- qeth_clear_output_buffer(queue, buffer);
- }
- qeth_cleanup_handled_pending(queue, bidx, 0);
+ qeth_clear_output_buffer(queue, buffer, qdio_error, 0);
}
+
atomic_sub(count, &queue->used_buffers);
- /* check if we need to do something on this outbound queue */
- if (!IS_IQD(card))
- qeth_check_outbound_queue(queue);
+ qeth_check_outbound_queue(queue);
- if (IS_IQD(card))
- __queue = qeth_iqd_translate_txq(dev, __queue);
txq = netdev_get_tx_queue(dev, __queue);
/* xmit may have observed the full-condition, but not yet stopped the
* txq, in which case the code below won't trigger. So before returning,
@@ -3535,7 +3560,7 @@ static int qeth_get_elements_for_frags(struct sk_buff *skb)
int cnt, elements = 0;
for (cnt = 0; cnt < skb_shinfo(skb)->nr_frags; cnt++) {
- struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[cnt];
+ skb_frag_t *frag = &skb_shinfo(skb)->frags[cnt];
elements += qeth_get_elements_for_range(
(addr_t)skb_frag_address(frag),
@@ -3654,9 +3679,32 @@ check_layout:
return 0;
}
-static void __qeth_fill_buffer(struct sk_buff *skb,
- struct qeth_qdio_out_buffer *buf,
- bool is_first_elem, unsigned int offset)
+static bool qeth_iqd_may_bulk(struct qeth_qdio_out_q *queue,
+ struct qeth_qdio_out_buffer *buffer,
+ struct sk_buff *curr_skb,
+ struct qeth_hdr *curr_hdr)
+{
+ struct qeth_hdr *prev_hdr = queue->prev_hdr;
+
+ if (!prev_hdr)
+ return true;
+
+ /* All packets must have the same target: */
+ if (curr_hdr->hdr.l2.id == QETH_HEADER_TYPE_LAYER2) {
+ struct sk_buff *prev_skb = skb_peek(&buffer->skb_list);
+
+ return ether_addr_equal(eth_hdr(prev_skb)->h_dest,
+ eth_hdr(curr_skb)->h_dest) &&
+ qeth_l2_same_vlan(&prev_hdr->hdr.l2, &curr_hdr->hdr.l2);
+ }
+
+ return qeth_l3_same_next_hop(&prev_hdr->hdr.l3, &curr_hdr->hdr.l3) &&
+ qeth_l3_iqd_same_vlan(&prev_hdr->hdr.l3, &curr_hdr->hdr.l3);
+}
+
+static unsigned int __qeth_fill_buffer(struct sk_buff *skb,
+ struct qeth_qdio_out_buffer *buf,
+ bool is_first_elem, unsigned int offset)
{
struct qdio_buffer *buffer = buf->buffer;
int element = buf->next_element_to_fill;
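
Usage sketch, matching __qeth_xmit() later in this patch: the xmit path asks qeth_iqd_may_bulk() whether the new skb may share the buffer under construction, and otherwise seals and flushes that buffer first:

    if (buffer->next_element_to_fill + elements > queue->max_elements ||
        !qeth_iqd_may_bulk(queue, buffer, skb, hdr)) {
        atomic_set(&buffer->state, QETH_QDIO_BUF_PRIMED);
        qeth_flush_queue(queue);    /* start a fresh buffer */
    }
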
@@ -3713,24 +3761,21 @@ static void __qeth_fill_buffer(struct sk_buff *skb,
if (buffer->element[element - 1].eflags)
buffer->element[element - 1].eflags = SBAL_EFLAGS_LAST_FRAG;
buf->next_element_to_fill = element;
+ return element;
}
/**
* qeth_fill_buffer() - map skb into an output buffer
- * @queue: QDIO queue to submit the buffer on
* @buf: buffer to transport the skb
* @skb: skb to map into the buffer
* @hdr: qeth_hdr for this skb. Either at skb->data, or allocated
* from qeth_core_header_cache.
* @offset: when mapping the skb, start at skb->data + offset
* @hd_len: if > 0, build a dedicated header element of this size
- * flush: Prepare the buffer to be flushed, regardless of its fill level.
*/
-static int qeth_fill_buffer(struct qeth_qdio_out_q *queue,
- struct qeth_qdio_out_buffer *buf,
- struct sk_buff *skb, struct qeth_hdr *hdr,
- unsigned int offset, unsigned int hd_len,
- bool flush)
+static unsigned int qeth_fill_buffer(struct qeth_qdio_out_buffer *buf,
+ struct sk_buff *skb, struct qeth_hdr *hdr,
+ unsigned int offset, unsigned int hd_len)
{
struct qdio_buffer *buffer = buf->buffer;
bool is_first_elem = true;
@@ -3750,35 +3795,22 @@ static int qeth_fill_buffer(struct qeth_qdio_out_q *queue,
buf->next_element_to_fill++;
}
- __qeth_fill_buffer(skb, buf, is_first_elem, offset);
-
- if (!queue->do_pack) {
- QETH_CARD_TEXT(queue->card, 6, "fillbfnp");
- } else {
- QETH_CARD_TEXT(queue->card, 6, "fillbfpa");
-
- QETH_TXQ_STAT_INC(queue, skbs_pack);
- /* If the buffer still has free elements, keep using it. */
- if (!flush &&
- buf->next_element_to_fill < queue->max_elements)
- return 0;
- }
-
- /* flush out the buffer */
- atomic_set(&buf->state, QETH_QDIO_BUF_PRIMED);
- queue->next_buf_to_fill = (queue->next_buf_to_fill + 1) %
- QDIO_MAX_BUFFERS_PER_Q;
- return 1;
+ return __qeth_fill_buffer(skb, buf, is_first_elem, offset);
}
-static int qeth_do_send_packet_fast(struct qeth_qdio_out_q *queue,
- struct sk_buff *skb, struct qeth_hdr *hdr,
- unsigned int offset, unsigned int hd_len)
+static int __qeth_xmit(struct qeth_card *card, struct qeth_qdio_out_q *queue,
+ struct sk_buff *skb, unsigned int elements,
+ struct qeth_hdr *hdr, unsigned int offset,
+ unsigned int hd_len)
{
- int index = queue->next_buf_to_fill;
- struct qeth_qdio_out_buffer *buffer = queue->bufs[index];
+ struct qeth_qdio_out_buffer *buffer = queue->bufs[queue->bulk_start];
+ unsigned int bytes = qdisc_pkt_len(skb);
+ unsigned int next_element;
struct netdev_queue *txq;
bool stopped = false;
+ bool flush;
+
+ txq = netdev_get_tx_queue(card->dev, skb_get_queue_mapping(skb));
/* Just a sanity check: the wake/stop logic should ensure that we always
* get a free buffer.
@@ -3786,9 +3818,19 @@ static int qeth_do_send_packet_fast(struct qeth_qdio_out_q *queue,
if (atomic_read(&buffer->state) != QETH_QDIO_BUF_EMPTY)
return -EBUSY;
- txq = netdev_get_tx_queue(queue->card->dev, skb_get_queue_mapping(skb));
+ if ((buffer->next_element_to_fill + elements > queue->max_elements) ||
+ !qeth_iqd_may_bulk(queue, buffer, skb, hdr)) {
+ atomic_set(&buffer->state, QETH_QDIO_BUF_PRIMED);
+ qeth_flush_queue(queue);
+ buffer = queue->bufs[queue->bulk_start];
+
+ /* Sanity-check again: */
+ if (atomic_read(&buffer->state) != QETH_QDIO_BUF_EMPTY)
+ return -EBUSY;
+ }
- if (atomic_inc_return(&queue->used_buffers) >= QDIO_MAX_BUFFERS_PER_Q) {
+ if (buffer->next_element_to_fill == 0 &&
+ atomic_inc_return(&queue->used_buffers) >= QDIO_MAX_BUFFERS_PER_Q) {
/* If a TX completion happens right _here_ and fails to wake
* the txq, then our re-check below will catch the race.
*/
@@ -3797,8 +3839,17 @@ static int qeth_do_send_packet_fast(struct qeth_qdio_out_q *queue,
stopped = true;
}
- qeth_fill_buffer(queue, buffer, skb, hdr, offset, hd_len, stopped);
- qeth_flush_buffers(queue, index, 1);
+ next_element = qeth_fill_buffer(buffer, skb, hdr, offset, hd_len);
+ buffer->bytes += bytes;
+ queue->prev_hdr = hdr;
+
+ flush = __netdev_tx_sent_queue(txq, bytes,
+ !stopped && netdev_xmit_more());
+
+ if (flush || next_element >= queue->max_elements) {
+ atomic_set(&buffer->state, QETH_QDIO_BUF_PRIMED);
+ qeth_flush_queue(queue);
+ }
if (stopped && !qeth_out_queue_is_full(queue))
netif_tx_start_queue(txq);
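
__netdev_tx_sent_queue() above enrolls this path in byte queue limits (BQL): bytes reported at xmit time must be matched by a completion-side report, which this patch supplies via netdev_tx_completed_queue() in the new TX NAPI poll, with netdev_tx_reset_queue() covering queue re-initialization. Sketched with illustrative ex_* wrappers around the real core API:

#include <linux/netdevice.h>

static bool ex_xmit_report(struct netdev_queue *txq, unsigned int bytes,
                           bool xmit_more)
{
    /* true means: no more packets are coming, ring the doorbell now */
    return __netdev_tx_sent_queue(txq, bytes, xmit_more);
}

static void ex_completion_report(struct netdev_queue *txq,
                                 unsigned int packets, unsigned int bytes)
{
    /* must mirror the bytes previously passed to ex_xmit_report() */
    netdev_tx_completed_queue(txq, packets, bytes);
}
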
@@ -3811,6 +3862,7 @@ int qeth_do_send_packet(struct qeth_card *card, struct qeth_qdio_out_q *queue,
int elements_needed)
{
struct qeth_qdio_out_buffer *buffer;
+ unsigned int next_element;
struct netdev_queue *txq;
bool stopped = false;
int start_index;
@@ -3873,8 +3925,17 @@ int qeth_do_send_packet(struct qeth_card *card, struct qeth_qdio_out_q *queue,
stopped = true;
}
- flush_count += qeth_fill_buffer(queue, buffer, skb, hdr, offset, hd_len,
- stopped);
+ next_element = qeth_fill_buffer(buffer, skb, hdr, offset, hd_len);
+
+ if (queue->do_pack)
+ QETH_TXQ_STAT_INC(queue, skbs_pack);
+ if (!queue->do_pack || stopped || next_element >= queue->max_elements) {
+ flush_count++;
+ atomic_set(&buffer->state, QETH_QDIO_BUF_PRIMED);
+ queue->next_buf_to_fill = (queue->next_buf_to_fill + 1) %
+ QDIO_MAX_BUFFERS_PER_Q;
+ }
+
if (flush_count)
qeth_flush_buffers(queue, start_index, flush_count);
else if (!atomic_read(&queue->set_pci_flags_count))
@@ -3941,7 +4002,6 @@ int qeth_xmit(struct qeth_card *card, struct sk_buff *skb,
unsigned int hd_len = 0;
unsigned int elements;
int push_len, rc;
- bool is_sg;
if (is_tso) {
hw_hdr_len = sizeof(struct qeth_hdr_tso);
@@ -3970,10 +4030,9 @@ int qeth_xmit(struct qeth_card *card, struct sk_buff *skb,
qeth_fill_tso_ext((struct qeth_hdr_tso *) hdr,
frame_len - proto_len, skb, proto_len);
- is_sg = skb_is_nonlinear(skb);
if (IS_IQD(card)) {
- rc = qeth_do_send_packet_fast(queue, skb, hdr, data_offset,
- hd_len);
+ rc = __qeth_xmit(card, queue, skb, elements, hdr, data_offset,
+ hd_len);
} else {
/* TODO: drop skb_orphan() once TX completion is fast enough */
skb_orphan(skb);
@@ -3981,18 +4040,9 @@ int qeth_xmit(struct qeth_card *card, struct sk_buff *skb,
hd_len, elements);
}
- if (!rc) {
- QETH_TXQ_STAT_ADD(queue, buf_elements, elements);
- if (is_sg)
- QETH_TXQ_STAT_INC(queue, skbs_sg);
- if (is_tso) {
- QETH_TXQ_STAT_INC(queue, skbs_tso);
- QETH_TXQ_STAT_ADD(queue, tso_bytes, frame_len);
- }
- } else {
- if (!push_len)
- kmem_cache_free(qeth_core_header_cache, hdr);
- }
+ if (rc && !push_len)
+ kmem_cache_free(qeth_core_header_cache, hdr);
+
return rc;
}
EXPORT_SYMBOL_GPL(qeth_xmit);
@@ -4014,23 +4064,14 @@ static int qeth_setadp_promisc_mode_cb(struct qeth_card *card,
return (cmd->hdr.return_code) ? -EIO : 0;
}
-void qeth_setadp_promisc_mode(struct qeth_card *card)
+void qeth_setadp_promisc_mode(struct qeth_card *card, bool enable)
{
- enum qeth_ipa_promisc_modes mode;
- struct net_device *dev = card->dev;
+ enum qeth_ipa_promisc_modes mode = enable ? SET_PROMISC_MODE_ON :
+ SET_PROMISC_MODE_OFF;
struct qeth_cmd_buffer *iob;
struct qeth_ipa_cmd *cmd;
QETH_CARD_TEXT(card, 4, "setprom");
-
- if (((dev->flags & IFF_PROMISC) &&
- (card->info.promisc_mode == SET_PROMISC_MODE_ON)) ||
- (!(dev->flags & IFF_PROMISC) &&
- (card->info.promisc_mode == SET_PROMISC_MODE_OFF)))
- return;
- mode = SET_PROMISC_MODE_OFF;
- if (dev->flags & IFF_PROMISC)
- mode = SET_PROMISC_MODE_ON;
QETH_CARD_TEXT_(card, 4, "mode:%x", mode);
iob = qeth_get_adapter_cmd(card, IPA_SETADP_SET_PROMISC_MODE,
@@ -4298,20 +4339,16 @@ static int qeth_mdio_read(struct net_device *dev, int phy_id, int regnum)
}
static int qeth_snmp_command_cb(struct qeth_card *card,
- struct qeth_reply *reply, unsigned long sdata)
+ struct qeth_reply *reply, unsigned long data)
{
- struct qeth_ipa_cmd *cmd;
- struct qeth_arp_query_info *qinfo;
- unsigned char *data;
+ struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
+ struct qeth_arp_query_info *qinfo = reply->param;
+ struct qeth_ipacmd_setadpparms *adp_cmd;
+ unsigned int data_len;
void *snmp_data;
- __u16 data_len;
QETH_CARD_TEXT(card, 3, "snpcmdcb");
- cmd = (struct qeth_ipa_cmd *) sdata;
- data = (unsigned char *)((char *)cmd - reply->offset);
- qinfo = (struct qeth_arp_query_info *) reply->param;
-
if (cmd->hdr.return_code) {
QETH_CARD_TEXT_(card, 4, "scer1%x", cmd->hdr.return_code);
return -EIO;
@@ -4322,15 +4359,14 @@ static int qeth_snmp_command_cb(struct qeth_card *card,
QETH_CARD_TEXT_(card, 4, "scer2%x", cmd->hdr.return_code);
return -EIO;
}
- data_len = *((__u16 *)QETH_IPA_PDU_LEN_PDU1(data));
- if (cmd->data.setadapterparms.hdr.seq_no == 1) {
- snmp_data = &cmd->data.setadapterparms.data.snmp;
- data_len -= offsetof(struct qeth_ipa_cmd,
- data.setadapterparms.data.snmp);
+
+ adp_cmd = &cmd->data.setadapterparms;
+ data_len = adp_cmd->hdr.cmdlength - sizeof(adp_cmd->hdr);
+ if (adp_cmd->hdr.seq_no == 1) {
+ snmp_data = &adp_cmd->data.snmp;
} else {
- snmp_data = &cmd->data.setadapterparms.data.snmp.request;
- data_len -= offsetof(struct qeth_ipa_cmd,
- data.setadapterparms.data.snmp.request);
+ snmp_data = &adp_cmd->data.snmp.request;
+ data_len -= offsetof(struct qeth_snmp_cmd, request);
}
/* check if there is enough room in userspace */
@@ -4741,7 +4777,7 @@ static int qeth_qdio_establish(struct qeth_card *card)
init_data.input_sbal_addr_array = in_sbal_ptrs;
init_data.output_sbal_addr_array = out_sbal_ptrs;
init_data.output_sbal_state_array = card->qdio.out_bufstates;
- init_data.scan_threshold = IS_IQD(card) ? 1 : 32;
+ init_data.scan_threshold = IS_IQD(card) ? 0 : 32;
if (atomic_cmpxchg(&card->qdio.state, QETH_QDIO_ALLOCATED,
QETH_QDIO_ESTABLISHED) == QETH_QDIO_ALLOCATED) {
@@ -5155,6 +5191,107 @@ out:
}
EXPORT_SYMBOL_GPL(qeth_poll);
+static void qeth_iqd_tx_complete(struct qeth_qdio_out_q *queue,
+ unsigned int bidx, bool error, int budget)
+{
+ struct qeth_qdio_out_buffer *buffer = queue->bufs[bidx];
+ u8 sflags = buffer->buffer->element[15].sflags;
+ struct qeth_card *card = queue->card;
+
+ if (queue->bufstates && (queue->bufstates[bidx].flags &
+ QDIO_OUTBUF_STATE_FLAG_PENDING)) {
+ WARN_ON_ONCE(card->options.cq != QETH_CQ_ENABLED);
+
+ if (atomic_cmpxchg(&buffer->state, QETH_QDIO_BUF_PRIMED,
+ QETH_QDIO_BUF_PENDING) ==
+ QETH_QDIO_BUF_PRIMED)
+ qeth_notify_skbs(queue, buffer, TX_NOTIFY_PENDING);
+
+ QETH_CARD_TEXT_(card, 5, "pel%u", bidx);
+
+ /* prepare the queue slot for re-use: */
+ qeth_scrub_qdio_buffer(buffer->buffer, queue->max_elements);
+ if (qeth_init_qdio_out_buf(queue, bidx)) {
+ QETH_CARD_TEXT(card, 2, "outofbuf");
+ qeth_schedule_recovery(card);
+ }
+
+ return;
+ }
+
+ if (card->options.cq == QETH_CQ_ENABLED)
+ qeth_notify_skbs(queue, buffer,
+ qeth_compute_cq_notification(sflags, 0));
+ qeth_clear_output_buffer(queue, buffer, error, budget);
+}
+
+static int qeth_tx_poll(struct napi_struct *napi, int budget)
+{
+ struct qeth_qdio_out_q *queue = qeth_napi_to_out_queue(napi);
+ unsigned int queue_no = queue->queue_no;
+ struct qeth_card *card = queue->card;
+ struct net_device *dev = card->dev;
+ unsigned int work_done = 0;
+ struct netdev_queue *txq;
+
+ txq = netdev_get_tx_queue(dev, qeth_iqd_translate_txq(dev, queue_no));
+
+ while (1) {
+ unsigned int start, error, i;
+ unsigned int packets = 0;
+ unsigned int bytes = 0;
+ int completed;
+
+ if (qeth_out_queue_is_empty(queue)) {
+ napi_complete(napi);
+ return 0;
+ }
+
+ /* Give the CPU a breather: */
+ if (work_done >= QDIO_MAX_BUFFERS_PER_Q) {
+ QETH_TXQ_STAT_INC(queue, completion_yield);
+ if (napi_complete_done(napi, 0))
+ napi_schedule(napi);
+ return 0;
+ }
+
+ completed = qdio_inspect_queue(CARD_DDEV(card), queue_no, false,
+ &start, &error);
+ if (completed <= 0) {
+ /* Ensure we see TX completion for pending work: */
+ if (napi_complete_done(napi, 0))
+ qeth_tx_arm_timer(queue);
+ return 0;
+ }
+
+ for (i = start; i < start + completed; i++) {
+ struct qeth_qdio_out_buffer *buffer;
+ unsigned int bidx = QDIO_BUFNR(i);
+
+ buffer = queue->bufs[bidx];
+ packets += skb_queue_len(&buffer->skb_list);
+ bytes += buffer->bytes;
+
+ qeth_handle_send_error(card, buffer, error);
+ qeth_iqd_tx_complete(queue, bidx, error, budget);
+ qeth_cleanup_handled_pending(queue, bidx, false);
+ }
+
+ netdev_tx_completed_queue(txq, packets, bytes);
+ atomic_sub(completed, &queue->used_buffers);
+ work_done += completed;
+
+ /* xmit may have observed the full-condition, but not yet
+ * stopped the txq, in which case the code below won't trigger.
+ * So before returning, xmit will re-check the txq's fill level
+ * and wake it up if needed.
+ */
+ if (netif_tx_queue_stopped(txq) &&
+ !qeth_out_queue_is_full(queue))
+ netif_tx_wake_queue(txq);
+ }
+}
+
static int qeth_setassparms_inspect_rc(struct qeth_ipa_cmd *cmd)
{
if (!cmd->hdr.return_code)
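
Condensed skeleton of the TX poll structure above (illustrative; the ex_* helpers and EX_WORK_LIMIT are hypothetical). The napi_complete_done(napi, 0) + napi_schedule() pair is the "yield but stay scheduled" idiom: reporting zero work lets other softirq consumers run, and the re-schedule only fires if nobody else re-armed the instance in the meantime:

static int ex_tx_poll(struct napi_struct *napi, int budget)
{
    unsigned int work_done = 0;

    while (ex_has_completed_buffers(napi)) {        /* hypothetical */
        work_done += ex_reap_one(napi, budget);     /* hypothetical */

        if (work_done >= EX_WORK_LIMIT) {           /* hypothetical */
            /* yield the CPU, but come straight back: */
            if (napi_complete_done(napi, 0))
                napi_schedule(napi);
            return 0;
        }
    }

    /* nothing left: go fully idle until the next schedule */
    napi_complete(napi);
    return 0;
}
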
@@ -6101,6 +6238,17 @@ int qeth_open(struct net_device *dev)
napi_enable(&card->napi);
local_bh_disable();
napi_schedule(&card->napi);
+ if (IS_IQD(card)) {
+ struct qeth_qdio_out_q *queue;
+ unsigned int i;
+
+ qeth_for_each_output_queue(card, queue, i) {
+ netif_tx_napi_add(dev, &queue->napi, qeth_tx_poll,
+ QETH_NAPI_WEIGHT);
+ napi_enable(&queue->napi);
+ napi_schedule(&queue->napi);
+ }
+ }
/* kick-start the NAPI softirq: */
local_bh_enable();
return 0;
@@ -6112,7 +6260,26 @@ int qeth_stop(struct net_device *dev)
struct qeth_card *card = dev->ml_priv;
QETH_CARD_TEXT(card, 4, "qethstop");
- netif_tx_disable(dev);
+ if (IS_IQD(card)) {
+ struct qeth_qdio_out_q *queue;
+ unsigned int i;
+
+ /* Quiesce the NAPI instances: */
+ qeth_for_each_output_queue(card, queue, i) {
+ napi_disable(&queue->napi);
+ del_timer_sync(&queue->timer);
+ }
+
+ /* Stop .ndo_start_xmit, might still access queue->napi. */
+ netif_tx_disable(dev);
+
+ /* Queues may get re-allocated, so remove the NAPIs here. */
+ qeth_for_each_output_queue(card, queue, i)
+ netif_napi_del(&queue->napi);
+ } else {
+ netif_tx_disable(dev);
+ }
+
napi_disable(&card->napi);
return 0;
}
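
The IQD branch of qeth_stop() above encodes a strict teardown order; paraphrasing it as an ordering constraint (an inference from the hunk's own comments, not extra documented rules):

/*
 * napi_disable(&queue->napi);     1. no TX poll runs after this returns
 * del_timer_sync(&queue->timer);  2. the timer can no longer napi_schedule()
 * netif_tx_disable(dev);          3. xmit stops touching queue->napi
 * netif_napi_del(&queue->napi);   4. only now is unlinking the NAPI safe
 */
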
diff --git a/drivers/s390/net/qeth_core_mpc.h b/drivers/s390/net/qeth_core_mpc.h
index 75b5834ed28d..6420b58cf42b 100644
--- a/drivers/s390/net/qeth_core_mpc.h
+++ b/drivers/s390/net/qeth_core_mpc.h
@@ -27,7 +27,6 @@ extern unsigned char IPA_PDU_HEADER[];
#define QETH_TIMEOUT (10 * HZ)
#define QETH_IPA_TIMEOUT (45 * HZ)
-#define QETH_IDX_COMMAND_SEQNO 0xffff0000
#define QETH_CLEAR_CHANNEL_PARM -10
#define QETH_HALT_CHANNEL_PARM -11
diff --git a/drivers/s390/net/qeth_ethtool.c b/drivers/s390/net/qeth_ethtool.c
index 4166eb29f0bd..096698df3886 100644
--- a/drivers/s390/net/qeth_ethtool.c
+++ b/drivers/s390/net/qeth_ethtool.c
@@ -39,6 +39,8 @@ static const struct qeth_stats txq_stats[] = {
QETH_TXQ_STAT("TSO bytes", tso_bytes),
QETH_TXQ_STAT("Packing mode switches", packing_mode_switch),
QETH_TXQ_STAT("Queue stopped", stopped),
+ QETH_TXQ_STAT("Completion yield", completion_yield),
+ QETH_TXQ_STAT("Completion timer", completion_timer),
};
static const struct qeth_stats card_stats[] = {
diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c
index cbead3d1b2fd..b8799cd3e7aa 100644
--- a/drivers/s390/net/qeth_l2_main.c
+++ b/drivers/s390/net/qeth_l2_main.c
@@ -175,10 +175,8 @@ static void qeth_l2_fill_header(struct qeth_qdio_out_q *queue,
hdr->hdr.l2.id = QETH_HEADER_TYPE_L2_TSO;
} else {
hdr->hdr.l2.id = QETH_HEADER_TYPE_LAYER2;
- if (skb->ip_summed == CHECKSUM_PARTIAL) {
+ if (skb->ip_summed == CHECKSUM_PARTIAL)
qeth_tx_csum(skb, &hdr->hdr.l2.flags[1], ipv);
- QETH_TXQ_STAT_INC(queue, skbs_csum);
- }
}
/* set byte 3 to casting flags */
@@ -439,23 +437,14 @@ static int qeth_l2_set_mac_address(struct net_device *dev, void *p)
return 0;
}
-static void qeth_promisc_to_bridge(struct qeth_card *card)
+static void qeth_l2_promisc_to_bridge(struct qeth_card *card, bool enable)
{
- struct net_device *dev = card->dev;
- enum qeth_ipa_promisc_modes promisc_mode;
int role;
int rc;
QETH_CARD_TEXT(card, 3, "pmisc2br");
- if (!card->options.sbp.reflect_promisc)
- return;
- promisc_mode = (dev->flags & IFF_PROMISC) ? SET_PROMISC_MODE_ON
- : SET_PROMISC_MODE_OFF;
- if (promisc_mode == card->info.promisc_mode)
- return;
-
- if (promisc_mode == SET_PROMISC_MODE_ON) {
+ if (enable) {
if (card->options.sbp.reflect_promisc_primary)
role = QETH_SBP_ROLE_PRIMARY;
else
@@ -464,14 +453,26 @@ static void qeth_promisc_to_bridge(struct qeth_card *card)
role = QETH_SBP_ROLE_NONE;
rc = qeth_bridgeport_setrole(card, role);
- QETH_CARD_TEXT_(card, 2, "bpm%c%04x",
- (promisc_mode == SET_PROMISC_MODE_ON) ? '+' : '-', rc);
+ QETH_CARD_TEXT_(card, 2, "bpm%c%04x", enable ? '+' : '-', rc);
if (!rc) {
card->options.sbp.role = role;
- card->info.promisc_mode = promisc_mode;
+ card->info.promisc_mode = enable;
}
+}
+static void qeth_l2_set_promisc_mode(struct qeth_card *card)
+{
+ bool enable = card->dev->flags & IFF_PROMISC;
+
+ if (card->info.promisc_mode == enable)
+ return;
+
+ if (qeth_adp_supported(card, IPA_SETADP_SET_PROMISC_MODE))
+ qeth_setadp_promisc_mode(card, enable);
+ else if (card->options.sbp.reflect_promisc)
+ qeth_l2_promisc_to_bridge(card, enable);
}
+
/* New MAC address is added to the hash table and marked to be written on card
* only if it is not in the hash table storage already
*
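
The state-change guard introduced by qeth_l2_set_promisc_mode() above, reduced to its core (a sketch with hypothetical ex_* names, not driver API): cache the last state pushed to the device and skip the slow control command when nothing changed.

static void ex_set_promisc(struct ex_card *card, bool enable)
{
    if (card->promisc_mode == enable)
        return;    /* device already in the requested state */

    if (ex_issue_promisc_cmd(card, enable) == 0)    /* hypothetical */
        card->promisc_mode = enable;    /* remember only on success */
}
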
@@ -539,10 +540,7 @@ static void qeth_l2_rx_mode_work(struct work_struct *work)
}
}
- if (qeth_adp_supported(card, IPA_SETADP_SET_PROMISC_MODE))
- qeth_setadp_promisc_mode(card);
- else
- qeth_promisc_to_bridge(card);
+ qeth_l2_set_promisc_mode(card);
}
static int qeth_l2_xmit_osn(struct qeth_card *card, struct sk_buff *skb,
@@ -588,9 +586,10 @@ static netdev_tx_t qeth_l2_hard_start_xmit(struct sk_buff *skb,
struct qeth_card *card = dev->ml_priv;
u16 txq = skb_get_queue_mapping(skb);
struct qeth_qdio_out_q *queue;
- int tx_bytes = skb->len;
int rc;
+ if (!skb_is_gso(skb))
+ qdisc_skb_cb(skb)->pkt_len = skb->len;
if (IS_IQD(card))
txq = qeth_iqd_translate_txq(dev, txq);
queue = card->qdio.out_qs[txq];
@@ -601,11 +600,8 @@ static netdev_tx_t qeth_l2_hard_start_xmit(struct sk_buff *skb,
rc = qeth_xmit(card, skb, queue, qeth_get_ip_version(skb),
qeth_l2_fill_header);
- if (!rc) {
- QETH_TXQ_STAT_INC(queue, tx_packets);
- QETH_TXQ_STAT_ADD(queue, tx_bytes, tx_bytes);
+ if (!rc)
return NETDEV_TX_OK;
- }
QETH_TXQ_STAT_INC(queue, tx_dropped);
kfree_skb(skb);
@@ -1000,9 +996,10 @@ struct qeth_discipline qeth_l2_discipline = {
EXPORT_SYMBOL_GPL(qeth_l2_discipline);
static void qeth_osn_assist_cb(struct qeth_card *card,
- struct qeth_cmd_buffer *iob)
+ struct qeth_cmd_buffer *iob,
+ unsigned int data_length)
{
- qeth_notify_reply(iob->reply, 0);
+ qeth_notify_cmd(iob, 0);
qeth_put_cmd(iob);
}
@@ -1703,7 +1700,6 @@ static int qeth_l2_vnicc_makerc(struct qeth_card *card, u16 ipa_rc)
/* generic VNICC request call back control */
struct _qeth_l2_vnicc_request_cbctl {
- u32 sub_cmd;
struct {
union{
u32 *sup_cmds;
@@ -1721,6 +1717,7 @@ static int qeth_l2_vnicc_request_cb(struct qeth_card *card,
(struct _qeth_l2_vnicc_request_cbctl *) reply->param;
struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
struct qeth_ipacmd_vnicc *rep = &cmd->data.vnicc;
+ u32 sub_cmd = cmd->data.vnicc.hdr.sub_command;
QETH_CARD_TEXT(card, 2, "vniccrcb");
if (cmd->hdr.return_code)
@@ -1729,10 +1726,9 @@ static int qeth_l2_vnicc_request_cb(struct qeth_card *card,
card->options.vnicc.sup_chars = rep->vnicc_cmds.supported;
card->options.vnicc.cur_chars = rep->vnicc_cmds.enabled;
- if (cbctl->sub_cmd == IPA_VNICC_QUERY_CMDS)
+ if (sub_cmd == IPA_VNICC_QUERY_CMDS)
*cbctl->result.sup_cmds = rep->data.query_cmds.sup_cmds;
-
- if (cbctl->sub_cmd == IPA_VNICC_GET_TIMEOUT)
+ else if (sub_cmd == IPA_VNICC_GET_TIMEOUT)
*cbctl->result.timeout = rep->data.getset_timeout.timeout;
return 0;
@@ -1760,7 +1756,6 @@ static struct qeth_cmd_buffer *qeth_l2_vnicc_build_cmd(struct qeth_card *card,
/* VNICC query VNIC characteristics request */
static int qeth_l2_vnicc_query_chars(struct qeth_card *card)
{
- struct _qeth_l2_vnicc_request_cbctl cbctl;
struct qeth_cmd_buffer *iob;
QETH_CARD_TEXT(card, 2, "vniccqch");
@@ -1768,10 +1763,7 @@ static int qeth_l2_vnicc_query_chars(struct qeth_card *card)
if (!iob)
return -ENOMEM;
- /* prepare callback control */
- cbctl.sub_cmd = IPA_VNICC_QUERY_CHARS;
-
- return qeth_send_ipa_cmd(card, iob, qeth_l2_vnicc_request_cb, &cbctl);
+ return qeth_send_ipa_cmd(card, iob, qeth_l2_vnicc_request_cb, NULL);
}
/* VNICC query sub commands request */
@@ -1790,7 +1782,6 @@ static int qeth_l2_vnicc_query_cmds(struct qeth_card *card, u32 vnic_char,
__ipa_cmd(iob)->data.vnicc.data.query_cmds.vnic_char = vnic_char;
/* prepare callback control */
- cbctl.sub_cmd = IPA_VNICC_QUERY_CMDS;
cbctl.result.sup_cmds = sup_cmds;
return qeth_send_ipa_cmd(card, iob, qeth_l2_vnicc_request_cb, &cbctl);
@@ -1800,7 +1791,6 @@ static int qeth_l2_vnicc_query_cmds(struct qeth_card *card, u32 vnic_char,
static int qeth_l2_vnicc_set_char(struct qeth_card *card, u32 vnic_char,
u32 cmd)
{
- struct _qeth_l2_vnicc_request_cbctl cbctl;
struct qeth_cmd_buffer *iob;
QETH_CARD_TEXT(card, 2, "vniccedc");
@@ -1810,10 +1800,7 @@ static int qeth_l2_vnicc_set_char(struct qeth_card *card, u32 vnic_char,
__ipa_cmd(iob)->data.vnicc.data.set_char.vnic_char = vnic_char;
- /* prepare callback control */
- cbctl.sub_cmd = cmd;
-
- return qeth_send_ipa_cmd(card, iob, qeth_l2_vnicc_request_cb, &cbctl);
+ return qeth_send_ipa_cmd(card, iob, qeth_l2_vnicc_request_cb, NULL);
}
/* VNICC get/set timeout for characteristic request */
@@ -1837,7 +1824,6 @@ static int qeth_l2_vnicc_getset_timeout(struct qeth_card *card, u32 vnicc,
getset_timeout->timeout = *timeout;
/* prepare callback control */
- cbctl.sub_cmd = cmd;
if (cmd == IPA_VNICC_GET_TIMEOUT)
cbctl.result.timeout = timeout;
diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c
index 2dd99f103671..d7bfc7a0e4c0 100644
--- a/drivers/s390/net/qeth_l3_main.c
+++ b/drivers/s390/net/qeth_l3_main.c
@@ -1435,27 +1435,19 @@ static void qeth_l3_stop_card(struct qeth_card *card)
flush_workqueue(card->event_wq);
}
-/*
- * test for and Switch promiscuous mode (on or off)
- * either for guestlan or HiperSocket Sniffer
- */
-static void
-qeth_l3_handle_promisc_mode(struct qeth_card *card)
+static void qeth_l3_set_promisc_mode(struct qeth_card *card)
{
- struct net_device *dev = card->dev;
+ bool enable = card->dev->flags & IFF_PROMISC;
- if (((dev->flags & IFF_PROMISC) &&
- (card->info.promisc_mode == SET_PROMISC_MODE_ON)) ||
- (!(dev->flags & IFF_PROMISC) &&
- (card->info.promisc_mode == SET_PROMISC_MODE_OFF)))
+ if (card->info.promisc_mode == enable)
return;
if (IS_VM_NIC(card)) { /* Guestlan trace */
if (qeth_adp_supported(card, IPA_SETADP_SET_PROMISC_MODE))
- qeth_setadp_promisc_mode(card);
+ qeth_setadp_promisc_mode(card, enable);
} else if (card->options.sniffer && /* HiperSockets trace */
qeth_adp_supported(card, IPA_SETADP_SET_DIAG_ASSIST)) {
- if (dev->flags & IFF_PROMISC) {
+ if (enable) {
QETH_CARD_TEXT(card, 3, "+promisc");
qeth_diags_trace(card, QETH_DIAGS_CMD_TRACE_ENABLE);
} else {
@@ -1502,11 +1494,9 @@ static void qeth_l3_rx_mode_work(struct work_struct *work)
addr->disp_flag = QETH_DISP_ADDR_DELETE;
}
}
-
- if (!qeth_adp_supported(card, IPA_SETADP_SET_PROMISC_MODE))
- return;
}
- qeth_l3_handle_promisc_mode(card);
+
+ qeth_l3_set_promisc_mode(card);
}
static int qeth_l3_arp_makerc(u16 rc)
@@ -1967,7 +1957,6 @@ static void qeth_l3_fill_header(struct qeth_qdio_out_q *queue,
/* some HW requires combined L3+L4 csum offload: */
if (ipv == 4)
hdr->hdr.l3.ext_flags |= QETH_HDR_EXT_CSUM_HDR_REQ;
- QETH_TXQ_STAT_INC(queue, skbs_csum);
}
}
@@ -2054,9 +2043,10 @@ static netdev_tx_t qeth_l3_hard_start_xmit(struct sk_buff *skb,
u16 txq = skb_get_queue_mapping(skb);
int ipv = qeth_get_ip_version(skb);
struct qeth_qdio_out_q *queue;
- int tx_bytes = skb->len;
int rc;
+ if (!skb_is_gso(skb))
+ qdisc_skb_cb(skb)->pkt_len = skb->len;
if (IS_IQD(card)) {
queue = card->qdio.out_qs[qeth_iqd_translate_txq(dev, txq)];
@@ -2079,11 +2069,8 @@ static netdev_tx_t qeth_l3_hard_start_xmit(struct sk_buff *skb,
else
rc = qeth_xmit(card, skb, queue, ipv, qeth_l3_fill_header);
- if (!rc) {
- QETH_TXQ_STAT_INC(queue, tx_packets);
- QETH_TXQ_STAT_ADD(queue, tx_bytes, tx_bytes);
+ if (!rc)
return NETDEV_TX_OK;
- }
tx_drop:
QETH_TXQ_STAT_INC(queue, tx_dropped);
diff --git a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
index 7796799bf04a..9ff9429395eb 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
@@ -346,7 +346,7 @@ static int bnx2fc_xmit(struct fc_lport *lport, struct fc_frame *fp)
return -ENOMEM;
}
frag = &skb_shinfo(skb)->frags[skb_shinfo(skb)->nr_frags - 1];
- cp = kmap_atomic(skb_frag_page(frag)) + frag->page_offset;
+ cp = kmap_atomic(skb_frag_page(frag)) + skb_frag_off(frag);
} else {
cp = skb_put(skb, tlen);
}
diff --git a/drivers/scsi/fcoe/fcoe.c b/drivers/scsi/fcoe/fcoe.c
index 00dd47bcbb1e..587d4bbb7d22 100644
--- a/drivers/scsi/fcoe/fcoe.c
+++ b/drivers/scsi/fcoe/fcoe.c
@@ -1522,8 +1522,7 @@ static int fcoe_xmit(struct fc_lport *lport, struct fc_frame *fp)
return -ENOMEM;
}
frag = &skb_shinfo(skb)->frags[skb_shinfo(skb)->nr_frags - 1];
- cp = kmap_atomic(skb_frag_page(frag))
- + frag->page_offset;
+ cp = kmap_atomic(skb_frag_page(frag)) + skb_frag_off(frag);
} else {
cp = skb_put(skb, tlen);
}
diff --git a/drivers/scsi/fcoe/fcoe_transport.c b/drivers/scsi/fcoe/fcoe_transport.c
index ba4603d76284..a20ddc301c89 100644
--- a/drivers/scsi/fcoe/fcoe_transport.c
+++ b/drivers/scsi/fcoe/fcoe_transport.c
@@ -308,7 +308,7 @@ EXPORT_SYMBOL_GPL(fcoe_get_wwn);
u32 fcoe_fc_crc(struct fc_frame *fp)
{
struct sk_buff *skb = fp_skb(fp);
- struct skb_frag_struct *frag;
+ skb_frag_t *frag;
unsigned char *data;
unsigned long off, len, clen;
u32 crc;
@@ -318,7 +318,7 @@ u32 fcoe_fc_crc(struct fc_frame *fp)
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
frag = &skb_shinfo(skb)->frags[i];
- off = frag->page_offset;
+ off = skb_frag_off(frag);
len = skb_frag_size(frag);
while (len > 0) {
clen = min(len, PAGE_SIZE - (off & ~PAGE_MASK));
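
Illustrative aside on the skb_frag_t conversions in this series (here and in the bnx2fc/fcoe/qedf hunks): direct field access like frag->page_offset gives way to the skb_frag_off() accessor, which hides the fragment layout. A sketch of walking fragments purely through accessors (real kernel API; the loop body is generic):

#include <linux/skbuff.h>

static void ex_walk_frags(struct sk_buff *skb)
{
    int i;

    for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
        skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
        unsigned int off = skb_frag_off(frag);   /* was frag->page_offset */
        unsigned int len = skb_frag_size(frag);
        struct page *page = skb_frag_page(frag);

        /* map/copy from (page, off, len) here */
        (void)page; (void)off; (void)len;
    }
}
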
diff --git a/drivers/scsi/qedf/qedf_main.c b/drivers/scsi/qedf/qedf_main.c
index a42babde036d..42542720962f 100644
--- a/drivers/scsi/qedf/qedf_main.c
+++ b/drivers/scsi/qedf/qedf_main.c
@@ -1077,7 +1077,7 @@ static int qedf_xmit(struct fc_lport *lport, struct fc_frame *fp)
return -ENOMEM;
}
frag = &skb_shinfo(skb)->frags[skb_shinfo(skb)->nr_frags - 1];
- cp = kmap_atomic(skb_frag_page(frag)) + frag->page_offset;
+ cp = kmap_atomic(skb_frag_page(frag)) + skb_frag_off(frag);
} else {
cp = skb_put(skb, tlen);
}
diff --git a/drivers/staging/Kconfig b/drivers/staging/Kconfig
index 7c96a01eef6c..0b8a614be11e 100644
--- a/drivers/staging/Kconfig
+++ b/drivers/staging/Kconfig
@@ -120,4 +120,6 @@ source "drivers/staging/kpc2000/Kconfig"
source "drivers/staging/isdn/Kconfig"
+source "drivers/staging/qlge/Kconfig"
+
endif # STAGING
diff --git a/drivers/staging/Makefile b/drivers/staging/Makefile
index fcaac9693b83..741152511a10 100644
--- a/drivers/staging/Makefile
+++ b/drivers/staging/Makefile
@@ -50,3 +50,4 @@ obj-$(CONFIG_EROFS_FS) += erofs/
obj-$(CONFIG_FIELDBUS_DEV) += fieldbus/
obj-$(CONFIG_KPC2000) += kpc2000/
obj-$(CONFIG_ISDN_CAPI) += isdn/
+obj-$(CONFIG_QLGE) += qlge/
diff --git a/drivers/staging/octeon/Kconfig b/drivers/staging/octeon/Kconfig
index 1e3012b9991c..5319909eb2f6 100644
--- a/drivers/staging/octeon/Kconfig
+++ b/drivers/staging/octeon/Kconfig
@@ -1,7 +1,8 @@
# SPDX-License-Identifier: GPL-2.0
config OCTEON_ETHERNET
tristate "Cavium Networks Octeon Ethernet support"
- depends on CAVIUM_OCTEON_SOC && NETDEVICES
+ depends on CAVIUM_OCTEON_SOC || COMPILE_TEST
+ depends on NETDEVICES
select PHYLIB
select MDIO_OCTEON
help
diff --git a/drivers/staging/octeon/ethernet-defines.h b/drivers/staging/octeon/ethernet-defines.h
index 1e114422993a..ef9e767b0e2e 100644
--- a/drivers/staging/octeon/ethernet-defines.h
+++ b/drivers/staging/octeon/ethernet-defines.h
@@ -21,8 +21,6 @@
#ifndef __ETHERNET_DEFINES_H__
#define __ETHERNET_DEFINES_H__
-#include <asm/octeon/cvmx-config.h>
-
#ifdef CONFIG_NETFILTER
#define REUSE_SKBUFFS_WITHOUT_FREE 0
#else
diff --git a/drivers/staging/octeon/ethernet-mdio.c b/drivers/staging/octeon/ethernet-mdio.c
index 2aee64fdaec5..ffac0c4b3f5c 100644
--- a/drivers/staging/octeon/ethernet-mdio.c
+++ b/drivers/staging/octeon/ethernet-mdio.c
@@ -13,15 +13,11 @@
#include <generated/utsrelease.h>
#include <net/dst.h>
-#include <asm/octeon/octeon.h>
-
-#include "ethernet-defines.h"
#include "octeon-ethernet.h"
+#include "ethernet-defines.h"
#include "ethernet-mdio.h"
#include "ethernet-util.h"
-#include <asm/octeon/cvmx-gmxx-defs.h>
-
static void cvm_oct_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *info)
{
diff --git a/drivers/staging/octeon/ethernet-mem.c b/drivers/staging/octeon/ethernet-mem.c
index 0d26c4a93ec1..532594957ebc 100644
--- a/drivers/staging/octeon/ethernet-mem.c
+++ b/drivers/staging/octeon/ethernet-mem.c
@@ -9,13 +9,10 @@
#include <linux/netdevice.h>
#include <linux/slab.h>
-#include <asm/octeon/octeon.h>
-
+#include "octeon-ethernet.h"
#include "ethernet-mem.h"
#include "ethernet-defines.h"
-#include <asm/octeon/cvmx-fpa.h>
-
/**
* cvm_oct_fill_hw_skbuff - fill the supplied hardware pool with skbuffs
* @pool: Pool to allocate an skbuff for
diff --git a/drivers/staging/octeon/ethernet-rgmii.c b/drivers/staging/octeon/ethernet-rgmii.c
index c15376d33891..d91fd5ce9e68 100644
--- a/drivers/staging/octeon/ethernet-rgmii.c
+++ b/drivers/staging/octeon/ethernet-rgmii.c
@@ -12,19 +12,11 @@
#include <linux/ratelimit.h>
#include <net/dst.h>
-#include <asm/octeon/octeon.h>
-
-#include "ethernet-defines.h"
#include "octeon-ethernet.h"
+#include "ethernet-defines.h"
#include "ethernet-util.h"
#include "ethernet-mdio.h"
-#include <asm/octeon/cvmx-helper.h>
-
-#include <asm/octeon/cvmx-ipd-defs.h>
-#include <asm/octeon/cvmx-npi-defs.h>
-#include <asm/octeon/cvmx-gmxx-defs.h>
-
static DEFINE_SPINLOCK(global_register_lock);
static void cvm_oct_set_hw_preamble(struct octeon_ethernet *priv, bool enable)
diff --git a/drivers/staging/octeon/ethernet-rx.c b/drivers/staging/octeon/ethernet-rx.c
index 5e271245273c..0e65955c746b 100644
--- a/drivers/staging/octeon/ethernet-rx.c
+++ b/drivers/staging/octeon/ethernet-rx.c
@@ -23,23 +23,12 @@
#include <net/xfrm.h>
#endif /* CONFIG_XFRM */
-#include <asm/octeon/octeon.h>
-
+#include "octeon-ethernet.h"
#include "ethernet-defines.h"
#include "ethernet-mem.h"
#include "ethernet-rx.h"
-#include "octeon-ethernet.h"
#include "ethernet-util.h"
-#include <asm/octeon/cvmx-helper.h>
-#include <asm/octeon/cvmx-wqe.h>
-#include <asm/octeon/cvmx-fau.h>
-#include <asm/octeon/cvmx-pow.h>
-#include <asm/octeon/cvmx-pip.h>
-#include <asm/octeon/cvmx-scratch.h>
-
-#include <asm/octeon/cvmx-gmxx-defs.h>
-
static atomic_t oct_rx_ready = ATOMIC_INIT(0);
static struct oct_rx_group {
diff --git a/drivers/staging/octeon/ethernet-rx.h b/drivers/staging/octeon/ethernet-rx.h
index 096553d8fc99..ff6482fa20d6 100644
--- a/drivers/staging/octeon/ethernet-rx.h
+++ b/drivers/staging/octeon/ethernet-rx.h
@@ -5,8 +5,6 @@
* Copyright (c) 2003-2007 Cavium Networks
*/
-#include <asm/octeon/cvmx-fau.h>
-
void cvm_oct_poll_controller(struct net_device *dev);
void cvm_oct_rx_initialize(void);
void cvm_oct_rx_shutdown(void);
diff --git a/drivers/staging/octeon/ethernet-sgmii.c b/drivers/staging/octeon/ethernet-sgmii.c
index a4a8f094e2b4..d7fbd9159302 100644
--- a/drivers/staging/octeon/ethernet-sgmii.c
+++ b/drivers/staging/octeon/ethernet-sgmii.c
@@ -11,17 +11,11 @@
#include <linux/ratelimit.h>
#include <net/dst.h>
-#include <asm/octeon/octeon.h>
-
-#include "ethernet-defines.h"
#include "octeon-ethernet.h"
+#include "ethernet-defines.h"
#include "ethernet-util.h"
#include "ethernet-mdio.h"
-#include <asm/octeon/cvmx-helper.h>
-
-#include <asm/octeon/cvmx-gmxx-defs.h>
-
int cvm_oct_sgmii_open(struct net_device *dev)
{
return cvm_oct_common_open(dev, cvm_oct_link_poll);
diff --git a/drivers/staging/octeon/ethernet-spi.c b/drivers/staging/octeon/ethernet-spi.c
index 01efdf2a2c20..c582403e6a1f 100644
--- a/drivers/staging/octeon/ethernet-spi.c
+++ b/drivers/staging/octeon/ethernet-spi.c
@@ -10,18 +10,10 @@
#include <linux/interrupt.h>
#include <net/dst.h>
-#include <asm/octeon/octeon.h>
-
-#include "ethernet-defines.h"
#include "octeon-ethernet.h"
+#include "ethernet-defines.h"
#include "ethernet-util.h"
-#include <asm/octeon/cvmx-spi.h>
-
-#include <asm/octeon/cvmx-npi-defs.h>
-#include <asm/octeon/cvmx-spxx-defs.h>
-#include <asm/octeon/cvmx-stxx-defs.h>
-
static int number_spi_ports;
static int need_retrain[2] = { 0, 0 };
diff --git a/drivers/staging/octeon/ethernet-tx.c b/drivers/staging/octeon/ethernet-tx.c
index 20f513fbaa85..c64728fc21f2 100644
--- a/drivers/staging/octeon/ethernet-tx.c
+++ b/drivers/staging/octeon/ethernet-tx.c
@@ -22,21 +22,11 @@
#include <linux/atomic.h>
#include <net/sch_generic.h>
-#include <asm/octeon/octeon.h>
-
-#include "ethernet-defines.h"
#include "octeon-ethernet.h"
+#include "ethernet-defines.h"
#include "ethernet-tx.h"
#include "ethernet-util.h"
-#include <asm/octeon/cvmx-wqe.h>
-#include <asm/octeon/cvmx-fau.h>
-#include <asm/octeon/cvmx-pip.h>
-#include <asm/octeon/cvmx-pko.h>
-#include <asm/octeon/cvmx-helper.h>
-
-#include <asm/octeon/cvmx-gmxx-defs.h>
-
#define CVM_OCT_SKB_CB(skb) ((u64 *)((skb)->cb))
/*
@@ -280,12 +270,11 @@ int cvm_oct_xmit(struct sk_buff *skb, struct net_device *dev)
hw_buffer.s.size = skb_headlen(skb);
CVM_OCT_SKB_CB(skb)[0] = hw_buffer.u64;
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
- struct skb_frag_struct *fs = skb_shinfo(skb)->frags + i;
+ skb_frag_t *fs = skb_shinfo(skb)->frags + i;
hw_buffer.s.addr =
- XKPHYS_TO_PHYS((u64)(page_address(fs->page.p) +
- fs->page_offset));
- hw_buffer.s.size = fs->size;
+ XKPHYS_TO_PHYS((u64)skb_frag_address(fs));
+ hw_buffer.s.size = skb_frag_size(fs);
CVM_OCT_SKB_CB(skb)[i + 1] = hw_buffer.u64;
}
hw_buffer.s.addr = XKPHYS_TO_PHYS((u64)CVM_OCT_SKB_CB(skb));
diff --git a/drivers/staging/octeon/ethernet-util.h b/drivers/staging/octeon/ethernet-util.h
index 31a82873e15c..2af83a12ca78 100644
--- a/drivers/staging/octeon/ethernet-util.h
+++ b/drivers/staging/octeon/ethernet-util.h
@@ -5,10 +5,6 @@
* Copyright (c) 2003-2007 Cavium Networks
*/
-#include <asm/octeon/cvmx-pip.h>
-#include <asm/octeon/cvmx-helper.h>
-#include <asm/octeon/cvmx-helper-util.h>
-
/**
* cvm_oct_get_buffer_ptr - convert packet data address to pointer
* @packet_ptr: Packet data hardware address
diff --git a/drivers/staging/octeon/ethernet.c b/drivers/staging/octeon/ethernet.c
index 8847a11c212f..8889494adf1f 100644
--- a/drivers/staging/octeon/ethernet.c
+++ b/drivers/staging/octeon/ethernet.c
@@ -19,24 +19,14 @@
#include <net/dst.h>
-#include <asm/octeon/octeon.h>
-
-#include "ethernet-defines.h"
#include "octeon-ethernet.h"
+#include "ethernet-defines.h"
#include "ethernet-mem.h"
#include "ethernet-rx.h"
#include "ethernet-tx.h"
#include "ethernet-mdio.h"
#include "ethernet-util.h"
-#include <asm/octeon/cvmx-pip.h>
-#include <asm/octeon/cvmx-pko.h>
-#include <asm/octeon/cvmx-fau.h>
-#include <asm/octeon/cvmx-ipd.h>
-#include <asm/octeon/cvmx-helper.h>
-#include <asm/octeon/cvmx-asxx-defs.h>
-#include <asm/octeon/cvmx-gmxx-defs.h>
-
#define OCTEON_MAX_MTU 65392
static int num_packet_buffers = 1024;
diff --git a/drivers/staging/octeon/octeon-ethernet.h b/drivers/staging/octeon/octeon-ethernet.h
index be570d33685a..a8a864b40913 100644
--- a/drivers/staging/octeon/octeon-ethernet.h
+++ b/drivers/staging/octeon/octeon-ethernet.h
@@ -13,7 +13,34 @@
#include <linux/of.h>
#include <linux/phy.h>
-#include <asm/octeon/cvmx-helper-board.h>
+
+#ifdef CONFIG_MIPS
+
+#include <asm/octeon/octeon.h>
+
+#include <asm/octeon/cvmx-asxx-defs.h>
+#include <asm/octeon/cvmx-config.h>
+#include <asm/octeon/cvmx-fau.h>
+#include <asm/octeon/cvmx-gmxx-defs.h>
+#include <asm/octeon/cvmx-helper.h>
+#include <asm/octeon/cvmx-helper-util.h>
+#include <asm/octeon/cvmx-ipd.h>
+#include <asm/octeon/cvmx-ipd-defs.h>
+#include <asm/octeon/cvmx-npi-defs.h>
+#include <asm/octeon/cvmx-pip.h>
+#include <asm/octeon/cvmx-pko.h>
+#include <asm/octeon/cvmx-pow.h>
+#include <asm/octeon/cvmx-scratch.h>
+#include <asm/octeon/cvmx-spi.h>
+#include <asm/octeon/cvmx-spxx-defs.h>
+#include <asm/octeon/cvmx-stxx-defs.h>
+#include <asm/octeon/cvmx-wqe.h>
+
+#else
+
+#include "octeon-stubs.h"
+
+#endif
/**
* This is the definition of the Ethernet driver's private
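
This header change is what makes the new COMPILE_TEST option in the octeon Kconfig hunk workable: on MIPS the real cvmx headers are pulled in, and everywhere else octeon-stubs.h supplies no-op inline helpers and trivial macros so every call site still type-checks. A condensed sketch of the stub-header pattern for a hypothetical driver (example_read_csr() is made up; cvmx_read_csr() is the real accessor):

#include <linux/types.h>

#ifdef CONFIG_MIPS
#include <asm/octeon/octeon.h>

static inline u64 example_read_csr(u64 addr)
{
	return cvmx_read_csr(addr);	/* real register read */
}
#else
static inline u64 example_read_csr(u64 addr)
{
	return 0;	/* stub: all registers read as zero */
}
#endif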
diff --git a/drivers/staging/octeon/octeon-stubs.h b/drivers/staging/octeon/octeon-stubs.h
new file mode 100644
index 000000000000..a4ac3bfb62a8
--- /dev/null
+++ b/drivers/staging/octeon/octeon-stubs.h
@@ -0,0 +1,1429 @@
+#define CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE 512
+#define XKPHYS_TO_PHYS(p) (p)
+
+#define OCTEON_IRQ_WORKQ0 0
+#define OCTEON_IRQ_RML 0
+#define OCTEON_IRQ_TIMER1 0
+#define OCTEON_IS_MODEL(x) 0
+#define octeon_has_feature(x) 0
+#define octeon_get_clock_rate() 0
+
+#define CVMX_SYNCIOBDMA do { } while(0)
+
+#define CVMX_HELPER_INPUT_TAG_TYPE 0
+#define CVMX_HELPER_FIRST_MBUFF_SKIP 7
+#define CVMX_FAU_REG_END (2048)
+#define CVMX_FPA_OUTPUT_BUFFER_POOL (2)
+#define CVMX_FPA_OUTPUT_BUFFER_POOL_SIZE 16
+#define CVMX_FPA_PACKET_POOL (0)
+#define CVMX_FPA_PACKET_POOL_SIZE 16
+#define CVMX_FPA_WQE_POOL (1)
+#define CVMX_FPA_WQE_POOL_SIZE 16
+#define CVMX_GMXX_RXX_ADR_CAM_EN(a, b) ((a)+(b))
+#define CVMX_GMXX_RXX_ADR_CTL(a, b) ((a)+(b))
+#define CVMX_GMXX_PRTX_CFG(a, b) ((a)+(b))
+#define CVMX_GMXX_RXX_FRM_MAX(a, b) ((a)+(b))
+#define CVMX_GMXX_RXX_JABBER(a, b) ((a)+(b))
+#define CVMX_IPD_CTL_STATUS 0
+#define CVMX_PIP_FRM_LEN_CHKX(a) (a)
+#define CVMX_PIP_NUM_INPUT_PORTS 1
+#define CVMX_SCR_SCRATCH 0
+#define CVMX_PKO_QUEUES_PER_PORT_INTERFACE0 2
+#define CVMX_PKO_QUEUES_PER_PORT_INTERFACE1 2
+#define CVMX_IPD_SUB_PORT_FCS 0
+#define CVMX_SSO_WQ_IQ_DIS 0
+#define CVMX_SSO_WQ_INT 0
+#define CVMX_POW_WQ_INT 0
+#define CVMX_SSO_WQ_INT_PC 0
+#define CVMX_NPI_RSL_INT_BLOCKS 0
+#define CVMX_POW_WQ_INT_PC 0
+
+typedef union {
+ uint64_t u64;
+ struct {
+ uint64_t bufs:8;
+ uint64_t ip_offset:8;
+ uint64_t vlan_valid:1;
+ uint64_t vlan_stacked:1;
+ uint64_t unassigned:1;
+ uint64_t vlan_cfi:1;
+ uint64_t vlan_id:12;
+ uint64_t pr:4;
+ uint64_t unassigned2:8;
+ uint64_t dec_ipcomp:1;
+ uint64_t tcp_or_udp:1;
+ uint64_t dec_ipsec:1;
+ uint64_t is_v6:1;
+ uint64_t software:1;
+ uint64_t L4_error:1;
+ uint64_t is_frag:1;
+ uint64_t IP_exc:1;
+ uint64_t is_bcast:1;
+ uint64_t is_mcast:1;
+ uint64_t not_IP:1;
+ uint64_t rcv_error:1;
+ uint64_t err_code:8;
+ } s;
+ struct {
+ uint64_t bufs:8;
+ uint64_t ip_offset:8;
+ uint64_t vlan_valid:1;
+ uint64_t vlan_stacked:1;
+ uint64_t unassigned:1;
+ uint64_t vlan_cfi:1;
+ uint64_t vlan_id:12;
+ uint64_t port:12;
+ uint64_t dec_ipcomp:1;
+ uint64_t tcp_or_udp:1;
+ uint64_t dec_ipsec:1;
+ uint64_t is_v6:1;
+ uint64_t software:1;
+ uint64_t L4_error:1;
+ uint64_t is_frag:1;
+ uint64_t IP_exc:1;
+ uint64_t is_bcast:1;
+ uint64_t is_mcast:1;
+ uint64_t not_IP:1;
+ uint64_t rcv_error:1;
+ uint64_t err_code:8;
+ } s_cn68xx;
+
+ struct {
+ uint64_t unused1:16;
+ uint64_t vlan:16;
+ uint64_t unused2:32;
+ } svlan;
+ struct {
+ uint64_t bufs:8;
+ uint64_t unused:8;
+ uint64_t vlan_valid:1;
+ uint64_t vlan_stacked:1;
+ uint64_t unassigned:1;
+ uint64_t vlan_cfi:1;
+ uint64_t vlan_id:12;
+ uint64_t pr:4;
+ uint64_t unassigned2:12;
+ uint64_t software:1;
+ uint64_t unassigned3:1;
+ uint64_t is_rarp:1;
+ uint64_t is_arp:1;
+ uint64_t is_bcast:1;
+ uint64_t is_mcast:1;
+ uint64_t not_IP:1;
+ uint64_t rcv_error:1;
+ uint64_t err_code:8;
+ } snoip;
+
+} cvmx_pip_wqe_word2;
+
+union cvmx_pip_wqe_word0 {
+ struct {
+ uint64_t next_ptr:40;
+ uint8_t unused;
+ uint16_t hw_chksum;
+ } cn38xx;
+ struct {
+ uint64_t pknd:6; /* 0..5 */
+ uint64_t unused2:2; /* 6..7 */
+ uint64_t bpid:6; /* 8..13 */
+ uint64_t unused1:18; /* 14..31 */
+ uint64_t l2ptr:8; /* 32..39 */
+ uint64_t l3ptr:8; /* 40..47 */
+ uint64_t unused0:8; /* 48..55 */
+ uint64_t l4ptr:8; /* 56..63 */
+ } cn68xx;
+};
+
+union cvmx_wqe_word0 {
+ uint64_t u64;
+ union cvmx_pip_wqe_word0 pip;
+};
+
+union cvmx_wqe_word1 {
+ uint64_t u64;
+ struct {
+ uint64_t tag:32;
+ uint64_t tag_type:2;
+ uint64_t varies:14;
+ uint64_t len:16;
+ };
+ struct {
+ uint64_t tag:32;
+ uint64_t tag_type:2;
+ uint64_t zero_2:3;
+ uint64_t grp:6;
+ uint64_t zero_1:1;
+ uint64_t qos:3;
+ uint64_t zero_0:1;
+ uint64_t len:16;
+ } cn68xx;
+ struct {
+ uint64_t tag:32;
+ uint64_t tag_type:2;
+ uint64_t zero_2:1;
+ uint64_t grp:4;
+ uint64_t qos:3;
+ uint64_t ipprt:6;
+ uint64_t len:16;
+ } cn38xx;
+};
+
+union cvmx_buf_ptr {
+ void *ptr;
+ uint64_t u64;
+ struct {
+ uint64_t i:1;
+ uint64_t back:4;
+ uint64_t pool:3;
+ uint64_t size:16;
+ uint64_t addr:40;
+ } s;
+};
+
+typedef struct {
+ union cvmx_wqe_word0 word0;
+ union cvmx_wqe_word1 word1;
+ cvmx_pip_wqe_word2 word2;
+ union cvmx_buf_ptr packet_ptr;
+ uint8_t packet_data[96];
+} cvmx_wqe_t;
+
+typedef union {
+ uint64_t u64;
+ struct {
+ uint64_t reserved_20_63:44;
+ uint64_t link_up:1; /**< Is the physical link up? */
+ uint64_t full_duplex:1; /**< 1 if the link is full duplex */
+ uint64_t speed:18; /**< Speed of the link in Mbps */
+ } s;
+} cvmx_helper_link_info_t;
+
+typedef enum {
+ CVMX_FAU_REG_32_START = 0,
+} cvmx_fau_reg_32_t;
+
+typedef enum {
+ CVMX_FAU_OP_SIZE_8 = 0,
+ CVMX_FAU_OP_SIZE_16 = 1,
+ CVMX_FAU_OP_SIZE_32 = 2,
+ CVMX_FAU_OP_SIZE_64 = 3
+} cvmx_fau_op_size_t;
+
+typedef enum {
+ CVMX_SPI_MODE_UNKNOWN = 0,
+ CVMX_SPI_MODE_TX_HALFPLEX = 1,
+ CVMX_SPI_MODE_RX_HALFPLEX = 2,
+ CVMX_SPI_MODE_DUPLEX = 3
+} cvmx_spi_mode_t;
+
+typedef enum {
+ CVMX_HELPER_INTERFACE_MODE_DISABLED,
+ CVMX_HELPER_INTERFACE_MODE_RGMII,
+ CVMX_HELPER_INTERFACE_MODE_GMII,
+ CVMX_HELPER_INTERFACE_MODE_SPI,
+ CVMX_HELPER_INTERFACE_MODE_PCIE,
+ CVMX_HELPER_INTERFACE_MODE_XAUI,
+ CVMX_HELPER_INTERFACE_MODE_SGMII,
+ CVMX_HELPER_INTERFACE_MODE_PICMG,
+ CVMX_HELPER_INTERFACE_MODE_NPI,
+ CVMX_HELPER_INTERFACE_MODE_LOOP,
+} cvmx_helper_interface_mode_t;
+
+typedef enum {
+ CVMX_POW_WAIT = 1,
+ CVMX_POW_NO_WAIT = 0,
+} cvmx_pow_wait_t;
+
+typedef enum {
+ CVMX_PKO_LOCK_NONE = 0,
+ CVMX_PKO_LOCK_ATOMIC_TAG = 1,
+ CVMX_PKO_LOCK_CMD_QUEUE = 2,
+} cvmx_pko_lock_t;
+
+typedef enum {
+ CVMX_PKO_SUCCESS,
+ CVMX_PKO_INVALID_PORT,
+ CVMX_PKO_INVALID_QUEUE,
+ CVMX_PKO_INVALID_PRIORITY,
+ CVMX_PKO_NO_MEMORY,
+ CVMX_PKO_PORT_ALREADY_SETUP,
+ CVMX_PKO_CMD_QUEUE_INIT_ERROR
+} cvmx_pko_status_t;
+
+enum cvmx_pow_tag_type {
+ CVMX_POW_TAG_TYPE_ORDERED = 0L,
+ CVMX_POW_TAG_TYPE_ATOMIC = 1L,
+ CVMX_POW_TAG_TYPE_NULL = 2L,
+ CVMX_POW_TAG_TYPE_NULL_NULL = 3L
+};
+
+union cvmx_ipd_ctl_status {
+ uint64_t u64;
+ struct cvmx_ipd_ctl_status_s {
+ uint64_t reserved_18_63:46;
+ uint64_t use_sop:1;
+ uint64_t rst_done:1;
+ uint64_t clken:1;
+ uint64_t no_wptr:1;
+ uint64_t pq_apkt:1;
+ uint64_t pq_nabuf:1;
+ uint64_t ipd_full:1;
+ uint64_t pkt_off:1;
+ uint64_t len_m8:1;
+ uint64_t reset:1;
+ uint64_t addpkt:1;
+ uint64_t naddbuf:1;
+ uint64_t pkt_lend:1;
+ uint64_t wqe_lend:1;
+ uint64_t pbp_en:1;
+ uint64_t opc_mode:2;
+ uint64_t ipd_en:1;
+ } s;
+ struct cvmx_ipd_ctl_status_cn30xx {
+ uint64_t reserved_10_63:54;
+ uint64_t len_m8:1;
+ uint64_t reset:1;
+ uint64_t addpkt:1;
+ uint64_t naddbuf:1;
+ uint64_t pkt_lend:1;
+ uint64_t wqe_lend:1;
+ uint64_t pbp_en:1;
+ uint64_t opc_mode:2;
+ uint64_t ipd_en:1;
+ } cn30xx;
+ struct cvmx_ipd_ctl_status_cn38xxp2 {
+ uint64_t reserved_9_63:55;
+ uint64_t reset:1;
+ uint64_t addpkt:1;
+ uint64_t naddbuf:1;
+ uint64_t pkt_lend:1;
+ uint64_t wqe_lend:1;
+ uint64_t pbp_en:1;
+ uint64_t opc_mode:2;
+ uint64_t ipd_en:1;
+ } cn38xxp2;
+ struct cvmx_ipd_ctl_status_cn50xx {
+ uint64_t reserved_15_63:49;
+ uint64_t no_wptr:1;
+ uint64_t pq_apkt:1;
+ uint64_t pq_nabuf:1;
+ uint64_t ipd_full:1;
+ uint64_t pkt_off:1;
+ uint64_t len_m8:1;
+ uint64_t reset:1;
+ uint64_t addpkt:1;
+ uint64_t naddbuf:1;
+ uint64_t pkt_lend:1;
+ uint64_t wqe_lend:1;
+ uint64_t pbp_en:1;
+ uint64_t opc_mode:2;
+ uint64_t ipd_en:1;
+ } cn50xx;
+ struct cvmx_ipd_ctl_status_cn58xx {
+ uint64_t reserved_12_63:52;
+ uint64_t ipd_full:1;
+ uint64_t pkt_off:1;
+ uint64_t len_m8:1;
+ uint64_t reset:1;
+ uint64_t addpkt:1;
+ uint64_t naddbuf:1;
+ uint64_t pkt_lend:1;
+ uint64_t wqe_lend:1;
+ uint64_t pbp_en:1;
+ uint64_t opc_mode:2;
+ uint64_t ipd_en:1;
+ } cn58xx;
+ struct cvmx_ipd_ctl_status_cn63xxp1 {
+ uint64_t reserved_16_63:48;
+ uint64_t clken:1;
+ uint64_t no_wptr:1;
+ uint64_t pq_apkt:1;
+ uint64_t pq_nabuf:1;
+ uint64_t ipd_full:1;
+ uint64_t pkt_off:1;
+ uint64_t len_m8:1;
+ uint64_t reset:1;
+ uint64_t addpkt:1;
+ uint64_t naddbuf:1;
+ uint64_t pkt_lend:1;
+ uint64_t wqe_lend:1;
+ uint64_t pbp_en:1;
+ uint64_t opc_mode:2;
+ uint64_t ipd_en:1;
+ } cn63xxp1;
+};
+
+union cvmx_ipd_sub_port_fcs {
+ uint64_t u64;
+ struct cvmx_ipd_sub_port_fcs_s {
+ uint64_t port_bit:32;
+ uint64_t reserved_32_35:4;
+ uint64_t port_bit2:4;
+ uint64_t reserved_40_63:24;
+ } s;
+ struct cvmx_ipd_sub_port_fcs_cn30xx {
+ uint64_t port_bit:3;
+ uint64_t reserved_3_63:61;
+ } cn30xx;
+ struct cvmx_ipd_sub_port_fcs_cn38xx {
+ uint64_t port_bit:32;
+ uint64_t reserved_32_63:32;
+ } cn38xx;
+};
+
+union cvmx_ipd_sub_port_qos_cnt {
+ uint64_t u64;
+ struct cvmx_ipd_sub_port_qos_cnt_s {
+ uint64_t cnt:32;
+ uint64_t port_qos:9;
+ uint64_t reserved_41_63:23;
+ } s;
+};
+typedef struct {
+ uint32_t dropped_octets;
+ uint32_t dropped_packets;
+ uint32_t pci_raw_packets;
+ uint32_t octets;
+ uint32_t packets;
+ uint32_t multicast_packets;
+ uint32_t broadcast_packets;
+ uint32_t len_64_packets;
+ uint32_t len_65_127_packets;
+ uint32_t len_128_255_packets;
+ uint32_t len_256_511_packets;
+ uint32_t len_512_1023_packets;
+ uint32_t len_1024_1518_packets;
+ uint32_t len_1519_max_packets;
+ uint32_t fcs_align_err_packets;
+ uint32_t runt_packets;
+ uint32_t runt_crc_packets;
+ uint32_t oversize_packets;
+ uint32_t oversize_crc_packets;
+ uint32_t inb_packets;
+ uint64_t inb_octets;
+ uint16_t inb_errors;
+} cvmx_pip_port_status_t;
+
+typedef struct {
+ uint32_t packets;
+ uint64_t octets;
+ uint64_t doorbell;
+} cvmx_pko_port_status_t;
+
+union cvmx_pip_frm_len_chkx {
+ uint64_t u64;
+ struct cvmx_pip_frm_len_chkx_s {
+ uint64_t reserved_32_63:32;
+ uint64_t maxlen:16;
+ uint64_t minlen:16;
+ } s;
+};
+
+union cvmx_gmxx_rxx_frm_ctl {
+ uint64_t u64;
+ struct cvmx_gmxx_rxx_frm_ctl_s {
+ uint64_t pre_chk:1;
+ uint64_t pre_strp:1;
+ uint64_t ctl_drp:1;
+ uint64_t ctl_bck:1;
+ uint64_t ctl_mcst:1;
+ uint64_t ctl_smac:1;
+ uint64_t pre_free:1;
+ uint64_t vlan_len:1;
+ uint64_t pad_len:1;
+ uint64_t pre_align:1;
+ uint64_t null_dis:1;
+ uint64_t reserved_11_11:1;
+ uint64_t ptp_mode:1;
+ uint64_t reserved_13_63:51;
+ } s;
+ struct cvmx_gmxx_rxx_frm_ctl_cn30xx {
+ uint64_t pre_chk:1;
+ uint64_t pre_strp:1;
+ uint64_t ctl_drp:1;
+ uint64_t ctl_bck:1;
+ uint64_t ctl_mcst:1;
+ uint64_t ctl_smac:1;
+ uint64_t pre_free:1;
+ uint64_t vlan_len:1;
+ uint64_t pad_len:1;
+ uint64_t reserved_9_63:55;
+ } cn30xx;
+ struct cvmx_gmxx_rxx_frm_ctl_cn31xx {
+ uint64_t pre_chk:1;
+ uint64_t pre_strp:1;
+ uint64_t ctl_drp:1;
+ uint64_t ctl_bck:1;
+ uint64_t ctl_mcst:1;
+ uint64_t ctl_smac:1;
+ uint64_t pre_free:1;
+ uint64_t vlan_len:1;
+ uint64_t reserved_8_63:56;
+ } cn31xx;
+ struct cvmx_gmxx_rxx_frm_ctl_cn50xx {
+ uint64_t pre_chk:1;
+ uint64_t pre_strp:1;
+ uint64_t ctl_drp:1;
+ uint64_t ctl_bck:1;
+ uint64_t ctl_mcst:1;
+ uint64_t ctl_smac:1;
+ uint64_t pre_free:1;
+ uint64_t reserved_7_8:2;
+ uint64_t pre_align:1;
+ uint64_t null_dis:1;
+ uint64_t reserved_11_63:53;
+ } cn50xx;
+ struct cvmx_gmxx_rxx_frm_ctl_cn56xxp1 {
+ uint64_t pre_chk:1;
+ uint64_t pre_strp:1;
+ uint64_t ctl_drp:1;
+ uint64_t ctl_bck:1;
+ uint64_t ctl_mcst:1;
+ uint64_t ctl_smac:1;
+ uint64_t pre_free:1;
+ uint64_t reserved_7_8:2;
+ uint64_t pre_align:1;
+ uint64_t reserved_10_63:54;
+ } cn56xxp1;
+ struct cvmx_gmxx_rxx_frm_ctl_cn58xx {
+ uint64_t pre_chk:1;
+ uint64_t pre_strp:1;
+ uint64_t ctl_drp:1;
+ uint64_t ctl_bck:1;
+ uint64_t ctl_mcst:1;
+ uint64_t ctl_smac:1;
+ uint64_t pre_free:1;
+ uint64_t vlan_len:1;
+ uint64_t pad_len:1;
+ uint64_t pre_align:1;
+ uint64_t null_dis:1;
+ uint64_t reserved_11_63:53;
+ } cn58xx;
+ struct cvmx_gmxx_rxx_frm_ctl_cn61xx {
+ uint64_t pre_chk:1;
+ uint64_t pre_strp:1;
+ uint64_t ctl_drp:1;
+ uint64_t ctl_bck:1;
+ uint64_t ctl_mcst:1;
+ uint64_t ctl_smac:1;
+ uint64_t pre_free:1;
+ uint64_t reserved_7_8:2;
+ uint64_t pre_align:1;
+ uint64_t null_dis:1;
+ uint64_t reserved_11_11:1;
+ uint64_t ptp_mode:1;
+ uint64_t reserved_13_63:51;
+ } cn61xx;
+};
+
+union cvmx_gmxx_rxx_int_reg {
+ uint64_t u64;
+ struct cvmx_gmxx_rxx_int_reg_s {
+ uint64_t minerr:1;
+ uint64_t carext:1;
+ uint64_t maxerr:1;
+ uint64_t jabber:1;
+ uint64_t fcserr:1;
+ uint64_t alnerr:1;
+ uint64_t lenerr:1;
+ uint64_t rcverr:1;
+ uint64_t skperr:1;
+ uint64_t niberr:1;
+ uint64_t ovrerr:1;
+ uint64_t pcterr:1;
+ uint64_t rsverr:1;
+ uint64_t falerr:1;
+ uint64_t coldet:1;
+ uint64_t ifgerr:1;
+ uint64_t phy_link:1;
+ uint64_t phy_spd:1;
+ uint64_t phy_dupx:1;
+ uint64_t pause_drp:1;
+ uint64_t loc_fault:1;
+ uint64_t rem_fault:1;
+ uint64_t bad_seq:1;
+ uint64_t bad_term:1;
+ uint64_t unsop:1;
+ uint64_t uneop:1;
+ uint64_t undat:1;
+ uint64_t hg2fld:1;
+ uint64_t hg2cc:1;
+ uint64_t reserved_29_63:35;
+ } s;
+ struct cvmx_gmxx_rxx_int_reg_cn30xx {
+ uint64_t minerr:1;
+ uint64_t carext:1;
+ uint64_t maxerr:1;
+ uint64_t jabber:1;
+ uint64_t fcserr:1;
+ uint64_t alnerr:1;
+ uint64_t lenerr:1;
+ uint64_t rcverr:1;
+ uint64_t skperr:1;
+ uint64_t niberr:1;
+ uint64_t ovrerr:1;
+ uint64_t pcterr:1;
+ uint64_t rsverr:1;
+ uint64_t falerr:1;
+ uint64_t coldet:1;
+ uint64_t ifgerr:1;
+ uint64_t phy_link:1;
+ uint64_t phy_spd:1;
+ uint64_t phy_dupx:1;
+ uint64_t reserved_19_63:45;
+ } cn30xx;
+ struct cvmx_gmxx_rxx_int_reg_cn50xx {
+ uint64_t reserved_0_0:1;
+ uint64_t carext:1;
+ uint64_t reserved_2_2:1;
+ uint64_t jabber:1;
+ uint64_t fcserr:1;
+ uint64_t alnerr:1;
+ uint64_t reserved_6_6:1;
+ uint64_t rcverr:1;
+ uint64_t skperr:1;
+ uint64_t niberr:1;
+ uint64_t ovrerr:1;
+ uint64_t pcterr:1;
+ uint64_t rsverr:1;
+ uint64_t falerr:1;
+ uint64_t coldet:1;
+ uint64_t ifgerr:1;
+ uint64_t phy_link:1;
+ uint64_t phy_spd:1;
+ uint64_t phy_dupx:1;
+ uint64_t pause_drp:1;
+ uint64_t reserved_20_63:44;
+ } cn50xx;
+ struct cvmx_gmxx_rxx_int_reg_cn52xx {
+ uint64_t reserved_0_0:1;
+ uint64_t carext:1;
+ uint64_t reserved_2_2:1;
+ uint64_t jabber:1;
+ uint64_t fcserr:1;
+ uint64_t reserved_5_6:2;
+ uint64_t rcverr:1;
+ uint64_t skperr:1;
+ uint64_t reserved_9_9:1;
+ uint64_t ovrerr:1;
+ uint64_t pcterr:1;
+ uint64_t rsverr:1;
+ uint64_t falerr:1;
+ uint64_t coldet:1;
+ uint64_t ifgerr:1;
+ uint64_t reserved_16_18:3;
+ uint64_t pause_drp:1;
+ uint64_t loc_fault:1;
+ uint64_t rem_fault:1;
+ uint64_t bad_seq:1;
+ uint64_t bad_term:1;
+ uint64_t unsop:1;
+ uint64_t uneop:1;
+ uint64_t undat:1;
+ uint64_t hg2fld:1;
+ uint64_t hg2cc:1;
+ uint64_t reserved_29_63:35;
+ } cn52xx;
+ struct cvmx_gmxx_rxx_int_reg_cn56xxp1 {
+ uint64_t reserved_0_0:1;
+ uint64_t carext:1;
+ uint64_t reserved_2_2:1;
+ uint64_t jabber:1;
+ uint64_t fcserr:1;
+ uint64_t reserved_5_6:2;
+ uint64_t rcverr:1;
+ uint64_t skperr:1;
+ uint64_t reserved_9_9:1;
+ uint64_t ovrerr:1;
+ uint64_t pcterr:1;
+ uint64_t rsverr:1;
+ uint64_t falerr:1;
+ uint64_t coldet:1;
+ uint64_t ifgerr:1;
+ uint64_t reserved_16_18:3;
+ uint64_t pause_drp:1;
+ uint64_t loc_fault:1;
+ uint64_t rem_fault:1;
+ uint64_t bad_seq:1;
+ uint64_t bad_term:1;
+ uint64_t unsop:1;
+ uint64_t uneop:1;
+ uint64_t undat:1;
+ uint64_t reserved_27_63:37;
+ } cn56xxp1;
+ struct cvmx_gmxx_rxx_int_reg_cn58xx {
+ uint64_t minerr:1;
+ uint64_t carext:1;
+ uint64_t maxerr:1;
+ uint64_t jabber:1;
+ uint64_t fcserr:1;
+ uint64_t alnerr:1;
+ uint64_t lenerr:1;
+ uint64_t rcverr:1;
+ uint64_t skperr:1;
+ uint64_t niberr:1;
+ uint64_t ovrerr:1;
+ uint64_t pcterr:1;
+ uint64_t rsverr:1;
+ uint64_t falerr:1;
+ uint64_t coldet:1;
+ uint64_t ifgerr:1;
+ uint64_t phy_link:1;
+ uint64_t phy_spd:1;
+ uint64_t phy_dupx:1;
+ uint64_t pause_drp:1;
+ uint64_t reserved_20_63:44;
+ } cn58xx;
+ struct cvmx_gmxx_rxx_int_reg_cn61xx {
+ uint64_t minerr:1;
+ uint64_t carext:1;
+ uint64_t reserved_2_2:1;
+ uint64_t jabber:1;
+ uint64_t fcserr:1;
+ uint64_t reserved_5_6:2;
+ uint64_t rcverr:1;
+ uint64_t skperr:1;
+ uint64_t reserved_9_9:1;
+ uint64_t ovrerr:1;
+ uint64_t pcterr:1;
+ uint64_t rsverr:1;
+ uint64_t falerr:1;
+ uint64_t coldet:1;
+ uint64_t ifgerr:1;
+ uint64_t reserved_16_18:3;
+ uint64_t pause_drp:1;
+ uint64_t loc_fault:1;
+ uint64_t rem_fault:1;
+ uint64_t bad_seq:1;
+ uint64_t bad_term:1;
+ uint64_t unsop:1;
+ uint64_t uneop:1;
+ uint64_t undat:1;
+ uint64_t hg2fld:1;
+ uint64_t hg2cc:1;
+ uint64_t reserved_29_63:35;
+ } cn61xx;
+};
+
+union cvmx_gmxx_prtx_cfg {
+ uint64_t u64;
+ struct cvmx_gmxx_prtx_cfg_s {
+ uint64_t reserved_22_63:42;
+ uint64_t pknd:6;
+ uint64_t reserved_14_15:2;
+ uint64_t tx_idle:1;
+ uint64_t rx_idle:1;
+ uint64_t reserved_9_11:3;
+ uint64_t speed_msb:1;
+ uint64_t reserved_4_7:4;
+ uint64_t slottime:1;
+ uint64_t duplex:1;
+ uint64_t speed:1;
+ uint64_t en:1;
+ } s;
+ struct cvmx_gmxx_prtx_cfg_cn30xx {
+ uint64_t reserved_4_63:60;
+ uint64_t slottime:1;
+ uint64_t duplex:1;
+ uint64_t speed:1;
+ uint64_t en:1;
+ } cn30xx;
+ struct cvmx_gmxx_prtx_cfg_cn52xx {
+ uint64_t reserved_14_63:50;
+ uint64_t tx_idle:1;
+ uint64_t rx_idle:1;
+ uint64_t reserved_9_11:3;
+ uint64_t speed_msb:1;
+ uint64_t reserved_4_7:4;
+ uint64_t slottime:1;
+ uint64_t duplex:1;
+ uint64_t speed:1;
+ uint64_t en:1;
+ } cn52xx;
+};
+
+union cvmx_gmxx_rxx_adr_ctl {
+ uint64_t u64;
+ struct cvmx_gmxx_rxx_adr_ctl_s {
+ uint64_t reserved_4_63:60;
+ uint64_t cam_mode:1;
+ uint64_t mcst:2;
+ uint64_t bcst:1;
+ } s;
+};
+
+union cvmx_pip_prt_tagx {
+ uint64_t u64;
+ struct cvmx_pip_prt_tagx_s {
+ uint64_t reserved_54_63:10;
+ uint64_t portadd_en:1;
+ uint64_t inc_hwchk:1;
+ uint64_t reserved_50_51:2;
+ uint64_t grptagbase_msb:2;
+ uint64_t reserved_46_47:2;
+ uint64_t grptagmask_msb:2;
+ uint64_t reserved_42_43:2;
+ uint64_t grp_msb:2;
+ uint64_t grptagbase:4;
+ uint64_t grptagmask:4;
+ uint64_t grptag:1;
+ uint64_t grptag_mskip:1;
+ uint64_t tag_mode:2;
+ uint64_t inc_vs:2;
+ uint64_t inc_vlan:1;
+ uint64_t inc_prt_flag:1;
+ uint64_t ip6_dprt_flag:1;
+ uint64_t ip4_dprt_flag:1;
+ uint64_t ip6_sprt_flag:1;
+ uint64_t ip4_sprt_flag:1;
+ uint64_t ip6_nxth_flag:1;
+ uint64_t ip4_pctl_flag:1;
+ uint64_t ip6_dst_flag:1;
+ uint64_t ip4_dst_flag:1;
+ uint64_t ip6_src_flag:1;
+ uint64_t ip4_src_flag:1;
+ uint64_t tcp6_tag_type:2;
+ uint64_t tcp4_tag_type:2;
+ uint64_t ip6_tag_type:2;
+ uint64_t ip4_tag_type:2;
+ uint64_t non_tag_type:2;
+ uint64_t grp:4;
+ } s;
+ struct cvmx_pip_prt_tagx_cn30xx {
+ uint64_t reserved_40_63:24;
+ uint64_t grptagbase:4;
+ uint64_t grptagmask:4;
+ uint64_t grptag:1;
+ uint64_t reserved_30_30:1;
+ uint64_t tag_mode:2;
+ uint64_t inc_vs:2;
+ uint64_t inc_vlan:1;
+ uint64_t inc_prt_flag:1;
+ uint64_t ip6_dprt_flag:1;
+ uint64_t ip4_dprt_flag:1;
+ uint64_t ip6_sprt_flag:1;
+ uint64_t ip4_sprt_flag:1;
+ uint64_t ip6_nxth_flag:1;
+ uint64_t ip4_pctl_flag:1;
+ uint64_t ip6_dst_flag:1;
+ uint64_t ip4_dst_flag:1;
+ uint64_t ip6_src_flag:1;
+ uint64_t ip4_src_flag:1;
+ uint64_t tcp6_tag_type:2;
+ uint64_t tcp4_tag_type:2;
+ uint64_t ip6_tag_type:2;
+ uint64_t ip4_tag_type:2;
+ uint64_t non_tag_type:2;
+ uint64_t grp:4;
+ } cn30xx;
+ struct cvmx_pip_prt_tagx_cn50xx {
+ uint64_t reserved_40_63:24;
+ uint64_t grptagbase:4;
+ uint64_t grptagmask:4;
+ uint64_t grptag:1;
+ uint64_t grptag_mskip:1;
+ uint64_t tag_mode:2;
+ uint64_t inc_vs:2;
+ uint64_t inc_vlan:1;
+ uint64_t inc_prt_flag:1;
+ uint64_t ip6_dprt_flag:1;
+ uint64_t ip4_dprt_flag:1;
+ uint64_t ip6_sprt_flag:1;
+ uint64_t ip4_sprt_flag:1;
+ uint64_t ip6_nxth_flag:1;
+ uint64_t ip4_pctl_flag:1;
+ uint64_t ip6_dst_flag:1;
+ uint64_t ip4_dst_flag:1;
+ uint64_t ip6_src_flag:1;
+ uint64_t ip4_src_flag:1;
+ uint64_t tcp6_tag_type:2;
+ uint64_t tcp4_tag_type:2;
+ uint64_t ip6_tag_type:2;
+ uint64_t ip4_tag_type:2;
+ uint64_t non_tag_type:2;
+ uint64_t grp:4;
+ } cn50xx;
+};
+
+union cvmx_spxx_int_reg {
+ uint64_t u64;
+ struct cvmx_spxx_int_reg_s {
+ uint64_t reserved_32_63:32;
+ uint64_t spf:1;
+ uint64_t reserved_12_30:19;
+ uint64_t calerr:1;
+ uint64_t syncerr:1;
+ uint64_t diperr:1;
+ uint64_t tpaovr:1;
+ uint64_t rsverr:1;
+ uint64_t drwnng:1;
+ uint64_t clserr:1;
+ uint64_t spiovr:1;
+ uint64_t reserved_2_3:2;
+ uint64_t abnorm:1;
+ uint64_t prtnxa:1;
+ } s;
+};
+
+union cvmx_spxx_int_msk {
+ uint64_t u64;
+ struct cvmx_spxx_int_msk_s {
+ uint64_t reserved_12_63:52;
+ uint64_t calerr:1;
+ uint64_t syncerr:1;
+ uint64_t diperr:1;
+ uint64_t tpaovr:1;
+ uint64_t rsverr:1;
+ uint64_t drwnng:1;
+ uint64_t clserr:1;
+ uint64_t spiovr:1;
+ uint64_t reserved_2_3:2;
+ uint64_t abnorm:1;
+ uint64_t prtnxa:1;
+ } s;
+};
+
+union cvmx_pow_wq_int {
+ uint64_t u64;
+ struct cvmx_pow_wq_int_s {
+ uint64_t wq_int:16;
+ uint64_t iq_dis:16;
+ uint64_t reserved_32_63:32;
+ } s;
+};
+
+union cvmx_sso_wq_int_thrx {
+ uint64_t u64;
+ struct {
+ uint64_t iq_thr:12;
+ uint64_t reserved_12_13:2;
+ uint64_t ds_thr:12;
+ uint64_t reserved_26_27:2;
+ uint64_t tc_thr:4;
+ uint64_t tc_en:1;
+ uint64_t reserved_33_63:31;
+ } s;
+};
+
+union cvmx_stxx_int_reg {
+ uint64_t u64;
+ struct cvmx_stxx_int_reg_s {
+ uint64_t reserved_9_63:55;
+ uint64_t syncerr:1;
+ uint64_t frmerr:1;
+ uint64_t unxfrm:1;
+ uint64_t nosync:1;
+ uint64_t diperr:1;
+ uint64_t datovr:1;
+ uint64_t ovrbst:1;
+ uint64_t calpar1:1;
+ uint64_t calpar0:1;
+ } s;
+};
+
+union cvmx_stxx_int_msk {
+ uint64_t u64;
+ struct cvmx_stxx_int_msk_s {
+ uint64_t reserved_8_63:56;
+ uint64_t frmerr:1;
+ uint64_t unxfrm:1;
+ uint64_t nosync:1;
+ uint64_t diperr:1;
+ uint64_t datovr:1;
+ uint64_t ovrbst:1;
+ uint64_t calpar1:1;
+ uint64_t calpar0:1;
+ } s;
+};
+
+union cvmx_pow_wq_int_pc {
+ uint64_t u64;
+ struct cvmx_pow_wq_int_pc_s {
+ uint64_t reserved_0_7:8;
+ uint64_t pc_thr:20;
+ uint64_t reserved_28_31:4;
+ uint64_t pc:28;
+ uint64_t reserved_60_63:4;
+ } s;
+};
+
+union cvmx_pow_wq_int_thrx {
+ uint64_t u64;
+ struct cvmx_pow_wq_int_thrx_s {
+ uint64_t reserved_29_63:35;
+ uint64_t tc_en:1;
+ uint64_t tc_thr:4;
+ uint64_t reserved_23_23:1;
+ uint64_t ds_thr:11;
+ uint64_t reserved_11_11:1;
+ uint64_t iq_thr:11;
+ } s;
+ struct cvmx_pow_wq_int_thrx_cn30xx {
+ uint64_t reserved_29_63:35;
+ uint64_t tc_en:1;
+ uint64_t tc_thr:4;
+ uint64_t reserved_18_23:6;
+ uint64_t ds_thr:6;
+ uint64_t reserved_6_11:6;
+ uint64_t iq_thr:6;
+ } cn30xx;
+ struct cvmx_pow_wq_int_thrx_cn31xx {
+ uint64_t reserved_29_63:35;
+ uint64_t tc_en:1;
+ uint64_t tc_thr:4;
+ uint64_t reserved_20_23:4;
+ uint64_t ds_thr:8;
+ uint64_t reserved_8_11:4;
+ uint64_t iq_thr:8;
+ } cn31xx;
+ struct cvmx_pow_wq_int_thrx_cn52xx {
+ uint64_t reserved_29_63:35;
+ uint64_t tc_en:1;
+ uint64_t tc_thr:4;
+ uint64_t reserved_21_23:3;
+ uint64_t ds_thr:9;
+ uint64_t reserved_9_11:3;
+ uint64_t iq_thr:9;
+ } cn52xx;
+ struct cvmx_pow_wq_int_thrx_cn63xx {
+ uint64_t reserved_29_63:35;
+ uint64_t tc_en:1;
+ uint64_t tc_thr:4;
+ uint64_t reserved_22_23:2;
+ uint64_t ds_thr:10;
+ uint64_t reserved_10_11:2;
+ uint64_t iq_thr:10;
+ } cn63xx;
+};
+
+union cvmx_npi_rsl_int_blocks {
+ uint64_t u64;
+ struct cvmx_npi_rsl_int_blocks_s {
+ uint64_t reserved_32_63:32;
+ uint64_t rint_31:1;
+ uint64_t iob:1;
+ uint64_t reserved_28_29:2;
+ uint64_t rint_27:1;
+ uint64_t rint_26:1;
+ uint64_t rint_25:1;
+ uint64_t rint_24:1;
+ uint64_t asx1:1;
+ uint64_t asx0:1;
+ uint64_t rint_21:1;
+ uint64_t pip:1;
+ uint64_t spx1:1;
+ uint64_t spx0:1;
+ uint64_t lmc:1;
+ uint64_t l2c:1;
+ uint64_t rint_15:1;
+ uint64_t reserved_13_14:2;
+ uint64_t pow:1;
+ uint64_t tim:1;
+ uint64_t pko:1;
+ uint64_t ipd:1;
+ uint64_t rint_8:1;
+ uint64_t zip:1;
+ uint64_t dfa:1;
+ uint64_t fpa:1;
+ uint64_t key:1;
+ uint64_t npi:1;
+ uint64_t gmx1:1;
+ uint64_t gmx0:1;
+ uint64_t mio:1;
+ } s;
+ struct cvmx_npi_rsl_int_blocks_cn30xx {
+ uint64_t reserved_32_63:32;
+ uint64_t rint_31:1;
+ uint64_t iob:1;
+ uint64_t rint_29:1;
+ uint64_t rint_28:1;
+ uint64_t rint_27:1;
+ uint64_t rint_26:1;
+ uint64_t rint_25:1;
+ uint64_t rint_24:1;
+ uint64_t asx1:1;
+ uint64_t asx0:1;
+ uint64_t rint_21:1;
+ uint64_t pip:1;
+ uint64_t spx1:1;
+ uint64_t spx0:1;
+ uint64_t lmc:1;
+ uint64_t l2c:1;
+ uint64_t rint_15:1;
+ uint64_t rint_14:1;
+ uint64_t usb:1;
+ uint64_t pow:1;
+ uint64_t tim:1;
+ uint64_t pko:1;
+ uint64_t ipd:1;
+ uint64_t rint_8:1;
+ uint64_t zip:1;
+ uint64_t dfa:1;
+ uint64_t fpa:1;
+ uint64_t key:1;
+ uint64_t npi:1;
+ uint64_t gmx1:1;
+ uint64_t gmx0:1;
+ uint64_t mio:1;
+ } cn30xx;
+ struct cvmx_npi_rsl_int_blocks_cn38xx {
+ uint64_t reserved_32_63:32;
+ uint64_t rint_31:1;
+ uint64_t iob:1;
+ uint64_t rint_29:1;
+ uint64_t rint_28:1;
+ uint64_t rint_27:1;
+ uint64_t rint_26:1;
+ uint64_t rint_25:1;
+ uint64_t rint_24:1;
+ uint64_t asx1:1;
+ uint64_t asx0:1;
+ uint64_t rint_21:1;
+ uint64_t pip:1;
+ uint64_t spx1:1;
+ uint64_t spx0:1;
+ uint64_t lmc:1;
+ uint64_t l2c:1;
+ uint64_t rint_15:1;
+ uint64_t rint_14:1;
+ uint64_t rint_13:1;
+ uint64_t pow:1;
+ uint64_t tim:1;
+ uint64_t pko:1;
+ uint64_t ipd:1;
+ uint64_t rint_8:1;
+ uint64_t zip:1;
+ uint64_t dfa:1;
+ uint64_t fpa:1;
+ uint64_t key:1;
+ uint64_t npi:1;
+ uint64_t gmx1:1;
+ uint64_t gmx0:1;
+ uint64_t mio:1;
+ } cn38xx;
+ struct cvmx_npi_rsl_int_blocks_cn50xx {
+ uint64_t reserved_31_63:33;
+ uint64_t iob:1;
+ uint64_t lmc1:1;
+ uint64_t agl:1;
+ uint64_t reserved_24_27:4;
+ uint64_t asx1:1;
+ uint64_t asx0:1;
+ uint64_t reserved_21_21:1;
+ uint64_t pip:1;
+ uint64_t spx1:1;
+ uint64_t spx0:1;
+ uint64_t lmc:1;
+ uint64_t l2c:1;
+ uint64_t reserved_15_15:1;
+ uint64_t rad:1;
+ uint64_t usb:1;
+ uint64_t pow:1;
+ uint64_t tim:1;
+ uint64_t pko:1;
+ uint64_t ipd:1;
+ uint64_t reserved_8_8:1;
+ uint64_t zip:1;
+ uint64_t dfa:1;
+ uint64_t fpa:1;
+ uint64_t key:1;
+ uint64_t npi:1;
+ uint64_t gmx1:1;
+ uint64_t gmx0:1;
+ uint64_t mio:1;
+ } cn50xx;
+};
+
+typedef union {
+ uint64_t u64;
+ struct {
+ uint64_t total_bytes:16;
+ uint64_t segs:6;
+ uint64_t dontfree:1;
+ uint64_t ignore_i:1;
+ uint64_t ipoffp1:7;
+ uint64_t gather:1;
+ uint64_t rsp:1;
+ uint64_t wqp:1;
+ uint64_t n2:1;
+ uint64_t le:1;
+ uint64_t reg0:11;
+ uint64_t subone0:1;
+ uint64_t reg1:11;
+ uint64_t subone1:1;
+ uint64_t size0:2;
+ uint64_t size1:2;
+ } s;
+} cvmx_pko_command_word0_t;
+
+union cvmx_ciu_timx {
+ uint64_t u64;
+ struct cvmx_ciu_timx_s {
+ uint64_t reserved_37_63:27;
+ uint64_t one_shot:1;
+ uint64_t len:36;
+ } s;
+};
+
+union cvmx_gmxx_rxx_rx_inbnd {
+ uint64_t u64;
+ struct cvmx_gmxx_rxx_rx_inbnd_s {
+ uint64_t status:1;
+ uint64_t speed:2;
+ uint64_t duplex:1;
+ uint64_t reserved_4_63:60;
+ } s;
+};
+
+static inline int32_t cvmx_fau_fetch_and_add32(cvmx_fau_reg_32_t reg,
+ int32_t value)
+{
+ return value;
+}
+
+static inline void cvmx_fau_atomic_add32(cvmx_fau_reg_32_t reg, int32_t value)
+{ }
+
+static inline void cvmx_fau_atomic_write32(cvmx_fau_reg_32_t reg, int32_t value)
+{ }
+
+static inline uint64_t cvmx_scratch_read64(uint64_t address)
+{
+ return 0;
+}
+
+static inline void cvmx_scratch_write64(uint64_t address, uint64_t value)
+{ }
+
+static inline int cvmx_wqe_get_grp(cvmx_wqe_t *work)
+{
+ return 0;
+}
+
+static inline void *cvmx_phys_to_ptr(uint64_t physical_address)
+{
+ return (void *)(physical_address);
+}
+
+static inline uint64_t cvmx_ptr_to_phys(void *ptr)
+{
+ return (unsigned long)ptr;
+}
+
+static inline int cvmx_helper_get_interface_num(int ipd_port)
+{
+ return ipd_port;
+}
+
+static inline int cvmx_helper_get_interface_index_num(int ipd_port)
+{
+ return ipd_port;
+}
+
+static inline void cvmx_fpa_enable(void)
+{ }
+
+static inline uint64_t cvmx_read_csr(uint64_t csr_addr)
+{
+ return 0;
+}
+
+static inline void cvmx_write_csr(uint64_t csr_addr, uint64_t val)
+{ }
+
+static inline int cvmx_helper_setup_red(int pass_thresh, int drop_thresh)
+{
+ return 0;
+}
+
+static inline void *cvmx_fpa_alloc(uint64_t pool)
+{
+ return NULL;
+}
+
+static inline void cvmx_fpa_free(void *ptr, uint64_t pool,
+ uint64_t num_cache_lines)
+{ }
+
+static inline int octeon_is_simulation(void)
+{
+ return 1;
+}
+
+static inline void cvmx_pip_get_port_status(uint64_t port_num, uint64_t clear,
+ cvmx_pip_port_status_t *status)
+{ }
+
+static inline void cvmx_pko_get_port_status(uint64_t port_num, uint64_t clear,
+ cvmx_pko_port_status_t *status)
+{ }
+
+static inline cvmx_helper_interface_mode_t cvmx_helper_interface_get_mode(int
+ interface)
+{
+ return 0;
+}
+
+static inline cvmx_helper_link_info_t cvmx_helper_link_get(int ipd_port)
+{
+ cvmx_helper_link_info_t ret = { .u64 = 0 };
+
+ return ret;
+}
+
+static inline int cvmx_helper_link_set(int ipd_port,
+ cvmx_helper_link_info_t link_info)
+{
+ return 0;
+}
+
+static inline int cvmx_helper_initialize_packet_io_global(void)
+{
+ return 0;
+}
+
+static inline int cvmx_helper_get_number_of_interfaces(void)
+{
+ return 2;
+}
+
+static inline int cvmx_helper_ports_on_interface(int interface)
+{
+ return 1;
+}
+
+static inline int cvmx_helper_get_ipd_port(int interface, int port)
+{
+ return 0;
+}
+
+static inline int cvmx_helper_ipd_and_packet_input_enable(void)
+{
+ return 0;
+}
+
+static inline void cvmx_ipd_disable(void)
+{ }
+
+static inline void cvmx_ipd_free_ptr(void)
+{ }
+
+static inline void cvmx_pko_disable(void)
+{ }
+
+static inline void cvmx_pko_shutdown(void)
+{ }
+
+static inline int cvmx_pko_get_base_queue_per_core(int port, int core)
+{
+ return port;
+}
+
+static inline int cvmx_pko_get_base_queue(int port)
+{
+ return port;
+}
+
+static inline int cvmx_pko_get_num_queues(int port)
+{
+ return port;
+}
+
+static inline unsigned int cvmx_get_core_num(void)
+{
+ return 0;
+}
+
+static inline void cvmx_pow_work_request_async_nocheck(int scr_addr,
+ cvmx_pow_wait_t wait)
+{ }
+
+static inline void cvmx_pow_work_request_async(int scr_addr,
+ cvmx_pow_wait_t wait)
+{ }
+
+static inline cvmx_wqe_t *cvmx_pow_work_response_async(int scr_addr)
+{
+ cvmx_wqe_t *wqe = (void *)(unsigned long)scr_addr;
+
+ return wqe;
+}
+
+static inline cvmx_wqe_t *cvmx_pow_work_request_sync(cvmx_pow_wait_t wait)
+{
+ return (void *)(unsigned long)wait;
+}
+
+static inline int cvmx_spi_restart_interface(int interface,
+ cvmx_spi_mode_t mode, int timeout)
+{
+ return 0;
+}
+
+static inline void cvmx_fau_async_fetch_and_add32(uint64_t scraddr,
+ cvmx_fau_reg_32_t reg,
+ int32_t value)
+{ }
+
+static inline union cvmx_gmxx_rxx_rx_inbnd cvmx_spi4000_check_speed(
+ int interface,
+ int port)
+{
+ union cvmx_gmxx_rxx_rx_inbnd r;
+ r.u64 = 0;
+ return r;
+}
+
+static inline void cvmx_pko_send_packet_prepare(uint64_t port, uint64_t queue,
+ cvmx_pko_lock_t use_locking)
+{ }
+
+static inline cvmx_pko_status_t cvmx_pko_send_packet_finish(uint64_t port,
+ uint64_t queue, cvmx_pko_command_word0_t pko_command,
+ union cvmx_buf_ptr packet, cvmx_pko_lock_t use_locking)
+{
+ cvmx_pko_status_t ret = 0;
+
+ return ret;
+}
+
+static inline void cvmx_wqe_set_port(cvmx_wqe_t *work, int port)
+{ }
+
+static inline void cvmx_wqe_set_qos(cvmx_wqe_t *work, int qos)
+{ }
+
+static inline int cvmx_wqe_get_qos(cvmx_wqe_t *work)
+{
+ return 0;
+}
+
+static inline void cvmx_wqe_set_grp(cvmx_wqe_t *work, int grp)
+{ }
+
+static inline void cvmx_pow_work_submit(cvmx_wqe_t *wqp, uint32_t tag,
+ enum cvmx_pow_tag_type tag_type,
+ uint64_t qos, uint64_t grp)
+{ }
+
+#define CVMX_ASXX_RX_CLK_SETX(a, b) ((a)+(b))
+#define CVMX_ASXX_TX_CLK_SETX(a, b) ((a)+(b))
+#define CVMX_CIU_TIMX(a) (a)
+#define CVMX_GMXX_RXX_ADR_CAM0(a, b) ((a)+(b))
+#define CVMX_GMXX_RXX_ADR_CAM1(a, b) ((a)+(b))
+#define CVMX_GMXX_RXX_ADR_CAM2(a, b) ((a)+(b))
+#define CVMX_GMXX_RXX_ADR_CAM3(a, b) ((a)+(b))
+#define CVMX_GMXX_RXX_ADR_CAM4(a, b) ((a)+(b))
+#define CVMX_GMXX_RXX_ADR_CAM5(a, b) ((a)+(b))
+#define CVMX_GMXX_RXX_FRM_CTL(a, b) ((a)+(b))
+#define CVMX_GMXX_RXX_INT_REG(a, b) ((a)+(b))
+#define CVMX_GMXX_SMACX(a, b) ((a)+(b))
+#define CVMX_PIP_PRT_TAGX(a) (a)
+#define CVMX_POW_PP_GRP_MSKX(a) (a)
+#define CVMX_POW_WQ_INT_THRX(a) (a)
+#define CVMX_SPXX_INT_MSK(a) (a)
+#define CVMX_SPXX_INT_REG(a) (a)
+#define CVMX_SSO_PPX_GRP_MSK(a) (a)
+#define CVMX_SSO_WQ_INT_THRX(a) (a)
+#define CVMX_STXX_INT_MSK(a) (a)
+#define CVMX_STXX_INT_REG(a) (a)
diff --git a/drivers/staging/qlge/Kconfig b/drivers/staging/qlge/Kconfig
new file mode 100644
index 000000000000..a3cb25a3ab80
--- /dev/null
+++ b/drivers/staging/qlge/Kconfig
@@ -0,0 +1,10 @@
+# SPDX-License-Identifier: GPL-2.0
+
+config QLGE
+ tristate "QLogic QLGE 10Gb Ethernet Driver Support"
+ depends on ETHERNET && PCI
+ help
+ This driver supports QLogic ISP8XXX 10Gb Ethernet cards.
+
+ To compile this driver as a module, choose M here. The module will be
+ called qlge.
diff --git a/drivers/net/ethernet/qlogic/qlge/Makefile b/drivers/staging/qlge/Makefile
index 1dc2568e820c..1dc2568e820c 100644
--- a/drivers/net/ethernet/qlogic/qlge/Makefile
+++ b/drivers/staging/qlge/Makefile
diff --git a/drivers/staging/qlge/TODO b/drivers/staging/qlge/TODO
new file mode 100644
index 000000000000..51c509084e80
--- /dev/null
+++ b/drivers/staging/qlge/TODO
@@ -0,0 +1,46 @@
+* reception stalls permanently (until admin intervention) if the rx buffer
+ queues become empty because of allocation failures (ex. under memory
+ pressure)
+* commit 7c734359d350 ("qlge: Size RX buffers based on MTU.", v2.6.33-rc1)
+ introduced dead code in the receive routines, which should be rewritten
+ anyway, by the author's own admission; see the comment above
+ ql_build_rx_skb(). That function is now used exclusively to handle packets
+ that underwent header splitting, but it still contains code to handle the
+ non-split case.
+* truesize accounting is incorrect (ex: a 9000B frame has skb->truesize 10280
+ while containing two frags of order-1 allocations, i.e. >16K)
+* while in that area, using two 8k buffers to store one 9k frame is a poor
+ choice of buffer size.
+* in the "chain of large buffers" case, the driver uses an skb allocated with
+ head room but only puts data in the frags.
+* rename "rx" queues to "completion" queues. Calling tx completion queues "rx
+ queues" is confusing.
+* struct rx_ring is used for rx and tx completions, with some members relevant
+ to one case only
+* there is an inordinate amount of disparate debugging code, most of which is
+ of questionable value. In particular, qlge_dbg.c has hundreds of lines of
+ code bitrotting away in ifdef land (doesn't compile since commit
+ 18c49b91777c ("qlge: do vlan cleanup", v3.1-rc1), 8 years ago).
+* triggering an ethtool regdump will hexdump a 176k struct to dmesg depending
+ on some module parameters.
+* the flow control implementation in firmware is buggy (it sends a flood of
+ pause frames, resets the link, and the device and driver buffer queues become
+ desynchronized); disable it by default
+* some structures are initialized redundantly (ex. memset 0 after
+ alloc_etherdev())
+* the driver has a habit of using runtime checks where compile-time checks are
+ possible (ex. ql_free_rx_buffers(), ql_alloc_rx_buffers()); sketch below
+* reorder struct members to avoid holes if it doesn't impact performance
+* in terms of namespace, the driver uses either qlge_, ql_ (used by
+ other QLogic drivers, with clashes, ex: ql_sem_spinlock) or nothing (with
+ clashes, ex: struct ob_mac_iocb_req). Rename everything to use the "qlge_"
+ prefix.
+* avoid legacy/deprecated APIs (ex. replace pci_dma_*, replace pci_enable_msi,
+ use pci_iomap)
+* some "while" loops could be rewritten with simple "for", ex.
+ ql_wait_reg_rdy(), ql_start_rx_ring())
+* remove duplicate and useless comments
+* fix weird line wrapping (all over, ex. the ql_set_routing_reg() calls in
+ qlge_set_multicast_list()).
+* fix weird indentation (all over, ex. the for loops in qlge_get_stats())
+* fix checkpatch issues
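
One of the TODO items above asks for compile-time checks in place of runtime ones; the standard kernel mechanism for that is BUILD_BUG_ON(), which turns a violated constant expression into a build failure. A minimal illustration -- the struct and the expected size are invented, not taken from qlge:

#include <linux/build_bug.h>
#include <linux/types.h>

struct example_iocb {
	u8 opcode;
	u8 flags;
	u16 len;
} __packed;

static inline void example_check_layout(void)
{
	/* Compiles to nothing; breaks the build if the layout drifts. */
	BUILD_BUG_ON(sizeof(struct example_iocb) != 4);
}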
diff --git a/drivers/net/ethernet/qlogic/qlge/qlge.h b/drivers/staging/qlge/qlge.h
index ad7c5eb8a3b6..ad7c5eb8a3b6 100644
--- a/drivers/net/ethernet/qlogic/qlge/qlge.h
+++ b/drivers/staging/qlge/qlge.h
diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_dbg.c b/drivers/staging/qlge/qlge_dbg.c
index 31389ab8bdf7..31389ab8bdf7 100644
--- a/drivers/net/ethernet/qlogic/qlge/qlge_dbg.c
+++ b/drivers/staging/qlge/qlge_dbg.c
diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_ethtool.c b/drivers/staging/qlge/qlge_ethtool.c
index a6886cc5654c..a6886cc5654c 100644
--- a/drivers/net/ethernet/qlogic/qlge/qlge_ethtool.c
+++ b/drivers/staging/qlge/qlge_ethtool.c
diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_main.c b/drivers/staging/qlge/qlge_main.c
index 6cae33072496..6cae33072496 100644
--- a/drivers/net/ethernet/qlogic/qlge/qlge_main.c
+++ b/drivers/staging/qlge/qlge_main.c
diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_mpi.c b/drivers/staging/qlge/qlge_mpi.c
index 957c72985a06..957c72985a06 100644
--- a/drivers/net/ethernet/qlogic/qlge/qlge_mpi.c
+++ b/drivers/staging/qlge/qlge_mpi.c
diff --git a/drivers/staging/unisys/visornic/visornic_main.c b/drivers/staging/unisys/visornic/visornic_main.c
index 40dd573e73c3..1d1440d43002 100644
--- a/drivers/staging/unisys/visornic/visornic_main.c
+++ b/drivers/staging/unisys/visornic/visornic_main.c
@@ -284,9 +284,9 @@ static int visor_copy_fragsinfo_from_skb(struct sk_buff *skb,
for (frag = 0; frag < numfrags; frag++) {
count = add_physinfo_entries(page_to_pfn(
skb_frag_page(&skb_shinfo(skb)->frags[frag])),
- skb_shinfo(skb)->frags[frag].page_offset,
- skb_shinfo(skb)->frags[frag].size, count,
- frags_max, frags);
+ skb_frag_off(&skb_shinfo(skb)->frags[frag]),
+ skb_frag_size(&skb_shinfo(skb)->frags[frag]),
+ count, frags_max, frags);
/* add_physinfo_entries only returns
* zero if the frags array is out of room
* That should never happen because we
diff --git a/drivers/target/iscsi/cxgbit/cxgbit_target.c b/drivers/target/iscsi/cxgbit/cxgbit_target.c
index 24309d937d8c..fcdc4211e3c2 100644
--- a/drivers/target/iscsi/cxgbit/cxgbit_target.c
+++ b/drivers/target/iscsi/cxgbit/cxgbit_target.c
@@ -899,9 +899,9 @@ cxgbit_handle_immediate_data(struct iscsi_cmd *cmd, struct iscsi_scsi_req *hdr,
skb_frag_t *dfrag = &ssi->frags[pdu_cb->dfrag_idx];
sg_init_table(&ccmd->sg, 1);
- sg_set_page(&ccmd->sg, dfrag->page.p, skb_frag_size(dfrag),
- dfrag->page_offset);
- get_page(dfrag->page.p);
+ sg_set_page(&ccmd->sg, skb_frag_page(dfrag),
+ skb_frag_size(dfrag), skb_frag_off(dfrag));
+ get_page(skb_frag_page(dfrag));
cmd->se_cmd.t_data_sg = &ccmd->sg;
cmd->se_cmd.t_data_nents = 1;
@@ -1403,7 +1403,8 @@ static void cxgbit_lro_skb_dump(struct sk_buff *skb)
pdu_cb->ddigest, pdu_cb->frags);
for (i = 0; i < ssi->nr_frags; i++)
pr_info("skb 0x%p, frag %d, off %u, sz %u.\n",
- skb, i, ssi->frags[i].page_offset, ssi->frags[i].size);
+ skb, i, skb_frag_off(&ssi->frags[i]),
+ skb_frag_size(&ssi->frags[i]));
}
static void cxgbit_lro_hskb_reset(struct cxgbit_sock *csk)
@@ -1447,7 +1448,7 @@ cxgbit_lro_skb_merge(struct cxgbit_sock *csk, struct sk_buff *skb, u8 pdu_idx)
hpdu_cb->frags++;
hpdu_cb->hfrag_idx = hfrag_idx;
- len = hssi->frags[hfrag_idx].size;
+ len = skb_frag_size(&hssi->frags[hfrag_idx]);
hskb->len += len;
hskb->data_len += len;
hskb->truesize += len;
@@ -1467,7 +1468,7 @@ cxgbit_lro_skb_merge(struct cxgbit_sock *csk, struct sk_buff *skb, u8 pdu_idx)
get_page(skb_frag_page(&hssi->frags[dfrag_idx]));
- len += hssi->frags[dfrag_idx].size;
+ len += skb_frag_size(&hssi->frags[dfrag_idx]);
hssi->nr_frags++;
hpdu_cb->frags++;
diff --git a/drivers/vhost/vsock.c b/drivers/vhost/vsock.c
index 6a50e1d0529c..9f57736fe15e 100644
--- a/drivers/vhost/vsock.c
+++ b/drivers/vhost/vsock.c
@@ -102,7 +102,7 @@ vhost_transport_do_send_pkt(struct vhost_vsock *vsock,
struct iov_iter iov_iter;
unsigned out, in;
size_t nbytes;
- size_t len;
+ size_t iov_len, payload_len;
int head;
spin_lock_bh(&vsock->send_pkt_list_lock);
@@ -147,8 +147,24 @@ vhost_transport_do_send_pkt(struct vhost_vsock *vsock,
break;
}
- len = iov_length(&vq->iov[out], in);
- iov_iter_init(&iov_iter, READ, &vq->iov[out], in, len);
+ iov_len = iov_length(&vq->iov[out], in);
+ if (iov_len < sizeof(pkt->hdr)) {
+ virtio_transport_free_pkt(pkt);
+ vq_err(vq, "Buffer len [%zu] too small\n", iov_len);
+ break;
+ }
+
+ iov_iter_init(&iov_iter, READ, &vq->iov[out], in, iov_len);
+ payload_len = pkt->len - pkt->off;
+
+ /* If the packet is greater than the space available in the
+ * buffer, we split it using multiple buffers.
+ */
+ if (payload_len > iov_len - sizeof(pkt->hdr))
+ payload_len = iov_len - sizeof(pkt->hdr);
+
+ /* Set the correct length in the header */
+ pkt->hdr.len = cpu_to_le32(payload_len);
nbytes = copy_to_iter(&pkt->hdr, sizeof(pkt->hdr), &iov_iter);
if (nbytes != sizeof(pkt->hdr)) {
@@ -157,33 +173,47 @@ vhost_transport_do_send_pkt(struct vhost_vsock *vsock,
break;
}
- nbytes = copy_to_iter(pkt->buf, pkt->len, &iov_iter);
- if (nbytes != pkt->len) {
+ nbytes = copy_to_iter(pkt->buf + pkt->off, payload_len,
+ &iov_iter);
+ if (nbytes != payload_len) {
virtio_transport_free_pkt(pkt);
vq_err(vq, "Faulted on copying pkt buf\n");
break;
}
- vhost_add_used(vq, head, sizeof(pkt->hdr) + pkt->len);
+ vhost_add_used(vq, head, sizeof(pkt->hdr) + payload_len);
added = true;
- if (pkt->reply) {
- int val;
-
- val = atomic_dec_return(&vsock->queued_replies);
-
- /* Do we have resources to resume tx processing? */
- if (val + 1 == tx_vq->num)
- restart_tx = true;
- }
-
/* Deliver to monitoring devices all correctly transmitted
* packets.
*/
virtio_transport_deliver_tap_pkt(pkt);
- total_len += pkt->len;
- virtio_transport_free_pkt(pkt);
+ pkt->off += payload_len;
+ total_len += payload_len;
+
+ /* If we didn't send all the payload we can requeue the packet
+ * to send it with the next available buffer.
+ */
+ if (pkt->off < pkt->len) {
+ spin_lock_bh(&vsock->send_pkt_list_lock);
+ list_add(&pkt->list, &vsock->send_pkt_list);
+ spin_unlock_bh(&vsock->send_pkt_list_lock);
+ } else {
+ if (pkt->reply) {
+ int val;
+
+ val = atomic_dec_return(&vsock->queued_replies);
+
+ /* Do we have resources to resume tx
+ * processing?
+ */
+ if (val + 1 == tx_vq->num)
+ restart_tx = true;
+ }
+
+ virtio_transport_free_pkt(pkt);
+ }
} while(likely(!vhost_exceeds_weight(vq, ++pkts, total_len)));
if (added)
vhost_signal(&vsock->dev, vq);
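
The hunk above is the core of the vhost-vsock change: rather than requiring one guest buffer big enough for the whole packet, the TX loop clamps the copy to the space left after the header, advances pkt->off, and requeues the packet while payload remains. Stripped of the vhost plumbing, the splitting step looks roughly like this (struct example_pkt and example_fill_buffer() are illustrative, not the driver's real types):

#include <linux/string.h>
#include <linux/types.h>

struct example_pkt {
	u32 len;	/* total payload size */
	u32 off;	/* bytes already placed in earlier buffers */
	u8 *buf;
};

/* Copy what fits of the remaining payload into one buffer; return true
 * if payload is left over and the packet must be requeued.
 */
static bool example_fill_buffer(struct example_pkt *pkt, u8 *dst,
				size_t space)
{
	size_t payload = pkt->len - pkt->off;

	if (payload > space)
		payload = space;	/* split across multiple buffers */

	memcpy(dst, pkt->buf + pkt->off, payload);
	pkt->off += payload;

	return pkt->off < pkt->len;
}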
@@ -329,6 +359,8 @@ vhost_vsock_alloc_pkt(struct vhost_virtqueue *vq,
return NULL;
}
+ pkt->buf_len = pkt->len;
+
nbytes = copy_from_iter(pkt->buf, pkt->len, &iov_iter);
if (nbytes != pkt->len) {
vq_err(vq, "Expected %u byte payload, got %zu bytes\n",