Diffstat (limited to 'drivers')
-rw-r--r--  drivers/atm/lanai.c | 3
-rw-r--r--  drivers/atm/nicstar.c | 24
-rw-r--r--  drivers/bus/mhi/core/main.c | 11
-rw-r--r--  drivers/crypto/caam/qi.c | 15
-rw-r--r--  drivers/infiniband/core/nldev.c | 10
-rw-r--r--  drivers/infiniband/hw/hfi1/driver.c | 4
-rw-r--r--  drivers/infiniband/hw/hfi1/ipoib.h | 27
-rw-r--r--  drivers/infiniband/hw/hfi1/ipoib_main.c | 15
-rw-r--r--  drivers/infiniband/hw/hfi1/ipoib_tx.c | 2
-rw-r--r--  drivers/isdn/capi/capi.c | 1
-rw-r--r--  drivers/media/pci/ttpci/av7110_av.c | 1
-rw-r--r--  drivers/net/Kconfig | 9
-rw-r--r--  drivers/net/Makefile | 2
-rw-r--r--  drivers/net/bareudp.c | 4
-rw-r--r--  drivers/net/bonding/bond_main.c | 11
-rw-r--r--  drivers/net/bonding/bond_procfs.c | 1
-rw-r--r--  drivers/net/can/at91_can.c | 14
-rw-r--r--  drivers/net/can/c_can/c_can.c | 20
-rw-r--r--  drivers/net/can/cc770/cc770.c | 14
-rw-r--r--  drivers/net/can/dev.c | 16
-rw-r--r--  drivers/net/can/flexcan.c | 161
-rw-r--r--  drivers/net/can/grcan.c | 10
-rw-r--r--  drivers/net/can/ifi_canfd/ifi_canfd.c | 10
-rw-r--r--  drivers/net/can/janz-ican3.c | 20
-rw-r--r--  drivers/net/can/kvaser_pciefd.c | 10
-rw-r--r--  drivers/net/can/m_can/Kconfig | 8
-rw-r--r--  drivers/net/can/m_can/m_can.c | 28
-rw-r--r--  drivers/net/can/m_can/m_can.h | 1
-rw-r--r--  drivers/net/can/m_can/m_can_platform.c | 2
-rw-r--r--  drivers/net/can/m_can/tcan4x5x.c | 28
-rw-r--r--  drivers/net/can/mscan/mscan.c | 20
-rw-r--r--  drivers/net/can/pch_can.c | 14
-rw-r--r--  drivers/net/can/peak_canfd/peak_canfd.c | 16
-rw-r--r--  drivers/net/can/rcar/rcar_can.c | 14
-rw-r--r--  drivers/net/can/rcar/rcar_canfd.c | 12
-rw-r--r--  drivers/net/can/rx-offload.c | 2
-rw-r--r--  drivers/net/can/sja1000/sja1000.c | 16
-rw-r--r--  drivers/net/can/slcan.c | 32
-rw-r--r--  drivers/net/can/softing/softing_fw.c | 2
-rw-r--r--  drivers/net/can/softing/softing_main.c | 14
-rw-r--r--  drivers/net/can/spi/hi311x.c | 20
-rw-r--r--  drivers/net/can/spi/mcp251x.c | 20
-rw-r--r--  drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c | 151
-rw-r--r--  drivers/net/can/spi/mcp251xfd/mcp251xfd.h | 30
-rw-r--r--  drivers/net/can/sun4i_can.c | 10
-rw-r--r--  drivers/net/can/ti_hecc.c | 8
-rw-r--r--  drivers/net/can/usb/Kconfig | 5
-rw-r--r--  drivers/net/can/usb/ems_usb.c | 16
-rw-r--r--  drivers/net/can/usb/esd_usb2.c | 24
-rw-r--r--  drivers/net/can/usb/gs_usb.c | 12
-rw-r--r--  drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c | 22
-rw-r--r--  drivers/net/can/usb/kvaser_usb/kvaser_usb_hydra.c | 61
-rw-r--r--  drivers/net/can/usb/kvaser_usb/kvaser_usb_leaf.c | 22
-rw-r--r--  drivers/net/can/usb/mcba_usb.c | 10
-rw-r--r--  drivers/net/can/usb/peak_usb/pcan_usb.c | 18
-rw-r--r--  drivers/net/can/usb/peak_usb/pcan_usb_core.c | 9
-rw-r--r--  drivers/net/can/usb/peak_usb/pcan_usb_fd.c | 29
-rw-r--r--  drivers/net/can/usb/peak_usb/pcan_usb_pro.c | 14
-rw-r--r--  drivers/net/can/usb/ucan.c | 20
-rw-r--r--  drivers/net/can/usb/usb_8dev.c | 17
-rw-r--r--  drivers/net/can/vxcan.c | 4
-rw-r--r--  drivers/net/can/xilinx_can.c | 16
-rw-r--r--  drivers/net/dsa/Kconfig | 2
-rw-r--r--  drivers/net/dsa/Makefile | 1
-rw-r--r--  drivers/net/dsa/hirschmann/Kconfig | 9
-rw-r--r--  drivers/net/dsa/hirschmann/Makefile | 5
-rw-r--r--  drivers/net/dsa/hirschmann/hellcreek.c | 1339
-rw-r--r--  drivers/net/dsa/hirschmann/hellcreek.h | 286
-rw-r--r--  drivers/net/dsa/hirschmann/hellcreek_hwtstamp.c | 479
-rw-r--r--  drivers/net/dsa/hirschmann/hellcreek_hwtstamp.h | 58
-rw-r--r--  drivers/net/dsa/hirschmann/hellcreek_ptp.c | 452
-rw-r--r--  drivers/net/dsa/hirschmann/hellcreek_ptp.h | 76
-rw-r--r--  drivers/net/dsa/microchip/ksz8795.c | 71
-rw-r--r--  drivers/net/dsa/microchip/ksz8795_reg.h | 10
-rw-r--r--  drivers/net/dsa/microchip/ksz8795_spi.c | 6
-rw-r--r--  drivers/net/dsa/microchip/ksz9477.c | 14
-rw-r--r--  drivers/net/dsa/microchip/ksz9477_spi.c | 6
-rw-r--r--  drivers/net/dsa/microchip/ksz_common.c | 8
-rw-r--r--  drivers/net/dsa/microchip/ksz_common.h | 2
-rw-r--r--  drivers/net/dsa/mt7530.c | 51
-rw-r--r--  drivers/net/dsa/mt7530.h | 12
-rw-r--r--  drivers/net/dsa/mv88e6xxx/chip.c | 66
-rw-r--r--  drivers/net/dsa/mv88e6xxx/chip.h | 10
-rw-r--r--  drivers/net/dsa/mv88e6xxx/devlink.c | 105
-rw-r--r--  drivers/net/dsa/mv88e6xxx/global1.h | 2
-rw-r--r--  drivers/net/dsa/mv88e6xxx/global1_vtu.c | 4
-rw-r--r--  drivers/net/dsa/mv88e6xxx/port.c | 36
-rw-r--r--  drivers/net/dsa/mv88e6xxx/port.h | 3
-rw-r--r--  drivers/net/dsa/mv88e6xxx/serdes.c | 123
-rw-r--r--  drivers/net/dsa/mv88e6xxx/serdes.h | 9
-rw-r--r--  drivers/net/dsa/ocelot/felix.c | 27
-rw-r--r--  drivers/net/dummy.c | 2
-rw-r--r--  drivers/net/ethernet/8390/mac8390.c | 7
-rw-r--r--  drivers/net/ethernet/8390/ne.c | 2
-rw-r--r--  drivers/net/ethernet/8390/ne2k-pci.c | 2
-rw-r--r--  drivers/net/ethernet/amazon/ena/ena_ethtool.c | 1
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/aq_nic.c | 2
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/aq_nic.h | 2
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt.h | 1
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c | 4
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c | 33
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h | 4
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c | 1
-rw-r--r--  drivers/net/ethernet/cadence/macb.h | 44
-rw-r--r--  drivers/net/ethernet/cadence/macb_main.c | 151
-rw-r--r--  drivers/net/ethernet/cavium/liquidio/cn68xx_device.c | 1
-rw-r--r--  drivers/net/ethernet/cavium/liquidio/lio_ethtool.c | 1
-rw-r--r--  drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c | 1
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4.h | 1
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c | 2
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c | 1
-rw-r--r--  drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls.h | 1
-rw-r--r--  drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_cm.c | 3
-rw-r--r--  drivers/net/ethernet/davicom/Kconfig | 2
-rw-r--r--  drivers/net/ethernet/davicom/dm9000.c | 9
-rw-r--r--  drivers/net/ethernet/dec/tulip/de2104x.c | 10
-rw-r--r--  drivers/net/ethernet/dec/tulip/tulip_core.c | 4
-rw-r--r--  drivers/net/ethernet/faraday/ftgmac100.c | 122
-rw-r--r--  drivers/net/ethernet/freescale/dpaa/dpaa_eth.c | 493
-rw-r--r--  drivers/net/ethernet/freescale/dpaa/dpaa_eth.h | 13
-rw-r--r--  drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c | 4
-rw-r--r--  drivers/net/ethernet/freescale/enetc/enetc.c | 51
-rw-r--r--  drivers/net/ethernet/freescale/enetc/enetc.h | 5
-rw-r--r--  drivers/net/ethernet/freescale/enetc/enetc_hw.h | 55
-rw-r--r--  drivers/net/ethernet/freescale/enetc/enetc_pf.c | 10
-rw-r--r--  drivers/net/ethernet/freescale/enetc/enetc_qos.c | 84
-rw-r--r--  drivers/net/ethernet/freescale/enetc/enetc_vf.c | 10
-rw-r--r--  drivers/net/ethernet/freescale/ucc_geth.h | 1
-rw-r--r--  drivers/net/ethernet/google/gve/gve_ethtool.c | 1
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hnae3.h | 10
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c | 63
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3_enet.c | 233
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3_enet.h | 38
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c | 72
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c | 4
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h | 34
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c | 1
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c | 48
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c | 167
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h | 4
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c | 103
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h | 26
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c | 4
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.h | 14
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c | 56
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h | 1
-rw-r--r--  drivers/net/ethernet/huawei/hinic/hinic_devlink.c | 12
-rw-r--r--  drivers/net/ethernet/huawei/hinic/hinic_port.h | 1
-rw-r--r--  drivers/net/ethernet/ibm/ibmvnic.c | 399
-rw-r--r--  drivers/net/ethernet/ibm/ibmvnic.h | 27
-rw-r--r--  drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c | 1
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c | 5
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_xsk.c | 1
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_devlink.c | 17
-rw-r--r--  drivers/net/ethernet/intel/igbvf/netdev.c | 4
-rw-r--r--  drivers/net/ethernet/marvell/mvneta.c | 65
-rw-r--r--  drivers/net/ethernet/marvell/mvpp2/mvpp2.h | 23
-rw-r--r--  drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c | 139
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/Makefile | 3
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/cgx.c | 13
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/cgx.h | 5
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/common.h | 12
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/mbox.h | 223
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/npc.h | 137
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/npc_profile.h | 101
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rvu.c | 377
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rvu.h | 98
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c | 15
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rvu_cpt.c | 233
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c | 775
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c | 507
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c | 785
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c | 1336
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.c | 2
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h | 150
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rvu_struct.h | 17
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/Makefile | 2
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c | 21
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h | 75
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c | 58
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c | 820
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c | 307
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c | 16
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c | 5
-rw-r--r--  drivers/net/ethernet/marvell/sky2.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_netdev.c | 1
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_rx.c | 5
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_tx.c | 13
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/fw_qos.h | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/mlx4_en.h | 23
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/mlx4_stats.h | 18
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/resource_tracker.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/Makefile | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/cmd.c | 3
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/devlink.c | 11
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.c | 3
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/ecpf.c | 76
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/ecpf.h | 3
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/params.c | 34
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/params.h | 4
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c | 22
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c | 8
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_txrx.h | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c | 6
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.h | 4
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c | 5
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 30
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/eq.c | 18
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/esw/acl/helper.c | 5
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/eswitch.c | 29
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/fpga/sdk.h | 8
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c | 57
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/fs_core.c | 17
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/fs_core.h | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/main.c | 19
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h | 4
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c | 12
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/steering/dr_buddy.c | 170
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c | 4
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c | 501
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/steering/dr_matcher.c | 109
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c | 3
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c | 42
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h | 80
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h | 32
-rw-r--r--  drivers/net/ethernet/mellanox/mlxfw/mlxfw_fsm.c | 3
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/core.c | 11
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/core_env.h | 3
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/reg.h | 205
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum.c | 111
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum.h | 8
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.c | 9
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.c | 37
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.h | 10
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c | 8
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.h | 7
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c | 2328
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h | 73
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c | 123
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.c | 8
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/switchx2.c | 1
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/trap.h | 1
-rw-r--r--  drivers/net/ethernet/microchip/lan743x_ethtool.c | 9
-rw-r--r--  drivers/net/ethernet/microchip/lan743x_main.c | 77
-rw-r--r--  drivers/net/ethernet/microchip/lan743x_main.h | 4
-rw-r--r--  drivers/net/ethernet/mscc/ocelot.c | 294
-rw-r--r--  drivers/net/ethernet/mscc/ocelot.h | 31
-rw-r--r--  drivers/net/ethernet/mscc/ocelot_net.c | 38
-rw-r--r--  drivers/net/ethernet/neterion/s2io.c | 41
-rw-r--r--  drivers/net/ethernet/neterion/s2io.h | 4
-rw-r--r--  drivers/net/ethernet/neterion/vxge/vxge-config.c | 2
-rw-r--r--  drivers/net/ethernet/netronome/nfp/crypto/tls.c | 2
-rw-r--r--  drivers/net/ethernet/netronome/nfp/nfp_devlink.c | 2
-rw-r--r--  drivers/net/ethernet/netronome/nfp/nfp_main.c | 21
-rw-r--r--  drivers/net/ethernet/netronome/nfp/nfp_main.h | 2
-rw-r--r--  drivers/net/ethernet/nvidia/forcedeth.c | 9
-rw-r--r--  drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c | 27
-rw-r--r--  drivers/net/ethernet/pensando/ionic/ionic_dev.c | 4
-rw-r--r--  drivers/net/ethernet/pensando/ionic/ionic_dev.h | 4
-rw-r--r--  drivers/net/ethernet/pensando/ionic/ionic_devlink.c | 2
-rw-r--r--  drivers/net/ethernet/pensando/ionic/ionic_devlink.h | 2
-rw-r--r--  drivers/net/ethernet/pensando/ionic/ionic_fw.c | 14
-rw-r--r--  drivers/net/ethernet/pensando/ionic/ionic_lif.c | 122
-rw-r--r--  drivers/net/ethernet/pensando/ionic/ionic_lif.h | 6
-rw-r--r--  drivers/net/ethernet/pensando/ionic/ionic_main.c | 4
-rw-r--r--  drivers/net/ethernet/pensando/ionic/ionic_stats.c | 1
-rw-r--r--  drivers/net/ethernet/pensando/ionic/ionic_txrx.c | 18
-rw-r--r--  drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c | 1
-rw-r--r--  drivers/net/ethernet/realtek/r8169_main.c | 143
-rw-r--r--  drivers/net/ethernet/sfc/bitfield.h | 58
-rw-r--r--  drivers/net/ethernet/sfc/ef100_nic.c | 21
-rw-r--r--  drivers/net/ethernet/sfc/ef100_tx.c | 66
-rw-r--r--  drivers/net/ethernet/smsc/Kconfig | 6
-rw-r--r--  drivers/net/ethernet/smsc/smc911x.c | 17
-rw-r--r--  drivers/net/ethernet/smsc/smc91x.c | 9
-rw-r--r--  drivers/net/ethernet/socionext/netsec.c | 12
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/common.h | 1
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c | 46
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c | 6
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c | 2
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac4.h | 2
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c | 24
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/hwif.h | 3
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac.h | 4
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | 54
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c | 22
-rw-r--r--  drivers/net/ethernet/ti/am65-cpsw-nuss.c | 355
-rw-r--r--  drivers/net/ethernet/ti/am65-cpsw-nuss.h | 5
-rw-r--r--  drivers/net/ethernet/ti/cpsw_ale.c | 41
-rw-r--r--  drivers/net/ethernet/ti/cpsw_ale.h | 1
-rw-r--r--  drivers/net/ethernet/ti/cpsw_switchdev.c | 2
-rw-r--r--  drivers/net/ethernet/ti/tlan.c | 98
-rw-r--r--  drivers/net/ethernet/xilinx/Kconfig | 2
-rw-r--r--  drivers/net/ethernet/xilinx/xilinx_axienet.h | 5
-rw-r--r--  drivers/net/ethernet/xilinx/xilinx_axienet_main.c | 115
-rw-r--r--  drivers/net/ethernet/xilinx/xilinx_axienet_mdio.c | 56
-rw-r--r--  drivers/net/ethernet/xilinx/xilinx_emaclite.c | 14
-rw-r--r--  drivers/net/fddi/skfp/drvfbi.c | 4
-rw-r--r--  drivers/net/fddi/skfp/ecm.c | 7
-rw-r--r--  drivers/net/fddi/skfp/ess.c | 1
-rw-r--r--  drivers/net/fddi/skfp/hwt.c | 4
-rw-r--r--  drivers/net/fddi/skfp/pcmplc.c | 4
-rw-r--r--  drivers/net/fddi/skfp/pmf.c | 4
-rw-r--r--  drivers/net/fddi/skfp/queue.c | 4
-rw-r--r--  drivers/net/fddi/skfp/rmt.c | 4
-rw-r--r--  drivers/net/fddi/skfp/smtdef.c | 4
-rw-r--r--  drivers/net/fddi/skfp/smtinit.c | 4
-rw-r--r--  drivers/net/fddi/skfp/smttimer.c | 4
-rw-r--r--  drivers/net/fddi/skfp/srf.c | 5
-rw-r--r--  drivers/net/geneve.c | 3
-rw-r--r--  drivers/net/gtp.c | 2
-rw-r--r--  drivers/net/hamradio/hdlcdrv.c | 2
-rw-r--r--  drivers/net/hyperv/netvsc_drv.c | 1
-rw-r--r--  drivers/net/hyperv/rndis_filter.c | 1
-rw-r--r--  drivers/net/ieee802154/ca8210.c | 22
-rw-r--r--  drivers/net/ifb.c | 3
-rw-r--r--  drivers/net/ipa/gsi.c | 499
-rw-r--r--  drivers/net/ipa/gsi.h | 52
-rw-r--r--  drivers/net/ipa/gsi_reg.h | 159
-rw-r--r--  drivers/net/ipa/ipa_clock.c | 47
-rw-r--r--  drivers/net/ipa/ipa_clock.h | 5
-rw-r--r--  drivers/net/ipa/ipa_cmd.c | 6
-rw-r--r--  drivers/net/ipa/ipa_cmd.h | 21
-rw-r--r--  drivers/net/ipa/ipa_data-sc7180.c | 25
-rw-r--r--  drivers/net/ipa/ipa_data-sdm845.c | 29
-rw-r--r--  drivers/net/ipa/ipa_data.h | 43
-rw-r--r--  drivers/net/ipa/ipa_endpoint.c | 258
-rw-r--r--  drivers/net/ipa/ipa_endpoint.h | 2
-rw-r--r--  drivers/net/ipa/ipa_interrupt.c | 6
-rw-r--r--  drivers/net/ipa/ipa_interrupt.h | 16
-rw-r--r--  drivers/net/ipa/ipa_main.c | 333
-rw-r--r--  drivers/net/ipa/ipa_mem.c | 10
-rw-r--r--  drivers/net/ipa/ipa_qmi.c | 8
-rw-r--r--  drivers/net/ipa/ipa_qmi_msg.h | 12
-rw-r--r--  drivers/net/ipa/ipa_reg.h | 486
-rw-r--r--  drivers/net/ipa/ipa_table.c | 4
-rw-r--r--  drivers/net/ipa/ipa_uc.c | 46
-rw-r--r--  drivers/net/ipa/ipa_version.h | 1
-rw-r--r--  drivers/net/ipvlan/ipvlan_main.c | 2
-rw-r--r--  drivers/net/macsec.c | 1
-rw-r--r--  drivers/net/macvlan.c | 44
-rw-r--r--  drivers/net/mhi_net.c | 316
-rw-r--r--  drivers/net/mii.c | 20
-rw-r--r--  drivers/net/net_failover.c | 2
-rw-r--r--  drivers/net/netconsole.c | 1
-rw-r--r--  drivers/net/netdevsim/dev.c | 8
-rw-r--r--  drivers/net/netdevsim/ethtool.c | 82
-rw-r--r--  drivers/net/netdevsim/fib.c | 265
-rw-r--r--  drivers/net/netdevsim/netdevsim.h | 10
-rw-r--r--  drivers/net/nlmon.c | 1
-rw-r--r--  drivers/net/phy/adin.c | 195
-rw-r--r--  drivers/net/phy/amd.c | 37
-rw-r--r--  drivers/net/phy/aquantia_main.c | 59
-rw-r--r--  drivers/net/phy/at803x.c | 50
-rw-r--r--  drivers/net/phy/bcm-cygnus.c | 2
-rw-r--r--  drivers/net/phy/bcm-phy-lib.c | 49
-rw-r--r--  drivers/net/phy/bcm-phy-lib.h | 1
-rw-r--r--  drivers/net/phy/bcm54140.c | 46
-rw-r--r--  drivers/net/phy/bcm63xx.c | 20
-rw-r--r--  drivers/net/phy/bcm87xx.c | 50
-rw-r--r--  drivers/net/phy/broadcom.c | 70
-rw-r--r--  drivers/net/phy/cicada.c | 35
-rw-r--r--  drivers/net/phy/davicom.c | 63
-rw-r--r--  drivers/net/phy/dp83640.c | 43
-rw-r--r--  drivers/net/phy/dp83822.c | 54
-rw-r--r--  drivers/net/phy/dp83848.c | 47
-rw-r--r--  drivers/net/phy/dp83867.c | 44
-rw-r--r--  drivers/net/phy/dp83869.c | 42
-rw-r--r--  drivers/net/phy/dp83tc811.c | 53
-rw-r--r--  drivers/net/phy/icplus.c | 58
-rw-r--r--  drivers/net/phy/intel-xway.c | 71
-rw-r--r--  drivers/net/phy/lxt.c | 94
-rw-r--r--  drivers/net/phy/marvell.c | 204
-rw-r--r--  drivers/net/phy/mdio_bus.c | 9
-rw-r--r--  drivers/net/phy/meson-gxl.c | 37
-rw-r--r--  drivers/net/phy/micrel.c | 65
-rw-r--r--  drivers/net/phy/microchip.c | 24
-rw-r--r--  drivers/net/phy/microchip_t1.c | 29
-rw-r--r--  drivers/net/phy/mscc/mscc_main.c | 70
-rw-r--r--  drivers/net/phy/mscc/mscc_ptp.c | 18
-rw-r--r--  drivers/net/phy/mscc/mscc_ptp.h | 5
-rw-r--r--  drivers/net/phy/national.c | 58
-rw-r--r--  drivers/net/phy/nxp-tja11xx.c | 42
-rw-r--r--  drivers/net/phy/phy-c45.c | 2
-rw-r--r--  drivers/net/phy/phy.c | 56
-rw-r--r--  drivers/net/phy/phy_device.c | 39
-rw-r--r--  drivers/net/phy/phy_led_triggers.c | 16
-rw-r--r--  drivers/net/phy/phylink.c | 5
-rw-r--r--  drivers/net/phy/qsemi.c | 42
-rw-r--r--  drivers/net/phy/realtek.c | 181
-rw-r--r--  drivers/net/phy/smsc.c | 55
-rw-r--r--  drivers/net/phy/ste10Xp.c | 53
-rw-r--r--  drivers/net/phy/vitesse.c | 61
-rw-r--r--  drivers/net/team/team.c | 10
-rw-r--r--  drivers/net/tun.c | 132
-rw-r--r--  drivers/net/usb/Kconfig | 9
-rw-r--r--  drivers/net/usb/Makefile | 1
-rw-r--r--  drivers/net/usb/aqc111.c | 2
-rw-r--r--  drivers/net/usb/asix_devices.c | 6
-rw-r--r--  drivers/net/usb/ax88172a.c | 2
-rw-r--r--  drivers/net/usb/ax88179_178a.c | 2
-rw-r--r--  drivers/net/usb/cdc_mbim.c | 2
-rw-r--r--  drivers/net/usb/cdc_ncm.c | 4
-rw-r--r--  drivers/net/usb/dm9601.c | 2
-rw-r--r--  drivers/net/usb/int51x1.c | 2
-rw-r--r--  drivers/net/usb/lan78xx.c | 168
-rw-r--r--  drivers/net/usb/mcs7830.c | 2
-rw-r--r--  drivers/net/usb/qmi_wwan.c | 41
-rw-r--r--  drivers/net/usb/r8152.c | 40
-rw-r--r--  drivers/net/usb/r8153_ecm.c | 162
-rw-r--r--  drivers/net/usb/rndis_host.c | 2
-rw-r--r--  drivers/net/usb/sierra_net.c | 2
-rw-r--r--  drivers/net/usb/smsc75xx.c | 2
-rw-r--r--  drivers/net/usb/smsc95xx.c | 2
-rw-r--r--  drivers/net/usb/sr9700.c | 2
-rw-r--r--  drivers/net/usb/sr9800.c | 2
-rw-r--r--  drivers/net/usb/usbnet.c | 23
-rw-r--r--  drivers/net/veth.c | 4
-rw-r--r--  drivers/net/vrf.c | 1
-rw-r--r--  drivers/net/vsockmon.c | 1
-rw-r--r--  drivers/net/vxlan.c | 29
-rw-r--r--  drivers/net/wan/Kconfig | 60
-rw-r--r--  drivers/net/wan/Makefile | 3
-rw-r--r--  drivers/net/wan/dlci.c | 541
-rw-r--r--  drivers/net/wan/hdlc_fr.c | 118
-rw-r--r--  drivers/net/wan/lmc/lmc_main.c | 9
-rw-r--r--  drivers/net/wan/pci200syn.c | 2
-rw-r--r--  drivers/net/wan/sdla.c | 1655
-rw-r--r--  drivers/net/wan/x25_asy.c | 836
-rw-r--r--  drivers/net/wan/x25_asy.h | 46
-rw-r--r--  drivers/net/wimax/Kconfig | 18
-rw-r--r--  drivers/net/wimax/Makefile | 2
-rw-r--r--  drivers/net/wireguard/device.c | 2
-rw-r--r--  drivers/net/wireless/Kconfig | 13
-rw-r--r--  drivers/net/wireless/ath/ath9k/ath9k.h | 1
-rw-r--r--  drivers/net/wireless/ath/ath9k/debug.c | 4
-rw-r--r--  drivers/net/wireless/ath/ath9k/init.c | 19
-rw-r--r--  drivers/net/wireless/ath/ath9k/main.c | 5
-rw-r--r--  drivers/net/wireless/ath/carl9170/mac.c | 4
-rw-r--r--  drivers/net/wireless/ath/carl9170/main.c | 1
-rw-r--r--  drivers/net/wireless/broadcom/b43/main.c | 6
-rw-r--r--  drivers/net/wireless/broadcom/b43legacy/main.c | 6
-rw-r--r--  drivers/net/wireless/quantenna/qtnfmac/core.c | 78
-rw-r--r--  drivers/net/wireless/quantenna/qtnfmac/core.h | 4
-rw-r--r--  drivers/net/wireless/quantenna/qtnfmac/pcie/pearl_pcie.c | 4
-rw-r--r--  drivers/net/wireless/quantenna/qtnfmac/pcie/topaz_pcie.c | 4
-rw-r--r--  drivers/net/wireless/ralink/rt2x00/rt2x00config.c | 1
-rw-r--r--  drivers/net/wireless/ralink/rt2x00/rt2x00dev.c | 6
-rw-r--r--  drivers/net/wireless/ralink/rt2x00/rt2x00mac.c | 3
-rw-r--r--  drivers/net/wireless/rndis_wlan.c | 2
-rw-r--r--  drivers/net/xen-netfront.c | 3
-rw-r--r--  drivers/nfc/nxp-nci/i2c.c | 2
-rw-r--r--  drivers/nfc/s3fwrn5/Kconfig | 12
-rw-r--r--  drivers/nfc/s3fwrn5/Makefile | 4
-rw-r--r--  drivers/nfc/s3fwrn5/core.c | 3
-rw-r--r--  drivers/nfc/s3fwrn5/firmware.c | 2
-rw-r--r--  drivers/nfc/s3fwrn5/i2c.c | 119
-rw-r--r--  drivers/nfc/s3fwrn5/phy_common.c | 75
-rw-r--r--  drivers/nfc/s3fwrn5/phy_common.h | 37
-rw-r--r--  drivers/nfc/s3fwrn5/s3fwrn5.h | 11
-rw-r--r--  drivers/nfc/s3fwrn5/uart.c | 196
-rw-r--r--  drivers/ptp/ptp_idt82p33.c | 274
-rw-r--r--  drivers/ptp/ptp_idt82p33.h | 3
-rw-r--r--  drivers/ptp/ptp_ines.c | 19
-rw-r--r--  drivers/s390/net/ctcm_fsms.c | 15
-rw-r--r--  drivers/s390/net/ctcm_main.c | 68
-rw-r--r--  drivers/s390/net/ctcm_main.h | 5
-rw-r--r--  drivers/s390/net/ctcm_mpc.c | 39
-rw-r--r--  drivers/s390/net/qeth_core.h | 22
-rw-r--r--  drivers/s390/net/qeth_core_main.c | 223
-rw-r--r--  drivers/s390/net/qeth_core_mpc.h | 40
-rw-r--r--  drivers/s390/net/qeth_ethtool.c | 243
-rw-r--r--  drivers/s390/net/qeth_l2_main.c | 33
-rw-r--r--  drivers/s390/net/qeth_l3_main.c | 5
-rw-r--r--  drivers/scsi/bnx2fc/bnx2fc_fcoe.c | 2
-rw-r--r--  drivers/scsi/fcoe/fcoe_transport.c | 1
-rw-r--r--  drivers/soc/fsl/qbman/qman.c | 12
-rw-r--r--  drivers/soc/fsl/qbman/qman_test_api.c | 6
-rw-r--r--  drivers/soc/fsl/qbman/qman_test_stash.c | 6
-rw-r--r--  drivers/staging/Kconfig | 2
-rw-r--r--  drivers/staging/Makefile | 1
-rw-r--r--  drivers/staging/fsl-dpaa2/ethsw/ethsw-ethtool.c | 2
-rw-r--r--  drivers/staging/wimax/Documentation/i2400m.rst | 283
-rw-r--r--  drivers/staging/wimax/Documentation/index.rst | 19
-rw-r--r--  drivers/staging/wimax/Documentation/wimax.rst | 89
-rw-r--r--  drivers/staging/wimax/Kconfig | 46
-rw-r--r--  drivers/staging/wimax/Makefile | 15
-rw-r--r--  drivers/staging/wimax/TODO | 18
-rw-r--r--  drivers/staging/wimax/debug-levels.h | 29
-rw-r--r--  drivers/staging/wimax/debugfs.c | 38
-rw-r--r--  drivers/staging/wimax/i2400m/Kconfig (renamed from drivers/net/wimax/i2400m/Kconfig) | 0
-rw-r--r--  drivers/staging/wimax/i2400m/Makefile (renamed from drivers/net/wimax/i2400m/Makefile) | 0
-rw-r--r--  drivers/staging/wimax/i2400m/control.c (renamed from drivers/net/wimax/i2400m/control.c) | 2
-rw-r--r--  drivers/staging/wimax/i2400m/debug-levels.h (renamed from drivers/net/wimax/i2400m/debug-levels.h) | 2
-rw-r--r--  drivers/staging/wimax/i2400m/debugfs.c (renamed from drivers/net/wimax/i2400m/debugfs.c) | 0
-rw-r--r--  drivers/staging/wimax/i2400m/driver.c (renamed from drivers/net/wimax/i2400m/driver.c) | 2
-rw-r--r--  drivers/staging/wimax/i2400m/fw.c (renamed from drivers/net/wimax/i2400m/fw.c) | 0
-rw-r--r--  drivers/staging/wimax/i2400m/i2400m-usb.h (renamed from drivers/net/wimax/i2400m/i2400m-usb.h) | 0
-rw-r--r--  drivers/staging/wimax/i2400m/i2400m.h (renamed from drivers/net/wimax/i2400m/i2400m.h) | 4
-rw-r--r--  drivers/staging/wimax/i2400m/linux-wimax-i2400m.h | 572
-rw-r--r--  drivers/staging/wimax/i2400m/netdev.c (renamed from drivers/net/wimax/i2400m/netdev.c) | 0
-rw-r--r--  drivers/staging/wimax/i2400m/op-rfkill.c (renamed from drivers/net/wimax/i2400m/op-rfkill.c) | 2
-rw-r--r--  drivers/staging/wimax/i2400m/rx.c (renamed from drivers/net/wimax/i2400m/rx.c) | 0
-rw-r--r--  drivers/staging/wimax/i2400m/sysfs.c (renamed from drivers/net/wimax/i2400m/sysfs.c) | 0
-rw-r--r--  drivers/staging/wimax/i2400m/tx.c (renamed from drivers/net/wimax/i2400m/tx.c) | 0
-rw-r--r--  drivers/staging/wimax/i2400m/usb-debug-levels.h (renamed from drivers/net/wimax/i2400m/usb-debug-levels.h) | 2
-rw-r--r--  drivers/staging/wimax/i2400m/usb-fw.c (renamed from drivers/net/wimax/i2400m/usb-fw.c) | 0
-rw-r--r--  drivers/staging/wimax/i2400m/usb-notif.c (renamed from drivers/net/wimax/i2400m/usb-notif.c) | 0
-rw-r--r--  drivers/staging/wimax/i2400m/usb-rx.c (renamed from drivers/net/wimax/i2400m/usb-rx.c) | 0
-rw-r--r--  drivers/staging/wimax/i2400m/usb-tx.c (renamed from drivers/net/wimax/i2400m/usb-tx.c) | 0
-rw-r--r--  drivers/staging/wimax/i2400m/usb.c (renamed from drivers/net/wimax/i2400m/usb.c) | 3
-rw-r--r--  drivers/staging/wimax/id-table.c | 130
-rw-r--r--  drivers/staging/wimax/linux-wimax-debug.h | 491
-rw-r--r--  drivers/staging/wimax/linux-wimax.h | 239
-rw-r--r--  drivers/staging/wimax/net-wimax.h | 503
-rw-r--r--  drivers/staging/wimax/op-msg.c | 391
-rw-r--r--  drivers/staging/wimax/op-reset.c | 108
-rw-r--r--  drivers/staging/wimax/op-rfkill.c | 431
-rw-r--r--  drivers/staging/wimax/op-state-get.c | 52
-rw-r--r--  drivers/staging/wimax/stack.c | 616
-rw-r--r--  drivers/staging/wimax/wimax-internal.h | 85
521 files changed, 25026 insertions, 9406 deletions
diff --git a/drivers/atm/lanai.c b/drivers/atm/lanai.c
index ac811cfa6843..d7277c26e423 100644
--- a/drivers/atm/lanai.c
+++ b/drivers/atm/lanai.c
@@ -765,8 +765,7 @@ static void lanai_shutdown_tx_vci(struct lanai_dev *lanai,
struct sk_buff *skb;
unsigned long flags, timeout;
int read, write, lastread = -1;
- APRINTK(!in_interrupt(),
- "lanai_shutdown_tx_vci called w/o process context!\n");
+
if (lvcc->vbase == NULL) /* We were never bound to a VCI */
return;
/* 15.2.1 - wait for queue to drain */
diff --git a/drivers/atm/nicstar.c b/drivers/atm/nicstar.c
index 09ad73361879..5c7e4df159b9 100644
--- a/drivers/atm/nicstar.c
+++ b/drivers/atm/nicstar.c
@@ -130,8 +130,9 @@ static int ns_open(struct atm_vcc *vcc);
static void ns_close(struct atm_vcc *vcc);
static void fill_tst(ns_dev * card, int n, vc_map * vc);
static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb);
+static int ns_send_bh(struct atm_vcc *vcc, struct sk_buff *skb);
static int push_scqe(ns_dev * card, vc_map * vc, scq_info * scq, ns_scqe * tbd,
- struct sk_buff *skb);
+ struct sk_buff *skb, bool may_sleep);
static void process_tsq(ns_dev * card);
static void drain_scq(ns_dev * card, scq_info * scq, int pos);
static void process_rsq(ns_dev * card);
@@ -160,6 +161,7 @@ static const struct atmdev_ops atm_ops = {
.close = ns_close,
.ioctl = ns_ioctl,
.send = ns_send,
+ .send_bh = ns_send_bh,
.phy_put = ns_phy_put,
.phy_get = ns_phy_get,
.proc_read = ns_proc_read,
@@ -1620,7 +1622,7 @@ static void fill_tst(ns_dev * card, int n, vc_map * vc)
card->tst_addr = new_tst;
}
-static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
+static int _ns_send(struct atm_vcc *vcc, struct sk_buff *skb, bool may_sleep)
{
ns_dev *card;
vc_map *vc;
@@ -1704,7 +1706,7 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
scq = card->scq0;
}
- if (push_scqe(card, vc, scq, &scqe, skb) != 0) {
+ if (push_scqe(card, vc, scq, &scqe, skb, may_sleep) != 0) {
atomic_inc(&vcc->stats->tx_err);
dma_unmap_single(&card->pcidev->dev, NS_PRV_DMA(skb), skb->len,
DMA_TO_DEVICE);
@@ -1716,8 +1718,18 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
return 0;
}
+static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
+{
+ return _ns_send(vcc, skb, true);
+}
+
+static int ns_send_bh(struct atm_vcc *vcc, struct sk_buff *skb)
+{
+ return _ns_send(vcc, skb, false);
+}
+
static int push_scqe(ns_dev * card, vc_map * vc, scq_info * scq, ns_scqe * tbd,
- struct sk_buff *skb)
+ struct sk_buff *skb, bool may_sleep)
{
unsigned long flags;
ns_scqe tsr;
@@ -1728,7 +1740,7 @@ static int push_scqe(ns_dev * card, vc_map * vc, scq_info * scq, ns_scqe * tbd,
spin_lock_irqsave(&scq->lock, flags);
while (scq->tail == scq->next) {
- if (in_interrupt()) {
+ if (!may_sleep) {
spin_unlock_irqrestore(&scq->lock, flags);
printk("nicstar%d: Error pushing TBD.\n", card->index);
return 1;
@@ -1773,7 +1785,7 @@ static int push_scqe(ns_dev * card, vc_map * vc, scq_info * scq, ns_scqe * tbd,
int has_run = 0;
while (scq->tail == scq->next) {
- if (in_interrupt()) {
+ if (!may_sleep) {
data = scq_virt_to_bus(scq, scq->next);
ns_write_sram(card, scq->scd, &data, 1);
spin_unlock_irqrestore(&scq->lock, flags);
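
The nicstar conversion above is part of the tree-wide in_interrupt() cleanup: the new .send/.send_bh split lets the ATM core state the calling context explicitly, and push_scqe() receives it as a parameter instead of guessing with in_interrupt(). A minimal sketch of the pattern, with invented names (my_dev, my_push, queue_full, enqueue are placeholders, not nicstar code):

/* The caller states whether sleeping is allowed; the helper no longer
 * probes its context with in_interrupt().
 */
static int my_push(struct my_dev *mdev, struct sk_buff *skb, bool may_sleep)
{
	while (queue_full(mdev)) {
		if (!may_sleep)
			return -EBUSY;		/* atomic caller: fail fast */
		wait_event(mdev->wq, !queue_full(mdev));	/* process context */
	}
	return enqueue(mdev, skb);
}

static int my_send(struct atm_vcc *vcc, struct sk_buff *skb)
{
	return my_push(vcc->dev->dev_data, skb, true);	/* process context */
}

static int my_send_bh(struct atm_vcc *vcc, struct sk_buff *skb)
{
	return my_push(vcc->dev->dev_data, skb, false);	/* softirq */
}
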
diff --git a/drivers/bus/mhi/core/main.c b/drivers/bus/mhi/core/main.c
index 2cff5ddff225..ba9e721d61b7 100644
--- a/drivers/bus/mhi/core/main.c
+++ b/drivers/bus/mhi/core/main.c
@@ -1165,6 +1165,17 @@ int mhi_queue_buf(struct mhi_device *mhi_dev, enum dma_data_direction dir,
}
EXPORT_SYMBOL_GPL(mhi_queue_buf);
+bool mhi_queue_is_full(struct mhi_device *mhi_dev, enum dma_data_direction dir)
+{
+ struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
+ struct mhi_chan *mhi_chan = (dir == DMA_TO_DEVICE) ?
+ mhi_dev->ul_chan : mhi_dev->dl_chan;
+ struct mhi_ring *tre_ring = &mhi_chan->tre_ring;
+
+ return mhi_is_ring_full(mhi_cntrl, tre_ring);
+}
+EXPORT_SYMBOL_GPL(mhi_queue_is_full);
+
int mhi_send_cmd(struct mhi_controller *mhi_cntrl,
struct mhi_chan *mhi_chan,
enum mhi_cmd_type cmd)
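
The new mhi_queue_is_full() helper lets MHI client drivers apply backpressure before a queue attempt fails; the mhi_net driver added elsewhere in this series is its first user. A rough sketch of a transmit path built on it (my_ndo_xmit and the priv layout are invented for illustration; mhi_queue_skb() is the existing MHI client API):

static netdev_tx_t my_ndo_xmit(struct sk_buff *skb, struct net_device *ndev)
{
	struct mhi_device *mdev = *(struct mhi_device **)netdev_priv(ndev);

	if (mhi_queue_skb(mdev, DMA_TO_DEVICE, skb, skb->len, MHI_EOT)) {
		dev_kfree_skb_any(skb);
		ndev->stats.tx_dropped++;
		return NETDEV_TX_OK;
	}

	/* Stop the queue while the UL ring is out of descriptors; the
	 * ul_callback would restart it once transfers complete.
	 */
	if (mhi_queue_is_full(mdev, DMA_TO_DEVICE))
		netif_stop_queue(ndev);

	return NETDEV_TX_OK;
}
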
diff --git a/drivers/crypto/caam/qi.c b/drivers/crypto/caam/qi.c
index ec53528d8205..8163f5df8ebf 100644
--- a/drivers/crypto/caam/qi.c
+++ b/drivers/crypto/caam/qi.c
@@ -545,14 +545,10 @@ static void cgr_cb(struct qman_portal *qm, struct qman_cgr *cgr, int congested)
}
}
-static int caam_qi_napi_schedule(struct qman_portal *p, struct caam_napi *np)
+static int caam_qi_napi_schedule(struct qman_portal *p, struct caam_napi *np,
+ bool sched_napi)
{
- /*
- * In case of threaded ISR, for RT kernels in_irq() does not return
- * appropriate value, so use in_serving_softirq to distinguish between
- * softirq and irq contexts.
- */
- if (unlikely(in_irq() || !in_serving_softirq())) {
+ if (sched_napi) {
/* Disable QMan IRQ source and invoke NAPI */
qman_p_irqsource_remove(p, QM_PIRQ_DQRI);
np->p = p;
@@ -564,7 +560,8 @@ static int caam_qi_napi_schedule(struct qman_portal *p, struct caam_napi *np)
static enum qman_cb_dqrr_result caam_rsp_fq_dqrr_cb(struct qman_portal *p,
struct qman_fq *rsp_fq,
- const struct qm_dqrr_entry *dqrr)
+ const struct qm_dqrr_entry *dqrr,
+ bool sched_napi)
{
struct caam_napi *caam_napi = raw_cpu_ptr(&pcpu_qipriv.caam_napi);
struct caam_drv_req *drv_req;
@@ -573,7 +570,7 @@ static enum qman_cb_dqrr_result caam_rsp_fq_dqrr_cb(struct qman_portal *p,
struct caam_drv_private *priv = dev_get_drvdata(qidev);
u32 status;
- if (caam_qi_napi_schedule(p, caam_napi))
+ if (caam_qi_napi_schedule(p, caam_napi, sched_napi))
return qman_cb_dqrr_stop;
fd = &dqrr->fd;
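
The caam change is the same cleanup one layer down: the QMan portal now passes sched_napi into the DQRR callback, telling it whether it runs from hard IRQ (and should defer to NAPI) instead of the callback inferring that from in_irq()/in_serving_softirq(). A schematic callback under the new signature (my_dqrr_cb and my_napi are placeholders):

static struct napi_struct my_napi;

static enum qman_cb_dqrr_result my_dqrr_cb(struct qman_portal *p,
					   struct qman_fq *fq,
					   const struct qm_dqrr_entry *dqrr,
					   bool sched_napi)
{
	if (sched_napi) {
		/* hard IRQ: mask the DQRR source and let NAPI drain */
		qman_p_irqsource_remove(p, QM_PIRQ_DQRI);
		napi_schedule(&my_napi);
		return qman_cb_dqrr_stop;
	}
	/* already in softirq/NAPI context: consume inline */
	return qman_cb_dqrr_consume;
}
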
diff --git a/drivers/infiniband/core/nldev.c b/drivers/infiniband/core/nldev.c
index 12d29d54a081..08366e254b1d 100644
--- a/drivers/infiniband/core/nldev.c
+++ b/drivers/infiniband/core/nldev.c
@@ -932,7 +932,7 @@ static int nldev_set_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
if (tb[RDMA_NLDEV_ATTR_DEV_NAME]) {
char name[IB_DEVICE_NAME_MAX] = {};
- nla_strlcpy(name, tb[RDMA_NLDEV_ATTR_DEV_NAME],
+ nla_strscpy(name, tb[RDMA_NLDEV_ATTR_DEV_NAME],
IB_DEVICE_NAME_MAX);
if (strlen(name) == 0) {
err = -EINVAL;
@@ -1529,13 +1529,13 @@ static int nldev_newlink(struct sk_buff *skb, struct nlmsghdr *nlh,
!tb[RDMA_NLDEV_ATTR_LINK_TYPE] || !tb[RDMA_NLDEV_ATTR_NDEV_NAME])
return -EINVAL;
- nla_strlcpy(ibdev_name, tb[RDMA_NLDEV_ATTR_DEV_NAME],
+ nla_strscpy(ibdev_name, tb[RDMA_NLDEV_ATTR_DEV_NAME],
sizeof(ibdev_name));
if (strchr(ibdev_name, '%') || strlen(ibdev_name) == 0)
return -EINVAL;
- nla_strlcpy(type, tb[RDMA_NLDEV_ATTR_LINK_TYPE], sizeof(type));
- nla_strlcpy(ndev_name, tb[RDMA_NLDEV_ATTR_NDEV_NAME],
+ nla_strscpy(type, tb[RDMA_NLDEV_ATTR_LINK_TYPE], sizeof(type));
+ nla_strscpy(ndev_name, tb[RDMA_NLDEV_ATTR_NDEV_NAME],
sizeof(ndev_name));
ndev = dev_get_by_name(sock_net(skb->sk), ndev_name);
@@ -1602,7 +1602,7 @@ static int nldev_get_chardev(struct sk_buff *skb, struct nlmsghdr *nlh,
if (err || !tb[RDMA_NLDEV_ATTR_CHARDEV_TYPE])
return -EINVAL;
- nla_strlcpy(client_name, tb[RDMA_NLDEV_ATTR_CHARDEV_TYPE],
+ nla_strscpy(client_name, tb[RDMA_NLDEV_ATTR_CHARDEV_TYPE],
sizeof(client_name));
if (tb[RDMA_NLDEV_ATTR_DEV_INDEX]) {
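
The nla_strlcpy() -> nla_strscpy() conversions are mostly mechanical, but the semantics differ usefully: nla_strscpy() always NUL-terminates and returns either the number of bytes copied or -E2BIG on truncation, instead of the strlcpy-style "length the source would have needed". That allows direct error checking, roughly like this (attr stands in for a received nlattr):

char name[IB_DEVICE_NAME_MAX];
ssize_t len;

len = nla_strscpy(name, attr, sizeof(name));
if (len == -E2BIG)
	return -EINVAL;	/* attribute too long for the buffer */
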
diff --git a/drivers/infiniband/hw/hfi1/driver.c b/drivers/infiniband/hw/hfi1/driver.c
index a40701a6e1b6..0b64aa87ab73 100644
--- a/drivers/infiniband/hw/hfi1/driver.c
+++ b/drivers/infiniband/hw/hfi1/driver.c
@@ -1686,7 +1686,6 @@ static void hfi1_ipoib_ib_rcv(struct hfi1_packet *packet)
u32 extra_bytes;
u32 tlen, qpnum;
bool do_work, do_cnp;
- struct hfi1_ipoib_dev_priv *priv;
trace_hfi1_rcvhdr(packet);
@@ -1734,8 +1733,7 @@ static void hfi1_ipoib_ib_rcv(struct hfi1_packet *packet)
if (unlikely(!skb))
goto drop;
- priv = hfi1_ipoib_priv(netdev);
- hfi1_ipoib_update_rx_netstats(priv, 1, skb->len);
+ dev_sw_netstats_rx_add(netdev, skb->len);
skb->dev = netdev;
skb->pkt_type = PACKET_HOST;
diff --git a/drivers/infiniband/hw/hfi1/ipoib.h b/drivers/infiniband/hw/hfi1/ipoib.h
index b8c9d0a003fb..f650cac9d424 100644
--- a/drivers/infiniband/hw/hfi1/ipoib.h
+++ b/drivers/infiniband/hw/hfi1/ipoib.h
@@ -110,7 +110,6 @@ struct hfi1_ipoib_dev_priv {
const struct net_device_ops *netdev_ops;
struct rvt_qp *qp;
- struct pcpu_sw_netstats __percpu *netstats;
};
/* hfi1 ipoib rdma netdev's private data structure */
@@ -126,32 +125,6 @@ hfi1_ipoib_priv(const struct net_device *dev)
return &((struct hfi1_ipoib_rdma_netdev *)netdev_priv(dev))->dev_priv;
}
-static inline void
-hfi1_ipoib_update_rx_netstats(struct hfi1_ipoib_dev_priv *priv,
- u64 packets,
- u64 bytes)
-{
- struct pcpu_sw_netstats *netstats = this_cpu_ptr(priv->netstats);
-
- u64_stats_update_begin(&netstats->syncp);
- netstats->rx_packets += packets;
- netstats->rx_bytes += bytes;
- u64_stats_update_end(&netstats->syncp);
-}
-
-static inline void
-hfi1_ipoib_update_tx_netstats(struct hfi1_ipoib_dev_priv *priv,
- u64 packets,
- u64 bytes)
-{
- struct pcpu_sw_netstats *netstats = this_cpu_ptr(priv->netstats);
-
- u64_stats_update_begin(&netstats->syncp);
- netstats->tx_packets += packets;
- netstats->tx_bytes += bytes;
- u64_stats_update_end(&netstats->syncp);
-}
-
int hfi1_ipoib_send_dma(struct net_device *dev,
struct sk_buff *skb,
struct ib_ah *address,
diff --git a/drivers/infiniband/hw/hfi1/ipoib_main.c b/drivers/infiniband/hw/hfi1/ipoib_main.c
index 9f71b9d706bd..3242290eb6a7 100644
--- a/drivers/infiniband/hw/hfi1/ipoib_main.c
+++ b/drivers/infiniband/hw/hfi1/ipoib_main.c
@@ -21,7 +21,7 @@ static int hfi1_ipoib_dev_init(struct net_device *dev)
struct hfi1_ipoib_dev_priv *priv = hfi1_ipoib_priv(dev);
int ret;
- priv->netstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
+ dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
ret = priv->netdev_ops->ndo_init(dev);
if (ret)
@@ -93,21 +93,12 @@ static int hfi1_ipoib_dev_stop(struct net_device *dev)
return priv->netdev_ops->ndo_stop(dev);
}
-static void hfi1_ipoib_dev_get_stats64(struct net_device *dev,
- struct rtnl_link_stats64 *storage)
-{
- struct hfi1_ipoib_dev_priv *priv = hfi1_ipoib_priv(dev);
-
- netdev_stats_to_stats64(storage, &dev->stats);
- dev_fetch_sw_netstats(storage, priv->netstats);
-}
-
static const struct net_device_ops hfi1_ipoib_netdev_ops = {
.ndo_init = hfi1_ipoib_dev_init,
.ndo_uninit = hfi1_ipoib_dev_uninit,
.ndo_open = hfi1_ipoib_dev_open,
.ndo_stop = hfi1_ipoib_dev_stop,
- .ndo_get_stats64 = hfi1_ipoib_dev_get_stats64,
+ .ndo_get_stats64 = dev_get_tstats64,
};
static int hfi1_ipoib_send(struct net_device *dev,
@@ -182,7 +173,7 @@ static void hfi1_ipoib_netdev_dtor(struct net_device *dev)
hfi1_ipoib_txreq_deinit(priv);
hfi1_ipoib_rxq_deinit(priv->netdev);
- free_percpu(priv->netstats);
+ free_percpu(dev->tstats);
}
static void hfi1_ipoib_free_rdma_netdev(struct net_device *dev)
diff --git a/drivers/infiniband/hw/hfi1/ipoib_tx.c b/drivers/infiniband/hw/hfi1/ipoib_tx.c
index 9df292b51a05..edd4eeac8dd1 100644
--- a/drivers/infiniband/hw/hfi1/ipoib_tx.c
+++ b/drivers/infiniband/hw/hfi1/ipoib_tx.c
@@ -121,7 +121,7 @@ static void hfi1_ipoib_free_tx(struct ipoib_txreq *tx, int budget)
struct hfi1_ipoib_dev_priv *priv = tx->priv;
if (likely(!tx->sdma_status)) {
- hfi1_ipoib_update_tx_netstats(priv, 1, tx->skb->len);
+ dev_sw_netstats_tx_add(priv->netdev, 1, tx->skb->len);
} else {
++priv->netdev->stats.tx_errors;
dd_dev_warn(priv->dd,
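
The hfi1/ipoib patches are one instance of the tree-wide conversion to the core per-cpu tstats helpers: allocate dev->tstats, bump it with dev_sw_netstats_rx_add()/dev_sw_netstats_tx_add() on the hot paths, and point .ndo_get_stats64 at the generic dev_get_tstats64(), deleting the per-driver counting boilerplate. The whole pattern, reduced to a sketch (the my_* names are placeholders):

static int my_init(struct net_device *dev)
{
	dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
	return dev->tstats ? 0 : -ENOMEM;
}

static void my_uninit(struct net_device *dev)
{
	free_percpu(dev->tstats);
}

static void my_rx(struct net_device *dev, struct sk_buff *skb)
{
	/* lockless per-cpu counters, folded by dev_get_tstats64() */
	dev_sw_netstats_rx_add(dev, skb->len);
	netif_rx(skb);
}

static const struct net_device_ops my_netdev_ops = {
	.ndo_init	 = my_init,
	.ndo_uninit	 = my_uninit,
	.ndo_get_stats64 = dev_get_tstats64,
};
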
diff --git a/drivers/isdn/capi/capi.c b/drivers/isdn/capi/capi.c
index 85767f52fe3c..fdf87acccd06 100644
--- a/drivers/isdn/capi/capi.c
+++ b/drivers/isdn/capi/capi.c
@@ -11,6 +11,7 @@
#include <linux/compiler.h>
#include <linux/module.h>
+#include <linux/ethtool.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/major.h>
diff --git a/drivers/media/pci/ttpci/av7110_av.c b/drivers/media/pci/ttpci/av7110_av.c
index ea9f7d0058a2..91f4866c7e59 100644
--- a/drivers/media/pci/ttpci/av7110_av.c
+++ b/drivers/media/pci/ttpci/av7110_av.c
@@ -11,6 +11,7 @@
* the project's page is at https://linuxtv.org
*/
+#include <linux/ethtool.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index c3dbe64e628e..4ee41924cdf1 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -426,6 +426,13 @@ config VSOCKMON
mostly intended for developers or support to debug vsock issues. If
unsure, say N.
+config MHI_NET
+ tristate "MHI network driver"
+ depends on MHI_BUS
+ help
+ This is the network driver for MHI bus. It can be used with
+ QCOM based WWAN modems (like SDX55). Say Y or M.
+
endif # NET_CORE
config SUNGEM_PHY
@@ -489,8 +496,6 @@ source "drivers/net/usb/Kconfig"
source "drivers/net/wireless/Kconfig"
-source "drivers/net/wimax/Kconfig"
-
source "drivers/net/wan/Kconfig"
source "drivers/net/ieee802154/Kconfig"
diff --git a/drivers/net/Makefile b/drivers/net/Makefile
index 72e18d505d1a..36e2e41ed2aa 100644
--- a/drivers/net/Makefile
+++ b/drivers/net/Makefile
@@ -36,6 +36,7 @@ obj-$(CONFIG_GTP) += gtp.o
obj-$(CONFIG_NLMON) += nlmon.o
obj-$(CONFIG_NET_VRF) += vrf.o
obj-$(CONFIG_VSOCKMON) += vsockmon.o
+obj-$(CONFIG_MHI_NET) += mhi_net.o
#
# Networking Drivers
@@ -66,7 +67,6 @@ obj-$(CONFIG_NET_SB1000) += sb1000.o
obj-$(CONFIG_SUNGEM_PHY) += sungem_phy.o
obj-$(CONFIG_WAN) += wan/
obj-$(CONFIG_WLAN) += wireless/
-obj-$(CONFIG_WIMAX) += wimax/
obj-$(CONFIG_IEEE802154) += ieee802154/
obj-$(CONFIG_VMXNET3) += vmxnet3/
diff --git a/drivers/net/bareudp.c b/drivers/net/bareudp.c
index ff0bea1554f9..85ebd2b7e446 100644
--- a/drivers/net/bareudp.c
+++ b/drivers/net/bareudp.c
@@ -510,7 +510,7 @@ static const struct net_device_ops bareudp_netdev_ops = {
.ndo_open = bareudp_open,
.ndo_stop = bareudp_stop,
.ndo_start_xmit = bareudp_xmit,
- .ndo_get_stats64 = ip_tunnel_get_stats64,
+ .ndo_get_stats64 = dev_get_tstats64,
.ndo_fill_metadata_dst = bareudp_fill_metadata_dst,
};
@@ -522,7 +522,7 @@ static const struct nla_policy bareudp_policy[IFLA_BAREUDP_MAX + 1] = {
};
/* Info for udev, that this is a virtual tunnel endpoint */
-static struct device_type bareudp_type = {
+static const struct device_type bareudp_type = {
.name = "bareudp",
};
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 47afc5938c26..e0880a3840d7 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -1228,14 +1228,14 @@ static netdev_features_t bond_fix_features(struct net_device *dev,
}
#define BOND_VLAN_FEATURES (NETIF_F_HW_CSUM | NETIF_F_SG | \
- NETIF_F_FRAGLIST | NETIF_F_ALL_TSO | \
+ NETIF_F_FRAGLIST | NETIF_F_GSO_SOFTWARE | \
NETIF_F_HIGHDMA | NETIF_F_LRO)
#define BOND_ENC_FEATURES (NETIF_F_HW_CSUM | NETIF_F_SG | \
- NETIF_F_RXCSUM | NETIF_F_ALL_TSO)
+ NETIF_F_RXCSUM | NETIF_F_GSO_SOFTWARE)
#define BOND_MPLS_FEATURES (NETIF_F_HW_CSUM | NETIF_F_SG | \
- NETIF_F_ALL_TSO)
+ NETIF_F_GSO_SOFTWARE)
static void bond_compute_features(struct bonding *bond)
@@ -1291,8 +1291,7 @@ done:
bond_dev->vlan_features = vlan_features;
bond_dev->hw_enc_features = enc_features | NETIF_F_GSO_ENCAP_ALL |
NETIF_F_HW_VLAN_CTAG_TX |
- NETIF_F_HW_VLAN_STAG_TX |
- NETIF_F_GSO_UDP_L4;
+ NETIF_F_HW_VLAN_STAG_TX;
#ifdef CONFIG_XFRM_OFFLOAD
bond_dev->hw_enc_features |= xfrm_features;
#endif /* CONFIG_XFRM_OFFLOAD */
@@ -4746,7 +4745,7 @@ void bond_setup(struct net_device *bond_dev)
NETIF_F_HW_VLAN_CTAG_RX |
NETIF_F_HW_VLAN_CTAG_FILTER;
- bond_dev->hw_features |= NETIF_F_GSO_ENCAP_ALL | NETIF_F_GSO_UDP_L4;
+ bond_dev->hw_features |= NETIF_F_GSO_ENCAP_ALL;
#ifdef CONFIG_XFRM_OFFLOAD
bond_dev->hw_features |= BOND_XFRM_FEATURES;
#endif /* CONFIG_XFRM_OFFLOAD */
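
The feature-mask rework above replaces NETIF_F_ALL_TSO with the broader NETIF_F_GSO_SOFTWARE, which already contains the UDP_L4 bit; that is why the explicit NETIF_F_GSO_UDP_L4 additions become redundant and are dropped. For reference, the composition around this kernel version was approximately the following (quoted from netdev_features.h from memory, so verify against the tree):

#define NETIF_F_GSO_SOFTWARE	(NETIF_F_ALL_TSO | NETIF_F_GSO_SCTP | \
				 NETIF_F_GSO_UDP_L4 | NETIF_F_GSO_FRAGLIST)
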
diff --git a/drivers/net/bonding/bond_procfs.c b/drivers/net/bonding/bond_procfs.c
index fd5c9cbe45b1..56d34be5e797 100644
--- a/drivers/net/bonding/bond_procfs.c
+++ b/drivers/net/bonding/bond_procfs.c
@@ -1,5 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
#include <linux/proc_fs.h>
+#include <linux/ethtool.h>
#include <linux/export.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>
diff --git a/drivers/net/can/at91_can.c b/drivers/net/can/at91_can.c
index c14de95d2ca7..5284f0ab3b06 100644
--- a/drivers/net/can/at91_can.c
+++ b/drivers/net/can/at91_can.c
@@ -468,7 +468,7 @@ static netdev_tx_t at91_start_xmit(struct sk_buff *skb, struct net_device *dev)
}
reg_mid = at91_can_id_to_reg_mid(cf->can_id);
reg_mcr = ((cf->can_id & CAN_RTR_FLAG) ? AT91_MCR_MRTR : 0) |
- (cf->can_dlc << 16) | AT91_MCR_MTCR;
+ (cf->len << 16) | AT91_MCR_MTCR;
/* disable MB while writing ID (see datasheet) */
set_mb_mode(priv, mb, AT91_MB_MODE_DISABLED);
@@ -481,7 +481,7 @@ static netdev_tx_t at91_start_xmit(struct sk_buff *skb, struct net_device *dev)
/* This triggers transmission */
at91_write(priv, AT91_MCR(mb), reg_mcr);
- stats->tx_bytes += cf->can_dlc;
+ stats->tx_bytes += cf->len;
/* _NOTE_: subtract AT91_MB_TX_FIRST offset from mb! */
can_put_echo_skb(skb, dev, mb - get_mb_tx_first(priv));
@@ -554,7 +554,7 @@ static void at91_rx_overflow_err(struct net_device *dev)
cf->data[1] = CAN_ERR_CRTL_RX_OVERFLOW;
stats->rx_packets++;
- stats->rx_bytes += cf->can_dlc;
+ stats->rx_bytes += cf->len;
netif_receive_skb(skb);
}
@@ -580,7 +580,7 @@ static void at91_read_mb(struct net_device *dev, unsigned int mb,
cf->can_id = (reg_mid >> 18) & CAN_SFF_MASK;
reg_msr = at91_read(priv, AT91_MSR(mb));
- cf->can_dlc = get_can_dlc((reg_msr >> 16) & 0xf);
+ cf->len = can_cc_dlc2len((reg_msr >> 16) & 0xf);
if (reg_msr & AT91_MSR_MRTR)
cf->can_id |= CAN_RTR_FLAG;
@@ -619,7 +619,7 @@ static void at91_read_msg(struct net_device *dev, unsigned int mb)
at91_read_mb(dev, mb, cf);
stats->rx_packets++;
- stats->rx_bytes += cf->can_dlc;
+ stats->rx_bytes += cf->len;
netif_receive_skb(skb);
can_led_event(dev, CAN_LED_EVENT_RX);
@@ -780,7 +780,7 @@ static int at91_poll_err(struct net_device *dev, int quota, u32 reg_sr)
at91_poll_err_frame(dev, cf, reg_sr);
dev->stats.rx_packets++;
- dev->stats.rx_bytes += cf->can_dlc;
+ dev->stats.rx_bytes += cf->len;
netif_receive_skb(skb);
return 1;
@@ -1047,7 +1047,7 @@ static void at91_irq_err(struct net_device *dev)
at91_irq_err_state(dev, cf, new_state);
dev->stats.rx_packets++;
- dev->stats.rx_bytes += cf->can_dlc;
+ dev->stats.rx_bytes += cf->len;
netif_rx(skb);
priv->can.state = new_state;
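
The can_dlc -> len renames in the at91 driver (and throughout the CAN drivers below) follow the uapi change that made the classical CAN payload length a union member, keeping the old name for existing userspace while signalling that, for classical frames, the field is a byte count rather than a raw DLC. Approximately (abbreviated from the linux/can.h of that era; padding fields elided):

struct can_frame {
	canid_t can_id;	/* 32 bit CAN_ID + EFF/RTR/ERR flags */
	union {
		/* CAN frame payload length in byte (0 .. CAN_MAX_DLEN) */
		__u8 len;
		__u8 can_dlc;	/* deprecated */
	};
	/* ... padding/reserved bytes elided ... */
	__u8 data[8] __attribute__((aligned(8)));
};
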
diff --git a/drivers/net/can/c_can/c_can.c b/drivers/net/can/c_can/c_can.c
index 1a9e9b9a4bf6..63f48b016ecd 100644
--- a/drivers/net/can/c_can/c_can.c
+++ b/drivers/net/can/c_can/c_can.c
@@ -306,7 +306,7 @@ static void c_can_setup_tx_object(struct net_device *dev, int iface,
struct can_frame *frame, int idx)
{
struct c_can_priv *priv = netdev_priv(dev);
- u16 ctrl = IF_MCONT_TX | frame->can_dlc;
+ u16 ctrl = IF_MCONT_TX | frame->len;
bool rtr = frame->can_id & CAN_RTR_FLAG;
u32 arb = IF_ARB_MSGVAL;
int i;
@@ -339,7 +339,7 @@ static void c_can_setup_tx_object(struct net_device *dev, int iface,
if (priv->type == BOSCH_D_CAN) {
u32 data = 0, dreg = C_CAN_IFACE(DATA1_REG, iface);
- for (i = 0; i < frame->can_dlc; i += 4, dreg += 2) {
+ for (i = 0; i < frame->len; i += 4, dreg += 2) {
data = (u32)frame->data[i];
data |= (u32)frame->data[i + 1] << 8;
data |= (u32)frame->data[i + 2] << 16;
@@ -347,7 +347,7 @@ static void c_can_setup_tx_object(struct net_device *dev, int iface,
priv->write_reg32(priv, dreg, data);
}
} else {
- for (i = 0; i < frame->can_dlc; i += 2) {
+ for (i = 0; i < frame->len; i += 2) {
priv->write_reg(priv,
C_CAN_IFACE(DATA1_REG, iface) + i / 2,
frame->data[i] |
@@ -397,7 +397,7 @@ static int c_can_read_msg_object(struct net_device *dev, int iface, u32 ctrl)
return -ENOMEM;
}
- frame->can_dlc = get_can_dlc(ctrl & 0x0F);
+ frame->len = can_cc_dlc2len(ctrl & 0x0F);
arb = priv->read_reg32(priv, C_CAN_IFACE(ARB1_REG, iface));
@@ -412,7 +412,7 @@ static int c_can_read_msg_object(struct net_device *dev, int iface, u32 ctrl)
int i, dreg = C_CAN_IFACE(DATA1_REG, iface);
if (priv->type == BOSCH_D_CAN) {
- for (i = 0; i < frame->can_dlc; i += 4, dreg += 2) {
+ for (i = 0; i < frame->len; i += 4, dreg += 2) {
data = priv->read_reg32(priv, dreg);
frame->data[i] = data;
frame->data[i + 1] = data >> 8;
@@ -420,7 +420,7 @@ static int c_can_read_msg_object(struct net_device *dev, int iface, u32 ctrl)
frame->data[i + 3] = data >> 24;
}
} else {
- for (i = 0; i < frame->can_dlc; i += 2, dreg++) {
+ for (i = 0; i < frame->len; i += 2, dreg++) {
data = priv->read_reg(priv, dreg);
frame->data[i] = data;
frame->data[i + 1] = data >> 8;
@@ -429,7 +429,7 @@ static int c_can_read_msg_object(struct net_device *dev, int iface, u32 ctrl)
}
stats->rx_packets++;
- stats->rx_bytes += frame->can_dlc;
+ stats->rx_bytes += frame->len;
netif_receive_skb(skb);
return 0;
@@ -475,7 +475,7 @@ static netdev_tx_t c_can_start_xmit(struct sk_buff *skb,
* transmit as we might race against do_tx().
*/
c_can_setup_tx_object(dev, IF_TX, frame, idx);
- priv->dlc[idx] = frame->can_dlc;
+ priv->dlc[idx] = frame->len;
can_put_echo_skb(skb, dev, idx);
/* Update the active bits */
@@ -977,7 +977,7 @@ static int c_can_handle_state_change(struct net_device *dev,
}
stats->rx_packets++;
- stats->rx_bytes += cf->can_dlc;
+ stats->rx_bytes += cf->len;
netif_receive_skb(skb);
return 1;
@@ -1047,7 +1047,7 @@ static int c_can_handle_bus_err(struct net_device *dev,
}
stats->rx_packets++;
- stats->rx_bytes += cf->can_dlc;
+ stats->rx_bytes += cf->len;
netif_receive_skb(skb);
return 1;
}
diff --git a/drivers/net/can/cc770/cc770.c b/drivers/net/can/cc770/cc770.c
index 07e2b8df5153..8d9f332c35e0 100644
--- a/drivers/net/can/cc770/cc770.c
+++ b/drivers/net/can/cc770/cc770.c
@@ -390,7 +390,7 @@ static void cc770_tx(struct net_device *dev, int mo)
u32 id;
int i;
- dlc = cf->can_dlc;
+ dlc = cf->len;
id = cf->can_id;
rtr = cf->can_id & CAN_RTR_FLAG ? 0 : MSGCFG_DIR;
@@ -470,7 +470,7 @@ static void cc770_rx(struct net_device *dev, unsigned int mo, u8 ctrl1)
cf->can_id = CAN_RTR_FLAG;
if (config & MSGCFG_XTD)
cf->can_id |= CAN_EFF_FLAG;
- cf->can_dlc = 0;
+ cf->len = 0;
} else {
if (config & MSGCFG_XTD) {
id = cc770_read_reg(priv, msgobj[mo].id[3]);
@@ -486,13 +486,13 @@ static void cc770_rx(struct net_device *dev, unsigned int mo, u8 ctrl1)
}
cf->can_id = id;
- cf->can_dlc = get_can_dlc((config & 0xf0) >> 4);
- for (i = 0; i < cf->can_dlc; i++)
+ cf->len = can_cc_dlc2len((config & 0xf0) >> 4);
+ for (i = 0; i < cf->len; i++)
cf->data[i] = cc770_read_reg(priv, msgobj[mo].data[i]);
}
stats->rx_packets++;
- stats->rx_bytes += cf->can_dlc;
+ stats->rx_bytes += cf->len;
netif_rx(skb);
}
@@ -572,7 +572,7 @@ static int cc770_err(struct net_device *dev, u8 status)
stats->rx_packets++;
- stats->rx_bytes += cf->can_dlc;
+ stats->rx_bytes += cf->len;
netif_rx(skb);
return 0;
@@ -699,7 +699,7 @@ static void cc770_tx_interrupt(struct net_device *dev, unsigned int o)
}
cf = (struct can_frame *)priv->tx_skb->data;
- stats->tx_bytes += cf->can_dlc;
+ stats->tx_bytes += cf->len;
stats->tx_packets++;
can_put_echo_skb(priv->tx_skb, dev, 0);
diff --git a/drivers/net/can/dev.c b/drivers/net/can/dev.c
index 81e39d7507d8..3486704c8a95 100644
--- a/drivers/net/can/dev.c
+++ b/drivers/net/can/dev.c
@@ -30,12 +30,12 @@ MODULE_AUTHOR("Wolfgang Grandegger <wg@grandegger.com>");
static const u8 dlc2len[] = {0, 1, 2, 3, 4, 5, 6, 7,
8, 12, 16, 20, 24, 32, 48, 64};
-/* get data length from can_dlc with sanitized can_dlc */
-u8 can_dlc2len(u8 can_dlc)
+/* get data length from raw data length code (DLC) */
+u8 can_fd_dlc2len(u8 dlc)
{
- return dlc2len[can_dlc & 0x0F];
+ return dlc2len[dlc & 0x0F];
}
-EXPORT_SYMBOL_GPL(can_dlc2len);
+EXPORT_SYMBOL_GPL(can_fd_dlc2len);
static const u8 len2dlc[] = {0, 1, 2, 3, 4, 5, 6, 7, 8, /* 0 - 8 */
9, 9, 9, 9, /* 9 - 12 */
@@ -49,14 +49,14 @@ static const u8 len2dlc[] = {0, 1, 2, 3, 4, 5, 6, 7, 8, /* 0 - 8 */
15, 15, 15, 15, 15, 15, 15, 15}; /* 57 - 64 */
/* map the sanitized data length to an appropriate data length code */
-u8 can_len2dlc(u8 len)
+u8 can_fd_len2dlc(u8 len)
{
if (unlikely(len > 64))
return 0xF;
return len2dlc[len];
}
-EXPORT_SYMBOL_GPL(can_len2dlc);
+EXPORT_SYMBOL_GPL(can_fd_len2dlc);
#ifdef CONFIG_CAN_CALC_BITTIMING
#define CAN_CALC_MAX_ERROR 50 /* in one-tenth of a percent */
@@ -595,7 +595,7 @@ static void can_restart(struct net_device *dev)
netif_rx_ni(skb);
stats->rx_packets++;
- stats->rx_bytes += cf->can_dlc;
+ stats->rx_bytes += cf->len;
restart:
netdev_dbg(dev, "restarted\n");
@@ -737,7 +737,7 @@ struct sk_buff *alloc_can_err_skb(struct net_device *dev, struct can_frame **cf)
return NULL;
(*cf)->can_id = CAN_ERR_FLAG;
- (*cf)->can_dlc = CAN_ERR_DLC;
+ (*cf)->len = CAN_ERR_DLC;
return skb;
}
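
With the renames in dev.c, the classical and CAN FD conversions read distinctly: can_cc_dlc2len() simply caps a raw DLC at 8 bytes, while can_fd_dlc2len()/can_fd_len2dlc() go through the non-linear tables shown above. A few values as a sanity check (an illustrative function, not kernel code; values follow from the dlc2len/len2dlc tables in this hunk):

static void dlc_examples(void)
{
	u8 a = can_cc_dlc2len(0xf);	/* classical CAN: DLC 15 -> 8 bytes (capped) */
	u8 b = can_fd_dlc2len(0x9);	/* CAN FD: DLC 9 -> 12 data bytes */
	u8 c = can_fd_len2dlc(12);	/* 12 bytes -> DLC 9 */
	u8 d = can_fd_len2dlc(13);	/* rounds up: 13 bytes -> DLC 10 (16 bytes) */
}
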
diff --git a/drivers/net/can/flexcan.c b/drivers/net/can/flexcan.c
index 99e5f272205d..e85f20d18d67 100644
--- a/drivers/net/can/flexcan.c
+++ b/drivers/net/can/flexcan.c
@@ -236,8 +236,8 @@
#define FLEXCAN_QUIRK_BROKEN_PERR_STATE BIT(6)
/* default to BE register access */
#define FLEXCAN_QUIRK_DEFAULT_BIG_ENDIAN BIT(7)
-/* Setup stop mode to support wakeup */
-#define FLEXCAN_QUIRK_SETUP_STOP_MODE BIT(8)
+/* Setup stop mode with GPR to support wakeup */
+#define FLEXCAN_QUIRK_SETUP_STOP_MODE_GPR BIT(8)
/* Support CAN-FD mode */
#define FLEXCAN_QUIRK_SUPPORT_FD BIT(9)
/* support memory detection and correction */
@@ -381,7 +381,7 @@ static const struct flexcan_devtype_data fsl_imx28_devtype_data = {
static const struct flexcan_devtype_data fsl_imx6q_devtype_data = {
.quirks = FLEXCAN_QUIRK_DISABLE_RXFG | FLEXCAN_QUIRK_ENABLE_EACEN_RRS |
FLEXCAN_QUIRK_USE_OFF_TIMESTAMP | FLEXCAN_QUIRK_BROKEN_PERR_STATE |
- FLEXCAN_QUIRK_SETUP_STOP_MODE,
+ FLEXCAN_QUIRK_SETUP_STOP_MODE_GPR,
};
static const struct flexcan_devtype_data fsl_imx8qm_devtype_data = {
@@ -393,7 +393,7 @@ static const struct flexcan_devtype_data fsl_imx8qm_devtype_data = {
static struct flexcan_devtype_data fsl_imx8mp_devtype_data = {
.quirks = FLEXCAN_QUIRK_DISABLE_RXFG | FLEXCAN_QUIRK_ENABLE_EACEN_RRS |
FLEXCAN_QUIRK_DISABLE_MECR | FLEXCAN_QUIRK_USE_OFF_TIMESTAMP |
- FLEXCAN_QUIRK_BROKEN_PERR_STATE | FLEXCAN_QUIRK_SETUP_STOP_MODE |
+ FLEXCAN_QUIRK_BROKEN_PERR_STATE | FLEXCAN_QUIRK_SETUP_STOP_MODE_GPR |
FLEXCAN_QUIRK_SUPPORT_FD | FLEXCAN_QUIRK_SUPPORT_ECC,
};
@@ -746,7 +746,7 @@ static netdev_tx_t flexcan_start_xmit(struct sk_buff *skb, struct net_device *de
struct canfd_frame *cfd = (struct canfd_frame *)skb->data;
u32 can_id;
u32 data;
- u32 ctrl = FLEXCAN_MB_CODE_TX_DATA | ((can_len2dlc(cfd->len)) << 16);
+ u32 ctrl = FLEXCAN_MB_CODE_TX_DATA | ((can_fd_len2dlc(cfd->len)) << 16);
int i;
if (can_dropped_invalid_skb(dev, skb))
@@ -1000,12 +1000,12 @@ static struct sk_buff *flexcan_mailbox_read(struct can_rx_offload *offload,
cfd->can_id = (reg_id >> 18) & CAN_SFF_MASK;
if (reg_ctrl & FLEXCAN_MB_CNT_EDL) {
- cfd->len = can_dlc2len(get_canfd_dlc((reg_ctrl >> 16) & 0xf));
+ cfd->len = can_fd_dlc2len((reg_ctrl >> 16) & 0xf);
if (reg_ctrl & FLEXCAN_MB_CNT_BRS)
cfd->flags |= CANFD_BRS;
} else {
- cfd->len = get_can_dlc((reg_ctrl >> 16) & 0xf);
+ cfd->len = can_cc_dlc2len((reg_ctrl >> 16) & 0xf);
if (reg_ctrl & FLEXCAN_MB_CNT_RTR)
cfd->can_id |= CAN_RTR_FLAG;
@@ -1346,6 +1346,72 @@ static void flexcan_ram_init(struct net_device *dev)
priv->write(reg_ctrl2, &regs->ctrl2);
}
+static int flexcan_rx_offload_setup(struct net_device *dev)
+{
+ struct flexcan_priv *priv = netdev_priv(dev);
+ int err;
+
+ if (priv->can.ctrlmode & CAN_CTRLMODE_FD)
+ priv->mb_size = sizeof(struct flexcan_mb) + CANFD_MAX_DLEN;
+ else
+ priv->mb_size = sizeof(struct flexcan_mb) + CAN_MAX_DLEN;
+ priv->mb_count = (sizeof(priv->regs->mb[0]) / priv->mb_size) +
+ (sizeof(priv->regs->mb[1]) / priv->mb_size);
+
+ if (priv->devtype_data->quirks & FLEXCAN_QUIRK_USE_OFF_TIMESTAMP)
+ priv->tx_mb_reserved =
+ flexcan_get_mb(priv, FLEXCAN_TX_MB_RESERVED_OFF_TIMESTAMP);
+ else
+ priv->tx_mb_reserved =
+ flexcan_get_mb(priv, FLEXCAN_TX_MB_RESERVED_OFF_FIFO);
+ priv->tx_mb_idx = priv->mb_count - 1;
+ priv->tx_mb = flexcan_get_mb(priv, priv->tx_mb_idx);
+ priv->tx_mask = FLEXCAN_IFLAG_MB(priv->tx_mb_idx);
+
+ priv->offload.mailbox_read = flexcan_mailbox_read;
+
+ if (priv->devtype_data->quirks & FLEXCAN_QUIRK_USE_OFF_TIMESTAMP) {
+ priv->offload.mb_first = FLEXCAN_RX_MB_OFF_TIMESTAMP_FIRST;
+ priv->offload.mb_last = priv->mb_count - 2;
+
+ priv->rx_mask = GENMASK_ULL(priv->offload.mb_last,
+ priv->offload.mb_first);
+ err = can_rx_offload_add_timestamp(dev, &priv->offload);
+ } else {
+ priv->rx_mask = FLEXCAN_IFLAG_RX_FIFO_OVERFLOW |
+ FLEXCAN_IFLAG_RX_FIFO_AVAILABLE;
+ err = can_rx_offload_add_fifo(dev, &priv->offload,
+ FLEXCAN_NAPI_WEIGHT);
+ }
+
+ return err;
+}
+
+static void flexcan_chip_interrupts_enable(const struct net_device *dev)
+{
+ const struct flexcan_priv *priv = netdev_priv(dev);
+ struct flexcan_regs __iomem *regs = priv->regs;
+ u64 reg_imask;
+
+ disable_irq(dev->irq);
+ priv->write(priv->reg_ctrl_default, &regs->ctrl);
+ reg_imask = priv->rx_mask | priv->tx_mask;
+ priv->write(upper_32_bits(reg_imask), &regs->imask2);
+ priv->write(lower_32_bits(reg_imask), &regs->imask1);
+ enable_irq(dev->irq);
+}
+
+static void flexcan_chip_interrupts_disable(const struct net_device *dev)
+{
+ const struct flexcan_priv *priv = netdev_priv(dev);
+ struct flexcan_regs __iomem *regs = priv->regs;
+
+ priv->write(0, &regs->imask2);
+ priv->write(0, &regs->imask1);
+ priv->write(priv->reg_ctrl_default & ~FLEXCAN_CTRL_ERR_ALL,
+ &regs->ctrl);
+}
+
/* flexcan_chip_start
*
* this functions is entered with clocks enabled
@@ -1356,7 +1422,6 @@ static int flexcan_chip_start(struct net_device *dev)
struct flexcan_priv *priv = netdev_priv(dev);
struct flexcan_regs __iomem *regs = priv->regs;
u32 reg_mcr, reg_ctrl, reg_ctrl2, reg_mecr;
- u64 reg_imask;
int err, i;
struct flexcan_mb __iomem *mb;
@@ -1574,14 +1639,6 @@ static int flexcan_chip_start(struct net_device *dev)
priv->can.state = CAN_STATE_ERROR_ACTIVE;
- /* enable interrupts atomically */
- disable_irq(dev->irq);
- priv->write(priv->reg_ctrl_default, &regs->ctrl);
- reg_imask = priv->rx_mask | priv->tx_mask;
- priv->write(upper_32_bits(reg_imask), &regs->imask2);
- priv->write(lower_32_bits(reg_imask), &regs->imask1);
- enable_irq(dev->irq);
-
/* print chip status */
netdev_dbg(dev, "%s: reading mcr=0x%08x ctrl=0x%08x\n", __func__,
priv->read(&regs->mcr), priv->read(&regs->ctrl));
@@ -1600,7 +1657,6 @@ static int flexcan_chip_start(struct net_device *dev)
static int __flexcan_chip_stop(struct net_device *dev, bool disable_on_error)
{
struct flexcan_priv *priv = netdev_priv(dev);
- struct flexcan_regs __iomem *regs = priv->regs;
int err;
/* freeze + disable module */
@@ -1611,12 +1667,6 @@ static int __flexcan_chip_stop(struct net_device *dev, bool disable_on_error)
if (err && !disable_on_error)
goto out_chip_unfreeze;
- /* Disable all interrupts */
- priv->write(0, &regs->imask2);
- priv->write(0, &regs->imask1);
- priv->write(priv->reg_ctrl_default & ~FLEXCAN_CTRL_ERR_ALL,
- &regs->ctrl);
-
priv->can.state = CAN_STATE_STOPPED;
return 0;
@@ -1662,61 +1712,33 @@ static int flexcan_open(struct net_device *dev)
if (err)
goto out_close;
- err = request_irq(dev->irq, flexcan_irq, IRQF_SHARED, dev->name, dev);
+ err = flexcan_rx_offload_setup(dev);
if (err)
goto out_transceiver_disable;
- if (priv->can.ctrlmode & CAN_CTRLMODE_FD)
- priv->mb_size = sizeof(struct flexcan_mb) + CANFD_MAX_DLEN;
- else
- priv->mb_size = sizeof(struct flexcan_mb) + CAN_MAX_DLEN;
- priv->mb_count = (sizeof(priv->regs->mb[0]) / priv->mb_size) +
- (sizeof(priv->regs->mb[1]) / priv->mb_size);
-
- if (priv->devtype_data->quirks & FLEXCAN_QUIRK_USE_OFF_TIMESTAMP)
- priv->tx_mb_reserved =
- flexcan_get_mb(priv, FLEXCAN_TX_MB_RESERVED_OFF_TIMESTAMP);
- else
- priv->tx_mb_reserved =
- flexcan_get_mb(priv, FLEXCAN_TX_MB_RESERVED_OFF_FIFO);
- priv->tx_mb_idx = priv->mb_count - 1;
- priv->tx_mb = flexcan_get_mb(priv, priv->tx_mb_idx);
- priv->tx_mask = FLEXCAN_IFLAG_MB(priv->tx_mb_idx);
-
- priv->offload.mailbox_read = flexcan_mailbox_read;
+ err = flexcan_chip_start(dev);
+ if (err)
+ goto out_can_rx_offload_del;
- if (priv->devtype_data->quirks & FLEXCAN_QUIRK_USE_OFF_TIMESTAMP) {
- priv->offload.mb_first = FLEXCAN_RX_MB_OFF_TIMESTAMP_FIRST;
- priv->offload.mb_last = priv->mb_count - 2;
+ can_rx_offload_enable(&priv->offload);
- priv->rx_mask = GENMASK_ULL(priv->offload.mb_last,
- priv->offload.mb_first);
- err = can_rx_offload_add_timestamp(dev, &priv->offload);
- } else {
- priv->rx_mask = FLEXCAN_IFLAG_RX_FIFO_OVERFLOW |
- FLEXCAN_IFLAG_RX_FIFO_AVAILABLE;
- err = can_rx_offload_add_fifo(dev, &priv->offload,
- FLEXCAN_NAPI_WEIGHT);
- }
+ err = request_irq(dev->irq, flexcan_irq, IRQF_SHARED, dev->name, dev);
if (err)
- goto out_free_irq;
+ goto out_can_rx_offload_disable;
- /* start chip and queuing */
- err = flexcan_chip_start(dev);
- if (err)
- goto out_offload_del;
+ flexcan_chip_interrupts_enable(dev);
can_led_event(dev, CAN_LED_EVENT_OPEN);
- can_rx_offload_enable(&priv->offload);
netif_start_queue(dev);
return 0;
- out_offload_del:
+ out_can_rx_offload_disable:
+ can_rx_offload_disable(&priv->offload);
+ flexcan_chip_stop(dev);
+ out_can_rx_offload_del:
can_rx_offload_del(&priv->offload);
- out_free_irq:
- free_irq(dev->irq, dev);
out_transceiver_disable:
flexcan_transceiver_disable(priv);
out_close:
@@ -1732,14 +1754,15 @@ static int flexcan_close(struct net_device *dev)
struct flexcan_priv *priv = netdev_priv(dev);
netif_stop_queue(dev);
+ flexcan_chip_interrupts_disable(dev);
+ free_irq(dev->irq, dev);
can_rx_offload_disable(&priv->offload);
flexcan_chip_stop_disable_on_error(dev);
can_rx_offload_del(&priv->offload);
- free_irq(dev->irq, dev);
flexcan_transceiver_disable(priv);
-
close_candev(dev);
+
pm_runtime_put(priv->dev);
can_led_event(dev, CAN_LED_EVENT_STOP);
@@ -1757,6 +1780,8 @@ static int flexcan_set_mode(struct net_device *dev, enum can_mode mode)
if (err)
return err;
+ flexcan_chip_interrupts_enable(dev);
+
netif_wake_queue(dev);
break;
@@ -2047,7 +2072,7 @@ static int flexcan_probe(struct platform_device *pdev)
of_can_transceiver(dev);
devm_can_led_init(dev);
- if (priv->devtype_data->quirks & FLEXCAN_QUIRK_SETUP_STOP_MODE) {
+ if (priv->devtype_data->quirks & FLEXCAN_QUIRK_SETUP_STOP_MODE_GPR) {
err = flexcan_setup_stop_mode(pdev);
if (err)
dev_dbg(&pdev->dev, "failed to setup stop-mode\n");
@@ -2095,6 +2120,8 @@ static int __maybe_unused flexcan_suspend(struct device *device)
if (err)
return err;
+ flexcan_chip_interrupts_disable(dev);
+
err = pinctrl_pm_select_sleep_state(device);
if (err)
return err;
@@ -2130,6 +2157,8 @@ static int __maybe_unused flexcan_resume(struct device *device)
err = flexcan_chip_start(dev);
if (err)
return err;
+
+ flexcan_chip_interrupts_enable(dev);
}
}
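
The reworked flexcan_open() above acquires resources in the order rx-offload setup, chip start, offload enable, IRQ request, interrupt enable, and the error labels unwind in exactly the reverse order; flexcan_close() mirrors the same sequence. A minimal standalone sketch of this goto-unwind pattern (function names are illustrative, not the driver's):

#include <stdio.h>

/* Illustrative stubs standing in for the real setup steps. */
static int setup_offload(void)    { return 0; }
static int start_chip(void)       { return 0; }
static int request_irq_line(void) { return -1; /* force unwind */ }
static void stop_chip(void)       { puts("stop chip"); }
static void free_offload(void)    { puts("free offload"); }

static int open_device(void)
{
	int err;

	err = setup_offload();
	if (err)
		return err;

	err = start_chip();
	if (err)
		goto out_free_offload;

	err = request_irq_line();
	if (err)
		goto out_stop_chip;

	return 0;

out_stop_chip:
	stop_chip();
out_free_offload:
	free_offload();
	return err;
}

int main(void)
{
	printf("open_device() = %d\n", open_device());
	return 0;
}
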
diff --git a/drivers/net/can/grcan.c b/drivers/net/can/grcan.c
index 39802f107eb1..f5d94a692576 100644
--- a/drivers/net/can/grcan.c
+++ b/drivers/net/can/grcan.c
@@ -1201,12 +1201,12 @@ static int grcan_receive(struct net_device *dev, int budget)
cf->can_id = ((slot[0] & GRCAN_MSG_BID)
>> GRCAN_MSG_BID_BIT);
}
- cf->can_dlc = get_can_dlc((slot[1] & GRCAN_MSG_DLC)
+ cf->len = can_cc_dlc2len((slot[1] & GRCAN_MSG_DLC)
>> GRCAN_MSG_DLC_BIT);
if (rtr) {
cf->can_id |= CAN_RTR_FLAG;
} else {
- for (i = 0; i < cf->can_dlc; i++) {
+ for (i = 0; i < cf->len; i++) {
j = GRCAN_MSG_DATA_SLOT_INDEX(i);
shift = GRCAN_MSG_DATA_SHIFT(i);
cf->data[i] = (u8)(slot[j] >> shift);
@@ -1215,7 +1215,7 @@ static int grcan_receive(struct net_device *dev, int budget)
/* Update statistics and read pointer */
stats->rx_packets++;
- stats->rx_bytes += cf->can_dlc;
+ stats->rx_bytes += cf->len;
netif_receive_skb(skb);
rd = grcan_ring_add(rd, GRCAN_MSG_SIZE, dma->rx.size);
@@ -1399,7 +1399,7 @@ static netdev_tx_t grcan_start_xmit(struct sk_buff *skb,
eff = cf->can_id & CAN_EFF_FLAG;
rtr = cf->can_id & CAN_RTR_FLAG;
id = cf->can_id & (eff ? CAN_EFF_MASK : CAN_SFF_MASK);
- dlc = cf->can_dlc;
+ dlc = cf->len;
if (eff)
tmp = (id << GRCAN_MSG_EID_BIT) & GRCAN_MSG_EID;
else
@@ -1447,7 +1447,7 @@ static netdev_tx_t grcan_start_xmit(struct sk_buff *skb,
* can_put_echo_skb would be an error unless other measures are
* taken.
*/
- priv->txdlc[slotindex] = cf->can_dlc; /* Store dlc for statistics */
+ priv->txdlc[slotindex] = cf->len; /* Store dlc for statistics */
can_put_echo_skb(skb, dev, slotindex);
/* Make sure everything is written before allowing hardware to
diff --git a/drivers/net/can/ifi_canfd/ifi_canfd.c b/drivers/net/can/ifi_canfd/ifi_canfd.c
index 74503cacf594..86b0e1406a21 100644
--- a/drivers/net/can/ifi_canfd/ifi_canfd.c
+++ b/drivers/net/can/ifi_canfd/ifi_canfd.c
@@ -271,9 +271,9 @@ static void ifi_canfd_read_fifo(struct net_device *ndev)
dlc = (rxdlc >> IFI_CANFD_RXFIFO_DLC_DLC_OFFSET) &
IFI_CANFD_RXFIFO_DLC_DLC_MASK;
if (rxdlc & IFI_CANFD_RXFIFO_DLC_EDL)
- cf->len = can_dlc2len(dlc);
+ cf->len = can_fd_dlc2len(dlc);
else
- cf->len = get_can_dlc(dlc);
+ cf->len = can_cc_dlc2len(dlc);
rxid = readl(priv->base + IFI_CANFD_RXFIFO_ID);
id = (rxid >> IFI_CANFD_RXFIFO_ID_ID_OFFSET);
@@ -431,7 +431,7 @@ static int ifi_canfd_handle_lec_err(struct net_device *ndev)
writel(IFI_CANFD_ERROR_CTR_ER_ENABLE, priv->base + IFI_CANFD_ERROR_CTR);
stats->rx_packets++;
- stats->rx_bytes += cf->can_dlc;
+ stats->rx_bytes += cf->len;
netif_receive_skb(skb);
return 1;
@@ -523,7 +523,7 @@ static int ifi_canfd_handle_state_change(struct net_device *ndev,
}
stats->rx_packets++;
- stats->rx_bytes += cf->can_dlc;
+ stats->rx_bytes += cf->len;
netif_receive_skb(skb);
return 1;
@@ -900,7 +900,7 @@ static netdev_tx_t ifi_canfd_start_xmit(struct sk_buff *skb,
txid = cf->can_id & CAN_SFF_MASK;
}
- txdlc = can_len2dlc(cf->len);
+ txdlc = can_fd_len2dlc(cf->len);
if ((priv->can.ctrlmode & CAN_CTRLMODE_FD) && can_is_canfd_skb(skb)) {
txdlc |= IFI_CANFD_TXFIFO_DLC_EDL;
if (cf->flags & CANFD_BRS)
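
Several drivers in this series switch from get_can_dlc()/can_dlc2len() to the renamed can_cc_dlc2len()/can_fd_dlc2len() helpers. The underlying mapping is fixed by the CAN FD spec: DLCs 0..8 are the byte count, 9..15 mean 12/16/20/24/32/48/64 bytes, and Classical CAN clamps anything above 8 to 8. A standalone sketch of both directions:

#include <stdio.h>

static const unsigned char fd_dlc2len[16] = {
	0, 1, 2, 3, 4, 5, 6, 7, 8, 12, 16, 20, 24, 32, 48, 64
};

/* Classical CAN: DLC values 9..15 still mean 8 data bytes. */
static unsigned char cc_dlc2len(unsigned char dlc)
{
	return dlc > 8 ? 8 : dlc;
}

/* CAN FD: round a payload length up to the next valid DLC. */
static unsigned char fd_len2dlc(unsigned char len)
{
	unsigned char dlc;

	for (dlc = 0; dlc < 15; dlc++)
		if (fd_dlc2len[dlc] >= len)
			return dlc;
	return 15;
}

int main(void)
{
	printf("cc_dlc2len(0xF) = %u\n", cc_dlc2len(0xF));  /* 8 */
	printf("fd_dlc2len[0xA] = %u\n", fd_dlc2len[0xA]);  /* 16 */
	printf("fd_len2dlc(17)  = %u\n", fd_len2dlc(17));   /* 11, i.e. 20 bytes */
	return 0;
}
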
diff --git a/drivers/net/can/janz-ican3.c b/drivers/net/can/janz-ican3.c
index f929db893957..2a6c918186c0 100644
--- a/drivers/net/can/janz-ican3.c
+++ b/drivers/net/can/janz-ican3.c
@@ -916,10 +916,10 @@ static void ican3_to_can_frame(struct ican3_dev *mod,
cf->can_id |= desc->data[0] << 3;
cf->can_id |= (desc->data[1] & 0xe0) >> 5;
- cf->can_dlc = get_can_dlc(desc->data[1] & ICAN3_CAN_DLC_MASK);
- memcpy(cf->data, &desc->data[2], cf->can_dlc);
+ cf->len = can_cc_dlc2len(desc->data[1] & ICAN3_CAN_DLC_MASK);
+ memcpy(cf->data, &desc->data[2], cf->len);
} else {
- cf->can_dlc = get_can_dlc(desc->data[0] & ICAN3_CAN_DLC_MASK);
+ cf->len = can_cc_dlc2len(desc->data[0] & ICAN3_CAN_DLC_MASK);
if (desc->data[0] & ICAN3_EFF_RTR)
cf->can_id |= CAN_RTR_FLAG;
@@ -934,7 +934,7 @@ static void ican3_to_can_frame(struct ican3_dev *mod,
cf->can_id |= desc->data[3] >> 5; /* 2-0 */
}
- memcpy(cf->data, &desc->data[6], cf->can_dlc);
+ memcpy(cf->data, &desc->data[6], cf->len);
}
}
@@ -947,7 +947,7 @@ static void can_frame_to_ican3(struct ican3_dev *mod,
/* we always use the extended format, with the ECHO flag set */
desc->command = ICAN3_CAN_TYPE_EFF;
- desc->data[0] |= cf->can_dlc;
+ desc->data[0] |= cf->len;
desc->data[1] |= ICAN3_ECHO;
/* support single transmission (no retries) mode */
@@ -970,7 +970,7 @@ static void can_frame_to_ican3(struct ican3_dev *mod,
}
/* copy the data bits into the descriptor */
- memcpy(&desc->data[6], cf->data, cf->can_dlc);
+ memcpy(&desc->data[6], cf->data, cf->len);
}
/*
@@ -1294,7 +1294,7 @@ static unsigned int ican3_get_echo_skb(struct ican3_dev *mod)
}
cf = (struct can_frame *)skb->data;
- dlc = cf->can_dlc;
+ dlc = cf->len;
/* check whether this packet has to be looped back */
if (skb->pkt_type != PACKET_LOOPBACK) {
@@ -1332,10 +1332,10 @@ static bool ican3_echo_skb_matches(struct ican3_dev *mod, struct sk_buff *skb)
if (cf->can_id != echo_cf->can_id)
return false;
- if (cf->can_dlc != echo_cf->can_dlc)
+ if (cf->len != echo_cf->len)
return false;
- return memcmp(cf->data, echo_cf->data, cf->can_dlc) == 0;
+ return memcmp(cf->data, echo_cf->data, cf->len) == 0;
}
/*
@@ -1421,7 +1421,7 @@ static int ican3_recv_skb(struct ican3_dev *mod)
/* update statistics, receive the skb */
stats->rx_packets++;
- stats->rx_bytes += cf->can_dlc;
+ stats->rx_bytes += cf->len;
netif_receive_skb(skb);
err_noalloc:
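
janz-ican3 matches an incoming loopback frame against the queued echo skb by comparing ID, length and only the valid payload bytes. The comparison in isolation:

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

struct frame {
	unsigned int id;
	unsigned char len;
	unsigned char data[8];
};

static bool frames_match(const struct frame *a, const struct frame *b)
{
	if (a->id != b->id)
		return false;
	if (a->len != b->len)
		return false;
	/* only the valid payload bytes take part in the comparison */
	return memcmp(a->data, b->data, a->len) == 0;
}

int main(void)
{
	struct frame tx = { .id = 0x123, .len = 2, .data = { 0xAA, 0x55 } };
	struct frame rx = tx;

	printf("match: %d\n", frames_match(&tx, &rx));
	return 0;
}
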
diff --git a/drivers/net/can/kvaser_pciefd.c b/drivers/net/can/kvaser_pciefd.c
index 43151dd6cb1c..969cedb9b0b6 100644
--- a/drivers/net/can/kvaser_pciefd.c
+++ b/drivers/net/can/kvaser_pciefd.c
@@ -742,7 +742,7 @@ static int kvaser_pciefd_prepare_tx_packet(struct kvaser_pciefd_tx_packet *p,
p->header[0] |= KVASER_PCIEFD_RPACKET_IDE;
p->header[0] |= cf->can_id & CAN_EFF_MASK;
- p->header[1] |= can_len2dlc(cf->len) << KVASER_PCIEFD_RPACKET_DLC_SHIFT;
+ p->header[1] |= can_fd_len2dlc(cf->len) << KVASER_PCIEFD_RPACKET_DLC_SHIFT;
p->header[1] |= KVASER_PCIEFD_TPACKET_AREQ;
if (can_is_canfd_skb(skb)) {
@@ -1176,7 +1176,7 @@ static int kvaser_pciefd_handle_data_packet(struct kvaser_pciefd *pcie,
if (p->header[0] & KVASER_PCIEFD_RPACKET_IDE)
cf->can_id |= CAN_EFF_FLAG;
- cf->len = can_dlc2len(p->header[1] >> KVASER_PCIEFD_RPACKET_DLC_SHIFT);
+ cf->len = can_fd_dlc2len(p->header[1] >> KVASER_PCIEFD_RPACKET_DLC_SHIFT);
if (p->header[0] & KVASER_PCIEFD_RPACKET_RTR)
cf->can_id |= CAN_RTR_FLAG;
@@ -1301,7 +1301,7 @@ static int kvaser_pciefd_rx_error_frame(struct kvaser_pciefd_can *can,
cf->data[7] = bec.rxerr;
stats->rx_packets++;
- stats->rx_bytes += cf->can_dlc;
+ stats->rx_bytes += cf->len;
netif_rx(skb);
return 0;
@@ -1500,7 +1500,7 @@ static void kvaser_pciefd_handle_nack_packet(struct kvaser_pciefd_can *can,
if (skb) {
cf->can_id |= CAN_ERR_BUSERROR;
- stats->rx_bytes += cf->can_dlc;
+ stats->rx_bytes += cf->len;
stats->rx_packets++;
netif_rx(skb);
} else {
@@ -1602,7 +1602,7 @@ static int kvaser_pciefd_read_packet(struct kvaser_pciefd *pcie, int *start_pos,
if (!(p->header[0] & KVASER_PCIEFD_RPACKET_RTR)) {
u8 data_len;
- data_len = can_dlc2len(p->header[1] >>
+ data_len = can_fd_dlc2len(p->header[1] >>
KVASER_PCIEFD_RPACKET_DLC_SHIFT);
pos += DIV_ROUND_UP(data_len, 4);
}
diff --git a/drivers/net/can/m_can/Kconfig b/drivers/net/can/m_can/Kconfig
index 5f9f8192dd0b..e3eb69b76cf5 100644
--- a/drivers/net/can/m_can/Kconfig
+++ b/drivers/net/can/m_can/Kconfig
@@ -1,21 +1,21 @@
# SPDX-License-Identifier: GPL-2.0-only
-config CAN_M_CAN
+menuconfig CAN_M_CAN
tristate "Bosch M_CAN support"
help
Say Y here if you want support for Bosch M_CAN controller framework.
This is common support for devices that embed the Bosch M_CAN IP.
+if CAN_M_CAN
+
config CAN_M_CAN_PLATFORM
tristate "Bosch M_CAN support for io-mapped devices"
depends on HAS_IOMEM
- depends on CAN_M_CAN
help
Say Y here if you want support for IO Mapped Bosch M_CAN controller.
This support is for devices that have the Bosch M_CAN controller
IP embedded into the device and the IP is IO Mapped to the processor.
config CAN_M_CAN_TCAN4X5X
- depends on CAN_M_CAN
depends on SPI
select REGMAP_SPI
tristate "TCAN4X5X M_CAN device"
@@ -23,3 +23,5 @@ config CAN_M_CAN_TCAN4X5X
Say Y here if you want support for Texas Instruments TCAN4x5x
M_CAN controller. This device is a peripheral device that uses the
SPI bus for communication.
+
+endif
diff --git a/drivers/net/can/m_can/m_can.c b/drivers/net/can/m_can/m_can.c
index 61a93b192037..05c978d1c53d 100644
--- a/drivers/net/can/m_can/m_can.c
+++ b/drivers/net/can/m_can/m_can.c
@@ -457,9 +457,9 @@ static void m_can_read_fifo(struct net_device *dev, u32 rxfs)
}
if (dlc & RX_BUF_FDF)
- cf->len = can_dlc2len((dlc >> 16) & 0x0F);
+ cf->len = can_fd_dlc2len((dlc >> 16) & 0x0F);
else
- cf->len = get_can_dlc((dlc >> 16) & 0x0F);
+ cf->len = can_cc_dlc2len((dlc >> 16) & 0x0F);
id = m_can_fifo_read(cdev, fgi, M_CAN_FIFO_ID);
if (id & RX_BUF_XTD)
@@ -596,7 +596,7 @@ static int m_can_handle_lec_err(struct net_device *dev,
}
stats->rx_packets++;
- stats->rx_bytes += cf->can_dlc;
+ stats->rx_bytes += cf->len;
netif_receive_skb(skb);
return 1;
@@ -723,7 +723,7 @@ static int m_can_handle_state_change(struct net_device *dev,
}
stats->rx_packets++;
- stats->rx_bytes += cf->can_dlc;
+ stats->rx_bytes += cf->len;
netif_receive_skb(skb);
return 1;
@@ -1491,7 +1491,7 @@ static netdev_tx_t m_can_tx_handler(struct m_can_classdev *cdev)
/* message ram configuration */
m_can_fifo_write(cdev, 0, M_CAN_FIFO_ID, id);
m_can_fifo_write(cdev, 0, M_CAN_FIFO_DLC,
- can_len2dlc(cf->len) << 16);
+ can_fd_len2dlc(cf->len) << 16);
for (i = 0; i < cf->len; i += 4)
m_can_fifo_write(cdev, 0,
@@ -1559,7 +1559,7 @@ static netdev_tx_t m_can_tx_handler(struct m_can_classdev *cdev)
m_can_fifo_write(cdev, putidx, M_CAN_FIFO_DLC,
((putidx << TX_BUF_MM_SHIFT) &
TX_BUF_MM_MASK) |
- (can_len2dlc(cf->len) << 16) |
+ (can_fd_len2dlc(cf->len) << 16) |
fdflags | TX_BUF_EFC);
for (i = 0; i < cf->len; i += 4)
@@ -1869,6 +1869,14 @@ pm_runtime_fail:
}
EXPORT_SYMBOL_GPL(m_can_class_register);
+void m_can_class_unregister(struct m_can_classdev *m_can_dev)
+{
+ unregister_candev(m_can_dev->net);
+
+ m_can_clk_stop(m_can_dev);
+}
+EXPORT_SYMBOL_GPL(m_can_class_unregister);
+
int m_can_class_suspend(struct device *dev)
{
struct net_device *ndev = dev_get_drvdata(dev);
@@ -1915,14 +1923,6 @@ int m_can_class_resume(struct device *dev)
}
EXPORT_SYMBOL_GPL(m_can_class_resume);
-void m_can_class_unregister(struct m_can_classdev *m_can_dev)
-{
- unregister_candev(m_can_dev->net);
-
- m_can_clk_stop(m_can_dev);
-}
-EXPORT_SYMBOL_GPL(m_can_class_unregister);
-
MODULE_AUTHOR("Dong Aisheng <b29396@freescale.com>");
MODULE_AUTHOR("Dan Murphy <dmurphy@ti.com>");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/can/m_can/m_can.h b/drivers/net/can/m_can/m_can.h
index b2699a7c9997..f8a692596e59 100644
--- a/drivers/net/can/m_can/m_can.h
+++ b/drivers/net/can/m_can/m_can.h
@@ -89,7 +89,6 @@ struct m_can_classdev {
void *device_data;
int version;
- int freq;
u32 irqstatus;
int pm_clock_support;
diff --git a/drivers/net/can/m_can/m_can_platform.c b/drivers/net/can/m_can/m_can_platform.c
index 161cb9be018c..c45a889a1afd 100644
--- a/drivers/net/can/m_can/m_can_platform.c
+++ b/drivers/net/can/m_can/m_can_platform.c
@@ -141,8 +141,6 @@ static int m_can_plat_remove(struct platform_device *pdev)
m_can_class_free_dev(mcan_class->net);
- platform_set_drvdata(pdev, NULL);
-
return 0;
}
diff --git a/drivers/net/can/m_can/tcan4x5x.c b/drivers/net/can/m_can/tcan4x5x.c
index 7347ab39c5b6..a0fecc3fb829 100644
--- a/drivers/net/can/m_can/tcan4x5x.c
+++ b/drivers/net/can/m_can/tcan4x5x.c
@@ -123,10 +123,6 @@ struct tcan4x5x_priv {
struct gpio_desc *device_wake_gpio;
struct gpio_desc *device_state_gpio;
struct regulator *power;
-
- /* Register based ip */
- int mram_start;
- int reg_offset;
};
static struct can_bittiming_const tcan4x5x_bittiming_const = {
@@ -260,7 +256,7 @@ static u32 tcan4x5x_read_reg(struct m_can_classdev *cdev, int reg)
struct tcan4x5x_priv *priv = cdev->device_data;
u32 val;
- regmap_read(priv->regmap, priv->reg_offset + reg, &val);
+ regmap_read(priv->regmap, TCAN4X5X_MCAN_OFFSET + reg, &val);
return val;
}
@@ -270,7 +266,7 @@ static u32 tcan4x5x_read_fifo(struct m_can_classdev *cdev, int addr_offset)
struct tcan4x5x_priv *priv = cdev->device_data;
u32 val;
- regmap_read(priv->regmap, priv->mram_start + addr_offset, &val);
+ regmap_read(priv->regmap, TCAN4X5X_MRAM_START + addr_offset, &val);
return val;
}
@@ -279,7 +275,7 @@ static int tcan4x5x_write_reg(struct m_can_classdev *cdev, int reg, int val)
{
struct tcan4x5x_priv *priv = cdev->device_data;
- return regmap_write(priv->regmap, priv->reg_offset + reg, val);
+ return regmap_write(priv->regmap, TCAN4X5X_MCAN_OFFSET + reg, val);
}
static int tcan4x5x_write_fifo(struct m_can_classdev *cdev,
@@ -287,7 +283,7 @@ static int tcan4x5x_write_fifo(struct m_can_classdev *cdev,
{
struct tcan4x5x_priv *priv = cdev->device_data;
- return regmap_write(priv->regmap, priv->mram_start + addr_offset, val);
+ return regmap_write(priv->regmap, TCAN4X5X_MRAM_START + addr_offset, val);
}
static int tcan4x5x_power_enable(struct regulator *reg, int enable)
@@ -328,12 +324,8 @@ static int tcan4x5x_clear_interrupts(struct m_can_classdev *cdev)
if (ret)
return ret;
- ret = tcan4x5x_write_tcan_reg(cdev, TCAN4X5X_ERROR_STATUS,
- TCAN4X5X_CLEAR_ALL_INT);
- if (ret)
- return ret;
-
- return ret;
+ return tcan4x5x_write_tcan_reg(cdev, TCAN4X5X_ERROR_STATUS,
+ TCAN4X5X_CLEAR_ALL_INT);
}
static int tcan4x5x_init(struct m_can_classdev *cdev)
@@ -379,7 +371,7 @@ static int tcan4x5x_disable_state(struct m_can_classdev *cdev)
TCAN4X5X_DISABLE_INH_MSK, 0x01);
}
-static int tcan4x5x_parse_config(struct m_can_classdev *cdev)
+static int tcan4x5x_get_gpios(struct m_can_classdev *cdev)
{
struct tcan4x5x_priv *tcan4x5x = cdev->device_data;
int ret;
@@ -469,8 +461,6 @@ static int tcan4x5x_can_probe(struct spi_device *spi)
goto out_m_can_class_free_dev;
}
- priv->reg_offset = TCAN4X5X_MCAN_OFFSET;
- priv->mram_start = TCAN4X5X_MRAM_START;
priv->spi = spi;
priv->mcan_dev = mcan_class;
@@ -502,7 +492,7 @@ static int tcan4x5x_can_probe(struct spi_device *spi)
if (ret)
goto out_m_can_class_free_dev;
- ret = tcan4x5x_parse_config(mcan_class);
+ ret = tcan4x5x_get_gpios(mcan_class);
if (ret)
goto out_power;
@@ -521,8 +511,6 @@ out_power:
tcan4x5x_power_enable(priv->power, 0);
out_m_can_class_free_dev:
m_can_class_free_dev(mcan_class->net);
- dev_err(&spi->dev, "Probe failed, err=%d\n", ret);
-
return ret;
}
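
tcan4x5x_clear_interrupts() drops the redundant "if (ret) return ret; return ret;" tail by returning the last register write directly. The same shape with a stubbed bus write (register addresses are illustrative):

#include <stdio.h>

static int write_reg(int reg, int val)
{
	(void)reg;
	(void)val;
	return 0; /* pretend the bus write succeeded */
}

/* After the cleanup: the last call's result is returned directly. */
static int clear_interrupts(void)
{
	int ret;

	ret = write_reg(0x08, ~0); /* interrupt flags */
	if (ret)
		return ret;

	return write_reg(0x0C, ~0); /* error status */
}

int main(void)
{
	printf("clear_interrupts() = %d\n", clear_interrupts());
	return 0;
}
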
diff --git a/drivers/net/can/mscan/mscan.c b/drivers/net/can/mscan/mscan.c
index 640ba1b356ec..5ed00a1558e1 100644
--- a/drivers/net/can/mscan/mscan.c
+++ b/drivers/net/can/mscan/mscan.c
@@ -250,16 +250,16 @@ static netdev_tx_t mscan_start_xmit(struct sk_buff *skb, struct net_device *dev)
void __iomem *data = &regs->tx.dsr1_0;
u16 *payload = (u16 *)frame->data;
- for (i = 0; i < frame->can_dlc / 2; i++) {
+ for (i = 0; i < frame->len / 2; i++) {
out_be16(data, *payload++);
data += 2 + _MSCAN_RESERVED_DSR_SIZE;
}
/* write remaining byte if necessary */
- if (frame->can_dlc & 1)
- out_8(data, frame->data[frame->can_dlc - 1]);
+ if (frame->len & 1)
+ out_8(data, frame->data[frame->len - 1]);
}
- out_8(&regs->tx.dlr, frame->can_dlc);
+ out_8(&regs->tx.dlr, frame->len);
out_8(&regs->tx.tbpr, priv->cur_pri);
/* Start transmission. */
@@ -312,19 +312,19 @@ static void mscan_get_rx_frame(struct net_device *dev, struct can_frame *frame)
if (can_id & 1)
frame->can_id |= CAN_RTR_FLAG;
- frame->can_dlc = get_can_dlc(in_8(&regs->rx.dlr) & 0xf);
+ frame->len = can_cc_dlc2len(in_8(&regs->rx.dlr) & 0xf);
if (!(frame->can_id & CAN_RTR_FLAG)) {
void __iomem *data = &regs->rx.dsr1_0;
u16 *payload = (u16 *)frame->data;
- for (i = 0; i < frame->can_dlc / 2; i++) {
+ for (i = 0; i < frame->len / 2; i++) {
*payload++ = in_be16(data);
data += 2 + _MSCAN_RESERVED_DSR_SIZE;
}
/* read remaining byte if necessary */
- if (frame->can_dlc & 1)
- frame->data[frame->can_dlc - 1] = in_8(data);
+ if (frame->len & 1)
+ frame->data[frame->len - 1] = in_8(data);
}
out_8(&regs->canrflg, MSCAN_RXF);
@@ -372,7 +372,7 @@ static void mscan_get_err_frame(struct net_device *dev, struct can_frame *frame,
}
}
priv->shadow_statflg = canrflg & MSCAN_STAT_MSK;
- frame->can_dlc = CAN_ERR_DLC;
+ frame->len = CAN_ERR_DLC;
out_8(&regs->canrflg, MSCAN_ERR_IF);
}
@@ -407,7 +407,7 @@ static int mscan_rx_poll(struct napi_struct *napi, int quota)
mscan_get_err_frame(dev, frame, canrflg);
stats->rx_packets++;
- stats->rx_bytes += frame->can_dlc;
+ stats->rx_bytes += frame->len;
work_done++;
netif_receive_skb(skb);
}
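
mscan moves the payload through 16-bit data registers: len / 2 word transfers, plus one single-byte transfer when the length is odd. The split, modeled in plain C:

#include <stdio.h>
#include <string.h>

int main(void)
{
	unsigned char data[8] = { 1, 2, 3, 4, 5 };
	unsigned char len = 5;
	unsigned short words[4];
	unsigned char last = 0;
	int i;

	/* full 16-bit words first ... */
	for (i = 0; i < len / 2; i++)
		memcpy(&words[i], &data[2 * i], 2);

	/* ... then the remaining byte, if the length is odd */
	if (len & 1)
		last = data[len - 1];

	printf("words copied: %d, trailing byte: %u\n", len / 2, last);
	return 0;
}
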
diff --git a/drivers/net/can/pch_can.c b/drivers/net/can/pch_can.c
index 5c180d2f3c3c..4f9e7ec192aa 100644
--- a/drivers/net/can/pch_can.c
+++ b/drivers/net/can/pch_can.c
@@ -563,7 +563,7 @@ static void pch_can_error(struct net_device *ndev, u32 status)
netif_receive_skb(skb);
stats->rx_packets++;
- stats->rx_bytes += cf->can_dlc;
+ stats->rx_bytes += cf->len;
}
static irqreturn_t pch_can_interrupt(int irq, void *dev_id)
@@ -683,10 +683,10 @@ static int pch_can_rx_normal(struct net_device *ndev, u32 obj_num, int quota)
if (id2 & PCH_ID2_DIR)
cf->can_id |= CAN_RTR_FLAG;
- cf->can_dlc = get_can_dlc((ioread32(&priv->regs->
+ cf->len = can_cc_dlc2len((ioread32(&priv->regs->
ifregs[0].mcont)) & 0xF);
- for (i = 0; i < cf->can_dlc; i += 2) {
+ for (i = 0; i < cf->len; i += 2) {
data_reg = ioread16(&priv->regs->ifregs[0].data[i / 2]);
cf->data[i] = data_reg;
cf->data[i + 1] = data_reg >> 8;
@@ -696,7 +696,7 @@ static int pch_can_rx_normal(struct net_device *ndev, u32 obj_num, int quota)
rcv_pkts++;
stats->rx_packets++;
quota--;
- stats->rx_bytes += cf->can_dlc;
+ stats->rx_bytes += cf->len;
pch_fifo_thresh(priv, obj_num);
obj_num++;
@@ -715,7 +715,7 @@ static void pch_can_tx_complete(struct net_device *ndev, u32 int_stat)
iowrite32(PCH_CMASK_RX_TX_GET | PCH_CMASK_CLRINTPND,
&priv->regs->ifregs[1].cmask);
pch_can_rw_msg_obj(&priv->regs->ifregs[1].creq, int_stat);
- dlc = get_can_dlc(ioread32(&priv->regs->ifregs[1].mcont) &
+ dlc = can_cc_dlc2len(ioread32(&priv->regs->ifregs[1].mcont) &
PCH_IF_MCONT_DLC);
stats->tx_bytes += dlc;
stats->tx_packets++;
@@ -919,7 +919,7 @@ static netdev_tx_t pch_xmit(struct sk_buff *skb, struct net_device *ndev)
iowrite32(id2, &priv->regs->ifregs[1].id2);
/* Copy data to register */
- for (i = 0; i < cf->can_dlc; i += 2) {
+ for (i = 0; i < cf->len; i += 2) {
iowrite16(cf->data[i] | (cf->data[i + 1] << 8),
&priv->regs->ifregs[1].data[i / 2]);
}
@@ -927,7 +927,7 @@ static netdev_tx_t pch_xmit(struct sk_buff *skb, struct net_device *ndev)
can_put_echo_skb(skb, ndev, tx_obj_no - PCH_RX_OBJ_END - 1);
/* Set the size of the data. Update if2_mcont */
- iowrite32(cf->can_dlc | PCH_IF_MCONT_NEWDAT | PCH_IF_MCONT_TXRQXT |
+ iowrite32(cf->len | PCH_IF_MCONT_NEWDAT | PCH_IF_MCONT_TXRQXT |
PCH_IF_MCONT_TXIE, &priv->regs->ifregs[1].mcont);
pch_can_rw_msg_obj(&priv->regs->ifregs[1].creq, tx_obj_no);
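
pch_can packs two payload bytes per 16-bit data register on transmit (low byte first) and splits them again on receive. A round-trip check of that packing:

#include <assert.h>
#include <stdio.h>

int main(void)
{
	unsigned char tx[4] = { 0x11, 0x22, 0x33, 0x44 };
	unsigned char rx[4];
	unsigned short reg;
	int i;

	for (i = 0; i < 4; i += 2) {
		/* transmit side: low byte | high byte << 8 */
		reg = tx[i] | (tx[i + 1] << 8);

		/* receive side: split the register again */
		rx[i] = reg & 0xff;
		rx[i + 1] = reg >> 8;
	}

	for (i = 0; i < 4; i++)
		assert(rx[i] == tx[i]);
	puts("round trip ok");
	return 0;
}
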
diff --git a/drivers/net/can/peak_canfd/peak_canfd.c b/drivers/net/can/peak_canfd/peak_canfd.c
index 40c33b8a5fda..c5334b0c3038 100644
--- a/drivers/net/can/peak_canfd/peak_canfd.c
+++ b/drivers/net/can/peak_canfd/peak_canfd.c
@@ -257,9 +257,9 @@ static int pucan_handle_can_rx(struct peak_canfd_priv *priv,
u8 cf_len;
if (rx_msg_flags & PUCAN_MSG_EXT_DATA_LEN)
- cf_len = can_dlc2len(get_canfd_dlc(pucan_msg_get_dlc(msg)));
+ cf_len = can_fd_dlc2len(pucan_msg_get_dlc(msg));
else
- cf_len = get_can_dlc(pucan_msg_get_dlc(msg));
+ cf_len = can_cc_dlc2len(pucan_msg_get_dlc(msg));
/* if this frame is an echo, */
if (rx_msg_flags & PUCAN_MSG_LOOPED_BACK) {
@@ -410,7 +410,7 @@ static int pucan_handle_status(struct peak_canfd_priv *priv,
}
stats->rx_packets++;
- stats->rx_bytes += cf->can_dlc;
+ stats->rx_bytes += cf->len;
pucan_netif_rx(skb, msg->ts_low, msg->ts_high);
return 0;
@@ -438,7 +438,7 @@ static int pucan_handle_cache_critical(struct peak_canfd_priv *priv)
cf->data[6] = priv->bec.txerr;
cf->data[7] = priv->bec.rxerr;
- stats->rx_bytes += cf->can_dlc;
+ stats->rx_bytes += cf->len;
stats->rx_packets++;
netif_rx(skb);
@@ -652,7 +652,7 @@ static netdev_tx_t peak_canfd_start_xmit(struct sk_buff *skb,
unsigned long flags;
bool should_stop_tx_queue;
int room_left;
- u8 can_dlc;
+ u8 len;
if (can_dropped_invalid_skb(ndev, skb))
return NETDEV_TX_OK;
@@ -682,7 +682,7 @@ static netdev_tx_t peak_canfd_start_xmit(struct sk_buff *skb,
if (can_is_canfd_skb(skb)) {
/* CAN FD frame format */
- can_dlc = can_len2dlc(cf->len);
+ len = can_fd_len2dlc(cf->len);
msg_flags |= PUCAN_MSG_EXT_DATA_LEN;
@@ -693,7 +693,7 @@ static netdev_tx_t peak_canfd_start_xmit(struct sk_buff *skb,
msg_flags |= PUCAN_MSG_ERROR_STATE_IND;
} else {
/* CAN 2.0 frame format */
- can_dlc = cf->len;
+ len = cf->len;
if (cf->can_id & CAN_RTR_FLAG)
msg_flags |= PUCAN_MSG_RTR;
@@ -707,7 +707,7 @@ static netdev_tx_t peak_canfd_start_xmit(struct sk_buff *skb,
msg_flags |= PUCAN_MSG_SELF_RECEIVE;
msg->flags = cpu_to_le16(msg_flags);
- msg->channel_dlc = PUCAN_MSG_CHANNEL_DLC(priv->index, can_dlc);
+ msg->channel_dlc = PUCAN_MSG_CHANNEL_DLC(priv->index, len);
memcpy(msg->d, cf->data, cf->len);
/* struct msg client field is used as an index in the echo skbs ring */
diff --git a/drivers/net/can/rcar/rcar_can.c b/drivers/net/can/rcar/rcar_can.c
index 48575900adb7..c803327f8f79 100644
--- a/drivers/net/can/rcar/rcar_can.c
+++ b/drivers/net/can/rcar/rcar_can.c
@@ -364,7 +364,7 @@ static void rcar_can_error(struct net_device *ndev)
if (skb) {
stats->rx_packets++;
- stats->rx_bytes += cf->can_dlc;
+ stats->rx_bytes += cf->len;
netif_rx(skb);
}
}
@@ -607,16 +607,16 @@ static netdev_tx_t rcar_can_start_xmit(struct sk_buff *skb,
if (cf->can_id & CAN_RTR_FLAG) { /* Remote transmission request */
data |= RCAR_CAN_RTR;
} else {
- for (i = 0; i < cf->can_dlc; i++)
+ for (i = 0; i < cf->len; i++)
writeb(cf->data[i],
&priv->regs->mb[RCAR_CAN_TX_FIFO_MBX].data[i]);
}
writel(data, &priv->regs->mb[RCAR_CAN_TX_FIFO_MBX].id);
- writeb(cf->can_dlc, &priv->regs->mb[RCAR_CAN_TX_FIFO_MBX].dlc);
+ writeb(cf->len, &priv->regs->mb[RCAR_CAN_TX_FIFO_MBX].dlc);
- priv->tx_dlc[priv->tx_head % RCAR_CAN_FIFO_DEPTH] = cf->can_dlc;
+ priv->tx_dlc[priv->tx_head % RCAR_CAN_FIFO_DEPTH] = cf->len;
can_put_echo_skb(skb, ndev, priv->tx_head % RCAR_CAN_FIFO_DEPTH);
priv->tx_head++;
/* Start Tx: write 0xff to the TFPCR register to increment
@@ -659,18 +659,18 @@ static void rcar_can_rx_pkt(struct rcar_can_priv *priv)
cf->can_id = (data >> RCAR_CAN_SID_SHIFT) & CAN_SFF_MASK;
dlc = readb(&priv->regs->mb[RCAR_CAN_RX_FIFO_MBX].dlc);
- cf->can_dlc = get_can_dlc(dlc);
+ cf->len = can_cc_dlc2len(dlc);
if (data & RCAR_CAN_RTR) {
cf->can_id |= CAN_RTR_FLAG;
} else {
- for (dlc = 0; dlc < cf->can_dlc; dlc++)
+ for (dlc = 0; dlc < cf->len; dlc++)
cf->data[dlc] =
readb(&priv->regs->mb[RCAR_CAN_RX_FIFO_MBX].data[dlc]);
}
can_led_event(priv->ndev, CAN_LED_EVENT_RX);
- stats->rx_bytes += cf->can_dlc;
+ stats->rx_bytes += cf->len;
stats->rx_packets++;
netif_receive_skb(skb);
}
diff --git a/drivers/net/can/rcar/rcar_canfd.c b/drivers/net/can/rcar/rcar_canfd.c
index de59dd6aad29..2778ed5c61d1 100644
--- a/drivers/net/can/rcar/rcar_canfd.c
+++ b/drivers/net/can/rcar/rcar_canfd.c
@@ -1025,7 +1025,7 @@ static void rcar_canfd_error(struct net_device *ndev, u32 cerfl,
rcar_canfd_write(priv->base, RCANFD_CERFL(ch),
RCANFD_CERFL_ERR(~cerfl));
stats->rx_packets++;
- stats->rx_bytes += cf->can_dlc;
+ stats->rx_bytes += cf->len;
netif_rx(skb);
}
@@ -1134,7 +1134,7 @@ static void rcar_canfd_state_change(struct net_device *ndev,
can_change_state(ndev, cf, tx_state, rx_state);
stats->rx_packets++;
- stats->rx_bytes += cf->can_dlc;
+ stats->rx_bytes += cf->len;
netif_rx(skb);
}
}
@@ -1357,7 +1357,7 @@ static netdev_tx_t rcar_canfd_start_xmit(struct sk_buff *skb,
if (cf->can_id & CAN_RTR_FLAG)
id |= RCANFD_CFID_CFRTR;
- dlc = RCANFD_CFPTR_CFDLC(can_len2dlc(cf->len));
+ dlc = RCANFD_CFPTR_CFDLC(can_fd_len2dlc(cf->len));
if (priv->can.ctrlmode & CAN_CTRLMODE_FD) {
rcar_canfd_write(priv->base,
@@ -1446,9 +1446,9 @@ static void rcar_canfd_rx_pkt(struct rcar_canfd_channel *priv)
if (priv->can.ctrlmode & CAN_CTRLMODE_FD) {
if (sts & RCANFD_RFFDSTS_RFFDF)
- cf->len = can_dlc2len(RCANFD_RFPTR_RFDLC(dlc));
+ cf->len = can_fd_dlc2len(RCANFD_RFPTR_RFDLC(dlc));
else
- cf->len = get_can_dlc(RCANFD_RFPTR_RFDLC(dlc));
+ cf->len = can_cc_dlc2len(RCANFD_RFPTR_RFDLC(dlc));
if (sts & RCANFD_RFFDSTS_RFESI) {
cf->flags |= CANFD_ESI;
@@ -1464,7 +1464,7 @@ static void rcar_canfd_rx_pkt(struct rcar_canfd_channel *priv)
rcar_canfd_get_data(priv, cf, RCANFD_F_RFDF(ridx, 0));
}
} else {
- cf->len = get_can_dlc(RCANFD_RFPTR_RFDLC(dlc));
+ cf->len = can_cc_dlc2len(RCANFD_RFPTR_RFDLC(dlc));
if (id & RCANFD_RFID_RFRTR)
cf->can_id |= CAN_RTR_FLAG;
else
diff --git a/drivers/net/can/rx-offload.c b/drivers/net/can/rx-offload.c
index 6e95193b215b..450c5cfcb3fc 100644
--- a/drivers/net/can/rx-offload.c
+++ b/drivers/net/can/rx-offload.c
@@ -55,7 +55,7 @@ static int can_rx_offload_napi_poll(struct napi_struct *napi, int quota)
work_done++;
stats->rx_packets++;
- stats->rx_bytes += cf->can_dlc;
+ stats->rx_bytes += cf->len;
netif_receive_skb(skb);
}
diff --git a/drivers/net/can/sja1000/sja1000.c b/drivers/net/can/sja1000/sja1000.c
index 25a4d7d0b349..b6a7003c51d2 100644
--- a/drivers/net/can/sja1000/sja1000.c
+++ b/drivers/net/can/sja1000/sja1000.c
@@ -284,7 +284,6 @@ static netdev_tx_t sja1000_start_xmit(struct sk_buff *skb,
struct sja1000_priv *priv = netdev_priv(dev);
struct can_frame *cf = (struct can_frame *)skb->data;
uint8_t fi;
- uint8_t dlc;
canid_t id;
uint8_t dreg;
u8 cmd_reg_val = 0x00;
@@ -295,7 +294,7 @@ static netdev_tx_t sja1000_start_xmit(struct sk_buff *skb,
netif_stop_queue(dev);
- fi = dlc = cf->can_dlc;
+ fi = can_get_cc_dlc(cf, priv->can.ctrlmode);
id = cf->can_id;
if (id & CAN_RTR_FLAG)
@@ -316,7 +315,7 @@ static netdev_tx_t sja1000_start_xmit(struct sk_buff *skb,
priv->write_reg(priv, SJA1000_ID2, (id & 0x00000007) << 5);
}
- for (i = 0; i < dlc; i++)
+ for (i = 0; i < cf->len; i++)
priv->write_reg(priv, dreg++, cf->data[i]);
can_put_echo_skb(skb, dev, 0);
@@ -367,11 +366,11 @@ static void sja1000_rx(struct net_device *dev)
| (priv->read_reg(priv, SJA1000_ID2) >> 5);
}
- cf->can_dlc = get_can_dlc(fi & 0x0F);
+ can_frame_set_cc_len(cf, fi & 0x0F, priv->can.ctrlmode);
if (fi & SJA1000_FI_RTR) {
id |= CAN_RTR_FLAG;
} else {
- for (i = 0; i < cf->can_dlc; i++)
+ for (i = 0; i < cf->len; i++)
cf->data[i] = priv->read_reg(priv, dreg++);
}
@@ -381,7 +380,7 @@ static void sja1000_rx(struct net_device *dev)
sja1000_write_cmdreg(priv, CMD_RRB);
stats->rx_packets++;
- stats->rx_bytes += cf->can_dlc;
+ stats->rx_bytes += cf->len;
netif_rx(skb);
can_led_event(dev, CAN_LED_EVENT_RX);
@@ -489,7 +488,7 @@ static int sja1000_err(struct net_device *dev, uint8_t isrc, uint8_t status)
}
stats->rx_packets++;
- stats->rx_bytes += cf->can_dlc;
+ stats->rx_bytes += cf->len;
netif_rx(skb);
return 0;
@@ -637,7 +636,8 @@ struct net_device *alloc_sja1000dev(int sizeof_priv)
CAN_CTRLMODE_3_SAMPLES |
CAN_CTRLMODE_ONE_SHOT |
CAN_CTRLMODE_BERR_REPORTING |
- CAN_CTRLMODE_PRESUME_ACK;
+ CAN_CTRLMODE_PRESUME_ACK |
+ CAN_CTRLMODE_CC_LEN8_DLC;
spin_lock_init(&priv->cmdreg_lock);
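
sja1000 now advertises CAN_CTRLMODE_CC_LEN8_DLC and routes DLC handling through can_get_cc_dlc()/can_frame_set_cc_len(), which preserve raw DLC values 9..15 on Classical CAN while the payload length stays capped at 8. A minimal userspace model of the assumed helper semantics (the flag value and field names here are illustrative; the real helpers live in the CAN core):

#include <stdio.h>

#define CTRLMODE_CC_LEN8_DLC 0x100 /* illustrative flag value */

struct cc_frame {
	unsigned char len;      /* payload length, 0..8 */
	unsigned char len8_dlc; /* raw DLC when len == 8 and DLC > 8 */
};

/* model of can_frame_set_cc_len(): clamp len, optionally keep raw DLC */
static void set_cc_len(struct cc_frame *f, unsigned char dlc, unsigned int mode)
{
	f->len = dlc > 8 ? 8 : dlc;
	f->len8_dlc = 0;
	if ((mode & CTRLMODE_CC_LEN8_DLC) && dlc > 8)
		f->len8_dlc = dlc;
}

/* model of can_get_cc_dlc(): return the raw DLC if one was stored */
static unsigned char get_cc_dlc(const struct cc_frame *f, unsigned int mode)
{
	if ((mode & CTRLMODE_CC_LEN8_DLC) && f->len == 8 && f->len8_dlc > 8)
		return f->len8_dlc;
	return f->len;
}

int main(void)
{
	struct cc_frame f;

	set_cc_len(&f, 0xF, CTRLMODE_CC_LEN8_DLC);
	printf("len=%u dlc=%u\n", f.len, get_cc_dlc(&f, CTRLMODE_CC_LEN8_DLC));
	return 0;
}
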
diff --git a/drivers/net/can/slcan.c b/drivers/net/can/slcan.c
index b4a39f0449ba..a1bd1be09548 100644
--- a/drivers/net/can/slcan.c
+++ b/drivers/net/can/slcan.c
@@ -106,8 +106,8 @@ static struct net_device **slcan_devs;
/*
* A CAN frame has a can_id (11 bit standard frame format OR 29 bit extended
- * frame format) a data length code (can_dlc) which can be from 0 to 8
- * and up to <can_dlc> data bytes as payload.
+ * frame format), a data length code (len) which can be from 0 to 8
+ * and up to <len> data bytes as payload.
* Additionally a CAN frame may become a remote transmission frame if the
* RTR-bit is set. This causes another ECU to send a CAN frame with the
* given can_id.
@@ -128,10 +128,10 @@ static struct net_device **slcan_devs;
*
* Examples:
*
- * t1230 : can_id 0x123, can_dlc 0, no data
- * t4563112233 : can_id 0x456, can_dlc 3, data 0x11 0x22 0x33
- * T12ABCDEF2AA55 : extended can_id 0x12ABCDEF, can_dlc 2, data 0xAA 0x55
- * r1230 : can_id 0x123, can_dlc 0, no data, remote transmission request
+ * t1230 : can_id 0x123, len 0, no data
+ * t4563112233 : can_id 0x456, len 3, data 0x11 0x22 0x33
+ * T12ABCDEF2AA55 : extended can_id 0x12ABCDEF, len 2, data 0xAA 0x55
+ * r1230 : can_id 0x123, len 0, no data, remote transmission request
*
*/
@@ -156,7 +156,7 @@ static void slc_bump(struct slcan *sl)
fallthrough;
case 't':
/* store dlc ASCII value and terminate SFF CAN ID string */
- cf.can_dlc = sl->rbuff[SLC_CMD_LEN + SLC_SFF_ID_LEN];
+ cf.len = sl->rbuff[SLC_CMD_LEN + SLC_SFF_ID_LEN];
sl->rbuff[SLC_CMD_LEN + SLC_SFF_ID_LEN] = 0;
/* point to payload data behind the dlc */
cmd += SLC_CMD_LEN + SLC_SFF_ID_LEN + 1;
@@ -167,7 +167,7 @@ static void slc_bump(struct slcan *sl)
case 'T':
cf.can_id |= CAN_EFF_FLAG;
/* store dlc ASCII value and terminate EFF CAN ID string */
- cf.can_dlc = sl->rbuff[SLC_CMD_LEN + SLC_EFF_ID_LEN];
+ cf.len = sl->rbuff[SLC_CMD_LEN + SLC_EFF_ID_LEN];
sl->rbuff[SLC_CMD_LEN + SLC_EFF_ID_LEN] = 0;
/* point to payload data behind the dlc */
cmd += SLC_CMD_LEN + SLC_EFF_ID_LEN + 1;
@@ -181,15 +181,15 @@ static void slc_bump(struct slcan *sl)
cf.can_id |= tmpid;
- /* get can_dlc from sanitized ASCII value */
- if (cf.can_dlc >= '0' && cf.can_dlc < '9')
- cf.can_dlc -= '0';
+ /* get len from sanitized ASCII value */
+ if (cf.len >= '0' && cf.len < '9')
+ cf.len -= '0';
else
return;
/* RTR frames may have a dlc > 0 but they never have any data bytes */
if (!(cf.can_id & CAN_RTR_FLAG)) {
- for (i = 0; i < cf.can_dlc; i++) {
+ for (i = 0; i < cf.len; i++) {
tmp = hex_to_bin(*cmd++);
if (tmp < 0)
return;
@@ -218,7 +218,7 @@ static void slc_bump(struct slcan *sl)
skb_put_data(skb, &cf, sizeof(struct can_frame));
sl->dev->stats.rx_packets++;
- sl->dev->stats.rx_bytes += cf.can_dlc;
+ sl->dev->stats.rx_bytes += cf.len;
netif_rx_ni(skb);
}
@@ -282,11 +282,11 @@ static void slc_encaps(struct slcan *sl, struct can_frame *cf)
pos += (cf->can_id & CAN_EFF_FLAG) ? SLC_EFF_ID_LEN : SLC_SFF_ID_LEN;
- *pos++ = cf->can_dlc + '0';
+ *pos++ = cf->len + '0';
/* RTR frames may have a dlc > 0 but they never have any data bytes */
if (!(cf->can_id & CAN_RTR_FLAG)) {
- for (i = 0; i < cf->can_dlc; i++)
+ for (i = 0; i < cf->len; i++)
pos = hex_byte_pack_upper(pos, cf->data[i]);
}
@@ -304,7 +304,7 @@ static void slc_encaps(struct slcan *sl, struct can_frame *cf)
actual = sl->tty->ops->write(sl->tty, sl->xbuff, pos - sl->xbuff);
sl->xleft = (pos - sl->xbuff) - actual;
sl->xhead = sl->xbuff + actual;
- sl->dev->stats.tx_bytes += cf->can_dlc;
+ sl->dev->stats.tx_bytes += cf->len;
}
/* Write out any remaining transmit buffer. Scheduled when tty is writable */
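
The comment block above documents the SLCAN ASCII framing ('t' + 3 hex ID digits + 1 DLC digit + 2 hex digits per data byte). A simplified standalone parser for the SFF 't' case, without the sanitizing that slc_bump() performs:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(void)
{
	const char *cmd = "t4563112233"; /* can_id 0x456, len 3, data 11 22 33 */
	char idbuf[4] = { 0 };
	unsigned int id;
	unsigned char len, data[8];
	int i;

	if (cmd[0] != 't' || strlen(cmd) < 5)
		return 1;

	memcpy(idbuf, cmd + 1, 3);              /* 3 hex digits of SFF ID */
	id = strtoul(idbuf, NULL, 16);

	len = cmd[4] - '0';                     /* single ASCII DLC digit */
	if (len > 8)
		return 1;

	for (i = 0; i < len; i++) {             /* two hex digits per byte */
		char byte[3] = { cmd[5 + 2 * i], cmd[6 + 2 * i], 0 };

		data[i] = strtoul(byte, NULL, 16);
	}

	printf("id=0x%03X len=%u data=%02X %02X %02X\n",
	       id, len, data[0], data[1], data[2]);
	return 0;
}
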
diff --git a/drivers/net/can/softing/softing_fw.c b/drivers/net/can/softing/softing_fw.c
index ccd649a8e37b..7e1536877993 100644
--- a/drivers/net/can/softing/softing_fw.c
+++ b/drivers/net/can/softing/softing_fw.c
@@ -624,7 +624,7 @@ int softing_startstop(struct net_device *dev, int up)
*/
memset(&msg, 0, sizeof(msg));
msg.can_id = CAN_ERR_FLAG | CAN_ERR_RESTARTED;
- msg.can_dlc = CAN_ERR_DLC;
+ msg.len = CAN_ERR_DLC;
for (j = 0; j < ARRAY_SIZE(card->net); ++j) {
if (!(bus_bitmask_start & (1 << j)))
continue;
diff --git a/drivers/net/can/softing/softing_main.c b/drivers/net/can/softing/softing_main.c
index 9d2faaa39ce4..03a68bb486fd 100644
--- a/drivers/net/can/softing/softing_main.c
+++ b/drivers/net/can/softing/softing_main.c
@@ -84,7 +84,7 @@ static netdev_tx_t softing_netdev_start_xmit(struct sk_buff *skb,
if (priv->index)
*ptr |= CMD_BUS2;
++ptr;
- *ptr++ = cf->can_dlc;
+ *ptr++ = cf->len;
*ptr++ = (cf->can_id >> 0);
*ptr++ = (cf->can_id >> 8);
if (cf->can_id & CAN_EFF_FLAG) {
@@ -95,7 +95,7 @@ static netdev_tx_t softing_netdev_start_xmit(struct sk_buff *skb,
ptr += 1;
}
if (!(cf->can_id & CAN_RTR_FLAG))
- memcpy(ptr, &cf->data[0], cf->can_dlc);
+ memcpy(ptr, &cf->data[0], cf->len);
memcpy_toio(&card->dpram[DPRAM_TX + DPRAM_TX_SIZE * fifo_wr],
buf, DPRAM_TX_SIZE);
if (++fifo_wr >= DPRAM_TX_CNT)
@@ -167,7 +167,7 @@ static int softing_handle_1(struct softing *card)
iowrite8(0, &card->dpram[DPRAM_RX_LOST]);
/* prepare msg */
msg.can_id = CAN_ERR_FLAG | CAN_ERR_CRTL;
- msg.can_dlc = CAN_ERR_DLC;
+ msg.len = CAN_ERR_DLC;
msg.data[1] = CAN_ERR_CRTL_RX_OVERFLOW;
/*
* service all buses, as we don't know which one it applies to
@@ -218,7 +218,7 @@ static int softing_handle_1(struct softing *card)
state = *ptr++;
msg.can_id = CAN_ERR_FLAG;
- msg.can_dlc = CAN_ERR_DLC;
+ msg.len = CAN_ERR_DLC;
if (state & SF_MASK_BUSOFF) {
can_state = CAN_STATE_BUS_OFF;
@@ -261,7 +261,7 @@ static int softing_handle_1(struct softing *card)
} else {
if (cmd & CMD_RTR)
msg.can_id |= CAN_RTR_FLAG;
- msg.can_dlc = get_can_dlc(*ptr++);
+ msg.len = can_cc_dlc2len(*ptr++);
if (cmd & CMD_XTD) {
msg.can_id |= CAN_EFF_FLAG;
msg.can_id |= le32_to_cpup((void *)ptr);
@@ -294,7 +294,7 @@ static int softing_handle_1(struct softing *card)
--card->tx.pending;
++netdev->stats.tx_packets;
if (!(msg.can_id & CAN_RTR_FLAG))
- netdev->stats.tx_bytes += msg.can_dlc;
+ netdev->stats.tx_bytes += msg.len;
} else {
int ret;
@@ -302,7 +302,7 @@ static int softing_handle_1(struct softing *card)
if (ret == NET_RX_SUCCESS) {
++netdev->stats.rx_packets;
if (!(msg.can_id & CAN_RTR_FLAG))
- netdev->stats.rx_bytes += msg.can_dlc;
+ netdev->stats.rx_bytes += msg.len;
} else {
++netdev->stats.rx_dropped;
}
diff --git a/drivers/net/can/spi/hi311x.c b/drivers/net/can/spi/hi311x.c
index 73d48c3b8ded..f9455de94786 100644
--- a/drivers/net/can/spi/hi311x.c
+++ b/drivers/net/can/spi/hi311x.c
@@ -277,13 +277,13 @@ static void hi3110_hw_tx(struct spi_device *spi, struct can_frame *frame)
((frame->can_id & CAN_EFF_MASK) << 1) |
((frame->can_id & CAN_RTR_FLAG) ? 1 : 0);
- buf[HI3110_FIFO_EXT_DLC_OFF] = frame->can_dlc;
+ buf[HI3110_FIFO_EXT_DLC_OFF] = frame->len;
memcpy(buf + HI3110_FIFO_EXT_DATA_OFF,
- frame->data, frame->can_dlc);
+ frame->data, frame->len);
hi3110_hw_tx_frame(spi, buf, HI3110_TX_EXT_BUF_LEN -
- (HI3110_CAN_MAX_DATA_LEN - frame->can_dlc));
+ (HI3110_CAN_MAX_DATA_LEN - frame->len));
} else {
/* Standard frame */
buf[HI3110_FIFO_ID_OFF] = (frame->can_id & CAN_SFF_MASK) >> 3;
@@ -291,13 +291,13 @@ static void hi3110_hw_tx(struct spi_device *spi, struct can_frame *frame)
((frame->can_id & CAN_SFF_MASK) << 5) |
((frame->can_id & CAN_RTR_FLAG) ? (1 << 4) : 0);
- buf[HI3110_FIFO_STD_DLC_OFF] = frame->can_dlc;
+ buf[HI3110_FIFO_STD_DLC_OFF] = frame->len;
memcpy(buf + HI3110_FIFO_STD_DATA_OFF,
- frame->data, frame->can_dlc);
+ frame->data, frame->len);
hi3110_hw_tx_frame(spi, buf, HI3110_TX_STD_BUF_LEN -
- (HI3110_CAN_MAX_DATA_LEN - frame->can_dlc));
+ (HI3110_CAN_MAX_DATA_LEN - frame->len));
}
}
@@ -341,16 +341,16 @@ static void hi3110_hw_rx(struct spi_device *spi)
}
/* Data length */
- frame->can_dlc = get_can_dlc(buf[HI3110_FIFO_WOTIME_DLC_OFF] & 0x0F);
+ frame->len = can_cc_dlc2len(buf[HI3110_FIFO_WOTIME_DLC_OFF] & 0x0F);
if (buf[HI3110_FIFO_WOTIME_ID_OFF + 3] & HI3110_FIFO_WOTIME_ID_RTR)
frame->can_id |= CAN_RTR_FLAG;
else
memcpy(frame->data, buf + HI3110_FIFO_WOTIME_DAT_OFF,
- frame->can_dlc);
+ frame->len);
priv->net->stats.rx_packets++;
- priv->net->stats.rx_bytes += frame->can_dlc;
+ priv->net->stats.rx_bytes += frame->len;
can_led_event(priv->net, CAN_LED_EVENT_RX);
@@ -585,7 +585,7 @@ static void hi3110_tx_work_handler(struct work_struct *ws)
} else {
frame = (struct can_frame *)priv->tx_skb->data;
hi3110_hw_tx(spi, frame);
- priv->tx_len = 1 + frame->can_dlc;
+ priv->tx_len = 1 + frame->len;
can_put_echo_skb(priv->tx_skb, net, 0);
priv->tx_skb = NULL;
}
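
hi3110_hw_tx() shortens the SPI transfer to header plus actual payload: the byte count is the full buffer length minus the unused data bytes. The arithmetic, with an illustrative buffer size:

#include <stdio.h>

#define MAX_DATA_LEN  8   /* Classical CAN payload maximum */
#define EXT_BUF_LEN  14   /* illustrative: header + 8 data bytes */

int main(void)
{
	unsigned char len;

	/* bytes actually clocked out for each payload length */
	for (len = 0; len <= MAX_DATA_LEN; len++)
		printf("len=%u -> %u bytes on the wire\n",
		       len, EXT_BUF_LEN - (MAX_DATA_LEN - len));
	return 0;
}
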
diff --git a/drivers/net/can/spi/mcp251x.c b/drivers/net/can/spi/mcp251x.c
index 22d814ae4edc..25859d16d06f 100644
--- a/drivers/net/can/spi/mcp251x.c
+++ b/drivers/net/can/spi/mcp251x.c
@@ -644,9 +644,9 @@ static void mcp251x_hw_tx(struct spi_device *spi, struct can_frame *frame,
((eid >> SIDL_EID_SHIFT) & SIDL_EID_MASK);
buf[TXBEID8_OFF] = GET_BYTE(eid, 1);
buf[TXBEID0_OFF] = GET_BYTE(eid, 0);
- buf[TXBDLC_OFF] = (rtr << DLC_RTR_SHIFT) | frame->can_dlc;
- memcpy(buf + TXBDAT_OFF, frame->data, frame->can_dlc);
- mcp251x_hw_tx_frame(spi, buf, frame->can_dlc, tx_buf_idx);
+ buf[TXBDLC_OFF] = (rtr << DLC_RTR_SHIFT) | frame->len;
+ memcpy(buf + TXBDAT_OFF, frame->data, frame->len);
+ mcp251x_hw_tx_frame(spi, buf, frame->len, tx_buf_idx);
/* use INSTRUCTION_RTS, to avoid "repeated frame problem" */
priv->spi_tx_buf[0] = INSTRUCTION_RTS(1 << tx_buf_idx);
@@ -664,7 +664,7 @@ static void mcp251x_hw_rx_frame(struct spi_device *spi, u8 *buf,
for (i = 1; i < RXBDAT_OFF; i++)
buf[i] = mcp251x_read_reg(spi, RXBCTRL(buf_idx) + i);
- len = get_can_dlc(buf[RXBDLC_OFF] & RXBDLC_LEN_MASK);
+ len = can_cc_dlc2len(buf[RXBDLC_OFF] & RXBDLC_LEN_MASK);
for (; i < (RXBDAT_OFF + len); i++)
buf[i] = mcp251x_read_reg(spi, RXBCTRL(buf_idx) + i);
} else {
@@ -720,11 +720,11 @@ static void mcp251x_hw_rx(struct spi_device *spi, int buf_idx)
frame->can_id |= CAN_RTR_FLAG;
}
/* Data length */
- frame->can_dlc = get_can_dlc(buf[RXBDLC_OFF] & RXBDLC_LEN_MASK);
- memcpy(frame->data, buf + RXBDAT_OFF, frame->can_dlc);
+ frame->len = can_cc_dlc2len(buf[RXBDLC_OFF] & RXBDLC_LEN_MASK);
+ memcpy(frame->data, buf + RXBDAT_OFF, frame->len);
priv->net->stats.rx_packets++;
- priv->net->stats.rx_bytes += frame->can_dlc;
+ priv->net->stats.rx_bytes += frame->len;
can_led_event(priv->net, CAN_LED_EVENT_RX);
@@ -998,10 +998,10 @@ static void mcp251x_tx_work_handler(struct work_struct *ws)
} else {
frame = (struct can_frame *)priv->tx_skb->data;
- if (frame->can_dlc > CAN_FRAME_MAX_DATA_LEN)
- frame->can_dlc = CAN_FRAME_MAX_DATA_LEN;
+ if (frame->len > CAN_FRAME_MAX_DATA_LEN)
+ frame->len = CAN_FRAME_MAX_DATA_LEN;
mcp251x_hw_tx(spi, frame, 0);
- priv->tx_len = 1 + frame->can_dlc;
+ priv->tx_len = 1 + frame->len;
can_put_echo_skb(priv->tx_skb, net, 0);
priv->tx_skb = NULL;
}
diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c b/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c
index 8a39be076e14..20cbd5c446f5 100644
--- a/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c
+++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c
@@ -326,17 +326,36 @@ mcp251xfd_tx_ring_init_tx_obj(const struct mcp251xfd_priv *priv,
static void mcp251xfd_ring_init(struct mcp251xfd_priv *priv)
{
+ struct mcp251xfd_tef_ring *tef_ring;
struct mcp251xfd_tx_ring *tx_ring;
struct mcp251xfd_rx_ring *rx_ring, *prev_rx_ring = NULL;
struct mcp251xfd_tx_obj *tx_obj;
u32 val;
u16 addr;
u8 len;
- int i;
+ int i, j;
/* TEF */
- priv->tef.head = 0;
- priv->tef.tail = 0;
+ tef_ring = priv->tef;
+ tef_ring->head = 0;
+ tef_ring->tail = 0;
+
+ /* FIFO increment TEF tail pointer */
+ addr = MCP251XFD_REG_TEFCON;
+ val = MCP251XFD_REG_TEFCON_UINC;
+ len = mcp251xfd_cmd_prepare_write_reg(priv, &tef_ring->uinc_buf,
+ addr, val, val);
+
+ for (j = 0; j < ARRAY_SIZE(tef_ring->uinc_xfer); j++) {
+ struct spi_transfer *xfer;
+
+ xfer = &tef_ring->uinc_xfer[j];
+ xfer->tx_buf = &tef_ring->uinc_buf;
+ xfer->len = len;
+ xfer->cs_change = 1;
+ xfer->cs_change_delay.value = 0;
+ xfer->cs_change_delay.unit = SPI_DELAY_UNIT_NSECS;
+ }
/* TX */
tx_ring = priv->tx;
@@ -370,6 +389,23 @@ static void mcp251xfd_ring_init(struct mcp251xfd_priv *priv)
prev_rx_ring->obj_num;
prev_rx_ring = rx_ring;
+
+ /* FIFO increment RX tail pointer */
+ addr = MCP251XFD_REG_FIFOCON(rx_ring->fifo_nr);
+ val = MCP251XFD_REG_FIFOCON_UINC;
+ len = mcp251xfd_cmd_prepare_write_reg(priv, &rx_ring->uinc_buf,
+ addr, val, val);
+
+ for (j = 0; j < ARRAY_SIZE(rx_ring->uinc_xfer); j++) {
+ struct spi_transfer *xfer;
+
+ xfer = &rx_ring->uinc_xfer[j];
+ xfer->tx_buf = &rx_ring->uinc_buf;
+ xfer->len = len;
+ xfer->cs_change = 1;
+ xfer->cs_change_delay.value = 0;
+ xfer->cs_change_delay.unit = SPI_DELAY_UNIT_NSECS;
+ }
}
}
@@ -416,7 +452,8 @@ static int mcp251xfd_ring_alloc(struct mcp251xfd_priv *priv)
int rx_obj_num;
rx_obj_num = ram_free / rx_obj_size;
- rx_obj_num = min(1 << (fls(rx_obj_num) - 1), 32);
+ rx_obj_num = min(1 << (fls(rx_obj_num) - 1),
+ MCP251XFD_RX_OBJ_NUM_MAX);
rx_ring = kzalloc(sizeof(*rx_ring) + rx_obj_size * rx_obj_num,
GFP_KERNEL);
@@ -644,10 +681,7 @@ static int mcp251xfd_chip_softreset(const struct mcp251xfd_priv *priv)
return 0;
}
- if (err)
- return err;
-
- return -ETIMEDOUT;
+ return err;
}
static int mcp251xfd_chip_clock_init(const struct mcp251xfd_priv *priv)
@@ -1204,7 +1238,7 @@ mcp251xfd_handle_tefif_recover(const struct mcp251xfd_priv *priv, const u32 seq)
tef_sta & MCP251XFD_REG_TEFSTA_TEFFIF ?
"full" : tef_sta & MCP251XFD_REG_TEFSTA_TEFNEIF ?
"not empty" : "empty",
- seq, priv->tef.tail, priv->tef.head, tx_ring->head);
+ seq, priv->tef->tail, priv->tef->head, tx_ring->head);
/* The Sequence Number in the TEF doesn't match our tef_tail. */
return -EAGAIN;
@@ -1214,10 +1248,8 @@ static int
mcp251xfd_handle_tefif_one(struct mcp251xfd_priv *priv,
const struct mcp251xfd_hw_tef_obj *hw_tef_obj)
{
- struct mcp251xfd_tx_ring *tx_ring = priv->tx;
struct net_device_stats *stats = &priv->ndev->stats;
u32 seq, seq_masked, tef_tail_masked;
- int err;
seq = FIELD_GET(MCP251XFD_OBJ_FLAGS_SEQ_MCP2518FD_MASK,
hw_tef_obj->flags);
@@ -1228,7 +1260,7 @@ mcp251xfd_handle_tefif_one(struct mcp251xfd_priv *priv,
*/
seq_masked = seq &
field_mask(MCP251XFD_OBJ_FLAGS_SEQ_MCP2517FD_MASK);
- tef_tail_masked = priv->tef.tail &
+ tef_tail_masked = priv->tef->tail &
field_mask(MCP251XFD_OBJ_FLAGS_SEQ_MCP2517FD_MASK);
if (seq_masked != tef_tail_masked)
return mcp251xfd_handle_tefif_recover(priv, seq);
@@ -1238,18 +1270,9 @@ mcp251xfd_handle_tefif_one(struct mcp251xfd_priv *priv,
mcp251xfd_get_tef_tail(priv),
hw_tef_obj->ts);
stats->tx_packets++;
+ priv->tef->tail++;
- /* finally increment the TEF pointer */
- err = regmap_update_bits(priv->map_reg, MCP251XFD_REG_TEFCON,
- GENMASK(15, 8),
- MCP251XFD_REG_TEFCON_UINC);
- if (err)
- return err;
-
- priv->tef.tail++;
- tx_ring->tail++;
-
- return mcp251xfd_check_tef_tail(priv);
+ return 0;
}
static int mcp251xfd_tef_ring_update(struct mcp251xfd_priv *priv)
@@ -1266,12 +1289,12 @@ static int mcp251xfd_tef_ring_update(struct mcp251xfd_priv *priv)
/* chip_tx_tail is the next TX-Object sent by the HW.
* The new TEF head must be >= the old head, ...
*/
- new_head = round_down(priv->tef.head, tx_ring->obj_num) + chip_tx_tail;
- if (new_head <= priv->tef.head)
+ new_head = round_down(priv->tef->head, tx_ring->obj_num) + chip_tx_tail;
+ if (new_head <= priv->tef->head)
new_head += tx_ring->obj_num;
/* ... but it cannot exceed the TX head. */
- priv->tef.head = min(new_head, tx_ring->head);
+ priv->tef->head = min(new_head, tx_ring->head);
return mcp251xfd_check_tef_tail(priv);
}
@@ -1336,6 +1359,40 @@ static int mcp251xfd_handle_tefif(struct mcp251xfd_priv *priv)
}
out_netif_wake_queue:
+ len = i; /* number of successfully handled TEFs */
+ if (len) {
+ struct mcp251xfd_tef_ring *ring = priv->tef;
+ struct mcp251xfd_tx_ring *tx_ring = priv->tx;
+ struct spi_transfer *last_xfer;
+
+ tx_ring->tail += len;
+
+ /* Increment the TEF FIFO tail pointer 'len' times in
+ * a single SPI message.
+ */
+
+ /* Note:
+ *
+ * "cs_change == 1" on the last transfer results in an
+ * active chip select after the complete SPI
+ * message. This causes the controller to interpret
+ * the next register access as data. Temporarily set
+ * "cs_change" on the last transfer to "0" to properly
+ * deactivate the chip select at the end of the
+ * message.
+ */
+ last_xfer = &ring->uinc_xfer[len - 1];
+ last_xfer->cs_change = 0;
+ err = spi_sync_transfer(priv->spi, ring->uinc_xfer, len);
+ last_xfer->cs_change = 1;
+ if (err)
+ return err;
+
+ err = mcp251xfd_check_tef_tail(priv);
+ if (err)
+ return err;
+ }
+
mcp251xfd_ecc_tefif_successful(priv);
if (mcp251xfd_get_tx_free(priv->tx)) {
@@ -1405,12 +1462,12 @@ mcp251xfd_hw_rx_obj_to_skb(const struct mcp251xfd_priv *priv,
cfd->flags |= CANFD_BRS;
dlc = FIELD_GET(MCP251XFD_OBJ_FLAGS_DLC, hw_rx_obj->flags);
- cfd->len = can_dlc2len(get_canfd_dlc(dlc));
+ cfd->len = can_fd_dlc2len(dlc);
} else {
if (hw_rx_obj->flags & MCP251XFD_OBJ_FLAGS_RTR)
cfd->can_id |= CAN_RTR_FLAG;
- cfd->len = get_can_dlc(FIELD_GET(MCP251XFD_OBJ_FLAGS_DLC,
+ cfd->len = can_cc_dlc2len(FIELD_GET(MCP251XFD_OBJ_FLAGS_DLC,
hw_rx_obj->flags));
}
@@ -1442,13 +1499,7 @@ mcp251xfd_handle_rxif_one(struct mcp251xfd_priv *priv,
if (err)
stats->rx_fifo_errors++;
- ring->tail++;
-
- /* finally increment the RX pointer */
- return regmap_update_bits(priv->map_reg,
- MCP251XFD_REG_FIFOCON(ring->fifo_nr),
- GENMASK(15, 8),
- MCP251XFD_REG_FIFOCON_UINC);
+ return 0;
}
static inline int
@@ -1480,6 +1531,8 @@ mcp251xfd_handle_rxif_ring(struct mcp251xfd_priv *priv,
return err;
while ((len = mcp251xfd_get_rx_linear_len(ring))) {
+ struct spi_transfer *last_xfer;
+
rx_tail = mcp251xfd_get_rx_tail(ring);
err = mcp251xfd_rx_obj_read(priv, ring, hw_rx_obj,
@@ -1494,6 +1547,28 @@ mcp251xfd_handle_rxif_ring(struct mcp251xfd_priv *priv,
if (err)
return err;
}
+
+ /* Increment the RX FIFO tail pointer 'len' times in a
+ * single SPI message.
+ */
+ ring->tail += len;
+
+ /* Note:
+ *
+ * "cs_change == 1" on the last transfer results in an
+ * active chip select after the complete SPI
+ * message. This causes the controller to interpret
+ * the next register access as data. Temporarily set
+ * "cs_change" on the last transfer to "0" to properly
+ * deactivate the chip select at the end of the
+ * message.
+ */
+ last_xfer = &ring->uinc_xfer[len - 1];
+ last_xfer->cs_change = 0;
+ err = spi_sync_transfer(priv->spi, ring->uinc_xfer, len);
+ last_xfer->cs_change = 1;
+ if (err)
+ return err;
}
return 0;
@@ -2244,7 +2319,7 @@ mcp251xfd_tx_obj_from_skb(const struct mcp251xfd_priv *priv,
* harm, only the lower 7 bits will be transferred into the
* TEF object.
*/
- dlc = can_len2dlc(cfd->len);
+ dlc = can_fd_len2dlc(cfd->len);
flags |= FIELD_PREP(MCP251XFD_OBJ_FLAGS_SEQ_MCP2518FD_MASK, seq) |
FIELD_PREP(MCP251XFD_OBJ_FLAGS_DLC, dlc);
@@ -2273,7 +2348,7 @@ mcp251xfd_tx_obj_from_skb(const struct mcp251xfd_priv *priv,
/* Clear data at end of CAN frame */
offset = round_down(cfd->len, sizeof(u32));
- len = round_up(can_dlc2len(dlc), sizeof(u32)) - offset;
+ len = round_up(can_fd_dlc2len(dlc), sizeof(u32)) - offset;
if (MCP251XFD_SANITIZE_CAN && len)
memset(hw_tx_obj->data + offset, 0x0, len);
memcpy(hw_tx_obj->data, cfd->data, cfd->len);
@@ -2281,7 +2356,7 @@ mcp251xfd_tx_obj_from_skb(const struct mcp251xfd_priv *priv,
/* Number of bytes to be written into the RAM of the controller */
len = sizeof(hw_tx_obj->id) + sizeof(hw_tx_obj->flags);
if (MCP251XFD_SANITIZE_CAN)
- len += round_up(can_dlc2len(dlc), sizeof(u32));
+ len += round_up(can_fd_dlc2len(dlc), sizeof(u32));
else
len += round_up(cfd->len, sizeof(u32));
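
mcp251xfd now pre-builds one spi_transfer per FIFO object with cs_change = 1 and sends the first 'len' of them as a single SPI message; only the last transfer temporarily gets cs_change = 0 so the chip select is released when the message ends. A userspace model of that toggle (the struct and send loop are stand-ins, not the kernel SPI API):

#include <stdio.h>

struct xfer {
	int cs_change;
};

static void send_message(const struct xfer *xfers, int n)
{
	int i;

	for (i = 0; i < n; i++)
		printf("xfer %d: cs_change=%d\n", i, xfers[i].cs_change);
}

int main(void)
{
	struct xfer uinc_xfer[8];
	int len = 5; /* objects handled in this batch */
	int i;

	/* pre-built once: keep chip select asserted between transfers */
	for (i = 0; i < 8; i++)
		uinc_xfer[i].cs_change = 1;

	/* release CS at the end of the batch, then restore the flag */
	uinc_xfer[len - 1].cs_change = 0;
	send_message(uinc_xfer, len);
	uinc_xfer[len - 1].cs_change = 1;
	return 0;
}
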
diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd.h b/drivers/net/can/spi/mcp251xfd/mcp251xfd.h
index fa1246e39980..cb6398c2a560 100644
--- a/drivers/net/can/spi/mcp251xfd/mcp251xfd.h
+++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd.h
@@ -368,6 +368,7 @@
* FIFO setup: tef: 8*12 bytes = 96 bytes, tx: 8*16 bytes = 128 bytes
* FIFO setup: tef: 4*12 bytes = 48 bytes, tx: 4*72 bytes = 288 bytes
*/
+#define MCP251XFD_RX_OBJ_NUM_MAX 32
#define MCP251XFD_TX_OBJ_NUM_CAN 8
#define MCP251XFD_TX_OBJ_NUM_CANFD 4
@@ -458,14 +459,6 @@ struct mcp251xfd_hw_rx_obj_canfd {
u8 data[sizeof_field(struct canfd_frame, data)];
};
-struct mcp251xfd_tef_ring {
- unsigned int head;
- unsigned int tail;
-
- /* u8 obj_num equals tx_ring->obj_num */
- /* u8 obj_size equals sizeof(struct mcp251xfd_hw_tef_obj) */
-};
-
struct __packed mcp251xfd_buf_cmd {
__be16 cmd;
};
@@ -505,6 +498,17 @@ struct mcp251xfd_tx_obj {
union mcp251xfd_tx_obj_load_buf buf;
};
+struct mcp251xfd_tef_ring {
+ unsigned int head;
+ unsigned int tail;
+
+ /* u8 obj_num equals tx_ring->obj_num */
+ /* u8 obj_size equals sizeof(struct mcp251xfd_hw_tef_obj) */
+
+ union mcp251xfd_write_reg_buf uinc_buf;
+ struct spi_transfer uinc_xfer[MCP251XFD_TX_OBJ_NUM_MAX];
+};
+
struct mcp251xfd_tx_ring {
unsigned int head;
unsigned int tail;
@@ -527,6 +531,8 @@ struct mcp251xfd_rx_ring {
u8 obj_num;
u8 obj_size;
+ union mcp251xfd_write_reg_buf uinc_buf;
+ struct spi_transfer uinc_xfer[MCP251XFD_RX_OBJ_NUM_MAX];
struct mcp251xfd_hw_rx_obj_canfd obj[];
};
@@ -580,7 +586,7 @@ struct mcp251xfd_priv {
struct spi_device *spi;
u32 spi_max_speed_hz_orig;
- struct mcp251xfd_tef_ring tef;
+ struct mcp251xfd_tef_ring tef[1];
struct mcp251xfd_tx_ring tx[1];
struct mcp251xfd_rx_ring *rx[1];
@@ -741,17 +747,17 @@ mcp251xfd_get_rx_obj_addr(const struct mcp251xfd_rx_ring *ring, u8 n)
static inline u8 mcp251xfd_get_tef_head(const struct mcp251xfd_priv *priv)
{
- return priv->tef.head & (priv->tx->obj_num - 1);
+ return priv->tef->head & (priv->tx->obj_num - 1);
}
static inline u8 mcp251xfd_get_tef_tail(const struct mcp251xfd_priv *priv)
{
- return priv->tef.tail & (priv->tx->obj_num - 1);
+ return priv->tef->tail & (priv->tx->obj_num - 1);
}
static inline u8 mcp251xfd_get_tef_len(const struct mcp251xfd_priv *priv)
{
- return priv->tef.head - priv->tef.tail;
+ return priv->tef->head - priv->tef->tail;
}
static inline u8 mcp251xfd_get_tef_linear_len(const struct mcp251xfd_priv *priv)
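
The tef/tx/rx rings use free-running head and tail counters with a power-of-two object count, so the fill level is head - tail (wrap-safe in unsigned arithmetic) and the array index is counter & (obj_num - 1), as in the accessors above. A small demonstration:

#include <stdio.h>

#define OBJ_NUM 8u /* must be a power of two */

int main(void)
{
	/* free-running counters that have long since wrapped */
	unsigned int tail = 0xfffffffe;
	unsigned int head = tail + 5;           /* wraps past 0 */

	unsigned int fill = head - tail;        /* still 5 */
	unsigned int idx  = tail & (OBJ_NUM - 1);

	printf("fill=%u tail-index=%u\n", fill, idx);
	return 0;
}
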
diff --git a/drivers/net/can/sun4i_can.c b/drivers/net/can/sun4i_can.c
index b3f2f4fe5ee0..783b63218b7b 100644
--- a/drivers/net/can/sun4i_can.c
+++ b/drivers/net/can/sun4i_can.c
@@ -424,7 +424,7 @@ static netdev_tx_t sun4ican_start_xmit(struct sk_buff *skb, struct net_device *d
netif_stop_queue(dev);
id = cf->can_id;
- dlc = cf->can_dlc;
+ dlc = cf->len;
msg_flag_n = dlc;
if (id & CAN_RTR_FLAG)
@@ -475,7 +475,7 @@ static void sun4i_can_rx(struct net_device *dev)
return;
fi = readl(priv->base + SUN4I_REG_BUF0_ADDR);
- cf->can_dlc = get_can_dlc(fi & 0x0F);
+ cf->len = can_cc_dlc2len(fi & 0x0F);
if (fi & SUN4I_MSG_EFF_FLAG) {
dreg = SUN4I_REG_BUF5_ADDR;
id = (readl(priv->base + SUN4I_REG_BUF1_ADDR) << 21) |
@@ -493,7 +493,7 @@ static void sun4i_can_rx(struct net_device *dev)
if (fi & SUN4I_MSG_RTR_FLAG)
id |= CAN_RTR_FLAG;
else
- for (i = 0; i < cf->can_dlc; i++)
+ for (i = 0; i < cf->len; i++)
cf->data[i] = readl(priv->base + dreg + i * 4);
cf->can_id = id;
@@ -501,7 +501,7 @@ static void sun4i_can_rx(struct net_device *dev)
sun4i_can_write_cmdreg(priv, SUN4I_CMD_RELEASE_RBUF);
stats->rx_packets++;
- stats->rx_bytes += cf->can_dlc;
+ stats->rx_bytes += cf->len;
netif_rx(skb);
can_led_event(dev, CAN_LED_EVENT_RX);
@@ -624,7 +624,7 @@ static int sun4i_can_err(struct net_device *dev, u8 isrc, u8 status)
if (likely(skb)) {
stats->rx_packets++;
- stats->rx_bytes += cf->can_dlc;
+ stats->rx_bytes += cf->len;
netif_rx(skb);
} else {
return -ENOMEM;
diff --git a/drivers/net/can/ti_hecc.c b/drivers/net/can/ti_hecc.c
index 2c22f40e12bd..a6850ff0b55b 100644
--- a/drivers/net/can/ti_hecc.c
+++ b/drivers/net/can/ti_hecc.c
@@ -496,7 +496,7 @@ static netdev_tx_t ti_hecc_xmit(struct sk_buff *skb, struct net_device *ndev)
spin_unlock_irqrestore(&priv->mbx_lock, flags);
/* Prepare mailbox for transmission */
- data = cf->can_dlc | (get_tx_head_prio(priv) << 8);
+ data = cf->len | (get_tx_head_prio(priv) << 8);
if (cf->can_id & CAN_RTR_FLAG) /* Remote transmission request */
data |= HECC_CANMCF_RTR;
hecc_write_mbx(priv, mbxno, HECC_CANMCF, data);
@@ -508,7 +508,7 @@ static netdev_tx_t ti_hecc_xmit(struct sk_buff *skb, struct net_device *ndev)
hecc_write_mbx(priv, mbxno, HECC_CANMID, data);
hecc_write_mbx(priv, mbxno, HECC_CANMDL,
be32_to_cpu(*(__be32 *)(cf->data)));
- if (cf->can_dlc > 4)
+ if (cf->len > 4)
hecc_write_mbx(priv, mbxno, HECC_CANMDH,
be32_to_cpu(*(__be32 *)(cf->data + 4)));
else
@@ -566,11 +566,11 @@ static struct sk_buff *ti_hecc_mailbox_read(struct can_rx_offload *offload,
data = hecc_read_mbx(priv, mbxno, HECC_CANMCF);
if (data & HECC_CANMCF_RTR)
cf->can_id |= CAN_RTR_FLAG;
- cf->can_dlc = get_can_dlc(data & 0xF);
+ cf->len = can_cc_dlc2len(data & 0xF);
data = hecc_read_mbx(priv, mbxno, HECC_CANMDL);
*(__be32 *)(cf->data) = cpu_to_be32(data);
- if (cf->can_dlc > 4) {
+ if (cf->len > 4) {
data = hecc_read_mbx(priv, mbxno, HECC_CANMDH);
*(__be32 *)(cf->data + 4) = cpu_to_be32(data);
}
diff --git a/drivers/net/can/usb/Kconfig b/drivers/net/can/usb/Kconfig
index bcb331b0c958..c1e5d5b570b6 100644
--- a/drivers/net/can/usb/Kconfig
+++ b/drivers/net/can/usb/Kconfig
@@ -52,7 +52,9 @@ config CAN_KVASER_USB
- Kvaser Leaf Light "China"
- Kvaser BlackBird SemiPro
- Kvaser USBcan R
+ - Kvaser USBcan R v2
- Kvaser Leaf Light v2
+ - Kvaser Leaf Light R v2
- Kvaser Mini PCI Express HS
- Kvaser Mini PCI Express 2xHS
- Kvaser USBcan Light 2xHS
@@ -72,6 +74,9 @@ config CAN_KVASER_USB
- Kvaser USBcan Light 4xHS
- Kvaser USBcan Pro 2xHS v2
- Kvaser USBcan Pro 5xHS
+ - Kvaser U100
+ - Kvaser U100P
+ - Kvaser U100S
- ATI Memorator Pro 2xHS v2
- ATI USBcan Pro 2xHS v2
diff --git a/drivers/net/can/usb/ems_usb.c b/drivers/net/can/usb/ems_usb.c
index 4f52810bebf8..25eee4466364 100644
--- a/drivers/net/can/usb/ems_usb.c
+++ b/drivers/net/can/usb/ems_usb.c
@@ -306,7 +306,7 @@ static void ems_usb_rx_can_msg(struct ems_usb *dev, struct ems_cpc_msg *msg)
return;
cf->can_id = le32_to_cpu(msg->msg.can_msg.id);
- cf->can_dlc = get_can_dlc(msg->msg.can_msg.length & 0xF);
+ cf->len = can_cc_dlc2len(msg->msg.can_msg.length & 0xF);
if (msg->type == CPC_MSG_TYPE_EXT_CAN_FRAME ||
msg->type == CPC_MSG_TYPE_EXT_RTR_FRAME)
@@ -316,12 +316,12 @@ static void ems_usb_rx_can_msg(struct ems_usb *dev, struct ems_cpc_msg *msg)
msg->type == CPC_MSG_TYPE_EXT_RTR_FRAME) {
cf->can_id |= CAN_RTR_FLAG;
} else {
- for (i = 0; i < cf->can_dlc; i++)
+ for (i = 0; i < cf->len; i++)
cf->data[i] = msg->msg.can_msg.msg[i];
}
stats->rx_packets++;
- stats->rx_bytes += cf->can_dlc;
+ stats->rx_bytes += cf->len;
netif_rx(skb);
}
@@ -396,7 +396,7 @@ static void ems_usb_rx_err(struct ems_usb *dev, struct ems_cpc_msg *msg)
}
stats->rx_packets++;
- stats->rx_bytes += cf->can_dlc;
+ stats->rx_bytes += cf->len;
netif_rx(skb);
}
@@ -755,7 +755,7 @@ static netdev_tx_t ems_usb_start_xmit(struct sk_buff *skb, struct net_device *ne
msg = (struct ems_cpc_msg *)&buf[CPC_HEADER_SIZE];
msg->msg.can_msg.id = cpu_to_le32(cf->can_id & CAN_ERR_MASK);
- msg->msg.can_msg.length = cf->can_dlc;
+ msg->msg.can_msg.length = cf->len;
if (cf->can_id & CAN_RTR_FLAG) {
msg->type = cf->can_id & CAN_EFF_FLAG ?
@@ -766,10 +766,10 @@ static netdev_tx_t ems_usb_start_xmit(struct sk_buff *skb, struct net_device *ne
msg->type = cf->can_id & CAN_EFF_FLAG ?
CPC_CMD_TYPE_EXT_CAN_FRAME : CPC_CMD_TYPE_CAN_FRAME;
- for (i = 0; i < cf->can_dlc; i++)
+ for (i = 0; i < cf->len; i++)
msg->msg.can_msg.msg[i] = cf->data[i];
- msg->length = CPC_CAN_MSG_MIN_SIZE + cf->can_dlc;
+ msg->length = CPC_CAN_MSG_MIN_SIZE + cf->len;
}
for (i = 0; i < MAX_TX_URBS; i++) {
@@ -794,7 +794,7 @@ static netdev_tx_t ems_usb_start_xmit(struct sk_buff *skb, struct net_device *ne
context->dev = dev;
context->echo_index = i;
- context->dlc = cf->can_dlc;
+ context->dlc = cf->len;
usb_fill_bulk_urb(urb, dev->udev, usb_sndbulkpipe(dev->udev, 2), buf,
size, ems_usb_write_bulk_callback, context);
diff --git a/drivers/net/can/usb/esd_usb2.c b/drivers/net/can/usb/esd_usb2.c
index b5d7ed21d7d9..9eed75a4b678 100644
--- a/drivers/net/can/usb/esd_usb2.c
+++ b/drivers/net/can/usb/esd_usb2.c
@@ -183,7 +183,7 @@ struct esd_usb2_net_priv;
struct esd_tx_urb_context {
struct esd_usb2_net_priv *priv;
u32 echo_index;
- int dlc;
+ int len; /* CAN payload length */
};
struct esd_usb2 {
@@ -292,7 +292,7 @@ static void esd_usb2_rx_event(struct esd_usb2_net_priv *priv,
priv->bec.rxerr = rxerr;
stats->rx_packets++;
- stats->rx_bytes += cf->can_dlc;
+ stats->rx_bytes += cf->len;
netif_rx(skb);
}
}
@@ -321,7 +321,8 @@ static void esd_usb2_rx_can_msg(struct esd_usb2_net_priv *priv,
}
cf->can_id = id & ESD_IDMASK;
- cf->can_dlc = get_can_dlc(msg->msg.rx.dlc & ~ESD_RTR);
+ can_frame_set_cc_len(cf, msg->msg.rx.dlc & ~ESD_RTR,
+ priv->can.ctrlmode);
if (id & ESD_EXTID)
cf->can_id |= CAN_EFF_FLAG;
@@ -329,12 +330,12 @@ static void esd_usb2_rx_can_msg(struct esd_usb2_net_priv *priv,
if (msg->msg.rx.dlc & ESD_RTR) {
cf->can_id |= CAN_RTR_FLAG;
} else {
- for (i = 0; i < cf->can_dlc; i++)
+ for (i = 0; i < cf->len; i++)
cf->data[i] = msg->msg.rx.data[i];
}
stats->rx_packets++;
- stats->rx_bytes += cf->can_dlc;
+ stats->rx_bytes += cf->len;
netif_rx(skb);
}
@@ -355,7 +356,7 @@ static void esd_usb2_tx_done_msg(struct esd_usb2_net_priv *priv,
if (!msg->msg.txdone.status) {
stats->tx_packets++;
- stats->tx_bytes += context->dlc;
+ stats->tx_bytes += context->len;
can_get_echo_skb(netdev, context->echo_index);
} else {
stats->tx_errors++;
@@ -737,7 +738,7 @@ static netdev_tx_t esd_usb2_start_xmit(struct sk_buff *skb,
msg->msg.hdr.len = 3; /* minimal length */
msg->msg.hdr.cmd = CMD_CAN_TX;
msg->msg.tx.net = priv->index;
- msg->msg.tx.dlc = cf->can_dlc;
+ msg->msg.tx.dlc = can_get_cc_dlc(cf, priv->can.ctrlmode);
msg->msg.tx.id = cpu_to_le32(cf->can_id & CAN_ERR_MASK);
if (cf->can_id & CAN_RTR_FLAG)
@@ -746,10 +747,10 @@ static netdev_tx_t esd_usb2_start_xmit(struct sk_buff *skb,
if (cf->can_id & CAN_EFF_FLAG)
msg->msg.tx.id |= cpu_to_le32(ESD_EXTID);
- for (i = 0; i < cf->can_dlc; i++)
+ for (i = 0; i < cf->len; i++)
msg->msg.tx.data[i] = cf->data[i];
- msg->msg.hdr.len += (cf->can_dlc + 3) >> 2;
+ msg->msg.hdr.len += (cf->len + 3) >> 2;
for (i = 0; i < MAX_TX_URBS; i++) {
if (priv->tx_contexts[i].echo_index == MAX_TX_URBS) {
@@ -769,7 +770,7 @@ static netdev_tx_t esd_usb2_start_xmit(struct sk_buff *skb,
context->priv = priv;
context->echo_index = i;
- context->dlc = cf->can_dlc;
+ context->len = cf->len;
/* hnd must not be 0 - MSB is stripped in txdone handling */
msg->msg.tx.hnd = 0x80000000 | i; /* returned in TX done message */
@@ -988,7 +989,8 @@ static int esd_usb2_probe_one_net(struct usb_interface *intf, int index)
priv->index = index;
priv->can.state = CAN_STATE_STOPPED;
- priv->can.ctrlmode_supported = CAN_CTRLMODE_LISTENONLY;
+ priv->can.ctrlmode_supported = CAN_CTRLMODE_LISTENONLY |
+ CAN_CTRLMODE_CC_LEN8_DLC;
if (le16_to_cpu(dev->udev->descriptor.idProduct) ==
USB_CANUSBM_PRODUCT_ID)
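esd_usb2 is one of the first users of the new CAN_CTRLMODE_CC_LEN8_DLC support: can_frame_set_cc_len() records a raw DLC greater than 8 in the frame, and can_get_cc_dlc() replays it on transmit, so an out-of-range DLC survives a round trip instead of being flattened to 8. A hedged userspace rendering of the pair (field names follow the kernel; the 0x100 flag value matches the uapi header of this era):

    #include <stdint.h>
    #include <stdio.h>

    #define CAN_MAX_DLEN             8
    #define CAN_CTRLMODE_CC_LEN8_DLC 0x100

    struct cc_frame {
        uint8_t len;      /* payload length, 0..8 */
        uint8_t len8_dlc; /* raw DLC 9..15, only meaningful if len == 8 */
    };

    static void set_cc_len(struct cc_frame *cf, uint8_t dlc, uint32_t ctrlmode)
    {
        if ((ctrlmode & CAN_CTRLMODE_CC_LEN8_DLC) && dlc > CAN_MAX_DLEN)
            cf->len8_dlc = dlc; /* remember the raw DLC */
        cf->len = dlc > CAN_MAX_DLEN ? CAN_MAX_DLEN : dlc;
    }

    static uint8_t get_cc_dlc(const struct cc_frame *cf, uint32_t ctrlmode)
    {
        if ((ctrlmode & CAN_CTRLMODE_CC_LEN8_DLC) &&
            cf->len == CAN_MAX_DLEN && cf->len8_dlc > CAN_MAX_DLEN)
            return cf->len8_dlc; /* replay the raw DLC on transmit */
        return cf->len;
    }

    int main(void)
    {
        struct cc_frame cf = { 0 };

        set_cc_len(&cf, 12, CAN_CTRLMODE_CC_LEN8_DLC);
        printf("len=%u tx-dlc=%u\n", (unsigned)cf.len,
               (unsigned)get_cc_dlc(&cf, CAN_CTRLMODE_CC_LEN8_DLC));
        return 0;
    }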
diff --git a/drivers/net/can/usb/gs_usb.c b/drivers/net/can/usb/gs_usb.c
index 018ca3b057a3..0487095e1fd0 100644
--- a/drivers/net/can/usb/gs_usb.c
+++ b/drivers/net/can/usb/gs_usb.c
@@ -9,6 +9,7 @@
* Many thanks to all socketcan devs!
*/
+#include <linux/ethtool.h>
#include <linux/init.h>
#include <linux/signal.h>
#include <linux/module.h>
@@ -337,7 +338,7 @@ static void gs_usb_receive_bulk_callback(struct urb *urb)
cf->can_id = le32_to_cpu(hf->can_id);
- cf->can_dlc = get_can_dlc(hf->can_dlc);
+ can_frame_set_cc_len(cf, hf->can_dlc, dev->can.ctrlmode);
memcpy(cf->data, hf->data, 8);
/* ERROR frames tell us information about the controller */
@@ -384,7 +385,7 @@ static void gs_usb_receive_bulk_callback(struct urb *urb)
goto resubmit_urb;
cf->can_id |= CAN_ERR_CRTL;
- cf->can_dlc = CAN_ERR_DLC;
+ cf->len = CAN_ERR_DLC;
cf->data[1] = CAN_ERR_CRTL_RX_OVERFLOW;
stats->rx_over_errors++;
stats->rx_errors++;
@@ -510,8 +511,9 @@ static netdev_tx_t gs_can_start_xmit(struct sk_buff *skb,
cf = (struct can_frame *)skb->data;
hf->can_id = cpu_to_le32(cf->can_id);
- hf->can_dlc = cf->can_dlc;
- memcpy(hf->data, cf->data, cf->can_dlc);
+ hf->can_dlc = can_get_cc_dlc(cf, dev->can.ctrlmode);
+
+ memcpy(hf->data, cf->data, cf->len);
usb_fill_bulk_urb(urb, dev->udev,
usb_sndbulkpipe(dev->udev, GSUSB_ENDPOINT_OUT),
@@ -866,7 +868,7 @@ static struct gs_can *gs_make_candev(unsigned int channel,
dev->can.bittiming_const = &dev->bt_const;
dev->can.do_set_bittiming = gs_usb_set_bittiming;
- dev->can.ctrlmode_supported = 0;
+ dev->can.ctrlmode_supported = CAN_CTRLMODE_CC_LEN8_DLC;
feature = le32_to_cpu(bt_const->feature);
if (feature & GS_CAN_FEATURE_LISTEN_ONLY)
diff --git a/drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c b/drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c
index 0f1d3e807d63..e2d58846c40c 100644
--- a/drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c
+++ b/drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c
@@ -58,6 +58,11 @@
#define USB_LEAF_LIGHT_HS_V2_OEM_PRODUCT_ID 290
#define USB_USBCAN_LIGHT_2HS_PRODUCT_ID 291
#define USB_MINI_PCIE_2HS_PRODUCT_ID 292
+#define USB_USBCAN_R_V2_PRODUCT_ID 294
+#define USB_LEAF_LIGHT_R_V2_PRODUCT_ID 295
+#define USB_LEAF_LIGHT_HS_V2_OEM2_PRODUCT_ID 296
+#define USB_LEAF_PRODUCT_ID_END \
+ USB_LEAF_LIGHT_HS_V2_OEM2_PRODUCT_ID
/* Kvaser USBCan-II devices product ids */
#define USB_USBCAN_REVB_PRODUCT_ID 2
@@ -78,13 +83,18 @@
#define USB_ATI_USBCAN_PRO_2HS_V2_PRODUCT_ID 268
#define USB_ATI_MEMO_PRO_2HS_V2_PRODUCT_ID 269
#define USB_HYBRID_PRO_CANLIN_PRODUCT_ID 270
+#define USB_U100_PRODUCT_ID 273
+#define USB_U100P_PRODUCT_ID 274
+#define USB_U100S_PRODUCT_ID 275
+#define USB_HYDRA_PRODUCT_ID_END \
+ USB_U100S_PRODUCT_ID
static inline bool kvaser_is_leaf(const struct usb_device_id *id)
{
return (id->idProduct >= USB_LEAF_DEVEL_PRODUCT_ID &&
id->idProduct <= USB_CAN_R_PRODUCT_ID) ||
(id->idProduct >= USB_LEAF_LITE_V2_PRODUCT_ID &&
- id->idProduct <= USB_MINI_PCIE_2HS_PRODUCT_ID);
+ id->idProduct <= USB_LEAF_PRODUCT_ID_END);
}
static inline bool kvaser_is_usbcan(const struct usb_device_id *id)
@@ -96,7 +106,7 @@ static inline bool kvaser_is_usbcan(const struct usb_device_id *id)
static inline bool kvaser_is_hydra(const struct usb_device_id *id)
{
return id->idProduct >= USB_BLACKBIRD_V2_PRODUCT_ID &&
- id->idProduct <= USB_HYBRID_PRO_CANLIN_PRODUCT_ID;
+ id->idProduct <= USB_HYDRA_PRODUCT_ID_END;
}
static const struct usb_device_id kvaser_usb_table[] = {
@@ -153,6 +163,9 @@ static const struct usb_device_id kvaser_usb_table[] = {
{ USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_LIGHT_HS_V2_OEM_PRODUCT_ID) },
{ USB_DEVICE(KVASER_VENDOR_ID, USB_USBCAN_LIGHT_2HS_PRODUCT_ID) },
{ USB_DEVICE(KVASER_VENDOR_ID, USB_MINI_PCIE_2HS_PRODUCT_ID) },
+ { USB_DEVICE(KVASER_VENDOR_ID, USB_USBCAN_R_V2_PRODUCT_ID) },
+ { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_LIGHT_R_V2_PRODUCT_ID) },
+ { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_LIGHT_HS_V2_OEM2_PRODUCT_ID) },
/* USBCANII USB product IDs */
{ USB_DEVICE(KVASER_VENDOR_ID, USB_USBCAN2_PRODUCT_ID),
@@ -177,6 +190,9 @@ static const struct usb_device_id kvaser_usb_table[] = {
{ USB_DEVICE(KVASER_VENDOR_ID, USB_ATI_USBCAN_PRO_2HS_V2_PRODUCT_ID) },
{ USB_DEVICE(KVASER_VENDOR_ID, USB_ATI_MEMO_PRO_2HS_V2_PRODUCT_ID) },
{ USB_DEVICE(KVASER_VENDOR_ID, USB_HYBRID_PRO_CANLIN_PRODUCT_ID) },
+ { USB_DEVICE(KVASER_VENDOR_ID, USB_U100_PRODUCT_ID) },
+ { USB_DEVICE(KVASER_VENDOR_ID, USB_U100P_PRODUCT_ID) },
+ { USB_DEVICE(KVASER_VENDOR_ID, USB_U100S_PRODUCT_ID) },
{ }
};
MODULE_DEVICE_TABLE(usb, kvaser_usb_table);
@@ -258,7 +274,7 @@ int kvaser_usb_can_rx_over_error(struct net_device *netdev)
cf->data[1] = CAN_ERR_CRTL_RX_OVERFLOW;
stats->rx_packets++;
- stats->rx_bytes += cf->can_dlc;
+ stats->rx_bytes += cf->len;
netif_rx(skb);
return 0;
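The new *_PRODUCT_ID_END aliases keep the open-coded ranges in kvaser_is_leaf() and kvaser_is_hydra() in sync with the device table: adding a new highest product ID only requires repointing the alias. In miniature (the 296 comes from the hunk; the value 288 for the start of the Leaf v2 range is an assumption, as it is not shown here):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define USB_LEAF_LITE_V2_PRODUCT_ID          288 /* assumed */
    #define USB_LEAF_LIGHT_HS_V2_OEM2_PRODUCT_ID 296
    /* alias the current end of the range; new IDs repoint this */
    #define USB_LEAF_PRODUCT_ID_END USB_LEAF_LIGHT_HS_V2_OEM2_PRODUCT_ID

    static bool is_leaf_v2_family(uint16_t id)
    {
        return id >= USB_LEAF_LITE_V2_PRODUCT_ID &&
               id <= USB_LEAF_PRODUCT_ID_END;
    }

    int main(void)
    {
        printf("294: %d, 297: %d\n", is_leaf_v2_family(294),
               is_leaf_v2_family(297));
        return 0;
    }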
diff --git a/drivers/net/can/usb/kvaser_usb/kvaser_usb_hydra.c b/drivers/net/can/usb/kvaser_usb/kvaser_usb_hydra.c
index 218fadc91155..480bd2ecb296 100644
--- a/drivers/net/can/usb/kvaser_usb/kvaser_usb_hydra.c
+++ b/drivers/net/can/usb/kvaser_usb/kvaser_usb_hydra.c
@@ -34,6 +34,7 @@
/* Forward declarations */
static const struct kvaser_usb_dev_cfg kvaser_usb_hydra_dev_cfg_kcan;
static const struct kvaser_usb_dev_cfg kvaser_usb_hydra_dev_cfg_flexc;
+static const struct kvaser_usb_dev_cfg kvaser_usb_hydra_dev_cfg_rt;
#define KVASER_USB_HYDRA_BULK_EP_IN_ADDR 0x82
#define KVASER_USB_HYDRA_BULK_EP_OUT_ADDR 0x02
@@ -135,6 +136,7 @@ struct kvaser_cmd_sw_detail_req {
#define KVASER_USB_HYDRA_SW_FLAG_CANFD BIT(10)
#define KVASER_USB_HYDRA_SW_FLAG_NONISO BIT(11)
#define KVASER_USB_HYDRA_SW_FLAG_EXT_CAP BIT(12)
+#define KVASER_USB_HYDRA_SW_FLAG_CAN_FREQ_80M BIT(13)
struct kvaser_cmd_sw_detail_res {
__le32 sw_flags;
__le32 sw_version;
@@ -383,6 +385,30 @@ static const struct can_bittiming_const kvaser_usb_hydra_flexc_bittiming_c = {
.brp_inc = 1,
};
+static const struct can_bittiming_const kvaser_usb_hydra_rt_bittiming_c = {
+ .name = "kvaser_usb_rt",
+ .tseg1_min = 2,
+ .tseg1_max = 96,
+ .tseg2_min = 2,
+ .tseg2_max = 32,
+ .sjw_max = 32,
+ .brp_min = 1,
+ .brp_max = 1024,
+ .brp_inc = 1,
+};
+
+static const struct can_bittiming_const kvaser_usb_hydra_rtd_bittiming_c = {
+ .name = "kvaser_usb_rt",
+ .tseg1_min = 2,
+ .tseg1_max = 39,
+ .tseg2_min = 2,
+ .tseg2_max = 8,
+ .sjw_max = 8,
+ .brp_min = 1,
+ .brp_max = 1024,
+ .brp_inc = 1,
+};
+
#define KVASER_USB_HYDRA_TRANSID_BITS 12
#define KVASER_USB_HYDRA_TRANSID_MASK \
GENMASK(KVASER_USB_HYDRA_TRANSID_BITS - 1, 0)
@@ -895,7 +921,7 @@ static void kvaser_usb_hydra_update_state(struct kvaser_usb_net_priv *priv,
stats = &netdev->stats;
stats->rx_packets++;
- stats->rx_bytes += cf->can_dlc;
+ stats->rx_bytes += cf->len;
netif_rx(skb);
}
@@ -1049,7 +1075,7 @@ kvaser_usb_hydra_error_frame(struct kvaser_usb_net_priv *priv,
cf->data[7] = bec.rxerr;
stats->rx_packets++;
- stats->rx_bytes += cf->can_dlc;
+ stats->rx_bytes += cf->len;
netif_rx(skb);
priv->bec.txerr = bec.txerr;
@@ -1084,7 +1110,7 @@ static void kvaser_usb_hydra_one_shot_fail(struct kvaser_usb_net_priv *priv,
stats->tx_errors++;
stats->rx_packets++;
- stats->rx_bytes += cf->can_dlc;
+ stats->rx_bytes += cf->len;
netif_rx(skb);
}
@@ -1120,7 +1146,7 @@ static void kvaser_usb_hydra_tx_acknowledge(const struct kvaser_usb *dev,
struct net_device_stats *stats = &priv->netdev->stats;
stats->tx_packets++;
- stats->tx_bytes += can_dlc2len(context->dlc);
+ stats->tx_bytes += can_fd_dlc2len(context->dlc);
}
spin_lock_irqsave(&priv->tx_contexts_lock, irq_flags);
@@ -1180,15 +1206,15 @@ static void kvaser_usb_hydra_rx_msg_std(const struct kvaser_usb *dev,
if (flags & KVASER_USB_HYDRA_CF_FLAG_OVERRUN)
kvaser_usb_can_rx_over_error(priv->netdev);
- cf->can_dlc = get_can_dlc(cmd->rx_can.dlc);
+ cf->len = can_cc_dlc2len(cmd->rx_can.dlc);
if (flags & KVASER_USB_HYDRA_CF_FLAG_REMOTE_FRAME)
cf->can_id |= CAN_RTR_FLAG;
else
- memcpy(cf->data, cmd->rx_can.data, cf->can_dlc);
+ memcpy(cf->data, cmd->rx_can.data, cf->len);
stats->rx_packets++;
- stats->rx_bytes += cf->can_dlc;
+ stats->rx_bytes += cf->len;
netif_rx(skb);
}
@@ -1251,13 +1277,13 @@ static void kvaser_usb_hydra_rx_msg_ext(const struct kvaser_usb *dev,
kvaser_usb_can_rx_over_error(priv->netdev);
if (flags & KVASER_USB_HYDRA_CF_FLAG_FDF) {
- cf->len = can_dlc2len(get_canfd_dlc(dlc));
+ cf->len = can_fd_dlc2len(dlc);
if (flags & KVASER_USB_HYDRA_CF_FLAG_BRS)
cf->flags |= CANFD_BRS;
if (flags & KVASER_USB_HYDRA_CF_FLAG_ESI)
cf->flags |= CANFD_ESI;
} else {
- cf->len = get_can_dlc(dlc);
+ cf->len = can_cc_dlc2len(dlc);
}
if (flags & KVASER_USB_HYDRA_CF_FLAG_REMOTE_FRAME)
@@ -1351,7 +1377,7 @@ kvaser_usb_hydra_frame_to_cmd_ext(const struct kvaser_usb_net_priv *priv,
struct kvaser_usb *dev = priv->dev;
struct kvaser_cmd_ext *cmd;
struct canfd_frame *cf = (struct canfd_frame *)skb->data;
- u8 dlc = can_len2dlc(cf->len);
+ u8 dlc = can_fd_len2dlc(cf->len);
u8 nbr_of_bytes = cf->len;
u32 flags;
u32 id;
@@ -1434,7 +1460,7 @@ kvaser_usb_hydra_frame_to_cmd_std(const struct kvaser_usb_net_priv *priv,
u32 flags;
u32 id;
- *frame_len = cf->can_dlc;
+ *frame_len = cf->len;
cmd = kcalloc(1, sizeof(struct kvaser_cmd), GFP_ATOMIC);
if (!cmd)
@@ -1455,7 +1481,7 @@ kvaser_usb_hydra_frame_to_cmd_std(const struct kvaser_usb_net_priv *priv,
id = cf->can_id & CAN_SFF_MASK;
}
- cmd->tx_can.dlc = cf->can_dlc;
+ cmd->tx_can.dlc = cf->len;
flags = (cf->can_id & CAN_EFF_FLAG ?
KVASER_USB_HYDRA_CF_FLAG_EXTENDED_ID : 0);
@@ -1727,6 +1753,8 @@ static int kvaser_usb_hydra_get_software_details(struct kvaser_usb *dev)
if (flags & KVASER_USB_HYDRA_SW_FLAG_FREQ_80M)
dev->cfg = &kvaser_usb_hydra_dev_cfg_kcan;
+ else if (flags & KVASER_USB_HYDRA_SW_FLAG_CAN_FREQ_80M)
+ dev->cfg = &kvaser_usb_hydra_dev_cfg_rt;
else
dev->cfg = &kvaser_usb_hydra_dev_cfg_flexc;
@@ -2026,3 +2054,12 @@ static const struct kvaser_usb_dev_cfg kvaser_usb_hydra_dev_cfg_flexc = {
.timestamp_freq = 1,
.bittiming_const = &kvaser_usb_hydra_flexc_bittiming_c,
};
+
+static const struct kvaser_usb_dev_cfg kvaser_usb_hydra_dev_cfg_rt = {
+ .clock = {
+ .freq = 80000000,
+ },
+ .timestamp_freq = 24,
+ .bittiming_const = &kvaser_usb_hydra_rt_bittiming_c,
+ .data_bittiming_const = &kvaser_usb_hydra_rtd_bittiming_c,
+};
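Device-config selection above keys off capability flags reported by the firmware: the new KVASER_USB_HYDRA_SW_FLAG_CAN_FREQ_80M routes the U100-class devices to the 80 MHz "rt" config before the FlexC fallback. A compact sketch of that dispatch (bit positions other than BIT(13) and the 24 MHz FlexC clock are assumptions, not shown in this hunk):

    #include <stdint.h>
    #include <stdio.h>

    #define SW_FLAG_FREQ_80M     (1u << 5)  /* illustrative bit */
    #define SW_FLAG_CAN_FREQ_80M (1u << 13) /* matches BIT(13) above */

    struct dev_cfg { uint32_t clock_freq; };

    static const struct dev_cfg cfg_kcan  = { 80000000 };
    static const struct dev_cfg cfg_rt    = { 80000000 };
    static const struct dev_cfg cfg_flexc = { 24000000 }; /* assumed */

    static const struct dev_cfg *pick_cfg(uint32_t flags)
    {
        if (flags & SW_FLAG_FREQ_80M)
            return &cfg_kcan;
        else if (flags & SW_FLAG_CAN_FREQ_80M)
            return &cfg_rt;
        return &cfg_flexc;
    }

    int main(void)
    {
        printf("clock=%u Hz\n",
               (unsigned)pick_cfg(SW_FLAG_CAN_FREQ_80M)->clock_freq);
        return 0;
    }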
diff --git a/drivers/net/can/usb/kvaser_usb/kvaser_usb_leaf.c b/drivers/net/can/usb/kvaser_usb/kvaser_usb_leaf.c
index 1b9957f12459..98c016ef0607 100644
--- a/drivers/net/can/usb/kvaser_usb/kvaser_usb_leaf.c
+++ b/drivers/net/can/usb/kvaser_usb/kvaser_usb_leaf.c
@@ -350,7 +350,7 @@ kvaser_usb_leaf_frame_to_cmd(const struct kvaser_usb_net_priv *priv,
u8 *cmd_tx_can_flags = NULL; /* GCC */
struct can_frame *cf = (struct can_frame *)skb->data;
- *frame_len = cf->can_dlc;
+ *frame_len = cf->len;
cmd = kmalloc(sizeof(*cmd), GFP_ATOMIC);
if (cmd) {
@@ -383,8 +383,8 @@ kvaser_usb_leaf_frame_to_cmd(const struct kvaser_usb_net_priv *priv,
cmd->u.tx_can.data[1] = cf->can_id & 0x3f;
}
- cmd->u.tx_can.data[5] = cf->can_dlc;
- memcpy(&cmd->u.tx_can.data[6], cf->data, cf->can_dlc);
+ cmd->u.tx_can.data[5] = cf->len;
+ memcpy(&cmd->u.tx_can.data[6], cf->data, cf->len);
if (cf->can_id & CAN_RTR_FLAG)
*cmd_tx_can_flags |= MSG_FLAG_REMOTE_FRAME;
@@ -576,7 +576,7 @@ static void kvaser_usb_leaf_tx_acknowledge(const struct kvaser_usb *dev,
cf->can_id |= CAN_ERR_RESTARTED;
stats->rx_packets++;
- stats->rx_bytes += cf->can_dlc;
+ stats->rx_bytes += cf->len;
netif_rx(skb);
} else {
netdev_err(priv->netdev,
@@ -694,7 +694,7 @@ static void kvaser_usb_leaf_rx_error(const struct kvaser_usb *dev,
{
struct can_frame *cf;
struct can_frame tmp_cf = { .can_id = CAN_ERR_FLAG,
- .can_dlc = CAN_ERR_DLC };
+ .len = CAN_ERR_DLC };
struct sk_buff *skb;
struct net_device_stats *stats;
struct kvaser_usb_net_priv *priv;
@@ -778,7 +778,7 @@ static void kvaser_usb_leaf_rx_error(const struct kvaser_usb *dev,
cf->data[7] = es->rxerr;
stats->rx_packets++;
- stats->rx_bytes += cf->can_dlc;
+ stats->rx_bytes += cf->len;
netif_rx(skb);
}
@@ -978,13 +978,13 @@ static void kvaser_usb_leaf_rx_can_msg(const struct kvaser_usb *dev,
else
cf->can_id &= CAN_SFF_MASK;
- cf->can_dlc = get_can_dlc(cmd->u.leaf.log_message.dlc);
+ cf->len = can_cc_dlc2len(cmd->u.leaf.log_message.dlc);
if (cmd->u.leaf.log_message.flags & MSG_FLAG_REMOTE_FRAME)
cf->can_id |= CAN_RTR_FLAG;
else
memcpy(cf->data, &cmd->u.leaf.log_message.data,
- cf->can_dlc);
+ cf->len);
} else {
cf->can_id = ((rx_data[0] & 0x1f) << 6) | (rx_data[1] & 0x3f);
@@ -996,16 +996,16 @@ static void kvaser_usb_leaf_rx_can_msg(const struct kvaser_usb *dev,
cf->can_id |= CAN_EFF_FLAG;
}
- cf->can_dlc = get_can_dlc(rx_data[5]);
+ cf->len = can_cc_dlc2len(rx_data[5]);
if (cmd->u.rx_can_header.flag & MSG_FLAG_REMOTE_FRAME)
cf->can_id |= CAN_RTR_FLAG;
else
- memcpy(cf->data, &rx_data[6], cf->can_dlc);
+ memcpy(cf->data, &rx_data[6], cf->len);
}
stats->rx_packets++;
- stats->rx_bytes += cf->can_dlc;
+ stats->rx_bytes += cf->len;
netif_rx(skb);
}
diff --git a/drivers/net/can/usb/mcba_usb.c b/drivers/net/can/usb/mcba_usb.c
index e97f2e0da6b0..df54eb7d4b36 100644
--- a/drivers/net/can/usb/mcba_usb.c
+++ b/drivers/net/can/usb/mcba_usb.c
@@ -184,7 +184,7 @@ static inline struct mcba_usb_ctx *mcba_usb_get_free_ctx(struct mcba_priv *priv,
if (cf) {
ctx->can = true;
- ctx->dlc = cf->can_dlc;
+ ctx->dlc = cf->len;
} else {
ctx->can = false;
ctx->dlc = 0;
@@ -348,7 +348,7 @@ static netdev_tx_t mcba_usb_start_xmit(struct sk_buff *skb,
usb_msg.eid = 0;
}
- usb_msg.dlc = cf->can_dlc;
+ usb_msg.dlc = cf->len;
memcpy(usb_msg.data, cf->data, usb_msg.dlc);
@@ -451,12 +451,12 @@ static void mcba_usb_process_can(struct mcba_priv *priv,
if (msg->dlc & MCBA_DLC_RTR_MASK)
cf->can_id |= CAN_RTR_FLAG;
- cf->can_dlc = get_can_dlc(msg->dlc & MCBA_DLC_MASK);
+ cf->len = can_cc_dlc2len(msg->dlc & MCBA_DLC_MASK);
- memcpy(cf->data, msg->data, cf->can_dlc);
+ memcpy(cf->data, msg->data, cf->len);
stats->rx_packets++;
- stats->rx_bytes += cf->can_dlc;
+ stats->rx_bytes += cf->len;
can_led_event(priv->netdev, CAN_LED_EVENT_RX);
netif_rx(skb);
diff --git a/drivers/net/can/usb/peak_usb/pcan_usb.c b/drivers/net/can/usb/peak_usb/pcan_usb.c
index 63bd2ed96697..e6c1e5d33924 100644
--- a/drivers/net/can/usb/peak_usb/pcan_usb.c
+++ b/drivers/net/can/usb/peak_usb/pcan_usb.c
@@ -596,7 +596,7 @@ static int pcan_usb_decode_error(struct pcan_usb_msg_context *mc, u8 n,
}
mc->netdev->stats.rx_packets++;
- mc->netdev->stats.rx_bytes += cf->can_dlc;
+ mc->netdev->stats.rx_bytes += cf->len;
netif_rx(skb);
return 0;
@@ -734,7 +734,7 @@ static int pcan_usb_decode_data(struct pcan_usb_msg_context *mc, u8 status_len)
cf->can_id = le16_to_cpu(tmp16) >> 5;
}
- cf->can_dlc = get_can_dlc(rec_len);
+ can_frame_set_cc_len(cf, rec_len, mc->pdev->dev.can.ctrlmode);
/* Only first packet timestamp is a word */
if (pcan_usb_decode_ts(mc, !mc->rec_ts_idx))
@@ -751,7 +751,7 @@ static int pcan_usb_decode_data(struct pcan_usb_msg_context *mc, u8 status_len)
if ((mc->ptr + rec_len) > mc->end)
goto decode_failed;
- memcpy(cf->data, mc->ptr, cf->can_dlc);
+ memcpy(cf->data, mc->ptr, cf->len);
mc->ptr += rec_len;
}
@@ -761,7 +761,7 @@ static int pcan_usb_decode_data(struct pcan_usb_msg_context *mc, u8 status_len)
/* update statistics */
mc->netdev->stats.rx_packets++;
- mc->netdev->stats.rx_bytes += cf->can_dlc;
+ mc->netdev->stats.rx_bytes += cf->len;
/* push the skb */
netif_rx(skb);
@@ -838,7 +838,8 @@ static int pcan_usb_encode_msg(struct peak_usb_device *dev, struct sk_buff *skb,
pc = obuf + PCAN_USB_MSG_HEADER_LEN;
/* status/len byte */
- *pc = cf->can_dlc;
+ *pc = can_get_cc_dlc(cf, dev->can.ctrlmode);
+
if (cf->can_id & CAN_RTR_FLAG)
*pc |= PCAN_USB_STATUSLEN_RTR;
@@ -858,8 +859,8 @@ static int pcan_usb_encode_msg(struct peak_usb_device *dev, struct sk_buff *skb,
/* can data */
if (!(cf->can_id & CAN_RTR_FLAG)) {
- memcpy(pc, cf->data, cf->can_dlc);
- pc += cf->can_dlc;
+ memcpy(pc, cf->data, cf->len);
+ pc += cf->len;
}
obuf[(*size)-1] = (u8)(stats->tx_packets & 0xff);
@@ -992,7 +993,8 @@ const struct peak_usb_adapter pcan_usb = {
.device_id = PCAN_USB_PRODUCT_ID,
.ctrl_count = 1,
.ctrlmode_supported = CAN_CTRLMODE_3_SAMPLES | CAN_CTRLMODE_LISTENONLY |
- CAN_CTRLMODE_BERR_REPORTING,
+ CAN_CTRLMODE_BERR_REPORTING |
+ CAN_CTRLMODE_CC_LEN8_DLC,
.clock = {
.freq = PCAN_USB_CRYSTAL_HZ / 2,
},
diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_core.c b/drivers/net/can/usb/peak_usb/pcan_usb_core.c
index 204ccb27d6d9..251835ea15aa 100644
--- a/drivers/net/can/usb/peak_usb/pcan_usb_core.c
+++ b/drivers/net/can/usb/peak_usb/pcan_usb_core.c
@@ -295,15 +295,16 @@ static void peak_usb_write_bulk_callback(struct urb *urb)
netif_trans_update(netdev);
break;
- default:
- if (net_ratelimit())
- netdev_err(netdev, "Tx urb aborted (%d)\n",
- urb->status);
case -EPROTO:
case -ENOENT:
case -ECONNRESET:
case -ESHUTDOWN:
+ break;
+ default:
+ if (net_ratelimit())
+ netdev_err(netdev, "Tx urb aborted (%d)\n",
+ urb->status);
break;
}
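The switch reordering above is behavior-preserving: the old default: arm logged and then fell through into the unlink-status cases to reach their break. Listing default: last removes the implicit fallthrough (and the warning it triggers with -Wimplicit-fallthrough) while keeping the expected unlink statuses silent. The final shape, reduced to a userspace demo (plain errno magnitudes stand in for the URB status codes):

    #include <stdio.h>

    static void handle_tx_status(int status)
    {
        switch (status) {
        case 0:
            puts("completed");
            break;
        case -71:  /* -EPROTO: expected on unlink, stay silent */
        case -2:   /* -ENOENT */
        case -104: /* -ECONNRESET */
        case -108: /* -ESHUTDOWN */
            break;
        default:   /* only genuinely unexpected codes are logged */
            fprintf(stderr, "Tx urb aborted (%d)\n", status);
            break;
        }
    }

    int main(void)
    {
        handle_tx_status(0);
        handle_tx_status(-104);
        handle_tx_status(-5);
        return 0;
    }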
diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_fd.c b/drivers/net/can/usb/peak_usb/pcan_usb_fd.c
index d29d20525588..61631f4fd92a 100644
--- a/drivers/net/can/usb/peak_usb/pcan_usb_fd.c
+++ b/drivers/net/can/usb/peak_usb/pcan_usb_fd.c
@@ -492,14 +492,16 @@ static int pcan_usb_fd_decode_canmsg(struct pcan_usb_fd_if *usb_if,
if (rx_msg_flags & PUCAN_MSG_ERROR_STATE_IND)
cfd->flags |= CANFD_ESI;
- cfd->len = can_dlc2len(get_canfd_dlc(pucan_msg_get_dlc(rm)));
+ cfd->len = can_fd_dlc2len(pucan_msg_get_dlc(rm));
} else {
/* CAN 2.0 frame case */
skb = alloc_can_skb(netdev, (struct can_frame **)&cfd);
if (!skb)
return -ENOMEM;
- cfd->len = get_can_dlc(pucan_msg_get_dlc(rm));
+ can_frame_set_cc_len((struct can_frame *)cfd,
+ pucan_msg_get_dlc(rm),
+ dev->can.ctrlmode);
}
cfd->can_id = le32_to_cpu(rm->can_id);
@@ -581,7 +583,7 @@ static int pcan_usb_fd_decode_status(struct pcan_usb_fd_if *usb_if,
peak_usb_netif_rx(skb, &usb_if->time_ref, le32_to_cpu(sm->ts_low));
netdev->stats.rx_packets++;
- netdev->stats.rx_bytes += cf->can_dlc;
+ netdev->stats.rx_bytes += cf->len;
return 0;
}
@@ -737,7 +739,7 @@ static int pcan_usb_fd_encode_msg(struct peak_usb_device *dev,
struct pucan_tx_msg *tx_msg = (struct pucan_tx_msg *)obuf;
struct canfd_frame *cfd = (struct canfd_frame *)skb->data;
u16 tx_msg_size, tx_msg_flags;
- u8 can_dlc;
+ u8 dlc;
if (cfd->len > CANFD_MAX_DLEN)
return -EINVAL;
@@ -756,7 +758,7 @@ static int pcan_usb_fd_encode_msg(struct peak_usb_device *dev,
if (can_is_canfd_skb(skb)) {
/* considering a CANFD frame */
- can_dlc = can_len2dlc(cfd->len);
+ dlc = can_fd_len2dlc(cfd->len);
tx_msg_flags |= PUCAN_MSG_EXT_DATA_LEN;
@@ -767,14 +769,15 @@ static int pcan_usb_fd_encode_msg(struct peak_usb_device *dev,
tx_msg_flags |= PUCAN_MSG_ERROR_STATE_IND;
} else {
/* CAN 2.0 frames */
- can_dlc = cfd->len;
+ dlc = can_get_cc_dlc((struct can_frame *)cfd,
+ dev->can.ctrlmode);
if (cfd->can_id & CAN_RTR_FLAG)
tx_msg_flags |= PUCAN_MSG_RTR;
}
tx_msg->flags = cpu_to_le16(tx_msg_flags);
- tx_msg->channel_dlc = PUCAN_MSG_CHANNEL_DLC(dev->ctrl_idx, can_dlc);
+ tx_msg->channel_dlc = PUCAN_MSG_CHANNEL_DLC(dev->ctrl_idx, dlc);
memcpy(tx_msg->d, cfd->data, cfd->len);
/* add null size message to tag the end (messages are 32-bits aligned)
@@ -1036,7 +1039,8 @@ const struct peak_usb_adapter pcan_usb_fd = {
.device_id = PCAN_USBFD_PRODUCT_ID,
.ctrl_count = PCAN_USBFD_CHANNEL_COUNT,
.ctrlmode_supported = CAN_CTRLMODE_FD |
- CAN_CTRLMODE_3_SAMPLES | CAN_CTRLMODE_LISTENONLY,
+ CAN_CTRLMODE_3_SAMPLES | CAN_CTRLMODE_LISTENONLY |
+ CAN_CTRLMODE_CC_LEN8_DLC,
.clock = {
.freq = PCAN_UFD_CRYSTAL_HZ,
},
@@ -1108,7 +1112,8 @@ const struct peak_usb_adapter pcan_usb_chip = {
.device_id = PCAN_USBCHIP_PRODUCT_ID,
.ctrl_count = PCAN_USBFD_CHANNEL_COUNT,
.ctrlmode_supported = CAN_CTRLMODE_FD |
- CAN_CTRLMODE_3_SAMPLES | CAN_CTRLMODE_LISTENONLY,
+ CAN_CTRLMODE_3_SAMPLES | CAN_CTRLMODE_LISTENONLY |
+ CAN_CTRLMODE_CC_LEN8_DLC,
.clock = {
.freq = PCAN_UFD_CRYSTAL_HZ,
},
@@ -1180,7 +1185,8 @@ const struct peak_usb_adapter pcan_usb_pro_fd = {
.device_id = PCAN_USBPROFD_PRODUCT_ID,
.ctrl_count = PCAN_USBPROFD_CHANNEL_COUNT,
.ctrlmode_supported = CAN_CTRLMODE_FD |
- CAN_CTRLMODE_3_SAMPLES | CAN_CTRLMODE_LISTENONLY,
+ CAN_CTRLMODE_3_SAMPLES | CAN_CTRLMODE_LISTENONLY |
+ CAN_CTRLMODE_CC_LEN8_DLC,
.clock = {
.freq = PCAN_UFD_CRYSTAL_HZ,
},
@@ -1252,7 +1258,8 @@ const struct peak_usb_adapter pcan_usb_x6 = {
.device_id = PCAN_USBX6_PRODUCT_ID,
.ctrl_count = PCAN_USBPROFD_CHANNEL_COUNT,
.ctrlmode_supported = CAN_CTRLMODE_FD |
- CAN_CTRLMODE_3_SAMPLES | CAN_CTRLMODE_LISTENONLY,
+ CAN_CTRLMODE_3_SAMPLES | CAN_CTRLMODE_LISTENONLY |
+ CAN_CTRLMODE_CC_LEN8_DLC,
.clock = {
.freq = PCAN_UFD_CRYSTAL_HZ,
},
diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_pro.c b/drivers/net/can/usb/peak_usb/pcan_usb_pro.c
index c7564773fb2b..275087c39602 100644
--- a/drivers/net/can/usb/peak_usb/pcan_usb_pro.c
+++ b/drivers/net/can/usb/peak_usb/pcan_usb_pro.c
@@ -532,7 +532,7 @@ static int pcan_usb_pro_handle_canmsg(struct pcan_usb_pro_interface *usb_if,
return -ENOMEM;
can_frame->can_id = le32_to_cpu(rx->id);
- can_frame->can_dlc = rx->len & 0x0f;
+ can_frame->len = rx->len & 0x0f;
if (rx->flags & PCAN_USBPRO_EXT)
can_frame->can_id |= CAN_EFF_FLAG;
@@ -540,14 +540,14 @@ static int pcan_usb_pro_handle_canmsg(struct pcan_usb_pro_interface *usb_if,
if (rx->flags & PCAN_USBPRO_RTR)
can_frame->can_id |= CAN_RTR_FLAG;
else
- memcpy(can_frame->data, rx->data, can_frame->can_dlc);
+ memcpy(can_frame->data, rx->data, can_frame->len);
hwts = skb_hwtstamps(skb);
peak_usb_get_ts_time(&usb_if->time_ref, le32_to_cpu(rx->ts32),
&hwts->hwtstamp);
netdev->stats.rx_packets++;
- netdev->stats.rx_bytes += can_frame->can_dlc;
+ netdev->stats.rx_bytes += can_frame->len;
netif_rx(skb);
return 0;
@@ -662,7 +662,7 @@ static int pcan_usb_pro_handle_error(struct pcan_usb_pro_interface *usb_if,
hwts = skb_hwtstamps(skb);
peak_usb_get_ts_time(&usb_if->time_ref, le32_to_cpu(er->ts32), &hwts->hwtstamp);
netdev->stats.rx_packets++;
- netdev->stats.rx_bytes += can_frame->can_dlc;
+ netdev->stats.rx_bytes += can_frame->len;
netif_rx(skb);
return 0;
@@ -767,14 +767,14 @@ static int pcan_usb_pro_encode_msg(struct peak_usb_device *dev,
pcan_msg_init_empty(&usb_msg, obuf, *size);
- if ((cf->can_id & CAN_RTR_FLAG) || (cf->can_dlc == 0))
+ if ((cf->can_id & CAN_RTR_FLAG) || (cf->len == 0))
data_type = PCAN_USBPRO_TXMSG0;
- else if (cf->can_dlc <= 4)
+ else if (cf->len <= 4)
data_type = PCAN_USBPRO_TXMSG4;
else
data_type = PCAN_USBPRO_TXMSG8;
- len = (dev->ctrl_idx << 4) | (cf->can_dlc & 0x0f);
+ len = (dev->ctrl_idx << 4) | (cf->len & 0x0f);
flags = 0;
if (cf->can_id & CAN_EFF_FLAG)
diff --git a/drivers/net/can/usb/ucan.c b/drivers/net/can/usb/ucan.c
index dc5290b36598..7d92da8954fe 100644
--- a/drivers/net/can/usb/ucan.c
+++ b/drivers/net/can/usb/ucan.c
@@ -303,12 +303,12 @@ struct ucan_priv {
struct ucan_urb_context *context_array;
};
-static u8 ucan_get_can_dlc(struct ucan_can_msg *msg, u16 len)
+static u8 ucan_can_cc_dlc2len(struct ucan_can_msg *msg, u16 len)
{
if (le32_to_cpu(msg->id) & CAN_RTR_FLAG)
- return get_can_dlc(msg->dlc);
+ return can_cc_dlc2len(msg->dlc);
else
- return get_can_dlc(len - (UCAN_IN_HDR_SIZE + sizeof(msg->id)));
+ return can_cc_dlc2len(len - (UCAN_IN_HDR_SIZE + sizeof(msg->id)));
}
static void ucan_release_context_array(struct ucan_priv *up)
@@ -614,15 +614,15 @@ static void ucan_rx_can_msg(struct ucan_priv *up, struct ucan_message_in *m)
cf->can_id = canid;
/* compute DLC taking RTR_FLAG into account */
- cf->can_dlc = ucan_get_can_dlc(&m->msg.can_msg, len);
+ cf->len = ucan_can_cc_dlc2len(&m->msg.can_msg, len);
/* copy the payload of non RTR frames */
if (!(cf->can_id & CAN_RTR_FLAG) || (cf->can_id & CAN_ERR_FLAG))
- memcpy(cf->data, m->msg.can_msg.data, cf->can_dlc);
+ memcpy(cf->data, m->msg.can_msg.data, cf->len);
/* don't count error frames as real packets */
stats->rx_packets++;
- stats->rx_bytes += cf->can_dlc;
+ stats->rx_bytes += cf->len;
/* pass it to Linux */
netif_rx(skb);
@@ -1078,15 +1078,15 @@ static struct urb *ucan_prepare_tx_urb(struct ucan_priv *up,
mlen = UCAN_OUT_HDR_SIZE +
offsetof(struct ucan_can_msg, dlc) +
sizeof(m->msg.can_msg.dlc);
- m->msg.can_msg.dlc = cf->can_dlc;
+ m->msg.can_msg.dlc = cf->len;
} else {
mlen = UCAN_OUT_HDR_SIZE +
- sizeof(m->msg.can_msg.id) + cf->can_dlc;
- memcpy(m->msg.can_msg.data, cf->data, cf->can_dlc);
+ sizeof(m->msg.can_msg.id) + cf->len;
+ memcpy(m->msg.can_msg.data, cf->data, cf->len);
}
m->len = cpu_to_le16(mlen);
- context->dlc = cf->can_dlc;
+ context->dlc = cf->len;
m->subtype = echo_index;
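ucan derives the payload length of data frames from the USB message length rather than trusting the DLC field, while RTR frames (which carry no payload) must use the DLC. A sketch of that split (the header size is a placeholder, not UCAN_IN_HDR_SIZE's real value):

    #include <stdint.h>
    #include <stdio.h>

    #define CAN_RTR_FLAG 0x40000000u
    #define IN_HDR_SIZE  16 /* placeholder */

    static uint8_t clamp_dlc(unsigned dlc) { return dlc > 8 ? 8 : dlc; }

    static uint8_t payload_len(uint32_t can_id, uint8_t dlc, unsigned usb_len)
    {
        if (can_id & CAN_RTR_FLAG)
            return clamp_dlc(dlc); /* RTR: no payload, trust the DLC */
        /* data frame: length is implied by the USB transfer itself */
        return clamp_dlc(usb_len - (IN_HDR_SIZE + sizeof(uint32_t)));
    }

    int main(void)
    {
        printf("rtr=%u data=%u\n",
               (unsigned)payload_len(CAN_RTR_FLAG | 0x123, 3, 0),
               (unsigned)payload_len(0x123, 0, IN_HDR_SIZE + 4 + 5));
        return 0;
    }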
diff --git a/drivers/net/can/usb/usb_8dev.c b/drivers/net/can/usb/usb_8dev.c
index 62749c67c959..44478304ff46 100644
--- a/drivers/net/can/usb/usb_8dev.c
+++ b/drivers/net/can/usb/usb_8dev.c
@@ -449,7 +449,7 @@ static void usb_8dev_rx_err_msg(struct usb_8dev_priv *priv,
priv->bec.rxerr = rxerr;
stats->rx_packets++;
- stats->rx_bytes += cf->can_dlc;
+ stats->rx_bytes += cf->len;
netif_rx(skb);
}
@@ -470,7 +470,7 @@ static void usb_8dev_rx_can_msg(struct usb_8dev_priv *priv,
return;
cf->can_id = be32_to_cpu(msg->id);
- cf->can_dlc = get_can_dlc(msg->dlc & 0xF);
+ can_frame_set_cc_len(cf, msg->dlc & 0xF, priv->can.ctrlmode);
if (msg->flags & USB_8DEV_EXTID)
cf->can_id |= CAN_EFF_FLAG;
@@ -478,10 +478,10 @@ static void usb_8dev_rx_can_msg(struct usb_8dev_priv *priv,
if (msg->flags & USB_8DEV_RTR)
cf->can_id |= CAN_RTR_FLAG;
else
- memcpy(cf->data, msg->data, cf->can_dlc);
+ memcpy(cf->data, msg->data, cf->len);
stats->rx_packets++;
- stats->rx_bytes += cf->can_dlc;
+ stats->rx_bytes += cf->len;
netif_rx(skb);
can_led_event(priv->netdev, CAN_LED_EVENT_RX);
@@ -637,8 +637,8 @@ static netdev_tx_t usb_8dev_start_xmit(struct sk_buff *skb,
msg->flags |= USB_8DEV_EXTID;
msg->id = cpu_to_be32(cf->can_id & CAN_ERR_MASK);
- msg->dlc = cf->can_dlc;
- memcpy(msg->data, cf->data, cf->can_dlc);
+ msg->dlc = can_get_cc_dlc(cf, priv->can.ctrlmode);
+ memcpy(msg->data, cf->data, cf->len);
msg->end = USB_8DEV_DATA_END;
for (i = 0; i < MAX_TX_URBS; i++) {
@@ -656,7 +656,7 @@ static netdev_tx_t usb_8dev_start_xmit(struct sk_buff *skb,
context->priv = priv;
context->echo_index = i;
- context->dlc = cf->can_dlc;
+ context->dlc = cf->len;
usb_fill_bulk_urb(urb, priv->udev,
usb_sndbulkpipe(priv->udev, USB_8DEV_ENDP_DATA_TX),
@@ -928,7 +928,8 @@ static int usb_8dev_probe(struct usb_interface *intf,
priv->can.do_get_berr_counter = usb_8dev_get_berr_counter;
priv->can.ctrlmode_supported = CAN_CTRLMODE_LOOPBACK |
CAN_CTRLMODE_LISTENONLY |
- CAN_CTRLMODE_ONE_SHOT;
+ CAN_CTRLMODE_ONE_SHOT |
+ CAN_CTRLMODE_CC_LEN8_DLC;
netdev->netdev_ops = &usb_8dev_netdev_ops;
diff --git a/drivers/net/can/vxcan.c b/drivers/net/can/vxcan.c
index d6ba9426be4d..fa47bab510bb 100644
--- a/drivers/net/can/vxcan.c
+++ b/drivers/net/can/vxcan.c
@@ -186,7 +186,7 @@ static int vxcan_newlink(struct net *net, struct net_device *dev,
}
if (ifmp && tbp[IFLA_IFNAME]) {
- nla_strlcpy(ifname, tbp[IFLA_IFNAME], IFNAMSIZ);
+ nla_strscpy(ifname, tbp[IFLA_IFNAME], IFNAMSIZ);
name_assign_type = NET_NAME_USER;
} else {
snprintf(ifname, IFNAMSIZ, DRV_NAME "%%d");
@@ -223,7 +223,7 @@ static int vxcan_newlink(struct net *net, struct net_device *dev,
/* register first device */
if (tb[IFLA_IFNAME])
- nla_strlcpy(dev->name, tb[IFLA_IFNAME], IFNAMSIZ);
+ nla_strscpy(dev->name, tb[IFLA_IFNAME], IFNAMSIZ);
else
snprintf(dev->name, IFNAMSIZ, DRV_NAME "%%d");
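nla_strscpy() replaces nla_strlcpy() here; like strscpy() it always NUL-terminates and reports truncation as -E2BIG instead of returning the would-be source length. A userspace approximation of the contract (the kernel helper additionally copes with non-NUL-terminated netlink attributes):

    #include <errno.h>
    #include <stdio.h>
    #include <string.h>

    /* returns the copied length, or -E2BIG if src did not fit */
    static long strscpy_sketch(char *dst, const char *src, size_t size)
    {
        size_t len = strnlen(src, size);

        if (len == size) {
            if (size == 0)
                return -E2BIG;
            len = size - 1;
            memcpy(dst, src, len);
            dst[len] = '\0'; /* truncated, but still terminated */
            return -E2BIG;
        }
        memcpy(dst, src, len + 1);
        return (long)len;
    }

    int main(void)
    {
        char name[8];

        printf("%ld '%s'\n", strscpy_sketch(name, "vxcan0", sizeof(name)), name);
        printf("%ld '%s'\n", strscpy_sketch(name, "far-too-long", sizeof(name)), name);
        return 0;
    }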
diff --git a/drivers/net/can/xilinx_can.c b/drivers/net/can/xilinx_can.c
index 48d746e18f30..3f54edee92eb 100644
--- a/drivers/net/can/xilinx_can.c
+++ b/drivers/net/can/xilinx_can.c
@@ -583,7 +583,7 @@ static void xcan_write_frame(struct net_device *ndev, struct sk_buff *skb,
id |= XCAN_IDR_SRR_MASK;
}
- dlc = can_len2dlc(cf->len) << XCAN_DLCR_DLC_SHIFT;
+ dlc = can_fd_len2dlc(cf->len) << XCAN_DLCR_DLC_SHIFT;
if (can_is_canfd_skb(skb)) {
if (cf->flags & CANFD_BRS)
dlc |= XCAN_DLCR_BRS_MASK;
@@ -759,7 +759,7 @@ static int xcan_rx(struct net_device *ndev, int frame_base)
XCAN_DLCR_DLC_SHIFT;
/* Change Xilinx CAN data length format to socketCAN data format */
- cf->can_dlc = get_can_dlc(dlc);
+ cf->len = can_cc_dlc2len(dlc);
/* Change Xilinx CAN ID format to socketCAN ID format */
if (id_xcan & XCAN_IDR_IDE_MASK) {
@@ -784,13 +784,13 @@ static int xcan_rx(struct net_device *ndev, int frame_base)
if (!(cf->can_id & CAN_RTR_FLAG)) {
/* Change Xilinx CAN data format to socketCAN data format */
- if (cf->can_dlc > 0)
+ if (cf->len > 0)
*(__be32 *)(cf->data) = cpu_to_be32(data[0]);
- if (cf->can_dlc > 4)
+ if (cf->len > 4)
*(__be32 *)(cf->data + 4) = cpu_to_be32(data[1]);
}
- stats->rx_bytes += cf->can_dlc;
+ stats->rx_bytes += cf->len;
stats->rx_packets++;
netif_receive_skb(skb);
@@ -832,10 +832,10 @@ static int xcanfd_rx(struct net_device *ndev, int frame_base)
* format
*/
if (dlc & XCAN_DLCR_EDL_MASK)
- cf->len = can_dlc2len((dlc & XCAN_DLCR_DLC_MASK) >>
+ cf->len = can_fd_dlc2len((dlc & XCAN_DLCR_DLC_MASK) >>
XCAN_DLCR_DLC_SHIFT);
else
- cf->len = get_can_dlc((dlc & XCAN_DLCR_DLC_MASK) >>
+ cf->len = can_cc_dlc2len((dlc & XCAN_DLCR_DLC_MASK) >>
XCAN_DLCR_DLC_SHIFT);
/* Change Xilinx CAN ID format to socketCAN ID format */
@@ -970,7 +970,7 @@ static void xcan_update_error_state_after_rxtx(struct net_device *ndev)
struct net_device_stats *stats = &ndev->stats;
stats->rx_packets++;
- stats->rx_bytes += cf->can_dlc;
+ stats->rx_bytes += cf->len;
netif_rx(skb);
}
}
diff --git a/drivers/net/dsa/Kconfig b/drivers/net/dsa/Kconfig
index 2451f61a38e4..f6a0488589fc 100644
--- a/drivers/net/dsa/Kconfig
+++ b/drivers/net/dsa/Kconfig
@@ -24,6 +24,8 @@ config NET_DSA_LOOP
This enables support for a fake mock-up switch chip which
exercises the DSA APIs.
+source "drivers/net/dsa/hirschmann/Kconfig"
+
config NET_DSA_LANTIQ_GSWIP
tristate "Lantiq / Intel GSWIP"
depends on HAS_IOMEM && NET_DSA
diff --git a/drivers/net/dsa/Makefile b/drivers/net/dsa/Makefile
index 4a943ccc2ca4..a84adb140a04 100644
--- a/drivers/net/dsa/Makefile
+++ b/drivers/net/dsa/Makefile
@@ -18,6 +18,7 @@ obj-$(CONFIG_NET_DSA_VITESSE_VSC73XX) += vitesse-vsc73xx-core.o
obj-$(CONFIG_NET_DSA_VITESSE_VSC73XX_PLATFORM) += vitesse-vsc73xx-platform.o
obj-$(CONFIG_NET_DSA_VITESSE_VSC73XX_SPI) += vitesse-vsc73xx-spi.o
obj-y += b53/
+obj-y += hirschmann/
obj-y += microchip/
obj-y += mv88e6xxx/
obj-y += ocelot/
diff --git a/drivers/net/dsa/hirschmann/Kconfig b/drivers/net/dsa/hirschmann/Kconfig
new file mode 100644
index 000000000000..222dd35e2c9d
--- /dev/null
+++ b/drivers/net/dsa/hirschmann/Kconfig
@@ -0,0 +1,9 @@
+# SPDX-License-Identifier: GPL-2.0
+config NET_DSA_HIRSCHMANN_HELLCREEK
+ tristate "Hirschmann Hellcreek TSN Switch support"
+ depends on HAS_IOMEM
+ depends on NET_DSA
+ depends on PTP_1588_CLOCK
+ select NET_DSA_TAG_HELLCREEK
+ help
+ This driver adds support for Hirschmann Hellcreek TSN switches.
diff --git a/drivers/net/dsa/hirschmann/Makefile b/drivers/net/dsa/hirschmann/Makefile
new file mode 100644
index 000000000000..f4075c2998b5
--- /dev/null
+++ b/drivers/net/dsa/hirschmann/Makefile
@@ -0,0 +1,5 @@
+# SPDX-License-Identifier: GPL-2.0
+obj-$(CONFIG_NET_DSA_HIRSCHMANN_HELLCREEK) += hellcreek_sw.o
+hellcreek_sw-objs := hellcreek.o
+hellcreek_sw-objs += hellcreek_ptp.o
+hellcreek_sw-objs += hellcreek_hwtstamp.o
diff --git a/drivers/net/dsa/hirschmann/hellcreek.c b/drivers/net/dsa/hirschmann/hellcreek.c
new file mode 100644
index 000000000000..6420b76ea37c
--- /dev/null
+++ b/drivers/net/dsa/hirschmann/hellcreek.c
@@ -0,0 +1,1339 @@
+// SPDX-License-Identifier: (GPL-2.0 or MIT)
+/*
+ * DSA driver for:
+ * Hirschmann Hellcreek TSN switch.
+ *
+ * Copyright (C) 2019,2020 Linutronix GmbH
+ * Author Kurt Kanzenbach <kurt@linutronix.de>
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/of_mdio.h>
+#include <linux/platform_device.h>
+#include <linux/bitops.h>
+#include <linux/if_bridge.h>
+#include <linux/if_vlan.h>
+#include <linux/etherdevice.h>
+#include <linux/random.h>
+#include <linux/iopoll.h>
+#include <linux/mutex.h>
+#include <linux/delay.h>
+#include <net/dsa.h>
+
+#include "hellcreek.h"
+#include "hellcreek_ptp.h"
+#include "hellcreek_hwtstamp.h"
+
+static const struct hellcreek_counter hellcreek_counter[] = {
+ { 0x00, "RxFiltered", },
+ { 0x01, "RxOctets1k", },
+ { 0x02, "RxVTAG", },
+ { 0x03, "RxL2BAD", },
+ { 0x04, "RxOverloadDrop", },
+ { 0x05, "RxUC", },
+ { 0x06, "RxMC", },
+ { 0x07, "RxBC", },
+ { 0x08, "RxRS<64", },
+ { 0x09, "RxRS64", },
+ { 0x0a, "RxRS65_127", },
+ { 0x0b, "RxRS128_255", },
+ { 0x0c, "RxRS256_511", },
+ { 0x0d, "RxRS512_1023", },
+ { 0x0e, "RxRS1024_1518", },
+ { 0x0f, "RxRS>1518", },
+ { 0x10, "TxTailDropQueue0", },
+ { 0x11, "TxTailDropQueue1", },
+ { 0x12, "TxTailDropQueue2", },
+ { 0x13, "TxTailDropQueue3", },
+ { 0x14, "TxTailDropQueue4", },
+ { 0x15, "TxTailDropQueue5", },
+ { 0x16, "TxTailDropQueue6", },
+ { 0x17, "TxTailDropQueue7", },
+ { 0x18, "RxTrafficClass0", },
+ { 0x19, "RxTrafficClass1", },
+ { 0x1a, "RxTrafficClass2", },
+ { 0x1b, "RxTrafficClass3", },
+ { 0x1c, "RxTrafficClass4", },
+ { 0x1d, "RxTrafficClass5", },
+ { 0x1e, "RxTrafficClass6", },
+ { 0x1f, "RxTrafficClass7", },
+ { 0x21, "TxOctets1k", },
+ { 0x22, "TxVTAG", },
+ { 0x23, "TxL2BAD", },
+ { 0x25, "TxUC", },
+ { 0x26, "TxMC", },
+ { 0x27, "TxBC", },
+ { 0x28, "TxTS<64", },
+ { 0x29, "TxTS64", },
+ { 0x2a, "TxTS65_127", },
+ { 0x2b, "TxTS128_255", },
+ { 0x2c, "TxTS256_511", },
+ { 0x2d, "TxTS512_1023", },
+ { 0x2e, "TxTS1024_1518", },
+ { 0x2f, "TxTS>1518", },
+ { 0x30, "TxTrafficClassOverrun0", },
+ { 0x31, "TxTrafficClassOverrun1", },
+ { 0x32, "TxTrafficClassOverrun2", },
+ { 0x33, "TxTrafficClassOverrun3", },
+ { 0x34, "TxTrafficClassOverrun4", },
+ { 0x35, "TxTrafficClassOverrun5", },
+ { 0x36, "TxTrafficClassOverrun6", },
+ { 0x37, "TxTrafficClassOverrun7", },
+ { 0x38, "TxTrafficClass0", },
+ { 0x39, "TxTrafficClass1", },
+ { 0x3a, "TxTrafficClass2", },
+ { 0x3b, "TxTrafficClass3", },
+ { 0x3c, "TxTrafficClass4", },
+ { 0x3d, "TxTrafficClass5", },
+ { 0x3e, "TxTrafficClass6", },
+ { 0x3f, "TxTrafficClass7", },
+};
+
+static u16 hellcreek_read(struct hellcreek *hellcreek, unsigned int offset)
+{
+ return readw(hellcreek->base + offset);
+}
+
+static u16 hellcreek_read_ctrl(struct hellcreek *hellcreek)
+{
+ return readw(hellcreek->base + HR_CTRL_C);
+}
+
+static u16 hellcreek_read_stat(struct hellcreek *hellcreek)
+{
+ return readw(hellcreek->base + HR_SWSTAT);
+}
+
+static void hellcreek_write(struct hellcreek *hellcreek, u16 data,
+ unsigned int offset)
+{
+ writew(data, hellcreek->base + offset);
+}
+
+static void hellcreek_select_port(struct hellcreek *hellcreek, int port)
+{
+ u16 val = port << HR_PSEL_PTWSEL_SHIFT;
+
+ hellcreek_write(hellcreek, val, HR_PSEL);
+}
+
+static void hellcreek_select_prio(struct hellcreek *hellcreek, int prio)
+{
+ u16 val = prio << HR_PSEL_PRTCWSEL_SHIFT;
+
+ hellcreek_write(hellcreek, val, HR_PSEL);
+}
+
+static void hellcreek_select_counter(struct hellcreek *hellcreek, int counter)
+{
+ u16 val = counter << HR_CSEL_SHIFT;
+
+ hellcreek_write(hellcreek, val, HR_CSEL);
+
+ /* Data sheet states to wait at least 20 internal clock cycles */
+ ndelay(200);
+}
+
+static void hellcreek_select_vlan(struct hellcreek *hellcreek, int vid,
+ bool pvid)
+{
+ u16 val = 0;
+
+ /* Set pvid bit first */
+ if (pvid)
+ val |= HR_VIDCFG_PVID;
+ hellcreek_write(hellcreek, val, HR_VIDCFG);
+
+ /* Set vlan */
+ val |= vid << HR_VIDCFG_VID_SHIFT;
+ hellcreek_write(hellcreek, val, HR_VIDCFG);
+}
+
+static int hellcreek_wait_until_ready(struct hellcreek *hellcreek)
+{
+ u16 val;
+
+ /* Wait up to 1ms, although 3 us should be enough */
+ return readx_poll_timeout(hellcreek_read_ctrl, hellcreek,
+ val, val & HR_CTRL_C_READY,
+ 3, 1000);
+}
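readx_poll_timeout(op, addr, val, cond, sleep_us, timeout_us) repeatedly evaluates val = op(addr) until cond is true or timeout_us has elapsed, returning 0 on success and -ETIMEDOUT otherwise; the _atomic variant used below busy-waits instead of sleeping. A plain-C rendering of the loop (the kernel macro also sleeps between reads and re-checks the condition once after the deadline):

    #include <errno.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <time.h>

    /* poll read_fn(ctx) until ready_bit is set or timeout_us elapses */
    static int poll_ready(uint16_t (*read_fn)(void *), void *ctx,
                          uint16_t ready_bit, long timeout_us)
    {
        struct timespec start, now;

        clock_gettime(CLOCK_MONOTONIC, &start);
        for (;;) {
            if (read_fn(ctx) & ready_bit)
                return 0;
            clock_gettime(CLOCK_MONOTONIC, &now);
            if ((now.tv_sec - start.tv_sec) * 1000000L +
                (now.tv_nsec - start.tv_nsec) / 1000L > timeout_us)
                return -ETIMEDOUT;
        }
    }

    static uint16_t fake_ctrl_read(void *reg) { return *(uint16_t *)reg; }

    int main(void)
    {
        uint16_t reg = 0x0001; /* pretend HR_CTRL_C_READY is bit 0 */

        printf("ret=%d\n", poll_ready(fake_ctrl_read, &reg, 0x0001, 1000));
        return 0;
    }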
+
+static int hellcreek_wait_until_transitioned(struct hellcreek *hellcreek)
+{
+ u16 val;
+
+ return readx_poll_timeout_atomic(hellcreek_read_ctrl, hellcreek,
+ val, !(val & HR_CTRL_C_TRANSITION),
+ 1, 1000);
+}
+
+static int hellcreek_wait_fdb_ready(struct hellcreek *hellcreek)
+{
+ u16 val;
+
+ return readx_poll_timeout_atomic(hellcreek_read_stat, hellcreek,
+ val, !(val & HR_SWSTAT_BUSY),
+ 1, 1000);
+}
+
+static int hellcreek_detect(struct hellcreek *hellcreek)
+{
+ u16 id, rel_low, rel_high, date_low, date_high, tgd_ver;
+ u8 tgd_maj, tgd_min;
+ u32 rel, date;
+
+ id = hellcreek_read(hellcreek, HR_MODID_C);
+ rel_low = hellcreek_read(hellcreek, HR_REL_L_C);
+ rel_high = hellcreek_read(hellcreek, HR_REL_H_C);
+ date_low = hellcreek_read(hellcreek, HR_BLD_L_C);
+ date_high = hellcreek_read(hellcreek, HR_BLD_H_C);
+ tgd_ver = hellcreek_read(hellcreek, TR_TGDVER);
+
+ if (id != hellcreek->pdata->module_id)
+ return -ENODEV;
+
+ rel = rel_low | (rel_high << 16);
+ date = date_low | (date_high << 16);
+ tgd_maj = (tgd_ver & TR_TGDVER_REV_MAJ_MASK) >> TR_TGDVER_REV_MAJ_SHIFT;
+ tgd_min = (tgd_ver & TR_TGDVER_REV_MIN_MASK) >> TR_TGDVER_REV_MIN_SHIFT;
+
+ dev_info(hellcreek->dev, "Module ID=%02x Release=%04x Date=%04x TGD Version=%02x.%02x\n",
+ id, rel, date, tgd_maj, tgd_min);
+
+ return 0;
+}
+
+static void hellcreek_feature_detect(struct hellcreek *hellcreek)
+{
+ u16 features;
+
+ features = hellcreek_read(hellcreek, HR_FEABITS0);
+
+ /* Currently we only detect the size of the FDB table */
+ hellcreek->fdb_entries = ((features & HR_FEABITS0_FDBBINS_MASK) >>
+ HR_FEABITS0_FDBBINS_SHIFT) * 32;
+
+ dev_info(hellcreek->dev, "Feature detect: FDB entries=%zu\n",
+ hellcreek->fdb_entries);
+}
+
+static enum dsa_tag_protocol hellcreek_get_tag_protocol(struct dsa_switch *ds,
+ int port,
+ enum dsa_tag_protocol mp)
+{
+ return DSA_TAG_PROTO_HELLCREEK;
+}
+
+static int hellcreek_port_enable(struct dsa_switch *ds, int port,
+ struct phy_device *phy)
+{
+ struct hellcreek *hellcreek = ds->priv;
+ struct hellcreek_port *hellcreek_port;
+ u16 val;
+
+ hellcreek_port = &hellcreek->ports[port];
+
+ dev_dbg(hellcreek->dev, "Enable port %d\n", port);
+
+ mutex_lock(&hellcreek->reg_lock);
+
+ hellcreek_select_port(hellcreek, port);
+ val = hellcreek_port->ptcfg;
+ val |= HR_PTCFG_ADMIN_EN;
+ hellcreek_write(hellcreek, val, HR_PTCFG);
+ hellcreek_port->ptcfg = val;
+
+ mutex_unlock(&hellcreek->reg_lock);
+
+ return 0;
+}
+
+static void hellcreek_port_disable(struct dsa_switch *ds, int port)
+{
+ struct hellcreek *hellcreek = ds->priv;
+ struct hellcreek_port *hellcreek_port;
+ u16 val;
+
+ hellcreek_port = &hellcreek->ports[port];
+
+ dev_dbg(hellcreek->dev, "Disable port %d\n", port);
+
+ mutex_lock(&hellcreek->reg_lock);
+
+ hellcreek_select_port(hellcreek, port);
+ val = hellcreek_port->ptcfg;
+ val &= ~HR_PTCFG_ADMIN_EN;
+ hellcreek_write(hellcreek, val, HR_PTCFG);
+ hellcreek_port->ptcfg = val;
+
+ mutex_unlock(&hellcreek->reg_lock);
+}
+
+static void hellcreek_get_strings(struct dsa_switch *ds, int port,
+ u32 stringset, uint8_t *data)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(hellcreek_counter); ++i) {
+ const struct hellcreek_counter *counter = &hellcreek_counter[i];
+
+ strlcpy(data + i * ETH_GSTRING_LEN,
+ counter->name, ETH_GSTRING_LEN);
+ }
+}
+
+static int hellcreek_get_sset_count(struct dsa_switch *ds, int port, int sset)
+{
+ if (sset != ETH_SS_STATS)
+ return 0;
+
+ return ARRAY_SIZE(hellcreek_counter);
+}
+
+static void hellcreek_get_ethtool_stats(struct dsa_switch *ds, int port,
+ uint64_t *data)
+{
+ struct hellcreek *hellcreek = ds->priv;
+ struct hellcreek_port *hellcreek_port;
+ int i;
+
+ hellcreek_port = &hellcreek->ports[port];
+
+ for (i = 0; i < ARRAY_SIZE(hellcreek_counter); ++i) {
+ const struct hellcreek_counter *counter = &hellcreek_counter[i];
+ u8 offset = counter->offset + port * 64;
+ u16 high, low;
+ u64 value;
+
+ mutex_lock(&hellcreek->reg_lock);
+
+ hellcreek_select_counter(hellcreek, offset);
+
+ /* The registers are locked internally by selecting the
+ * counter. So low and high can be read without reading high
+ * again.
+ */
+ high = hellcreek_read(hellcreek, HR_CRDH);
+ low = hellcreek_read(hellcreek, HR_CRDL);
+ value = ((u64)high << 16) | low;
+
+ hellcreek_port->counter_values[i] += value;
+ data[i] = hellcreek_port->counter_values[i];
+
+ mutex_unlock(&hellcreek->reg_lock);
+ }
+}
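Each counter is 32 bits wide and read as two 16-bit halves, latched consistently by the preceding counter select; the driver then accumulates into a 64-bit software value, which suggests the hardware counter clears on read (an inference from the +=, not a fact documented in this hunk). The widening and accumulation in isolation:

    #include <stdint.h>
    #include <stdio.h>

    static uint16_t mock_crdh = 0x0001, mock_crdl = 0x86a0; /* 100000 */

    static uint64_t read_counter_once(void)
    {
        uint16_t high = mock_crdh; /* HR_CRDH */
        uint16_t low  = mock_crdl; /* HR_CRDL */

        return ((uint64_t)high << 16) | low;
    }

    int main(void)
    {
        uint64_t accumulated = 0;

        /* two polls of a clear-on-read counter add up in software */
        accumulated += read_counter_once();
        accumulated += read_counter_once();
        printf("accumulated=%llu\n", (unsigned long long)accumulated);
        return 0;
    }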
+
+static u16 hellcreek_private_vid(int port)
+{
+ return VLAN_N_VID - port + 1;
+}
+
+static int hellcreek_vlan_prepare(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_vlan *vlan)
+{
+ struct hellcreek *hellcreek = ds->priv;
+ int i;
+
+ dev_dbg(hellcreek->dev, "VLAN prepare for port %d\n", port);
+
+ /* Restriction: Make sure that nobody uses the "private" VLANs. These
+ * VLANs are internally used by the driver to ensure port
+ * separation. Thus, they cannot be used by someone else.
+ */
+ for (i = 0; i < hellcreek->pdata->num_ports; ++i) {
+ const u16 restricted_vid = hellcreek_private_vid(i);
+ u16 vid;
+
+ if (!dsa_is_user_port(ds, i))
+ continue;
+
+ for (vid = vlan->vid_begin; vid <= vlan->vid_end; ++vid)
+ if (vid == restricted_vid)
+ return -EBUSY;
+ }
+
+ return 0;
+}
+
+static void hellcreek_select_vlan_params(struct hellcreek *hellcreek, int port,
+ int *shift, int *mask)
+{
+ switch (port) {
+ case 0:
+ *shift = HR_VIDMBRCFG_P0MBR_SHIFT;
+ *mask = HR_VIDMBRCFG_P0MBR_MASK;
+ break;
+ case 1:
+ *shift = HR_VIDMBRCFG_P1MBR_SHIFT;
+ *mask = HR_VIDMBRCFG_P1MBR_MASK;
+ break;
+ case 2:
+ *shift = HR_VIDMBRCFG_P2MBR_SHIFT;
+ *mask = HR_VIDMBRCFG_P2MBR_MASK;
+ break;
+ case 3:
+ *shift = HR_VIDMBRCFG_P3MBR_SHIFT;
+ *mask = HR_VIDMBRCFG_P3MBR_MASK;
+ break;
+ default:
+ *shift = *mask = 0;
+ dev_err(hellcreek->dev, "Unknown port %d selected!\n", port);
+ }
+}
+
+static void hellcreek_apply_vlan(struct hellcreek *hellcreek, int port, u16 vid,
+ bool pvid, bool untagged)
+{
+ int shift, mask;
+ u16 val;
+
+ dev_dbg(hellcreek->dev, "Apply VLAN: port=%d vid=%u pvid=%d untagged=%d",
+ port, vid, pvid, untagged);
+
+ mutex_lock(&hellcreek->reg_lock);
+
+ hellcreek_select_port(hellcreek, port);
+ hellcreek_select_vlan(hellcreek, vid, pvid);
+
+ /* Setup port vlan membership */
+ hellcreek_select_vlan_params(hellcreek, port, &shift, &mask);
+ val = hellcreek->vidmbrcfg[vid];
+ val &= ~mask;
+ if (untagged)
+ val |= HELLCREEK_VLAN_UNTAGGED_MEMBER << shift;
+ else
+ val |= HELLCREEK_VLAN_TAGGED_MEMBER << shift;
+
+ hellcreek_write(hellcreek, val, HR_VIDMBRCFG);
+ hellcreek->vidmbrcfg[vid] = val;
+
+ mutex_unlock(&hellcreek->reg_lock);
+}
+
+static void hellcreek_unapply_vlan(struct hellcreek *hellcreek, int port,
+ u16 vid)
+{
+ int shift, mask;
+ u16 val;
+
+ dev_dbg(hellcreek->dev, "Unapply VLAN: port=%d vid=%u\n", port, vid);
+
+ mutex_lock(&hellcreek->reg_lock);
+
+ hellcreek_select_vlan(hellcreek, vid, 0);
+
+ /* Setup port vlan membership */
+ hellcreek_select_vlan_params(hellcreek, port, &shift, &mask);
+ val = hellcreek->vidmbrcfg[vid];
+ val &= ~mask;
+ val |= HELLCREEK_VLAN_NO_MEMBER << shift;
+
+ hellcreek_write(hellcreek, val, HR_VIDMBRCFG);
+ hellcreek->vidmbrcfg[vid] = val;
+
+ mutex_unlock(&hellcreek->reg_lock);
+}
+
+static void hellcreek_vlan_add(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_vlan *vlan)
+{
+ bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
+ bool pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID;
+ struct hellcreek *hellcreek = ds->priv;
+ u16 vid;
+
+ dev_dbg(hellcreek->dev, "Add VLANs (%d -- %d) on port %d, %s, %s\n",
+ vlan->vid_begin, vlan->vid_end, port,
+ untagged ? "untagged" : "tagged",
+ pvid ? "PVID" : "no PVID");
+
+ for (vid = vlan->vid_begin; vid <= vlan->vid_end; ++vid)
+ hellcreek_apply_vlan(hellcreek, port, vid, pvid, untagged);
+}
+
+static int hellcreek_vlan_del(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_vlan *vlan)
+{
+ struct hellcreek *hellcreek = ds->priv;
+ u16 vid;
+
+ dev_dbg(hellcreek->dev, "Remove VLANs (%d -- %d) on port %d\n",
+ vlan->vid_begin, vlan->vid_end, port);
+
+ for (vid = vlan->vid_begin; vid <= vlan->vid_end; ++vid)
+ hellcreek_unapply_vlan(hellcreek, port, vid);
+
+ return 0;
+}
+
+static void hellcreek_port_stp_state_set(struct dsa_switch *ds, int port,
+ u8 state)
+{
+ struct hellcreek *hellcreek = ds->priv;
+ struct hellcreek_port *hellcreek_port;
+ const char *new_state;
+ u16 val;
+
+ mutex_lock(&hellcreek->reg_lock);
+
+ hellcreek_port = &hellcreek->ports[port];
+ val = hellcreek_port->ptcfg;
+
+ switch (state) {
+ case BR_STATE_DISABLED:
+ new_state = "DISABLED";
+ val |= HR_PTCFG_BLOCKED;
+ val &= ~HR_PTCFG_LEARNING_EN;
+ break;
+ case BR_STATE_BLOCKING:
+ new_state = "BLOCKING";
+ val |= HR_PTCFG_BLOCKED;
+ val &= ~HR_PTCFG_LEARNING_EN;
+ break;
+ case BR_STATE_LISTENING:
+ new_state = "LISTENING";
+ val |= HR_PTCFG_BLOCKED;
+ val &= ~HR_PTCFG_LEARNING_EN;
+ break;
+ case BR_STATE_LEARNING:
+ new_state = "LEARNING";
+ val |= HR_PTCFG_BLOCKED;
+ val |= HR_PTCFG_LEARNING_EN;
+ break;
+ case BR_STATE_FORWARDING:
+ new_state = "FORWARDING";
+ val &= ~HR_PTCFG_BLOCKED;
+ val |= HR_PTCFG_LEARNING_EN;
+ break;
+ default:
+ new_state = "UNKNOWN";
+ }
+
+ hellcreek_select_port(hellcreek, port);
+ hellcreek_write(hellcreek, val, HR_PTCFG);
+ hellcreek_port->ptcfg = val;
+
+ mutex_unlock(&hellcreek->reg_lock);
+
+ dev_dbg(hellcreek->dev, "Configured STP state for port %d: %s\n",
+ port, new_state);
+}
+
+static void hellcreek_setup_ingressflt(struct hellcreek *hellcreek, int port,
+ bool enable)
+{
+ struct hellcreek_port *hellcreek_port = &hellcreek->ports[port];
+ u16 ptcfg;
+
+ mutex_lock(&hellcreek->reg_lock);
+
+ ptcfg = hellcreek_port->ptcfg;
+
+ if (enable)
+ ptcfg |= HR_PTCFG_INGRESSFLT;
+ else
+ ptcfg &= ~HR_PTCFG_INGRESSFLT;
+
+ hellcreek_select_port(hellcreek, port);
+ hellcreek_write(hellcreek, ptcfg, HR_PTCFG);
+ hellcreek_port->ptcfg = ptcfg;
+
+ mutex_unlock(&hellcreek->reg_lock);
+}
+
+static void hellcreek_setup_vlan_awareness(struct hellcreek *hellcreek,
+ bool enable)
+{
+ u16 swcfg;
+
+ mutex_lock(&hellcreek->reg_lock);
+
+ swcfg = hellcreek->swcfg;
+
+ if (enable)
+ swcfg &= ~HR_SWCFG_VLAN_UNAWARE;
+ else
+ swcfg |= HR_SWCFG_VLAN_UNAWARE;
+
+ hellcreek_write(hellcreek, swcfg, HR_SWCFG);
+
+ mutex_unlock(&hellcreek->reg_lock);
+}
+
+/* Default setup for DSA: VLAN <X>: CPU and Port <X> egress untagged. */
+static void hellcreek_setup_vlan_membership(struct dsa_switch *ds, int port,
+ bool enabled)
+{
+ const u16 vid = hellcreek_private_vid(port);
+ int upstream = dsa_upstream_port(ds, port);
+ struct hellcreek *hellcreek = ds->priv;
+
+ /* Apply vid to port as egress untagged and port vlan id */
+ if (enabled)
+ hellcreek_apply_vlan(hellcreek, port, vid, true, true);
+ else
+ hellcreek_unapply_vlan(hellcreek, port, vid);
+
+ /* Apply vid to cpu port as well */
+ if (enabled)
+ hellcreek_apply_vlan(hellcreek, upstream, vid, false, true);
+ else
+ hellcreek_unapply_vlan(hellcreek, upstream, vid);
+}
+
+static int hellcreek_port_bridge_join(struct dsa_switch *ds, int port,
+ struct net_device *br)
+{
+ struct hellcreek *hellcreek = ds->priv;
+
+ dev_dbg(hellcreek->dev, "Port %d joins a bridge\n", port);
+
+ /* When joining a vlan_filtering bridge, keep the switch VLAN aware */
+ if (!ds->vlan_filtering)
+ hellcreek_setup_vlan_awareness(hellcreek, false);
+
+ /* Drop private vlans */
+ hellcreek_setup_vlan_membership(ds, port, false);
+
+ return 0;
+}
+
+static void hellcreek_port_bridge_leave(struct dsa_switch *ds, int port,
+ struct net_device *br)
+{
+ struct hellcreek *hellcreek = ds->priv;
+
+ dev_dbg(hellcreek->dev, "Port %d leaves a bridge\n", port);
+
+ /* Enable VLAN awareness */
+ hellcreek_setup_vlan_awareness(hellcreek, true);
+
+ /* Enable private vlans */
+ hellcreek_setup_vlan_membership(ds, port, true);
+}
+
+static int __hellcreek_fdb_add(struct hellcreek *hellcreek,
+ const struct hellcreek_fdb_entry *entry)
+{
+ u16 meta = 0;
+
+ dev_dbg(hellcreek->dev, "Add static FDB entry: MAC=%pM, MASK=0x%02x, "
+ "OBT=%d, REPRIO_EN=%d, PRIO=%d\n", entry->mac, entry->portmask,
+ entry->is_obt, entry->reprio_en, entry->reprio_tc);
+
+ /* Add mac address */
+ hellcreek_write(hellcreek, entry->mac[1] | (entry->mac[0] << 8), HR_FDBWDH);
+ hellcreek_write(hellcreek, entry->mac[3] | (entry->mac[2] << 8), HR_FDBWDM);
+ hellcreek_write(hellcreek, entry->mac[5] | (entry->mac[4] << 8), HR_FDBWDL);
+
+ /* Meta data */
+ meta |= entry->portmask << HR_FDBWRM0_PORTMASK_SHIFT;
+ if (entry->is_obt)
+ meta |= HR_FDBWRM0_OBT;
+ if (entry->reprio_en) {
+ meta |= HR_FDBWRM0_REPRIO_EN;
+ meta |= entry->reprio_tc << HR_FDBWRM0_REPRIO_TC_SHIFT;
+ }
+ hellcreek_write(hellcreek, meta, HR_FDBWRM0);
+
+ /* Commit */
+ hellcreek_write(hellcreek, 0x00, HR_FDBWRCMD);
+
+ /* Wait until done */
+ return hellcreek_wait_fdb_ready(hellcreek);
+}
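The 48-bit MAC address is split across three 16-bit write registers, high word first, with the higher-order byte of each pair in the upper half. The packing on its own:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        const uint8_t mac[6] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };
        uint16_t wdh = mac[1] | (mac[0] << 8); /* HR_FDBWDH */
        uint16_t wdm = mac[3] | (mac[2] << 8); /* HR_FDBWDM */
        uint16_t wdl = mac[5] | (mac[4] << 8); /* HR_FDBWDL */

        printf("H=%04x M=%04x L=%04x\n", (unsigned)wdh, (unsigned)wdm,
               (unsigned)wdl);
        return 0;
    }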
+
+static int __hellcreek_fdb_del(struct hellcreek *hellcreek,
+ const struct hellcreek_fdb_entry *entry)
+{
+ dev_dbg(hellcreek->dev, "Delete FDB entry: MAC=%pM!\n", entry->mac);
+
+ /* Delete by matching idx */
+ hellcreek_write(hellcreek, entry->idx | HR_FDBWRCMD_FDBDEL, HR_FDBWRCMD);
+
+ /* Wait until done */
+ return hellcreek_wait_fdb_ready(hellcreek);
+}
+
+/* Retrieve the index of an FDB entry by mac address. Currently we search through
+ * the complete table in hardware. If that's too slow, we might have to cache
+ * the complete FDB table in software.
+ */
+static int hellcreek_fdb_get(struct hellcreek *hellcreek,
+ const unsigned char *dest,
+ struct hellcreek_fdb_entry *entry)
+{
+ size_t i;
+
+ /* Set read pointer to zero: The read of HR_FDBMAX (read-only register)
+ * should reset the internal pointer. But, that doesn't work. The vendor
+ * suggested a subsequent write as workaround. Same for HR_FDBRDH below.
+ */
+ hellcreek_read(hellcreek, HR_FDBMAX);
+ hellcreek_write(hellcreek, 0x00, HR_FDBMAX);
+
+ /* We have to read the complete table, because the switch/driver might
+ * enter new entries anywhere.
+ */
+ for (i = 0; i < hellcreek->fdb_entries; ++i) {
+ unsigned char addr[ETH_ALEN];
+ u16 meta, mac;
+
+ meta = hellcreek_read(hellcreek, HR_FDBMDRD);
+ mac = hellcreek_read(hellcreek, HR_FDBRDL);
+ addr[5] = mac & 0xff;
+ addr[4] = (mac & 0xff00) >> 8;
+ mac = hellcreek_read(hellcreek, HR_FDBRDM);
+ addr[3] = mac & 0xff;
+ addr[2] = (mac & 0xff00) >> 8;
+ mac = hellcreek_read(hellcreek, HR_FDBRDH);
+ addr[1] = mac & 0xff;
+ addr[0] = (mac & 0xff00) >> 8;
+
+ /* Force next entry */
+ hellcreek_write(hellcreek, 0x00, HR_FDBRDH);
+
+ if (memcmp(addr, dest, ETH_ALEN))
+ continue;
+
+ /* Match found */
+ entry->idx = i;
+ entry->portmask = (meta & HR_FDBMDRD_PORTMASK_MASK) >>
+ HR_FDBMDRD_PORTMASK_SHIFT;
+ entry->age = (meta & HR_FDBMDRD_AGE_MASK) >>
+ HR_FDBMDRD_AGE_SHIFT;
+ entry->is_obt = !!(meta & HR_FDBMDRD_OBT);
+ entry->pass_blocked = !!(meta & HR_FDBMDRD_PASS_BLOCKED);
+ entry->is_static = !!(meta & HR_FDBMDRD_STATIC);
+ entry->reprio_tc = (meta & HR_FDBMDRD_REPRIO_TC_MASK) >>
+ HR_FDBMDRD_REPRIO_TC_SHIFT;
+ entry->reprio_en = !!(meta & HR_FDBMDRD_REPRIO_EN);
+ memcpy(entry->mac, addr, sizeof(addr));
+
+ return 0;
+ }
+
+ return -ENOENT;
+}
+
+static int hellcreek_fdb_add(struct dsa_switch *ds, int port,
+ const unsigned char *addr, u16 vid)
+{
+ struct hellcreek_fdb_entry entry = { 0 };
+ struct hellcreek *hellcreek = ds->priv;
+ int ret;
+
+ dev_dbg(hellcreek->dev, "Add FDB entry for MAC=%pM\n", addr);
+
+ mutex_lock(&hellcreek->reg_lock);
+
+ ret = hellcreek_fdb_get(hellcreek, addr, &entry);
+ if (ret) {
+ /* Not found */
+ memcpy(entry.mac, addr, sizeof(entry.mac));
+ entry.portmask = BIT(port);
+
+ ret = __hellcreek_fdb_add(hellcreek, &entry);
+ if (ret) {
+ dev_err(hellcreek->dev, "Failed to add FDB entry!\n");
+ goto out;
+ }
+ } else {
+		/* Found: remove the old entry and re-add it with the new port
+		 * included in the port mask
+		 */
+ ret = __hellcreek_fdb_del(hellcreek, &entry);
+ if (ret) {
+ dev_err(hellcreek->dev, "Failed to delete FDB entry!\n");
+ goto out;
+ }
+
+ entry.portmask |= BIT(port);
+
+ ret = __hellcreek_fdb_add(hellcreek, &entry);
+ if (ret) {
+ dev_err(hellcreek->dev, "Failed to add FDB entry!\n");
+ goto out;
+ }
+ }
+
+out:
+ mutex_unlock(&hellcreek->reg_lock);
+
+ return ret;
+}
+
+static int hellcreek_fdb_del(struct dsa_switch *ds, int port,
+ const unsigned char *addr, u16 vid)
+{
+ struct hellcreek_fdb_entry entry = { 0 };
+ struct hellcreek *hellcreek = ds->priv;
+ int ret;
+
+ dev_dbg(hellcreek->dev, "Delete FDB entry for MAC=%pM\n", addr);
+
+ mutex_lock(&hellcreek->reg_lock);
+
+ ret = hellcreek_fdb_get(hellcreek, addr, &entry);
+ if (ret) {
+ /* Not found */
+ dev_err(hellcreek->dev, "FDB entry for deletion not found!\n");
+ } else {
+		/* Found: remove the entry and, if other ports remain in the
+		 * mask, re-add it without this port
+		 */
+ ret = __hellcreek_fdb_del(hellcreek, &entry);
+ if (ret) {
+ dev_err(hellcreek->dev, "Failed to delete FDB entry!\n");
+ goto out;
+ }
+
+ entry.portmask &= ~BIT(port);
+
+ if (entry.portmask != 0x00) {
+ ret = __hellcreek_fdb_add(hellcreek, &entry);
+ if (ret) {
+ dev_err(hellcreek->dev, "Failed to add FDB entry!\n");
+ goto out;
+ }
+ }
+ }
+
+out:
+ mutex_unlock(&hellcreek->reg_lock);
+
+ return ret;
+}
+
+static int hellcreek_fdb_dump(struct dsa_switch *ds, int port,
+ dsa_fdb_dump_cb_t *cb, void *data)
+{
+ struct hellcreek *hellcreek = ds->priv;
+ u16 entries;
+ size_t i;
+
+ mutex_lock(&hellcreek->reg_lock);
+
+ /* Set read pointer to zero: The read of HR_FDBMAX (read-only register)
+ * should reset the internal pointer. But, that doesn't work. The vendor
+ * suggested a subsequent write as workaround. Same for HR_FDBRDH below.
+ */
+ entries = hellcreek_read(hellcreek, HR_FDBMAX);
+ hellcreek_write(hellcreek, 0x00, HR_FDBMAX);
+
+ dev_dbg(hellcreek->dev, "FDB dump for port %d, entries=%d!\n", port, entries);
+
+ /* Read table */
+ for (i = 0; i < hellcreek->fdb_entries; ++i) {
+ unsigned char null_addr[ETH_ALEN] = { 0 };
+ struct hellcreek_fdb_entry entry = { 0 };
+ u16 meta, mac;
+
+ meta = hellcreek_read(hellcreek, HR_FDBMDRD);
+ mac = hellcreek_read(hellcreek, HR_FDBRDL);
+ entry.mac[5] = mac & 0xff;
+ entry.mac[4] = (mac & 0xff00) >> 8;
+ mac = hellcreek_read(hellcreek, HR_FDBRDM);
+ entry.mac[3] = mac & 0xff;
+ entry.mac[2] = (mac & 0xff00) >> 8;
+ mac = hellcreek_read(hellcreek, HR_FDBRDH);
+ entry.mac[1] = mac & 0xff;
+ entry.mac[0] = (mac & 0xff00) >> 8;
+
+ /* Force next entry */
+ hellcreek_write(hellcreek, 0x00, HR_FDBRDH);
+
+ /* Check valid */
+ if (!memcmp(entry.mac, null_addr, ETH_ALEN))
+ continue;
+
+ entry.portmask = (meta & HR_FDBMDRD_PORTMASK_MASK) >>
+ HR_FDBMDRD_PORTMASK_SHIFT;
+ entry.is_static = !!(meta & HR_FDBMDRD_STATIC);
+
+ /* Check port mask */
+ if (!(entry.portmask & BIT(port)))
+ continue;
+
+ cb(entry.mac, 0, entry.is_static, data);
+ }
+
+ mutex_unlock(&hellcreek->reg_lock);
+
+ return 0;
+}
+
+static int hellcreek_vlan_filtering(struct dsa_switch *ds, int port,
+ bool vlan_filtering,
+ struct switchdev_trans *trans)
+{
+ struct hellcreek *hellcreek = ds->priv;
+
+ if (switchdev_trans_ph_prepare(trans))
+ return 0;
+
+ dev_dbg(hellcreek->dev, "%s VLAN filtering on port %d\n",
+ vlan_filtering ? "Enable" : "Disable", port);
+
+	/* Configure the port to drop packets with unknown VIDs */
+ hellcreek_setup_ingressflt(hellcreek, port, vlan_filtering);
+
+	/* Enable VLAN awareness on the switch. This is safe, because
+	 * ds->vlan_filtering_is_global is set.
+	 */
+ hellcreek_setup_vlan_awareness(hellcreek, vlan_filtering);
+
+ return 0;
+}
+
+static int hellcreek_enable_ip_core(struct hellcreek *hellcreek)
+{
+ int ret;
+ u16 val;
+
+ mutex_lock(&hellcreek->reg_lock);
+
+ val = hellcreek_read(hellcreek, HR_CTRL_C);
+ val |= HR_CTRL_C_ENABLE;
+ hellcreek_write(hellcreek, val, HR_CTRL_C);
+ ret = hellcreek_wait_until_transitioned(hellcreek);
+
+ mutex_unlock(&hellcreek->reg_lock);
+
+ return ret;
+}
+
+static void hellcreek_setup_cpu_and_tunnel_port(struct hellcreek *hellcreek)
+{
+ struct hellcreek_port *tunnel_port = &hellcreek->ports[TUNNEL_PORT];
+ struct hellcreek_port *cpu_port = &hellcreek->ports[CPU_PORT];
+ u16 ptcfg = 0;
+
+ ptcfg |= HR_PTCFG_LEARNING_EN | HR_PTCFG_ADMIN_EN;
+
+ mutex_lock(&hellcreek->reg_lock);
+
+ hellcreek_select_port(hellcreek, CPU_PORT);
+ hellcreek_write(hellcreek, ptcfg, HR_PTCFG);
+
+ hellcreek_select_port(hellcreek, TUNNEL_PORT);
+ hellcreek_write(hellcreek, ptcfg, HR_PTCFG);
+
+ cpu_port->ptcfg = ptcfg;
+ tunnel_port->ptcfg = ptcfg;
+
+ mutex_unlock(&hellcreek->reg_lock);
+}
+
+static void hellcreek_setup_tc_identity_mapping(struct hellcreek *hellcreek)
+{
+ int i;
+
+ /* The switch has multiple egress queues per port. The queue is selected
+ * via the PCP field in the VLAN header. The switch internally deals
+ * with traffic classes instead of PCP values and this mapping is
+ * configurable.
+ *
+ * The default mapping is (PCP - TC):
+ * 7 - 7
+ * 6 - 6
+ * 5 - 5
+ * 4 - 4
+ * 3 - 3
+ * 2 - 1
+ * 1 - 0
+ * 0 - 2
+	 *
+	 * As this default is not an identity mapping, configure one
+	 * explicitly: each PCP value selects the equally numbered traffic
+	 * class.
+ */
+
+ for (i = 0; i < 8; ++i) {
+ mutex_lock(&hellcreek->reg_lock);
+
+ hellcreek_select_prio(hellcreek, i);
+ hellcreek_write(hellcreek,
+ i << HR_PRTCCFG_PCP_TC_MAP_SHIFT,
+ HR_PRTCCFG);
+
+ mutex_unlock(&hellcreek->reg_lock);
+ }
+}
+
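Since the PCP-to-TC mapping is configurable, the same select-and-write sequence can program any table, not just the identity. A hedged sketch under that assumption, reusing the driver's existing helpers (the function name here is invented):

    /* Sketch only: program an arbitrary PCP -> TC map, e.g. the documented
     * hardware default { 2, 0, 1, 3, 4, 5, 6, 7 } indexed by PCP.
     */
    static void hellcreek_apply_pcp_tc_map(struct hellcreek *hellcreek,
                                           const u8 map[8])
    {
            int i;

            for (i = 0; i < 8; ++i) {
                    mutex_lock(&hellcreek->reg_lock);
                    hellcreek_select_prio(hellcreek, i);
                    hellcreek_write(hellcreek,
                                    map[i] << HR_PRTCCFG_PCP_TC_MAP_SHIFT,
                                    HR_PRTCCFG);
                    mutex_unlock(&hellcreek->reg_lock);
            }
    }
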
+static int hellcreek_setup_fdb(struct hellcreek *hellcreek)
+{
+ static struct hellcreek_fdb_entry ptp = {
+		/* PTP default (forwardable) multicast MAC: 01-1B-19-00-00-00 */
+ .mac = { 0x01, 0x1b, 0x19, 0x00, 0x00, 0x00 },
+ .portmask = 0x03, /* Management ports */
+ .age = 0,
+ .is_obt = 0,
+ .pass_blocked = 0,
+ .is_static = 1,
+ .reprio_tc = 6, /* TC: 6 as per IEEE 802.1AS */
+ .reprio_en = 1,
+ };
+ static struct hellcreek_fdb_entry p2p = {
+		/* PTP peer-delay (link-local) multicast MAC: 01-80-C2-00-00-0E */
+ .mac = { 0x01, 0x80, 0xc2, 0x00, 0x00, 0x0e },
+ .portmask = 0x03, /* Management ports */
+ .age = 0,
+ .is_obt = 0,
+ .pass_blocked = 0,
+ .is_static = 1,
+ .reprio_tc = 6, /* TC: 6 as per IEEE 802.1AS */
+ .reprio_en = 1,
+ };
+ int ret;
+
+ mutex_lock(&hellcreek->reg_lock);
+ ret = __hellcreek_fdb_add(hellcreek, &ptp);
+ if (ret)
+ goto out;
+ ret = __hellcreek_fdb_add(hellcreek, &p2p);
+out:
+ mutex_unlock(&hellcreek->reg_lock);
+
+ return ret;
+}
+
+static int hellcreek_setup(struct dsa_switch *ds)
+{
+ struct hellcreek *hellcreek = ds->priv;
+ u16 swcfg = 0;
+ int ret, i;
+
+ dev_dbg(hellcreek->dev, "Set up the switch\n");
+
+ /* Let's go */
+ ret = hellcreek_enable_ip_core(hellcreek);
+ if (ret) {
+ dev_err(hellcreek->dev, "Failed to enable IP core!\n");
+ return ret;
+ }
+
+ /* Enable CPU/Tunnel ports */
+ hellcreek_setup_cpu_and_tunnel_port(hellcreek);
+
+ /* Switch config: Keep defaults, enable FDB aging and learning and tag
+ * each frame from/to cpu port for DSA tagging. Also enable the length
+ * aware shaping mode. This eliminates the need for Qbv guard bands.
+ */
+ swcfg |= HR_SWCFG_FDBAGE_EN |
+ HR_SWCFG_FDBLRN_EN |
+ HR_SWCFG_ALWAYS_OBT |
+ (HR_SWCFG_LAS_ON << HR_SWCFG_LAS_MODE_SHIFT);
+ hellcreek->swcfg = swcfg;
+ hellcreek_write(hellcreek, swcfg, HR_SWCFG);
+
+ /* Initial vlan membership to reflect port separation */
+ for (i = 0; i < ds->num_ports; ++i) {
+ if (!dsa_is_user_port(ds, i))
+ continue;
+
+ hellcreek_setup_vlan_membership(ds, i, true);
+ }
+
+ /* Configure PCP <-> TC mapping */
+ hellcreek_setup_tc_identity_mapping(hellcreek);
+
+ /* Allow VLAN configurations while not filtering which is the default
+ * for new DSA drivers.
+ */
+ ds->configure_vlan_while_not_filtering = true;
+
+ /* The VLAN awareness is a global switch setting. Therefore, mixed vlan
+ * filtering setups are not supported.
+ */
+ ds->vlan_filtering_is_global = true;
+
+ /* Intercept _all_ PTP multicast traffic */
+ ret = hellcreek_setup_fdb(hellcreek);
+ if (ret) {
+ dev_err(hellcreek->dev,
+ "Failed to insert static PTP FDB entries\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static void hellcreek_phylink_validate(struct dsa_switch *ds, int port,
+ unsigned long *supported,
+ struct phylink_link_state *state)
+{
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
+ struct hellcreek *hellcreek = ds->priv;
+
+ dev_dbg(hellcreek->dev, "Phylink validate for port %d\n", port);
+
+ /* The MAC settings are a hardware configuration option and cannot be
+ * changed at run time or by strapping. Therefore the attached PHYs
+ * should be programmed to only advertise settings which are supported
+ * by the hardware.
+ */
+ if (hellcreek->pdata->is_100_mbits)
+ phylink_set(mask, 100baseT_Full);
+ else
+ phylink_set(mask, 1000baseT_Full);
+
+ bitmap_and(supported, supported, mask,
+ __ETHTOOL_LINK_MODE_MASK_NBITS);
+ bitmap_and(state->advertising, state->advertising, mask,
+ __ETHTOOL_LINK_MODE_MASK_NBITS);
+}
+
+static int
+hellcreek_port_prechangeupper(struct dsa_switch *ds, int port,
+ struct netdev_notifier_changeupper_info *info)
+{
+ struct hellcreek *hellcreek = ds->priv;
+ bool used = true;
+ int ret = -EBUSY;
+ u16 vid;
+ int i;
+
+ dev_dbg(hellcreek->dev, "Pre change upper for port %d\n", port);
+
+	/* Deny VLAN devices with the same VLAN id on top of different lan
+	 * ports, because that would break the port separation provided by the
+	 * private VLANs. Example:
+	 *
+	 * lan0.100 *and* lan1.100 cannot be used in parallel. However, lan0.99
+	 * and lan1.100 work.
+	 */
+
+ if (!is_vlan_dev(info->upper_dev))
+ return 0;
+
+ vid = vlan_dev_vlan_id(info->upper_dev);
+
+ /* For all ports, check bitmaps */
+ mutex_lock(&hellcreek->vlan_lock);
+ for (i = 0; i < hellcreek->pdata->num_ports; ++i) {
+ if (!dsa_is_user_port(ds, i))
+ continue;
+
+ if (port == i)
+ continue;
+
+ used = used && test_bit(vid, hellcreek->ports[i].vlan_dev_bitmap);
+ }
+
+ if (used)
+ goto out;
+
+ /* Update bitmap */
+ set_bit(vid, hellcreek->ports[port].vlan_dev_bitmap);
+
+ ret = 0;
+
+out:
+ mutex_unlock(&hellcreek->vlan_lock);
+
+ return ret;
+}
+
+static const struct dsa_switch_ops hellcreek_ds_ops = {
+ .get_ethtool_stats = hellcreek_get_ethtool_stats,
+ .get_sset_count = hellcreek_get_sset_count,
+ .get_strings = hellcreek_get_strings,
+ .get_tag_protocol = hellcreek_get_tag_protocol,
+ .get_ts_info = hellcreek_get_ts_info,
+ .phylink_validate = hellcreek_phylink_validate,
+ .port_bridge_join = hellcreek_port_bridge_join,
+ .port_bridge_leave = hellcreek_port_bridge_leave,
+ .port_disable = hellcreek_port_disable,
+ .port_enable = hellcreek_port_enable,
+ .port_fdb_add = hellcreek_fdb_add,
+ .port_fdb_del = hellcreek_fdb_del,
+ .port_fdb_dump = hellcreek_fdb_dump,
+ .port_hwtstamp_set = hellcreek_port_hwtstamp_set,
+ .port_hwtstamp_get = hellcreek_port_hwtstamp_get,
+ .port_prechangeupper = hellcreek_port_prechangeupper,
+ .port_rxtstamp = hellcreek_port_rxtstamp,
+ .port_stp_state_set = hellcreek_port_stp_state_set,
+ .port_txtstamp = hellcreek_port_txtstamp,
+ .port_vlan_add = hellcreek_vlan_add,
+ .port_vlan_del = hellcreek_vlan_del,
+ .port_vlan_filtering = hellcreek_vlan_filtering,
+ .port_vlan_prepare = hellcreek_vlan_prepare,
+ .setup = hellcreek_setup,
+};
+
+static int hellcreek_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct hellcreek *hellcreek;
+ struct resource *res;
+ int ret, i;
+
+ hellcreek = devm_kzalloc(dev, sizeof(*hellcreek), GFP_KERNEL);
+ if (!hellcreek)
+ return -ENOMEM;
+
+ hellcreek->vidmbrcfg = devm_kcalloc(dev, VLAN_N_VID,
+ sizeof(*hellcreek->vidmbrcfg),
+ GFP_KERNEL);
+ if (!hellcreek->vidmbrcfg)
+ return -ENOMEM;
+
+ hellcreek->pdata = of_device_get_match_data(dev);
+
+ hellcreek->ports = devm_kcalloc(dev, hellcreek->pdata->num_ports,
+ sizeof(*hellcreek->ports),
+ GFP_KERNEL);
+ if (!hellcreek->ports)
+ return -ENOMEM;
+
+ for (i = 0; i < hellcreek->pdata->num_ports; ++i) {
+ struct hellcreek_port *port = &hellcreek->ports[i];
+
+ port->counter_values =
+ devm_kcalloc(dev,
+ ARRAY_SIZE(hellcreek_counter),
+ sizeof(*port->counter_values),
+ GFP_KERNEL);
+ if (!port->counter_values)
+ return -ENOMEM;
+
+ port->vlan_dev_bitmap =
+ devm_kcalloc(dev,
+ BITS_TO_LONGS(VLAN_N_VID),
+ sizeof(unsigned long),
+ GFP_KERNEL);
+ if (!port->vlan_dev_bitmap)
+ return -ENOMEM;
+
+ port->hellcreek = hellcreek;
+ port->port = i;
+ }
+
+ mutex_init(&hellcreek->reg_lock);
+ mutex_init(&hellcreek->vlan_lock);
+ mutex_init(&hellcreek->ptp_lock);
+
+ hellcreek->dev = dev;
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "tsn");
+ if (!res) {
+ dev_err(dev, "No memory region provided!\n");
+ return -ENODEV;
+ }
+
+ hellcreek->base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(hellcreek->base)) {
+		dev_err(dev, "Failed to map TSN memory region!\n");
+ return PTR_ERR(hellcreek->base);
+ }
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ptp");
+ if (!res) {
+ dev_err(dev, "No PTP memory region provided!\n");
+ return -ENODEV;
+ }
+
+ hellcreek->ptp_base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(hellcreek->ptp_base)) {
+		dev_err(dev, "Failed to map PTP memory region!\n");
+ return PTR_ERR(hellcreek->ptp_base);
+ }
+
+ ret = hellcreek_detect(hellcreek);
+ if (ret) {
+ dev_err(dev, "No (known) chip found!\n");
+ return ret;
+ }
+
+ ret = hellcreek_wait_until_ready(hellcreek);
+ if (ret) {
+ dev_err(dev, "Switch didn't become ready!\n");
+ return ret;
+ }
+
+ hellcreek_feature_detect(hellcreek);
+
+ hellcreek->ds = devm_kzalloc(dev, sizeof(*hellcreek->ds), GFP_KERNEL);
+ if (!hellcreek->ds)
+ return -ENOMEM;
+
+ hellcreek->ds->dev = dev;
+ hellcreek->ds->priv = hellcreek;
+ hellcreek->ds->ops = &hellcreek_ds_ops;
+ hellcreek->ds->num_ports = hellcreek->pdata->num_ports;
+ hellcreek->ds->num_tx_queues = HELLCREEK_NUM_EGRESS_QUEUES;
+
+ ret = dsa_register_switch(hellcreek->ds);
+ if (ret) {
+ dev_err_probe(dev, ret, "Unable to register switch\n");
+ return ret;
+ }
+
+ ret = hellcreek_ptp_setup(hellcreek);
+ if (ret) {
+ dev_err(dev, "Failed to setup PTP!\n");
+ goto err_ptp_setup;
+ }
+
+ ret = hellcreek_hwtstamp_setup(hellcreek);
+ if (ret) {
+ dev_err(dev, "Failed to setup hardware timestamping!\n");
+ goto err_tstamp_setup;
+ }
+
+ platform_set_drvdata(pdev, hellcreek);
+
+ return 0;
+
+err_tstamp_setup:
+ hellcreek_ptp_free(hellcreek);
+err_ptp_setup:
+ dsa_unregister_switch(hellcreek->ds);
+
+ return ret;
+}
+
+static int hellcreek_remove(struct platform_device *pdev)
+{
+ struct hellcreek *hellcreek = platform_get_drvdata(pdev);
+
+ hellcreek_hwtstamp_free(hellcreek);
+ hellcreek_ptp_free(hellcreek);
+ dsa_unregister_switch(hellcreek->ds);
+ platform_set_drvdata(pdev, NULL);
+
+ return 0;
+}
+
+static const struct hellcreek_platform_data de1soc_r1_pdata = {
+ .num_ports = 4,
+ .is_100_mbits = 1,
+ .qbv_support = 1,
+ .qbv_on_cpu_port = 1,
+ .qbu_support = 0,
+ .module_id = 0x4c30,
+};
+
+static const struct of_device_id hellcreek_of_match[] = {
+ {
+ .compatible = "hirschmann,hellcreek-de1soc-r1",
+ .data = &de1soc_r1_pdata,
+ },
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, hellcreek_of_match);
+
+static struct platform_driver hellcreek_driver = {
+ .probe = hellcreek_probe,
+ .remove = hellcreek_remove,
+ .driver = {
+ .name = "hellcreek",
+ .of_match_table = hellcreek_of_match,
+ },
+};
+module_platform_driver(hellcreek_driver);
+
+MODULE_AUTHOR("Kurt Kanzenbach <kurt@linutronix.de>");
+MODULE_DESCRIPTION("Hirschmann Hellcreek driver");
+MODULE_LICENSE("Dual MIT/GPL");
diff --git a/drivers/net/dsa/hirschmann/hellcreek.h b/drivers/net/dsa/hirschmann/hellcreek.h
new file mode 100644
index 000000000000..e81781ebc31c
--- /dev/null
+++ b/drivers/net/dsa/hirschmann/hellcreek.h
@@ -0,0 +1,286 @@
+/* SPDX-License-Identifier: (GPL-2.0 OR MIT) */
+/*
+ * DSA driver for:
+ * Hirschmann Hellcreek TSN switch.
+ *
+ * Copyright (C) 2019,2020 Linutronix GmbH
+ * Author Kurt Kanzenbach <kurt@linutronix.de>
+ */
+
+#ifndef _HELLCREEK_H_
+#define _HELLCREEK_H_
+
+#include <linux/bitmap.h>
+#include <linux/bitops.h>
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/mutex.h>
+#include <linux/workqueue.h>
+#include <linux/leds.h>
+#include <linux/platform_data/hirschmann-hellcreek.h>
+#include <linux/ptp_clock_kernel.h>
+#include <linux/timecounter.h>
+#include <net/dsa.h>
+
+/* Ports:
+ * - 0: CPU
+ * - 1: Tunnel
+ * - 2: TSN front port 1
+ * - 3: TSN front port 2
+ * - ...
+ */
+#define CPU_PORT 0
+#define TUNNEL_PORT 1
+
+#define HELLCREEK_VLAN_NO_MEMBER 0x0
+#define HELLCREEK_VLAN_UNTAGGED_MEMBER 0x1
+#define HELLCREEK_VLAN_TAGGED_MEMBER 0x3
+#define HELLCREEK_NUM_EGRESS_QUEUES 8
+
+/* Register definitions */
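+/* Note: all registers are 16 bit wide; the offsets below are byte offsets,
+ * i.e. the register index multiplied by two
+ */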
+#define HR_MODID_C (0 * 2)
+#define HR_REL_L_C (1 * 2)
+#define HR_REL_H_C (2 * 2)
+#define HR_BLD_L_C (3 * 2)
+#define HR_BLD_H_C (4 * 2)
+#define HR_CTRL_C (5 * 2)
+#define HR_CTRL_C_READY BIT(14)
+#define HR_CTRL_C_TRANSITION BIT(13)
+#define HR_CTRL_C_ENABLE BIT(0)
+
+#define HR_PSEL (0xa6 * 2)
+#define HR_PSEL_PTWSEL_SHIFT 4
+#define HR_PSEL_PTWSEL_MASK GENMASK(5, 4)
+#define HR_PSEL_PRTCWSEL_SHIFT 0
+#define HR_PSEL_PRTCWSEL_MASK GENMASK(2, 0)
+
+#define HR_PTCFG (0xa7 * 2)
+#define HR_PTCFG_MLIMIT_EN BIT(13)
+#define HR_PTCFG_UMC_FLT BIT(10)
+#define HR_PTCFG_UUC_FLT BIT(9)
+#define HR_PTCFG_UNTRUST BIT(8)
+#define HR_PTCFG_TAG_REQUIRED BIT(7)
+#define HR_PTCFG_PPRIO_SHIFT 4
+#define HR_PTCFG_PPRIO_MASK GENMASK(6, 4)
+#define HR_PTCFG_INGRESSFLT BIT(3)
+#define HR_PTCFG_BLOCKED BIT(2)
+#define HR_PTCFG_LEARNING_EN BIT(1)
+#define HR_PTCFG_ADMIN_EN BIT(0)
+
+#define HR_PRTCCFG (0xa8 * 2)
+#define HR_PRTCCFG_PCP_TC_MAP_SHIFT 0
+#define HR_PRTCCFG_PCP_TC_MAP_MASK GENMASK(2, 0)
+
+#define HR_CSEL (0x8d * 2)
+#define HR_CSEL_SHIFT 0
+#define HR_CSEL_MASK GENMASK(7, 0)
+#define HR_CRDL (0x8e * 2)
+#define HR_CRDH (0x8f * 2)
+
+#define HR_SWTRC_CFG (0x90 * 2)
+#define HR_SWTRC0 (0x91 * 2)
+#define HR_SWTRC1 (0x92 * 2)
+#define HR_PFREE (0x93 * 2)
+#define HR_MFREE (0x94 * 2)
+
+#define HR_FDBAGE (0x97 * 2)
+#define HR_FDBMAX (0x98 * 2)
+#define HR_FDBRDL (0x99 * 2)
+#define HR_FDBRDM (0x9a * 2)
+#define HR_FDBRDH (0x9b * 2)
+
+#define HR_FDBMDRD (0x9c * 2)
+#define HR_FDBMDRD_PORTMASK_SHIFT 0
+#define HR_FDBMDRD_PORTMASK_MASK GENMASK(3, 0)
+#define HR_FDBMDRD_AGE_SHIFT 4
+#define HR_FDBMDRD_AGE_MASK GENMASK(7, 4)
+#define HR_FDBMDRD_OBT BIT(8)
+#define HR_FDBMDRD_PASS_BLOCKED BIT(9)
+#define HR_FDBMDRD_STATIC BIT(11)
+#define HR_FDBMDRD_REPRIO_TC_SHIFT 12
+#define HR_FDBMDRD_REPRIO_TC_MASK GENMASK(14, 12)
+#define HR_FDBMDRD_REPRIO_EN BIT(15)
+
+#define HR_FDBWDL (0x9d * 2)
+#define HR_FDBWDM (0x9e * 2)
+#define HR_FDBWDH (0x9f * 2)
+#define HR_FDBWRM0 (0xa0 * 2)
+#define HR_FDBWRM0_PORTMASK_SHIFT 0
+#define HR_FDBWRM0_PORTMASK_MASK GENMASK(3, 0)
+#define HR_FDBWRM0_OBT BIT(8)
+#define HR_FDBWRM0_PASS_BLOCKED BIT(9)
+#define HR_FDBWRM0_REPRIO_TC_SHIFT 12
+#define HR_FDBWRM0_REPRIO_TC_MASK GENMASK(14, 12)
+#define HR_FDBWRM0_REPRIO_EN BIT(15)
+#define HR_FDBWRM1 (0xa1 * 2)
+
+#define HR_FDBWRCMD (0xa2 * 2)
+#define HR_FDBWRCMD_FDBDEL BIT(9)
+
+#define HR_SWCFG (0xa3 * 2)
+#define HR_SWCFG_GM_STATEMD BIT(15)
+#define HR_SWCFG_LAS_MODE_SHIFT 12
+#define HR_SWCFG_LAS_MODE_MASK GENMASK(13, 12)
+#define HR_SWCFG_LAS_OFF (0x00)
+#define HR_SWCFG_LAS_ON (0x01)
+#define HR_SWCFG_LAS_STATIC (0x10)
+#define HR_SWCFG_CT_EN BIT(11)
+#define HR_SWCFG_VLAN_UNAWARE BIT(10)
+#define HR_SWCFG_ALWAYS_OBT BIT(9)
+#define HR_SWCFG_FDBAGE_EN BIT(5)
+#define HR_SWCFG_FDBLRN_EN BIT(4)
+
+#define HR_SWSTAT (0xa4 * 2)
+#define HR_SWSTAT_FAIL BIT(4)
+#define HR_SWSTAT_BUSY BIT(0)
+
+#define HR_SWCMD (0xa5 * 2)
+#define HW_SWCMD_FLUSH BIT(0)
+
+#define HR_VIDCFG (0xaa * 2)
+#define HR_VIDCFG_VID_SHIFT 0
+#define HR_VIDCFG_VID_MASK GENMASK(11, 0)
+#define HR_VIDCFG_PVID BIT(12)
+
+#define HR_VIDMBRCFG (0xab * 2)
+#define HR_VIDMBRCFG_P0MBR_SHIFT 0
+#define HR_VIDMBRCFG_P0MBR_MASK GENMASK(1, 0)
+#define HR_VIDMBRCFG_P1MBR_SHIFT 2
+#define HR_VIDMBRCFG_P1MBR_MASK GENMASK(3, 2)
+#define HR_VIDMBRCFG_P2MBR_SHIFT 4
+#define HR_VIDMBRCFG_P2MBR_MASK GENMASK(5, 4)
+#define HR_VIDMBRCFG_P3MBR_SHIFT 6
+#define HR_VIDMBRCFG_P3MBR_MASK GENMASK(7, 6)
+
+#define HR_FEABITS0 (0xac * 2)
+#define HR_FEABITS0_FDBBINS_SHIFT 4
+#define HR_FEABITS0_FDBBINS_MASK GENMASK(7, 4)
+#define HR_FEABITS0_PCNT_SHIFT 8
+#define HR_FEABITS0_PCNT_MASK GENMASK(11, 8)
+#define HR_FEABITS0_MCNT_SHIFT 12
+#define HR_FEABITS0_MCNT_MASK GENMASK(15, 12)
+
+#define TR_QTRACK (0xb1 * 2)
+#define TR_TGDVER (0xb3 * 2)
+#define TR_TGDVER_REV_MIN_MASK GENMASK(7, 0)
+#define TR_TGDVER_REV_MIN_SHIFT 0
+#define TR_TGDVER_REV_MAJ_MASK GENMASK(15, 8)
+#define TR_TGDVER_REV_MAJ_SHIFT 8
+#define TR_TGDSEL (0xb4 * 2)
+#define TR_TGDSEL_TDGSEL_MASK GENMASK(1, 0)
+#define TR_TGDSEL_TDGSEL_SHIFT 0
+#define TR_TGDCTRL (0xb5 * 2)
+#define TR_TGDCTRL_GATE_EN BIT(0)
+#define TR_TGDCTRL_CYC_SNAP BIT(4)
+#define TR_TGDCTRL_SNAP_EST BIT(5)
+#define TR_TGDCTRL_ADMINGATESTATES_MASK GENMASK(15, 8)
+#define TR_TGDCTRL_ADMINGATESTATES_SHIFT 8
+#define TR_TGDSTAT0 (0xb6 * 2)
+#define TR_TGDSTAT1 (0xb7 * 2)
+#define TR_ESTWRL (0xb8 * 2)
+#define TR_ESTWRH (0xb9 * 2)
+#define TR_ESTCMD (0xba * 2)
+#define TR_ESTCMD_ESTSEC_MASK GENMASK(2, 0)
+#define TR_ESTCMD_ESTSEC_SHIFT 0
+#define TR_ESTCMD_ESTARM BIT(4)
+#define TR_ESTCMD_ESTSWCFG BIT(5)
+#define TR_EETWRL (0xbb * 2)
+#define TR_EETWRH (0xbc * 2)
+#define TR_EETCMD (0xbd * 2)
+#define TR_EETCMD_EETSEC_MASK		GENMASK(2, 0)
+#define TR_EETCMD_EETSEC_SHIFT 0
+#define TR_EETCMD_EETARM BIT(4)
+#define TR_CTWRL (0xbe * 2)
+#define TR_CTWRH (0xbf * 2)
+#define TR_LCNSL (0xc1 * 2)
+#define TR_LCNSH (0xc2 * 2)
+#define TR_LCS (0xc3 * 2)
+#define TR_GCLDAT (0xc4 * 2)
+#define TR_GCLDAT_GCLWRGATES_MASK GENMASK(7, 0)
+#define TR_GCLDAT_GCLWRGATES_SHIFT 0
+#define TR_GCLDAT_GCLWRLAST BIT(8)
+#define TR_GCLDAT_GCLOVRI BIT(9)
+#define TR_GCLTIL (0xc5 * 2)
+#define TR_GCLTIH (0xc6 * 2)
+#define TR_GCLCMD (0xc7 * 2)
+#define TR_GCLCMD_GCLWRADR_MASK GENMASK(7, 0)
+#define TR_GCLCMD_GCLWRADR_SHIFT 0
+#define TR_GCLCMD_INIT_GATE_STATES_MASK GENMASK(15, 8)
+#define TR_GCLCMD_INIT_GATE_STATES_SHIFT 8
+
+struct hellcreek_counter {
+ u8 offset;
+ const char *name;
+};
+
+struct hellcreek;
+
+/* State flags for hellcreek_port_hwtstamp::state */
+enum {
+ HELLCREEK_HWTSTAMP_ENABLED,
+ HELLCREEK_HWTSTAMP_TX_IN_PROGRESS,
+};
+
+/* A structure to hold hardware timestamping information per port */
+struct hellcreek_port_hwtstamp {
+ /* Timestamping state */
+ unsigned long state;
+
+ /* Resources for receive timestamping */
+ struct sk_buff_head rx_queue; /* For synchronization messages */
+
+ /* Resources for transmit timestamping */
+ unsigned long tx_tstamp_start;
+ struct sk_buff *tx_skb;
+
+ /* Current timestamp configuration */
+ struct hwtstamp_config tstamp_config;
+};
+
+struct hellcreek_port {
+ struct hellcreek *hellcreek;
+ unsigned long *vlan_dev_bitmap;
+ int port;
+ u16 ptcfg; /* ptcfg shadow */
+ u64 *counter_values;
+
+ /* Per-port timestamping resources */
+ struct hellcreek_port_hwtstamp port_hwtstamp;
+};
+
+struct hellcreek_fdb_entry {
+ size_t idx;
+ unsigned char mac[ETH_ALEN];
+ u8 portmask;
+ u8 age;
+ u8 is_obt;
+ u8 pass_blocked;
+ u8 is_static;
+ u8 reprio_tc;
+ u8 reprio_en;
+};
+
+struct hellcreek {
+ const struct hellcreek_platform_data *pdata;
+ struct device *dev;
+ struct dsa_switch *ds;
+ struct ptp_clock *ptp_clock;
+ struct ptp_clock_info ptp_clock_info;
+ struct hellcreek_port *ports;
+ struct delayed_work overflow_work;
+ struct led_classdev led_is_gm;
+ struct led_classdev led_sync_good;
+ struct mutex reg_lock; /* Switch IP register lock */
+ struct mutex vlan_lock; /* VLAN bitmaps lock */
+ struct mutex ptp_lock; /* PTP IP register lock */
+ void __iomem *base;
+ void __iomem *ptp_base;
+ u16 swcfg; /* swcfg shadow */
+ u8 *vidmbrcfg; /* vidmbrcfg shadow */
+ u64 seconds; /* PTP seconds */
+ u64 last_ts; /* Used for overflow detection */
+ u16 status_out; /* ptp.status_out shadow */
+ size_t fdb_entries;
+};
+
+#endif /* _HELLCREEK_H_ */
diff --git a/drivers/net/dsa/hirschmann/hellcreek_hwtstamp.c b/drivers/net/dsa/hirschmann/hellcreek_hwtstamp.c
new file mode 100644
index 000000000000..69dd9a2e8bb6
--- /dev/null
+++ b/drivers/net/dsa/hirschmann/hellcreek_hwtstamp.c
@@ -0,0 +1,479 @@
+// SPDX-License-Identifier: (GPL-2.0 OR MIT)
+/*
+ * DSA driver for:
+ * Hirschmann Hellcreek TSN switch.
+ *
+ * Copyright (C) 2019,2020 Hochschule Offenburg
+ * Copyright (C) 2019,2020 Linutronix GmbH
+ * Authors: Kamil Alkhouri <kamil.alkhouri@hs-offenburg.de>
+ * Kurt Kanzenbach <kurt@linutronix.de>
+ */
+
+#include <linux/ptp_classify.h>
+
+#include "hellcreek.h"
+#include "hellcreek_hwtstamp.h"
+#include "hellcreek_ptp.h"
+
+int hellcreek_get_ts_info(struct dsa_switch *ds, int port,
+ struct ethtool_ts_info *info)
+{
+ struct hellcreek *hellcreek = ds->priv;
+
+ info->phc_index = hellcreek->ptp_clock ?
+ ptp_clock_index(hellcreek->ptp_clock) : -1;
+ info->so_timestamping = SOF_TIMESTAMPING_TX_HARDWARE |
+ SOF_TIMESTAMPING_RX_HARDWARE |
+ SOF_TIMESTAMPING_RAW_HARDWARE;
+
+	/* Tx timestamping is always enabled */
+ info->tx_types = BIT(HWTSTAMP_TX_ON);
+
+ /* L2 & L4 PTPv2 event rx messages are timestamped */
+ info->rx_filters = BIT(HWTSTAMP_FILTER_PTP_V2_EVENT);
+
+ return 0;
+}
+
+/* Enabling/disabling TX and RX HW timestamping for individual PTP messages is
+ * not available in the switch. Thus, this function only checks whether the
+ * requested configuration matches what the hardware actually offers
+ */
+static int hellcreek_set_hwtstamp_config(struct hellcreek *hellcreek, int port,
+ struct hwtstamp_config *config)
+{
+ struct hellcreek_port_hwtstamp *ps =
+ &hellcreek->ports[port].port_hwtstamp;
+ bool tx_tstamp_enable = false;
+ bool rx_tstamp_enable = false;
+
+	/* Interaction with the timestamp hardware is disabled here. It is
+	 * re-enabled when this config function ends successfully
+	 */
+ clear_bit_unlock(HELLCREEK_HWTSTAMP_ENABLED, &ps->state);
+
+ /* Reserved for future extensions */
+ if (config->flags)
+ return -EINVAL;
+
+ switch (config->tx_type) {
+ case HWTSTAMP_TX_ON:
+ tx_tstamp_enable = true;
+ break;
+
+ /* TX HW timestamping can't be disabled on the switch */
+ case HWTSTAMP_TX_OFF:
+ config->tx_type = HWTSTAMP_TX_ON;
+ break;
+
+ default:
+ return -ERANGE;
+ }
+
+ switch (config->rx_filter) {
+ /* RX HW timestamping can't be disabled on the switch */
+ case HWTSTAMP_FILTER_NONE:
+ config->rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
+ break;
+
+ case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
+ config->rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
+ rx_tstamp_enable = true;
+ break;
+
+ /* RX HW timestamping can't be enabled for all messages on the switch */
+ case HWTSTAMP_FILTER_ALL:
+ config->rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
+ break;
+
+ default:
+ return -ERANGE;
+ }
+
+ if (!tx_tstamp_enable)
+ return -ERANGE;
+
+ if (!rx_tstamp_enable)
+ return -ERANGE;
+
+ /* If this point is reached, then the requested hwtstamp config is
+ * compatible with the hwtstamp offered by the switch. Therefore,
+ * enable the interaction with the HW timestamping
+ */
+ set_bit(HELLCREEK_HWTSTAMP_ENABLED, &ps->state);
+
+ return 0;
+}
+
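From userspace, the only configuration that passes the check above is hardware TX timestamping on plus PTP v2 event RX filtering. A minimal illustrative request via the standard SIOCSHWTSTAMP ioctl follows; the helper name is invented, error handling is omitted, and any socket descriptor works as the ioctl target:

    #include <string.h>
    #include <net/if.h>
    #include <sys/ioctl.h>
    #include <linux/net_tstamp.h>
    #include <linux/sockios.h>

    /* Illustrative only: request the one configuration the switch accepts. */
    static int request_hwtstamp(int sock, const char *ifname)
    {
            struct hwtstamp_config cfg = {
                    .tx_type   = HWTSTAMP_TX_ON,
                    .rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT,
            };
            struct ifreq ifr;

            memset(&ifr, 0, sizeof(ifr));
            strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
            ifr.ifr_data = (void *)&cfg;

            return ioctl(sock, SIOCSHWTSTAMP, &ifr);
    }
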
+int hellcreek_port_hwtstamp_set(struct dsa_switch *ds, int port,
+ struct ifreq *ifr)
+{
+ struct hellcreek *hellcreek = ds->priv;
+ struct hellcreek_port_hwtstamp *ps;
+ struct hwtstamp_config config;
+ int err;
+
+ ps = &hellcreek->ports[port].port_hwtstamp;
+
+ if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
+ return -EFAULT;
+
+ err = hellcreek_set_hwtstamp_config(hellcreek, port, &config);
+ if (err)
+ return err;
+
+ /* Save the chosen configuration to be returned later */
+ memcpy(&ps->tstamp_config, &config, sizeof(config));
+
+ return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
+ -EFAULT : 0;
+}
+
+int hellcreek_port_hwtstamp_get(struct dsa_switch *ds, int port,
+ struct ifreq *ifr)
+{
+ struct hellcreek *hellcreek = ds->priv;
+ struct hellcreek_port_hwtstamp *ps;
+ struct hwtstamp_config *config;
+
+ ps = &hellcreek->ports[port].port_hwtstamp;
+ config = &ps->tstamp_config;
+
+ return copy_to_user(ifr->ifr_data, config, sizeof(*config)) ?
+ -EFAULT : 0;
+}
+
+/* Returns a pointer to the PTP header if the caller should time stamp, or NULL
+ * if the caller should not.
+ */
+static struct ptp_header *hellcreek_should_tstamp(struct hellcreek *hellcreek,
+ int port, struct sk_buff *skb,
+ unsigned int type)
+{
+ struct hellcreek_port_hwtstamp *ps =
+ &hellcreek->ports[port].port_hwtstamp;
+ struct ptp_header *hdr;
+
+ hdr = ptp_parse_header(skb, type);
+ if (!hdr)
+ return NULL;
+
+ if (!test_bit(HELLCREEK_HWTSTAMP_ENABLED, &ps->state))
+ return NULL;
+
+ return hdr;
+}
+
+static u64 hellcreek_get_reserved_field(const struct ptp_header *hdr)
+{
+ return be32_to_cpu(hdr->reserved2);
+}
+
+static void hellcreek_clear_reserved_field(struct ptp_header *hdr)
+{
+ hdr->reserved2 = 0;
+}
+
+static int hellcreek_ptp_hwtstamp_available(struct hellcreek *hellcreek,
+ unsigned int ts_reg)
+{
+ u16 status;
+
+ status = hellcreek_ptp_read(hellcreek, ts_reg);
+
+ if (status & PR_TS_STATUS_TS_LOST)
+ dev_err(hellcreek->dev,
+ "Tx time stamp lost! This should never happen!\n");
+
+ /* If hwtstamp is not available, this means the previous hwtstamp was
+ * successfully read, and the one we need is not yet available
+ */
+ return (status & PR_TS_STATUS_TS_AVAIL) ? 1 : 0;
+}
+
+/* Get nanoseconds timestamp from timestamping unit */
+static u64 hellcreek_ptp_hwtstamp_read(struct hellcreek *hellcreek,
+ unsigned int ts_reg)
+{
+ u16 nsl, nsh;
+
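+	/* The timestamp is transferred as five consecutive word reads from the
+	 * same register; the first three words are discarded and only the last
+	 * two carry the nanoseconds (high word first), mirroring the read
+	 * sequence in hellcreek_ptp_clock_read()
+	 */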
+ nsh = hellcreek_ptp_read(hellcreek, ts_reg);
+ nsh = hellcreek_ptp_read(hellcreek, ts_reg);
+ nsh = hellcreek_ptp_read(hellcreek, ts_reg);
+ nsh = hellcreek_ptp_read(hellcreek, ts_reg);
+ nsl = hellcreek_ptp_read(hellcreek, ts_reg);
+
+ return (u64)nsl | ((u64)nsh << 16);
+}
+
+static int hellcreek_txtstamp_work(struct hellcreek *hellcreek,
+ struct hellcreek_port_hwtstamp *ps, int port)
+{
+ struct skb_shared_hwtstamps shhwtstamps;
+ unsigned int status_reg, data_reg;
+ struct sk_buff *tmp_skb;
+ int ts_status;
+ u64 ns = 0;
+
+ if (!ps->tx_skb)
+ return 0;
+
+ switch (port) {
+ case 2:
+ status_reg = PR_TS_TX_P1_STATUS_C;
+ data_reg = PR_TS_TX_P1_DATA_C;
+ break;
+ case 3:
+ status_reg = PR_TS_TX_P2_STATUS_C;
+ data_reg = PR_TS_TX_P2_DATA_C;
+ break;
+ default:
+ dev_err(hellcreek->dev, "Wrong port for timestamping!\n");
+ return 0;
+ }
+
+ ts_status = hellcreek_ptp_hwtstamp_available(hellcreek, status_reg);
+
+ /* Not available yet? */
+ if (ts_status == 0) {
+ /* Check whether the operation of reading the tx timestamp has
+ * exceeded its allowed period
+ */
+ if (time_is_before_jiffies(ps->tx_tstamp_start +
+ TX_TSTAMP_TIMEOUT)) {
+ dev_err(hellcreek->dev,
+ "Timeout while waiting for Tx timestamp!\n");
+ goto free_and_clear_skb;
+ }
+
+		/* The timestamp should become available soon. Restart the
+		 * worker to poll for it again
+		 */
+ return 1;
+ }
+
+ mutex_lock(&hellcreek->ptp_lock);
+ ns = hellcreek_ptp_hwtstamp_read(hellcreek, data_reg);
+ ns += hellcreek_ptp_gettime_seconds(hellcreek, ns);
+ mutex_unlock(&hellcreek->ptp_lock);
+
+ /* Now we have the timestamp in nanoseconds, store it in the correct
+ * structure in order to send it to the user
+ */
+ memset(&shhwtstamps, 0, sizeof(shhwtstamps));
+ shhwtstamps.hwtstamp = ns_to_ktime(ns);
+
+ tmp_skb = ps->tx_skb;
+ ps->tx_skb = NULL;
+
+ /* skb_complete_tx_timestamp() frees up the client to make another
+ * timestampable transmit. We have to be ready for it by clearing the
+ * ps->tx_skb "flag" beforehand
+ */
+ clear_bit_unlock(HELLCREEK_HWTSTAMP_TX_IN_PROGRESS, &ps->state);
+
+ /* Deliver a clone of the original outgoing tx_skb with tx hwtstamp */
+ skb_complete_tx_timestamp(tmp_skb, &shhwtstamps);
+
+ return 0;
+
+free_and_clear_skb:
+ dev_kfree_skb_any(ps->tx_skb);
+ ps->tx_skb = NULL;
+ clear_bit_unlock(HELLCREEK_HWTSTAMP_TX_IN_PROGRESS, &ps->state);
+
+ return 0;
+}
+
+static void hellcreek_get_rxts(struct hellcreek *hellcreek,
+ struct hellcreek_port_hwtstamp *ps,
+ struct sk_buff *skb, struct sk_buff_head *rxq,
+ int port)
+{
+ struct skb_shared_hwtstamps *shwt;
+ struct sk_buff_head received;
+ unsigned long flags;
+
+ /* The latched timestamp belongs to one of the received frames. */
+ __skb_queue_head_init(&received);
+
+ /* Lock & disable interrupts */
+ spin_lock_irqsave(&rxq->lock, flags);
+
+	/* Add the reception queue "rxq" to the "received" queue and
+	 * reinitialize "rxq". From now on, we deal with "received", not with
+	 * "rxq"
+ */
+ skb_queue_splice_tail_init(rxq, &received);
+
+ spin_unlock_irqrestore(&rxq->lock, flags);
+
+ for (; skb; skb = __skb_dequeue(&received)) {
+ struct ptp_header *hdr;
+ unsigned int type;
+ u64 ns;
+
+ /* Get nanoseconds from ptp packet */
+ type = SKB_PTP_TYPE(skb);
+ hdr = ptp_parse_header(skb, type);
+ ns = hellcreek_get_reserved_field(hdr);
+ hellcreek_clear_reserved_field(hdr);
+
+ /* Add seconds part */
+ mutex_lock(&hellcreek->ptp_lock);
+ ns += hellcreek_ptp_gettime_seconds(hellcreek, ns);
+ mutex_unlock(&hellcreek->ptp_lock);
+
+ /* Save time stamp */
+ shwt = skb_hwtstamps(skb);
+ memset(shwt, 0, sizeof(*shwt));
+ shwt->hwtstamp = ns_to_ktime(ns);
+ netif_rx_ni(skb);
+ }
+}
+
+static void hellcreek_rxtstamp_work(struct hellcreek *hellcreek,
+ struct hellcreek_port_hwtstamp *ps,
+ int port)
+{
+ struct sk_buff *skb;
+
+ skb = skb_dequeue(&ps->rx_queue);
+ if (skb)
+ hellcreek_get_rxts(hellcreek, ps, skb, &ps->rx_queue, port);
+}
+
+long hellcreek_hwtstamp_work(struct ptp_clock_info *ptp)
+{
+ struct hellcreek *hellcreek = ptp_to_hellcreek(ptp);
+ struct dsa_switch *ds = hellcreek->ds;
+ int i, restart = 0;
+
+ for (i = 0; i < ds->num_ports; i++) {
+ struct hellcreek_port_hwtstamp *ps;
+
+ if (!dsa_is_user_port(ds, i))
+ continue;
+
+ ps = &hellcreek->ports[i].port_hwtstamp;
+
+ if (test_bit(HELLCREEK_HWTSTAMP_TX_IN_PROGRESS, &ps->state))
+ restart |= hellcreek_txtstamp_work(hellcreek, ps, i);
+
+ hellcreek_rxtstamp_work(hellcreek, ps, i);
+ }
+
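+	/* A positive return value re-arms the PTP auxiliary worker after that
+	 * many jiffies; a negative value idles it until ptp_schedule_worker()
+	 * is called again
+	 */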
+ return restart ? 1 : -1;
+}
+
+bool hellcreek_port_txtstamp(struct dsa_switch *ds, int port,
+ struct sk_buff *clone, unsigned int type)
+{
+ struct hellcreek *hellcreek = ds->priv;
+ struct hellcreek_port_hwtstamp *ps;
+ struct ptp_header *hdr;
+
+ ps = &hellcreek->ports[port].port_hwtstamp;
+
+ /* Check if the driver is expected to do HW timestamping */
+ if (!(skb_shinfo(clone)->tx_flags & SKBTX_HW_TSTAMP))
+ return false;
+
+ /* Make sure the message is a PTP message that needs to be timestamped
+ * and the interaction with the HW timestamping is enabled. If not, stop
+ * here
+ */
+ hdr = hellcreek_should_tstamp(hellcreek, port, clone, type);
+ if (!hdr)
+ return false;
+
+ if (test_and_set_bit_lock(HELLCREEK_HWTSTAMP_TX_IN_PROGRESS,
+ &ps->state))
+ return false;
+
+ ps->tx_skb = clone;
+
+	/* Store the jiffies value at which we started waiting for the Tx
+	 * timestamp, so that a timeout can be detected later
+	 */
+ ps->tx_tstamp_start = jiffies;
+
+ ptp_schedule_worker(hellcreek->ptp_clock, 0);
+
+ return true;
+}
+
+bool hellcreek_port_rxtstamp(struct dsa_switch *ds, int port,
+ struct sk_buff *skb, unsigned int type)
+{
+ struct hellcreek *hellcreek = ds->priv;
+ struct hellcreek_port_hwtstamp *ps;
+ struct ptp_header *hdr;
+
+ ps = &hellcreek->ports[port].port_hwtstamp;
+
+ /* This check only fails if the user did not initialize hardware
+ * timestamping beforehand.
+ */
+ if (ps->tstamp_config.rx_filter != HWTSTAMP_FILTER_PTP_V2_EVENT)
+ return false;
+
+ /* Make sure the message is a PTP message that needs to be timestamped
+ * and the interaction with the HW timestamping is enabled. If not, stop
+ * here
+ */
+ hdr = hellcreek_should_tstamp(hellcreek, port, skb, type);
+ if (!hdr)
+ return false;
+
+ SKB_PTP_TYPE(skb) = type;
+
+ skb_queue_tail(&ps->rx_queue, skb);
+
+ ptp_schedule_worker(hellcreek->ptp_clock, 0);
+
+ return true;
+}
+
+static void hellcreek_hwtstamp_port_setup(struct hellcreek *hellcreek, int port)
+{
+ struct hellcreek_port_hwtstamp *ps =
+ &hellcreek->ports[port].port_hwtstamp;
+
+ skb_queue_head_init(&ps->rx_queue);
+}
+
+int hellcreek_hwtstamp_setup(struct hellcreek *hellcreek)
+{
+ struct dsa_switch *ds = hellcreek->ds;
+ int i;
+
+ /* Initialize timestamping ports. */
+ for (i = 0; i < ds->num_ports; ++i) {
+ if (!dsa_is_user_port(ds, i))
+ continue;
+
+ hellcreek_hwtstamp_port_setup(hellcreek, i);
+ }
+
+ /* Select the synchronized clock as the source timekeeper for the
+ * timestamps and enable inline timestamping.
+ */
+ hellcreek_ptp_write(hellcreek, PR_SETTINGS_C_TS_SRC_TK_MASK |
+ PR_SETTINGS_C_RES3TS,
+ PR_SETTINGS_C);
+
+ return 0;
+}
+
+void hellcreek_hwtstamp_free(struct hellcreek *hellcreek)
+{
+	/* Nothing to do */
+}
diff --git a/drivers/net/dsa/hirschmann/hellcreek_hwtstamp.h b/drivers/net/dsa/hirschmann/hellcreek_hwtstamp.h
new file mode 100644
index 000000000000..c0745ffa1ebb
--- /dev/null
+++ b/drivers/net/dsa/hirschmann/hellcreek_hwtstamp.h
@@ -0,0 +1,58 @@
+/* SPDX-License-Identifier: (GPL-2.0 OR MIT) */
+/*
+ * DSA driver for:
+ * Hirschmann Hellcreek TSN switch.
+ *
+ * Copyright (C) 2019,2020 Hochschule Offenburg
+ * Copyright (C) 2019,2020 Linutronix GmbH
+ * Authors: Kurt Kanzenbach <kurt@linutronix.de>
+ * Kamil Alkhouri <kamil.alkhouri@hs-offenburg.de>
+ */
+
+#ifndef _HELLCREEK_HWTSTAMP_H_
+#define _HELLCREEK_HWTSTAMP_H_
+
+#include <net/dsa.h>
+#include "hellcreek.h"
+
+/* Timestamp Register */
+#define PR_TS_RX_P1_STATUS_C (0x1d * 2)
+#define PR_TS_RX_P1_DATA_C (0x1e * 2)
+#define PR_TS_TX_P1_STATUS_C (0x1f * 2)
+#define PR_TS_TX_P1_DATA_C (0x20 * 2)
+#define PR_TS_RX_P2_STATUS_C (0x25 * 2)
+#define PR_TS_RX_P2_DATA_C (0x26 * 2)
+#define PR_TS_TX_P2_STATUS_C (0x27 * 2)
+#define PR_TS_TX_P2_DATA_C (0x28 * 2)
+
+#define PR_TS_STATUS_TS_AVAIL BIT(2)
+#define PR_TS_STATUS_TS_LOST BIT(3)
+
+#define SKB_PTP_TYPE(__skb) (*(unsigned int *)((__skb)->cb))
+
+/* TX_TSTAMP_TIMEOUT: This limits the time spent polling for a TX
+ * timestamp. When working properly, hardware will produce a timestamp
+ * within 1ms. Software may encounter delays, so the timeout is set
+ * accordingly.
+ */
+#define TX_TSTAMP_TIMEOUT msecs_to_jiffies(40)
+
+int hellcreek_port_hwtstamp_set(struct dsa_switch *ds, int port,
+ struct ifreq *ifr);
+int hellcreek_port_hwtstamp_get(struct dsa_switch *ds, int port,
+ struct ifreq *ifr);
+
+bool hellcreek_port_rxtstamp(struct dsa_switch *ds, int port,
+			     struct sk_buff *skb, unsigned int type);
+bool hellcreek_port_txtstamp(struct dsa_switch *ds, int port,
+ struct sk_buff *clone, unsigned int type);
+
+int hellcreek_get_ts_info(struct dsa_switch *ds, int port,
+ struct ethtool_ts_info *info);
+
+long hellcreek_hwtstamp_work(struct ptp_clock_info *ptp);
+
+int hellcreek_hwtstamp_setup(struct hellcreek *chip);
+void hellcreek_hwtstamp_free(struct hellcreek *chip);
+
+#endif /* _HELLCREEK_HWTSTAMP_H_ */
diff --git a/drivers/net/dsa/hirschmann/hellcreek_ptp.c b/drivers/net/dsa/hirschmann/hellcreek_ptp.c
new file mode 100644
index 000000000000..2572c6087bb5
--- /dev/null
+++ b/drivers/net/dsa/hirschmann/hellcreek_ptp.c
@@ -0,0 +1,452 @@
+// SPDX-License-Identifier: (GPL-2.0 OR MIT)
+/*
+ * DSA driver for:
+ * Hirschmann Hellcreek TSN switch.
+ *
+ * Copyright (C) 2019,2020 Hochschule Offenburg
+ * Copyright (C) 2019,2020 Linutronix GmbH
+ * Authors: Kamil Alkhouri <kamil.alkhouri@hs-offenburg.de>
+ * Kurt Kanzenbach <kurt@linutronix.de>
+ */
+
+#include <linux/ptp_clock_kernel.h>
+#include "hellcreek.h"
+#include "hellcreek_ptp.h"
+#include "hellcreek_hwtstamp.h"
+
+u16 hellcreek_ptp_read(struct hellcreek *hellcreek, unsigned int offset)
+{
+ return readw(hellcreek->ptp_base + offset);
+}
+
+void hellcreek_ptp_write(struct hellcreek *hellcreek, u16 data,
+ unsigned int offset)
+{
+ writew(data, hellcreek->ptp_base + offset);
+}
+
+/* Get nanoseconds from PTP clock */
+static u64 hellcreek_ptp_clock_read(struct hellcreek *hellcreek)
+{
+ u16 nsl, nsh;
+
+ /* Take a snapshot */
+ hellcreek_ptp_write(hellcreek, PR_COMMAND_C_SS, PR_COMMAND_C);
+
+ /* The time of the day is saved as 96 bits. However, due to hardware
+ * limitations the seconds are not or only partly kept in the PTP
+ * core. Currently only three bits for the seconds are available. That's
+ * why only the nanoseconds are used and the seconds are tracked in
+	 * software. Nevertheless, due to internal locking, all five registers
+	 * should be read.
+ */
+ nsh = hellcreek_ptp_read(hellcreek, PR_SS_SYNC_DATA_C);
+ nsh = hellcreek_ptp_read(hellcreek, PR_SS_SYNC_DATA_C);
+ nsh = hellcreek_ptp_read(hellcreek, PR_SS_SYNC_DATA_C);
+ nsh = hellcreek_ptp_read(hellcreek, PR_SS_SYNC_DATA_C);
+ nsl = hellcreek_ptp_read(hellcreek, PR_SS_SYNC_DATA_C);
+
+ return (u64)nsl | ((u64)nsh << 16);
+}
+
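+/* The hardware clock only provides the nanoseconds within the current second,
+ * so the seconds are tracked in software: whenever a new reading is smaller
+ * than the previous one, the nanosecond counter has wrapped and the seconds
+ * counter is incremented. This relies on being called at least once per wrap,
+ * which the overflow worker (scheduled every HZ / 4) guarantees
+ */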
+static u64 __hellcreek_ptp_gettime(struct hellcreek *hellcreek)
+{
+ u64 ns;
+
+ ns = hellcreek_ptp_clock_read(hellcreek);
+ if (ns < hellcreek->last_ts)
+ hellcreek->seconds++;
+ hellcreek->last_ts = ns;
+ ns += hellcreek->seconds * NSEC_PER_SEC;
+
+ return ns;
+}
+
+/* Retrieve the seconds part, in nanoseconds, for a packet timestamped with
+ * @ns. There has to be a check whether an overflow occurred between the
+ * packet arrival and now. If so, use the previous seconds value (-1) for
+ * calculating the packet arrival time. Example: a packet stamped close to the
+ * end of a second may only be processed after the nanosecond counter has
+ * wrapped; its @ns is then larger than the current reading and belongs to the
+ * previous second.
+ */
+u64 hellcreek_ptp_gettime_seconds(struct hellcreek *hellcreek, u64 ns)
+{
+ u64 s;
+
+ __hellcreek_ptp_gettime(hellcreek);
+ if (hellcreek->last_ts > ns)
+ s = hellcreek->seconds * NSEC_PER_SEC;
+ else
+ s = (hellcreek->seconds - 1) * NSEC_PER_SEC;
+
+ return s;
+}
+
+static int hellcreek_ptp_gettime(struct ptp_clock_info *ptp,
+ struct timespec64 *ts)
+{
+ struct hellcreek *hellcreek = ptp_to_hellcreek(ptp);
+ u64 ns;
+
+ mutex_lock(&hellcreek->ptp_lock);
+ ns = __hellcreek_ptp_gettime(hellcreek);
+ mutex_unlock(&hellcreek->ptp_lock);
+
+ *ts = ns_to_timespec64(ns);
+
+ return 0;
+}
+
+static int hellcreek_ptp_settime(struct ptp_clock_info *ptp,
+ const struct timespec64 *ts)
+{
+ struct hellcreek *hellcreek = ptp_to_hellcreek(ptp);
+ u16 secl, nsh, nsl;
+
+ secl = ts->tv_sec & 0xffff;
+ nsh = ((u32)ts->tv_nsec & 0xffff0000) >> 16;
+ nsl = ts->tv_nsec & 0xffff;
+
+ mutex_lock(&hellcreek->ptp_lock);
+
+ /* Update overflow data structure */
+ hellcreek->seconds = ts->tv_sec;
+ hellcreek->last_ts = ts->tv_nsec;
+
+ /* Set time in clock */
+ hellcreek_ptp_write(hellcreek, 0x00, PR_CLOCK_WRITE_C);
+ hellcreek_ptp_write(hellcreek, 0x00, PR_CLOCK_WRITE_C);
+ hellcreek_ptp_write(hellcreek, secl, PR_CLOCK_WRITE_C);
+ hellcreek_ptp_write(hellcreek, nsh, PR_CLOCK_WRITE_C);
+ hellcreek_ptp_write(hellcreek, nsl, PR_CLOCK_WRITE_C);
+
+ mutex_unlock(&hellcreek->ptp_lock);
+
+ return 0;
+}
+
+static int hellcreek_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
+{
+ struct hellcreek *hellcreek = ptp_to_hellcreek(ptp);
+ u16 negative = 0, addendh, addendl;
+ u32 addend;
+ u64 adj;
+
+ if (scaled_ppm < 0) {
+ negative = 1;
+ scaled_ppm = -scaled_ppm;
+ }
+
+ /* IP-Core adjusts the nominal frequency by adding or subtracting 1 ns
+ * from the 8 ns (period of the oscillator) every time the accumulator
+ * register overflows. The value stored in the addend register is added
+ * to the accumulator register every 8 ns.
+ *
+ * addend value = (2^30 * accumulator_overflow_rate) /
+ * oscillator_frequency
+ * where:
+ *
+ * oscillator_frequency = 125 MHz
+ * accumulator_overflow_rate = 125 MHz * scaled_ppm * 2^-16 * 10^-6 * 8
+ */
+ adj = scaled_ppm;
+ adj <<= 11;
+ addend = (u32)div_u64(adj, 15625);
+
+ addendh = (addend & 0xffff0000) >> 16;
+ addendl = addend & 0xffff;
+
+ negative = (negative << 15) & 0x8000;
+
+ mutex_lock(&hellcreek->ptp_lock);
+
+ /* Set drift register */
+ hellcreek_ptp_write(hellcreek, negative, PR_CLOCK_DRIFT_C);
+ hellcreek_ptp_write(hellcreek, 0x00, PR_CLOCK_DRIFT_C);
+ hellcreek_ptp_write(hellcreek, 0x00, PR_CLOCK_DRIFT_C);
+ hellcreek_ptp_write(hellcreek, addendh, PR_CLOCK_DRIFT_C);
+ hellcreek_ptp_write(hellcreek, addendl, PR_CLOCK_DRIFT_C);
+
+ mutex_unlock(&hellcreek->ptp_lock);
+
+ return 0;
+}
+
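The two magic constants fall out of the formula in the comment: with the 2^-16 fractional scaling of scaled_ppm, the 125 MHz oscillator and the 2^30-wide accumulator,

    addend = 2^30 * (125 MHz * scaled_ppm * 2^-16 * 10^-6 * 8) / 125 MHz
           = scaled_ppm * 2^14 * 8 * 10^-6
           = scaled_ppm * 2^17 / 10^6
           = (scaled_ppm << 11) / 15625        (since 10^6 = 2^6 * 15625)

which is exactly the shift by 11 and div_u64(adj, 15625) performed above.
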
+static int hellcreek_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
+{
+ struct hellcreek *hellcreek = ptp_to_hellcreek(ptp);
+ u16 negative = 0, counth, countl;
+ u32 count_val;
+
+	/* If the offset is larger than the IP core's slow offset resources,
+	 * don't attempt a slow adjustment. Rather, add the offset directly to
+	 * the current time
+	 */
+ if (abs(delta) > MAX_SLOW_OFFSET_ADJ) {
+ struct timespec64 now, then = ns_to_timespec64(delta);
+
+ hellcreek_ptp_gettime(ptp, &now);
+ now = timespec64_add(now, then);
+ hellcreek_ptp_settime(ptp, &now);
+
+ return 0;
+ }
+
+ if (delta < 0) {
+ negative = 1;
+ delta = -delta;
+ }
+
+ /* 'count_val' does not exceed the maximum register size (2^30) */
+ count_val = div_s64(delta, MAX_NS_PER_STEP);
+
+ counth = (count_val & 0xffff0000) >> 16;
+ countl = count_val & 0xffff;
+
+ negative = (negative << 15) & 0x8000;
+
+ mutex_lock(&hellcreek->ptp_lock);
+
+ /* Set offset write register */
+ hellcreek_ptp_write(hellcreek, negative, PR_CLOCK_OFFSET_C);
+ hellcreek_ptp_write(hellcreek, MAX_NS_PER_STEP, PR_CLOCK_OFFSET_C);
+ hellcreek_ptp_write(hellcreek, MIN_CLK_CYCLES_BETWEEN_STEPS,
+ PR_CLOCK_OFFSET_C);
+ hellcreek_ptp_write(hellcreek, countl, PR_CLOCK_OFFSET_C);
+ hellcreek_ptp_write(hellcreek, counth, PR_CLOCK_OFFSET_C);
+
+ mutex_unlock(&hellcreek->ptp_lock);
+
+ return 0;
+}
+
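Worked example: a requested offset of 1 ms gives count_val = 1000000 / 7 = 142857 slow-offset steps of MAX_NS_PER_STEP (7 ns) each, applied back-to-back since MIN_CLK_CYCLES_BETWEEN_STEPS is zero. Offsets beyond MAX_SLOW_OFFSET_ADJ, roughly 7.5 seconds, take the settime() path above instead.
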
+static int hellcreek_ptp_enable(struct ptp_clock_info *ptp,
+ struct ptp_clock_request *rq, int on)
+{
+ return -EOPNOTSUPP;
+}
+
+static void hellcreek_ptp_overflow_check(struct work_struct *work)
+{
+ struct delayed_work *dw = to_delayed_work(work);
+ struct hellcreek *hellcreek;
+
+ hellcreek = dw_overflow_to_hellcreek(dw);
+
+ mutex_lock(&hellcreek->ptp_lock);
+ __hellcreek_ptp_gettime(hellcreek);
+ mutex_unlock(&hellcreek->ptp_lock);
+
+ schedule_delayed_work(&hellcreek->overflow_work,
+ HELLCREEK_OVERFLOW_PERIOD);
+}
+
+static enum led_brightness hellcreek_get_brightness(struct hellcreek *hellcreek,
+ int led)
+{
+ return (hellcreek->status_out & led) ? 1 : 0;
+}
+
+static void hellcreek_set_brightness(struct hellcreek *hellcreek, int led,
+ enum led_brightness b)
+{
+ mutex_lock(&hellcreek->ptp_lock);
+
+ if (b)
+ hellcreek->status_out |= led;
+ else
+ hellcreek->status_out &= ~led;
+
+ hellcreek_ptp_write(hellcreek, hellcreek->status_out, STATUS_OUT);
+
+ mutex_unlock(&hellcreek->ptp_lock);
+}
+
+static void hellcreek_led_sync_good_set(struct led_classdev *ldev,
+ enum led_brightness b)
+{
+ struct hellcreek *hellcreek = led_to_hellcreek(ldev, led_sync_good);
+
+ hellcreek_set_brightness(hellcreek, STATUS_OUT_SYNC_GOOD, b);
+}
+
+static enum led_brightness hellcreek_led_sync_good_get(struct led_classdev *ldev)
+{
+ struct hellcreek *hellcreek = led_to_hellcreek(ldev, led_sync_good);
+
+ return hellcreek_get_brightness(hellcreek, STATUS_OUT_SYNC_GOOD);
+}
+
+static void hellcreek_led_is_gm_set(struct led_classdev *ldev,
+ enum led_brightness b)
+{
+ struct hellcreek *hellcreek = led_to_hellcreek(ldev, led_is_gm);
+
+ hellcreek_set_brightness(hellcreek, STATUS_OUT_IS_GM, b);
+}
+
+static enum led_brightness hellcreek_led_is_gm_get(struct led_classdev *ldev)
+{
+ struct hellcreek *hellcreek = led_to_hellcreek(ldev, led_is_gm);
+
+ return hellcreek_get_brightness(hellcreek, STATUS_OUT_IS_GM);
+}
+
+/* There are two LEDs available, internally called sync_good and is_gm.
+ * However, the user might want to use a different label and specify the
+ * default state. Take those properties from the device tree.
+ */
+static int hellcreek_led_setup(struct hellcreek *hellcreek)
+{
+ struct device_node *leds, *led = NULL;
+ const char *label, *state;
+ int ret = -EINVAL;
+
+ leds = of_find_node_by_name(hellcreek->dev->of_node, "leds");
+ if (!leds) {
+ dev_err(hellcreek->dev, "No LEDs specified in device tree!\n");
+ return ret;
+ }
+
+ hellcreek->status_out = 0;
+
+ led = of_get_next_available_child(leds, led);
+ if (!led) {
+ dev_err(hellcreek->dev, "First LED not specified!\n");
+ goto out;
+ }
+
+ ret = of_property_read_string(led, "label", &label);
+ hellcreek->led_sync_good.name = ret ? "sync_good" : label;
+
+ ret = of_property_read_string(led, "default-state", &state);
+ if (!ret) {
+ if (!strcmp(state, "on"))
+ hellcreek->led_sync_good.brightness = 1;
+ else if (!strcmp(state, "off"))
+ hellcreek->led_sync_good.brightness = 0;
+ else if (!strcmp(state, "keep"))
+ hellcreek->led_sync_good.brightness =
+ hellcreek_get_brightness(hellcreek,
+ STATUS_OUT_SYNC_GOOD);
+ }
+
+ hellcreek->led_sync_good.max_brightness = 1;
+ hellcreek->led_sync_good.brightness_set = hellcreek_led_sync_good_set;
+ hellcreek->led_sync_good.brightness_get = hellcreek_led_sync_good_get;
+
+ led = of_get_next_available_child(leds, led);
+ if (!led) {
+ dev_err(hellcreek->dev, "Second LED not specified!\n");
+ ret = -EINVAL;
+ goto out;
+ }
+
+ ret = of_property_read_string(led, "label", &label);
+ hellcreek->led_is_gm.name = ret ? "is_gm" : label;
+
+ ret = of_property_read_string(led, "default-state", &state);
+ if (!ret) {
+ if (!strcmp(state, "on"))
+ hellcreek->led_is_gm.brightness = 1;
+ else if (!strcmp(state, "off"))
+ hellcreek->led_is_gm.brightness = 0;
+ else if (!strcmp(state, "keep"))
+ hellcreek->led_is_gm.brightness =
+ hellcreek_get_brightness(hellcreek,
+ STATUS_OUT_IS_GM);
+ }
+
+ hellcreek->led_is_gm.max_brightness = 1;
+ hellcreek->led_is_gm.brightness_set = hellcreek_led_is_gm_set;
+ hellcreek->led_is_gm.brightness_get = hellcreek_led_is_gm_get;
+
+ /* Set initial state */
+ if (hellcreek->led_sync_good.brightness == 1)
+ hellcreek_set_brightness(hellcreek, STATUS_OUT_SYNC_GOOD, 1);
+ if (hellcreek->led_is_gm.brightness == 1)
+ hellcreek_set_brightness(hellcreek, STATUS_OUT_IS_GM, 1);
+
+ /* Register both leds */
+ led_classdev_register(hellcreek->dev, &hellcreek->led_sync_good);
+ led_classdev_register(hellcreek->dev, &hellcreek->led_is_gm);
+
+ ret = 0;
+
+out:
+ of_node_put(leds);
+
+ return ret;
+}
+
+int hellcreek_ptp_setup(struct hellcreek *hellcreek)
+{
+ u16 status;
+ int ret;
+
+ /* Set up the overflow work */
+ INIT_DELAYED_WORK(&hellcreek->overflow_work,
+ hellcreek_ptp_overflow_check);
+
+ /* Setup PTP clock */
+ hellcreek->ptp_clock_info.owner = THIS_MODULE;
+	snprintf(hellcreek->ptp_clock_info.name,
+		 sizeof(hellcreek->ptp_clock_info.name),
+		 "%s", dev_name(hellcreek->dev));
+
+ /* IP-Core can add up to 0.5 ns per 8 ns cycle, which means
+ * accumulator_overflow_rate shall not exceed 62.5 MHz (which adjusts
+ * the nominal frequency by 6.25%)
+ */
+ hellcreek->ptp_clock_info.max_adj = 62500000;
+ hellcreek->ptp_clock_info.n_alarm = 0;
+ hellcreek->ptp_clock_info.n_pins = 0;
+ hellcreek->ptp_clock_info.n_ext_ts = 0;
+ hellcreek->ptp_clock_info.n_per_out = 0;
+ hellcreek->ptp_clock_info.pps = 0;
+ hellcreek->ptp_clock_info.adjfine = hellcreek_ptp_adjfine;
+ hellcreek->ptp_clock_info.adjtime = hellcreek_ptp_adjtime;
+ hellcreek->ptp_clock_info.gettime64 = hellcreek_ptp_gettime;
+ hellcreek->ptp_clock_info.settime64 = hellcreek_ptp_settime;
+ hellcreek->ptp_clock_info.enable = hellcreek_ptp_enable;
+ hellcreek->ptp_clock_info.do_aux_work = hellcreek_hwtstamp_work;
+
+ hellcreek->ptp_clock = ptp_clock_register(&hellcreek->ptp_clock_info,
+ hellcreek->dev);
+ if (IS_ERR(hellcreek->ptp_clock))
+ return PTR_ERR(hellcreek->ptp_clock);
+
+ /* Enable the offset correction process, if no offset correction is
+ * already taking place
+ */
+ status = hellcreek_ptp_read(hellcreek, PR_CLOCK_STATUS_C);
+ if (!(status & PR_CLOCK_STATUS_C_OFS_ACT))
+ hellcreek_ptp_write(hellcreek,
+ status | PR_CLOCK_STATUS_C_ENA_OFS,
+ PR_CLOCK_STATUS_C);
+
+ /* Enable the drift correction process */
+ hellcreek_ptp_write(hellcreek, status | PR_CLOCK_STATUS_C_ENA_DRIFT,
+ PR_CLOCK_STATUS_C);
+
+ /* LED setup */
+ ret = hellcreek_led_setup(hellcreek);
+ if (ret) {
+ if (hellcreek->ptp_clock)
+ ptp_clock_unregister(hellcreek->ptp_clock);
+ return ret;
+ }
+
+ schedule_delayed_work(&hellcreek->overflow_work,
+ HELLCREEK_OVERFLOW_PERIOD);
+
+ return 0;
+}
+
+void hellcreek_ptp_free(struct hellcreek *hellcreek)
+{
+ led_classdev_unregister(&hellcreek->led_is_gm);
+ led_classdev_unregister(&hellcreek->led_sync_good);
+ cancel_delayed_work_sync(&hellcreek->overflow_work);
+ if (hellcreek->ptp_clock)
+ ptp_clock_unregister(hellcreek->ptp_clock);
+ hellcreek->ptp_clock = NULL;
+}
diff --git a/drivers/net/dsa/hirschmann/hellcreek_ptp.h b/drivers/net/dsa/hirschmann/hellcreek_ptp.h
new file mode 100644
index 000000000000..0b51392c7e56
--- /dev/null
+++ b/drivers/net/dsa/hirschmann/hellcreek_ptp.h
@@ -0,0 +1,76 @@
+/* SPDX-License-Identifier: (GPL-2.0 OR MIT) */
+/*
+ * DSA driver for:
+ * Hirschmann Hellcreek TSN switch.
+ *
+ * Copyright (C) 2019,2020 Hochschule Offenburg
+ * Copyright (C) 2019,2020 Linutronix GmbH
+ * Authors: Kurt Kanzenbach <kurt@linutronix.de>
+ * Kamil Alkhouri <kamil.alkhouri@hs-offenburg.de>
+ */
+
+#ifndef _HELLCREEK_PTP_H_
+#define _HELLCREEK_PTP_H_
+
+#include <linux/bitops.h>
+#include <linux/ptp_clock_kernel.h>
+
+#include "hellcreek.h"
+
+/* Every jump in time is 7 ns */
+#define MAX_NS_PER_STEP 7L
+
+/* Correct offset at every clock cycle */
+#define MIN_CLK_CYCLES_BETWEEN_STEPS 0
+
+/* Maximum available slow offset resources: (2^30 - 1) steps of 7 ns each,
+ * i.e. roughly 7.5 seconds
+ */
+#define MAX_SLOW_OFFSET_ADJ \
+ ((unsigned long long)((1 << 30) - 1) * MAX_NS_PER_STEP)
+
+/* Check for overflow four times a second */
+#define HELLCREEK_OVERFLOW_PERIOD (HZ / 4)
+
+/* PTP Register */
+#define PR_SETTINGS_C (0x09 * 2)
+#define PR_SETTINGS_C_RES3TS BIT(4)
+#define PR_SETTINGS_C_TS_SRC_TK_SHIFT 8
+#define PR_SETTINGS_C_TS_SRC_TK_MASK GENMASK(9, 8)
+#define PR_COMMAND_C (0x0a * 2)
+#define PR_COMMAND_C_SS BIT(0)
+
+#define PR_CLOCK_STATUS_C (0x0c * 2)
+#define PR_CLOCK_STATUS_C_ENA_DRIFT BIT(12)
+#define PR_CLOCK_STATUS_C_OFS_ACT BIT(13)
+#define PR_CLOCK_STATUS_C_ENA_OFS BIT(14)
+
+#define PR_CLOCK_READ_C (0x0d * 2)
+#define PR_CLOCK_WRITE_C (0x0e * 2)
+#define PR_CLOCK_OFFSET_C (0x0f * 2)
+#define PR_CLOCK_DRIFT_C (0x10 * 2)
+
+#define PR_SS_FREE_DATA_C (0x12 * 2)
+#define PR_SS_SYNT_DATA_C (0x14 * 2)
+#define PR_SS_SYNC_DATA_C (0x16 * 2)
+#define PR_SS_DRAC_DATA_C (0x18 * 2)
+
+#define STATUS_OUT (0x60 * 2)
+#define STATUS_OUT_SYNC_GOOD BIT(0)
+#define STATUS_OUT_IS_GM BIT(1)
+
+int hellcreek_ptp_setup(struct hellcreek *hellcreek);
+void hellcreek_ptp_free(struct hellcreek *hellcreek);
+u16 hellcreek_ptp_read(struct hellcreek *hellcreek, unsigned int offset);
+void hellcreek_ptp_write(struct hellcreek *hellcreek, u16 data,
+ unsigned int offset);
+u64 hellcreek_ptp_gettime_seconds(struct hellcreek *hellcreek, u64 ns);
+
+#define ptp_to_hellcreek(ptp) \
+ container_of(ptp, struct hellcreek, ptp_clock_info)
+
+#define dw_overflow_to_hellcreek(dw) \
+ container_of(dw, struct hellcreek, overflow_work)
+
+#define led_to_hellcreek(ldev, led) \
+ container_of(ldev, struct hellcreek, led)
+
+#endif /* _HELLCREEK_PTP_H_ */
diff --git a/drivers/net/dsa/microchip/ksz8795.c b/drivers/net/dsa/microchip/ksz8795.c
index 1e101ab56cea..c973db101b72 100644
--- a/drivers/net/dsa/microchip/ksz8795.c
+++ b/drivers/net/dsa/microchip/ksz8795.c
@@ -23,7 +23,7 @@
static const struct {
char string[ETH_GSTRING_LEN];
-} mib_names[TOTAL_SWITCH_COUNTER_NUM] = {
+} mib_names[] = {
{ "rx_hi" },
{ "rx_undersize" },
{ "rx_fragments" },
@@ -125,7 +125,7 @@ static void ksz8795_r_mib_cnt(struct ksz_device *dev, int port, u16 addr,
u8 check;
int loop;
- ctrl_addr = addr + SWITCH_COUNTER_NUM * port;
+ ctrl_addr = addr + dev->reg_mib_cnt * port;
ctrl_addr |= IND_ACC_TABLE(TABLE_MIB | TABLE_READ);
mutex_lock(&dev->alu_mutex);
@@ -156,7 +156,7 @@ static void ksz8795_r_mib_pkt(struct ksz_device *dev, int port, u16 addr,
u8 check;
int loop;
- addr -= SWITCH_COUNTER_NUM;
+ addr -= dev->reg_mib_cnt;
ctrl_addr = (KS_MIB_TOTAL_RX_1 - KS_MIB_TOTAL_RX_0) * port;
ctrl_addr += addr + KS_MIB_TOTAL_RX_0;
ctrl_addr |= IND_ACC_TABLE(TABLE_MIB | TABLE_READ);
@@ -418,8 +418,8 @@ static void ksz8795_r_vlan_entries(struct ksz_device *dev, u16 addr)
int i;
ksz8795_r_table(dev, TABLE_VLAN, addr, &data);
- addr *= 4;
- for (i = 0; i < 4; i++) {
+ addr *= dev->phy_port_cnt;
+ for (i = 0; i < dev->phy_port_cnt; i++) {
dev->vlan_cache[addr + i].table[0] = (u16)data;
data >>= VLAN_TABLE_S;
}
@@ -433,7 +433,7 @@ static void ksz8795_r_vlan_table(struct ksz_device *dev, u16 vid, u16 *vlan)
u64 buf;
data = (u16 *)&buf;
- addr = vid / 4;
+ addr = vid / dev->phy_port_cnt;
index = vid & 3;
ksz8795_r_table(dev, TABLE_VLAN, addr, &buf);
*vlan = data[index];
@@ -447,7 +447,7 @@ static void ksz8795_w_vlan_table(struct ksz_device *dev, u16 vid, u16 vlan)
u64 buf;
data = (u16 *)&buf;
- addr = vid / 4;
+ addr = vid / dev->phy_port_cnt;
index = vid & 3;
ksz8795_r_table(dev, TABLE_VLAN, addr, &buf);
data[index] = vlan;
@@ -654,9 +654,10 @@ static enum dsa_tag_protocol ksz8795_get_tag_protocol(struct dsa_switch *ds,
static void ksz8795_get_strings(struct dsa_switch *ds, int port,
u32 stringset, uint8_t *buf)
{
+ struct ksz_device *dev = ds->priv;
int i;
- for (i = 0; i < TOTAL_SWITCH_COUNTER_NUM; i++) {
+ for (i = 0; i < dev->mib_cnt; i++) {
memcpy(buf + i * ETH_GSTRING_LEN, mib_names[i].string,
ETH_GSTRING_LEN);
}
@@ -691,12 +692,12 @@ static void ksz8795_port_stp_state_set(struct dsa_switch *ds, int port,
switch (state) {
case BR_STATE_DISABLED:
data |= PORT_LEARN_DISABLE;
- if (port < SWITCH_PORT_NUM)
+ if (port < dev->phy_port_cnt)
member = 0;
break;
case BR_STATE_LISTENING:
data |= (PORT_RX_ENABLE | PORT_LEARN_DISABLE);
- if (port < SWITCH_PORT_NUM &&
+ if (port < dev->phy_port_cnt &&
p->stp_state == BR_STATE_DISABLED)
member = dev->host_mask | p->vid_member;
break;
@@ -720,7 +721,7 @@ static void ksz8795_port_stp_state_set(struct dsa_switch *ds, int port,
break;
case BR_STATE_BLOCKING:
data |= PORT_LEARN_DISABLE;
- if (port < SWITCH_PORT_NUM &&
+ if (port < dev->phy_port_cnt &&
p->stp_state == BR_STATE_DISABLED)
member = dev->host_mask | p->vid_member;
break;
@@ -750,17 +751,17 @@ static void ksz8795_port_stp_state_set(struct dsa_switch *ds, int port,
static void ksz8795_flush_dyn_mac_table(struct ksz_device *dev, int port)
{
- u8 learn[TOTAL_PORT_NUM];
+ u8 learn[DSA_MAX_PORTS];
int first, index, cnt;
struct ksz_port *p;
- if ((uint)port < TOTAL_PORT_NUM) {
+ if ((uint)port < dev->port_cnt) {
first = port;
cnt = port + 1;
} else {
/* Flush all ports. */
first = 0;
- cnt = dev->mib_port_cnt;
+ cnt = dev->port_cnt;
}
for (index = first; index < cnt; index++) {
p = &dev->ports[index];
@@ -992,8 +993,6 @@ static void ksz8795_config_cpu_port(struct dsa_switch *ds)
u8 remote;
int i;
- ds->num_ports = dev->port_cnt + 1;
-
/* Switch marks the maximum frame with extra byte as oversize. */
ksz_cfg(dev, REG_SW_CTRL_2, SW_LEGAL_PACKET_DISABLE, true);
ksz_cfg(dev, S_TAIL_TAG_CTRL, SW_TAIL_TAG_ENABLE, true);
@@ -1005,7 +1004,7 @@ static void ksz8795_config_cpu_port(struct dsa_switch *ds)
ksz8795_port_setup(dev, dev->cpu_port, true);
dev->member = dev->host_mask;
- for (i = 0; i < SWITCH_PORT_NUM; i++) {
+ for (i = 0; i < dev->phy_port_cnt; i++) {
p = &dev->ports[i];
/* Initialize to non-zero so that ksz_cfg_port_member() will
@@ -1016,7 +1015,7 @@ static void ksz8795_config_cpu_port(struct dsa_switch *ds)
ksz8795_port_stp_state_set(ds, i, BR_STATE_DISABLED);
/* Last port may be disabled. */
- if (i == dev->port_cnt)
+ if (i == dev->phy_port_cnt)
break;
p->on = 1;
p->phy = 1;
@@ -1085,7 +1084,7 @@ static int ksz8795_setup(struct dsa_switch *ds)
(BROADCAST_STORM_VALUE *
BROADCAST_STORM_PROT_RATE) / 100);
- for (i = 0; i < VLAN_TABLE_ENTRIES; i++)
+ for (i = 0; i < (dev->num_vlans / 4); i++)
ksz8795_r_vlan_entries(dev, i);
/* Setup STP address for STP operation. */
@@ -1150,10 +1149,6 @@ static int ksz8795_switch_detect(struct ksz_device *dev)
(id2 != CHIP_ID_94 && id2 != CHIP_ID_95))
return -ENODEV;
- dev->mib_port_cnt = TOTAL_PORT_NUM;
- dev->phy_port_cnt = SWITCH_PORT_NUM;
- dev->port_cnt = SWITCH_PORT_NUM;
-
if (id2 == CHIP_ID_95) {
u8 val;
@@ -1162,17 +1157,12 @@ static int ksz8795_switch_detect(struct ksz_device *dev)
if (val & PORT_FIBER_MODE)
id2 = 0x65;
} else if (id2 == CHIP_ID_94) {
- dev->port_cnt--;
- dev->last_port = dev->port_cnt;
id2 = 0x94;
}
id16 &= ~0xff;
id16 |= id2;
dev->chip_id = id16;
- dev->cpu_port = dev->mib_port_cnt - 1;
- dev->host_mask = BIT(dev->cpu_port);
-
return 0;
}
@@ -1194,7 +1184,7 @@ static const struct ksz_chip_data ksz8795_switch_chips[] = {
.num_alus = 0,
.num_statics = 8,
.cpu_ports = 0x10, /* can be configured as cpu port */
- .port_cnt = 4, /* total physical port count */
+ .port_cnt = 5, /* total cpu and user ports */
},
{
.chip_id = 0x8794,
@@ -1203,7 +1193,7 @@ static const struct ksz_chip_data ksz8795_switch_chips[] = {
.num_alus = 0,
.num_statics = 8,
.cpu_ports = 0x10, /* can be configured as cpu port */
- .port_cnt = 3, /* total physical port count */
+ .port_cnt = 4, /* total cpu and user ports */
},
{
.chip_id = 0x8765,
@@ -1212,7 +1202,7 @@ static const struct ksz_chip_data ksz8795_switch_chips[] = {
.num_alus = 0,
.num_statics = 8,
.cpu_ports = 0x10, /* can be configured as cpu port */
- .port_cnt = 4, /* total physical port count */
+ .port_cnt = 5, /* total cpu and user ports */
},
};
@@ -1244,27 +1234,32 @@ static int ksz8795_switch_init(struct ksz_device *dev)
dev->port_mask = BIT(dev->port_cnt) - 1;
dev->port_mask |= dev->host_mask;
- dev->reg_mib_cnt = SWITCH_COUNTER_NUM;
- dev->mib_cnt = TOTAL_SWITCH_COUNTER_NUM;
+ dev->reg_mib_cnt = KSZ8795_COUNTER_NUM;
+ dev->mib_cnt = ARRAY_SIZE(mib_names);
+
+ dev->phy_port_cnt = dev->port_cnt - 1;
+
+ dev->cpu_port = dev->port_cnt - 1;
+ dev->host_mask = BIT(dev->cpu_port);
- i = dev->mib_port_cnt;
- dev->ports = devm_kzalloc(dev->dev, sizeof(struct ksz_port) * i,
+ dev->ports = devm_kzalloc(dev->dev,
+ dev->port_cnt * sizeof(struct ksz_port),
GFP_KERNEL);
if (!dev->ports)
return -ENOMEM;
- for (i = 0; i < dev->mib_port_cnt; i++) {
+ for (i = 0; i < dev->port_cnt; i++) {
mutex_init(&dev->ports[i].mib.cnt_mutex);
dev->ports[i].mib.counters =
devm_kzalloc(dev->dev,
sizeof(u64) *
- (TOTAL_SWITCH_COUNTER_NUM + 1),
+ (dev->mib_cnt + 1),
GFP_KERNEL);
if (!dev->ports[i].mib.counters)
return -ENOMEM;
}
/* set the real number of ports */
- dev->ds->num_ports = dev->port_cnt + 1;
+ dev->ds->num_ports = dev->port_cnt;
return 0;
}
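To make the constant-to-runtime conversion concrete, here is how the derived fields in ksz8795_switch_init() work out for the KSZ8794 entry (port_cnt = 4); the arithmetic follows directly from the assignments above:

/*
 *	phy_port_cnt = 4 - 1 = 3		three user ports
 *	cpu_port     = 4 - 1 = 3		the last port hosts the CPU
 *	host_mask    = BIT(3) = 0x08
 *	port_mask    = (BIT(4) - 1) | 0x08 = 0x0f
 */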
diff --git a/drivers/net/dsa/microchip/ksz8795_reg.h b/drivers/net/dsa/microchip/ksz8795_reg.h
index 3a50462df8fa..40372047d40d 100644
--- a/drivers/net/dsa/microchip/ksz8795_reg.h
+++ b/drivers/net/dsa/microchip/ksz8795_reg.h
@@ -846,16 +846,7 @@
#define KS_PRIO_IN_REG 4
-#define TOTAL_PORT_NUM 5
-
-/* Host port can only be last of them. */
-#define SWITCH_PORT_NUM (TOTAL_PORT_NUM - 1)
-
#define KSZ8795_COUNTER_NUM 0x20
-#define TOTAL_KSZ8795_COUNTER_NUM (KSZ8795_COUNTER_NUM + 4)
-
-#define SWITCH_COUNTER_NUM KSZ8795_COUNTER_NUM
-#define TOTAL_SWITCH_COUNTER_NUM TOTAL_KSZ8795_COUNTER_NUM
/* Common names used by other drivers */
@@ -998,7 +989,6 @@
#define TAIL_TAG_OVERRIDE BIT(6)
#define TAIL_TAG_LOOKUP BIT(7)
-#define VLAN_TABLE_ENTRIES (4096 / 4)
#define FID_ENTRIES 128
#endif
diff --git a/drivers/net/dsa/microchip/ksz8795_spi.c b/drivers/net/dsa/microchip/ksz8795_spi.c
index 8b00f8e6c02f..f98432a3e2b5 100644
--- a/drivers/net/dsa/microchip/ksz8795_spi.c
+++ b/drivers/net/dsa/microchip/ksz8795_spi.c
@@ -49,6 +49,12 @@ static int ksz8795_spi_probe(struct spi_device *spi)
if (spi->dev.platform_data)
dev->pdata = spi->dev.platform_data;
+ /* setup spi */
+ spi->mode = SPI_MODE_3;
+ ret = spi_setup(spi);
+ if (ret)
+ return ret;
+
ret = ksz8795_switch_register(dev);
/* Main DSA driver may not be started yet. */
diff --git a/drivers/net/dsa/microchip/ksz9477.c b/drivers/net/dsa/microchip/ksz9477.c
index abfd3802bb51..42e647b67abd 100644
--- a/drivers/net/dsa/microchip/ksz9477.c
+++ b/drivers/net/dsa/microchip/ksz9477.c
@@ -478,7 +478,7 @@ static void ksz9477_flush_dyn_mac_table(struct ksz_device *dev, int port)
SW_FLUSH_OPTION_M << SW_FLUSH_OPTION_S,
SW_FLUSH_OPTION_DYN_MAC << SW_FLUSH_OPTION_S);
- if (port < dev->mib_port_cnt) {
+ if (port < dev->port_cnt) {
/* flush individual port */
ksz_pread8(dev, port, P_STP_CTRL, &data);
if (!(data & PORT_LEARN_DISABLE))
@@ -1267,8 +1267,6 @@ static void ksz9477_config_cpu_port(struct dsa_switch *ds)
struct ksz_port *p;
int i;
- ds->num_ports = dev->port_cnt;
-
for (i = 0; i < dev->port_cnt; i++) {
if (dsa_is_cpu_port(ds, i) && (dev->cpu_ports & (1 << i))) {
phy_interface_t interface;
@@ -1319,7 +1317,7 @@ static void ksz9477_config_cpu_port(struct dsa_switch *ds)
dev->member = dev->host_mask;
- for (i = 0; i < dev->mib_port_cnt; i++) {
+ for (i = 0; i < dev->port_cnt; i++) {
if (i == dev->cpu_port)
continue;
p = &dev->ports[i];
@@ -1446,7 +1444,6 @@ static int ksz9477_switch_detect(struct ksz_device *dev)
return ret;
/* Number of ports can be reduced depending on chip. */
- dev->mib_port_cnt = TOTAL_PORT_NUM;
dev->phy_port_cnt = 5;
/* Default capability is gigabit capable. */
@@ -1463,7 +1460,6 @@ static int ksz9477_switch_detect(struct ksz_device *dev)
/* Chip does not support gigabit. */
if (data8 & SW_QW_ABLE)
dev->features &= ~GBIT_SUPPORT;
- dev->mib_port_cnt = 3;
dev->phy_port_cnt = 2;
} else {
dev_info(dev->dev, "Found KSZ9477 or compatible\n");
@@ -1566,12 +1562,12 @@ static int ksz9477_switch_init(struct ksz_device *dev)
dev->reg_mib_cnt = SWITCH_COUNTER_NUM;
dev->mib_cnt = TOTAL_SWITCH_COUNTER_NUM;
- i = dev->mib_port_cnt;
- dev->ports = devm_kzalloc(dev->dev, sizeof(struct ksz_port) * i,
+ dev->ports = devm_kzalloc(dev->dev,
+ dev->port_cnt * sizeof(struct ksz_port),
GFP_KERNEL);
if (!dev->ports)
return -ENOMEM;
- for (i = 0; i < dev->mib_port_cnt; i++) {
+ for (i = 0; i < dev->port_cnt; i++) {
mutex_init(&dev->ports[i].mib.cnt_mutex);
dev->ports[i].mib.counters =
devm_kzalloc(dev->dev,
diff --git a/drivers/net/dsa/microchip/ksz9477_spi.c b/drivers/net/dsa/microchip/ksz9477_spi.c
index 1142768969c2..15bc11b3cda4 100644
--- a/drivers/net/dsa/microchip/ksz9477_spi.c
+++ b/drivers/net/dsa/microchip/ksz9477_spi.c
@@ -48,6 +48,12 @@ static int ksz9477_spi_probe(struct spi_device *spi)
if (spi->dev.platform_data)
dev->pdata = spi->dev.platform_data;
+ /* setup spi */
+ spi->mode = SPI_MODE_3;
+ ret = spi_setup(spi);
+ if (ret)
+ return ret;
+
ret = ksz9477_switch_register(dev);
/* Main DSA driver may not be started yet. */
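Both SPI probe paths now pin the controller to SPI mode 3 (clock idles high, data sampled on the trailing edge) before the first register access. A minimal sketch of the pattern, generic to any spi_device:

#include <linux/spi/spi.h>

static int example_probe(struct spi_device *spi)
{
	int ret;

	spi->mode = SPI_MODE_3;	/* CPOL = 1, CPHA = 1 */
	ret = spi_setup(spi);	/* fails if the controller lacks mode 3 */
	if (ret)
		return ret;

	/* register traffic is only safe from this point on */
	return 0;
}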
diff --git a/drivers/net/dsa/microchip/ksz_common.c b/drivers/net/dsa/microchip/ksz_common.c
index 0ef854911f21..cf743133b0b9 100644
--- a/drivers/net/dsa/microchip/ksz_common.c
+++ b/drivers/net/dsa/microchip/ksz_common.c
@@ -72,7 +72,7 @@ static void ksz_mib_read_work(struct work_struct *work)
struct ksz_port *p;
int i;
- for (i = 0; i < dev->mib_port_cnt; i++) {
+ for (i = 0; i < dev->port_cnt; i++) {
if (dsa_is_unused_port(dev->ds, i))
continue;
@@ -103,7 +103,7 @@ void ksz_init_mib_timer(struct ksz_device *dev)
INIT_DELAYED_WORK(&dev->mib_read, ksz_mib_read_work);
- for (i = 0; i < dev->mib_port_cnt; i++)
+ for (i = 0; i < dev->port_cnt; i++)
dev->dev_ops->port_init_cnt(dev, i);
}
EXPORT_SYMBOL_GPL(ksz_init_mib_timer);
@@ -426,7 +426,9 @@ int ksz_switch_register(struct ksz_device *dev,
ret = of_get_phy_mode(dev->dev->of_node, &interface);
if (ret == 0)
dev->compat_interface = interface;
- ports = of_get_child_by_name(dev->dev->of_node, "ports");
+ ports = of_get_child_by_name(dev->dev->of_node, "ethernet-ports");
+ if (!ports)
+ ports = of_get_child_by_name(dev->dev->of_node, "ports");
if (ports)
for_each_available_child_of_node(ports, port) {
if (of_property_read_u32(port, "reg",
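With the fallback above, either container-node name binds; an illustrative devicetree fragment (not taken from this patch) using the preferred name:

/*
 *	switch@0 {
 *		compatible = "microchip,ksz9477";
 *		reg = <0>;
 *
 *		ethernet-ports {	// previously had to be named "ports"
 *			port@0 {
 *				reg = <0>;
 *				label = "lan1";
 *			};
 *		};
 *	};
 */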
diff --git a/drivers/net/dsa/microchip/ksz_common.h b/drivers/net/dsa/microchip/ksz_common.h
index cf866e48ff66..720f22275c84 100644
--- a/drivers/net/dsa/microchip/ksz_common.h
+++ b/drivers/net/dsa/microchip/ksz_common.h
@@ -71,8 +71,6 @@ struct ksz_device {
int port_cnt;
int reg_mib_cnt;
int mib_cnt;
- int mib_port_cnt;
- int last_port; /* ports after that not used */
phy_interface_t compat_interface;
u32 regs_size;
bool phy_errata_9477;
diff --git a/drivers/net/dsa/mt7530.c b/drivers/net/dsa/mt7530.c
index de7692b763d8..6408402a44f5 100644
--- a/drivers/net/dsa/mt7530.c
+++ b/drivers/net/dsa/mt7530.c
@@ -558,7 +558,7 @@ mt7531_pad_setup(struct dsa_switch *ds, phy_interface_t interface)
val |= 0x190000 << RG_COREPLL_SDM_PCW_S;
mt7530_write(priv, MT7531_PLLGP_CR0, val);
break;
- };
+ }
/* Set feedback divide ratio update signal to high */
val = mt7530_read(priv, MT7531_PLLGP_CR0);
@@ -1021,6 +1021,53 @@ mt7530_port_disable(struct dsa_switch *ds, int port)
mutex_unlock(&priv->reg_mutex);
}
+static int
+mt7530_port_change_mtu(struct dsa_switch *ds, int port, int new_mtu)
+{
+ struct mt7530_priv *priv = ds->priv;
+ struct mii_bus *bus = priv->bus;
+ int length;
+ u32 val;
+
+	/* When a new MTU is set, DSA always sets the CPU port's MTU to the
+	 * largest MTU of the slave ports. Because the switch only has a global
+	 * RX length register, handling only the CPU port here is enough.
+ */
+ if (!dsa_is_cpu_port(ds, port))
+ return 0;
+
+ mutex_lock_nested(&bus->mdio_lock, MDIO_MUTEX_NESTED);
+
+ val = mt7530_mii_read(priv, MT7530_GMACCR);
+ val &= ~MAX_RX_PKT_LEN_MASK;
+
+ /* RX length also includes Ethernet header, MTK tag, and FCS length */
+ length = new_mtu + ETH_HLEN + MTK_HDR_LEN + ETH_FCS_LEN;
+ if (length <= 1522) {
+ val |= MAX_RX_PKT_LEN_1522;
+ } else if (length <= 1536) {
+ val |= MAX_RX_PKT_LEN_1536;
+ } else if (length <= 1552) {
+ val |= MAX_RX_PKT_LEN_1552;
+ } else {
+ val &= ~MAX_RX_JUMBO_MASK;
+ val |= MAX_RX_JUMBO(DIV_ROUND_UP(length, 1024));
+ val |= MAX_RX_PKT_LEN_JUMBO;
+ }
+
+ mt7530_mii_write(priv, MT7530_GMACCR, val);
+
+ mutex_unlock(&bus->mdio_lock);
+
+ return 0;
+}
+
+static int
+mt7530_port_max_mtu(struct dsa_switch *ds, int port)
+{
+ return MT7530_MAX_MTU;
+}
+
static void
mt7530_stp_state_set(struct dsa_switch *ds, int port, u8 state)
{
@@ -2519,6 +2566,8 @@ static const struct dsa_switch_ops mt7530_switch_ops = {
.get_sset_count = mt7530_get_sset_count,
.port_enable = mt7530_port_enable,
.port_disable = mt7530_port_disable,
+ .port_change_mtu = mt7530_port_change_mtu,
+ .port_max_mtu = mt7530_port_max_mtu,
.port_stp_state_set = mt7530_stp_state_set,
.port_bridge_join = mt7530_port_bridge_join,
.port_bridge_leave = mt7530_port_bridge_leave,
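A few worked values for the length bucketing in mt7530_port_change_mtu() above, using ETH_HLEN = 14, MTK_HDR_LEN = 4 and ETH_FCS_LEN = 4:

/*
 *	new_mtu = 1500:  length = 1522  ->  MAX_RX_PKT_LEN_1522
 *	new_mtu = 1518:  length = 1540  ->  MAX_RX_PKT_LEN_1552
 *	new_mtu = 9000:  length = 9022  ->  jumbo path,
 *			 MAX_RX_JUMBO(DIV_ROUND_UP(9022, 1024)) = MAX_RX_JUMBO(9)
 */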
diff --git a/drivers/net/dsa/mt7530.h b/drivers/net/dsa/mt7530.h
index 9278a8e3d04e..ee3523a7537e 100644
--- a/drivers/net/dsa/mt7530.h
+++ b/drivers/net/dsa/mt7530.h
@@ -11,6 +11,9 @@
#define MT7530_NUM_FDB_RECORDS 2048
#define MT7530_ALL_MEMBERS 0xff
+#define MTK_HDR_LEN 4
+#define MT7530_MAX_MTU (15 * 1024 - ETH_HLEN - ETH_FCS_LEN - MTK_HDR_LEN)
+
enum mt753x_id {
ID_MT7530 = 0,
ID_MT7621 = 1,
@@ -289,6 +292,15 @@ enum mt7530_vlan_port_attr {
#define MT7531_DBG_CNT(x) (0x3018 + (x) * 0x100)
#define MT7531_DIS_CLR BIT(31)
+#define MT7530_GMACCR 0x30e0
+#define MAX_RX_JUMBO(x) ((x) << 2)
+#define MAX_RX_JUMBO_MASK GENMASK(5, 2)
+#define MAX_RX_PKT_LEN_MASK GENMASK(1, 0)
+#define MAX_RX_PKT_LEN_1522 0x0
+#define MAX_RX_PKT_LEN_1536 0x1
+#define MAX_RX_PKT_LEN_1552 0x2
+#define MAX_RX_PKT_LEN_JUMBO 0x3
+
/* Register for MIB */
#define MT7530_PORT_MIB_COUNTER(x) (0x4000 + (x) * 0x100)
#define MT7530_MIB_CCR 0x4fe0
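For reference, the new MT7530_MAX_MTU evaluates to

	15 * 1024 - ETH_HLEN - ETH_FCS_LEN - MTK_HDR_LEN = 15360 - 14 - 4 - 4 = 15338

bytes: the largest receive length the jumbo field can cover (15 KiB) minus the non-payload overhead that mt7530_port_change_mtu() adds back in.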
diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c
index 34cca0a4b31c..e7f68ac0c7e3 100644
--- a/drivers/net/dsa/mv88e6xxx/chip.c
+++ b/drivers/net/dsa/mv88e6xxx/chip.c
@@ -727,8 +727,8 @@ static void mv88e6xxx_mac_link_down(struct dsa_switch *ds, int port,
mv88e6xxx_reg_lock(chip);
if ((!mv88e6xxx_port_ppu_updates(chip, port) ||
- mode == MLO_AN_FIXED) && ops->port_set_link)
- err = ops->port_set_link(chip, port, LINK_FORCED_DOWN);
+ mode == MLO_AN_FIXED) && ops->port_sync_link)
+ err = ops->port_sync_link(chip, port, mode, false);
mv88e6xxx_reg_unlock(chip);
if (err)
@@ -768,8 +768,8 @@ static void mv88e6xxx_mac_link_up(struct dsa_switch *ds, int port,
goto error;
}
- if (ops->port_set_link)
- err = ops->port_set_link(chip, port, LINK_FORCED_UP);
+ if (ops->port_sync_link)
+ err = ops->port_sync_link(chip, port, mode, true);
}
error:
mv88e6xxx_reg_unlock(chip);
@@ -1442,7 +1442,7 @@ static void mv88e6xxx_port_fast_age(struct dsa_switch *ds, int port)
static int mv88e6xxx_vtu_setup(struct mv88e6xxx_chip *chip)
{
- if (!chip->info->max_vid)
+ if (!mv88e6xxx_max_vid(chip))
return 0;
return mv88e6xxx_g1_vtu_flush(chip);
@@ -1484,7 +1484,7 @@ int mv88e6xxx_fid_map(struct mv88e6xxx_chip *chip, unsigned long *fid_bitmap)
}
/* Set every FID bit used by the VLAN entries */
- vlan.vid = chip->info->max_vid;
+ vlan.vid = mv88e6xxx_max_vid(chip);
vlan.valid = false;
do {
@@ -1496,7 +1496,7 @@ int mv88e6xxx_fid_map(struct mv88e6xxx_chip *chip, unsigned long *fid_bitmap)
break;
set_bit(vlan.fid, fid_bitmap);
- } while (vlan.vid < chip->info->max_vid);
+ } while (vlan.vid < mv88e6xxx_max_vid(chip));
return 0;
}
@@ -1587,7 +1587,7 @@ static int mv88e6xxx_port_vlan_filtering(struct dsa_switch *ds, int port,
int err;
if (switchdev_trans_ph_prepare(trans))
- return chip->info->max_vid ? 0 : -EOPNOTSUPP;
+ return mv88e6xxx_max_vid(chip) ? 0 : -EOPNOTSUPP;
mv88e6xxx_reg_lock(chip);
err = mv88e6xxx_port_set_8021q_mode(chip, port, mode);
@@ -1603,7 +1603,7 @@ mv88e6xxx_port_vlan_prepare(struct dsa_switch *ds, int port,
struct mv88e6xxx_chip *chip = ds->priv;
int err;
- if (!chip->info->max_vid)
+ if (!mv88e6xxx_max_vid(chip))
return -EOPNOTSUPP;
/* If the requested port doesn't belong to the same bridge as the VLAN
@@ -1973,7 +1973,7 @@ static void mv88e6xxx_port_vlan_add(struct dsa_switch *ds, int port,
u8 member;
u16 vid;
- if (!chip->info->max_vid)
+ if (!mv88e6xxx_max_vid(chip))
return;
if (dsa_is_dsa_port(ds, port) || dsa_is_cpu_port(ds, port))
@@ -2051,7 +2051,7 @@ static int mv88e6xxx_port_vlan_del(struct dsa_switch *ds, int port,
u16 pvid, vid;
int err = 0;
- if (!chip->info->max_vid)
+ if (!mv88e6xxx_max_vid(chip))
return -EOPNOTSUPP;
mv88e6xxx_reg_lock(chip);
@@ -2157,7 +2157,7 @@ static int mv88e6xxx_port_db_dump(struct mv88e6xxx_chip *chip, int port,
return err;
/* Dump VLANs' Filtering Information Databases */
- vlan.vid = chip->info->max_vid;
+ vlan.vid = mv88e6xxx_max_vid(chip);
vlan.valid = false;
do {
@@ -2172,7 +2172,7 @@ static int mv88e6xxx_port_db_dump(struct mv88e6xxx_chip *chip, int port,
cb, data);
if (err)
return err;
- } while (vlan.vid < chip->info->max_vid);
+ } while (vlan.vid < mv88e6xxx_max_vid(chip));
return err;
}
@@ -2853,6 +2853,7 @@ static int mv88e6xxx_setup(struct dsa_switch *ds)
chip->ds = ds;
ds->slave_mii_bus = mv88e6xxx_default_mdio_bus(chip);
+ ds->configure_vlan_while_not_filtering = true;
mv88e6xxx_reg_lock(chip);
@@ -3209,6 +3210,7 @@ static const struct mv88e6xxx_ops mv88e6085_ops = {
.phy_read = mv88e6185_phy_ppu_read,
.phy_write = mv88e6185_phy_ppu_write,
.port_set_link = mv88e6xxx_port_set_link,
+ .port_sync_link = mv88e6xxx_port_sync_link,
.port_set_speed_duplex = mv88e6185_port_set_speed_duplex,
.port_tag_remap = mv88e6095_port_tag_remap,
.port_set_frame_mode = mv88e6351_port_set_frame_mode,
@@ -3248,6 +3250,7 @@ static const struct mv88e6xxx_ops mv88e6095_ops = {
.phy_read = mv88e6185_phy_ppu_read,
.phy_write = mv88e6185_phy_ppu_write,
.port_set_link = mv88e6xxx_port_set_link,
+ .port_sync_link = mv88e6185_port_sync_link,
.port_set_speed_duplex = mv88e6185_port_set_speed_duplex,
.port_set_frame_mode = mv88e6085_port_set_frame_mode,
.port_set_egress_floods = mv88e6185_port_set_egress_floods,
@@ -3260,6 +3263,9 @@ static const struct mv88e6xxx_ops mv88e6095_ops = {
.stats_get_strings = mv88e6095_stats_get_strings,
.stats_get_stats = mv88e6095_stats_get_stats,
.mgmt_rsvd2cpu = mv88e6185_g2_mgmt_rsvd2cpu,
+ .serdes_power = mv88e6185_serdes_power,
+ .serdes_get_lane = mv88e6185_serdes_get_lane,
+ .serdes_pcs_get_state = mv88e6185_serdes_pcs_get_state,
.ppu_enable = mv88e6185_g1_ppu_enable,
.ppu_disable = mv88e6185_g1_ppu_disable,
.reset = mv88e6185_g1_reset,
@@ -3278,6 +3284,7 @@ static const struct mv88e6xxx_ops mv88e6097_ops = {
.phy_read = mv88e6xxx_g2_smi_phy_read,
.phy_write = mv88e6xxx_g2_smi_phy_write,
.port_set_link = mv88e6xxx_port_set_link,
+ .port_sync_link = mv88e6185_port_sync_link,
.port_set_speed_duplex = mv88e6185_port_set_speed_duplex,
.port_tag_remap = mv88e6095_port_tag_remap,
.port_set_frame_mode = mv88e6351_port_set_frame_mode,
@@ -3298,6 +3305,12 @@ static const struct mv88e6xxx_ops mv88e6097_ops = {
.set_egress_port = mv88e6095_g1_set_egress_port,
.watchdog_ops = &mv88e6097_watchdog_ops,
.mgmt_rsvd2cpu = mv88e6352_g2_mgmt_rsvd2cpu,
+ .serdes_power = mv88e6185_serdes_power,
+ .serdes_get_lane = mv88e6185_serdes_get_lane,
+ .serdes_pcs_get_state = mv88e6185_serdes_pcs_get_state,
+ .serdes_irq_mapping = mv88e6390_serdes_irq_mapping,
+ .serdes_irq_enable = mv88e6097_serdes_irq_enable,
+ .serdes_irq_status = mv88e6097_serdes_irq_status,
.pot_clear = mv88e6xxx_g2_pot_clear,
.reset = mv88e6352_g1_reset,
.rmu_disable = mv88e6085_g1_rmu_disable,
@@ -3316,6 +3329,7 @@ static const struct mv88e6xxx_ops mv88e6123_ops = {
.phy_read = mv88e6xxx_g2_smi_phy_read,
.phy_write = mv88e6xxx_g2_smi_phy_write,
.port_set_link = mv88e6xxx_port_set_link,
+ .port_sync_link = mv88e6xxx_port_sync_link,
.port_set_speed_duplex = mv88e6185_port_set_speed_duplex,
.port_set_frame_mode = mv88e6085_port_set_frame_mode,
.port_set_egress_floods = mv88e6352_port_set_egress_floods,
@@ -3350,6 +3364,7 @@ static const struct mv88e6xxx_ops mv88e6131_ops = {
.phy_read = mv88e6185_phy_ppu_read,
.phy_write = mv88e6185_phy_ppu_write,
.port_set_link = mv88e6xxx_port_set_link,
+ .port_sync_link = mv88e6xxx_port_sync_link,
.port_set_speed_duplex = mv88e6185_port_set_speed_duplex,
.port_tag_remap = mv88e6095_port_tag_remap,
.port_set_frame_mode = mv88e6351_port_set_frame_mode,
@@ -3391,6 +3406,7 @@ static const struct mv88e6xxx_ops mv88e6141_ops = {
.phy_read = mv88e6xxx_g2_smi_phy_read,
.phy_write = mv88e6xxx_g2_smi_phy_write,
.port_set_link = mv88e6xxx_port_set_link,
+ .port_sync_link = mv88e6xxx_port_sync_link,
.port_set_rgmii_delay = mv88e6390_port_set_rgmii_delay,
.port_set_speed_duplex = mv88e6341_port_set_speed_duplex,
.port_max_speed_mode = mv88e6341_port_max_speed_mode,
@@ -3442,6 +3458,7 @@ static const struct mv88e6xxx_ops mv88e6161_ops = {
.phy_read = mv88e6xxx_g2_smi_phy_read,
.phy_write = mv88e6xxx_g2_smi_phy_write,
.port_set_link = mv88e6xxx_port_set_link,
+ .port_sync_link = mv88e6xxx_port_sync_link,
.port_set_speed_duplex = mv88e6185_port_set_speed_duplex,
.port_tag_remap = mv88e6095_port_tag_remap,
.port_set_frame_mode = mv88e6351_port_set_frame_mode,
@@ -3483,6 +3500,7 @@ static const struct mv88e6xxx_ops mv88e6165_ops = {
.phy_read = mv88e6165_phy_read,
.phy_write = mv88e6165_phy_write,
.port_set_link = mv88e6xxx_port_set_link,
+ .port_sync_link = mv88e6xxx_port_sync_link,
.port_set_speed_duplex = mv88e6185_port_set_speed_duplex,
.port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit,
.port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
@@ -3517,6 +3535,7 @@ static const struct mv88e6xxx_ops mv88e6171_ops = {
.phy_read = mv88e6xxx_g2_smi_phy_read,
.phy_write = mv88e6xxx_g2_smi_phy_write,
.port_set_link = mv88e6xxx_port_set_link,
+ .port_sync_link = mv88e6xxx_port_sync_link,
.port_set_rgmii_delay = mv88e6352_port_set_rgmii_delay,
.port_set_speed_duplex = mv88e6185_port_set_speed_duplex,
.port_tag_remap = mv88e6095_port_tag_remap,
@@ -3559,6 +3578,7 @@ static const struct mv88e6xxx_ops mv88e6172_ops = {
.phy_read = mv88e6xxx_g2_smi_phy_read,
.phy_write = mv88e6xxx_g2_smi_phy_write,
.port_set_link = mv88e6xxx_port_set_link,
+ .port_sync_link = mv88e6xxx_port_sync_link,
.port_set_rgmii_delay = mv88e6352_port_set_rgmii_delay,
.port_set_speed_duplex = mv88e6352_port_set_speed_duplex,
.port_tag_remap = mv88e6095_port_tag_remap,
@@ -3610,6 +3630,7 @@ static const struct mv88e6xxx_ops mv88e6175_ops = {
.phy_read = mv88e6xxx_g2_smi_phy_read,
.phy_write = mv88e6xxx_g2_smi_phy_write,
.port_set_link = mv88e6xxx_port_set_link,
+ .port_sync_link = mv88e6xxx_port_sync_link,
.port_set_rgmii_delay = mv88e6352_port_set_rgmii_delay,
.port_set_speed_duplex = mv88e6185_port_set_speed_duplex,
.port_tag_remap = mv88e6095_port_tag_remap,
@@ -3652,6 +3673,7 @@ static const struct mv88e6xxx_ops mv88e6176_ops = {
.phy_read = mv88e6xxx_g2_smi_phy_read,
.phy_write = mv88e6xxx_g2_smi_phy_write,
.port_set_link = mv88e6xxx_port_set_link,
+ .port_sync_link = mv88e6xxx_port_sync_link,
.port_set_rgmii_delay = mv88e6352_port_set_rgmii_delay,
.port_set_speed_duplex = mv88e6352_port_set_speed_duplex,
.port_tag_remap = mv88e6095_port_tag_remap,
@@ -3705,6 +3727,7 @@ static const struct mv88e6xxx_ops mv88e6185_ops = {
.phy_read = mv88e6185_phy_ppu_read,
.phy_write = mv88e6185_phy_ppu_write,
.port_set_link = mv88e6xxx_port_set_link,
+ .port_sync_link = mv88e6185_port_sync_link,
.port_set_speed_duplex = mv88e6185_port_set_speed_duplex,
.port_set_frame_mode = mv88e6085_port_set_frame_mode,
.port_set_egress_floods = mv88e6185_port_set_egress_floods,
@@ -3722,6 +3745,9 @@ static const struct mv88e6xxx_ops mv88e6185_ops = {
.set_egress_port = mv88e6095_g1_set_egress_port,
.watchdog_ops = &mv88e6097_watchdog_ops,
.mgmt_rsvd2cpu = mv88e6185_g2_mgmt_rsvd2cpu,
+ .serdes_power = mv88e6185_serdes_power,
+ .serdes_get_lane = mv88e6185_serdes_get_lane,
+ .serdes_pcs_get_state = mv88e6185_serdes_pcs_get_state,
.set_cascade_port = mv88e6185_g1_set_cascade_port,
.ppu_enable = mv88e6185_g1_ppu_enable,
.ppu_disable = mv88e6185_g1_ppu_disable,
@@ -3742,6 +3768,7 @@ static const struct mv88e6xxx_ops mv88e6190_ops = {
.phy_read = mv88e6xxx_g2_smi_phy_read,
.phy_write = mv88e6xxx_g2_smi_phy_write,
.port_set_link = mv88e6xxx_port_set_link,
+ .port_sync_link = mv88e6xxx_port_sync_link,
.port_set_rgmii_delay = mv88e6390_port_set_rgmii_delay,
.port_set_speed_duplex = mv88e6390_port_set_speed_duplex,
.port_max_speed_mode = mv88e6390_port_max_speed_mode,
@@ -3801,6 +3828,7 @@ static const struct mv88e6xxx_ops mv88e6190x_ops = {
.phy_read = mv88e6xxx_g2_smi_phy_read,
.phy_write = mv88e6xxx_g2_smi_phy_write,
.port_set_link = mv88e6xxx_port_set_link,
+ .port_sync_link = mv88e6xxx_port_sync_link,
.port_set_rgmii_delay = mv88e6390_port_set_rgmii_delay,
.port_set_speed_duplex = mv88e6390x_port_set_speed_duplex,
.port_max_speed_mode = mv88e6390x_port_max_speed_mode,
@@ -3860,6 +3888,7 @@ static const struct mv88e6xxx_ops mv88e6191_ops = {
.phy_read = mv88e6xxx_g2_smi_phy_read,
.phy_write = mv88e6xxx_g2_smi_phy_write,
.port_set_link = mv88e6xxx_port_set_link,
+ .port_sync_link = mv88e6xxx_port_sync_link,
.port_set_rgmii_delay = mv88e6390_port_set_rgmii_delay,
.port_set_speed_duplex = mv88e6390_port_set_speed_duplex,
.port_max_speed_mode = mv88e6390_port_max_speed_mode,
@@ -3919,6 +3948,7 @@ static const struct mv88e6xxx_ops mv88e6240_ops = {
.phy_read = mv88e6xxx_g2_smi_phy_read,
.phy_write = mv88e6xxx_g2_smi_phy_write,
.port_set_link = mv88e6xxx_port_set_link,
+ .port_sync_link = mv88e6xxx_port_sync_link,
.port_set_rgmii_delay = mv88e6352_port_set_rgmii_delay,
.port_set_speed_duplex = mv88e6352_port_set_speed_duplex,
.port_tag_remap = mv88e6095_port_tag_remap,
@@ -3977,6 +4007,7 @@ static const struct mv88e6xxx_ops mv88e6250_ops = {
.phy_read = mv88e6xxx_g2_smi_phy_read,
.phy_write = mv88e6xxx_g2_smi_phy_write,
.port_set_link = mv88e6xxx_port_set_link,
+ .port_sync_link = mv88e6xxx_port_sync_link,
.port_set_rgmii_delay = mv88e6352_port_set_rgmii_delay,
.port_set_speed_duplex = mv88e6250_port_set_speed_duplex,
.port_tag_remap = mv88e6095_port_tag_remap,
@@ -4014,6 +4045,7 @@ static const struct mv88e6xxx_ops mv88e6290_ops = {
.phy_read = mv88e6xxx_g2_smi_phy_read,
.phy_write = mv88e6xxx_g2_smi_phy_write,
.port_set_link = mv88e6xxx_port_set_link,
+ .port_sync_link = mv88e6xxx_port_sync_link,
.port_set_rgmii_delay = mv88e6390_port_set_rgmii_delay,
.port_set_speed_duplex = mv88e6390_port_set_speed_duplex,
.port_max_speed_mode = mv88e6390_port_max_speed_mode,
@@ -4075,6 +4107,7 @@ static const struct mv88e6xxx_ops mv88e6320_ops = {
.phy_read = mv88e6xxx_g2_smi_phy_read,
.phy_write = mv88e6xxx_g2_smi_phy_write,
.port_set_link = mv88e6xxx_port_set_link,
+ .port_sync_link = mv88e6xxx_port_sync_link,
.port_set_speed_duplex = mv88e6185_port_set_speed_duplex,
.port_tag_remap = mv88e6095_port_tag_remap,
.port_set_frame_mode = mv88e6351_port_set_frame_mode,
@@ -4117,6 +4150,7 @@ static const struct mv88e6xxx_ops mv88e6321_ops = {
.phy_read = mv88e6xxx_g2_smi_phy_read,
.phy_write = mv88e6xxx_g2_smi_phy_write,
.port_set_link = mv88e6xxx_port_set_link,
+ .port_sync_link = mv88e6xxx_port_sync_link,
.port_set_speed_duplex = mv88e6185_port_set_speed_duplex,
.port_tag_remap = mv88e6095_port_tag_remap,
.port_set_frame_mode = mv88e6351_port_set_frame_mode,
@@ -4157,6 +4191,7 @@ static const struct mv88e6xxx_ops mv88e6341_ops = {
.phy_read = mv88e6xxx_g2_smi_phy_read,
.phy_write = mv88e6xxx_g2_smi_phy_write,
.port_set_link = mv88e6xxx_port_set_link,
+ .port_sync_link = mv88e6xxx_port_sync_link,
.port_set_rgmii_delay = mv88e6390_port_set_rgmii_delay,
.port_set_speed_duplex = mv88e6341_port_set_speed_duplex,
.port_max_speed_mode = mv88e6341_port_max_speed_mode,
@@ -4210,6 +4245,7 @@ static const struct mv88e6xxx_ops mv88e6350_ops = {
.phy_read = mv88e6xxx_g2_smi_phy_read,
.phy_write = mv88e6xxx_g2_smi_phy_write,
.port_set_link = mv88e6xxx_port_set_link,
+ .port_sync_link = mv88e6xxx_port_sync_link,
.port_set_rgmii_delay = mv88e6352_port_set_rgmii_delay,
.port_set_speed_duplex = mv88e6185_port_set_speed_duplex,
.port_tag_remap = mv88e6095_port_tag_remap,
@@ -4250,6 +4286,7 @@ static const struct mv88e6xxx_ops mv88e6351_ops = {
.phy_read = mv88e6xxx_g2_smi_phy_read,
.phy_write = mv88e6xxx_g2_smi_phy_write,
.port_set_link = mv88e6xxx_port_set_link,
+ .port_sync_link = mv88e6xxx_port_sync_link,
.port_set_rgmii_delay = mv88e6352_port_set_rgmii_delay,
.port_set_speed_duplex = mv88e6185_port_set_speed_duplex,
.port_tag_remap = mv88e6095_port_tag_remap,
@@ -4294,6 +4331,7 @@ static const struct mv88e6xxx_ops mv88e6352_ops = {
.phy_read = mv88e6xxx_g2_smi_phy_read,
.phy_write = mv88e6xxx_g2_smi_phy_write,
.port_set_link = mv88e6xxx_port_set_link,
+ .port_sync_link = mv88e6xxx_port_sync_link,
.port_set_rgmii_delay = mv88e6352_port_set_rgmii_delay,
.port_set_speed_duplex = mv88e6352_port_set_speed_duplex,
.port_tag_remap = mv88e6095_port_tag_remap,
@@ -4354,6 +4392,7 @@ static const struct mv88e6xxx_ops mv88e6390_ops = {
.phy_read = mv88e6xxx_g2_smi_phy_read,
.phy_write = mv88e6xxx_g2_smi_phy_write,
.port_set_link = mv88e6xxx_port_set_link,
+ .port_sync_link = mv88e6xxx_port_sync_link,
.port_set_rgmii_delay = mv88e6390_port_set_rgmii_delay,
.port_set_speed_duplex = mv88e6390_port_set_speed_duplex,
.port_max_speed_mode = mv88e6390_port_max_speed_mode,
@@ -4417,6 +4456,7 @@ static const struct mv88e6xxx_ops mv88e6390x_ops = {
.phy_read = mv88e6xxx_g2_smi_phy_read,
.phy_write = mv88e6xxx_g2_smi_phy_write,
.port_set_link = mv88e6xxx_port_set_link,
+ .port_sync_link = mv88e6xxx_port_sync_link,
.port_set_rgmii_delay = mv88e6390_port_set_rgmii_delay,
.port_set_speed_duplex = mv88e6390x_port_set_speed_duplex,
.port_max_speed_mode = mv88e6390x_port_max_speed_mode,
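Every converted call site walks the VTU with the same getnext idiom, seeding vid with the highest value so the hardware wraps around to the first entry. A minimal sketch of the loop, built only from functions visible in this patch (error handling trimmed):

static int example_vtu_walk(struct mv88e6xxx_chip *chip)
{
	struct mv88e6xxx_vtu_entry vlan = {
		.vid = mv88e6xxx_max_vid(chip),	/* wraps to entry 0 */
		.valid = false,
	};
	int err;

	do {
		err = mv88e6xxx_g1_vtu_getnext(chip, &vlan);
		if (err || !vlan.valid)
			break;
		/* ... consume vlan.vid / vlan.fid here ... */
	} while (vlan.vid < mv88e6xxx_max_vid(chip));

	return err;
}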
diff --git a/drivers/net/dsa/mv88e6xxx/chip.h b/drivers/net/dsa/mv88e6xxx/chip.h
index 81c244fc0419..3543055bcb51 100644
--- a/drivers/net/dsa/mv88e6xxx/chip.h
+++ b/drivers/net/dsa/mv88e6xxx/chip.h
@@ -245,6 +245,7 @@ enum mv88e6xxx_region_id {
MV88E6XXX_REGION_GLOBAL1 = 0,
MV88E6XXX_REGION_GLOBAL2,
MV88E6XXX_REGION_ATU,
+ MV88E6XXX_REGION_VTU,
_MV88E6XXX_REGION_MAX,
};
@@ -416,6 +417,10 @@ struct mv88e6xxx_ops {
*/
int (*port_set_link)(struct mv88e6xxx_chip *chip, int port, int link);
+	/* Synchronise the port link state with that of the SERDES. */
+ int (*port_sync_link)(struct mv88e6xxx_chip *chip, int port, unsigned int mode, bool isup);
+
#define PAUSE_ON 1
#define PAUSE_OFF 0
@@ -672,6 +677,11 @@ static inline unsigned int mv88e6xxx_num_ports(struct mv88e6xxx_chip *chip)
return chip->info->num_ports;
}
+static inline unsigned int mv88e6xxx_max_vid(struct mv88e6xxx_chip *chip)
+{
+ return chip->info->max_vid;
+}
+
static inline u16 mv88e6xxx_port_mask(struct mv88e6xxx_chip *chip)
{
return GENMASK((s32)mv88e6xxx_num_ports(chip) - 1, 0);
diff --git a/drivers/net/dsa/mv88e6xxx/devlink.c b/drivers/net/dsa/mv88e6xxx/devlink.c
index ade04c036fd9..21953d6d484c 100644
--- a/drivers/net/dsa/mv88e6xxx/devlink.c
+++ b/drivers/net/dsa/mv88e6xxx/devlink.c
@@ -417,6 +417,92 @@ out:
return err;
}
+/**
+ * struct mv88e6xxx_devlink_vtu_entry - Devlink VTU entry
+ * @fid: Global1/2: FID and VLAN policy.
+ * @sid: Global1/3: SID, unknown filters and learning.
+ * @op: Global1/5: FID (old chipsets).
+ * @vid: Global1/6: VID, valid, and page.
+ * @data: Global1/7-9: Membership data and priority override.
+ * @resvd: Reserved. Also happens to align the size to 16B.
+ *
+ * The VTU entry format varies between chipset generations; the
+ * descriptions above represent the superset of all possible
+ * information, and not all fields are valid on all devices. Since this is
+ * a low-level debug interface, copy all data verbatim and defer
+ * parsing to the consumer.
+ */
+struct mv88e6xxx_devlink_vtu_entry {
+ u16 fid;
+ u16 sid;
+ u16 op;
+ u16 vid;
+ u16 data[3];
+ u16 resvd;
+};
+
+static int mv88e6xxx_region_vtu_snapshot(struct devlink *dl,
+ const struct devlink_region_ops *ops,
+ struct netlink_ext_ack *extack,
+ u8 **data)
+{
+ struct mv88e6xxx_devlink_vtu_entry *table, *entry;
+ struct dsa_switch *ds = dsa_devlink_to_ds(dl);
+ struct mv88e6xxx_chip *chip = ds->priv;
+ struct mv88e6xxx_vtu_entry vlan;
+ int err;
+
+ table = kcalloc(mv88e6xxx_max_vid(chip) + 1,
+ sizeof(struct mv88e6xxx_devlink_vtu_entry),
+ GFP_KERNEL);
+ if (!table)
+ return -ENOMEM;
+
+ entry = table;
+ vlan.vid = mv88e6xxx_max_vid(chip);
+ vlan.valid = false;
+
+ mv88e6xxx_reg_lock(chip);
+
+ do {
+ err = mv88e6xxx_g1_vtu_getnext(chip, &vlan);
+ if (err)
+ break;
+
+ if (!vlan.valid)
+ break;
+
+ err = err ? : mv88e6xxx_g1_read(chip, MV88E6352_G1_VTU_FID,
+ &entry->fid);
+ err = err ? : mv88e6xxx_g1_read(chip, MV88E6352_G1_VTU_SID,
+ &entry->sid);
+ err = err ? : mv88e6xxx_g1_read(chip, MV88E6XXX_G1_VTU_OP,
+ &entry->op);
+ err = err ? : mv88e6xxx_g1_read(chip, MV88E6XXX_G1_VTU_VID,
+ &entry->vid);
+ err = err ? : mv88e6xxx_g1_read(chip, MV88E6XXX_G1_VTU_DATA1,
+ &entry->data[0]);
+ err = err ? : mv88e6xxx_g1_read(chip, MV88E6XXX_G1_VTU_DATA2,
+ &entry->data[1]);
+ err = err ? : mv88e6xxx_g1_read(chip, MV88E6XXX_G1_VTU_DATA3,
+ &entry->data[2]);
+ if (err)
+ break;
+
+ entry++;
+ } while (vlan.vid < mv88e6xxx_max_vid(chip));
+
+ mv88e6xxx_reg_unlock(chip);
+
+ if (err) {
+ kfree(table);
+ return err;
+ }
+
+ *data = (u8 *)table;
+ return 0;
+}
+
static int mv88e6xxx_region_port_snapshot(struct devlink_port *devlink_port,
const struct devlink_port_region_ops *ops,
struct netlink_ext_ack *extack,
@@ -475,6 +561,12 @@ static struct devlink_region_ops mv88e6xxx_region_atu_ops = {
.destructor = kfree,
};
+static struct devlink_region_ops mv88e6xxx_region_vtu_ops = {
+ .name = "vtu",
+ .snapshot = mv88e6xxx_region_vtu_snapshot,
+ .destructor = kfree,
+};
+
static const struct devlink_port_region_ops mv88e6xxx_region_port_ops = {
.name = "port",
.snapshot = mv88e6xxx_region_port_snapshot,
@@ -498,6 +590,10 @@ static struct mv88e6xxx_region mv88e6xxx_regions[] = {
.ops = &mv88e6xxx_region_atu_ops
/* calculated at runtime */
},
+ [MV88E6XXX_REGION_VTU] = {
+ .ops = &mv88e6xxx_region_vtu_ops
+ /* calculated at runtime */
+ },
};
static void
@@ -576,9 +672,16 @@ static int mv88e6xxx_setup_devlink_regions_global(struct dsa_switch *ds,
ops = mv88e6xxx_regions[i].ops;
size = mv88e6xxx_regions[i].size;
- if (i == MV88E6XXX_REGION_ATU)
+ switch (i) {
+ case MV88E6XXX_REGION_ATU:
size = mv88e6xxx_num_databases(chip) *
sizeof(struct mv88e6xxx_devlink_atu_entry);
+ break;
+ case MV88E6XXX_REGION_VTU:
+ size = mv88e6xxx_max_vid(chip) *
+ sizeof(struct mv88e6xxx_devlink_vtu_entry);
+ break;
+ }
region = dsa_devlink_region_create(ds, ops, 1, size);
if (IS_ERR(region))
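From userspace, a snapshot of the new "vtu" region is a flat array of the 16-byte entries documented above; only valid entries are filled in, front to back. A hedged parsing sketch (the valid bit and VID mask follow the usual mv88e6xxx VTU_VID layout and are assumptions here, not something this patch defines):

#include <stdint.h>
#include <stdio.h>

struct vtu_entry {		/* mirrors mv88e6xxx_devlink_vtu_entry */
	uint16_t fid, sid, op, vid;
	uint16_t data[3];
	uint16_t resvd;
};

static void dump_vtu(const void *buf, size_t len)
{
	const struct vtu_entry *e = buf;
	size_t i;

	for (i = 0; i < len / sizeof(*e); i++, e++) {
		if (!(e->vid & 0x1000))	/* assumed valid bit */
			break;		/* entries are packed */
		printf("vid %u fid %u\n", e->vid & 0xfff, e->fid & 0xfff);
	}
}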
diff --git a/drivers/net/dsa/mv88e6xxx/global1.h b/drivers/net/dsa/mv88e6xxx/global1.h
index e05abe61fa11..80a182c5b98a 100644
--- a/drivers/net/dsa/mv88e6xxx/global1.h
+++ b/drivers/net/dsa/mv88e6xxx/global1.h
@@ -330,6 +330,8 @@ void mv88e6xxx_g1_atu_prob_irq_free(struct mv88e6xxx_chip *chip);
int mv88e6165_g1_atu_get_hash(struct mv88e6xxx_chip *chip, u8 *hash);
int mv88e6165_g1_atu_set_hash(struct mv88e6xxx_chip *chip, u8 hash);
+int mv88e6xxx_g1_vtu_getnext(struct mv88e6xxx_chip *chip,
+ struct mv88e6xxx_vtu_entry *entry);
int mv88e6185_g1_vtu_getnext(struct mv88e6xxx_chip *chip,
struct mv88e6xxx_vtu_entry *entry);
int mv88e6185_g1_vtu_loadpurge(struct mv88e6xxx_chip *chip,
diff --git a/drivers/net/dsa/mv88e6xxx/global1_vtu.c b/drivers/net/dsa/mv88e6xxx/global1_vtu.c
index 1048509a849b..66ddf67b8737 100644
--- a/drivers/net/dsa/mv88e6xxx/global1_vtu.c
+++ b/drivers/net/dsa/mv88e6xxx/global1_vtu.c
@@ -307,8 +307,8 @@ static int mv88e6xxx_g1_vtu_stu_get(struct mv88e6xxx_chip *chip,
return 0;
}
-static int mv88e6xxx_g1_vtu_getnext(struct mv88e6xxx_chip *chip,
- struct mv88e6xxx_vtu_entry *entry)
+int mv88e6xxx_g1_vtu_getnext(struct mv88e6xxx_chip *chip,
+ struct mv88e6xxx_vtu_entry *entry)
{
int err;
diff --git a/drivers/net/dsa/mv88e6xxx/port.c b/drivers/net/dsa/mv88e6xxx/port.c
index 8128dc607cf4..77a5fd1798cd 100644
--- a/drivers/net/dsa/mv88e6xxx/port.c
+++ b/drivers/net/dsa/mv88e6xxx/port.c
@@ -162,6 +162,42 @@ int mv88e6xxx_port_set_link(struct mv88e6xxx_chip *chip, int port, int link)
return 0;
}
+int mv88e6xxx_port_sync_link(struct mv88e6xxx_chip *chip, int port, unsigned int mode, bool isup)
+{
+ const struct mv88e6xxx_ops *ops = chip->info->ops;
+ int err = 0;
+ int link;
+
+ if (isup)
+ link = LINK_FORCED_UP;
+ else
+ link = LINK_FORCED_DOWN;
+
+ if (ops->port_set_link)
+ err = ops->port_set_link(chip, port, link);
+
+ return err;
+}
+
+int mv88e6185_port_sync_link(struct mv88e6xxx_chip *chip, int port, unsigned int mode, bool isup)
+{
+ const struct mv88e6xxx_ops *ops = chip->info->ops;
+ int err = 0;
+ int link;
+
+ if (mode == MLO_AN_INBAND)
+ link = LINK_UNFORCED;
+ else if (isup)
+ link = LINK_FORCED_UP;
+ else
+ link = LINK_FORCED_DOWN;
+
+ if (ops->port_set_link)
+ err = ops->port_set_link(chip, port, link);
+
+ return err;
+}
+
static int mv88e6xxx_port_set_speed_duplex(struct mv88e6xxx_chip *chip,
int port, int speed, bool alt_bit,
bool force_bit, int duplex)
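The only behavioural difference between the two helpers above is the in-band case; in summary:

/*
 *	helper				mode		isup	link value
 *	mv88e6xxx_port_sync_link	any		true	LINK_FORCED_UP
 *	mv88e6xxx_port_sync_link	any		false	LINK_FORCED_DOWN
 *	mv88e6185_port_sync_link	MLO_AN_INBAND	any	LINK_UNFORCED
 *	mv88e6185_port_sync_link	other		same as the generic helper
 */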
diff --git a/drivers/net/dsa/mv88e6xxx/port.h b/drivers/net/dsa/mv88e6xxx/port.h
index 44d76ac973f6..500e1d4896ff 100644
--- a/drivers/net/dsa/mv88e6xxx/port.h
+++ b/drivers/net/dsa/mv88e6xxx/port.h
@@ -298,6 +298,9 @@ int mv88e6390_port_set_rgmii_delay(struct mv88e6xxx_chip *chip, int port,
int mv88e6xxx_port_set_link(struct mv88e6xxx_chip *chip, int port, int link);
+int mv88e6xxx_port_sync_link(struct mv88e6xxx_chip *chip, int port, unsigned int mode, bool isup);
+int mv88e6185_port_sync_link(struct mv88e6xxx_chip *chip, int port, unsigned int mode, bool isup);
+
int mv88e6065_port_set_speed_duplex(struct mv88e6xxx_chip *chip, int port,
int speed, int duplex);
int mv88e6185_port_set_speed_duplex(struct mv88e6xxx_chip *chip, int port,
diff --git a/drivers/net/dsa/mv88e6xxx/serdes.c b/drivers/net/dsa/mv88e6xxx/serdes.c
index 9c07b4f3d345..3195936dc5be 100644
--- a/drivers/net/dsa/mv88e6xxx/serdes.c
+++ b/drivers/net/dsa/mv88e6xxx/serdes.c
@@ -400,14 +400,16 @@ void mv88e6352_serdes_get_regs(struct mv88e6xxx_chip *chip, int port, void *_p)
{
u16 *p = _p;
u16 reg;
+ int err;
int i;
if (!mv88e6352_port_has_serdes(chip, port))
return;
for (i = 0 ; i < 32; i++) {
- mv88e6352_serdes_read(chip, i, &reg);
- p[i] = reg;
+ err = mv88e6352_serdes_read(chip, i, &reg);
+ if (!err)
+ p[i] = reg;
}
}
@@ -428,6 +430,115 @@ u8 mv88e6341_serdes_get_lane(struct mv88e6xxx_chip *chip, int port)
return lane;
}
+int mv88e6185_serdes_power(struct mv88e6xxx_chip *chip, int port, u8 lane,
+ bool up)
+{
+	/* The serdes power can't be controlled on this switch chip, but we need
+	 * to supply this function to avoid returning -EOPNOTSUPP in
+	 * mv88e6xxx_serdes_power_up/mv88e6xxx_serdes_power_down.
+ */
+ return 0;
+}
+
+u8 mv88e6185_serdes_get_lane(struct mv88e6xxx_chip *chip, int port)
+{
+	/* There are no configurable serdes lanes on this switch chip, but we
+ * need to return non-zero so that callers of
+ * mv88e6xxx_serdes_get_lane() know this is a serdes port.
+ */
+ switch (chip->ports[port].cmode) {
+ case MV88E6185_PORT_STS_CMODE_SERDES:
+ case MV88E6185_PORT_STS_CMODE_1000BASE_X:
+ return 0xff;
+ default:
+ return 0;
+ }
+}
+
+int mv88e6185_serdes_pcs_get_state(struct mv88e6xxx_chip *chip, int port,
+ u8 lane, struct phylink_link_state *state)
+{
+ int err;
+ u16 status;
+
+ err = mv88e6xxx_port_read(chip, port, MV88E6XXX_PORT_STS, &status);
+ if (err)
+ return err;
+
+ state->link = !!(status & MV88E6XXX_PORT_STS_LINK);
+
+ if (state->link) {
+ state->duplex = status & MV88E6XXX_PORT_STS_DUPLEX ? DUPLEX_FULL : DUPLEX_HALF;
+
+ switch (status & MV88E6XXX_PORT_STS_SPEED_MASK) {
+ case MV88E6XXX_PORT_STS_SPEED_1000:
+ state->speed = SPEED_1000;
+ break;
+ case MV88E6XXX_PORT_STS_SPEED_100:
+ state->speed = SPEED_100;
+ break;
+ case MV88E6XXX_PORT_STS_SPEED_10:
+ state->speed = SPEED_10;
+ break;
+ default:
+ dev_err(chip->dev, "invalid PHY speed\n");
+ return -EINVAL;
+ }
+ } else {
+ state->duplex = DUPLEX_UNKNOWN;
+ state->speed = SPEED_UNKNOWN;
+ }
+
+ return 0;
+}
+
+int mv88e6097_serdes_irq_enable(struct mv88e6xxx_chip *chip, int port, u8 lane,
+ bool enable)
+{
+ u8 cmode = chip->ports[port].cmode;
+
+ /* The serdes interrupts are enabled in the G2_INT_MASK register. We
+ * need to return 0 to avoid returning -EOPNOTSUPP in
+	 * mv88e6xxx_serdes_irq_enable/mv88e6xxx_serdes_irq_disable.
+ */
+ switch (cmode) {
+ case MV88E6185_PORT_STS_CMODE_SERDES:
+ case MV88E6185_PORT_STS_CMODE_1000BASE_X:
+ return 0;
+ }
+
+ return -EOPNOTSUPP;
+}
+
+static void mv88e6097_serdes_irq_link(struct mv88e6xxx_chip *chip, int port)
+{
+ u16 status;
+ int err;
+
+ err = mv88e6xxx_port_read(chip, port, MV88E6XXX_PORT_STS, &status);
+ if (err) {
+ dev_err(chip->dev, "can't read port status: %d\n", err);
+ return;
+ }
+
+ dsa_port_phylink_mac_change(chip->ds, port, !!(status & MV88E6XXX_PORT_STS_LINK));
+}
+
+irqreturn_t mv88e6097_serdes_irq_status(struct mv88e6xxx_chip *chip, int port,
+ u8 lane)
+{
+ u8 cmode = chip->ports[port].cmode;
+
+ switch (cmode) {
+ case MV88E6185_PORT_STS_CMODE_SERDES:
+ case MV88E6185_PORT_STS_CMODE_1000BASE_X:
+ mv88e6097_serdes_irq_link(chip, port);
+ return IRQ_HANDLED;
+ }
+
+ return IRQ_NONE;
+}
+
u8 mv88e6390_serdes_get_lane(struct mv88e6xxx_chip *chip, int port)
{
u8 cmode = chip->ports[port].cmode;
@@ -987,6 +1098,7 @@ void mv88e6390_serdes_get_regs(struct mv88e6xxx_chip *chip, int port, void *_p)
u16 *p = _p;
int lane;
u16 reg;
+ int err;
int i;
lane = mv88e6xxx_serdes_get_lane(chip, port);
@@ -994,8 +1106,9 @@ void mv88e6390_serdes_get_regs(struct mv88e6xxx_chip *chip, int port, void *_p)
return;
for (i = 0 ; i < ARRAY_SIZE(mv88e6390_serdes_regs); i++) {
- mv88e6390_serdes_read(chip, lane, MDIO_MMD_PHYXS,
- mv88e6390_serdes_regs[i], &reg);
- p[i] = reg;
+ err = mv88e6390_serdes_read(chip, lane, MDIO_MMD_PHYXS,
+ mv88e6390_serdes_regs[i], &reg);
+ if (!err)
+ p[i] = reg;
}
}
diff --git a/drivers/net/dsa/mv88e6xxx/serdes.h b/drivers/net/dsa/mv88e6xxx/serdes.h
index 14315f26228a..93822ef9bab8 100644
--- a/drivers/net/dsa/mv88e6xxx/serdes.h
+++ b/drivers/net/dsa/mv88e6xxx/serdes.h
@@ -73,6 +73,7 @@
#define MV88E6390_PG_CONTROL 0xf010
#define MV88E6390_PG_CONTROL_ENABLE_PC BIT(0)
+u8 mv88e6185_serdes_get_lane(struct mv88e6xxx_chip *chip, int port);
u8 mv88e6341_serdes_get_lane(struct mv88e6xxx_chip *chip, int port);
u8 mv88e6352_serdes_get_lane(struct mv88e6xxx_chip *chip, int port);
u8 mv88e6390_serdes_get_lane(struct mv88e6xxx_chip *chip, int port);
@@ -85,6 +86,8 @@ int mv88e6390_serdes_pcs_config(struct mv88e6xxx_chip *chip, int port,
u8 lane, unsigned int mode,
phy_interface_t interface,
const unsigned long *advertise);
+int mv88e6185_serdes_pcs_get_state(struct mv88e6xxx_chip *chip, int port,
+ u8 lane, struct phylink_link_state *state);
int mv88e6352_serdes_pcs_get_state(struct mv88e6xxx_chip *chip, int port,
u8 lane, struct phylink_link_state *state);
int mv88e6390_serdes_pcs_get_state(struct mv88e6xxx_chip *chip, int port,
@@ -101,14 +104,20 @@ unsigned int mv88e6352_serdes_irq_mapping(struct mv88e6xxx_chip *chip,
int port);
unsigned int mv88e6390_serdes_irq_mapping(struct mv88e6xxx_chip *chip,
int port);
+int mv88e6185_serdes_power(struct mv88e6xxx_chip *chip, int port, u8 lane,
+ bool up);
int mv88e6352_serdes_power(struct mv88e6xxx_chip *chip, int port, u8 lane,
bool on);
int mv88e6390_serdes_power(struct mv88e6xxx_chip *chip, int port, u8 lane,
bool on);
+int mv88e6097_serdes_irq_enable(struct mv88e6xxx_chip *chip, int port, u8 lane,
+ bool enable);
int mv88e6352_serdes_irq_enable(struct mv88e6xxx_chip *chip, int port, u8 lane,
bool enable);
int mv88e6390_serdes_irq_enable(struct mv88e6xxx_chip *chip, int port, u8 lane,
bool enable);
+irqreturn_t mv88e6097_serdes_irq_status(struct mv88e6xxx_chip *chip, int port,
+ u8 lane);
irqreturn_t mv88e6352_serdes_irq_status(struct mv88e6xxx_chip *chip, int port,
u8 lane);
irqreturn_t mv88e6390_serdes_irq_status(struct mv88e6xxx_chip *chip, int port,
diff --git a/drivers/net/dsa/ocelot/felix.c b/drivers/net/dsa/ocelot/felix.c
index f791860d495f..ada75fa15861 100644
--- a/drivers/net/dsa/ocelot/felix.c
+++ b/drivers/net/dsa/ocelot/felix.c
@@ -112,10 +112,32 @@ static void felix_bridge_leave(struct dsa_switch *ds, int port,
ocelot_port_bridge_leave(ocelot, port, br);
}
-/* This callback needs to be present */
static int felix_vlan_prepare(struct dsa_switch *ds, int port,
const struct switchdev_obj_port_vlan *vlan)
{
+ struct ocelot *ocelot = ds->priv;
+ u16 vid, flags = vlan->flags;
+ int err;
+
+	/* Ocelot switches copy frames as-is to the CPU, so the flags
+	 * (egress-untagged or not, pvid or not) make no difference. This
+ * behavior is already better than what DSA just tries to approximate
+ * when it installs the VLAN with the same flags on the CPU port.
+ * Just accept any configuration, and don't let ocelot deny installing
+ * multiple native VLANs on the NPI port, because the switch doesn't
+ * look at the port tag settings towards the NPI interface anyway.
+ */
+ if (port == ocelot->npi)
+ return 0;
+
+ for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) {
+ err = ocelot_vlan_prepare(ocelot, port, vid,
+ flags & BRIDGE_VLAN_INFO_PVID,
+ flags & BRIDGE_VLAN_INFO_UNTAGGED);
+ if (err)
+ return err;
+ }
+
return 0;
}
@@ -135,9 +157,6 @@ static void felix_vlan_add(struct dsa_switch *ds, int port,
u16 vid;
int err;
- if (dsa_is_cpu_port(ds, port))
- flags &= ~BRIDGE_VLAN_INFO_UNTAGGED;
-
for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) {
err = ocelot_vlan_add(ocelot, port, vid,
flags & BRIDGE_VLAN_INFO_PVID,
diff --git a/drivers/net/dummy.c b/drivers/net/dummy.c
index bab3a9bb5e6f..f82ad7419508 100644
--- a/drivers/net/dummy.c
+++ b/drivers/net/dummy.c
@@ -124,7 +124,7 @@ static void dummy_setup(struct net_device *dev)
dev->flags &= ~IFF_MULTICAST;
dev->priv_flags |= IFF_LIVE_ADDR_CHANGE | IFF_NO_QUEUE;
dev->features |= NETIF_F_SG | NETIF_F_FRAGLIST;
- dev->features |= NETIF_F_ALL_TSO;
+ dev->features |= NETIF_F_GSO_SOFTWARE;
dev->features |= NETIF_F_HW_CSUM | NETIF_F_HIGHDMA | NETIF_F_LLTX;
dev->features |= NETIF_F_GSO_ENCAP_ALL;
dev->hw_features |= dev->features;
diff --git a/drivers/net/ethernet/8390/mac8390.c b/drivers/net/ethernet/8390/mac8390.c
index d60a86aa8aa8..9aac7119d382 100644
--- a/drivers/net/ethernet/8390/mac8390.c
+++ b/drivers/net/ethernet/8390/mac8390.c
@@ -175,7 +175,6 @@ static enum mac8390_type mac8390_ident(struct nubus_rsrc *fres)
default:
return MAC8390_APPLE;
}
- break;
case NUBUS_DRSW_APPLE:
switch (fres->dr_hw) {
@@ -186,11 +185,9 @@ static enum mac8390_type mac8390_ident(struct nubus_rsrc *fres)
default:
return MAC8390_APPLE;
}
- break;
case NUBUS_DRSW_ASANTE:
return MAC8390_ASANTE;
- break;
case NUBUS_DRSW_TECHWORKS:
case NUBUS_DRSW_DAYNA2:
@@ -199,11 +196,9 @@ static enum mac8390_type mac8390_ident(struct nubus_rsrc *fres)
return MAC8390_CABLETRON;
else
return MAC8390_APPLE;
- break;
case NUBUS_DRSW_FARALLON:
return MAC8390_FARALLON;
- break;
case NUBUS_DRSW_KINETICS:
switch (fres->dr_hw) {
@@ -212,7 +207,6 @@ static enum mac8390_type mac8390_ident(struct nubus_rsrc *fres)
default:
return MAC8390_KINETICS;
}
- break;
case NUBUS_DRSW_DAYNA:
/*
@@ -224,7 +218,6 @@ static enum mac8390_type mac8390_ident(struct nubus_rsrc *fres)
return MAC8390_NONE;
else
return MAC8390_DAYNA;
- break;
}
return MAC8390_NONE;
}
diff --git a/drivers/net/ethernet/8390/ne.c b/drivers/net/ethernet/8390/ne.c
index 1c97e39b478e..e9756d0ea5b8 100644
--- a/drivers/net/ethernet/8390/ne.c
+++ b/drivers/net/ethernet/8390/ne.c
@@ -710,7 +710,7 @@ static void ne_block_output(struct net_device *dev, int count,
retry:
#endif
-#ifdef NE8390_RW_BUGFIX
+#ifdef NE_RW_BUGFIX
/* Handle the read-before-write bug the same way as the
Crynwr packet driver -- the NatSemi method doesn't work.
Actually this doesn't always work either, but if you have
diff --git a/drivers/net/ethernet/8390/ne2k-pci.c b/drivers/net/ethernet/8390/ne2k-pci.c
index bc6edb3f1af3..d6715008e04d 100644
--- a/drivers/net/ethernet/8390/ne2k-pci.c
+++ b/drivers/net/ethernet/8390/ne2k-pci.c
@@ -610,7 +610,7 @@ static void ne2k_pci_block_output(struct net_device *dev, int count,
/* We should already be in page 0, but to be safe... */
outb(E8390_PAGE0+E8390_START+E8390_NODMA, nic_base + NE_CMD);
-#ifdef NE8390_RW_BUGFIX
+#ifdef NE_RW_BUGFIX
/* Handle the read-before-write bug the same way as the
* Crynwr packet driver -- the NatSemi method doesn't work.
* Actually this doesn't always work either, but if you have
diff --git a/drivers/net/ethernet/amazon/ena/ena_ethtool.c b/drivers/net/ethernet/amazon/ena/ena_ethtool.c
index 3b2cd28f962d..6cdd9efe8df3 100644
--- a/drivers/net/ethernet/amazon/ena/ena_ethtool.c
+++ b/drivers/net/ethernet/amazon/ena/ena_ethtool.c
@@ -3,6 +3,7 @@
* Copyright 2015-2020 Amazon.com, Inc. or its affiliates. All rights reserved.
*/
+#include <linux/ethtool.h>
#include <linux/pci.h>
#include "ena_netdev.h"
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
index bf5e0e9bd0e2..6c049864dac0 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
@@ -1474,7 +1474,7 @@ int aq_nic_setup_tc_mqprio(struct aq_nic_s *self, u32 tcs, u8 *prio_tc_map)
for (i = 0; i < sizeof(cfg->prio_tc_map); i++)
cfg->prio_tc_map[i] = cfg->tcs * i / 8;
- cfg->is_qos = (tcs != 0 ? true : false);
+ cfg->is_qos = !!tcs;
cfg->is_ptp = (cfg->tcs <= AQ_HW_PTP_TC);
if (!cfg->is_ptp)
netdev_warn(self->ndev, "%s\n",
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_nic.h b/drivers/net/ethernet/aquantia/atlantic/aq_nic.h
index 926cca9a0c83..1a7148041e3d 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_nic.h
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_nic.h
@@ -10,6 +10,8 @@
#ifndef AQ_NIC_H
#define AQ_NIC_H
+#include <linux/ethtool.h>
+
#include "aq_common.h"
#include "aq_rss.h"
#include "aq_hw.h"
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
index 47b3c3127879..950ea26ae0d2 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
@@ -20,6 +20,7 @@
#define DRV_VER_MIN 10
#define DRV_VER_UPD 1
+#include <linux/ethtool.h>
#include <linux/interrupt.h>
#include <linux/rhashtable.h>
#include <linux/crash_dump.h>
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
index 184b6d0513b2..6b7b69ed62db 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
@@ -30,14 +30,12 @@ bnxt_dl_flash_update(struct devlink *dl,
return -EPERM;
}
- devlink_flash_update_begin_notify(dl);
devlink_flash_update_status_notify(dl, "Preparing to flash", NULL, 0, 0);
- rc = bnxt_flash_package_from_file(bp->dev, params->file_name, 0);
+ rc = bnxt_flash_package_from_fw_obj(bp->dev, params->fw, 0);
if (!rc)
devlink_flash_update_status_notify(dl, "Flashing done", NULL, 0, 0);
else
devlink_flash_update_status_notify(dl, "Flashing failed", NULL, 0, 0);
- devlink_flash_update_end_notify(dl);
return rc;
}
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
index 1471c9a36238..7b444fcb6289 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
@@ -2419,13 +2419,12 @@ static int bnxt_flash_firmware_from_file(struct net_device *dev,
return rc;
}
-int bnxt_flash_package_from_file(struct net_device *dev, const char *filename,
- u32 install_type)
+int bnxt_flash_package_from_fw_obj(struct net_device *dev, const struct firmware *fw,
+ u32 install_type)
{
struct bnxt *bp = netdev_priv(dev);
struct hwrm_nvm_install_update_output *resp = bp->hwrm_cmd_resp_addr;
struct hwrm_nvm_install_update_input install = {0};
- const struct firmware *fw;
u32 item_len;
int rc = 0;
u16 index;
@@ -2440,13 +2439,6 @@ int bnxt_flash_package_from_file(struct net_device *dev, const char *filename,
return rc;
}
- rc = request_firmware(&fw, filename, &dev->dev);
- if (rc != 0) {
- netdev_err(dev, "PKG error %d requesting file: %s\n",
- rc, filename);
- return rc;
- }
-
if (fw->size > item_len) {
netdev_err(dev, "PKG insufficient update area in nvram: %lu\n",
(unsigned long)fw->size);
@@ -2478,7 +2470,6 @@ int bnxt_flash_package_from_file(struct net_device *dev, const char *filename,
dma_handle);
}
}
- release_firmware(fw);
if (rc)
goto err_exit;
@@ -2517,6 +2508,26 @@ err_exit:
return rc;
}
+static int bnxt_flash_package_from_file(struct net_device *dev, const char *filename,
+ u32 install_type)
+{
+ const struct firmware *fw;
+ int rc;
+
+ rc = request_firmware(&fw, filename, &dev->dev);
+ if (rc != 0) {
+ netdev_err(dev, "PKG error %d requesting file: %s\n",
+ rc, filename);
+ return rc;
+ }
+
+ rc = bnxt_flash_package_from_fw_obj(dev, fw, install_type);
+
+ release_firmware(fw);
+
+ return rc;
+}
+
static int bnxt_flash_device(struct net_device *dev,
struct ethtool_flash *flash)
{
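The refactor above makes firmware-object ownership explicit: bnxt_flash_package_from_fw_obj() neither requests nor releases the firmware, so the devlink path can pass the params->fw it already holds, while the ethtool path keeps the request/release pair in its wrapper. Illustrative call shape for a caller that owns the firmware object:

	/* fw was obtained (and will be released) by the caller */
	rc = bnxt_flash_package_from_fw_obj(bp->dev, fw, 0);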
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h
index fa6fbde52bea..0a57cb6a4a4b 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h
@@ -94,8 +94,8 @@ u32 bnxt_fw_to_ethtool_speed(u16);
u16 bnxt_get_fw_auto_link_speeds(u32);
int bnxt_hwrm_nvm_get_dev_info(struct bnxt *bp,
struct hwrm_nvm_get_dev_info_output *nvm_dev_info);
-int bnxt_flash_package_from_file(struct net_device *dev, const char *filename,
- u32 install_type);
+int bnxt_flash_package_from_fw_obj(struct net_device *dev, const struct firmware *fw,
+ u32 install_type);
void bnxt_ethtool_init(struct bnxt *bp);
void bnxt_ethtool_free(struct bnxt *bp);
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
index 23b80aa171dd..a217316228f4 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
@@ -8,6 +8,7 @@
* the Free Software Foundation.
*/
+#include <linux/ethtool.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
diff --git a/drivers/net/ethernet/cadence/macb.h b/drivers/net/ethernet/cadence/macb.h
index 5de47f6fde5a..1f5da4e4f4b2 100644
--- a/drivers/net/ethernet/cadence/macb.h
+++ b/drivers/net/ethernet/cadence/macb.h
@@ -77,10 +77,12 @@
#define MACB_RBQPH 0x04D4
/* GEM register offsets. */
+#define GEM_NCR 0x0000 /* Network Control */
#define GEM_NCFGR 0x0004 /* Network Config */
#define GEM_USRIO 0x000c /* User IO */
#define GEM_DMACFG 0x0010 /* DMA Configuration */
#define GEM_JML 0x0048 /* Jumbo Max Length */
+#define GEM_HS_MAC_CONFIG 0x0050 /* GEM high speed config */
#define GEM_HRB 0x0080 /* Hash Bottom */
#define GEM_HRT 0x0084 /* Hash Top */
#define GEM_SA1B 0x0088 /* Specific1 Bottom */
@@ -166,6 +168,9 @@
#define GEM_DCFG7 0x0298 /* Design Config 7 */
#define GEM_DCFG8 0x029C /* Design Config 8 */
#define GEM_DCFG10 0x02A4 /* Design Config 10 */
+#define GEM_DCFG12 0x02AC /* Design Config 12 */
+#define GEM_USX_CONTROL 0x0A80 /* High speed PCS control register */
+#define GEM_USX_STATUS 0x0A88 /* High speed PCS status register */
#define GEM_TXBDCTRL 0x04cc /* TX Buffer Descriptor control register */
#define GEM_RXBDCTRL 0x04d0 /* RX Buffer Descriptor control register */
@@ -272,11 +277,19 @@
#define MACB_IRXFCS_OFFSET 19
#define MACB_IRXFCS_SIZE 1
+/* GEM specific NCR bitfields. */
+#define GEM_ENABLE_HS_MAC_OFFSET 31
+#define GEM_ENABLE_HS_MAC_SIZE 1
+
/* GEM specific NCFGR bitfields. */
+#define GEM_FD_OFFSET 1 /* Full duplex */
+#define GEM_FD_SIZE 1
#define GEM_GBE_OFFSET 10 /* Gigabit mode enable */
#define GEM_GBE_SIZE 1
#define GEM_PCSSEL_OFFSET 11
#define GEM_PCSSEL_SIZE 1
+#define GEM_PAE_OFFSET 13 /* Pause enable */
+#define GEM_PAE_SIZE 1
#define GEM_CLK_OFFSET 18 /* MDC clock division */
#define GEM_CLK_SIZE 3
#define GEM_DBW_OFFSET 21 /* Data bus width */
@@ -461,11 +474,17 @@
#define MACB_REV_OFFSET 0
#define MACB_REV_SIZE 16
+/* Bitfield in HS_MAC_CONFIG */
+#define GEM_HS_MAC_SPEED_OFFSET 0
+#define GEM_HS_MAC_SPEED_SIZE 3
+
/* Bitfields in DCFG1. */
#define GEM_IRQCOR_OFFSET 23
#define GEM_IRQCOR_SIZE 1
#define GEM_DBWDEF_OFFSET 25
#define GEM_DBWDEF_SIZE 3
+#define GEM_NO_PCS_OFFSET 0
+#define GEM_NO_PCS_SIZE 1
/* Bitfields in DCFG2. */
#define GEM_RX_PKT_BUFF_OFFSET 20
@@ -500,6 +519,28 @@
#define GEM_RXBD_RDBUFF_OFFSET 8
#define GEM_RXBD_RDBUFF_SIZE 4
+/* Bitfields in DCFG12. */
+#define GEM_HIGH_SPEED_OFFSET 26
+#define GEM_HIGH_SPEED_SIZE 1
+
+/* Bitfields in USX_CONTROL. */
+#define GEM_USX_CTRL_SPEED_OFFSET 14
+#define GEM_USX_CTRL_SPEED_SIZE 3
+#define GEM_SERDES_RATE_OFFSET 12
+#define GEM_SERDES_RATE_SIZE 2
+#define GEM_RX_SCR_BYPASS_OFFSET 9
+#define GEM_RX_SCR_BYPASS_SIZE 1
+#define GEM_TX_SCR_BYPASS_OFFSET 8
+#define GEM_TX_SCR_BYPASS_SIZE 1
+#define GEM_TX_EN_OFFSET 1
+#define GEM_TX_EN_SIZE 1
+#define GEM_SIGNAL_OK_OFFSET 0
+#define GEM_SIGNAL_OK_SIZE 1
+
+/* Bitfields in USX_STATUS. */
+#define GEM_USX_BLOCK_LOCK_OFFSET 0
+#define GEM_USX_BLOCK_LOCK_SIZE 1
+
/* Bitfields in TISUBN */
#define GEM_SUBNSINCR_OFFSET 0
#define GEM_SUBNSINCRL_OFFSET 24
@@ -663,6 +704,8 @@
#define MACB_CAPS_GIGABIT_MODE_AVAILABLE 0x20000000
#define MACB_CAPS_SG_DISABLED 0x40000000
#define MACB_CAPS_MACB_IS_GEM 0x80000000
+#define MACB_CAPS_PCS 0x01000000
+#define MACB_CAPS_HIGH_SPEED 0x02000000
/* LSO settings */
#define MACB_LSO_UFO_ENABLE 0x01
@@ -1201,6 +1244,7 @@ struct macb {
struct mii_bus *mii_bus;
struct phylink *phylink;
struct phylink_config phylink_config;
+ struct phylink_pcs phylink_pcs;
u32 caps;
unsigned int dma_burst_length;
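
The *_OFFSET/*_SIZE pairs added above are consumed by macb.h's generic bitfield helpers (GEM_BIT, GEM_BFINS, GEM_BFEXT). A standalone userspace sketch of how such offset/width pairs compose into insert/extract macros; the BF_* names are ours, assuming the usual semantics:

#include <stdint.h>
#include <stdio.h>

#define GEM_HS_MAC_SPEED_OFFSET	0
#define GEM_HS_MAC_SPEED_SIZE	3

#define BF_MASK(off, sz)	((((uint32_t)1 << (sz)) - 1) << (off))
#define BF_INS(off, sz, v, old)	(((old) & ~BF_MASK(off, sz)) | \
				 (((uint32_t)(v) << (off)) & BF_MASK(off, sz)))
#define BF_EXT(off, sz, reg)	(((reg) >> (off)) & (((uint32_t)1 << (sz)) - 1))

int main(void)
{
	uint32_t reg = 0xffffffff;

	/* Insert HS_SPEED_10000M (4) into the 3-bit speed field */
	reg = BF_INS(GEM_HS_MAC_SPEED_OFFSET, GEM_HS_MAC_SPEED_SIZE, 4, reg);
	printf("speed = %u\n",
	       BF_EXT(GEM_HS_MAC_SPEED_OFFSET, GEM_HS_MAC_SPEED_SIZE, reg));
	return 0;	/* prints "speed = 4" */
}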
diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c
index 286f0341bdf8..7b1d195787dc 100644
--- a/drivers/net/ethernet/cadence/macb_main.c
+++ b/drivers/net/ethernet/cadence/macb_main.c
@@ -84,6 +84,9 @@ struct sifive_fu540_macb_mgmt {
#define MACB_WOL_HAS_MAGIC_PACKET (0x1 << 0)
#define MACB_WOL_ENABLED (0x1 << 1)
+#define HS_SPEED_10000M 4
+#define MACB_SERDES_RATE_10G 1
+
/* Graceful stop timeouts in us. We should allow up to
* 1 frame time (10 Mbits/s, full-duplex, ignoring collisions)
*/
@@ -513,6 +516,7 @@ static void macb_validate(struct phylink_config *config,
state->interface != PHY_INTERFACE_MODE_RMII &&
state->interface != PHY_INTERFACE_MODE_GMII &&
state->interface != PHY_INTERFACE_MODE_SGMII &&
+ state->interface != PHY_INTERFACE_MODE_10GBASER &&
!phy_interface_mode_is_rgmii(state->interface)) {
bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS);
return;
@@ -525,10 +529,31 @@ static void macb_validate(struct phylink_config *config,
return;
}
+ if (state->interface == PHY_INTERFACE_MODE_10GBASER &&
+ !(bp->caps & MACB_CAPS_HIGH_SPEED &&
+ bp->caps & MACB_CAPS_PCS)) {
+ bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS);
+ return;
+ }
+
phylink_set_port_modes(mask);
phylink_set(mask, Autoneg);
phylink_set(mask, Asym_Pause);
+ if (bp->caps & MACB_CAPS_GIGABIT_MODE_AVAILABLE &&
+ (state->interface == PHY_INTERFACE_MODE_NA ||
+ state->interface == PHY_INTERFACE_MODE_10GBASER)) {
+ phylink_set(mask, 10000baseCR_Full);
+ phylink_set(mask, 10000baseER_Full);
+ phylink_set(mask, 10000baseKR_Full);
+ phylink_set(mask, 10000baseLR_Full);
+ phylink_set(mask, 10000baseLRM_Full);
+ phylink_set(mask, 10000baseSR_Full);
+ phylink_set(mask, 10000baseT_Full);
+ if (state->interface != PHY_INTERFACE_MODE_NA)
+ goto out;
+ }
+
phylink_set(mask, 10baseT_Half);
phylink_set(mask, 10baseT_Full);
phylink_set(mask, 100baseT_Half);
@@ -545,23 +570,90 @@ static void macb_validate(struct phylink_config *config,
if (!(bp->caps & MACB_CAPS_NO_GIGABIT_HALF))
phylink_set(mask, 1000baseT_Half);
}
-
+out:
bitmap_and(supported, supported, mask, __ETHTOOL_LINK_MODE_MASK_NBITS);
bitmap_and(state->advertising, state->advertising, mask,
__ETHTOOL_LINK_MODE_MASK_NBITS);
}
-static void macb_mac_pcs_get_state(struct phylink_config *config,
+static void macb_usx_pcs_link_up(struct phylink_pcs *pcs, unsigned int mode,
+ phy_interface_t interface, int speed,
+ int duplex)
+{
+ struct macb *bp = container_of(pcs, struct macb, phylink_pcs);
+ u32 config;
+
+ config = gem_readl(bp, USX_CONTROL);
+ config = GEM_BFINS(SERDES_RATE, MACB_SERDES_RATE_10G, config);
+ config = GEM_BFINS(USX_CTRL_SPEED, HS_SPEED_10000M, config);
+ config &= ~(GEM_BIT(TX_SCR_BYPASS) | GEM_BIT(RX_SCR_BYPASS));
+ config |= GEM_BIT(TX_EN);
+ gem_writel(bp, USX_CONTROL, config);
+}
+
+static void macb_usx_pcs_get_state(struct phylink_pcs *pcs,
struct phylink_link_state *state)
{
+ struct macb *bp = container_of(pcs, struct macb, phylink_pcs);
+ u32 val;
+
+ state->speed = SPEED_10000;
+ state->duplex = 1;
+ state->an_complete = 1;
+
+ val = gem_readl(bp, USX_STATUS);
+ state->link = !!(val & GEM_BIT(USX_BLOCK_LOCK));
+ val = gem_readl(bp, NCFGR);
+ if (val & GEM_BIT(PAE))
+ state->pause = MLO_PAUSE_RX;
+}
+
+static int macb_usx_pcs_config(struct phylink_pcs *pcs,
+ unsigned int mode,
+ phy_interface_t interface,
+ const unsigned long *advertising,
+ bool permit_pause_to_mac)
+{
+ struct macb *bp = container_of(pcs, struct macb, phylink_pcs);
+
+ gem_writel(bp, USX_CONTROL, gem_readl(bp, USX_CONTROL) |
+ GEM_BIT(SIGNAL_OK));
+
+ return 0;
+}
+
+static void macb_pcs_get_state(struct phylink_pcs *pcs,
+ struct phylink_link_state *state)
+{
state->link = 0;
}
-static void macb_mac_an_restart(struct phylink_config *config)
+static void macb_pcs_an_restart(struct phylink_pcs *pcs)
{
/* Not supported */
}
+static int macb_pcs_config(struct phylink_pcs *pcs,
+ unsigned int mode,
+ phy_interface_t interface,
+ const unsigned long *advertising,
+ bool permit_pause_to_mac)
+{
+ return 0;
+}
+
+static const struct phylink_pcs_ops macb_phylink_usx_pcs_ops = {
+ .pcs_get_state = macb_usx_pcs_get_state,
+ .pcs_config = macb_usx_pcs_config,
+ .pcs_link_up = macb_usx_pcs_link_up,
+};
+
+static const struct phylink_pcs_ops macb_phylink_pcs_ops = {
+ .pcs_get_state = macb_pcs_get_state,
+ .pcs_an_restart = macb_pcs_an_restart,
+ .pcs_config = macb_pcs_config,
+};
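
Each callback above recovers the driver state with container_of(), since phylink passes back only the embedded struct phylink_pcs. A standalone illustration of that pattern with hypothetical demo types:

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct demo_pcs { int unused; };

struct demo_macb {
	unsigned int caps;
	struct demo_pcs phylink_pcs;	/* embedded, as in struct macb */
};

static void demo_pcs_callback(struct demo_pcs *pcs)
{
	struct demo_macb *bp = container_of(pcs, struct demo_macb, phylink_pcs);

	printf("caps = %#x\n", bp->caps);
}

int main(void)
{
	struct demo_macb bp = { .caps = 0x02000000 };

	demo_pcs_callback(&bp.phylink_pcs);	/* prints caps = 0x2000000 */
	return 0;
}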
+
static void macb_mac_config(struct phylink_config *config, unsigned int mode,
const struct phylink_link_state *state)
{
@@ -569,25 +661,35 @@ static void macb_mac_config(struct phylink_config *config, unsigned int mode,
struct macb *bp = netdev_priv(ndev);
unsigned long flags;
u32 old_ctrl, ctrl;
+ u32 old_ncr, ncr;
spin_lock_irqsave(&bp->lock, flags);
old_ctrl = ctrl = macb_or_gem_readl(bp, NCFGR);
+ old_ncr = ncr = macb_or_gem_readl(bp, NCR);
if (bp->caps & MACB_CAPS_MACB_IS_EMAC) {
if (state->interface == PHY_INTERFACE_MODE_RMII)
ctrl |= MACB_BIT(RM9200_RMII);
} else if (macb_is_gem(bp)) {
ctrl &= ~(GEM_BIT(SGMIIEN) | GEM_BIT(PCSSEL));
+ ncr &= ~GEM_BIT(ENABLE_HS_MAC);
- if (state->interface == PHY_INTERFACE_MODE_SGMII)
+ if (state->interface == PHY_INTERFACE_MODE_SGMII) {
ctrl |= GEM_BIT(SGMIIEN) | GEM_BIT(PCSSEL);
+ } else if (state->interface == PHY_INTERFACE_MODE_10GBASER) {
+ ctrl |= GEM_BIT(PCSSEL);
+ ncr |= GEM_BIT(ENABLE_HS_MAC);
+ }
}
/* Apply the new configuration, if any */
if (old_ctrl ^ ctrl)
macb_or_gem_writel(bp, NCFGR, ctrl);
+ if (old_ncr ^ ncr)
+ macb_or_gem_writel(bp, NCR, ncr);
+
spin_unlock_irqrestore(&bp->lock, flags);
}
@@ -664,6 +766,10 @@ static void macb_mac_link_up(struct phylink_config *config,
macb_or_gem_writel(bp, NCFGR, ctrl);
+ if (bp->phy_interface == PHY_INTERFACE_MODE_10GBASER)
+ gem_writel(bp, HS_MAC_CONFIG, GEM_BFINS(HS_MAC_SPEED, HS_SPEED_10000M,
+ gem_readl(bp, HS_MAC_CONFIG)));
+
spin_unlock_irqrestore(&bp->lock, flags);
/* Enable Rx and Tx */
@@ -672,10 +778,28 @@ static void macb_mac_link_up(struct phylink_config *config,
netif_tx_wake_all_queues(ndev);
}
+static int macb_mac_prepare(struct phylink_config *config, unsigned int mode,
+ phy_interface_t interface)
+{
+ struct net_device *ndev = to_net_dev(config->dev);
+ struct macb *bp = netdev_priv(ndev);
+
+ if (interface == PHY_INTERFACE_MODE_10GBASER)
+ bp->phylink_pcs.ops = &macb_phylink_usx_pcs_ops;
+ else if (interface == PHY_INTERFACE_MODE_SGMII)
+ bp->phylink_pcs.ops = &macb_phylink_pcs_ops;
+ else
+ bp->phylink_pcs.ops = NULL;
+
+ if (bp->phylink_pcs.ops)
+ phylink_set_pcs(bp->phylink, &bp->phylink_pcs);
+
+ return 0;
+}
+
static const struct phylink_mac_ops macb_phylink_ops = {
.validate = macb_validate,
- .mac_pcs_get_state = macb_mac_pcs_get_state,
- .mac_an_restart = macb_mac_an_restart,
+ .mac_prepare = macb_mac_prepare,
.mac_config = macb_mac_config,
.mac_link_down = macb_mac_link_down,
.mac_link_up = macb_mac_link_up,
@@ -3524,6 +3648,11 @@ static void macb_configure_caps(struct macb *bp,
dcfg = gem_readl(bp, DCFG1);
if (GEM_BFEXT(IRQCOR, dcfg) == 0)
bp->caps |= MACB_CAPS_ISR_CLEAR_ON_WRITE;
+ if (GEM_BFEXT(NO_PCS, dcfg) == 0)
+ bp->caps |= MACB_CAPS_PCS;
+ dcfg = gem_readl(bp, DCFG12);
+ if (GEM_BFEXT(HIGH_SPEED, dcfg) == 1)
+ bp->caps |= MACB_CAPS_HIGH_SPEED;
dcfg = gem_readl(bp, DCFG2);
if ((dcfg & (GEM_BIT(RX_PKT_BUFF) | GEM_BIT(TX_PKT_BUFF))) == 0)
bp->caps |= MACB_CAPS_FIFO_MODE;
@@ -3582,19 +3711,13 @@ static int macb_clk_init(struct platform_device *pdev, struct clk **pclk,
}
if (IS_ERR_OR_NULL(*pclk)) {
- err = PTR_ERR(*pclk);
- if (!err)
- err = -ENODEV;
-
+ err = IS_ERR(*pclk) ? PTR_ERR(*pclk) : -ENODEV;
dev_err(&pdev->dev, "failed to get macb_clk (%d)\n", err);
return err;
}
if (IS_ERR_OR_NULL(*hclk)) {
- err = PTR_ERR(*hclk);
- if (!err)
- err = -ENODEV;
-
+ err = IS_ERR(*hclk) ? PTR_ERR(*hclk) : -ENODEV;
dev_err(&pdev->dev, "failed to get hclk (%d)\n", err);
return err;
}
diff --git a/drivers/net/ethernet/cavium/liquidio/cn68xx_device.c b/drivers/net/ethernet/cavium/liquidio/cn68xx_device.c
index 2a6d1cadac9e..30254e4cf70f 100644
--- a/drivers/net/ethernet/cavium/liquidio/cn68xx_device.c
+++ b/drivers/net/ethernet/cavium/liquidio/cn68xx_device.c
@@ -27,7 +27,6 @@
#include "cn66xx_device.h"
#include "cn68xx_device.h"
#include "cn68xx_regs.h"
-#include "cn68xx_device.h"
static void lio_cn68xx_set_dpi_regs(struct octeon_device *oct)
{
diff --git a/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c b/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c
index 16eebfc52109..66f2c553370c 100644
--- a/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c
+++ b/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c
@@ -15,6 +15,7 @@
* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
* NONINFRINGEMENT. See the GNU General Public License for more details.
***********************************************************************/
+#include <linux/ethtool.h>
#include <linux/netdevice.h>
#include <linux/net_tstamp.h>
#include <linux/pci.h>
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c b/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c
index c7bdac79299a..2f218fbfed06 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c
@@ -5,6 +5,7 @@
/* ETHTOOL Support for VNIC_VF Device*/
+#include <linux/ethtool.h>
#include <linux/pci.h>
#include <linux/net_tstamp.h>
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
index 27308600da15..8e681ce72d62 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
@@ -39,6 +39,7 @@
#include <linux/bitops.h>
#include <linux/cache.h>
+#include <linux/ethtool.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/netdevice.h>
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
index 17410fe86626..7d49fd4edc9e 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
@@ -2671,7 +2671,7 @@ do { \
seq_printf(seq, "%-12s", s); \
for (i = 0; i < n; ++i) \
seq_printf(seq, " %16" fmt_spec, v); \
- seq_putc(seq, '\n'); \
+ seq_putc(seq, '\n'); \
} while (0)
#define S(s, v) S3("s", s, v)
#define T3(fmt_spec, s, v) S3(fmt_spec, s, tx[i].v)
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c
index cd8f9a481d73..d546993bda09 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c
@@ -33,6 +33,7 @@
* SOFTWARE.
*/
+#include <linux/ethtool.h>
#include <linux/pci.h>
#include "t4vf_common.h"
diff --git a/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls.h b/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls.h
index 2d3dfdd2a716..e7b78b68eaac 100644
--- a/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls.h
+++ b/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls.h
@@ -235,6 +235,7 @@ struct chtls_dev {
struct list_head na_node;
unsigned int send_page_order;
int max_host_sndbuf;
+ u32 round_robin_cnt;
struct key_map kmap;
unsigned int cdev_state;
};
diff --git a/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_cm.c b/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_cm.c
index 50e3a70e5a29..95ab871d8d59 100644
--- a/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_cm.c
+++ b/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_cm.c
@@ -1218,8 +1218,9 @@ static struct sock *chtls_recv_sock(struct sock *lsk,
csk->sndbuf = csk->snd_win;
csk->ulp_mode = ULP_MODE_TLS;
step = cdev->lldi->nrxq / cdev->lldi->nchan;
- csk->rss_qid = cdev->lldi->rxq_ids[port_id * step];
rxq_idx = port_id * step;
+ rxq_idx += cdev->round_robin_cnt++ % step;
+ csk->rss_qid = cdev->lldi->rxq_ids[rxq_idx];
csk->txq_idx = (rxq_idx < cdev->lldi->ntxq) ? rxq_idx :
port_id * step;
csk->sndbuf = newsk->sk_sndbuf;
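
The added counter spreads incoming connections across the port's RX queue range instead of pinning them all to the first queue. The selection is a plain base + counter % span round-robin; a minimal illustration with made-up numbers:

#include <stdio.h>

int main(void)
{
	unsigned int round_robin_cnt = 0;
	int port_id = 1, step = 4;	/* RX queues per channel */
	int i;

	for (i = 0; i < 6; i++)
		printf("rxq_idx = %d\n",
		       port_id * step + round_robin_cnt++ % step);
	return 0;	/* prints 4, 5, 6, 7, 4, 5 */
}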
diff --git a/drivers/net/ethernet/davicom/Kconfig b/drivers/net/ethernet/davicom/Kconfig
index 76247103e566..7af86b6d4150 100644
--- a/drivers/net/ethernet/davicom/Kconfig
+++ b/drivers/net/ethernet/davicom/Kconfig
@@ -5,7 +5,7 @@
config DM9000
tristate "DM9000 support"
- depends on ARM || MIPS || COLDFIRE || NIOS2
+ depends on ARM || MIPS || COLDFIRE || NIOS2 || COMPILE_TEST
select CRC32
select MII
help
diff --git a/drivers/net/ethernet/davicom/dm9000.c b/drivers/net/ethernet/davicom/dm9000.c
index 5c6c8c5ec747..3fdc70dab5c1 100644
--- a/drivers/net/ethernet/davicom/dm9000.c
+++ b/drivers/net/ethernet/davicom/dm9000.c
@@ -232,32 +232,29 @@ static void dm9000_inblk_32bit(void __iomem *reg, void *data, int count)
static void dm9000_dumpblk_8bit(void __iomem *reg, int count)
{
int i;
- int tmp;
for (i = 0; i < count; i++)
- tmp = readb(reg);
+ readb(reg);
}
static void dm9000_dumpblk_16bit(void __iomem *reg, int count)
{
int i;
- int tmp;
count = (count + 1) >> 1;
for (i = 0; i < count; i++)
- tmp = readw(reg);
+ readw(reg);
}
static void dm9000_dumpblk_32bit(void __iomem *reg, int count)
{
int i;
- int tmp;
count = (count + 3) >> 2;
for (i = 0; i < count; i++)
- tmp = readl(reg);
+ readl(reg);
}
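
Dropping the write-only tmp variable is safe because readb()/readw()/readl() are volatile MMIO accesses: the compiler must still perform every read even though the result is discarded. A userspace analogue of the same guarantee, using a volatile pointer (demo name is ours):

#include <stdint.h>

static void demo_dumpblk_32bit(const volatile uint32_t *reg, int count)
{
	int i;

	count = (count + 3) >> 2;	/* round bytes up to 32-bit words */
	for (i = 0; i < count; i++)
		(void)*reg;		/* volatile load; cannot be optimized out */
}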
/*
diff --git a/drivers/net/ethernet/dec/tulip/de2104x.c b/drivers/net/ethernet/dec/tulip/de2104x.c
index d9f6c19940ef..c3cbe55205a7 100644
--- a/drivers/net/ethernet/dec/tulip/de2104x.c
+++ b/drivers/net/ethernet/dec/tulip/de2104x.c
@@ -2175,11 +2175,21 @@ out:
static SIMPLE_DEV_PM_OPS(de_pm_ops, de_suspend, de_resume);
+static void de_shutdown(struct pci_dev *pdev)
+{
+ struct net_device *dev = pci_get_drvdata(pdev);
+
+ rtnl_lock();
+ dev_close(dev);
+ rtnl_unlock();
+}
+
static struct pci_driver de_driver = {
.name = DRV_NAME,
.id_table = de_pci_tbl,
.probe = de_init_one,
.remove = de_remove_one,
+ .shutdown = de_shutdown,
.driver.pm = &de_pm_ops,
};
diff --git a/drivers/net/ethernet/dec/tulip/tulip_core.c b/drivers/net/ethernet/dec/tulip/tulip_core.c
index e7b0d7de40fd..c1dcd6ca1457 100644
--- a/drivers/net/ethernet/dec/tulip/tulip_core.c
+++ b/drivers/net/ethernet/dec/tulip/tulip_core.c
@@ -1293,7 +1293,9 @@ static int tulip_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
static unsigned char last_phys_addr[ETH_ALEN] = {
0x00, 'L', 'i', 'n', 'u', 'x'
};
+#if defined(__i386__) || defined(__x86_64__) /* Patch up x86 BIOS bug. */
static int last_irq;
+#endif
int i, irq;
unsigned short sum;
unsigned char *ee_data;
@@ -1617,7 +1619,9 @@ static int tulip_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
for (i = 0; i < 6; i++)
last_phys_addr[i] = dev->dev_addr[i];
+#if defined(__i386__) || defined(__x86_64__) /* Patch up x86 BIOS bug. */
last_irq = irq;
+#endif
/* The lower four bits are the media type. */
if (board_idx >= 0 && board_idx < MAX_UNITS) {
diff --git a/drivers/net/ethernet/faraday/ftgmac100.c b/drivers/net/ethernet/faraday/ftgmac100.c
index 80fb1f537bb3..88bfe2107938 100644
--- a/drivers/net/ethernet/faraday/ftgmac100.c
+++ b/drivers/net/ethernet/faraday/ftgmac100.c
@@ -1044,10 +1044,39 @@ static void ftgmac100_adjust_link(struct net_device *netdev)
schedule_work(&priv->reset_task);
}
-static int ftgmac100_mii_probe(struct ftgmac100 *priv, phy_interface_t intf)
+static int ftgmac100_mii_probe(struct net_device *netdev)
{
- struct net_device *netdev = priv->netdev;
+ struct ftgmac100 *priv = netdev_priv(netdev);
+ struct platform_device *pdev = to_platform_device(priv->dev);
+ struct device_node *np = pdev->dev.of_node;
struct phy_device *phydev;
+ phy_interface_t phy_intf;
+ int err;
+
+ /* Default to RGMII. It's a gigabit part after all */
+ err = of_get_phy_mode(np, &phy_intf);
+ if (err)
+ phy_intf = PHY_INTERFACE_MODE_RGMII;
+
+ /* Aspeed only supports these. I don't know about other IP
+ * block vendors so I'm going to just let them through for
+ * now. Note that this is only a warning if for some obscure
+ * reason the DT really means to lie about it or it's a newer
+ * part we don't know about.
+ *
+ * On the Aspeed SoC there are additionally straps and SCU
+ * control bits that could tell us what the interface is
+ * (or allow us to configure it while the IP block is held
+ * in reset). For now I chose to keep this driver away from
+ * those SoC specific bits and assume the device-tree is
+ * right and the SCU has been configured properly by pinmux
+ * or the firmware.
+ */
+ if (priv->is_aspeed && !(phy_interface_mode_is_rgmii(phy_intf))) {
+ netdev_warn(netdev,
+ "Unsupported PHY mode %s !\n",
+ phy_modes(phy_intf));
+ }
phydev = phy_find_first(priv->mii_bus);
if (!phydev) {
@@ -1056,7 +1085,7 @@ static int ftgmac100_mii_probe(struct ftgmac100 *priv, phy_interface_t intf)
}
phydev = phy_connect(netdev, phydev_name(phydev),
- &ftgmac100_adjust_link, intf);
+ &ftgmac100_adjust_link, phy_intf);
if (IS_ERR(phydev)) {
netdev_err(netdev, "%s: Could not attach to PHY\n", netdev->name);
@@ -1601,8 +1630,8 @@ static int ftgmac100_setup_mdio(struct net_device *netdev)
{
struct ftgmac100 *priv = netdev_priv(netdev);
struct platform_device *pdev = to_platform_device(priv->dev);
- phy_interface_t phy_intf = PHY_INTERFACE_MODE_RGMII;
struct device_node *np = pdev->dev.of_node;
+ struct device_node *mdio_np;
int i, err = 0;
u32 reg;
@@ -1623,39 +1652,6 @@ static int ftgmac100_setup_mdio(struct net_device *netdev)
iowrite32(reg, priv->base + FTGMAC100_OFFSET_REVR);
}
- /* Get PHY mode from device-tree */
- if (np) {
- /* Default to RGMII. It's a gigabit part after all */
- err = of_get_phy_mode(np, &phy_intf);
- if (err)
- phy_intf = PHY_INTERFACE_MODE_RGMII;
-
- /* Aspeed only supports these. I don't know about other IP
- * block vendors so I'm going to just let them through for
- * now. Note that this is only a warning if for some obscure
- * reason the DT really means to lie about it or it's a newer
- * part we don't know about.
- *
- * On the Aspeed SoC there are additionally straps and SCU
- * control bits that could tell us what the interface is
- * (or allow us to configure it while the IP block is held
- * in reset). For now I chose to keep this driver away from
- * those SoC specific bits and assume the device-tree is
- * right and the SCU has been configured properly by pinmux
- * or the firmware.
- */
- if (priv->is_aspeed &&
- phy_intf != PHY_INTERFACE_MODE_RMII &&
- phy_intf != PHY_INTERFACE_MODE_RGMII &&
- phy_intf != PHY_INTERFACE_MODE_RGMII_ID &&
- phy_intf != PHY_INTERFACE_MODE_RGMII_RXID &&
- phy_intf != PHY_INTERFACE_MODE_RGMII_TXID) {
- netdev_warn(netdev,
- "Unsupported PHY mode %s !\n",
- phy_modes(phy_intf));
- }
- }
-
priv->mii_bus->name = "ftgmac100_mdio";
snprintf(priv->mii_bus->id, MII_BUS_ID_SIZE, "%s-%d",
pdev->name, pdev->id);
@@ -1667,35 +1663,38 @@ static int ftgmac100_setup_mdio(struct net_device *netdev)
for (i = 0; i < PHY_MAX_ADDR; i++)
priv->mii_bus->irq[i] = PHY_POLL;
- err = mdiobus_register(priv->mii_bus);
+ mdio_np = of_get_child_by_name(np, "mdio");
+
+ err = of_mdiobus_register(priv->mii_bus, mdio_np);
if (err) {
dev_err(priv->dev, "Cannot register MDIO bus!\n");
goto err_register_mdiobus;
}
- err = ftgmac100_mii_probe(priv, phy_intf);
- if (err) {
- dev_err(priv->dev, "MII Probe failed!\n");
- goto err_mii_probe;
- }
+ of_node_put(mdio_np);
return 0;
-err_mii_probe:
- mdiobus_unregister(priv->mii_bus);
err_register_mdiobus:
mdiobus_free(priv->mii_bus);
return err;
}
+static void ftgmac100_phy_disconnect(struct net_device *netdev)
+{
+ if (!netdev->phydev)
+ return;
+
+ phy_disconnect(netdev->phydev);
+}
+
static void ftgmac100_destroy_mdio(struct net_device *netdev)
{
struct ftgmac100 *priv = netdev_priv(netdev);
- if (!netdev->phydev)
+ if (!priv->mii_bus)
return;
- phy_disconnect(netdev->phydev);
mdiobus_unregister(priv->mii_bus);
mdiobus_free(priv->mii_bus);
}
@@ -1830,22 +1829,33 @@ static int ftgmac100_probe(struct platform_device *pdev)
if (np && of_get_property(np, "use-ncsi", NULL)) {
if (!IS_ENABLED(CONFIG_NET_NCSI)) {
dev_err(&pdev->dev, "NCSI stack not enabled\n");
- goto err_ncsi_dev;
+ goto err_phy_connect;
}
dev_info(&pdev->dev, "Using NCSI interface\n");
priv->use_ncsi = true;
priv->ndev = ncsi_register_dev(netdev, ftgmac100_ncsi_handler);
if (!priv->ndev)
- goto err_ncsi_dev;
+ goto err_phy_connect;
} else if (np && of_get_property(np, "phy-handle", NULL)) {
struct phy_device *phy;
+ /* Support "mdio"/"phy" child nodes for ast2400/2500 with
+ * an embedded MDIO controller. Automatically scan the DTS for
+ * available PHYs and register them.
+ */
+ if (of_device_is_compatible(np, "aspeed,ast2400-mac") ||
+ of_device_is_compatible(np, "aspeed,ast2500-mac")) {
+ err = ftgmac100_setup_mdio(netdev);
+ if (err)
+ goto err_setup_mdio;
+ }
+
phy = of_phy_get_and_connect(priv->netdev, np,
&ftgmac100_adjust_link);
if (!phy) {
dev_err(&pdev->dev, "Failed to connect to phy\n");
- goto err_setup_mdio;
+ goto err_phy_connect;
}
/* Indicate that we support PAUSE frames (see comment in
@@ -1865,12 +1875,19 @@ static int ftgmac100_probe(struct platform_device *pdev)
err = ftgmac100_setup_mdio(netdev);
if (err)
goto err_setup_mdio;
+
+ err = ftgmac100_mii_probe(netdev);
+ if (err) {
+ dev_err(priv->dev, "MII probe failed!\n");
+ goto err_ncsi_dev;
+ }
+
}
if (priv->is_aspeed) {
err = ftgmac100_setup_clk(priv);
if (err)
- goto err_ncsi_dev;
+ goto err_phy_connect;
}
/* Default ring sizes */
@@ -1906,6 +1923,8 @@ static int ftgmac100_probe(struct platform_device *pdev)
err_register_netdev:
clk_disable_unprepare(priv->rclk);
clk_disable_unprepare(priv->clk);
+err_phy_connect:
+ ftgmac100_phy_disconnect(netdev);
err_ncsi_dev:
if (priv->ndev)
ncsi_unregister_dev(priv->ndev);
@@ -1940,6 +1959,7 @@ static int ftgmac100_remove(struct platform_device *pdev)
*/
cancel_work_sync(&priv->reset_task);
+ ftgmac100_phy_disconnect(netdev);
ftgmac100_destroy_mdio(netdev);
iounmap(priv->base);
diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
index cb7c028b1bf5..33c71b5c5738 100644
--- a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
+++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
@@ -53,6 +53,8 @@
#include <linux/dma-mapping.h>
#include <linux/sort.h>
#include <linux/phy_fixed.h>
+#include <linux/bpf.h>
+#include <linux/bpf_trace.h>
#include <soc/fsl/bman.h>
#include <soc/fsl/qman.h>
#include "fman.h"
@@ -177,7 +179,7 @@ MODULE_PARM_DESC(tx_timeout, "The Tx timeout in ms");
#define DPAA_HWA_SIZE (DPAA_PARSE_RESULTS_SIZE + DPAA_TIME_STAMP_SIZE \
+ DPAA_HASH_RESULTS_SIZE)
#define DPAA_RX_PRIV_DATA_DEFAULT_SIZE (DPAA_TX_PRIV_DATA_SIZE + \
- dpaa_rx_extra_headroom)
+ XDP_PACKET_HEADROOM - DPAA_HWA_SIZE)
#ifdef CONFIG_DPAA_ERRATUM_A050385
#define DPAA_RX_PRIV_DATA_A050385_SIZE (DPAA_A050385_ALIGN - DPAA_HWA_SIZE)
#define DPAA_RX_PRIV_DATA_SIZE (fman_has_errata_a050385() ? \
@@ -1128,6 +1130,25 @@ static int dpaa_fq_init(struct dpaa_fq *dpaa_fq, bool td_enable)
dpaa_fq->fqid = qman_fq_fqid(fq);
+ if (dpaa_fq->fq_type == FQ_TYPE_RX_DEFAULT ||
+ dpaa_fq->fq_type == FQ_TYPE_RX_PCD) {
+ err = xdp_rxq_info_reg(&dpaa_fq->xdp_rxq, dpaa_fq->net_dev,
+ dpaa_fq->fqid);
+ if (err) {
+ dev_err(dev, "xdp_rxq_info_reg() = %d\n", err);
+ return err;
+ }
+
+ err = xdp_rxq_info_reg_mem_model(&dpaa_fq->xdp_rxq,
+ MEM_TYPE_PAGE_ORDER0, NULL);
+ if (err) {
+ dev_err(dev, "xdp_rxq_info_reg_mem_model() = %d\n",
+ err);
+ xdp_rxq_info_unreg(&dpaa_fq->xdp_rxq);
+ return err;
+ }
+ }
+
return 0;
}
@@ -1157,6 +1178,11 @@ static int dpaa_fq_free_entry(struct device *dev, struct qman_fq *fq)
}
}
+ if ((dpaa_fq->fq_type == FQ_TYPE_RX_DEFAULT ||
+ dpaa_fq->fq_type == FQ_TYPE_RX_PCD) &&
+ xdp_rxq_info_is_reg(&dpaa_fq->xdp_rxq))
+ xdp_rxq_info_unreg(&dpaa_fq->xdp_rxq);
+
qman_destroy_fq(fq);
list_del(&dpaa_fq->list);
@@ -1623,6 +1649,9 @@ static int dpaa_eth_refill_bpools(struct dpaa_priv *priv)
*
* Return the skb backpointer, since for S/G frames the buffer containing it
* gets freed here.
+ *
+ * No skb backpointer is set when transmitting XDP frames. Clean up the buffer
+ * and return NULL in this case.
*/
static struct sk_buff *dpaa_cleanup_tx_fd(const struct dpaa_priv *priv,
const struct qm_fd *fd, bool ts)
@@ -1633,6 +1662,7 @@ static struct sk_buff *dpaa_cleanup_tx_fd(const struct dpaa_priv *priv,
dma_addr_t addr = qm_fd_addr(fd);
void *vaddr = phys_to_virt(addr);
const struct qm_sg_entry *sgt;
+ struct dpaa_eth_swbp *swbp;
struct sk_buff *skb;
u64 ns;
int i;
@@ -1661,11 +1691,20 @@ static struct sk_buff *dpaa_cleanup_tx_fd(const struct dpaa_priv *priv,
}
} else {
dma_unmap_single(priv->tx_dma_dev, addr,
- priv->tx_headroom + qm_fd_get_length(fd),
+ qm_fd_get_offset(fd) + qm_fd_get_length(fd),
dma_dir);
}
- skb = *(struct sk_buff **)vaddr;
+ swbp = (struct dpaa_eth_swbp *)vaddr;
+ skb = swbp->skb;
+
+ /* No skb backpointer is set when running XDP. An xdp_frame
+ * backpointer is saved instead.
+ */
+ if (!skb) {
+ xdp_return_frame(swbp->xdpf);
+ return NULL;
+ }
/* DMA unmapping is required before accessing the HW provided info */
if (ts && priv->tx_tstamp &&
@@ -1731,7 +1770,6 @@ static struct sk_buff *contig_fd_to_skb(const struct dpaa_priv *priv,
SKB_DATA_ALIGN(sizeof(struct skb_shared_info)));
if (WARN_ONCE(!skb, "Build skb failure on Rx\n"))
goto free_buffer;
- WARN_ON(fd_off != priv->rx_headroom);
skb_reserve(skb, fd_off);
skb_put(skb, qm_fd_get_length(fd));
@@ -1879,8 +1917,8 @@ static int skb_to_contig_fd(struct dpaa_priv *priv,
{
struct net_device *net_dev = priv->net_dev;
enum dma_data_direction dma_dir;
+ struct dpaa_eth_swbp *swbp;
unsigned char *buff_start;
- struct sk_buff **skbh;
dma_addr_t addr;
int err;
@@ -1891,8 +1929,8 @@ static int skb_to_contig_fd(struct dpaa_priv *priv,
buff_start = skb->data - priv->tx_headroom;
dma_dir = DMA_TO_DEVICE;
- skbh = (struct sk_buff **)buff_start;
- *skbh = skb;
+ swbp = (struct dpaa_eth_swbp *)buff_start;
+ swbp->skb = skb;
/* Enable L3/L4 hardware checksum computation.
*
@@ -1931,8 +1969,8 @@ static int skb_to_sg_fd(struct dpaa_priv *priv,
const enum dma_data_direction dma_dir = DMA_TO_DEVICE;
const int nr_frags = skb_shinfo(skb)->nr_frags;
struct net_device *net_dev = priv->net_dev;
+ struct dpaa_eth_swbp *swbp;
struct qm_sg_entry *sgt;
- struct sk_buff **skbh;
void *buff_start;
skb_frag_t *frag;
dma_addr_t addr;
@@ -2005,8 +2043,8 @@ static int skb_to_sg_fd(struct dpaa_priv *priv,
qm_fd_set_sg(fd, priv->tx_headroom, skb->len);
/* DMA map the SGT page */
- skbh = (struct sk_buff **)buff_start;
- *skbh = skb;
+ swbp = (struct dpaa_eth_swbp *)buff_start;
+ swbp->skb = skb;
addr = dma_map_page(priv->tx_dma_dev, p, 0,
priv->tx_headroom + DPAA_SGT_SIZE, dma_dir);
@@ -2067,7 +2105,7 @@ static inline int dpaa_xmit(struct dpaa_priv *priv,
}
#ifdef CONFIG_DPAA_ERRATUM_A050385
-static int dpaa_a050385_wa(struct net_device *net_dev, struct sk_buff **s)
+static int dpaa_a050385_wa_skb(struct net_device *net_dev, struct sk_buff **s)
{
struct dpaa_priv *priv = netdev_priv(net_dev);
struct sk_buff *new_skb, *skb = *s;
@@ -2141,6 +2179,52 @@ workaround:
return 0;
}
+
+static int dpaa_a050385_wa_xdpf(struct dpaa_priv *priv,
+ struct xdp_frame **init_xdpf)
+{
+ struct xdp_frame *new_xdpf, *xdpf = *init_xdpf;
+ void *new_buff;
+ struct page *p;
+
+ /* Check the data alignment and make sure the headroom is large
+ * enough to store the xdpf backpointer. Use an aligned headroom
+ * value.
+ *
+ * Due to alignment constraints, we give XDP access to the full
+ * 256-byte frame headroom. If the XDP program uses all of it, copy the
+ * data to a new buffer and make room for storing the backpointer.
+ */
+ if (PTR_IS_ALIGNED(xdpf->data, DPAA_A050385_ALIGN) &&
+ xdpf->headroom >= priv->tx_headroom) {
+ xdpf->headroom = priv->tx_headroom;
+ return 0;
+ }
+
+ p = dev_alloc_pages(0);
+ if (unlikely(!p))
+ return -ENOMEM;
+
+ /* Copy the data to the new buffer at a properly aligned offset */
+ new_buff = page_address(p);
+ memcpy(new_buff + priv->tx_headroom, xdpf->data, xdpf->len);
+
+ /* Create an XDP frame around the new buffer in a similar fashion
+ * to xdp_convert_buff_to_frame.
+ */
+ new_xdpf = new_buff;
+ new_xdpf->data = new_buff + priv->tx_headroom;
+ new_xdpf->len = xdpf->len;
+ new_xdpf->headroom = priv->tx_headroom;
+ new_xdpf->frame_sz = DPAA_BP_RAW_SIZE;
+ new_xdpf->mem.type = MEM_TYPE_PAGE_ORDER0;
+
+ /* Release the initial buffer */
+ xdp_return_frame_rx_napi(xdpf);
+
+ *init_xdpf = new_xdpf;
+ return 0;
+}
#endif
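
PTR_IS_ALIGNED() in the workaround above reduces to a power-of-two mask test on the pointer value. A standalone equivalent (demo name is ours; the erratum alignment is a power of two, which this test requires):

#include <stdbool.h>
#include <stdint.h>

static bool demo_ptr_is_aligned(const void *ptr, uintptr_t align)
{
	/* valid only when align is a power of two */
	return ((uintptr_t)ptr & (align - 1)) == 0;
}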
static netdev_tx_t
@@ -2191,7 +2275,7 @@ dpaa_start_xmit(struct sk_buff *skb, struct net_device *net_dev)
#ifdef CONFIG_DPAA_ERRATUM_A050385
if (unlikely(fman_has_errata_a050385())) {
- if (dpaa_a050385_wa(net_dev, &skb))
+ if (dpaa_a050385_wa_skb(net_dev, &skb))
goto enomem;
nonlinear = skb_is_nonlinear(skb);
}
@@ -2275,8 +2359,11 @@ static int dpaa_eth_poll(struct napi_struct *napi, int budget)
{
struct dpaa_napi_portal *np =
container_of(napi, struct dpaa_napi_portal, napi);
+ int cleaned;
- int cleaned = qman_p_poll_dqrr(np->p, budget);
+ np->xdp_act = 0;
+
+ cleaned = qman_p_poll_dqrr(np->p, budget);
if (cleaned < budget) {
napi_complete_done(napi, cleaned);
@@ -2285,6 +2372,9 @@ static int dpaa_eth_poll(struct napi_struct *napi, int budget)
qman_p_irqsource_add(np->p, QM_PIRQ_DQRI);
}
+ if (np->xdp_act & XDP_REDIRECT)
+ xdp_do_flush();
+
return cleaned;
}
@@ -2313,9 +2403,9 @@ static void dpaa_tx_conf(struct net_device *net_dev,
}
static inline int dpaa_eth_napi_schedule(struct dpaa_percpu_priv *percpu_priv,
- struct qman_portal *portal)
+ struct qman_portal *portal, bool sched_napi)
{
- if (unlikely(in_irq() || !in_serving_softirq())) {
+ if (sched_napi) {
/* Disable QMan IRQ and invoke NAPI */
qman_p_irqsource_remove(portal, QM_PIRQ_DQRI);
@@ -2329,7 +2419,8 @@ static inline int dpaa_eth_napi_schedule(struct dpaa_percpu_priv *percpu_priv,
static enum qman_cb_dqrr_result rx_error_dqrr(struct qman_portal *portal,
struct qman_fq *fq,
- const struct qm_dqrr_entry *dq)
+ const struct qm_dqrr_entry *dq,
+ bool sched_napi)
{
struct dpaa_fq *dpaa_fq = container_of(fq, struct dpaa_fq, fq_base);
struct dpaa_percpu_priv *percpu_priv;
@@ -2345,7 +2436,7 @@ static enum qman_cb_dqrr_result rx_error_dqrr(struct qman_portal *portal,
percpu_priv = this_cpu_ptr(priv->percpu_priv);
- if (dpaa_eth_napi_schedule(percpu_priv, portal))
+ if (dpaa_eth_napi_schedule(percpu_priv, portal, sched_napi))
return qman_cb_dqrr_stop;
dpaa_eth_refill_bpools(priv);
@@ -2354,29 +2445,208 @@ static enum qman_cb_dqrr_result rx_error_dqrr(struct qman_portal *portal,
return qman_cb_dqrr_consume;
}
+static int dpaa_xdp_xmit_frame(struct net_device *net_dev,
+ struct xdp_frame *xdpf)
+{
+ struct dpaa_priv *priv = netdev_priv(net_dev);
+ struct rtnl_link_stats64 *percpu_stats;
+ struct dpaa_percpu_priv *percpu_priv;
+ struct dpaa_eth_swbp *swbp;
+ struct netdev_queue *txq;
+ void *buff_start;
+ struct qm_fd fd;
+ dma_addr_t addr;
+ int err;
+
+ percpu_priv = this_cpu_ptr(priv->percpu_priv);
+ percpu_stats = &percpu_priv->stats;
+
+#ifdef CONFIG_DPAA_ERRATUM_A050385
+ if (unlikely(fman_has_errata_a050385())) {
+ if (dpaa_a050385_wa_xdpf(priv, &xdpf)) {
+ err = -ENOMEM;
+ goto out_error;
+ }
+ }
+#endif
+
+ if (xdpf->headroom < DPAA_TX_PRIV_DATA_SIZE) {
+ err = -EINVAL;
+ goto out_error;
+ }
+
+ buff_start = xdpf->data - xdpf->headroom;
+
+ /* Leave the skb backpointer at the start of the buffer empty.
+ * Save the XDP frame for easy cleanup on confirmation.
+ */
+ swbp = (struct dpaa_eth_swbp *)buff_start;
+ swbp->skb = NULL;
+ swbp->xdpf = xdpf;
+
+ qm_fd_clear_fd(&fd);
+ fd.bpid = FSL_DPAA_BPID_INV;
+ fd.cmd |= cpu_to_be32(FM_FD_CMD_FCO);
+ qm_fd_set_contig(&fd, xdpf->headroom, xdpf->len);
+
+ addr = dma_map_single(priv->tx_dma_dev, buff_start,
+ xdpf->headroom + xdpf->len,
+ DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(priv->tx_dma_dev, addr))) {
+ err = -EINVAL;
+ goto out_error;
+ }
+
+ qm_fd_addr_set64(&fd, addr);
+
+ /* Bump the trans_start */
+ txq = netdev_get_tx_queue(net_dev, smp_processor_id());
+ txq->trans_start = jiffies;
+
+ err = dpaa_xmit(priv, percpu_stats, smp_processor_id(), &fd);
+ if (err) {
+ dma_unmap_single(priv->tx_dma_dev, addr,
+ qm_fd_get_offset(&fd) + qm_fd_get_length(&fd),
+ DMA_TO_DEVICE);
+ goto out_error;
+ }
+
+ return 0;
+
+out_error:
+ percpu_stats->tx_errors++;
+ return err;
+}
+
+static u32 dpaa_run_xdp(struct dpaa_priv *priv, struct qm_fd *fd, void *vaddr,
+ struct dpaa_fq *dpaa_fq, unsigned int *xdp_meta_len)
+{
+ ssize_t fd_off = qm_fd_get_offset(fd);
+ struct bpf_prog *xdp_prog;
+ struct xdp_frame *xdpf;
+ struct xdp_buff xdp;
+ u32 xdp_act;
+ int err;
+
+ rcu_read_lock();
+
+ xdp_prog = READ_ONCE(priv->xdp_prog);
+ if (!xdp_prog) {
+ rcu_read_unlock();
+ return XDP_PASS;
+ }
+
+ xdp.data = vaddr + fd_off;
+ xdp.data_meta = xdp.data;
+ xdp.data_hard_start = xdp.data - XDP_PACKET_HEADROOM;
+ xdp.data_end = xdp.data + qm_fd_get_length(fd);
+ xdp.frame_sz = DPAA_BP_RAW_SIZE - DPAA_TX_PRIV_DATA_SIZE;
+ xdp.rxq = &dpaa_fq->xdp_rxq;
+
+ /* We reserve a fixed headroom of 256 bytes under the erratum and we
+ * offer it all to XDP programs to use. If no room is left for the
+ * xdpf backpointer on TX, we will need to copy the data.
+ * Disable metadata support since data realignments might be required
+ * and the information can be lost.
+ */
+#ifdef CONFIG_DPAA_ERRATUM_A050385
+ if (unlikely(fman_has_errata_a050385())) {
+ xdp_set_data_meta_invalid(&xdp);
+ xdp.data_hard_start = vaddr;
+ xdp.frame_sz = DPAA_BP_RAW_SIZE;
+ }
+#endif
+
+ xdp_act = bpf_prog_run_xdp(xdp_prog, &xdp);
+
+ /* Update the length and the offset of the FD */
+ qm_fd_set_contig(fd, xdp.data - vaddr, xdp.data_end - xdp.data);
+
+ switch (xdp_act) {
+ case XDP_PASS:
+#ifdef CONFIG_DPAA_ERRATUM_A050385
+ *xdp_meta_len = xdp_data_meta_unsupported(&xdp) ? 0 :
+ xdp.data - xdp.data_meta;
+#else
+ *xdp_meta_len = xdp.data - xdp.data_meta;
+#endif
+ break;
+ case XDP_TX:
+ /* We can access the full headroom when sending the frame
+ * back out
+ */
+ xdp.data_hard_start = vaddr;
+ xdp.frame_sz = DPAA_BP_RAW_SIZE;
+ xdpf = xdp_convert_buff_to_frame(&xdp);
+ if (unlikely(!xdpf)) {
+ free_pages((unsigned long)vaddr, 0);
+ break;
+ }
+
+ if (dpaa_xdp_xmit_frame(priv->net_dev, xdpf))
+ xdp_return_frame_rx_napi(xdpf);
+
+ break;
+ case XDP_REDIRECT:
+ /* Allow redirect to use the full headroom */
+ xdp.data_hard_start = vaddr;
+ xdp.frame_sz = DPAA_BP_RAW_SIZE;
+
+ err = xdp_do_redirect(priv->net_dev, &xdp, xdp_prog);
+ if (err) {
+ trace_xdp_exception(priv->net_dev, xdp_prog, xdp_act);
+ free_pages((unsigned long)vaddr, 0);
+ }
+ break;
+ default:
+ bpf_warn_invalid_xdp_action(xdp_act);
+ fallthrough;
+ case XDP_ABORTED:
+ trace_xdp_exception(priv->net_dev, xdp_prog, xdp_act);
+ fallthrough;
+ case XDP_DROP:
+ /* Free the buffer */
+ free_pages((unsigned long)vaddr, 0);
+ break;
+ }
+
+ rcu_read_unlock();
+
+ return xdp_act;
+}
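
The switch above is the driver half of the XDP contract; the verdict comes from whatever BPF program is attached. For reference, a minimal XDP program exercising two of the verdicts handled above (compiled with clang -target bpf; program and section names are ours):

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

SEC("xdp")
int demo_drop_runts(struct xdp_md *ctx)
{
	void *data = (void *)(long)ctx->data;
	void *data_end = (void *)(long)ctx->data_end;

	/* XDP_DROP: the driver frees the buffer via free_pages() above */
	if (data + 64 > data_end)
		return XDP_DROP;

	/* XDP_PASS: the frame continues to contig_fd_to_skb() */
	return XDP_PASS;
}

char _license[] SEC("license") = "GPL";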
+
static enum qman_cb_dqrr_result rx_default_dqrr(struct qman_portal *portal,
struct qman_fq *fq,
- const struct qm_dqrr_entry *dq)
+ const struct qm_dqrr_entry *dq,
+ bool sched_napi)
{
+ bool ts_valid = false, hash_valid = false;
struct skb_shared_hwtstamps *shhwtstamps;
+ unsigned int skb_len, xdp_meta_len = 0;
struct rtnl_link_stats64 *percpu_stats;
struct dpaa_percpu_priv *percpu_priv;
const struct qm_fd *fd = &dq->fd;
dma_addr_t addr = qm_fd_addr(fd);
+ struct dpaa_napi_portal *np;
enum qm_fd_format fd_format;
struct net_device *net_dev;
u32 fd_status, hash_offset;
+ struct qm_sg_entry *sgt;
struct dpaa_bp *dpaa_bp;
+ struct dpaa_fq *dpaa_fq;
struct dpaa_priv *priv;
- unsigned int skb_len;
struct sk_buff *skb;
int *count_ptr;
+ u32 xdp_act;
void *vaddr;
+ u32 hash;
u64 ns;
+ np = container_of(&portal, struct dpaa_napi_portal, p);
+ dpaa_fq = container_of(fq, struct dpaa_fq, fq_base);
fd_status = be32_to_cpu(fd->status);
fd_format = qm_fd_get_format(fd);
- net_dev = ((struct dpaa_fq *)fq)->net_dev;
+ net_dev = dpaa_fq->net_dev;
priv = netdev_priv(net_dev);
dpaa_bp = dpaa_bpid2pool(dq->fd.bpid);
if (!dpaa_bp)
@@ -2388,7 +2658,7 @@ static enum qman_cb_dqrr_result rx_default_dqrr(struct qman_portal *portal,
percpu_priv = this_cpu_ptr(priv->percpu_priv);
percpu_stats = &percpu_priv->stats;
- if (unlikely(dpaa_eth_napi_schedule(percpu_priv, portal)))
+ if (unlikely(dpaa_eth_napi_schedule(percpu_priv, portal, sched_napi)))
return qman_cb_dqrr_stop;
/* Make sure we didn't run out of buffers */
@@ -2427,35 +2697,68 @@ static enum qman_cb_dqrr_result rx_default_dqrr(struct qman_portal *portal,
count_ptr = this_cpu_ptr(dpaa_bp->percpu_count);
(*count_ptr)--;
- if (likely(fd_format == qm_fd_contig))
+ /* Extract the timestamp stored in the headroom before running XDP */
+ if (priv->rx_tstamp) {
+ if (!fman_port_get_tstamp(priv->mac_dev->port[RX], vaddr, &ns))
+ ts_valid = true;
+ else
+ WARN_ONCE(1, "fman_port_get_tstamp failed!\n");
+ }
+
+ /* Extract the hash stored in the headroom before running XDP */
+ if (net_dev->features & NETIF_F_RXHASH && priv->keygen_in_use &&
+ !fman_port_get_hash_result_offset(priv->mac_dev->port[RX],
+ &hash_offset)) {
+ hash = be32_to_cpu(*(u32 *)(vaddr + hash_offset));
+ hash_valid = true;
+ }
+
+ if (likely(fd_format == qm_fd_contig)) {
+ xdp_act = dpaa_run_xdp(priv, (struct qm_fd *)fd, vaddr,
+ dpaa_fq, &xdp_meta_len);
+ np->xdp_act |= xdp_act;
+ if (xdp_act != XDP_PASS) {
+ percpu_stats->rx_packets++;
+ percpu_stats->rx_bytes += qm_fd_get_length(fd);
+ return qman_cb_dqrr_consume;
+ }
skb = contig_fd_to_skb(priv, fd);
- else
+ } else {
+ /* XDP doesn't support S/G frames. Return the fragments to the
+ * buffer pool and release the SGT.
+ */
+ if (READ_ONCE(priv->xdp_prog)) {
+ WARN_ONCE(1, "S/G frames not supported under XDP\n");
+ sgt = vaddr + qm_fd_get_offset(fd);
+ dpaa_release_sgt_members(sgt);
+ free_pages((unsigned long)vaddr, 0);
+ return qman_cb_dqrr_consume;
+ }
skb = sg_fd_to_skb(priv, fd);
+ }
if (!skb)
return qman_cb_dqrr_consume;
- if (priv->rx_tstamp) {
+ if (xdp_meta_len)
+ skb_metadata_set(skb, xdp_meta_len);
+
+ /* Set the previously extracted timestamp */
+ if (ts_valid) {
shhwtstamps = skb_hwtstamps(skb);
memset(shhwtstamps, 0, sizeof(*shhwtstamps));
-
- if (!fman_port_get_tstamp(priv->mac_dev->port[RX], vaddr, &ns))
- shhwtstamps->hwtstamp = ns_to_ktime(ns);
- else
- dev_warn(net_dev->dev.parent, "fman_port_get_tstamp failed!\n");
+ shhwtstamps->hwtstamp = ns_to_ktime(ns);
}
skb->protocol = eth_type_trans(skb, net_dev);
- if (net_dev->features & NETIF_F_RXHASH && priv->keygen_in_use &&
- !fman_port_get_hash_result_offset(priv->mac_dev->port[RX],
- &hash_offset)) {
+ /* Set the previously extracted hash */
+ if (hash_valid) {
enum pkt_hash_types type;
/* if L4 exists, it was used in the hash generation */
type = be32_to_cpu(fd->status) & FM_FD_STAT_L4CV ?
PKT_HASH_TYPE_L4 : PKT_HASH_TYPE_L3;
- skb_set_hash(skb, be32_to_cpu(*(u32 *)(vaddr + hash_offset)),
- type);
+ skb_set_hash(skb, hash, type);
}
skb_len = skb->len;
@@ -2473,7 +2776,8 @@ static enum qman_cb_dqrr_result rx_default_dqrr(struct qman_portal *portal,
static enum qman_cb_dqrr_result conf_error_dqrr(struct qman_portal *portal,
struct qman_fq *fq,
- const struct qm_dqrr_entry *dq)
+ const struct qm_dqrr_entry *dq,
+ bool sched_napi)
{
struct dpaa_percpu_priv *percpu_priv;
struct net_device *net_dev;
@@ -2484,7 +2788,7 @@ static enum qman_cb_dqrr_result conf_error_dqrr(struct qman_portal *portal,
percpu_priv = this_cpu_ptr(priv->percpu_priv);
- if (dpaa_eth_napi_schedule(percpu_priv, portal))
+ if (dpaa_eth_napi_schedule(percpu_priv, portal, sched_napi))
return qman_cb_dqrr_stop;
dpaa_tx_error(net_dev, priv, percpu_priv, &dq->fd, fq->fqid);
@@ -2494,7 +2798,8 @@ static enum qman_cb_dqrr_result conf_error_dqrr(struct qman_portal *portal,
static enum qman_cb_dqrr_result conf_dflt_dqrr(struct qman_portal *portal,
struct qman_fq *fq,
- const struct qm_dqrr_entry *dq)
+ const struct qm_dqrr_entry *dq,
+ bool sched_napi)
{
struct dpaa_percpu_priv *percpu_priv;
struct net_device *net_dev;
@@ -2508,7 +2813,7 @@ static enum qman_cb_dqrr_result conf_dflt_dqrr(struct qman_portal *portal,
percpu_priv = this_cpu_ptr(priv->percpu_priv);
- if (dpaa_eth_napi_schedule(percpu_priv, portal))
+ if (dpaa_eth_napi_schedule(percpu_priv, portal, sched_napi))
return qman_cb_dqrr_stop;
dpaa_tx_conf(net_dev, priv, percpu_priv, &dq->fd, fq->fqid);
@@ -2554,7 +2859,7 @@ static void dpaa_eth_napi_enable(struct dpaa_priv *priv)
for_each_online_cpu(i) {
percpu_priv = per_cpu_ptr(priv->percpu_priv, i);
- percpu_priv->np.down = 0;
+ percpu_priv->np.down = false;
napi_enable(&percpu_priv->np.napi);
}
}
@@ -2567,7 +2872,7 @@ static void dpaa_eth_napi_disable(struct dpaa_priv *priv)
for_each_online_cpu(i) {
percpu_priv = per_cpu_ptr(priv->percpu_priv, i);
- percpu_priv->np.down = 1;
+ percpu_priv->np.down = true;
napi_disable(&percpu_priv->np.napi);
}
}
@@ -2673,6 +2978,101 @@ static int dpaa_eth_stop(struct net_device *net_dev)
return err;
}
+static bool xdp_validate_mtu(struct dpaa_priv *priv, int mtu)
+{
+ int max_contig_data = priv->dpaa_bp->size - priv->rx_headroom;
+
+ /* We do not support S/G fragments when XDP is enabled.
+ * Limit the MTU in relation to the buffer size.
+ */
+ if (mtu + VLAN_ETH_HLEN + ETH_FCS_LEN > max_contig_data) {
+ dev_warn(priv->net_dev->dev.parent,
+ "The maximum MTU for XDP is %d\n",
+ max_contig_data - VLAN_ETH_HLEN - ETH_FCS_LEN);
+ return false;
+ }
+
+ return true;
+}
+
+static int dpaa_change_mtu(struct net_device *net_dev, int new_mtu)
+{
+ struct dpaa_priv *priv = netdev_priv(net_dev);
+
+ if (priv->xdp_prog && !xdp_validate_mtu(priv, new_mtu))
+ return -EINVAL;
+
+ net_dev->mtu = new_mtu;
+ return 0;
+}
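
xdp_validate_mtu() caps the MTU so that a worst-case frame (payload plus VLAN Ethernet header plus FCS) still fits one contiguous buffer after the RX headroom. A worked example with illustrative numbers; the real operands are priv->dpaa_bp->size and priv->rx_headroom, and 4096/256 here are assumptions:

#include <stdio.h>

#define VLAN_ETH_HLEN	18	/* Ethernet header + one VLAN tag */
#define ETH_FCS_LEN	4

int main(void)
{
	int buf_size = 4096, rx_headroom = 256;
	int max_contig_data = buf_size - rx_headroom;	/* 3840 */

	printf("max XDP MTU = %d\n",
	       max_contig_data - VLAN_ETH_HLEN - ETH_FCS_LEN);
	return 0;	/* prints 3818 */
}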
+
+static int dpaa_setup_xdp(struct net_device *net_dev, struct netdev_bpf *bpf)
+{
+ struct dpaa_priv *priv = netdev_priv(net_dev);
+ struct bpf_prog *old_prog;
+ int err;
+ bool up;
+
+ /* S/G fragments are not supported in XDP-mode */
+ if (bpf->prog && !xdp_validate_mtu(priv, net_dev->mtu)) {
+ NL_SET_ERR_MSG_MOD(bpf->extack, "MTU too large for XDP");
+ return -EINVAL;
+ }
+
+ up = netif_running(net_dev);
+
+ if (up)
+ dpaa_eth_stop(net_dev);
+
+ old_prog = xchg(&priv->xdp_prog, bpf->prog);
+ if (old_prog)
+ bpf_prog_put(old_prog);
+
+ if (up) {
+ err = dpaa_open(net_dev);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(bpf->extack, "dpaa_open() failed");
+ return err;
+ }
+ }
+
+ return 0;
+}
+
+static int dpaa_xdp(struct net_device *net_dev, struct netdev_bpf *xdp)
+{
+ switch (xdp->command) {
+ case XDP_SETUP_PROG:
+ return dpaa_setup_xdp(net_dev, xdp);
+ default:
+ return -EINVAL;
+ }
+}
+
+static int dpaa_xdp_xmit(struct net_device *net_dev, int n,
+ struct xdp_frame **frames, u32 flags)
+{
+ struct xdp_frame *xdpf;
+ int i, err, drops = 0;
+
+ if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
+ return -EINVAL;
+
+ if (!netif_running(net_dev))
+ return -ENETDOWN;
+
+ for (i = 0; i < n; i++) {
+ xdpf = frames[i];
+ err = dpaa_xdp_xmit_frame(net_dev, xdpf);
+ if (err) {
+ xdp_return_frame_rx_napi(xdpf);
+ drops++;
+ }
+ }
+
+ return n - drops;
+}
+
static int dpaa_ts_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
struct dpaa_priv *priv = netdev_priv(dev);
@@ -2739,6 +3139,9 @@ static const struct net_device_ops dpaa_ops = {
.ndo_set_rx_mode = dpaa_set_rx_mode,
.ndo_do_ioctl = dpaa_ioctl,
.ndo_setup_tc = dpaa_setup_tc,
+ .ndo_change_mtu = dpaa_change_mtu,
+ .ndo_bpf = dpaa_xdp,
+ .ndo_xdp_xmit = dpaa_xdp_xmit,
};
static int dpaa_napi_add(struct net_device *net_dev)
@@ -2870,10 +3273,16 @@ static u16 dpaa_get_headroom(struct dpaa_buffer_layout *bl,
*/
headroom = (u16)(bl[port].priv_data_size + DPAA_HWA_SIZE);
- if (port == RX)
+ if (port == RX) {
+#ifdef CONFIG_DPAA_ERRATUM_A050385
+ if (unlikely(fman_has_errata_a050385()))
+ headroom = XDP_PACKET_HEADROOM;
+#endif
+
return ALIGN(headroom, DPAA_FD_RX_DATA_ALIGNMENT);
- else
+ } else {
return ALIGN(headroom, DPAA_FD_DATA_ALIGNMENT);
+ }
}
static int dpaa_eth_probe(struct platform_device *pdev)
diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.h b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.h
index fc2cc4c48e06..daf894a97050 100644
--- a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.h
+++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.h
@@ -68,6 +68,7 @@ struct dpaa_fq {
u16 channel;
u8 wq;
enum dpaa_fq_type fq_type;
+ struct xdp_rxq_info xdp_rxq;
};
struct dpaa_fq_cbs {
@@ -126,6 +127,7 @@ struct dpaa_napi_portal {
struct napi_struct napi;
struct qman_portal *p;
bool down;
+ int xdp_act;
};
struct dpaa_percpu_priv {
@@ -144,6 +146,15 @@ struct dpaa_buffer_layout {
u16 priv_data_size;
};
+/* Information to be used on the Tx confirmation path. Stored just
+ * before the start of the transmit buffer. Maximum size allowed
+ * is DPAA_TX_PRIV_DATA_SIZE bytes.
+ */
+struct dpaa_eth_swbp {
+ struct sk_buff *skb;
+ struct xdp_frame *xdpf;
+};
+
struct dpaa_priv {
struct dpaa_percpu_priv __percpu *percpu_priv;
struct dpaa_bp *dpaa_bp;
@@ -188,6 +199,8 @@ struct dpaa_priv {
bool tx_tstamp; /* Tx timestamping enabled */
bool rx_tstamp; /* Rx timestamping enabled */
+
+ struct bpf_prog *xdp_prog;
};
/* from dpaa_ethtool.c */
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
index cf9400a9886d..a0a30c721fe7 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
@@ -686,7 +686,7 @@ static void dpaa2_eth_enable_tx_tstamp(struct dpaa2_eth_priv *priv,
if (skb->cb[0] == TX_TSTAMP_ONESTEP_SYNC) {
if (dpaa2_eth_ptp_parse(skb, &msgtype, &twostep, &udp,
&offset1, &offset2) ||
- msgtype != 0 || twostep) {
+ msgtype != PTP_MSGTYPE_SYNC || twostep) {
WARN_ONCE(1, "Bad packet for one-step timestamping\n");
return;
}
@@ -1212,7 +1212,7 @@ static netdev_tx_t dpaa2_eth_tx(struct sk_buff *skb, struct net_device *net_dev)
if (skb->cb[0] == TX_TSTAMP_ONESTEP_SYNC) {
if (!dpaa2_eth_ptp_parse(skb, &msgtype, &twostep, &udp,
&offset1, &offset2))
- if (msgtype == 0 && twostep == 0) {
+ if (msgtype == PTP_MSGTYPE_SYNC && twostep == 0) {
skb_queue_tail(&priv->tx_skbs, skb);
queue_work(priv->dpaa2_ptp_wq,
&priv->tx_onestep_tstamp);
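
Both hunks above replace a bare 0 with PTP_MSGTYPE_SYNC, naming the Sync message type instead of relying on a magic constant. In PTPv2 the message type occupies the low nibble of the first header byte; a small sketch of extracting it (helper name is ours, constants match linux/ptp_classify.h):

#include <stdint.h>

#define PTP_MSGTYPE_SYNC	0x0
#define PTP_MSGTYPE_DELAY_REQ	0x1

static inline uint8_t demo_ptp_msgtype(const uint8_t *ptp_hdr)
{
	/* the high nibble carries transportSpecific/majorSdoId */
	return ptp_hdr[0] & 0x0f;
}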
diff --git a/drivers/net/ethernet/freescale/enetc/enetc.c b/drivers/net/ethernet/freescale/enetc/enetc.c
index fc2075ea57fe..c78d12229730 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc.c
@@ -50,40 +50,6 @@ drop_packet_err:
return NETDEV_TX_OK;
}
-static bool enetc_tx_csum(struct sk_buff *skb, union enetc_tx_bd *txbd)
-{
- int l3_start, l3_hsize;
- u16 l3_flags, l4_flags;
-
- if (skb->ip_summed != CHECKSUM_PARTIAL)
- return false;
-
- switch (skb->csum_offset) {
- case offsetof(struct tcphdr, check):
- l4_flags = ENETC_TXBD_L4_TCP;
- break;
- case offsetof(struct udphdr, check):
- l4_flags = ENETC_TXBD_L4_UDP;
- break;
- default:
- skb_checksum_help(skb);
- return false;
- }
-
- l3_start = skb_network_offset(skb);
- l3_hsize = skb_network_header_len(skb);
-
- l3_flags = 0;
- if (skb->protocol == htons(ETH_P_IPV6))
- l3_flags = ENETC_TXBD_L3_IPV6;
-
- /* write BD fields */
- txbd->l3_csoff = enetc_txbd_l3_csoff(l3_start, l3_hsize, l3_flags);
- txbd->l4_csoff = l4_flags;
-
- return true;
-}
-
static void enetc_unmap_tx_buff(struct enetc_bdr *tx_ring,
struct enetc_tx_swbd *tx_swbd)
{
@@ -149,22 +115,16 @@ static int enetc_map_tx_buffs(struct enetc_bdr *tx_ring, struct sk_buff *skb,
if (do_vlan || do_tstamp)
flags |= ENETC_TXBD_FLAGS_EX;
- if (enetc_tx_csum(skb, &temp_bd))
- flags |= ENETC_TXBD_FLAGS_CSUM | ENETC_TXBD_FLAGS_L4CS;
- else if (tx_ring->tsd_enable)
+ if (tx_ring->tsd_enable)
flags |= ENETC_TXBD_FLAGS_TSE | ENETC_TXBD_FLAGS_TXSTART;
/* first BD needs frm_len and offload flags set */
temp_bd.frm_len = cpu_to_le16(skb->len);
temp_bd.flags = flags;
- if (flags & ENETC_TXBD_FLAGS_TSE) {
- u32 temp;
-
- temp = (skb->skb_mstamp_ns >> 5 & ENETC_TXBD_TXSTART_MASK)
- | (flags << ENETC_TXBD_FLAGS_OFFSET);
- temp_bd.txstart = cpu_to_le32(temp);
- }
+ if (flags & ENETC_TXBD_FLAGS_TSE)
+ temp_bd.txstart = enetc_txbd_set_tx_start(skb->skb_mstamp_ns,
+ flags);
if (flags & ENETC_TXBD_FLAGS_EX) {
u8 e_flags = 0;
@@ -1925,8 +1885,7 @@ static void enetc_kfree_si(struct enetc_si *si)
static void enetc_detect_errata(struct enetc_si *si)
{
if (si->pdev->revision == ENETC_REV1)
- si->errata = ENETC_ERR_TXCSUM | ENETC_ERR_VLAN_ISOL |
- ENETC_ERR_UCMCSWP;
+ si->errata = ENETC_ERR_VLAN_ISOL | ENETC_ERR_UCMCSWP;
}
int enetc_pci_probe(struct pci_dev *pdev, const char *name, int sizeof_priv)
diff --git a/drivers/net/ethernet/freescale/enetc/enetc.h b/drivers/net/ethernet/freescale/enetc/enetc.h
index dd0fb0c066d7..8532d23b54f5 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc.h
+++ b/drivers/net/ethernet/freescale/enetc/enetc.h
@@ -147,9 +147,8 @@ struct enetc_msg_swbd {
#define ENETC_REV1 0x1
enum enetc_errata {
- ENETC_ERR_TXCSUM = BIT(0),
- ENETC_ERR_VLAN_ISOL = BIT(1),
- ENETC_ERR_UCMCSWP = BIT(2),
+ ENETC_ERR_VLAN_ISOL = BIT(0),
+ ENETC_ERR_UCMCSWP = BIT(1),
};
#define ENETC_SI_F_QBV BIT(0)
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_hw.h b/drivers/net/ethernet/freescale/enetc/enetc_hw.h
index eb6bbf1113c7..d18f439f2b81 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_hw.h
+++ b/drivers/net/ethernet/freescale/enetc/enetc_hw.h
@@ -477,8 +477,7 @@ union enetc_tx_bd {
__le16 frm_len;
union {
struct {
- __le16 l3_csoff;
- u8 l4_csoff;
+ u8 reserved[3];
u8 flags;
}; /* default layout */
__le32 txstart;
@@ -501,41 +500,37 @@ union enetc_tx_bd {
} wb; /* writeback descriptor */
};
-#define ENETC_TXBD_FLAGS_L4CS BIT(0)
-#define ENETC_TXBD_FLAGS_TSE BIT(1)
-#define ENETC_TXBD_FLAGS_W BIT(2)
-#define ENETC_TXBD_FLAGS_CSUM BIT(3)
-#define ENETC_TXBD_FLAGS_TXSTART BIT(4)
-#define ENETC_TXBD_FLAGS_EX BIT(6)
-#define ENETC_TXBD_FLAGS_F BIT(7)
+enum enetc_txbd_flags {
+ ENETC_TXBD_FLAGS_RES0 = BIT(0), /* reserved */
+ ENETC_TXBD_FLAGS_TSE = BIT(1),
+ ENETC_TXBD_FLAGS_W = BIT(2),
+ ENETC_TXBD_FLAGS_RES3 = BIT(3), /* reserved */
+ ENETC_TXBD_FLAGS_TXSTART = BIT(4),
+ ENETC_TXBD_FLAGS_EX = BIT(6),
+ ENETC_TXBD_FLAGS_F = BIT(7)
+};
#define ENETC_TXBD_TXSTART_MASK GENMASK(24, 0)
#define ENETC_TXBD_FLAGS_OFFSET 24
+
+static inline __le32 enetc_txbd_set_tx_start(u64 tx_start, u8 flags)
+{
+ u32 temp;
+
+ temp = (tx_start >> 5 & ENETC_TXBD_TXSTART_MASK) |
+ (flags << ENETC_TXBD_FLAGS_OFFSET);
+
+ return cpu_to_le32(temp);
+}
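
enetc_txbd_set_tx_start() packs the launch-time word: the nanosecond timestamp is scaled down by 32 (>> 5), masked to bits 24:0, and OR-ed with the flags byte shifted into the top bits. A standalone check of the packing (mask copied from ENETC_TXBD_TXSTART_MASK; demo names are ours):

#include <stdint.h>
#include <stdio.h>

#define DEMO_TXSTART_MASK	0x01ffffffu	/* GENMASK(24, 0) */
#define DEMO_FLAGS_OFFSET	24

static uint32_t demo_set_tx_start(uint64_t tx_start_ns, uint8_t flags)
{
	return ((uint32_t)(tx_start_ns >> 5) & DEMO_TXSTART_MASK) |
	       ((uint32_t)flags << DEMO_FLAGS_OFFSET);
}

int main(void)
{
	printf("%#010x\n", demo_set_tx_start(0x1000, 0x12));
	return 0;	/* prints 0x12000080 */
}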
+
static inline void enetc_clear_tx_bd(union enetc_tx_bd *txbd)
{
memset(txbd, 0, sizeof(*txbd));
}
-/* L3 csum flags */
-#define ENETC_TXBD_L3_IPCS BIT(7)
-#define ENETC_TXBD_L3_IPV6 BIT(15)
-
-#define ENETC_TXBD_L3_START_MASK GENMASK(6, 0)
-#define ENETC_TXBD_L3_SET_HSIZE(val) ((((val) >> 2) & 0x7f) << 8)
-
/* Extension flags */
#define ENETC_TXBD_E_FLAGS_VLAN_INS BIT(0)
#define ENETC_TXBD_E_FLAGS_TWO_STEP_PTP BIT(2)
-static inline __le16 enetc_txbd_l3_csoff(int start, int hdr_sz, u16 l3_flags)
-{
- return cpu_to_le16(l3_flags | ENETC_TXBD_L3_SET_HSIZE(hdr_sz) |
- (start & ENETC_TXBD_L3_START_MASK));
-}
-
-/* L4 csum flags */
-#define ENETC_TXBD_L4_UDP BIT(5)
-#define ENETC_TXBD_L4_TCP BIT(6)
-
union enetc_rx_bd {
struct {
__le64 addr;
@@ -580,10 +575,10 @@ struct enetc_cmd_rfse {
u8 smac_m[6];
u8 dmac_h[6];
u8 dmac_m[6];
- u32 sip_h[4];
- u32 sip_m[4];
- u32 dip_h[4];
- u32 dip_m[4];
+ __be32 sip_h[4];
+ __be32 sip_m[4];
+ __be32 dip_h[4];
+ __be32 dip_m[4];
u16 ethtype_h;
u16 ethtype_m;
u16 ethtype4_h;
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_pf.c b/drivers/net/ethernet/freescale/enetc/enetc_pf.c
index 419306342ac5..ecdc2af8c292 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_pf.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc_pf.c
@@ -714,22 +714,16 @@ static void enetc_pf_netdev_setup(struct enetc_si *si, struct net_device *ndev,
ndev->watchdog_timeo = 5 * HZ;
ndev->max_mtu = ENETC_MAX_MTU;
- ndev->hw_features = NETIF_F_SG | NETIF_F_RXCSUM | NETIF_F_HW_CSUM |
+ ndev->hw_features = NETIF_F_SG | NETIF_F_RXCSUM |
NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX |
NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_LOOPBACK;
- ndev->features = NETIF_F_HIGHDMA | NETIF_F_SG |
- NETIF_F_RXCSUM | NETIF_F_HW_CSUM |
+ ndev->features = NETIF_F_HIGHDMA | NETIF_F_SG | NETIF_F_RXCSUM |
NETIF_F_HW_VLAN_CTAG_TX |
NETIF_F_HW_VLAN_CTAG_RX;
if (si->num_rss)
ndev->hw_features |= NETIF_F_RXHASH;
- if (si->errata & ENETC_ERR_TXCSUM) {
- ndev->hw_features &= ~NETIF_F_HW_CSUM;
- ndev->features &= ~NETIF_F_HW_CSUM;
- }
-
ndev->priv_flags |= IFF_UNICAST_FLT;
if (si->hw_features & ENETC_SI_F_QBV)
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_qos.c b/drivers/net/ethernet/freescale/enetc/enetc_qos.c
index dbceb99c4441..a9aee219fb58 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_qos.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc_qos.c
@@ -118,8 +118,8 @@ static int enetc_setup_taprio(struct net_device *ndev,
return -ENOMEM;
}
- cbd.addr[0] = lower_32_bits(dma);
- cbd.addr[1] = upper_32_bits(dma);
+ cbd.addr[0] = cpu_to_le32(lower_32_bits(dma));
+ cbd.addr[1] = cpu_to_le32(upper_32_bits(dma));
cbd.cls = BDCR_CMD_PORT_GCL;
cbd.status_flags = 0;
@@ -496,16 +496,15 @@ static int enetc_streamid_hw_set(struct enetc_ndev_priv *priv,
return -ENOMEM;
}
- cbd.addr[0] = lower_32_bits(dma);
- cbd.addr[1] = upper_32_bits(dma);
+ cbd.addr[0] = cpu_to_le32(lower_32_bits(dma));
+ cbd.addr[1] = cpu_to_le32(upper_32_bits(dma));
eth_broadcast_addr(si_data->dmac);
- si_data->vid_vidm_tg =
- cpu_to_le16(ENETC_CBDR_SID_VID_MASK
- + ((0x3 << 14) | ENETC_CBDR_SID_VIDM));
+ si_data->vid_vidm_tg = (ENETC_CBDR_SID_VID_MASK
+ + ((0x3 << 14) | ENETC_CBDR_SID_VIDM));
si_conf = &cbd.sid_set;
/* Only one port supported for one entry, set itself */
- si_conf->iports = 1 << enetc_get_port(priv);
+ si_conf->iports = cpu_to_le32(1 << enetc_get_port(priv));
si_conf->id_type = 1;
si_conf->oui[2] = 0x0;
si_conf->oui[1] = 0x80;
@@ -530,7 +529,7 @@ static int enetc_streamid_hw_set(struct enetc_ndev_priv *priv,
si_conf->en = 0x80;
si_conf->stream_handle = cpu_to_le32(sid->handle);
- si_conf->iports = 1 << enetc_get_port(priv);
+ si_conf->iports = cpu_to_le32(1 << enetc_get_port(priv));
si_conf->id_type = sid->filtertype;
si_conf->oui[2] = 0x0;
si_conf->oui[1] = 0x80;
@@ -540,8 +539,8 @@ static int enetc_streamid_hw_set(struct enetc_ndev_priv *priv,
cbd.length = cpu_to_le16(data_size);
- cbd.addr[0] = lower_32_bits(dma);
- cbd.addr[1] = upper_32_bits(dma);
+ cbd.addr[0] = cpu_to_le32(lower_32_bits(dma));
+ cbd.addr[1] = cpu_to_le32(upper_32_bits(dma));
/* VIDM default to be 1.
* VID Match. If set (b1) then the VID must match, otherwise
@@ -550,16 +549,14 @@ static int enetc_streamid_hw_set(struct enetc_ndev_priv *priv,
*/
if (si_conf->id_type == STREAMID_TYPE_NULL) {
ether_addr_copy(si_data->dmac, sid->dst_mac);
- si_data->vid_vidm_tg =
- cpu_to_le16((sid->vid & ENETC_CBDR_SID_VID_MASK) +
- ((((u16)(sid->tagged) & 0x3) << 14)
- | ENETC_CBDR_SID_VIDM));
+ si_data->vid_vidm_tg = (sid->vid & ENETC_CBDR_SID_VID_MASK) +
+ ((((u16)(sid->tagged) & 0x3) << 14)
+ | ENETC_CBDR_SID_VIDM);
} else if (si_conf->id_type == STREAMID_TYPE_SMAC) {
ether_addr_copy(si_data->smac, sid->src_mac);
- si_data->vid_vidm_tg =
- cpu_to_le16((sid->vid & ENETC_CBDR_SID_VID_MASK) +
- ((((u16)(sid->tagged) & 0x3) << 14)
- | ENETC_CBDR_SID_VIDM));
+ si_data->vid_vidm_tg = (sid->vid & ENETC_CBDR_SID_VID_MASK) +
+ ((((u16)(sid->tagged) & 0x3) << 14)
+ | ENETC_CBDR_SID_VIDM);
}
err = enetc_send_cmd(priv->si, &cbd);
@@ -594,7 +591,7 @@ static int enetc_streamfilter_hw_set(struct enetc_ndev_priv *priv,
}
sfi_config->sg_inst_table_index = cpu_to_le16(sfi->gate_id);
- sfi_config->input_ports = 1 << enetc_get_port(priv);
+ sfi_config->input_ports = cpu_to_le32(1 << enetc_get_port(priv));
/* The priority value which may be matched against the
* frame’s priority value to determine a match for this entry.
@@ -648,8 +645,8 @@ static int enetc_streamcounter_hw_get(struct enetc_ndev_priv *priv,
err = -ENOMEM;
goto exit;
}
- cbd.addr[0] = lower_32_bits(dma);
- cbd.addr[1] = upper_32_bits(dma);
+ cbd.addr[0] = cpu_to_le32(lower_32_bits(dma));
+ cbd.addr[1] = cpu_to_le32(upper_32_bits(dma));
cbd.length = cpu_to_le16(data_size);
@@ -657,28 +654,25 @@ static int enetc_streamcounter_hw_get(struct enetc_ndev_priv *priv,
if (err)
goto exit;
- cnt->matching_frames_count =
- ((u64)le32_to_cpu(data_buf->matchh) << 32)
- + data_buf->matchl;
+ cnt->matching_frames_count = ((u64)data_buf->matchh << 32) +
+ data_buf->matchl;
- cnt->not_passing_sdu_count =
- ((u64)le32_to_cpu(data_buf->msdu_droph) << 32)
- + data_buf->msdu_dropl;
+ cnt->not_passing_sdu_count = ((u64)data_buf->msdu_droph << 32) +
+ data_buf->msdu_dropl;
cnt->passing_sdu_count = cnt->matching_frames_count
- cnt->not_passing_sdu_count;
cnt->not_passing_frames_count =
- ((u64)le32_to_cpu(data_buf->stream_gate_droph) << 32)
- + le32_to_cpu(data_buf->stream_gate_dropl);
+ ((u64)data_buf->stream_gate_droph << 32) +
+ data_buf->stream_gate_dropl;
- cnt->passing_frames_count = cnt->matching_frames_count
- - cnt->not_passing_sdu_count
- - cnt->not_passing_frames_count;
+ cnt->passing_frames_count = cnt->matching_frames_count -
+ cnt->not_passing_sdu_count -
+ cnt->not_passing_frames_count;
- cnt->red_frames_count =
- ((u64)le32_to_cpu(data_buf->flow_meter_droph) << 32)
- + le32_to_cpu(data_buf->flow_meter_dropl);
+ cnt->red_frames_count = ((u64)data_buf->flow_meter_droph << 32) +
+ data_buf->flow_meter_dropl;
exit:
kfree(data_buf);
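
For reference, the counter math above is the standard hi/lo splice; the cast on the high word is what makes the shift 64-bit. A tiny self-contained check (names hypothetical):

#include <stdint.h>
#include <stdio.h>

/* Rebuild a 64-bit hardware counter from its 32-bit halves. */
static uint64_t counter64(uint32_t hi, uint32_t lo)
{
	return ((uint64_t)hi << 32) + lo;	/* cast first, then shift */
}

int main(void)
{
	/* hi = 1, lo = 2 -> 2^32 + 2 */
	printf("%llu\n", (unsigned long long)counter64(1, 2));
	return 0;
}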
@@ -785,15 +779,15 @@ static int enetc_streamgate_hw_set(struct enetc_ndev_priv *priv,
return -ENOMEM;
}
- cbd.addr[0] = lower_32_bits(dma);
- cbd.addr[1] = upper_32_bits(dma);
+ cbd.addr[0] = cpu_to_le32(lower_32_bits(dma));
+ cbd.addr[1] = cpu_to_le32(upper_32_bits(dma));
sgce = &sgcl_data->sgcl[0];
sgcl_config->agtst = 0x80;
- sgcl_data->ct = cpu_to_le32(sgi->cycletime);
- sgcl_data->cte = cpu_to_le32(sgi->cycletimext);
+ sgcl_data->ct = sgi->cycletime;
+ sgcl_data->cte = sgi->cycletimext;
if (sgi->init_ipv >= 0)
sgcl_config->aipv = (sgi->init_ipv & 0x7) | 0x8;
@@ -815,7 +809,7 @@ static int enetc_streamgate_hw_set(struct enetc_ndev_priv *priv,
to->msdu[2] = (from->maxoctets >> 16) & 0xFF;
}
- to->interval = cpu_to_le32(from->interval);
+ to->interval = from->interval;
}
/* If basetime is less than now, calculate start time */
@@ -827,15 +821,15 @@ static int enetc_streamgate_hw_set(struct enetc_ndev_priv *priv,
err = get_start_ns(now, sgi->cycletime, &start);
if (err)
goto exit;
- sgcl_data->btl = cpu_to_le32(lower_32_bits(start));
- sgcl_data->bth = cpu_to_le32(upper_32_bits(start));
+ sgcl_data->btl = lower_32_bits(start);
+ sgcl_data->bth = upper_32_bits(start);
} else {
u32 hi, lo;
hi = upper_32_bits(sgi->basetime);
lo = lower_32_bits(sgi->basetime);
- sgcl_data->bth = cpu_to_le32(hi);
- sgcl_data->btl = cpu_to_le32(lo);
+ sgcl_data->bth = hi;
+ sgcl_data->btl = lo;
}
err = enetc_send_cmd(priv->si, &cbd);
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_vf.c b/drivers/net/ethernet/freescale/enetc/enetc_vf.c
index 7b5c82c7e4e5..39c1a09e69a9 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_vf.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc_vf.c
@@ -120,22 +120,16 @@ static void enetc_vf_netdev_setup(struct enetc_si *si, struct net_device *ndev,
ndev->watchdog_timeo = 5 * HZ;
ndev->max_mtu = ENETC_MAX_MTU;
- ndev->hw_features = NETIF_F_SG | NETIF_F_RXCSUM | NETIF_F_HW_CSUM |
+ ndev->hw_features = NETIF_F_SG | NETIF_F_RXCSUM |
NETIF_F_HW_VLAN_CTAG_TX |
NETIF_F_HW_VLAN_CTAG_RX;
- ndev->features = NETIF_F_HIGHDMA | NETIF_F_SG |
- NETIF_F_RXCSUM | NETIF_F_HW_CSUM |
+ ndev->features = NETIF_F_HIGHDMA | NETIF_F_SG | NETIF_F_RXCSUM |
NETIF_F_HW_VLAN_CTAG_TX |
NETIF_F_HW_VLAN_CTAG_RX;
if (si->num_rss)
ndev->hw_features |= NETIF_F_RXHASH;
- if (si->errata & ENETC_ERR_TXCSUM) {
- ndev->hw_features &= ~NETIF_F_HW_CSUM;
- ndev->features &= ~NETIF_F_HW_CSUM;
- }
-
/* pick up primary MAC address from SI */
enetc_get_primary_mac_addr(&si->hw, ndev->dev_addr);
}
diff --git a/drivers/net/ethernet/freescale/ucc_geth.h b/drivers/net/ethernet/freescale/ucc_geth.h
index 3fe903972195..1a9bdf66a7d8 100644
--- a/drivers/net/ethernet/freescale/ucc_geth.h
+++ b/drivers/net/ethernet/freescale/ucc_geth.h
@@ -882,7 +882,6 @@ struct ucc_geth_hardware_statistics {
addresses */
#define TX_TIMEOUT (1*HZ)
-#define SKB_ALLOC_TIMEOUT 100000
#define PHY_INIT_TIMEOUT 100000
#define PHY_CHANGE_TIME 2
diff --git a/drivers/net/ethernet/google/gve/gve_ethtool.c b/drivers/net/ethernet/google/gve/gve_ethtool.c
index 7b44769bd87c..2fb197fd3daf 100644
--- a/drivers/net/ethernet/google/gve/gve_ethtool.c
+++ b/drivers/net/ethernet/google/gve/gve_ethtool.c
@@ -4,6 +4,7 @@
* Copyright (C) 2015-2019 Google, Inc.
*/
+#include <linux/ethtool.h>
#include <linux/rtnetlink.h>
#include "gve.h"
#include "gve_adminq.h"
diff --git a/drivers/net/ethernet/hisilicon/hns3/hnae3.h b/drivers/net/ethernet/hisilicon/hns3/hnae3.h
index 912c51e327d6..78b48861ff8b 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hnae3.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hnae3.h
@@ -25,6 +25,7 @@
#include <linux/dcbnl.h>
#include <linux/delay.h>
#include <linux/device.h>
+#include <linux/ethtool.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
@@ -80,12 +81,13 @@ enum HNAE3_DEV_CAP_BITS {
HNAE3_DEV_SUPPORT_FD_FORWARD_TC_B,
HNAE3_DEV_SUPPORT_PTP_B,
HNAE3_DEV_SUPPORT_INT_QL_B,
- HNAE3_DEV_SUPPORT_SIMPLE_BD_B,
+ HNAE3_DEV_SUPPORT_HW_TX_CSUM_B,
HNAE3_DEV_SUPPORT_TX_PUSH_B,
HNAE3_DEV_SUPPORT_PHY_IMP_B,
HNAE3_DEV_SUPPORT_TQP_TXRX_INDEP_B,
HNAE3_DEV_SUPPORT_HW_PAD_B,
HNAE3_DEV_SUPPORT_STASH_B,
+ HNAE3_DEV_SUPPORT_UDP_TUNNEL_CSUM_B,
};
#define hnae3_dev_fd_supported(hdev) \
@@ -112,8 +114,8 @@ enum HNAE3_DEV_CAP_BITS {
#define hnae3_dev_int_ql_supported(hdev) \
test_bit(HNAE3_DEV_SUPPORT_INT_QL_B, (hdev)->ae_dev->caps)
-#define hnae3_dev_simple_bd_supported(hdev) \
- test_bit(HNAE3_DEV_SUPPORT_SIMPLE_BD_B, (hdev)->ae_dev->caps)
+#define hnae3_dev_hw_csum_supported(hdev) \
+ test_bit(HNAE3_DEV_SUPPORT_HW_TX_CSUM_B, (hdev)->ae_dev->caps)
#define hnae3_dev_tx_push_supported(hdev) \
test_bit(HNAE3_DEV_SUPPORT_TX_PUSH_B, (hdev)->ae_dev->caps)
@@ -278,6 +280,7 @@ struct hnae3_dev_specs {
u16 rss_ind_tbl_size;
u16 rss_key_size;
u16 int_ql_max; /* max value of interrupt coalesce based on INT_QL */
+ u16 max_int_gl; /* max value of interrupt coalesce based on INT_GL */
u8 max_non_tso_bd_num; /* max BD number of one non-TSO packet */
};
@@ -688,6 +691,7 @@ struct hnae3_knic_private_info {
struct hnae3_roce_private_info {
struct net_device *netdev;
void __iomem *roce_io_base;
+ void __iomem *roce_mem_base;
int base_vector;
int num_vectors;
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c b/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c
index dc9a85745e62..cb26742e2ed8 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c
@@ -178,6 +178,8 @@ static int hns3_dbg_bd_info(struct hnae3_handle *h, const char *cmd_buf)
u32 tx_index, rx_index;
u32 q_num, value;
dma_addr_t addr;
+ u16 mss_hw_csum;
+ u32 l234info;
int cnt;
cnt = sscanf(&cmd_buf[8], "%u %u", &q_num, &tx_index);
@@ -206,26 +208,46 @@ static int hns3_dbg_bd_info(struct hnae3_handle *h, const char *cmd_buf)
tx_desc = &ring->desc[tx_index];
addr = le64_to_cpu(tx_desc->addr);
+ mss_hw_csum = le16_to_cpu(tx_desc->tx.mss_hw_csum);
dev_info(dev, "TX Queue Num: %u, BD Index: %u\n", q_num, tx_index);
dev_info(dev, "(TX)addr: %pad\n", &addr);
dev_info(dev, "(TX)vlan_tag: %u\n", le16_to_cpu(tx_desc->tx.vlan_tag));
dev_info(dev, "(TX)send_size: %u\n",
le16_to_cpu(tx_desc->tx.send_size));
- dev_info(dev, "(TX)vlan_tso: %u\n", tx_desc->tx.type_cs_vlan_tso);
- dev_info(dev, "(TX)l2_len: %u\n", tx_desc->tx.l2_len);
- dev_info(dev, "(TX)l3_len: %u\n", tx_desc->tx.l3_len);
- dev_info(dev, "(TX)l4_len: %u\n", tx_desc->tx.l4_len);
+
+ if (mss_hw_csum & BIT(HNS3_TXD_HW_CS_B)) {
+ u32 offset = le32_to_cpu(tx_desc->tx.ol_type_vlan_len_msec);
+ u32 start = le32_to_cpu(tx_desc->tx.type_cs_vlan_tso_len);
+
+ dev_info(dev, "(TX)csum start: %u\n",
+ hnae3_get_field(start,
+ HNS3_TXD_CSUM_START_M,
+ HNS3_TXD_CSUM_START_S));
+ dev_info(dev, "(TX)csum offset: %u\n",
+ hnae3_get_field(offset,
+ HNS3_TXD_CSUM_OFFSET_M,
+ HNS3_TXD_CSUM_OFFSET_S));
+ } else {
+ dev_info(dev, "(TX)vlan_tso: %u\n",
+ tx_desc->tx.type_cs_vlan_tso);
+ dev_info(dev, "(TX)l2_len: %u\n", tx_desc->tx.l2_len);
+ dev_info(dev, "(TX)l3_len: %u\n", tx_desc->tx.l3_len);
+ dev_info(dev, "(TX)l4_len: %u\n", tx_desc->tx.l4_len);
+ dev_info(dev, "(TX)vlan_msec: %u\n",
+ tx_desc->tx.ol_type_vlan_msec);
+ dev_info(dev, "(TX)ol2_len: %u\n", tx_desc->tx.ol2_len);
+ dev_info(dev, "(TX)ol3_len: %u\n", tx_desc->tx.ol3_len);
+ dev_info(dev, "(TX)ol4_len: %u\n", tx_desc->tx.ol4_len);
+ }
+
dev_info(dev, "(TX)vlan_tag: %u\n",
le16_to_cpu(tx_desc->tx.outer_vlan_tag));
dev_info(dev, "(TX)tv: %u\n", le16_to_cpu(tx_desc->tx.tv));
- dev_info(dev, "(TX)vlan_msec: %u\n", tx_desc->tx.ol_type_vlan_msec);
- dev_info(dev, "(TX)ol2_len: %u\n", tx_desc->tx.ol2_len);
- dev_info(dev, "(TX)ol3_len: %u\n", tx_desc->tx.ol3_len);
- dev_info(dev, "(TX)ol4_len: %u\n", tx_desc->tx.ol4_len);
- dev_info(dev, "(TX)paylen: %u\n", le32_to_cpu(tx_desc->tx.paylen));
+ dev_info(dev, "(TX)paylen_ol4cs: %u\n",
+ le32_to_cpu(tx_desc->tx.paylen_ol4cs));
dev_info(dev, "(TX)vld_ra_ri: %u\n",
le16_to_cpu(tx_desc->tx.bdtp_fe_sc_vld_ra_ri));
- dev_info(dev, "(TX)mss: %u\n", le16_to_cpu(tx_desc->tx.mss));
+ dev_info(dev, "(TX)mss_hw_csum: %u\n", mss_hw_csum);
ring = &priv->ring[q_num + h->kinfo.num_tqps];
value = readl_relaxed(ring->tqp->io_base + HNS3_RING_RX_RING_TAIL_REG);
@@ -233,10 +255,21 @@ static int hns3_dbg_bd_info(struct hnae3_handle *h, const char *cmd_buf)
rx_desc = &ring->desc[rx_index];
addr = le64_to_cpu(rx_desc->addr);
+ l234info = le32_to_cpu(rx_desc->rx.l234_info);
dev_info(dev, "RX Queue Num: %u, BD Index: %u\n", q_num, rx_index);
dev_info(dev, "(RX)addr: %pad\n", &addr);
- dev_info(dev, "(RX)l234_info: %u\n",
- le32_to_cpu(rx_desc->rx.l234_info));
+ dev_info(dev, "(RX)l234_info: %u\n", l234info);
+
+ if (l234info & BIT(HNS3_RXD_L2_CSUM_B)) {
+ u32 lo, hi;
+
+ lo = hnae3_get_field(l234info, HNS3_RXD_L2_CSUM_L_M,
+ HNS3_RXD_L2_CSUM_L_S);
+ hi = hnae3_get_field(l234info, HNS3_RXD_L2_CSUM_H_M,
+ HNS3_RXD_L2_CSUM_H_S);
+ dev_info(dev, "(RX)csum: %u\n", lo | hi << 8);
+ }
+
dev_info(dev, "(RX)pkt_len: %u\n", le16_to_cpu(rx_desc->rx.pkt_len));
dev_info(dev, "(RX)size: %u\n", le16_to_cpu(rx_desc->rx.size));
dev_info(dev, "(RX)rss_hash: %u\n", le32_to_cpu(rx_desc->rx.rss_hash));
@@ -324,6 +357,11 @@ static void hns3_dbg_dev_caps(struct hnae3_handle *h)
test_bit(HNAE3_DEV_SUPPORT_PTP_B, caps) ? "yes" : "no");
dev_info(&h->pdev->dev, "support INT QL: %s\n",
test_bit(HNAE3_DEV_SUPPORT_INT_QL_B, caps) ? "yes" : "no");
+ dev_info(&h->pdev->dev, "support HW TX csum: %s\n",
+ test_bit(HNAE3_DEV_SUPPORT_HW_TX_CSUM_B, caps) ? "yes" : "no");
+ dev_info(&h->pdev->dev, "support UDP tunnel csum: %s\n",
+ test_bit(HNAE3_DEV_SUPPORT_UDP_TUNNEL_CSUM_B, caps) ?
+ "yes" : "no");
}
static void hns3_dbg_dev_specs(struct hnae3_handle *h)
@@ -349,6 +387,7 @@ static void hns3_dbg_dev_specs(struct hnae3_handle *h)
dev_info(priv->dev, "Desc num per RX queue: %u\n", kinfo->num_rx_desc);
dev_info(priv->dev, "Total number of enabled TCs: %u\n", kinfo->num_tc);
dev_info(priv->dev, "MAX INT QL: %u\n", dev_specs->int_ql_max);
+ dev_info(priv->dev, "MAX INT GL: %u\n", dev_specs->max_int_gl);
}
static ssize_t hns3_dbg_cmd_read(struct file *filp, char __user *buffer,
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
index a362516a3185..1798c0a04b0e 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
@@ -211,8 +211,8 @@ void hns3_set_vector_coalesce_rl(struct hns3_enet_tqp_vector *tqp_vector,
* GL and RL (Rate Limiter) are two ways to achieve interrupt coalescing
*/
- if (rl_reg > 0 && !tqp_vector->tx_group.coal.gl_adapt_enable &&
- !tqp_vector->rx_group.coal.gl_adapt_enable)
+ if (rl_reg > 0 && !tqp_vector->tx_group.coal.adapt_enable &&
+ !tqp_vector->rx_group.coal.adapt_enable)
/* According to the hardware, the range of rl_reg is
* 0-59 and the unit is 4.
*/
@@ -224,48 +224,99 @@ void hns3_set_vector_coalesce_rl(struct hns3_enet_tqp_vector *tqp_vector,
void hns3_set_vector_coalesce_rx_gl(struct hns3_enet_tqp_vector *tqp_vector,
u32 gl_value)
{
- u32 rx_gl_reg = hns3_gl_usec_to_reg(gl_value);
+ u32 new_val;
- writel(rx_gl_reg, tqp_vector->mask_addr + HNS3_VECTOR_GL0_OFFSET);
+ if (tqp_vector->rx_group.coal.unit_1us)
+ new_val = gl_value | HNS3_INT_GL_1US;
+ else
+ new_val = hns3_gl_usec_to_reg(gl_value);
+
+ writel(new_val, tqp_vector->mask_addr + HNS3_VECTOR_GL0_OFFSET);
}
void hns3_set_vector_coalesce_tx_gl(struct hns3_enet_tqp_vector *tqp_vector,
u32 gl_value)
{
- u32 tx_gl_reg = hns3_gl_usec_to_reg(gl_value);
+ u32 new_val;
+
+ if (tqp_vector->tx_group.coal.unit_1us)
+ new_val = gl_value | HNS3_INT_GL_1US;
+ else
+ new_val = hns3_gl_usec_to_reg(gl_value);
+
+ writel(new_val, tqp_vector->mask_addr + HNS3_VECTOR_GL1_OFFSET);
+}
+
+void hns3_set_vector_coalesce_tx_ql(struct hns3_enet_tqp_vector *tqp_vector,
+ u32 ql_value)
+{
+ writel(ql_value, tqp_vector->mask_addr + HNS3_VECTOR_TX_QL_OFFSET);
+}
- writel(tx_gl_reg, tqp_vector->mask_addr + HNS3_VECTOR_GL1_OFFSET);
+void hns3_set_vector_coalesce_rx_ql(struct hns3_enet_tqp_vector *tqp_vector,
+ u32 ql_value)
+{
+ writel(ql_value, tqp_vector->mask_addr + HNS3_VECTOR_RX_QL_OFFSET);
}
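
A note on the GL writes above: on V3 and later the value is passed through in microseconds with HNS3_INT_GL_1US set; older hardware takes a converted register value. A sketch of the selection, assuming hns3_gl_usec_to_reg() halves the value (a 2us register grid); that helper is not shown in this hunk:

#include <stdint.h>

#define HNS3_INT_GL_1US (1u << 31)	/* BIT(31), see the hns3_enet.h hunk */

static uint32_t gl_reg_value(uint32_t usecs, int unit_1us)
{
	if (unit_1us)
		return usecs | HNS3_INT_GL_1US;	/* already in 1us units */

	return usecs >> 1;	/* assumed 2us grid of the legacy register */
}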
-static void hns3_vector_gl_rl_init(struct hns3_enet_tqp_vector *tqp_vector,
- struct hns3_nic_priv *priv)
+static void hns3_vector_coalesce_init(struct hns3_enet_tqp_vector *tqp_vector,
+ struct hns3_nic_priv *priv)
{
+ struct hnae3_ae_dev *ae_dev = pci_get_drvdata(priv->ae_handle->pdev);
+ struct hns3_enet_coalesce *tx_coal = &tqp_vector->tx_group.coal;
+ struct hns3_enet_coalesce *rx_coal = &tqp_vector->rx_group.coal;
+
/* initialize the configuration for interrupt coalescing.
* 1. GL (Interrupt Gap Limiter)
* 2. RL (Interrupt Rate Limiter)
+ * 3. QL (Interrupt Quantity Limiter)
*
* Default: enable interrupt coalescing self-adaptive and GL
*/
- tqp_vector->tx_group.coal.gl_adapt_enable = 1;
- tqp_vector->rx_group.coal.gl_adapt_enable = 1;
+ tx_coal->adapt_enable = 1;
+ rx_coal->adapt_enable = 1;
+
+ tx_coal->int_gl = HNS3_INT_GL_50K;
+ rx_coal->int_gl = HNS3_INT_GL_50K;
- tqp_vector->tx_group.coal.int_gl = HNS3_INT_GL_50K;
- tqp_vector->rx_group.coal.int_gl = HNS3_INT_GL_50K;
+ rx_coal->flow_level = HNS3_FLOW_LOW;
+ tx_coal->flow_level = HNS3_FLOW_LOW;
- tqp_vector->rx_group.coal.flow_level = HNS3_FLOW_LOW;
- tqp_vector->tx_group.coal.flow_level = HNS3_FLOW_LOW;
+	/* For device version V3 and above, GL can be configured in 1us
+	 * units, so use the 1us unit.
+	 */
+ if (ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V3) {
+ tx_coal->unit_1us = 1;
+ rx_coal->unit_1us = 1;
+ }
+
+ if (ae_dev->dev_specs.int_ql_max) {
+ tx_coal->ql_enable = 1;
+ rx_coal->ql_enable = 1;
+ tx_coal->int_ql_max = ae_dev->dev_specs.int_ql_max;
+ rx_coal->int_ql_max = ae_dev->dev_specs.int_ql_max;
+ tx_coal->int_ql = HNS3_INT_QL_DEFAULT_CFG;
+ rx_coal->int_ql = HNS3_INT_QL_DEFAULT_CFG;
+ }
}
-static void hns3_vector_gl_rl_init_hw(struct hns3_enet_tqp_vector *tqp_vector,
- struct hns3_nic_priv *priv)
+static void
+hns3_vector_coalesce_init_hw(struct hns3_enet_tqp_vector *tqp_vector,
+ struct hns3_nic_priv *priv)
{
+ struct hns3_enet_coalesce *tx_coal = &tqp_vector->tx_group.coal;
+ struct hns3_enet_coalesce *rx_coal = &tqp_vector->rx_group.coal;
struct hnae3_handle *h = priv->ae_handle;
- hns3_set_vector_coalesce_tx_gl(tqp_vector,
- tqp_vector->tx_group.coal.int_gl);
- hns3_set_vector_coalesce_rx_gl(tqp_vector,
- tqp_vector->rx_group.coal.int_gl);
+ hns3_set_vector_coalesce_tx_gl(tqp_vector, tx_coal->int_gl);
+ hns3_set_vector_coalesce_rx_gl(tqp_vector, rx_coal->int_gl);
hns3_set_vector_coalesce_rl(tqp_vector, h->kinfo.int_rl_setting);
+
+ if (tx_coal->ql_enable)
+ hns3_set_vector_coalesce_tx_ql(tqp_vector, tx_coal->int_ql);
+
+ if (rx_coal->ql_enable)
+ hns3_set_vector_coalesce_rx_ql(tqp_vector, rx_coal->int_ql);
}
static int hns3_nic_set_real_num_queue(struct net_device *netdev)
@@ -644,7 +695,7 @@ void hns3_enable_vlan_filter(struct net_device *netdev, bool enable)
}
}
-static int hns3_set_tso(struct sk_buff *skb, u32 *paylen,
+static int hns3_set_tso(struct sk_buff *skb, u32 *paylen_fdop_ol4cs,
u16 *mss, u32 *type_cs_vlan_tso)
{
u32 l4_offset, hdr_len;
@@ -674,15 +725,6 @@ static int hns3_set_tso(struct sk_buff *skb, u32 *paylen,
SKB_GSO_GRE_CSUM |
SKB_GSO_UDP_TUNNEL |
SKB_GSO_UDP_TUNNEL_CSUM)) {
- if ((!(skb_shinfo(skb)->gso_type &
- SKB_GSO_PARTIAL)) &&
- (skb_shinfo(skb)->gso_type &
- SKB_GSO_UDP_TUNNEL_CSUM)) {
- /* Software should clear the udp's checksum
- * field when tso is needed.
- */
- l4.udp->check = 0;
- }
/* reset l3&l4 pointers from outer to inner headers */
l3.hdr = skb_inner_network_header(skb);
l4.hdr = skb_inner_transport_header(skb);
@@ -711,9 +753,13 @@ static int hns3_set_tso(struct sk_buff *skb, u32 *paylen,
}
/* find the txbd field values */
- *paylen = skb->len - hdr_len;
+ *paylen_fdop_ol4cs = skb->len - hdr_len;
hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_TSO_B, 1);
+ /* offload outer UDP header checksum */
+ if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM)
+ hns3_set_field(*paylen_fdop_ol4cs, HNS3_TXD_OL4CS_B, 1);
+
/* get MSS for TSO */
*mss = skb_shinfo(skb)->gso_size;
@@ -782,8 +828,16 @@ static int hns3_get_l4_protocol(struct sk_buff *skb, u8 *ol4_proto,
*/
static bool hns3_tunnel_csum_bug(struct sk_buff *skb)
{
+ struct hns3_nic_priv *priv = netdev_priv(skb->dev);
+ struct hnae3_ae_dev *ae_dev = pci_get_drvdata(priv->ae_handle->pdev);
union l4_hdr_info l4;
+	/* For device version V3 and above, the hardware can do this
+	 * checksum offload.
+	 */
+ if (ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V3)
+ return false;
+
l4.hdr = skb_transport_header(skb);
if (!(!skb->encapsulation &&
@@ -1004,15 +1058,31 @@ static int hns3_handle_vtags(struct hns3_enet_ring *tx_ring,
return 0;
}
+/* check if the hardware is capable of checksum offloading */
+static bool hns3_check_hw_tx_csum(struct sk_buff *skb)
+{
+ struct hns3_nic_priv *priv = netdev_priv(skb->dev);
+
+	/* Note: to keep the TX descriptor backward compatible, HW checksum
+	 * of non-IP packets and GSO packets is handled in a different
+	 * place in the code below.
+	 */
+ if (skb->csum_not_inet || skb_is_gso(skb) ||
+ !test_bit(HNS3_NIC_STATE_HW_TX_CSUM_ENABLE, &priv->state))
+ return false;
+
+ return true;
+}
+
static int hns3_fill_skb_desc(struct hns3_enet_ring *ring,
struct sk_buff *skb, struct hns3_desc *desc)
{
u32 ol_type_vlan_len_msec = 0;
+ u32 paylen_ol4cs = skb->len;
u32 type_cs_vlan_tso = 0;
- u32 paylen = skb->len;
+ u16 mss_hw_csum = 0;
u16 inner_vtag = 0;
u16 out_vtag = 0;
- u16 mss = 0;
int ret;
ret = hns3_handle_vtags(ring, skb);
@@ -1037,6 +1107,17 @@ static int hns3_fill_skb_desc(struct hns3_enet_ring *ring,
if (skb->ip_summed == CHECKSUM_PARTIAL) {
u8 ol4_proto, il4_proto;
+ if (hns3_check_hw_tx_csum(skb)) {
+			/* set checksum start and offset, both in units of 2 bytes */
+ hns3_set_field(type_cs_vlan_tso, HNS3_TXD_CSUM_START_S,
+ skb_checksum_start_offset(skb) >> 1);
+ hns3_set_field(ol_type_vlan_len_msec,
+ HNS3_TXD_CSUM_OFFSET_S,
+ skb->csum_offset >> 1);
+ mss_hw_csum |= BIT(HNS3_TXD_HW_CS_B);
+ goto out_hw_tx_csum;
+ }
+
skb_reset_mac_len(skb);
ret = hns3_get_l4_protocol(skb, &ol4_proto, &il4_proto);
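
The generic HW checksum path above stores the checksum start and insert offset in units of two bytes, hence the >> 1 on both skb offsets. A stand-alone sketch of the packing, using the shift/mask values from the hns3_enet.h hunk further down:

#include <stdint.h>

#define HNS3_TXD_CSUM_START_S	8
#define HNS3_TXD_CSUM_START_M	(0xffff << HNS3_TXD_CSUM_START_S)
#define HNS3_TXD_CSUM_OFFSET_S	8
#define HNS3_TXD_CSUM_OFFSET_M	(0xffff << HNS3_TXD_CSUM_OFFSET_S)

/* start/offset are byte counts from the skb; the descriptor fields
 * are 2-byte granular, so halve before shifting into place.
 */
static void pack_hw_csum(uint32_t *type_cs, uint32_t *ol_type,
			 uint32_t csum_start, uint32_t csum_offset)
{
	*type_cs |= ((csum_start >> 1) << HNS3_TXD_CSUM_START_S) &
		    HNS3_TXD_CSUM_START_M;
	*ol_type |= ((csum_offset >> 1) << HNS3_TXD_CSUM_OFFSET_S) &
		    HNS3_TXD_CSUM_OFFSET_M;
}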
@@ -1057,7 +1138,7 @@ static int hns3_fill_skb_desc(struct hns3_enet_ring *ring,
return ret;
}
- ret = hns3_set_tso(skb, &paylen, &mss,
+ ret = hns3_set_tso(skb, &paylen_ol4cs, &mss_hw_csum,
&type_cs_vlan_tso);
if (unlikely(ret < 0)) {
u64_stats_update_begin(&ring->syncp);
@@ -1067,12 +1148,13 @@ static int hns3_fill_skb_desc(struct hns3_enet_ring *ring,
}
}
+out_hw_tx_csum:
/* Set txbd */
desc->tx.ol_type_vlan_len_msec =
cpu_to_le32(ol_type_vlan_len_msec);
desc->tx.type_cs_vlan_tso_len = cpu_to_le32(type_cs_vlan_tso);
- desc->tx.paylen = cpu_to_le32(paylen);
- desc->tx.mss = cpu_to_le16(mss);
+ desc->tx.paylen_ol4cs = cpu_to_le32(paylen_ol4cs);
+ desc->tx.mss_hw_csum = cpu_to_le16(mss_hw_csum);
desc->tx.vlan_tag = cpu_to_le16(inner_vtag);
desc->tx.outer_vlan_tag = cpu_to_le16(out_vtag);
@@ -2275,39 +2357,32 @@ static void hns3_set_default_feature(struct net_device *netdev)
netdev->priv_flags |= IFF_UNICAST_FLT;
- netdev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
- NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_GSO |
+ netdev->hw_enc_features |= NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_GSO |
NETIF_F_GRO | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GSO_GRE |
NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_UDP_TUNNEL |
- NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_SCTP_CRC |
- NETIF_F_TSO_MANGLEID | NETIF_F_FRAGLIST;
+ NETIF_F_SCTP_CRC | NETIF_F_TSO_MANGLEID | NETIF_F_FRAGLIST;
netdev->gso_partial_features |= NETIF_F_GSO_GRE_CSUM;
- netdev->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
- NETIF_F_HW_VLAN_CTAG_FILTER |
+ netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER |
NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX |
NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_GSO |
NETIF_F_GRO | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GSO_GRE |
NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_UDP_TUNNEL |
- NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_SCTP_CRC |
- NETIF_F_FRAGLIST;
+ NETIF_F_SCTP_CRC | NETIF_F_FRAGLIST;
- netdev->vlan_features |=
- NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
+ netdev->vlan_features |= NETIF_F_RXCSUM |
NETIF_F_SG | NETIF_F_GSO | NETIF_F_GRO |
NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GSO_GRE |
NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_UDP_TUNNEL |
- NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_SCTP_CRC |
- NETIF_F_FRAGLIST;
+ NETIF_F_SCTP_CRC | NETIF_F_FRAGLIST;
- netdev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
- NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX |
+ netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX |
+ NETIF_F_HW_VLAN_CTAG_RX |
NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_GSO |
NETIF_F_GRO | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GSO_GRE |
NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_UDP_TUNNEL |
- NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_SCTP_CRC |
- NETIF_F_FRAGLIST;
+ NETIF_F_SCTP_CRC | NETIF_F_FRAGLIST;
if (ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
netdev->hw_features |= NETIF_F_GRO_HW;
@@ -2325,6 +2400,25 @@ static void hns3_set_default_feature(struct net_device *netdev)
netdev->vlan_features |= NETIF_F_GSO_UDP_L4;
netdev->hw_enc_features |= NETIF_F_GSO_UDP_L4;
}
+
+ if (test_bit(HNAE3_DEV_SUPPORT_HW_TX_CSUM_B, ae_dev->caps)) {
+ netdev->hw_features |= NETIF_F_HW_CSUM;
+ netdev->features |= NETIF_F_HW_CSUM;
+ netdev->vlan_features |= NETIF_F_HW_CSUM;
+ netdev->hw_enc_features |= NETIF_F_HW_CSUM;
+ } else {
+ netdev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
+ netdev->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
+ netdev->vlan_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
+ netdev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
+ }
+
+ if (test_bit(HNAE3_DEV_SUPPORT_UDP_TUNNEL_CSUM_B, ae_dev->caps)) {
+ netdev->hw_features |= NETIF_F_GSO_UDP_TUNNEL_CSUM;
+ netdev->features |= NETIF_F_GSO_UDP_TUNNEL_CSUM;
+ netdev->vlan_features |= NETIF_F_GSO_UDP_TUNNEL_CSUM;
+ netdev->hw_enc_features |= NETIF_F_GSO_UDP_TUNNEL_CSUM;
+ }
}
static int hns3_alloc_buffer(struct hns3_enet_ring *ring,
@@ -2747,6 +2841,22 @@ static int hns3_gro_complete(struct sk_buff *skb, u32 l234info)
return 0;
}
+static void hns3_checksum_complete(struct hns3_enet_ring *ring,
+ struct sk_buff *skb, u32 l234info)
+{
+ u32 lo, hi;
+
+ u64_stats_update_begin(&ring->syncp);
+ ring->stats.csum_complete++;
+ u64_stats_update_end(&ring->syncp);
+ skb->ip_summed = CHECKSUM_COMPLETE;
+ lo = hnae3_get_field(l234info, HNS3_RXD_L2_CSUM_L_M,
+ HNS3_RXD_L2_CSUM_L_S);
+ hi = hnae3_get_field(l234info, HNS3_RXD_L2_CSUM_H_M,
+ HNS3_RXD_L2_CSUM_H_S);
+ skb->csum = csum_unfold((__force __sum16)(lo | hi << 8));
+}
+
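
hns3_checksum_complete() above reassembles a 16-bit checksum that the descriptor splits across l234_info: eight bits at position 4 and eight bits at position 24. A stand-alone sketch of the extraction (masks modeled on the hns3_enet.h hunk below, with unsigned literals to keep the high shift well-defined):

#include <stdint.h>

#define RXD_L2_CSUM_L_S	4
#define RXD_L2_CSUM_L_M	(0xffu << RXD_L2_CSUM_L_S)
#define RXD_L2_CSUM_H_S	24
#define RXD_L2_CSUM_H_M	(0xffu << RXD_L2_CSUM_H_S)

static uint16_t rx_l2_csum(uint32_t l234info)
{
	uint32_t lo = (l234info & RXD_L2_CSUM_L_M) >> RXD_L2_CSUM_L_S;
	uint32_t hi = (l234info & RXD_L2_CSUM_H_M) >> RXD_L2_CSUM_H_S;

	return (uint16_t)(lo | hi << 8);	/* low byte | high byte */
}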
static void hns3_rx_checksum(struct hns3_enet_ring *ring, struct sk_buff *skb,
u32 l234info, u32 bd_base_info, u32 ol_info)
{
@@ -2761,6 +2871,11 @@ static void hns3_rx_checksum(struct hns3_enet_ring *ring, struct sk_buff *skb,
if (!(netdev->features & NETIF_F_RXCSUM))
return;
+ if (l234info & BIT(HNS3_RXD_L2_CSUM_B)) {
+ hns3_checksum_complete(ring, skb, l234info);
+ return;
+ }
+
/* check if hardware has done checksum */
if (!(bd_base_info & BIT(HNS3_RXD_L3L4P_B)))
return;
@@ -3333,14 +3448,14 @@ static void hns3_update_new_int_gl(struct hns3_enet_tqp_vector *tqp_vector)
tqp_vector->last_jiffies + msecs_to_jiffies(1000)))
return;
- if (rx_group->coal.gl_adapt_enable) {
+ if (rx_group->coal.adapt_enable) {
rx_update = hns3_get_new_int_gl(rx_group);
if (rx_update)
hns3_set_vector_coalesce_rx_gl(tqp_vector,
rx_group->coal.int_gl);
}
- if (tx_group->coal.gl_adapt_enable) {
+ if (tx_group->coal.adapt_enable) {
tx_update = hns3_get_new_int_gl(tx_group);
if (tx_update)
hns3_set_vector_coalesce_tx_gl(tqp_vector,
@@ -3536,7 +3651,7 @@ static int hns3_nic_init_vector_data(struct hns3_nic_priv *priv)
for (i = 0; i < priv->vector_num; i++) {
tqp_vector = &priv->tqp_vector[i];
- hns3_vector_gl_rl_init_hw(tqp_vector, priv);
+ hns3_vector_coalesce_init_hw(tqp_vector, priv);
tqp_vector->num_tqps = 0;
}
@@ -3594,8 +3709,6 @@ map_ring_fail:
static int hns3_nic_alloc_vector_data(struct hns3_nic_priv *priv)
{
-#define HNS3_VECTOR_PF_MAX_NUM 64
-
struct hnae3_handle *h = priv->ae_handle;
struct hns3_enet_tqp_vector *tqp_vector;
struct hnae3_vector_info *vector;
@@ -3608,7 +3721,6 @@ static int hns3_nic_alloc_vector_data(struct hns3_nic_priv *priv)
/* RSS size, cpu online and vector_num should be the same */
/* Should consider 2p/4p later */
vector_num = min_t(u16, num_online_cpus(), tqp_num);
- vector_num = min_t(u16, vector_num, HNS3_VECTOR_PF_MAX_NUM);
vector = devm_kcalloc(&pdev->dev, vector_num, sizeof(*vector),
GFP_KERNEL);
@@ -3632,7 +3744,7 @@ static int hns3_nic_alloc_vector_data(struct hns3_nic_priv *priv)
tqp_vector->idx = i;
tqp_vector->mask_addr = vector[i].io_addr;
tqp_vector->vector_irq = vector[i].vector;
- hns3_vector_gl_rl_init(tqp_vector, priv);
+ hns3_vector_coalesce_init(tqp_vector, priv);
}
out:
@@ -4109,6 +4221,9 @@ static int hns3_client_init(struct hnae3_handle *handle)
/* MTU range: (ETH_MIN_MTU(kernel default) - 9702) */
netdev->max_mtu = HNS3_MAX_MTU;
+ if (test_bit(HNAE3_DEV_SUPPORT_HW_TX_CSUM_B, ae_dev->caps))
+ set_bit(HNS3_NIC_STATE_HW_TX_CSUM_ENABLE, &priv->state);
+
set_bit(HNS3_NIC_STATE_INITED, &priv->state);
if (netif_msg_drv(handle))
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
index 1c81dea0da1e..0a7b606e7c93 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
@@ -18,6 +18,7 @@ enum hns3_nic_state {
HNS3_NIC_STATE_SERVICE_INITED,
HNS3_NIC_STATE_SERVICE_SCHED,
HNS3_NIC_STATE2_RESET_REQUESTED,
+ HNS3_NIC_STATE_HW_TX_CSUM_ENABLE,
HNS3_NIC_STATE_MAX
};
@@ -82,6 +83,12 @@ enum hns3_nic_state {
#define HNS3_RXD_STRP_TAGP_S 13
#define HNS3_RXD_STRP_TAGP_M (0x3 << HNS3_RXD_STRP_TAGP_S)
+#define HNS3_RXD_L2_CSUM_B 15
+#define HNS3_RXD_L2_CSUM_L_S 4
+#define HNS3_RXD_L2_CSUM_L_M (0xff << HNS3_RXD_L2_CSUM_L_S)
+#define HNS3_RXD_L2_CSUM_H_S 24
+#define HNS3_RXD_L2_CSUM_H_M (0xff << HNS3_RXD_L2_CSUM_H_S)
+
#define HNS3_RXD_L2E_B 16
#define HNS3_RXD_L3E_B 17
#define HNS3_RXD_L4E_B 18
@@ -139,6 +146,9 @@ enum hns3_nic_state {
#define HNS3_TXD_L4LEN_S 24
#define HNS3_TXD_L4LEN_M (0xff << HNS3_TXD_L4LEN_S)
+#define HNS3_TXD_CSUM_START_S 8
+#define HNS3_TXD_CSUM_START_M (0xffff << HNS3_TXD_CSUM_START_S)
+
#define HNS3_TXD_OL3T_S 0
#define HNS3_TXD_OL3T_M (0x3 << HNS3_TXD_OL3T_S)
#define HNS3_TXD_OVLAN_B 2
@@ -146,6 +156,9 @@ enum hns3_nic_state {
#define HNS3_TXD_TUNTYPE_S 4
#define HNS3_TXD_TUNTYPE_M (0xf << HNS3_TXD_TUNTYPE_S)
+#define HNS3_TXD_CSUM_OFFSET_S 8
+#define HNS3_TXD_CSUM_OFFSET_M (0xffff << HNS3_TXD_CSUM_OFFSET_S)
+
#define HNS3_TXD_BDTYPE_S 0
#define HNS3_TXD_BDTYPE_M (0xf << HNS3_TXD_BDTYPE_S)
#define HNS3_TXD_FE_B 4
@@ -159,8 +172,11 @@ enum hns3_nic_state {
#define HNS3_TXD_DECTTL_S 12
#define HNS3_TXD_DECTTL_M (0xf << HNS3_TXD_DECTTL_S)
+#define HNS3_TXD_OL4CS_B 22
+
#define HNS3_TXD_MSS_S 0
#define HNS3_TXD_MSS_M (0x3fff << HNS3_TXD_MSS_S)
+#define HNS3_TXD_HW_CS_B 14
#define HNS3_VECTOR_TX_IRQ BIT_ULL(0)
#define HNS3_VECTOR_RX_IRQ BIT_ULL(1)
@@ -181,6 +197,8 @@ enum hns3_nic_state {
#define HNS3_VECTOR_GL2_OFFSET 0x300
#define HNS3_VECTOR_RL_OFFSET 0x900
#define HNS3_VECTOR_RL_EN_B 6
+#define HNS3_VECTOR_TX_QL_OFFSET 0xe00
+#define HNS3_VECTOR_RX_QL_OFFSET 0xf00
#define HNS3_RING_EN_B 0
@@ -248,9 +266,9 @@ struct __packed hns3_desc {
};
};
- __le32 paylen;
+ __le32 paylen_ol4cs;
__le16 bdtp_fe_sc_vld_ra_ri;
- __le16 mss;
+ __le16 mss_hw_csum;
} tx;
struct {
@@ -369,6 +387,7 @@ struct ring_stats {
u64 err_bd_num;
u64 l2_err;
u64 l3l4_csum_err;
+ u64 csum_complete;
u64 rx_multicast;
u64 non_reuse_pg;
};
@@ -418,18 +437,25 @@ enum hns3_flow_level_range {
HNS3_FLOW_ULTRA = 3,
};
-#define HNS3_INT_GL_MAX 0x1FE0
#define HNS3_INT_GL_50K 0x0014
#define HNS3_INT_GL_20K 0x0032
#define HNS3_INT_GL_18K 0x0036
#define HNS3_INT_GL_8K 0x007C
+#define HNS3_INT_GL_1US BIT(31)
+
#define HNS3_INT_RL_MAX 0x00EC
#define HNS3_INT_RL_ENABLE_MASK 0x40
+#define HNS3_INT_QL_DEFAULT_CFG 0x20
+
struct hns3_enet_coalesce {
u16 int_gl;
- u8 gl_adapt_enable;
+ u16 int_ql;
+ u16 int_ql_max;
+ u8 adapt_enable:1;
+ u8 ql_enable:1;
+ u8 unit_1us:1;
enum hns3_flow_level_range flow_level;
};
@@ -595,6 +621,10 @@ void hns3_set_vector_coalesce_tx_gl(struct hns3_enet_tqp_vector *tqp_vector,
u32 gl_value);
void hns3_set_vector_coalesce_rl(struct hns3_enet_tqp_vector *tqp_vector,
u32 rl_value);
+void hns3_set_vector_coalesce_rx_ql(struct hns3_enet_tqp_vector *tqp_vector,
+ u32 ql_value);
+void hns3_set_vector_coalesce_tx_ql(struct hns3_enet_tqp_vector *tqp_vector,
+ u32 ql_value);
void hns3_enable_vlan_filter(struct net_device *netdev, bool enable);
void hns3_request_update_promisc_mode(struct hnae3_handle *handle);
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
index 6b07b2771172..3cca3c125c03 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
@@ -55,6 +55,7 @@ static const struct hns3_stats hns3_rxq_stats[] = {
HNS3_TQP_STAT("err_bd_num", err_bd_num),
HNS3_TQP_STAT("l2_err", l2_err),
HNS3_TQP_STAT("l3l4_csum_err", l3l4_csum_err),
+ HNS3_TQP_STAT("csum_complete", csum_complete),
HNS3_TQP_STAT("multicast", rx_multicast),
HNS3_TQP_STAT("non_reuse_pg", non_reuse_pg),
};
@@ -1105,9 +1106,9 @@ static int hns3_get_coalesce_per_queue(struct net_device *netdev, u32 queue,
rx_vector = priv->ring[queue_num + queue].tqp_vector;
cmd->use_adaptive_tx_coalesce =
- tx_vector->tx_group.coal.gl_adapt_enable;
+ tx_vector->tx_group.coal.adapt_enable;
cmd->use_adaptive_rx_coalesce =
- rx_vector->rx_group.coal.gl_adapt_enable;
+ rx_vector->rx_group.coal.adapt_enable;
cmd->tx_coalesce_usecs = tx_vector->tx_group.coal.int_gl;
cmd->rx_coalesce_usecs = rx_vector->rx_group.coal.int_gl;
@@ -1115,6 +1116,9 @@ static int hns3_get_coalesce_per_queue(struct net_device *netdev, u32 queue,
cmd->tx_coalesce_usecs_high = h->kinfo.int_rl_setting;
cmd->rx_coalesce_usecs_high = h->kinfo.int_rl_setting;
+ cmd->tx_max_coalesced_frames = tx_vector->tx_group.coal.int_ql;
+ cmd->rx_max_coalesced_frames = rx_vector->rx_group.coal.int_ql;
+
return 0;
}
@@ -1127,22 +1131,30 @@ static int hns3_get_coalesce(struct net_device *netdev,
static int hns3_check_gl_coalesce_para(struct net_device *netdev,
struct ethtool_coalesce *cmd)
{
+ struct hnae3_handle *handle = hns3_get_handle(netdev);
+ struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
u32 rx_gl, tx_gl;
- if (cmd->rx_coalesce_usecs > HNS3_INT_GL_MAX) {
+ if (cmd->rx_coalesce_usecs > ae_dev->dev_specs.max_int_gl) {
netdev_err(netdev,
- "Invalid rx-usecs value, rx-usecs range is 0-%d\n",
- HNS3_INT_GL_MAX);
+ "invalid rx-usecs value, rx-usecs range is 0-%u\n",
+ ae_dev->dev_specs.max_int_gl);
return -EINVAL;
}
- if (cmd->tx_coalesce_usecs > HNS3_INT_GL_MAX) {
+ if (cmd->tx_coalesce_usecs > ae_dev->dev_specs.max_int_gl) {
netdev_err(netdev,
- "Invalid tx-usecs value, tx-usecs range is 0-%d\n",
- HNS3_INT_GL_MAX);
+ "invalid tx-usecs value, tx-usecs range is 0-%u\n",
+ ae_dev->dev_specs.max_int_gl);
return -EINVAL;
}
+	/* For device version V3 and above, GL uses a 1us unit, so
+	 * rounding down is not needed.
+	 */
+ if (ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V3)
+ return 0;
+
rx_gl = hns3_gl_round_down(cmd->rx_coalesce_usecs);
if (rx_gl != cmd->rx_coalesce_usecs) {
netdev_info(netdev,
@@ -1188,6 +1200,29 @@ static int hns3_check_rl_coalesce_para(struct net_device *netdev,
return 0;
}
+static int hns3_check_ql_coalesce_param(struct net_device *netdev,
+ struct ethtool_coalesce *cmd)
+{
+ struct hnae3_handle *handle = hns3_get_handle(netdev);
+ struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
+
+ if ((cmd->tx_max_coalesced_frames || cmd->rx_max_coalesced_frames) &&
+ !ae_dev->dev_specs.int_ql_max) {
+ netdev_err(netdev, "coalesced frames is not supported\n");
+ return -EOPNOTSUPP;
+ }
+
+ if (cmd->tx_max_coalesced_frames > ae_dev->dev_specs.int_ql_max ||
+ cmd->rx_max_coalesced_frames > ae_dev->dev_specs.int_ql_max) {
+ netdev_err(netdev,
+ "invalid coalesced_frames value, range is 0-%u\n",
+ ae_dev->dev_specs.int_ql_max);
+ return -ERANGE;
+ }
+
+ return 0;
+}
+
static int hns3_check_coalesce_para(struct net_device *netdev,
struct ethtool_coalesce *cmd)
{
@@ -1207,6 +1242,10 @@ static int hns3_check_coalesce_para(struct net_device *netdev,
return ret;
}
+ ret = hns3_check_ql_coalesce_param(netdev, cmd);
+ if (ret)
+ return ret;
+
if (cmd->use_adaptive_tx_coalesce == 1 ||
cmd->use_adaptive_rx_coalesce == 1) {
netdev_info(netdev,
@@ -1230,14 +1269,17 @@ static void hns3_set_coalesce_per_queue(struct net_device *netdev,
tx_vector = priv->ring[queue].tqp_vector;
rx_vector = priv->ring[queue_num + queue].tqp_vector;
- tx_vector->tx_group.coal.gl_adapt_enable =
+ tx_vector->tx_group.coal.adapt_enable =
cmd->use_adaptive_tx_coalesce;
- rx_vector->rx_group.coal.gl_adapt_enable =
+ rx_vector->rx_group.coal.adapt_enable =
cmd->use_adaptive_rx_coalesce;
tx_vector->tx_group.coal.int_gl = cmd->tx_coalesce_usecs;
rx_vector->rx_group.coal.int_gl = cmd->rx_coalesce_usecs;
+ tx_vector->tx_group.coal.int_ql = cmd->tx_max_coalesced_frames;
+ rx_vector->rx_group.coal.int_ql = cmd->rx_max_coalesced_frames;
+
hns3_set_vector_coalesce_tx_gl(tx_vector,
tx_vector->tx_group.coal.int_gl);
hns3_set_vector_coalesce_rx_gl(rx_vector,
@@ -1245,6 +1287,13 @@ static void hns3_set_coalesce_per_queue(struct net_device *netdev,
hns3_set_vector_coalesce_rl(tx_vector, h->kinfo.int_rl_setting);
hns3_set_vector_coalesce_rl(rx_vector, h->kinfo.int_rl_setting);
+
+ if (tx_vector->tx_group.coal.ql_enable)
+ hns3_set_vector_coalesce_tx_ql(tx_vector,
+ tx_vector->tx_group.coal.int_ql);
+ if (rx_vector->rx_group.coal.ql_enable)
+ hns3_set_vector_coalesce_rx_ql(rx_vector,
+ rx_vector->rx_group.coal.int_ql);
}
static int hns3_set_coalesce(struct net_device *netdev,
@@ -1471,7 +1520,8 @@ static int hns3_get_module_eeprom(struct net_device *netdev,
#define HNS3_ETHTOOL_COALESCE (ETHTOOL_COALESCE_USECS | \
ETHTOOL_COALESCE_USE_ADAPTIVE | \
ETHTOOL_COALESCE_RX_USECS_HIGH | \
- ETHTOOL_COALESCE_TX_USECS_HIGH)
+ ETHTOOL_COALESCE_TX_USECS_HIGH | \
+ ETHTOOL_COALESCE_MAX_FRAMES)
static const struct ethtool_ops hns3vf_ethtool_ops = {
.supported_coalesce_params = HNS3_ETHTOOL_COALESCE,
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c
index e6321dda0f3f..85986c7d71fa 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c
@@ -355,6 +355,10 @@ static void hclge_parse_capability(struct hclge_dev *hdev,
set_bit(HNAE3_DEV_SUPPORT_INT_QL_B, ae_dev->caps);
if (hnae3_get_bit(caps, HCLGE_CAP_TQP_TXRX_INDEP_B))
set_bit(HNAE3_DEV_SUPPORT_TQP_TXRX_INDEP_B, ae_dev->caps);
+ if (hnae3_get_bit(caps, HCLGE_CAP_HW_TX_CSUM_B))
+ set_bit(HNAE3_DEV_SUPPORT_HW_TX_CSUM_B, ae_dev->caps);
+ if (hnae3_get_bit(caps, HCLGE_CAP_UDP_TUNNEL_CSUM_B))
+ set_bit(HNAE3_DEV_SUPPORT_UDP_TUNNEL_CSUM_B, ae_dev->caps);
}
static enum hclge_cmd_status
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h
index 096e26a2e16b..49cbd954f76b 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h
@@ -307,6 +307,9 @@ enum hclge_opcode_type {
#define HCLGE_TQP_REG_OFFSET 0x80000
#define HCLGE_TQP_REG_SIZE 0x200
+#define HCLGE_TQP_MAX_SIZE_DEV_V2 1024
+#define HCLGE_TQP_EXT_REG_OFFSET 0x100
+
#define HCLGE_RCB_INIT_QUERY_TIMEOUT 10
#define HCLGE_RCB_INIT_FLAG_EN_B 0
#define HCLGE_RCB_INIT_FLAG_FINI_B 8
@@ -336,7 +339,9 @@ enum hclge_int_type {
};
struct hclge_ctrl_vector_chain_cmd {
- u8 int_vector_id;
+#define HCLGE_VECTOR_ID_L_S 0
+#define HCLGE_VECTOR_ID_L_M GENMASK(7, 0)
+ u8 int_vector_id_l;
u8 int_cause_num;
#define HCLGE_INT_TYPE_S 0
#define HCLGE_INT_TYPE_M GENMASK(1, 0)
@@ -346,7 +351,9 @@ struct hclge_ctrl_vector_chain_cmd {
#define HCLGE_INT_GL_IDX_M GENMASK(14, 13)
__le16 tqp_type_and_id[HCLGE_VECTOR_ELEMENTS_PER_CMD];
u8 vfid;
- u8 rsv;
+#define HCLGE_VECTOR_ID_H_S 8
+#define HCLGE_VECTOR_ID_H_M GENMASK(15, 8)
+ u8 int_vector_id_h;
};
#define HCLGE_MAX_TC_NUM 8
@@ -369,12 +376,13 @@ enum HCLGE_CAP_BITS {
HCLGE_CAP_FD_FORWARD_TC_B,
HCLGE_CAP_PTP_B,
HCLGE_CAP_INT_QL_B,
- HCLGE_CAP_SIMPLE_BD_B,
+ HCLGE_CAP_HW_TX_CSUM_B,
HCLGE_CAP_TX_PUSH_B,
HCLGE_CAP_PHY_IMP_B,
HCLGE_CAP_TQP_TXRX_INDEP_B,
HCLGE_CAP_HW_PAD_B,
HCLGE_CAP_STASH_B,
+ HCLGE_CAP_UDP_TUNNEL_CSUM_B,
};
#define HCLGE_QUERY_CAP_LENGTH 3
@@ -470,16 +478,13 @@ struct hclge_pf_res_cmd {
__le16 tqp_num;
__le16 buf_size;
__le16 msixcap_localid_ba_nic;
- __le16 msixcap_localid_ba_rocee;
-#define HCLGE_MSIX_OFT_ROCEE_S 0
-#define HCLGE_MSIX_OFT_ROCEE_M GENMASK(15, 0)
-#define HCLGE_PF_VEC_NUM_S 0
-#define HCLGE_PF_VEC_NUM_M GENMASK(7, 0)
- __le16 pf_intr_vector_number;
+ __le16 msixcap_localid_number_nic;
+ __le16 pf_intr_vector_number_roce;
__le16 pf_own_fun_number;
__le16 tx_buf_size;
__le16 dv_buf_size;
- __le32 rsv[2];
+ __le16 ext_tqp_num;
+ u8 rsv[6];
};
#define HCLGE_CFG_OFFSET_S 0
@@ -643,7 +648,6 @@ struct hclge_config_mac_speed_dup_cmd {
u8 rsv[22];
};
-#define HCLGE_RING_ID_MASK GENMASK(9, 0)
#define HCLGE_TQP_ENABLE_B 0
#define HCLGE_MAC_CFG_AN_EN_B 0
@@ -1103,6 +1107,14 @@ struct hclge_dev_specs_0_cmd {
__le32 max_tm_rate;
};
+#define HCLGE_DEF_MAX_INT_GL 0x1FE0U
+
+struct hclge_dev_specs_1_cmd {
+ __le32 rsv0;
+ __le16 max_int_gl;
+ u8 rsv1[18];
+};
+
int hclge_cmd_init(struct hclge_dev *hdev);
static inline void hclge_write_reg(void __iomem *base, u32 reg, u32 value)
{
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c
index 3606240025a8..f990f6915226 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c
@@ -4,7 +4,6 @@
#include "hclge_main.h"
#include "hclge_dcb.h"
#include "hclge_tm.h"
-#include "hclge_dcb.h"
#include "hnae3.h"
#define BW_PERCENT 100
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c
index 16df050e72cf..bedbc118c4a3 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c
@@ -498,6 +498,9 @@ static void hclge_dbg_dump_tm_pg(struct hclge_dev *hdev)
dev_info(&hdev->pdev->dev, "PG_P pg_id: %u\n", pg_shap_cfg_cmd->pg_id);
dev_info(&hdev->pdev->dev, "PG_P pg_shapping: 0x%x\n",
le32_to_cpu(pg_shap_cfg_cmd->pg_shapping_para));
+ dev_info(&hdev->pdev->dev, "PG_P flag: %#x\n", pg_shap_cfg_cmd->flag);
+ dev_info(&hdev->pdev->dev, "PG_P pg_rate: %u(Mbps)\n",
+ le32_to_cpu(pg_shap_cfg_cmd->pg_rate));
cmd = HCLGE_OPC_TM_PORT_SHAPPING;
hclge_cmd_setup_basic_desc(&desc, cmd, true);
@@ -508,6 +511,9 @@ static void hclge_dbg_dump_tm_pg(struct hclge_dev *hdev)
port_shap_cfg_cmd = (struct hclge_port_shapping_cmd *)desc.data;
dev_info(&hdev->pdev->dev, "PORT port_shapping: 0x%x\n",
le32_to_cpu(port_shap_cfg_cmd->port_shapping_para));
+ dev_info(&hdev->pdev->dev, "PORT flag: %#x\n", port_shap_cfg_cmd->flag);
+ dev_info(&hdev->pdev->dev, "PORT port_rate: %u(Mbps)\n",
+ le32_to_cpu(port_shap_cfg_cmd->port_rate));
cmd = HCLGE_OPC_TM_PG_SCH_MODE_CFG;
hclge_cmd_setup_basic_desc(&desc, cmd, true);
@@ -655,6 +661,9 @@ static void hclge_dbg_dump_tm(struct hclge_dev *hdev)
dev_info(&hdev->pdev->dev, "PRI_C pri_id: %u\n", shap_cfg_cmd->pri_id);
dev_info(&hdev->pdev->dev, "PRI_C pri_shapping: 0x%x\n",
le32_to_cpu(shap_cfg_cmd->pri_shapping_para));
+ dev_info(&hdev->pdev->dev, "PRI_C flag: %#x\n", shap_cfg_cmd->flag);
+ dev_info(&hdev->pdev->dev, "PRI_C pri_rate: %u(Mbps)\n",
+ le32_to_cpu(shap_cfg_cmd->pri_rate));
cmd = HCLGE_OPC_TM_PRI_P_SHAPPING;
hclge_cmd_setup_basic_desc(&desc, cmd, true);
@@ -666,6 +675,9 @@ static void hclge_dbg_dump_tm(struct hclge_dev *hdev)
dev_info(&hdev->pdev->dev, "PRI_P pri_id: %u\n", shap_cfg_cmd->pri_id);
dev_info(&hdev->pdev->dev, "PRI_P pri_shapping: 0x%x\n",
le32_to_cpu(shap_cfg_cmd->pri_shapping_para));
+ dev_info(&hdev->pdev->dev, "PRI_P flag: %#x\n", shap_cfg_cmd->flag);
+ dev_info(&hdev->pdev->dev, "PRI_P pri_rate: %u(Mbps)\n",
+ le32_to_cpu(shap_cfg_cmd->pri_rate));
hclge_dbg_dump_tm_pg(hdev);
@@ -681,14 +693,17 @@ static void hclge_dbg_dump_tm_map(struct hclge_dev *hdev,
{
struct hclge_bp_to_qs_map_cmd *bp_to_qs_map_cmd;
struct hclge_nq_to_qs_link_cmd *nq_to_qs_map;
+ u32 qset_mapping[HCLGE_BP_EXT_GRP_NUM];
struct hclge_qs_to_pri_link_cmd *map;
struct hclge_tqp_tx_queue_tc_cmd *tc;
enum hclge_opcode_type cmd;
struct hclge_desc desc;
int queue_id, group_id;
- u32 qset_mapping[32];
int tc_id, qset_id;
int pri_id, ret;
+ u16 qs_id_l;
+ u16 qs_id_h;
+ u8 grp_num;
u32 i;
ret = kstrtouint(cmd_buf, 0, &queue_id);
@@ -701,7 +716,24 @@ static void hclge_dbg_dump_tm_map(struct hclge_dev *hdev,
ret = hclge_cmd_send(&hdev->hw, &desc, 1);
if (ret)
goto err_tm_map_cmd_send;
- qset_id = le16_to_cpu(nq_to_qs_map->qset_id) & 0x3FF;
+ qset_id = le16_to_cpu(nq_to_qs_map->qset_id);
+
+ /* convert qset_id to the following format, drop the vld bit
+ * | qs_id_h | vld | qs_id_l |
+ * qset_id: | 15 ~ 11 | 10 | 9 ~ 0 |
+ * \ \ / /
+ * \ \ / /
+ * qset_id: | 15 | 14 ~ 10 | 9 ~ 0 |
+ */
+ qs_id_l = hnae3_get_field(qset_id, HCLGE_TM_QS_ID_L_MSK,
+ HCLGE_TM_QS_ID_L_S);
+ qs_id_h = hnae3_get_field(qset_id, HCLGE_TM_QS_ID_H_EXT_MSK,
+ HCLGE_TM_QS_ID_H_EXT_S);
+ qset_id = 0;
+ hnae3_set_field(qset_id, HCLGE_TM_QS_ID_L_MSK, HCLGE_TM_QS_ID_L_S,
+ qs_id_l);
+ hnae3_set_field(qset_id, HCLGE_TM_QS_ID_H_MSK, HCLGE_TM_QS_ID_H_S,
+ qs_id_h);
cmd = HCLGE_OPC_TM_QS_TO_PRI_LINK;
map = (struct hclge_qs_to_pri_link_cmd *)desc.data;
@@ -731,9 +763,11 @@ static void hclge_dbg_dump_tm_map(struct hclge_dev *hdev,
return;
}
+ grp_num = hdev->num_tqps <= HCLGE_TQP_MAX_SIZE_DEV_V2 ?
+ HCLGE_BP_GRP_NUM : HCLGE_BP_EXT_GRP_NUM;
cmd = HCLGE_OPC_TM_BP_TO_QSET_MAPPING;
bp_to_qs_map_cmd = (struct hclge_bp_to_qs_map_cmd *)desc.data;
- for (group_id = 0; group_id < 32; group_id++) {
+ for (group_id = 0; group_id < grp_num; group_id++) {
hclge_cmd_setup_basic_desc(&desc, cmd, true);
bp_to_qs_map_cmd->tc_id = tc_id;
bp_to_qs_map_cmd->qs_group_id = group_id;
@@ -748,7 +782,7 @@ static void hclge_dbg_dump_tm_map(struct hclge_dev *hdev,
dev_info(&hdev->pdev->dev, "index | tm bp qset maping:\n");
i = 0;
- for (group_id = 0; group_id < 4; group_id++) {
+ for (group_id = 0; group_id < grp_num / 8; group_id++) {
dev_info(&hdev->pdev->dev,
"%04d | %08x:%08x:%08x:%08x:%08x:%08x:%08x:%08x\n",
group_id * 256, qset_mapping[(u32)(i + 7)],
@@ -1379,6 +1413,7 @@ static void hclge_dbg_dump_qs_shaper_single(struct hclge_dev *hdev, u16 qsid)
u8 ir_u, ir_b, ir_s, bs_b, bs_s;
struct hclge_desc desc;
u32 shapping_para;
+ u32 rate;
int ret;
hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QCN_SHAPPING_CFG, true);
@@ -1400,10 +1435,11 @@ static void hclge_dbg_dump_qs_shaper_single(struct hclge_dev *hdev, u16 qsid)
ir_s = hclge_tm_get_field(shapping_para, IR_S);
bs_b = hclge_tm_get_field(shapping_para, BS_B);
bs_s = hclge_tm_get_field(shapping_para, BS_S);
+ rate = le32_to_cpu(shap_cfg_cmd->qs_rate);
dev_info(&hdev->pdev->dev,
- "qs%u ir_b:%u, ir_u:%u, ir_s:%u, bs_b:%u, bs_s:%u\n",
- qsid, ir_b, ir_u, ir_s, bs_b, bs_s);
+ "qs%u ir_b:%u, ir_u:%u, ir_s:%u, bs_b:%u, bs_s:%u, flag:%#x, rate:%u(Mbps)\n",
+ qsid, ir_b, ir_u, ir_s, bs_b, bs_s, shap_cfg_cmd->flag, rate);
}
static void hclge_dbg_dump_qs_shaper_all(struct hclge_dev *hdev)
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
index 1f026408ad38..ca668a47121e 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
@@ -556,7 +556,7 @@ static int hclge_tqps_update_stats(struct hnae3_handle *handle)
hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_RX_STATS,
true);
- desc[0].data[0] = cpu_to_le32((tqp->index & 0x1ff));
+ desc[0].data[0] = cpu_to_le32(tqp->index);
ret = hclge_cmd_send(&hdev->hw, desc, 1);
if (ret) {
dev_err(&hdev->pdev->dev,
@@ -576,7 +576,7 @@ static int hclge_tqps_update_stats(struct hnae3_handle *handle)
HCLGE_OPC_QUERY_TX_STATS,
true);
- desc[0].data[0] = cpu_to_le32((tqp->index & 0x1ff));
+ desc[0].data[0] = cpu_to_le32(tqp->index);
ret = hclge_cmd_send(&hdev->hw, desc, 1);
if (ret) {
dev_err(&hdev->pdev->dev,
@@ -886,7 +886,8 @@ static int hclge_query_pf_resource(struct hclge_dev *hdev)
}
req = (struct hclge_pf_res_cmd *)desc.data;
- hdev->num_tqps = le16_to_cpu(req->tqp_num);
+ hdev->num_tqps = le16_to_cpu(req->tqp_num) +
+ le16_to_cpu(req->ext_tqp_num);
hdev->pkt_buf_size = le16_to_cpu(req->buf_size) << HCLGE_BUF_UNIT_S;
if (req->tx_buf_size)
@@ -905,35 +906,24 @@ static int hclge_query_pf_resource(struct hclge_dev *hdev)
hdev->dv_buf_size = roundup(hdev->dv_buf_size, HCLGE_BUF_SIZE_UNIT);
+ hdev->num_nic_msi = le16_to_cpu(req->msixcap_localid_number_nic);
+ if (hdev->num_nic_msi < HNAE3_MIN_VECTOR_NUM) {
+ dev_err(&hdev->pdev->dev,
+ "only %u msi resources available, not enough for pf(min:2).\n",
+ hdev->num_nic_msi);
+ return -EINVAL;
+ }
+
if (hnae3_dev_roce_supported(hdev)) {
- hdev->roce_base_msix_offset =
- hnae3_get_field(le16_to_cpu(req->msixcap_localid_ba_rocee),
- HCLGE_MSIX_OFT_ROCEE_M, HCLGE_MSIX_OFT_ROCEE_S);
hdev->num_roce_msi =
- hnae3_get_field(le16_to_cpu(req->pf_intr_vector_number),
- HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S);
-
- /* nic's msix numbers is always equals to the roce's. */
- hdev->num_nic_msi = hdev->num_roce_msi;
+ le16_to_cpu(req->pf_intr_vector_number_roce);
/* PF should have NIC vectors and Roce vectors,
* NIC vectors are queued before Roce vectors.
*/
- hdev->num_msi = hdev->num_roce_msi +
- hdev->roce_base_msix_offset;
+ hdev->num_msi = hdev->num_nic_msi + hdev->num_roce_msi;
} else {
- hdev->num_msi =
- hnae3_get_field(le16_to_cpu(req->pf_intr_vector_number),
- HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S);
-
- hdev->num_nic_msi = hdev->num_msi;
- }
-
- if (hdev->num_nic_msi < HNAE3_MIN_VECTOR_NUM) {
- dev_err(&hdev->pdev->dev,
- "Just %u msi resources, not enough for pf(min:2).\n",
- hdev->num_nic_msi);
- return -EINVAL;
+ hdev->num_msi = hdev->num_nic_msi;
}
return 0;
@@ -1366,6 +1356,7 @@ static void hclge_set_default_dev_specs(struct hclge_dev *hdev)
ae_dev->dev_specs.rss_ind_tbl_size = HCLGE_RSS_IND_TBL_SIZE;
ae_dev->dev_specs.rss_key_size = HCLGE_RSS_KEY_SIZE;
ae_dev->dev_specs.max_tm_rate = HCLGE_ETHER_MAX_RATE;
+ ae_dev->dev_specs.max_int_gl = HCLGE_DEF_MAX_INT_GL;
}
static void hclge_parse_dev_specs(struct hclge_dev *hdev,
@@ -1373,14 +1364,18 @@ static void hclge_parse_dev_specs(struct hclge_dev *hdev,
{
struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
struct hclge_dev_specs_0_cmd *req0;
+ struct hclge_dev_specs_1_cmd *req1;
req0 = (struct hclge_dev_specs_0_cmd *)desc[0].data;
+ req1 = (struct hclge_dev_specs_1_cmd *)desc[1].data;
ae_dev->dev_specs.max_non_tso_bd_num = req0->max_non_tso_bd_num;
ae_dev->dev_specs.rss_ind_tbl_size =
le16_to_cpu(req0->rss_ind_tbl_size);
+ ae_dev->dev_specs.int_ql_max = le16_to_cpu(req0->int_ql_max);
ae_dev->dev_specs.rss_key_size = le16_to_cpu(req0->rss_key_size);
ae_dev->dev_specs.max_tm_rate = le32_to_cpu(req0->max_tm_rate);
+ ae_dev->dev_specs.max_int_gl = le16_to_cpu(req1->max_int_gl);
}
static void hclge_check_dev_specs(struct hclge_dev *hdev)
@@ -1395,6 +1390,8 @@ static void hclge_check_dev_specs(struct hclge_dev *hdev)
dev_specs->rss_key_size = HCLGE_RSS_KEY_SIZE;
if (!dev_specs->max_tm_rate)
dev_specs->max_tm_rate = HCLGE_ETHER_MAX_RATE;
+ if (!dev_specs->max_int_gl)
+ dev_specs->max_int_gl = HCLGE_DEF_MAX_INT_GL;
}
static int hclge_query_dev_specs(struct hclge_dev *hdev)
@@ -1591,8 +1588,20 @@ static int hclge_alloc_tqps(struct hclge_dev *hdev)
tqp->q.buf_size = hdev->rx_buf_len;
tqp->q.tx_desc_num = hdev->num_tx_desc;
tqp->q.rx_desc_num = hdev->num_rx_desc;
- tqp->q.io_base = hdev->hw.io_base + HCLGE_TQP_REG_OFFSET +
- i * HCLGE_TQP_REG_SIZE;
+
+ /* need an extended offset to configure queues >=
+ * HCLGE_TQP_MAX_SIZE_DEV_V2
+ */
+ if (i < HCLGE_TQP_MAX_SIZE_DEV_V2)
+ tqp->q.io_base = hdev->hw.io_base +
+ HCLGE_TQP_REG_OFFSET +
+ i * HCLGE_TQP_REG_SIZE;
+ else
+ tqp->q.io_base = hdev->hw.io_base +
+ HCLGE_TQP_REG_OFFSET +
+ HCLGE_TQP_EXT_REG_OFFSET +
+ (i - HCLGE_TQP_MAX_SIZE_DEV_V2) *
+ HCLGE_TQP_REG_SIZE;
tqp++;
}
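
The queue layout above is worth spelling out: the first 1024 queues sit at fixed 0x200 strides from HCLGE_TQP_REG_OFFSET; anything beyond restarts its indexing inside the extended window. A sketch of the offset arithmetic with the constants from the hclge_cmd.h hunk above:

#include <stdint.h>

#define HCLGE_TQP_REG_OFFSET		0x80000
#define HCLGE_TQP_REG_SIZE		0x200
#define HCLGE_TQP_EXT_REG_OFFSET	0x100
#define HCLGE_TQP_MAX_SIZE_DEV_V2	1024

/* Byte offset of queue i's register block from the PCI io base. */
static uint64_t tqp_reg_offset(unsigned int i)
{
	if (i < HCLGE_TQP_MAX_SIZE_DEV_V2)
		return HCLGE_TQP_REG_OFFSET +
		       (uint64_t)i * HCLGE_TQP_REG_SIZE;

	return HCLGE_TQP_REG_OFFSET + HCLGE_TQP_EXT_REG_OFFSET +
	       (uint64_t)(i - HCLGE_TQP_MAX_SIZE_DEV_V2) *
	       HCLGE_TQP_REG_SIZE;
}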
@@ -2405,17 +2414,18 @@ static int hclge_init_roce_base_info(struct hclge_vport *vport)
{
struct hnae3_handle *roce = &vport->roce;
struct hnae3_handle *nic = &vport->nic;
+ struct hclge_dev *hdev = vport->back;
roce->rinfo.num_vectors = vport->back->num_roce_msi;
- if (vport->back->num_msi_left < vport->roce.rinfo.num_vectors ||
- vport->back->num_msi_left == 0)
+ if (hdev->num_msi < hdev->num_nic_msi + hdev->num_roce_msi)
return -EINVAL;
- roce->rinfo.base_vector = vport->back->roce_base_vector;
+ roce->rinfo.base_vector = hdev->roce_base_vector;
roce->rinfo.netdev = nic->kinfo.netdev;
- roce->rinfo.roce_io_base = vport->back->hw.io_base;
+ roce->rinfo.roce_io_base = hdev->hw.io_base;
+ roce->rinfo.roce_mem_base = hdev->hw.mem_base;
roce->pdev = nic->pdev;
roce->ae_algo = nic->ae_algo;
@@ -2449,7 +2459,7 @@ static int hclge_init_msi(struct hclge_dev *hdev)
hdev->base_msi_vector = pdev->irq;
hdev->roce_base_vector = hdev->base_msi_vector +
- hdev->roce_base_msix_offset;
+ hdev->num_nic_msi;
hdev->vector_status = devm_kcalloc(&pdev->dev, hdev->num_msi,
sizeof(u16), GFP_KERNEL);
@@ -4122,6 +4132,30 @@ struct hclge_vport *hclge_get_vport(struct hnae3_handle *handle)
return container_of(handle, struct hclge_vport, nic);
}
+static void hclge_get_vector_info(struct hclge_dev *hdev, u16 idx,
+ struct hnae3_vector_info *vector_info)
+{
+#define HCLGE_PF_MAX_VECTOR_NUM_DEV_V2 64
+
+ vector_info->vector = pci_irq_vector(hdev->pdev, idx);
+
+	/* vectors >= 64 need an extended offset to be configured */
+ if (idx - 1 < HCLGE_PF_MAX_VECTOR_NUM_DEV_V2)
+ vector_info->io_addr = hdev->hw.io_base +
+ HCLGE_VECTOR_REG_BASE +
+ (idx - 1) * HCLGE_VECTOR_REG_OFFSET;
+ else
+ vector_info->io_addr = hdev->hw.io_base +
+ HCLGE_VECTOR_EXT_REG_BASE +
+ (idx - 1) / HCLGE_PF_MAX_VECTOR_NUM_DEV_V2 *
+ HCLGE_VECTOR_REG_OFFSET_H +
+ (idx - 1) % HCLGE_PF_MAX_VECTOR_NUM_DEV_V2 *
+ HCLGE_VECTOR_REG_OFFSET;
+
+ hdev->vector_status[idx] = hdev->vport[0].vport_id;
+ hdev->vector_irq[idx] = vector_info->vector;
+}
+
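
Same pattern for vectors: the first 64 (vector 0 is skipped; the allocation loop below starts at 1) use the classic base at 4-byte strides, and beyond that vectors move to the extended base, grouped 64 per 0x1000 page. A sketch of the address math with the constants from the hclge_main.h hunk below:

#include <stdint.h>

#define HCLGE_VECTOR_REG_BASE		0x20000
#define HCLGE_VECTOR_EXT_REG_BASE	0x30000
#define HCLGE_VECTOR_REG_OFFSET		0x4
#define HCLGE_VECTOR_REG_OFFSET_H	0x1000
#define HCLGE_PF_MAX_VECTOR_NUM_DEV_V2	64

/* Register offset for NIC vector idx; v = idx - 1 mirrors the
 * (idx - 1) indexing in hclge_get_vector_info() above.
 */
static uint32_t vector_reg_offset(uint32_t idx)
{
	uint32_t v = idx - 1;

	if (v < HCLGE_PF_MAX_VECTOR_NUM_DEV_V2)
		return HCLGE_VECTOR_REG_BASE +
		       v * HCLGE_VECTOR_REG_OFFSET;

	return HCLGE_VECTOR_EXT_REG_BASE +
	       v / HCLGE_PF_MAX_VECTOR_NUM_DEV_V2 * HCLGE_VECTOR_REG_OFFSET_H +
	       v % HCLGE_PF_MAX_VECTOR_NUM_DEV_V2 * HCLGE_VECTOR_REG_OFFSET;
}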
static int hclge_get_vector(struct hnae3_handle *handle, u16 vector_num,
struct hnae3_vector_info *vector_info)
{
@@ -4129,23 +4163,16 @@ static int hclge_get_vector(struct hnae3_handle *handle, u16 vector_num,
struct hnae3_vector_info *vector = vector_info;
struct hclge_dev *hdev = vport->back;
int alloc = 0;
- int i, j;
+ u16 i = 0;
+ u16 j;
vector_num = min_t(u16, hdev->num_nic_msi - 1, vector_num);
vector_num = min(hdev->num_msi_left, vector_num);
for (j = 0; j < vector_num; j++) {
- for (i = 1; i < hdev->num_msi; i++) {
+ while (++i < hdev->num_nic_msi) {
if (hdev->vector_status[i] == HCLGE_INVALID_VPORT) {
- vector->vector = pci_irq_vector(hdev->pdev, i);
- vector->io_addr = hdev->hw.io_base +
- HCLGE_VECTOR_REG_BASE +
- (i - 1) * HCLGE_VECTOR_REG_OFFSET +
- vport->vport_id *
- HCLGE_VECTOR_VF_OFFSET;
- hdev->vector_status[i] = vport->vport_id;
- hdev->vector_irq[i] = vector->vector;
-
+ hclge_get_vector_info(hdev, i, vector);
vector++;
alloc++;
@@ -4694,7 +4721,12 @@ int hclge_bind_ring_with_vector(struct hclge_vport *vport,
op = en ? HCLGE_OPC_ADD_RING_TO_VECTOR : HCLGE_OPC_DEL_RING_TO_VECTOR;
hclge_cmd_setup_basic_desc(&desc, op, false);
- req->int_vector_id = vector_id;
+ req->int_vector_id_l = hnae3_get_field(vector_id,
+ HCLGE_VECTOR_ID_L_M,
+ HCLGE_VECTOR_ID_L_S);
+ req->int_vector_id_h = hnae3_get_field(vector_id,
+ HCLGE_VECTOR_ID_H_M,
+ HCLGE_VECTOR_ID_H_S);
i = 0;
for (node = ring_chain; node; node = node->next) {
@@ -4726,7 +4758,14 @@ int hclge_bind_ring_with_vector(struct hclge_vport *vport,
hclge_cmd_setup_basic_desc(&desc,
op,
false);
- req->int_vector_id = vector_id;
+ req->int_vector_id_l =
+ hnae3_get_field(vector_id,
+ HCLGE_VECTOR_ID_L_M,
+ HCLGE_VECTOR_ID_L_S);
+ req->int_vector_id_h =
+ hnae3_get_field(vector_id,
+ HCLGE_VECTOR_ID_H_M,
+ HCLGE_VECTOR_ID_H_S);
}
}
@@ -6845,7 +6884,7 @@ static int hclge_tqp_enable(struct hclge_dev *hdev, unsigned int tqp_id,
int ret;
hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_COM_TQP_QUEUE, false);
- req->tqp_id = cpu_to_le16(tqp_id & HCLGE_RING_ID_MASK);
+ req->tqp_id = cpu_to_le16(tqp_id);
req->stream_id = cpu_to_le16(stream_id);
if (enable)
req->enable |= 1U << HCLGE_TQP_ENABLE_B;
@@ -9307,7 +9346,7 @@ static int hclge_send_reset_tqp_cmd(struct hclge_dev *hdev, u16 queue_id,
hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, false);
req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
- req->tqp_id = cpu_to_le16(queue_id & HCLGE_RING_ID_MASK);
+ req->tqp_id = cpu_to_le16(queue_id);
if (enable)
hnae3_set_bit(req->reset_req, HCLGE_TQP_RESET_B, 1U);
@@ -9330,7 +9369,7 @@ static int hclge_get_reset_status(struct hclge_dev *hdev, u16 queue_id)
hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, true);
req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
- req->tqp_id = cpu_to_le16(queue_id & HCLGE_RING_ID_MASK);
+ req->tqp_id = cpu_to_le16(queue_id);
ret = hclge_cmd_send(&hdev->hw, &desc, 1);
if (ret) {
@@ -9870,6 +9909,28 @@ static void hclge_uninit_client_instance(struct hnae3_client *client,
}
}
+static int hclge_dev_mem_map(struct hclge_dev *hdev)
+{
+#define HCLGE_MEM_BAR 4
+
+ struct pci_dev *pdev = hdev->pdev;
+ struct hclge_hw *hw = &hdev->hw;
+
+	/* if the device has no device memory, return directly */
+ if (!(pci_select_bars(pdev, IORESOURCE_MEM) & BIT(HCLGE_MEM_BAR)))
+ return 0;
+
+ hw->mem_base = devm_ioremap_wc(&pdev->dev,
+ pci_resource_start(pdev, HCLGE_MEM_BAR),
+ pci_resource_len(pdev, HCLGE_MEM_BAR));
+ if (!hw->mem_base) {
+ dev_err(&pdev->dev, "failed to map device memory\n");
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
static int hclge_pci_init(struct hclge_dev *hdev)
{
struct pci_dev *pdev = hdev->pdev;
@@ -9908,9 +9969,16 @@ static int hclge_pci_init(struct hclge_dev *hdev)
goto err_clr_master;
}
+ ret = hclge_dev_mem_map(hdev);
+ if (ret)
+ goto err_unmap_io_base;
+
hdev->num_req_vfs = pci_sriov_get_totalvfs(pdev);
return 0;
+
+err_unmap_io_base:
+ pcim_iounmap(pdev, hdev->hw.io_base);
err_clr_master:
pci_clear_master(pdev);
pci_release_regions(pdev);
@@ -9924,6 +9992,9 @@ static void hclge_pci_uninit(struct hclge_dev *hdev)
{
struct pci_dev *pdev = hdev->pdev;
+ if (hdev->hw.mem_base)
+ devm_iounmap(&pdev->dev, hdev->hw.mem_base);
+
pcim_iounmap(pdev, hdev->hw.io_base);
pci_free_irq_vectors(pdev);
pci_clear_master(pdev);
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
index 64e6afdb61b8..bd17685e4065 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
@@ -27,9 +27,11 @@
(HCLGE_PF_CFG_BLOCK_SIZE / HCLGE_CFG_RD_LEN_BYTES)
#define HCLGE_VECTOR_REG_BASE 0x20000
+#define HCLGE_VECTOR_EXT_REG_BASE 0x30000
#define HCLGE_MISC_VECTOR_REG_BASE 0x20400
#define HCLGE_VECTOR_REG_OFFSET 0x4
+#define HCLGE_VECTOR_REG_OFFSET_H 0x1000
#define HCLGE_VECTOR_VF_OFFSET 0x100000
#define HCLGE_CMDQ_TX_ADDR_L_REG 0x27000
@@ -278,6 +280,7 @@ struct hclge_mac {
struct hclge_hw {
void __iomem *io_base;
+ void __iomem *mem_base;
struct hclge_mac mac;
int num_vec;
struct hclge_cmq cmq;
@@ -767,7 +770,6 @@ struct hclge_dev {
u16 num_msi;
u16 num_msi_left;
u16 num_msi_used;
- u16 roce_base_msix_offset;
u32 base_msi_vector;
u16 *vector_status;
int *vector_irq;
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
index e8495f58a1a8..b1026cd1ba0a 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
@@ -302,12 +302,30 @@ static int hclge_tm_q_to_qs_map_cfg(struct hclge_dev *hdev,
{
struct hclge_nq_to_qs_link_cmd *map;
struct hclge_desc desc;
+ u16 qs_id_l;
+ u16 qs_id_h;
hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_NQ_TO_QS_LINK, false);
map = (struct hclge_nq_to_qs_link_cmd *)desc.data;
map->nq_id = cpu_to_le16(q_id);
+
+ /* convert qs_id to the following format to support qset_id >= 1024
+ * qs_id: | 15 | 14 ~ 10 | 9 ~ 0 |
+ * / / \ \
+ * / / \ \
+ * qset_id: | 15 ~ 11 | 10 | 9 ~ 0 |
+ * | qs_id_h | vld | qs_id_l |
+ */
+ qs_id_l = hnae3_get_field(qs_id, HCLGE_TM_QS_ID_L_MSK,
+ HCLGE_TM_QS_ID_L_S);
+ qs_id_h = hnae3_get_field(qs_id, HCLGE_TM_QS_ID_H_MSK,
+ HCLGE_TM_QS_ID_H_S);
+ hnae3_set_field(qs_id, HCLGE_TM_QS_ID_L_MSK, HCLGE_TM_QS_ID_L_S,
+ qs_id_l);
+ hnae3_set_field(qs_id, HCLGE_TM_QS_ID_H_EXT_MSK, HCLGE_TM_QS_ID_H_EXT_S,
+ qs_id_h);
map->qset_id = cpu_to_le16(qs_id | HCLGE_TM_Q_QS_LINK_VLD_MSK);
return hclge_cmd_send(&hdev->hw, &desc, 1);
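
The conversion drawn in the comment above, as a self-contained user-space sketch with explicit shifts (0x1234 is an arbitrary example id; the masks mirror the GENMASK-based defines added to hclge_tm.h below):

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint16_t qs_id = 0x1234;              /* example qset id */
		uint16_t lo = qs_id & 0x03ff;         /* bits 9..0   -> qs_id_l */
		uint16_t hi = (qs_id >> 10) & 0x001f; /* bits 14..10 -> qs_id_h */
		uint16_t out = lo | (hi << 11);       /* hi moves past bit 10 */

		out |= 1u << 10;                      /* HCLGE_TM_Q_QS_LINK_VLD_MSK */
		printf("0x%04x -> 0x%04x\n", qs_id, out);
		return 0;
	}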
@@ -377,7 +395,7 @@ static u32 hclge_tm_get_shapping_para(u8 ir_b, u8 ir_u, u8 ir_s,
static int hclge_tm_pg_shapping_cfg(struct hclge_dev *hdev,
enum hclge_shap_bucket bucket, u8 pg_id,
- u32 shapping_para)
+ u32 shapping_para, u32 rate)
{
struct hclge_pg_shapping_cmd *shap_cfg_cmd;
enum hclge_opcode_type opcode;
@@ -393,6 +411,10 @@ static int hclge_tm_pg_shapping_cfg(struct hclge_dev *hdev,
shap_cfg_cmd->pg_shapping_para = cpu_to_le32(shapping_para);
+ hnae3_set_bit(shap_cfg_cmd->flag, HCLGE_TM_RATE_VLD, 1);
+
+ shap_cfg_cmd->pg_rate = cpu_to_le32(rate);
+
return hclge_cmd_send(&hdev->hw, &desc, 1);
}
@@ -420,12 +442,16 @@ static int hclge_tm_port_shaper_cfg(struct hclge_dev *hdev)
shap_cfg_cmd->port_shapping_para = cpu_to_le32(shapping_para);
+ hnae3_set_bit(shap_cfg_cmd->flag, HCLGE_TM_RATE_VLD, 1);
+
+ shap_cfg_cmd->port_rate = cpu_to_le32(hdev->hw.mac.speed);
+
return hclge_cmd_send(&hdev->hw, &desc, 1);
}
static int hclge_tm_pri_shapping_cfg(struct hclge_dev *hdev,
enum hclge_shap_bucket bucket, u8 pri_id,
- u32 shapping_para)
+ u32 shapping_para, u32 rate)
{
struct hclge_pri_shapping_cmd *shap_cfg_cmd;
enum hclge_opcode_type opcode;
@@ -442,6 +468,10 @@ static int hclge_tm_pri_shapping_cfg(struct hclge_dev *hdev,
shap_cfg_cmd->pri_shapping_para = cpu_to_le32(shapping_para);
+ hnae3_set_bit(shap_cfg_cmd->flag, HCLGE_TM_RATE_VLD, 1);
+
+ shap_cfg_cmd->pri_rate = cpu_to_le32(rate);
+
return hclge_cmd_send(&hdev->hw, &desc, 1);
}
@@ -543,6 +573,9 @@ int hclge_tm_qs_shaper_cfg(struct hclge_vport *vport, int max_tx_rate)
shap_cfg_cmd->qs_id = cpu_to_le16(vport->qs_offset + i);
shap_cfg_cmd->qs_shapping_para = cpu_to_le32(shaper_para);
+ hnae3_set_bit(shap_cfg_cmd->flag, HCLGE_TM_RATE_VLD, 1);
+ shap_cfg_cmd->qs_rate = cpu_to_le32(max_tx_rate);
+
ret = hclge_cmd_send(&hdev->hw, &desc, 1);
if (ret) {
dev_err(&hdev->pdev->dev,
@@ -682,7 +715,7 @@ static void hclge_tm_pg_info_init(struct hclge_dev *hdev)
}
}
-static void hclge_pfc_info_init(struct hclge_dev *hdev)
+static void hclge_update_fc_mode_by_dcb_flag(struct hclge_dev *hdev)
{
if (!(hdev->flag & HCLGE_FLAG_DCB_ENABLE)) {
if (hdev->fc_mode_last_time == HCLGE_FC_PFC)
@@ -700,6 +733,27 @@ static void hclge_pfc_info_init(struct hclge_dev *hdev)
}
}
+static void hclge_update_fc_mode(struct hclge_dev *hdev)
+{
+ if (!hdev->tm_info.pfc_en) {
+ hdev->tm_info.fc_mode = hdev->fc_mode_last_time;
+ return;
+ }
+
+ if (hdev->tm_info.fc_mode != HCLGE_FC_PFC) {
+ hdev->fc_mode_last_time = hdev->tm_info.fc_mode;
+ hdev->tm_info.fc_mode = HCLGE_FC_PFC;
+ }
+}
+
+static void hclge_pfc_info_init(struct hclge_dev *hdev)
+{
+ if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V3)
+ hclge_update_fc_mode(hdev);
+ else
+ hclge_update_fc_mode_by_dcb_flag(hdev);
+}
+
static void hclge_tm_schd_info_init(struct hclge_dev *hdev)
{
hclge_tm_pg_info_init(hdev);
@@ -744,9 +798,10 @@ static int hclge_tm_pg_shaper_cfg(struct hclge_dev *hdev)
/* Pg to pri */
for (i = 0; i < hdev->tm_info.num_pg; i++) {
+ u32 rate = hdev->tm_info.pg_info[i].bw_limit;
+
/* Calc shaper para */
- ret = hclge_shaper_para_calc(hdev->tm_info.pg_info[i].bw_limit,
- HCLGE_SHAPER_LVL_PG,
+ ret = hclge_shaper_para_calc(rate, HCLGE_SHAPER_LVL_PG,
&ir_para, max_tm_rate);
if (ret)
return ret;
@@ -756,7 +811,7 @@ static int hclge_tm_pg_shaper_cfg(struct hclge_dev *hdev)
HCLGE_SHAPER_BS_S_DEF);
ret = hclge_tm_pg_shapping_cfg(hdev,
HCLGE_TM_SHAP_C_BUCKET, i,
- shaper_para);
+ shaper_para, rate);
if (ret)
return ret;
@@ -767,7 +822,7 @@ static int hclge_tm_pg_shaper_cfg(struct hclge_dev *hdev)
HCLGE_SHAPER_BS_S_DEF);
ret = hclge_tm_pg_shapping_cfg(hdev,
HCLGE_TM_SHAP_P_BUCKET, i,
- shaper_para);
+ shaper_para, rate);
if (ret)
return ret;
}
@@ -873,8 +928,9 @@ static int hclge_tm_pri_tc_base_shaper_cfg(struct hclge_dev *hdev)
u32 i;
for (i = 0; i < hdev->tm_info.num_tc; i++) {
- ret = hclge_shaper_para_calc(hdev->tm_info.tc_info[i].bw_limit,
- HCLGE_SHAPER_LVL_PRI,
+ u32 rate = hdev->tm_info.tc_info[i].bw_limit;
+
+ ret = hclge_shaper_para_calc(rate, HCLGE_SHAPER_LVL_PRI,
&ir_para, max_tm_rate);
if (ret)
return ret;
@@ -883,7 +939,7 @@ static int hclge_tm_pri_tc_base_shaper_cfg(struct hclge_dev *hdev)
HCLGE_SHAPER_BS_U_DEF,
HCLGE_SHAPER_BS_S_DEF);
ret = hclge_tm_pri_shapping_cfg(hdev, HCLGE_TM_SHAP_C_BUCKET, i,
- shaper_para);
+ shaper_para, rate);
if (ret)
return ret;
@@ -893,7 +949,7 @@ static int hclge_tm_pri_tc_base_shaper_cfg(struct hclge_dev *hdev)
HCLGE_SHAPER_BS_U_DEF,
HCLGE_SHAPER_BS_S_DEF);
ret = hclge_tm_pri_shapping_cfg(hdev, HCLGE_TM_SHAP_P_BUCKET, i,
- shaper_para);
+ shaper_para, rate);
if (ret)
return ret;
}
@@ -918,7 +974,8 @@ static int hclge_tm_pri_vnet_base_shaper_pri_cfg(struct hclge_vport *vport)
HCLGE_SHAPER_BS_U_DEF,
HCLGE_SHAPER_BS_S_DEF);
ret = hclge_tm_pri_shapping_cfg(hdev, HCLGE_TM_SHAP_C_BUCKET,
- vport->vport_id, shaper_para);
+ vport->vport_id, shaper_para,
+ vport->bw_limit);
if (ret)
return ret;
@@ -927,7 +984,8 @@ static int hclge_tm_pri_vnet_base_shaper_pri_cfg(struct hclge_vport *vport)
HCLGE_SHAPER_BS_U_DEF,
HCLGE_SHAPER_BS_S_DEF);
ret = hclge_tm_pri_shapping_cfg(hdev, HCLGE_TM_SHAP_P_BUCKET,
- vport->vport_id, shaper_para);
+ vport->vport_id, shaper_para,
+ vport->bw_limit);
if (ret)
return ret;
@@ -1296,15 +1354,23 @@ static int hclge_pfc_setup_hw(struct hclge_dev *hdev)
hdev->tm_info.pfc_en);
}
-/* Each Tc has a 1024 queue sets to backpress, it divides to
- * 32 group, each group contains 32 queue sets, which can be
- * represented by u32 bitmap.
+/* For the queues used for backpressure, divide them into several groups;
+ * each group contains 32 queue sets, which can be represented by a u32 bitmap.
*/
static int hclge_bp_setup_hw(struct hclge_dev *hdev, u8 tc)
{
+ u16 grp_id_shift = HCLGE_BP_GRP_ID_S;
+ u16 grp_id_mask = HCLGE_BP_GRP_ID_M;
+ u8 grp_num = HCLGE_BP_GRP_NUM;
int i;
- for (i = 0; i < HCLGE_BP_GRP_NUM; i++) {
+ if (hdev->num_tqps > HCLGE_TQP_MAX_SIZE_DEV_V2) {
+ grp_num = HCLGE_BP_EXT_GRP_NUM;
+ grp_id_mask = HCLGE_BP_EXT_GRP_ID_M;
+ grp_id_shift = HCLGE_BP_EXT_GRP_ID_S;
+ }
+
+ for (i = 0; i < grp_num; i++) {
u32 qs_bitmap = 0;
int k, ret;
@@ -1313,8 +1379,7 @@ static int hclge_bp_setup_hw(struct hclge_dev *hdev, u8 tc)
u16 qs_id = vport->qs_offset + tc;
u8 grp, sub_grp;
- grp = hnae3_get_field(qs_id, HCLGE_BP_GRP_ID_M,
- HCLGE_BP_GRP_ID_S);
+ grp = hnae3_get_field(qs_id, grp_id_mask, grp_id_shift);
sub_grp = hnae3_get_field(qs_id, HCLGE_BP_SUB_GRP_ID_M,
HCLGE_BP_SUB_GRP_ID_S);
if (i == grp)
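
A worked decode of the group/sub-group split used in the loop above (a sketch; qs_id 0x2a5 is arbitrary and the masks mirror HCLGE_BP_GRP_ID_M and HCLGE_BP_SUB_GRP_ID_M for the non-extended case):

	uint16_t qs_id   = 0x2a5;
	uint16_t sub_grp = qs_id & 0x1f;        /* bits 4..0: bit index in the u32 map */
	uint16_t grp     = (qs_id >> 5) & 0x1f; /* bits 9..5: which 32-qset group */
	uint32_t qs_bitmap = 1u << sub_grp;     /* contribution when i == grp */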
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h
index bb2a2d8e9259..5498d73ed34b 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h
@@ -39,6 +39,12 @@ struct hclge_nq_to_qs_link_cmd {
__le16 nq_id;
__le16 rsvd;
#define HCLGE_TM_Q_QS_LINK_VLD_MSK BIT(10)
+#define HCLGE_TM_QS_ID_L_MSK GENMASK(9, 0)
+#define HCLGE_TM_QS_ID_L_S 0
+#define HCLGE_TM_QS_ID_H_MSK GENMASK(14, 10)
+#define HCLGE_TM_QS_ID_H_S 10
+#define HCLGE_TM_QS_ID_H_EXT_S 11
+#define HCLGE_TM_QS_ID_H_EXT_MSK GENMASK(15, 11)
__le16 qset_id;
};
@@ -86,22 +92,34 @@ enum hclge_shap_bucket {
HCLGE_TM_SHAP_P_BUCKET,
};
+/* setting bit HCLGE_TM_RATE_VLD to 1 means 'rate' is used to configure shaping */
+#define HCLGE_TM_RATE_VLD 0
+
struct hclge_pri_shapping_cmd {
u8 pri_id;
u8 rsvd[3];
__le32 pri_shapping_para;
+ u8 flag;
+ u8 rsvd1[3];
+ __le32 pri_rate;
};
struct hclge_pg_shapping_cmd {
u8 pg_id;
u8 rsvd[3];
__le32 pg_shapping_para;
+ u8 flag;
+ u8 rsvd1[3];
+ __le32 pg_rate;
};
struct hclge_qs_shapping_cmd {
__le16 qs_id;
u8 rsvd[2];
__le32 qs_shapping_para;
+ u8 flag;
+ u8 rsvd1[3];
+ __le32 qs_rate;
};
#define HCLGE_BP_GRP_NUM 32
@@ -109,6 +127,11 @@ struct hclge_qs_shapping_cmd {
#define HCLGE_BP_SUB_GRP_ID_M GENMASK(4, 0)
#define HCLGE_BP_GRP_ID_S 5
#define HCLGE_BP_GRP_ID_M GENMASK(9, 5)
+
+#define HCLGE_BP_EXT_GRP_NUM 40
+#define HCLGE_BP_EXT_GRP_ID_S 5
+#define HCLGE_BP_EXT_GRP_ID_M GENMASK(10, 5)
+
struct hclge_bp_to_qs_map_cmd {
u8 tc_id;
u8 rsvd[2];
@@ -139,6 +162,9 @@ struct hclge_pfc_stats_cmd {
struct hclge_port_shapping_cmd {
__le32 port_shapping_para;
+ u8 flag;
+ u8 rsvd[3];
+ __le32 port_rate;
};
struct hclge_shaper_ir_para {
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c
index 66866c1cfb12..e04c0cfeb95c 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c
@@ -336,6 +336,10 @@ static void hclgevf_parse_capability(struct hclgevf_dev *hdev,
set_bit(HNAE3_DEV_SUPPORT_INT_QL_B, ae_dev->caps);
if (hnae3_get_bit(caps, HCLGEVF_CAP_TQP_TXRX_INDEP_B))
set_bit(HNAE3_DEV_SUPPORT_TQP_TXRX_INDEP_B, ae_dev->caps);
+ if (hnae3_get_bit(caps, HCLGEVF_CAP_HW_TX_CSUM_B))
+ set_bit(HNAE3_DEV_SUPPORT_HW_TX_CSUM_B, ae_dev->caps);
+ if (hnae3_get_bit(caps, HCLGEVF_CAP_UDP_TUNNEL_CSUM_B))
+ set_bit(HNAE3_DEV_SUPPORT_UDP_TUNNEL_CSUM_B, ae_dev->caps);
}
static int hclgevf_cmd_query_version_and_capability(struct hclgevf_dev *hdev)
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.h b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.h
index 9460c128c095..82eed258e8c1 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.h
@@ -111,6 +111,9 @@ enum hclgevf_opcode_type {
#define HCLGEVF_TQP_REG_OFFSET 0x80000
#define HCLGEVF_TQP_REG_SIZE 0x200
+#define HCLGEVF_TQP_MAX_SIZE_DEV_V2 1024
+#define HCLGEVF_TQP_EXT_REG_OFFSET 0x100
+
struct hclgevf_tqp_map {
__le16 tqp_id; /* Absolute tqp id in this pf */
u8 tqp_vf; /* VF id */
@@ -149,12 +152,13 @@ enum HCLGEVF_CAP_BITS {
HCLGEVF_CAP_FD_FORWARD_TC_B,
HCLGEVF_CAP_PTP_B,
HCLGEVF_CAP_INT_QL_B,
- HCLGEVF_CAP_SIMPLE_BD_B,
+ HCLGEVF_CAP_HW_TX_CSUM_B,
HCLGEVF_CAP_TX_PUSH_B,
HCLGEVF_CAP_PHY_IMP_B,
HCLGEVF_CAP_TQP_TXRX_INDEP_B,
HCLGEVF_CAP_HW_PAD_B,
HCLGEVF_CAP_STASH_B,
+ HCLGEVF_CAP_UDP_TUNNEL_CSUM_B,
};
#define HCLGEVF_QUERY_CAP_LENGTH 3
@@ -285,6 +289,14 @@ struct hclgevf_dev_specs_0_cmd {
u8 rsv1[5];
};
+#define HCLGEVF_DEF_MAX_INT_GL 0x1FE0U
+
+struct hclgevf_dev_specs_1_cmd {
+ __le32 rsv0;
+ __le16 max_int_gl;
+ u8 rsv1[18];
+};
+
static inline void hclgevf_write_reg(void __iomem *base, u32 reg, u32 value)
{
writel(value, base + reg);
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
index c8e3fdd5999c..5b2f9a56f1d8 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
@@ -403,8 +403,20 @@ static int hclgevf_alloc_tqps(struct hclgevf_dev *hdev)
tqp->q.buf_size = hdev->rx_buf_len;
tqp->q.tx_desc_num = hdev->num_tx_desc;
tqp->q.rx_desc_num = hdev->num_rx_desc;
- tqp->q.io_base = hdev->hw.io_base + HCLGEVF_TQP_REG_OFFSET +
- i * HCLGEVF_TQP_REG_SIZE;
+
+		/* an extended offset is needed to configure queues numbered
+		 * HCLGEVF_TQP_MAX_SIZE_DEV_V2 and above.
+		 */
+ if (i < HCLGEVF_TQP_MAX_SIZE_DEV_V2)
+ tqp->q.io_base = hdev->hw.io_base +
+ HCLGEVF_TQP_REG_OFFSET +
+ i * HCLGEVF_TQP_REG_SIZE;
+ else
+ tqp->q.io_base = hdev->hw.io_base +
+ HCLGEVF_TQP_REG_OFFSET +
+ HCLGEVF_TQP_EXT_REG_OFFSET +
+ (i - HCLGEVF_TQP_MAX_SIZE_DEV_V2) *
+ HCLGEVF_TQP_REG_SIZE;
tqp++;
}
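
The register-window arithmetic above can be condensed into one helper; tqp_base() below is hypothetical, but the constants mirror the HCLGEVF_TQP_* defines shown earlier:

	static void __iomem *tqp_base(void __iomem *io, unsigned int i)
	{
		if (i < 1024)                   /* HCLGEVF_TQP_MAX_SIZE_DEV_V2 */
			return io + 0x80000 + i * 0x200;
		/* queues >= 1024 restart in a window 0x100 further in */
		return io + 0x80000 + 0x100 + (i - 1024) * 0x200;
	}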
@@ -2430,6 +2442,7 @@ static int hclgevf_init_roce_base_info(struct hclgevf_dev *hdev)
roce->rinfo.netdev = nic->kinfo.netdev;
roce->rinfo.roce_io_base = hdev->hw.io_base;
+ roce->rinfo.roce_mem_base = hdev->hw.mem_base;
roce->pdev = nic->pdev;
roce->ae_algo = nic->ae_algo;
@@ -2875,6 +2888,29 @@ static void hclgevf_uninit_client_instance(struct hnae3_client *client,
}
}
+static int hclgevf_dev_mem_map(struct hclgevf_dev *hdev)
+{
+#define HCLGEVF_MEM_BAR 4
+
+ struct pci_dev *pdev = hdev->pdev;
+ struct hclgevf_hw *hw = &hdev->hw;
+
+	/* if the device does not have device memory, return directly */
+ if (!(pci_select_bars(pdev, IORESOURCE_MEM) & BIT(HCLGEVF_MEM_BAR)))
+ return 0;
+
+ hw->mem_base = devm_ioremap_wc(&pdev->dev,
+ pci_resource_start(pdev,
+ HCLGEVF_MEM_BAR),
+ pci_resource_len(pdev, HCLGEVF_MEM_BAR));
+ if (!hw->mem_base) {
+ dev_err(&pdev->dev, "failed to map device memory\n");
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
static int hclgevf_pci_init(struct hclgevf_dev *hdev)
{
struct pci_dev *pdev = hdev->pdev;
@@ -2909,8 +2945,14 @@ static int hclgevf_pci_init(struct hclgevf_dev *hdev)
goto err_clr_master;
}
+ ret = hclgevf_dev_mem_map(hdev);
+ if (ret)
+ goto err_unmap_io_base;
+
return 0;
+err_unmap_io_base:
+ pci_iounmap(pdev, hdev->hw.io_base);
err_clr_master:
pci_clear_master(pdev);
pci_release_regions(pdev);
@@ -2924,6 +2966,9 @@ static void hclgevf_pci_uninit(struct hclgevf_dev *hdev)
{
struct pci_dev *pdev = hdev->pdev;
+ if (hdev->hw.mem_base)
+ devm_iounmap(&pdev->dev, hdev->hw.mem_base);
+
pci_iounmap(pdev, hdev->hw.io_base);
pci_clear_master(pdev);
pci_release_regions(pdev);
@@ -2991,6 +3036,7 @@ static void hclgevf_set_default_dev_specs(struct hclgevf_dev *hdev)
HCLGEVF_MAX_NON_TSO_BD_NUM;
ae_dev->dev_specs.rss_ind_tbl_size = HCLGEVF_RSS_IND_TBL_SIZE;
ae_dev->dev_specs.rss_key_size = HCLGEVF_RSS_KEY_SIZE;
+ ae_dev->dev_specs.max_int_gl = HCLGEVF_DEF_MAX_INT_GL;
}
static void hclgevf_parse_dev_specs(struct hclgevf_dev *hdev,
@@ -2998,13 +3044,17 @@ static void hclgevf_parse_dev_specs(struct hclgevf_dev *hdev,
{
struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
struct hclgevf_dev_specs_0_cmd *req0;
+ struct hclgevf_dev_specs_1_cmd *req1;
req0 = (struct hclgevf_dev_specs_0_cmd *)desc[0].data;
+ req1 = (struct hclgevf_dev_specs_1_cmd *)desc[1].data;
ae_dev->dev_specs.max_non_tso_bd_num = req0->max_non_tso_bd_num;
ae_dev->dev_specs.rss_ind_tbl_size =
le16_to_cpu(req0->rss_ind_tbl_size);
+ ae_dev->dev_specs.int_ql_max = le16_to_cpu(req0->int_ql_max);
ae_dev->dev_specs.rss_key_size = le16_to_cpu(req0->rss_key_size);
+ ae_dev->dev_specs.max_int_gl = le16_to_cpu(req1->max_int_gl);
}
static void hclgevf_check_dev_specs(struct hclgevf_dev *hdev)
@@ -3017,6 +3067,8 @@ static void hclgevf_check_dev_specs(struct hclgevf_dev *hdev)
dev_specs->rss_ind_tbl_size = HCLGEVF_RSS_IND_TBL_SIZE;
if (!dev_specs->rss_key_size)
dev_specs->rss_key_size = HCLGEVF_RSS_KEY_SIZE;
+ if (!dev_specs->max_int_gl)
+ dev_specs->max_int_gl = HCLGEVF_DEF_MAX_INT_GL;
}
static int hclgevf_query_dev_specs(struct hclgevf_dev *hdev)
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h
index c5bcc3894fd5..1b183bc35604 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h
@@ -164,6 +164,7 @@ struct hclgevf_mac {
struct hclgevf_hw {
void __iomem *io_base;
+ void __iomem *mem_base;
int num_vec;
struct hclgevf_cmq cmq;
struct hclgevf_mac mac;
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_devlink.c b/drivers/net/ethernet/huawei/hinic/hinic_devlink.c
index 2630d667f393..58d5646444b0 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_devlink.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_devlink.c
@@ -285,18 +285,8 @@ static int hinic_devlink_flash_update(struct devlink *devlink,
struct netlink_ext_ack *extack)
{
struct hinic_devlink_priv *priv = devlink_priv(devlink);
- const struct firmware *fw;
- int err;
-
- err = request_firmware_direct(&fw, params->file_name,
- &priv->hwdev->hwif->pdev->dev);
- if (err)
- return err;
-
- err = hinic_firmware_update(priv, fw, extack);
- release_firmware(fw);
- return err;
+ return hinic_firmware_update(priv, params->fw, extack);
}
static const struct devlink_ops hinic_devlink_ops = {
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_port.h b/drivers/net/ethernet/huawei/hinic/hinic_port.h
index 9c3cbe45c9ec..c9ae3d4dc547 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_port.h
+++ b/drivers/net/ethernet/huawei/hinic/hinic_port.h
@@ -8,6 +8,7 @@
#define HINIC_PORT_H
#include <linux/types.h>
+#include <linux/ethtool.h>
#include <linux/etherdevice.h>
#include <linux/bitops.h>
diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
index da9450f18717..cf55c5ea07cb 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.c
+++ b/drivers/net/ethernet/ibm/ibmvnic.c
@@ -84,8 +84,6 @@ static int ibmvnic_reset_crq(struct ibmvnic_adapter *);
static int ibmvnic_send_crq_init(struct ibmvnic_adapter *);
static int ibmvnic_reenable_crq_queue(struct ibmvnic_adapter *);
static int ibmvnic_send_crq(struct ibmvnic_adapter *, union ibmvnic_crq *);
-static int send_subcrq(struct ibmvnic_adapter *adapter, u64 remote_handle,
- union sub_crq *sub_crq);
static int send_subcrq_indirect(struct ibmvnic_adapter *, u64, u64, u64);
static irqreturn_t ibmvnic_interrupt_rx(int irq, void *instance);
static int enable_scrq_irq(struct ibmvnic_adapter *,
@@ -306,9 +304,11 @@ static void replenish_rx_pool(struct ibmvnic_adapter *adapter,
int count = pool->size - atomic_read(&pool->available);
u64 handle = adapter->rx_scrq[pool->index]->handle;
struct device *dev = &adapter->vdev->dev;
+ struct ibmvnic_ind_xmit_queue *ind_bufp;
+ struct ibmvnic_sub_crq_queue *rx_scrq;
+ union sub_crq *sub_crq;
int buffers_added = 0;
unsigned long lpar_rc;
- union sub_crq sub_crq;
struct sk_buff *skb;
unsigned int offset;
dma_addr_t dma_addr;
@@ -320,8 +320,10 @@ static void replenish_rx_pool(struct ibmvnic_adapter *adapter,
if (!pool->active)
return;
+ rx_scrq = adapter->rx_scrq[pool->index];
+ ind_bufp = &rx_scrq->ind_buf;
for (i = 0; i < count; ++i) {
- skb = alloc_skb(pool->buff_size, GFP_ATOMIC);
+ skb = netdev_alloc_skb(adapter->netdev, pool->buff_size);
if (!skb) {
dev_err(dev, "Couldn't replenish rx buff\n");
adapter->replenish_no_mem++;
@@ -346,12 +348,13 @@ static void replenish_rx_pool(struct ibmvnic_adapter *adapter,
pool->rx_buff[index].pool_index = pool->index;
pool->rx_buff[index].size = pool->buff_size;
- memset(&sub_crq, 0, sizeof(sub_crq));
- sub_crq.rx_add.first = IBMVNIC_CRQ_CMD;
- sub_crq.rx_add.correlator =
+ sub_crq = &ind_bufp->indir_arr[ind_bufp->index++];
+ memset(sub_crq, 0, sizeof(*sub_crq));
+ sub_crq->rx_add.first = IBMVNIC_CRQ_CMD;
+ sub_crq->rx_add.correlator =
cpu_to_be64((u64)&pool->rx_buff[index]);
- sub_crq.rx_add.ioba = cpu_to_be32(dma_addr);
- sub_crq.rx_add.map_id = pool->long_term_buff.map_id;
+ sub_crq->rx_add.ioba = cpu_to_be32(dma_addr);
+ sub_crq->rx_add.map_id = pool->long_term_buff.map_id;
/* The length field of the sCRQ is defined to be 24 bits so the
* buffer size needs to be left shifted by a byte before it is
@@ -361,15 +364,20 @@ static void replenish_rx_pool(struct ibmvnic_adapter *adapter,
#ifdef __LITTLE_ENDIAN__
shift = 8;
#endif
- sub_crq.rx_add.len = cpu_to_be32(pool->buff_size << shift);
-
- lpar_rc = send_subcrq(adapter, handle, &sub_crq);
- if (lpar_rc != H_SUCCESS)
- goto failure;
-
- buffers_added++;
- adapter->replenish_add_buff_success++;
+ sub_crq->rx_add.len = cpu_to_be32(pool->buff_size << shift);
pool->next_free = (pool->next_free + 1) % pool->size;
+ if (ind_bufp->index == IBMVNIC_MAX_IND_DESCS ||
+ i == count - 1) {
+ lpar_rc =
+ send_subcrq_indirect(adapter, handle,
+ (u64)ind_bufp->indir_dma,
+ (u64)ind_bufp->index);
+ if (lpar_rc != H_SUCCESS)
+ goto failure;
+ buffers_added += ind_bufp->index;
+ adapter->replenish_add_buff_success += ind_bufp->index;
+ ind_bufp->index = 0;
+ }
}
atomic_add(buffers_added, &pool->available);
return;
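
The length encoding mentioned in the comment above, as a worked two-liner (a sketch; 4096 is an arbitrary buffer size):

	uint32_t buff_size = 4096;
	/* little-endian hosts pre-shift by a byte so the 24-bit length
	 * lands in the right lanes after the cpu_to_be32() byte swap */
	uint32_t len_host = buff_size << 8;     /* then cpu_to_be32(len_host) */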
@@ -377,13 +385,20 @@ static void replenish_rx_pool(struct ibmvnic_adapter *adapter,
failure:
if (lpar_rc != H_PARAMETER && lpar_rc != H_CLOSED)
dev_err_ratelimited(dev, "rx: replenish packet buffer failed\n");
- pool->free_map[pool->next_free] = index;
- pool->rx_buff[index].skb = NULL;
-
- dev_kfree_skb_any(skb);
- adapter->replenish_add_buff_failure++;
- atomic_add(buffers_added, &pool->available);
+ for (i = ind_bufp->index - 1; i >= 0; --i) {
+ struct ibmvnic_rx_buff *rx_buff;
+ pool->next_free = pool->next_free == 0 ?
+ pool->size - 1 : pool->next_free - 1;
+ sub_crq = &ind_bufp->indir_arr[i];
+ rx_buff = (struct ibmvnic_rx_buff *)
+ be64_to_cpu(sub_crq->rx_add.correlator);
+ index = (int)(rx_buff - pool->rx_buff);
+ pool->free_map[pool->next_free] = index;
+ dev_kfree_skb_any(pool->rx_buff[index].skb);
+ pool->rx_buff[index].skb = NULL;
+ }
+ ind_bufp->index = 0;
if (lpar_rc == H_CLOSED || adapter->failover_pending) {
/* Disable buffer pool replenishment and report carrier off if
* queue is closed or pending failover.
@@ -483,7 +498,7 @@ static int reset_rx_pools(struct ibmvnic_adapter *adapter)
if (rx_pool->buff_size != buff_size) {
free_long_term_buff(adapter, &rx_pool->long_term_buff);
- rx_pool->buff_size = buff_size;
+ rx_pool->buff_size = ALIGN(buff_size, L1_CACHE_BYTES);
rc = alloc_long_term_buff(adapter,
&rx_pool->long_term_buff,
rx_pool->size *
@@ -577,7 +592,7 @@ static int init_rx_pools(struct net_device *netdev)
rx_pool->size = adapter->req_rx_add_entries_per_subcrq;
rx_pool->index = i;
- rx_pool->buff_size = buff_size;
+ rx_pool->buff_size = ALIGN(buff_size, L1_CACHE_BYTES);
rx_pool->active = 1;
rx_pool->free_map = kcalloc(rx_pool->size, sizeof(int),
@@ -730,6 +745,7 @@ static int init_tx_pools(struct net_device *netdev)
{
struct ibmvnic_adapter *adapter = netdev_priv(netdev);
int tx_subcrqs;
+ u64 buff_size;
int i, rc;
tx_subcrqs = adapter->num_active_tx_scrqs;
@@ -746,9 +762,11 @@ static int init_tx_pools(struct net_device *netdev)
adapter->num_active_tx_pools = tx_subcrqs;
for (i = 0; i < tx_subcrqs; i++) {
+ buff_size = adapter->req_mtu + VLAN_HLEN;
+ buff_size = ALIGN(buff_size, L1_CACHE_BYTES);
rc = init_one_tx_pool(netdev, &adapter->tx_pool[i],
adapter->req_tx_entries_per_subcrq,
- adapter->req_mtu + VLAN_HLEN);
+ buff_size);
if (rc) {
release_tx_pools(adapter);
return rc;
@@ -1146,6 +1164,7 @@ static int __ibmvnic_open(struct net_device *netdev)
if (prev_state == VNIC_CLOSED)
enable_irq(adapter->tx_scrq[i]->irq);
enable_scrq_irq(adapter, adapter->tx_scrq[i]);
+ netdev_tx_reset_queue(netdev_get_tx_queue(netdev, i));
}
rc = set_link_state(adapter, IBMVNIC_LOGICAL_LNK_UP);
@@ -1476,17 +1495,18 @@ static int create_hdr_descs(u8 hdr_field, u8 *hdr_data, int len, int *hdr_len,
* L2/L3/L4 packet header descriptors to be sent by send_subcrq_indirect.
*/
-static void build_hdr_descs_arr(struct ibmvnic_tx_buff *txbuff,
+static void build_hdr_descs_arr(struct sk_buff *skb,
+ union sub_crq *indir_arr,
int *num_entries, u8 hdr_field)
{
int hdr_len[3] = {0, 0, 0};
+ u8 hdr_data[140] = {0};
int tot_len;
- u8 *hdr_data = txbuff->hdr_data;
- tot_len = build_hdr_data(hdr_field, txbuff->skb, hdr_len,
- txbuff->hdr_data);
+ tot_len = build_hdr_data(hdr_field, skb, hdr_len,
+ hdr_data);
*num_entries += create_hdr_descs(hdr_field, hdr_data, tot_len, hdr_len,
- txbuff->indir_arr + 1);
+ indir_arr + 1);
}
static int ibmvnic_xmit_workarounds(struct sk_buff *skb,
@@ -1504,17 +1524,95 @@ static int ibmvnic_xmit_workarounds(struct sk_buff *skb,
return 0;
}
+static void ibmvnic_tx_scrq_clean_buffer(struct ibmvnic_adapter *adapter,
+ struct ibmvnic_sub_crq_queue *tx_scrq)
+{
+ struct ibmvnic_ind_xmit_queue *ind_bufp;
+ struct ibmvnic_tx_buff *tx_buff;
+ struct ibmvnic_tx_pool *tx_pool;
+ union sub_crq tx_scrq_entry;
+ int queue_num;
+ int entries;
+ int index;
+ int i;
+
+ ind_bufp = &tx_scrq->ind_buf;
+ entries = (u64)ind_bufp->index;
+ queue_num = tx_scrq->pool_index;
+
+ for (i = entries - 1; i >= 0; --i) {
+ tx_scrq_entry = ind_bufp->indir_arr[i];
+ if (tx_scrq_entry.v1.type != IBMVNIC_TX_DESC)
+ continue;
+ index = be32_to_cpu(tx_scrq_entry.v1.correlator);
+ if (index & IBMVNIC_TSO_POOL_MASK) {
+ tx_pool = &adapter->tso_pool[queue_num];
+ index &= ~IBMVNIC_TSO_POOL_MASK;
+ } else {
+ tx_pool = &adapter->tx_pool[queue_num];
+ }
+ tx_pool->free_map[tx_pool->consumer_index] = index;
+ tx_pool->consumer_index = tx_pool->consumer_index == 0 ?
+ tx_pool->num_buffers - 1 :
+ tx_pool->consumer_index - 1;
+ tx_buff = &tx_pool->tx_buff[index];
+ adapter->netdev->stats.tx_packets--;
+ adapter->netdev->stats.tx_bytes -= tx_buff->skb->len;
+ adapter->tx_stats_buffers[queue_num].packets--;
+ adapter->tx_stats_buffers[queue_num].bytes -=
+ tx_buff->skb->len;
+ dev_kfree_skb_any(tx_buff->skb);
+ tx_buff->skb = NULL;
+ adapter->netdev->stats.tx_dropped++;
+ }
+ ind_bufp->index = 0;
+ if (atomic_sub_return(entries, &tx_scrq->used) <=
+ (adapter->req_tx_entries_per_subcrq / 2) &&
+ __netif_subqueue_stopped(adapter->netdev, queue_num)) {
+ netif_wake_subqueue(adapter->netdev, queue_num);
+ netdev_dbg(adapter->netdev, "Started queue %d\n",
+ queue_num);
+ }
+}
+
+static int ibmvnic_tx_scrq_flush(struct ibmvnic_adapter *adapter,
+ struct ibmvnic_sub_crq_queue *tx_scrq)
+{
+ struct ibmvnic_ind_xmit_queue *ind_bufp;
+ u64 dma_addr;
+ u64 entries;
+ u64 handle;
+ int rc;
+
+ ind_bufp = &tx_scrq->ind_buf;
+ dma_addr = (u64)ind_bufp->indir_dma;
+ entries = (u64)ind_bufp->index;
+ handle = tx_scrq->handle;
+
+ if (!entries)
+ return 0;
+ rc = send_subcrq_indirect(adapter, handle, dma_addr, entries);
+ if (rc)
+ ibmvnic_tx_scrq_clean_buffer(adapter, tx_scrq);
+ else
+ ind_bufp->index = 0;
+ return 0;
+}
+
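
With this helper in place the xmit path follows a fill-then-flush discipline: descriptors accumulate in the per-queue ind_buf array and a single indirect hypercall pushes the whole batch, either when the array cannot fit the next entry or when the stack stops signalling xmit_more. The shape of it (a sketch; submit_indirect() stands in for send_subcrq_indirect()):

	/* queue one descriptor; ring the doorbell once per batch */
	ind_bufp->indir_arr[ind_bufp->index++] = desc;
	if (ind_bufp->index == IBMVNIC_MAX_IND_DESCS || !xmit_more) {
		submit_indirect(handle, ind_bufp->indir_dma, ind_bufp->index);
		ind_bufp->index = 0;
	}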
static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
{
struct ibmvnic_adapter *adapter = netdev_priv(netdev);
int queue_num = skb_get_queue_mapping(skb);
u8 *hdrs = (u8 *)&adapter->tx_rx_desc_req;
struct device *dev = &adapter->vdev->dev;
+ struct ibmvnic_ind_xmit_queue *ind_bufp;
struct ibmvnic_tx_buff *tx_buff = NULL;
struct ibmvnic_sub_crq_queue *tx_scrq;
struct ibmvnic_tx_pool *tx_pool;
unsigned int tx_send_failed = 0;
+ netdev_tx_t ret = NETDEV_TX_OK;
unsigned int tx_map_failed = 0;
+ union sub_crq indir_arr[16];
unsigned int tx_dropped = 0;
unsigned int tx_packets = 0;
unsigned int tx_bytes = 0;
@@ -1527,8 +1625,10 @@ static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
unsigned char *dst;
int index = 0;
u8 proto = 0;
- u64 handle;
- netdev_tx_t ret = NETDEV_TX_OK;
+
+ tx_scrq = adapter->tx_scrq[queue_num];
+ txq = netdev_get_tx_queue(netdev, queue_num);
+ ind_bufp = &tx_scrq->ind_buf;
if (test_bit(0, &adapter->resetting)) {
if (!netif_subqueue_stopped(netdev, skb))
@@ -1538,6 +1638,7 @@ static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
tx_send_failed++;
tx_dropped++;
ret = NETDEV_TX_OK;
+ ibmvnic_tx_scrq_flush(adapter, tx_scrq);
goto out;
}
@@ -1545,6 +1646,7 @@ static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
tx_dropped++;
tx_send_failed++;
ret = NETDEV_TX_OK;
+ ibmvnic_tx_scrq_flush(adapter, tx_scrq);
goto out;
}
if (skb_is_gso(skb))
@@ -1552,10 +1654,6 @@ static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
else
tx_pool = &adapter->tx_pool[queue_num];
- tx_scrq = adapter->tx_scrq[queue_num];
- txq = netdev_get_tx_queue(netdev, skb_get_queue_mapping(skb));
- handle = tx_scrq->handle;
-
index = tx_pool->free_map[tx_pool->consumer_index];
if (index == IBMVNIC_INVALID_MAP) {
@@ -1563,6 +1661,7 @@ static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
tx_send_failed++;
tx_dropped++;
ret = NETDEV_TX_OK;
+ ibmvnic_tx_scrq_flush(adapter, tx_scrq);
goto out;
}
@@ -1598,11 +1697,8 @@ static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
tx_buff = &tx_pool->tx_buff[index];
tx_buff->skb = skb;
- tx_buff->data_dma[0] = data_dma_addr;
- tx_buff->data_len[0] = skb->len;
tx_buff->index = index;
tx_buff->pool_index = queue_num;
- tx_buff->last_frag = true;
memset(&tx_crq, 0, sizeof(tx_crq));
tx_crq.v1.first = IBMVNIC_CRQ_CMD;
@@ -1647,55 +1743,29 @@ static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
tx_crq.v1.mss = cpu_to_be16(skb_shinfo(skb)->gso_size);
hdrs += 2;
}
- /* determine if l2/3/4 headers are sent to firmware */
- if ((*hdrs >> 7) & 1) {
- build_hdr_descs_arr(tx_buff, &num_entries, *hdrs);
- tx_crq.v1.n_crq_elem = num_entries;
- tx_buff->num_entries = num_entries;
- tx_buff->indir_arr[0] = tx_crq;
- tx_buff->indir_dma = dma_map_single(dev, tx_buff->indir_arr,
- sizeof(tx_buff->indir_arr),
- DMA_TO_DEVICE);
- if (dma_mapping_error(dev, tx_buff->indir_dma)) {
- dev_kfree_skb_any(skb);
- tx_buff->skb = NULL;
- if (!firmware_has_feature(FW_FEATURE_CMO))
- dev_err(dev, "tx: unable to map descriptor array\n");
- tx_map_failed++;
- tx_dropped++;
- ret = NETDEV_TX_OK;
- goto tx_err_out;
- }
- lpar_rc = send_subcrq_indirect(adapter, handle,
- (u64)tx_buff->indir_dma,
- (u64)num_entries);
- dma_unmap_single(dev, tx_buff->indir_dma,
- sizeof(tx_buff->indir_arr), DMA_TO_DEVICE);
- } else {
- tx_buff->num_entries = num_entries;
- lpar_rc = send_subcrq(adapter, handle,
- &tx_crq);
- }
- if (lpar_rc != H_SUCCESS) {
- if (lpar_rc != H_CLOSED && lpar_rc != H_PARAMETER)
- dev_err_ratelimited(dev, "tx: send failed\n");
- dev_kfree_skb_any(skb);
- tx_buff->skb = NULL;
- if (lpar_rc == H_CLOSED || adapter->failover_pending) {
- /* Disable TX and report carrier off if queue is closed
- * or pending failover.
- * Firmware guarantees that a signal will be sent to the
- * driver, triggering a reset or some other action.
- */
- netif_tx_stop_all_queues(netdev);
- netif_carrier_off(netdev);
- }
+ if ((*hdrs >> 7) & 1)
+ build_hdr_descs_arr(skb, indir_arr, &num_entries, *hdrs);
- tx_send_failed++;
- tx_dropped++;
- ret = NETDEV_TX_OK;
- goto tx_err_out;
+ tx_crq.v1.n_crq_elem = num_entries;
+ tx_buff->num_entries = num_entries;
+	/* flush the buffer if the current entry cannot fit */
+ if (num_entries + ind_bufp->index > IBMVNIC_MAX_IND_DESCS) {
+ lpar_rc = ibmvnic_tx_scrq_flush(adapter, tx_scrq);
+ if (lpar_rc != H_SUCCESS)
+ goto tx_flush_err;
+ }
+
+ indir_arr[0] = tx_crq;
+ memcpy(&ind_bufp->indir_arr[ind_bufp->index], &indir_arr[0],
+ num_entries * sizeof(struct ibmvnic_generic_scrq));
+ ind_bufp->index += num_entries;
+ if (__netdev_tx_sent_queue(txq, skb->len,
+ netdev_xmit_more() &&
+ ind_bufp->index < IBMVNIC_MAX_IND_DESCS)) {
+ lpar_rc = ibmvnic_tx_scrq_flush(adapter, tx_scrq);
+ if (lpar_rc != H_SUCCESS)
+ goto tx_err;
}
if (atomic_add_return(num_entries, &tx_scrq->used)
@@ -1710,14 +1780,26 @@ static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
ret = NETDEV_TX_OK;
goto out;
-tx_err_out:
- /* roll back consumer index and map array*/
- if (tx_pool->consumer_index == 0)
- tx_pool->consumer_index =
- tx_pool->num_buffers - 1;
- else
- tx_pool->consumer_index--;
- tx_pool->free_map[tx_pool->consumer_index] = index;
+tx_flush_err:
+ dev_kfree_skb_any(skb);
+ tx_buff->skb = NULL;
+ tx_pool->consumer_index = tx_pool->consumer_index == 0 ?
+ tx_pool->num_buffers - 1 :
+ tx_pool->consumer_index - 1;
+ tx_dropped++;
+tx_err:
+ if (lpar_rc != H_CLOSED && lpar_rc != H_PARAMETER)
+ dev_err_ratelimited(dev, "tx: send failed\n");
+
+ if (lpar_rc == H_CLOSED || adapter->failover_pending) {
+ /* Disable TX and report carrier off if queue is closed
+ * or pending failover.
+ * Firmware guarantees that a signal will be sent to the
+ * driver, triggering a reset or some other action.
+ */
+ netif_tx_stop_all_queues(netdev);
+ netif_carrier_off(netdev);
+ }
out:
netdev->stats.tx_dropped += tx_dropped;
netdev->stats.tx_bytes += tx_bytes;
@@ -2381,10 +2463,17 @@ static void remove_buff_from_pool(struct ibmvnic_adapter *adapter,
static int ibmvnic_poll(struct napi_struct *napi, int budget)
{
- struct net_device *netdev = napi->dev;
- struct ibmvnic_adapter *adapter = netdev_priv(netdev);
- int scrq_num = (int)(napi - adapter->napi);
- int frames_processed = 0;
+ struct ibmvnic_sub_crq_queue *rx_scrq;
+ struct ibmvnic_adapter *adapter;
+ struct net_device *netdev;
+ int frames_processed;
+ int scrq_num;
+
+ netdev = napi->dev;
+ adapter = netdev_priv(netdev);
+ scrq_num = (int)(napi - adapter->napi);
+ frames_processed = 0;
+ rx_scrq = adapter->rx_scrq[scrq_num];
restart_poll:
while (frames_processed < budget) {
@@ -2397,12 +2486,12 @@ restart_poll:
if (unlikely(test_bit(0, &adapter->resetting) &&
adapter->reset_reason != VNIC_RESET_NON_FATAL)) {
- enable_scrq_irq(adapter, adapter->rx_scrq[scrq_num]);
+ enable_scrq_irq(adapter, rx_scrq);
napi_complete_done(napi, frames_processed);
return frames_processed;
}
- if (!pending_scrq(adapter, adapter->rx_scrq[scrq_num]))
+ if (!pending_scrq(adapter, rx_scrq))
break;
/* The queue entry at the current index is peeked at above
* to determine that there is a valid descriptor awaiting
@@ -2410,7 +2499,7 @@ restart_poll:
* holds a valid descriptor before reading its contents.
*/
dma_rmb();
- next = ibmvnic_next_scrq(adapter, adapter->rx_scrq[scrq_num]);
+ next = ibmvnic_next_scrq(adapter, rx_scrq);
rx_buff =
(struct ibmvnic_rx_buff *)be64_to_cpu(next->
rx_comp.correlator);
@@ -2467,16 +2556,21 @@ restart_poll:
frames_processed++;
}
- if (adapter->state != VNIC_CLOSING)
+ if (adapter->state != VNIC_CLOSING &&
+ ((atomic_read(&adapter->rx_pool[scrq_num].available) <
+ adapter->req_rx_add_entries_per_subcrq / 2) ||
+ frames_processed < budget))
replenish_rx_pool(adapter, &adapter->rx_pool[scrq_num]);
-
if (frames_processed < budget) {
- enable_scrq_irq(adapter, adapter->rx_scrq[scrq_num]);
- napi_complete_done(napi, frames_processed);
- if (pending_scrq(adapter, adapter->rx_scrq[scrq_num]) &&
- napi_reschedule(napi)) {
- disable_scrq_irq(adapter, adapter->rx_scrq[scrq_num]);
- goto restart_poll;
+ if (napi_complete_done(napi, frames_processed)) {
+ enable_scrq_irq(adapter, rx_scrq);
+ if (pending_scrq(adapter, rx_scrq)) {
+ rmb();
+ if (napi_reschedule(napi)) {
+ disable_scrq_irq(adapter, rx_scrq);
+ goto restart_poll;
+ }
+ }
}
}
return frames_processed;
@@ -2880,10 +2974,12 @@ static int reset_one_sub_crq_queue(struct ibmvnic_adapter *adapter,
irq_dispose_mapping(scrq->irq);
scrq->irq = 0;
}
+
if (scrq->msgs) {
memset(scrq->msgs, 0, 4 * PAGE_SIZE);
atomic_set(&scrq->used, 0);
scrq->cur = 0;
+ scrq->ind_buf.index = 0;
} else {
netdev_dbg(adapter->netdev, "Invalid scrq reset\n");
return -EINVAL;
@@ -2942,6 +3038,11 @@ static void release_sub_crq_queue(struct ibmvnic_adapter *adapter,
}
}
+ dma_free_coherent(dev,
+ IBMVNIC_IND_ARR_SZ,
+ scrq->ind_buf.indir_arr,
+ scrq->ind_buf.indir_dma);
+
dma_unmap_single(dev, scrq->msg_token, 4 * PAGE_SIZE,
DMA_BIDIRECTIONAL);
free_pages((unsigned long)scrq->msgs, 2);
@@ -2988,6 +3089,17 @@ static struct ibmvnic_sub_crq_queue *init_sub_crq_queue(struct ibmvnic_adapter
scrq->adapter = adapter;
scrq->size = 4 * PAGE_SIZE / sizeof(*scrq->msgs);
+ scrq->ind_buf.index = 0;
+
+ scrq->ind_buf.indir_arr =
+ dma_alloc_coherent(dev,
+ IBMVNIC_IND_ARR_SZ,
+ &scrq->ind_buf.indir_dma,
+ GFP_KERNEL);
+
+ if (!scrq->ind_buf.indir_arr)
+ goto indir_failed;
+
spin_lock_init(&scrq->lock);
netdev_dbg(adapter->netdev,
@@ -2996,6 +3108,12 @@ static struct ibmvnic_sub_crq_queue *init_sub_crq_queue(struct ibmvnic_adapter
return scrq;
+indir_failed:
+ do {
+ rc = plpar_hcall_norets(H_FREE_SUB_CRQ,
+ adapter->vdev->unit_address,
+ scrq->crq_num);
+ } while (rc == H_BUSY || rc == H_IS_LONG_BUSY(rc));
reg_failed:
dma_unmap_single(dev, scrq->msg_token, 4 * PAGE_SIZE,
DMA_BIDIRECTIONAL);
@@ -3110,14 +3228,17 @@ static int ibmvnic_complete_tx(struct ibmvnic_adapter *adapter,
struct device *dev = &adapter->vdev->dev;
struct ibmvnic_tx_pool *tx_pool;
struct ibmvnic_tx_buff *txbuff;
+ struct netdev_queue *txq;
union sub_crq *next;
int index;
- int i, j;
+ int i;
restart_loop:
while (pending_scrq(adapter, scrq)) {
unsigned int pool = scrq->pool_index;
int num_entries = 0;
+ int total_bytes = 0;
+ int num_packets = 0;
/* The queue entry at the current index is peeked at above
* to determine that there is a valid descriptor awaiting
@@ -3140,21 +3261,16 @@ restart_loop:
}
txbuff = &tx_pool->tx_buff[index];
-
- for (j = 0; j < IBMVNIC_MAX_FRAGS_PER_CRQ; j++) {
- if (!txbuff->data_dma[j])
- continue;
-
- txbuff->data_dma[j] = 0;
- }
-
- if (txbuff->last_frag) {
- dev_kfree_skb_any(txbuff->skb);
+ num_packets++;
+ num_entries += txbuff->num_entries;
+ if (txbuff->skb) {
+ total_bytes += txbuff->skb->len;
+ dev_consume_skb_irq(txbuff->skb);
txbuff->skb = NULL;
+ } else {
+ netdev_warn(adapter->netdev,
+ "TX completion received with NULL socket buffer\n");
}
-
- num_entries += txbuff->num_entries;
-
tx_pool->free_map[tx_pool->producer_index] = index;
tx_pool->producer_index =
(tx_pool->producer_index + 1) %
@@ -3163,6 +3279,9 @@ restart_loop:
/* remove tx_comp scrq*/
next->tx_comp.first = 0;
+ txq = netdev_get_tx_queue(adapter->netdev, scrq->pool_index);
+ netdev_tx_completed_queue(txq, num_packets, total_bytes);
+
if (atomic_sub_return(num_entries, &scrq->used) <=
(adapter->req_tx_entries_per_subcrq / 2) &&
__netif_subqueue_stopped(adapter->netdev,
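
Together with the netdev_tx_reset_queue() call added in __ibmvnic_open() and the __netdev_tx_sent_queue() call in the xmit path, this completes the byte queue limits (BQL) triad. The canonical pattern, reduced to its three touch points:

	/* on open/reset */
	netdev_tx_reset_queue(txq);

	/* in ndo_start_xmit: returns true when the doorbell must ring now */
	if (__netdev_tx_sent_queue(txq, skb->len, netdev_xmit_more()))
		/* flush batched descriptors to hardware */;

	/* in the TX completion handler */
	netdev_tx_completed_queue(txq, num_packets, total_bytes);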
@@ -3567,38 +3686,6 @@ static void print_subcrq_error(struct device *dev, int rc, const char *func)
}
}
-static int send_subcrq(struct ibmvnic_adapter *adapter, u64 remote_handle,
- union sub_crq *sub_crq)
-{
- unsigned int ua = adapter->vdev->unit_address;
- struct device *dev = &adapter->vdev->dev;
- u64 *u64_crq = (u64 *)sub_crq;
- int rc;
-
- netdev_dbg(adapter->netdev,
- "Sending sCRQ %016lx: %016lx %016lx %016lx %016lx\n",
- (unsigned long int)cpu_to_be64(remote_handle),
- (unsigned long int)cpu_to_be64(u64_crq[0]),
- (unsigned long int)cpu_to_be64(u64_crq[1]),
- (unsigned long int)cpu_to_be64(u64_crq[2]),
- (unsigned long int)cpu_to_be64(u64_crq[3]));
-
- /* Make sure the hypervisor sees the complete request */
- mb();
-
- rc = plpar_hcall_norets(H_SEND_SUB_CRQ, ua,
- cpu_to_be64(remote_handle),
- cpu_to_be64(u64_crq[0]),
- cpu_to_be64(u64_crq[1]),
- cpu_to_be64(u64_crq[2]),
- cpu_to_be64(u64_crq[3]));
-
- if (rc)
- print_subcrq_error(dev, rc, __func__);
-
- return rc;
-}
-
static int send_subcrq_indirect(struct ibmvnic_adapter *adapter,
u64 remote_handle, u64 ioba, u64 num_entries)
{
diff --git a/drivers/net/ethernet/ibm/ibmvnic.h b/drivers/net/ethernet/ibm/ibmvnic.h
index 21e7ea858cda..c09c3f6bba9f 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.h
+++ b/drivers/net/ethernet/ibm/ibmvnic.h
@@ -31,6 +31,8 @@
#define IBMVNIC_BUFFS_PER_POOL 100
#define IBMVNIC_MAX_QUEUES 16
#define IBMVNIC_MAX_QUEUE_SZ 4096
+#define IBMVNIC_MAX_IND_DESCS 128
+#define IBMVNIC_IND_ARR_SZ (IBMVNIC_MAX_IND_DESCS * 32)
#define IBMVNIC_TSO_BUF_SZ 65536
#define IBMVNIC_TSO_BUFS 64
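
A quick size check on the new defines: 128 indirect descriptors at 32 bytes apiece (the multiplier in IBMVNIC_IND_ARR_SZ) come to exactly 4096 bytes, one page per sub-CRQ from dma_alloc_coherent(). If that invariant is worth pinning down, a compile-time assert would do it (a suggestion, not part of the patch):

	BUILD_BUG_ON(IBMVNIC_MAX_IND_DESCS * 32 != 4096);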
@@ -224,8 +226,6 @@ struct ibmvnic_tx_comp_desc {
#define IBMVNIC_TCP_CHKSUM 0x20
#define IBMVNIC_UDP_CHKSUM 0x08
-#define IBMVNIC_MAX_FRAGS_PER_CRQ 3
-
struct ibmvnic_tx_desc {
u8 first;
u8 type;
@@ -861,6 +861,12 @@ union sub_crq {
struct ibmvnic_rx_buff_add_desc rx_add;
};
+struct ibmvnic_ind_xmit_queue {
+ union sub_crq *indir_arr;
+ dma_addr_t indir_dma;
+ int index;
+};
+
struct ibmvnic_sub_crq_queue {
union sub_crq *msgs;
int size, cur;
@@ -873,10 +879,11 @@ struct ibmvnic_sub_crq_queue {
spinlock_t lock;
struct sk_buff *rx_skb_top;
struct ibmvnic_adapter *adapter;
+ struct ibmvnic_ind_xmit_queue ind_buf;
atomic_t used;
char name[32];
u64 handle;
-};
+} ____cacheline_aligned;
struct ibmvnic_long_term_buff {
unsigned char *buff;
@@ -887,14 +894,8 @@ struct ibmvnic_long_term_buff {
struct ibmvnic_tx_buff {
struct sk_buff *skb;
- dma_addr_t data_dma[IBMVNIC_MAX_FRAGS_PER_CRQ];
- unsigned int data_len[IBMVNIC_MAX_FRAGS_PER_CRQ];
int index;
int pool_index;
- bool last_frag;
- union sub_crq indir_arr[6];
- u8 hdr_data[140];
- dma_addr_t indir_dma;
int num_entries;
};
@@ -906,7 +907,7 @@ struct ibmvnic_tx_pool {
struct ibmvnic_long_term_buff long_term_buff;
int num_buffers;
int buf_size;
-};
+} ____cacheline_aligned;
struct ibmvnic_rx_buff {
struct sk_buff *skb;
@@ -927,7 +928,7 @@ struct ibmvnic_rx_pool {
int next_alloc;
int active;
struct ibmvnic_long_term_buff long_term_buff;
-};
+} ____cacheline_aligned;
struct ibmvnic_vpd {
unsigned char *buff;
@@ -1013,8 +1014,8 @@ struct ibmvnic_adapter {
atomic_t running_cap_crqs;
bool wait_capability;
- struct ibmvnic_sub_crq_queue **tx_scrq;
- struct ibmvnic_sub_crq_queue **rx_scrq;
+ struct ibmvnic_sub_crq_queue **tx_scrq ____cacheline_aligned;
+ struct ibmvnic_sub_crq_queue **rx_scrq ____cacheline_aligned;
/* rx structs */
struct napi_struct *napi;
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c b/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c
index 908fefaa6b85..66776ba7bfb6 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2013 - 2019 Intel Corporation. */
+#include <linux/ethtool.h>
#include <linux/vmalloc.h>
#include "fm10k.h"
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
index 1b5390ec3d78..729c4f0d5ac5 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
@@ -63,7 +63,7 @@ static void i40e_vc_notify_vf_link_state(struct i40e_vf *vf)
} else if (vf->link_forced) {
pfe.event_data.link_event.link_status = vf->link_up;
pfe.event_data.link_event.link_speed =
- (vf->link_up ? VIRTCHNL_LINK_SPEED_40GB : 0);
+ (vf->link_up ? i40e_virtchnl_link_speed(ls->link_speed) : 0);
} else {
pfe.event_data.link_event.link_status =
ls->link_info & I40E_AQ_LINK_UP;
@@ -4441,6 +4441,7 @@ int i40e_ndo_set_vf_link_state(struct net_device *netdev, int vf_id, int link)
{
struct i40e_netdev_priv *np = netdev_priv(netdev);
struct i40e_pf *pf = np->vsi->back;
+ struct i40e_link_status *ls = &pf->hw.phy.link_info;
struct virtchnl_pf_event pfe;
struct i40e_hw *hw = &pf->hw;
struct i40e_vf *vf;
@@ -4478,7 +4479,7 @@ int i40e_ndo_set_vf_link_state(struct net_device *netdev, int vf_id, int link)
vf->link_forced = true;
vf->link_up = true;
pfe.event_data.link_event.link_status = true;
- pfe.event_data.link_event.link_speed = VIRTCHNL_LINK_SPEED_40GB;
+ pfe.event_data.link_event.link_speed = i40e_virtchnl_link_speed(ls->link_speed);
break;
case IFLA_VF_LINK_STATE_DISABLE:
vf->link_forced = true;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_xsk.c b/drivers/net/ethernet/intel/i40e/i40e_xsk.c
index 567fd67e900e..67febc7b6798 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_xsk.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_xsk.c
@@ -311,7 +311,6 @@ int i40e_clean_rx_irq_zc(struct i40e_ring *rx_ring, int budget)
continue;
}
- bi = i40e_rx_bi(rx_ring, rx_ring->next_to_clean);
size = (qword & I40E_RXD_QW1_LENGTH_PBUF_MASK) >>
I40E_RXD_QW1_LENGTH_PBUF_SHIFT;
if (!size)
diff --git a/drivers/net/ethernet/intel/ice/ice_devlink.c b/drivers/net/ethernet/intel/ice/ice_devlink.c
index 511da59bd6f2..29d6192b15f3 100644
--- a/drivers/net/ethernet/intel/ice/ice_devlink.c
+++ b/drivers/net/ethernet/intel/ice/ice_devlink.c
@@ -247,9 +247,7 @@ ice_devlink_flash_update(struct devlink *devlink,
struct netlink_ext_ack *extack)
{
struct ice_pf *pf = devlink_priv(devlink);
- struct device *dev = &pf->pdev->dev;
struct ice_hw *hw = &pf->hw;
- const struct firmware *fw;
u8 preservation;
int err;
@@ -277,22 +275,9 @@ ice_devlink_flash_update(struct devlink *devlink,
if (err)
return err;
- err = request_firmware(&fw, params->file_name, dev);
- if (err) {
- NL_SET_ERR_MSG_MOD(extack, "Unable to read file from disk");
- return err;
- }
-
- dev_dbg(dev, "Beginning flash update with file '%s'\n", params->file_name);
-
- devlink_flash_update_begin_notify(devlink);
devlink_flash_update_status_notify(devlink, "Preparing to flash", NULL, 0, 0);
- err = ice_flash_pldm_image(pf, fw, preservation, extack);
- devlink_flash_update_end_notify(devlink);
-
- release_firmware(fw);
- return err;
+ return ice_flash_pldm_image(pf, params->fw, preservation, extack);
}
static const struct devlink_ops ice_devlink_ops = {
diff --git a/drivers/net/ethernet/intel/igbvf/netdev.c b/drivers/net/ethernet/intel/igbvf/netdev.c
index ee9f8c1dca83..30fdea24e94a 100644
--- a/drivers/net/ethernet/intel/igbvf/netdev.c
+++ b/drivers/net/ethernet/intel/igbvf/netdev.c
@@ -1236,7 +1236,7 @@ static int igbvf_vlan_rx_add_vid(struct net_device *netdev,
spin_lock_bh(&hw->mbx_lock);
if (hw->mac.ops.set_vfta(hw, vid, true)) {
- dev_err(&adapter->pdev->dev, "Failed to add vlan id %d\n", vid);
+		dev_warn(&adapter->pdev->dev, "Vlan id %d is not added\n", vid);
spin_unlock_bh(&hw->mbx_lock);
return -EINVAL;
}
@@ -1520,7 +1520,7 @@ static void igbvf_reset(struct igbvf_adapter *adapter)
/* Allow time for pending master requests to run */
if (mac->ops.reset_hw(hw))
- dev_err(&adapter->pdev->dev, "PF still resetting\n");
+ dev_warn(&adapter->pdev->dev, "PF still resetting\n");
mac->ops.init_hw(hw);
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index 4a9041ee1b39..b40804e421a7 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -1834,8 +1834,13 @@ static void mvneta_txq_bufs_free(struct mvneta_port *pp,
struct netdev_queue *nq, bool napi)
{
unsigned int bytes_compl = 0, pkts_compl = 0;
+ struct xdp_frame_bulk bq;
int i;
+ xdp_frame_bulk_init(&bq);
+
+	rcu_read_lock(); /* needed for xdp_return_frame_bulk */
+
for (i = 0; i < num; i++) {
struct mvneta_tx_buf *buf = &txq->buf[txq->txq_get_index];
struct mvneta_tx_desc *tx_desc = txq->descs +
@@ -1857,9 +1862,12 @@ static void mvneta_txq_bufs_free(struct mvneta_port *pp,
if (napi && buf->type == MVNETA_TYPE_XDP_TX)
xdp_return_frame_rx_napi(buf->xdpf);
else
- xdp_return_frame(buf->xdpf);
+ xdp_return_frame_bulk(buf->xdpf, &bq);
}
}
+ xdp_flush_frame_bulk(&bq);
+
+ rcu_read_unlock();
netdev_tx_completed_queue(nq, pkts_compl, bytes_compl);
}
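
The bulk-return API amortizes page_pool bookkeeping across a completion batch. Its usage contract, boiled down (the RCU read section must cover every xdp_return_frame_bulk() call up to the final flush):

	struct xdp_frame_bulk bq;

	xdp_frame_bulk_init(&bq);
	rcu_read_lock();
	/* for each completed frame: */
	xdp_return_frame_bulk(xdpf, &bq);
	/* ... */
	xdp_flush_frame_bulk(&bq);
	rcu_read_unlock();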
@@ -2025,16 +2033,16 @@ int mvneta_rx_refill_queue(struct mvneta_port *pp, struct mvneta_rx_queue *rxq)
static void
mvneta_xdp_put_buff(struct mvneta_port *pp, struct mvneta_rx_queue *rxq,
- struct xdp_buff *xdp, int sync_len, bool napi)
+ struct xdp_buff *xdp, struct skb_shared_info *sinfo,
+ int sync_len)
{
- struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(xdp);
int i;
for (i = 0; i < sinfo->nr_frags; i++)
page_pool_put_full_page(rxq->page_pool,
- skb_frag_page(&sinfo->frags[i]), napi);
+ skb_frag_page(&sinfo->frags[i]), true);
page_pool_put_page(rxq->page_pool, virt_to_head_page(xdp->data),
- sync_len, napi);
+ sync_len, true);
}
static int
@@ -2171,6 +2179,7 @@ mvneta_run_xdp(struct mvneta_port *pp, struct mvneta_rx_queue *rxq,
struct bpf_prog *prog, struct xdp_buff *xdp,
u32 frame_sz, struct mvneta_stats *stats)
{
+ struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(xdp);
unsigned int len, data_len, sync;
u32 ret, act;
@@ -2191,7 +2200,7 @@ mvneta_run_xdp(struct mvneta_port *pp, struct mvneta_rx_queue *rxq,
err = xdp_do_redirect(pp->dev, xdp, prog);
if (unlikely(err)) {
- mvneta_xdp_put_buff(pp, rxq, xdp, sync, true);
+ mvneta_xdp_put_buff(pp, rxq, xdp, sinfo, sync);
ret = MVNETA_XDP_DROPPED;
} else {
ret = MVNETA_XDP_REDIR;
@@ -2202,7 +2211,7 @@ mvneta_run_xdp(struct mvneta_port *pp, struct mvneta_rx_queue *rxq,
case XDP_TX:
ret = mvneta_xdp_xmit_back(pp, xdp);
if (ret != MVNETA_XDP_TX)
- mvneta_xdp_put_buff(pp, rxq, xdp, sync, true);
+ mvneta_xdp_put_buff(pp, rxq, xdp, sinfo, sync);
break;
default:
bpf_warn_invalid_xdp_action(act);
@@ -2211,7 +2220,7 @@ mvneta_run_xdp(struct mvneta_port *pp, struct mvneta_rx_queue *rxq,
trace_xdp_exception(pp->dev, prog, act);
fallthrough;
case XDP_DROP:
- mvneta_xdp_put_buff(pp, rxq, xdp, sync, true);
+ mvneta_xdp_put_buff(pp, rxq, xdp, sinfo, sync);
ret = MVNETA_XDP_DROPPED;
stats->xdp_drop++;
break;
@@ -2269,9 +2278,9 @@ mvneta_swbm_add_rx_fragment(struct mvneta_port *pp,
struct mvneta_rx_desc *rx_desc,
struct mvneta_rx_queue *rxq,
struct xdp_buff *xdp, int *size,
+ struct skb_shared_info *xdp_sinfo,
struct page *page)
{
- struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(xdp);
struct net_device *dev = pp->dev;
enum dma_data_direction dma_dir;
int data_len, len;
@@ -2289,13 +2298,22 @@ mvneta_swbm_add_rx_fragment(struct mvneta_port *pp,
len, dma_dir);
rx_desc->buf_phys_addr = 0;
- if (data_len > 0 && sinfo->nr_frags < MAX_SKB_FRAGS) {
- skb_frag_t *frag = &sinfo->frags[sinfo->nr_frags];
+ if (data_len > 0 && xdp_sinfo->nr_frags < MAX_SKB_FRAGS) {
+ skb_frag_t *frag = &xdp_sinfo->frags[xdp_sinfo->nr_frags++];
skb_frag_off_set(frag, pp->rx_offset_correction);
skb_frag_size_set(frag, data_len);
__skb_frag_set_page(frag, page);
- sinfo->nr_frags++;
+
+ /* last fragment */
+ if (len == *size) {
+ struct skb_shared_info *sinfo;
+
+ sinfo = xdp_get_shared_info_from_buff(xdp);
+ sinfo->nr_frags = xdp_sinfo->nr_frags;
+ memcpy(sinfo->frags, xdp_sinfo->frags,
+ sinfo->nr_frags * sizeof(skb_frag_t));
+ }
} else {
page_pool_put_full_page(rxq->page_pool, page, true);
}
@@ -2339,13 +2357,17 @@ static int mvneta_rx_swbm(struct napi_struct *napi,
{
int rx_proc = 0, rx_todo, refill, size = 0;
struct net_device *dev = pp->dev;
- struct xdp_buff xdp_buf = {
- .frame_sz = PAGE_SIZE,
- .rxq = &rxq->xdp_rxq,
- };
+ struct skb_shared_info sinfo;
struct mvneta_stats ps = {};
struct bpf_prog *xdp_prog;
u32 desc_status, frame_sz;
+ struct xdp_buff xdp_buf;
+
+ xdp_buf.data_hard_start = NULL;
+ xdp_buf.frame_sz = PAGE_SIZE;
+ xdp_buf.rxq = &rxq->xdp_rxq;
+
+ sinfo.nr_frags = 0;
/* Get number of received packets */
rx_todo = mvneta_rxq_busy_desc_num_get(pp, rxq);
@@ -2385,11 +2407,11 @@ static int mvneta_rx_swbm(struct napi_struct *napi,
rx_desc->buf_phys_addr = 0;
page_pool_put_full_page(rxq->page_pool, page,
true);
- continue;
+ goto next;
}
mvneta_swbm_add_rx_fragment(pp, rx_desc, rxq, &xdp_buf,
- &size, page);
+ &size, &sinfo, page);
} /* Middle or Last descriptor */
if (!(rx_status & MVNETA_RXD_LAST_DESC))
@@ -2397,7 +2419,7 @@ static int mvneta_rx_swbm(struct napi_struct *napi,
continue;
if (size) {
- mvneta_xdp_put_buff(pp, rxq, &xdp_buf, -1, true);
+ mvneta_xdp_put_buff(pp, rxq, &xdp_buf, &sinfo, -1);
goto next;
}
@@ -2409,7 +2431,7 @@ static int mvneta_rx_swbm(struct napi_struct *napi,
if (IS_ERR(skb)) {
struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats);
- mvneta_xdp_put_buff(pp, rxq, &xdp_buf, -1, true);
+ mvneta_xdp_put_buff(pp, rxq, &xdp_buf, &sinfo, -1);
u64_stats_update_begin(&stats->syncp);
stats->es.skb_alloc_error++;
@@ -2426,11 +2448,12 @@ static int mvneta_rx_swbm(struct napi_struct *napi,
napi_gro_receive(napi, skb);
next:
xdp_buf.data_hard_start = NULL;
+ sinfo.nr_frags = 0;
}
rcu_read_unlock();
if (xdp_buf.data_hard_start)
- mvneta_xdp_put_buff(pp, rxq, &xdp_buf, -1, true);
+ mvneta_xdp_put_buff(pp, rxq, &xdp_buf, &sinfo, -1);
if (ps.xdp_redirect)
xdp_do_flush_map();
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2.h b/drivers/net/ethernet/marvell/mvpp2/mvpp2.h
index 834775843067..6bd7e405e830 100644
--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2.h
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2.h
@@ -695,6 +695,9 @@
/* Maximum number of supported ports */
#define MVPP2_MAX_PORTS 4
+/* Loopback port index */
+#define MVPP2_LOOPBACK_PORT_INDEX 3
+
/* Maximum number of TXQs used by single port */
#define MVPP2_MAX_TXQ 8
@@ -729,22 +732,21 @@
#define MVPP2_TX_DESC_ALIGN (MVPP2_DESC_ALIGNED_SIZE - 1)
/* RX FIFO constants */
+#define MVPP2_RX_FIFO_PORT_DATA_SIZE_44KB 0xb000
#define MVPP2_RX_FIFO_PORT_DATA_SIZE_32KB 0x8000
#define MVPP2_RX_FIFO_PORT_DATA_SIZE_8KB 0x2000
#define MVPP2_RX_FIFO_PORT_DATA_SIZE_4KB 0x1000
-#define MVPP2_RX_FIFO_PORT_ATTR_SIZE_32KB 0x200
-#define MVPP2_RX_FIFO_PORT_ATTR_SIZE_8KB 0x80
+#define MVPP2_RX_FIFO_PORT_ATTR_SIZE(data_size) ((data_size) >> 6)
#define MVPP2_RX_FIFO_PORT_ATTR_SIZE_4KB 0x40
#define MVPP2_RX_FIFO_PORT_MIN_PKT 0x80
/* TX FIFO constants */
-#define MVPP22_TX_FIFO_DATA_SIZE_10KB 0xa
-#define MVPP22_TX_FIFO_DATA_SIZE_3KB 0x3
-#define MVPP2_TX_FIFO_THRESHOLD_MIN 256
-#define MVPP2_TX_FIFO_THRESHOLD_10KB \
- (MVPP22_TX_FIFO_DATA_SIZE_10KB * 1024 - MVPP2_TX_FIFO_THRESHOLD_MIN)
-#define MVPP2_TX_FIFO_THRESHOLD_3KB \
- (MVPP22_TX_FIFO_DATA_SIZE_3KB * 1024 - MVPP2_TX_FIFO_THRESHOLD_MIN)
+#define MVPP22_TX_FIFO_DATA_SIZE_16KB 16
+#define MVPP22_TX_FIFO_DATA_SIZE_10KB 10
+#define MVPP22_TX_FIFO_DATA_SIZE_3KB 3
+#define MVPP2_TX_FIFO_THRESHOLD_MIN 256 /* Bytes */
+#define MVPP2_TX_FIFO_THRESHOLD(kb) \
+ ((kb) * 1024 - MVPP2_TX_FIFO_THRESHOLD_MIN)
/* RX buffer constants */
#define MVPP2_SKB_SHINFO_SIZE \
@@ -946,6 +948,9 @@ struct mvpp2 {
/* List of pointers to port structures */
int port_count;
struct mvpp2_port *port_list[MVPP2_MAX_PORTS];
+ /* Map of enabled ports */
+ unsigned long port_map;
+
struct mvpp2_tai *tai;
/* Number of Tx threads used */
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
index cea886c5bcb5..adf337de00d1 100644
--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
@@ -2440,8 +2440,13 @@ static void mvpp2_txq_bufs_free(struct mvpp2_port *port,
struct mvpp2_tx_queue *txq,
struct mvpp2_txq_pcpu *txq_pcpu, int num)
{
+ struct xdp_frame_bulk bq;
int i;
+ xdp_frame_bulk_init(&bq);
+
+	rcu_read_lock(); /* needed for xdp_return_frame_bulk */
+
for (i = 0; i < num; i++) {
struct mvpp2_txq_pcpu_buf *tx_buf =
txq_pcpu->buffs + txq_pcpu->txq_get_index;
@@ -2454,10 +2459,13 @@ static void mvpp2_txq_bufs_free(struct mvpp2_port *port,
dev_kfree_skb_any(tx_buf->skb);
else if (tx_buf->type == MVPP2_TYPE_XDP_TX ||
tx_buf->type == MVPP2_TYPE_XDP_NDO)
- xdp_return_frame(tx_buf->xdpf);
+ xdp_return_frame_bulk(tx_buf->xdpf, &bq);
mvpp2_txq_inc_get(txq_pcpu);
}
+ xdp_flush_frame_bulk(&bq);
+
+ rcu_read_unlock();
}
static inline struct mvpp2_rx_queue *mvpp2_get_rx_queue(struct mvpp2_port *port,
@@ -6602,32 +6610,56 @@ static void mvpp2_rx_fifo_init(struct mvpp2 *priv)
mvpp2_write(priv, MVPP2_RX_FIFO_INIT_REG, 0x1);
}
-static void mvpp22_rx_fifo_init(struct mvpp2 *priv)
+static void mvpp22_rx_fifo_set_hw(struct mvpp2 *priv, int port, int data_size)
{
- int port;
+ int attr_size = MVPP2_RX_FIFO_PORT_ATTR_SIZE(data_size);
- /* The FIFO size parameters are set depending on the maximum speed a
- * given port can handle:
- * - Port 0: 10Gbps
- * - Port 1: 2.5Gbps
- * - Ports 2 and 3: 1Gbps
- */
+ mvpp2_write(priv, MVPP2_RX_DATA_FIFO_SIZE_REG(port), data_size);
+ mvpp2_write(priv, MVPP2_RX_ATTR_FIFO_SIZE_REG(port), attr_size);
+}
- mvpp2_write(priv, MVPP2_RX_DATA_FIFO_SIZE_REG(0),
- MVPP2_RX_FIFO_PORT_DATA_SIZE_32KB);
- mvpp2_write(priv, MVPP2_RX_ATTR_FIFO_SIZE_REG(0),
- MVPP2_RX_FIFO_PORT_ATTR_SIZE_32KB);
+/* Initialize RX FIFOs: the total FIFO size is 48kB on PPv2.2.
+ * 4kB fixed space must be assigned for the loopback port.
+ * Redistribute the remaining available 44kB space among all active ports.
+ * Guarantee a minimum of 32kB for the 10G port and 8kB for port 1, which
+ * is capable of a 2.5G SGMII link.
+ */
+static void mvpp22_rx_fifo_init(struct mvpp2 *priv)
+{
+ int remaining_ports_count;
+ unsigned long port_map;
+ int size_remainder;
+ int port, size;
+
+ /* The loopback requires fixed 4kB of the FIFO space assignment. */
+ mvpp22_rx_fifo_set_hw(priv, MVPP2_LOOPBACK_PORT_INDEX,
+ MVPP2_RX_FIFO_PORT_DATA_SIZE_4KB);
+ port_map = priv->port_map & ~BIT(MVPP2_LOOPBACK_PORT_INDEX);
+
+ /* Set RX FIFO size to 0 for inactive ports. */
+ for_each_clear_bit(port, &port_map, MVPP2_LOOPBACK_PORT_INDEX)
+ mvpp22_rx_fifo_set_hw(priv, port, 0);
+
+ /* Assign remaining RX FIFO space among all active ports. */
+ size_remainder = MVPP2_RX_FIFO_PORT_DATA_SIZE_44KB;
+ remaining_ports_count = hweight_long(port_map);
+
+ for_each_set_bit(port, &port_map, MVPP2_LOOPBACK_PORT_INDEX) {
+ if (remaining_ports_count == 1)
+ size = size_remainder;
+ else if (port == 0)
+ size = max(size_remainder / remaining_ports_count,
+ MVPP2_RX_FIFO_PORT_DATA_SIZE_32KB);
+ else if (port == 1)
+ size = max(size_remainder / remaining_ports_count,
+ MVPP2_RX_FIFO_PORT_DATA_SIZE_8KB);
+ else
+ size = size_remainder / remaining_ports_count;
- mvpp2_write(priv, MVPP2_RX_DATA_FIFO_SIZE_REG(1),
- MVPP2_RX_FIFO_PORT_DATA_SIZE_8KB);
- mvpp2_write(priv, MVPP2_RX_ATTR_FIFO_SIZE_REG(1),
- MVPP2_RX_FIFO_PORT_ATTR_SIZE_8KB);
+ size_remainder -= size;
+ remaining_ports_count--;
- for (port = 2; port < MVPP2_MAX_PORTS; port++) {
- mvpp2_write(priv, MVPP2_RX_DATA_FIFO_SIZE_REG(port),
- MVPP2_RX_FIFO_PORT_DATA_SIZE_4KB);
- mvpp2_write(priv, MVPP2_RX_ATTR_FIFO_SIZE_REG(port),
- MVPP2_RX_FIFO_PORT_ATTR_SIZE_4KB);
+ mvpp22_rx_fifo_set_hw(priv, port, size);
}
mvpp2_write(priv, MVPP2_RX_MIN_PKT_SIZE_REG,
@@ -6635,24 +6667,53 @@ static void mvpp22_rx_fifo_init(struct mvpp2 *priv)
mvpp2_write(priv, MVPP2_RX_FIFO_INIT_REG, 0x1);
}
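The redistribution loop above is easier to follow with concrete numbers. Below is a minimal standalone C sketch of the same algorithm (not driver code: the helper, macro names and example port map are illustrative), printing the per-port RX FIFO split in kB:

#include <stdio.h>

#define RX_FIFO_TOTAL_KB  44	/* 48kB total minus 4kB for loopback */
#define RX_FIFO_PORT0_MIN 32	/* guarantee for the 10G port */
#define RX_FIFO_PORT1_MIN  8	/* guarantee for the 2.5G-capable port */

static int max_int(int a, int b) { return a > b ? a : b; }

int main(void)
{
	unsigned long port_map = 0x3;	/* example: ports 0 and 1 active */
	int remaining = __builtin_popcountl(port_map);	/* hweight_long() */
	int left = RX_FIFO_TOTAL_KB;
	int port, size;

	for (port = 0; port < 3; port++) {
		if (!(port_map & (1UL << port)))
			continue;
		if (remaining == 1)
			size = left;	/* last active port takes the rest */
		else if (port == 0)
			size = max_int(left / remaining, RX_FIFO_PORT0_MIN);
		else if (port == 1)
			size = max_int(left / remaining, RX_FIFO_PORT1_MIN);
		else
			size = left / remaining;
		left -= size;
		remaining--;
		printf("port %d -> %d kB\n", port, size);
	}
	return 0;
}

With ports 0 and 1 active this prints "port 0 -> 32 kB" and "port 1 -> 12 kB": an even split (22kB each) would violate port 0's 32kB guarantee, so port 0 is topped up first and the last port absorbs whatever remains.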
-/* Initialize Tx FIFO's: the total FIFO size is 19kB on PPv2.2 and 10G
- * interfaces must have a Tx FIFO size of 10kB. As only port 0 can do 10G,
- * configure its Tx FIFO size to 10kB and the others ports Tx FIFO size to 3kB.
+static void mvpp22_tx_fifo_set_hw(struct mvpp2 *priv, int port, int size)
+{
+ int threshold = MVPP2_TX_FIFO_THRESHOLD(size);
+
+ mvpp2_write(priv, MVPP22_TX_FIFO_SIZE_REG(port), size);
+ mvpp2_write(priv, MVPP22_TX_FIFO_THRESH_REG(port), threshold);
+}
+
+/* Initialize TX FIFOs: the total FIFO size is 19kB on PPv2.2.
+ * 3kB of fixed space must be assigned to the loopback port.
+ * Redistribute the remaining available 16kB among all active ports.
+ * The 10G interface should use 10kB (the maximum possible size for a
+ * single port).
*/
static void mvpp22_tx_fifo_init(struct mvpp2 *priv)
{
- int port, size, thrs;
-
- for (port = 0; port < MVPP2_MAX_PORTS; port++) {
- if (port == 0) {
+ int remaining_ports_count;
+ unsigned long port_map;
+ int size_remainder;
+ int port, size;
+
+ /* The loopback port requires a fixed 3kB of FIFO space. */
+ mvpp22_tx_fifo_set_hw(priv, MVPP2_LOOPBACK_PORT_INDEX,
+ MVPP22_TX_FIFO_DATA_SIZE_3KB);
+ port_map = priv->port_map & ~BIT(MVPP2_LOOPBACK_PORT_INDEX);
+
+ /* Set TX FIFO size to 0 for inactive ports. */
+ for_each_clear_bit(port, &port_map, MVPP2_LOOPBACK_PORT_INDEX)
+ mvpp22_tx_fifo_set_hw(priv, port, 0);
+
+ /* Assign remaining TX FIFO space among all active ports. */
+ size_remainder = MVPP22_TX_FIFO_DATA_SIZE_16KB;
+ remaining_ports_count = hweight_long(port_map);
+
+ for_each_set_bit(port, &port_map, MVPP2_LOOPBACK_PORT_INDEX) {
+ if (remaining_ports_count == 1)
+ size = min(size_remainder,
+ MVPP22_TX_FIFO_DATA_SIZE_10KB);
+ else if (port == 0)
size = MVPP22_TX_FIFO_DATA_SIZE_10KB;
- thrs = MVPP2_TX_FIFO_THRESHOLD_10KB;
- } else {
- size = MVPP22_TX_FIFO_DATA_SIZE_3KB;
- thrs = MVPP2_TX_FIFO_THRESHOLD_3KB;
- }
- mvpp2_write(priv, MVPP22_TX_FIFO_SIZE_REG(port), size);
- mvpp2_write(priv, MVPP22_TX_FIFO_THRESH_REG(port), thrs);
+ else
+ size = size_remainder / remaining_ports_count;
+
+ size_remainder -= size;
+ remaining_ports_count--;
+
+ mvpp22_tx_fifo_set_hw(priv, port, size);
}
}
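Worked example for the TX split above, assuming all three ports are active: size_remainder starts at 16kB with three ports left. Port 0 takes the fixed 10kB, leaving 6kB for two ports; port 1 gets 6 / 2 = 3kB; and port 2, being last, gets min(3kB, 10kB) = 3kB.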
@@ -6953,6 +7014,12 @@ static int mvpp2_probe(struct platform_device *pdev)
goto err_axi_clk;
}
+ /* Map DTS-active ports. Must be done before the FIFO setup in mvpp2_init() */
+ fwnode_for_each_available_child_node(fwnode, port_fwnode) {
+ if (!fwnode_property_read_u32(port_fwnode, "port-id", &i))
+ priv->port_map |= BIT(i);
+ }
+
/* Initialize network controller */
err = mvpp2_init(pdev, priv);
if (err < 0) {
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/Makefile b/drivers/net/ethernet/marvell/octeontx2/af/Makefile
index 2f7a861d0c7b..7100d1dd856e 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/Makefile
+++ b/drivers/net/ethernet/marvell/octeontx2/af/Makefile
@@ -9,4 +9,5 @@ obj-$(CONFIG_OCTEONTX2_AF) += octeontx2_af.o
octeontx2_mbox-y := mbox.o rvu_trace.o
octeontx2_af-y := cgx.o rvu.o rvu_cgx.o rvu_npa.o rvu_nix.o \
- rvu_reg.o rvu_npc.o rvu_debugfs.o ptp.o
+ rvu_reg.o rvu_npc.o rvu_debugfs.o ptp.o rvu_npc_fs.o \
+ rvu_cpt.o
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/cgx.c b/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
index 8f17e26dca53..7d0f96290943 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
@@ -145,6 +145,16 @@ int cgx_get_cgxid(void *cgxd)
return cgx->cgx_id;
}
+u8 cgx_lmac_get_p2x(int cgx_id, int lmac_id)
+{
+ struct cgx *cgx_dev = cgx_get_pdata(cgx_id);
+ u64 cfg;
+
+ cfg = cgx_read(cgx_dev, lmac_id, CGXX_CMRX_CFG);
+
+ return (cfg & CMR_P2X_SEL_MASK) >> CMR_P2X_SEL_SHIFT;
+}
+
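Worked read of the new helper: CGXX_CMRX_CFG bits 61:59 select which NIX block the LMAC feeds, so a returned value of CMR_P2X_SEL_NIX1 (2) means NIX1; rvu_map_cgx_nix_block() later in this patch uses exactly this to pick pfvf->nix_blkaddr.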
/* Ensure the required lock for event queue (where asynchronous events are
* posted) is acquired before calling this API. Else an asynchronous event (with
* latest link status) can reach the destination before this function returns
@@ -814,8 +824,7 @@ static int cgx_lmac_verify_fwi_version(struct cgx *cgx)
minor_ver = FIELD_GET(RESP_MINOR_VER, resp);
dev_dbg(dev, "Firmware command interface version = %d.%d\n",
major_ver, minor_ver);
- if (major_ver != CGX_FIRMWARE_MAJOR_VER ||
- minor_ver != CGX_FIRMWARE_MINOR_VER)
+ if (major_ver != CGX_FIRMWARE_MAJOR_VER)
return -EIO;
else
return 0;
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/cgx.h b/drivers/net/ethernet/marvell/octeontx2/af/cgx.h
index 27ca3291682b..bcfc3e5f66bb 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/cgx.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/cgx.h
@@ -27,6 +27,10 @@
/* Registers */
#define CGXX_CMRX_CFG 0x00
+#define CMR_P2X_SEL_MASK GENMASK_ULL(61, 59)
+#define CMR_P2X_SEL_SHIFT 59ULL
+#define CMR_P2X_SEL_NIX0 1ULL
+#define CMR_P2X_SEL_NIX1 2ULL
#define CMR_EN BIT_ULL(55)
#define DATA_PKT_TX_EN BIT_ULL(53)
#define DATA_PKT_RX_EN BIT_ULL(54)
@@ -142,5 +146,6 @@ int cgx_lmac_get_pause_frm(void *cgxd, int lmac_id,
int cgx_lmac_set_pause_frm(void *cgxd, int lmac_id,
u8 tx_pause, u8 rx_pause);
void cgx_lmac_ptp_config(void *cgxd, int lmac_id, bool enable);
+u8 cgx_lmac_get_p2x(int cgx_id, int lmac_id);
#endif /* CGX_H */
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/common.h b/drivers/net/ethernet/marvell/octeontx2/af/common.h
index f48eb66ed021..17f6f42f4453 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/common.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/common.h
@@ -162,6 +162,8 @@ enum nix_scheduler {
#define NIX_RX_ACTIONOP_UCAST_IPSEC (0x2ull)
#define NIX_RX_ACTIONOP_MCAST (0x3ull)
#define NIX_RX_ACTIONOP_RSS (0x4ull)
+/* Use the RX action set in the default unicast entry */
+#define NIX_RX_ACTION_DEFAULT (0xfull)
/* NIX TX action operation*/
#define NIX_TX_ACTIONOP_DROP (0x0ull)
@@ -174,8 +176,12 @@ enum nix_scheduler {
#define NPC_MCAM_KEY_X2 1
#define NPC_MCAM_KEY_X4 2
-#define NIX_INTF_RX 0
-#define NIX_INTF_TX 1
+#define NIX_INTFX_RX(a) (0x0ull | (a) << 1)
+#define NIX_INTFX_TX(a) (0x1ull | (a) << 1)
+
+/* Default interfaces are NIX0_RX and NIX0_TX */
+#define NIX_INTF_RX NIX_INTFX_RX(0)
+#define NIX_INTF_TX NIX_INTFX_TX(0)
#define NIX_INTF_TYPE_CGX 0
#define NIX_INTF_TYPE_LBK 1
@@ -206,6 +212,8 @@ enum ndc_idx_e {
NIX0_RX = 0x0,
NIX0_TX = 0x1,
NPA0_U = 0x2,
+ NIX1_RX = 0x4,
+ NIX1_TX = 0x5,
};
enum ndc_ctype_e {
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/mbox.h b/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
index 263a21129416..f919283ddc34 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
@@ -86,7 +86,7 @@ struct mbox_msghdr {
#define OTX2_MBOX_REQ_SIG (0xdead)
#define OTX2_MBOX_RSP_SIG (0xbeef)
u16 sig; /* Signature, for validating corrupted msgs */
-#define OTX2_MBOX_VERSION (0x0001)
+#define OTX2_MBOX_VERSION (0x0007)
u16 ver; /* Version of msg's structure for this ID */
u16 next_msgoff; /* Offset of next msg within mailbox region */
int rc; /* Msg process'ed response code */
@@ -158,6 +158,11 @@ M(NPA_HWCTX_DISABLE, 0x403, npa_hwctx_disable, hwctx_disable_req, msg_rsp)\
/* SSO/SSOW mbox IDs (range 0x600 - 0x7FF) */ \
/* TIM mbox IDs (range 0x800 - 0x9FF) */ \
/* CPT mbox IDs (range 0xA00 - 0xBFF) */ \
+M(CPT_LF_ALLOC, 0xA00, cpt_lf_alloc, cpt_lf_alloc_req_msg, \
+ msg_rsp) \
+M(CPT_LF_FREE, 0xA01, cpt_lf_free, msg_req, msg_rsp) \
+M(CPT_RD_WR_REGISTER, 0xA02, cpt_rd_wr_register, cpt_rd_wr_reg_msg, \
+ cpt_rd_wr_reg_msg) \
/* NPC mbox IDs (range 0x6000 - 0x7FFF) */ \
M(NPC_MCAM_ALLOC_ENTRY, 0x6000, npc_mcam_alloc_entry, npc_mcam_alloc_entry_req,\
npc_mcam_alloc_entry_rsp) \
@@ -188,10 +193,19 @@ M(NPC_MCAM_ALLOC_AND_WRITE_ENTRY, 0x600b, npc_mcam_alloc_and_write_entry, \
npc_mcam_alloc_and_write_entry_rsp) \
M(NPC_GET_KEX_CFG, 0x600c, npc_get_kex_cfg, \
msg_req, npc_get_kex_cfg_rsp) \
+M(NPC_INSTALL_FLOW, 0x600d, npc_install_flow, \
+ npc_install_flow_req, npc_install_flow_rsp) \
+M(NPC_DELETE_FLOW, 0x600e, npc_delete_flow, \
+ npc_delete_flow_req, msg_rsp) \
+M(NPC_MCAM_READ_ENTRY, 0x600f, npc_mcam_read_entry, \
+ npc_mcam_read_entry_req, \
+ npc_mcam_read_entry_rsp) \
+M(NPC_MCAM_READ_BASE_RULE, 0x6011, npc_read_base_steer_rule, \
+ msg_req, npc_mcam_read_base_rule_rsp) \
/* NIX mbox IDs (range 0x8000 - 0xFFFF) */ \
M(NIX_LF_ALLOC, 0x8000, nix_lf_alloc, \
nix_lf_alloc_req, nix_lf_alloc_rsp) \
-M(NIX_LF_FREE, 0x8001, nix_lf_free, msg_req, msg_rsp) \
+M(NIX_LF_FREE, 0x8001, nix_lf_free, nix_lf_free_req, msg_rsp) \
M(NIX_AQ_ENQ, 0x8002, nix_aq_enq, nix_aq_enq_req, nix_aq_enq_rsp) \
M(NIX_HWCTX_DISABLE, 0x8003, nix_hwctx_disable, \
hwctx_disable_req, msg_rsp) \
@@ -200,7 +214,8 @@ M(NIX_TXSCH_ALLOC, 0x8004, nix_txsch_alloc, \
M(NIX_TXSCH_FREE, 0x8005, nix_txsch_free, nix_txsch_free_req, msg_rsp) \
M(NIX_TXSCHQ_CFG, 0x8006, nix_txschq_cfg, nix_txschq_config, msg_rsp) \
M(NIX_STATS_RST, 0x8007, nix_stats_rst, msg_req, msg_rsp) \
-M(NIX_VTAG_CFG, 0x8008, nix_vtag_cfg, nix_vtag_config, msg_rsp) \
+M(NIX_VTAG_CFG, 0x8008, nix_vtag_cfg, nix_vtag_config, \
+ nix_vtag_config_rsp) \
M(NIX_RSS_FLOWKEY_CFG, 0x8009, nix_rss_flowkey_cfg, \
nix_rss_flowkey_cfg, \
nix_rss_flowkey_cfg_rsp) \
@@ -216,7 +231,6 @@ M(NIX_SET_RX_CFG, 0x8010, nix_set_rx_cfg, nix_rx_cfg, msg_rsp) \
M(NIX_LSO_FORMAT_CFG, 0x8011, nix_lso_format_cfg, \
nix_lso_format_cfg, \
nix_lso_format_cfg_rsp) \
-M(NIX_RXVLAN_ALLOC, 0x8012, nix_rxvlan_alloc, msg_req, msg_rsp) \
M(NIX_LF_PTP_TX_ENABLE, 0x8013, nix_lf_ptp_tx_enable, msg_req, msg_rsp) \
M(NIX_LF_PTP_TX_DISABLE, 0x8014, nix_lf_ptp_tx_disable, msg_req, msg_rsp) \
M(NIX_BP_ENABLE, 0x8016, nix_bp_enable, nix_bp_cfg_req, \
@@ -271,6 +285,17 @@ struct ready_msg_rsp {
* or to detach part of a certain resource type.
* Rest of the fields specify how many of what type to
* be attached.
+ * To request LFs from two blocks of the same type, this mailbox
+ * can be sent twice, as below:
+ * struct rsrc_attach *attach;
+ * .. Allocate memory for message ..
+ * attach->cptlfs = 3; <3 LFs from CPT0>
+ * .. Send message ..
+ * .. Allocate memory for message ..
+ * attach->modify = 1;
+ * attach->cpt_blkaddr = BLKADDR_CPT1;
+ * attach->cptlfs = 2; <2 LFs from CPT1>
+ * .. Send message ..
*/
struct rsrc_attach {
struct mbox_msghdr hdr;
@@ -281,6 +306,7 @@ struct rsrc_attach {
u16 ssow;
u16 timlfs;
u16 cptlfs;
+ int cpt_blkaddr; /* BLKADDR_CPT0/BLKADDR_CPT1 or 0 for BLKADDR_CPT0 */
};
/* Structure for relinquishing resources.
@@ -314,6 +340,8 @@ struct msix_offset_rsp {
u16 ssow_msixoff[MAX_RVU_BLKLF_CNT];
u16 timlf_msixoff[MAX_RVU_BLKLF_CNT];
u16 cptlf_msixoff[MAX_RVU_BLKLF_CNT];
+ u8 cpt1_lfs;
+ u16 cpt1_lf_msixoff[MAX_RVU_BLKLF_CNT];
};
struct get_hw_cap_rsp {
@@ -459,6 +487,20 @@ enum nix_af_status {
NIX_AF_ERR_LSO_CFG_FAIL = -418,
NIX_AF_INVAL_NPA_PF_FUNC = -419,
NIX_AF_INVAL_SSO_PF_FUNC = -420,
+ NIX_AF_ERR_TX_VTAG_NOSPC = -421,
+ NIX_AF_ERR_RX_VTAG_INUSE = -422,
+};
+
+/* For NIX RX vtag action */
+enum nix_rx_vtag0_type {
+ NIX_AF_LFX_RX_VTAG_TYPE0, /* reserved for rx vlan offload */
+ NIX_AF_LFX_RX_VTAG_TYPE1,
+ NIX_AF_LFX_RX_VTAG_TYPE2,
+ NIX_AF_LFX_RX_VTAG_TYPE3,
+ NIX_AF_LFX_RX_VTAG_TYPE4,
+ NIX_AF_LFX_RX_VTAG_TYPE5,
+ NIX_AF_LFX_RX_VTAG_TYPE6,
+ NIX_AF_LFX_RX_VTAG_TYPE7,
};
/* For NIX LF context alloc and init */
@@ -491,6 +533,16 @@ struct nix_lf_alloc_rsp {
u8 lf_tx_stats; /* NIX_AF_CONST1::LF_TX_STATS */
u16 cints; /* NIX_AF_CONST2::CINTS */
u16 qints; /* NIX_AF_CONST2::QINTS */
+ u8 cgx_links; /* No. of CGX links present in HW */
+ u8 lbk_links; /* No. of LBK links present in HW */
+ u8 sdp_links; /* No. of SDP links present in HW */
+};
+
+struct nix_lf_free_req {
+ struct mbox_msghdr hdr;
+#define NIX_LF_DISABLE_FLOWS BIT_ULL(0)
+#define NIX_LF_DONT_FREE_TX_VTAG BIT_ULL(1)
+ u64 flags;
};
/* NIX AQ enqueue msg */
@@ -583,14 +635,40 @@ struct nix_vtag_config {
union {
/* valid when cfg_type is '0' */
struct {
- /* tx vlan0 tag(C-VLAN) */
- u64 vlan0;
- /* tx vlan1 tag(S-VLAN) */
- u64 vlan1;
- /* insert tx vlan tag */
- u8 insert_vlan :1;
- /* insert tx double vlan tag */
- u8 double_vlan :1;
+ u64 vtag0;
+ u64 vtag1;
+
+ /* cfg_vtag0 & cfg_vtag1 fields are valid
+ * when free_vtag0 & free_vtag1 are '0's.
+ */
+ /* cfg_vtag0 = 1 to configure vtag0 */
+ u8 cfg_vtag0 :1;
+ /* cfg_vtag1 = 1 to configure vtag1 */
+ u8 cfg_vtag1 :1;
+
+ /* vtag0_idx & vtag1_idx are only valid when
+ * both cfg_vtag0 & cfg_vtag1 are '0's,
+ * these fields are used along with free_vtag0
+ * & free_vtag1 to free the nix lf's tx_vlan
+ * configuration.
+ *
+ * Denotes the indices of tx_vtag def registers
+ * that needs to be cleared and freed.
+ */
+ int vtag0_idx;
+ int vtag1_idx;
+
+ /* free_vtag0 & free_vtag1 fields are valid
+ * when cfg_vtag0 & cfg_vtag1 are '0's.
+ */
+ /* free_vtag0 = 1 clears vtag0 configuration
+ * vtag0_idx denotes the index to be cleared.
+ */
+ u8 free_vtag0 :1;
+ /* free_vtag1 = 1 clears vtag1 configuration
+ * vtag1_idx denotes the index to be cleared.
+ */
+ u8 free_vtag1 :1;
} tx;
/* valid when cfg_type is '1' */
@@ -605,6 +683,17 @@ struct nix_vtag_config {
};
};
+struct nix_vtag_config_rsp {
+ struct mbox_msghdr hdr;
+ int vtag0_idx;
+ int vtag1_idx;
+ /* Indices of tx_vtag def registers used to configure
+ * tx vtag0 & vtag1 headers. These indices are valid
+ * only when the nix_vtag_config mbox requested vtag0
+ * and/or vtag1 configuration.
+ */
+};
+
struct nix_rss_flowkey_cfg {
struct mbox_msghdr hdr;
int mcam_index; /* MCAM entry index to modify */
@@ -627,6 +716,7 @@ struct nix_rss_flowkey_cfg {
#define NIX_FLOW_KEY_TYPE_INNR_SCTP BIT(16)
#define NIX_FLOW_KEY_TYPE_INNR_ETH_DMAC BIT(17)
#define NIX_FLOW_KEY_TYPE_VLAN BIT(20)
+#define NIX_FLOW_KEY_TYPE_IPV4_PROTO BIT(21)
u32 flowkey_cfg; /* Flowkey types selected */
u8 group; /* RSS context or group */
};
@@ -865,6 +955,87 @@ struct npc_get_kex_cfg_rsp {
u8 mkex_pfl_name[MKEX_NAME_LEN];
};
+struct flow_msg {
+ unsigned char dmac[6];
+ unsigned char smac[6];
+ __be16 etype;
+ __be16 vlan_etype;
+ __be16 vlan_tci;
+ union {
+ __be32 ip4src;
+ __be32 ip6src[4];
+ };
+ union {
+ __be32 ip4dst;
+ __be32 ip6dst[4];
+ };
+ u8 tos;
+ u8 ip_ver;
+ u8 ip_proto;
+ u8 tc;
+ __be16 sport;
+ __be16 dport;
+};
+
+struct npc_install_flow_req {
+ struct mbox_msghdr hdr;
+ struct flow_msg packet;
+ struct flow_msg mask;
+ u64 features;
+ u16 entry;
+ u16 channel;
+ u8 intf;
+ u8 set_cntr; /* Set a counter for this entry if one is available? */
+ u8 default_rule;
+ u8 append; /* overwrite(0) or append(1) flow to default rule? */
+ u16 vf;
+ /* action */
+ u32 index;
+ u16 match_id;
+ u8 flow_key_alg;
+ u8 op;
+ /* vtag rx action */
+ u8 vtag0_type;
+ u8 vtag0_valid;
+ u8 vtag1_type;
+ u8 vtag1_valid;
+ /* vtag tx action */
+ u16 vtag0_def;
+ u8 vtag0_op;
+ u16 vtag1_def;
+ u8 vtag1_op;
+};
+
+struct npc_install_flow_rsp {
+ struct mbox_msghdr hdr;
+ int counter; /* negative if no counter, else the counter number */
+};
+
+struct npc_delete_flow_req {
+ struct mbox_msghdr hdr;
+ u16 entry;
+ u16 start; /* Disable range of entries */
+ u16 end;
+ u8 all; /* PF + VFs */
+};
+
+struct npc_mcam_read_entry_req {
+ struct mbox_msghdr hdr;
+ u16 entry; /* MCAM entry to read */
+};
+
+struct npc_mcam_read_entry_rsp {
+ struct mbox_msghdr hdr;
+ struct mcam_entry entry_data;
+ u8 intf;
+ u8 enable;
+};
+
+struct npc_mcam_read_base_rule_rsp {
+ struct mbox_msghdr hdr;
+ struct mcam_entry entry;
+};
+
enum ptp_op {
PTP_OP_ADJFINE = 0,
PTP_OP_GET_CLOCK = 1,
@@ -881,4 +1052,32 @@ struct ptp_rsp {
u64 clk;
};
+/* CPT mailbox error codes
+ * Range 901 - 1000.
+ */
+enum cpt_af_status {
+ CPT_AF_ERR_PARAM = -901,
+ CPT_AF_ERR_GRP_INVALID = -902,
+ CPT_AF_ERR_LF_INVALID = -903,
+ CPT_AF_ERR_ACCESS_DENIED = -904,
+ CPT_AF_ERR_SSO_PF_FUNC_INVALID = -905,
+ CPT_AF_ERR_NIX_PF_FUNC_INVALID = -906
+};
+
+/* CPT mbox message formats */
+struct cpt_rd_wr_reg_msg {
+ struct mbox_msghdr hdr;
+ u64 reg_offset;
+ u64 *ret_val;
+ u64 val;
+ u8 is_write;
+};
+
+struct cpt_lf_alloc_req_msg {
+ struct mbox_msghdr hdr;
+ u16 nix_pf_func;
+ u16 sso_pf_func;
+ u16 eng_grpmsk;
+};
+
#endif /* MBOX_H */
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/npc.h b/drivers/net/ethernet/marvell/octeontx2/af/npc.h
index 91a9d00e4fb5..a1f79445db71 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/npc.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/npc.h
@@ -140,6 +140,63 @@ enum npc_kpu_lh_ltype {
NPC_LT_LH_CUSTOM1 = 0xF,
};
+/* NPC port kind defines how the incoming or outgoing packets
+ * are processed. NPC accepts packets from up to 64 pkinds.
+ * Software assigns pkind for each incoming port such as CGX
+ * Ethernet interfaces, LBK interfaces, etc.
+ */
+enum npc_pkind_type {
+ NPC_TX_DEF_PKIND = 63ULL, /* NIX-TX PKIND */
+};
+
+/* list of known and supported fields in packet header and
+ * fields present in key structure.
+ */
+enum key_fields {
+ NPC_DMAC,
+ NPC_SMAC,
+ NPC_ETYPE,
+ NPC_OUTER_VID,
+ NPC_TOS,
+ NPC_SIP_IPV4,
+ NPC_DIP_IPV4,
+ NPC_SIP_IPV6,
+ NPC_DIP_IPV6,
+ NPC_SPORT_TCP,
+ NPC_DPORT_TCP,
+ NPC_SPORT_UDP,
+ NPC_DPORT_UDP,
+ NPC_SPORT_SCTP,
+ NPC_DPORT_SCTP,
+ NPC_HEADER_FIELDS_MAX,
+ NPC_CHAN = NPC_HEADER_FIELDS_MAX, /* Valid when Rx */
+ NPC_PF_FUNC, /* Valid when Tx */
+ NPC_ERRLEV,
+ NPC_ERRCODE,
+ NPC_LXMB,
+ NPC_LA,
+ NPC_LB,
+ NPC_LC,
+ NPC_LD,
+ NPC_LE,
+ NPC_LF,
+ NPC_LG,
+ NPC_LH,
+ /* Ethertype for untagged frame */
+ NPC_ETYPE_ETHER,
+ /* Ethertype for single tagged frame */
+ NPC_ETYPE_TAG1,
+ /* Ethertype for double tagged frame */
+ NPC_ETYPE_TAG2,
+ /* outer vlan tci for single tagged frame */
+ NPC_VLAN_TAG1,
+ /* outer vlan tci for double tagged frame */
+ NPC_VLAN_TAG2,
+ /* other header fields programmed for extraction but not of interest */
+ NPC_UNKNOWN,
+ NPC_KEY_FIELDS_MAX,
+};
+
struct npc_kpu_profile_cam {
u8 state;
u8 state_mask;
@@ -300,11 +357,63 @@ struct nix_rx_action {
/* NPC_AF_INTFX_KEX_CFG field masks */
#define NPC_PARSE_NIBBLE GENMASK_ULL(30, 0)
+/* NPC_PARSE_KEX_S nibble definitions for each field */
+#define NPC_PARSE_NIBBLE_CHAN GENMASK_ULL(2, 0)
+#define NPC_PARSE_NIBBLE_ERRLEV BIT_ULL(3)
+#define NPC_PARSE_NIBBLE_ERRCODE GENMASK_ULL(5, 4)
+#define NPC_PARSE_NIBBLE_L2L3_BCAST BIT_ULL(6)
+#define NPC_PARSE_NIBBLE_LA_FLAGS GENMASK_ULL(8, 7)
+#define NPC_PARSE_NIBBLE_LA_LTYPE BIT_ULL(9)
+#define NPC_PARSE_NIBBLE_LB_FLAGS GENMASK_ULL(11, 10)
+#define NPC_PARSE_NIBBLE_LB_LTYPE BIT_ULL(12)
+#define NPC_PARSE_NIBBLE_LC_FLAGS GENMASK_ULL(14, 13)
+#define NPC_PARSE_NIBBLE_LC_LTYPE BIT_ULL(15)
+#define NPC_PARSE_NIBBLE_LD_FLAGS GENMASK_ULL(17, 16)
+#define NPC_PARSE_NIBBLE_LD_LTYPE BIT_ULL(18)
+#define NPC_PARSE_NIBBLE_LE_FLAGS GENMASK_ULL(20, 19)
+#define NPC_PARSE_NIBBLE_LE_LTYPE BIT_ULL(21)
+#define NPC_PARSE_NIBBLE_LF_FLAGS GENMASK_ULL(23, 22)
+#define NPC_PARSE_NIBBLE_LF_LTYPE BIT_ULL(24)
+#define NPC_PARSE_NIBBLE_LG_FLAGS GENMASK_ULL(26, 25)
+#define NPC_PARSE_NIBBLE_LG_LTYPE BIT_ULL(27)
+#define NPC_PARSE_NIBBLE_LH_FLAGS GENMASK_ULL(29, 28)
+#define NPC_PARSE_NIBBLE_LH_LTYPE BIT_ULL(30)
+
+struct nix_tx_action {
+#if defined(__BIG_ENDIAN_BITFIELD)
+ u64 rsvd_63_48 :16;
+ u64 match_id :16;
+ u64 index :20;
+ u64 rsvd_11_8 :8;
+ u64 op :4;
+#else
+ u64 op :4;
+ u64 rsvd_11_8 :8;
+ u64 index :20;
+ u64 match_id :16;
+ u64 rsvd_63_48 :16;
+#endif
+};
+
/* NIX Receive Vtag Action Structure */
-#define VTAG0_VALID_BIT BIT_ULL(15)
-#define VTAG0_TYPE_MASK GENMASK_ULL(14, 12)
-#define VTAG0_LID_MASK GENMASK_ULL(10, 8)
-#define VTAG0_RELPTR_MASK GENMASK_ULL(7, 0)
+#define RX_VTAG0_VALID_BIT BIT_ULL(15)
+#define RX_VTAG0_TYPE_MASK GENMASK_ULL(14, 12)
+#define RX_VTAG0_LID_MASK GENMASK_ULL(10, 8)
+#define RX_VTAG0_RELPTR_MASK GENMASK_ULL(7, 0)
+#define RX_VTAG1_VALID_BIT BIT_ULL(47)
+#define RX_VTAG1_TYPE_MASK GENMASK_ULL(46, 44)
+#define RX_VTAG1_LID_MASK GENMASK_ULL(42, 40)
+#define RX_VTAG1_RELPTR_MASK GENMASK_ULL(39, 32)
+
+/* NIX Transmit Vtag Action Structure */
+#define TX_VTAG0_DEF_MASK GENMASK_ULL(25, 16)
+#define TX_VTAG0_OP_MASK GENMASK_ULL(13, 12)
+#define TX_VTAG0_LID_MASK GENMASK_ULL(10, 8)
+#define TX_VTAG0_RELPTR_MASK GENMASK_ULL(7, 0)
+#define TX_VTAG1_DEF_MASK GENMASK_ULL(57, 48)
+#define TX_VTAG1_OP_MASK GENMASK_ULL(45, 44)
+#define TX_VTAG1_LID_MASK GENMASK_ULL(42, 40)
+#define TX_VTAG1_RELPTR_MASK GENMASK_ULL(39, 32)
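For illustration, a hypothetical helper (not part of this patch; the name is invented) composing an RX vtag action word from the VTAG0 masks above with the kernel's FIELD_PREP():

#include <linux/bitfield.h>

/* Hypothetical sketch: pack a NIX RX vtag0 action from its fields */
static inline u64 nix_rx_vtag0_action(u8 type, u8 lid, u8 relptr)
{
	return RX_VTAG0_VALID_BIT |
	       FIELD_PREP(RX_VTAG0_TYPE_MASK, type) |
	       FIELD_PREP(RX_VTAG0_LID_MASK, lid) |
	       FIELD_PREP(RX_VTAG0_RELPTR_MASK, relptr);
}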
struct npc_mcam_kex {
/* MKEX Profile Header */
@@ -357,4 +466,24 @@ struct npc_lt_def_cfg {
struct npc_lt_def pck_iip4;
};
+struct rvu_npc_mcam_rule {
+ struct flow_msg packet;
+ struct flow_msg mask;
+ u8 intf;
+ union {
+ struct nix_tx_action tx_action;
+ struct nix_rx_action rx_action;
+ };
+ u64 vtag_action;
+ struct list_head list;
+ u64 features;
+ u16 owner;
+ u16 entry;
+ u16 cntr;
+ bool has_cntr;
+ u8 default_rule;
+ bool enable;
+ bool vfvlan_cfg;
+};
+
#endif /* NPC_H */
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/npc_profile.h b/drivers/net/ethernet/marvell/octeontx2/af/npc_profile.h
index 77bb4ed32600..b192692b4fc4 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/npc_profile.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/npc_profile.h
@@ -148,6 +148,20 @@
(((bytesm1) << 16) | ((hdr_ofs) << 8) | ((ena) << 7) | \
((flags_ena) << 6) | ((key_ofs) & 0x3F))
+/* Rx parse key extract nibble enable */
+#define NPC_PARSE_NIBBLE_INTF_RX (NPC_PARSE_NIBBLE_CHAN | \
+ NPC_PARSE_NIBBLE_LA_LTYPE | \
+ NPC_PARSE_NIBBLE_LB_LTYPE | \
+ NPC_PARSE_NIBBLE_LC_LTYPE | \
+ NPC_PARSE_NIBBLE_LD_LTYPE | \
+ NPC_PARSE_NIBBLE_LE_LTYPE)
+/* Tx parse key extract nibble enable */
+#define NPC_PARSE_NIBBLE_INTF_TX (NPC_PARSE_NIBBLE_LA_LTYPE | \
+ NPC_PARSE_NIBBLE_LB_LTYPE | \
+ NPC_PARSE_NIBBLE_LC_LTYPE | \
+ NPC_PARSE_NIBBLE_LD_LTYPE | \
+ NPC_PARSE_NIBBLE_LE_LTYPE)
+
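For reference, NPC_PARSE_NIBBLE_INTF_RX works out to 0x249207 and NPC_PARSE_NIBBLE_INTF_TX to 0x249200. These differ from the magic values they replace below (0x49247 for RX, which included the L2/L3 broadcast nibble but not the LE ltype; (1ULL << 19) - 1 for TX, which enabled every nibble up to LD), so the named masks appear to be a deliberate behavioral change, not just a cleanup.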
enum npc_kpu_parser_state {
NPC_S_NA = 0,
NPC_S_KPU1_ETHER,
@@ -13380,14 +13394,15 @@ static const struct npc_lt_def_cfg npc_lt_defaults = {
},
};
-static const struct npc_mcam_kex npc_mkex_default = {
+static struct npc_mcam_kex npc_mkex_default = {
.mkex_sign = MKEX_SIGN,
.name = "default",
.kpu_version = NPC_KPU_PROFILE_VER,
.keyx_cfg = {
- /* nibble: LA..LE (ltype only) + Channel */
- [NIX_INTF_RX] = ((u64)NPC_MCAM_KEY_X2 << 32) | 0x49247,
- [NIX_INTF_TX] = ((u64)NPC_MCAM_KEY_X2 << 32) | ((1ULL << 19) - 1),
+ /* nibble: LA..LE (ltype only) + channel */
+ [NIX_INTF_RX] = ((u64)NPC_MCAM_KEY_X2 << 32) | NPC_PARSE_NIBBLE_INTF_RX,
+ /* nibble: LA..LE (ltype only) */
+ [NIX_INTF_TX] = ((u64)NPC_MCAM_KEY_X2 << 32) | NPC_PARSE_NIBBLE_INTF_TX,
},
.intf_lid_lt_ld = {
/* Default RX MCAM KEX profile */
@@ -13405,12 +13420,14 @@ static const struct npc_mcam_kex npc_mkex_default = {
/* Layer B: Single VLAN (CTAG) */
/* CTAG VLAN[2..3] + Ethertype, 4 bytes, KW0[63:32] */
[NPC_LT_LB_CTAG] = {
- KEX_LD_CFG(0x03, 0x0, 0x1, 0x0, 0x4),
+ KEX_LD_CFG(0x03, 0x2, 0x1, 0x0, 0x4),
},
/* Layer B: Stacked VLAN (STAG|QinQ) */
[NPC_LT_LB_STAG_QINQ] = {
- /* CTAG VLAN[2..3] + Ethertype, 4 bytes, KW0[63:32] */
- KEX_LD_CFG(0x03, 0x4, 0x1, 0x0, 0x4),
+ /* Outer VLAN: 2 bytes, KW0[63:48] */
+ KEX_LD_CFG(0x01, 0x2, 0x1, 0x0, 0x6),
+ /* Ethertype: 2 bytes, KW0[47:32] */
+ KEX_LD_CFG(0x01, 0x8, 0x1, 0x0, 0x4),
},
[NPC_LT_LB_FDSA] = {
/* SWITCH PORT: 1 byte, KW0[63:48] */
@@ -13436,17 +13453,71 @@ static const struct npc_mcam_kex npc_mkex_default = {
[NPC_LID_LD] = {
/* Layer D:UDP */
[NPC_LT_LD_UDP] = {
- /* SPORT: 2 bytes, KW3[15:0] */
- KEX_LD_CFG(0x1, 0x0, 0x1, 0x0, 0x18),
- /* DPORT: 2 bytes, KW3[31:16] */
- KEX_LD_CFG(0x1, 0x2, 0x1, 0x0, 0x1a),
+ /* SPORT+DPORT: 4 bytes, KW3[31:0] */
+ KEX_LD_CFG(0x3, 0x0, 0x1, 0x0, 0x18),
+ },
+ /* Layer D:TCP */
+ [NPC_LT_LD_TCP] = {
+ /* SPORT+DPORT: 4 bytes, KW3[31:0] */
+ KEX_LD_CFG(0x3, 0x0, 0x1, 0x0, 0x18),
+ },
+ },
+ },
+
+ /* Default TX MCAM KEX profile */
+ [NIX_INTF_TX] = {
+ [NPC_LID_LA] = {
+ /* Layer A: NIX_INST_HDR_S + Ethernet */
+ /* NIX appends 8 bytes of NIX_INST_HDR_S at the
+ * start of each TX packet supplied to NPC.
+ */
+ [NPC_LT_LA_IH_NIX_ETHER] = {
+ /* PF_FUNC: 2B , KW0 [47:32] */
+ KEX_LD_CFG(0x01, 0x0, 0x1, 0x0, 0x4),
+ /* DMAC: 6 bytes, KW1[63:16] */
+ KEX_LD_CFG(0x05, 0x8, 0x1, 0x0, 0xa),
+ },
+ },
+ [NPC_LID_LB] = {
+ /* Layer B: Single VLAN (CTAG) */
+ [NPC_LT_LB_CTAG] = {
+ /* CTAG VLAN[2..3] KW0[63:48] */
+ KEX_LD_CFG(0x01, 0x2, 0x1, 0x0, 0x6),
+ /* CTAG VLAN[2..3] KW1[15:0] */
+ KEX_LD_CFG(0x01, 0x4, 0x1, 0x0, 0x8),
+ },
+ /* Layer B: Stacked VLAN (STAG|QinQ) */
+ [NPC_LT_LB_STAG_QINQ] = {
+ /* Outer VLAN: 2 bytes, KW0[63:48] */
+ KEX_LD_CFG(0x01, 0x2, 0x1, 0x0, 0x6),
+ /* Outer VLAN: 2 Bytes, KW1[15:0] */
+ KEX_LD_CFG(0x01, 0x8, 0x1, 0x0, 0x8),
+ },
+ },
+ [NPC_LID_LC] = {
+ /* Layer C: IPv4 */
+ [NPC_LT_LC_IP] = {
+ /* SIP+DIP: 8 bytes, KW2[63:0] */
+ KEX_LD_CFG(0x07, 0xc, 0x1, 0x0, 0x10),
+ /* TOS: 1 byte, KW1[63:56] */
+ KEX_LD_CFG(0x0, 0x1, 0x1, 0x0, 0xf),
+ },
+ /* Layer C: IPv6 */
+ [NPC_LT_LC_IP6] = {
+ /* Everything up to SADDR: 8 bytes, KW2[63:0] */
+ KEX_LD_CFG(0x07, 0x0, 0x1, 0x0, 0x10),
+ },
+ },
+ [NPC_LID_LD] = {
+ /* Layer D:UDP */
+ [NPC_LT_LD_UDP] = {
+ /* SPORT+DPORT: 4 bytes, KW3[31:0] */
+ KEX_LD_CFG(0x3, 0x0, 0x1, 0x0, 0x18),
},
/* Layer D:TCP */
[NPC_LT_LD_TCP] = {
- /* SPORT: 2 bytes, KW3[15:0] */
- KEX_LD_CFG(0x1, 0x0, 0x1, 0x0, 0x18),
- /* DPORT: 2 bytes, KW3[31:16] */
- KEX_LD_CFG(0x1, 0x2, 0x1, 0x0, 0x1a),
+ /* SPORT+DPORT: 4 bytes, KW3[31:0] */
+ KEX_LD_CFG(0x3, 0x0, 0x1, 0x0, 0x18),
},
},
},
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
index e1f918960730..9f901c0edcbb 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
@@ -66,6 +66,7 @@ static void rvu_setup_hw_capabilities(struct rvu *rvu)
hw->cap.nix_shaping = true;
hw->cap.nix_tx_link_bp = true;
hw->cap.nix_rx_multicast = true;
+ hw->rvu = rvu;
if (is_rvu_96xx_B0(rvu)) {
hw->cap.nix_fixed_txschq_mapping = true;
@@ -210,6 +211,9 @@ int rvu_get_lf(struct rvu *rvu, struct rvu_block *block, u16 pcifunc, u16 slot)
* multiple blocks of same type.
*
* @pcifunc has to be zero when no LF is yet attached.
+ *
+ * If a pcifunc has LFs attached from multiple blocks of the same type,
+ * return the blkaddr of the first encountered block.
*/
int rvu_get_blkaddr(struct rvu *rvu, int blktype, u16 pcifunc)
{
@@ -258,20 +262,39 @@ int rvu_get_blkaddr(struct rvu *rvu, int blktype, u16 pcifunc)
devnum = rvu_get_pf(pcifunc);
}
- /* Check if the 'pcifunc' has a NIX LF from 'BLKADDR_NIX0' */
+ /* Check if the 'pcifunc' has a NIX LF from 'BLKADDR_NIX0' or
+ * 'BLKADDR_NIX1'.
+ */
if (blktype == BLKTYPE_NIX) {
- reg = is_pf ? RVU_PRIV_PFX_NIX0_CFG : RVU_PRIV_HWVFX_NIX0_CFG;
+ reg = is_pf ? RVU_PRIV_PFX_NIXX_CFG(0) :
+ RVU_PRIV_HWVFX_NIXX_CFG(0);
cfg = rvu_read64(rvu, BLKADDR_RVUM, reg | (devnum << 16));
- if (cfg)
+ if (cfg) {
blkaddr = BLKADDR_NIX0;
+ goto exit;
+ }
+
+ reg = is_pf ? RVU_PRIV_PFX_NIXX_CFG(1) :
+ RVU_PRIV_HWVFX_NIXX_CFG(1);
+ cfg = rvu_read64(rvu, BLKADDR_RVUM, reg | (devnum << 16));
+ if (cfg)
+ blkaddr = BLKADDR_NIX1;
}
- /* Check if the 'pcifunc' has a CPT LF from 'BLKADDR_CPT0' */
if (blktype == BLKTYPE_CPT) {
- reg = is_pf ? RVU_PRIV_PFX_CPT0_CFG : RVU_PRIV_HWVFX_CPT0_CFG;
+ reg = is_pf ? RVU_PRIV_PFX_CPTX_CFG(0) :
+ RVU_PRIV_HWVFX_CPTX_CFG(0);
cfg = rvu_read64(rvu, BLKADDR_RVUM, reg | (devnum << 16));
- if (cfg)
+ if (cfg) {
blkaddr = BLKADDR_CPT0;
+ goto exit;
+ }
+
+ reg = is_pf ? RVU_PRIV_PFX_CPTX_CFG(1) :
+ RVU_PRIV_HWVFX_CPTX_CFG(1);
+ cfg = rvu_read64(rvu, BLKADDR_RVUM, reg | (devnum << 16));
+ if (cfg)
+ blkaddr = BLKADDR_CPT1;
}
exit:
@@ -306,31 +329,36 @@ static void rvu_update_rsrc_map(struct rvu *rvu, struct rvu_pfvf *pfvf,
block->fn_map[lf] = attach ? pcifunc : 0;
- switch (block->type) {
- case BLKTYPE_NPA:
+ switch (block->addr) {
+ case BLKADDR_NPA:
pfvf->npalf = attach ? true : false;
num_lfs = pfvf->npalf;
break;
- case BLKTYPE_NIX:
+ case BLKADDR_NIX0:
+ case BLKADDR_NIX1:
pfvf->nixlf = attach ? true : false;
num_lfs = pfvf->nixlf;
break;
- case BLKTYPE_SSO:
+ case BLKADDR_SSO:
attach ? pfvf->sso++ : pfvf->sso--;
num_lfs = pfvf->sso;
break;
- case BLKTYPE_SSOW:
+ case BLKADDR_SSOW:
attach ? pfvf->ssow++ : pfvf->ssow--;
num_lfs = pfvf->ssow;
break;
- case BLKTYPE_TIM:
+ case BLKADDR_TIM:
attach ? pfvf->timlfs++ : pfvf->timlfs--;
num_lfs = pfvf->timlfs;
break;
- case BLKTYPE_CPT:
+ case BLKADDR_CPT0:
attach ? pfvf->cptlfs++ : pfvf->cptlfs--;
num_lfs = pfvf->cptlfs;
break;
+ case BLKADDR_CPT1:
+ attach ? pfvf->cpt1_lfs++ : pfvf->cpt1_lfs--;
+ num_lfs = pfvf->cpt1_lfs;
+ break;
}
reg = is_pf ? block->pf_lfcnt_reg : block->vf_lfcnt_reg;
@@ -466,12 +494,16 @@ static void rvu_reset_all_blocks(struct rvu *rvu)
/* Do a HW reset of all RVU blocks */
rvu_block_reset(rvu, BLKADDR_NPA, NPA_AF_BLK_RST);
rvu_block_reset(rvu, BLKADDR_NIX0, NIX_AF_BLK_RST);
+ rvu_block_reset(rvu, BLKADDR_NIX1, NIX_AF_BLK_RST);
rvu_block_reset(rvu, BLKADDR_NPC, NPC_AF_BLK_RST);
rvu_block_reset(rvu, BLKADDR_SSO, SSO_AF_BLK_RST);
rvu_block_reset(rvu, BLKADDR_TIM, TIM_AF_BLK_RST);
rvu_block_reset(rvu, BLKADDR_CPT0, CPT_AF_BLK_RST);
+ rvu_block_reset(rvu, BLKADDR_CPT1, CPT_AF_BLK_RST);
rvu_block_reset(rvu, BLKADDR_NDC_NIX0_RX, NDC_AF_BLK_RST);
rvu_block_reset(rvu, BLKADDR_NDC_NIX0_TX, NDC_AF_BLK_RST);
+ rvu_block_reset(rvu, BLKADDR_NDC_NIX1_RX, NDC_AF_BLK_RST);
+ rvu_block_reset(rvu, BLKADDR_NDC_NIX1_TX, NDC_AF_BLK_RST);
rvu_block_reset(rvu, BLKADDR_NDC_NPA0, NDC_AF_BLK_RST);
}
@@ -695,6 +727,10 @@ static void rvu_setup_pfvf_macaddress(struct rvu *rvu)
u64 *mac;
for (pf = 0; pf < hw->total_pfs; pf++) {
+ /* For PF0 (AF), assign MAC addresses only to its VFs (LBK VFs) */
+ if (!pf)
+ goto lbkvf;
+
if (!is_pf_cgxmapped(rvu, pf))
continue;
/* Assign MAC address to PF */
@@ -708,8 +744,10 @@ static void rvu_setup_pfvf_macaddress(struct rvu *rvu)
} else {
eth_random_addr(pfvf->mac_addr);
}
+ ether_addr_copy(pfvf->default_mac, pfvf->mac_addr);
- /* Assign MAC address to VFs */
+lbkvf:
+ /* Assign MAC address to VFs */
rvu_get_pf_numvfs(rvu, pf, &numvfs, &hwvf);
for (vf = 0; vf < numvfs; vf++, hwvf++) {
pfvf = &rvu->hwvf[hwvf];
@@ -722,6 +760,7 @@ static void rvu_setup_pfvf_macaddress(struct rvu *rvu)
} else {
eth_random_addr(pfvf->mac_addr);
}
+ ether_addr_copy(pfvf->default_mac, pfvf->mac_addr);
}
}
}
@@ -757,6 +796,62 @@ static void rvu_fwdata_exit(struct rvu *rvu)
iounmap(rvu->fwdata);
}
+static int rvu_setup_nix_hw_resource(struct rvu *rvu, int blkaddr)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+ struct rvu_block *block;
+ int blkid;
+ u64 cfg;
+
+ /* Init NIX LF's bitmap */
+ block = &hw->block[blkaddr];
+ if (!block->implemented)
+ return 0;
+ blkid = (blkaddr == BLKADDR_NIX0) ? 0 : 1;
+ cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST2);
+ block->lf.max = cfg & 0xFFF;
+ block->addr = blkaddr;
+ block->type = BLKTYPE_NIX;
+ block->lfshift = 8;
+ block->lookup_reg = NIX_AF_RVU_LF_CFG_DEBUG;
+ block->pf_lfcnt_reg = RVU_PRIV_PFX_NIXX_CFG(blkid);
+ block->vf_lfcnt_reg = RVU_PRIV_HWVFX_NIXX_CFG(blkid);
+ block->lfcfg_reg = NIX_PRIV_LFX_CFG;
+ block->msixcfg_reg = NIX_PRIV_LFX_INT_CFG;
+ block->lfreset_reg = NIX_AF_LF_RST;
+ sprintf(block->name, "NIX%d", blkid);
+ rvu->nix_blkaddr[blkid] = blkaddr;
+ return rvu_alloc_bitmap(&block->lf);
+}
+
+static int rvu_setup_cpt_hw_resource(struct rvu *rvu, int blkaddr)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+ struct rvu_block *block;
+ int blkid;
+ u64 cfg;
+
+ /* Init CPT LF's bitmap */
+ block = &hw->block[blkaddr];
+ if (!block->implemented)
+ return 0;
+ blkid = (blkaddr == BLKADDR_CPT0) ? 0 : 1;
+ cfg = rvu_read64(rvu, blkaddr, CPT_AF_CONSTANTS0);
+ block->lf.max = cfg & 0xFF;
+ block->addr = blkaddr;
+ block->type = BLKTYPE_CPT;
+ block->multislot = true;
+ block->lfshift = 3;
+ block->lookup_reg = CPT_AF_RVU_LF_CFG_DEBUG;
+ block->pf_lfcnt_reg = RVU_PRIV_PFX_CPTX_CFG(blkid);
+ block->vf_lfcnt_reg = RVU_PRIV_HWVFX_CPTX_CFG(blkid);
+ block->lfcfg_reg = CPT_PRIV_LFX_CFG;
+ block->msixcfg_reg = CPT_PRIV_LFX_INT_CFG;
+ block->lfreset_reg = CPT_AF_LF_RST;
+ sprintf(block->name, "CPT%d", blkid);
+ return rvu_alloc_bitmap(&block->lf);
+}
+
static int rvu_setup_hw_resources(struct rvu *rvu)
{
struct rvu_hwinfo *hw = rvu->hw;
@@ -791,27 +886,13 @@ static int rvu_setup_hw_resources(struct rvu *rvu)
return err;
nix:
- /* Init NIX LF's bitmap */
- block = &hw->block[BLKADDR_NIX0];
- if (!block->implemented)
- goto sso;
- cfg = rvu_read64(rvu, BLKADDR_NIX0, NIX_AF_CONST2);
- block->lf.max = cfg & 0xFFF;
- block->addr = BLKADDR_NIX0;
- block->type = BLKTYPE_NIX;
- block->lfshift = 8;
- block->lookup_reg = NIX_AF_RVU_LF_CFG_DEBUG;
- block->pf_lfcnt_reg = RVU_PRIV_PFX_NIX0_CFG;
- block->vf_lfcnt_reg = RVU_PRIV_HWVFX_NIX0_CFG;
- block->lfcfg_reg = NIX_PRIV_LFX_CFG;
- block->msixcfg_reg = NIX_PRIV_LFX_INT_CFG;
- block->lfreset_reg = NIX_AF_LF_RST;
- sprintf(block->name, "NIX");
- err = rvu_alloc_bitmap(&block->lf);
+ err = rvu_setup_nix_hw_resource(rvu, BLKADDR_NIX0);
+ if (err)
+ return err;
+ err = rvu_setup_nix_hw_resource(rvu, BLKADDR_NIX1);
if (err)
return err;
-sso:
/* Init SSO group's bitmap */
block = &hw->block[BLKADDR_SSO];
if (!block->implemented)
@@ -877,28 +958,13 @@ tim:
return err;
cpt:
- /* Init CPT LF's bitmap */
- block = &hw->block[BLKADDR_CPT0];
- if (!block->implemented)
- goto init;
- cfg = rvu_read64(rvu, BLKADDR_CPT0, CPT_AF_CONSTANTS0);
- block->lf.max = cfg & 0xFF;
- block->addr = BLKADDR_CPT0;
- block->type = BLKTYPE_CPT;
- block->multislot = true;
- block->lfshift = 3;
- block->lookup_reg = CPT_AF_RVU_LF_CFG_DEBUG;
- block->pf_lfcnt_reg = RVU_PRIV_PFX_CPT0_CFG;
- block->vf_lfcnt_reg = RVU_PRIV_HWVFX_CPT0_CFG;
- block->lfcfg_reg = CPT_PRIV_LFX_CFG;
- block->msixcfg_reg = CPT_PRIV_LFX_INT_CFG;
- block->lfreset_reg = CPT_AF_LF_RST;
- sprintf(block->name, "CPT");
- err = rvu_alloc_bitmap(&block->lf);
+ err = rvu_setup_cpt_hw_resource(rvu, BLKADDR_CPT0);
+ if (err)
+ return err;
+ err = rvu_setup_cpt_hw_resource(rvu, BLKADDR_CPT1);
if (err)
return err;
-init:
/* Allocate memory for PFVF data */
rvu->pf = devm_kcalloc(rvu->dev, hw->total_pfs,
sizeof(struct rvu_pfvf), GFP_KERNEL);
@@ -1025,7 +1091,30 @@ int rvu_mbox_handler_ready(struct rvu *rvu, struct msg_req *req,
/* Get current count of a RVU block's LF/slots
* provisioned to a given RVU func.
*/
-static u16 rvu_get_rsrc_mapcount(struct rvu_pfvf *pfvf, int blktype)
+u16 rvu_get_rsrc_mapcount(struct rvu_pfvf *pfvf, int blkaddr)
+{
+ switch (blkaddr) {
+ case BLKADDR_NPA:
+ return pfvf->npalf ? 1 : 0;
+ case BLKADDR_NIX0:
+ case BLKADDR_NIX1:
+ return pfvf->nixlf ? 1 : 0;
+ case BLKADDR_SSO:
+ return pfvf->sso;
+ case BLKADDR_SSOW:
+ return pfvf->ssow;
+ case BLKADDR_TIM:
+ return pfvf->timlfs;
+ case BLKADDR_CPT0:
+ return pfvf->cptlfs;
+ case BLKADDR_CPT1:
+ return pfvf->cpt1_lfs;
+ }
+ return 0;
+}
+
+/* Return true if LFs of block type are attached to pcifunc */
+static bool is_blktype_attached(struct rvu_pfvf *pfvf, int blktype)
{
switch (blktype) {
case BLKTYPE_NPA:
@@ -1033,15 +1122,16 @@ static u16 rvu_get_rsrc_mapcount(struct rvu_pfvf *pfvf, int blktype)
case BLKTYPE_NIX:
return pfvf->nixlf ? 1 : 0;
case BLKTYPE_SSO:
- return pfvf->sso;
+ return !!pfvf->sso;
case BLKTYPE_SSOW:
- return pfvf->ssow;
+ return !!pfvf->ssow;
case BLKTYPE_TIM:
- return pfvf->timlfs;
+ return !!pfvf->timlfs;
case BLKTYPE_CPT:
- return pfvf->cptlfs;
+ return pfvf->cptlfs || pfvf->cpt1_lfs;
}
- return 0;
+
+ return false;
}
bool is_pffunc_map_valid(struct rvu *rvu, u16 pcifunc, int blktype)
@@ -1054,7 +1144,7 @@ bool is_pffunc_map_valid(struct rvu *rvu, u16 pcifunc, int blktype)
pfvf = rvu_get_pfvf(rvu, pcifunc);
/* Check if this PFFUNC has a LF of type blktype attached */
- if (!rvu_get_rsrc_mapcount(pfvf, blktype))
+ if (!is_blktype_attached(pfvf, blktype))
return false;
return true;
@@ -1093,9 +1183,12 @@ static void rvu_detach_block(struct rvu *rvu, int pcifunc, int blktype)
if (blkaddr < 0)
return;
+ if (blktype == BLKTYPE_NIX)
+ rvu_nix_reset_mac(pfvf, pcifunc);
+
block = &hw->block[blkaddr];
- num_lfs = rvu_get_rsrc_mapcount(pfvf, block->type);
+ num_lfs = rvu_get_rsrc_mapcount(pfvf, block->addr);
if (!num_lfs)
return;
@@ -1146,6 +1239,8 @@ static int rvu_detach_rsrcs(struct rvu *rvu, struct rsrc_detach *detach,
continue;
else if ((blkid == BLKADDR_NIX0) && !detach->nixlf)
continue;
+ else if ((blkid == BLKADDR_NIX1) && !detach->nixlf)
+ continue;
else if ((blkid == BLKADDR_SSO) && !detach->sso)
continue;
else if ((blkid == BLKADDR_SSOW) && !detach->ssow)
@@ -1154,6 +1249,8 @@ static int rvu_detach_rsrcs(struct rvu *rvu, struct rsrc_detach *detach,
continue;
else if ((blkid == BLKADDR_CPT0) && !detach->cptlfs)
continue;
+ else if ((blkid == BLKADDR_CPT1) && !detach->cptlfs)
+ continue;
}
rvu_detach_block(rvu, pcifunc, block->type);
}
@@ -1169,8 +1266,73 @@ int rvu_mbox_handler_detach_resources(struct rvu *rvu,
return rvu_detach_rsrcs(rvu, detach, detach->hdr.pcifunc);
}
-static void rvu_attach_block(struct rvu *rvu, int pcifunc,
- int blktype, int num_lfs)
+static int rvu_get_nix_blkaddr(struct rvu *rvu, u16 pcifunc)
+{
+ struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
+ int blkaddr = BLKADDR_NIX0, vf;
+ struct rvu_pfvf *pf;
+
+ /* All CGX-mapped PFs have their NIX block assigned during init */
+ if (is_pf_cgxmapped(rvu, rvu_get_pf(pcifunc))) {
+ pf = rvu_get_pfvf(rvu, pcifunc & ~RVU_PFVF_FUNC_MASK);
+ blkaddr = pf->nix_blkaddr;
+ } else if (is_afvf(pcifunc)) {
+ vf = pcifunc - 1;
+ /* Assign NIX based on VF number. All even-numbered VFs get
+ * NIX0 and odd-numbered VFs get NIX1
+ */
+ blkaddr = (vf & 1) ? BLKADDR_NIX1 : BLKADDR_NIX0;
+ /* NIX1 is not present on all silicons */
+ if (!is_block_implemented(rvu->hw, BLKADDR_NIX1))
+ blkaddr = BLKADDR_NIX0;
+ }
+
+ switch (blkaddr) {
+ case BLKADDR_NIX1:
+ pfvf->nix_blkaddr = BLKADDR_NIX1;
+ pfvf->nix_rx_intf = NIX_INTFX_RX(1);
+ pfvf->nix_tx_intf = NIX_INTFX_TX(1);
+ break;
+ case BLKADDR_NIX0:
+ default:
+ pfvf->nix_blkaddr = BLKADDR_NIX0;
+ pfvf->nix_rx_intf = NIX_INTFX_RX(0);
+ pfvf->nix_tx_intf = NIX_INTFX_TX(0);
+ break;
+ }
+
+ return pfvf->nix_blkaddr;
+}
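For example, with NIX1 implemented, LBK VF0 (pcifunc 1, so vf = 0) lands on NIX0 while LBK VF1 (pcifunc 2, vf = 1) lands on NIX1.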
+
+static int rvu_get_attach_blkaddr(struct rvu *rvu, int blktype,
+ u16 pcifunc, struct rsrc_attach *attach)
+{
+ int blkaddr;
+
+ switch (blktype) {
+ case BLKTYPE_NIX:
+ blkaddr = rvu_get_nix_blkaddr(rvu, pcifunc);
+ break;
+ case BLKTYPE_CPT:
+ if (attach->hdr.ver < RVU_MULTI_BLK_VER)
+ return rvu_get_blkaddr(rvu, blktype, 0);
+ blkaddr = attach->cpt_blkaddr ? attach->cpt_blkaddr :
+ BLKADDR_CPT0;
+ if (blkaddr != BLKADDR_CPT0 && blkaddr != BLKADDR_CPT1)
+ return -ENODEV;
+ break;
+ default:
+ return rvu_get_blkaddr(rvu, blktype, 0);
+ }
+
+ if (is_block_implemented(rvu->hw, blkaddr))
+ return blkaddr;
+
+ return -ENODEV;
+}
+
+static void rvu_attach_block(struct rvu *rvu, int pcifunc, int blktype,
+ int num_lfs, struct rsrc_attach *attach)
{
struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
struct rvu_hwinfo *hw = rvu->hw;
@@ -1182,7 +1344,7 @@ static void rvu_attach_block(struct rvu *rvu, int pcifunc,
if (!num_lfs)
return;
- blkaddr = rvu_get_blkaddr(rvu, blktype, 0);
+ blkaddr = rvu_get_attach_blkaddr(rvu, blktype, pcifunc, attach);
if (blkaddr < 0)
return;
@@ -1211,12 +1373,12 @@ static int rvu_check_rsrc_availability(struct rvu *rvu,
struct rsrc_attach *req, u16 pcifunc)
{
struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
+ int free_lfs, mappedlfs, blkaddr;
struct rvu_hwinfo *hw = rvu->hw;
struct rvu_block *block;
- int free_lfs, mappedlfs;
/* Only one NPA LF can be attached */
- if (req->npalf && !rvu_get_rsrc_mapcount(pfvf, BLKTYPE_NPA)) {
+ if (req->npalf && !is_blktype_attached(pfvf, BLKTYPE_NPA)) {
block = &hw->block[BLKADDR_NPA];
free_lfs = rvu_rsrc_free_count(&block->lf);
if (!free_lfs)
@@ -1229,8 +1391,12 @@ static int rvu_check_rsrc_availability(struct rvu *rvu,
}
/* Only one NIX LF can be attached */
- if (req->nixlf && !rvu_get_rsrc_mapcount(pfvf, BLKTYPE_NIX)) {
- block = &hw->block[BLKADDR_NIX0];
+ if (req->nixlf && !is_blktype_attached(pfvf, BLKTYPE_NIX)) {
+ blkaddr = rvu_get_attach_blkaddr(rvu, BLKTYPE_NIX,
+ pcifunc, req);
+ if (blkaddr < 0)
+ return blkaddr;
+ block = &hw->block[blkaddr];
free_lfs = rvu_rsrc_free_count(&block->lf);
if (!free_lfs)
goto fail;
@@ -1250,7 +1416,7 @@ static int rvu_check_rsrc_availability(struct rvu *rvu,
pcifunc, req->sso, block->lf.max);
return -EINVAL;
}
- mappedlfs = rvu_get_rsrc_mapcount(pfvf, block->type);
+ mappedlfs = rvu_get_rsrc_mapcount(pfvf, block->addr);
free_lfs = rvu_rsrc_free_count(&block->lf);
/* Check if additional resources are available */
if (req->sso > mappedlfs &&
@@ -1266,7 +1432,7 @@ static int rvu_check_rsrc_availability(struct rvu *rvu,
pcifunc, req->sso, block->lf.max);
return -EINVAL;
}
- mappedlfs = rvu_get_rsrc_mapcount(pfvf, block->type);
+ mappedlfs = rvu_get_rsrc_mapcount(pfvf, block->addr);
free_lfs = rvu_rsrc_free_count(&block->lf);
if (req->ssow > mappedlfs &&
((req->ssow - mappedlfs) > free_lfs))
@@ -1281,7 +1447,7 @@ static int rvu_check_rsrc_availability(struct rvu *rvu,
pcifunc, req->timlfs, block->lf.max);
return -EINVAL;
}
- mappedlfs = rvu_get_rsrc_mapcount(pfvf, block->type);
+ mappedlfs = rvu_get_rsrc_mapcount(pfvf, block->addr);
free_lfs = rvu_rsrc_free_count(&block->lf);
if (req->timlfs > mappedlfs &&
((req->timlfs - mappedlfs) > free_lfs))
@@ -1289,14 +1455,18 @@ static int rvu_check_rsrc_availability(struct rvu *rvu,
}
if (req->cptlfs) {
- block = &hw->block[BLKADDR_CPT0];
+ blkaddr = rvu_get_attach_blkaddr(rvu, BLKTYPE_CPT,
+ pcifunc, req);
+ if (blkaddr < 0)
+ return blkaddr;
+ block = &hw->block[blkaddr];
if (req->cptlfs > block->lf.max) {
dev_err(&rvu->pdev->dev,
"Func 0x%x: Invalid CPTLF req, %d > max %d\n",
pcifunc, req->cptlfs, block->lf.max);
return -EINVAL;
}
- mappedlfs = rvu_get_rsrc_mapcount(pfvf, block->type);
+ mappedlfs = rvu_get_rsrc_mapcount(pfvf, block->addr);
free_lfs = rvu_rsrc_free_count(&block->lf);
if (req->cptlfs > mappedlfs &&
((req->cptlfs - mappedlfs) > free_lfs))
@@ -1310,6 +1480,22 @@ fail:
return -ENOSPC;
}
+static bool rvu_attach_from_same_block(struct rvu *rvu, int blktype,
+ struct rsrc_attach *attach)
+{
+ int blkaddr, num_lfs;
+
+ blkaddr = rvu_get_attach_blkaddr(rvu, blktype,
+ attach->hdr.pcifunc, attach);
+ if (blkaddr < 0)
+ return false;
+
+ num_lfs = rvu_get_rsrc_mapcount(rvu_get_pfvf(rvu, attach->hdr.pcifunc),
+ blkaddr);
+ /* Does the requester already have LFs from the given block? */
+ return !!num_lfs;
+}
+
int rvu_mbox_handler_attach_resources(struct rvu *rvu,
struct rsrc_attach *attach,
struct msg_rsp *rsp)
@@ -1330,10 +1516,10 @@ int rvu_mbox_handler_attach_resources(struct rvu *rvu,
/* Now attach the requested resources */
if (attach->npalf)
- rvu_attach_block(rvu, pcifunc, BLKTYPE_NPA, 1);
+ rvu_attach_block(rvu, pcifunc, BLKTYPE_NPA, 1, attach);
if (attach->nixlf)
- rvu_attach_block(rvu, pcifunc, BLKTYPE_NIX, 1);
+ rvu_attach_block(rvu, pcifunc, BLKTYPE_NIX, 1, attach);
if (attach->sso) {
/* RVU func doesn't know which exact LF or slot is attached
@@ -1343,25 +1529,30 @@ int rvu_mbox_handler_attach_resources(struct rvu *rvu,
*/
if (attach->modify)
rvu_detach_block(rvu, pcifunc, BLKTYPE_SSO);
- rvu_attach_block(rvu, pcifunc, BLKTYPE_SSO, attach->sso);
+ rvu_attach_block(rvu, pcifunc, BLKTYPE_SSO,
+ attach->sso, attach);
}
if (attach->ssow) {
if (attach->modify)
rvu_detach_block(rvu, pcifunc, BLKTYPE_SSOW);
- rvu_attach_block(rvu, pcifunc, BLKTYPE_SSOW, attach->ssow);
+ rvu_attach_block(rvu, pcifunc, BLKTYPE_SSOW,
+ attach->ssow, attach);
}
if (attach->timlfs) {
if (attach->modify)
rvu_detach_block(rvu, pcifunc, BLKTYPE_TIM);
- rvu_attach_block(rvu, pcifunc, BLKTYPE_TIM, attach->timlfs);
+ rvu_attach_block(rvu, pcifunc, BLKTYPE_TIM,
+ attach->timlfs, attach);
}
if (attach->cptlfs) {
- if (attach->modify)
+ if (attach->modify &&
+ rvu_attach_from_same_block(rvu, BLKTYPE_CPT, attach))
rvu_detach_block(rvu, pcifunc, BLKTYPE_CPT);
- rvu_attach_block(rvu, pcifunc, BLKTYPE_CPT, attach->cptlfs);
+ rvu_attach_block(rvu, pcifunc, BLKTYPE_CPT,
+ attach->cptlfs, attach);
}
exit:
@@ -1439,7 +1630,7 @@ int rvu_mbox_handler_msix_offset(struct rvu *rvu, struct msg_req *req,
struct rvu_hwinfo *hw = rvu->hw;
u16 pcifunc = req->hdr.pcifunc;
struct rvu_pfvf *pfvf;
- int lf, slot;
+ int lf, slot, blkaddr;
pfvf = rvu_get_pfvf(rvu, pcifunc);
if (!pfvf->msix.bmap)
@@ -1449,8 +1640,14 @@ int rvu_mbox_handler_msix_offset(struct rvu *rvu, struct msg_req *req,
lf = rvu_get_lf(rvu, &hw->block[BLKADDR_NPA], pcifunc, 0);
rsp->npa_msixoff = rvu_get_msix_offset(rvu, pfvf, BLKADDR_NPA, lf);
- lf = rvu_get_lf(rvu, &hw->block[BLKADDR_NIX0], pcifunc, 0);
- rsp->nix_msixoff = rvu_get_msix_offset(rvu, pfvf, BLKADDR_NIX0, lf);
+ /* Get BLKADDR from which LFs are attached to pcifunc */
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
+ if (blkaddr < 0) {
+ rsp->nix_msixoff = MSIX_VECTOR_INVALID;
+ } else {
+ lf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0);
+ rsp->nix_msixoff = rvu_get_msix_offset(rvu, pfvf, blkaddr, lf);
+ }
rsp->sso = pfvf->sso;
for (slot = 0; slot < rsp->sso; slot++) {
@@ -1479,6 +1676,14 @@ int rvu_mbox_handler_msix_offset(struct rvu *rvu, struct msg_req *req,
rsp->cptlf_msixoff[slot] =
rvu_get_msix_offset(rvu, pfvf, BLKADDR_CPT0, lf);
}
+
+ rsp->cpt1_lfs = pfvf->cpt1_lfs;
+ for (slot = 0; slot < rsp->cpt1_lfs; slot++) {
+ lf = rvu_get_lf(rvu, &hw->block[BLKADDR_CPT1], pcifunc, slot);
+ rsp->cpt1_lf_msixoff[slot] =
+ rvu_get_msix_offset(rvu, pfvf, BLKADDR_CPT1, lf);
+ }
+
return 0;
}
@@ -1932,7 +2137,7 @@ static void rvu_blklf_teardown(struct rvu *rvu, u16 pcifunc, u8 blkaddr)
block = &rvu->hw->block[blkaddr];
num_lfs = rvu_get_rsrc_mapcount(rvu_get_pfvf(rvu, pcifunc),
- block->type);
+ block->addr);
if (!num_lfs)
return;
for (slot = 0; slot < num_lfs; slot++) {
@@ -1941,7 +2146,7 @@ static void rvu_blklf_teardown(struct rvu *rvu, u16 pcifunc, u8 blkaddr)
continue;
/* Cleanup LF and reset it */
- if (block->addr == BLKADDR_NIX0)
+ if (block->addr == BLKADDR_NIX0 || block->addr == BLKADDR_NIX1)
rvu_nix_lf_teardown(rvu, pcifunc, block->addr, lf);
else if (block->addr == BLKADDR_NPA)
rvu_npa_lf_teardown(rvu, pcifunc, lf);
@@ -1963,7 +2168,9 @@ static void __rvu_flr_handler(struct rvu *rvu, u16 pcifunc)
* 3. Cleanup pools (NPA)
*/
rvu_blklf_teardown(rvu, pcifunc, BLKADDR_NIX0);
+ rvu_blklf_teardown(rvu, pcifunc, BLKADDR_NIX1);
rvu_blklf_teardown(rvu, pcifunc, BLKADDR_CPT0);
+ rvu_blklf_teardown(rvu, pcifunc, BLKADDR_CPT1);
rvu_blklf_teardown(rvu, pcifunc, BLKADDR_TIM);
rvu_blklf_teardown(rvu, pcifunc, BLKADDR_SSOW);
rvu_blklf_teardown(rvu, pcifunc, BLKADDR_SSO);
@@ -2445,7 +2652,7 @@ static void rvu_enable_afvf_intr(struct rvu *rvu)
#define PCI_DEVID_OCTEONTX2_LBK 0xA061
-static int lbk_get_num_chans(void)
+int rvu_get_num_lbk_chans(void)
{
struct pci_dev *pdev;
void __iomem *base;
@@ -2480,7 +2687,7 @@ static int rvu_enable_sriov(struct rvu *rvu)
return 0;
}
- chans = lbk_get_num_chans();
+ chans = rvu_get_num_lbk_chans();
if (chans < 0)
return chans;
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
index 90eed3160915..b6c0977499ab 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
@@ -15,6 +15,7 @@
#include "rvu_struct.h"
#include "common.h"
#include "mbox.h"
+#include "npc.h"
/* PCI device IDs */
#define PCI_DEVID_OCTEONTX2_RVU_AF 0xA065
@@ -28,6 +29,7 @@
#define PCI_MBOX_BAR_NUM 4
#define NAME_SIZE 32
+#define MAX_NIX_BLKS 2
/* PF_FUNC */
#define RVU_PFVF_PF_SHIFT 10
@@ -50,6 +52,7 @@ struct rvu_debugfs {
struct dentry *npa;
struct dentry *nix;
struct dentry *npc;
+ struct dentry *cpt;
struct dump_ctx npa_aura_ctx;
struct dump_ctx npa_pool_ctx;
struct dump_ctx nix_cq_ctx;
@@ -104,6 +107,36 @@ struct nix_mce_list {
int max;
};
+/* layer metadata to uniquely identify a packet header field */
+struct npc_layer_mdata {
+ u8 lid;
+ u8 ltype;
+ u8 hdr;
+ u8 key;
+ u8 len;
+};
+
+/* Structure to represent a field present in the
+ * generated key. A key field may be present anywhere and can
+ * be of any size in the generated key. Once this structure
+ * is populated for the fields of interest, a field's presence
+ * and location (if present) can be known.
+ */
+struct npc_key_field {
+ /* Masks where all set bits indicate position
+ * of a field in the key
+ */
+ u64 kw_mask[NPC_MAX_KWS_IN_KEY];
+ /* Number of words in the key a field spans. If a field is
+ * 16 bytes and the key offset is 4, then the field will use
+ * 4 bytes in KW0, 8 bytes in KW1 and 4 bytes in KW2, and
+ * nr_kws will be 3 (KW0, KW1 and KW2).
+ */
+ int nr_kws;
+ /* used by packet header fields */
+ struct npc_layer_mdata layer_mdata;
+};
+
struct npc_mcam {
struct rsrc_bmap counters;
struct mutex lock; /* MCAM entries and counters update lock */
@@ -115,6 +148,7 @@ struct npc_mcam {
u16 *entry2cntr_map;
u16 *cntr2pfvf_map;
u16 *cntr_refcnt;
+ u16 *entry2target_pffunc;
u8 keysize; /* MCAM keysize 112/224/448 bits */
u8 banks; /* Number of MCAM banks */
u8 banks_per_entry;/* Number of keywords in key */
@@ -127,6 +161,12 @@ struct npc_mcam {
u16 hprio_count;
u16 hprio_end;
u16 rx_miss_act_cntr; /* Counter for RX MISS action */
+ /* fields present in the generated key */
+ struct npc_key_field tx_key_fields[NPC_KEY_FIELDS_MAX];
+ struct npc_key_field rx_key_fields[NPC_KEY_FIELDS_MAX];
+ u64 tx_features;
+ u64 rx_features;
+ struct list_head mcam_rules;
};
/* Structure for per RVU func info ie PF/VF */
@@ -137,6 +177,7 @@ struct rvu_pfvf {
u16 ssow;
u16 cptlfs;
u16 timlfs;
+ u16 cpt1_lfs;
u8 cgx_lmac;
/* Block LF's MSIX vector info */
@@ -169,19 +210,22 @@ struct rvu_pfvf {
u16 maxlen;
u16 minlen;
+ u8 pf_set_vf_cfg;
u8 mac_addr[ETH_ALEN]; /* MAC address of this PF/VF */
+ u8 default_mac[ETH_ALEN]; /* MAC address from FWdata */
/* Broadcast pkt replication info */
u16 bcast_mce_idx;
struct nix_mce_list bcast_mce_list;
- /* VLAN offload */
- struct mcam_entry entry;
- int rxvlan_index;
- bool rxvlan;
+ struct rvu_npc_mcam_rule *def_ucast_rule;
bool cgx_in_use; /* this PF/VF using CGX? */
int cgx_users; /* number of cgx users - used only by PFs */
+
+ u8 nix_blkaddr; /* BLKADDR_NIX0/1 assigned to this PF */
+ u8 nix_rx_intf; /* NIX0_RX/NIX1_RX interface to NPC */
+ u8 nix_tx_intf; /* NIX0_TX/NIX1_TX interface to NPC */
};
struct nix_txsch {
@@ -218,12 +262,22 @@ struct nix_lso {
u8 in_use;
};
+struct nix_txvlan {
+#define NIX_TX_VTAG_DEF_MAX 0x400
+ struct rsrc_bmap rsrc;
+ u16 *entry2pfvf_map;
+ struct mutex rsrc_lock; /* Serialize resource alloc/free */
+};
+
struct nix_hw {
+ int blkaddr;
+ struct rvu *rvu;
struct nix_txsch txsch[NIX_TXSCH_LVL_CNT]; /* Tx schedulers */
struct nix_mcast mcast;
struct nix_flowkey flowkey;
struct nix_mark_format mark_format;
struct nix_lso lso;
+ struct nix_txvlan txvlan;
};
/* RVU block's capabilities or functionality,
@@ -251,10 +305,16 @@ struct rvu_hwinfo {
u8 lbk_links;
u8 sdp_links;
u8 npc_kpus; /* No of parser units */
+ u8 npc_pkinds; /* No of port kinds */
+ u8 npc_intfs; /* No of interfaces */
+ u8 npc_kpu_entries; /* No of KPU entries */
+ u16 npc_counters; /* No of match stats counters */
+ bool npc_ext_set; /* Extended register set */
struct hw_cap cap;
struct rvu_block block[BLK_COUNT]; /* Block info */
- struct nix_hw *nix0;
+ struct nix_hw *nix;
+ struct rvu *rvu;
struct npc_pkind pkind;
struct npc_mcam mcam;
};
@@ -300,7 +360,7 @@ struct npc_kpu_profile_adapter {
const struct npc_lt_def_cfg *lt_def;
const struct npc_kpu_profile_action *ikpu; /* array[pkinds] */
const struct npc_kpu_profile *kpu; /* array[kpus] */
- const struct npc_mcam_kex *mkex;
+ struct npc_mcam_kex *mkex;
size_t pkinds;
size_t kpus;
};
@@ -315,6 +375,7 @@ struct rvu {
struct rvu_pfvf *hwvf;
struct mutex rsrc_lock; /* Serialize resource alloc/free */
int vfs; /* Number of VFs attached to RVU */
+ int nix_blkaddr[MAX_NIX_BLKS];
/* Mbox */
struct mbox_wq_info afpf_wq_info;
@@ -420,6 +481,7 @@ void rvu_free_rsrc(struct rsrc_bmap *rsrc, int id);
int rvu_rsrc_free_count(struct rsrc_bmap *rsrc);
int rvu_alloc_rsrc_contig(struct rsrc_bmap *rsrc, int nrsrc);
bool rvu_rsrc_check_contig(struct rsrc_bmap *rsrc, int nrsrc);
+u16 rvu_get_rsrc_mapcount(struct rvu_pfvf *pfvf, int blkaddr);
int rvu_get_pf(u16 pcifunc);
struct rvu_pfvf *rvu_get_pfvf(struct rvu *rvu, int pcifunc);
void rvu_get_pf_numvfs(struct rvu *rvu, int pf, int *numvfs, int *hwvf);
@@ -429,6 +491,7 @@ int rvu_get_lf(struct rvu *rvu, struct rvu_block *block, u16 pcifunc, u16 slot);
int rvu_lf_reset(struct rvu *rvu, struct rvu_block *block, int lf);
int rvu_get_blkaddr(struct rvu *rvu, int blktype, u16 pcifunc);
int rvu_poll_reg(struct rvu *rvu, u64 block, u64 offset, u64 mask, bool zero);
+int rvu_get_num_lbk_chans(void);
/* RVU HW reg validation */
enum regmap_block {
@@ -485,6 +548,9 @@ int rvu_get_nixlf_count(struct rvu *rvu);
void rvu_nix_lf_teardown(struct rvu *rvu, u16 pcifunc, int blkaddr, int npalf);
int nix_get_nixlf(struct rvu *rvu, u16 pcifunc, int *nixlf, int *nix_blkaddr);
int nix_update_bcast_mce_list(struct rvu *rvu, u16 pcifunc, bool add);
+struct nix_hw *get_nix_hw(struct rvu_hwinfo *hw, int blkaddr);
+int rvu_get_next_nix_blkaddr(struct rvu *rvu, int blkaddr);
+void rvu_nix_reset_mac(struct rvu_pfvf *pfvf, int pcifunc);
/* NPC APIs */
int rvu_npc_init(struct rvu *rvu);
@@ -501,8 +567,8 @@ void rvu_npc_enable_promisc_entry(struct rvu *rvu, u16 pcifunc, int nixlf);
void rvu_npc_install_bcast_match_entry(struct rvu *rvu, u16 pcifunc,
int nixlf, u64 chan);
void rvu_npc_enable_bcast_entry(struct rvu *rvu, u16 pcifunc, bool enable);
-int rvu_npc_update_rxvlan(struct rvu *rvu, u16 pcifunc, int nixlf);
void rvu_npc_disable_mcam_entries(struct rvu *rvu, u16 pcifunc, int nixlf);
+void rvu_npc_free_mcam_entries(struct rvu *rvu, u16 pcifunc, int nixlf);
void rvu_npc_disable_default_entries(struct rvu *rvu, u16 pcifunc, int nixlf);
void rvu_npc_enable_default_entries(struct rvu *rvu, u16 pcifunc, int nixlf);
void rvu_npc_update_flowkey_alg_idx(struct rvu *rvu, u16 pcifunc, int nixlf,
@@ -513,6 +579,24 @@ void rvu_npc_get_mcam_entry_alloc_info(struct rvu *rvu, u16 pcifunc,
void rvu_npc_get_mcam_counter_alloc_info(struct rvu *rvu, u16 pcifunc,
int blkaddr, int *alloc_cnt,
int *enable_cnt);
+bool is_npc_intf_tx(u8 intf);
+bool is_npc_intf_rx(u8 intf);
+bool is_npc_interface_valid(struct rvu *rvu, u8 intf);
+int rvu_npc_get_tx_nibble_cfg(struct rvu *rvu, u64 nibble_ena);
+int npc_mcam_verify_channel(struct rvu *rvu, u16 pcifunc, u8 intf, u16 channel);
+int npc_flow_steering_init(struct rvu *rvu, int blkaddr);
+const char *npc_get_field_name(u8 hdr);
+bool rvu_npc_write_default_rule(struct rvu *rvu, int blkaddr, int nixlf,
+ u16 pcifunc, u8 intf, struct mcam_entry *entry,
+ int *entry_index);
+int npc_get_bank(struct npc_mcam *mcam, int index);
+void npc_mcam_enable_flows(struct rvu *rvu, u16 target);
+void npc_mcam_disable_flows(struct rvu *rvu, u16 target);
+void npc_enable_mcam_entry(struct rvu *rvu, struct npc_mcam *mcam,
+ int blkaddr, int index, bool enable);
+void npc_read_mcam_entry(struct rvu *rvu, struct npc_mcam *mcam,
+ int blkaddr, u16 src, struct mcam_entry *entry,
+ u8 *intf, u8 *ena);
#ifdef CONFIG_DEBUG_FS
void rvu_dbg_init(struct rvu *rvu);
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c
index fa9152ff5e2a..d298b9357177 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c
@@ -74,6 +74,20 @@ void *rvu_cgx_pdata(u8 cgx_id, struct rvu *rvu)
return rvu->cgx_idmap[cgx_id];
}
+/* Based on P2X connectivity, find the mapped NIX block for a PF */
+static void rvu_map_cgx_nix_block(struct rvu *rvu, int pf,
+ int cgx_id, int lmac_id)
+{
+ struct rvu_pfvf *pfvf = &rvu->pf[pf];
+ u8 p2x;
+
+ p2x = cgx_lmac_get_p2x(cgx_id, lmac_id);
+ /* Firmware sets P2X_SELECT as either NIX0 or NIX1 */
+ pfvf->nix_blkaddr = BLKADDR_NIX0;
+ if (p2x == CMR_P2X_SEL_NIX1)
+ pfvf->nix_blkaddr = BLKADDR_NIX1;
+}
+
static int rvu_map_cgx_lmac_pf(struct rvu *rvu)
{
struct npc_pkind *pkind = &rvu->hw->pkind;
@@ -117,6 +131,7 @@ static int rvu_map_cgx_lmac_pf(struct rvu *rvu)
rvu->cgxlmac2pf_map[CGX_OFFSET(cgx) + lmac] = 1 << pf;
free_pkind = rvu_alloc_rsrc(&pkind->rsrc);
pkind->pfchan_map[free_pkind] = ((pf) & 0x3F) << 16;
+ rvu_map_cgx_nix_block(rvu, pf, cgx, lmac);
rvu->cgx_mapped_pfs++;
}
}
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cpt.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cpt.c
new file mode 100644
index 000000000000..35261d52c997
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cpt.c
@@ -0,0 +1,233 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (C) 2020 Marvell. */
+
+#include <linux/pci.h>
+#include "rvu_struct.h"
+#include "rvu_reg.h"
+#include "mbox.h"
+#include "rvu.h"
+
+/* CPT PF device id */
+#define PCI_DEVID_OTX2_CPT_PF 0xA0FD
+
+static int get_cpt_pf_num(struct rvu *rvu)
+{
+ int i, domain_nr, cpt_pf_num = -1;
+ struct pci_dev *pdev;
+
+ domain_nr = pci_domain_nr(rvu->pdev->bus);
+ for (i = 0; i < rvu->hw->total_pfs; i++) {
+ pdev = pci_get_domain_bus_and_slot(domain_nr, i + 1, 0);
+ if (!pdev)
+ continue;
+
+ if (pdev->device == PCI_DEVID_OTX2_CPT_PF) {
+ cpt_pf_num = i;
+ put_device(&pdev->dev);
+ break;
+ }
+ put_device(&pdev->dev);
+ }
+ return cpt_pf_num;
+}
+
+static bool is_cpt_pf(struct rvu *rvu, u16 pcifunc)
+{
+ int cpt_pf_num = get_cpt_pf_num(rvu);
+
+ if (rvu_get_pf(pcifunc) != cpt_pf_num)
+ return false;
+ if (pcifunc & RVU_PFVF_FUNC_MASK)
+ return false;
+
+ return true;
+}
+
+static bool is_cpt_vf(struct rvu *rvu, u16 pcifunc)
+{
+ int cpt_pf_num = get_cpt_pf_num(rvu);
+
+ if (rvu_get_pf(pcifunc) != cpt_pf_num)
+ return false;
+ if (!(pcifunc & RVU_PFVF_FUNC_MASK))
+ return false;
+
+ return true;
+}
+
+int rvu_mbox_handler_cpt_lf_alloc(struct rvu *rvu,
+ struct cpt_lf_alloc_req_msg *req,
+ struct msg_rsp *rsp)
+{
+ u16 pcifunc = req->hdr.pcifunc;
+ struct rvu_block *block;
+ int cptlf, blkaddr;
+ int num_lfs, slot;
+ u64 val;
+
+ if (req->eng_grpmsk == 0x0)
+ return CPT_AF_ERR_GRP_INVALID;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_CPT, 0);
+ if (blkaddr < 0)
+ return blkaddr;
+
+ block = &rvu->hw->block[blkaddr];
+ num_lfs = rvu_get_rsrc_mapcount(rvu_get_pfvf(rvu, pcifunc),
+ block->addr);
+ if (!num_lfs)
+ return CPT_AF_ERR_LF_INVALID;
+
+ /* Check if requested 'CPTLF <=> NIXLF' mapping is valid */
+ if (req->nix_pf_func) {
+ /* If default, use 'this' CPTLF's PFFUNC */
+ if (req->nix_pf_func == RVU_DEFAULT_PF_FUNC)
+ req->nix_pf_func = pcifunc;
+ if (!is_pffunc_map_valid(rvu, req->nix_pf_func, BLKTYPE_NIX))
+ return CPT_AF_ERR_NIX_PF_FUNC_INVALID;
+ }
+
+ /* Check if requested 'CPTLF <=> SSOLF' mapping is valid */
+ if (req->sso_pf_func) {
+ /* If default, use 'this' CPTLF's PFFUNC */
+ if (req->sso_pf_func == RVU_DEFAULT_PF_FUNC)
+ req->sso_pf_func = pcifunc;
+ if (!is_pffunc_map_valid(rvu, req->sso_pf_func, BLKTYPE_SSO))
+ return CPT_AF_ERR_SSO_PF_FUNC_INVALID;
+ }
+
+ for (slot = 0; slot < num_lfs; slot++) {
+ cptlf = rvu_get_lf(rvu, block, pcifunc, slot);
+ if (cptlf < 0)
+ return CPT_AF_ERR_LF_INVALID;
+
+ /* Set CPT LF group and priority */
+ val = (u64)req->eng_grpmsk << 48 | 1;
+ rvu_write64(rvu, blkaddr, CPT_AF_LFX_CTL(cptlf), val);
+
+ /* Set CPT LF NIX_PF_FUNC and SSO_PF_FUNC */
+ val = (u64)req->nix_pf_func << 48 |
+ (u64)req->sso_pf_func << 32;
+ rvu_write64(rvu, blkaddr, CPT_AF_LFX_CTL2(cptlf), val);
+ }
+
+ return 0;
+}
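
Both writes above pack several independent fields into a single 64-bit
register: CTL carries the engine-group mask at bit 48 plus a priority bit in
bit 0, while CTL2 carries NIX_PF_FUNC at bit 48 and SSO_PF_FUNC at bit 32. A
minimal standalone sketch of that packing, using hypothetical input values:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Hypothetical fields from a cpt_lf_alloc_req_msg */
	uint64_t eng_grpmsk = 0x7;	/* engine groups 0-2 */
	uint64_t nix_pf_func = 0x0400;	/* assumed PF_FUNC encodings */
	uint64_t sso_pf_func = 0x0800;

	/* CPT_AF_LFX_CTL: group mask << 48, priority bit 0 set */
	uint64_t ctl = eng_grpmsk << 48 | 1;
	/* CPT_AF_LFX_CTL2: NIX_PF_FUNC << 48 | SSO_PF_FUNC << 32 */
	uint64_t ctl2 = nix_pf_func << 48 | sso_pf_func << 32;

	printf("CTL  = 0x%016" PRIx64 "\n", ctl);
	printf("CTL2 = 0x%016" PRIx64 "\n", ctl2);
	return 0;
}
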
+
+int rvu_mbox_handler_cpt_lf_free(struct rvu *rvu, struct msg_req *req,
+ struct msg_rsp *rsp)
+{
+ u16 pcifunc = req->hdr.pcifunc;
+ struct rvu_block *block;
+ int cptlf, blkaddr;
+ int num_lfs, slot;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_CPT, 0);
+ if (blkaddr < 0)
+ return blkaddr;
+
+ block = &rvu->hw->block[blkaddr];
+ num_lfs = rvu_get_rsrc_mapcount(rvu_get_pfvf(rvu, pcifunc),
+ block->addr);
+ if (!num_lfs)
+ return CPT_AF_ERR_LF_INVALID;
+
+ for (slot = 0; slot < num_lfs; slot++) {
+ cptlf = rvu_get_lf(rvu, block, pcifunc, slot);
+ if (cptlf < 0)
+ return CPT_AF_ERR_LF_INVALID;
+
+ /* Reset CPT LF group and priority */
+ rvu_write64(rvu, blkaddr, CPT_AF_LFX_CTL(cptlf), 0x0);
+ /* Reset CPT LF NIX_PF_FUNC and SSO_PF_FUNC */
+ rvu_write64(rvu, blkaddr, CPT_AF_LFX_CTL2(cptlf), 0x0);
+ }
+
+ return 0;
+}
+
+static bool is_valid_offset(struct rvu *rvu, struct cpt_rd_wr_reg_msg *req)
+{
+ u64 offset = req->reg_offset;
+ int blkaddr, num_lfs, lf;
+ struct rvu_block *block;
+ struct rvu_pfvf *pfvf;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_CPT, 0);
+
+ /* Registers that can be accessed from PF/VF */
+ if ((offset & 0xFF000) == CPT_AF_LFX_CTL(0) ||
+ (offset & 0xFF000) == CPT_AF_LFX_CTL2(0)) {
+ if (offset & 7)
+ return false;
+
+ lf = (offset & 0xFFF) >> 3;
+ block = &rvu->hw->block[blkaddr];
+ pfvf = rvu_get_pfvf(rvu, req->hdr.pcifunc);
+ num_lfs = rvu_get_rsrc_mapcount(pfvf, block->addr);
+ if (lf >= num_lfs)
+ /* Slot is not valid for that PF/VF */
+ return false;
+
+ /* Translate local LF used by VFs to global CPT LF */
+ lf = rvu_get_lf(rvu, &rvu->hw->block[blkaddr],
+ req->hdr.pcifunc, lf);
+ if (lf < 0)
+ return false;
+
+ return true;
+ } else if (!(req->hdr.pcifunc & RVU_PFVF_FUNC_MASK)) {
+ /* Registers that can be accessed from PF */
+ switch (offset) {
+ case CPT_AF_CTL:
+ case CPT_AF_PF_FUNC:
+ case CPT_AF_BLK_RST:
+ case CPT_AF_CONSTANTS1:
+ return true;
+ }
+
+ switch (offset & 0xFF000) {
+ case CPT_AF_EXEX_STS(0):
+ case CPT_AF_EXEX_CTL(0):
+ case CPT_AF_EXEX_CTL2(0):
+ case CPT_AF_EXEX_UCODE_BASE(0):
+ if (offset & 7)
+ return false;
+ break;
+ default:
+ return false;
+ }
+ return true;
+ }
+ return false;
+}
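
The LF-register branch above decodes an AF register offset into a group and a
slot: bits [19:12] (masked with 0xFF000) select the register group, bits
[11:3] give the LF slot at an 8-byte stride, and bits [2:0] must be clear for
an aligned 64-bit access. A standalone sketch of that decode, assuming the
same layout:

#include <inttypes.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Mirror of the offset decode in is_valid_offset() */
static bool decode_lf_offset(uint64_t offset, uint64_t *group, int *lf)
{
	if (offset & 7)
		return false;	/* not 8-byte aligned */
	*group = offset & 0xFF000;	/* register group, e.g. CPT_AF_LFX_CTL(0) */
	*lf = (offset & 0xFFF) >> 3;	/* local LF slot */
	return true;
}

int main(void)
{
	uint64_t group;
	int lf;

	if (decode_lf_offset(0x10020, &group, &lf))	/* hypothetical offset */
		printf("group=0x%" PRIx64 " lf=%d\n", group, lf);
	return 0;
}
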
+
+int rvu_mbox_handler_cpt_rd_wr_register(struct rvu *rvu,
+ struct cpt_rd_wr_reg_msg *req,
+ struct cpt_rd_wr_reg_msg *rsp)
+{
+ int blkaddr;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_CPT, 0);
+ if (blkaddr < 0)
+ return blkaddr;
+
+ /* This message is accepted only if sent from CPT PF/VF */
+ if (!is_cpt_pf(rvu, req->hdr.pcifunc) &&
+ !is_cpt_vf(rvu, req->hdr.pcifunc))
+ return CPT_AF_ERR_ACCESS_DENIED;
+
+ rsp->reg_offset = req->reg_offset;
+ rsp->ret_val = req->ret_val;
+ rsp->is_write = req->is_write;
+
+ if (!is_valid_offset(rvu, req))
+ return CPT_AF_ERR_ACCESS_DENIED;
+
+ if (req->is_write)
+ rvu_write64(rvu, blkaddr, req->reg_offset, req->val);
+ else
+ rsp->val = rvu_read64(rvu, blkaddr, req->reg_offset);
+
+ return 0;
+}
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
index 77adad4adb1b..d27543c1a166 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
@@ -109,6 +109,12 @@ static char *cgx_tx_stats_fields[] = {
[CGX_STAT17] = "Control/PAUSE packets sent",
};
+enum cpt_eng_type {
+ CPT_AE_TYPE = 1,
+ CPT_SE_TYPE = 2,
+ CPT_IE_TYPE = 3,
+};
+
#define NDC_MAX_BANK(rvu, blk_addr) (rvu_read64(rvu, \
blk_addr, NDC_AF_CONST) & 0xFF)
@@ -224,18 +230,53 @@ static ssize_t rvu_dbg_rsrc_attach_status(struct file *filp,
RVU_DEBUG_FOPS(rsrc_status, rsrc_attach_status, NULL);
-static bool rvu_dbg_is_valid_lf(struct rvu *rvu, int blktype, int lf,
+static int rvu_dbg_rvu_pf_cgx_map_display(struct seq_file *filp, void *unused)
+{
+ struct rvu *rvu = filp->private;
+ struct pci_dev *pdev = NULL;
+ char cgx[10], lmac[10];
+ struct rvu_pfvf *pfvf;
+ int pf, domain, blkid;
+ u8 cgx_id, lmac_id;
+ u16 pcifunc;
+
+ domain = 2;
+ seq_puts(filp, "PCI dev\t\tRVU PF Func\tNIX block\tCGX\tLMAC\n");
+ for (pf = 0; pf < rvu->hw->total_pfs; pf++) {
+ if (!is_pf_cgxmapped(rvu, pf))
+ continue;
+
+ pdev = pci_get_domain_bus_and_slot(domain, pf + 1, 0);
+ if (!pdev)
+ continue;
+
+ cgx[0] = 0;
+ lmac[0] = 0;
+ pcifunc = pf << 10;
+ pfvf = rvu_get_pfvf(rvu, pcifunc);
+
+ if (pfvf->nix_blkaddr == BLKADDR_NIX0)
+ blkid = 0;
+ else
+ blkid = 1;
+
+ rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id,
+ &lmac_id);
+ sprintf(cgx, "CGX%d", cgx_id);
+ sprintf(lmac, "LMAC%d", lmac_id);
+ seq_printf(filp, "%s\t0x%x\t\tNIX%d\t\t%s\t%s\n",
+ dev_name(&pdev->dev), pcifunc, blkid, cgx, lmac);
+ }
+ return 0;
+}
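
The pcifunc built above with "pf << 10" follows the RVU PF/VF numbering used
throughout this patch: the PF index lives above bit 10, and a non-zero low
field encodes "VF index + 1" (zero means the PF itself). A small sketch of
that encode/decode, assuming a 10-bit function field as implied by the shift:

#include <stdint.h>
#include <stdio.h>

#define RVU_PFVF_PF_SHIFT	10	/* matches "pf << 10" above */
#define RVU_PFVF_FUNC_MASK	0x3FF	/* assumed 10-bit VF field */

int main(void)
{
	uint16_t pcifunc = (3 << RVU_PFVF_PF_SHIFT) | (0 + 1);	/* PF3, VF0 */
	int pf = pcifunc >> RVU_PFVF_PF_SHIFT;

	if (pcifunc & RVU_PFVF_FUNC_MASK)
		printf("PF%d VF%d\n", pf, (pcifunc & RVU_PFVF_FUNC_MASK) - 1);
	else
		printf("PF%d (no VF)\n", pf);
	return 0;
}
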
+
+RVU_DEBUG_SEQ_FOPS(rvu_pf_cgx_map, rvu_pf_cgx_map_display, NULL);
+
+static bool rvu_dbg_is_valid_lf(struct rvu *rvu, int blkaddr, int lf,
u16 *pcifunc)
{
struct rvu_block *block;
struct rvu_hwinfo *hw;
- int blkaddr;
-
- blkaddr = rvu_get_blkaddr(rvu, blktype, 0);
- if (blkaddr < 0) {
- dev_warn(rvu->dev, "Invalid blktype\n");
- return false;
- }
hw = rvu->hw;
block = &hw->block[blkaddr];
@@ -291,10 +332,12 @@ static int rvu_dbg_qsize_display(struct seq_file *filp, void *unsused,
{
void (*print_qsize)(struct seq_file *filp,
struct rvu_pfvf *pfvf) = NULL;
+ struct dentry *current_dir;
struct rvu_pfvf *pfvf;
struct rvu *rvu;
int qsize_id;
u16 pcifunc;
+ int blkaddr;
rvu = filp->private;
switch (blktype) {
@@ -312,7 +355,15 @@ static int rvu_dbg_qsize_display(struct seq_file *filp, void *unsused,
return -EINVAL;
}
- if (!rvu_dbg_is_valid_lf(rvu, blktype, qsize_id, &pcifunc))
+ if (blktype == BLKTYPE_NPA) {
+ blkaddr = BLKADDR_NPA;
+ } else {
+ current_dir = filp->file->f_path.dentry->d_parent;
+ blkaddr = (!strcmp(current_dir->d_name.name, "nix1") ?
+ BLKADDR_NIX1 : BLKADDR_NIX0);
+ }
+
+ if (!rvu_dbg_is_valid_lf(rvu, blkaddr, qsize_id, &pcifunc))
return -EINVAL;
pfvf = rvu_get_pfvf(rvu, pcifunc);
@@ -329,6 +380,8 @@ static ssize_t rvu_dbg_qsize_write(struct file *filp,
struct seq_file *seqfile = filp->private_data;
char *cmd_buf, *cmd_buf_tmp, *subtoken;
struct rvu *rvu = seqfile->private;
+ struct dentry *current_dir;
+ int blkaddr;
u16 pcifunc;
int ret, lf;
@@ -355,7 +408,15 @@ static ssize_t rvu_dbg_qsize_write(struct file *filp,
goto qsize_write_done;
}
- if (!rvu_dbg_is_valid_lf(rvu, blktype, lf, &pcifunc)) {
+ if (blktype == BLKTYPE_NPA) {
+ blkaddr = BLKADDR_NPA;
+ } else {
+ current_dir = filp->f_path.dentry->d_parent;
+ blkaddr = (!strcmp(current_dir->d_name.name, "nix1") ?
+ BLKADDR_NIX1 : BLKADDR_NIX0);
+ }
+
+ if (!rvu_dbg_is_valid_lf(rvu, blkaddr, lf, &pcifunc)) {
ret = -EINVAL;
goto qsize_write_done;
}
@@ -498,7 +559,7 @@ static int rvu_dbg_npa_ctx_display(struct seq_file *m, void *unused, int ctype)
return -EINVAL;
}
- if (!rvu_dbg_is_valid_lf(rvu, BLKTYPE_NPA, npalf, &pcifunc))
+ if (!rvu_dbg_is_valid_lf(rvu, BLKADDR_NPA, npalf, &pcifunc))
return -EINVAL;
pfvf = rvu_get_pfvf(rvu, pcifunc);
@@ -556,7 +617,7 @@ static int write_npa_ctx(struct rvu *rvu, bool all,
int max_id = 0;
u16 pcifunc;
- if (!rvu_dbg_is_valid_lf(rvu, BLKTYPE_NPA, npalf, &pcifunc))
+ if (!rvu_dbg_is_valid_lf(rvu, BLKADDR_NPA, npalf, &pcifunc))
return -EINVAL;
pfvf = rvu_get_pfvf(rvu, pcifunc);
@@ -704,9 +765,17 @@ static void ndc_cache_stats(struct seq_file *s, int blk_addr,
int ctype, int transaction)
{
u64 req, out_req, lat, cant_alloc;
- struct rvu *rvu = s->private;
+ struct nix_hw *nix_hw;
+ struct rvu *rvu;
int port;
+ if (blk_addr == BLKADDR_NDC_NPA0) {
+ rvu = s->private;
+ } else {
+ nix_hw = s->private;
+ rvu = nix_hw->rvu;
+ }
+
for (port = 0; port < NDC_MAX_PORT; port++) {
req = rvu_read64(rvu, blk_addr, NDC_AF_PORTX_RTX_RWX_REQ_PC
(port, ctype, transaction));
@@ -749,9 +818,17 @@ RVU_DEBUG_SEQ_FOPS(npa_ndc_cache, npa_ndc_cache_display, NULL);
static int ndc_blk_hits_miss_stats(struct seq_file *s, int idx, int blk_addr)
{
- struct rvu *rvu = s->private;
+ struct nix_hw *nix_hw;
+ struct rvu *rvu;
int bank, max_bank;
+ if (blk_addr == BLKADDR_NDC_NPA0) {
+ rvu = s->private;
+ } else {
+ nix_hw = s->private;
+ rvu = nix_hw->rvu;
+ }
+
max_bank = NDC_MAX_BANK(rvu, blk_addr);
for (bank = 0; bank < max_bank; bank++) {
seq_printf(s, "BANK:%d\n", bank);
@@ -767,16 +844,30 @@ static int ndc_blk_hits_miss_stats(struct seq_file *s, int idx, int blk_addr)
static int rvu_dbg_nix_ndc_rx_cache_display(struct seq_file *filp, void *unused)
{
- return ndc_blk_cache_stats(filp, NIX0_RX,
- BLKADDR_NDC_NIX0_RX);
+ struct nix_hw *nix_hw = filp->private;
+ int blkaddr = 0;
+ int ndc_idx = 0;
+
+ blkaddr = (nix_hw->blkaddr == BLKADDR_NIX1 ?
+ BLKADDR_NDC_NIX1_RX : BLKADDR_NDC_NIX0_RX);
+ ndc_idx = (nix_hw->blkaddr == BLKADDR_NIX1 ? NIX1_RX : NIX0_RX);
+
+ return ndc_blk_cache_stats(filp, ndc_idx, blkaddr);
}
RVU_DEBUG_SEQ_FOPS(nix_ndc_rx_cache, nix_ndc_rx_cache_display, NULL);
static int rvu_dbg_nix_ndc_tx_cache_display(struct seq_file *filp, void *unused)
{
- return ndc_blk_cache_stats(filp, NIX0_TX,
- BLKADDR_NDC_NIX0_TX);
+ struct nix_hw *nix_hw = filp->private;
+ int blkaddr = 0;
+ int ndc_idx = 0;
+
+ blkaddr = (nix_hw->blkaddr == BLKADDR_NIX1 ?
+ BLKADDR_NDC_NIX1_TX : BLKADDR_NDC_NIX0_TX);
+ ndc_idx = (nix_hw->blkaddr == BLKADDR_NIX1 ? NIX1_TX : NIX0_TX);
+
+ return ndc_blk_cache_stats(filp, ndc_idx, blkaddr);
}
RVU_DEBUG_SEQ_FOPS(nix_ndc_tx_cache, nix_ndc_tx_cache_display, NULL);
@@ -792,8 +883,14 @@ RVU_DEBUG_SEQ_FOPS(npa_ndc_hits_miss, npa_ndc_hits_miss_display, NULL);
static int rvu_dbg_nix_ndc_rx_hits_miss_display(struct seq_file *filp,
void *unused)
{
- return ndc_blk_hits_miss_stats(filp,
- NPA0_U, BLKADDR_NDC_NIX0_RX);
+ struct nix_hw *nix_hw = filp->private;
+ int ndc_idx = NPA0_U;
+ int blkaddr = 0;
+
+ blkaddr = (nix_hw->blkaddr == BLKADDR_NIX1 ?
+ BLKADDR_NDC_NIX1_RX : BLKADDR_NDC_NIX0_RX);
+
+ return ndc_blk_hits_miss_stats(filp, ndc_idx, blkaddr);
}
RVU_DEBUG_SEQ_FOPS(nix_ndc_rx_hits_miss, nix_ndc_rx_hits_miss_display, NULL);
@@ -801,8 +898,14 @@ RVU_DEBUG_SEQ_FOPS(nix_ndc_rx_hits_miss, nix_ndc_rx_hits_miss_display, NULL);
static int rvu_dbg_nix_ndc_tx_hits_miss_display(struct seq_file *filp,
void *unused)
{
- return ndc_blk_hits_miss_stats(filp,
- NPA0_U, BLKADDR_NDC_NIX0_TX);
+ struct nix_hw *nix_hw = filp->private;
+ int ndc_idx = NPA0_U;
+ int blkaddr = 0;
+
+ blkaddr = (nix_hw->blkaddr == BLKADDR_NIX1 ?
+ BLKADDR_NDC_NIX1_TX : BLKADDR_NDC_NIX0_TX);
+
+ return ndc_blk_hits_miss_stats(filp, ndc_idx, blkaddr);
}
RVU_DEBUG_SEQ_FOPS(nix_ndc_tx_hits_miss, nix_ndc_tx_hits_miss_display, NULL);
@@ -969,7 +1072,8 @@ static int rvu_dbg_nix_queue_ctx_display(struct seq_file *filp,
{
void (*print_nix_ctx)(struct seq_file *filp,
struct nix_aq_enq_rsp *rsp) = NULL;
- struct rvu *rvu = filp->private;
+ struct nix_hw *nix_hw = filp->private;
+ struct rvu *rvu = nix_hw->rvu;
struct nix_aq_enq_req aq_req;
struct nix_aq_enq_rsp rsp;
char *ctype_string = NULL;
@@ -1001,7 +1105,7 @@ static int rvu_dbg_nix_queue_ctx_display(struct seq_file *filp,
return -EINVAL;
}
- if (!rvu_dbg_is_valid_lf(rvu, BLKTYPE_NIX, nixlf, &pcifunc))
+ if (!rvu_dbg_is_valid_lf(rvu, nix_hw->blkaddr, nixlf, &pcifunc))
return -EINVAL;
pfvf = rvu_get_pfvf(rvu, pcifunc);
@@ -1053,13 +1157,15 @@ static int rvu_dbg_nix_queue_ctx_display(struct seq_file *filp,
}
static int write_nix_queue_ctx(struct rvu *rvu, bool all, int nixlf,
- int id, int ctype, char *ctype_string)
+ int id, int ctype, char *ctype_string,
+ struct seq_file *m)
{
+ struct nix_hw *nix_hw = m->private;
struct rvu_pfvf *pfvf;
int max_id = 0;
u16 pcifunc;
- if (!rvu_dbg_is_valid_lf(rvu, BLKTYPE_NIX, nixlf, &pcifunc))
+ if (!rvu_dbg_is_valid_lf(rvu, nix_hw->blkaddr, nixlf, &pcifunc))
return -EINVAL;
pfvf = rvu_get_pfvf(rvu, pcifunc);
@@ -1119,7 +1225,8 @@ static ssize_t rvu_dbg_nix_queue_ctx_write(struct file *filp,
int ctype)
{
struct seq_file *m = filp->private_data;
- struct rvu *rvu = m->private;
+ struct nix_hw *nix_hw = m->private;
+ struct rvu *rvu = nix_hw->rvu;
char *cmd_buf, *ctype_string;
int nixlf, id = 0, ret;
bool all = false;
@@ -1155,7 +1262,7 @@ static ssize_t rvu_dbg_nix_queue_ctx_write(struct file *filp,
goto done;
} else {
ret = write_nix_queue_ctx(rvu, all, nixlf, id, ctype,
- ctype_string);
+ ctype_string, m);
}
done:
kfree(cmd_buf);
@@ -1259,102 +1366,54 @@ static int rvu_dbg_nix_qsize_display(struct seq_file *filp, void *unused)
RVU_DEBUG_SEQ_FOPS(nix_qsize, nix_qsize_display, nix_qsize_write);
-static void rvu_dbg_nix_init(struct rvu *rvu)
+static void rvu_dbg_nix_init(struct rvu *rvu, int blkaddr)
{
- const struct device *dev = &rvu->pdev->dev;
- struct dentry *pfile;
+ struct nix_hw *nix_hw;
- rvu->rvu_dbg.nix = debugfs_create_dir("nix", rvu->rvu_dbg.root);
- if (!rvu->rvu_dbg.nix) {
- dev_err(rvu->dev, "create debugfs dir failed for nix\n");
+ if (!is_block_implemented(rvu->hw, blkaddr))
return;
- }
-
- pfile = debugfs_create_file("sq_ctx", 0600, rvu->rvu_dbg.nix, rvu,
- &rvu_dbg_nix_sq_ctx_fops);
- if (!pfile)
- goto create_failed;
-
- pfile = debugfs_create_file("rq_ctx", 0600, rvu->rvu_dbg.nix, rvu,
- &rvu_dbg_nix_rq_ctx_fops);
- if (!pfile)
- goto create_failed;
-
- pfile = debugfs_create_file("cq_ctx", 0600, rvu->rvu_dbg.nix, rvu,
- &rvu_dbg_nix_cq_ctx_fops);
- if (!pfile)
- goto create_failed;
- pfile = debugfs_create_file("ndc_tx_cache", 0600, rvu->rvu_dbg.nix, rvu,
- &rvu_dbg_nix_ndc_tx_cache_fops);
- if (!pfile)
- goto create_failed;
-
- pfile = debugfs_create_file("ndc_rx_cache", 0600, rvu->rvu_dbg.nix, rvu,
- &rvu_dbg_nix_ndc_rx_cache_fops);
- if (!pfile)
- goto create_failed;
-
- pfile = debugfs_create_file("ndc_tx_hits_miss", 0600, rvu->rvu_dbg.nix,
- rvu, &rvu_dbg_nix_ndc_tx_hits_miss_fops);
- if (!pfile)
- goto create_failed;
-
- pfile = debugfs_create_file("ndc_rx_hits_miss", 0600, rvu->rvu_dbg.nix,
- rvu, &rvu_dbg_nix_ndc_rx_hits_miss_fops);
- if (!pfile)
- goto create_failed;
-
- pfile = debugfs_create_file("qsize", 0600, rvu->rvu_dbg.nix, rvu,
- &rvu_dbg_nix_qsize_fops);
- if (!pfile)
- goto create_failed;
+ if (blkaddr == BLKADDR_NIX0) {
+ rvu->rvu_dbg.nix = debugfs_create_dir("nix", rvu->rvu_dbg.root);
+ nix_hw = &rvu->hw->nix[0];
+ } else {
+ rvu->rvu_dbg.nix = debugfs_create_dir("nix1",
+ rvu->rvu_dbg.root);
+ nix_hw = &rvu->hw->nix[1];
+ }
- return;
-create_failed:
- dev_err(dev, "Failed to create debugfs dir/file for NIX\n");
- debugfs_remove_recursive(rvu->rvu_dbg.nix);
+ debugfs_create_file("sq_ctx", 0600, rvu->rvu_dbg.nix, nix_hw,
+ &rvu_dbg_nix_sq_ctx_fops);
+ debugfs_create_file("rq_ctx", 0600, rvu->rvu_dbg.nix, nix_hw,
+ &rvu_dbg_nix_rq_ctx_fops);
+ debugfs_create_file("cq_ctx", 0600, rvu->rvu_dbg.nix, nix_hw,
+ &rvu_dbg_nix_cq_ctx_fops);
+ debugfs_create_file("ndc_tx_cache", 0600, rvu->rvu_dbg.nix, nix_hw,
+ &rvu_dbg_nix_ndc_tx_cache_fops);
+ debugfs_create_file("ndc_rx_cache", 0600, rvu->rvu_dbg.nix, nix_hw,
+ &rvu_dbg_nix_ndc_rx_cache_fops);
+ debugfs_create_file("ndc_tx_hits_miss", 0600, rvu->rvu_dbg.nix, nix_hw,
+ &rvu_dbg_nix_ndc_tx_hits_miss_fops);
+ debugfs_create_file("ndc_rx_hits_miss", 0600, rvu->rvu_dbg.nix, nix_hw,
+ &rvu_dbg_nix_ndc_rx_hits_miss_fops);
+ debugfs_create_file("qsize", 0600, rvu->rvu_dbg.nix, rvu,
+ &rvu_dbg_nix_qsize_fops);
}
static void rvu_dbg_npa_init(struct rvu *rvu)
{
- const struct device *dev = &rvu->pdev->dev;
- struct dentry *pfile;
-
rvu->rvu_dbg.npa = debugfs_create_dir("npa", rvu->rvu_dbg.root);
- if (!rvu->rvu_dbg.npa)
- return;
-
- pfile = debugfs_create_file("qsize", 0600, rvu->rvu_dbg.npa, rvu,
- &rvu_dbg_npa_qsize_fops);
- if (!pfile)
- goto create_failed;
-
- pfile = debugfs_create_file("aura_ctx", 0600, rvu->rvu_dbg.npa, rvu,
- &rvu_dbg_npa_aura_ctx_fops);
- if (!pfile)
- goto create_failed;
-
- pfile = debugfs_create_file("pool_ctx", 0600, rvu->rvu_dbg.npa, rvu,
- &rvu_dbg_npa_pool_ctx_fops);
- if (!pfile)
- goto create_failed;
-
- pfile = debugfs_create_file("ndc_cache", 0600, rvu->rvu_dbg.npa, rvu,
- &rvu_dbg_npa_ndc_cache_fops);
- if (!pfile)
- goto create_failed;
-
- pfile = debugfs_create_file("ndc_hits_miss", 0600, rvu->rvu_dbg.npa,
- rvu, &rvu_dbg_npa_ndc_hits_miss_fops);
- if (!pfile)
- goto create_failed;
-
- return;
-create_failed:
- dev_err(dev, "Failed to create debugfs dir/file for NPA\n");
- debugfs_remove_recursive(rvu->rvu_dbg.npa);
+ debugfs_create_file("qsize", 0600, rvu->rvu_dbg.npa, rvu,
+ &rvu_dbg_npa_qsize_fops);
+ debugfs_create_file("aura_ctx", 0600, rvu->rvu_dbg.npa, rvu,
+ &rvu_dbg_npa_aura_ctx_fops);
+ debugfs_create_file("pool_ctx", 0600, rvu->rvu_dbg.npa, rvu,
+ &rvu_dbg_npa_pool_ctx_fops);
+ debugfs_create_file("ndc_cache", 0600, rvu->rvu_dbg.npa, rvu,
+ &rvu_dbg_npa_ndc_cache_fops);
+ debugfs_create_file("ndc_hits_miss", 0600, rvu->rvu_dbg.npa, rvu,
+ &rvu_dbg_npa_ndc_hits_miss_fops);
}
#define PRINT_CGX_CUML_NIXRX_STATUS(idx, name) \
@@ -1488,8 +1547,6 @@ RVU_DEBUG_SEQ_FOPS(cgx_stat, cgx_stat_display, NULL);
static void rvu_dbg_cgx_init(struct rvu *rvu)
{
- const struct device *dev = &rvu->pdev->dev;
- struct dentry *pfile;
int i, lmac_id;
char dname[20];
void *cgx;
@@ -1510,18 +1567,10 @@ static void rvu_dbg_cgx_init(struct rvu *rvu)
rvu->rvu_dbg.lmac =
debugfs_create_dir(dname, rvu->rvu_dbg.cgx);
- pfile = debugfs_create_file("stats", 0600,
- rvu->rvu_dbg.lmac, cgx,
- &rvu_dbg_cgx_stat_fops);
- if (!pfile)
- goto create_failed;
+ debugfs_create_file("stats", 0600, rvu->rvu_dbg.lmac,
+ cgx, &rvu_dbg_cgx_stat_fops);
}
}
- return;
-
-create_failed:
- dev_err(dev, "Failed to create debugfs dir/file for CGX\n");
- debugfs_remove_recursive(rvu->rvu_dbg.cgx_root);
}
/* NPC debugfs APIs */
@@ -1565,7 +1614,7 @@ static int rvu_dbg_npc_mcam_info_display(struct seq_file *filp, void *unsued)
struct rvu *rvu = filp->private;
int pf, vf, numvfs, blkaddr;
struct npc_mcam *mcam;
- u16 pcifunc;
+ u16 pcifunc, counters;
u64 cfg;
blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
@@ -1573,6 +1622,7 @@ static int rvu_dbg_npc_mcam_info_display(struct seq_file *filp, void *unsued)
return -ENODEV;
mcam = &rvu->hw->mcam;
+ counters = rvu->hw->npc_counters;
seq_puts(filp, "\nNPC MCAM info:\n");
/* MCAM keywidth on receive and transmit sides */
@@ -1595,10 +1645,9 @@ static int rvu_dbg_npc_mcam_info_display(struct seq_file *filp, void *unsued)
seq_printf(filp, "\t\t Available \t: %d\n", mcam->bmap_fcnt);
/* MCAM counters */
- cfg = rvu_read64(rvu, blkaddr, NPC_AF_CONST);
- cfg = (cfg >> 48) & 0xFFFF;
- seq_printf(filp, "\n\t\t MCAM counters \t: %lld\n", cfg);
- seq_printf(filp, "\t\t Reserved \t: %lld\n", cfg - mcam->counters.max);
+ seq_printf(filp, "\n\t\t MCAM counters \t: %d\n", counters);
+ seq_printf(filp, "\t\t Reserved \t: %d\n",
+ counters - mcam->counters.max);
seq_printf(filp, "\t\t Available \t: %d\n",
rvu_rsrc_free_count(&mcam->counters));
@@ -1650,57 +1699,453 @@ static int rvu_dbg_npc_rx_miss_stats_display(struct seq_file *filp,
RVU_DEBUG_SEQ_FOPS(npc_rx_miss_act, npc_rx_miss_stats_display, NULL);
-static void rvu_dbg_npc_init(struct rvu *rvu)
+static void rvu_dbg_npc_mcam_show_flows(struct seq_file *s,
+ struct rvu_npc_mcam_rule *rule)
{
- const struct device *dev = &rvu->pdev->dev;
- struct dentry *pfile;
+ u8 bit;
+
+ for_each_set_bit(bit, (unsigned long *)&rule->features, 64) {
+ seq_printf(s, "\t%s ", npc_get_field_name(bit));
+ switch (bit) {
+ case NPC_DMAC:
+ seq_printf(s, "%pM ", rule->packet.dmac);
+ seq_printf(s, "mask %pM\n", rule->mask.dmac);
+ break;
+ case NPC_SMAC:
+ seq_printf(s, "%pM ", rule->packet.smac);
+ seq_printf(s, "mask %pM\n", rule->mask.smac);
+ break;
+ case NPC_ETYPE:
+ seq_printf(s, "0x%x ", ntohs(rule->packet.etype));
+ seq_printf(s, "mask 0x%x\n", ntohs(rule->mask.etype));
+ break;
+ case NPC_OUTER_VID:
+ seq_printf(s, "%d ", ntohs(rule->packet.vlan_tci));
+ seq_printf(s, "mask 0x%x\n",
+ ntohs(rule->mask.vlan_tci));
+ break;
+ case NPC_TOS:
+ seq_printf(s, "%d ", rule->packet.tos);
+ seq_printf(s, "mask 0x%x\n", rule->mask.tos);
+ break;
+ case NPC_SIP_IPV4:
+ seq_printf(s, "%pI4 ", &rule->packet.ip4src);
+ seq_printf(s, "mask %pI4\n", &rule->mask.ip4src);
+ break;
+ case NPC_DIP_IPV4:
+ seq_printf(s, "%pI4 ", &rule->packet.ip4dst);
+ seq_printf(s, "mask %pI4\n", &rule->mask.ip4dst);
+ break;
+ case NPC_SIP_IPV6:
+ seq_printf(s, "%pI6 ", rule->packet.ip6src);
+ seq_printf(s, "mask %pI6\n", rule->mask.ip6src);
+ break;
+ case NPC_DIP_IPV6:
+ seq_printf(s, "%pI6 ", rule->packet.ip6dst);
+ seq_printf(s, "mask %pI6\n", rule->mask.ip6dst);
+ break;
+ case NPC_SPORT_TCP:
+ case NPC_SPORT_UDP:
+ case NPC_SPORT_SCTP:
+ seq_printf(s, "%d ", ntohs(rule->packet.sport));
+ seq_printf(s, "mask 0x%x\n", ntohs(rule->mask.sport));
+ break;
+ case NPC_DPORT_TCP:
+ case NPC_DPORT_UDP:
+ case NPC_DPORT_SCTP:
+ seq_printf(s, "%d ", ntohs(rule->packet.dport));
+ seq_printf(s, "mask 0x%x\n", ntohs(rule->mask.dport));
+ break;
+ default:
+ break;
+ }
+ }
+}
+
+static void rvu_dbg_npc_mcam_show_action(struct seq_file *s,
+ struct rvu_npc_mcam_rule *rule)
+{
+ if (rule->intf == NIX_INTF_TX) {
+ switch (rule->tx_action.op) {
+ case NIX_TX_ACTIONOP_DROP:
+ seq_puts(s, "\taction: Drop\n");
+ break;
+ case NIX_TX_ACTIONOP_UCAST_DEFAULT:
+ seq_puts(s, "\taction: Unicast to default channel\n");
+ break;
+ case NIX_TX_ACTIONOP_UCAST_CHAN:
+ seq_printf(s, "\taction: Unicast to channel %d\n",
+ rule->tx_action.index);
+ break;
+ case NIX_TX_ACTIONOP_MCAST:
+ seq_puts(s, "\taction: Multicast\n");
+ break;
+ case NIX_TX_ACTIONOP_DROP_VIOL:
+ seq_puts(s, "\taction: Lockdown Violation Drop\n");
+ break;
+ default:
+ break;
+ }
+ } else {
+ switch (rule->rx_action.op) {
+ case NIX_RX_ACTIONOP_DROP:
+ seq_puts(s, "\taction: Drop\n");
+ break;
+ case NIX_RX_ACTIONOP_UCAST:
+ seq_printf(s, "\taction: Direct to queue %d\n",
+ rule->rx_action.index);
+ break;
+ case NIX_RX_ACTIONOP_RSS:
+ seq_puts(s, "\taction: RSS\n");
+ break;
+ case NIX_RX_ACTIONOP_UCAST_IPSEC:
+ seq_puts(s, "\taction: Unicast ipsec\n");
+ break;
+ case NIX_RX_ACTIONOP_MCAST:
+ seq_puts(s, "\taction: Multicast\n");
+ break;
+ default:
+ break;
+ }
+ }
+}
+
+static const char *rvu_dbg_get_intf_name(int intf)
+{
+ switch (intf) {
+ case NIX_INTFX_RX(0):
+ return "NIX0_RX";
+ case NIX_INTFX_RX(1):
+ return "NIX1_RX";
+ case NIX_INTFX_TX(0):
+ return "NIX0_TX";
+ case NIX_INTFX_TX(1):
+ return "NIX1_TX";
+ default:
+ break;
+ }
+
+ return "unknown";
+}
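
Read together with is_npc_intf_tx()/is_npc_intf_rx() introduced later in this
patch, the interface numbering appears to keep the direction in bit 0 and the
NIX block in bit 1, i.e. NIX_INTFX_RX(a) == (a) << 1 and
NIX_INTFX_TX(a) == ((a) << 1) | 1. A sketch under that assumption reproduces
the four names above:

#include <stdio.h>

/* Assumed encoding, consistent with the parity checks in
 * is_npc_intf_tx()/is_npc_intf_rx(): bit 0 = direction, bit 1 = NIX block.
 */
#define NIX_INTFX_RX(a)	((a) << 1)
#define NIX_INTFX_TX(a)	(((a) << 1) | 1)

int main(void)
{
	for (int intf = NIX_INTFX_RX(0); intf <= NIX_INTFX_TX(1); intf++)
		printf("intf %d: NIX%d_%s\n", intf, intf >> 1,
		       (intf & 1) ? "TX" : "RX");
	return 0;
}
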
+
+static int rvu_dbg_npc_mcam_show_rules(struct seq_file *s, void *unused)
+{
+ struct rvu_npc_mcam_rule *iter;
+ struct rvu *rvu = s->private;
+ struct npc_mcam *mcam;
+ int pf, vf = -1;
+ int blkaddr;
+ u16 target;
+ u64 hits;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
+ if (blkaddr < 0)
+ return 0;
+
+ mcam = &rvu->hw->mcam;
+
+ mutex_lock(&mcam->lock);
+ list_for_each_entry(iter, &mcam->mcam_rules, list) {
+ pf = (iter->owner >> RVU_PFVF_PF_SHIFT) & RVU_PFVF_PF_MASK;
+ seq_printf(s, "\n\tInstalled by: PF%d ", pf);
+
+ if (iter->owner & RVU_PFVF_FUNC_MASK) {
+ vf = (iter->owner & RVU_PFVF_FUNC_MASK) - 1;
+ seq_printf(s, "VF%d", vf);
+ }
+ seq_puts(s, "\n");
+
+ seq_printf(s, "\tdirection: %s\n", is_npc_intf_rx(iter->intf) ?
+ "RX" : "TX");
+ seq_printf(s, "\tinterface: %s\n",
+ rvu_dbg_get_intf_name(iter->intf));
+ seq_printf(s, "\tmcam entry: %d\n", iter->entry);
+
+ rvu_dbg_npc_mcam_show_flows(s, iter);
+ if (iter->intf == NIX_INTF_RX) {
+ target = iter->rx_action.pf_func;
+ pf = (target >> RVU_PFVF_PF_SHIFT) & RVU_PFVF_PF_MASK;
+ seq_printf(s, "\tForward to: PF%d ", pf);
+
+ if (target & RVU_PFVF_FUNC_MASK) {
+ vf = (target & RVU_PFVF_FUNC_MASK) - 1;
+ seq_printf(s, "VF%d", vf);
+ }
+ seq_puts(s, "\n");
+ }
+
+ rvu_dbg_npc_mcam_show_action(s, iter);
+ seq_printf(s, "\tenabled: %s\n", iter->enable ? "yes" : "no");
+
+ if (!iter->has_cntr)
+ continue;
+ seq_printf(s, "\tcounter: %d\n", iter->cntr);
+ hits = rvu_read64(rvu, blkaddr, NPC_AF_MATCH_STATX(iter->cntr));
+ seq_printf(s, "\thits: %lld\n", hits);
+ }
+ mutex_unlock(&mcam->lock);
+
+ return 0;
+}
+
+RVU_DEBUG_SEQ_FOPS(npc_mcam_rules, npc_mcam_show_rules, NULL);
+
+static void rvu_dbg_npc_init(struct rvu *rvu)
+{
rvu->rvu_dbg.npc = debugfs_create_dir("npc", rvu->rvu_dbg.root);
- if (!rvu->rvu_dbg.npc)
- return;
- pfile = debugfs_create_file("mcam_info", 0444, rvu->rvu_dbg.npc,
- rvu, &rvu_dbg_npc_mcam_info_fops);
- if (!pfile)
- goto create_failed;
+ debugfs_create_file("mcam_info", 0444, rvu->rvu_dbg.npc, rvu,
+ &rvu_dbg_npc_mcam_info_fops);
+ debugfs_create_file("mcam_rules", 0444, rvu->rvu_dbg.npc, rvu,
+ &rvu_dbg_npc_mcam_rules_fops);
+ debugfs_create_file("rx_miss_act_stats", 0444, rvu->rvu_dbg.npc, rvu,
+ &rvu_dbg_npc_rx_miss_act_fops);
+}
- pfile = debugfs_create_file("rx_miss_act_stats", 0444, rvu->rvu_dbg.npc,
- rvu, &rvu_dbg_npc_rx_miss_act_fops);
- if (!pfile)
- goto create_failed;
+/* CPT debugfs APIs */
+static int cpt_eng_sts_display(struct seq_file *filp, u8 eng_type)
+{
+ struct rvu *rvu = filp->private;
+ u64 busy_sts = 0, free_sts = 0;
+ u32 e_min = 0, e_max = 0, e, i;
+ u16 max_ses, max_ies, max_aes;
+ int blkaddr;
+ u64 reg;
- return;
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_CPT, 0);
+ if (blkaddr < 0)
+ return -ENODEV;
+
+ reg = rvu_read64(rvu, blkaddr, CPT_AF_CONSTANTS1);
+ max_ses = reg & 0xffff;
+ max_ies = (reg >> 16) & 0xffff;
+ max_aes = (reg >> 32) & 0xffff;
+
+ switch (eng_type) {
+ case CPT_AE_TYPE:
+ e_min = max_ses + max_ies;
+ e_max = max_ses + max_ies + max_aes;
+ break;
+ case CPT_SE_TYPE:
+ e_min = 0;
+ e_max = max_ses;
+ break;
+ case CPT_IE_TYPE:
+ e_min = max_ses;
+ e_max = max_ses + max_ies;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ for (e = e_min, i = 0; e < e_max; e++, i++) {
+ reg = rvu_read64(rvu, blkaddr, CPT_AF_EXEX_STS(e));
+ if (reg & 0x1)
+ busy_sts |= 1ULL << i;
+
+ if (reg & 0x2)
+ free_sts |= 1ULL << i;
+ }
+ seq_printf(filp, "FREE STS : 0x%016llx\n", free_sts);
+ seq_printf(filp, "BUSY STS : 0x%016llx\n", busy_sts);
-create_failed:
- dev_err(dev, "Failed to create debugfs dir/file for NPC\n");
- debugfs_remove_recursive(rvu->rvu_dbg.npc);
+ return 0;
}
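
CPT_AF_CONSTANTS1 packs the per-type engine counts, and the engines are laid
out back to back: SEs first, then IEs, then AEs. A worked decode of the index
ranges computed above, with a hypothetical CONSTANTS1 value:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Hypothetical CPT_AF_CONSTANTS1: 16 SEs, 8 IEs, 4 AEs */
	uint64_t reg = (4ULL << 32) | (8ULL << 16) | 16ULL;
	int max_ses = reg & 0xffff;
	int max_ies = (reg >> 16) & 0xffff;
	int max_aes = (reg >> 32) & 0xffff;

	/* Same [e_min, e_max) ranges as cpt_eng_sts_display() */
	printf("SE: [0, %d)\n", max_ses);
	printf("IE: [%d, %d)\n", max_ses, max_ses + max_ies);
	printf("AE: [%d, %d)\n", max_ses + max_ies,
	       max_ses + max_ies + max_aes);
	return 0;
}
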
-void rvu_dbg_init(struct rvu *rvu)
+static int rvu_dbg_cpt_ae_sts_display(struct seq_file *filp, void *unused)
{
- struct device *dev = &rvu->pdev->dev;
- struct dentry *pfile;
+ return cpt_eng_sts_display(filp, CPT_AE_TYPE);
+}
- rvu->rvu_dbg.root = debugfs_create_dir(DEBUGFS_DIR_NAME, NULL);
- if (!rvu->rvu_dbg.root) {
- dev_err(rvu->dev, "%s failed\n", __func__);
- return;
+RVU_DEBUG_SEQ_FOPS(cpt_ae_sts, cpt_ae_sts_display, NULL);
+
+static int rvu_dbg_cpt_se_sts_display(struct seq_file *filp, void *unused)
+{
+ return cpt_eng_sts_display(filp, CPT_SE_TYPE);
+}
+
+RVU_DEBUG_SEQ_FOPS(cpt_se_sts, cpt_se_sts_display, NULL);
+
+static int rvu_dbg_cpt_ie_sts_display(struct seq_file *filp, void *unused)
+{
+ return cpt_eng_sts_display(filp, CPT_IE_TYPE);
+}
+
+RVU_DEBUG_SEQ_FOPS(cpt_ie_sts, cpt_ie_sts_display, NULL);
+
+static int rvu_dbg_cpt_engines_info_display(struct seq_file *filp, void *unused)
+{
+ struct rvu *rvu = filp->private;
+ u16 max_ses, max_ies, max_aes;
+ u32 e_max, e;
+ int blkaddr;
+ u64 reg;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_CPT, 0);
+ if (blkaddr < 0)
+ return -ENODEV;
+
+ reg = rvu_read64(rvu, blkaddr, CPT_AF_CONSTANTS1);
+ max_ses = reg & 0xffff;
+ max_ies = (reg >> 16) & 0xffff;
+ max_aes = (reg >> 32) & 0xffff;
+
+ e_max = max_ses + max_ies + max_aes;
+
+ seq_puts(filp, "===========================================\n");
+ for (e = 0; e < e_max; e++) {
+ reg = rvu_read64(rvu, blkaddr, CPT_AF_EXEX_CTL2(e));
+ seq_printf(filp, "CPT Engine[%u] Group Enable 0x%02llx\n", e,
+ reg & 0xff);
+ reg = rvu_read64(rvu, blkaddr, CPT_AF_EXEX_ACTIVE(e));
+ seq_printf(filp, "CPT Engine[%u] Active Info 0x%llx\n", e,
+ reg);
+ reg = rvu_read64(rvu, blkaddr, CPT_AF_EXEX_CTL(e));
+ seq_printf(filp, "CPT Engine[%u] Control 0x%llx\n", e,
+ reg);
+ seq_puts(filp, "===========================================\n");
+ }
+ return 0;
+}
+
+RVU_DEBUG_SEQ_FOPS(cpt_engines_info, cpt_engines_info_display, NULL);
+
+static int rvu_dbg_cpt_lfs_info_display(struct seq_file *filp, void *unused)
+{
+ struct rvu *rvu = filp->private;
+ struct rvu_hwinfo *hw = rvu->hw;
+ struct rvu_block *block;
+ int blkaddr;
+ u64 reg;
+ u32 lf;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_CPT, 0);
+ if (blkaddr < 0)
+ return -ENODEV;
+
+ block = &hw->block[blkaddr];
+ if (!block->lf.bmap)
+ return -ENODEV;
+
+ seq_puts(filp, "===========================================\n");
+ for (lf = 0; lf < block->lf.max; lf++) {
+ reg = rvu_read64(rvu, blkaddr, CPT_AF_LFX_CTL(lf));
+ seq_printf(filp, "CPT Lf[%u] CTL 0x%llx\n", lf, reg);
+ reg = rvu_read64(rvu, blkaddr, CPT_AF_LFX_CTL2(lf));
+ seq_printf(filp, "CPT Lf[%u] CTL2 0x%llx\n", lf, reg);
+ reg = rvu_read64(rvu, blkaddr, CPT_AF_LFX_PTR_CTL(lf));
+ seq_printf(filp, "CPT Lf[%u] PTR_CTL 0x%llx\n", lf, reg);
+ reg = rvu_read64(rvu, blkaddr, block->lfcfg_reg |
+ (lf << block->lfshift));
+ seq_printf(filp, "CPT Lf[%u] CFG 0x%llx\n", lf, reg);
+ seq_puts(filp, "===========================================\n");
}
- pfile = debugfs_create_file("rsrc_alloc", 0444, rvu->rvu_dbg.root, rvu,
- &rvu_dbg_rsrc_status_fops);
- if (!pfile)
- goto create_failed;
+ return 0;
+}
+
+RVU_DEBUG_SEQ_FOPS(cpt_lfs_info, cpt_lfs_info_display, NULL);
+
+static int rvu_dbg_cpt_err_info_display(struct seq_file *filp, void *unused)
+{
+ struct rvu *rvu = filp->private;
+ u64 reg0, reg1;
+ int blkaddr;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_CPT, 0);
+ if (blkaddr < 0)
+ return -ENODEV;
+
+ reg0 = rvu_read64(rvu, blkaddr, CPT_AF_FLTX_INT(0));
+ reg1 = rvu_read64(rvu, blkaddr, CPT_AF_FLTX_INT(1));
+ seq_printf(filp, "CPT_AF_FLTX_INT: 0x%llx 0x%llx\n", reg0, reg1);
+ reg0 = rvu_read64(rvu, blkaddr, CPT_AF_PSNX_EXE(0));
+ reg1 = rvu_read64(rvu, blkaddr, CPT_AF_PSNX_EXE(1));
+ seq_printf(filp, "CPT_AF_PSNX_EXE: 0x%llx 0x%llx\n", reg0, reg1);
+ reg0 = rvu_read64(rvu, blkaddr, CPT_AF_PSNX_LF(0));
+ seq_printf(filp, "CPT_AF_PSNX_LF: 0x%llx\n", reg0);
+ reg0 = rvu_read64(rvu, blkaddr, CPT_AF_RVU_INT);
+ seq_printf(filp, "CPT_AF_RVU_INT: 0x%llx\n", reg0);
+ reg0 = rvu_read64(rvu, blkaddr, CPT_AF_RAS_INT);
+ seq_printf(filp, "CPT_AF_RAS_INT: 0x%llx\n", reg0);
+ reg0 = rvu_read64(rvu, blkaddr, CPT_AF_EXE_ERR_INFO);
+ seq_printf(filp, "CPT_AF_EXE_ERR_INFO: 0x%llx\n", reg0);
+
+ return 0;
+}
+
+RVU_DEBUG_SEQ_FOPS(cpt_err_info, cpt_err_info_display, NULL);
+
+static int rvu_dbg_cpt_pc_display(struct seq_file *filp, void *unused)
+{
+ struct rvu *rvu;
+ int blkaddr;
+ u64 reg;
+
+ rvu = filp->private;
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_CPT, 0);
+ if (blkaddr < 0)
+ return -ENODEV;
+
+ reg = rvu_read64(rvu, blkaddr, CPT_AF_INST_REQ_PC);
+ seq_printf(filp, "CPT instruction requests %llu\n", reg);
+ reg = rvu_read64(rvu, blkaddr, CPT_AF_INST_LATENCY_PC);
+ seq_printf(filp, "CPT instruction latency %llu\n", reg);
+ reg = rvu_read64(rvu, blkaddr, CPT_AF_RD_REQ_PC);
+ seq_printf(filp, "CPT NCB read requests %llu\n", reg);
+ reg = rvu_read64(rvu, blkaddr, CPT_AF_RD_LATENCY_PC);
+ seq_printf(filp, "CPT NCB read latency %llu\n", reg);
+ reg = rvu_read64(rvu, blkaddr, CPT_AF_RD_UC_PC);
+ seq_printf(filp, "CPT read requests caused by UC fills %llu\n", reg);
+ reg = rvu_read64(rvu, blkaddr, CPT_AF_ACTIVE_CYCLES_PC);
+ seq_printf(filp, "CPT active cycles pc %llu\n", reg);
+ reg = rvu_read64(rvu, blkaddr, CPT_AF_CPTCLK_CNT);
+ seq_printf(filp, "CPT clock count pc %llu\n", reg);
+
+ return 0;
+}
+
+RVU_DEBUG_SEQ_FOPS(cpt_pc, cpt_pc_display, NULL);
+
+static void rvu_dbg_cpt_init(struct rvu *rvu)
+{
+ if (!is_block_implemented(rvu->hw, BLKADDR_CPT0))
+ return;
+
+ rvu->rvu_dbg.cpt = debugfs_create_dir("cpt", rvu->rvu_dbg.root);
+
+ debugfs_create_file("cpt_pc", 0600, rvu->rvu_dbg.cpt, rvu,
+ &rvu_dbg_cpt_pc_fops);
+ debugfs_create_file("cpt_ae_sts", 0600, rvu->rvu_dbg.cpt, rvu,
+ &rvu_dbg_cpt_ae_sts_fops);
+ debugfs_create_file("cpt_se_sts", 0600, rvu->rvu_dbg.cpt, rvu,
+ &rvu_dbg_cpt_se_sts_fops);
+ debugfs_create_file("cpt_ie_sts", 0600, rvu->rvu_dbg.cpt, rvu,
+ &rvu_dbg_cpt_ie_sts_fops);
+ debugfs_create_file("cpt_engines_info", 0600, rvu->rvu_dbg.cpt, rvu,
+ &rvu_dbg_cpt_engines_info_fops);
+ debugfs_create_file("cpt_lfs_info", 0600, rvu->rvu_dbg.cpt, rvu,
+ &rvu_dbg_cpt_lfs_info_fops);
+ debugfs_create_file("cpt_err_info", 0600, rvu->rvu_dbg.cpt, rvu,
+ &rvu_dbg_cpt_err_info_fops);
+}
+
+void rvu_dbg_init(struct rvu *rvu)
+{
+ rvu->rvu_dbg.root = debugfs_create_dir(DEBUGFS_DIR_NAME, NULL);
+
+ debugfs_create_file("rsrc_alloc", 0444, rvu->rvu_dbg.root, rvu,
+ &rvu_dbg_rsrc_status_fops);
+ debugfs_create_file("rvu_pf_cgx_map", 0444, rvu->rvu_dbg.root, rvu,
+ &rvu_dbg_rvu_pf_cgx_map_fops);
rvu_dbg_npa_init(rvu);
- rvu_dbg_nix_init(rvu);
+ rvu_dbg_nix_init(rvu, BLKADDR_NIX0);
+
+ rvu_dbg_nix_init(rvu, BLKADDR_NIX1);
rvu_dbg_cgx_init(rvu);
rvu_dbg_npc_init(rvu);
-
- return;
-
-create_failed:
- dev_err(dev, "Failed to create debugfs dir\n");
- debugfs_remove_recursive(rvu->rvu_dbg.root);
+ rvu_dbg_cpt_init(rvu);
}
void rvu_dbg_exit(struct rvu *rvu)
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
index 21a89dd76d3c..a8dfbb6d1774 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
@@ -17,6 +17,7 @@
#include "npc.h"
#include "cgx.h"
+static void nix_free_tx_vtag_entries(struct rvu *rvu, u16 pcifunc);
static int rvu_nix_get_bpid(struct rvu *rvu, struct nix_bp_cfg_req *req,
int type, int chan_id);
@@ -68,6 +69,23 @@ struct mce {
u16 pcifunc;
};
+int rvu_get_next_nix_blkaddr(struct rvu *rvu, int blkaddr)
+{
+ int i = 0;
+
+ /* If blkaddr is 0, return the first NIX block address */
+ if (blkaddr == 0)
+ return rvu->nix_blkaddr[blkaddr];
+
+ while (i + 1 < MAX_NIX_BLKS) {
+ if (rvu->nix_blkaddr[i] == blkaddr)
+ return rvu->nix_blkaddr[i + 1];
+ i++;
+ }
+
+ return 0;
+}
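
Callers treat this function as a cursor: seed it with 0 and feed each result
back in until it returns 0, as rvu_get_nixlf_count() does just below. A
standalone model of the idiom (the block addresses are hypothetical
placeholders for BLKADDR_NIX0/BLKADDR_NIX1):

#include <stdio.h>

#define MAX_NIX_BLKS 2

static const int nix_blkaddr[MAX_NIX_BLKS] = { 4, 10 };	/* hypothetical */

static int next_nix_blkaddr(int blkaddr)
{
	if (blkaddr == 0)
		return nix_blkaddr[0];
	for (int i = 0; i + 1 < MAX_NIX_BLKS; i++)
		if (nix_blkaddr[i] == blkaddr)
			return nix_blkaddr[i + 1];
	return 0;
}

int main(void)
{
	int blkaddr = 0;

	/* Visits NIX0, then NIX1 if present, then stops */
	while ((blkaddr = next_nix_blkaddr(blkaddr)))
		printf("visit NIX block %d\n", blkaddr);
	return 0;
}
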
+
bool is_nixlf_attached(struct rvu *rvu, u16 pcifunc)
{
struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
@@ -81,14 +99,16 @@ bool is_nixlf_attached(struct rvu *rvu, u16 pcifunc)
int rvu_get_nixlf_count(struct rvu *rvu)
{
+ int blkaddr = 0, max = 0;
struct rvu_block *block;
- int blkaddr;
- blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, 0);
- if (blkaddr < 0)
- return 0;
- block = &rvu->hw->block[blkaddr];
- return block->lf.max;
+ blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr);
+ while (blkaddr) {
+ block = &rvu->hw->block[blkaddr];
+ max += block->lf.max;
+ blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr);
+ }
+ return max;
}
int nix_get_nixlf(struct rvu *rvu, u16 pcifunc, int *nixlf, int *nix_blkaddr)
@@ -130,11 +150,18 @@ static u16 nix_alloc_mce_list(struct nix_mcast *mcast, int count)
return idx;
}
-static inline struct nix_hw *get_nix_hw(struct rvu_hwinfo *hw, int blkaddr)
+struct nix_hw *get_nix_hw(struct rvu_hwinfo *hw, int blkaddr)
{
- if (blkaddr == BLKADDR_NIX0 && hw->nix0)
- return hw->nix0;
+ int nix_blkaddr = 0, i = 0;
+ struct rvu *rvu = hw->rvu;
+ nix_blkaddr = rvu_get_next_nix_blkaddr(rvu, nix_blkaddr);
+ while (nix_blkaddr) {
+ if (blkaddr == nix_blkaddr && hw->nix)
+ return &hw->nix[i];
+ nix_blkaddr = rvu_get_next_nix_blkaddr(rvu, nix_blkaddr);
+ i++;
+ }
return NULL;
}
@@ -187,8 +214,8 @@ static bool is_valid_txschq(struct rvu *rvu, int blkaddr,
static int nix_interface_init(struct rvu *rvu, u16 pcifunc, int type, int nixlf)
{
struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
+ int pkind, pf, vf, lbkid;
u8 cgx_id, lmac_id;
- int pkind, pf, vf;
int err;
pf = rvu_get_pf(pcifunc);
@@ -221,13 +248,24 @@ static int nix_interface_init(struct rvu *rvu, u16 pcifunc, int type, int nixlf)
case NIX_INTF_TYPE_LBK:
vf = (pcifunc & RVU_PFVF_FUNC_MASK) - 1;
+ /* If the NIX1 block is present on the silicon, NIXes are
+ * assigned alternately to LBK interfaces: NIX0 should send
+ * packets on LBK link 1 channels and NIX1 should send on
+ * LBK link 0 channels for the communication between
+ * NIX0 and NIX1.
+ */
+ lbkid = 0;
+ if (rvu->hw->lbk_links > 1)
+ lbkid = vf & 0x1 ? 0 : 1;
+
/* Note that AF's VFs work in pairs and talk over consecutive
* loopback channels. Therefore, if an odd number of AF VFs is
* enabled, the last VF remains with no pair.
*/
- pfvf->rx_chan_base = NIX_CHAN_LBK_CHX(0, vf);
- pfvf->tx_chan_base = vf & 0x1 ? NIX_CHAN_LBK_CHX(0, vf - 1) :
- NIX_CHAN_LBK_CHX(0, vf + 1);
+ pfvf->rx_chan_base = NIX_CHAN_LBK_CHX(lbkid, vf);
+ pfvf->tx_chan_base = vf & 0x1 ?
+ NIX_CHAN_LBK_CHX(lbkid, vf - 1) :
+ NIX_CHAN_LBK_CHX(lbkid, vf + 1);
pfvf->rx_chan_cnt = 1;
pfvf->tx_chan_cnt = 1;
rvu_npc_install_promisc_entry(rvu, pcifunc, nixlf,
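
With both LBK links present (hw->lbk_links > 1), the assignment above puts
even-numbered AF VFs on LBK link 1 and odd-numbered ones on link 0, each VF
transmitting into its partner's channel index. A standalone sketch of just
the channel math (the printed numbers are the CHX indices passed to
NIX_CHAN_LBK_CHX(), not absolute channel bases):

#include <stdio.h>

int main(void)
{
	for (int vf = 0; vf < 4; vf++) {
		int lbkid = (vf & 1) ? 0 : 1;		/* as in nix_interface_init() */
		int rx = vf;				/* NIX_CHAN_LBK_CHX(lbkid, vf) */
		int tx = (vf & 1) ? vf - 1 : vf + 1;	/* partner VF's channel */

		printf("VF%d: link%d rx-chan %d tx-chan %d\n",
		       vf, lbkid, rx, tx);
	}
	return 0;
}
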
@@ -265,7 +303,6 @@ static void nix_interface_deinit(struct rvu *rvu, u16 pcifunc, u8 nixlf)
pfvf->maxlen = 0;
pfvf->minlen = 0;
- pfvf->rxvlan = false;
/* Remove this PF_FUNC from bcast pkt replication list */
err = nix_update_bcast_mce_list(rvu, pcifunc, false);
@@ -612,8 +649,9 @@ static int nix_aq_enqueue_wait(struct rvu *rvu, struct rvu_block *block,
return 0;
}
-static int rvu_nix_aq_enq_inst(struct rvu *rvu, struct nix_aq_enq_req *req,
- struct nix_aq_enq_rsp *rsp)
+static int rvu_nix_blk_aq_enq_inst(struct rvu *rvu, struct nix_hw *nix_hw,
+ struct nix_aq_enq_req *req,
+ struct nix_aq_enq_rsp *rsp)
{
struct rvu_hwinfo *hw = rvu->hw;
u16 pcifunc = req->hdr.pcifunc;
@@ -626,10 +664,7 @@ static int rvu_nix_aq_enq_inst(struct rvu *rvu, struct nix_aq_enq_req *req,
bool ena;
u64 cfg;
- blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
- if (blkaddr < 0)
- return NIX_AF_ERR_AF_LF_INVALID;
-
+ blkaddr = nix_hw->blkaddr;
block = &hw->block[blkaddr];
aq = block->aq;
if (!aq) {
@@ -669,8 +704,9 @@ static int rvu_nix_aq_enq_inst(struct rvu *rvu, struct nix_aq_enq_req *req,
break;
case NIX_AQ_CTYPE_MCE:
cfg = rvu_read64(rvu, blkaddr, NIX_AF_RX_MCAST_CFG);
+
/* Check if index exceeds MCE list length */
- if (!hw->nix0->mcast.mce_ctx ||
+ if (!nix_hw->mcast.mce_ctx ||
(req->qidx >= (256UL << (cfg & 0xF))))
rc = NIX_AF_ERR_AQ_ENQUEUE;
@@ -832,6 +868,23 @@ static int rvu_nix_aq_enq_inst(struct rvu *rvu, struct nix_aq_enq_req *req,
return 0;
}
+static int rvu_nix_aq_enq_inst(struct rvu *rvu, struct nix_aq_enq_req *req,
+ struct nix_aq_enq_rsp *rsp)
+{
+ struct nix_hw *nix_hw;
+ int blkaddr;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, req->hdr.pcifunc);
+ if (blkaddr < 0)
+ return NIX_AF_ERR_AF_LF_INVALID;
+
+ nix_hw = get_nix_hw(rvu->hw, blkaddr);
+ if (!nix_hw)
+ return -EINVAL;
+
+ return rvu_nix_blk_aq_enq_inst(rvu, nix_hw, req, rsp);
+}
+
static const char *nix_get_ctx_name(int ctype)
{
switch (ctype) {
@@ -1129,6 +1182,10 @@ int rvu_mbox_handler_nix_lf_alloc(struct rvu *rvu,
/* Config Rx pkt length, csum checks and apad enable / disable */
rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_CFG(nixlf), req->rx_cfg);
+ /* Configure pkind for TX parse config */
+ cfg = NPC_TX_DEF_PKIND;
+ rvu_write64(rvu, blkaddr, NIX_AF_LFX_TX_PARSE_CFG(nixlf), cfg);
+
intf = is_afvf(pcifunc) ? NIX_INTF_TYPE_LBK : NIX_INTF_TYPE_CGX;
err = nix_interface_init(rvu, pcifunc, intf, nixlf);
if (err)
@@ -1137,6 +1194,11 @@ int rvu_mbox_handler_nix_lf_alloc(struct rvu *rvu,
/* Disable NPC entries as NIXLF's contexts are not initialized yet */
rvu_npc_disable_default_entries(rvu, pcifunc, nixlf);
+ /* Configure RX VTAG Type 7 (strip) for vf vlan */
+ rvu_write64(rvu, blkaddr,
+ NIX_AF_LFX_RX_VTAG_TYPEX(nixlf, NIX_AF_LFX_RX_VTAG_TYPE7),
+ VTAGSIZE_T4 | VTAG_STRIP);
+
goto exit;
free_mem:
@@ -1164,10 +1226,14 @@ exit:
cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST2);
rsp->qints = ((cfg >> 12) & 0xFFF);
rsp->cints = ((cfg >> 24) & 0xFFF);
+ rsp->cgx_links = hw->cgx_links;
+ rsp->lbk_links = hw->lbk_links;
+ rsp->sdp_links = hw->sdp_links;
+
return rc;
}
-int rvu_mbox_handler_nix_lf_free(struct rvu *rvu, struct msg_req *req,
+int rvu_mbox_handler_nix_lf_free(struct rvu *rvu, struct nix_lf_free_req *req,
struct msg_rsp *rsp)
{
struct rvu_hwinfo *hw = rvu->hw;
@@ -1186,6 +1252,15 @@ int rvu_mbox_handler_nix_lf_free(struct rvu *rvu, struct msg_req *req,
if (nixlf < 0)
return NIX_AF_ERR_AF_LF_INVALID;
+ if (req->flags & NIX_LF_DISABLE_FLOWS)
+ rvu_npc_disable_mcam_entries(rvu, pcifunc, nixlf);
+ else
+ rvu_npc_free_mcam_entries(rvu, pcifunc, nixlf);
+
+ /* Free any tx vtag def entries used by this NIX LF */
+ if (!(req->flags & NIX_LF_DONT_FREE_TX_VTAG))
+ nix_free_tx_vtag_entries(rvu, pcifunc);
+
nix_interface_deinit(rvu, pcifunc, nixlf);
/* Reset this NIX LF */
@@ -1914,9 +1989,14 @@ static int nix_rx_vtag_cfg(struct rvu *rvu, int nixlf, int blkaddr,
{
u64 regval = req->vtag_size;
- if (req->rx.vtag_type > 7 || req->vtag_size > VTAGSIZE_T8)
+ if (req->rx.vtag_type > NIX_AF_LFX_RX_VTAG_TYPE7 ||
+ req->vtag_size > VTAGSIZE_T8)
return -EINVAL;
+ /* RX VTAG Type 7 reserved for vf vlan */
+ if (req->rx.vtag_type == NIX_AF_LFX_RX_VTAG_TYPE7)
+ return NIX_AF_ERR_RX_VTAG_INUSE;
+
if (req->rx.capture_vtag)
regval |= BIT_ULL(5);
if (req->rx.strip_vtag)
@@ -1927,9 +2007,149 @@ static int nix_rx_vtag_cfg(struct rvu *rvu, int nixlf, int blkaddr,
return 0;
}
+static int nix_tx_vtag_free(struct rvu *rvu, int blkaddr,
+ u16 pcifunc, int index)
+{
+ struct nix_hw *nix_hw = get_nix_hw(rvu->hw, blkaddr);
+ struct nix_txvlan *vlan = &nix_hw->txvlan;
+
+ if (vlan->entry2pfvf_map[index] != pcifunc)
+ return NIX_AF_ERR_PARAM;
+
+ rvu_write64(rvu, blkaddr,
+ NIX_AF_TX_VTAG_DEFX_DATA(index), 0x0ull);
+ rvu_write64(rvu, blkaddr,
+ NIX_AF_TX_VTAG_DEFX_CTL(index), 0x0ull);
+
+ vlan->entry2pfvf_map[index] = 0;
+ rvu_free_rsrc(&vlan->rsrc, index);
+
+ return 0;
+}
+
+static void nix_free_tx_vtag_entries(struct rvu *rvu, u16 pcifunc)
+{
+ struct nix_txvlan *vlan;
+ struct nix_hw *nix_hw;
+ int index, blkaddr;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
+ if (blkaddr < 0)
+ return;
+
+ nix_hw = get_nix_hw(rvu->hw, blkaddr);
+ vlan = &nix_hw->txvlan;
+
+ mutex_lock(&vlan->rsrc_lock);
+ /* Scan all the entries and free the ones mapped to 'pcifunc' */
+ for (index = 0; index < vlan->rsrc.max; index++) {
+ if (vlan->entry2pfvf_map[index] == pcifunc)
+ nix_tx_vtag_free(rvu, blkaddr, pcifunc, index);
+ }
+ mutex_unlock(&vlan->rsrc_lock);
+}
+
+static int nix_tx_vtag_alloc(struct rvu *rvu, int blkaddr,
+ u64 vtag, u8 size)
+{
+ struct nix_hw *nix_hw = get_nix_hw(rvu->hw, blkaddr);
+ struct nix_txvlan *vlan = &nix_hw->txvlan;
+ u64 regval;
+ int index;
+
+ mutex_lock(&vlan->rsrc_lock);
+
+ index = rvu_alloc_rsrc(&vlan->rsrc);
+ if (index < 0) {
+ mutex_unlock(&vlan->rsrc_lock);
+ return index;
+ }
+
+ mutex_unlock(&vlan->rsrc_lock);
+
+ regval = size ? vtag : vtag << 32;
+
+ rvu_write64(rvu, blkaddr,
+ NIX_AF_TX_VTAG_DEFX_DATA(index), regval);
+ rvu_write64(rvu, blkaddr,
+ NIX_AF_TX_VTAG_DEFX_CTL(index), size);
+
+ return index;
+}
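
The "regval = size ? vtag : vtag << 32" above decides where the default tag
sits in NIX_AF_TX_VTAG_DEFX_DATA. On the assumption that size == 0 denotes a
4-byte tag (VTAGSIZE_T4) and a non-zero size an 8-byte one, the placement
works out as:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t vtag = 0x8100ABCD;	/* hypothetical TPID + TCI */

	/* 4-byte tag: occupies bits [63:32] of DEFX_DATA */
	uint64_t data_t4 = vtag << 32;
	/* 8-byte tag: occupies the whole register */
	uint64_t data_t8 = vtag;

	printf("T4 DEFX_DATA = 0x%016" PRIx64 "\n", data_t4);
	printf("T8 DEFX_DATA = 0x%016" PRIx64 "\n", data_t8);
	return 0;
}
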
+
+static int nix_tx_vtag_decfg(struct rvu *rvu, int blkaddr,
+ struct nix_vtag_config *req)
+{
+ struct nix_hw *nix_hw = get_nix_hw(rvu->hw, blkaddr);
+ struct nix_txvlan *vlan = &nix_hw->txvlan;
+ u16 pcifunc = req->hdr.pcifunc;
+ int idx0 = req->tx.vtag0_idx;
+ int idx1 = req->tx.vtag1_idx;
+ int err = 0;
+
+ if (req->tx.free_vtag0 && req->tx.free_vtag1)
+ if (vlan->entry2pfvf_map[idx0] != pcifunc ||
+ vlan->entry2pfvf_map[idx1] != pcifunc)
+ return NIX_AF_ERR_PARAM;
+
+ mutex_lock(&vlan->rsrc_lock);
+
+ if (req->tx.free_vtag0) {
+ err = nix_tx_vtag_free(rvu, blkaddr, pcifunc, idx0);
+ if (err)
+ goto exit;
+ }
+
+ if (req->tx.free_vtag1)
+ err = nix_tx_vtag_free(rvu, blkaddr, pcifunc, idx1);
+
+exit:
+ mutex_unlock(&vlan->rsrc_lock);
+ return err;
+}
+
+static int nix_tx_vtag_cfg(struct rvu *rvu, int blkaddr,
+ struct nix_vtag_config *req,
+ struct nix_vtag_config_rsp *rsp)
+{
+ struct nix_hw *nix_hw = get_nix_hw(rvu->hw, blkaddr);
+ struct nix_txvlan *vlan = &nix_hw->txvlan;
+ u16 pcifunc = req->hdr.pcifunc;
+
+ if (req->tx.cfg_vtag0) {
+ rsp->vtag0_idx =
+ nix_tx_vtag_alloc(rvu, blkaddr,
+ req->tx.vtag0, req->vtag_size);
+
+ if (rsp->vtag0_idx < 0)
+ return NIX_AF_ERR_TX_VTAG_NOSPC;
+
+ vlan->entry2pfvf_map[rsp->vtag0_idx] = pcifunc;
+ }
+
+ if (req->tx.cfg_vtag1) {
+ rsp->vtag1_idx =
+ nix_tx_vtag_alloc(rvu, blkaddr,
+ req->tx.vtag1, req->vtag_size);
+
+ if (rsp->vtag1_idx < 0)
+ goto err_free;
+
+ vlan->entry2pfvf_map[rsp->vtag1_idx] = pcifunc;
+ }
+
+ return 0;
+
+err_free:
+ if (req->tx.cfg_vtag0)
+ nix_tx_vtag_free(rvu, blkaddr, pcifunc, rsp->vtag0_idx);
+
+ return NIX_AF_ERR_TX_VTAG_NOSPC;
+}
+
int rvu_mbox_handler_nix_vtag_cfg(struct rvu *rvu,
struct nix_vtag_config *req,
- struct msg_rsp *rsp)
+ struct nix_vtag_config_rsp *rsp)
{
u16 pcifunc = req->hdr.pcifunc;
int blkaddr, nixlf, err;
@@ -1939,19 +2159,28 @@ int rvu_mbox_handler_nix_vtag_cfg(struct rvu *rvu,
return err;
if (req->cfg_type) {
+ /* rx vtag configuration */
err = nix_rx_vtag_cfg(rvu, nixlf, blkaddr, req);
if (err)
return NIX_AF_ERR_PARAM;
} else {
- /* TODO: handle tx vtag configuration */
- return 0;
+ /* tx vtag configuration */
+ if ((req->tx.cfg_vtag0 || req->tx.cfg_vtag1) &&
+ (req->tx.free_vtag0 || req->tx.free_vtag1))
+ return NIX_AF_ERR_PARAM;
+
+ if (req->tx.cfg_vtag0 || req->tx.cfg_vtag1)
+ return nix_tx_vtag_cfg(rvu, blkaddr, req, rsp);
+
+ if (req->tx.free_vtag0 || req->tx.free_vtag1)
+ return nix_tx_vtag_decfg(rvu, blkaddr, req);
}
return 0;
}
-static int nix_setup_mce(struct rvu *rvu, int mce, u8 op,
- u16 pcifunc, int next, bool eol)
+static int nix_blk_setup_mce(struct rvu *rvu, struct nix_hw *nix_hw,
+ int mce, u8 op, u16 pcifunc, int next, bool eol)
{
struct nix_aq_enq_req aq_req;
int err;
@@ -1971,7 +2200,7 @@ static int nix_setup_mce(struct rvu *rvu, int mce, u8 op,
/* All fields valid */
*(u64 *)(&aq_req.mce_mask) = ~0ULL;
- err = rvu_nix_aq_enq_inst(rvu, &aq_req, NULL);
+ err = rvu_nix_blk_aq_enq_inst(rvu, nix_hw, &aq_req, NULL);
if (err) {
dev_err(rvu->dev, "Failed to setup Bcast MCE for PF%d:VF%d\n",
rvu_get_pf(pcifunc), pcifunc & RVU_PFVF_FUNC_MASK);
@@ -2077,9 +2306,9 @@ int nix_update_bcast_mce_list(struct rvu *rvu, u16 pcifunc, bool add)
next_idx = idx + 1;
/* EOL should be set in last MCE */
- err = nix_setup_mce(rvu, idx, NIX_AQ_INSTOP_WRITE,
- mce->pcifunc, next_idx,
- (next_idx > last_idx) ? true : false);
+ err = nix_blk_setup_mce(rvu, nix_hw, idx, NIX_AQ_INSTOP_WRITE,
+ mce->pcifunc, next_idx,
+ (next_idx > last_idx) ? true : false);
if (err)
goto end;
idx++;
@@ -2108,6 +2337,11 @@ static int nix_setup_bcast_tables(struct rvu *rvu, struct nix_hw *nix_hw)
numvfs = (cfg >> 12) & 0xFF;
pfvf = &rvu->pf[pf];
+
+ /* Is this NIX0/1 block mapped to the PF? */
+ if (pfvf->nix_blkaddr != nix_hw->blkaddr)
+ continue;
+
/* Save the start MCE */
pfvf->bcast_mce_idx = nix_alloc_mce_list(mcast, numvfs + 1);
@@ -2122,9 +2356,10 @@ static int nix_setup_bcast_tables(struct rvu *rvu, struct nix_hw *nix_hw)
* Will be updated when a NIXLF is attached/detached to
* these PF/VFs.
*/
- err = nix_setup_mce(rvu, pfvf->bcast_mce_idx + idx,
- NIX_AQ_INSTOP_INIT,
- pcifunc, 0, true);
+ err = nix_blk_setup_mce(rvu, nix_hw,
+ pfvf->bcast_mce_idx + idx,
+ NIX_AQ_INSTOP_INIT,
+ pcifunc, 0, true);
if (err)
return err;
}
@@ -2176,6 +2411,31 @@ static int nix_setup_mcast(struct rvu *rvu, struct nix_hw *nix_hw, int blkaddr)
return nix_setup_bcast_tables(rvu, nix_hw);
}
+static int nix_setup_txvlan(struct rvu *rvu, struct nix_hw *nix_hw)
+{
+ struct nix_txvlan *vlan = &nix_hw->txvlan;
+ int err;
+
+ /* Allocate resource bitmap for tx vtag def registers */
+ vlan->rsrc.max = NIX_TX_VTAG_DEF_MAX;
+ err = rvu_alloc_bitmap(&vlan->rsrc);
+ if (err)
+ return -ENOMEM;
+
+ /* Alloc memory to save the entry-to-RVU-PFFUNC allocation mapping */
+ vlan->entry2pfvf_map = devm_kcalloc(rvu->dev, vlan->rsrc.max,
+ sizeof(u16), GFP_KERNEL);
+ if (!vlan->entry2pfvf_map)
+ goto free_mem;
+
+ mutex_init(&vlan->rsrc_lock);
+ return 0;
+
+free_mem:
+ kfree(vlan->rsrc.bmap);
+ return -ENOMEM;
+}
+
static int nix_setup_txschq(struct rvu *rvu, struct nix_hw *nix_hw, int blkaddr)
{
struct nix_txsch *txsch;
@@ -2366,6 +2626,13 @@ static int set_flowkey_fields(struct nix_rx_flowkey_alg *alg, u32 flow_cfg)
/* This should be set to 1, when SEL_CHAN is set */
field->bytesm1 = 1;
break;
+ case NIX_FLOW_KEY_TYPE_IPV4_PROTO:
+ field->lid = NPC_LID_LC;
+ field->hdr_offset = 9; /* offset */
+ field->bytesm1 = 0; /* 1 byte */
+ field->ltype_match = NPC_LT_LC_IP;
+ field->ltype_mask = 0xF;
+ break;
case NIX_FLOW_KEY_TYPE_IPV4:
case NIX_FLOW_KEY_TYPE_INNR_IPV4:
field->lid = NPC_LID_LC;
@@ -2680,6 +2947,7 @@ int rvu_mbox_handler_nix_set_mac_addr(struct rvu *rvu,
struct nix_set_mac_addr *req,
struct msg_rsp *rsp)
{
+ bool from_vf = req->hdr.pcifunc & RVU_PFVF_FUNC_MASK;
u16 pcifunc = req->hdr.pcifunc;
int blkaddr, nixlf, err;
struct rvu_pfvf *pfvf;
@@ -2690,13 +2958,15 @@ int rvu_mbox_handler_nix_set_mac_addr(struct rvu *rvu,
pfvf = rvu_get_pfvf(rvu, pcifunc);
+ /* A VF can't overwrite admin (PF) changes */
+ if (from_vf && pfvf->pf_set_vf_cfg)
+ return -EPERM;
+
ether_addr_copy(pfvf->mac_addr, req->mac_addr);
rvu_npc_install_ucast_entry(rvu, pcifunc, nixlf,
pfvf->rx_chan_base, req->mac_addr);
- rvu_npc_update_rxvlan(rvu, pcifunc, nixlf);
-
return 0;
}
@@ -2743,9 +3013,6 @@ int rvu_mbox_handler_nix_set_rx_mode(struct rvu *rvu, struct nix_rx_mode *req,
else
rvu_npc_install_promisc_entry(rvu, pcifunc, nixlf,
pfvf->rx_chan_base, allmulti);
-
- rvu_npc_update_rxvlan(rvu, pcifunc, nixlf);
-
return 0;
}
@@ -2882,65 +3149,6 @@ linkcfg:
return 0;
}
-int rvu_mbox_handler_nix_rxvlan_alloc(struct rvu *rvu, struct msg_req *req,
- struct msg_rsp *rsp)
-{
- struct npc_mcam_alloc_entry_req alloc_req = { };
- struct npc_mcam_alloc_entry_rsp alloc_rsp = { };
- struct npc_mcam_free_entry_req free_req = { };
- u16 pcifunc = req->hdr.pcifunc;
- int blkaddr, nixlf, err;
- struct rvu_pfvf *pfvf;
-
- /* LBK VFs do not have separate MCAM UCAST entry hence
- * skip allocating rxvlan for them
- */
- if (is_afvf(pcifunc))
- return 0;
-
- pfvf = rvu_get_pfvf(rvu, pcifunc);
- if (pfvf->rxvlan)
- return 0;
-
- /* alloc new mcam entry */
- alloc_req.hdr.pcifunc = pcifunc;
- alloc_req.count = 1;
-
- err = rvu_mbox_handler_npc_mcam_alloc_entry(rvu, &alloc_req,
- &alloc_rsp);
- if (err)
- return err;
-
- /* update entry to enable rxvlan offload */
- blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
- if (blkaddr < 0) {
- err = NIX_AF_ERR_AF_LF_INVALID;
- goto free_entry;
- }
-
- nixlf = rvu_get_lf(rvu, &rvu->hw->block[blkaddr], pcifunc, 0);
- if (nixlf < 0) {
- err = NIX_AF_ERR_AF_LF_INVALID;
- goto free_entry;
- }
-
- pfvf->rxvlan_index = alloc_rsp.entry_list[0];
- /* all it means is that rxvlan_index is valid */
- pfvf->rxvlan = true;
-
- err = rvu_npc_update_rxvlan(rvu, pcifunc, nixlf);
- if (err)
- goto free_entry;
-
- return 0;
-free_entry:
- free_req.hdr.pcifunc = pcifunc;
- free_req.entry = alloc_rsp.entry_list[0];
- rvu_mbox_handler_npc_mcam_free_entry(rvu, &free_req, rsp);
- pfvf->rxvlan = false;
- return err;
-}
-
int rvu_mbox_handler_nix_set_rx_cfg(struct rvu *rvu, struct nix_rx_cfg *req,
struct msg_rsp *rsp)
{
@@ -3109,17 +3317,15 @@ static int nix_aq_init(struct rvu *rvu, struct rvu_block *block)
return 0;
}
-int rvu_nix_init(struct rvu *rvu)
+static int rvu_nix_block_init(struct rvu *rvu, struct nix_hw *nix_hw)
{
const struct npc_lt_def_cfg *ltdefs;
struct rvu_hwinfo *hw = rvu->hw;
+ int blkaddr = nix_hw->blkaddr;
struct rvu_block *block;
- int blkaddr, err;
+ int err;
u64 cfg;
- blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, 0);
- if (blkaddr < 0)
- return 0;
block = &hw->block[blkaddr];
if (is_rvu_96xx_B0(rvu)) {
@@ -3153,7 +3359,7 @@ int rvu_nix_init(struct rvu *rvu)
hw->cgx = (cfg >> 12) & 0xF;
hw->lmac_per_cgx = (cfg >> 8) & 0xF;
hw->cgx_links = hw->cgx * hw->lmac_per_cgx;
- hw->lbk_links = 1;
+ hw->lbk_links = (cfg >> 24) & 0xF;
hw->sdp_links = 1;
/* Initialize admin queue */
@@ -3164,26 +3370,25 @@ int rvu_nix_init(struct rvu *rvu)
/* Restore CINT timer delay to HW reset values */
rvu_write64(rvu, blkaddr, NIX_AF_CINT_DELAY, 0x0ULL);
- if (blkaddr == BLKADDR_NIX0) {
- hw->nix0 = devm_kzalloc(rvu->dev,
- sizeof(struct nix_hw), GFP_KERNEL);
- if (!hw->nix0)
- return -ENOMEM;
+ if (is_block_implemented(hw, blkaddr)) {
+ err = nix_setup_txschq(rvu, nix_hw, blkaddr);
+ if (err)
+ return err;
- err = nix_setup_txschq(rvu, hw->nix0, blkaddr);
+ err = nix_af_mark_format_setup(rvu, nix_hw, blkaddr);
if (err)
return err;
- err = nix_af_mark_format_setup(rvu, hw->nix0, blkaddr);
+ err = nix_setup_mcast(rvu, nix_hw, blkaddr);
if (err)
return err;
- err = nix_setup_mcast(rvu, hw->nix0, blkaddr);
+ err = nix_setup_txvlan(rvu, nix_hw);
if (err)
return err;
/* Configure segmentation offload formats */
- nix_setup_lso(rvu, hw->nix0, blkaddr);
+ nix_setup_lso(rvu, nix_hw, blkaddr);
/* Config Outer/Inner L2, IP, TCP, UDP and SCTP NPC layer info.
* This helps HW protocol checker to identify headers
@@ -3236,23 +3441,45 @@ int rvu_nix_init(struct rvu *rvu)
return 0;
}
-void rvu_nix_freemem(struct rvu *rvu)
+int rvu_nix_init(struct rvu *rvu)
{
struct rvu_hwinfo *hw = rvu->hw;
- struct rvu_block *block;
+ struct nix_hw *nix_hw;
+ int blkaddr = 0, err;
+ int i = 0;
+
+ hw->nix = devm_kcalloc(rvu->dev, MAX_NIX_BLKS, sizeof(struct nix_hw),
+ GFP_KERNEL);
+ if (!hw->nix)
+ return -ENOMEM;
+
+ blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr);
+ while (blkaddr) {
+ nix_hw = &hw->nix[i];
+ nix_hw->rvu = rvu;
+ nix_hw->blkaddr = blkaddr;
+ err = rvu_nix_block_init(rvu, nix_hw);
+ if (err)
+ return err;
+ blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr);
+ i++;
+ }
+
+ return 0;
+}
+
+static void rvu_nix_block_freemem(struct rvu *rvu, int blkaddr,
+ struct rvu_block *block)
+{
struct nix_txsch *txsch;
struct nix_mcast *mcast;
+ struct nix_txvlan *vlan;
struct nix_hw *nix_hw;
- int blkaddr, lvl;
+ int lvl;
- blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, 0);
- if (blkaddr < 0)
- return;
-
- block = &hw->block[blkaddr];
rvu_aq_free(rvu, block->aq);
- if (blkaddr == BLKADDR_NIX0) {
+ if (is_block_implemented(rvu->hw, blkaddr)) {
nix_hw = get_nix_hw(rvu->hw, blkaddr);
if (!nix_hw)
return;
@@ -3262,6 +3489,11 @@ void rvu_nix_freemem(struct rvu *rvu)
kfree(txsch->schq.bmap);
}
+ vlan = &nix_hw->txvlan;
+ kfree(vlan->rsrc.bmap);
+ mutex_destroy(&vlan->rsrc_lock);
+ devm_kfree(rvu->dev, vlan->entry2pfvf_map);
+
mcast = &nix_hw->mcast;
qmem_free(rvu->dev, mcast->mce_ctx);
qmem_free(rvu->dev, mcast->mcast_buf);
@@ -3269,6 +3501,20 @@ void rvu_nix_freemem(struct rvu *rvu)
}
}
+void rvu_nix_freemem(struct rvu *rvu)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+ struct rvu_block *block;
+ int blkaddr = 0;
+
+ blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr);
+ while (blkaddr) {
+ block = &hw->block[blkaddr];
+ rvu_nix_block_freemem(rvu, blkaddr, block);
+ blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr);
+ }
+}
+
int rvu_mbox_handler_nix_lf_start_rx(struct rvu *rvu, struct msg_req *req,
struct msg_rsp *rsp)
{
@@ -3281,6 +3527,8 @@ int rvu_mbox_handler_nix_lf_start_rx(struct rvu *rvu, struct msg_req *req,
rvu_npc_enable_default_entries(rvu, pcifunc, nixlf);
+ npc_mcam_enable_flows(rvu, pcifunc);
+
return rvu_cgx_start_stop_io(rvu, pcifunc, true);
}
@@ -3296,6 +3544,8 @@ int rvu_mbox_handler_nix_lf_stop_rx(struct rvu *rvu, struct msg_req *req,
rvu_npc_disable_default_entries(rvu, pcifunc, nixlf);
+ npc_mcam_disable_flows(rvu, pcifunc);
+
return rvu_cgx_start_stop_io(rvu, pcifunc, false);
}
@@ -3308,6 +3558,8 @@ void rvu_nix_lf_teardown(struct rvu *rvu, u16 pcifunc, int blkaddr, int nixlf)
ctx_req.hdr.pcifunc = pcifunc;
/* Cleanup NPC MCAM entries, free Tx scheduler queues being used */
+ rvu_npc_disable_mcam_entries(rvu, pcifunc, nixlf);
+ rvu_npc_free_mcam_entries(rvu, pcifunc, nixlf);
nix_interface_deinit(rvu, pcifunc, nixlf);
nix_rx_sync(rvu, blkaddr);
nix_txschq_free(rvu, pcifunc);
@@ -3431,3 +3683,12 @@ int rvu_mbox_handler_nix_lso_format_cfg(struct rvu *rvu,
return 0;
}
+
+void rvu_nix_reset_mac(struct rvu_pfvf *pfvf, int pcifunc)
+{
+ bool from_vf = !!(pcifunc & RVU_PFVF_FUNC_MASK);
+
+ /* overwrite vf mac address with default_mac */
+ if (from_vf)
+ ether_addr_copy(pfvf->mac_addr, pfvf->default_mac);
+}
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c
index 511b01dd03ed..5cf9b7a907ae 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c
@@ -28,6 +28,8 @@
#define NPC_PARSE_RESULT_DMAC_OFFSET 8
#define NPC_HW_TSTAMP_OFFSET 8
+#define NPC_KEX_CHAN_MASK 0xFFFULL
+#define NPC_KEX_PF_FUNC_MASK 0xFFFFULL
static const char def_pfl_name[] = "default";
@@ -36,6 +38,81 @@ static void npc_mcam_free_all_entries(struct rvu *rvu, struct npc_mcam *mcam,
static void npc_mcam_free_all_counters(struct rvu *rvu, struct npc_mcam *mcam,
u16 pcifunc);
+bool is_npc_intf_tx(u8 intf)
+{
+ return !!(intf & 0x1);
+}
+
+bool is_npc_intf_rx(u8 intf)
+{
+ return !(intf & 0x1);
+}
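
These two helpers pin down the interface-numbering convention the rest of the patch relies on: even NPC interface numbers are RX and odd ones are TX, one pair per NIX block. A minimal sketch of that parity, with the two-block numbering assumed for illustration (it is not spelled out in this patch):

    /* Assumed numbering: 0 = NIX0 RX, 1 = NIX0 TX, 2 = NIX1 RX, 3 = NIX1 TX */
    static void npc_intf_parity_demo(void)
    {
            WARN_ON(!is_npc_intf_rx(0) || !is_npc_intf_tx(1));
            WARN_ON(!is_npc_intf_rx(2) || !is_npc_intf_tx(3));
    }
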
+
+bool is_npc_interface_valid(struct rvu *rvu, u8 intf)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+
+ return intf < hw->npc_intfs;
+}
+
+int rvu_npc_get_tx_nibble_cfg(struct rvu *rvu, u64 nibble_ena)
+{
+ /* Due to a HW issue in these silicon versions, parse nibble enable
+ * configuration has to be identical for both Rx and Tx interfaces.
+ */
+ if (is_rvu_96xx_B0(rvu))
+ return nibble_ena;
+ return 0;
+}
+
+static int npc_mcam_verify_pf_func(struct rvu *rvu,
+ struct mcam_entry *entry_data, u8 intf,
+ u16 pcifunc)
+{
+ u16 pf_func, pf_func_mask;
+
+ if (is_npc_intf_rx(intf))
+ return 0;
+
+ pf_func_mask = (entry_data->kw_mask[0] >> 32) &
+ NPC_KEX_PF_FUNC_MASK;
+ pf_func = (entry_data->kw[0] >> 32) & NPC_KEX_PF_FUNC_MASK;
+
+ pf_func = be16_to_cpu((__force __be16)pf_func);
+ if (pf_func_mask != NPC_KEX_PF_FUNC_MASK ||
+ ((pf_func & ~RVU_PFVF_FUNC_MASK) !=
+ (pcifunc & ~RVU_PFVF_FUNC_MASK)))
+ return -EINVAL;
+
+ return 0;
+}
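
On a little-endian host the PF_FUNC read out of kw[0] is byte-swapped relative to the CPU value, since it is extracted verbatim from the packet's NIX instruction header; the helper above swaps it back before comparing PF bits. A worked sketch with a hypothetical pcifunc, assuming the driver's usual layout with PF bits above RVU_PFVF_FUNC_MASK:

    /* Hypothetical: PF 1, no VF bits => pcifunc = 0x0400.
     * The TX key then carries the field as 0x0004.
     */
    static void npc_pf_func_swap_demo(void)
    {
            u16 pf_func = be16_to_cpu((__force __be16)0x0004);

            WARN_ON(pf_func != 0x0400);     /* PF bits now comparable */
    }
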
+
+int npc_mcam_verify_channel(struct rvu *rvu, u16 pcifunc, u8 intf, u16 channel)
+{
+ int pf = rvu_get_pf(pcifunc);
+ u8 cgx_id, lmac_id;
+ int base = 0, end;
+
+ if (is_npc_intf_tx(intf))
+ return 0;
+
+ if (is_afvf(pcifunc)) {
+ end = rvu_get_num_lbk_chans();
+ if (end < 0)
+ return -EINVAL;
+ } else {
+ rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
+ base = NIX_CHAN_CGX_LMAC_CHX(cgx_id, lmac_id, 0x0);
+ /* CGX mapped functions have a maximum of 16 channels */
+ end = NIX_CHAN_CGX_LMAC_CHX(cgx_id, lmac_id, 0xF);
+ }
+
+ if (channel < base || channel > end)
+ return -EINVAL;
+
+ return 0;
+}
+
void rvu_npc_set_pkind(struct rvu *rvu, int pkind, struct rvu_pfvf *pfvf)
{
int blkaddr;
@@ -94,6 +171,31 @@ int npc_config_ts_kpuaction(struct rvu *rvu, int pf, u16 pcifunc, bool enable)
return 0;
}
+static int npc_get_ucast_mcam_index(struct npc_mcam *mcam, u16 pcifunc,
+ int nixlf)
+{
+ struct rvu_hwinfo *hw = container_of(mcam, struct rvu_hwinfo, mcam);
+ struct rvu *rvu = hw->rvu;
+ int blkaddr = 0, max = 0;
+ struct rvu_block *block;
+ struct rvu_pfvf *pfvf;
+
+ pfvf = rvu_get_pfvf(rvu, pcifunc);
+ /* Given a PF/VF and NIX LF number, calculate the unicast mcam
+ * entry index based on the NIX block assigned to the PF/VF.
+ */
+ blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr);
+ while (blkaddr) {
+ if (pfvf->nix_blkaddr == blkaddr)
+ break;
+ block = &rvu->hw->block[blkaddr];
+ max += block->lf.max;
+ blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr);
+ }
+
+ return mcam->nixlf_offset + (max + nixlf) * RSVD_MCAM_ENTRIES_PER_NIXLF;
+}
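
The reserved ucast index is thus cumulative across NIX blocks: every block preceding the one the PF/VF is attached to contributes its full LF count to the offset. A worked example, with a hypothetical NIX0 exposing 64 LFs:

    /* nixlf 3 on NIX0: index = nixlf_offset + (0 + 3) * RSVD_MCAM_ENTRIES_PER_NIXLF
     * nixlf 3 on NIX1: index = nixlf_offset + (64 + 3) * RSVD_MCAM_ENTRIES_PER_NIXLF
     * (64 being the assumed block->lf.max of NIX0)
     */
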
+
static int npc_get_nixlf_mcam_index(struct npc_mcam *mcam,
u16 pcifunc, int nixlf, int type)
{
@@ -114,10 +216,10 @@ static int npc_get_nixlf_mcam_index(struct npc_mcam *mcam,
return index + 1;
}
- return (mcam->nixlf_offset + (nixlf * RSVD_MCAM_ENTRIES_PER_NIXLF));
+ return npc_get_ucast_mcam_index(mcam, pcifunc, nixlf);
}
-static int npc_get_bank(struct npc_mcam *mcam, int index)
+int npc_get_bank(struct npc_mcam *mcam, int index)
{
int bank = index / mcam->banksize;
@@ -139,8 +241,8 @@ static bool is_mcam_entry_enabled(struct rvu *rvu, struct npc_mcam *mcam,
return (cfg & 1);
}
-static void npc_enable_mcam_entry(struct rvu *rvu, struct npc_mcam *mcam,
- int blkaddr, int index, bool enable)
+void npc_enable_mcam_entry(struct rvu *rvu, struct npc_mcam *mcam,
+ int blkaddr, int index, bool enable)
{
int bank = npc_get_bank(mcam, index);
int actbank = bank;
@@ -257,6 +359,93 @@ static void npc_get_keyword(struct mcam_entry *entry, int idx,
*cam0 = ~*cam1 & kw_mask;
}
+static void npc_fill_entryword(struct mcam_entry *entry, int idx,
+ u64 cam0, u64 cam1)
+{
+ /* Similar to npc_get_keyword, but fills mcam_entry structure from
+ * CAM registers.
+ */
+ switch (idx) {
+ case 0:
+ entry->kw[0] = cam1;
+ entry->kw_mask[0] = cam1 ^ cam0;
+ break;
+ case 1:
+ entry->kw[1] = cam1;
+ entry->kw_mask[1] = cam1 ^ cam0;
+ break;
+ case 2:
+ entry->kw[1] |= (cam1 & CAM_MASK(16)) << 48;
+ entry->kw[2] = (cam1 >> 16) & CAM_MASK(48);
+ entry->kw_mask[1] |= ((cam1 ^ cam0) & CAM_MASK(16)) << 48;
+ entry->kw_mask[2] = ((cam1 ^ cam0) >> 16) & CAM_MASK(48);
+ break;
+ case 3:
+ entry->kw[2] |= (cam1 & CAM_MASK(16)) << 48;
+ entry->kw[3] = (cam1 >> 16) & CAM_MASK(32);
+ entry->kw_mask[2] |= ((cam1 ^ cam0) & CAM_MASK(16)) << 48;
+ entry->kw_mask[3] = ((cam1 ^ cam0) >> 16) & CAM_MASK(32);
+ break;
+ case 4:
+ entry->kw[3] |= (cam1 & CAM_MASK(32)) << 32;
+ entry->kw[4] = (cam1 >> 32) & CAM_MASK(32);
+ entry->kw_mask[3] |= ((cam1 ^ cam0) & CAM_MASK(32)) << 32;
+ entry->kw_mask[4] = ((cam1 ^ cam0) >> 32) & CAM_MASK(32);
+ break;
+ case 5:
+ entry->kw[4] |= (cam1 & CAM_MASK(32)) << 32;
+ entry->kw[5] = (cam1 >> 32) & CAM_MASK(16);
+ entry->kw_mask[4] |= ((cam1 ^ cam0) & CAM_MASK(32)) << 32;
+ entry->kw_mask[5] = ((cam1 ^ cam0) >> 32) & CAM_MASK(16);
+ break;
+ case 6:
+ entry->kw[5] |= (cam1 & CAM_MASK(48)) << 16;
+ entry->kw[6] = (cam1 >> 48) & CAM_MASK(16);
+ entry->kw_mask[5] |= ((cam1 ^ cam0) & CAM_MASK(48)) << 16;
+ entry->kw_mask[6] = ((cam1 ^ cam0) >> 48) & CAM_MASK(16);
+ break;
+ case 7:
+ entry->kw[6] |= (cam1 & CAM_MASK(48)) << 16;
+ entry->kw_mask[6] |= ((cam1 ^ cam0) & CAM_MASK(48)) << 16;
+ break;
+ }
+}
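
npc_fill_entryword is the inverse of npc_get_keyword above: with cam1 = kw & kw_mask and cam0 = ~kw & kw_mask, masked bits differ between the two CAM words while unmasked bits are zero in both, so cam1 recovers the value and cam1 ^ cam0 recovers the mask. A small round-trip sketch under those assumptions, with arbitrary values:

    static void npc_cam_roundtrip_demo(void)
    {
            u64 kw = 0x8FFULL, kw_mask = 0xFFFULL;
            u64 cam1 = kw & kw_mask;        /* assumed CAM encoding */
            u64 cam0 = ~cam1 & kw_mask;     /* as in npc_get_keyword */

            WARN_ON(cam1 != kw);                    /* value recovered */
            WARN_ON((cam1 ^ cam0) != kw_mask);      /* mask recovered */
    }
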
+
+static void npc_get_default_entry_action(struct rvu *rvu, struct npc_mcam *mcam,
+ int blkaddr, int index,
+ struct mcam_entry *entry)
+{
+ u16 owner, target_func;
+ struct rvu_pfvf *pfvf;
+ int bank, nixlf;
+ u64 rx_action;
+
+ owner = mcam->entry2pfvf_map[index];
+ target_func = (entry->action >> 4) & 0xffff;
+ /* Return in case the target is a PF or LBK, or the rule owner is not a PF */
+ if (is_afvf(target_func) || (owner & RVU_PFVF_FUNC_MASK) ||
+ !(target_func & RVU_PFVF_FUNC_MASK))
+ return;
+
+ pfvf = rvu_get_pfvf(rvu, target_func);
+ mcam->entry2target_pffunc[index] = target_func;
+ /* return if nixlf is not attached or initialized */
+ if (!is_nixlf_attached(rvu, target_func) || !pfvf->def_ucast_rule)
+ return;
+
+ /* get VF ucast entry rule */
+ nix_get_nixlf(rvu, target_func, &nixlf, NULL);
+ index = npc_get_nixlf_mcam_index(mcam, target_func,
+ nixlf, NIXLF_UCAST_ENTRY);
+ bank = npc_get_bank(mcam, index);
+ index &= (mcam->banksize - 1);
+
+ rx_action = rvu_read64(rvu, blkaddr,
+ NPC_AF_MCAMEX_BANKX_ACTION(index, bank));
+ if (rx_action)
+ entry->action = rx_action;
+}
+
static void npc_config_mcam_entry(struct rvu *rvu, struct npc_mcam *mcam,
int blkaddr, int index, u8 intf,
struct mcam_entry *entry, bool enable)
@@ -304,6 +493,11 @@ static void npc_config_mcam_entry(struct rvu *rvu, struct npc_mcam *mcam,
NPC_AF_MCAMEX_BANKX_CAMX_W1(index, bank, 0), cam0);
}
+ /* copy VF default entry action to the VF mcam entry */
+ if (intf == NIX_INTF_RX && actindex < mcam->bmap_entries)
+ npc_get_default_entry_action(rvu, mcam, blkaddr, actindex,
+ entry);
+
/* Set 'action' */
rvu_write64(rvu, blkaddr,
NPC_AF_MCAMEX_BANKX_ACTION(index, actbank), entry->action);
@@ -317,6 +511,42 @@ static void npc_config_mcam_entry(struct rvu *rvu, struct npc_mcam *mcam,
npc_enable_mcam_entry(rvu, mcam, blkaddr, actindex, true);
}
+void npc_read_mcam_entry(struct rvu *rvu, struct npc_mcam *mcam,
+ int blkaddr, u16 src,
+ struct mcam_entry *entry, u8 *intf, u8 *ena)
+{
+ int sbank = npc_get_bank(mcam, src);
+ int bank, kw = 0;
+ u64 cam0, cam1;
+
+ src &= (mcam->banksize - 1);
+ bank = sbank;
+
+ for (; bank < (sbank + mcam->banks_per_entry); bank++, kw = kw + 2) {
+ cam1 = rvu_read64(rvu, blkaddr,
+ NPC_AF_MCAMEX_BANKX_CAMX_W0(src, bank, 1));
+ cam0 = rvu_read64(rvu, blkaddr,
+ NPC_AF_MCAMEX_BANKX_CAMX_W0(src, bank, 0));
+ npc_fill_entryword(entry, kw, cam0, cam1);
+
+ cam1 = rvu_read64(rvu, blkaddr,
+ NPC_AF_MCAMEX_BANKX_CAMX_W1(src, bank, 1));
+ cam0 = rvu_read64(rvu, blkaddr,
+ NPC_AF_MCAMEX_BANKX_CAMX_W1(src, bank, 0));
+ npc_fill_entryword(entry, kw + 1, cam0, cam1);
+ }
+
+ entry->action = rvu_read64(rvu, blkaddr,
+ NPC_AF_MCAMEX_BANKX_ACTION(src, sbank));
+ entry->vtag_action =
+ rvu_read64(rvu, blkaddr,
+ NPC_AF_MCAMEX_BANKX_TAG_ACT(src, sbank));
+ *intf = rvu_read64(rvu, blkaddr,
+ NPC_AF_MCAMEX_BANKX_CAMX_INTF(src, sbank, 1)) & 3;
+ *ena = rvu_read64(rvu, blkaddr,
+ NPC_AF_MCAMEX_BANKX_CFG(src, sbank)) & 1;
+}
+
static void npc_copy_mcam_entry(struct rvu *rvu, struct npc_mcam *mcam,
int blkaddr, u16 src, u16 dest)
{
@@ -371,11 +601,11 @@ void rvu_npc_install_ucast_entry(struct rvu *rvu, u16 pcifunc,
int nixlf, u64 chan, u8 *mac_addr)
{
struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
+ struct npc_install_flow_req req = { 0 };
+ struct npc_install_flow_rsp rsp = { 0 };
struct npc_mcam *mcam = &rvu->hw->mcam;
- struct mcam_entry entry = { {0} };
struct nix_rx_action action;
- int blkaddr, index, kwi;
- u64 mac = 0;
+ int blkaddr, index;
/* AF's VFs work in promiscuous mode */
if (is_afvf(pcifunc))
@@ -385,20 +615,9 @@ void rvu_npc_install_ucast_entry(struct rvu *rvu, u16 pcifunc,
if (blkaddr < 0)
return;
- for (index = ETH_ALEN - 1; index >= 0; index--)
- mac |= ((u64)*mac_addr++) << (8 * index);
-
index = npc_get_nixlf_mcam_index(mcam, pcifunc,
nixlf, NIXLF_UCAST_ENTRY);
- /* Match ingress channel and DMAC */
- entry.kw[0] = chan;
- entry.kw_mask[0] = 0xFFFULL;
-
- kwi = NPC_PARSE_RESULT_DMAC_OFFSET / sizeof(u64);
- entry.kw[kwi] = mac;
- entry.kw_mask[kwi] = BIT_ULL(48) - 1;
-
/* Don't change the action if entry is already enabled
* Otherwise RSS action may get overwritten.
*/
@@ -411,25 +630,26 @@ void rvu_npc_install_ucast_entry(struct rvu *rvu, u16 pcifunc,
action.pf_func = pcifunc;
}
- entry.action = *(u64 *)&action;
- npc_config_mcam_entry(rvu, mcam, blkaddr, index,
- NIX_INTF_RX, &entry, true);
-
- /* add VLAN matching, setup action and save entry back for later */
- entry.kw[0] |= (NPC_LT_LB_STAG_QINQ | NPC_LT_LB_CTAG) << 20;
- entry.kw_mask[0] |= (NPC_LT_LB_STAG_QINQ & NPC_LT_LB_CTAG) << 20;
+ req.default_rule = 1;
+ ether_addr_copy(req.packet.dmac, mac_addr);
+ eth_broadcast_addr((u8 *)&req.mask.dmac);
+ req.features = BIT_ULL(NPC_DMAC);
+ req.channel = chan;
+ req.intf = pfvf->nix_rx_intf;
+ req.op = action.op;
+ req.hdr.pcifunc = 0; /* AF is requester */
+ req.vf = action.pf_func;
+ req.index = action.index;
+ req.match_id = action.match_id;
+ req.flow_key_alg = action.flow_key_alg;
- entry.vtag_action = VTAG0_VALID_BIT |
- FIELD_PREP(VTAG0_TYPE_MASK, 0) |
- FIELD_PREP(VTAG0_LID_MASK, NPC_LID_LA) |
- FIELD_PREP(VTAG0_RELPTR_MASK, 12);
-
- memcpy(&pfvf->entry, &entry, sizeof(entry));
+ rvu_mbox_handler_npc_install_flow(rvu, &req, &rsp);
}
void rvu_npc_install_promisc_entry(struct rvu *rvu, u16 pcifunc,
int nixlf, u64 chan, bool allmulti)
{
+ struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
struct npc_mcam *mcam = &rvu->hw->mcam;
int blkaddr, ucast_idx, index, kwi;
struct mcam_entry entry = { {0} };
@@ -473,7 +693,7 @@ void rvu_npc_install_promisc_entry(struct rvu *rvu, u16 pcifunc,
entry.action = *(u64 *)&action;
npc_config_mcam_entry(rvu, mcam, blkaddr, index,
- NIX_INTF_RX, &entry, true);
+ pfvf->nix_rx_intf, &entry, true);
}
static void npc_enadis_promisc_entry(struct rvu *rvu, u16 pcifunc,
@@ -531,6 +751,7 @@ void rvu_npc_install_bcast_match_entry(struct rvu *rvu, u16 pcifunc,
/* Get 'pcifunc' of PF device */
pcifunc = pcifunc & ~RVU_PFVF_FUNC_MASK;
+ pfvf = rvu_get_pfvf(rvu, pcifunc);
index = npc_get_nixlf_mcam_index(mcam, pcifunc,
nixlf, NIXLF_BCAST_ENTRY);
@@ -553,14 +774,13 @@ void rvu_npc_install_bcast_match_entry(struct rvu *rvu, u16 pcifunc,
action.op = NIX_RX_ACTIONOP_UCAST;
action.pf_func = pcifunc;
} else {
- pfvf = rvu_get_pfvf(rvu, pcifunc);
action.index = pfvf->bcast_mce_idx;
action.op = NIX_RX_ACTIONOP_MCAST;
}
entry.action = *(u64 *)&action;
npc_config_mcam_entry(rvu, mcam, blkaddr, index,
- NIX_INTF_RX, &entry, true);
+ pfvf->nix_rx_intf, &entry, true);
}
void rvu_npc_enable_bcast_entry(struct rvu *rvu, u16 pcifunc, bool enable)
@@ -579,12 +799,47 @@ void rvu_npc_enable_bcast_entry(struct rvu *rvu, u16 pcifunc, bool enable)
npc_enable_mcam_entry(rvu, mcam, blkaddr, index, enable);
}
+static void npc_update_vf_flow_entry(struct rvu *rvu, struct npc_mcam *mcam,
+ int blkaddr, u16 pcifunc, u64 rx_action)
+{
+ int actindex, index, bank;
+ bool enable;
+
+ if (!(pcifunc & RVU_PFVF_FUNC_MASK))
+ return;
+
+ mutex_lock(&mcam->lock);
+ for (index = 0; index < mcam->bmap_entries; index++) {
+ if (mcam->entry2target_pffunc[index] == pcifunc) {
+ bank = npc_get_bank(mcam, index);
+ actindex = index;
+ index &= (mcam->banksize - 1);
+
+ /* read vf flow entry enable status */
+ enable = is_mcam_entry_enabled(rvu, mcam, blkaddr,
+ actindex);
+ /* disable before mcam entry update */
+ npc_enable_mcam_entry(rvu, mcam, blkaddr, actindex,
+ false);
+ /* update 'action' */
+ rvu_write64(rvu, blkaddr,
+ NPC_AF_MCAMEX_BANKX_ACTION(index, bank),
+ rx_action);
+ if (enable)
+ npc_enable_mcam_entry(rvu, mcam, blkaddr,
+ actindex, true);
+ }
+ }
+ mutex_unlock(&mcam->lock);
+}
+
void rvu_npc_update_flowkey_alg_idx(struct rvu *rvu, u16 pcifunc, int nixlf,
int group, int alg_idx, int mcam_index)
{
struct npc_mcam *mcam = &rvu->hw->mcam;
struct nix_rx_action action;
int blkaddr, index, bank;
+ struct rvu_pfvf *pfvf;
blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
if (blkaddr < 0)
@@ -621,6 +876,16 @@ void rvu_npc_update_flowkey_alg_idx(struct rvu *rvu, u16 pcifunc, int nixlf,
rvu_write64(rvu, blkaddr,
NPC_AF_MCAMEX_BANKX_ACTION(index, bank), *(u64 *)&action);
+ /* update the VF flow rule action with the VF default entry action */
+ if (mcam_index < 0)
+ npc_update_vf_flow_entry(rvu, mcam, blkaddr, pcifunc,
+ *(u64 *)&action);
+
+ /* update the action change in default rule */
+ pfvf = rvu_get_pfvf(rvu, pcifunc);
+ if (pfvf->def_ucast_rule)
+ pfvf->def_ucast_rule->rx_action = action;
+
index = npc_get_nixlf_mcam_index(mcam, pcifunc,
nixlf, NIXLF_PROMISC_ENTRY);
@@ -635,8 +900,6 @@ void rvu_npc_update_flowkey_alg_idx(struct rvu *rvu, u16 pcifunc, int nixlf,
NPC_AF_MCAMEX_BANKX_ACTION(index, bank),
*(u64 *)&action);
}
-
- rvu_npc_update_rxvlan(rvu, pcifunc, nixlf);
}
static void npc_enadis_default_entries(struct rvu *rvu, u16 pcifunc,
@@ -688,8 +951,6 @@ static void npc_enadis_default_entries(struct rvu *rvu, u16 pcifunc,
rvu_npc_enable_promisc_entry(rvu, pcifunc, nixlf);
else
rvu_npc_disable_promisc_entry(rvu, pcifunc, nixlf);
-
- rvu_npc_update_rxvlan(rvu, pcifunc, nixlf);
}
void rvu_npc_disable_default_entries(struct rvu *rvu, u16 pcifunc, int nixlf)
@@ -704,7 +965,41 @@ void rvu_npc_enable_default_entries(struct rvu *rvu, u16 pcifunc, int nixlf)
void rvu_npc_disable_mcam_entries(struct rvu *rvu, u16 pcifunc, int nixlf)
{
+ struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
+ struct npc_mcam *mcam = &rvu->hw->mcam;
+ struct rvu_npc_mcam_rule *rule;
+ int blkaddr;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
+ if (blkaddr < 0)
+ return;
+
+ mutex_lock(&mcam->lock);
+
+ /* Disable MCAM entries directing traffic to this 'pcifunc' */
+ list_for_each_entry(rule, &mcam->mcam_rules, list) {
+ if (is_npc_intf_rx(rule->intf) &&
+ rule->rx_action.pf_func == pcifunc) {
+ npc_enable_mcam_entry(rvu, mcam, blkaddr,
+ rule->entry, false);
+ rule->enable = false;
+ /* Indicate that default rule is disabled */
+ if (rule->default_rule)
+ pfvf->def_ucast_rule = NULL;
+ }
+ }
+
+ mutex_unlock(&mcam->lock);
+
+ npc_mcam_disable_flows(rvu, pcifunc);
+
+ rvu_npc_disable_default_entries(rvu, pcifunc, nixlf);
+}
+
+void rvu_npc_free_mcam_entries(struct rvu *rvu, u16 pcifunc, int nixlf)
+{
struct npc_mcam *mcam = &rvu->hw->mcam;
+ struct rvu_npc_mcam_rule *rule, *tmp;
int blkaddr;
blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
@@ -713,12 +1008,20 @@ void rvu_npc_disable_mcam_entries(struct rvu *rvu, u16 pcifunc, int nixlf)
mutex_lock(&mcam->lock);
- /* Disable and free all MCAM entries mapped to this 'pcifunc' */
+ /* Free all MCAM entries owned by this 'pcifunc' */
npc_mcam_free_all_entries(rvu, mcam, blkaddr, pcifunc);
- /* Free all MCAM counters mapped to this 'pcifunc' */
+ /* Free all MCAM counters owned by this 'pcifunc' */
npc_mcam_free_all_counters(rvu, mcam, pcifunc);
+ /* Delete MCAM entries owned by this 'pcifunc' */
+ list_for_each_entry_safe(rule, tmp, &mcam->mcam_rules, list) {
+ if (rule->owner == pcifunc && !rule->default_rule) {
+ list_del(&rule->list);
+ kfree(rule);
+ }
+ }
+
mutex_unlock(&mcam->lock);
rvu_npc_disable_default_entries(rvu, pcifunc, nixlf);
@@ -732,44 +1035,78 @@ void rvu_npc_disable_mcam_entries(struct rvu *rvu, u16 pcifunc, int nixlf)
rvu_write64(rvu, blkaddr, \
NPC_AF_INTFX_LDATAX_FLAGSX_CFG(intf, ld, flags), cfg)
-static void npc_program_mkex_profile(struct rvu *rvu, int blkaddr,
- const struct npc_mcam_kex *mkex)
+static void npc_program_mkex_rx(struct rvu *rvu, int blkaddr,
+ struct npc_mcam_kex *mkex, u8 intf)
{
int lid, lt, ld, fl;
- rvu_write64(rvu, blkaddr, NPC_AF_INTFX_KEX_CFG(NIX_INTF_RX),
- mkex->keyx_cfg[NIX_INTF_RX]);
- rvu_write64(rvu, blkaddr, NPC_AF_INTFX_KEX_CFG(NIX_INTF_TX),
- mkex->keyx_cfg[NIX_INTF_TX]);
+ if (is_npc_intf_tx(intf))
+ return;
- for (ld = 0; ld < NPC_MAX_LD; ld++)
- rvu_write64(rvu, blkaddr, NPC_AF_KEX_LDATAX_FLAGS_CFG(ld),
- mkex->kex_ld_flags[ld]);
+ rvu_write64(rvu, blkaddr, NPC_AF_INTFX_KEX_CFG(intf),
+ mkex->keyx_cfg[NIX_INTF_RX]);
+ /* Program LDATA */
for (lid = 0; lid < NPC_MAX_LID; lid++) {
for (lt = 0; lt < NPC_MAX_LT; lt++) {
- for (ld = 0; ld < NPC_MAX_LD; ld++) {
- SET_KEX_LD(NIX_INTF_RX, lid, lt, ld,
+ for (ld = 0; ld < NPC_MAX_LD; ld++)
+ SET_KEX_LD(intf, lid, lt, ld,
mkex->intf_lid_lt_ld[NIX_INTF_RX]
[lid][lt][ld]);
-
- SET_KEX_LD(NIX_INTF_TX, lid, lt, ld,
- mkex->intf_lid_lt_ld[NIX_INTF_TX]
- [lid][lt][ld]);
- }
}
}
-
+ /* Program LFLAGS */
for (ld = 0; ld < NPC_MAX_LD; ld++) {
- for (fl = 0; fl < NPC_MAX_LFL; fl++) {
- SET_KEX_LDFLAGS(NIX_INTF_RX, ld, fl,
+ for (fl = 0; fl < NPC_MAX_LFL; fl++)
+ SET_KEX_LDFLAGS(intf, ld, fl,
mkex->intf_ld_flags[NIX_INTF_RX]
[ld][fl]);
+ }
+}
- SET_KEX_LDFLAGS(NIX_INTF_TX, ld, fl,
+static void npc_program_mkex_tx(struct rvu *rvu, int blkaddr,
+ struct npc_mcam_kex *mkex, u8 intf)
+{
+ int lid, lt, ld, fl;
+
+ if (is_npc_intf_rx(intf))
+ return;
+
+ rvu_write64(rvu, blkaddr, NPC_AF_INTFX_KEX_CFG(intf),
+ mkex->keyx_cfg[NIX_INTF_TX]);
+
+ /* Program LDATA */
+ for (lid = 0; lid < NPC_MAX_LID; lid++) {
+ for (lt = 0; lt < NPC_MAX_LT; lt++) {
+ for (ld = 0; ld < NPC_MAX_LD; ld++)
+ SET_KEX_LD(intf, lid, lt, ld,
+ mkex->intf_lid_lt_ld[NIX_INTF_TX]
+ [lid][lt][ld]);
+ }
+ }
+ /* Program LFLAGS */
+ for (ld = 0; ld < NPC_MAX_LD; ld++) {
+ for (fl = 0; fl < NPC_MAX_LFL; fl++)
+ SET_KEX_LDFLAGS(intf, ld, fl,
mkex->intf_ld_flags[NIX_INTF_TX]
[ld][fl]);
- }
+ }
+}
+
+static void npc_program_mkex_profile(struct rvu *rvu, int blkaddr,
+ struct npc_mcam_kex *mkex)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+ u8 intf;
+ int ld;
+
+ for (ld = 0; ld < NPC_MAX_LD; ld++)
+ rvu_write64(rvu, blkaddr, NPC_AF_KEX_LDATAX_FLAGS_CFG(ld),
+ mkex->kex_ld_flags[ld]);
+
+ for (intf = 0; intf < hw->npc_intfs; intf++) {
+ npc_program_mkex_rx(rvu, blkaddr, mkex, intf);
+ npc_program_mkex_tx(rvu, blkaddr, mkex, intf);
}
}
@@ -909,7 +1246,7 @@ static void npc_program_kpu_profile(struct rvu *rvu, int blkaddr, int kpu,
kpu, profile->cam_entries, profile->action_entries);
}
- max_entries = rvu_read64(rvu, blkaddr, NPC_AF_CONST1) & 0xFFF;
+ max_entries = rvu->hw->npc_kpu_entries;
/* Program CAM match entries for previous KPU extracted data */
num_entries = min_t(int, profile->cam_entries, max_entries);
@@ -964,9 +1301,6 @@ static void npc_parser_profile_init(struct rvu *rvu, int blkaddr)
int num_pkinds, num_kpus, idx;
struct npc_pkind *pkind;
- /* Get HW limits */
- hw->npc_kpus = (rvu_read64(rvu, blkaddr, NPC_AF_CONST) >> 8) & 0x1F;
-
/* Disable all KPUs and their entries */
for (idx = 0; idx < hw->npc_kpus; idx++) {
rvu_write64(rvu, blkaddr,
@@ -1005,12 +1339,6 @@ static int npc_mcam_rsrcs_init(struct rvu *rvu, int blkaddr)
int rsvd, err;
u64 cfg;
- /* Get HW limits */
- cfg = rvu_read64(rvu, blkaddr, NPC_AF_CONST);
- mcam->banks = (cfg >> 44) & 0xF;
- mcam->banksize = (cfg >> 28) & 0xFFFF;
- mcam->counters.max = (cfg >> 48) & 0xFFFF;
-
/* Actual number of MCAM entries vary by entry size */
cfg = (rvu_read64(rvu, blkaddr,
NPC_AF_INTFX_KEX_CFG(0)) >> 32) & 0x07;
@@ -1077,12 +1405,6 @@ static int npc_mcam_rsrcs_init(struct rvu *rvu, int blkaddr)
mcam->hprio_count = mcam->lprio_count;
mcam->hprio_end = mcam->hprio_count;
- /* Reserve last counter for MCAM RX miss action which is set to
- * drop pkt. This way we will know how many pkts didn't match
- * any MCAM entry.
- */
- mcam->counters.max--;
- mcam->rx_miss_act_cntr = mcam->counters.max;
/* Allocate bitmap for managing MCAM counters and memory
* for saving counter to RVU PFFUNC allocation mapping.
@@ -1109,6 +1431,12 @@ static int npc_mcam_rsrcs_init(struct rvu *rvu, int blkaddr)
if (!mcam->cntr_refcnt)
goto free_mem;
+ /* Alloc memory for saving the target device of each mcam rule */
+ mcam->entry2target_pffunc = devm_kcalloc(rvu->dev, mcam->total_entries,
+ sizeof(u16), GFP_KERNEL);
+ if (!mcam->entry2target_pffunc)
+ goto free_mem;
+
mutex_init(&mcam->lock);
return 0;
@@ -1118,12 +1446,110 @@ free_mem:
return -ENOMEM;
}
+static void rvu_npc_hw_init(struct rvu *rvu, int blkaddr)
+{
+ struct npc_pkind *pkind = &rvu->hw->pkind;
+ struct npc_mcam *mcam = &rvu->hw->mcam;
+ struct rvu_hwinfo *hw = rvu->hw;
+ u64 npc_const, npc_const1;
+ u64 npc_const2 = 0;
+
+ npc_const = rvu_read64(rvu, blkaddr, NPC_AF_CONST);
+ npc_const1 = rvu_read64(rvu, blkaddr, NPC_AF_CONST1);
+ if (npc_const1 & BIT_ULL(63))
+ npc_const2 = rvu_read64(rvu, blkaddr, NPC_AF_CONST2);
+
+ pkind->rsrc.max = (npc_const1 >> 12) & 0xFFULL;
+ hw->npc_kpu_entries = npc_const1 & 0xFFFULL;
+ hw->npc_kpus = (npc_const >> 8) & 0x1FULL;
+ hw->npc_intfs = npc_const & 0xFULL;
+ hw->npc_counters = (npc_const >> 48) & 0xFFFFULL;
+
+ mcam->banks = (npc_const >> 44) & 0xFULL;
+ mcam->banksize = (npc_const >> 28) & 0xFFFFULL;
+ /* Extended set */
+ if (npc_const2) {
+ hw->npc_ext_set = true;
+ hw->npc_counters = (npc_const2 >> 16) & 0xFFFFULL;
+ mcam->banksize = npc_const2 & 0xFFFFULL;
+ }
+
+ mcam->counters.max = hw->npc_counters;
+}
+
+static void rvu_npc_setup_interfaces(struct rvu *rvu, int blkaddr)
+{
+ struct npc_mcam *mcam = &rvu->hw->mcam;
+ struct rvu_hwinfo *hw = rvu->hw;
+ u64 nibble_ena, rx_kex, tx_kex;
+ u8 intf;
+
+ /* Reserve last counter for MCAM RX miss action which is set to
+ * drop packet. This way we will know how many pkts didn't match
+ * any MCAM entry.
+ */
+ mcam->counters.max--;
+ mcam->rx_miss_act_cntr = mcam->counters.max;
+
+ rx_kex = npc_mkex_default.keyx_cfg[NIX_INTF_RX];
+ tx_kex = npc_mkex_default.keyx_cfg[NIX_INTF_TX];
+ nibble_ena = FIELD_GET(NPC_PARSE_NIBBLE, rx_kex);
+
+ nibble_ena = rvu_npc_get_tx_nibble_cfg(rvu, nibble_ena);
+ if (nibble_ena) {
+ tx_kex &= ~NPC_PARSE_NIBBLE;
+ tx_kex |= FIELD_PREP(NPC_PARSE_NIBBLE, nibble_ena);
+ npc_mkex_default.keyx_cfg[NIX_INTF_TX] = tx_kex;
+ }
+
+ /* Configure RX interfaces */
+ for (intf = 0; intf < hw->npc_intfs; intf++) {
+ if (is_npc_intf_tx(intf))
+ continue;
+
+ /* Set RX MCAM search key size. LA..LE (ltype only) + Channel */
+ rvu_write64(rvu, blkaddr, NPC_AF_INTFX_KEX_CFG(intf),
+ rx_kex);
+
+ /* If MCAM lookup doesn't result in a match, drop the received
+ * packet. And map this action to a counter to count dropped
+ * packets.
+ */
+ rvu_write64(rvu, blkaddr,
+ NPC_AF_INTFX_MISS_ACT(intf), NIX_RX_ACTIONOP_DROP);
+
+ /* NPC_AF_INTFX_MISS_STAT_ACT[14:12] - counter[11:9]
+ * NPC_AF_INTFX_MISS_STAT_ACT[8:0] - counter[8:0]
+ */
+ rvu_write64(rvu, blkaddr,
+ NPC_AF_INTFX_MISS_STAT_ACT(intf),
+ ((mcam->rx_miss_act_cntr >> 9) << 12) |
+ BIT_ULL(9) | mcam->rx_miss_act_cntr);
+ }
+
+ /* Configure TX interfaces */
+ for (intf = 0; intf < hw->npc_intfs; intf++) {
+ if (is_npc_intf_rx(intf))
+ continue;
+
+ /* Extract Ltypes LID_LA to LID_LE */
+ rvu_write64(rvu, blkaddr, NPC_AF_INTFX_KEX_CFG(intf),
+ tx_kex);
+
+ /* Set TX miss action to UCAST_DEFAULT i.e
+ * transmit the packet on NIX LF SQ's default channel.
+ */
+ rvu_write64(rvu, blkaddr,
+ NPC_AF_INTFX_MISS_ACT(intf),
+ NIX_TX_ACTIONOP_UCAST_DEFAULT);
+ }
+}
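
The MISS_STAT_ACT write splits the counter number across two register fields, with bit 9 doubling as the stats enable. Working through the expression above for a hypothetical counter 600:

    /* cntr = 600 = 0x258: counter[11:9] = 1  -> register bits [14:12],
     * counter[8:0] = 88                      -> register bits [8:0],
     * BIT(9) = enable.
     * ((600 >> 9) << 12) | BIT_ULL(9) | 600 = 0x1000 | 0x200 | 0x258 = 0x1258
     */
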
+
int rvu_npc_init(struct rvu *rvu)
{
struct npc_kpu_profile_adapter *kpu = &rvu->kpu;
struct npc_pkind *pkind = &rvu->hw->pkind;
struct npc_mcam *mcam = &rvu->hw->mcam;
- u64 cfg, nibble_ena, rx_kex, tx_kex;
int blkaddr, entry, bank, err;
blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
@@ -1132,17 +1558,15 @@ int rvu_npc_init(struct rvu *rvu)
return -ENODEV;
}
+ rvu_npc_hw_init(rvu, blkaddr);
+
/* First disable all MCAM entries, to stop traffic towards NIXLFs */
- cfg = rvu_read64(rvu, blkaddr, NPC_AF_CONST);
- for (bank = 0; bank < ((cfg >> 44) & 0xF); bank++) {
- for (entry = 0; entry < ((cfg >> 28) & 0xFFFF); entry++)
+ for (bank = 0; bank < mcam->banks; bank++) {
+ for (entry = 0; entry < mcam->banksize; entry++)
rvu_write64(rvu, blkaddr,
NPC_AF_MCAMEX_BANKX_CFG(entry, bank), 0);
}
- /* Allocate resource bimap for pkind*/
- pkind->rsrc.max = (rvu_read64(rvu, blkaddr,
- NPC_AF_CONST1) >> 12) & 0xFF;
err = rvu_alloc_bitmap(&pkind->rsrc);
if (err)
return err;
@@ -1180,42 +1604,21 @@ int rvu_npc_init(struct rvu *rvu)
BIT_ULL(32) | BIT_ULL(24) | BIT_ULL(6) |
BIT_ULL(2) | BIT_ULL(1));
- /* Set RX and TX side MCAM search key size.
- * LA..LD (ltype only) + Channel
- */
- rx_kex = npc_mkex_default.keyx_cfg[NIX_INTF_RX];
- tx_kex = npc_mkex_default.keyx_cfg[NIX_INTF_TX];
- nibble_ena = FIELD_GET(NPC_PARSE_NIBBLE, rx_kex);
- rvu_write64(rvu, blkaddr, NPC_AF_INTFX_KEX_CFG(NIX_INTF_RX), rx_kex);
- /* Due to an errata (35786) in A0 pass silicon, parse nibble enable
- * configuration has to be identical for both Rx and Tx interfaces.
- */
- if (is_rvu_96xx_B0(rvu)) {
- tx_kex &= ~NPC_PARSE_NIBBLE;
- tx_kex |= FIELD_PREP(NPC_PARSE_NIBBLE, nibble_ena);
- }
- rvu_write64(rvu, blkaddr, NPC_AF_INTFX_KEX_CFG(NIX_INTF_TX), tx_kex);
-
- err = npc_mcam_rsrcs_init(rvu, blkaddr);
- if (err)
- return err;
+ rvu_npc_setup_interfaces(rvu, blkaddr);
/* Configure MKEX profile */
npc_load_mkex_profile(rvu, blkaddr, rvu->mkex_pfl_name);
- /* Set TX miss action to UCAST_DEFAULT i.e
- * transmit the packet on NIX LF SQ's default channel.
- */
- rvu_write64(rvu, blkaddr, NPC_AF_INTFX_MISS_ACT(NIX_INTF_TX),
- NIX_TX_ACTIONOP_UCAST_DEFAULT);
+ err = npc_mcam_rsrcs_init(rvu, blkaddr);
+ if (err)
+ return err;
- /* If MCAM lookup doesn't result in a match, drop the received packet.
- * And map this action to a counter to count dropped pkts.
- */
- rvu_write64(rvu, blkaddr, NPC_AF_INTFX_MISS_ACT(NIX_INTF_RX),
- NIX_RX_ACTIONOP_DROP);
- rvu_write64(rvu, blkaddr, NPC_AF_INTFX_MISS_STAT_ACT(NIX_INTF_RX),
- BIT_ULL(9) | mcam->rx_miss_act_cntr);
+ err = npc_flow_steering_init(rvu, blkaddr);
+ if (err) {
+ dev_err(rvu->dev,
+ "Incorrect mkex profile loaded using default mkex\n");
+ npc_load_mkex_profile(rvu, blkaddr, def_pfl_name);
+ }
return 0;
}
@@ -1307,10 +1710,13 @@ static void npc_map_mcam_entry_and_cntr(struct rvu *rvu, struct npc_mcam *mcam,
/* Set mapping and increment counter's refcnt */
mcam->entry2cntr_map[entry] = cntr;
mcam->cntr_refcnt[cntr]++;
- /* Enable stats */
+ /* Enable stats
+ * NPC_AF_MCAMEX_BANKX_STAT_ACT[14:12] - counter[11:9]
+ * NPC_AF_MCAMEX_BANKX_STAT_ACT[8:0] - counter[8:0]
+ */
rvu_write64(rvu, blkaddr,
NPC_AF_MCAMEX_BANKX_STAT_ACT(index, bank),
- BIT_ULL(9) | cntr);
+ ((cntr >> 9) << 12) | BIT_ULL(9) | cntr);
}
static void npc_unmap_mcam_entry_and_cntr(struct rvu *rvu,
@@ -1380,6 +1786,7 @@ static void npc_mcam_free_all_entries(struct rvu *rvu, struct npc_mcam *mcam,
npc_unmap_mcam_entry_and_cntr(rvu, mcam,
blkaddr, index,
cntr);
+ mcam->entry2target_pffunc[index] = 0x0;
}
}
}
@@ -1766,6 +2173,7 @@ int rvu_mbox_handler_npc_mcam_free_entry(struct rvu *rvu,
goto exit;
mcam->entry2pfvf_map[req->entry] = 0;
+ mcam->entry2target_pffunc[req->entry] = 0x0;
npc_mcam_clear_bit(mcam, req->entry);
npc_enable_mcam_entry(rvu, mcam, blkaddr, req->entry, false);
@@ -1785,18 +2193,49 @@ exit:
return rc;
}
+int rvu_mbox_handler_npc_mcam_read_entry(struct rvu *rvu,
+ struct npc_mcam_read_entry_req *req,
+ struct npc_mcam_read_entry_rsp *rsp)
+{
+ struct npc_mcam *mcam = &rvu->hw->mcam;
+ u16 pcifunc = req->hdr.pcifunc;
+ int blkaddr, rc;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
+ if (blkaddr < 0)
+ return NPC_MCAM_INVALID_REQ;
+
+ mutex_lock(&mcam->lock);
+ rc = npc_mcam_verify_entry(mcam, pcifunc, req->entry);
+ if (!rc) {
+ npc_read_mcam_entry(rvu, mcam, blkaddr, req->entry,
+ &rsp->entry_data,
+ &rsp->intf, &rsp->enable);
+ }
+
+ mutex_unlock(&mcam->lock);
+ return rc;
+}
+
int rvu_mbox_handler_npc_mcam_write_entry(struct rvu *rvu,
struct npc_mcam_write_entry_req *req,
struct msg_rsp *rsp)
{
+ struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, req->hdr.pcifunc);
struct npc_mcam *mcam = &rvu->hw->mcam;
u16 pcifunc = req->hdr.pcifunc;
+ u16 channel, chan_mask;
int blkaddr, rc;
+ u8 nix_intf;
blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
if (blkaddr < 0)
return NPC_MCAM_INVALID_REQ;
+ chan_mask = req->entry_data.kw_mask[0] & NPC_KEX_CHAN_MASK;
+ channel = req->entry_data.kw[0] & NPC_KEX_CHAN_MASK;
+ channel &= chan_mask;
+
mutex_lock(&mcam->lock);
rc = npc_mcam_verify_entry(mcam, pcifunc, req->entry);
if (rc)
@@ -1808,12 +2247,28 @@ int rvu_mbox_handler_npc_mcam_write_entry(struct rvu *rvu,
goto exit;
}
- if (req->intf != NIX_INTF_RX && req->intf != NIX_INTF_TX) {
+ if (!is_npc_interface_valid(rvu, req->intf)) {
rc = NPC_MCAM_INVALID_REQ;
goto exit;
}
- npc_config_mcam_entry(rvu, mcam, blkaddr, req->entry, req->intf,
+ if (is_npc_intf_tx(req->intf))
+ nix_intf = pfvf->nix_tx_intf;
+ else
+ nix_intf = pfvf->nix_rx_intf;
+
+ if (npc_mcam_verify_channel(rvu, pcifunc, req->intf, channel)) {
+ rc = NPC_MCAM_INVALID_REQ;
+ goto exit;
+ }
+
+ if (npc_mcam_verify_pf_func(rvu, &req->entry_data, req->intf,
+ pcifunc)) {
+ rc = NPC_MCAM_INVALID_REQ;
+ goto exit;
+ }
+
+ npc_config_mcam_entry(rvu, mcam, blkaddr, req->entry, nix_intf,
&req->entry_data, req->enable_entry);
if (req->set_cntr)
@@ -2141,6 +2596,7 @@ int rvu_mbox_handler_npc_mcam_alloc_and_write_entry(struct rvu *rvu,
struct npc_mcam_alloc_and_write_entry_req *req,
struct npc_mcam_alloc_and_write_entry_rsp *rsp)
{
+ struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, req->hdr.pcifunc);
struct npc_mcam_alloc_counter_req cntr_req;
struct npc_mcam_alloc_counter_rsp cntr_rsp;
struct npc_mcam_alloc_entry_req entry_req;
@@ -2148,13 +2604,26 @@ int rvu_mbox_handler_npc_mcam_alloc_and_write_entry(struct rvu *rvu,
struct npc_mcam *mcam = &rvu->hw->mcam;
u16 entry = NPC_MCAM_ENTRY_INVALID;
u16 cntr = NPC_MCAM_ENTRY_INVALID;
+ u16 channel, chan_mask;
int blkaddr, rc;
+ u8 nix_intf;
blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
if (blkaddr < 0)
return NPC_MCAM_INVALID_REQ;
- if (req->intf != NIX_INTF_RX && req->intf != NIX_INTF_TX)
+ if (!is_npc_interface_valid(rvu, req->intf))
+ return NPC_MCAM_INVALID_REQ;
+
+ chan_mask = req->entry_data.kw_mask[0] & NPC_KEX_CHAN_MASK;
+ channel = req->entry_data.kw[0] & NPC_KEX_CHAN_MASK;
+ channel &= chan_mask;
+
+ if (npc_mcam_verify_channel(rvu, req->hdr.pcifunc, req->intf, channel))
+ return NPC_MCAM_INVALID_REQ;
+
+ if (npc_mcam_verify_pf_func(rvu, &req->entry_data, req->intf,
+ req->hdr.pcifunc))
return NPC_MCAM_INVALID_REQ;
/* Try to allocate a MCAM entry */
@@ -2196,7 +2665,13 @@ int rvu_mbox_handler_npc_mcam_alloc_and_write_entry(struct rvu *rvu,
write_entry:
mutex_lock(&mcam->lock);
- npc_config_mcam_entry(rvu, mcam, blkaddr, entry, req->intf,
+
+ if (is_npc_intf_tx(req->intf))
+ nix_intf = pfvf->nix_tx_intf;
+ else
+ nix_intf = pfvf->nix_rx_intf;
+
+ npc_config_mcam_entry(rvu, mcam, blkaddr, entry, nix_intf,
&req->entry_data, req->enable_entry);
if (req->alloc_cntr)
@@ -2255,26 +2730,72 @@ int rvu_mbox_handler_npc_get_kex_cfg(struct rvu *rvu, struct msg_req *req,
return 0;
}
-int rvu_npc_update_rxvlan(struct rvu *rvu, u16 pcifunc, int nixlf)
+bool rvu_npc_write_default_rule(struct rvu *rvu, int blkaddr, int nixlf,
+ u16 pcifunc, u8 intf, struct mcam_entry *entry,
+ int *index)
{
struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
struct npc_mcam *mcam = &rvu->hw->mcam;
- int blkaddr, index;
bool enable;
+ u8 nix_intf;
+
+ if (is_npc_intf_tx(intf))
+ nix_intf = pfvf->nix_tx_intf;
+ else
+ nix_intf = pfvf->nix_rx_intf;
+
+ *index = npc_get_nixlf_mcam_index(mcam, pcifunc,
+ nixlf, NIXLF_UCAST_ENTRY);
+ /* don't force-enable the unicast entry */
+ enable = is_mcam_entry_enabled(rvu, mcam, blkaddr, *index);
+ npc_config_mcam_entry(rvu, mcam, blkaddr, *index, nix_intf,
+ entry, enable);
+
+ return enable;
+}
+
+int rvu_mbox_handler_npc_read_base_steer_rule(struct rvu *rvu,
+ struct msg_req *req,
+ struct npc_mcam_read_base_rule_rsp *rsp)
+{
+ struct npc_mcam *mcam = &rvu->hw->mcam;
+ int index, blkaddr, nixlf, rc = 0;
+ u16 pcifunc = req->hdr.pcifunc;
+ struct rvu_pfvf *pfvf;
+ u8 intf, enable;
blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
if (blkaddr < 0)
- return NIX_AF_ERR_AF_LF_INVALID;
+ return NPC_MCAM_INVALID_REQ;
- if (!pfvf->rxvlan)
- return 0;
+ /* Return the channel number in case of PF */
+ if (!(pcifunc & RVU_PFVF_FUNC_MASK)) {
+ pfvf = rvu_get_pfvf(rvu, pcifunc);
+ rsp->entry.kw[0] = pfvf->rx_chan_base;
+ rsp->entry.kw_mask[0] = 0xFFFULL;
+ goto out;
+ }
+ /* Find the pkt steering rule installed by PF to this VF */
+ mutex_lock(&mcam->lock);
+ for (index = 0; index < mcam->bmap_entries; index++) {
+ if (mcam->entry2target_pffunc[index] == pcifunc)
+ goto read_entry;
+ }
+
+ rc = nix_get_nixlf(rvu, pcifunc, &nixlf, NULL);
+ if (rc < 0) {
+ mutex_unlock(&mcam->lock);
+ goto out;
+ }
+ /* Read the default ucast entry if there is no pkt steering rule */
index = npc_get_nixlf_mcam_index(mcam, pcifunc, nixlf,
NIXLF_UCAST_ENTRY);
- pfvf->entry.action = npc_get_mcam_action(rvu, mcam, blkaddr, index);
- enable = is_mcam_entry_enabled(rvu, mcam, blkaddr, index);
- npc_config_mcam_entry(rvu, mcam, blkaddr, pfvf->rxvlan_index,
- NIX_INTF_RX, &pfvf->entry, enable);
-
- return 0;
+read_entry:
+ /* Read the mcam entry */
+ npc_read_mcam_entry(rvu, mcam, blkaddr, index, &rsp->entry, &intf,
+ &enable);
+ mutex_unlock(&mcam->lock);
+out:
+ return rc;
}
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c
new file mode 100644
index 000000000000..14832b66d1fe
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c
@@ -0,0 +1,1336 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell OcteonTx2 RVU Admin Function driver
+ *
+ * Copyright (C) 2020 Marvell.
+ */
+
+#include <linux/bitfield.h>
+
+#include "rvu_struct.h"
+#include "rvu_reg.h"
+#include "rvu.h"
+#include "npc.h"
+
+#define NPC_BYTESM GENMASK_ULL(19, 16)
+#define NPC_HDR_OFFSET GENMASK_ULL(15, 8)
+#define NPC_KEY_OFFSET GENMASK_ULL(5, 0)
+#define NPC_LDATA_EN BIT_ULL(7)
+
+static const char * const npc_flow_names[] = {
+ [NPC_DMAC] = "dmac",
+ [NPC_SMAC] = "smac",
+ [NPC_ETYPE] = "ether type",
+ [NPC_OUTER_VID] = "outer vlan id",
+ [NPC_TOS] = "tos",
+ [NPC_SIP_IPV4] = "ipv4 source ip",
+ [NPC_DIP_IPV4] = "ipv4 destination ip",
+ [NPC_SIP_IPV6] = "ipv6 source ip",
+ [NPC_DIP_IPV6] = "ipv6 destination ip",
+ [NPC_SPORT_TCP] = "tcp source port",
+ [NPC_DPORT_TCP] = "tcp destination port",
+ [NPC_SPORT_UDP] = "udp source port",
+ [NPC_DPORT_UDP] = "udp destination port",
+ [NPC_SPORT_SCTP] = "sctp source port",
+ [NPC_DPORT_SCTP] = "sctp destination port",
+ [NPC_UNKNOWN] = "unknown",
+};
+
+const char *npc_get_field_name(u8 hdr)
+{
+ if (hdr >= ARRAY_SIZE(npc_flow_names))
+ return npc_flow_names[NPC_UNKNOWN];
+
+ return npc_flow_names[hdr];
+}
+
+/* Compute keyword masks and figure out the number of keywords a field
+ * spans in the key.
+ */
+static void npc_set_kw_masks(struct npc_mcam *mcam, u8 type,
+ u8 nr_bits, int start_kwi, int offset, u8 intf)
+{
+ struct npc_key_field *field = &mcam->rx_key_fields[type];
+ u8 bits_in_kw;
+ int max_kwi;
+
+ if (mcam->banks_per_entry == 1)
+ max_kwi = 1; /* NPC_MCAM_KEY_X1 */
+ else if (mcam->banks_per_entry == 2)
+ max_kwi = 3; /* NPC_MCAM_KEY_X2 */
+ else
+ max_kwi = 6; /* NPC_MCAM_KEY_X4 */
+
+ if (is_npc_intf_tx(intf))
+ field = &mcam->tx_key_fields[type];
+
+ if (offset + nr_bits <= 64) {
+ /* one KW only */
+ if (start_kwi > max_kwi)
+ return;
+ field->kw_mask[start_kwi] |= GENMASK_ULL(nr_bits - 1, 0)
+ << offset;
+ field->nr_kws = 1;
+ } else if (offset + nr_bits > 64 &&
+ offset + nr_bits <= 128) {
+ /* two KWs */
+ if (start_kwi + 1 > max_kwi)
+ return;
+ /* first KW mask */
+ bits_in_kw = 64 - offset;
+ field->kw_mask[start_kwi] |= GENMASK_ULL(bits_in_kw - 1, 0)
+ << offset;
+ /* second KW mask i.e. mask for rest of bits */
+ bits_in_kw = nr_bits + offset - 64;
+ field->kw_mask[start_kwi + 1] |= GENMASK_ULL(bits_in_kw - 1, 0);
+ field->nr_kws = 2;
+ } else {
+ /* three KWs */
+ if (start_kwi + 2 > max_kwi)
+ return;
+ /* first KW mask */
+ bits_in_kw = 64 - offset;
+ field->kw_mask[start_kwi] |= GENMASK_ULL(bits_in_kw - 1, 0)
+ << offset;
+ /* second KW mask */
+ field->kw_mask[start_kwi + 1] = ~0ULL;
+ /* third KW mask i.e. mask for rest of bits */
+ bits_in_kw = nr_bits + offset - 128;
+ field->kw_mask[start_kwi + 2] |= GENMASK_ULL(bits_in_kw - 1, 0);
+ field->nr_kws = 3;
+ }
+}
+
+/* Helper function to figure out whether a field exists in the key */
+static bool npc_is_field_present(struct rvu *rvu, enum key_fields type, u8 intf)
+{
+ struct npc_mcam *mcam = &rvu->hw->mcam;
+ struct npc_key_field *input;
+
+ input = &mcam->rx_key_fields[type];
+ if (is_npc_intf_tx(intf))
+ input = &mcam->tx_key_fields[type];
+
+ return input->nr_kws > 0;
+}
+
+static bool npc_is_same(struct npc_key_field *input,
+ struct npc_key_field *field)
+{
+ int ret;
+
+ ret = memcmp(&input->layer_mdata, &field->layer_mdata,
+ sizeof(struct npc_layer_mdata));
+ return ret == 0;
+}
+
+static void npc_set_layer_mdata(struct npc_mcam *mcam, enum key_fields type,
+ u64 cfg, u8 lid, u8 lt, u8 intf)
+{
+ struct npc_key_field *input = &mcam->rx_key_fields[type];
+
+ if (is_npc_intf_tx(intf))
+ input = &mcam->tx_key_fields[type];
+
+ input->layer_mdata.hdr = FIELD_GET(NPC_HDR_OFFSET, cfg);
+ input->layer_mdata.key = FIELD_GET(NPC_KEY_OFFSET, cfg);
+ input->layer_mdata.len = FIELD_GET(NPC_BYTESM, cfg) + 1;
+ input->layer_mdata.ltype = lt;
+ input->layer_mdata.lid = lid;
+}
+
+static bool npc_check_overlap_fields(struct npc_key_field *input1,
+ struct npc_key_field *input2)
+{
+ int kwi;
+
+ /* Fields with the same layer id and different ltypes are mutually
+ * exclusive, hence they are allowed to overlap
+ */
+ if (input1->layer_mdata.lid == input2->layer_mdata.lid &&
+ input1->layer_mdata.ltype != input2->layer_mdata.ltype)
+ return false;
+
+ for (kwi = 0; kwi < NPC_MAX_KWS_IN_KEY; kwi++) {
+ if (input1->kw_mask[kwi] & input2->kw_mask[kwi])
+ return true;
+ }
+
+ return false;
+}
+
+/* Helper function to check whether a given field overlaps with any other field
+ * in the key. Due to limitations on key size and the key extraction profile in
+ * use, higher layers can overwrite a lower layer's header fields; hence overlap
+ * needs to be checked.
+ */
+static bool npc_check_overlap(struct rvu *rvu, int blkaddr,
+ enum key_fields type, u8 start_lid, u8 intf)
+{
+ struct npc_mcam *mcam = &rvu->hw->mcam;
+ struct npc_key_field *dummy, *input;
+ int start_kwi, offset;
+ u8 nr_bits, lid, lt, ld;
+ u64 cfg;
+
+ dummy = &mcam->rx_key_fields[NPC_UNKNOWN];
+ input = &mcam->rx_key_fields[type];
+
+ if (is_npc_intf_tx(intf)) {
+ dummy = &mcam->tx_key_fields[NPC_UNKNOWN];
+ input = &mcam->tx_key_fields[type];
+ }
+
+ for (lid = start_lid; lid < NPC_MAX_LID; lid++) {
+ for (lt = 0; lt < NPC_MAX_LT; lt++) {
+ for (ld = 0; ld < NPC_MAX_LD; ld++) {
+ cfg = rvu_read64(rvu, blkaddr,
+ NPC_AF_INTFX_LIDX_LTX_LDX_CFG
+ (intf, lid, lt, ld));
+ if (!FIELD_GET(NPC_LDATA_EN, cfg))
+ continue;
+ memset(dummy, 0, sizeof(struct npc_key_field));
+ npc_set_layer_mdata(mcam, NPC_UNKNOWN, cfg,
+ lid, lt, intf);
+ /* exclude input */
+ if (npc_is_same(input, dummy))
+ continue;
+ start_kwi = dummy->layer_mdata.key / 8;
+ offset = (dummy->layer_mdata.key * 8) % 64;
+ nr_bits = dummy->layer_mdata.len * 8;
+ /* form KW masks */
+ npc_set_kw_masks(mcam, NPC_UNKNOWN, nr_bits,
+ start_kwi, offset, intf);
+ /* check whether any input field bits fall within
+ * any other field's bits.
+ */
+ if (npc_check_overlap_fields(dummy, input))
+ return true;
+ }
+ }
+ }
+
+ return false;
+}
+
+static int npc_check_field(struct rvu *rvu, int blkaddr, enum key_fields type,
+ u8 intf)
+{
+ if (!npc_is_field_present(rvu, type, intf) ||
+ npc_check_overlap(rvu, blkaddr, type, 0, intf))
+ return -EOPNOTSUPP;
+ return 0;
+}
+
+static void npc_scan_parse_result(struct npc_mcam *mcam, u8 bit_number,
+ u8 key_nibble, u8 intf)
+{
+ u8 offset = (key_nibble * 4) % 64; /* offset within key word */
+ u8 kwi = (key_nibble * 4) / 64; /* which word in key */
+ u8 nr_bits = 4; /* bits in a nibble */
+ u8 type;
+
+ switch (bit_number) {
+ case 0 ... 2:
+ type = NPC_CHAN;
+ break;
+ case 3:
+ type = NPC_ERRLEV;
+ break;
+ case 4 ... 5:
+ type = NPC_ERRCODE;
+ break;
+ case 6:
+ type = NPC_LXMB;
+ break;
+ /* check for LTYPE only, for now */
+ case 9:
+ type = NPC_LA;
+ break;
+ case 12:
+ type = NPC_LB;
+ break;
+ case 15:
+ type = NPC_LC;
+ break;
+ case 18:
+ type = NPC_LD;
+ break;
+ case 21:
+ type = NPC_LE;
+ break;
+ case 24:
+ type = NPC_LF;
+ break;
+ case 27:
+ type = NPC_LG;
+ break;
+ case 30:
+ type = NPC_LH;
+ break;
+ default:
+ return;
+ }
+ npc_set_kw_masks(mcam, type, nr_bits, kwi, offset, intf);
+}
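
Each enabled parse-result nibble occupies the next 4-bit slot of the key, so the keyword index and bit offset follow directly from key_nibble. For instance (key_nibble value hypothetical):

    /* key_nibble = 17: kwi = (17 * 4) / 64 = 1, offset = (17 * 4) % 64 = 4,
     * i.e. a 4-bit mask at bits [7:4] of kw[1].
     */
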
+
+static void npc_handle_multi_layer_fields(struct rvu *rvu, int blkaddr, u8 intf)
+{
+ struct npc_mcam *mcam = &rvu->hw->mcam;
+ struct npc_key_field *key_fields;
+ /* Ether type can come from three layers
+ * (ethernet, single tagged, double tagged)
+ */
+ struct npc_key_field *etype_ether;
+ struct npc_key_field *etype_tag1;
+ struct npc_key_field *etype_tag2;
+ /* Outer VLAN TCI can come from two layers
+ * (single tagged, double tagged)
+ */
+ struct npc_key_field *vlan_tag1;
+ struct npc_key_field *vlan_tag2;
+ u64 *features;
+ u8 start_lid;
+ int i;
+
+ key_fields = mcam->rx_key_fields;
+ features = &mcam->rx_features;
+
+ if (is_npc_intf_tx(intf)) {
+ key_fields = mcam->tx_key_fields;
+ features = &mcam->tx_features;
+ }
+
+ /* Handle header fields which can come from multiple layers, like
+ * etype and outer vlan tci. These fields should have the same position
+ * in the key; otherwise installing a mcam rule needs more than one
+ * entry, which complicates mcam space management.
+ */
+ etype_ether = &key_fields[NPC_ETYPE_ETHER];
+ etype_tag1 = &key_fields[NPC_ETYPE_TAG1];
+ etype_tag2 = &key_fields[NPC_ETYPE_TAG2];
+ vlan_tag1 = &key_fields[NPC_VLAN_TAG1];
+ vlan_tag2 = &key_fields[NPC_VLAN_TAG2];
+
+ /* if the programmed key profile does not extract Ethertype at all */
+ if (!etype_ether->nr_kws && !etype_tag1->nr_kws && !etype_tag2->nr_kws)
+ goto vlan_tci;
+
+ /* if the programmed key profile extracts Ethertype from one layer */
+ if (etype_ether->nr_kws && !etype_tag1->nr_kws && !etype_tag2->nr_kws)
+ key_fields[NPC_ETYPE] = *etype_ether;
+ if (!etype_ether->nr_kws && etype_tag1->nr_kws && !etype_tag2->nr_kws)
+ key_fields[NPC_ETYPE] = *etype_tag1;
+ if (!etype_ether->nr_kws && !etype_tag1->nr_kws && etype_tag2->nr_kws)
+ key_fields[NPC_ETYPE] = *etype_tag2;
+
+ /* if the programmed key profile extracts Ethertype from multiple layers */
+ if (etype_ether->nr_kws && etype_tag1->nr_kws) {
+ for (i = 0; i < NPC_MAX_KWS_IN_KEY; i++) {
+ if (etype_ether->kw_mask[i] != etype_tag1->kw_mask[i])
+ goto vlan_tci;
+ }
+ key_fields[NPC_ETYPE] = *etype_tag1;
+ }
+ if (etype_ether->nr_kws && etype_tag2->nr_kws) {
+ for (i = 0; i < NPC_MAX_KWS_IN_KEY; i++) {
+ if (etype_ether->kw_mask[i] != etype_tag2->kw_mask[i])
+ goto vlan_tci;
+ }
+ key_fields[NPC_ETYPE] = *etype_tag2;
+ }
+ if (etype_tag1->nr_kws && etype_tag2->nr_kws) {
+ for (i = 0; i < NPC_MAX_KWS_IN_KEY; i++) {
+ if (etype_tag1->kw_mask[i] != etype_tag2->kw_mask[i])
+ goto vlan_tci;
+ }
+ key_fields[NPC_ETYPE] = *etype_tag2;
+ }
+
+ /* check that none of the higher layers overwrite Ethertype */
+ start_lid = key_fields[NPC_ETYPE].layer_mdata.lid + 1;
+ if (npc_check_overlap(rvu, blkaddr, NPC_ETYPE, start_lid, intf))
+ goto vlan_tci;
+ *features |= BIT_ULL(NPC_ETYPE);
+vlan_tci:
+ /* if key profile does not extract outer vlan tci at all */
+ if (!vlan_tag1->nr_kws && !vlan_tag2->nr_kws)
+ goto done;
+
+ /* if key profile extracts outer vlan tci from one layer */
+ if (vlan_tag1->nr_kws && !vlan_tag2->nr_kws)
+ key_fields[NPC_OUTER_VID] = *vlan_tag1;
+ if (!vlan_tag1->nr_kws && vlan_tag2->nr_kws)
+ key_fields[NPC_OUTER_VID] = *vlan_tag2;
+
+ /* if key profile extracts outer vlan tci from multiple layers */
+ if (vlan_tag1->nr_kws && vlan_tag2->nr_kws) {
+ for (i = 0; i < NPC_MAX_KWS_IN_KEY; i++) {
+ if (vlan_tag1->kw_mask[i] != vlan_tag2->kw_mask[i])
+ goto done;
+ }
+ key_fields[NPC_OUTER_VID] = *vlan_tag2;
+ }
+ /* check that none of the higher layers overwrite outer vlan tci */
+ start_lid = key_fields[NPC_OUTER_VID].layer_mdata.lid + 1;
+ if (npc_check_overlap(rvu, blkaddr, NPC_OUTER_VID, start_lid, intf))
+ goto done;
+ *features |= BIT_ULL(NPC_OUTER_VID);
+done:
+ return;
+}
+
+static void npc_scan_ldata(struct rvu *rvu, int blkaddr, u8 lid,
+ u8 lt, u64 cfg, u8 intf)
+{
+ struct npc_mcam *mcam = &rvu->hw->mcam;
+ u8 hdr, key, nr_bytes, bit_offset;
+ u8 la_ltype, la_start;
+ /* starting KW index and starting bit position */
+ int start_kwi, offset;
+
+ nr_bytes = FIELD_GET(NPC_BYTESM, cfg) + 1;
+ hdr = FIELD_GET(NPC_HDR_OFFSET, cfg);
+ key = FIELD_GET(NPC_KEY_OFFSET, cfg);
+ start_kwi = key / 8;
+ offset = (key * 8) % 64;
+
+ /* For Tx, Layer A has NIX_INST_HDR_S(64 bytes) preceding
+ * ethernet header.
+ */
+ if (is_npc_intf_tx(intf)) {
+ la_ltype = NPC_LT_LA_IH_NIX_ETHER;
+ la_start = 8;
+ } else {
+ la_ltype = NPC_LT_LA_ETHER;
+ la_start = 0;
+ }
+
+#define NPC_SCAN_HDR(name, hlid, hlt, hstart, hlen) \
+do { \
+ if (lid == (hlid) && lt == (hlt)) { \
+ if ((hstart) >= hdr && \
+ ((hstart) + (hlen)) <= (hdr + nr_bytes)) { \
+ bit_offset = (hdr + nr_bytes - (hstart) - (hlen)) * 8; \
+ npc_set_layer_mdata(mcam, (name), cfg, lid, lt, intf); \
+ npc_set_kw_masks(mcam, (name), (hlen) * 8, \
+ start_kwi, offset + bit_offset, intf);\
+ } \
+ } \
+} while (0)
+
+ /* List LID, LTYPE, start offset from layer and length (in bytes) of
+ * packet header fields below.
+ * Example: Source IP is 4 bytes and starts at 12th byte of IP header
+ */
+ NPC_SCAN_HDR(NPC_SIP_IPV4, NPC_LID_LC, NPC_LT_LC_IP, 12, 4);
+ NPC_SCAN_HDR(NPC_DIP_IPV4, NPC_LID_LC, NPC_LT_LC_IP, 16, 4);
+ NPC_SCAN_HDR(NPC_SIP_IPV6, NPC_LID_LC, NPC_LT_LC_IP6, 8, 16);
+ NPC_SCAN_HDR(NPC_DIP_IPV6, NPC_LID_LC, NPC_LT_LC_IP6, 24, 16);
+ NPC_SCAN_HDR(NPC_SPORT_UDP, NPC_LID_LD, NPC_LT_LD_UDP, 0, 2);
+ NPC_SCAN_HDR(NPC_DPORT_UDP, NPC_LID_LD, NPC_LT_LD_UDP, 2, 2);
+ NPC_SCAN_HDR(NPC_SPORT_TCP, NPC_LID_LD, NPC_LT_LD_TCP, 0, 2);
+ NPC_SCAN_HDR(NPC_DPORT_TCP, NPC_LID_LD, NPC_LT_LD_TCP, 2, 2);
+ NPC_SCAN_HDR(NPC_SPORT_SCTP, NPC_LID_LD, NPC_LT_LD_SCTP, 0, 2);
+ NPC_SCAN_HDR(NPC_DPORT_SCTP, NPC_LID_LD, NPC_LT_LD_SCTP, 2, 2);
+ NPC_SCAN_HDR(NPC_ETYPE_ETHER, NPC_LID_LA, NPC_LT_LA_ETHER, 12, 2);
+ NPC_SCAN_HDR(NPC_ETYPE_TAG1, NPC_LID_LB, NPC_LT_LB_CTAG, 4, 2);
+ NPC_SCAN_HDR(NPC_ETYPE_TAG2, NPC_LID_LB, NPC_LT_LB_STAG_QINQ, 8, 2);
+ NPC_SCAN_HDR(NPC_VLAN_TAG1, NPC_LID_LB, NPC_LT_LB_CTAG, 2, 2);
+ NPC_SCAN_HDR(NPC_VLAN_TAG2, NPC_LID_LB, NPC_LT_LB_STAG_QINQ, 2, 2);
+ NPC_SCAN_HDR(NPC_DMAC, NPC_LID_LA, la_ltype, la_start, 6);
+ NPC_SCAN_HDR(NPC_SMAC, NPC_LID_LA, la_ltype, la_start, 6);
+ /* PF_FUNC is 2 bytes at 0th byte of NPC_LT_LA_IH_NIX_ETHER */
+ NPC_SCAN_HDR(NPC_PF_FUNC, NPC_LID_LA, NPC_LT_LA_IH_NIX_ETHER, 0, 2);
+}
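
NPC_SCAN_HDR records a field only when the extractor window covers it completely; bit_offset measures from the field's last byte to the end of the window, so a field at the window's tail lands at the lowest key bits. A worked example for the TCP destination port, assuming a hypothetical extractor pulling 8 bytes from the start of the TCP header:

    /* hdr = 0, nr_bytes = 8 (extractor window); hstart = 2, hlen = 2 (dport):
     * the field bytes [2,4) lie inside the window [0,8), and
     * bit_offset = (0 + 8 - 2 - 2) * 8 = 32
     * so the 16-bit mask is placed at (offset + 32) within the key words.
     */
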
+
+static void npc_set_features(struct rvu *rvu, int blkaddr, u8 intf)
+{
+ struct npc_mcam *mcam = &rvu->hw->mcam;
+ u64 *features = &mcam->rx_features;
+ u64 tcp_udp_sctp;
+ int err, hdr;
+
+ if (is_npc_intf_tx(intf))
+ features = &mcam->tx_features;
+
+ for (hdr = NPC_DMAC; hdr < NPC_HEADER_FIELDS_MAX; hdr++) {
+ err = npc_check_field(rvu, blkaddr, hdr, intf);
+ if (!err)
+ *features |= BIT_ULL(hdr);
+ }
+
+ tcp_udp_sctp = BIT_ULL(NPC_SPORT_TCP) | BIT_ULL(NPC_SPORT_UDP) |
+ BIT_ULL(NPC_DPORT_TCP) | BIT_ULL(NPC_DPORT_UDP) |
+ BIT_ULL(NPC_SPORT_SCTP) | BIT_ULL(NPC_DPORT_SCTP);
+
+ /* for tcp/udp/sctp the corresponding layer type should be in the key */
+ if (*features & tcp_udp_sctp)
+ if (npc_check_field(rvu, blkaddr, NPC_LD, intf))
+ *features &= ~tcp_udp_sctp;
+
+ /* for vlan the corresponding layer type should be in the key */
+ if (*features & BIT_ULL(NPC_OUTER_VID))
+ if (npc_check_field(rvu, blkaddr, NPC_LB, intf))
+ *features &= ~BIT_ULL(NPC_OUTER_VID);
+}
+
+/* Scan the key extraction profile and record how the fields of interest
+ * fill the key structure. Also verify that Channel and DMAC exist in the
+ * key and are not overwritten by other header fields.
+ */
+static int npc_scan_kex(struct rvu *rvu, int blkaddr, u8 intf)
+{
+ struct npc_mcam *mcam = &rvu->hw->mcam;
+ u8 lid, lt, ld, bitnr;
+ u8 key_nibble = 0;
+ u64 cfg;
+
+ /* Scan and note how the parse result is laid out in the key.
+ * A bit set in PARSE_NIBBLE_ENA selects one nibble of the parse
+ * result for the key; the enabled nibbles are concatenated into
+ * the key in order.
+ */
+ cfg = rvu_read64(rvu, blkaddr, NPC_AF_INTFX_KEX_CFG(intf));
+ cfg &= NPC_PARSE_NIBBLE;
+ for_each_set_bit(bitnr, (unsigned long *)&cfg, 31) {
+ npc_scan_parse_result(mcam, bitnr, key_nibble, intf);
+ key_nibble++;
+ }
+
+ /* Scan and note how layer data is going to be in key */
+ for (lid = 0; lid < NPC_MAX_LID; lid++) {
+ for (lt = 0; lt < NPC_MAX_LT; lt++) {
+ for (ld = 0; ld < NPC_MAX_LD; ld++) {
+ cfg = rvu_read64(rvu, blkaddr,
+ NPC_AF_INTFX_LIDX_LTX_LDX_CFG
+ (intf, lid, lt, ld));
+ if (!FIELD_GET(NPC_LDATA_EN, cfg))
+ continue;
+ npc_scan_ldata(rvu, blkaddr, lid, lt, cfg,
+ intf);
+ }
+ }
+ }
+
+ return 0;
+}
+
+static int npc_scan_verify_kex(struct rvu *rvu, int blkaddr)
+{
+ int err;
+
+ err = npc_scan_kex(rvu, blkaddr, NIX_INTF_RX);
+ if (err)
+ return err;
+
+ err = npc_scan_kex(rvu, blkaddr, NIX_INTF_TX);
+ if (err)
+ return err;
+
+ /* Channel is mandatory */
+ if (!npc_is_field_present(rvu, NPC_CHAN, NIX_INTF_RX)) {
+ dev_err(rvu->dev, "Channel not present in Key\n");
+ return -EINVAL;
+ }
+ /* check that none of the fields overwrite channel */
+ if (npc_check_overlap(rvu, blkaddr, NPC_CHAN, 0, NIX_INTF_RX)) {
+ dev_err(rvu->dev, "Channel cannot be overwritten\n");
+ return -EINVAL;
+ }
+ /* DMAC should be present in key for unicast filter to work */
+ if (!npc_is_field_present(rvu, NPC_DMAC, NIX_INTF_RX)) {
+ dev_err(rvu->dev, "DMAC not present in Key\n");
+ return -EINVAL;
+ }
+ /* check that none of the fields overwrite DMAC */
+ if (npc_check_overlap(rvu, blkaddr, NPC_DMAC, 0, NIX_INTF_RX)) {
+ dev_err(rvu->dev, "DMAC cannot be overwritten\n");
+ return -EINVAL;
+ }
+
+ npc_set_features(rvu, blkaddr, NIX_INTF_TX);
+ npc_set_features(rvu, blkaddr, NIX_INTF_RX);
+ npc_handle_multi_layer_fields(rvu, blkaddr, NIX_INTF_TX);
+ npc_handle_multi_layer_fields(rvu, blkaddr, NIX_INTF_RX);
+
+ return 0;
+}
+
+int npc_flow_steering_init(struct rvu *rvu, int blkaddr)
+{
+ struct npc_mcam *mcam = &rvu->hw->mcam;
+
+ INIT_LIST_HEAD(&mcam->mcam_rules);
+
+ return npc_scan_verify_kex(rvu, blkaddr);
+}
+
+static int npc_check_unsupported_flows(struct rvu *rvu, u64 features, u8 intf)
+{
+ struct npc_mcam *mcam = &rvu->hw->mcam;
+ u64 *mcam_features = &mcam->rx_features;
+ u64 unsupported;
+ u8 bit;
+
+ if (is_npc_intf_tx(intf))
+ mcam_features = &mcam->tx_features;
+
+ unsupported = (*mcam_features ^ features) & ~(*mcam_features);
+ if (unsupported) {
+ dev_info(rvu->dev, "Unsupported flow(s):\n");
+ for_each_set_bit(bit, (unsigned long *)&unsupported, 64)
+ dev_info(rvu->dev, "%s ", npc_get_field_name(bit));
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
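
The unsupported mask is the set of requested feature bits that the scanned key cannot provide; the XOR/AND-NOT pair isolates exactly those. A worked bit example with made-up 4-bit masks:

    /* mcam_features = 0b0110, requested = 0b1100:
     * xor                       = 0b1010
     * & ~mcam_features (0b1001) = 0b1000
     * -> only the requested-but-missing bit remains
     */
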
+
+/* npc_update_entry - Based on the masks generated during
+ * key scanning, update the given entry with the value and
+ * mask for the field of interest. HW can extract at most 16 bytes
+ * of a packet header, hence lo and hi are sufficient.
+ * When the field is 8 bytes or less, hi should be 0 for both
+ * value and mask.
+ *
+ * If an exact match of the value is required, the mask should be
+ * all 1's. Any bit that is 0 in the mask makes the corresponding
+ * value bit a don't-care.
+ */
+static void npc_update_entry(struct rvu *rvu, enum key_fields type,
+ struct mcam_entry *entry, u64 val_lo,
+ u64 val_hi, u64 mask_lo, u64 mask_hi, u8 intf)
+{
+ struct npc_mcam *mcam = &rvu->hw->mcam;
+ struct mcam_entry dummy = { {0} };
+ struct npc_key_field *field;
+ u64 kw1, kw2, kw3;
+ u8 shift;
+ int i;
+
+ field = &mcam->rx_key_fields[type];
+ if (is_npc_intf_tx(intf))
+ field = &mcam->tx_key_fields[type];
+
+ if (!field->nr_kws)
+ return;
+
+ for (i = 0; i < NPC_MAX_KWS_IN_KEY; i++) {
+ if (!field->kw_mask[i])
+ continue;
+ /* place key value in kw[x] */
+ shift = __ffs64(field->kw_mask[i]);
+ /* update entry value */
+ kw1 = (val_lo << shift) & field->kw_mask[i];
+ dummy.kw[i] = kw1;
+ /* update entry mask */
+ kw1 = (mask_lo << shift) & field->kw_mask[i];
+ dummy.kw_mask[i] = kw1;
+
+ if (field->nr_kws == 1)
+ break;
+ /* place remaining bits of key value in kw[x + 1] */
+ if (field->nr_kws == 2) {
+ /* update entry value */
+ kw2 = shift ? val_lo >> (64 - shift) : 0;
+ kw2 |= (val_hi << shift);
+ kw2 &= field->kw_mask[i + 1];
+ dummy.kw[i + 1] = kw2;
+ /* update entry mask */
+ kw2 = shift ? mask_lo >> (64 - shift) : 0;
+ kw2 |= (mask_hi << shift);
+ kw2 &= field->kw_mask[i + 1];
+ dummy.kw_mask[i + 1] = kw2;
+ break;
+ }
+ /* place remaining bits of key value in kw[x + 1], kw[x + 2] */
+ if (field->nr_kws == 3) {
+ /* update entry value */
+ kw2 = shift ? val_lo >> (64 - shift) : 0;
+ kw2 |= (val_hi << shift);
+ kw2 &= field->kw_mask[i + 1];
+ kw3 = shift ? val_hi >> (64 - shift) : 0;
+ kw3 &= field->kw_mask[i + 2];
+ dummy.kw[i + 1] = kw2;
+ dummy.kw[i + 2] = kw3;
+ /* update entry mask */
+ kw2 = shift ? mask_lo >> (64 - shift) : 0;
+ kw2 |= (mask_hi << shift);
+ kw2 &= field->kw_mask[i + 1];
+ kw3 = shift ? mask_hi >> (64 - shift) : 0;
+ kw3 &= field->kw_mask[i + 2];
+ dummy.kw_mask[i + 1] = kw2;
+ dummy.kw_mask[i + 2] = kw3;
+ break;
+ }
+ }
+ /* dummy now holds the values and masks for the given key
+ * field; clear and update the input entry with them.
+ */
+ for (i = 0; i < NPC_MAX_KWS_IN_KEY; i++) {
+ if (!field->kw_mask[i])
+ continue;
+ entry->kw[i] &= ~field->kw_mask[i];
+ entry->kw_mask[i] &= ~field->kw_mask[i];
+
+ entry->kw[i] |= dummy.kw[i];
+ entry->kw_mask[i] |= dummy.kw_mask[i];
+ }
+}
+
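+/* Editorial sketch (not part of the patch, hypothetical key layout):
+ * if a 48-bit field owns bits 16..63 of kw[0], __ffs64() finds the
+ * field's least significant bit and the value is shifted into place
+ * and clipped by the mask, exactly as npc_update_entry() does above:
+ */
+static void example_kw_placement(void)
+{
+ u64 kw_mask = 0xffffffffffff0000ULL; /* hypothetical field mask */
+ u64 val = 0x0242ac110002ULL; /* e.g. MAC 02:42:ac:11:00:02 */
+ u8 shift = __ffs64(kw_mask); /* 16 */
+ u64 kw = (val << shift) & kw_mask; /* value placed at bit 16 */
+}
+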
+#define IPV6_WORDS 4
+
+static void npc_update_ipv6_flow(struct rvu *rvu, struct mcam_entry *entry,
+ u64 features, struct flow_msg *pkt,
+ struct flow_msg *mask,
+ struct rvu_npc_mcam_rule *output, u8 intf)
+{
+ u32 src_ip[IPV6_WORDS], src_ip_mask[IPV6_WORDS];
+ u32 dst_ip[IPV6_WORDS], dst_ip_mask[IPV6_WORDS];
+ struct flow_msg *opkt = &output->packet;
+ struct flow_msg *omask = &output->mask;
+ u64 mask_lo, mask_hi;
+ u64 val_lo, val_hi;
+
+ /* For an ipv6 address fe80::2c68:63ff:fe5e:2d0a the packet
+ * values to be programmed in MCAM should be as below:
+ * val_high: 0xfe80000000000000
+ * val_low: 0x2c6863fffe5e2d0a
+ */
+ if (features & BIT_ULL(NPC_SIP_IPV6)) {
+ be32_to_cpu_array(src_ip_mask, mask->ip6src, IPV6_WORDS);
+ be32_to_cpu_array(src_ip, pkt->ip6src, IPV6_WORDS);
+
+ mask_hi = (u64)src_ip_mask[0] << 32 | src_ip_mask[1];
+ mask_lo = (u64)src_ip_mask[2] << 32 | src_ip_mask[3];
+ val_hi = (u64)src_ip[0] << 32 | src_ip[1];
+ val_lo = (u64)src_ip[2] << 32 | src_ip[3];
+
+ npc_update_entry(rvu, NPC_SIP_IPV6, entry, val_lo, val_hi,
+ mask_lo, mask_hi, intf);
+ memcpy(opkt->ip6src, pkt->ip6src, sizeof(opkt->ip6src));
+ memcpy(omask->ip6src, mask->ip6src, sizeof(omask->ip6src));
+ }
+ if (features & BIT_ULL(NPC_DIP_IPV6)) {
+ be32_to_cpu_array(dst_ip_mask, mask->ip6dst, IPV6_WORDS);
+ be32_to_cpu_array(dst_ip, pkt->ip6dst, IPV6_WORDS);
+
+ mask_hi = (u64)dst_ip_mask[0] << 32 | dst_ip_mask[1];
+ mask_lo = (u64)dst_ip_mask[2] << 32 | dst_ip_mask[3];
+ val_hi = (u64)dst_ip[0] << 32 | dst_ip[1];
+ val_lo = (u64)dst_ip[2] << 32 | dst_ip[3];
+
+ npc_update_entry(rvu, NPC_DIP_IPV6, entry, val_lo, val_hi,
+ mask_lo, mask_hi, intf);
+ memcpy(opkt->ip6dst, pkt->ip6dst, sizeof(opkt->ip6dst));
+ memcpy(omask->ip6dst, mask->ip6dst, sizeof(omask->ip6dst));
+ }
+}
+
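+/* Editorial sketch (not part of the patch): packing the example
+ * address from the comment above. be32_to_cpu_array() yields
+ * { 0xfe800000, 0x00000000, 0x2c6863ff, 0xfe5e2d0a } for
+ * fe80::2c68:63ff:fe5e:2d0a, which folds into two 64-bit halves:
+ */
+static void example_ipv6_packing(void)
+{
+ u32 ip[IPV6_WORDS] = { 0xfe800000, 0x00000000,
+ 0x2c6863ff, 0xfe5e2d0a };
+ u64 val_hi = (u64)ip[0] << 32 | ip[1]; /* 0xfe80000000000000 */
+ u64 val_lo = (u64)ip[2] << 32 | ip[3]; /* 0x2c6863fffe5e2d0a */
+}
+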
+static void npc_update_flow(struct rvu *rvu, struct mcam_entry *entry,
+ u64 features, struct flow_msg *pkt,
+ struct flow_msg *mask,
+ struct rvu_npc_mcam_rule *output, u8 intf)
+{
+ u64 dmac_mask = ether_addr_to_u64(mask->dmac);
+ u64 smac_mask = ether_addr_to_u64(mask->smac);
+ u64 dmac_val = ether_addr_to_u64(pkt->dmac);
+ u64 smac_val = ether_addr_to_u64(pkt->smac);
+ struct flow_msg *opkt = &output->packet;
+ struct flow_msg *omask = &output->mask;
+
+ if (!features)
+ return;
+
+ /* For tcp/udp/sctp, LTYPE must be present in the entry */
+ if (features & (BIT_ULL(NPC_SPORT_TCP) | BIT_ULL(NPC_DPORT_TCP)))
+ npc_update_entry(rvu, NPC_LD, entry, NPC_LT_LD_TCP,
+ 0, ~0ULL, 0, intf);
+ if (features & (BIT_ULL(NPC_SPORT_UDP) | BIT_ULL(NPC_DPORT_UDP)))
+ npc_update_entry(rvu, NPC_LD, entry, NPC_LT_LD_UDP,
+ 0, ~0ULL, 0, intf);
+ if (features & (BIT_ULL(NPC_SPORT_SCTP) | BIT_ULL(NPC_DPORT_SCTP)))
+ npc_update_entry(rvu, NPC_LD, entry, NPC_LT_LD_SCTP,
+ 0, ~0ULL, 0, intf);
+
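+ /* The value ORs the CTAG and STAG_QINQ LTYPEs while the mask
+ * keeps only their common bits, so a packet carrying either
+ * tag type matches.
+ */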
+ if (features & BIT_ULL(NPC_OUTER_VID))
+ npc_update_entry(rvu, NPC_LB, entry,
+ NPC_LT_LB_STAG_QINQ | NPC_LT_LB_CTAG, 0,
+ NPC_LT_LB_STAG_QINQ & NPC_LT_LB_CTAG, 0, intf);
+
+#define NPC_WRITE_FLOW(field, member, val_lo, val_hi, mask_lo, mask_hi) \
+do { \
+ if (features & BIT_ULL((field))) { \
+ npc_update_entry(rvu, (field), entry, (val_lo), (val_hi), \
+ (mask_lo), (mask_hi), intf); \
+ memcpy(&opkt->member, &pkt->member, sizeof(pkt->member)); \
+ memcpy(&omask->member, &mask->member, sizeof(mask->member)); \
+ } \
+} while (0)
+
+ NPC_WRITE_FLOW(NPC_DMAC, dmac, dmac_val, 0, dmac_mask, 0);
+ NPC_WRITE_FLOW(NPC_SMAC, smac, smac_val, 0, smac_mask, 0);
+ NPC_WRITE_FLOW(NPC_ETYPE, etype, ntohs(pkt->etype), 0,
+ ntohs(mask->etype), 0);
+ NPC_WRITE_FLOW(NPC_SIP_IPV4, ip4src, ntohl(pkt->ip4src), 0,
+ ntohl(mask->ip4src), 0);
+ NPC_WRITE_FLOW(NPC_DIP_IPV4, ip4dst, ntohl(pkt->ip4dst), 0,
+ ntohl(mask->ip4dst), 0);
+ NPC_WRITE_FLOW(NPC_SPORT_TCP, sport, ntohs(pkt->sport), 0,
+ ntohs(mask->sport), 0);
+ NPC_WRITE_FLOW(NPC_SPORT_UDP, sport, ntohs(pkt->sport), 0,
+ ntohs(mask->sport), 0);
+ NPC_WRITE_FLOW(NPC_DPORT_TCP, dport, ntohs(pkt->dport), 0,
+ ntohs(mask->dport), 0);
+ NPC_WRITE_FLOW(NPC_DPORT_UDP, dport, ntohs(pkt->dport), 0,
+ ntohs(mask->dport), 0);
+ NPC_WRITE_FLOW(NPC_SPORT_SCTP, sport, ntohs(pkt->sport), 0,
+ ntohs(mask->sport), 0);
+ NPC_WRITE_FLOW(NPC_DPORT_SCTP, dport, ntohs(pkt->dport), 0,
+ ntohs(mask->dport), 0);
+
+ NPC_WRITE_FLOW(NPC_OUTER_VID, vlan_tci, ntohs(pkt->vlan_tci), 0,
+ ntohs(mask->vlan_tci), 0);
+
+ npc_update_ipv6_flow(rvu, entry, features, pkt, mask, output, intf);
+}
+
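+/* Editorial note (not part of the patch): the preprocessor expands,
+ * for instance, NPC_WRITE_FLOW(NPC_ETYPE, etype, ...) above into the
+ * guarded update below, so each line both programs the entry and
+ * mirrors the match into the output rule:
+ *
+ * if (features & BIT_ULL(NPC_ETYPE)) {
+ * npc_update_entry(rvu, NPC_ETYPE, entry, ntohs(pkt->etype), 0,
+ * ntohs(mask->etype), 0, intf);
+ * memcpy(&opkt->etype, &pkt->etype, sizeof(pkt->etype));
+ * memcpy(&omask->etype, &mask->etype, sizeof(mask->etype));
+ * }
+ */
+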
+static struct rvu_npc_mcam_rule *rvu_mcam_find_rule(struct npc_mcam *mcam,
+ u16 entry)
+{
+ struct rvu_npc_mcam_rule *iter;
+
+ mutex_lock(&mcam->lock);
+ list_for_each_entry(iter, &mcam->mcam_rules, list) {
+ if (iter->entry == entry) {
+ mutex_unlock(&mcam->lock);
+ return iter;
+ }
+ }
+ mutex_unlock(&mcam->lock);
+
+ return NULL;
+}
+
+static void rvu_mcam_add_rule(struct npc_mcam *mcam,
+ struct rvu_npc_mcam_rule *rule)
+{
+ struct list_head *head = &mcam->mcam_rules;
+ struct rvu_npc_mcam_rule *iter;
+
+ mutex_lock(&mcam->lock);
+ list_for_each_entry(iter, &mcam->mcam_rules, list) {
+ if (iter->entry > rule->entry)
+ break;
+ head = &iter->list;
+ }
+
+ list_add(&rule->list, head);
+ mutex_unlock(&mcam->lock);
+}
+
+static void rvu_mcam_remove_counter_from_rule(struct rvu *rvu, u16 pcifunc,
+ struct rvu_npc_mcam_rule *rule)
+{
+ struct npc_mcam_oper_counter_req free_req = { 0 };
+ struct msg_rsp free_rsp;
+
+ if (!rule->has_cntr)
+ return;
+
+ free_req.hdr.pcifunc = pcifunc;
+ free_req.cntr = rule->cntr;
+
+ rvu_mbox_handler_npc_mcam_free_counter(rvu, &free_req, &free_rsp);
+ rule->has_cntr = false;
+}
+
+static void rvu_mcam_add_counter_to_rule(struct rvu *rvu, u16 pcifunc,
+ struct rvu_npc_mcam_rule *rule,
+ struct npc_install_flow_rsp *rsp)
+{
+ struct npc_mcam_alloc_counter_req cntr_req = { 0 };
+ struct npc_mcam_alloc_counter_rsp cntr_rsp = { 0 };
+ int err;
+
+ cntr_req.hdr.pcifunc = pcifunc;
+ cntr_req.contig = true;
+ cntr_req.count = 1;
+
+ /* Try to allocate a counter to track this rule's stats.
+ * If a counter cannot be allocated then proceed without
+ * one, since counters are scarcer than entries.
+ */
+ err = rvu_mbox_handler_npc_mcam_alloc_counter(rvu, &cntr_req,
+ &cntr_rsp);
+ if (!err && cntr_rsp.count) {
+ rule->cntr = cntr_rsp.cntr;
+ rule->has_cntr = true;
+ rsp->counter = rule->cntr;
+ } else {
+ rsp->counter = err;
+ }
+}
+
+static void npc_update_rx_entry(struct rvu *rvu, struct rvu_pfvf *pfvf,
+ struct mcam_entry *entry,
+ struct npc_install_flow_req *req, u16 target)
+{
+ struct nix_rx_action action;
+
+ npc_update_entry(rvu, NPC_CHAN, entry, req->channel, 0,
+ ~0ULL, 0, NIX_INTF_RX);
+
+ *(u64 *)&action = 0x00;
+ action.pf_func = target;
+ action.op = req->op;
+ action.index = req->index;
+ action.match_id = req->match_id;
+ action.flow_key_alg = req->flow_key_alg;
+
+ if (req->op == NIX_RX_ACTION_DEFAULT && pfvf->def_ucast_rule)
+ action = pfvf->def_ucast_rule->rx_action;
+
+ entry->action = *(u64 *)&action;
+
+ /* VTAG0 starts at 0th byte of LID_B.
+ * VTAG1 starts at 4th byte of LID_B.
+ */
+ entry->vtag_action = FIELD_PREP(RX_VTAG0_VALID_BIT, req->vtag0_valid) |
+ FIELD_PREP(RX_VTAG0_TYPE_MASK, req->vtag0_type) |
+ FIELD_PREP(RX_VTAG0_LID_MASK, NPC_LID_LB) |
+ FIELD_PREP(RX_VTAG0_RELPTR_MASK, 0) |
+ FIELD_PREP(RX_VTAG1_VALID_BIT, req->vtag1_valid) |
+ FIELD_PREP(RX_VTAG1_TYPE_MASK, req->vtag1_type) |
+ FIELD_PREP(RX_VTAG1_LID_MASK, NPC_LID_LB) |
+ FIELD_PREP(RX_VTAG1_RELPTR_MASK, 4);
+}
+
+static void npc_update_tx_entry(struct rvu *rvu, struct rvu_pfvf *pfvf,
+ struct mcam_entry *entry,
+ struct npc_install_flow_req *req, u16 target)
+{
+ struct nix_tx_action action;
+
+ npc_update_entry(rvu, NPC_PF_FUNC, entry, (__force u16)htons(target),
+ 0, ~0ULL, 0, NIX_INTF_TX);
+
+ *(u64 *)&action = 0x00;
+ action.op = req->op;
+ action.index = req->index;
+ action.match_id = req->match_id;
+
+ entry->action = *(u64 *)&action;
+
+ /* VTAG0 points at byte 20 relative to LID_A.
+ * VTAG1 points at byte 24 relative to LID_A.
+ */
+ entry->vtag_action = FIELD_PREP(TX_VTAG0_DEF_MASK, req->vtag0_def) |
+ FIELD_PREP(TX_VTAG0_OP_MASK, req->vtag0_op) |
+ FIELD_PREP(TX_VTAG0_LID_MASK, NPC_LID_LA) |
+ FIELD_PREP(TX_VTAG0_RELPTR_MASK, 20) |
+ FIELD_PREP(TX_VTAG1_DEF_MASK, req->vtag1_def) |
+ FIELD_PREP(TX_VTAG1_OP_MASK, req->vtag1_op) |
+ FIELD_PREP(TX_VTAG1_LID_MASK, NPC_LID_LA) |
+ FIELD_PREP(TX_VTAG1_RELPTR_MASK, 24);
+}
+
+static int npc_install_flow(struct rvu *rvu, int blkaddr, u16 target,
+ int nixlf, struct rvu_pfvf *pfvf,
+ struct npc_install_flow_req *req,
+ struct npc_install_flow_rsp *rsp, bool enable,
+ bool pf_set_vfs_mac)
+{
+ struct rvu_npc_mcam_rule *def_ucast_rule = pfvf->def_ucast_rule;
+ u64 features, installed_features, missing_features = 0;
+ struct npc_mcam_write_entry_req write_req = { 0 };
+ struct npc_mcam *mcam = &rvu->hw->mcam;
+ struct rvu_npc_mcam_rule dummy = { 0 };
+ struct rvu_npc_mcam_rule *rule;
+ bool new = false, msg_from_vf;
+ u16 owner = req->hdr.pcifunc;
+ struct msg_rsp write_rsp;
+ struct mcam_entry *entry;
+ int entry_index, err;
+
+ msg_from_vf = !!(owner & RVU_PFVF_FUNC_MASK);
+
+ installed_features = req->features;
+ features = req->features;
+ entry = &write_req.entry_data;
+ entry_index = req->entry;
+
+ npc_update_flow(rvu, entry, features, &req->packet, &req->mask, &dummy,
+ req->intf);
+
+ if (is_npc_intf_rx(req->intf))
+ npc_update_rx_entry(rvu, pfvf, entry, req, target);
+ else
+ npc_update_tx_entry(rvu, pfvf, entry, req, target);
+
+ /* Default unicast rules do not exist for TX */
+ if (is_npc_intf_tx(req->intf))
+ goto find_rule;
+
+ if (def_ucast_rule)
+ missing_features = (def_ucast_rule->features ^ features) &
+ def_ucast_rule->features;
+
+ if (req->default_rule && req->append) {
+ /* add to default rule */
+ if (missing_features)
+ npc_update_flow(rvu, entry, missing_features,
+ &def_ucast_rule->packet,
+ &def_ucast_rule->mask,
+ &dummy, req->intf);
+ enable = rvu_npc_write_default_rule(rvu, blkaddr,
+ nixlf, target,
+ pfvf->nix_rx_intf, entry,
+ &entry_index);
+ installed_features = req->features | missing_features;
+ } else if (req->default_rule && !req->append) {
+ /* overwrite default rule */
+ enable = rvu_npc_write_default_rule(rvu, blkaddr,
+ nixlf, target,
+ pfvf->nix_rx_intf, entry,
+ &entry_index);
+ } else if (msg_from_vf) {
+ /* normal rule: for a VF, also fold the default rule into it */
+ npc_update_flow(rvu, entry, missing_features,
+ &def_ucast_rule->packet, &def_ucast_rule->mask,
+ &dummy, req->intf);
+ installed_features = req->features | missing_features;
+ }
+
+find_rule:
+ rule = rvu_mcam_find_rule(mcam, entry_index);
+ if (!rule) {
+ rule = kzalloc(sizeof(*rule), GFP_KERNEL);
+ if (!rule)
+ return -ENOMEM;
+ new = true;
+ }
+ /* no counter for default rule */
+ if (req->default_rule)
+ goto update_rule;
+
+ /* allocate new counter if rule has no counter */
+ if (req->set_cntr && !rule->has_cntr)
+ rvu_mcam_add_counter_to_rule(rvu, owner, rule, rsp);
+
+ /* if the user wants to delete an existing counter for a rule
+ * then free the counter
+ */
+ if (!req->set_cntr && rule->has_cntr)
+ rvu_mcam_remove_counter_from_rule(rvu, owner, rule);
+
+ write_req.hdr.pcifunc = owner;
+ write_req.entry = req->entry;
+ write_req.intf = req->intf;
+ write_req.enable_entry = (u8)enable;
+ /* if counter is available then clear and use it */
+ if (req->set_cntr && rule->has_cntr) {
+ rvu_write64(rvu, blkaddr, NPC_AF_MATCH_STATX(rule->cntr), 0x00);
+ write_req.set_cntr = 1;
+ write_req.cntr = rule->cntr;
+ }
+
+ err = rvu_mbox_handler_npc_mcam_write_entry(rvu, &write_req,
+ &write_rsp);
+ if (err) {
+ rvu_mcam_remove_counter_from_rule(rvu, owner, rule);
+ if (new)
+ kfree(rule);
+ return err;
+ }
+update_rule:
+ memcpy(&rule->packet, &dummy.packet, sizeof(rule->packet));
+ memcpy(&rule->mask, &dummy.mask, sizeof(rule->mask));
+ rule->entry = entry_index;
+ memcpy(&rule->rx_action, &entry->action, sizeof(struct nix_rx_action));
+ if (is_npc_intf_tx(req->intf))
+ memcpy(&rule->tx_action, &entry->action,
+ sizeof(struct nix_tx_action));
+ rule->vtag_action = entry->vtag_action;
+ rule->features = installed_features;
+ rule->default_rule = req->default_rule;
+ rule->owner = owner;
+ rule->enable = enable;
+ if (is_npc_intf_tx(req->intf))
+ rule->intf = pfvf->nix_tx_intf;
+ else
+ rule->intf = pfvf->nix_rx_intf;
+
+ if (new)
+ rvu_mcam_add_rule(mcam, rule);
+ if (req->default_rule)
+ pfvf->def_ucast_rule = rule;
+
+ /* VF's MAC address is being changed via PF */
+ if (pf_set_vfs_mac) {
+ ether_addr_copy(pfvf->default_mac, req->packet.dmac);
+ ether_addr_copy(pfvf->mac_addr, req->packet.dmac);
+ }
+
+ if (pfvf->pf_set_vf_cfg && req->vtag0_type == NIX_AF_LFX_RX_VTAG_TYPE7)
+ rule->vfvlan_cfg = true;
+
+ return 0;
+}
+
+int rvu_mbox_handler_npc_install_flow(struct rvu *rvu,
+ struct npc_install_flow_req *req,
+ struct npc_install_flow_rsp *rsp)
+{
+ bool from_vf = !!(req->hdr.pcifunc & RVU_PFVF_FUNC_MASK);
+ int blkaddr, nixlf, err;
+ struct rvu_pfvf *pfvf;
+ bool pf_set_vfs_mac = false;
+ bool enable = true;
+ u16 target;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
+ if (blkaddr < 0) {
+ dev_err(rvu->dev, "%s: NPC block not implemented\n", __func__);
+ return -ENODEV;
+ }
+
+ if (!is_npc_interface_valid(rvu, req->intf))
+ return -EINVAL;
+
+ if (from_vf && req->default_rule)
+ return NPC_MCAM_PERM_DENIED;
+
+ /* Info for each PF/VF is maintained in struct rvu_pfvf.
+ * The rvu_pfvf of the target PF/VF must be retrieved,
+ * hence derive the target pcifunc accordingly.
+ */
+
+ /* AF installing for a PF/VF */
+ if (!req->hdr.pcifunc)
+ target = req->vf;
+ /* PF installing for its VF */
+ else if (!from_vf && req->vf) {
+ target = (req->hdr.pcifunc & ~RVU_PFVF_FUNC_MASK) | req->vf;
+ pf_set_vfs_mac = req->default_rule &&
+ (req->features & BIT_ULL(NPC_DMAC));
+ }
+ /* msg received from PF/VF */
+ else
+ target = req->hdr.pcifunc;
+
+ if (npc_check_unsupported_flows(rvu, req->features, req->intf))
+ return -EOPNOTSUPP;
+
+ if (npc_mcam_verify_channel(rvu, target, req->intf, req->channel))
+ return -EINVAL;
+
+ pfvf = rvu_get_pfvf(rvu, target);
+
+ /* PF installing for its VF */
+ if (req->hdr.pcifunc && !from_vf && req->vf)
+ pfvf->pf_set_vf_cfg = 1;
+
+ /* update req destination mac addr */
+ if ((req->features & BIT_ULL(NPC_DMAC)) && is_npc_intf_rx(req->intf) &&
+ is_zero_ether_addr(req->packet.dmac)) {
+ ether_addr_copy(req->packet.dmac, pfvf->mac_addr);
+ eth_broadcast_addr((u8 *)&req->mask.dmac);
+ }
+
+ err = nix_get_nixlf(rvu, target, &nixlf, NULL);
+
+ /* If interface is uninitialized then do not enable entry */
+ if (err || (!req->default_rule && !pfvf->def_ucast_rule))
+ enable = false;
+
+ /* Packets reaching NPC on the Tx path imply that a
+ * NIXLF is properly set up and transmitting.
+ * Hence rules can be enabled for Tx.
+ */
+ if (is_npc_intf_tx(req->intf))
+ enable = true;
+
+ /* Do not allow requests from uninitialized VFs */
+ if (from_vf && !enable)
+ return -EINVAL;
+
+ /* If message is from VF then its flow should not overlap with
+ * reserved unicast flow.
+ */
+ if (from_vf && pfvf->def_ucast_rule && is_npc_intf_rx(req->intf) &&
+ pfvf->def_ucast_rule->features & req->features)
+ return -EINVAL;
+
+ return npc_install_flow(rvu, blkaddr, target, nixlf, pfvf, req, rsp,
+ enable, pf_set_vfs_mac);
+}
+
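+/* Editorial sketch (not part of the patch): the target derivation in
+ * the handler above, assuming the RVU pcifunc layout used by this
+ * driver (PF number above RVU_PFVF_FUNC_MASK, VFs encoded as function
+ * index VF + 1):
+ */
+static u16 example_target(u16 pcifunc, u16 vf)
+{
+ bool from_vf = !!(pcifunc & RVU_PFVF_FUNC_MASK);
+
+ if (!pcifunc) /* AF installing on behalf of a PF/VF */
+ return vf;
+ if (!from_vf && vf) /* PF installing for its VF */
+ return (pcifunc & ~RVU_PFVF_FUNC_MASK) | vf;
+ return pcifunc; /* PF/VF installing for itself */
+}
+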
+static int npc_delete_flow(struct rvu *rvu, struct rvu_npc_mcam_rule *rule,
+ u16 pcifunc)
+{
+ struct npc_mcam_ena_dis_entry_req dis_req = { 0 };
+ struct msg_rsp dis_rsp;
+
+ if (rule->default_rule)
+ return 0;
+
+ if (rule->has_cntr)
+ rvu_mcam_remove_counter_from_rule(rvu, pcifunc, rule);
+
+ dis_req.hdr.pcifunc = pcifunc;
+ dis_req.entry = rule->entry;
+
+ list_del(&rule->list);
+ kfree(rule);
+
+ return rvu_mbox_handler_npc_mcam_dis_entry(rvu, &dis_req, &dis_rsp);
+}
+
+int rvu_mbox_handler_npc_delete_flow(struct rvu *rvu,
+ struct npc_delete_flow_req *req,
+ struct msg_rsp *rsp)
+{
+ struct npc_mcam *mcam = &rvu->hw->mcam;
+ struct rvu_npc_mcam_rule *iter, *tmp;
+ u16 pcifunc = req->hdr.pcifunc;
+ struct list_head del_list;
+
+ INIT_LIST_HEAD(&del_list);
+
+ mutex_lock(&mcam->lock);
+ list_for_each_entry_safe(iter, tmp, &mcam->mcam_rules, list) {
+ if (iter->owner == pcifunc) {
+ /* All rules */
+ if (req->all) {
+ list_move_tail(&iter->list, &del_list);
+ /* Range of rules */
+ } else if (req->end && iter->entry >= req->start &&
+ iter->entry <= req->end) {
+ list_move_tail(&iter->list, &del_list);
+ /* single rule */
+ } else if (req->entry == iter->entry) {
+ list_move_tail(&iter->list, &del_list);
+ break;
+ }
+ }
+ }
+ mutex_unlock(&mcam->lock);
+
+ list_for_each_entry_safe(iter, tmp, &del_list, list) {
+ u16 entry = iter->entry;
+
+ /* clear the mcam entry target pcifunc */
+ mcam->entry2target_pffunc[entry] = 0x0;
+ if (npc_delete_flow(rvu, iter, pcifunc))
+ dev_err(rvu->dev, "rule deletion failed for entry:%u",
+ entry);
+ }
+
+ return 0;
+}
+
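+/* Editorial sketch (not part of the patch): npc_delete_flow_req
+ * selects rules in three ways, checked in the order above. A ranged
+ * delete, for example, only needs start/end:
+ */
+static void example_ranged_delete(struct npc_delete_flow_req *req)
+{
+ req->start = 10; /* illustrative entry indices */
+ req->end = 20; /* non-zero end selects the range path */
+ /* req->all selects bulk delete; otherwise req->entry picks one */
+}
+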
+static int npc_update_dmac_value(struct rvu *rvu, int npcblkaddr,
+ struct rvu_npc_mcam_rule *rule,
+ struct rvu_pfvf *pfvf)
+{
+ struct npc_mcam_write_entry_req write_req = { 0 };
+ struct mcam_entry *entry = &write_req.entry_data;
+ struct npc_mcam *mcam = &rvu->hw->mcam;
+ struct msg_rsp rsp;
+ u8 intf, enable;
+ int err;
+
+ ether_addr_copy(rule->packet.dmac, pfvf->mac_addr);
+
+ npc_read_mcam_entry(rvu, mcam, npcblkaddr, rule->entry,
+ entry, &intf, &enable);
+
+ npc_update_entry(rvu, NPC_DMAC, entry,
+ ether_addr_to_u64(pfvf->mac_addr), 0,
+ 0xffffffffffffull, 0, intf);
+
+ write_req.hdr.pcifunc = rule->owner;
+ write_req.entry = rule->entry;
+
+ mutex_unlock(&mcam->lock);
+ err = rvu_mbox_handler_npc_mcam_write_entry(rvu, &write_req, &rsp);
+ mutex_lock(&mcam->lock);
+
+ return err;
+}
+
+void npc_mcam_enable_flows(struct rvu *rvu, u16 target)
+{
+ struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, target);
+ struct rvu_npc_mcam_rule *def_ucast_rule;
+ struct npc_mcam *mcam = &rvu->hw->mcam;
+ struct rvu_npc_mcam_rule *rule;
+ int blkaddr, bank, index;
+ u64 def_action;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
+ if (blkaddr < 0)
+ return;
+
+ def_ucast_rule = pfvf->def_ucast_rule;
+
+ mutex_lock(&mcam->lock);
+ list_for_each_entry(rule, &mcam->mcam_rules, list) {
+ if (is_npc_intf_rx(rule->intf) &&
+ rule->rx_action.pf_func == target && !rule->enable) {
+ if (rule->default_rule) {
+ npc_enable_mcam_entry(rvu, mcam, blkaddr,
+ rule->entry, true);
+ rule->enable = true;
+ continue;
+ }
+
+ if (rule->vfvlan_cfg)
+ npc_update_dmac_value(rvu, blkaddr, rule, pfvf);
+
+ if (rule->rx_action.op == NIX_RX_ACTION_DEFAULT) {
+ if (!def_ucast_rule)
+ continue;
+ /* Use default unicast entry action */
+ rule->rx_action = def_ucast_rule->rx_action;
+ def_action = *(u64 *)&def_ucast_rule->rx_action;
+ bank = npc_get_bank(mcam, rule->entry);
+ rvu_write64(rvu, blkaddr,
+ NPC_AF_MCAMEX_BANKX_ACTION
+ (rule->entry, bank), def_action);
+ }
+
+ npc_enable_mcam_entry(rvu, mcam, blkaddr,
+ rule->entry, true);
+ rule->enable = true;
+ }
+ }
+
+ /* Enable MCAM entries installed by PF with target as VF pcifunc */
+ for (index = 0; index < mcam->bmap_entries; index++) {
+ if (mcam->entry2target_pffunc[index] == target)
+ npc_enable_mcam_entry(rvu, mcam, blkaddr,
+ index, true);
+ }
+ mutex_unlock(&mcam->lock);
+}
+
+void npc_mcam_disable_flows(struct rvu *rvu, u16 target)
+{
+ struct npc_mcam *mcam = &rvu->hw->mcam;
+ int blkaddr, index;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
+ if (blkaddr < 0)
+ return;
+
+ mutex_lock(&mcam->lock);
+ /* Disable MCAM entries installed by PF with target as VF pcifunc */
+ for (index = 0; index < mcam->bmap_entries; index++) {
+ if (mcam->entry2target_pffunc[index] == target)
+ npc_enable_mcam_entry(rvu, mcam, blkaddr,
+ index, false);
+ }
+ mutex_unlock(&mcam->lock);
+}
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.c
index 9d7c135c7965..e266f0c49559 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.c
@@ -35,7 +35,7 @@ static struct hw_reg_map txsch_reg_map[NIX_TXSCH_LVL_CNT] = {
{0x1200, 0x12E0} } },
{NIX_TXSCH_LVL_TL3, 3, 0xFFFF, {{0x1000, 0x10E0}, {0x1600, 0x1608},
{0x1610, 0x1618} } },
- {NIX_TXSCH_LVL_TL2, 2, 0xFFFF, {{0x0E00, 0x0EE0}, {0x1700, 0x1768} } },
+ {NIX_TXSCH_LVL_TL2, 2, 0xFFFF, {{0x0E00, 0x0EE0}, {0x1700, 0x17B0} } },
{NIX_TXSCH_LVL_TL1, 1, 0xFFFF, {{0x0C00, 0x0D98} } },
};
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h
index 7ca599b973c0..0fb2aa909a23 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h
@@ -54,20 +54,20 @@
#define RVU_PRIV_PFX_MSIX_CFG(a) (0x8000110 | (a) << 16)
#define RVU_PRIV_PFX_ID_CFG(a) (0x8000120 | (a) << 16)
#define RVU_PRIV_PFX_INT_CFG(a) (0x8000200 | (a) << 16)
-#define RVU_PRIV_PFX_NIX0_CFG (0x8000300)
+#define RVU_PRIV_PFX_NIXX_CFG(a) (0x8000300 | (a) << 3)
#define RVU_PRIV_PFX_NPA_CFG (0x8000310)
#define RVU_PRIV_PFX_SSO_CFG (0x8000320)
#define RVU_PRIV_PFX_SSOW_CFG (0x8000330)
#define RVU_PRIV_PFX_TIM_CFG (0x8000340)
-#define RVU_PRIV_PFX_CPT0_CFG (0x8000350)
+#define RVU_PRIV_PFX_CPTX_CFG(a) (0x8000350 | (a) << 3)
#define RVU_PRIV_BLOCK_TYPEX_REV(a) (0x8000400 | (a) << 3)
#define RVU_PRIV_HWVFX_INT_CFG(a) (0x8001280 | (a) << 16)
-#define RVU_PRIV_HWVFX_NIX0_CFG (0x8001300)
+#define RVU_PRIV_HWVFX_NIXX_CFG(a) (0x8001300 | (a) << 3)
#define RVU_PRIV_HWVFX_NPA_CFG (0x8001310)
#define RVU_PRIV_HWVFX_SSO_CFG (0x8001320)
#define RVU_PRIV_HWVFX_SSOW_CFG (0x8001330)
#define RVU_PRIV_HWVFX_TIM_CFG (0x8001340)
-#define RVU_PRIV_HWVFX_CPT0_CFG (0x8001350)
+#define RVU_PRIV_HWVFX_CPTX_CFG(a) (0x8001350 | (a) << 3)
/* RVU PF registers */
#define RVU_PF_VFX_PFVF_MBOX0 (0x00000)
@@ -429,12 +429,63 @@
#define TIM_AF_LF_RST (0x20)
/* CPT */
-#define CPT_AF_CONSTANTS0 (0x0000)
-#define CPT_PRIV_LFX_CFG (0x41000)
-#define CPT_PRIV_LFX_INT_CFG (0x43000)
-#define CPT_AF_RVU_LF_CFG_DEBUG (0x45000)
-#define CPT_AF_LF_RST (0x44000)
-#define CPT_AF_BLK_RST (0x46000)
+#define CPT_AF_CONSTANTS0 (0x0000)
+#define CPT_AF_CONSTANTS1 (0x1000)
+#define CPT_AF_DIAG (0x3000)
+#define CPT_AF_ECO (0x4000)
+#define CPT_AF_FLTX_INT(a) (0xa000ull | (u64)(a) << 3)
+#define CPT_AF_FLTX_INT_W1S(a) (0xb000ull | (u64)(a) << 3)
+#define CPT_AF_FLTX_INT_ENA_W1C(a) (0xc000ull | (u64)(a) << 3)
+#define CPT_AF_FLTX_INT_ENA_W1S(a) (0xd000ull | (u64)(a) << 3)
+#define CPT_AF_PSNX_EXE(a) (0xe000ull | (u64)(a) << 3)
+#define CPT_AF_PSNX_EXE_W1S(a) (0xf000ull | (u64)(a) << 3)
+#define CPT_AF_PSNX_LF(a) (0x10000ull | (u64)(a) << 3)
+#define CPT_AF_PSNX_LF_W1S(a) (0x11000ull | (u64)(a) << 3)
+#define CPT_AF_EXEX_CTL2(a) (0x12000ull | (u64)(a) << 3)
+#define CPT_AF_EXEX_STS(a) (0x13000ull | (u64)(a) << 3)
+#define CPT_AF_EXE_ERR_INFO (0x14000)
+#define CPT_AF_EXEX_ACTIVE(a) (0x16000ull | (u64)(a) << 3)
+#define CPT_AF_INST_REQ_PC (0x17000)
+#define CPT_AF_INST_LATENCY_PC (0x18000)
+#define CPT_AF_RD_REQ_PC (0x19000)
+#define CPT_AF_RD_LATENCY_PC (0x1a000)
+#define CPT_AF_RD_UC_PC (0x1b000)
+#define CPT_AF_ACTIVE_CYCLES_PC (0x1c000)
+#define CPT_AF_EXE_DBG_CTL (0x1d000)
+#define CPT_AF_EXE_DBG_DATA (0x1e000)
+#define CPT_AF_EXE_REQ_TIMER (0x1f000)
+#define CPT_AF_EXEX_CTL(a) (0x20000ull | (u64)(a) << 3)
+#define CPT_AF_EXE_PERF_CTL (0x21000)
+#define CPT_AF_EXE_DBG_CNTX(a) (0x22000ull | (u64)(a) << 3)
+#define CPT_AF_EXE_PERF_EVENT_CNT (0x23000)
+#define CPT_AF_EXE_EPCI_INBX_CNT(a) (0x24000ull | (u64)(a) << 3)
+#define CPT_AF_EXE_EPCI_OUTBX_CNT(a) (0x25000ull | (u64)(a) << 3)
+#define CPT_AF_EXEX_UCODE_BASE(a) (0x26000ull | (u64)(a) << 3)
+#define CPT_AF_LFX_CTL(a) (0x27000ull | (u64)(a) << 3)
+#define CPT_AF_LFX_CTL2(a) (0x29000ull | (u64)(a) << 3)
+#define CPT_AF_CPTCLK_CNT (0x2a000)
+#define CPT_AF_PF_FUNC (0x2b000)
+#define CPT_AF_LFX_PTR_CTL(a) (0x2c000ull | (u64)(a) << 3)
+#define CPT_AF_GRPX_THR(a) (0x2d000ull | (u64)(a) << 3)
+#define CPT_AF_CTL (0x2e000ull)
+#define CPT_AF_XEX_THR(a) (0x2f000ull | (u64)(a) << 3)
+#define CPT_PRIV_LFX_CFG (0x41000)
+#define CPT_PRIV_AF_INT_CFG (0x42000)
+#define CPT_PRIV_LFX_INT_CFG (0x43000)
+#define CPT_AF_LF_RST (0x44000)
+#define CPT_AF_RVU_LF_CFG_DEBUG (0x45000)
+#define CPT_AF_BLK_RST (0x46000)
+#define CPT_AF_RVU_INT (0x47000)
+#define CPT_AF_RVU_INT_W1S (0x47008)
+#define CPT_AF_RVU_INT_ENA_W1S (0x47010)
+#define CPT_AF_RVU_INT_ENA_W1C (0x47018)
+#define CPT_AF_RAS_INT (0x47020)
+#define CPT_AF_RAS_INT_W1S (0x47028)
+#define CPT_AF_RAS_INT_ENA_W1S (0x47030)
+#define CPT_AF_RAS_INT_ENA_W1C (0x47038)
+
+#define CPT_AF_LF_CTL2_SHIFT 3
+#define CPT_AF_LF_SSO_PF_FUNC_SHIFT 32
#define NPC_AF_BLK_RST (0x00040)
@@ -446,6 +497,8 @@
#define NPC_AF_BLK_RST (0x00040)
#define NPC_AF_MCAM_SCRUB_CTL (0x000a0)
#define NPC_AF_KCAM_SCRUB_CTL (0x000b0)
+#define NPC_AF_CONST2 (0x00100)
+#define NPC_AF_CONST3 (0x00110)
#define NPC_AF_KPUX_CFG(a) (0x00500 | (a) << 3)
#define NPC_AF_PCK_CFG (0x00600)
#define NPC_AF_PCK_DEF_OL2 (0x00610)
@@ -469,20 +522,7 @@
(0x900000 | (a) << 16 | (b) << 12 | (c) << 5 | (d) << 3)
#define NPC_AF_INTFX_LDATAX_FLAGSX_CFG(a, b, c) \
(0x980000 | (a) << 16 | (b) << 12 | (c) << 3)
-#define NPC_AF_MCAMEX_BANKX_CAMX_INTF(a, b, c) \
- (0x1000000ull | (a) << 10 | (b) << 6 | (c) << 3)
-#define NPC_AF_MCAMEX_BANKX_CAMX_W0(a, b, c) \
- (0x1000010ull | (a) << 10 | (b) << 6 | (c) << 3)
-#define NPC_AF_MCAMEX_BANKX_CAMX_W1(a, b, c) \
- (0x1000020ull | (a) << 10 | (b) << 6 | (c) << 3)
-#define NPC_AF_MCAMEX_BANKX_CFG(a, b) (0x1800000ull | (a) << 8 | (b) << 4)
-#define NPC_AF_MCAMEX_BANKX_STAT_ACT(a, b) \
- (0x1880000 | (a) << 8 | (b) << 4)
-#define NPC_AF_MATCH_STATX(a) (0x1880008 | (a) << 8)
#define NPC_AF_INTFX_MISS_STAT_ACT(a) (0x1880040 + (a) * 0x8)
-#define NPC_AF_MCAMEX_BANKX_ACTION(a, b) (0x1900000ull | (a) << 8 | (b) << 4)
-#define NPC_AF_MCAMEX_BANKX_TAG_ACT(a, b) \
- (0x1900008 | (a) << 8 | (b) << 4)
#define NPC_AF_INTFX_MISS_ACT(a) (0x1a00000 | (a) << 4)
#define NPC_AF_INTFX_MISS_TAG_ACT(a) (0x1b00008 | (a) << 4)
#define NPC_AF_MCAM_BANKX_HITX(a, b) (0x1c80000 | (a) << 8 | (b) << 4)
@@ -499,6 +539,70 @@
#define NPC_AF_DBG_DATAX(a) (0x3001400 | (a) << 4)
#define NPC_AF_DBG_RESULTX(a) (0x3001800 | (a) << 4)
+#define NPC_AF_MCAMEX_BANKX_CAMX_INTF(a, b, c) ({ \
+ u64 offset; \
+ \
+ offset = (0x1000000ull | (a) << 10 | (b) << 6 | (c) << 3); \
+ if (rvu->hw->npc_ext_set) \
+ offset = (0x8000000ull | (a) << 8 | (b) << 22 | (c) << 3); \
+ offset; })
+
+#define NPC_AF_MCAMEX_BANKX_CAMX_W0(a, b, c) ({ \
+ u64 offset; \
+ \
+ offset = (0x1000010ull | (a) << 10 | (b) << 6 | (c) << 3); \
+ if (rvu->hw->npc_ext_set) \
+ offset = (0x8000010ull | (a) << 8 | (b) << 22 | (c) << 3); \
+ offset; })
+
+#define NPC_AF_MCAMEX_BANKX_CAMX_W1(a, b, c) ({ \
+ u64 offset; \
+ \
+ offset = (0x1000020ull | (a) << 10 | (b) << 6 | (c) << 3); \
+ if (rvu->hw->npc_ext_set) \
+ offset = (0x8000020ull | (a) << 8 | (b) << 22 | (c) << 3); \
+ offset; })
+
+#define NPC_AF_MCAMEX_BANKX_CFG(a, b) ({ \
+ u64 offset; \
+ \
+ offset = (0x1800000ull | (a) << 8 | (b) << 4); \
+ if (rvu->hw->npc_ext_set) \
+ offset = (0x8000038ull | (a) << 8 | (b) << 22); \
+ offset; })
+
+#define NPC_AF_MCAMEX_BANKX_ACTION(a, b) ({ \
+ u64 offset; \
+ \
+ offset = (0x1900000ull | (a) << 8 | (b) << 4); \
+ if (rvu->hw->npc_ext_set) \
+ offset = (0x8000040ull | (a) << 8 | (b) << 22); \
+ offset; })
+
+#define NPC_AF_MCAMEX_BANKX_TAG_ACT(a, b) ({ \
+ u64 offset; \
+ \
+ offset = (0x1900008ull | (a) << 8 | (b) << 4); \
+ if (rvu->hw->npc_ext_set) \
+ offset = (0x8000048ull | (a) << 8 | (b) << 22); \
+ offset; })
+
+#define NPC_AF_MCAMEX_BANKX_STAT_ACT(a, b) ({ \
+ u64 offset; \
+ \
+ offset = (0x1880000ull | (a) << 8 | (b) << 4); \
+ if (rvu->hw->npc_ext_set) \
+ offset = (0x8000050ull | (a) << 8 | (b) << 22); \
+ offset; })
+
+#define NPC_AF_MATCH_STATX(a) ({ \
+ u64 offset; \
+ \
+ offset = (0x1880008ull | (a) << 8); \
+ if (rvu->hw->npc_ext_set) \
+ offset = (0x8000078ull | (a) << 8); \
+ offset; })
+
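+/* Editorial note (not part of the patch): the MCAM offsets above are
+ * GCC statement expressions that read the in-scope 'rvu' pointer at
+ * each use and switch to the extended offsets when hw->npc_ext_set
+ * is set, e.g.:
+ *
+ * rvu_write64(rvu, blkaddr, NPC_AF_MATCH_STATX(cntr), 0);
+ */
+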
/* NDC */
#define NDC_AF_CONST (0x00000)
#define NDC_AF_CLK_EN (0x00020)
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_struct.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu_struct.h
index a3ecb5de9000..723643868589 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_struct.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_struct.h
@@ -14,6 +14,8 @@
/* RVU Block revision IDs */
#define RVU_BLK_RVUM_REVID 0x01
+#define RVU_MULTI_BLK_VER 0x7ULL
+
/* RVU Block Address Enumeration */
enum rvu_block_addr_e {
BLKADDR_RVUM = 0x0ULL,
@@ -31,7 +33,9 @@ enum rvu_block_addr_e {
BLKADDR_NDC_NIX0_RX = 0xcULL,
BLKADDR_NDC_NIX0_TX = 0xdULL,
BLKADDR_NDC_NPA0 = 0xeULL,
- BLK_COUNT = 0xfULL,
+ BLKADDR_NDC_NIX1_RX = 0x10ULL,
+ BLKADDR_NDC_NIX1_TX = 0x11ULL,
+ BLK_COUNT = 0x12ULL,
};
/* RVU Block Type Enumeration */
@@ -917,4 +921,15 @@ enum nix_vtag_size {
VTAGSIZE_T4 = 0x0,
VTAGSIZE_T8 = 0x1,
};
+
+enum nix_tx_vtag_op {
+ NOP = 0x0,
+ VTAG_INSERT = 0x1,
+ VTAG_REPLACE = 0x2,
+};
+
+/* NIX RX VTAG actions */
+#define VTAG_STRIP BIT_ULL(4)
+#define VTAG_CAPTURE BIT_ULL(5)
+
#endif /* RVU_STRUCT_H */
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/Makefile b/drivers/net/ethernet/marvell/octeontx2/nic/Makefile
index b2c6385707c9..4193ae3bde6b 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/Makefile
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/Makefile
@@ -7,7 +7,7 @@ obj-$(CONFIG_OCTEONTX2_PF) += octeontx2_nicpf.o
obj-$(CONFIG_OCTEONTX2_VF) += octeontx2_nicvf.o
octeontx2_nicpf-y := otx2_pf.o otx2_common.o otx2_txrx.o otx2_ethtool.o \
- otx2_ptp.o
+ otx2_ptp.o otx2_flows.o
octeontx2_nicvf-y := otx2_vf.o
ccflags-y += -I$(srctree)/drivers/net/ethernet/marvell/octeontx2/af
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
index d2581090f9a4..73fb94dd5fbc 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
@@ -191,10 +191,14 @@ int otx2_set_mac_address(struct net_device *netdev, void *p)
if (!is_valid_ether_addr(addr->sa_data))
return -EADDRNOTAVAIL;
- if (!otx2_hw_set_mac_addr(pfvf, addr->sa_data))
+ if (!otx2_hw_set_mac_addr(pfvf, addr->sa_data)) {
memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
- else
+ /* update dmac field in vlan offload rule */
+ if (pfvf->flags & OTX2_FLAG_RX_VLAN_SUPPORT)
+ otx2_install_rxvlan_offload_flow(pfvf);
+ } else {
return -EPERM;
+ }
return 0;
}
@@ -355,7 +359,8 @@ int otx2_rss_init(struct otx2_nic *pfvf)
rss->flowkey_cfg = rss->enable ? rss->flowkey_cfg :
NIX_FLOW_KEY_TYPE_IPV4 | NIX_FLOW_KEY_TYPE_IPV6 |
NIX_FLOW_KEY_TYPE_TCP | NIX_FLOW_KEY_TYPE_UDP |
- NIX_FLOW_KEY_TYPE_SCTP | NIX_FLOW_KEY_TYPE_VLAN;
+ NIX_FLOW_KEY_TYPE_SCTP | NIX_FLOW_KEY_TYPE_VLAN |
+ NIX_FLOW_KEY_TYPE_IPV4_PROTO;
ret = otx2_set_flowkey_cfg(pfvf);
if (ret)
@@ -531,8 +536,10 @@ static int otx2_get_link(struct otx2_nic *pfvf)
link = 4 * ((map >> 8) & 0xF) + ((map >> 4) & 0xF);
}
/* LBK channel */
- if (pfvf->hw.tx_chan_base < SDP_CHAN_BASE)
- link = 12;
+ if (pfvf->hw.tx_chan_base < SDP_CHAN_BASE) {
+ map = pfvf->hw.tx_chan_base & 0x7FF;
+ link = pfvf->hw.cgx_links | ((map >> 8) & 0xF);
+ }
return link;
}
@@ -1237,7 +1244,7 @@ int otx2_sq_aura_pool_init(struct otx2_nic *pfvf)
sq = &qset->sq[qidx];
sq->sqb_count = 0;
- sq->sqb_ptrs = kcalloc(num_sqbs, sizeof(u64 *), GFP_KERNEL);
+ sq->sqb_ptrs = kcalloc(num_sqbs, sizeof(*sq->sqb_ptrs), GFP_KERNEL);
if (!sq->sqb_ptrs)
return -ENOMEM;
@@ -1503,6 +1510,8 @@ void mbox_handler_nix_lf_alloc(struct otx2_nic *pfvf,
pfvf->hw.tx_chan_base = rsp->tx_chan_base;
pfvf->hw.lso_tsov4_idx = rsp->lso_tsov4_idx;
pfvf->hw.lso_tsov6_idx = rsp->lso_tsov6_idx;
+ pfvf->hw.cgx_links = rsp->cgx_links;
+ pfvf->hw.lbk_links = rsp->lbk_links;
}
EXPORT_SYMBOL(mbox_handler_nix_lf_alloc);
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
index d6253f2a414d..103430400a8a 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
@@ -11,13 +11,16 @@
#ifndef OTX2_COMMON_H
#define OTX2_COMMON_H
+#include <linux/ethtool.h>
#include <linux/pci.h>
#include <linux/iommu.h>
#include <linux/net_tstamp.h>
#include <linux/ptp_clock_kernel.h>
#include <linux/timecounter.h>
+#include <linux/soc/marvell/octeontx2/asm.h>
#include <mbox.h>
+#include <npc.h>
#include "otx2_reg.h"
#include "otx2_txrx.h"
#include <rvu_trace.h>
@@ -197,12 +200,17 @@ struct otx2_hw {
struct otx2_drv_stats drv_stats;
u64 cgx_rx_stats[CGX_RX_STATS_COUNT];
u64 cgx_tx_stats[CGX_TX_STATS_COUNT];
+ u8 cgx_links; /* No. of CGX links present in HW */
+ u8 lbk_links; /* No. of LBK links present in HW */
};
struct otx2_vf_config {
struct otx2_nic *pf;
struct delayed_work link_event_work;
bool intf_down; /* interface was either configured or not */
+ u8 mac[ETH_ALEN];
+ u16 vlan;
+ int tx_vtag_idx;
};
struct flr_work {
@@ -226,6 +234,32 @@ struct otx2_ptp {
#define OTX2_HW_TIMESTAMP_LEN 8
+struct otx2_mac_table {
+ u8 addr[ETH_ALEN];
+ u16 mcam_entry;
+ bool inuse;
+};
+
+struct otx2_flow_config {
+ u16 entry[NPC_MAX_NONCONTIG_ENTRIES];
+ u32 nr_flows;
+#define OTX2_MAX_NTUPLE_FLOWS 32
+#define OTX2_MAX_UNICAST_FLOWS 8
+#define OTX2_MAX_VLAN_FLOWS 1
+#define OTX2_MCAM_COUNT (OTX2_MAX_NTUPLE_FLOWS + \
+ OTX2_MAX_UNICAST_FLOWS + \
+ OTX2_MAX_VLAN_FLOWS)
+ u32 ntuple_offset;
+ u32 unicast_offset;
+ u32 rx_vlan_offset;
+ u32 vf_vlan_offset;
+#define OTX2_PER_VF_VLAN_FLOWS 2 /* rx+tx per VF */
+#define OTX2_VF_VLAN_RX_INDEX 0
+#define OTX2_VF_VLAN_TX_INDEX 1
+ u32 ntuple_max_flows;
+ struct list_head flow_list;
+};
+
struct otx2_nic {
void __iomem *reg_base;
struct net_device *netdev;
@@ -236,6 +270,12 @@ struct otx2_nic {
#define OTX2_FLAG_RX_TSTAMP_ENABLED BIT_ULL(0)
#define OTX2_FLAG_TX_TSTAMP_ENABLED BIT_ULL(1)
#define OTX2_FLAG_INTF_DOWN BIT_ULL(2)
+#define OTX2_FLAG_MCAM_ENTRIES_ALLOC BIT_ULL(3)
+#define OTX2_FLAG_NTUPLE_SUPPORT BIT_ULL(4)
+#define OTX2_FLAG_UCAST_FLTR_SUPPORT BIT_ULL(5)
+#define OTX2_FLAG_RX_VLAN_SUPPORT BIT_ULL(6)
+#define OTX2_FLAG_VF_VLAN_SUPPORT BIT_ULL(7)
+#define OTX2_FLAG_PF_SHUTDOWN BIT_ULL(8)
#define OTX2_FLAG_RX_PAUSE_ENABLED BIT_ULL(9)
#define OTX2_FLAG_TX_PAUSE_ENABLED BIT_ULL(10)
u64 flags;
@@ -264,6 +304,7 @@ struct otx2_nic {
struct refill_work *refill_wrk;
struct workqueue_struct *otx2_wq;
struct work_struct rx_mode_work;
+ struct otx2_mac_table *mac_table;
/* Ethtool stuff */
u32 msg_enable;
@@ -273,6 +314,8 @@ struct otx2_nic {
struct otx2_ptp *ptp;
struct hwtstamp_config tstamp;
+
+ struct otx2_flow_config *flow_cfg;
};
static inline bool is_otx2_lbkvf(struct pci_dev *pdev)
@@ -421,21 +464,9 @@ static inline u64 otx2_atomic64_add(u64 incr, u64 *ptr)
return result;
}
-static inline u64 otx2_lmt_flush(uint64_t addr)
-{
- u64 result = 0;
-
- __asm__ volatile(".cpu generic+lse\n"
- "ldeor xzr,%x[rf],[%[rs]]"
- : [rf]"=r"(result)
- : [rs]"r"(addr));
- return result;
-}
-
#else
#define otx2_write128(lo, hi, addr)
#define otx2_atomic64_add(incr, ptr) ({ *ptr += incr; })
-#define otx2_lmt_flush(addr) ({ 0; })
#endif
/* Alloc pointer from pool/aura */
@@ -642,4 +673,24 @@ int otx2_open(struct net_device *netdev);
int otx2_stop(struct net_device *netdev);
int otx2_set_real_num_queues(struct net_device *netdev,
int tx_queues, int rx_queues);
+/* MCAM filter related APIs */
+int otx2_mcam_flow_init(struct otx2_nic *pf);
+int otx2_alloc_mcam_entries(struct otx2_nic *pfvf);
+void otx2_mcam_flow_del(struct otx2_nic *pf);
+int otx2_destroy_ntuple_flows(struct otx2_nic *pf);
+int otx2_destroy_mcam_flows(struct otx2_nic *pfvf);
+int otx2_get_flow(struct otx2_nic *pfvf,
+ struct ethtool_rxnfc *nfc, u32 location);
+int otx2_get_all_flows(struct otx2_nic *pfvf,
+ struct ethtool_rxnfc *nfc, u32 *rule_locs);
+int otx2_add_flow(struct otx2_nic *pfvf,
+ struct ethtool_rx_flow_spec *fsp);
+int otx2_remove_flow(struct otx2_nic *pfvf, u32 location);
+int otx2_prepare_flow_request(struct ethtool_rx_flow_spec *fsp,
+ struct npc_install_flow_req *req);
+int otx2_del_macfilter(struct net_device *netdev, const u8 *mac);
+int otx2_add_macfilter(struct net_device *netdev, const u8 *mac);
+int otx2_enable_rxvlan(struct otx2_nic *pf, bool enable);
+int otx2_install_rxvlan_offload_flow(struct otx2_nic *pfvf);
+
#endif /* OTX2_COMMON_H */
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c
index 662fb80dbb9d..67171b66a56c 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c
@@ -551,6 +551,16 @@ static int otx2_get_rxnfc(struct net_device *dev,
nfc->data = pfvf->hw.rx_queues;
ret = 0;
break;
+ case ETHTOOL_GRXCLSRLCNT:
+ nfc->rule_cnt = pfvf->flow_cfg->nr_flows;
+ ret = 0;
+ break;
+ case ETHTOOL_GRXCLSRULE:
+ ret = otx2_get_flow(pfvf, nfc, nfc->fs.location);
+ break;
+ case ETHTOOL_GRXCLSRLALL:
+ ret = otx2_get_all_flows(pfvf, nfc, rules);
+ break;
case ETHTOOL_GRXFH:
return otx2_get_rss_hash_opts(pfvf, nfc);
default:
@@ -561,6 +571,50 @@ static int otx2_get_rxnfc(struct net_device *dev,
static int otx2_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *nfc)
{
+ bool ntuple = !!(dev->features & NETIF_F_NTUPLE);
+ struct otx2_nic *pfvf = netdev_priv(dev);
+ int ret = -EOPNOTSUPP;
+
+ switch (nfc->cmd) {
+ case ETHTOOL_SRXFH:
+ ret = otx2_set_rss_hash_opts(pfvf, nfc);
+ break;
+ case ETHTOOL_SRXCLSRLINS:
+ if (netif_running(dev) && ntuple)
+ ret = otx2_add_flow(pfvf, &nfc->fs);
+ break;
+ case ETHTOOL_SRXCLSRLDEL:
+ if (netif_running(dev) && ntuple)
+ ret = otx2_remove_flow(pfvf, nfc->fs.location);
+ break;
+ default:
+ break;
+ }
+
+ return ret;
+}
+
+static int otx2vf_get_rxnfc(struct net_device *dev,
+ struct ethtool_rxnfc *nfc, u32 *rules)
+{
+ struct otx2_nic *pfvf = netdev_priv(dev);
+ int ret = -EOPNOTSUPP;
+
+ switch (nfc->cmd) {
+ case ETHTOOL_GRXRINGS:
+ nfc->data = pfvf->hw.rx_queues;
+ ret = 0;
+ break;
+ case ETHTOOL_GRXFH:
+ return otx2_get_rss_hash_opts(pfvf, nfc);
+ default:
+ break;
+ }
+ return ret;
+}
+
+static int otx2vf_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *nfc)
+{
struct otx2_nic *pfvf = netdev_priv(dev);
int ret = -EOPNOTSUPP;
@@ -806,8 +860,8 @@ static const struct ethtool_ops otx2vf_ethtool_ops = {
.get_sset_count = otx2vf_get_sset_count,
.set_channels = otx2_set_channels,
.get_channels = otx2_get_channels,
- .get_rxnfc = otx2_get_rxnfc,
- .set_rxnfc = otx2_set_rxnfc,
+ .get_rxnfc = otx2vf_get_rxnfc,
+ .set_rxnfc = otx2vf_set_rxnfc,
.get_rxfh_key_size = otx2_get_rxfh_key_size,
.get_rxfh_indir_size = otx2_get_rxfh_indir_size,
.get_rxfh = otx2_get_rxfh,
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c
new file mode 100644
index 000000000000..be8ccfce1848
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c
@@ -0,0 +1,820 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell OcteonTx2 RVU Physical Function ethernet driver
+ *
+ * Copyright (C) 2020 Marvell.
+ */
+
+#include <net/ipv6.h>
+
+#include "otx2_common.h"
+
+#define OTX2_DEFAULT_ACTION 0x1
+
+struct otx2_flow {
+ struct ethtool_rx_flow_spec flow_spec;
+ struct list_head list;
+ u32 location;
+ u16 entry;
+ bool is_vf;
+ int vf;
+};
+
+int otx2_alloc_mcam_entries(struct otx2_nic *pfvf)
+{
+ struct otx2_flow_config *flow_cfg = pfvf->flow_cfg;
+ struct npc_mcam_alloc_entry_req *req;
+ struct npc_mcam_alloc_entry_rsp *rsp;
+ int vf_vlan_max_flows;
+ int i;
+
+ mutex_lock(&pfvf->mbox.lock);
+
+ req = otx2_mbox_alloc_msg_npc_mcam_alloc_entry(&pfvf->mbox);
+ if (!req) {
+ mutex_unlock(&pfvf->mbox.lock);
+ return -ENOMEM;
+ }
+
+ vf_vlan_max_flows = pfvf->total_vfs * OTX2_PER_VF_VLAN_FLOWS;
+ req->contig = false;
+ req->count = OTX2_MCAM_COUNT + vf_vlan_max_flows;
+
+ /* Send message to AF */
+ if (otx2_sync_mbox_msg(&pfvf->mbox)) {
+ mutex_unlock(&pfvf->mbox.lock);
+ return -EINVAL;
+ }
+
+ rsp = (struct npc_mcam_alloc_entry_rsp *)otx2_mbox_get_rsp
+ (&pfvf->mbox.mbox, 0, &req->hdr);
+
+ if (rsp->count != req->count) {
+ netdev_info(pfvf->netdev,
+ "Unable to allocate %d MCAM entries, got %d\n",
+ req->count, rsp->count);
+ /* support only ntuples here */
+ flow_cfg->ntuple_max_flows = rsp->count;
+ flow_cfg->ntuple_offset = 0;
+ pfvf->flags |= OTX2_FLAG_NTUPLE_SUPPORT;
+ } else {
+ flow_cfg->vf_vlan_offset = 0;
+ flow_cfg->ntuple_offset = flow_cfg->vf_vlan_offset +
+ vf_vlan_max_flows;
+ flow_cfg->unicast_offset = flow_cfg->ntuple_offset +
+ OTX2_MAX_NTUPLE_FLOWS;
+ flow_cfg->rx_vlan_offset = flow_cfg->unicast_offset +
+ OTX2_MAX_UNICAST_FLOWS;
+ pfvf->flags |= OTX2_FLAG_NTUPLE_SUPPORT;
+ pfvf->flags |= OTX2_FLAG_UCAST_FLTR_SUPPORT;
+ pfvf->flags |= OTX2_FLAG_RX_VLAN_SUPPORT;
+ pfvf->flags |= OTX2_FLAG_VF_VLAN_SUPPORT;
+ }
+
+ for (i = 0; i < rsp->count; i++)
+ flow_cfg->entry[i] = rsp->entry_list[i];
+
+ pfvf->flags |= OTX2_FLAG_MCAM_ENTRIES_ALLOC;
+
+ mutex_unlock(&pfvf->mbox.lock);
+
+ return 0;
+}
+
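+/* Editorial sketch (not part of the patch): entry[] layout after a
+ * full allocation, assuming 8 VFs (8 * OTX2_PER_VF_VLAN_FLOWS = 16):
+ *
+ * [0 .. 15] VF VLAN (rx + tx per VF)
+ * [16 .. 47] ntuple (OTX2_MAX_NTUPLE_FLOWS)
+ * [48 .. 55] unicast (OTX2_MAX_UNICAST_FLOWS)
+ * [56] rx vlan (OTX2_MAX_VLAN_FLOWS)
+ */
+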
+int otx2_mcam_flow_init(struct otx2_nic *pf)
+{
+ int err;
+
+ pf->flow_cfg = devm_kzalloc(pf->dev, sizeof(struct otx2_flow_config),
+ GFP_KERNEL);
+ if (!pf->flow_cfg)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&pf->flow_cfg->flow_list);
+
+ pf->flow_cfg->ntuple_max_flows = OTX2_MAX_NTUPLE_FLOWS;
+
+ err = otx2_alloc_mcam_entries(pf);
+ if (err)
+ return err;
+
+ pf->mac_table = devm_kzalloc(pf->dev, sizeof(struct otx2_mac_table)
+ * OTX2_MAX_UNICAST_FLOWS, GFP_KERNEL);
+ if (!pf->mac_table)
+ return -ENOMEM;
+
+ return 0;
+}
+
+void otx2_mcam_flow_del(struct otx2_nic *pf)
+{
+ otx2_destroy_mcam_flows(pf);
+}
+
+/* On success adds an mcam entry.
+ * On failure enables promiscuous mode.
+ */
+static int otx2_do_add_macfilter(struct otx2_nic *pf, const u8 *mac)
+{
+ struct otx2_flow_config *flow_cfg = pf->flow_cfg;
+ struct npc_install_flow_req *req;
+ int err, i;
+
+ if (!(pf->flags & OTX2_FLAG_UCAST_FLTR_SUPPORT))
+ return -ENOMEM;
+
+ /* don't have free mcam entries or the uc list exceeds what was allotted */
+ if (netdev_uc_count(pf->netdev) > OTX2_MAX_UNICAST_FLOWS)
+ return -ENOMEM;
+
+ mutex_lock(&pf->mbox.lock);
+ req = otx2_mbox_alloc_msg_npc_install_flow(&pf->mbox);
+ if (!req) {
+ mutex_unlock(&pf->mbox.lock);
+ return -ENOMEM;
+ }
+
+ /* unicast offset starts at 32; entries 0..31 are reserved for ntuple */
+ for (i = 0; i < OTX2_MAX_UNICAST_FLOWS; i++) {
+ if (pf->mac_table[i].inuse)
+ continue;
+ ether_addr_copy(pf->mac_table[i].addr, mac);
+ pf->mac_table[i].inuse = true;
+ pf->mac_table[i].mcam_entry =
+ flow_cfg->entry[i + flow_cfg->unicast_offset];
+ req->entry = pf->mac_table[i].mcam_entry;
+ break;
+ }
+
+ ether_addr_copy(req->packet.dmac, mac);
+ eth_broadcast_addr((u8 *)&req->mask.dmac);
+ req->features = BIT_ULL(NPC_DMAC);
+ req->channel = pf->hw.rx_chan_base;
+ req->intf = NIX_INTF_RX;
+ req->op = NIX_RX_ACTION_DEFAULT;
+ req->set_cntr = 1;
+
+ err = otx2_sync_mbox_msg(&pf->mbox);
+ mutex_unlock(&pf->mbox.lock);
+
+ return err;
+}
+
+int otx2_add_macfilter(struct net_device *netdev, const u8 *mac)
+{
+ struct otx2_nic *pf = netdev_priv(netdev);
+
+ return otx2_do_add_macfilter(pf, mac);
+}
+
+static bool otx2_get_mcamentry_for_mac(struct otx2_nic *pf, const u8 *mac,
+ int *mcam_entry)
+{
+ int i;
+
+ for (i = 0; i < OTX2_MAX_UNICAST_FLOWS; i++) {
+ if (!pf->mac_table[i].inuse)
+ continue;
+
+ if (ether_addr_equal(pf->mac_table[i].addr, mac)) {
+ *mcam_entry = pf->mac_table[i].mcam_entry;
+ pf->mac_table[i].inuse = false;
+ return true;
+ }
+ }
+ return false;
+}
+
+int otx2_del_macfilter(struct net_device *netdev, const u8 *mac)
+{
+ struct otx2_nic *pf = netdev_priv(netdev);
+ struct npc_delete_flow_req *req;
+ int err, mcam_entry;
+
+ /* check whether an mcam entry exists for the given mac */
+ if (!otx2_get_mcamentry_for_mac(pf, mac, &mcam_entry))
+ return 0;
+
+ mutex_lock(&pf->mbox.lock);
+ req = otx2_mbox_alloc_msg_npc_delete_flow(&pf->mbox);
+ if (!req) {
+ mutex_unlock(&pf->mbox.lock);
+ return -ENOMEM;
+ }
+ req->entry = mcam_entry;
+ /* Send message to AF */
+ err = otx2_sync_mbox_msg(&pf->mbox);
+ mutex_unlock(&pf->mbox.lock);
+
+ return err;
+}
+
+static struct otx2_flow *otx2_find_flow(struct otx2_nic *pfvf, u32 location)
+{
+ struct otx2_flow *iter;
+
+ list_for_each_entry(iter, &pfvf->flow_cfg->flow_list, list) {
+ if (iter->location == location)
+ return iter;
+ }
+
+ return NULL;
+}
+
+static void otx2_add_flow_to_list(struct otx2_nic *pfvf, struct otx2_flow *flow)
+{
+ struct list_head *head = &pfvf->flow_cfg->flow_list;
+ struct otx2_flow *iter;
+
+ list_for_each_entry(iter, &pfvf->flow_cfg->flow_list, list) {
+ if (iter->location > flow->location)
+ break;
+ head = &iter->list;
+ }
+
+ list_add(&flow->list, head);
+}
+
+int otx2_get_flow(struct otx2_nic *pfvf, struct ethtool_rxnfc *nfc,
+ u32 location)
+{
+ struct otx2_flow *iter;
+
+ if (location >= pfvf->flow_cfg->ntuple_max_flows)
+ return -EINVAL;
+
+ list_for_each_entry(iter, &pfvf->flow_cfg->flow_list, list) {
+ if (iter->location == location) {
+ nfc->fs = iter->flow_spec;
+ return 0;
+ }
+ }
+
+ return -ENOENT;
+}
+
+int otx2_get_all_flows(struct otx2_nic *pfvf, struct ethtool_rxnfc *nfc,
+ u32 *rule_locs)
+{
+ u32 location = 0;
+ int idx = 0;
+ int err = 0;
+
+ nfc->data = pfvf->flow_cfg->ntuple_max_flows;
+ while ((!err || err == -ENOENT) && idx < nfc->rule_cnt) {
+ err = otx2_get_flow(pfvf, nfc, location);
+ if (!err)
+ rule_locs[idx++] = location;
+ location++;
+ }
+
+ return err;
+}
+
+static void otx2_prepare_ipv4_flow(struct ethtool_rx_flow_spec *fsp,
+ struct npc_install_flow_req *req,
+ u32 flow_type)
+{
+ struct ethtool_usrip4_spec *ipv4_usr_mask = &fsp->m_u.usr_ip4_spec;
+ struct ethtool_usrip4_spec *ipv4_usr_hdr = &fsp->h_u.usr_ip4_spec;
+ struct ethtool_tcpip4_spec *ipv4_l4_mask = &fsp->m_u.tcp_ip4_spec;
+ struct ethtool_tcpip4_spec *ipv4_l4_hdr = &fsp->h_u.tcp_ip4_spec;
+ struct flow_msg *pmask = &req->mask;
+ struct flow_msg *pkt = &req->packet;
+
+ switch (flow_type) {
+ case IP_USER_FLOW:
+ if (ipv4_usr_mask->ip4src) {
+ memcpy(&pkt->ip4src, &ipv4_usr_hdr->ip4src,
+ sizeof(pkt->ip4src));
+ memcpy(&pmask->ip4src, &ipv4_usr_mask->ip4src,
+ sizeof(pmask->ip4src));
+ req->features |= BIT_ULL(NPC_SIP_IPV4);
+ }
+ if (ipv4_usr_mask->ip4dst) {
+ memcpy(&pkt->ip4dst, &ipv4_usr_hdr->ip4dst,
+ sizeof(pkt->ip4dst));
+ memcpy(&pmask->ip4dst, &ipv4_usr_mask->ip4dst,
+ sizeof(pmask->ip4dst));
+ req->features |= BIT_ULL(NPC_DIP_IPV4);
+ }
+ break;
+ case TCP_V4_FLOW:
+ case UDP_V4_FLOW:
+ case SCTP_V4_FLOW:
+ if (ipv4_l4_mask->ip4src) {
+ memcpy(&pkt->ip4src, &ipv4_l4_hdr->ip4src,
+ sizeof(pkt->ip4src));
+ memcpy(&pmask->ip4src, &ipv4_l4_mask->ip4src,
+ sizeof(pmask->ip4src));
+ req->features |= BIT_ULL(NPC_SIP_IPV4);
+ }
+ if (ipv4_l4_mask->ip4dst) {
+ memcpy(&pkt->ip4dst, &ipv4_l4_hdr->ip4dst,
+ sizeof(pkt->ip4dst));
+ memcpy(&pmask->ip4dst, &ipv4_l4_mask->ip4dst,
+ sizeof(pmask->ip4dst));
+ req->features |= BIT_ULL(NPC_DIP_IPV4);
+ }
+ if (ipv4_l4_mask->psrc) {
+ memcpy(&pkt->sport, &ipv4_l4_hdr->psrc,
+ sizeof(pkt->sport));
+ memcpy(&pmask->sport, &ipv4_l4_mask->psrc,
+ sizeof(pmask->sport));
+ if (flow_type == UDP_V4_FLOW)
+ req->features |= BIT_ULL(NPC_SPORT_UDP);
+ else if (flow_type == TCP_V4_FLOW)
+ req->features |= BIT_ULL(NPC_SPORT_TCP);
+ else
+ req->features |= BIT_ULL(NPC_SPORT_SCTP);
+ }
+ if (ipv4_l4_mask->pdst) {
+ memcpy(&pkt->dport, &ipv4_l4_hdr->pdst,
+ sizeof(pkt->dport));
+ memcpy(&pmask->dport, &ipv4_l4_mask->pdst,
+ sizeof(pmask->dport));
+ if (flow_type == UDP_V4_FLOW)
+ req->features |= BIT_ULL(NPC_DPORT_UDP);
+ else if (flow_type == TCP_V4_FLOW)
+ req->features |= BIT_ULL(NPC_DPORT_TCP);
+ else
+ req->features |= BIT_ULL(NPC_DPORT_SCTP);
+ }
+ break;
+ default:
+ break;
+ }
+}
+
+static void otx2_prepare_ipv6_flow(struct ethtool_rx_flow_spec *fsp,
+ struct npc_install_flow_req *req,
+ u32 flow_type)
+{
+ struct ethtool_usrip6_spec *ipv6_usr_mask = &fsp->m_u.usr_ip6_spec;
+ struct ethtool_usrip6_spec *ipv6_usr_hdr = &fsp->h_u.usr_ip6_spec;
+ struct ethtool_tcpip6_spec *ipv6_l4_mask = &fsp->m_u.tcp_ip6_spec;
+ struct ethtool_tcpip6_spec *ipv6_l4_hdr = &fsp->h_u.tcp_ip6_spec;
+ struct flow_msg *pmask = &req->mask;
+ struct flow_msg *pkt = &req->packet;
+
+ switch (flow_type) {
+ case IPV6_USER_FLOW:
+ if (!ipv6_addr_any((struct in6_addr *)ipv6_usr_mask->ip6src)) {
+ memcpy(&pkt->ip6src, &ipv6_usr_hdr->ip6src,
+ sizeof(pkt->ip6src));
+ memcpy(&pmask->ip6src, &ipv6_usr_mask->ip6src,
+ sizeof(pmask->ip6src));
+ req->features |= BIT_ULL(NPC_SIP_IPV6);
+ }
+ if (!ipv6_addr_any((struct in6_addr *)ipv6_usr_mask->ip6dst)) {
+ memcpy(&pkt->ip6dst, &ipv6_usr_hdr->ip6dst,
+ sizeof(pkt->ip6dst));
+ memcpy(&pmask->ip6dst, &ipv6_usr_mask->ip6dst,
+ sizeof(pmask->ip6dst));
+ req->features |= BIT_ULL(NPC_DIP_IPV6);
+ }
+ break;
+ case TCP_V6_FLOW:
+ case UDP_V6_FLOW:
+ case SCTP_V6_FLOW:
+ if (!ipv6_addr_any((struct in6_addr *)ipv6_l4_mask->ip6src)) {
+ memcpy(&pkt->ip6src, &ipv6_l4_hdr->ip6src,
+ sizeof(pkt->ip6src));
+ memcpy(&pmask->ip6src, &ipv6_l4_mask->ip6src,
+ sizeof(pmask->ip6src));
+ req->features |= BIT_ULL(NPC_SIP_IPV6);
+ }
+ if (!ipv6_addr_any((struct in6_addr *)ipv6_l4_mask->ip6dst)) {
+ memcpy(&pkt->ip6dst, &ipv6_l4_hdr->ip6dst,
+ sizeof(pkt->ip6dst));
+ memcpy(&pmask->ip6dst, &ipv6_l4_mask->ip6dst,
+ sizeof(pmask->ip6dst));
+ req->features |= BIT_ULL(NPC_DIP_IPV6);
+ }
+ if (ipv6_l4_mask->psrc) {
+ memcpy(&pkt->sport, &ipv6_l4_hdr->psrc,
+ sizeof(pkt->sport));
+ memcpy(&pmask->sport, &ipv6_l4_mask->psrc,
+ sizeof(pmask->sport));
+ if (flow_type == UDP_V6_FLOW)
+ req->features |= BIT_ULL(NPC_SPORT_UDP);
+ else if (flow_type == TCP_V6_FLOW)
+ req->features |= BIT_ULL(NPC_SPORT_TCP);
+ else
+ req->features |= BIT_ULL(NPC_SPORT_SCTP);
+ }
+ if (ipv6_l4_mask->pdst) {
+ memcpy(&pkt->dport, &ipv6_l4_hdr->pdst,
+ sizeof(pkt->dport));
+ memcpy(&pmask->dport, &ipv6_l4_mask->pdst,
+ sizeof(pmask->dport));
+ if (flow_type == UDP_V6_FLOW)
+ req->features |= BIT_ULL(NPC_DPORT_UDP);
+ else if (flow_type == TCP_V6_FLOW)
+ req->features |= BIT_ULL(NPC_DPORT_TCP);
+ else
+ req->features |= BIT_ULL(NPC_DPORT_SCTP);
+ }
+ break;
+ default:
+ break;
+ }
+}
+
+int otx2_prepare_flow_request(struct ethtool_rx_flow_spec *fsp,
+ struct npc_install_flow_req *req)
+{
+ struct ethhdr *eth_mask = &fsp->m_u.ether_spec;
+ struct ethhdr *eth_hdr = &fsp->h_u.ether_spec;
+ struct flow_msg *pmask = &req->mask;
+ struct flow_msg *pkt = &req->packet;
+ u32 flow_type;
+
+ flow_type = fsp->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT);
+ switch (flow_type) {
+ /* bits not set in mask are don't care */
+ case ETHER_FLOW:
+ if (!is_zero_ether_addr(eth_mask->h_source)) {
+ ether_addr_copy(pkt->smac, eth_hdr->h_source);
+ ether_addr_copy(pmask->smac, eth_mask->h_source);
+ req->features |= BIT_ULL(NPC_SMAC);
+ }
+ if (!is_zero_ether_addr(eth_mask->h_dest)) {
+ ether_addr_copy(pkt->dmac, eth_hdr->h_dest);
+ ether_addr_copy(pmask->dmac, eth_mask->h_dest);
+ req->features |= BIT_ULL(NPC_DMAC);
+ }
+ if (eth_mask->h_proto) {
+ memcpy(&pkt->etype, &eth_hdr->h_proto,
+ sizeof(pkt->etype));
+ memcpy(&pmask->etype, &eth_mask->h_proto,
+ sizeof(pmask->etype));
+ req->features |= BIT_ULL(NPC_ETYPE);
+ }
+ break;
+ case IP_USER_FLOW:
+ case TCP_V4_FLOW:
+ case UDP_V4_FLOW:
+ case SCTP_V4_FLOW:
+ otx2_prepare_ipv4_flow(fsp, req, flow_type);
+ break;
+ case IPV6_USER_FLOW:
+ case TCP_V6_FLOW:
+ case UDP_V6_FLOW:
+ case SCTP_V6_FLOW:
+ otx2_prepare_ipv6_flow(fsp, req, flow_type);
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+ if (fsp->flow_type & FLOW_EXT) {
+ if (fsp->m_ext.vlan_etype)
+ return -EINVAL;
+ if (fsp->m_ext.vlan_tci) {
+ if (fsp->m_ext.vlan_tci != cpu_to_be16(VLAN_VID_MASK))
+ return -EINVAL;
+ if (be16_to_cpu(fsp->h_ext.vlan_tci) >= VLAN_N_VID)
+ return -EINVAL;
+
+ memcpy(&pkt->vlan_tci, &fsp->h_ext.vlan_tci,
+ sizeof(pkt->vlan_tci));
+ memcpy(&pmask->vlan_tci, &fsp->m_ext.vlan_tci,
+ sizeof(pmask->vlan_tci));
+ req->features |= BIT_ULL(NPC_OUTER_VID);
+ }
+
+ /* Neither drop nor direct-to-queue: use the action in the default entry */
+ if (fsp->m_ext.data[1] &&
+ fsp->h_ext.data[1] == cpu_to_be32(OTX2_DEFAULT_ACTION))
+ req->op = NIX_RX_ACTION_DEFAULT;
+ }
+
+ if (fsp->flow_type & FLOW_MAC_EXT &&
+ !is_zero_ether_addr(fsp->m_ext.h_dest)) {
+ ether_addr_copy(pkt->dmac, fsp->h_ext.h_dest);
+ ether_addr_copy(pmask->dmac, fsp->m_ext.h_dest);
+ req->features |= BIT_ULL(NPC_DMAC);
+ }
+
+ if (!req->features)
+ return -EOPNOTSUPP;
+
+ return 0;
+}
+
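+/* Editorial sketch (not part of the patch): a TCP/IPv4 destination
+ * port rule as the ethtool core passes it in; the function above
+ * turns it into BIT_ULL(NPC_DPORT_TCP) plus the port value/mask:
+ */
+static void example_fsp(struct ethtool_rx_flow_spec *fsp)
+{
+ fsp->flow_type = TCP_V4_FLOW;
+ fsp->h_u.tcp_ip4_spec.pdst = htons(80); /* match dport 80 */
+ fsp->m_u.tcp_ip4_spec.pdst = htons(0xffff); /* exact match */
+ fsp->ring_cookie = 2; /* steer to rx queue 2 */
+}
+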
+static int otx2_add_flow_msg(struct otx2_nic *pfvf, struct otx2_flow *flow)
+{
+ u64 ring_cookie = flow->flow_spec.ring_cookie;
+ struct npc_install_flow_req *req;
+ int err, vf = 0;
+
+ mutex_lock(&pfvf->mbox.lock);
+ req = otx2_mbox_alloc_msg_npc_install_flow(&pfvf->mbox);
+ if (!req) {
+ mutex_unlock(&pfvf->mbox.lock);
+ return -ENOMEM;
+ }
+
+ err = otx2_prepare_flow_request(&flow->flow_spec, req);
+ if (err) {
+ /* free the allocated msg above */
+ otx2_mbox_reset(&pfvf->mbox.mbox, 0);
+ mutex_unlock(&pfvf->mbox.lock);
+ return err;
+ }
+
+ req->entry = flow->entry;
+ req->intf = NIX_INTF_RX;
+ req->set_cntr = 1;
+ req->channel = pfvf->hw.rx_chan_base;
+ if (ring_cookie == RX_CLS_FLOW_DISC) {
+ req->op = NIX_RX_ACTIONOP_DROP;
+ } else {
+ /* change to unicast only if the action of the default
+ * entry was not requested by the user
+ */
+ if (req->op != NIX_RX_ACTION_DEFAULT)
+ req->op = NIX_RX_ACTIONOP_UCAST;
+ req->index = ethtool_get_flow_spec_ring(ring_cookie);
+ vf = ethtool_get_flow_spec_ring_vf(ring_cookie);
+ if (vf > pci_num_vf(pfvf->pdev)) {
+ mutex_unlock(&pfvf->mbox.lock);
+ return -EINVAL;
+ }
+ }
+
+ /* ethtool encodes a VF in ring_cookie as (VF + 1) */
+ if (vf) {
+ req->vf = vf;
+ flow->is_vf = true;
+ flow->vf = vf;
+ }
+
+ /* Send message to AF */
+ err = otx2_sync_mbox_msg(&pfvf->mbox);
+ mutex_unlock(&pfvf->mbox.lock);
+ return err;
+}
+
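+/* Editorial sketch (not part of the patch): ring_cookie packs the VF
+ * selector above the queue number, assuming the standard ethtool
+ * encoding (queue in the low 32 bits, VF + 1 in the next byte),
+ * which the helpers used above decode:
+ */
+static void example_ring_cookie(void)
+{
+ u64 cookie = (2ULL << 32) | 5; /* VF index 1, that VF's queue 5 */
+ u64 ring = ethtool_get_flow_spec_ring(cookie); /* 5 */
+ u64 vf = ethtool_get_flow_spec_ring_vf(cookie); /* 2, i.e. VF 1 */
+}
+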
+int otx2_add_flow(struct otx2_nic *pfvf, struct ethtool_rx_flow_spec *fsp)
+{
+ struct otx2_flow_config *flow_cfg = pfvf->flow_cfg;
+ u32 ring = ethtool_get_flow_spec_ring(fsp->ring_cookie);
+ struct otx2_flow *flow;
+ bool new = false;
+ int err;
+
+ if (!(pfvf->flags & OTX2_FLAG_NTUPLE_SUPPORT))
+ return -ENOMEM;
+
+ if (ring >= pfvf->hw.rx_queues && fsp->ring_cookie != RX_CLS_FLOW_DISC)
+ return -EINVAL;
+
+ if (fsp->location >= flow_cfg->ntuple_max_flows)
+ return -EINVAL;
+
+ flow = otx2_find_flow(pfvf, fsp->location);
+ if (!flow) {
+ flow = kzalloc(sizeof(*flow), GFP_ATOMIC);
+ if (!flow)
+ return -ENOMEM;
+ flow->location = fsp->location;
+ flow->entry = flow_cfg->entry[flow_cfg->ntuple_offset +
+ flow->location];
+ new = true;
+ }
+ /* struct copy */
+ flow->flow_spec = *fsp;
+
+ err = otx2_add_flow_msg(pfvf, flow);
+ if (err) {
+ if (new)
+ kfree(flow);
+ return err;
+ }
+
+ /* add the newly installed flow to the list */
+ if (new) {
+ otx2_add_flow_to_list(pfvf, flow);
+ flow_cfg->nr_flows++;
+ }
+
+ return 0;
+}
+
+static int otx2_remove_flow_msg(struct otx2_nic *pfvf, u16 entry, bool all)
+{
+ struct npc_delete_flow_req *req;
+ int err;
+
+ mutex_lock(&pfvf->mbox.lock);
+ req = otx2_mbox_alloc_msg_npc_delete_flow(&pfvf->mbox);
+ if (!req) {
+ mutex_unlock(&pfvf->mbox.lock);
+ return -ENOMEM;
+ }
+
+ req->entry = entry;
+ if (all)
+ req->all = 1;
+
+ /* Send message to AF */
+ err = otx2_sync_mbox_msg(&pfvf->mbox);
+ mutex_unlock(&pfvf->mbox.lock);
+ return err;
+}
+
+int otx2_remove_flow(struct otx2_nic *pfvf, u32 location)
+{
+ struct otx2_flow_config *flow_cfg = pfvf->flow_cfg;
+ struct otx2_flow *flow;
+ int err;
+
+ if (location >= flow_cfg->ntuple_max_flows)
+ return -EINVAL;
+
+ flow = otx2_find_flow(pfvf, location);
+ if (!flow)
+ return -ENOENT;
+
+ err = otx2_remove_flow_msg(pfvf, flow->entry, false);
+ if (err)
+ return err;
+
+ list_del(&flow->list);
+ kfree(flow);
+ flow_cfg->nr_flows--;
+
+ return 0;
+}
+
+int otx2_destroy_ntuple_flows(struct otx2_nic *pfvf)
+{
+ struct otx2_flow_config *flow_cfg = pfvf->flow_cfg;
+ struct npc_delete_flow_req *req;
+ struct otx2_flow *iter, *tmp;
+ int err;
+
+ if (!(pfvf->flags & OTX2_FLAG_NTUPLE_SUPPORT))
+ return 0;
+
+ mutex_lock(&pfvf->mbox.lock);
+ req = otx2_mbox_alloc_msg_npc_delete_flow(&pfvf->mbox);
+ if (!req) {
+ mutex_unlock(&pfvf->mbox.lock);
+ return -ENOMEM;
+ }
+
+ req->start = flow_cfg->entry[flow_cfg->ntuple_offset];
+ req->end = flow_cfg->entry[flow_cfg->ntuple_offset +
+ flow_cfg->ntuple_max_flows - 1];
+ err = otx2_sync_mbox_msg(&pfvf->mbox);
+ mutex_unlock(&pfvf->mbox.lock);
+
+ list_for_each_entry_safe(iter, tmp, &flow_cfg->flow_list, list) {
+ list_del(&iter->list);
+ kfree(iter);
+ flow_cfg->nr_flows--;
+ }
+ return err;
+}
+
+int otx2_destroy_mcam_flows(struct otx2_nic *pfvf)
+{
+ struct otx2_flow_config *flow_cfg = pfvf->flow_cfg;
+ struct npc_mcam_free_entry_req *req;
+ struct otx2_flow *iter, *tmp;
+ int err;
+
+ if (!(pfvf->flags & OTX2_FLAG_MCAM_ENTRIES_ALLOC))
+ return 0;
+
+ /* remove all flows */
+ err = otx2_remove_flow_msg(pfvf, 0, true);
+ if (err)
+ return err;
+
+ list_for_each_entry_safe(iter, tmp, &flow_cfg->flow_list, list) {
+ list_del(&iter->list);
+ kfree(iter);
+ flow_cfg->nr_flows--;
+ }
+
+ mutex_lock(&pfvf->mbox.lock);
+ req = otx2_mbox_alloc_msg_npc_mcam_free_entry(&pfvf->mbox);
+ if (!req) {
+ mutex_unlock(&pfvf->mbox.lock);
+ return -ENOMEM;
+ }
+
+ req->all = 1;
+ /* Send message to AF to free MCAM entries */
+ err = otx2_sync_mbox_msg(&pfvf->mbox);
+ if (err) {
+ mutex_unlock(&pfvf->mbox.lock);
+ return err;
+ }
+
+ pfvf->flags &= ~OTX2_FLAG_MCAM_ENTRIES_ALLOC;
+ mutex_unlock(&pfvf->mbox.lock);
+
+ return 0;
+}
+
+int otx2_install_rxvlan_offload_flow(struct otx2_nic *pfvf)
+{
+ struct otx2_flow_config *flow_cfg = pfvf->flow_cfg;
+ struct npc_install_flow_req *req;
+ int err;
+
+ mutex_lock(&pfvf->mbox.lock);
+ req = otx2_mbox_alloc_msg_npc_install_flow(&pfvf->mbox);
+ if (!req) {
+ mutex_unlock(&pfvf->mbox.lock);
+ return -ENOMEM;
+ }
+
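+ /* Match our unicast DMAC together with any outer VLAN ID */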
+ req->entry = flow_cfg->entry[flow_cfg->rx_vlan_offset];
+ req->intf = NIX_INTF_RX;
+ ether_addr_copy(req->packet.dmac, pfvf->netdev->dev_addr);
+ eth_broadcast_addr((u8 *)&req->mask.dmac);
+ req->channel = pfvf->hw.rx_chan_base;
+ req->op = NIX_RX_ACTION_DEFAULT;
+ req->features = BIT_ULL(NPC_OUTER_VID) | BIT_ULL(NPC_DMAC);
+ req->vtag0_valid = true;
+ req->vtag0_type = NIX_AF_LFX_RX_VTAG_TYPE0;
+
+ /* Send message to AF */
+ err = otx2_sync_mbox_msg(&pfvf->mbox);
+ mutex_unlock(&pfvf->mbox.lock);
+ return err;
+}
+
+static int otx2_delete_rxvlan_offload_flow(struct otx2_nic *pfvf)
+{
+ struct otx2_flow_config *flow_cfg = pfvf->flow_cfg;
+ struct npc_delete_flow_req *req;
+ int err;
+
+ mutex_lock(&pfvf->mbox.lock);
+ req = otx2_mbox_alloc_msg_npc_delete_flow(&pfvf->mbox);
+ if (!req) {
+ mutex_unlock(&pfvf->mbox.lock);
+ return -ENOMEM;
+ }
+
+ req->entry = flow_cfg->entry[flow_cfg->rx_vlan_offset];
+ /* Send message to AF */
+ err = otx2_sync_mbox_msg(&pfvf->mbox);
+ mutex_unlock(&pfvf->mbox.lock);
+ return err;
+}
+
+int otx2_enable_rxvlan(struct otx2_nic *pf, bool enable)
+{
+ struct nix_vtag_config *req;
+ struct mbox_msghdr *rsp_hdr;
+ int err;
+
+ /* Don't have enough MCAM entries to support RX VLAN offload */
+ if (!(pf->flags & OTX2_FLAG_RX_VLAN_SUPPORT))
+ return -ENOMEM;
+
+ if (enable) {
+ err = otx2_install_rxvlan_offload_flow(pf);
+ if (err)
+ return err;
+ } else {
+ err = otx2_delete_rxvlan_offload_flow(pf);
+ if (err)
+ return err;
+ }
+
+ mutex_lock(&pf->mbox.lock);
+ req = otx2_mbox_alloc_msg_nix_vtag_cfg(&pf->mbox);
+ if (!req) {
+ mutex_unlock(&pf->mbox.lock);
+ return -ENOMEM;
+ }
+
+ /* configure VLAN strip, capture and tag size */
+ req->vtag_size = VTAGSIZE_T4;
+ req->cfg_type = 1; /* rx vlan cfg */
+ req->rx.vtag_type = NIX_AF_LFX_RX_VTAG_TYPE0;
+ req->rx.strip_vtag = enable;
+ req->rx.capture_vtag = enable;
+
+ err = otx2_sync_mbox_msg(&pf->mbox);
+ if (err) {
+ mutex_unlock(&pf->mbox.lock);
+ return err;
+ }
+
+ rsp_hdr = otx2_mbox_get_rsp(&pf->mbox.mbox, 0, &req->hdr);
+ if (IS_ERR(rsp_hdr)) {
+ mutex_unlock(&pf->mbox.lock);
+ return PTR_ERR(rsp_hdr);
+ }
+
+ mutex_unlock(&pf->mbox.lock);
+ return rsp_hdr->rc;
+}
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
index 66f1a212f1f4..634d60655a74 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
@@ -1278,6 +1278,7 @@ static void otx2_free_sq_res(struct otx2_nic *pf)
static int otx2_init_hw_resources(struct otx2_nic *pf)
{
+ struct nix_lf_free_req *free_req;
struct mbox *mbox = &pf->mbox;
struct otx2_hw *hw = &pf->hw;
struct msg_req *req;
@@ -1359,8 +1360,9 @@ err_free_rq_ptrs:
otx2_aura_pool_free(pf);
err_free_nix_lf:
mutex_lock(&mbox->lock);
- req = otx2_mbox_alloc_msg_nix_lf_free(mbox);
- if (req) {
+ free_req = otx2_mbox_alloc_msg_nix_lf_free(mbox);
+ if (free_req) {
+ free_req->flags = NIX_LF_DISABLE_FLOWS;
if (otx2_sync_mbox_msg(mbox))
dev_err(pf->dev, "%s failed to free nixlf\n", __func__);
}
@@ -1379,6 +1381,7 @@ exit:
static void otx2_free_hw_resources(struct otx2_nic *pf)
{
struct otx2_qset *qset = &pf->qset;
+ struct nix_lf_free_req *free_req;
struct mbox *mbox = &pf->mbox;
struct otx2_cq_queue *cq;
struct msg_req *req;
@@ -1419,8 +1422,11 @@ static void otx2_free_hw_resources(struct otx2_nic *pf)
mutex_lock(&mbox->lock);
/* Reset NIX LF */
- req = otx2_mbox_alloc_msg_nix_lf_free(mbox);
- if (req) {
+ free_req = otx2_mbox_alloc_msg_nix_lf_free(mbox);
+ if (free_req) {
+ free_req->flags = NIX_LF_DISABLE_FLOWS;
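+ /* Keep TX VTAG entries unless the PF itself is shutting down */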
+ if (!(pf->flags & OTX2_FLAG_PF_SHUTDOWN))
+ free_req->flags |= NIX_LF_DONT_FREE_TX_VTAG;
if (otx2_sync_mbox_msg(mbox))
dev_err(pf->dev, "%s failed to free nixlf\n", __func__);
}
@@ -1562,6 +1568,9 @@ int otx2_open(struct net_device *netdev)
otx2_set_cints_affinity(pf);
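+ /* When reinitializing, re-enable RX VLAN offload if it is supported */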
+ if (pf->flags & OTX2_FLAG_RX_VLAN_SUPPORT)
+ otx2_enable_rxvlan(pf, true);
+
/* When reinitializing enable time stamping if it is enabled before */
if (pf->flags & OTX2_FLAG_TX_TSTAMP_ENABLED) {
pf->flags &= ~OTX2_FLAG_TX_TSTAMP_ENABLED;
@@ -1716,10 +1725,20 @@ static void otx2_do_set_rx_mode(struct work_struct *work)
struct otx2_nic *pf = container_of(work, struct otx2_nic, rx_mode_work);
struct net_device *netdev = pf->netdev;
struct nix_rx_mode *req;
+ bool promisc = false;
if (!(netdev->flags & IFF_UP))
return;
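+ /* Fall back to promiscuous mode when unicast filters would overflow the MCAM */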
+ if ((netdev->flags & IFF_PROMISC) ||
+ (netdev_uc_count(netdev) > OTX2_MAX_UNICAST_FLOWS)) {
+ promisc = true;
+ }
+
+ /* Sync unicast addresses: add them to or delete them from MCAM entries */
+ if (!promisc && netdev->priv_flags & IFF_UNICAST_FLT)
+ __dev_uc_sync(netdev, otx2_add_macfilter, otx2_del_macfilter);
+
mutex_lock(&pf->mbox.lock);
req = otx2_mbox_alloc_msg_nix_set_rx_mode(&pf->mbox);
if (!req) {
@@ -1729,8 +1748,7 @@ static void otx2_do_set_rx_mode(struct work_struct *work)
req->mode = NIX_RX_MODE_UCAST;
- /* We don't support MAC address filtering yet */
- if (netdev->flags & IFF_PROMISC)
+ if (promisc)
req->mode |= NIX_RX_MODE_PROMISC;
else if (netdev->flags & (IFF_ALLMULTI | IFF_MULTICAST))
req->mode |= NIX_RX_MODE_ALLMULTI;
@@ -1743,11 +1761,20 @@ static int otx2_set_features(struct net_device *netdev,
netdev_features_t features)
{
netdev_features_t changed = features ^ netdev->features;
+ bool ntuple = !!(features & NETIF_F_NTUPLE);
struct otx2_nic *pf = netdev_priv(netdev);
if ((changed & NETIF_F_LOOPBACK) && netif_running(netdev))
return otx2_cgx_config_loopback(pf,
features & NETIF_F_LOOPBACK);
+
+ if ((changed & NETIF_F_HW_VLAN_CTAG_RX) && netif_running(netdev))
+ return otx2_enable_rxvlan(pf,
+ features & NETIF_F_HW_VLAN_CTAG_RX);
+
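+ /* Flush the installed ntuple filters when NETIF_F_NTUPLE is turned off */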
+ if ((changed & NETIF_F_NTUPLE) && !ntuple)
+ otx2_destroy_ntuple_flows(pf);
+
return 0;
}
@@ -1903,6 +1930,245 @@ static int otx2_ioctl(struct net_device *netdev, struct ifreq *req, int cmd)
}
}
+static int otx2_do_set_vf_mac(struct otx2_nic *pf, int vf, const u8 *mac)
+{
+ struct npc_install_flow_req *req;
+ int err;
+
+ mutex_lock(&pf->mbox.lock);
+ req = otx2_mbox_alloc_msg_npc_install_flow(&pf->mbox);
+ if (!req) {
+ err = -ENOMEM;
+ goto out;
+ }
+
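+ /* Steer packets carrying the new DMAC to the VF's default RX action */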
+ ether_addr_copy(req->packet.dmac, mac);
+ eth_broadcast_addr((u8 *)&req->mask.dmac);
+ req->features = BIT_ULL(NPC_DMAC);
+ req->channel = pf->hw.rx_chan_base;
+ req->intf = NIX_INTF_RX;
+ req->default_rule = 1;
+ req->append = 1;
+ req->vf = vf + 1;
+ req->op = NIX_RX_ACTION_DEFAULT;
+
+ err = otx2_sync_mbox_msg(&pf->mbox);
+out:
+ mutex_unlock(&pf->mbox.lock);
+ return err;
+}
+
+static int otx2_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
+{
+ struct otx2_nic *pf = netdev_priv(netdev);
+ struct pci_dev *pdev = pf->pdev;
+ struct otx2_vf_config *config;
+ int ret;
+
+ if (!netif_running(netdev))
+ return -EAGAIN;
+
+ if (vf >= pci_num_vf(pdev))
+ return -EINVAL;
+
+ if (!is_valid_ether_addr(mac))
+ return -EINVAL;
+
+ config = &pf->vf_configs[vf];
+ ether_addr_copy(config->mac, mac);
+
+ ret = otx2_do_set_vf_mac(pf, vf, mac);
+ if (ret == 0)
+ dev_info(&pdev->dev, "Reload VF driver to apply the changes\n");
+
+ return ret;
+}
+
+static int otx2_do_set_vf_vlan(struct otx2_nic *pf, int vf, u16 vlan, u8 qos,
+ __be16 proto)
+{
+ struct otx2_flow_config *flow_cfg = pf->flow_cfg;
+ struct nix_vtag_config_rsp *vtag_rsp;
+ struct npc_delete_flow_req *del_req;
+ struct nix_vtag_config *vtag_req;
+ struct npc_install_flow_req *req;
+ struct otx2_vf_config *config;
+ int err = 0;
+ u32 idx;
+
+ config = &pf->vf_configs[vf];
+
+ /* nothing to do; returning here also avoids unlocking an unheld mutex */
+ if (!vlan && !config->vlan)
+ return 0;
+
+ mutex_lock(&pf->mbox.lock);
+
+ /* free old tx vtag entry */
+ if (config->vlan) {
+ vtag_req = otx2_mbox_alloc_msg_nix_vtag_cfg(&pf->mbox);
+ if (!vtag_req) {
+ err = -ENOMEM;
+ goto out;
+ }
+ vtag_req->cfg_type = 0;
+ vtag_req->tx.free_vtag0 = 1;
+ vtag_req->tx.vtag0_idx = config->tx_vtag_idx;
+
+ err = otx2_sync_mbox_msg(&pf->mbox);
+ if (err)
+ goto out;
+ }
+
+ if (!vlan && config->vlan) {
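+ /* vlan == 0 clears the VF VLAN: delete the installed RX and TX rules */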
+ /* rx */
+ del_req = otx2_mbox_alloc_msg_npc_delete_flow(&pf->mbox);
+ if (!del_req) {
+ err = -ENOMEM;
+ goto out;
+ }
+ idx = ((vf * OTX2_PER_VF_VLAN_FLOWS) + OTX2_VF_VLAN_RX_INDEX);
+ del_req->entry =
+ flow_cfg->entry[flow_cfg->vf_vlan_offset + idx];
+ err = otx2_sync_mbox_msg(&pf->mbox);
+ if (err)
+ goto out;
+
+ /* tx */
+ del_req = otx2_mbox_alloc_msg_npc_delete_flow(&pf->mbox);
+ if (!del_req) {
+ err = -ENOMEM;
+ goto out;
+ }
+ idx = ((vf * OTX2_PER_VF_VLAN_FLOWS) + OTX2_VF_VLAN_TX_INDEX);
+ del_req->entry =
+ flow_cfg->entry[flow_cfg->vf_vlan_offset + idx];
+ err = otx2_sync_mbox_msg(&pf->mbox);
+
+ goto out;
+ }
+
+ /* rx */
+ req = otx2_mbox_alloc_msg_npc_install_flow(&pf->mbox);
+ if (!req) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ idx = ((vf * OTX2_PER_VF_VLAN_FLOWS) + OTX2_VF_VLAN_RX_INDEX);
+ req->entry = flow_cfg->entry[flow_cfg->vf_vlan_offset + idx];
+ req->packet.vlan_tci = htons(vlan);
+ req->mask.vlan_tci = htons(VLAN_VID_MASK);
+ /* AF fills in the destination MAC address */
+ eth_broadcast_addr((u8 *)&req->mask.dmac);
+ req->features = BIT_ULL(NPC_OUTER_VID) | BIT_ULL(NPC_DMAC);
+ req->channel = pf->hw.rx_chan_base;
+ req->intf = NIX_INTF_RX;
+ req->vf = vf + 1;
+ req->op = NIX_RX_ACTION_DEFAULT;
+ req->vtag0_valid = true;
+ req->vtag0_type = NIX_AF_LFX_RX_VTAG_TYPE7;
+ req->set_cntr = 1;
+
+ err = otx2_sync_mbox_msg(&pf->mbox);
+ if (err)
+ goto out;
+
+ /* tx */
+ vtag_req = otx2_mbox_alloc_msg_nix_vtag_cfg(&pf->mbox);
+ if (!vtag_req) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ /* configure tx vtag params */
+ vtag_req->vtag_size = VTAGSIZE_T4;
+ vtag_req->cfg_type = 0; /* tx vlan cfg */
+ vtag_req->tx.cfg_vtag0 = 1;
+ vtag_req->tx.vtag0 = ((u64)ntohs(proto) << 16) | vlan;
+
+ err = otx2_sync_mbox_msg(&pf->mbox);
+ if (err)
+ goto out;
+
+ vtag_rsp = (struct nix_vtag_config_rsp *)otx2_mbox_get_rsp
+ (&pf->mbox.mbox, 0, &vtag_req->hdr);
+ if (IS_ERR(vtag_rsp)) {
+ err = PTR_ERR(vtag_rsp);
+ goto out;
+ }
+ config->tx_vtag_idx = vtag_rsp->vtag0_idx;
+
+ req = otx2_mbox_alloc_msg_npc_install_flow(&pf->mbox);
+ if (!req) {
+ err = -ENOMEM;
+ goto out;
+ }
+
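+ /* TX rule: insert the allocated VTAG into the VF's egress packets */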
+ eth_zero_addr((u8 *)&req->mask.dmac);
+ idx = ((vf * OTX2_PER_VF_VLAN_FLOWS) + OTX2_VF_VLAN_TX_INDEX);
+ req->entry = flow_cfg->entry[flow_cfg->vf_vlan_offset + idx];
+ req->features = BIT_ULL(NPC_DMAC);
+ req->channel = pf->hw.tx_chan_base;
+ req->intf = NIX_INTF_TX;
+ req->vf = vf + 1;
+ req->op = NIX_TX_ACTIONOP_UCAST_DEFAULT;
+ req->vtag0_def = vtag_rsp->vtag0_idx;
+ req->vtag0_op = VTAG_INSERT;
+ req->set_cntr = 1;
+
+ err = otx2_sync_mbox_msg(&pf->mbox);
+out:
+ config->vlan = vlan;
+ mutex_unlock(&pf->mbox.lock);
+ return err;
+}
+
+static int otx2_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos,
+ __be16 proto)
+{
+ struct otx2_nic *pf = netdev_priv(netdev);
+ struct pci_dev *pdev = pf->pdev;
+
+ if (!netif_running(netdev))
+ return -EAGAIN;
+
+ if (vf >= pci_num_vf(pdev))
+ return -EINVAL;
+
+ /* QoS is currently unsupported */
+ if (vlan >= VLAN_N_VID || qos)
+ return -EINVAL;
+
+ if (proto != htons(ETH_P_8021Q))
+ return -EPROTONOSUPPORT;
+
+ if (!(pf->flags & OTX2_FLAG_VF_VLAN_SUPPORT))
+ return -EOPNOTSUPP;
+
+ return otx2_do_set_vf_vlan(pf, vf, vlan, qos, proto);
+}
+
+static int otx2_get_vf_config(struct net_device *netdev, int vf,
+ struct ifla_vf_info *ivi)
+{
+ struct otx2_nic *pf = netdev_priv(netdev);
+ struct pci_dev *pdev = pf->pdev;
+ struct otx2_vf_config *config;
+
+ if (!netif_running(netdev))
+ return -EAGAIN;
+
+ if (vf >= pci_num_vf(pdev))
+ return -EINVAL;
+
+ config = &pf->vf_configs[vf];
+ ivi->vf = vf;
+ ether_addr_copy(ivi->mac, config->mac);
+ ivi->vlan = config->vlan;
+
+ return 0;
+}
+
static const struct net_device_ops otx2_netdev_ops = {
.ndo_open = otx2_open,
.ndo_stop = otx2_stop,
@@ -1914,6 +2180,9 @@ static const struct net_device_ops otx2_netdev_ops = {
.ndo_tx_timeout = otx2_tx_timeout,
.ndo_get_stats64 = otx2_get_stats64,
.ndo_do_ioctl = otx2_ioctl,
+ .ndo_set_vf_mac = otx2_set_vf_mac,
+ .ndo_set_vf_vlan = otx2_set_vf_vlan,
+ .ndo_get_vf_config = otx2_get_vf_config,
};
static int otx2_wq_init(struct otx2_nic *pf)
@@ -2110,6 +2379,25 @@ static int otx2_probe(struct pci_dev *pdev, const struct pci_device_id *id)
netdev->hw_features |= NETIF_F_LOOPBACK | NETIF_F_RXALL;
+ err = otx2_mcam_flow_init(pf);
+ if (err)
+ goto err_ptp_destroy;
+
+ if (pf->flags & OTX2_FLAG_NTUPLE_SUPPORT)
+ netdev->hw_features |= NETIF_F_NTUPLE;
+
+ if (pf->flags & OTX2_FLAG_UCAST_FLTR_SUPPORT)
+ netdev->priv_flags |= IFF_UNICAST_FLT;
+
+ /* Support TSO on tagged (VLAN) interfaces */
+ netdev->vlan_features |= netdev->features;
+ netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX |
+ NETIF_F_HW_VLAN_STAG_TX;
+ if (pf->flags & OTX2_FLAG_RX_VLAN_SUPPORT)
+ netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX |
+ NETIF_F_HW_VLAN_STAG_RX;
+ netdev->features |= netdev->hw_features;
+
netdev->gso_max_segs = OTX2_MAX_GSO_SEGS;
netdev->watchdog_timeo = OTX2_TX_TIMEOUT;
@@ -2122,7 +2410,7 @@ static int otx2_probe(struct pci_dev *pdev, const struct pci_device_id *id)
err = register_netdev(netdev);
if (err) {
dev_err(dev, "Failed to register netdevice\n");
- goto err_ptp_destroy;
+ goto err_del_mcam_entries;
}
err = otx2_wq_init(pf);
@@ -2142,6 +2430,8 @@ static int otx2_probe(struct pci_dev *pdev, const struct pci_device_id *id)
err_unreg_netdev:
unregister_netdev(netdev);
+err_del_mcam_entries:
+ otx2_mcam_flow_del(pf);
err_ptp_destroy:
otx2_ptp_destroy(pf);
err_detach_rsrc:
@@ -2285,6 +2575,8 @@ static void otx2_remove(struct pci_dev *pdev)
pf = netdev_priv(netdev);
+ pf->flags |= OTX2_FLAG_PF_SHUTDOWN;
+
if (pf->flags & OTX2_FLAG_TX_TSTAMP_ENABLED)
otx2_config_hw_tx_tstamp(pf, false);
if (pf->flags & OTX2_FLAG_RX_TSTAMP_ENABLED)
@@ -2300,6 +2592,7 @@ static void otx2_remove(struct pci_dev *pdev)
destroy_workqueue(pf->otx2_wq);
otx2_ptp_destroy(pf);
+ otx2_mcam_flow_del(pf);
otx2_detach_resources(&pf->mbox);
otx2_disable_mbox_intr(pf);
otx2_pfaf_mbox_destroy(pf);
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c
index d5d7a2f37493..d0e25414f1a1 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c
@@ -556,6 +556,19 @@ static void otx2_sqe_add_ext(struct otx2_nic *pfvf, struct otx2_snd_queue *sq,
ext->tstmp = 1;
}
+#define OTX2_VLAN_PTR_OFFSET (ETH_HLEN - ETH_TLEN)
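+ /* ETH_HLEN - ETH_TLEN = 12, i.e. the tag goes right after the source MAC */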
+ if (skb_vlan_tag_present(skb)) {
+ if (skb->vlan_proto == htons(ETH_P_8021Q)) {
+ ext->vlan1_ins_ena = 1;
+ ext->vlan1_ins_ptr = OTX2_VLAN_PTR_OFFSET;
+ ext->vlan1_ins_tci = skb_vlan_tag_get(skb);
+ } else if (skb->vlan_proto == htons(ETH_P_8021AD)) {
+ ext->vlan0_ins_ena = 1;
+ ext->vlan0_ins_ptr = OTX2_VLAN_PTR_OFFSET;
+ ext->vlan0_ins_tci = skb_vlan_tag_get(skb);
+ }
+ }
+
*offset += sizeof(*ext);
}
@@ -871,6 +884,9 @@ bool otx2_sq_append_skb(struct net_device *netdev, struct otx2_snd_queue *sq,
}
if (skb_shinfo(skb)->gso_size && !is_hw_tso_supported(pfvf, skb)) {
+ /* Insert the VLAN tag into the payload before handing the packet to TSO */
+ if (skb_vlan_tag_present(skb))
+ skb = __vlan_hwaccel_push_inside(skb);
otx2_sq_append_tso(pfvf, sq, skb, qidx);
return true;
}
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c
index 67fabf265fe6..d3e4cfd244e2 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c
@@ -558,6 +558,11 @@ static int otx2vf_probe(struct pci_dev *pdev, const struct pci_device_id *id)
NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
NETIF_F_GSO_UDP_L4;
netdev->features = netdev->hw_features;
+ /* Support TSO on tagged (VLAN) interfaces */
+ netdev->vlan_features |= netdev->features;
+ netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX |
+ NETIF_F_HW_VLAN_STAG_TX;
+ netdev->features |= netdev->hw_features;
netdev->gso_max_segs = OTX2_MAX_GSO_SEGS;
netdev->watchdog_timeo = OTX2_TX_TIMEOUT;
diff --git a/drivers/net/ethernet/marvell/sky2.c b/drivers/net/ethernet/marvell/sky2.c
index 25981a7a43b5..ebe1406c6e64 100644
--- a/drivers/net/ethernet/marvell/sky2.c
+++ b/drivers/net/ethernet/marvell/sky2.c
@@ -4900,7 +4900,7 @@ static const char *sky2_name(u8 chipid, char *buf, int sz)
};
if (chipid >= CHIP_ID_YUKON_XL && chipid <= CHIP_ID_YUKON_OP_2)
- strncpy(buf, name[chipid - CHIP_ID_YUKON_XL], sz);
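+ /* snprintf, unlike strncpy, guarantees NUL termination of buf */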
+ snprintf(buf, sz, "%s", name[chipid - CHIP_ID_YUKON_XL]);
else
snprintf(buf, sz, "(chip %#x)", chipid);
return buf;
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
index 106513f772c3..157f7eef92f1 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
@@ -2027,7 +2027,6 @@ static void mlx4_en_clear_stats(struct net_device *dev)
if (mlx4_en_DUMP_ETH_STATS(mdev, priv->port, 1))
en_dbg(HW, priv, "Failed dumping statistics\n");
- memset(&priv->pstats, 0, sizeof(priv->pstats));
memset(&priv->pkstats, 0, sizeof(priv->pkstats));
memset(&priv->port_stats, 0, sizeof(priv->port_stats));
memset(&priv->rx_flowstats, 0, sizeof(priv->rx_flowstats));
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
index 502d1b97855c..55fc33de4ce7 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
@@ -684,7 +684,7 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
xdp_prog = rcu_dereference(ring->xdp_prog);
xdp.rxq = &ring->xdp_rxq;
xdp.frame_sz = priv->frag_info[0].frag_stride;
- doorbell_pending = 0;
+ doorbell_pending = false;
/* We assume a 1:1 mapping between CQEs and Rx descriptors, so Rx
* descriptor offset can be deduced from the CQE index instead of
@@ -914,7 +914,6 @@ next:
wmb(); /* ensure HW sees CQ consumer before we post new buffers */
ring->cons = cq->mcq.cons_index;
}
- AVG_PERF_COUNTER(priv->pstats.rx_coal_avg, polled);
mlx4_en_refill_rx_buffers(priv, ring);
@@ -966,8 +965,6 @@ int mlx4_en_poll_rx_cq(struct napi_struct *napi, int budget)
/* in case we got here because of !clean_complete */
done = budget;
- INC_PERF_COUNTER(priv->pstats.napi_quota);
-
cpu_curr = smp_processor_id();
idata = irq_desc_get_irq_data(cq->irq_desc);
aff = irq_data_get_affinity_mask(idata);
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
index 3ddb7268e415..b15ec32758a3 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
@@ -864,9 +864,6 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
if (unlikely(!priv->port_up))
goto tx_drop;
- /* fetch ring->cons far ahead before needing it to avoid stall */
- ring_cons = READ_ONCE(ring->cons);
-
real_size = get_real_size(skb, shinfo, dev, &lso_header_size,
&inline_ok, &fragptr);
if (unlikely(!real_size))
@@ -898,10 +895,6 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
netdev_txq_bql_enqueue_prefetchw(ring->tx_queue);
- /* Track current inflight packets for performance analysis */
- AVG_PERF_COUNTER(priv->pstats.inflight_avg,
- (u32)(ring->prod - ring_cons - 1));
-
/* Packet is good - grab an index and transmit it */
index = ring->prod & ring->size_mask;
bf_index = ring->prod;
@@ -1012,7 +1005,6 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
ring->packets++;
}
ring->bytes += tx_info->nr_bytes;
- AVG_PERF_COUNTER(priv->pstats.tx_pktsz_avg, skb->len);
if (tx_info->inl)
build_inline_wqe(tx_desc, skb, shinfo, fragptr);
@@ -1141,10 +1133,6 @@ netdev_tx_t mlx4_en_xmit_frame(struct mlx4_en_rx_ring *rx_ring,
index = ring->prod & ring->size_mask;
tx_info = &ring->tx_info[index];
- /* Track current inflight packets for performance analysis */
- AVG_PERF_COUNTER(priv->pstats.inflight_avg,
- (u32)(ring->prod - READ_ONCE(ring->cons) - 1));
-
tx_desc = ring->buf + (index << LOG_TXBB_SIZE);
data = &tx_desc->data;
@@ -1169,7 +1157,6 @@ netdev_tx_t mlx4_en_xmit_frame(struct mlx4_en_rx_ring *rx_ring,
cpu_to_be32(MLX4_EN_BIT_DESC_OWN) : 0);
rx_ring->xdp_tx++;
- AVG_PERF_COUNTER(priv->pstats.tx_pktsz_avg, length);
ring->prod += MLX4_EN_XDP_TX_NRTXBB;
diff --git a/drivers/net/ethernet/mellanox/mlx4/fw_qos.h b/drivers/net/ethernet/mellanox/mlx4/fw_qos.h
index 582997577a04..954b86faac29 100644
--- a/drivers/net/ethernet/mellanox/mlx4/fw_qos.h
+++ b/drivers/net/ethernet/mellanox/mlx4/fw_qos.h
@@ -135,7 +135,7 @@ int mlx4_SET_VPORT_QOS_get(struct mlx4_dev *dev, u8 port, u8 vport,
* @dev: mlx4_dev.
* @port: Physical port number.
* @vport: Vport id.
- * @out_param: Array of mlx4_vport_qos_param which holds the requested values.
+ * @in_param: Array of mlx4_vport_qos_param which holds the requested values.
*
* Returns 0 on success or a negative mlx4_core errno code.
**/
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
index a46efe37cfa9..1c50d0f22199 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
@@ -36,6 +36,7 @@
#include <linux/bitops.h>
#include <linux/compiler.h>
+#include <linux/ethtool.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/netdevice.h>
@@ -170,27 +171,6 @@
#define MLX4_EN_LOOPBACK_RETRIES 5
#define MLX4_EN_LOOPBACK_TIMEOUT 100
-#ifdef MLX4_EN_PERF_STAT
-/* Number of samples to 'average' */
-#define AVG_SIZE 128
-#define AVG_FACTOR 1024
-
-#define INC_PERF_COUNTER(cnt) (++(cnt))
-#define ADD_PERF_COUNTER(cnt, add) ((cnt) += (add))
-#define AVG_PERF_COUNTER(cnt, sample) \
- ((cnt) = ((cnt) * (AVG_SIZE - 1) + (sample) * AVG_FACTOR) / AVG_SIZE)
-#define GET_PERF_COUNTER(cnt) (cnt)
-#define GET_AVG_PERF_COUNTER(cnt) ((cnt) / AVG_FACTOR)
-
-#else
-
-#define INC_PERF_COUNTER(cnt) do {} while (0)
-#define ADD_PERF_COUNTER(cnt, add) do {} while (0)
-#define AVG_PERF_COUNTER(cnt, sample) do {} while (0)
-#define GET_PERF_COUNTER(cnt) (0)
-#define GET_AVG_PERF_COUNTER(cnt) (0)
-#endif /* MLX4_EN_PERF_STAT */
-
/* Constants for TX flow */
enum {
MAX_INLINE = 104, /* 128 - 16 - 4 - 4 */
@@ -599,7 +579,6 @@ struct mlx4_en_priv {
struct work_struct linkstate_task;
struct delayed_work stats_task;
struct delayed_work service_task;
- struct mlx4_en_perf_stats pstats;
struct mlx4_en_pkt_stats pkstats;
struct mlx4_en_counter_stats pf_stats;
struct mlx4_en_flow_stats_rx rx_priority_flowstats[MLX4_NUM_PRIORITIES];
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_stats.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_stats.h
index 51d4eaab6a2f..7b51ae8cf759 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4_stats.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_stats.h
@@ -2,12 +2,6 @@
#ifndef _MLX4_STATS_
#define _MLX4_STATS_
-#ifdef MLX4_EN_PERF_STAT
-#define NUM_PERF_STATS NUM_PERF_COUNTERS
-#else
-#define NUM_PERF_STATS 0
-#endif
-
#define NUM_PRIORITIES 9
#define NUM_PRIORITY_STATS 2
@@ -46,16 +40,6 @@ struct mlx4_en_port_stats {
#define NUM_PORT_STATS 10
};
-struct mlx4_en_perf_stats {
- u32 tx_poll;
- u64 tx_pktsz_avg;
- u32 inflight_avg;
- u16 tx_coal_avg;
- u16 rx_coal_avg;
- u32 napi_quota;
-#define NUM_PERF_COUNTERS 6
-};
-
struct mlx4_en_xdp_stats {
unsigned long rx_xdp_drop;
unsigned long rx_xdp_tx;
@@ -135,7 +119,7 @@ enum {
};
#define NUM_ALL_STATS (NUM_MAIN_STATS + NUM_PORT_STATS + NUM_PKT_STATS + \
- NUM_FLOW_STATS + NUM_PERF_STATS + NUM_PF_STATS + \
+ NUM_FLOW_STATS + NUM_PF_STATS + \
NUM_XDP_STATS + NUM_PHY_STATS)
#define MLX4_FIND_NETDEV_STAT(n) (offsetof(struct net_device_stats, n) / \
diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
index 1187ef1375e2..394f43add85c 100644
--- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
+++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
@@ -300,7 +300,7 @@ static const char *resource_str(enum mlx4_resource rt)
case RES_FS_RULE: return "RES_FS_RULE";
case RES_XRCD: return "RES_XRCD";
default: return "Unknown resource type !!!";
- };
+ }
}
static void rem_slave_vlans(struct mlx4_dev *dev, int slave);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Makefile b/drivers/net/ethernet/mellanox/mlx5/core/Makefile
index 2d477f9a8cb7..83a67ca43a41 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/Makefile
+++ b/drivers/net/ethernet/mellanox/mlx5/core/Makefile
@@ -81,7 +81,7 @@ mlx5_core-$(CONFIG_MLX5_EN_TLS) += en_accel/tls.o en_accel/tls_rxtx.o en_accel/t
mlx5_core-$(CONFIG_MLX5_SW_STEERING) += steering/dr_domain.o steering/dr_table.o \
steering/dr_matcher.o steering/dr_rule.o \
- steering/dr_icm_pool.o \
+ steering/dr_icm_pool.o steering/dr_buddy.o \
steering/dr_ste.o steering/dr_send.o \
steering/dr_cmd.o steering/dr_fw.o \
steering/dr_action.o steering/fs_dr.o
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
index e49387dbef98..50c7b9ee80c3 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
@@ -2142,7 +2142,6 @@ dma_pool_err:
kvfree(cmd->stats);
return err;
}
-EXPORT_SYMBOL(mlx5_cmd_init);
void mlx5_cmd_cleanup(struct mlx5_core_dev *dev)
{
@@ -2155,11 +2154,9 @@ void mlx5_cmd_cleanup(struct mlx5_core_dev *dev)
dma_pool_destroy(cmd->pool);
kvfree(cmd->stats);
}
-EXPORT_SYMBOL(mlx5_cmd_cleanup);
void mlx5_cmd_set_state(struct mlx5_core_dev *dev,
enum mlx5_cmdif_state cmdif_state)
{
dev->cmd.state = cmdif_state;
}
-EXPORT_SYMBOL(mlx5_cmd_set_state);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/devlink.c b/drivers/net/ethernet/mellanox/mlx5/core/devlink.c
index a28f95df2901..e2ed341648e4 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/devlink.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/devlink.c
@@ -13,17 +13,8 @@ static int mlx5_devlink_flash_update(struct devlink *devlink,
struct netlink_ext_ack *extack)
{
struct mlx5_core_dev *dev = devlink_priv(devlink);
- const struct firmware *fw;
- int err;
-
- err = request_firmware_direct(&fw, params->file_name, &dev->pdev->dev);
- if (err)
- return err;
-
- err = mlx5_firmware_flash(dev, fw, extack);
- release_firmware(fw);
- return err;
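+ /* The devlink core now pre-fetches the firmware and passes it in params->fw */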
+ return mlx5_firmware_flash(dev, params->fw, extack);
}
static u8 mlx5_fw_ver_major(u32 version)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.c b/drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.c
index a700f3c86899..87d65f6b5310 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.c
@@ -247,6 +247,9 @@ const char *parse_fs_dst(struct trace_seq *p,
case MLX5_FLOW_DESTINATION_TYPE_TIR:
trace_seq_printf(p, "tir=%u\n", dst->tir_num);
break;
+ case MLX5_FLOW_DESTINATION_TYPE_FLOW_SAMPLER:
+ trace_seq_printf(p, "sampler_id=%u\n", dst->sampler_id);
+ break;
case MLX5_FLOW_DESTINATION_TYPE_COUNTER:
trace_seq_printf(p, "counter_id=%u\n", counter_id);
break;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ecpf.c b/drivers/net/ethernet/mellanox/mlx5/core/ecpf.c
index 3dc9dd3f24dc..464eb3a18450 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/ecpf.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/ecpf.c
@@ -8,37 +8,66 @@ bool mlx5_read_embedded_cpu(struct mlx5_core_dev *dev)
return (ioread32be(&dev->iseg->initializing) >> MLX5_ECPU_BIT_NUM) & 1;
}
-static int mlx5_peer_pf_init(struct mlx5_core_dev *dev)
+static bool mlx5_ecpf_esw_admins_host_pf(const struct mlx5_core_dev *dev)
{
- u32 in[MLX5_ST_SZ_DW(enable_hca_in)] = {};
- int err;
+ /* In separate host mode, the host PF enables itself.
+ * When the ECPF is the eswitch manager, the eswitch enables the
+ * host PF after the eswitch is set up.
+ */
+ return mlx5_core_is_ecpf_esw_manager(dev);
+}
+
+int mlx5_cmd_host_pf_enable_hca(struct mlx5_core_dev *dev)
+{
+ u32 out[MLX5_ST_SZ_DW(enable_hca_out)] = {};
+ u32 in[MLX5_ST_SZ_DW(enable_hca_in)] = {};
MLX5_SET(enable_hca_in, in, opcode, MLX5_CMD_OP_ENABLE_HCA);
- err = mlx5_cmd_exec_in(dev, enable_hca, in);
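+ /* function_id 0 with embedded_cpu_function 0 addresses the external host PF */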
+ MLX5_SET(enable_hca_in, in, function_id, 0);
+ MLX5_SET(enable_hca_in, in, embedded_cpu_function, 0);
+ return mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
+}
+
+int mlx5_cmd_host_pf_disable_hca(struct mlx5_core_dev *dev)
+{
+ u32 out[MLX5_ST_SZ_DW(disable_hca_out)] = {};
+ u32 in[MLX5_ST_SZ_DW(disable_hca_in)] = {};
+
+ MLX5_SET(disable_hca_in, in, opcode, MLX5_CMD_OP_DISABLE_HCA);
+ MLX5_SET(disable_hca_in, in, function_id, 0);
+ MLX5_SET(disable_hca_in, in, embedded_cpu_function, 0);
+ return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+}
+
+static int mlx5_host_pf_init(struct mlx5_core_dev *dev)
+{
+ int err;
+
+ if (mlx5_ecpf_esw_admins_host_pf(dev))
+ return 0;
+
+ /* The ECPF shall enable the HCA for the host PF in the same way a PF
+ * does this for its VFs when the ECPF is not an eswitch manager.
+ */
+ err = mlx5_cmd_host_pf_enable_hca(dev);
if (err)
- mlx5_core_err(dev, "Failed to enable peer PF HCA err(%d)\n",
- err);
+ mlx5_core_err(dev, "Failed to enable external host PF HCA err(%d)\n", err);
return err;
}
-static void mlx5_peer_pf_cleanup(struct mlx5_core_dev *dev)
+static void mlx5_host_pf_cleanup(struct mlx5_core_dev *dev)
{
- u32 in[MLX5_ST_SZ_DW(disable_hca_in)] = {};
int err;
- MLX5_SET(disable_hca_in, in, opcode, MLX5_CMD_OP_DISABLE_HCA);
- err = mlx5_cmd_exec_in(dev, disable_hca, in);
+ if (mlx5_ecpf_esw_admins_host_pf(dev))
+ return;
+
+ err = mlx5_cmd_host_pf_disable_hca(dev);
if (err) {
- mlx5_core_err(dev, "Failed to disable peer PF HCA err(%d)\n",
- err);
+ mlx5_core_err(dev, "Failed to disable external host PF HCA err(%d)\n", err);
return;
}
-
- err = mlx5_wait_for_pages(dev, &dev->priv.peer_pf_pages);
- if (err)
- mlx5_core_warn(dev, "Timeout reclaiming peer PF pages err(%d)\n",
- err);
}
int mlx5_ec_init(struct mlx5_core_dev *dev)
@@ -46,16 +75,19 @@ int mlx5_ec_init(struct mlx5_core_dev *dev)
if (!mlx5_core_is_ecpf(dev))
return 0;
- /* ECPF shall enable HCA for peer PF in the same way a PF
- * does this for its VFs.
- */
- return mlx5_peer_pf_init(dev);
+ return mlx5_host_pf_init(dev);
}
void mlx5_ec_cleanup(struct mlx5_core_dev *dev)
{
+ int err;
+
if (!mlx5_core_is_ecpf(dev))
return;
- mlx5_peer_pf_cleanup(dev);
+ mlx5_host_pf_cleanup(dev);
+
+ err = mlx5_wait_for_pages(dev, &dev->priv.host_pf_pages);
+ if (err)
+ mlx5_core_warn(dev, "Timeout reclaiming external host PF pages err(%d)\n", err);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ecpf.h b/drivers/net/ethernet/mellanox/mlx5/core/ecpf.h
index d3d7a00a02ac..40b6ad76dca6 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/ecpf.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/ecpf.h
@@ -17,6 +17,9 @@ bool mlx5_read_embedded_cpu(struct mlx5_core_dev *dev);
int mlx5_ec_init(struct mlx5_core_dev *dev);
void mlx5_ec_cleanup(struct mlx5_core_dev *dev);
+int mlx5_cmd_host_pf_enable_hca(struct mlx5_core_dev *dev);
+int mlx5_cmd_host_pf_disable_hca(struct mlx5_core_dev *dev);
+
#else /* CONFIG_MLX5_ESWITCH */
static inline bool
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/params.c b/drivers/net/ethernet/mellanox/mlx5/core/en/params.c
index 38e4f19d69f8..43271a3856ca 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/params.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/params.c
@@ -2,6 +2,8 @@
/* Copyright (c) 2019 Mellanox Technologies. */
#include "en/params.h"
+#include "en/txrx.h"
+#include "en_accel/tls_rxtx.h"
static inline bool mlx5e_rx_is_xdp(struct mlx5e_params *params,
struct mlx5e_xsk_param *xsk)
@@ -152,3 +154,35 @@ u16 mlx5e_get_rq_headroom(struct mlx5_core_dev *mdev,
return is_linear_skb ? mlx5e_get_linear_rq_headroom(params, xsk) : 0;
}
+
+u16 mlx5e_calc_sq_stop_room(struct mlx5_core_dev *mdev, struct mlx5e_params *params)
+{
+ bool is_mpwqe = MLX5E_GET_PFLAG(params, MLX5E_PFLAG_SKB_TX_MPWQE);
+ u16 stop_room;
+
+ stop_room = mlx5e_tls_get_stop_room(mdev, params);
+ stop_room += mlx5e_stop_room_for_wqe(MLX5_SEND_WQE_MAX_WQEBBS);
+ if (is_mpwqe)
+ /* An MPWQE can take up to the maximum-sized WQE, and on top of
+ * that the normal stop room can be consumed if a new packet
+ * breaks the active MPWQE session and allocates its WQEs right
+ * away.
+ */
+ stop_room += mlx5e_stop_room_for_wqe(MLX5_SEND_WQE_MAX_WQEBBS);
+
+ return stop_room;
+}
+
+int mlx5e_validate_params(struct mlx5e_priv *priv, struct mlx5e_params *params)
+{
+ size_t sq_size = 1 << params->log_sq_size;
+ u16 stop_room;
+
+ stop_room = mlx5e_calc_sq_stop_room(priv->mdev, params);
+ if (stop_room >= sq_size) {
+ netdev_err(priv->netdev, "Stop room %hu is bigger than the SQ size %zu\n",
+ stop_room, sq_size);
+ return -EINVAL;
+ }
+
+ return 0;
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/params.h b/drivers/net/ethernet/mellanox/mlx5/core/en/params.h
index a87273e801b2..187007ad3349 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/params.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/params.h
@@ -30,6 +30,7 @@ struct mlx5e_sq_param {
u32 sqc[MLX5_ST_SZ_DW(sqc)];
struct mlx5_wq_param wq;
bool is_mpw;
+ u16 stop_room;
};
struct mlx5e_channel_param {
@@ -124,4 +125,7 @@ void mlx5e_build_xdpsq_param(struct mlx5e_priv *priv,
struct mlx5e_params *params,
struct mlx5e_sq_param *param);
+u16 mlx5e_calc_sq_stop_room(struct mlx5_core_dev *mdev, struct mlx5e_params *params);
+int mlx5e_validate_params(struct mlx5e_priv *priv, struct mlx5e_params *params);
+
#endif /* __MLX5_EN_PARAMS_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
index ae90d533a350..2e3e78b0f333 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
@@ -366,7 +366,8 @@ mlx5e_xmit_xdp_frame(struct mlx5e_xdpsq *sq, struct mlx5e_xmit_data *xdptxd,
static void mlx5e_free_xdpsq_desc(struct mlx5e_xdpsq *sq,
struct mlx5e_xdp_wqe_info *wi,
u32 *xsk_frames,
- bool recycle)
+ bool recycle,
+ struct xdp_frame_bulk *bq)
{
struct mlx5e_xdp_info_fifo *xdpi_fifo = &sq->db.xdpi_fifo;
u16 i;
@@ -379,7 +380,7 @@ static void mlx5e_free_xdpsq_desc(struct mlx5e_xdpsq *sq,
/* XDP_TX from the XSK RQ and XDP_REDIRECT */
dma_unmap_single(sq->pdev, xdpi.frame.dma_addr,
xdpi.frame.xdpf->len, DMA_TO_DEVICE);
- xdp_return_frame(xdpi.frame.xdpf);
+ xdp_return_frame_bulk(xdpi.frame.xdpf, bq);
break;
case MLX5E_XDP_XMIT_MODE_PAGE:
/* XDP_TX from the regular RQ */
@@ -397,12 +398,15 @@ static void mlx5e_free_xdpsq_desc(struct mlx5e_xdpsq *sq,
bool mlx5e_poll_xdpsq_cq(struct mlx5e_cq *cq)
{
+ struct xdp_frame_bulk bq;
struct mlx5e_xdpsq *sq;
struct mlx5_cqe64 *cqe;
u32 xsk_frames = 0;
u16 sqcc;
int i;
+ xdp_frame_bulk_init(&bq);
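+ /* Batch XDP frame returns; the bulk is flushed after the CQ poll loop */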
+
sq = container_of(cq, struct mlx5e_xdpsq, cq);
if (unlikely(!test_bit(MLX5E_SQ_STATE_ENABLED, &sq->state)))
@@ -434,7 +438,7 @@ bool mlx5e_poll_xdpsq_cq(struct mlx5e_cq *cq)
sqcc += wi->num_wqebbs;
- mlx5e_free_xdpsq_desc(sq, wi, &xsk_frames, true);
+ mlx5e_free_xdpsq_desc(sq, wi, &xsk_frames, true, &bq);
} while (!last_wqe);
if (unlikely(get_cqe_opcode(cqe) != MLX5_CQE_REQ)) {
@@ -447,6 +451,8 @@ bool mlx5e_poll_xdpsq_cq(struct mlx5e_cq *cq)
}
} while ((++i < MLX5E_TX_CQ_POLL_BUDGET) && (cqe = mlx5_cqwq_get_cqe(&cq->wq)));
+ xdp_flush_frame_bulk(&bq);
+
if (xsk_frames)
xsk_tx_completed(sq->xsk_pool, xsk_frames);
@@ -463,8 +469,13 @@ bool mlx5e_poll_xdpsq_cq(struct mlx5e_cq *cq)
void mlx5e_free_xdpsq_descs(struct mlx5e_xdpsq *sq)
{
+ struct xdp_frame_bulk bq;
u32 xsk_frames = 0;
+ xdp_frame_bulk_init(&bq);
+
+ rcu_read_lock(); /* need for xdp_return_frame_bulk */
+
while (sq->cc != sq->pc) {
struct mlx5e_xdp_wqe_info *wi;
u16 ci;
@@ -474,9 +485,12 @@ void mlx5e_free_xdpsq_descs(struct mlx5e_xdpsq *sq)
sq->cc += wi->num_wqebbs;
- mlx5e_free_xdpsq_desc(sq, wi, &xsk_frames, false);
+ mlx5e_free_xdpsq_desc(sq, wi, &xsk_frames, false, &bq);
}
+ xdp_flush_frame_bulk(&bq);
+ rcu_read_unlock();
+
if (xsk_frames)
xsk_tx_completed(sq->xsk_pool, xsk_frames);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c
index b140e13fdcc8..d16def68ecff 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c
@@ -13,20 +13,20 @@ struct mlx5e_dump_wqe {
(DIV_ROUND_UP(sizeof(struct mlx5e_dump_wqe), MLX5_SEND_WQE_BB))
static u8
-mlx5e_ktls_dumps_num_wqes(struct mlx5e_txqsq *sq, unsigned int nfrags,
+mlx5e_ktls_dumps_num_wqes(struct mlx5e_params *params, unsigned int nfrags,
unsigned int sync_len)
{
/* Given the MTU and sync_len, calculates an upper bound for the
* number of DUMP WQEs needed for the TX resync of a record.
*/
- return nfrags + DIV_ROUND_UP(sync_len, sq->hw_mtu);
+ return nfrags + DIV_ROUND_UP(sync_len, MLX5E_SW2HW_MTU(params, params->sw_mtu));
}
-u16 mlx5e_ktls_get_stop_room(struct mlx5e_txqsq *sq)
+u16 mlx5e_ktls_get_stop_room(struct mlx5e_params *params)
{
u16 num_dumps, stop_room = 0;
- num_dumps = mlx5e_ktls_dumps_num_wqes(sq, MAX_SKB_FRAGS, TLS_MAX_PAYLOAD_SIZE);
+ num_dumps = mlx5e_ktls_dumps_num_wqes(params, MAX_SKB_FRAGS, TLS_MAX_PAYLOAD_SIZE);
stop_room += mlx5e_stop_room_for_wqe(MLX5E_TLS_SET_STATIC_PARAMS_WQEBBS);
stop_room += mlx5e_stop_room_for_wqe(MLX5E_TLS_SET_PROGRESS_PARAMS_WQEBBS);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_txrx.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_txrx.h
index 7521c9be735b..ee04e916fa21 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_txrx.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_txrx.h
@@ -14,7 +14,7 @@ struct mlx5e_accel_tx_tls_state {
u32 tls_tisn;
};
-u16 mlx5e_ktls_get_stop_room(struct mlx5e_txqsq *sq);
+u16 mlx5e_ktls_get_stop_room(struct mlx5e_params *params);
bool mlx5e_ktls_handle_tx_skb(struct tls_context *tls_ctx, struct mlx5e_txqsq *sq,
struct sk_buff *skb, int datalen,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c
index 6982b193ee8a..f51c04284e4d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c
@@ -385,15 +385,13 @@ void mlx5e_tls_handle_rx_skb_metadata(struct mlx5e_rq *rq, struct sk_buff *skb,
*cqe_bcnt -= MLX5E_METADATA_ETHER_LEN;
}
-u16 mlx5e_tls_get_stop_room(struct mlx5e_txqsq *sq)
+u16 mlx5e_tls_get_stop_room(struct mlx5_core_dev *mdev, struct mlx5e_params *params)
{
- struct mlx5_core_dev *mdev = sq->channel->mdev;
-
if (!mlx5_accel_is_tls_device(mdev))
return 0;
if (mlx5_accel_is_ktls_device(mdev))
- return mlx5e_ktls_get_stop_room(sq);
+ return mlx5e_ktls_get_stop_room(params);
/* FPGA */
/* Resync SKB. */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.h
index 5f162ad2ee8f..9923132c9440 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.h
@@ -43,7 +43,7 @@
#include "en.h"
#include "en/txrx.h"
-u16 mlx5e_tls_get_stop_room(struct mlx5e_txqsq *sq);
+u16 mlx5e_tls_get_stop_room(struct mlx5_core_dev *mdev, struct mlx5e_params *params);
bool mlx5e_tls_handle_tx_skb(struct net_device *netdev, struct mlx5e_txqsq *sq,
struct sk_buff *skb, struct mlx5e_accel_tx_tls_state *state);
@@ -71,7 +71,7 @@ mlx5e_accel_is_tls(struct mlx5_cqe64 *cqe, struct sk_buff *skb) { return false;
static inline void
mlx5e_tls_handle_rx_skb(struct mlx5e_rq *rq, struct sk_buff *skb,
struct mlx5_cqe64 *cqe, u32 *cqe_bcnt) {}
-static inline u16 mlx5e_tls_get_stop_room(struct mlx5e_txqsq *sq)
+static inline u16 mlx5e_tls_get_stop_room(struct mlx5_core_dev *mdev, struct mlx5e_params *params)
{
return 0;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
index d25a56ec6876..42e61dc28ead 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
@@ -32,6 +32,7 @@
#include "en.h"
#include "en/port.h"
+#include "en/params.h"
#include "en/xsk/pool.h"
#include "lib/clock.h"
@@ -369,6 +370,10 @@ int mlx5e_ethtool_set_ringparam(struct mlx5e_priv *priv,
new_channels.params.log_rq_mtu_frames = log_rq_size;
new_channels.params.log_sq_size = log_sq_size;
+ err = mlx5e_validate_params(priv, &new_channels.params);
+ if (err)
+ goto unlock;
+
if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) {
priv->channels.params = new_channels.params;
goto unlock;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index ebce97921e03..527c5f12c5af 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -1121,28 +1121,6 @@ static int mlx5e_alloc_txqsq_db(struct mlx5e_txqsq *sq, int numa)
return 0;
}
-static int mlx5e_calc_sq_stop_room(struct mlx5e_txqsq *sq, u8 log_sq_size)
-{
- int sq_size = 1 << log_sq_size;
-
- sq->stop_room = mlx5e_tls_get_stop_room(sq);
- sq->stop_room += mlx5e_stop_room_for_wqe(MLX5_SEND_WQE_MAX_WQEBBS);
- if (test_bit(MLX5E_SQ_STATE_MPWQE, &sq->state))
- /* A MPWQE can take up to the maximum-sized WQE + all the normal
- * stop room can be taken if a new packet breaks the active
- * MPWQE session and allocates its WQEs right away.
- */
- sq->stop_room += mlx5e_stop_room_for_wqe(MLX5_SEND_WQE_MAX_WQEBBS);
-
- if (WARN_ON(sq->stop_room >= sq_size)) {
- netdev_err(sq->channel->netdev, "Stop room %hu is bigger than the SQ size %d\n",
- sq->stop_room, sq_size);
- return -ENOSPC;
- }
-
- return 0;
-}
-
static void mlx5e_tx_err_cqe_work(struct work_struct *recover_work);
static int mlx5e_alloc_txqsq(struct mlx5e_channel *c,
int txq_ix,
@@ -1176,9 +1154,7 @@ static int mlx5e_alloc_txqsq(struct mlx5e_channel *c,
set_bit(MLX5E_SQ_STATE_TLS, &sq->state);
if (param->is_mpw)
set_bit(MLX5E_SQ_STATE_MPWQE, &sq->state);
- err = mlx5e_calc_sq_stop_room(sq, params->log_sq_size);
- if (err)
- return err;
+ sq->stop_room = param->stop_room;
param->wq.db_numa_node = cpu_to_node(c->cpu);
err = mlx5_wq_cyc_create(mdev, &param->wq, sqc_wq, wq, &sq->wq_ctrl);
@@ -2225,6 +2201,7 @@ static void mlx5e_build_sq_param(struct mlx5e_priv *priv,
MLX5_SET(wq, wq, log_wq_sz, params->log_sq_size);
MLX5_SET(sqc, sqc, allow_swp, allow_swp);
param->is_mpw = MLX5E_GET_PFLAG(params, MLX5E_PFLAG_SKB_TX_MPWQE);
+ param->stop_room = mlx5e_calc_sq_stop_room(priv->mdev, params);
mlx5e_build_tx_cq_param(priv, params, &param->cqp);
}
@@ -3999,6 +3976,9 @@ int mlx5e_change_mtu(struct net_device *netdev, int new_mtu,
new_channels.params = *params;
new_channels.params.sw_mtu = new_mtu;
+ err = mlx5e_validate_params(priv, &new_channels.params);
+ if (err)
+ goto out;
if (params->xdp_prog &&
!mlx5e_rx_is_linear_skb(&new_channels.params, NULL)) {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eq.c b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
index 8ebfe782f95e..4ea5d6ddf56a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
@@ -189,19 +189,21 @@ u32 mlx5_eq_poll_irq_disabled(struct mlx5_eq_comp *eq)
return count_eqe;
}
-static void mlx5_eq_async_int_lock(struct mlx5_eq_async *eq, unsigned long *flags)
+static void mlx5_eq_async_int_lock(struct mlx5_eq_async *eq, bool recovery,
+ unsigned long *flags)
__acquires(&eq->lock)
{
- if (in_irq())
+ if (!recovery)
spin_lock(&eq->lock);
else
spin_lock_irqsave(&eq->lock, *flags);
}
-static void mlx5_eq_async_int_unlock(struct mlx5_eq_async *eq, unsigned long *flags)
+static void mlx5_eq_async_int_unlock(struct mlx5_eq_async *eq, bool recovery,
+ unsigned long *flags)
__releases(&eq->lock)
{
- if (in_irq())
+ if (!recovery)
spin_unlock(&eq->lock);
else
spin_unlock_irqrestore(&eq->lock, *flags);
@@ -223,11 +225,13 @@ static int mlx5_eq_async_int(struct notifier_block *nb,
struct mlx5_eqe *eqe;
unsigned long flags;
int num_eqes = 0;
+ bool recovery;
dev = eq->dev;
eqt = dev->priv.eq_table;
- mlx5_eq_async_int_lock(eq_async, &flags);
+ recovery = action == ASYNC_EQ_RECOVER;
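+ /* The recovery path polls from process context and must disable IRQs itself */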
+ mlx5_eq_async_int_lock(eq_async, recovery, &flags);
eqe = next_eqe_sw(eq);
if (!eqe)
@@ -249,9 +253,9 @@ static int mlx5_eq_async_int(struct notifier_block *nb,
out:
eq_update_ci(eq, 1);
- mlx5_eq_async_int_unlock(eq_async, &flags);
+ mlx5_eq_async_int_unlock(eq_async, recovery, &flags);
- return unlikely(action == ASYNC_EQ_RECOVER) ? num_eqes : 0;
+ return unlikely(recovery) ? num_eqes : 0;
}
void mlx5_cmd_eq_recover(struct mlx5_core_dev *dev)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/helper.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/helper.c
index 22f4c1c28006..4a369669e51e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/helper.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/helper.c
@@ -8,6 +8,7 @@
struct mlx5_flow_table *
esw_acl_table_create(struct mlx5_eswitch *esw, u16 vport_num, int ns, int size)
{
+ struct mlx5_flow_table_attr ft_attr = {};
struct mlx5_core_dev *dev = esw->dev;
struct mlx5_flow_namespace *root_ns;
struct mlx5_flow_table *acl;
@@ -33,7 +34,9 @@ esw_acl_table_create(struct mlx5_eswitch *esw, u16 vport_num, int ns, int size)
return ERR_PTR(-EOPNOTSUPP);
}
- acl = mlx5_create_vport_flow_table(root_ns, 0, size, 0, vport_num);
+ ft_attr.max_fte = size;
+ ft_attr.flags = MLX5_FLOW_TABLE_OTHER_VPORT;
+ acl = mlx5_create_vport_flow_table(root_ns, &ft_attr, vport_num);
if (IS_ERR(acl)) {
err = PTR_ERR(acl);
esw_warn(dev, "vport[%d] create %s ACL table, err(%d)\n", vport_num,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
index d4ee0a9c03db..7f8c4a957f72 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
@@ -1474,6 +1474,26 @@ vf_err:
return err;
}
+static int host_pf_enable_hca(struct mlx5_core_dev *dev)
+{
+ if (!mlx5_core_is_ecpf(dev))
+ return 0;
+
+ /* Once the vport and representor are ready, take the external host PF
+ * out of the initializing state. Enabling the HCA clears the
+ * iseg->initializing bit so the host PF driver load can progress.
+ */
+ return mlx5_cmd_host_pf_enable_hca(dev);
+}
+
+static void host_pf_disable_hca(struct mlx5_core_dev *dev)
+{
+ if (!mlx5_core_is_ecpf(dev))
+ return;
+
+ mlx5_cmd_host_pf_disable_hca(dev);
+}
+
/* mlx5_eswitch_enable_pf_vf_vports() enables vports of PF, ECPF and VFs
* whichever are present on the eswitch.
*/
@@ -1488,6 +1508,11 @@ mlx5_eswitch_enable_pf_vf_vports(struct mlx5_eswitch *esw,
if (ret)
return ret;
+ /* Enable external host PF HCA */
+ ret = host_pf_enable_hca(esw->dev);
+ if (ret)
+ goto pf_hca_err;
+
/* Enable ECPF vport */
if (mlx5_ecpf_vport_exists(esw->dev)) {
ret = mlx5_eswitch_load_vport(esw, MLX5_VPORT_ECPF, enabled_events);
@@ -1505,8 +1530,9 @@ mlx5_eswitch_enable_pf_vf_vports(struct mlx5_eswitch *esw,
vf_err:
if (mlx5_ecpf_vport_exists(esw->dev))
mlx5_eswitch_unload_vport(esw, MLX5_VPORT_ECPF);
-
ecpf_err:
+ host_pf_disable_hca(esw->dev);
+pf_hca_err:
mlx5_eswitch_unload_vport(esw, MLX5_VPORT_PF);
return ret;
}
@@ -1521,6 +1547,7 @@ void mlx5_eswitch_disable_pf_vf_vports(struct mlx5_eswitch *esw)
if (mlx5_ecpf_vport_exists(esw->dev))
mlx5_eswitch_unload_vport(esw, MLX5_VPORT_ECPF);
+ host_pf_disable_hca(esw->dev);
mlx5_eswitch_unload_vport(esw, MLX5_VPORT_PF);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fpga/sdk.h b/drivers/net/ethernet/mellanox/mlx5/core/fpga/sdk.h
index 656f96be6e20..89ef592656c8 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fpga/sdk.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fpga/sdk.h
@@ -47,11 +47,12 @@
/**
* enum mlx5_fpga_access_type - Enumerated the different methods possible for
* accessing the device memory address space
+ *
+ * @MLX5_FPGA_ACCESS_TYPE_I2C: Use the slow CX-FPGA I2C bus
+ * @MLX5_FPGA_ACCESS_TYPE_DONTCARE: Use the fastest available method
*/
enum mlx5_fpga_access_type {
- /** Use the slow CX-FPGA I2C bus */
MLX5_FPGA_ACCESS_TYPE_I2C = 0x0,
- /** Use the fastest available method */
MLX5_FPGA_ACCESS_TYPE_DONTCARE = 0x0,
};
@@ -113,6 +114,7 @@ struct mlx5_fpga_conn_attr {
* subsequent receives.
*/
void (*recv_cb)(void *cb_arg, struct mlx5_fpga_dma_buf *buf);
+ /** @cb_arg: A context to be passed to recv_cb callback */
void *cb_arg;
};
@@ -145,7 +147,7 @@ void mlx5_fpga_sbu_conn_destroy(struct mlx5_fpga_conn *conn);
/**
* mlx5_fpga_sbu_conn_sendmsg() - Queue the transmission of a packet
- * @fdev: An FPGA SBU connection
+ * @conn: An FPGA SBU connection
* @buf: The packet buffer
*
* Queues a packet for transmission over an FPGA SBU connection.
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
index babe3405132a..8e06731d3cb3 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
@@ -172,10 +172,9 @@ static int mlx5_cmd_update_root_ft(struct mlx5_flow_root_namespace *ns,
MLX5_SET(set_flow_table_root_in, in, table_id, ft->id);
MLX5_SET(set_flow_table_root_in, in, underlay_qpn, underlay_qpn);
- if (ft->vport) {
- MLX5_SET(set_flow_table_root_in, in, vport_number, ft->vport);
- MLX5_SET(set_flow_table_root_in, in, other_vport, 1);
- }
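+ /* Driving other_vport from the table flag also allows addressing vport 0 */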
+ MLX5_SET(set_flow_table_root_in, in, vport_number, ft->vport);
+ MLX5_SET(set_flow_table_root_in, in, other_vport,
+ !!(ft->flags & MLX5_FLOW_TABLE_OTHER_VPORT));
return mlx5_cmd_exec_in(dev, set_flow_table_root, in);
}
@@ -199,10 +198,9 @@ static int mlx5_cmd_create_flow_table(struct mlx5_flow_root_namespace *ns,
MLX5_SET(create_flow_table_in, in, table_type, ft->type);
MLX5_SET(create_flow_table_in, in, flow_table_context.level, ft->level);
MLX5_SET(create_flow_table_in, in, flow_table_context.log_size, log_size);
- if (ft->vport) {
- MLX5_SET(create_flow_table_in, in, vport_number, ft->vport);
- MLX5_SET(create_flow_table_in, in, other_vport, 1);
- }
+ MLX5_SET(create_flow_table_in, in, vport_number, ft->vport);
+ MLX5_SET(create_flow_table_in, in, other_vport,
+ !!(ft->flags & MLX5_FLOW_TABLE_OTHER_VPORT));
MLX5_SET(create_flow_table_in, in, flow_table_context.decap_en,
en_decap);
@@ -252,10 +250,9 @@ static int mlx5_cmd_destroy_flow_table(struct mlx5_flow_root_namespace *ns,
MLX5_CMD_OP_DESTROY_FLOW_TABLE);
MLX5_SET(destroy_flow_table_in, in, table_type, ft->type);
MLX5_SET(destroy_flow_table_in, in, table_id, ft->id);
- if (ft->vport) {
- MLX5_SET(destroy_flow_table_in, in, vport_number, ft->vport);
- MLX5_SET(destroy_flow_table_in, in, other_vport, 1);
- }
+ MLX5_SET(destroy_flow_table_in, in, vport_number, ft->vport);
+ MLX5_SET(destroy_flow_table_in, in, other_vport,
+ !!(ft->flags & MLX5_FLOW_TABLE_OTHER_VPORT));
return mlx5_cmd_exec_in(dev, destroy_flow_table, in);
}
@@ -283,11 +280,9 @@ static int mlx5_cmd_modify_flow_table(struct mlx5_flow_root_namespace *ns,
flow_table_context.lag_master_next_table_id, 0);
}
} else {
- if (ft->vport) {
- MLX5_SET(modify_flow_table_in, in, vport_number,
- ft->vport);
- MLX5_SET(modify_flow_table_in, in, other_vport, 1);
- }
+ MLX5_SET(modify_flow_table_in, in, vport_number, ft->vport);
+ MLX5_SET(modify_flow_table_in, in, other_vport,
+ !!(ft->flags & MLX5_FLOW_TABLE_OTHER_VPORT));
MLX5_SET(modify_flow_table_in, in, modify_field_select,
MLX5_MODIFY_FLOW_TABLE_MISS_TABLE_ID);
if (next_ft) {
@@ -325,6 +320,9 @@ static int mlx5_cmd_create_flow_group(struct mlx5_flow_root_namespace *ns,
MLX5_SET(create_flow_group_in, in, other_vport, 1);
}
+ MLX5_SET(create_flow_group_in, in, vport_number, ft->vport);
+ MLX5_SET(create_flow_group_in, in, other_vport,
+ !!(ft->flags & MLX5_FLOW_TABLE_OTHER_VPORT));
err = mlx5_cmd_exec_inout(dev, create_flow_group, in, out);
if (!err)
fg->id = MLX5_GET(create_flow_group_out, out,
@@ -344,11 +342,9 @@ static int mlx5_cmd_destroy_flow_group(struct mlx5_flow_root_namespace *ns,
MLX5_SET(destroy_flow_group_in, in, table_type, ft->type);
MLX5_SET(destroy_flow_group_in, in, table_id, ft->id);
MLX5_SET(destroy_flow_group_in, in, group_id, fg->id);
- if (ft->vport) {
- MLX5_SET(destroy_flow_group_in, in, vport_number, ft->vport);
- MLX5_SET(destroy_flow_group_in, in, other_vport, 1);
- }
-
+ MLX5_SET(destroy_flow_group_in, in, vport_number, ft->vport);
+ MLX5_SET(destroy_flow_group_in, in, other_vport,
+ !!(ft->flags & MLX5_FLOW_TABLE_OTHER_VPORT));
return mlx5_cmd_exec_in(dev, destroy_flow_group, in);
}
@@ -427,10 +423,9 @@ static int mlx5_cmd_set_fte(struct mlx5_core_dev *dev,
MLX5_SET(set_fte_in, in, ignore_flow_level,
!!(fte->action.flags & FLOW_ACT_IGNORE_FLOW_LEVEL));
- if (ft->vport) {
- MLX5_SET(set_fte_in, in, vport_number, ft->vport);
- MLX5_SET(set_fte_in, in, other_vport, 1);
- }
+ MLX5_SET(set_fte_in, in, vport_number, ft->vport);
+ MLX5_SET(set_fte_in, in, other_vport,
+ !!(ft->flags & MLX5_FLOW_TABLE_OTHER_VPORT));
in_flow_context = MLX5_ADDR_OF(set_fte_in, in, flow_context);
MLX5_SET(flow_context, in_flow_context, group_id, group_id);
@@ -515,6 +510,9 @@ static int mlx5_cmd_set_fte(struct mlx5_core_dev *dev,
dst->dest_attr.vport.pkt_reformat->id);
}
break;
+ case MLX5_FLOW_DESTINATION_TYPE_FLOW_SAMPLER:
+ id = dst->dest_attr.sampler_id;
+ break;
default:
id = dst->dest_attr.tir_num;
}
@@ -601,10 +599,9 @@ static int mlx5_cmd_delete_fte(struct mlx5_flow_root_namespace *ns,
MLX5_SET(delete_fte_in, in, table_type, ft->type);
MLX5_SET(delete_fte_in, in, table_id, ft->id);
MLX5_SET(delete_fte_in, in, flow_index, fte->index);
- if (ft->vport) {
- MLX5_SET(delete_fte_in, in, vport_number, ft->vport);
- MLX5_SET(delete_fte_in, in, other_vport, 1);
- }
+ MLX5_SET(delete_fte_in, in, vport_number, ft->vport);
+ MLX5_SET(delete_fte_in, in, other_vport,
+ !!(ft->flags & MLX5_FLOW_TABLE_OTHER_VPORT));
return mlx5_cmd_exec_in(dev, delete_fte, in);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
index 9fdd99272e31..b899539a0786 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
@@ -1152,18 +1152,13 @@ struct mlx5_flow_table *mlx5_create_flow_table(struct mlx5_flow_namespace *ns,
{
return __mlx5_create_flow_table(ns, ft_attr, FS_FT_OP_MOD_NORMAL, 0);
}
+EXPORT_SYMBOL(mlx5_create_flow_table);
-struct mlx5_flow_table *mlx5_create_vport_flow_table(struct mlx5_flow_namespace *ns,
- int prio, int max_fte,
- u32 level, u16 vport)
+struct mlx5_flow_table *
+mlx5_create_vport_flow_table(struct mlx5_flow_namespace *ns,
+ struct mlx5_flow_table_attr *ft_attr, u16 vport)
{
- struct mlx5_flow_table_attr ft_attr = {};
-
- ft_attr.max_fte = max_fte;
- ft_attr.level = level;
- ft_attr.prio = prio;
-
- return __mlx5_create_flow_table(ns, &ft_attr, FS_FT_OP_MOD_NORMAL, vport);
+ return __mlx5_create_flow_table(ns, ft_attr, FS_FT_OP_MOD_NORMAL, vport);
}
struct mlx5_flow_table*
@@ -1243,6 +1238,7 @@ struct mlx5_flow_group *mlx5_create_flow_group(struct mlx5_flow_table *ft,
return fg;
}
+EXPORT_SYMBOL(mlx5_create_flow_group);
static struct mlx5_flow_rule *alloc_rule(struct mlx5_flow_destination *dest)
{
@@ -2146,6 +2142,7 @@ void mlx5_destroy_flow_group(struct mlx5_flow_group *fg)
mlx5_core_warn(get_dev(&fg->node), "Flow group %d wasn't destroyed, refcount > 1\n",
fg->id);
}
+EXPORT_SYMBOL(mlx5_destroy_flow_group);
struct mlx5_flow_namespace *mlx5_get_fdb_sub_ns(struct mlx5_core_dev *dev,
int n)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
index afe7f0bffb93..b24a9849c45e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
@@ -194,7 +194,7 @@ struct mlx5_ft_underlay_qp {
u32 qpn;
};
-#define MLX5_FTE_MATCH_PARAM_RESERVED reserved_at_a00
+#define MLX5_FTE_MATCH_PARAM_RESERVED reserved_at_c00
/* Calculate the fte_match_param length and without the reserved length.
* Make sure the reserved field is the last.
*/
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index 8ff207aa1479..d86f06f14cd3 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -1126,23 +1126,23 @@ static int mlx5_load(struct mlx5_core_dev *dev)
goto err_sriov;
}
- err = mlx5_sriov_attach(dev);
- if (err) {
- mlx5_core_err(dev, "sriov init failed %d\n", err);
- goto err_sriov;
- }
-
err = mlx5_ec_init(dev);
if (err) {
mlx5_core_err(dev, "Failed to init embedded CPU\n");
goto err_ec;
}
+ err = mlx5_sriov_attach(dev);
+ if (err) {
+ mlx5_core_err(dev, "sriov init failed %d\n", err);
+ goto err_sriov;
+ }
+
return 0;
-err_ec:
- mlx5_sriov_detach(dev);
err_sriov:
+ mlx5_ec_cleanup(dev);
+err_ec:
mlx5_cleanup_fs(dev);
err_fs:
mlx5_accel_tls_cleanup(dev);
@@ -1168,8 +1168,8 @@ err_irq_table:
static void mlx5_unload(struct mlx5_core_dev *dev)
{
- mlx5_ec_cleanup(dev);
mlx5_sriov_detach(dev);
+ mlx5_ec_cleanup(dev);
mlx5_cleanup_fs(dev);
mlx5_accel_ipsec_cleanup(dev);
mlx5_accel_tls_cleanup(dev);
@@ -1594,6 +1594,7 @@ static const struct pci_device_id mlx5_core_pci_table[] = {
{ PCI_VDEVICE(MELLANOX, 0xa2d2) }, /* BlueField integrated ConnectX-5 network controller */
{ PCI_VDEVICE(MELLANOX, 0xa2d3), MLX5_PCI_DEV_IS_VF}, /* BlueField integrated ConnectX-5 network controller VF */
{ PCI_VDEVICE(MELLANOX, 0xa2d6) }, /* BlueField-2 integrated ConnectX-6 Dx network controller */
+ { PCI_VDEVICE(MELLANOX, 0xa2dc) }, /* BlueField-3 integrated ConnectX-7 network controller */
{ 0, }
};
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
index 8cec85ab419d..9d00efa9e6bc 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
@@ -122,6 +122,10 @@ enum mlx5_semaphore_space_address {
int mlx5_query_hca_caps(struct mlx5_core_dev *dev);
int mlx5_query_board_id(struct mlx5_core_dev *dev);
+int mlx5_cmd_init(struct mlx5_core_dev *dev);
+void mlx5_cmd_cleanup(struct mlx5_core_dev *dev);
+void mlx5_cmd_set_state(struct mlx5_core_dev *dev,
+ enum mlx5_cmdif_state cmdif_state);
int mlx5_cmd_init_hca(struct mlx5_core_dev *dev, uint32_t *sw_owner_id);
int mlx5_cmd_teardown_hca(struct mlx5_core_dev *dev);
int mlx5_cmd_force_teardown_hca(struct mlx5_core_dev *dev);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
index 4d7f8a357df7..eb956ce904bc 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
@@ -374,7 +374,7 @@ retry:
if (func_id)
dev->priv.vfs_pages += npages;
else if (mlx5_core_is_ecpf(dev) && !ec_function)
- dev->priv.peer_pf_pages += npages;
+ dev->priv.host_pf_pages += npages;
mlx5_core_dbg(dev, "npages %d, ec_function %d, func_id 0x%x, err %d\n",
npages, ec_function, func_id, err);
@@ -416,7 +416,7 @@ static void release_all_pages(struct mlx5_core_dev *dev, u32 func_id,
if (func_id)
dev->priv.vfs_pages -= npages;
else if (mlx5_core_is_ecpf(dev) && !ec_function)
- dev->priv.peer_pf_pages -= npages;
+ dev->priv.host_pf_pages -= npages;
mlx5_core_dbg(dev, "npages %d, ec_function %d, func_id 0x%x\n",
npages, ec_function, func_id);
@@ -523,7 +523,7 @@ static int reclaim_pages(struct mlx5_core_dev *dev, u32 func_id, int npages,
if (func_id)
dev->priv.vfs_pages -= num_claimed;
else if (mlx5_core_is_ecpf(dev) && !ec_function)
- dev->priv.peer_pf_pages -= num_claimed;
+ dev->priv.host_pf_pages -= num_claimed;
out_free:
kvfree(out);
@@ -678,9 +678,9 @@ int mlx5_reclaim_startup_pages(struct mlx5_core_dev *dev)
WARN(dev->priv.vfs_pages,
"VFs FW pages counter is %d after reclaiming all pages\n",
dev->priv.vfs_pages);
- WARN(dev->priv.peer_pf_pages,
- "Peer PF FW pages counter is %d after reclaiming all pages\n",
- dev->priv.peer_pf_pages);
+ WARN(dev->priv.host_pf_pages,
+ "External host PF FW pages counter is %d after reclaiming all pages\n",
+ dev->priv.host_pf_pages);
return 0;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_buddy.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_buddy.c
new file mode 100644
index 000000000000..7df11a019df9
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_buddy.c
@@ -0,0 +1,170 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2004 Topspin Communications. All rights reserved.
+ * Copyright (c) 2005 - 2008 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2006 - 2007 Cisco Systems, Inc. All rights reserved.
+ * Copyright (c) 2020 NVIDIA CORPORATION. All rights reserved.
+ */
+
+#include "dr_types.h"
+
+int mlx5dr_buddy_init(struct mlx5dr_icm_buddy_mem *buddy,
+ unsigned int max_order)
+{
+ int i;
+
+ buddy->max_order = max_order;
+
+ INIT_LIST_HEAD(&buddy->list_node);
+ INIT_LIST_HEAD(&buddy->used_list);
+ INIT_LIST_HEAD(&buddy->hot_list);
+
+ buddy->bitmap = kcalloc(buddy->max_order + 1,
+ sizeof(*buddy->bitmap),
+ GFP_KERNEL);
+ buddy->num_free = kcalloc(buddy->max_order + 1,
+ sizeof(*buddy->num_free),
+ GFP_KERNEL);
+
+ if (!buddy->bitmap || !buddy->num_free)
+ goto err_free_all;
+
+	/* Allocate one bitmap per order (max_order + 1 in total) */
+
+ for (i = 0; i <= buddy->max_order; ++i) {
+ unsigned int size = 1 << (buddy->max_order - i);
+
+ buddy->bitmap[i] = bitmap_zalloc(size, GFP_KERNEL);
+ if (!buddy->bitmap[i])
+ goto err_out_free_each_bit_per_order;
+ }
+
+	/* In the beginning only the biggest order (the whole pool) is
+	 * available, so mark its first bit free and count one free
+	 * segment at that order.
+	 */
+
+ bitmap_set(buddy->bitmap[buddy->max_order], 0, 1);
+
+ buddy->num_free[buddy->max_order] = 1;
+
+ return 0;
+
+err_out_free_each_bit_per_order:
+ for (i = 0; i <= buddy->max_order; ++i)
+ bitmap_free(buddy->bitmap[i]);
+
+err_free_all:
+ kfree(buddy->num_free);
+ kfree(buddy->bitmap);
+ return -ENOMEM;
+}
+
+void mlx5dr_buddy_cleanup(struct mlx5dr_icm_buddy_mem *buddy)
+{
+ int i;
+
+ list_del(&buddy->list_node);
+
+ for (i = 0; i <= buddy->max_order; ++i)
+ bitmap_free(buddy->bitmap[i]);
+
+ kfree(buddy->num_free);
+ kfree(buddy->bitmap);
+}
+
+static int dr_buddy_find_free_seg(struct mlx5dr_icm_buddy_mem *buddy,
+ unsigned int start_order,
+ unsigned int *segment,
+ unsigned int *order)
+{
+ unsigned int seg, order_iter, m;
+
+ for (order_iter = start_order;
+ order_iter <= buddy->max_order; ++order_iter) {
+ if (!buddy->num_free[order_iter])
+ continue;
+
+ m = 1 << (buddy->max_order - order_iter);
+ seg = find_first_bit(buddy->bitmap[order_iter], m);
+
+ if (WARN(seg >= m,
+ "ICM Buddy: failed finding free mem for order %d\n",
+ order_iter))
+ return -ENOMEM;
+
+ break;
+ }
+
+ if (order_iter > buddy->max_order)
+ return -ENOMEM;
+
+ *segment = seg;
+ *order = order_iter;
+ return 0;
+}
+
+/**
+ * mlx5dr_buddy_alloc_mem() - Allocate a memory segment of the given order.
+ * @buddy: Buddy to allocate from.
+ * @order: Requested allocation order.
+ * @segment: Output; index of the allocated segment.
+ *
+ * This function finds the first free area of the ICM memory managed by this
+ * buddy. It walks the buddy data structures from the requested order up to
+ * the maximum order in the system, splitting a larger free block when
+ * necessary.
+ *
+ * Return: 0 when a segment was allocated, non-zero error status otherwise.
+ *
+ * On success, *@segment holds the location in the whole buddy ICM memory
+ * area - the index of the memory segment that is now available for use.
+ */
+int mlx5dr_buddy_alloc_mem(struct mlx5dr_icm_buddy_mem *buddy,
+ unsigned int order,
+ unsigned int *segment)
+{
+ unsigned int seg, order_iter;
+ int err;
+
+ err = dr_buddy_find_free_seg(buddy, order, &seg, &order_iter);
+ if (err)
+ return err;
+
+ bitmap_clear(buddy->bitmap[order_iter], seg, 1);
+ --buddy->num_free[order_iter];
+
+ /* If we found free memory in some order that is bigger than the
+ * required order, we need to split every order between the required
+ * order and the order that we found into two parts, and mark accordingly.
+ */
+ while (order_iter > order) {
+ --order_iter;
+ seg <<= 1;
+ bitmap_set(buddy->bitmap[order_iter], seg ^ 1, 1);
+ ++buddy->num_free[order_iter];
+ }
+
+ seg <<= order;
+ *segment = seg;
+
+ return 0;
+}
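
The three buddy calls form a conventional binary-buddy interface. A minimal, hedged round-trip sketch (the standalone use and the order values are illustrative; the driver itself embeds the buddy in an ICM pool):

#include "mlx5dr.h"

static int example_buddy_roundtrip(void)
{
	struct mlx5dr_icm_buddy_mem buddy = {};
	unsigned int seg;
	int err;

	/* max_order 7 manages 2^7 = 128 order-0 segments */
	err = mlx5dr_buddy_init(&buddy, 7);
	if (err)
		return err;

	/* request a 2^2-segment chunk; on success 'seg' is the order-0
	 * index of its first segment
	 */
	err = mlx5dr_buddy_alloc_mem(&buddy, 2, &seg);
	if (!err)
		mlx5dr_buddy_free_mem(&buddy, seg, 2);

	mlx5dr_buddy_cleanup(&buddy);
	return err;
}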
+
+void mlx5dr_buddy_free_mem(struct mlx5dr_icm_buddy_mem *buddy,
+ unsigned int seg, unsigned int order)
+{
+ seg >>= order;
+
+	/* While the buddy of this segment is also free, merge the two
+	 * and continue one order up.
+	 */
+ while (test_bit(seg ^ 1, buddy->bitmap[order])) {
+ bitmap_clear(buddy->bitmap[order], seg ^ 1, 1);
+ --buddy->num_free[order];
+ seg >>= 1;
+ ++order;
+ }
+ bitmap_set(buddy->bitmap[order], seg, 1);
+
+ ++buddy->num_free[order];
+}
+
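
The merge loop relies on the fact that, at any order, the buddy of segment n is n ^ 1, the segment differing only in the lowest bit. A worked trace under an assumed occupancy (values illustrative):

/* Freeing order-0 segment 5 while segment 4 is already free:
 *   order 0: buddy = 5 ^ 1 = 4 is free -> clear bit 4, seg >>= 1 (seg = 2)
 *   order 1: buddy = 2 ^ 1 = 3 is in use -> stop merging
 *   mark bit 2 free at order 1, num_free[1]++
 * The pair (4, 5) has coalesced into a single order-1 block.
 */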
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c
index 51bbd88ff021..ba65ec406cfa 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c
@@ -94,12 +94,12 @@ int mlx5dr_cmd_query_device(struct mlx5_core_dev *mdev,
caps->flex_protocols = MLX5_CAP_GEN(mdev, flex_parser_protocols);
caps->sw_format_ver = MLX5_CAP_GEN(mdev, steering_format_version);
- if (mlx5dr_matcher_supp_flex_parser_icmp_v4(caps)) {
+ if (caps->flex_protocols & MLX5_FLEX_PARSER_ICMP_V4_ENABLED) {
caps->flex_parser_id_icmp_dw0 = MLX5_CAP_GEN(mdev, flex_parser_id_icmp_dw0);
caps->flex_parser_id_icmp_dw1 = MLX5_CAP_GEN(mdev, flex_parser_id_icmp_dw1);
}
- if (mlx5dr_matcher_supp_flex_parser_icmp_v6(caps)) {
+ if (caps->flex_protocols & MLX5_FLEX_PARSER_ICMP_V6_ENABLED) {
caps->flex_parser_id_icmpv6_dw0 =
MLX5_CAP_GEN(mdev, flex_parser_id_icmpv6_dw0);
caps->flex_parser_id_icmpv6_dw1 =
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c
index cc33515b9aba..66c24767e3b0 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c
@@ -4,50 +4,16 @@
#include "dr_types.h"
#define DR_ICM_MODIFY_HDR_ALIGN_BASE 64
-#define DR_ICM_SYNC_THRESHOLD (64 * 1024 * 1024)
-
-struct mlx5dr_icm_pool;
-
-struct mlx5dr_icm_bucket {
- struct mlx5dr_icm_pool *pool;
-
- /* Chunks that aren't visible to HW not directly and not in cache */
- struct list_head free_list;
- unsigned int free_list_count;
-
- /* Used chunks, HW may be accessing this memory */
- struct list_head used_list;
- unsigned int used_list_count;
-
- /* HW may be accessing this memory but at some future,
- * undetermined time, it might cease to do so. Before deciding to call
- * sync_ste, this list is moved to sync_list
- */
- struct list_head hot_list;
- unsigned int hot_list_count;
-
- /* Pending sync list, entries from the hot list are moved to this list.
- * sync_ste is executed and then sync_list is concatenated to the free list
- */
- struct list_head sync_list;
- unsigned int sync_list_count;
-
- u32 total_chunks;
- u32 num_of_entries;
- u32 entry_size;
- /* protect the ICM bucket */
- struct mutex mutex;
-};
+#define DR_ICM_SYNC_THRESHOLD_POOL (64 * 1024 * 1024)
struct mlx5dr_icm_pool {
- struct mlx5dr_icm_bucket *buckets;
enum mlx5dr_icm_type icm_type;
enum mlx5dr_icm_chunk_size max_log_chunk_sz;
- enum mlx5dr_icm_chunk_size num_of_buckets;
- struct list_head icm_mr_list;
- /* protect the ICM MR list */
- struct mutex mr_mutex;
struct mlx5dr_domain *dmn;
+ /* memory management */
+ struct mutex mutex; /* protect the ICM pool and ICM buddy */
+ struct list_head buddy_mem_list;
+ u64 hot_memory_size;
};
struct mlx5dr_icm_dm {
@@ -58,13 +24,11 @@ struct mlx5dr_icm_dm {
};
struct mlx5dr_icm_mr {
- struct mlx5dr_icm_pool *pool;
struct mlx5_core_mkey mkey;
struct mlx5dr_icm_dm dm;
- size_t used_length;
+ struct mlx5dr_domain *dmn;
size_t length;
u64 icm_start_addr;
- struct list_head mr_list;
};
static int dr_icm_create_dm_mkey(struct mlx5_core_dev *mdev,
@@ -107,8 +71,7 @@ dr_icm_pool_mr_create(struct mlx5dr_icm_pool *pool)
if (!icm_mr)
return NULL;
- icm_mr->pool = pool;
- INIT_LIST_HEAD(&icm_mr->mr_list);
+ icm_mr->dmn = pool->dmn;
icm_mr->dm.length = mlx5dr_icm_pool_chunk_size_to_byte(pool->max_log_chunk_sz,
pool->icm_type);
@@ -150,8 +113,6 @@ dr_icm_pool_mr_create(struct mlx5dr_icm_pool *pool)
goto free_mkey;
}
- list_add_tail(&icm_mr->mr_list, &pool->icm_mr_list);
-
return icm_mr;
free_mkey:
@@ -166,10 +127,9 @@ free_icm_mr:
static void dr_icm_pool_mr_destroy(struct mlx5dr_icm_mr *icm_mr)
{
- struct mlx5_core_dev *mdev = icm_mr->pool->dmn->mdev;
+ struct mlx5_core_dev *mdev = icm_mr->dmn->mdev;
struct mlx5dr_icm_dm *dm = &icm_mr->dm;
- list_del(&icm_mr->mr_list);
mlx5_core_destroy_mkey(mdev, &icm_mr->mkey);
mlx5_dm_sw_icm_dealloc(mdev, dm->type, dm->length, 0,
dm->addr, dm->obj_id);
@@ -178,19 +138,17 @@ static void dr_icm_pool_mr_destroy(struct mlx5dr_icm_mr *icm_mr)
static int dr_icm_chunk_ste_init(struct mlx5dr_icm_chunk *chunk)
{
- struct mlx5dr_icm_bucket *bucket = chunk->bucket;
-
- chunk->ste_arr = kvzalloc(bucket->num_of_entries *
+ chunk->ste_arr = kvzalloc(chunk->num_of_entries *
sizeof(chunk->ste_arr[0]), GFP_KERNEL);
if (!chunk->ste_arr)
return -ENOMEM;
- chunk->hw_ste_arr = kvzalloc(bucket->num_of_entries *
+ chunk->hw_ste_arr = kvzalloc(chunk->num_of_entries *
DR_STE_SIZE_REDUCED, GFP_KERNEL);
if (!chunk->hw_ste_arr)
goto out_free_ste_arr;
- chunk->miss_list = kvmalloc(bucket->num_of_entries *
+ chunk->miss_list = kvmalloc(chunk->num_of_entries *
sizeof(chunk->miss_list[0]), GFP_KERNEL);
if (!chunk->miss_list)
goto out_free_hw_ste_arr;
@@ -204,72 +162,6 @@ out_free_ste_arr:
return -ENOMEM;
}
-static int dr_icm_chunks_create(struct mlx5dr_icm_bucket *bucket)
-{
- size_t mr_free_size, mr_req_size, mr_row_size;
- struct mlx5dr_icm_pool *pool = bucket->pool;
- struct mlx5dr_icm_mr *icm_mr = NULL;
- struct mlx5dr_icm_chunk *chunk;
- int i, err = 0;
-
- mr_req_size = bucket->num_of_entries * bucket->entry_size;
- mr_row_size = mlx5dr_icm_pool_chunk_size_to_byte(pool->max_log_chunk_sz,
- pool->icm_type);
- mutex_lock(&pool->mr_mutex);
- if (!list_empty(&pool->icm_mr_list)) {
- icm_mr = list_last_entry(&pool->icm_mr_list,
- struct mlx5dr_icm_mr, mr_list);
-
- if (icm_mr)
- mr_free_size = icm_mr->dm.length - icm_mr->used_length;
- }
-
- if (!icm_mr || mr_free_size < mr_row_size) {
- icm_mr = dr_icm_pool_mr_create(pool);
- if (!icm_mr) {
- err = -ENOMEM;
- goto out_err;
- }
- }
-
- /* Create memory aligned chunks */
- for (i = 0; i < mr_row_size / mr_req_size; i++) {
- chunk = kvzalloc(sizeof(*chunk), GFP_KERNEL);
- if (!chunk) {
- err = -ENOMEM;
- goto out_err;
- }
-
- chunk->bucket = bucket;
- chunk->rkey = icm_mr->mkey.key;
- /* mr start addr is zero based */
- chunk->mr_addr = icm_mr->used_length;
- chunk->icm_addr = (uintptr_t)icm_mr->icm_start_addr + icm_mr->used_length;
- icm_mr->used_length += mr_req_size;
- chunk->num_of_entries = bucket->num_of_entries;
- chunk->byte_size = chunk->num_of_entries * bucket->entry_size;
-
- if (pool->icm_type == DR_ICM_TYPE_STE) {
- err = dr_icm_chunk_ste_init(chunk);
- if (err)
- goto out_free_chunk;
- }
-
- INIT_LIST_HEAD(&chunk->chunk_list);
- list_add(&chunk->chunk_list, &bucket->free_list);
- bucket->free_list_count++;
- bucket->total_chunks++;
- }
- mutex_unlock(&pool->mr_mutex);
- return 0;
-
-out_free_chunk:
- kvfree(chunk);
-out_err:
- mutex_unlock(&pool->mr_mutex);
- return err;
-}
-
static void dr_icm_chunk_ste_cleanup(struct mlx5dr_icm_chunk *chunk)
{
kvfree(chunk->miss_list);
@@ -277,166 +169,199 @@ static void dr_icm_chunk_ste_cleanup(struct mlx5dr_icm_chunk *chunk)
kvfree(chunk->ste_arr);
}
-static void dr_icm_chunk_destroy(struct mlx5dr_icm_chunk *chunk)
+static enum mlx5dr_icm_type
+get_chunk_icm_type(struct mlx5dr_icm_chunk *chunk)
+{
+ return chunk->buddy_mem->pool->icm_type;
+}
+
+static void dr_icm_chunk_destroy(struct mlx5dr_icm_chunk *chunk,
+ struct mlx5dr_icm_buddy_mem *buddy)
{
- struct mlx5dr_icm_bucket *bucket = chunk->bucket;
+ enum mlx5dr_icm_type icm_type = get_chunk_icm_type(chunk);
+ buddy->used_memory -= chunk->byte_size;
list_del(&chunk->chunk_list);
- bucket->total_chunks--;
- if (bucket->pool->icm_type == DR_ICM_TYPE_STE)
+ if (icm_type == DR_ICM_TYPE_STE)
dr_icm_chunk_ste_cleanup(chunk);
kvfree(chunk);
}
-static void dr_icm_bucket_init(struct mlx5dr_icm_pool *pool,
- struct mlx5dr_icm_bucket *bucket,
- enum mlx5dr_icm_chunk_size chunk_size)
+static int dr_icm_buddy_create(struct mlx5dr_icm_pool *pool)
{
- if (pool->icm_type == DR_ICM_TYPE_STE)
- bucket->entry_size = DR_STE_SIZE;
- else
- bucket->entry_size = DR_MODIFY_ACTION_SIZE;
-
- bucket->num_of_entries = mlx5dr_icm_pool_chunk_size_to_entries(chunk_size);
- bucket->pool = pool;
- mutex_init(&bucket->mutex);
- INIT_LIST_HEAD(&bucket->free_list);
- INIT_LIST_HEAD(&bucket->used_list);
- INIT_LIST_HEAD(&bucket->hot_list);
- INIT_LIST_HEAD(&bucket->sync_list);
+ struct mlx5dr_icm_buddy_mem *buddy;
+ struct mlx5dr_icm_mr *icm_mr;
+
+ icm_mr = dr_icm_pool_mr_create(pool);
+ if (!icm_mr)
+ return -ENOMEM;
+
+ buddy = kvzalloc(sizeof(*buddy), GFP_KERNEL);
+ if (!buddy)
+ goto free_mr;
+
+ if (mlx5dr_buddy_init(buddy, pool->max_log_chunk_sz))
+ goto err_free_buddy;
+
+ buddy->icm_mr = icm_mr;
+ buddy->pool = pool;
+
+	/* add it to the start of the list so it is searched first */
+ list_add(&buddy->list_node, &pool->buddy_mem_list);
+
+ return 0;
+
+err_free_buddy:
+ kvfree(buddy);
+free_mr:
+ dr_icm_pool_mr_destroy(icm_mr);
+ return -ENOMEM;
}
-static void dr_icm_bucket_cleanup(struct mlx5dr_icm_bucket *bucket)
+static void dr_icm_buddy_destroy(struct mlx5dr_icm_buddy_mem *buddy)
{
struct mlx5dr_icm_chunk *chunk, *next;
- mutex_destroy(&bucket->mutex);
- list_splice_tail_init(&bucket->sync_list, &bucket->free_list);
- list_splice_tail_init(&bucket->hot_list, &bucket->free_list);
+ list_for_each_entry_safe(chunk, next, &buddy->hot_list, chunk_list)
+ dr_icm_chunk_destroy(chunk, buddy);
+
+ list_for_each_entry_safe(chunk, next, &buddy->used_list, chunk_list)
+ dr_icm_chunk_destroy(chunk, buddy);
- list_for_each_entry_safe(chunk, next, &bucket->free_list, chunk_list)
- dr_icm_chunk_destroy(chunk);
+ dr_icm_pool_mr_destroy(buddy->icm_mr);
- WARN_ON(bucket->total_chunks != 0);
+ mlx5dr_buddy_cleanup(buddy);
- /* Cleanup of unreturned chunks */
- list_for_each_entry_safe(chunk, next, &bucket->used_list, chunk_list)
- dr_icm_chunk_destroy(chunk);
+ kvfree(buddy);
}
-static u64 dr_icm_hot_mem_size(struct mlx5dr_icm_pool *pool)
+static struct mlx5dr_icm_chunk *
+dr_icm_chunk_create(struct mlx5dr_icm_pool *pool,
+ enum mlx5dr_icm_chunk_size chunk_size,
+ struct mlx5dr_icm_buddy_mem *buddy_mem_pool,
+ unsigned int seg)
{
- u64 hot_size = 0;
- int chunk_order;
+ struct mlx5dr_icm_chunk *chunk;
+ int offset;
- for (chunk_order = 0; chunk_order < pool->num_of_buckets; chunk_order++)
- hot_size += pool->buckets[chunk_order].hot_list_count *
- mlx5dr_icm_pool_chunk_size_to_byte(chunk_order, pool->icm_type);
+ chunk = kvzalloc(sizeof(*chunk), GFP_KERNEL);
+ if (!chunk)
+ return NULL;
- return hot_size;
-}
+ offset = mlx5dr_icm_pool_dm_type_to_entry_size(pool->icm_type) * seg;
+
+ chunk->rkey = buddy_mem_pool->icm_mr->mkey.key;
+ chunk->mr_addr = offset;
+ chunk->icm_addr =
+ (uintptr_t)buddy_mem_pool->icm_mr->icm_start_addr + offset;
+ chunk->num_of_entries =
+ mlx5dr_icm_pool_chunk_size_to_entries(chunk_size);
+ chunk->byte_size =
+ mlx5dr_icm_pool_chunk_size_to_byte(chunk_size, pool->icm_type);
+ chunk->seg = seg;
+
+ if (pool->icm_type == DR_ICM_TYPE_STE && dr_icm_chunk_ste_init(chunk)) {
+ mlx5dr_err(pool->dmn,
+ "Failed to init ste arrays (order: %d)\n",
+ chunk_size);
+ goto out_free_chunk;
+ }
-static bool dr_icm_reuse_hot_entries(struct mlx5dr_icm_pool *pool,
- struct mlx5dr_icm_bucket *bucket)
-{
- u64 bytes_for_sync;
+ buddy_mem_pool->used_memory += chunk->byte_size;
+ chunk->buddy_mem = buddy_mem_pool;
+ INIT_LIST_HEAD(&chunk->chunk_list);
- bytes_for_sync = dr_icm_hot_mem_size(pool);
- if (bytes_for_sync < DR_ICM_SYNC_THRESHOLD || !bucket->hot_list_count)
- return false;
+	/* the chunk is now part of the used_list */
+ list_add_tail(&chunk->chunk_list, &buddy_mem_pool->used_list);
- return true;
-}
+ return chunk;
-static void dr_icm_chill_bucket_start(struct mlx5dr_icm_bucket *bucket)
-{
- list_splice_tail_init(&bucket->hot_list, &bucket->sync_list);
- bucket->sync_list_count += bucket->hot_list_count;
- bucket->hot_list_count = 0;
+out_free_chunk:
+ kvfree(chunk);
+ return NULL;
}
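
Since seg counts order-0 entries, the chunk's mr_addr is simply the entry size times the segment index. A hedged example, assuming DR_STE_SIZE is 64 bytes as defined in dr_types.h:

/* illustrative only: byte offset of segment 48 inside an STE buddy MR */
int offset = mlx5dr_icm_pool_dm_type_to_entry_size(DR_ICM_TYPE_STE) * 48;
/* offset == 64 * 48 == 3072 */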
-static void dr_icm_chill_bucket_end(struct mlx5dr_icm_bucket *bucket)
+static bool dr_icm_pool_is_sync_required(struct mlx5dr_icm_pool *pool)
{
- list_splice_tail_init(&bucket->sync_list, &bucket->free_list);
- bucket->free_list_count += bucket->sync_list_count;
- bucket->sync_list_count = 0;
-}
+ if (pool->hot_memory_size > DR_ICM_SYNC_THRESHOLD_POOL)
+ return true;
-static void dr_icm_chill_bucket_abort(struct mlx5dr_icm_bucket *bucket)
-{
- list_splice_tail_init(&bucket->sync_list, &bucket->hot_list);
- bucket->hot_list_count += bucket->sync_list_count;
- bucket->sync_list_count = 0;
+ return false;
}
-static void dr_icm_chill_buckets_start(struct mlx5dr_icm_pool *pool,
- struct mlx5dr_icm_bucket *cb,
- bool buckets[DR_CHUNK_SIZE_MAX])
+static int dr_icm_pool_sync_all_buddy_pools(struct mlx5dr_icm_pool *pool)
{
- struct mlx5dr_icm_bucket *bucket;
- int i;
-
- for (i = 0; i < pool->num_of_buckets; i++) {
- bucket = &pool->buckets[i];
- if (bucket == cb) {
- dr_icm_chill_bucket_start(bucket);
- continue;
- }
+ struct mlx5dr_icm_buddy_mem *buddy, *tmp_buddy;
+ int err;
- /* Freeing the mutex is done at the end of that process, after
- * sync_ste was executed at dr_icm_chill_buckets_end func.
- */
- if (mutex_trylock(&bucket->mutex)) {
- dr_icm_chill_bucket_start(bucket);
- buckets[i] = true;
- }
+ err = mlx5dr_cmd_sync_steering(pool->dmn->mdev);
+ if (err) {
+ mlx5dr_err(pool->dmn, "Failed to sync to HW (err: %d)\n", err);
+ return err;
}
-}
-static void dr_icm_chill_buckets_end(struct mlx5dr_icm_pool *pool,
- struct mlx5dr_icm_bucket *cb,
- bool buckets[DR_CHUNK_SIZE_MAX])
-{
- struct mlx5dr_icm_bucket *bucket;
- int i;
-
- for (i = 0; i < pool->num_of_buckets; i++) {
- bucket = &pool->buckets[i];
- if (bucket == cb) {
- dr_icm_chill_bucket_end(bucket);
- continue;
- }
+ list_for_each_entry_safe(buddy, tmp_buddy, &pool->buddy_mem_list, list_node) {
+ struct mlx5dr_icm_chunk *chunk, *tmp_chunk;
- if (!buckets[i])
- continue;
+ list_for_each_entry_safe(chunk, tmp_chunk, &buddy->hot_list, chunk_list) {
+ mlx5dr_buddy_free_mem(buddy, chunk->seg,
+ ilog2(chunk->num_of_entries));
+ pool->hot_memory_size -= chunk->byte_size;
+ dr_icm_chunk_destroy(chunk, buddy);
+ }
- dr_icm_chill_bucket_end(bucket);
- mutex_unlock(&bucket->mutex);
+ if (!buddy->used_memory && pool->icm_type == DR_ICM_TYPE_STE)
+ dr_icm_buddy_destroy(buddy);
}
+
+ return 0;
}
-static void dr_icm_chill_buckets_abort(struct mlx5dr_icm_pool *pool,
- struct mlx5dr_icm_bucket *cb,
- bool buckets[DR_CHUNK_SIZE_MAX])
+static int dr_icm_handle_buddies_get_mem(struct mlx5dr_icm_pool *pool,
+ enum mlx5dr_icm_chunk_size chunk_size,
+ struct mlx5dr_icm_buddy_mem **buddy,
+ unsigned int *seg)
{
- struct mlx5dr_icm_bucket *bucket;
- int i;
-
- for (i = 0; i < pool->num_of_buckets; i++) {
- bucket = &pool->buckets[i];
- if (bucket == cb) {
- dr_icm_chill_bucket_abort(bucket);
- continue;
- }
+ struct mlx5dr_icm_buddy_mem *buddy_mem_pool;
+ bool new_mem = false;
+ int err;
- if (!buckets[i])
- continue;
+alloc_buddy_mem:
+	/* find the next free slot in the buddy list */
+ list_for_each_entry(buddy_mem_pool, &pool->buddy_mem_list, list_node) {
+ err = mlx5dr_buddy_alloc_mem(buddy_mem_pool,
+ chunk_size, seg);
+ if (!err)
+ goto found;
+
+ if (WARN_ON(new_mem)) {
+			/* The freshly created buddy sits first in the
+			 * list; failing even on it means no memory
+			 */
+ mlx5dr_err(pool->dmn,
+ "No memory for order: %d\n",
+ chunk_size);
+ goto out;
+ }
+ }
- dr_icm_chill_bucket_abort(bucket);
- mutex_unlock(&bucket->mutex);
+	/* no more available allocators in this pool, create a new one */
+ err = dr_icm_buddy_create(pool);
+ if (err) {
+ mlx5dr_err(pool->dmn,
+ "Failed creating buddy for order %d\n",
+ chunk_size);
+ goto out;
}
+
+	/* note that fresh memory was just added first in the list */
+ new_mem = true;
+ goto alloc_buddy_mem;
+
+found:
+ *buddy = buddy_mem_pool;
+out:
+ return err;
}
/* Allocate an ICM chunk, each chunk holds a piece of ICM memory and
@@ -446,68 +371,48 @@ struct mlx5dr_icm_chunk *
mlx5dr_icm_alloc_chunk(struct mlx5dr_icm_pool *pool,
enum mlx5dr_icm_chunk_size chunk_size)
{
- struct mlx5dr_icm_chunk *chunk = NULL; /* Fix compilation warning */
- bool buckets[DR_CHUNK_SIZE_MAX] = {};
- struct mlx5dr_icm_bucket *bucket;
- int err;
+ struct mlx5dr_icm_chunk *chunk = NULL;
+ struct mlx5dr_icm_buddy_mem *buddy;
+ unsigned int seg;
+ int ret;
if (chunk_size > pool->max_log_chunk_sz)
return NULL;
- bucket = &pool->buckets[chunk_size];
-
- mutex_lock(&bucket->mutex);
-
- /* Take chunk from pool if available, otherwise allocate new chunks */
- if (list_empty(&bucket->free_list)) {
- if (dr_icm_reuse_hot_entries(pool, bucket)) {
- dr_icm_chill_buckets_start(pool, bucket, buckets);
- err = mlx5dr_cmd_sync_steering(pool->dmn->mdev);
- if (err) {
- dr_icm_chill_buckets_abort(pool, bucket, buckets);
- mlx5dr_err(pool->dmn, "Sync_steering failed\n");
- chunk = NULL;
- goto out;
- }
- dr_icm_chill_buckets_end(pool, bucket, buckets);
- } else {
- dr_icm_chunks_create(bucket);
- }
- }
+ mutex_lock(&pool->mutex);
+	/* find memory; on success return the owning buddy and the segment within it */
+ ret = dr_icm_handle_buddies_get_mem(pool, chunk_size, &buddy, &seg);
+ if (ret)
+ goto out;
- if (!list_empty(&bucket->free_list)) {
- chunk = list_last_entry(&bucket->free_list,
- struct mlx5dr_icm_chunk,
- chunk_list);
- if (chunk) {
- list_del_init(&chunk->chunk_list);
- list_add_tail(&chunk->chunk_list, &bucket->used_list);
- bucket->free_list_count--;
- bucket->used_list_count++;
- }
- }
+ chunk = dr_icm_chunk_create(pool, chunk_size, buddy, seg);
+ if (!chunk)
+ goto out_err;
+
+ goto out;
+
+out_err:
+ mlx5dr_buddy_free_mem(buddy, seg, chunk_size);
out:
- mutex_unlock(&bucket->mutex);
+ mutex_unlock(&pool->mutex);
return chunk;
}
void mlx5dr_icm_free_chunk(struct mlx5dr_icm_chunk *chunk)
{
- struct mlx5dr_icm_bucket *bucket = chunk->bucket;
+ struct mlx5dr_icm_buddy_mem *buddy = chunk->buddy_mem;
+ struct mlx5dr_icm_pool *pool = buddy->pool;
- if (bucket->pool->icm_type == DR_ICM_TYPE_STE) {
- memset(chunk->ste_arr, 0,
- bucket->num_of_entries * sizeof(chunk->ste_arr[0]));
- memset(chunk->hw_ste_arr, 0,
- bucket->num_of_entries * DR_STE_SIZE_REDUCED);
- }
+	/* move the chunk to the waiting ("hot") list */
+ mutex_lock(&pool->mutex);
+ list_move_tail(&chunk->chunk_list, &buddy->hot_list);
+ pool->hot_memory_size += chunk->byte_size;
+
+ /* Check if we have chunks that are waiting for sync-ste */
+ if (dr_icm_pool_is_sync_required(pool))
+ dr_icm_pool_sync_all_buddy_pools(pool);
- mutex_lock(&bucket->mutex);
- list_del_init(&chunk->chunk_list);
- list_add_tail(&chunk->chunk_list, &bucket->hot_list);
- bucket->hot_list_count++;
- bucket->used_list_count--;
- mutex_unlock(&bucket->mutex);
+ mutex_unlock(&pool->mutex);
}
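
A freed chunk therefore passes through three states - used, hot (hardware may still read it), and truly free - and only crosses the last boundary after mlx5dr_cmd_sync_steering() flushes the hardware. A minimal model of the sync decision (the standalone form is illustrative; only the threshold mirrors the driver):

static bool hot_list_needs_sync(u64 hot_memory_size)
{
	/* DR_ICM_SYNC_THRESHOLD_POOL: sync once 64 MB is parked hot */
	return hot_memory_size > (64ULL * 1024 * 1024);
}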
struct mlx5dr_icm_pool *mlx5dr_icm_pool_create(struct mlx5dr_domain *dmn,
@@ -515,7 +420,6 @@ struct mlx5dr_icm_pool *mlx5dr_icm_pool_create(struct mlx5dr_domain *dmn,
{
enum mlx5dr_icm_chunk_size max_log_chunk_sz;
struct mlx5dr_icm_pool *pool;
- int i;
if (icm_type == DR_ICM_TYPE_STE)
max_log_chunk_sz = dmn->info.max_log_sw_icm_sz;
@@ -526,43 +430,24 @@ struct mlx5dr_icm_pool *mlx5dr_icm_pool_create(struct mlx5dr_domain *dmn,
if (!pool)
return NULL;
- pool->buckets = kcalloc(max_log_chunk_sz + 1,
- sizeof(pool->buckets[0]),
- GFP_KERNEL);
- if (!pool->buckets)
- goto free_pool;
-
pool->dmn = dmn;
pool->icm_type = icm_type;
pool->max_log_chunk_sz = max_log_chunk_sz;
- pool->num_of_buckets = max_log_chunk_sz + 1;
- INIT_LIST_HEAD(&pool->icm_mr_list);
- for (i = 0; i < pool->num_of_buckets; i++)
- dr_icm_bucket_init(pool, &pool->buckets[i], i);
+ INIT_LIST_HEAD(&pool->buddy_mem_list);
- mutex_init(&pool->mr_mutex);
+ mutex_init(&pool->mutex);
return pool;
-
-free_pool:
- kvfree(pool);
- return NULL;
}
void mlx5dr_icm_pool_destroy(struct mlx5dr_icm_pool *pool)
{
- struct mlx5dr_icm_mr *icm_mr, *next;
- int i;
-
- mutex_destroy(&pool->mr_mutex);
-
- list_for_each_entry_safe(icm_mr, next, &pool->icm_mr_list, mr_list)
- dr_icm_pool_mr_destroy(icm_mr);
+ struct mlx5dr_icm_buddy_mem *buddy, *tmp_buddy;
- for (i = 0; i < pool->num_of_buckets; i++)
- dr_icm_bucket_cleanup(&pool->buckets[i]);
+ list_for_each_entry_safe(buddy, tmp_buddy, &pool->buddy_mem_list, list_node)
+ dr_icm_buddy_destroy(buddy);
- kfree(pool->buckets);
+ mutex_destroy(&pool->mutex);
kvfree(pool);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_matcher.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_matcher.c
index 7df883686d46..6527eb4df153 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_matcher.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_matcher.c
@@ -85,7 +85,7 @@ static bool dr_mask_is_ttl_set(struct mlx5dr_match_spec *spec)
(_misc2)._inner_outer##_first_mpls_s_bos || \
(_misc2)._inner_outer##_first_mpls_ttl)
-static bool dr_mask_is_gre_set(struct mlx5dr_match_misc *misc)
+static bool dr_mask_is_tnl_gre_set(struct mlx5dr_match_misc *misc)
{
return (misc->gre_key_h || misc->gre_key_l ||
misc->gre_protocol || misc->gre_c_present ||
@@ -98,12 +98,12 @@ static bool dr_mask_is_gre_set(struct mlx5dr_match_misc *misc)
(_misc2).outer_first_mpls_over_##gre_udp##_s_bos || \
(_misc2).outer_first_mpls_over_##gre_udp##_ttl)
-#define DR_MASK_IS_FLEX_PARSER_0_SET(_misc2) ( \
+#define DR_MASK_IS_TNL_MPLS_SET(_misc2) ( \
DR_MASK_IS_OUTER_MPLS_OVER_GRE_UDP_SET((_misc2), gre) || \
DR_MASK_IS_OUTER_MPLS_OVER_GRE_UDP_SET((_misc2), udp))
static bool
-dr_mask_is_misc3_vxlan_gpe_set(struct mlx5dr_match_misc3 *misc3)
+dr_mask_is_vxlan_gpe_set(struct mlx5dr_match_misc3 *misc3)
{
return (misc3->outer_vxlan_gpe_vni ||
misc3->outer_vxlan_gpe_next_protocol ||
@@ -111,21 +111,20 @@ dr_mask_is_misc3_vxlan_gpe_set(struct mlx5dr_match_misc3 *misc3)
}
static bool
-dr_matcher_supp_flex_parser_vxlan_gpe(struct mlx5dr_cmd_caps *caps)
+dr_matcher_supp_vxlan_gpe(struct mlx5dr_cmd_caps *caps)
{
- return caps->flex_protocols &
- MLX5_FLEX_PARSER_VXLAN_GPE_ENABLED;
+ return caps->flex_protocols & MLX5_FLEX_PARSER_VXLAN_GPE_ENABLED;
}
static bool
-dr_mask_is_flex_parser_tnl_vxlan_gpe_set(struct mlx5dr_match_param *mask,
- struct mlx5dr_domain *dmn)
+dr_mask_is_tnl_vxlan_gpe(struct mlx5dr_match_param *mask,
+ struct mlx5dr_domain *dmn)
{
- return dr_mask_is_misc3_vxlan_gpe_set(&mask->misc3) &&
- dr_matcher_supp_flex_parser_vxlan_gpe(&dmn->info.caps);
+ return dr_mask_is_vxlan_gpe_set(&mask->misc3) &&
+ dr_matcher_supp_vxlan_gpe(&dmn->info.caps);
}
-static bool dr_mask_is_misc_geneve_set(struct mlx5dr_match_misc *misc)
+static bool dr_mask_is_tnl_geneve_set(struct mlx5dr_match_misc *misc)
{
return misc->geneve_vni ||
misc->geneve_oam ||
@@ -134,26 +133,46 @@ static bool dr_mask_is_misc_geneve_set(struct mlx5dr_match_misc *misc)
}
static bool
-dr_matcher_supp_flex_parser_geneve(struct mlx5dr_cmd_caps *caps)
+dr_matcher_supp_tnl_geneve(struct mlx5dr_cmd_caps *caps)
{
- return caps->flex_protocols &
- MLX5_FLEX_PARSER_GENEVE_ENABLED;
+ return caps->flex_protocols & MLX5_FLEX_PARSER_GENEVE_ENABLED;
}
static bool
-dr_mask_is_flex_parser_tnl_geneve_set(struct mlx5dr_match_param *mask,
- struct mlx5dr_domain *dmn)
+dr_mask_is_tnl_geneve(struct mlx5dr_match_param *mask,
+ struct mlx5dr_domain *dmn)
{
- return dr_mask_is_misc_geneve_set(&mask->misc) &&
- dr_matcher_supp_flex_parser_geneve(&dmn->info.caps);
+ return dr_mask_is_tnl_geneve_set(&mask->misc) &&
+ dr_matcher_supp_tnl_geneve(&dmn->info.caps);
}
-static bool dr_mask_is_flex_parser_icmpv6_set(struct mlx5dr_match_misc3 *misc3)
+static int dr_matcher_supp_icmp_v4(struct mlx5dr_cmd_caps *caps)
+{
+ return caps->flex_protocols & MLX5_FLEX_PARSER_ICMP_V4_ENABLED;
+}
+
+static int dr_matcher_supp_icmp_v6(struct mlx5dr_cmd_caps *caps)
+{
+ return caps->flex_protocols & MLX5_FLEX_PARSER_ICMP_V6_ENABLED;
+}
+
+static bool dr_mask_is_icmpv6_set(struct mlx5dr_match_misc3 *misc3)
{
return (misc3->icmpv6_type || misc3->icmpv6_code ||
misc3->icmpv6_header_data);
}
+static bool dr_mask_is_icmp(struct mlx5dr_match_param *mask,
+ struct mlx5dr_domain *dmn)
+{
+ if (DR_MASK_IS_ICMPV4_SET(&mask->misc3))
+ return dr_matcher_supp_icmp_v4(&dmn->info.caps);
+ else if (dr_mask_is_icmpv6_set(&mask->misc3))
+ return dr_matcher_supp_icmp_v6(&dmn->info.caps);
+
+ return false;
+}
+
static bool dr_mask_is_wqe_metadata_set(struct mlx5dr_match_misc2 *misc2)
{
return misc2->metadata_reg_a;
@@ -257,7 +276,7 @@ static int dr_matcher_set_ste_builders(struct mlx5dr_matcher *matcher,
if (dr_mask_is_smac_set(&mask.outer) &&
dr_mask_is_dmac_set(&mask.outer)) {
- mlx5dr_ste_build_eth_l2_src_des(&sb[idx++], &mask,
+ mlx5dr_ste_build_eth_l2_src_dst(&sb[idx++], &mask,
inner, rx);
}
@@ -277,8 +296,8 @@ static int dr_matcher_set_ste_builders(struct mlx5dr_matcher *matcher,
inner, rx);
if (DR_MASK_IS_ETH_L4_SET(mask.outer, mask.misc, outer))
- mlx5dr_ste_build_ipv6_l3_l4(&sb[idx++], &mask,
- inner, rx);
+ mlx5dr_ste_build_eth_ipv6_l3_l4(&sb[idx++], &mask,
+ inner, rx);
} else {
if (dr_mask_is_ipv4_5_tuple_set(&mask.outer))
mlx5dr_ste_build_eth_l3_ipv4_5_tuple(&sb[idx++], &mask,
@@ -289,14 +308,12 @@ static int dr_matcher_set_ste_builders(struct mlx5dr_matcher *matcher,
inner, rx);
}
- if (dr_mask_is_flex_parser_tnl_vxlan_gpe_set(&mask, dmn))
- mlx5dr_ste_build_flex_parser_tnl_vxlan_gpe(&sb[idx++],
- &mask,
- inner, rx);
- else if (dr_mask_is_flex_parser_tnl_geneve_set(&mask, dmn))
- mlx5dr_ste_build_flex_parser_tnl_geneve(&sb[idx++],
- &mask,
- inner, rx);
+ if (dr_mask_is_tnl_vxlan_gpe(&mask, dmn))
+ mlx5dr_ste_build_tnl_vxlan_gpe(&sb[idx++], &mask,
+ inner, rx);
+ else if (dr_mask_is_tnl_geneve(&mask, dmn))
+ mlx5dr_ste_build_tnl_geneve(&sb[idx++], &mask,
+ inner, rx);
if (DR_MASK_IS_ETH_L4_MISC_SET(mask.misc3, outer))
mlx5dr_ste_build_eth_l4_misc(&sb[idx++], &mask, inner, rx);
@@ -304,22 +321,18 @@ static int dr_matcher_set_ste_builders(struct mlx5dr_matcher *matcher,
if (DR_MASK_IS_FIRST_MPLS_SET(mask.misc2, outer))
mlx5dr_ste_build_mpls(&sb[idx++], &mask, inner, rx);
- if (DR_MASK_IS_FLEX_PARSER_0_SET(mask.misc2))
- mlx5dr_ste_build_flex_parser_0(&sb[idx++], &mask,
- inner, rx);
+ if (DR_MASK_IS_TNL_MPLS_SET(mask.misc2))
+ mlx5dr_ste_build_tnl_mpls(&sb[idx++], &mask, inner, rx);
- if ((DR_MASK_IS_FLEX_PARSER_ICMPV4_SET(&mask.misc3) &&
- mlx5dr_matcher_supp_flex_parser_icmp_v4(&dmn->info.caps)) ||
- (dr_mask_is_flex_parser_icmpv6_set(&mask.misc3) &&
- mlx5dr_matcher_supp_flex_parser_icmp_v6(&dmn->info.caps))) {
- ret = mlx5dr_ste_build_flex_parser_1(&sb[idx++],
- &mask, &dmn->info.caps,
- inner, rx);
+ if (dr_mask_is_icmp(&mask, dmn)) {
+ ret = mlx5dr_ste_build_icmp(&sb[idx++],
+ &mask, &dmn->info.caps,
+ inner, rx);
if (ret)
return ret;
}
- if (dr_mask_is_gre_set(&mask.misc))
- mlx5dr_ste_build_gre(&sb[idx++], &mask, inner, rx);
+ if (dr_mask_is_tnl_gre_set(&mask.misc))
+ mlx5dr_ste_build_tnl_gre(&sb[idx++], &mask, inner, rx);
}
/* Inner */
@@ -334,7 +347,7 @@ static int dr_matcher_set_ste_builders(struct mlx5dr_matcher *matcher,
if (dr_mask_is_smac_set(&mask.inner) &&
dr_mask_is_dmac_set(&mask.inner)) {
- mlx5dr_ste_build_eth_l2_src_des(&sb[idx++],
+ mlx5dr_ste_build_eth_l2_src_dst(&sb[idx++],
&mask, inner, rx);
}
@@ -354,8 +367,8 @@ static int dr_matcher_set_ste_builders(struct mlx5dr_matcher *matcher,
inner, rx);
if (DR_MASK_IS_ETH_L4_SET(mask.inner, mask.misc, inner))
- mlx5dr_ste_build_ipv6_l3_l4(&sb[idx++], &mask,
- inner, rx);
+ mlx5dr_ste_build_eth_ipv6_l3_l4(&sb[idx++], &mask,
+ inner, rx);
} else {
if (dr_mask_is_ipv4_5_tuple_set(&mask.inner))
mlx5dr_ste_build_eth_l3_ipv4_5_tuple(&sb[idx++], &mask,
@@ -372,8 +385,8 @@ static int dr_matcher_set_ste_builders(struct mlx5dr_matcher *matcher,
if (DR_MASK_IS_FIRST_MPLS_SET(mask.misc2, inner))
mlx5dr_ste_build_mpls(&sb[idx++], &mask, inner, rx);
- if (DR_MASK_IS_FLEX_PARSER_0_SET(mask.misc2))
- mlx5dr_ste_build_flex_parser_0(&sb[idx++], &mask, inner, rx);
+ if (DR_MASK_IS_TNL_MPLS_SET(mask.misc2))
+ mlx5dr_ste_build_tnl_mpls(&sb[idx++], &mask, inner, rx);
}
/* Empty matcher, takes all */
if (matcher->match_criteria == DR_MATCHER_CRITERIA_EMPTY)
@@ -630,7 +643,7 @@ static int dr_matcher_init(struct mlx5dr_matcher *matcher,
}
if (mask) {
- if (mask->match_sz > sizeof(struct mlx5dr_match_param)) {
+ if (mask->match_sz > DR_SZ_MATCH_PARAM) {
mlx5dr_err(dmn, "Invalid match size attribute\n");
return -EINVAL;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c
index b3c9dc032026..6d73719db1f4 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c
@@ -874,8 +874,7 @@ static bool dr_rule_verify(struct mlx5dr_matcher *matcher,
u32 s_idx, e_idx;
if (!value_size ||
- (value_size > sizeof(struct mlx5dr_match_param) ||
- (value_size % sizeof(u32)))) {
+ (value_size > DR_SZ_MATCH_PARAM || (value_size % sizeof(u32)))) {
mlx5dr_err(matcher->tbl->dmn, "Rule parameters length is incorrect\n");
return false;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c
index b01aaec75622..d275823bff2f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c
@@ -1090,7 +1090,7 @@ static int dr_ste_build_eth_l2_src_des_tag(struct mlx5dr_match_param *value,
return 0;
}
-void mlx5dr_ste_build_eth_l2_src_des(struct mlx5dr_ste_build *sb,
+void mlx5dr_ste_build_eth_l2_src_dst(struct mlx5dr_ste_build *sb,
struct mlx5dr_match_param *mask,
bool inner, bool rx)
{
@@ -1594,9 +1594,9 @@ static int dr_ste_build_ipv6_l3_l4_tag(struct mlx5dr_match_param *value,
return 0;
}
-void mlx5dr_ste_build_ipv6_l3_l4(struct mlx5dr_ste_build *sb,
- struct mlx5dr_match_param *mask,
- bool inner, bool rx)
+void mlx5dr_ste_build_eth_ipv6_l3_l4(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask,
+ bool inner, bool rx)
{
dr_ste_build_ipv6_l3_l4_bit_mask(mask, inner, sb->bit_mask);
@@ -1693,8 +1693,8 @@ static int dr_ste_build_gre_tag(struct mlx5dr_match_param *value,
return 0;
}
-void mlx5dr_ste_build_gre(struct mlx5dr_ste_build *sb,
- struct mlx5dr_match_param *mask, bool inner, bool rx)
+void mlx5dr_ste_build_tnl_gre(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask, bool inner, bool rx)
{
dr_ste_build_gre_bit_mask(mask, inner, sb->bit_mask);
@@ -1771,9 +1771,9 @@ static int dr_ste_build_flex_parser_0_tag(struct mlx5dr_match_param *value,
return 0;
}
-void mlx5dr_ste_build_flex_parser_0(struct mlx5dr_ste_build *sb,
- struct mlx5dr_match_param *mask,
- bool inner, bool rx)
+void mlx5dr_ste_build_tnl_mpls(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask,
+ bool inner, bool rx)
{
dr_ste_build_flex_parser_0_bit_mask(mask, inner, sb->bit_mask);
@@ -1792,8 +1792,8 @@ static int dr_ste_build_flex_parser_1_bit_mask(struct mlx5dr_match_param *mask,
struct mlx5dr_cmd_caps *caps,
u8 *bit_mask)
{
+ bool is_ipv4_mask = DR_MASK_IS_ICMPV4_SET(&mask->misc3);
struct mlx5dr_match_misc3 *misc_3_mask = &mask->misc3;
- bool is_ipv4_mask = DR_MASK_IS_FLEX_PARSER_ICMPV4_SET(misc_3_mask);
u32 icmp_header_data_mask;
u32 icmp_type_mask;
u32 icmp_code_mask;
@@ -1869,7 +1869,7 @@ static int dr_ste_build_flex_parser_1_tag(struct mlx5dr_match_param *value,
u32 icmp_code;
bool is_ipv4;
- is_ipv4 = DR_MASK_IS_FLEX_PARSER_ICMPV4_SET(misc_3);
+ is_ipv4 = DR_MASK_IS_ICMPV4_SET(misc_3);
if (is_ipv4) {
icmp_header_data = misc_3->icmpv4_header_data;
icmp_type = misc_3->icmpv4_type;
@@ -1928,10 +1928,10 @@ static int dr_ste_build_flex_parser_1_tag(struct mlx5dr_match_param *value,
return 0;
}
-int mlx5dr_ste_build_flex_parser_1(struct mlx5dr_ste_build *sb,
- struct mlx5dr_match_param *mask,
- struct mlx5dr_cmd_caps *caps,
- bool inner, bool rx)
+int mlx5dr_ste_build_icmp(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask,
+ struct mlx5dr_cmd_caps *caps,
+ bool inner, bool rx)
{
int ret;
@@ -2069,9 +2069,9 @@ dr_ste_build_flex_parser_tnl_vxlan_gpe_tag(struct mlx5dr_match_param *value,
return 0;
}
-void mlx5dr_ste_build_flex_parser_tnl_vxlan_gpe(struct mlx5dr_ste_build *sb,
- struct mlx5dr_match_param *mask,
- bool inner, bool rx)
+void mlx5dr_ste_build_tnl_vxlan_gpe(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask,
+ bool inner, bool rx)
{
dr_ste_build_flex_parser_tnl_vxlan_gpe_bit_mask(mask, inner,
sb->bit_mask);
@@ -2122,9 +2122,9 @@ dr_ste_build_flex_parser_tnl_geneve_tag(struct mlx5dr_match_param *value,
return 0;
}
-void mlx5dr_ste_build_flex_parser_tnl_geneve(struct mlx5dr_ste_build *sb,
- struct mlx5dr_match_param *mask,
- bool inner, bool rx)
+void mlx5dr_ste_build_tnl_geneve(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask,
+ bool inner, bool rx)
{
dr_ste_build_flex_parser_tnl_geneve_bit_mask(mask, sb->bit_mask);
sb->rx = rx;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h
index cf62ea4f882e..51880df26724 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h
@@ -17,6 +17,7 @@
#define WIRE_PORT 0xFFFF
#define DR_STE_SVLAN 0x1
#define DR_STE_CVLAN 0x2
+#define DR_SZ_MATCH_PARAM (MLX5_ST_SZ_DW_MATCH_PARAM * 4)
#define mlx5dr_err(dmn, arg...) mlx5_core_err((dmn)->mdev, ##arg)
#define mlx5dr_info(dmn, arg...) mlx5_core_info((dmn)->mdev, ##arg)
@@ -114,7 +115,7 @@ enum mlx5dr_ipv {
struct mlx5dr_icm_pool;
struct mlx5dr_icm_chunk;
-struct mlx5dr_icm_bucket;
+struct mlx5dr_icm_buddy_mem;
struct mlx5dr_ste_htbl;
struct mlx5dr_match_param;
struct mlx5dr_cmd_caps;
@@ -288,7 +289,7 @@ int mlx5dr_ste_build_ste_arr(struct mlx5dr_matcher *matcher,
struct mlx5dr_matcher_rx_tx *nic_matcher,
struct mlx5dr_match_param *value,
u8 *ste_arr);
-void mlx5dr_ste_build_eth_l2_src_des(struct mlx5dr_ste_build *builder,
+void mlx5dr_ste_build_eth_l2_src_dst(struct mlx5dr_ste_build *builder,
struct mlx5dr_match_param *mask,
bool inner, bool rx);
void mlx5dr_ste_build_eth_l3_ipv4_5_tuple(struct mlx5dr_ste_build *sb,
@@ -312,31 +313,31 @@ void mlx5dr_ste_build_eth_l2_dst(struct mlx5dr_ste_build *sb,
void mlx5dr_ste_build_eth_l2_tnl(struct mlx5dr_ste_build *sb,
struct mlx5dr_match_param *mask,
bool inner, bool rx);
-void mlx5dr_ste_build_ipv6_l3_l4(struct mlx5dr_ste_build *sb,
- struct mlx5dr_match_param *mask,
- bool inner, bool rx);
+void mlx5dr_ste_build_eth_ipv6_l3_l4(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask,
+ bool inner, bool rx);
void mlx5dr_ste_build_eth_l4_misc(struct mlx5dr_ste_build *sb,
struct mlx5dr_match_param *mask,
bool inner, bool rx);
-void mlx5dr_ste_build_gre(struct mlx5dr_ste_build *sb,
- struct mlx5dr_match_param *mask,
- bool inner, bool rx);
+void mlx5dr_ste_build_tnl_gre(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask,
+ bool inner, bool rx);
void mlx5dr_ste_build_mpls(struct mlx5dr_ste_build *sb,
struct mlx5dr_match_param *mask,
bool inner, bool rx);
-void mlx5dr_ste_build_flex_parser_0(struct mlx5dr_ste_build *sb,
+void mlx5dr_ste_build_tnl_mpls(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask,
+ bool inner, bool rx);
+int mlx5dr_ste_build_icmp(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask,
+ struct mlx5dr_cmd_caps *caps,
+ bool inner, bool rx);
+void mlx5dr_ste_build_tnl_vxlan_gpe(struct mlx5dr_ste_build *sb,
struct mlx5dr_match_param *mask,
bool inner, bool rx);
-int mlx5dr_ste_build_flex_parser_1(struct mlx5dr_ste_build *sb,
- struct mlx5dr_match_param *mask,
- struct mlx5dr_cmd_caps *caps,
- bool inner, bool rx);
-void mlx5dr_ste_build_flex_parser_tnl_vxlan_gpe(struct mlx5dr_ste_build *sb,
- struct mlx5dr_match_param *mask,
- bool inner, bool rx);
-void mlx5dr_ste_build_flex_parser_tnl_geneve(struct mlx5dr_ste_build *sb,
- struct mlx5dr_match_param *mask,
- bool inner, bool rx);
+void mlx5dr_ste_build_tnl_geneve(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask,
+ bool inner, bool rx);
void mlx5dr_ste_build_general_purpose(struct mlx5dr_ste_build *sb,
struct mlx5dr_match_param *mask,
bool inner, bool rx);
@@ -588,9 +589,9 @@ struct mlx5dr_match_param {
struct mlx5dr_match_misc3 misc3;
};
-#define DR_MASK_IS_FLEX_PARSER_ICMPV4_SET(_misc3) ((_misc3)->icmpv4_type || \
- (_misc3)->icmpv4_code || \
- (_misc3)->icmpv4_header_data)
+#define DR_MASK_IS_ICMPV4_SET(_misc3) ((_misc3)->icmpv4_type || \
+ (_misc3)->icmpv4_code || \
+ (_misc3)->icmpv4_header_data)
struct mlx5dr_esw_caps {
u64 drop_icm_address_rx;
@@ -732,7 +733,6 @@ struct mlx5dr_action {
struct mlx5dr_domain *dmn;
struct mlx5dr_icm_chunk *chunk;
u8 *data;
- u32 data_size;
u16 num_of_actions;
u32 index;
u8 allow_rx:1;
@@ -805,7 +805,7 @@ void mlx5dr_rule_update_rule_member(struct mlx5dr_ste *new_ste,
struct mlx5dr_ste *ste);
struct mlx5dr_icm_chunk {
- struct mlx5dr_icm_bucket *bucket;
+ struct mlx5dr_icm_buddy_mem *buddy_mem;
struct list_head chunk_list;
u32 rkey;
u32 num_of_entries;
@@ -813,6 +813,11 @@ struct mlx5dr_icm_chunk {
u64 icm_addr;
u64 mr_addr;
+ /* indicates the index of this chunk in the whole memory,
+ * used for deleting the chunk from the buddy
+ */
+ unsigned int seg;
+
/* Memory optimisation */
struct mlx5dr_ste *ste_arr;
u8 *hw_ste_arr;
@@ -841,23 +846,20 @@ static inline void mlx5dr_domain_unlock(struct mlx5dr_domain *dmn)
mlx5dr_domain_nic_unlock(&dmn->info.rx);
}
-static inline int
-mlx5dr_matcher_supp_flex_parser_icmp_v4(struct mlx5dr_cmd_caps *caps)
-{
- return caps->flex_protocols & MLX5_FLEX_PARSER_ICMP_V4_ENABLED;
-}
-
-static inline int
-mlx5dr_matcher_supp_flex_parser_icmp_v6(struct mlx5dr_cmd_caps *caps)
-{
- return caps->flex_protocols & MLX5_FLEX_PARSER_ICMP_V6_ENABLED;
-}
-
int mlx5dr_matcher_select_builders(struct mlx5dr_matcher *matcher,
struct mlx5dr_matcher_rx_tx *nic_matcher,
enum mlx5dr_ipv outer_ipv,
enum mlx5dr_ipv inner_ipv);
+static inline int
+mlx5dr_icm_pool_dm_type_to_entry_size(enum mlx5dr_icm_type icm_type)
+{
+ if (icm_type == DR_ICM_TYPE_STE)
+ return DR_STE_SIZE;
+
+ return DR_MODIFY_ACTION_SIZE;
+}
+
static inline u32
mlx5dr_icm_pool_chunk_size_to_entries(enum mlx5dr_icm_chunk_size chunk_size)
{
@@ -871,11 +873,7 @@ mlx5dr_icm_pool_chunk_size_to_byte(enum mlx5dr_icm_chunk_size chunk_size,
int num_of_entries;
int entry_size;
- if (icm_type == DR_ICM_TYPE_STE)
- entry_size = DR_STE_SIZE;
- else
- entry_size = DR_MODIFY_ACTION_SIZE;
-
+ entry_size = mlx5dr_icm_pool_dm_type_to_entry_size(icm_type);
num_of_entries = mlx5dr_icm_pool_chunk_size_to_entries(chunk_size);
return entry_size * num_of_entries;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h
index 7914fe3fc68d..4177786b8eaf 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h
@@ -127,4 +127,36 @@ mlx5dr_is_supported(struct mlx5_core_dev *dev)
return MLX5_CAP_ESW_FLOWTABLE_FDB(dev, sw_owner);
}
+/* buddy functions & structure */
+
+struct mlx5dr_icm_mr;
+
+struct mlx5dr_icm_buddy_mem {
+ unsigned long **bitmap;
+ unsigned int *num_free;
+ u32 max_order;
+ struct list_head list_node;
+ struct mlx5dr_icm_mr *icm_mr;
+ struct mlx5dr_icm_pool *pool;
+
+ /* This is the list of used chunks. HW may be accessing this memory */
+ struct list_head used_list;
+ u64 used_memory;
+
+ /* Hardware may be accessing this memory but at some future,
+ * undetermined time, it might cease to do so.
+	 * The sync_ste command returns them to the free pool.
+ */
+ struct list_head hot_list;
+};
+
+int mlx5dr_buddy_init(struct mlx5dr_icm_buddy_mem *buddy,
+ unsigned int max_order);
+void mlx5dr_buddy_cleanup(struct mlx5dr_icm_buddy_mem *buddy);
+int mlx5dr_buddy_alloc_mem(struct mlx5dr_icm_buddy_mem *buddy,
+ unsigned int order,
+ unsigned int *segment);
+void mlx5dr_buddy_free_mem(struct mlx5dr_icm_buddy_mem *buddy,
+ unsigned int seg, unsigned int order);
+
#endif /* _MLX5DR_H_ */
diff --git a/drivers/net/ethernet/mellanox/mlxfw/mlxfw_fsm.c b/drivers/net/ethernet/mellanox/mlxfw/mlxfw_fsm.c
index bcd166911d44..46245e0b2462 100644
--- a/drivers/net/ethernet/mellanox/mlxfw/mlxfw_fsm.c
+++ b/drivers/net/ethernet/mellanox/mlxfw/mlxfw_fsm.c
@@ -368,7 +368,6 @@ int mlxfw_firmware_flash(struct mlxfw_dev *mlxfw_dev,
}
mlxfw_info(mlxfw_dev, "Initialize firmware flash process\n");
- devlink_flash_update_begin_notify(mlxfw_dev->devlink);
mlxfw_status_notify(mlxfw_dev, "Initializing firmware flash process",
NULL, 0, 0);
err = mlxfw_dev->ops->fsm_lock(mlxfw_dev, &fwhandle);
@@ -417,7 +416,6 @@ int mlxfw_firmware_flash(struct mlxfw_dev *mlxfw_dev,
mlxfw_info(mlxfw_dev, "Firmware flash done\n");
mlxfw_status_notify(mlxfw_dev, "Firmware flash done", NULL, 0, 0);
mlxfw_mfa2_file_fini(mfa2_file);
- devlink_flash_update_end_notify(mlxfw_dev->devlink);
return 0;
err_state_wait_activate_to_locked:
@@ -429,7 +427,6 @@ err_state_wait_idle_to_locked:
mlxfw_dev->ops->fsm_release(mlxfw_dev, fwhandle);
err_fsm_lock:
mlxfw_mfa2_file_fini(mfa2_file);
- devlink_flash_update_end_notify(mlxfw_dev->devlink);
return err;
}
EXPORT_SYMBOL(mlxfw_firmware_flash);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.c b/drivers/net/ethernet/mellanox/mlxsw/core.c
index 1a86535c4968..630109f139a0 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/core.c
@@ -1117,16 +1117,7 @@ static int mlxsw_core_fw_flash_update(struct mlxsw_core *mlxsw_core,
struct devlink_flash_update_params *params,
struct netlink_ext_ack *extack)
{
- const struct firmware *firmware;
- int err;
-
- err = request_firmware_direct(&firmware, params->file_name, mlxsw_core->bus_info->dev);
- if (err)
- return err;
- err = mlxsw_core_fw_flash(mlxsw_core, firmware, extack);
- release_firmware(firmware);
-
- return err;
+ return mlxsw_core_fw_flash(mlxsw_core, params->fw, extack);
}
static int mlxsw_core_devlink_param_fw_load_policy_validate(struct devlink *devlink, u32 id,
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_env.h b/drivers/net/ethernet/mellanox/mlxsw/core_env.h
index 8e36a2634ef5..2b23f8a87862 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core_env.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/core_env.h
@@ -4,6 +4,9 @@
#ifndef _MLXSW_CORE_ENV_H
#define _MLXSW_CORE_ENV_H
+struct ethtool_modinfo;
+struct ethtool_eeprom;
+
int mlxsw_env_module_temp_thresholds_get(struct mlxsw_core *core, int module,
int off, int *temp);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/reg.h b/drivers/net/ethernet/mellanox/mlxsw/reg.h
index 39eff6a57ba2..1077ed2046fe 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/reg.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/reg.h
@@ -834,17 +834,30 @@ MLXSW_ITEM32(reg, spvid, local_port, 0x00, 16, 8);
*/
MLXSW_ITEM32(reg, spvid, sub_port, 0x00, 8, 8);
+/* reg_spvid_et_vlan
+ * EtherType used for when VLAN is pushed at ingress (for untagged
+ * packets or for QinQ push mode).
+ * 0: ether_type0 - (default)
+ * 1: ether_type1
+ * 2: ether_type2 - Reserved when Spectrum-1, supported by Spectrum-2
+ * Ethertype IDs are configured by SVER.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, spvid, et_vlan, 0x04, 16, 2);
+
/* reg_spvid_pvid
* Port default VID
* Access: RW
*/
MLXSW_ITEM32(reg, spvid, pvid, 0x04, 0, 12);
-static inline void mlxsw_reg_spvid_pack(char *payload, u8 local_port, u16 pvid)
+static inline void mlxsw_reg_spvid_pack(char *payload, u8 local_port, u16 pvid,
+ u8 et_vlan)
{
MLXSW_REG_ZERO(spvid, payload);
mlxsw_reg_spvid_local_port_set(payload, local_port);
mlxsw_reg_spvid_pvid_set(payload, pvid);
+ mlxsw_reg_spvid_et_vlan_set(payload, et_vlan);
}
/* SPVM - Switch Port VLAN Membership
@@ -1857,6 +1870,104 @@ static inline void mlxsw_reg_spvmlr_pack(char *payload, u8 local_port,
}
}
+/* SPVC - Switch Port VLAN Classification Register
+ * -----------------------------------------------
+ * Configures the port to identify packets as untagged / single tagged /
+ * double packets based on the packet EtherTypes.
+ * Ethertype IDs are configured by SVER.
+ */
+#define MLXSW_REG_SPVC_ID 0x2026
+#define MLXSW_REG_SPVC_LEN 0x0C
+
+MLXSW_REG_DEFINE(spvc, MLXSW_REG_SPVC_ID, MLXSW_REG_SPVC_LEN);
+
+/* reg_spvc_local_port
+ * Local port.
+ * Access: Index
+ *
+ * Note: applies to both the Rx and the Tx port, so if a packet enters
+ * through port i and leaves through port j, ports i and j must have the
+ * same configuration.
+ */
+MLXSW_ITEM32(reg, spvc, local_port, 0x00, 16, 8);
+
+/* reg_spvc_inner_et2
+ * Vlan Tag1 EtherType2 enable.
+ * Packet is initially classified as double VLAN Tag if in addition to
+ * being classified with a tag0 VLAN Tag its tag1 EtherType value is
+ * equal to ether_type2.
+ * 0: disable (default)
+ * 1: enable
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, spvc, inner_et2, 0x08, 17, 1);
+
+/* reg_spvc_et2
+ * Vlan Tag0 EtherType2 enable.
+ * Packet is initially classified as VLAN Tag if its tag0 EtherType is
+ * equal to ether_type2.
+ * 0: disable (default)
+ * 1: enable
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, spvc, et2, 0x08, 16, 1);
+
+/* reg_spvc_inner_et1
+ * Vlan Tag1 EtherType1 enable.
+ * Packet is initially classified as double VLAN Tag if in addition to
+ * being classified with a tag0 VLAN Tag its tag1 EtherType value is
+ * equal to ether_type1.
+ * 0: disable
+ * 1: enable (default)
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, spvc, inner_et1, 0x08, 9, 1);
+
+/* reg_spvc_et1
+ * Vlan Tag0 EtherType1 enable.
+ * Packet is initially classified as VLAN Tag if its tag0 EtherType is
+ * equal to ether_type1.
+ * 0: disable
+ * 1: enable (default)
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, spvc, et1, 0x08, 8, 1);
+
+/* reg_spvc_inner_et0
+ * Vlan Tag1 EtherType0 enable.
+ * Packet is initially classified as double VLAN Tag if in addition to
+ * being classified with a tag0 VLAN Tag its tag1 EtherType value is
+ * equal to ether_type0.
+ * 0: disable
+ * 1: enable (default)
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, spvc, inner_et0, 0x08, 1, 1);
+
+/* reg_spvc_et0
+ * Vlan Tag0 EtherType0 enable.
+ * Packet is initially classified as VLAN Tag if its tag0 EtherType is
+ * equal to ether_type0.
+ * 0: disable
+ * 1: enable (default)
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, spvc, et0, 0x08, 0, 1);
+
+static inline void mlxsw_reg_spvc_pack(char *payload, u8 local_port, bool et1,
+ bool et0)
+{
+ MLXSW_REG_ZERO(spvc, payload);
+ mlxsw_reg_spvc_local_port_set(payload, local_port);
+	/* Enable inner_et1 and inner_et0 so that double tagged
+	 * packets can be identified.
+ */
+ mlxsw_reg_spvc_inner_et1_set(payload, 1);
+ mlxsw_reg_spvc_inner_et0_set(payload, 1);
+ mlxsw_reg_spvc_et1_set(payload, et1);
+ mlxsw_reg_spvc_et0_set(payload, et0);
+}
+
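
As with the other switch registers, callers pack a payload on the stack and issue it with mlxsw_reg_write(). A hedged fragment enabling 802.1Q-only (ether_type0) classification on a port (surrounding context and error handling elided):

char spvc_pl[MLXSW_REG_SPVC_LEN];

/* et1 off, et0 on: tag0 is recognized only for ether_type0 (802.1Q);
 * inner_et1/inner_et0 are forced on by the pack helper
 */
mlxsw_reg_spvc_pack(spvc_pl, mlxsw_sp_port->local_port, false, true);
err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvc), spvc_pl);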
/* CWTP - Congestion WRED ECN TClass Profile
* ----------------------------------------
* Configures the profiles for queues of egress port and traffic class
@@ -7279,10 +7390,11 @@ static inline void mlxsw_reg_ralue_pack4(char *payload,
enum mlxsw_reg_ralxx_protocol protocol,
enum mlxsw_reg_ralue_op op,
u16 virtual_router, u8 prefix_len,
- u32 dip)
+ u32 *dip)
{
mlxsw_reg_ralue_pack(payload, protocol, op, virtual_router, prefix_len);
- mlxsw_reg_ralue_dip4_set(payload, dip);
+ if (dip)
+ mlxsw_reg_ralue_dip4_set(payload, *dip);
}
static inline void mlxsw_reg_ralue_pack6(char *payload,
@@ -7292,7 +7404,8 @@ static inline void mlxsw_reg_ralue_pack6(char *payload,
const void *dip)
{
mlxsw_reg_ralue_pack(payload, protocol, op, virtual_router, prefix_len);
- mlxsw_reg_ralue_dip6_memcpy_to(payload, dip);
+ if (dip)
+ mlxsw_reg_ralue_dip6_memcpy_to(payload, dip);
}
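
With the pointer form, a caller that only needs the common RALUE header can pass NULL and supply the DIP elsewhere; a hedged fragment (op, virtual_router and prefix_len assumed in scope):

/* pack the IPv4 route entry without touching the dip field */
mlxsw_reg_ralue_pack4(ralue_pl, MLXSW_REG_RALXX_PROTOCOL_IPV4, op,
		      virtual_router, prefix_len, NULL);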
static inline void
@@ -8245,6 +8358,86 @@ mlxsw_reg_rmft2_ipv6_pack(char *payload, bool v, u16 offset, u16 virtual_router,
mlxsw_reg_rmft2_sip6_mask_memcpy_to(payload, (void *)&sip6_mask);
}
+/* Note that the XRALXX register positions violate the rule of ordering
+ * register definitions by ID. However, the XRALXX pack helpers use the
+ * RALXX pack helpers, and the RALXX registers have higher IDs.
+ */
+
+/* XRALTA - XM Router Algorithmic LPM Tree Allocation Register
+ * -----------------------------------------------------------
+ * The XRALTA is used to allocate the XLT LPM trees.
+ *
+ * This register embeds original RALTA register.
+ */
+#define MLXSW_REG_XRALTA_ID 0x7811
+#define MLXSW_REG_XRALTA_LEN 0x08
+#define MLXSW_REG_XRALTA_RALTA_OFFSET 0x04
+
+MLXSW_REG_DEFINE(xralta, MLXSW_REG_XRALTA_ID, MLXSW_REG_XRALTA_LEN);
+
+static inline void mlxsw_reg_xralta_pack(char *payload, bool alloc,
+ enum mlxsw_reg_ralxx_protocol protocol,
+ u8 tree_id)
+{
+ char *ralta_payload = payload + MLXSW_REG_XRALTA_RALTA_OFFSET;
+
+ MLXSW_REG_ZERO(xralta, payload);
+ mlxsw_reg_ralta_pack(ralta_payload, alloc, protocol, tree_id);
+}
+
+/* XRALST - XM Router Algorithmic LPM Structure Tree Register
+ * ----------------------------------------------------------
+ * The XRALST is used to set and query the structure of an XLT LPM tree.
+ *
+ * This register embeds original RALST register.
+ */
+#define MLXSW_REG_XRALST_ID 0x7812
+#define MLXSW_REG_XRALST_LEN 0x108
+#define MLXSW_REG_XRALST_RALST_OFFSET 0x04
+
+MLXSW_REG_DEFINE(xralst, MLXSW_REG_XRALST_ID, MLXSW_REG_XRALST_LEN);
+
+static inline void mlxsw_reg_xralst_pack(char *payload, u8 root_bin, u8 tree_id)
+{
+ char *ralst_payload = payload + MLXSW_REG_XRALST_RALST_OFFSET;
+
+ MLXSW_REG_ZERO(xralst, payload);
+ mlxsw_reg_ralst_pack(ralst_payload, root_bin, tree_id);
+}
+
+static inline void mlxsw_reg_xralst_bin_pack(char *payload, u8 bin_number,
+ u8 left_child_bin,
+ u8 right_child_bin)
+{
+ char *ralst_payload = payload + MLXSW_REG_XRALST_RALST_OFFSET;
+
+ mlxsw_reg_ralst_bin_pack(ralst_payload, bin_number, left_child_bin,
+ right_child_bin);
+}
+
+/* XRALTB - XM Router Algorithmic LPM Tree Binding Register
+ * --------------------------------------------------------
+ * The XRALTB register is used to bind virtual router and protocol
+ * to an allocated LPM tree.
+ *
+ * This register embeds original RALTB register.
+ */
+#define MLXSW_REG_XRALTB_ID 0x7813
+#define MLXSW_REG_XRALTB_LEN 0x08
+#define MLXSW_REG_XRALTB_RALTB_OFFSET 0x04
+
+MLXSW_REG_DEFINE(xraltb, MLXSW_REG_XRALTB_ID, MLXSW_REG_XRALTB_LEN);
+
+static inline void mlxsw_reg_xraltb_pack(char *payload, u16 virtual_router,
+ enum mlxsw_reg_ralxx_protocol protocol,
+ u8 tree_id)
+{
+ char *raltb_payload = payload + MLXSW_REG_XRALTB_RALTB_OFFSET;
+
+ MLXSW_REG_ZERO(xraltb, payload);
+ mlxsw_reg_raltb_pack(raltb_payload, virtual_router, protocol, tree_id);
+}
+
/* MFCR - Management Fan Control Register
* --------------------------------------
* This register controls the settings of the Fan Speed PWM mechanism.
@@ -11130,6 +11323,7 @@ static const struct mlxsw_reg_info *mlxsw_reg_infos[] = {
MLXSW_REG(svpe),
MLXSW_REG(sfmr),
MLXSW_REG(spvmlr),
+ MLXSW_REG(spvc),
MLXSW_REG(cwtp),
MLXSW_REG(cwtpm),
MLXSW_REG(pgcr),
@@ -11195,6 +11389,9 @@ static const struct mlxsw_reg_info *mlxsw_reg_infos[] = {
MLXSW_REG(rigr2),
MLXSW_REG(recr2),
MLXSW_REG(rmft2),
+ MLXSW_REG(xralta),
+ MLXSW_REG(xralst),
+ MLXSW_REG(xraltb),
MLXSW_REG(mfcr),
MLXSW_REG(mfsc),
MLXSW_REG(mfsm),
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
index b08853f71b2b..385eb3c3b362 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
@@ -384,13 +384,37 @@ int mlxsw_sp_port_vid_learning_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid,
return err;
}
+static int mlxsw_sp_ethtype_to_sver_type(u16 ethtype, u8 *p_sver_type)
+{
+ switch (ethtype) {
+ case ETH_P_8021Q:
+ *p_sver_type = 0;
+ break;
+ case ETH_P_8021AD:
+ *p_sver_type = 1;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
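A hedged usage sketch (example_sver_for_proto() is a hypothetical name):
the mapping yields the SVER type that __mlxsw_sp_port_pvid_set() passes to
SPVID below.

static int example_sver_for_proto(u16 ethtype)
{
	u8 sver_type;
	int err;

	err = mlxsw_sp_ethtype_to_sver_type(ethtype, &sver_type);
	if (err)
		return err;	/* neither 802.1q nor 802.1ad */
	return sver_type;	/* 0 for ETH_P_8021Q, 1 for ETH_P_8021AD */
}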
+
static int __mlxsw_sp_port_pvid_set(struct mlxsw_sp_port *mlxsw_sp_port,
- u16 vid)
+ u16 vid, u16 ethtype)
{
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
char spvid_pl[MLXSW_REG_SPVID_LEN];
+ u8 sver_type;
+ int err;
+
+ err = mlxsw_sp_ethtype_to_sver_type(ethtype, &sver_type);
+ if (err)
+ return err;
+
+ mlxsw_reg_spvid_pack(spvid_pl, mlxsw_sp_port->local_port, vid,
+ sver_type);
- mlxsw_reg_spvid_pack(spvid_pl, mlxsw_sp_port->local_port, vid);
return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvid), spvid_pl);
}
@@ -404,7 +428,8 @@ static int mlxsw_sp_port_allow_untagged_set(struct mlxsw_sp_port *mlxsw_sp_port,
return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spaft), spaft_pl);
}
-int mlxsw_sp_port_pvid_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid)
+int mlxsw_sp_port_pvid_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid,
+ u16 ethtype)
{
int err;
@@ -413,7 +438,7 @@ int mlxsw_sp_port_pvid_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid)
if (err)
return err;
} else {
- err = __mlxsw_sp_port_pvid_set(mlxsw_sp_port, vid);
+ err = __mlxsw_sp_port_pvid_set(mlxsw_sp_port, vid, ethtype);
if (err)
return err;
err = mlxsw_sp_port_allow_untagged_set(mlxsw_sp_port, true);
@@ -425,7 +450,7 @@ int mlxsw_sp_port_pvid_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid)
return 0;
err_port_allow_untagged_set:
- __mlxsw_sp_port_pvid_set(mlxsw_sp_port, mlxsw_sp_port->pvid);
+ __mlxsw_sp_port_pvid_set(mlxsw_sp_port, mlxsw_sp_port->pvid, ethtype);
return err;
}
@@ -1386,6 +1411,19 @@ static int mlxsw_sp_port_overheat_init_val_set(struct mlxsw_sp_port *mlxsw_sp_po
return 0;
}
+int
+mlxsw_sp_port_vlan_classification_set(struct mlxsw_sp_port *mlxsw_sp_port,
+ bool is_8021ad_tagged,
+ bool is_8021q_tagged)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ char spvc_pl[MLXSW_REG_SPVC_LEN];
+
+ mlxsw_reg_spvc_pack(spvc_pl, mlxsw_sp_port->local_port,
+ is_8021ad_tagged, is_8021q_tagged);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvc), spvc_pl);
+}
+
static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port,
u8 split_base_local_port,
struct mlxsw_sp_port_mapping *port_mapping)
@@ -1575,7 +1613,8 @@ static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port,
goto err_port_nve_init;
}
- err = mlxsw_sp_port_pvid_set(mlxsw_sp_port, MLXSW_SP_DEFAULT_VID);
+ err = mlxsw_sp_port_pvid_set(mlxsw_sp_port, MLXSW_SP_DEFAULT_VID,
+ ETH_P_8021Q);
if (err) {
dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set PVID\n",
mlxsw_sp_port->local_port);
@@ -1592,6 +1631,16 @@ static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port,
}
mlxsw_sp_port->default_vlan = mlxsw_sp_port_vlan;
+ /* Set SPVC.et0=true and SPVC.et1=false so that the local port treats
+ * only packets with an 802.1q header as tagged.
+ */
+ err = mlxsw_sp_port_vlan_classification_set(mlxsw_sp_port, false, true);
+ if (err) {
+ dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set default VLAN classification\n",
+ local_port);
+ goto err_port_vlan_classification_set;
+ }
+
INIT_DELAYED_WORK(&mlxsw_sp_port->ptp.shaper_dw,
mlxsw_sp->ptp_ops->shaper_work);
@@ -1618,6 +1667,8 @@ static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port,
err_register_netdev:
err_port_overheat_init_val_set:
+ mlxsw_sp_port_vlan_classification_set(mlxsw_sp_port, true, true);
+err_port_vlan_classification_set:
mlxsw_sp->ports[local_port] = NULL;
mlxsw_sp_port_vlan_destroy(mlxsw_sp_port_vlan);
err_port_vlan_create:
@@ -1664,6 +1715,7 @@ static void mlxsw_sp_port_remove(struct mlxsw_sp *mlxsw_sp, u8 local_port)
mlxsw_sp_port_ptp_clear(mlxsw_sp_port);
mlxsw_core_port_clear(mlxsw_sp->core, local_port, mlxsw_sp);
unregister_netdev(mlxsw_sp_port->dev); /* This calls ndo_stop */
+ mlxsw_sp_port_vlan_classification_set(mlxsw_sp_port, true, true);
mlxsw_sp->ports[local_port] = NULL;
mlxsw_sp_port_vlan_flush(mlxsw_sp_port, true);
mlxsw_sp_port_nve_fini(mlxsw_sp_port);
@@ -3618,7 +3670,8 @@ static void mlxsw_sp_port_lag_leave(struct mlxsw_sp_port *mlxsw_sp_port,
lag->ref_count--;
/* Make sure untagged frames are allowed to ingress */
- mlxsw_sp_port_pvid_set(mlxsw_sp_port, MLXSW_SP_DEFAULT_VID);
+ mlxsw_sp_port_pvid_set(mlxsw_sp_port, MLXSW_SP_DEFAULT_VID,
+ ETH_P_8021Q);
}
static int mlxsw_sp_lag_dist_port_add(struct mlxsw_sp_port *mlxsw_sp_port,
@@ -3840,6 +3893,7 @@ static int mlxsw_sp_netdevice_port_upper_event(struct net_device *lower_dev,
struct net_device *upper_dev;
struct mlxsw_sp *mlxsw_sp;
int err = 0;
+ u16 proto;
mlxsw_sp_port = netdev_priv(dev);
mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
@@ -3897,6 +3951,36 @@ static int mlxsw_sp_netdevice_port_upper_event(struct net_device *lower_dev,
NL_SET_ERR_MSG_MOD(extack, "Can not put a VLAN on an OVS port");
return -EINVAL;
}
+ if (netif_is_bridge_master(upper_dev)) {
+ br_vlan_get_proto(upper_dev, &proto);
+ if (br_vlan_enabled(upper_dev) &&
+ proto != ETH_P_8021Q && proto != ETH_P_8021AD) {
+ NL_SET_ERR_MSG_MOD(extack, "Enslaving a port to a bridge with unknown VLAN protocol is not supported");
+ return -EOPNOTSUPP;
+ }
+ if (vlan_uses_dev(lower_dev) &&
+ br_vlan_enabled(upper_dev) &&
+ proto == ETH_P_8021AD) {
+ NL_SET_ERR_MSG_MOD(extack, "Enslaving a port that already has a VLAN upper to an 802.1ad bridge is not supported");
+ return -EOPNOTSUPP;
+ }
+ }
+ if (netif_is_bridge_port(lower_dev) && is_vlan_dev(upper_dev)) {
+ struct net_device *br_dev = netdev_master_upper_dev_get(lower_dev);
+
+ if (br_vlan_enabled(br_dev)) {
+ br_vlan_get_proto(br_dev, &proto);
+ if (proto == ETH_P_8021AD) {
+ NL_SET_ERR_MSG_MOD(extack, "VLAN uppers are not supported on a port enslaved to an 802.1ad bridge");
+ return -EOPNOTSUPP;
+ }
+ }
+ }
+ if (is_vlan_dev(upper_dev) &&
+ ntohs(vlan_dev_vlan_proto(upper_dev)) != ETH_P_8021Q) {
+ NL_SET_ERR_MSG_MOD(extack, "VLAN uppers are only supported with 802.1q VLAN protocol");
+ return -EOPNOTSUPP;
+ }
break;
case NETDEV_CHANGEUPPER:
upper_dev = info->upper_dev;
@@ -4162,6 +4246,7 @@ static int mlxsw_sp_netdevice_bridge_event(struct net_device *br_dev,
struct netdev_notifier_changeupper_info *info = ptr;
struct netlink_ext_ack *extack;
struct net_device *upper_dev;
+ u16 proto;
if (!mlxsw_sp)
return 0;
@@ -4177,6 +4262,18 @@ static int mlxsw_sp_netdevice_bridge_event(struct net_device *br_dev,
}
if (!info->linking)
break;
+ if (br_vlan_enabled(br_dev)) {
+ br_vlan_get_proto(br_dev, &proto);
+ if (proto == ETH_P_8021AD) {
+ NL_SET_ERR_MSG_MOD(extack, "Uppers are not supported on top of an 802.1ad bridge");
+ return -EOPNOTSUPP;
+ }
+ }
+ if (is_vlan_dev(upper_dev) &&
+ ntohs(vlan_dev_vlan_proto(upper_dev)) != ETH_P_8021Q) {
+ NL_SET_ERR_MSG_MOD(extack, "VLAN uppers are only supported with 802.1q VLAN protocol");
+ return -EOPNOTSUPP;
+ }
if (netif_is_macvlan(upper_dev) &&
!mlxsw_sp_rif_exists(mlxsw_sp, br_dev)) {
NL_SET_ERR_MSG_MOD(extack, "macvlan is only supported on top of router interfaces");
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
index 74b3959b36d4..ce26cc41831f 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
@@ -4,6 +4,7 @@
#ifndef _MLXSW_SPECTRUM_H
#define _MLXSW_SPECTRUM_H
+#include <linux/ethtool.h>
#include <linux/types.h>
#include <linux/netdevice.h>
#include <linux/rhashtable.h>
@@ -427,6 +428,10 @@ int mlxsw_sp_port_get_stats_raw(struct net_device *dev, int grp,
int prio, char *ppcnt_pl);
int mlxsw_sp_port_admin_status_set(struct mlxsw_sp_port *mlxsw_sp_port,
bool is_up);
+int
+mlxsw_sp_port_vlan_classification_set(struct mlxsw_sp_port *mlxsw_sp_port,
+ bool is_8021ad_tagged,
+ bool is_8021q_tagged);
/* spectrum_buffers.c */
struct mlxsw_sp_hdroom_prio {
@@ -579,7 +584,8 @@ int mlxsw_sp_port_vid_stp_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid,
int mlxsw_sp_port_vp_mode_set(struct mlxsw_sp_port *mlxsw_sp_port, bool enable);
int mlxsw_sp_port_vid_learning_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid,
bool learn_enable);
-int mlxsw_sp_port_pvid_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid);
+int mlxsw_sp_port_pvid_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid,
+ u16 ethtype);
struct mlxsw_sp_port_vlan *
mlxsw_sp_port_vlan_create(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid);
void mlxsw_sp_port_vlan_destroy(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.c
index daf029931b5f..ed81d4fa48ac 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.c
@@ -913,7 +913,8 @@ static u64 mlxsw_sp_dpipe_table_adj_size(struct mlxsw_sp *mlxsw_sp)
mlxsw_sp_nexthop_for_each(nh, mlxsw_sp->router)
if (mlxsw_sp_nexthop_offload(nh) &&
- !mlxsw_sp_nexthop_group_has_ipip(nh))
+ !mlxsw_sp_nexthop_group_has_ipip(nh) &&
+ !mlxsw_sp_nexthop_is_discard(nh))
size++;
return size;
}
@@ -1105,7 +1106,8 @@ start_again:
nh_count = 0;
mlxsw_sp_nexthop_for_each(nh, mlxsw_sp->router) {
if (!mlxsw_sp_nexthop_offload(nh) ||
- mlxsw_sp_nexthop_group_has_ipip(nh))
+ mlxsw_sp_nexthop_group_has_ipip(nh) ||
+ mlxsw_sp_nexthop_is_discard(nh))
continue;
if (nh_count < nh_skip)
@@ -1186,7 +1188,8 @@ static int mlxsw_sp_dpipe_table_adj_counters_update(void *priv, bool enable)
mlxsw_sp_nexthop_for_each(nh, mlxsw_sp->router) {
if (!mlxsw_sp_nexthop_offload(nh) ||
- mlxsw_sp_nexthop_group_has_ipip(nh))
+ mlxsw_sp_nexthop_group_has_ipip(nh) ||
+ mlxsw_sp_nexthop_is_discard(nh))
continue;
mlxsw_sp_nexthop_indexes(nh, &adj_index, &adj_size,
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.c
index a8525992528f..089d99535f9e 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.c
@@ -181,23 +181,26 @@ mlxsw_sp_ipip_fib_entry_op_gre4_rtdp(struct mlxsw_sp *mlxsw_sp,
}
static int
-mlxsw_sp_ipip_fib_entry_op_gre4_ralue(struct mlxsw_sp *mlxsw_sp,
- u32 dip, u8 prefix_len, u16 ul_vr_id,
- enum mlxsw_reg_ralue_op op,
- u32 tunnel_index)
+mlxsw_sp_ipip_fib_entry_op_gre4_do(struct mlxsw_sp *mlxsw_sp,
+ const struct mlxsw_sp_router_ll_ops *ll_ops,
+ struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
+ u32 dip, u8 prefix_len, u16 ul_vr_id,
+ enum mlxsw_sp_fib_entry_op op,
+ u32 tunnel_index,
+ struct mlxsw_sp_fib_entry_priv *priv)
{
- char ralue_pl[MLXSW_REG_RALUE_LEN];
-
- mlxsw_reg_ralue_pack4(ralue_pl, MLXSW_REG_RALXX_PROTOCOL_IPV4, op,
- ul_vr_id, prefix_len, dip);
- mlxsw_reg_ralue_act_ip2me_tun_pack(ralue_pl, tunnel_index);
- return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl);
+ ll_ops->fib_entry_pack(op_ctx, MLXSW_SP_L3_PROTO_IPV4, op, ul_vr_id,
+ prefix_len, (unsigned char *) &dip, priv);
+ ll_ops->fib_entry_act_ip2me_tun_pack(op_ctx, tunnel_index);
+ return mlxsw_sp_fib_entry_commit(mlxsw_sp, op_ctx, ll_ops);
}
static int mlxsw_sp_ipip_fib_entry_op_gre4(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_ipip_entry *ipip_entry,
- enum mlxsw_reg_ralue_op op,
- u32 tunnel_index)
+ const struct mlxsw_sp_router_ll_ops *ll_ops,
+ struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
+ struct mlxsw_sp_ipip_entry *ipip_entry,
+ enum mlxsw_sp_fib_entry_op op, u32 tunnel_index,
+ struct mlxsw_sp_fib_entry_priv *priv)
{
u16 ul_vr_id = mlxsw_sp_ipip_lb_ul_vr_id(ipip_entry->ol_lb);
__be32 dip;
@@ -210,9 +213,8 @@ static int mlxsw_sp_ipip_fib_entry_op_gre4(struct mlxsw_sp *mlxsw_sp,
dip = mlxsw_sp_ipip_netdev_saddr(MLXSW_SP_L3_PROTO_IPV4,
ipip_entry->ol_dev).addr4;
- return mlxsw_sp_ipip_fib_entry_op_gre4_ralue(mlxsw_sp, be32_to_cpu(dip),
- 32, ul_vr_id, op,
- tunnel_index);
+ return mlxsw_sp_ipip_fib_entry_op_gre4_do(mlxsw_sp, ll_ops, op_ctx, be32_to_cpu(dip),
+ 32, ul_vr_id, op, tunnel_index, priv);
}
static bool mlxsw_sp_ipip_tunnel_complete(enum mlxsw_sp_l3proto proto,
@@ -231,8 +233,7 @@ static bool mlxsw_sp_ipip_tunnel_complete(enum mlxsw_sp_l3proto proto,
}
static bool mlxsw_sp_ipip_can_offload_gre4(const struct mlxsw_sp *mlxsw_sp,
- const struct net_device *ol_dev,
- enum mlxsw_sp_l3proto ol_proto)
+ const struct net_device *ol_dev)
{
struct ip_tunnel *tunnel = netdev_priv(ol_dev);
__be16 okflags = TUNNEL_KEY; /* We can't offload any other features. */
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.h
index bb5c4d4a5872..d32702cb6ab4 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.h
@@ -43,8 +43,7 @@ struct mlxsw_sp_ipip_ops {
struct mlxsw_sp_ipip_entry *ipip_entry);
bool (*can_offload)(const struct mlxsw_sp *mlxsw_sp,
- const struct net_device *ol_dev,
- enum mlxsw_sp_l3proto ol_proto);
+ const struct net_device *ol_dev);
/* Return a configuration for creating an overlay loopback RIF. */
struct mlxsw_sp_rif_ipip_lb_config
@@ -52,9 +51,12 @@ struct mlxsw_sp_ipip_ops {
const struct net_device *ol_dev);
int (*fib_entry_op)(struct mlxsw_sp *mlxsw_sp,
+ const struct mlxsw_sp_router_ll_ops *ll_ops,
+ struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
struct mlxsw_sp_ipip_entry *ipip_entry,
- enum mlxsw_reg_ralue_op op,
- u32 tunnel_index);
+ enum mlxsw_sp_fib_entry_op op,
+ u32 tunnel_index,
+ struct mlxsw_sp_fib_entry_priv *priv);
int (*ol_netdev_change)(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_ipip_entry *ipip_entry,
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c
index ca8090a28dec..d6e9ecb14681 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c
@@ -828,10 +828,10 @@ struct mlxsw_sp_ptp_state *mlxsw_sp1_ptp_init(struct mlxsw_sp *mlxsw_sp)
goto err_hashtable_init;
/* Deliver these message types as PTP0. */
- message_type = BIT(MLXSW_SP_PTP_MESSAGE_TYPE_SYNC) |
- BIT(MLXSW_SP_PTP_MESSAGE_TYPE_DELAY_REQ) |
- BIT(MLXSW_SP_PTP_MESSAGE_TYPE_PDELAY_REQ) |
- BIT(MLXSW_SP_PTP_MESSAGE_TYPE_PDELAY_RESP);
+ message_type = BIT(PTP_MSGTYPE_SYNC) |
+ BIT(PTP_MSGTYPE_DELAY_REQ) |
+ BIT(PTP_MSGTYPE_PDELAY_REQ) |
+ BIT(PTP_MSGTYPE_PDELAY_RESP);
err = mlxsw_sp_ptp_mtptpt_set(mlxsw_sp, MLXSW_REG_MTPTPT_TRAP_ID_PTP0,
message_type);
if (err)
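An editorial note on the constants above: the generic PTP_MSGTYPE_* values
carry the IEEE 1588 messageType field encoding (Sync = 0x0, Delay_Req = 0x1,
Pdelay_Req = 0x2, Pdelay_Resp = 0x3), which matches the order of the removed
driver-local enum, so the bitmap built here is unchanged.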
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.h
index 8c386571afce..1d43a3755285 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.h
@@ -11,13 +11,6 @@ struct mlxsw_sp;
struct mlxsw_sp_port;
struct mlxsw_sp_ptp_clock;
-enum {
- MLXSW_SP_PTP_MESSAGE_TYPE_SYNC,
- MLXSW_SP_PTP_MESSAGE_TYPE_DELAY_REQ,
- MLXSW_SP_PTP_MESSAGE_TYPE_PDELAY_REQ,
- MLXSW_SP_PTP_MESSAGE_TYPE_PDELAY_RESP,
-};
-
static inline int mlxsw_sp_ptp_get_ts_info_noptp(struct ethtool_ts_info *info)
{
info->so_timestamping = SOF_TIMESTAMPING_RX_SOFTWARE |
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
index 4381f8c6c3fb..85223647fdb6 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
@@ -352,6 +352,7 @@ enum mlxsw_sp_fib_entry_type {
MLXSW_SP_FIB_ENTRY_TYPE_NVE_DECAP,
};
+struct mlxsw_sp_nexthop_group_info;
struct mlxsw_sp_nexthop_group;
struct mlxsw_sp_fib_entry;
@@ -368,18 +369,71 @@ struct mlxsw_sp_fib_entry_decap {
u32 tunnel_index;
};
+static struct mlxsw_sp_fib_entry_priv *
+mlxsw_sp_fib_entry_priv_create(const struct mlxsw_sp_router_ll_ops *ll_ops)
+{
+ struct mlxsw_sp_fib_entry_priv *priv;
+
+ if (!ll_ops->fib_entry_priv_size)
+ /* No need to have priv */
+ return NULL;
+
+ priv = kzalloc(sizeof(*priv) + ll_ops->fib_entry_priv_size, GFP_KERNEL);
+ if (!priv)
+ return ERR_PTR(-ENOMEM);
+ refcount_set(&priv->refcnt, 1);
+ return priv;
+}
+
+static void
+mlxsw_sp_fib_entry_priv_destroy(struct mlxsw_sp_fib_entry_priv *priv)
+{
+ kfree(priv);
+}
+
+static void mlxsw_sp_fib_entry_priv_hold(struct mlxsw_sp_fib_entry_priv *priv)
+{
+ refcount_inc(&priv->refcnt);
+}
+
+static void mlxsw_sp_fib_entry_priv_put(struct mlxsw_sp_fib_entry_priv *priv)
+{
+ if (!priv || !refcount_dec_and_test(&priv->refcnt))
+ return;
+ mlxsw_sp_fib_entry_priv_destroy(priv);
+}
+
+static void mlxsw_sp_fib_entry_op_ctx_priv_hold(struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
+ struct mlxsw_sp_fib_entry_priv *priv)
+{
+ if (!priv)
+ return;
+ mlxsw_sp_fib_entry_priv_hold(priv);
+ list_add(&priv->list, &op_ctx->fib_entry_priv_list);
+}
+
+static void mlxsw_sp_fib_entry_op_ctx_priv_put_all(struct mlxsw_sp_fib_entry_op_ctx *op_ctx)
+{
+ struct mlxsw_sp_fib_entry_priv *priv, *tmp;
+
+ list_for_each_entry_safe(priv, tmp, &op_ctx->fib_entry_priv_list, list)
+ mlxsw_sp_fib_entry_priv_put(priv);
+ INIT_LIST_HEAD(&op_ctx->fib_entry_priv_list);
+}
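To make the reference flow concrete, a minimal sketch of the intended
pairing (illustrative; example_priv_refs() is a hypothetical name and the
op_ctx list is assumed to be initialized):

static void example_priv_refs(struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
			      struct mlxsw_sp_fib_entry_priv *priv)
{
	/* The FIB entry holds the initial reference from _create(). */
	mlxsw_sp_fib_entry_op_ctx_priv_hold(op_ctx, priv); /* op in flight */
	/* ... the low-level operation executes against the hardware ... */
	mlxsw_sp_fib_entry_op_ctx_priv_put_all(op_ctx);	/* op completed */
	mlxsw_sp_fib_entry_priv_put(priv);		/* entry destroyed */
}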
+
struct mlxsw_sp_fib_entry {
struct mlxsw_sp_fib_node *fib_node;
enum mlxsw_sp_fib_entry_type type;
struct list_head nexthop_group_node;
struct mlxsw_sp_nexthop_group *nh_group;
struct mlxsw_sp_fib_entry_decap decap; /* Valid for decap entries. */
+ struct mlxsw_sp_fib_entry_priv *priv;
};
struct mlxsw_sp_fib4_entry {
struct mlxsw_sp_fib_entry common;
+ struct fib_info *fi;
u32 tb_id;
- u32 prio;
u8 tos;
u8 type;
};
@@ -409,6 +463,7 @@ struct mlxsw_sp_fib {
struct mlxsw_sp_vr *vr;
struct mlxsw_sp_lpm_tree *lpm_tree;
enum mlxsw_sp_l3proto proto;
+ const struct mlxsw_sp_router_ll_ops *ll_ops;
};
struct mlxsw_sp_vr {
@@ -422,12 +477,31 @@ struct mlxsw_sp_vr {
refcount_t ul_rif_refcnt;
};
+static int mlxsw_sp_router_ll_basic_ralta_write(struct mlxsw_sp *mlxsw_sp, char *xralta_pl)
+{
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralta),
+ xralta_pl + MLXSW_REG_XRALTA_RALTA_OFFSET);
+}
+
+static int mlxsw_sp_router_ll_basic_ralst_write(struct mlxsw_sp *mlxsw_sp, char *xralst_pl)
+{
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralst),
+ xralst_pl + MLXSW_REG_XRALST_RALST_OFFSET);
+}
+
+static int mlxsw_sp_router_ll_basic_raltb_write(struct mlxsw_sp *mlxsw_sp, char *xraltb_pl)
+{
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raltb),
+ xraltb_pl + MLXSW_REG_XRALTB_RALTB_OFFSET);
+}
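An editorial note on the three helpers above: the "basic" low-level ops
accept payloads packed in the XRAL* layout and write the legacy register
embedded at the fixed 4-byte offset, so a single packed buffer serves both
the XM and the legacy register families.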
+
static const struct rhashtable_params mlxsw_sp_fib_ht_params;
static struct mlxsw_sp_fib *mlxsw_sp_fib_create(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_vr *vr,
enum mlxsw_sp_l3proto proto)
{
+ const struct mlxsw_sp_router_ll_ops *ll_ops = mlxsw_sp->router->proto_ll_ops[proto];
struct mlxsw_sp_lpm_tree *lpm_tree;
struct mlxsw_sp_fib *fib;
int err;
@@ -443,6 +517,7 @@ static struct mlxsw_sp_fib *mlxsw_sp_fib_create(struct mlxsw_sp *mlxsw_sp,
fib->proto = proto;
fib->vr = vr;
fib->lpm_tree = lpm_tree;
+ fib->ll_ops = ll_ops;
mlxsw_sp_lpm_tree_hold(lpm_tree);
err = mlxsw_sp_vr_lpm_tree_bind(mlxsw_sp, fib, lpm_tree->id);
if (err)
@@ -481,33 +556,36 @@ mlxsw_sp_lpm_tree_find_unused(struct mlxsw_sp *mlxsw_sp)
}
static int mlxsw_sp_lpm_tree_alloc(struct mlxsw_sp *mlxsw_sp,
+ const struct mlxsw_sp_router_ll_ops *ll_ops,
struct mlxsw_sp_lpm_tree *lpm_tree)
{
- char ralta_pl[MLXSW_REG_RALTA_LEN];
+ char xralta_pl[MLXSW_REG_XRALTA_LEN];
- mlxsw_reg_ralta_pack(ralta_pl, true,
- (enum mlxsw_reg_ralxx_protocol) lpm_tree->proto,
- lpm_tree->id);
- return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralta), ralta_pl);
+ mlxsw_reg_xralta_pack(xralta_pl, true,
+ (enum mlxsw_reg_ralxx_protocol) lpm_tree->proto,
+ lpm_tree->id);
+ return ll_ops->ralta_write(mlxsw_sp, xralta_pl);
}
static void mlxsw_sp_lpm_tree_free(struct mlxsw_sp *mlxsw_sp,
+ const struct mlxsw_sp_router_ll_ops *ll_ops,
struct mlxsw_sp_lpm_tree *lpm_tree)
{
- char ralta_pl[MLXSW_REG_RALTA_LEN];
+ char xralta_pl[MLXSW_REG_XRALTA_LEN];
- mlxsw_reg_ralta_pack(ralta_pl, false,
- (enum mlxsw_reg_ralxx_protocol) lpm_tree->proto,
- lpm_tree->id);
- mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralta), ralta_pl);
+ mlxsw_reg_xralta_pack(xralta_pl, false,
+ (enum mlxsw_reg_ralxx_protocol) lpm_tree->proto,
+ lpm_tree->id);
+ ll_ops->ralta_write(mlxsw_sp, xralta_pl);
}
static int
mlxsw_sp_lpm_tree_left_struct_set(struct mlxsw_sp *mlxsw_sp,
+ const struct mlxsw_sp_router_ll_ops *ll_ops,
struct mlxsw_sp_prefix_usage *prefix_usage,
struct mlxsw_sp_lpm_tree *lpm_tree)
{
- char ralst_pl[MLXSW_REG_RALST_LEN];
+ char xralst_pl[MLXSW_REG_XRALST_LEN];
u8 root_bin = 0;
u8 prefix;
u8 last_prefix = MLXSW_REG_RALST_BIN_NO_CHILD;
@@ -515,19 +593,20 @@ mlxsw_sp_lpm_tree_left_struct_set(struct mlxsw_sp *mlxsw_sp,
mlxsw_sp_prefix_usage_for_each(prefix, prefix_usage)
root_bin = prefix;
- mlxsw_reg_ralst_pack(ralst_pl, root_bin, lpm_tree->id);
+ mlxsw_reg_xralst_pack(xralst_pl, root_bin, lpm_tree->id);
mlxsw_sp_prefix_usage_for_each(prefix, prefix_usage) {
if (prefix == 0)
continue;
- mlxsw_reg_ralst_bin_pack(ralst_pl, prefix, last_prefix,
- MLXSW_REG_RALST_BIN_NO_CHILD);
+ mlxsw_reg_xralst_bin_pack(xralst_pl, prefix, last_prefix,
+ MLXSW_REG_RALST_BIN_NO_CHILD);
last_prefix = prefix;
}
- return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralst), ralst_pl);
+ return ll_ops->ralst_write(mlxsw_sp, xralst_pl);
}
static struct mlxsw_sp_lpm_tree *
mlxsw_sp_lpm_tree_create(struct mlxsw_sp *mlxsw_sp,
+ const struct mlxsw_sp_router_ll_ops *ll_ops,
struct mlxsw_sp_prefix_usage *prefix_usage,
enum mlxsw_sp_l3proto proto)
{
@@ -538,12 +617,11 @@ mlxsw_sp_lpm_tree_create(struct mlxsw_sp *mlxsw_sp,
if (!lpm_tree)
return ERR_PTR(-EBUSY);
lpm_tree->proto = proto;
- err = mlxsw_sp_lpm_tree_alloc(mlxsw_sp, lpm_tree);
+ err = mlxsw_sp_lpm_tree_alloc(mlxsw_sp, ll_ops, lpm_tree);
if (err)
return ERR_PTR(err);
- err = mlxsw_sp_lpm_tree_left_struct_set(mlxsw_sp, prefix_usage,
- lpm_tree);
+ err = mlxsw_sp_lpm_tree_left_struct_set(mlxsw_sp, ll_ops, prefix_usage, lpm_tree);
if (err)
goto err_left_struct_set;
memcpy(&lpm_tree->prefix_usage, prefix_usage,
@@ -554,14 +632,15 @@ mlxsw_sp_lpm_tree_create(struct mlxsw_sp *mlxsw_sp,
return lpm_tree;
err_left_struct_set:
- mlxsw_sp_lpm_tree_free(mlxsw_sp, lpm_tree);
+ mlxsw_sp_lpm_tree_free(mlxsw_sp, ll_ops, lpm_tree);
return ERR_PTR(err);
}
static void mlxsw_sp_lpm_tree_destroy(struct mlxsw_sp *mlxsw_sp,
+ const struct mlxsw_sp_router_ll_ops *ll_ops,
struct mlxsw_sp_lpm_tree *lpm_tree)
{
- mlxsw_sp_lpm_tree_free(mlxsw_sp, lpm_tree);
+ mlxsw_sp_lpm_tree_free(mlxsw_sp, ll_ops, lpm_tree);
}
static struct mlxsw_sp_lpm_tree *
@@ -569,6 +648,7 @@ mlxsw_sp_lpm_tree_get(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_prefix_usage *prefix_usage,
enum mlxsw_sp_l3proto proto)
{
+ const struct mlxsw_sp_router_ll_ops *ll_ops = mlxsw_sp->router->proto_ll_ops[proto];
struct mlxsw_sp_lpm_tree *lpm_tree;
int i;
@@ -582,7 +662,7 @@ mlxsw_sp_lpm_tree_get(struct mlxsw_sp *mlxsw_sp,
return lpm_tree;
}
}
- return mlxsw_sp_lpm_tree_create(mlxsw_sp, prefix_usage, proto);
+ return mlxsw_sp_lpm_tree_create(mlxsw_sp, ll_ops, prefix_usage, proto);
}
static void mlxsw_sp_lpm_tree_hold(struct mlxsw_sp_lpm_tree *lpm_tree)
@@ -593,8 +673,11 @@ static void mlxsw_sp_lpm_tree_hold(struct mlxsw_sp_lpm_tree *lpm_tree)
static void mlxsw_sp_lpm_tree_put(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_lpm_tree *lpm_tree)
{
+ const struct mlxsw_sp_router_ll_ops *ll_ops =
+ mlxsw_sp->router->proto_ll_ops[lpm_tree->proto];
+
if (--lpm_tree->ref_count == 0)
- mlxsw_sp_lpm_tree_destroy(mlxsw_sp, lpm_tree);
+ mlxsw_sp_lpm_tree_destroy(mlxsw_sp, ll_ops, lpm_tree);
}
#define MLXSW_SP_LPM_TREE_MIN 1 /* tree 0 is reserved */
@@ -684,23 +767,23 @@ static struct mlxsw_sp_vr *mlxsw_sp_vr_find_unused(struct mlxsw_sp *mlxsw_sp)
static int mlxsw_sp_vr_lpm_tree_bind(struct mlxsw_sp *mlxsw_sp,
const struct mlxsw_sp_fib *fib, u8 tree_id)
{
- char raltb_pl[MLXSW_REG_RALTB_LEN];
+ char xraltb_pl[MLXSW_REG_XRALTB_LEN];
- mlxsw_reg_raltb_pack(raltb_pl, fib->vr->id,
- (enum mlxsw_reg_ralxx_protocol) fib->proto,
- tree_id);
- return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raltb), raltb_pl);
+ mlxsw_reg_xraltb_pack(xraltb_pl, fib->vr->id,
+ (enum mlxsw_reg_ralxx_protocol) fib->proto,
+ tree_id);
+ return fib->ll_ops->raltb_write(mlxsw_sp, xraltb_pl);
}
static int mlxsw_sp_vr_lpm_tree_unbind(struct mlxsw_sp *mlxsw_sp,
const struct mlxsw_sp_fib *fib)
{
- char raltb_pl[MLXSW_REG_RALTB_LEN];
+ char xraltb_pl[MLXSW_REG_XRALTB_LEN];
/* Bind to tree 0 which is default */
- mlxsw_reg_raltb_pack(raltb_pl, fib->vr->id,
- (enum mlxsw_reg_ralxx_protocol) fib->proto, 0);
- return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raltb), raltb_pl);
+ mlxsw_reg_xraltb_pack(xraltb_pl, fib->vr->id,
+ (enum mlxsw_reg_ralxx_protocol) fib->proto, 0);
+ return fib->ll_ops->raltb_write(mlxsw_sp, xraltb_pl);
}
static u32 mlxsw_sp_fix_tb_id(u32 tb_id)
@@ -1270,21 +1353,33 @@ mlxsw_sp_ipip_entry_matches_decap(struct mlxsw_sp *mlxsw_sp,
/* Given decap parameters, find the corresponding IPIP entry. */
static struct mlxsw_sp_ipip_entry *
-mlxsw_sp_ipip_entry_find_by_decap(struct mlxsw_sp *mlxsw_sp,
- const struct net_device *ul_dev,
+mlxsw_sp_ipip_entry_find_by_decap(struct mlxsw_sp *mlxsw_sp, int ul_dev_ifindex,
enum mlxsw_sp_l3proto ul_proto,
union mlxsw_sp_l3addr ul_dip)
{
- struct mlxsw_sp_ipip_entry *ipip_entry;
+ struct mlxsw_sp_ipip_entry *ipip_entry = NULL;
+ struct net_device *ul_dev;
+
+ rcu_read_lock();
+
+ ul_dev = dev_get_by_index_rcu(mlxsw_sp_net(mlxsw_sp), ul_dev_ifindex);
+ if (!ul_dev)
+ goto out_unlock;
list_for_each_entry(ipip_entry, &mlxsw_sp->router->ipip_list,
ipip_list_node)
if (mlxsw_sp_ipip_entry_matches_decap(mlxsw_sp, ul_dev,
ul_proto, ul_dip,
ipip_entry))
- return ipip_entry;
+ goto out_unlock;
+
+ rcu_read_unlock();
return NULL;
+
+out_unlock:
+ rcu_read_unlock();
+ return ipip_entry;
}
static bool mlxsw_sp_netdev_ipip_type(const struct mlxsw_sp *mlxsw_sp,
@@ -1370,11 +1465,7 @@ static bool mlxsw_sp_netdevice_ipip_can_offload(struct mlxsw_sp *mlxsw_sp,
const struct mlxsw_sp_ipip_ops *ops
= mlxsw_sp->router->ipip_ops_arr[ipipt];
- /* For deciding whether decap should be offloaded, we don't care about
- * overlay protocol, so ask whether either one is supported.
- */
- return ops->can_offload(mlxsw_sp, ol_dev, MLXSW_SP_L3_PROTO_IPV4) ||
- ops->can_offload(mlxsw_sp, ol_dev, MLXSW_SP_L3_PROTO_IPV6);
+ return ops->can_offload(mlxsw_sp, ol_dev);
}
static int mlxsw_sp_netdevice_ipip_ol_reg_event(struct mlxsw_sp *mlxsw_sp,
@@ -2749,10 +2840,11 @@ struct mlxsw_sp_nexthop {
struct list_head neigh_list_node; /* member of neigh entry list */
struct list_head rif_list_node;
struct list_head router_list_node;
- struct mlxsw_sp_nexthop_group *nh_grp; /* pointer back to the group
- * this belongs to
- */
+ struct mlxsw_sp_nexthop_group_info *nhgi; /* pointer back to the group
+ * this nexthop belongs to
+ */
struct rhash_head ht_node;
+ struct neigh_table *neigh_tbl;
struct mlxsw_sp_nexthop_key key;
unsigned char gw_addr[sizeof(struct in6_addr)];
int ifindex;
@@ -2766,9 +2858,10 @@ struct mlxsw_sp_nexthop {
offloaded:1, /* set in case the neigh is actually put into
* KVD linear area of this group.
*/
- update:1; /* set indicates that MAC of this neigh should be
+ update:1, /* set indicates that MAC of this neigh should be
* updated in HW
*/
+ discard:1; /* nexthop is programmed to discard packets */
enum mlxsw_sp_nexthop_type type;
union {
struct mlxsw_sp_neigh_entry *neigh_entry;
@@ -2778,21 +2871,54 @@ struct mlxsw_sp_nexthop {
bool counter_valid;
};
-struct mlxsw_sp_nexthop_group {
- void *priv;
- struct rhash_head ht_node;
- struct list_head fib_list; /* list of fib entries that use this group */
- struct neigh_table *neigh_tbl;
- u8 adj_index_valid:1,
- gateway:1; /* routes using the group use a gateway */
+enum mlxsw_sp_nexthop_group_type {
+ MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV4,
+ MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV6,
+ MLXSW_SP_NEXTHOP_GROUP_TYPE_OBJ,
+};
+
+struct mlxsw_sp_nexthop_group_info {
+ struct mlxsw_sp_nexthop_group *nh_grp;
u32 adj_index;
u16 ecmp_size;
u16 count;
int sum_norm_weight;
+ u8 adj_index_valid:1,
+ gateway:1; /* routes using the group use a gateway */
struct mlxsw_sp_nexthop nexthops[0];
#define nh_rif nexthops[0].rif
};
+struct mlxsw_sp_nexthop_group_vr_key {
+ u16 vr_id;
+ enum mlxsw_sp_l3proto proto;
+};
+
+struct mlxsw_sp_nexthop_group_vr_entry {
+ struct list_head list; /* member in vr_list */
+ struct rhash_head ht_node; /* member in vr_ht */
+ refcount_t ref_count;
+ struct mlxsw_sp_nexthop_group_vr_key key;
+};
+
+struct mlxsw_sp_nexthop_group {
+ struct rhash_head ht_node;
+ struct list_head fib_list; /* list of fib entries that use this group */
+ union {
+ struct {
+ struct fib_info *fi;
+ } ipv4;
+ struct {
+ u32 id;
+ } obj;
+ };
+ struct mlxsw_sp_nexthop_group_info *nhgi;
+ struct list_head vr_list;
+ struct rhashtable vr_ht;
+ enum mlxsw_sp_nexthop_group_type type;
+ bool can_destroy;
+};
+
void mlxsw_sp_nexthop_counter_alloc(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_nexthop *nh)
{
@@ -2858,18 +2984,18 @@ unsigned char *mlxsw_sp_nexthop_ha(struct mlxsw_sp_nexthop *nh)
int mlxsw_sp_nexthop_indexes(struct mlxsw_sp_nexthop *nh, u32 *p_adj_index,
u32 *p_adj_size, u32 *p_adj_hash_index)
{
- struct mlxsw_sp_nexthop_group *nh_grp = nh->nh_grp;
+ struct mlxsw_sp_nexthop_group_info *nhgi = nh->nhgi;
u32 adj_hash_index = 0;
int i;
- if (!nh->offloaded || !nh_grp->adj_index_valid)
+ if (!nh->offloaded || !nhgi->adj_index_valid)
return -EINVAL;
- *p_adj_index = nh_grp->adj_index;
- *p_adj_size = nh_grp->ecmp_size;
+ *p_adj_index = nhgi->adj_index;
+ *p_adj_size = nhgi->ecmp_size;
- for (i = 0; i < nh_grp->count; i++) {
- struct mlxsw_sp_nexthop *nh_iter = &nh_grp->nexthops[i];
+ for (i = 0; i < nhgi->count; i++) {
+ struct mlxsw_sp_nexthop *nh_iter = &nhgi->nexthops[i];
if (nh_iter == nh)
break;
@@ -2888,11 +3014,11 @@ struct mlxsw_sp_rif *mlxsw_sp_nexthop_rif(struct mlxsw_sp_nexthop *nh)
bool mlxsw_sp_nexthop_group_has_ipip(struct mlxsw_sp_nexthop *nh)
{
- struct mlxsw_sp_nexthop_group *nh_grp = nh->nh_grp;
+ struct mlxsw_sp_nexthop_group_info *nhgi = nh->nhgi;
int i;
- for (i = 0; i < nh_grp->count; i++) {
- struct mlxsw_sp_nexthop *nh_iter = &nh_grp->nexthops[i];
+ for (i = 0; i < nhgi->count; i++) {
+ struct mlxsw_sp_nexthop *nh_iter = &nhgi->nexthops[i];
if (nh_iter->type == MLXSW_SP_NEXTHOP_TYPE_IPIP)
return true;
@@ -2900,17 +3026,107 @@ bool mlxsw_sp_nexthop_group_has_ipip(struct mlxsw_sp_nexthop *nh)
return false;
}
-static struct fib_info *
-mlxsw_sp_nexthop4_group_fi(const struct mlxsw_sp_nexthop_group *nh_grp)
+bool mlxsw_sp_nexthop_is_discard(const struct mlxsw_sp_nexthop *nh)
{
- return nh_grp->priv;
+ return nh->discard;
+}
+
+static const struct rhashtable_params mlxsw_sp_nexthop_group_vr_ht_params = {
+ .key_offset = offsetof(struct mlxsw_sp_nexthop_group_vr_entry, key),
+ .head_offset = offsetof(struct mlxsw_sp_nexthop_group_vr_entry, ht_node),
+ .key_len = sizeof(struct mlxsw_sp_nexthop_group_vr_key),
+ .automatic_shrinking = true,
+};
+
+static struct mlxsw_sp_nexthop_group_vr_entry *
+mlxsw_sp_nexthop_group_vr_entry_lookup(struct mlxsw_sp_nexthop_group *nh_grp,
+ const struct mlxsw_sp_fib *fib)
+{
+ struct mlxsw_sp_nexthop_group_vr_key key;
+
+ memset(&key, 0, sizeof(key));
+ key.vr_id = fib->vr->id;
+ key.proto = fib->proto;
+ return rhashtable_lookup_fast(&nh_grp->vr_ht, &key,
+ mlxsw_sp_nexthop_group_vr_ht_params);
+}
+
+static int
+mlxsw_sp_nexthop_group_vr_entry_create(struct mlxsw_sp_nexthop_group *nh_grp,
+ const struct mlxsw_sp_fib *fib)
+{
+ struct mlxsw_sp_nexthop_group_vr_entry *vr_entry;
+ int err;
+
+ vr_entry = kzalloc(sizeof(*vr_entry), GFP_KERNEL);
+ if (!vr_entry)
+ return -ENOMEM;
+
+ vr_entry->key.vr_id = fib->vr->id;
+ vr_entry->key.proto = fib->proto;
+ refcount_set(&vr_entry->ref_count, 1);
+
+ err = rhashtable_insert_fast(&nh_grp->vr_ht, &vr_entry->ht_node,
+ mlxsw_sp_nexthop_group_vr_ht_params);
+ if (err)
+ goto err_hashtable_insert;
+
+ list_add(&vr_entry->list, &nh_grp->vr_list);
+
+ return 0;
+
+err_hashtable_insert:
+ kfree(vr_entry);
+ return err;
+}
+
+static void
+mlxsw_sp_nexthop_group_vr_entry_destroy(struct mlxsw_sp_nexthop_group *nh_grp,
+ struct mlxsw_sp_nexthop_group_vr_entry *vr_entry)
+{
+ list_del(&vr_entry->list);
+ rhashtable_remove_fast(&nh_grp->vr_ht, &vr_entry->ht_node,
+ mlxsw_sp_nexthop_group_vr_ht_params);
+ kfree(vr_entry);
+}
+
+static int
+mlxsw_sp_nexthop_group_vr_link(struct mlxsw_sp_nexthop_group *nh_grp,
+ const struct mlxsw_sp_fib *fib)
+{
+ struct mlxsw_sp_nexthop_group_vr_entry *vr_entry;
+
+ vr_entry = mlxsw_sp_nexthop_group_vr_entry_lookup(nh_grp, fib);
+ if (vr_entry) {
+ refcount_inc(&vr_entry->ref_count);
+ return 0;
+ }
+
+ return mlxsw_sp_nexthop_group_vr_entry_create(nh_grp, fib);
+}
+
+static void
+mlxsw_sp_nexthop_group_vr_unlink(struct mlxsw_sp_nexthop_group *nh_grp,
+ const struct mlxsw_sp_fib *fib)
+{
+ struct mlxsw_sp_nexthop_group_vr_entry *vr_entry;
+
+ vr_entry = mlxsw_sp_nexthop_group_vr_entry_lookup(nh_grp, fib);
+ if (WARN_ON_ONCE(!vr_entry))
+ return;
+
+ if (!refcount_dec_and_test(&vr_entry->ref_count))
+ return;
+
+ mlxsw_sp_nexthop_group_vr_entry_destroy(nh_grp, vr_entry);
}
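Illustrative pairing for the VR tracking above (editorial sketch; the
example_* name is hypothetical): an entry that starts using a group links
the entry's VR to the group, and unlinks it on teardown.

static int example_group_vr_pairing(struct mlxsw_sp_nexthop_group *nh_grp,
				    const struct mlxsw_sp_fib *fib)
{
	int err;

	err = mlxsw_sp_nexthop_group_vr_link(nh_grp, fib);
	if (err)
		return err;
	/* ... program the FIB entry to hardware ... */
	mlxsw_sp_nexthop_group_vr_unlink(nh_grp, fib);
	return 0;
}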
struct mlxsw_sp_nexthop_group_cmp_arg {
- enum mlxsw_sp_l3proto proto;
+ enum mlxsw_sp_nexthop_group_type type;
union {
struct fib_info *fi;
struct mlxsw_sp_fib6_entry *fib6_entry;
+ u32 id;
};
};
@@ -2921,10 +3137,10 @@ mlxsw_sp_nexthop6_group_has_nexthop(const struct mlxsw_sp_nexthop_group *nh_grp,
{
int i;
- for (i = 0; i < nh_grp->count; i++) {
+ for (i = 0; i < nh_grp->nhgi->count; i++) {
const struct mlxsw_sp_nexthop *nh;
- nh = &nh_grp->nexthops[i];
+ nh = &nh_grp->nhgi->nexthops[i];
if (nh->ifindex == ifindex && nh->nh_weight == weight &&
ipv6_addr_equal(gw, (struct in6_addr *) nh->gw_addr))
return true;
@@ -2939,7 +3155,7 @@ mlxsw_sp_nexthop6_group_cmp(const struct mlxsw_sp_nexthop_group *nh_grp,
{
struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
- if (nh_grp->count != fib6_entry->nrt6)
+ if (nh_grp->nhgi->count != fib6_entry->nrt6)
return false;
list_for_each_entry(mlxsw_sp_rt6, &fib6_entry->rt6_list, list) {
@@ -2964,24 +3180,23 @@ mlxsw_sp_nexthop_group_cmp(struct rhashtable_compare_arg *arg, const void *ptr)
const struct mlxsw_sp_nexthop_group_cmp_arg *cmp_arg = arg->key;
const struct mlxsw_sp_nexthop_group *nh_grp = ptr;
- switch (cmp_arg->proto) {
- case MLXSW_SP_L3_PROTO_IPV4:
- return cmp_arg->fi != mlxsw_sp_nexthop4_group_fi(nh_grp);
- case MLXSW_SP_L3_PROTO_IPV6:
+ if (nh_grp->type != cmp_arg->type)
+ return 1;
+
+ switch (cmp_arg->type) {
+ case MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV4:
+ return cmp_arg->fi != nh_grp->ipv4.fi;
+ case MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV6:
return !mlxsw_sp_nexthop6_group_cmp(nh_grp,
cmp_arg->fib6_entry);
+ case MLXSW_SP_NEXTHOP_GROUP_TYPE_OBJ:
+ return cmp_arg->id != nh_grp->obj.id;
default:
WARN_ON(1);
return 1;
}
}
-static int
-mlxsw_sp_nexthop_group_type(const struct mlxsw_sp_nexthop_group *nh_grp)
-{
- return nh_grp->neigh_tbl->family;
-}
-
static u32 mlxsw_sp_nexthop_group_hash_obj(const void *data, u32 len, u32 seed)
{
const struct mlxsw_sp_nexthop_group *nh_grp = data;
@@ -2990,18 +3205,20 @@ static u32 mlxsw_sp_nexthop_group_hash_obj(const void *data, u32 len, u32 seed)
unsigned int val;
int i;
- switch (mlxsw_sp_nexthop_group_type(nh_grp)) {
- case AF_INET:
- fi = mlxsw_sp_nexthop4_group_fi(nh_grp);
+ switch (nh_grp->type) {
+ case MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV4:
+ fi = nh_grp->ipv4.fi;
return jhash(&fi, sizeof(fi), seed);
- case AF_INET6:
- val = nh_grp->count;
- for (i = 0; i < nh_grp->count; i++) {
- nh = &nh_grp->nexthops[i];
+ case MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV6:
+ val = nh_grp->nhgi->count;
+ for (i = 0; i < nh_grp->nhgi->count; i++) {
+ nh = &nh_grp->nhgi->nexthops[i];
val ^= jhash(&nh->ifindex, sizeof(nh->ifindex), seed);
val ^= jhash(&nh->gw_addr, sizeof(nh->gw_addr), seed);
}
return jhash(&val, sizeof(val), seed);
+ case MLXSW_SP_NEXTHOP_GROUP_TYPE_OBJ:
+ return jhash(&nh_grp->obj.id, sizeof(nh_grp->obj.id), seed);
default:
WARN_ON(1);
return 0;
@@ -3031,11 +3248,13 @@ mlxsw_sp_nexthop_group_hash(const void *data, u32 len, u32 seed)
{
const struct mlxsw_sp_nexthop_group_cmp_arg *cmp_arg = data;
- switch (cmp_arg->proto) {
- case MLXSW_SP_L3_PROTO_IPV4:
+ switch (cmp_arg->type) {
+ case MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV4:
return jhash(&cmp_arg->fi, sizeof(cmp_arg->fi), seed);
- case MLXSW_SP_L3_PROTO_IPV6:
+ case MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV6:
return mlxsw_sp_nexthop6_group_hash(cmp_arg->fib6_entry, seed);
+ case MLXSW_SP_NEXTHOP_GROUP_TYPE_OBJ:
+ return jhash(&cmp_arg->id, sizeof(cmp_arg->id), seed);
default:
WARN_ON(1);
return 0;
@@ -3052,8 +3271,8 @@ static const struct rhashtable_params mlxsw_sp_nexthop_group_ht_params = {
static int mlxsw_sp_nexthop_group_insert(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_nexthop_group *nh_grp)
{
- if (mlxsw_sp_nexthop_group_type(nh_grp) == AF_INET6 &&
- !nh_grp->gateway)
+ if (nh_grp->type == MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV6 &&
+ !nh_grp->nhgi->gateway)
return 0;
return rhashtable_insert_fast(&mlxsw_sp->router->nexthop_group_ht,
@@ -3064,8 +3283,8 @@ static int mlxsw_sp_nexthop_group_insert(struct mlxsw_sp *mlxsw_sp,
static void mlxsw_sp_nexthop_group_remove(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_nexthop_group *nh_grp)
{
- if (mlxsw_sp_nexthop_group_type(nh_grp) == AF_INET6 &&
- !nh_grp->gateway)
+ if (nh_grp->type == MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV6 &&
+ !nh_grp->nhgi->gateway)
return;
rhashtable_remove_fast(&mlxsw_sp->router->nexthop_group_ht,
@@ -3079,7 +3298,7 @@ mlxsw_sp_nexthop4_group_lookup(struct mlxsw_sp *mlxsw_sp,
{
struct mlxsw_sp_nexthop_group_cmp_arg cmp_arg;
- cmp_arg.proto = MLXSW_SP_L3_PROTO_IPV4;
+ cmp_arg.type = MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV4;
cmp_arg.fi = fi;
return rhashtable_lookup_fast(&mlxsw_sp->router->nexthop_group_ht,
&cmp_arg,
@@ -3092,7 +3311,7 @@ mlxsw_sp_nexthop6_group_lookup(struct mlxsw_sp *mlxsw_sp,
{
struct mlxsw_sp_nexthop_group_cmp_arg cmp_arg;
- cmp_arg.proto = MLXSW_SP_L3_PROTO_IPV6;
+ cmp_arg.type = MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV6;
cmp_arg.fib6_entry = fib6_entry;
return rhashtable_lookup_fast(&mlxsw_sp->router->nexthop_group_ht,
&cmp_arg,
@@ -3128,7 +3347,8 @@ mlxsw_sp_nexthop_lookup(struct mlxsw_sp *mlxsw_sp,
}
static int mlxsw_sp_adj_index_mass_update_vr(struct mlxsw_sp *mlxsw_sp,
- const struct mlxsw_sp_fib *fib,
+ enum mlxsw_sp_l3proto proto,
+ u16 vr_id,
u32 adj_index, u16 ecmp_size,
u32 new_adj_index,
u16 new_ecmp_size)
@@ -3136,8 +3356,8 @@ static int mlxsw_sp_adj_index_mass_update_vr(struct mlxsw_sp *mlxsw_sp,
char raleu_pl[MLXSW_REG_RALEU_LEN];
mlxsw_reg_raleu_pack(raleu_pl,
- (enum mlxsw_reg_ralxx_protocol) fib->proto,
- fib->vr->id, adj_index, ecmp_size, new_adj_index,
+ (enum mlxsw_reg_ralxx_protocol) proto, vr_id,
+ adj_index, ecmp_size, new_adj_index,
new_ecmp_size);
return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raleu), raleu_pl);
}
@@ -3146,23 +3366,31 @@ static int mlxsw_sp_adj_index_mass_update(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_nexthop_group *nh_grp,
u32 old_adj_index, u16 old_ecmp_size)
{
- struct mlxsw_sp_fib_entry *fib_entry;
- struct mlxsw_sp_fib *fib = NULL;
+ struct mlxsw_sp_nexthop_group_info *nhgi = nh_grp->nhgi;
+ struct mlxsw_sp_nexthop_group_vr_entry *vr_entry;
int err;
- list_for_each_entry(fib_entry, &nh_grp->fib_list, nexthop_group_node) {
- if (fib == fib_entry->fib_node->fib)
- continue;
- fib = fib_entry->fib_node->fib;
- err = mlxsw_sp_adj_index_mass_update_vr(mlxsw_sp, fib,
+ list_for_each_entry(vr_entry, &nh_grp->vr_list, list) {
+ err = mlxsw_sp_adj_index_mass_update_vr(mlxsw_sp,
+ vr_entry->key.proto,
+ vr_entry->key.vr_id,
old_adj_index,
old_ecmp_size,
- nh_grp->adj_index,
- nh_grp->ecmp_size);
+ nhgi->adj_index,
+ nhgi->ecmp_size);
if (err)
- return err;
+ goto err_mass_update_vr;
}
return 0;
+
+err_mass_update_vr:
+ list_for_each_entry_continue_reverse(vr_entry, &nh_grp->vr_list, list)
+ mlxsw_sp_adj_index_mass_update_vr(mlxsw_sp, vr_entry->key.proto,
+ vr_entry->key.vr_id,
+ nhgi->adj_index,
+ nhgi->ecmp_size,
+ old_adj_index, old_ecmp_size);
+ return err;
}
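The unwind above follows the standard kernel rollback idiom; a generic
sketch for reference (editorial illustration with placeholder apply() and
undo() helpers):

	list_for_each_entry(e, &list, node) {
		err = apply(e);
		if (err)
			goto err_apply;
	}
	return 0;

err_apply:
	list_for_each_entry_continue_reverse(e, &list, node)
		undo(e);
	return err;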
static int __mlxsw_sp_nexthop_update(struct mlxsw_sp *mlxsw_sp, u32 adj_index,
@@ -3173,8 +3401,12 @@ static int __mlxsw_sp_nexthop_update(struct mlxsw_sp *mlxsw_sp, u32 adj_index,
mlxsw_reg_ratr_pack(ratr_pl, MLXSW_REG_RATR_OP_WRITE_WRITE_ENTRY,
true, MLXSW_REG_RATR_TYPE_ETHERNET,
- adj_index, neigh_entry->rif);
- mlxsw_reg_ratr_eth_entry_pack(ratr_pl, neigh_entry->ha);
+ adj_index, nh->rif->rif_index);
+ if (nh->discard)
+ mlxsw_reg_ratr_trap_action_set(ratr_pl,
+ MLXSW_REG_RATR_TRAP_ACTION_DISCARD_ERRORS);
+ else
+ mlxsw_reg_ratr_eth_entry_pack(ratr_pl, neigh_entry->ha);
if (nh->counter_valid)
mlxsw_reg_ratr_counter_pack(ratr_pl, nh->counter_index, true);
else
@@ -3229,15 +3461,15 @@ static int mlxsw_sp_nexthop_ipip_update(struct mlxsw_sp *mlxsw_sp,
static int
mlxsw_sp_nexthop_group_update(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_nexthop_group *nh_grp,
+ struct mlxsw_sp_nexthop_group_info *nhgi,
bool reallocate)
{
- u32 adj_index = nh_grp->adj_index; /* base */
+ u32 adj_index = nhgi->adj_index; /* base */
struct mlxsw_sp_nexthop *nh;
int i;
- for (i = 0; i < nh_grp->count; i++) {
- nh = &nh_grp->nexthops[i];
+ for (i = 0; i < nhgi->count; i++) {
+ nh = &nhgi->nexthops[i];
if (!nh->should_offload) {
nh->offloaded = 0;
@@ -3337,13 +3569,13 @@ static int mlxsw_sp_fix_adj_grp_size(struct mlxsw_sp *mlxsw_sp,
}
static void
-mlxsw_sp_nexthop_group_normalize(struct mlxsw_sp_nexthop_group *nh_grp)
+mlxsw_sp_nexthop_group_normalize(struct mlxsw_sp_nexthop_group_info *nhgi)
{
int i, g = 0, sum_norm_weight = 0;
struct mlxsw_sp_nexthop *nh;
- for (i = 0; i < nh_grp->count; i++) {
- nh = &nh_grp->nexthops[i];
+ for (i = 0; i < nhgi->count; i++) {
+ nh = &nhgi->nexthops[i];
if (!nh->should_offload)
continue;
@@ -3353,8 +3585,8 @@ mlxsw_sp_nexthop_group_normalize(struct mlxsw_sp_nexthop_group *nh_grp)
g = nh->nh_weight;
}
- for (i = 0; i < nh_grp->count; i++) {
- nh = &nh_grp->nexthops[i];
+ for (i = 0; i < nhgi->count; i++) {
+ nh = &nhgi->nexthops[i];
if (!nh->should_offload)
continue;
@@ -3362,18 +3594,18 @@ mlxsw_sp_nexthop_group_normalize(struct mlxsw_sp_nexthop_group *nh_grp)
sum_norm_weight += nh->norm_nh_weight;
}
- nh_grp->sum_norm_weight = sum_norm_weight;
+ nhgi->sum_norm_weight = sum_norm_weight;
}
static void
-mlxsw_sp_nexthop_group_rebalance(struct mlxsw_sp_nexthop_group *nh_grp)
+mlxsw_sp_nexthop_group_rebalance(struct mlxsw_sp_nexthop_group_info *nhgi)
{
- int total = nh_grp->sum_norm_weight;
- u16 ecmp_size = nh_grp->ecmp_size;
int i, weight = 0, lower_bound = 0;
+ int total = nhgi->sum_norm_weight;
+ u16 ecmp_size = nhgi->ecmp_size;
- for (i = 0; i < nh_grp->count; i++) {
- struct mlxsw_sp_nexthop *nh = &nh_grp->nexthops[i];
+ for (i = 0; i < nhgi->count; i++) {
+ struct mlxsw_sp_nexthop *nh = &nhgi->nexthops[i];
int upper_bound;
if (!nh->should_offload)
@@ -3395,8 +3627,8 @@ mlxsw_sp_nexthop4_group_offload_refresh(struct mlxsw_sp *mlxsw_sp,
{
int i;
- for (i = 0; i < nh_grp->count; i++) {
- struct mlxsw_sp_nexthop *nh = &nh_grp->nexthops[i];
+ for (i = 0; i < nh_grp->nhgi->count; i++) {
+ struct mlxsw_sp_nexthop *nh = &nh_grp->nhgi->nexthops[i];
if (nh->offloaded)
nh->key.fib_nh->fib_nh_flags |= RTNH_F_OFFLOAD;
@@ -3439,39 +3671,59 @@ mlxsw_sp_nexthop6_group_offload_refresh(struct mlxsw_sp *mlxsw_sp,
}
static void
+mlxsw_sp_nexthop_obj_group_offload_refresh(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_nexthop_group *nh_grp)
+{
+ /* Do not update the flags if the nexthop group is being destroyed
+ * since:
+ * 1. The nexthop object is being deleted, in which case the flags are
+ * irrelevant.
+ * 2. The nexthop group was replaced by a newer group, in which case
+ * the flags of the nexthop object were already updated based on the
+ * new group.
+ */
+ if (nh_grp->can_destroy)
+ return;
+
+ nexthop_set_hw_flags(mlxsw_sp_net(mlxsw_sp), nh_grp->obj.id,
+ nh_grp->nhgi->adj_index_valid, false);
+}
+
+static void
mlxsw_sp_nexthop_group_offload_refresh(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_nexthop_group *nh_grp)
{
- switch (mlxsw_sp_nexthop_group_type(nh_grp)) {
- case AF_INET:
+ switch (nh_grp->type) {
+ case MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV4:
mlxsw_sp_nexthop4_group_offload_refresh(mlxsw_sp, nh_grp);
break;
- case AF_INET6:
+ case MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV6:
mlxsw_sp_nexthop6_group_offload_refresh(mlxsw_sp, nh_grp);
break;
+ case MLXSW_SP_NEXTHOP_GROUP_TYPE_OBJ:
+ mlxsw_sp_nexthop_obj_group_offload_refresh(mlxsw_sp, nh_grp);
+ break;
}
}
-static void
+static int
mlxsw_sp_nexthop_group_refresh(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_nexthop_group *nh_grp)
{
+ struct mlxsw_sp_nexthop_group_info *nhgi = nh_grp->nhgi;
u16 ecmp_size, old_ecmp_size;
struct mlxsw_sp_nexthop *nh;
bool offload_change = false;
u32 adj_index;
bool old_adj_index_valid;
+ int i, err2, err = 0;
u32 old_adj_index;
- int i;
- int err;
- if (!nh_grp->gateway) {
- mlxsw_sp_nexthop_fib_entries_update(mlxsw_sp, nh_grp);
- return;
- }
+ if (!nhgi->gateway)
+ return mlxsw_sp_nexthop_fib_entries_update(mlxsw_sp, nh_grp);
- for (i = 0; i < nh_grp->count; i++) {
- nh = &nh_grp->nexthops[i];
+ for (i = 0; i < nhgi->count; i++) {
+ nh = &nhgi->nexthops[i];
if (nh->should_offload != nh->offloaded) {
offload_change = true;
@@ -3483,21 +3735,21 @@ mlxsw_sp_nexthop_group_refresh(struct mlxsw_sp *mlxsw_sp,
/* Nothing was added or removed, so no need to reallocate. Just
* update MAC on existing adjacency indexes.
*/
- err = mlxsw_sp_nexthop_group_update(mlxsw_sp, nh_grp, false);
+ err = mlxsw_sp_nexthop_group_update(mlxsw_sp, nhgi, false);
if (err) {
dev_warn(mlxsw_sp->bus_info->dev, "Failed to update neigh MAC in adjacency table.\n");
goto set_trap;
}
- return;
+ return 0;
}
- mlxsw_sp_nexthop_group_normalize(nh_grp);
- if (!nh_grp->sum_norm_weight)
+ mlxsw_sp_nexthop_group_normalize(nhgi);
+ if (!nhgi->sum_norm_weight)
/* No neigh of this group is connected, so we just set
* the trap and let everything flow through the kernel.
*/
goto set_trap;
- ecmp_size = nh_grp->sum_norm_weight;
+ ecmp_size = nhgi->sum_norm_weight;
err = mlxsw_sp_fix_adj_grp_size(mlxsw_sp, &ecmp_size);
if (err)
/* No valid allocation size available. */
@@ -3512,14 +3764,14 @@ mlxsw_sp_nexthop_group_refresh(struct mlxsw_sp *mlxsw_sp,
dev_warn(mlxsw_sp->bus_info->dev, "Failed to allocate KVD linear area for nexthop group.\n");
goto set_trap;
}
- old_adj_index_valid = nh_grp->adj_index_valid;
- old_adj_index = nh_grp->adj_index;
- old_ecmp_size = nh_grp->ecmp_size;
- nh_grp->adj_index_valid = 1;
- nh_grp->adj_index = adj_index;
- nh_grp->ecmp_size = ecmp_size;
- mlxsw_sp_nexthop_group_rebalance(nh_grp);
- err = mlxsw_sp_nexthop_group_update(mlxsw_sp, nh_grp, true);
+ old_adj_index_valid = nhgi->adj_index_valid;
+ old_adj_index = nhgi->adj_index;
+ old_ecmp_size = nhgi->ecmp_size;
+ nhgi->adj_index_valid = 1;
+ nhgi->adj_index = adj_index;
+ nhgi->ecmp_size = ecmp_size;
+ mlxsw_sp_nexthop_group_rebalance(nhgi);
+ err = mlxsw_sp_nexthop_group_update(mlxsw_sp, nhgi, true);
if (err) {
dev_warn(mlxsw_sp->bus_info->dev, "Failed to update neigh MAC in adjacency table.\n");
goto set_trap;
@@ -3536,7 +3788,7 @@ mlxsw_sp_nexthop_group_refresh(struct mlxsw_sp *mlxsw_sp,
dev_warn(mlxsw_sp->bus_info->dev, "Failed to add adjacency index to fib entries.\n");
goto set_trap;
}
- return;
+ return 0;
}
err = mlxsw_sp_adj_index_mass_update(mlxsw_sp, nh_grp,
@@ -3548,22 +3800,23 @@ mlxsw_sp_nexthop_group_refresh(struct mlxsw_sp *mlxsw_sp,
goto set_trap;
}
- return;
+ return 0;
set_trap:
- old_adj_index_valid = nh_grp->adj_index_valid;
- nh_grp->adj_index_valid = 0;
- for (i = 0; i < nh_grp->count; i++) {
- nh = &nh_grp->nexthops[i];
+ old_adj_index_valid = nhgi->adj_index_valid;
+ nhgi->adj_index_valid = 0;
+ for (i = 0; i < nhgi->count; i++) {
+ nh = &nhgi->nexthops[i];
nh->offloaded = 0;
}
- err = mlxsw_sp_nexthop_fib_entries_update(mlxsw_sp, nh_grp);
- if (err)
+ err2 = mlxsw_sp_nexthop_fib_entries_update(mlxsw_sp, nh_grp);
+ if (err2)
dev_warn(mlxsw_sp->bus_info->dev, "Failed to set traps for fib entries.\n");
mlxsw_sp_nexthop_group_offload_refresh(mlxsw_sp, nh_grp);
if (old_adj_index_valid)
mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ,
- nh_grp->ecmp_size, nh_grp->adj_index);
+ nhgi->ecmp_size, nhgi->adj_index);
+ return err;
}
static void __mlxsw_sp_nexthop_neigh_update(struct mlxsw_sp_nexthop *nh,
@@ -3589,10 +3842,9 @@ mlxsw_sp_nexthop_dead_neigh_replace(struct mlxsw_sp *mlxsw_sp,
nh = list_first_entry(&neigh_entry->nexthop_list,
struct mlxsw_sp_nexthop, neigh_list_node);
- n = neigh_lookup(nh->nh_grp->neigh_tbl, &nh->gw_addr, nh->rif->dev);
+ n = neigh_lookup(nh->neigh_tbl, &nh->gw_addr, nh->rif->dev);
if (!n) {
- n = neigh_create(nh->nh_grp->neigh_tbl, &nh->gw_addr,
- nh->rif->dev);
+ n = neigh_create(nh->neigh_tbl, &nh->gw_addr, nh->rif->dev);
if (IS_ERR(n))
return PTR_ERR(n);
neigh_event_send(n, NULL);
@@ -3615,7 +3867,7 @@ mlxsw_sp_nexthop_dead_neigh_replace(struct mlxsw_sp *mlxsw_sp,
neigh_release(old_n);
neigh_clone(n);
__mlxsw_sp_nexthop_neigh_update(nh, !entry_connected);
- mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh->nh_grp);
+ mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh->nhgi->nh_grp);
}
neigh_release(n);
@@ -3652,7 +3904,7 @@ mlxsw_sp_nexthop_neigh_update(struct mlxsw_sp *mlxsw_sp,
list_for_each_entry(nh, &neigh_entry->nexthop_list,
neigh_list_node) {
__mlxsw_sp_nexthop_neigh_update(nh, removing);
- mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh->nh_grp);
+ mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh->nhgi->nh_grp);
}
}
@@ -3683,7 +3935,7 @@ static int mlxsw_sp_nexthop_neigh_init(struct mlxsw_sp *mlxsw_sp,
u8 nud_state, dead;
int err;
- if (!nh->nh_grp->gateway || nh->neigh_entry)
+ if (!nh->nhgi->gateway || nh->neigh_entry)
return 0;
/* Take a reference of neigh here ensuring that neigh would
@@ -3691,10 +3943,9 @@ static int mlxsw_sp_nexthop_neigh_init(struct mlxsw_sp *mlxsw_sp,
* The reference is taken either in neigh_lookup() or
* in neigh_create() in case n is not found.
*/
- n = neigh_lookup(nh->nh_grp->neigh_tbl, &nh->gw_addr, nh->rif->dev);
+ n = neigh_lookup(nh->neigh_tbl, &nh->gw_addr, nh->rif->dev);
if (!n) {
- n = neigh_create(nh->nh_grp->neigh_tbl, &nh->gw_addr,
- nh->rif->dev);
+ n = neigh_create(nh->neigh_tbl, &nh->gw_addr, nh->rif->dev);
if (IS_ERR(n))
return PTR_ERR(n);
neigh_event_send(n, NULL);
@@ -3775,7 +4026,7 @@ static void mlxsw_sp_nexthop_ipip_init(struct mlxsw_sp *mlxsw_sp,
{
bool removing;
- if (!nh->nh_grp->gateway || nh->ipip_entry)
+ if (!nh->nhgi->gateway || nh->ipip_entry)
return;
nh->ipip_entry = ipip_entry;
@@ -3807,27 +4058,11 @@ static bool mlxsw_sp_nexthop4_ipip_type(const struct mlxsw_sp *mlxsw_sp,
mlxsw_sp_netdev_ipip_type(mlxsw_sp, dev, p_ipipt);
}
-static void mlxsw_sp_nexthop_type_fini(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_nexthop *nh)
-{
- switch (nh->type) {
- case MLXSW_SP_NEXTHOP_TYPE_ETH:
- mlxsw_sp_nexthop_neigh_fini(mlxsw_sp, nh);
- mlxsw_sp_nexthop_rif_fini(nh);
- break;
- case MLXSW_SP_NEXTHOP_TYPE_IPIP:
- mlxsw_sp_nexthop_rif_fini(nh);
- mlxsw_sp_nexthop_ipip_fini(mlxsw_sp, nh);
- break;
- }
-}
-
-static int mlxsw_sp_nexthop4_type_init(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_nexthop *nh,
- struct fib_nh *fib_nh)
+static int mlxsw_sp_nexthop_type_init(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_nexthop *nh,
+ const struct net_device *dev)
{
const struct mlxsw_sp_ipip_ops *ipip_ops;
- struct net_device *dev = fib_nh->fib_nh_dev;
struct mlxsw_sp_ipip_entry *ipip_entry;
struct mlxsw_sp_rif *rif;
int err;
@@ -3835,8 +4070,7 @@ static int mlxsw_sp_nexthop4_type_init(struct mlxsw_sp *mlxsw_sp,
ipip_entry = mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, dev);
if (ipip_entry) {
ipip_ops = mlxsw_sp->router->ipip_ops_arr[ipip_entry->ipipt];
- if (ipip_ops->can_offload(mlxsw_sp, dev,
- MLXSW_SP_L3_PROTO_IPV4)) {
+ if (ipip_ops->can_offload(mlxsw_sp, dev)) {
nh->type = MLXSW_SP_NEXTHOP_TYPE_IPIP;
mlxsw_sp_nexthop_ipip_init(mlxsw_sp, nh, ipip_entry);
return 0;
@@ -3860,10 +4094,19 @@ err_neigh_init:
return err;
}
-static void mlxsw_sp_nexthop4_type_fini(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_nexthop *nh)
+static void mlxsw_sp_nexthop_type_fini(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_nexthop *nh)
{
- mlxsw_sp_nexthop_type_fini(mlxsw_sp, nh);
+ switch (nh->type) {
+ case MLXSW_SP_NEXTHOP_TYPE_ETH:
+ mlxsw_sp_nexthop_neigh_fini(mlxsw_sp, nh);
+ mlxsw_sp_nexthop_rif_fini(nh);
+ break;
+ case MLXSW_SP_NEXTHOP_TYPE_IPIP:
+ mlxsw_sp_nexthop_rif_fini(nh);
+ mlxsw_sp_nexthop_ipip_fini(mlxsw_sp, nh);
+ break;
+ }
}
static int mlxsw_sp_nexthop4_init(struct mlxsw_sp *mlxsw_sp,
@@ -3875,7 +4118,7 @@ static int mlxsw_sp_nexthop4_init(struct mlxsw_sp *mlxsw_sp,
struct in_device *in_dev;
int err;
- nh->nh_grp = nh_grp;
+ nh->nhgi = nh_grp->nhgi;
nh->key.fib_nh = fib_nh;
#ifdef CONFIG_IP_ROUTE_MULTIPATH
nh->nh_weight = fib_nh->fib_nh_weight;
@@ -3883,6 +4126,7 @@ static int mlxsw_sp_nexthop4_init(struct mlxsw_sp *mlxsw_sp,
nh->nh_weight = 1;
#endif
memcpy(&nh->gw_addr, &fib_nh->fib_nh_gw4, sizeof(fib_nh->fib_nh_gw4));
+ nh->neigh_tbl = &arp_tbl;
err = mlxsw_sp_nexthop_insert(mlxsw_sp, nh);
if (err)
return err;
@@ -3892,6 +4136,7 @@ static int mlxsw_sp_nexthop4_init(struct mlxsw_sp *mlxsw_sp,
if (!dev)
return 0;
+ nh->ifindex = dev->ifindex;
rcu_read_lock();
in_dev = __in_dev_get_rcu(dev);
@@ -3902,7 +4147,7 @@ static int mlxsw_sp_nexthop4_init(struct mlxsw_sp *mlxsw_sp,
}
rcu_read_unlock();
- err = mlxsw_sp_nexthop4_type_init(mlxsw_sp, nh, fib_nh);
+ err = mlxsw_sp_nexthop_type_init(mlxsw_sp, nh, dev);
if (err)
goto err_nexthop_neigh_init;
@@ -3916,7 +4161,7 @@ err_nexthop_neigh_init:
static void mlxsw_sp_nexthop4_fini(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_nexthop *nh)
{
- mlxsw_sp_nexthop4_type_fini(mlxsw_sp, nh);
+ mlxsw_sp_nexthop_type_fini(mlxsw_sp, nh);
list_del(&nh->router_list_node);
mlxsw_sp_nexthop_counter_free(mlxsw_sp, nh);
mlxsw_sp_nexthop_remove(mlxsw_sp, nh);
@@ -3938,14 +4183,14 @@ static void mlxsw_sp_nexthop4_event(struct mlxsw_sp *mlxsw_sp,
switch (event) {
case FIB_EVENT_NH_ADD:
- mlxsw_sp_nexthop4_type_init(mlxsw_sp, nh, fib_nh);
+ mlxsw_sp_nexthop_type_init(mlxsw_sp, nh, fib_nh->fib_nh_dev);
break;
case FIB_EVENT_NH_DEL:
- mlxsw_sp_nexthop4_type_fini(mlxsw_sp, nh);
+ mlxsw_sp_nexthop_type_fini(mlxsw_sp, nh);
break;
}
- mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh->nh_grp);
+ mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh->nhgi->nh_grp);
}
static void mlxsw_sp_nexthop_rif_update(struct mlxsw_sp *mlxsw_sp,
@@ -3968,7 +4213,7 @@ static void mlxsw_sp_nexthop_rif_update(struct mlxsw_sp *mlxsw_sp,
}
__mlxsw_sp_nexthop_neigh_update(nh, removing);
- mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh->nh_grp);
+ mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh->nhgi->nh_grp);
}
}
@@ -3991,8 +4236,448 @@ static void mlxsw_sp_nexthop_rif_gone_sync(struct mlxsw_sp *mlxsw_sp,
list_for_each_entry_safe(nh, tmp, &rif->nexthop_list, rif_list_node) {
mlxsw_sp_nexthop_type_fini(mlxsw_sp, nh);
- mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh->nh_grp);
+ mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh->nhgi->nh_grp);
+ }
+}
+
+static int
+mlxsw_sp_nexthop_obj_single_validate(struct mlxsw_sp *mlxsw_sp,
+ const struct nh_notifier_single_info *nh,
+ struct netlink_ext_ack *extack)
+{
+ int err = -EINVAL;
+
+ if (nh->is_fdb)
+ NL_SET_ERR_MSG_MOD(extack, "FDB nexthops are not supported");
+ else if (nh->has_encap)
+ NL_SET_ERR_MSG_MOD(extack, "Encapsulating nexthops are not supported");
+ else
+ err = 0;
+
+ return err;
+}
+
+static int
+mlxsw_sp_nexthop_obj_group_validate(struct mlxsw_sp *mlxsw_sp,
+ const struct nh_notifier_grp_info *nh_grp,
+ struct netlink_ext_ack *extack)
+{
+ int i;
+
+ if (nh_grp->is_fdb) {
+ NL_SET_ERR_MSG_MOD(extack, "FDB nexthop groups are not supported");
+ return -EINVAL;
+ }
+
+ for (i = 0; i < nh_grp->num_nh; i++) {
+ const struct nh_notifier_single_info *nh;
+ int err;
+
+ nh = &nh_grp->nh_entries[i].nh;
+ err = mlxsw_sp_nexthop_obj_single_validate(mlxsw_sp, nh,
+ extack);
+ if (err)
+ return err;
+
+ /* Device-only nexthops with an IPIP device are programmed as
+ * encapsulating adjacency entries.
+ */
+ if (!nh->gw_family && !nh->is_reject &&
+ !mlxsw_sp_netdev_ipip_type(mlxsw_sp, nh->dev, NULL)) {
+ NL_SET_ERR_MSG_MOD(extack, "Nexthop group entry does not have a gateway");
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
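
The two validators above encode the device's constraints on nexthop objects: FDB and encapsulating nexthops are rejected outright, and every group member must resolve to something the ASIC can program as an adjacency entry, that is a gateway, an IPIP tunnel device, or a blackhole. A standalone sketch of the same rules, using simplified stand-in types rather than the kernel's nh_notifier_* structures:

#include <errno.h>
#include <stdbool.h>

struct nh_model {
	bool is_fdb;
	bool has_encap;
	bool has_gateway;	/* gw_family != AF_UNSPEC */
	bool is_reject;		/* blackhole nexthop */
	bool dev_is_ipip;	/* device resolves to an IPIP tunnel */
};

/* Mirrors the single-nexthop and group-member checks; "msg" stands in
 * for the netlink extack message.
 */
static int nh_model_validate(const struct nh_model *nh, bool in_group,
			     const char **msg)
{
	if (nh->is_fdb) {
		*msg = "FDB nexthops are not supported";
		return -EINVAL;
	}
	if (nh->has_encap) {
		*msg = "Encapsulating nexthops are not supported";
		return -EINVAL;
	}
	/* Only group members must be programmable as adjacencies. */
	if (in_group && !nh->has_gateway && !nh->is_reject &&
	    !nh->dev_is_ipip) {
		*msg = "Nexthop group entry does not have a gateway";
		return -EINVAL;
	}
	return 0;
}
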
+
+static int mlxsw_sp_nexthop_obj_validate(struct mlxsw_sp *mlxsw_sp,
+ unsigned long event,
+ struct nh_notifier_info *info)
+{
+ if (event != NEXTHOP_EVENT_REPLACE)
+ return 0;
+
+ if (!info->is_grp)
+ return mlxsw_sp_nexthop_obj_single_validate(mlxsw_sp, info->nh,
+ info->extack);
+ return mlxsw_sp_nexthop_obj_group_validate(mlxsw_sp, info->nh_grp,
+ info->extack);
+}
+
+static bool mlxsw_sp_nexthop_obj_is_gateway(struct mlxsw_sp *mlxsw_sp,
+ const struct nh_notifier_info *info)
+{
+ const struct net_device *dev;
+
+ if (info->is_grp)
+ /* Already validated earlier. */
+ return true;
+
+ dev = info->nh->dev;
+ return info->nh->gw_family || info->nh->is_reject ||
+ mlxsw_sp_netdev_ipip_type(mlxsw_sp, dev, NULL);
+}
+
+static void mlxsw_sp_nexthop_obj_blackhole_init(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_nexthop *nh)
+{
+ u16 lb_rif_index = mlxsw_sp->router->lb_rif_index;
+
+ nh->discard = 1;
+ nh->should_offload = 1;
+ /* While nexthops that discard packets do not forward packets
+ * via an egress RIF, they still need to be programmed using a
+ * valid RIF, so use the loopback RIF created during init.
+ */
+ nh->rif = mlxsw_sp->router->rifs[lb_rif_index];
+}
+
+static void mlxsw_sp_nexthop_obj_blackhole_fini(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_nexthop *nh)
+{
+ nh->rif = NULL;
+ nh->should_offload = 0;
+}
+
+static int
+mlxsw_sp_nexthop_obj_init(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_nexthop_group *nh_grp,
+ struct mlxsw_sp_nexthop *nh,
+ struct nh_notifier_single_info *nh_obj, int weight)
+{
+ struct net_device *dev = nh_obj->dev;
+ int err;
+
+ nh->nhgi = nh_grp->nhgi;
+ nh->nh_weight = weight;
+
+ switch (nh_obj->gw_family) {
+ case AF_INET:
+ memcpy(&nh->gw_addr, &nh_obj->ipv4, sizeof(nh_obj->ipv4));
+ nh->neigh_tbl = &arp_tbl;
+ break;
+ case AF_INET6:
+ memcpy(&nh->gw_addr, &nh_obj->ipv6, sizeof(nh_obj->ipv6));
+#if IS_ENABLED(CONFIG_IPV6)
+ nh->neigh_tbl = &nd_tbl;
+#endif
+ break;
+ }
+
+ mlxsw_sp_nexthop_counter_alloc(mlxsw_sp, nh);
+ list_add_tail(&nh->router_list_node, &mlxsw_sp->router->nexthop_list);
+ nh->ifindex = dev->ifindex;
+
+ err = mlxsw_sp_nexthop_type_init(mlxsw_sp, nh, dev);
+ if (err)
+ goto err_type_init;
+
+ if (nh_obj->is_reject)
+ mlxsw_sp_nexthop_obj_blackhole_init(mlxsw_sp, nh);
+
+ return 0;
+
+err_type_init:
+ list_del(&nh->router_list_node);
+ mlxsw_sp_nexthop_counter_free(mlxsw_sp, nh);
+ return err;
+}
+
+static void mlxsw_sp_nexthop_obj_fini(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_nexthop *nh)
+{
+ if (nh->discard)
+ mlxsw_sp_nexthop_obj_blackhole_fini(mlxsw_sp, nh);
+ mlxsw_sp_nexthop_type_fini(mlxsw_sp, nh);
+ list_del(&nh->router_list_node);
+ mlxsw_sp_nexthop_counter_free(mlxsw_sp, nh);
+}
+
+static int
+mlxsw_sp_nexthop_obj_group_info_init(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_nexthop_group *nh_grp,
+ struct nh_notifier_info *info)
+{
+ unsigned int nhs = info->is_grp ? info->nh_grp->num_nh : 1;
+ struct mlxsw_sp_nexthop_group_info *nhgi;
+ struct mlxsw_sp_nexthop *nh;
+ int err, i;
+
+ nhgi = kzalloc(struct_size(nhgi, nexthops, nhs), GFP_KERNEL);
+ if (!nhgi)
+ return -ENOMEM;
+ nh_grp->nhgi = nhgi;
+ nhgi->nh_grp = nh_grp;
+ nhgi->gateway = mlxsw_sp_nexthop_obj_is_gateway(mlxsw_sp, info);
+ nhgi->count = nhs;
+ for (i = 0; i < nhgi->count; i++) {
+ struct nh_notifier_single_info *nh_obj;
+ int weight;
+
+ nh = &nhgi->nexthops[i];
+ if (info->is_grp) {
+ nh_obj = &info->nh_grp->nh_entries[i].nh;
+ weight = info->nh_grp->nh_entries[i].weight;
+ } else {
+ nh_obj = info->nh;
+ weight = 1;
+ }
+ err = mlxsw_sp_nexthop_obj_init(mlxsw_sp, nh_grp, nh, nh_obj,
+ weight);
+ if (err)
+ goto err_nexthop_obj_init;
+ }
+ err = mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh_grp);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(info->extack, "Failed to write adjacency entries to the device");
+ goto err_group_refresh;
+ }
+
+ return 0;
+
+err_group_refresh:
+ i = nhgi->count;
+err_nexthop_obj_init:
+ for (i--; i >= 0; i--) {
+ nh = &nhgi->nexthops[i];
+ mlxsw_sp_nexthop_obj_fini(mlxsw_sp, nh);
+ }
+ kfree(nhgi);
+ return err;
+}
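
The error unwinding above uses a common kernel idiom: if a step fails after all array members were initialized, the code jumps to a label that presets the loop index to the full count, so a single rollback loop handles both partial and complete initialization. A minimal standalone sketch of the idiom, with hypothetical elem_init()/elem_fini()/post_init_step() helpers:

struct elem { int initialized; };

static int elem_init(struct elem *e) { e->initialized = 1; return 0; }
static void elem_fini(struct elem *e) { e->initialized = 0; }
static int post_init_step(struct elem *arr, int n) { return 0; }

static int array_init_all(struct elem *arr, int n)
{
	int err, i;

	for (i = 0; i < n; i++) {
		err = elem_init(&arr[i]);
		if (err)
			goto err_elem_init;	/* members [0, i) are live */
	}
	err = post_init_step(arr, n);
	if (err)
		goto err_post_init;
	return 0;

err_post_init:
	i = n;	/* every member is live; unwind them all */
err_elem_init:
	for (i--; i >= 0; i--)
		elem_fini(&arr[i]);
	return err;
}
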
+
+static void
+mlxsw_sp_nexthop_obj_group_info_fini(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_nexthop_group *nh_grp)
+{
+ struct mlxsw_sp_nexthop_group_info *nhgi = nh_grp->nhgi;
+ int i;
+
+ for (i = nhgi->count - 1; i >= 0; i--) {
+ struct mlxsw_sp_nexthop *nh = &nhgi->nexthops[i];
+
+ mlxsw_sp_nexthop_obj_fini(mlxsw_sp, nh);
+ }
+ mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh_grp);
+ WARN_ON_ONCE(nhgi->adj_index_valid);
+ kfree(nhgi);
+}
+
+static struct mlxsw_sp_nexthop_group *
+mlxsw_sp_nexthop_obj_group_create(struct mlxsw_sp *mlxsw_sp,
+ struct nh_notifier_info *info)
+{
+ struct mlxsw_sp_nexthop_group *nh_grp;
+ int err;
+
+ nh_grp = kzalloc(sizeof(*nh_grp), GFP_KERNEL);
+ if (!nh_grp)
+ return ERR_PTR(-ENOMEM);
+ INIT_LIST_HEAD(&nh_grp->vr_list);
+ err = rhashtable_init(&nh_grp->vr_ht,
+ &mlxsw_sp_nexthop_group_vr_ht_params);
+ if (err)
+ goto err_nexthop_group_vr_ht_init;
+ INIT_LIST_HEAD(&nh_grp->fib_list);
+ nh_grp->type = MLXSW_SP_NEXTHOP_GROUP_TYPE_OBJ;
+ nh_grp->obj.id = info->id;
+
+ err = mlxsw_sp_nexthop_obj_group_info_init(mlxsw_sp, nh_grp, info);
+ if (err)
+ goto err_nexthop_group_info_init;
+
+ nh_grp->can_destroy = false;
+
+ return nh_grp;
+
+err_nexthop_group_info_init:
+ rhashtable_destroy(&nh_grp->vr_ht);
+err_nexthop_group_vr_ht_init:
+ kfree(nh_grp);
+ return ERR_PTR(err);
+}
+
+static void
+mlxsw_sp_nexthop_obj_group_destroy(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_nexthop_group *nh_grp)
+{
+ if (!nh_grp->can_destroy)
+ return;
+ mlxsw_sp_nexthop_obj_group_info_fini(mlxsw_sp, nh_grp);
+ WARN_ON_ONCE(!list_empty(&nh_grp->fib_list));
+ WARN_ON_ONCE(!list_empty(&nh_grp->vr_list));
+ rhashtable_destroy(&nh_grp->vr_ht);
+ kfree(nh_grp);
+}
+
+static struct mlxsw_sp_nexthop_group *
+mlxsw_sp_nexthop_obj_group_lookup(struct mlxsw_sp *mlxsw_sp, u32 id)
+{
+ struct mlxsw_sp_nexthop_group_cmp_arg cmp_arg;
+
+ cmp_arg.type = MLXSW_SP_NEXTHOP_GROUP_TYPE_OBJ;
+ cmp_arg.id = id;
+ return rhashtable_lookup_fast(&mlxsw_sp->router->nexthop_group_ht,
+ &cmp_arg,
+ mlxsw_sp_nexthop_group_ht_params);
+}
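
Object-based groups share one rhashtable with the IPv4 and IPv6 groups; the lookup above builds a comparison argument holding the group type plus the type-specific key (the nexthop object ID here, a fib_info pointer for IPv4). A simplified model of that discriminated key, with plain C types in place of the rhashtable machinery:

#include <stdbool.h>

enum grp_type { GRP_IPV4, GRP_IPV6, GRP_OBJ };

struct grp_key {
	enum grp_type type;
	union {
		const void *fi;		/* GRP_IPV4: struct fib_info * */
		unsigned int id;	/* GRP_OBJ: nexthop object ID */
	};
};

static bool grp_key_eq(const struct grp_key *a, const struct grp_key *b)
{
	if (a->type != b->type)
		return false;
	switch (a->type) {
	case GRP_IPV4:
		return a->fi == b->fi;
	case GRP_OBJ:
		return a->id == b->id;
	default:
		/* IPv6 groups are compared member-by-member. */
		return false;
	}
}
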
+
+static int mlxsw_sp_nexthop_obj_group_add(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_nexthop_group *nh_grp)
+{
+ return mlxsw_sp_nexthop_group_insert(mlxsw_sp, nh_grp);
+}
+
+static int
+mlxsw_sp_nexthop_obj_group_replace(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_nexthop_group *nh_grp,
+ struct mlxsw_sp_nexthop_group *old_nh_grp,
+ struct netlink_ext_ack *extack)
+{
+ struct mlxsw_sp_nexthop_group_info *old_nhgi = old_nh_grp->nhgi;
+ struct mlxsw_sp_nexthop_group_info *new_nhgi = nh_grp->nhgi;
+ int err;
+
+ old_nh_grp->nhgi = new_nhgi;
+ new_nhgi->nh_grp = old_nh_grp;
+ nh_grp->nhgi = old_nhgi;
+ old_nhgi->nh_grp = nh_grp;
+
+ if (old_nhgi->adj_index_valid && new_nhgi->adj_index_valid) {
+ /* Both the old adjacency index and the new one are valid.
+ * Routes are currently using the old one. Tell the device to
+ * replace the old adjacency index with the new one.
+ */
+ err = mlxsw_sp_adj_index_mass_update(mlxsw_sp, old_nh_grp,
+ old_nhgi->adj_index,
+ old_nhgi->ecmp_size);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "Failed to replace old adjacency index with new one");
+ goto err_out;
+ }
+ } else if (old_nhgi->adj_index_valid && !new_nhgi->adj_index_valid) {
+ /* The old adjacency index is valid, while the new one is not.
+ * Iterate over all the routes using the group and change them
+ * to trap packets to the CPU.
+ */
+ err = mlxsw_sp_nexthop_fib_entries_update(mlxsw_sp, old_nh_grp);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "Failed to update routes to trap packets");
+ goto err_out;
+ }
+ } else if (!old_nhgi->adj_index_valid && new_nhgi->adj_index_valid) {
+ /* The old adjacency index is invalid, while the new one is.
+ * Iterate over all the routes using the group and change them
+ * to forward packets using the new valid index.
+ */
+ err = mlxsw_sp_nexthop_fib_entries_update(mlxsw_sp, old_nh_grp);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "Failed to update routes to forward packets");
+ goto err_out;
+ }
+ }
+
+ /* Make sure the flags are set / cleared based on the new nexthop group
+ * information.
+ */
+ mlxsw_sp_nexthop_obj_group_offload_refresh(mlxsw_sp, old_nh_grp);
+
+ /* At this point 'nh_grp' is just a shell that is not used by anyone
+ * and its nexthop group info is the old info that was just replaced
+ * with the new one. Remove it.
+ */
+ nh_grp->can_destroy = true;
+ mlxsw_sp_nexthop_obj_group_destroy(mlxsw_sp, nh_grp);
+
+ return 0;
+
+err_out:
+ old_nhgi->nh_grp = old_nh_grp;
+ nh_grp->nhgi = new_nhgi;
+ new_nhgi->nh_grp = nh_grp;
+ old_nh_grp->nhgi = old_nhgi;
+ return err;
+}
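
The replace path above first swaps the group-info pointers, so existing routes keep referencing the group object they already point at while the new members take effect, and then acts on the validity of the old and new adjacency indexes. The four cases, as a standalone decision function with hypothetical action names:

#include <stdbool.h>

enum replace_action {
	REPLACE_ADJ_INDEX,	/* in-place index replace in the device */
	ROUTES_TO_TRAP,		/* new group cannot forward; trap to CPU */
	ROUTES_TO_FORWARD,	/* new group can forward again */
	NO_ROUTE_UPDATE,	/* neither index is valid; nothing to redo */
};

static enum replace_action replace_action(bool old_valid, bool new_valid)
{
	if (old_valid && new_valid)
		return REPLACE_ADJ_INDEX;
	if (old_valid && !new_valid)
		return ROUTES_TO_TRAP;
	if (!old_valid && new_valid)
		return ROUTES_TO_FORWARD;
	return NO_ROUTE_UPDATE;
}
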
+
+static int mlxsw_sp_nexthop_obj_new(struct mlxsw_sp *mlxsw_sp,
+ struct nh_notifier_info *info)
+{
+ struct mlxsw_sp_nexthop_group *nh_grp, *old_nh_grp;
+ struct netlink_ext_ack *extack = info->extack;
+ int err;
+
+ nh_grp = mlxsw_sp_nexthop_obj_group_create(mlxsw_sp, info);
+ if (IS_ERR(nh_grp))
+ return PTR_ERR(nh_grp);
+
+ old_nh_grp = mlxsw_sp_nexthop_obj_group_lookup(mlxsw_sp, info->id);
+ if (!old_nh_grp)
+ err = mlxsw_sp_nexthop_obj_group_add(mlxsw_sp, nh_grp);
+ else
+ err = mlxsw_sp_nexthop_obj_group_replace(mlxsw_sp, nh_grp,
+ old_nh_grp, extack);
+
+ if (err) {
+ nh_grp->can_destroy = true;
+ mlxsw_sp_nexthop_obj_group_destroy(mlxsw_sp, nh_grp);
+ }
+
+ return err;
+}
+
+static void mlxsw_sp_nexthop_obj_del(struct mlxsw_sp *mlxsw_sp,
+ struct nh_notifier_info *info)
+{
+ struct mlxsw_sp_nexthop_group *nh_grp;
+
+ nh_grp = mlxsw_sp_nexthop_obj_group_lookup(mlxsw_sp, info->id);
+ if (!nh_grp)
+ return;
+
+ nh_grp->can_destroy = true;
+ mlxsw_sp_nexthop_group_remove(mlxsw_sp, nh_grp);
+
+ /* If the group still has routes using it, then defer the delete
+ * operation until the last route using it is deleted.
+ */
+ if (!list_empty(&nh_grp->fib_list))
+ return;
+ mlxsw_sp_nexthop_obj_group_destroy(mlxsw_sp, nh_grp);
+}
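
Deletion above is deliberately two-phase: the group is marked destroyable and unlinked from the table immediately, but freeing is deferred until the last route drops off its fib_list (see the group-put helpers). A minimal standalone model of that deferral, with a route counter standing in for the list and heap-allocated groups assumed:

#include <stdlib.h>

struct group {
	int can_destroy;
	int nr_routes;	/* stands in for nh_grp->fib_list */
};

static void group_maybe_destroy(struct group *grp)
{
	if (!grp->can_destroy || grp->nr_routes > 0)
		return;
	free(grp);
}

/* Notifier path: mark and unlink; free only if no route uses it. */
static void group_del(struct group *grp)
{
	grp->can_destroy = 1;
	group_maybe_destroy(grp);
}

/* Route teardown path: the last user performs the deferred free. */
static void route_unlinked(struct group *grp)
{
	grp->nr_routes--;
	group_maybe_destroy(grp);
}
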
+
+static int mlxsw_sp_nexthop_obj_event(struct notifier_block *nb,
+ unsigned long event, void *ptr)
+{
+ struct nh_notifier_info *info = ptr;
+ struct mlxsw_sp_router *router;
+ int err = 0;
+
+ router = container_of(nb, struct mlxsw_sp_router, nexthop_nb);
+ err = mlxsw_sp_nexthop_obj_validate(router->mlxsw_sp, event, info);
+ if (err)
+ goto out;
+
+ mutex_lock(&router->lock);
+
+ ASSERT_RTNL();
+
+ switch (event) {
+ case NEXTHOP_EVENT_REPLACE:
+ err = mlxsw_sp_nexthop_obj_new(router->mlxsw_sp, info);
+ break;
+ case NEXTHOP_EVENT_DEL:
+ mlxsw_sp_nexthop_obj_del(router->mlxsw_sp, info);
+ break;
+ default:
+ break;
}
+
+ mutex_unlock(&router->lock);
+
+out:
+ return notifier_from_errno(err);
}
static bool mlxsw_sp_fi_is_gateway(const struct mlxsw_sp *mlxsw_sp,
@@ -4004,46 +4689,102 @@ static bool mlxsw_sp_fi_is_gateway(const struct mlxsw_sp *mlxsw_sp,
mlxsw_sp_nexthop4_ipip_type(mlxsw_sp, nh, NULL);
}
+static int
+mlxsw_sp_nexthop4_group_info_init(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_nexthop_group *nh_grp)
+{
+ unsigned int nhs = fib_info_num_path(nh_grp->ipv4.fi);
+ struct mlxsw_sp_nexthop_group_info *nhgi;
+ struct mlxsw_sp_nexthop *nh;
+ int err, i;
+
+ nhgi = kzalloc(struct_size(nhgi, nexthops, nhs), GFP_KERNEL);
+ if (!nhgi)
+ return -ENOMEM;
+ nh_grp->nhgi = nhgi;
+ nhgi->nh_grp = nh_grp;
+ nhgi->gateway = mlxsw_sp_fi_is_gateway(mlxsw_sp, nh_grp->ipv4.fi);
+ nhgi->count = nhs;
+ for (i = 0; i < nhgi->count; i++) {
+ struct fib_nh *fib_nh;
+
+ nh = &nhgi->nexthops[i];
+ fib_nh = fib_info_nh(nh_grp->ipv4.fi, i);
+ err = mlxsw_sp_nexthop4_init(mlxsw_sp, nh_grp, nh, fib_nh);
+ if (err)
+ goto err_nexthop4_init;
+ }
+ err = mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh_grp);
+ if (err)
+ goto err_group_refresh;
+
+ return 0;
+
+err_group_refresh:
+ i = nhgi->count;
+err_nexthop4_init:
+ for (i--; i >= 0; i--) {
+ nh = &nhgi->nexthops[i];
+ mlxsw_sp_nexthop4_fini(mlxsw_sp, nh);
+ }
+ kfree(nhgi);
+ return err;
+}
+
+static void
+mlxsw_sp_nexthop4_group_info_fini(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_nexthop_group *nh_grp)
+{
+ struct mlxsw_sp_nexthop_group_info *nhgi = nh_grp->nhgi;
+ int i;
+
+ for (i = nhgi->count - 1; i >= 0; i--) {
+ struct mlxsw_sp_nexthop *nh = &nhgi->nexthops[i];
+
+ mlxsw_sp_nexthop4_fini(mlxsw_sp, nh);
+ }
+ mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh_grp);
+ WARN_ON_ONCE(nhgi->adj_index_valid);
+ kfree(nhgi);
+}
+
static struct mlxsw_sp_nexthop_group *
mlxsw_sp_nexthop4_group_create(struct mlxsw_sp *mlxsw_sp, struct fib_info *fi)
{
- unsigned int nhs = fib_info_num_path(fi);
struct mlxsw_sp_nexthop_group *nh_grp;
- struct mlxsw_sp_nexthop *nh;
- struct fib_nh *fib_nh;
- int i;
int err;
- nh_grp = kzalloc(struct_size(nh_grp, nexthops, nhs), GFP_KERNEL);
+ nh_grp = kzalloc(sizeof(*nh_grp), GFP_KERNEL);
if (!nh_grp)
return ERR_PTR(-ENOMEM);
- nh_grp->priv = fi;
+ INIT_LIST_HEAD(&nh_grp->vr_list);
+ err = rhashtable_init(&nh_grp->vr_ht,
+ &mlxsw_sp_nexthop_group_vr_ht_params);
+ if (err)
+ goto err_nexthop_group_vr_ht_init;
INIT_LIST_HEAD(&nh_grp->fib_list);
- nh_grp->neigh_tbl = &arp_tbl;
-
- nh_grp->gateway = mlxsw_sp_fi_is_gateway(mlxsw_sp, fi);
- nh_grp->count = nhs;
+ nh_grp->type = MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV4;
+ nh_grp->ipv4.fi = fi;
fib_info_hold(fi);
- for (i = 0; i < nh_grp->count; i++) {
- nh = &nh_grp->nexthops[i];
- fib_nh = fib_info_nh(fi, i);
- err = mlxsw_sp_nexthop4_init(mlxsw_sp, nh_grp, nh, fib_nh);
- if (err)
- goto err_nexthop4_init;
- }
+
+ err = mlxsw_sp_nexthop4_group_info_init(mlxsw_sp, nh_grp);
+ if (err)
+ goto err_nexthop_group_info_init;
+
err = mlxsw_sp_nexthop_group_insert(mlxsw_sp, nh_grp);
if (err)
goto err_nexthop_group_insert;
- mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh_grp);
+
+ nh_grp->can_destroy = true;
+
return nh_grp;
err_nexthop_group_insert:
-err_nexthop4_init:
- for (i--; i >= 0; i--) {
- nh = &nh_grp->nexthops[i];
- mlxsw_sp_nexthop4_fini(mlxsw_sp, nh);
- }
+ mlxsw_sp_nexthop4_group_info_fini(mlxsw_sp, nh_grp);
+err_nexthop_group_info_init:
fib_info_put(fi);
+ rhashtable_destroy(&nh_grp->vr_ht);
+err_nexthop_group_vr_ht_init:
kfree(nh_grp);
return ERR_PTR(err);
}
@@ -4052,17 +4793,13 @@ static void
mlxsw_sp_nexthop4_group_destroy(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_nexthop_group *nh_grp)
{
- struct mlxsw_sp_nexthop *nh;
- int i;
-
+ if (!nh_grp->can_destroy)
+ return;
mlxsw_sp_nexthop_group_remove(mlxsw_sp, nh_grp);
- for (i = 0; i < nh_grp->count; i++) {
- nh = &nh_grp->nexthops[i];
- mlxsw_sp_nexthop4_fini(mlxsw_sp, nh);
- }
- mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh_grp);
- WARN_ON_ONCE(nh_grp->adj_index_valid);
- fib_info_put(mlxsw_sp_nexthop4_group_fi(nh_grp));
+ mlxsw_sp_nexthop4_group_info_fini(mlxsw_sp, nh_grp);
+ fib_info_put(nh_grp->ipv4.fi);
+ WARN_ON_ONCE(!list_empty(&nh_grp->vr_list));
+ rhashtable_destroy(&nh_grp->vr_ht);
kfree(nh_grp);
}
@@ -4072,12 +4809,21 @@ static int mlxsw_sp_nexthop4_group_get(struct mlxsw_sp *mlxsw_sp,
{
struct mlxsw_sp_nexthop_group *nh_grp;
+ if (fi->nh) {
+ nh_grp = mlxsw_sp_nexthop_obj_group_lookup(mlxsw_sp,
+ fi->nh->id);
+ if (WARN_ON_ONCE(!nh_grp))
+ return -EINVAL;
+ goto out;
+ }
+
nh_grp = mlxsw_sp_nexthop4_group_lookup(mlxsw_sp, fi);
if (!nh_grp) {
nh_grp = mlxsw_sp_nexthop4_group_create(mlxsw_sp, fi);
if (IS_ERR(nh_grp))
return PTR_ERR(nh_grp);
}
+out:
list_add_tail(&fib_entry->nexthop_group_node, &nh_grp->fib_list);
fib_entry->nh_group = nh_grp;
return 0;
@@ -4091,6 +4837,12 @@ static void mlxsw_sp_nexthop4_group_put(struct mlxsw_sp *mlxsw_sp,
list_del(&fib_entry->nexthop_group_node);
if (!list_empty(&nh_grp->fib_list))
return;
+
+ if (nh_grp->type == MLXSW_SP_NEXTHOP_GROUP_TYPE_OBJ) {
+ mlxsw_sp_nexthop_obj_group_destroy(mlxsw_sp, nh_grp);
+ return;
+ }
+
mlxsw_sp_nexthop4_group_destroy(mlxsw_sp, nh_grp);
}
@@ -4120,9 +4872,9 @@ mlxsw_sp_fib_entry_should_offload(const struct mlxsw_sp_fib_entry *fib_entry)
switch (fib_entry->type) {
case MLXSW_SP_FIB_ENTRY_TYPE_REMOTE:
- return !!nh_group->adj_index_valid;
+ return !!nh_group->nhgi->adj_index_valid;
case MLXSW_SP_FIB_ENTRY_TYPE_LOCAL:
- return !!nh_group->nh_rif;
+ return !!nh_group->nhgi->nh_rif;
case MLXSW_SP_FIB_ENTRY_TYPE_BLACKHOLE:
case MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP:
case MLXSW_SP_FIB_ENTRY_TYPE_NVE_DECAP:
@@ -4138,8 +4890,8 @@ mlxsw_sp_rt6_nexthop(struct mlxsw_sp_nexthop_group *nh_grp,
{
int i;
- for (i = 0; i < nh_grp->count; i++) {
- struct mlxsw_sp_nexthop *nh = &nh_grp->nexthops[i];
+ for (i = 0; i < nh_grp->nhgi->count; i++) {
+ struct mlxsw_sp_nexthop *nh = &nh_grp->nhgi->nexthops[i];
struct fib6_info *rt = mlxsw_sp_rt6->rt;
if (nh->rif && nh->rif->dev == rt->fib6_nh->fib_nh_dev &&
@@ -4156,7 +4908,6 @@ static void
mlxsw_sp_fib4_entry_hw_flags_set(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_fib_entry *fib_entry)
{
- struct fib_info *fi = mlxsw_sp_nexthop4_group_fi(fib_entry->nh_group);
u32 *p_dst = (u32 *) fib_entry->fib_node->key.addr;
int dst_len = fib_entry->fib_node->key.prefix_len;
struct mlxsw_sp_fib4_entry *fib4_entry;
@@ -4166,7 +4917,7 @@ mlxsw_sp_fib4_entry_hw_flags_set(struct mlxsw_sp *mlxsw_sp,
should_offload = mlxsw_sp_fib_entry_should_offload(fib_entry);
fib4_entry = container_of(fib_entry, struct mlxsw_sp_fib4_entry,
common);
- fri.fi = fi;
+ fri.fi = fib4_entry->fi;
fri.tb_id = fib4_entry->tb_id;
fri.dst = cpu_to_be32(*p_dst);
fri.dst_len = dst_len;
@@ -4181,7 +4932,6 @@ static void
mlxsw_sp_fib4_entry_hw_flags_clear(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_fib_entry *fib_entry)
{
- struct fib_info *fi = mlxsw_sp_nexthop4_group_fi(fib_entry->nh_group);
u32 *p_dst = (u32 *) fib_entry->fib_node->key.addr;
int dst_len = fib_entry->fib_node->key.prefix_len;
struct mlxsw_sp_fib4_entry *fib4_entry;
@@ -4189,7 +4939,7 @@ mlxsw_sp_fib4_entry_hw_flags_clear(struct mlxsw_sp *mlxsw_sp,
fib4_entry = container_of(fib_entry, struct mlxsw_sp_fib4_entry,
common);
- fri.fi = fi;
+ fri.fi = fib4_entry->fi;
fri.tb_id = fib4_entry->tb_id;
fri.dst = cpu_to_be32(*p_dst);
fri.dst_len = dst_len;
@@ -4264,13 +5014,14 @@ mlxsw_sp_fib_entry_hw_flags_clear(struct mlxsw_sp *mlxsw_sp,
static void
mlxsw_sp_fib_entry_hw_flags_refresh(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_fib_entry *fib_entry,
- enum mlxsw_reg_ralue_op op)
+ enum mlxsw_sp_fib_entry_op op)
{
switch (op) {
- case MLXSW_REG_RALUE_OP_WRITE_WRITE:
+ case MLXSW_SP_FIB_ENTRY_OP_WRITE:
+ case MLXSW_SP_FIB_ENTRY_OP_UPDATE:
mlxsw_sp_fib_entry_hw_flags_set(mlxsw_sp, fib_entry);
break;
- case MLXSW_REG_RALUE_OP_WRITE_DELETE:
+ case MLXSW_SP_FIB_ENTRY_OP_DELETE:
mlxsw_sp_fib_entry_hw_flags_clear(mlxsw_sp, fib_entry);
break;
default:
@@ -4278,33 +5029,133 @@ mlxsw_sp_fib_entry_hw_flags_refresh(struct mlxsw_sp *mlxsw_sp,
}
}
+struct mlxsw_sp_fib_entry_op_ctx_basic {
+ char ralue_pl[MLXSW_REG_RALUE_LEN];
+};
+
static void
-mlxsw_sp_fib_entry_ralue_pack(char *ralue_pl,
- const struct mlxsw_sp_fib_entry *fib_entry,
- enum mlxsw_reg_ralue_op op)
+mlxsw_sp_router_ll_basic_fib_entry_pack(struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
+ enum mlxsw_sp_l3proto proto,
+ enum mlxsw_sp_fib_entry_op op,
+ u16 virtual_router, u8 prefix_len,
+ unsigned char *addr,
+ struct mlxsw_sp_fib_entry_priv *priv)
{
- struct mlxsw_sp_fib *fib = fib_entry->fib_node->fib;
- enum mlxsw_reg_ralxx_protocol proto;
- u32 *p_dip;
+ struct mlxsw_sp_fib_entry_op_ctx_basic *op_ctx_basic = (void *) op_ctx->ll_priv;
+ enum mlxsw_reg_ralxx_protocol ralxx_proto;
+ char *ralue_pl = op_ctx_basic->ralue_pl;
+ enum mlxsw_reg_ralue_op ralue_op;
- proto = (enum mlxsw_reg_ralxx_protocol) fib->proto;
+ ralxx_proto = (enum mlxsw_reg_ralxx_protocol) proto;
- switch (fib->proto) {
+ switch (op) {
+ case MLXSW_SP_FIB_ENTRY_OP_WRITE:
+ case MLXSW_SP_FIB_ENTRY_OP_UPDATE:
+ ralue_op = MLXSW_REG_RALUE_OP_WRITE_WRITE;
+ break;
+ case MLXSW_SP_FIB_ENTRY_OP_DELETE:
+ ralue_op = MLXSW_REG_RALUE_OP_WRITE_DELETE;
+ break;
+ default:
+ WARN_ON_ONCE(1);
+ return;
+ }
+
+ switch (proto) {
case MLXSW_SP_L3_PROTO_IPV4:
- p_dip = (u32 *) fib_entry->fib_node->key.addr;
- mlxsw_reg_ralue_pack4(ralue_pl, proto, op, fib->vr->id,
- fib_entry->fib_node->key.prefix_len,
- *p_dip);
+ mlxsw_reg_ralue_pack4(ralue_pl, ralxx_proto, ralue_op,
+ virtual_router, prefix_len, (u32 *) addr);
break;
case MLXSW_SP_L3_PROTO_IPV6:
- mlxsw_reg_ralue_pack6(ralue_pl, proto, op, fib->vr->id,
- fib_entry->fib_node->key.prefix_len,
- fib_entry->fib_node->key.addr);
+ mlxsw_reg_ralue_pack6(ralue_pl, ralxx_proto, ralue_op,
+ virtual_router, prefix_len, addr);
break;
}
}
-static int mlxsw_sp_adj_discard_write(struct mlxsw_sp *mlxsw_sp, u16 rif_index)
+static void
+mlxsw_sp_router_ll_basic_fib_entry_act_remote_pack(struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
+ enum mlxsw_reg_ralue_trap_action trap_action,
+ u16 trap_id, u32 adjacency_index, u16 ecmp_size)
+{
+ struct mlxsw_sp_fib_entry_op_ctx_basic *op_ctx_basic = (void *) op_ctx->ll_priv;
+
+ mlxsw_reg_ralue_act_remote_pack(op_ctx_basic->ralue_pl, trap_action,
+ trap_id, adjacency_index, ecmp_size);
+}
+
+static void
+mlxsw_sp_router_ll_basic_fib_entry_act_local_pack(struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
+ enum mlxsw_reg_ralue_trap_action trap_action,
+ u16 trap_id, u16 local_erif)
+{
+ struct mlxsw_sp_fib_entry_op_ctx_basic *op_ctx_basic = (void *) op_ctx->ll_priv;
+
+ mlxsw_reg_ralue_act_local_pack(op_ctx_basic->ralue_pl, trap_action,
+ trap_id, local_erif);
+}
+
+static void
+mlxsw_sp_router_ll_basic_fib_entry_act_ip2me_pack(struct mlxsw_sp_fib_entry_op_ctx *op_ctx)
+{
+ struct mlxsw_sp_fib_entry_op_ctx_basic *op_ctx_basic = (void *) op_ctx->ll_priv;
+
+ mlxsw_reg_ralue_act_ip2me_pack(op_ctx_basic->ralue_pl);
+}
+
+static void
+mlxsw_sp_router_ll_basic_fib_entry_act_ip2me_tun_pack(struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
+ u32 tunnel_ptr)
+{
+ struct mlxsw_sp_fib_entry_op_ctx_basic *op_ctx_basic = (void *) op_ctx->ll_priv;
+
+ mlxsw_reg_ralue_act_ip2me_tun_pack(op_ctx_basic->ralue_pl, tunnel_ptr);
+}
+
+static int
+mlxsw_sp_router_ll_basic_fib_entry_commit(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
+ bool *postponed_for_bulk)
+{
+ struct mlxsw_sp_fib_entry_op_ctx_basic *op_ctx_basic = (void *) op_ctx->ll_priv;
+
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue),
+ op_ctx_basic->ralue_pl);
+}
+
+static bool
+mlxsw_sp_router_ll_basic_fib_entry_is_committed(struct mlxsw_sp_fib_entry_priv *priv)
+{
+ return true;
+}
+
+static void mlxsw_sp_fib_entry_pack(struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
+ struct mlxsw_sp_fib_entry *fib_entry,
+ enum mlxsw_sp_fib_entry_op op)
+{
+ struct mlxsw_sp_fib *fib = fib_entry->fib_node->fib;
+
+ mlxsw_sp_fib_entry_op_ctx_priv_hold(op_ctx, fib_entry->priv);
+ fib->ll_ops->fib_entry_pack(op_ctx, fib->proto, op, fib->vr->id,
+ fib_entry->fib_node->key.prefix_len,
+ fib_entry->fib_node->key.addr,
+ fib_entry->priv);
+}
+
+int mlxsw_sp_fib_entry_commit(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
+ const struct mlxsw_sp_router_ll_ops *ll_ops)
+{
+ bool postponed_for_bulk = false;
+ int err;
+
+ err = ll_ops->fib_entry_commit(mlxsw_sp, op_ctx, &postponed_for_bulk);
+ if (!postponed_for_bulk)
+ mlxsw_sp_fib_entry_op_ctx_priv_put_all(op_ctx);
+ return err;
+}
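
mlxsw_sp_fib_entry_commit() completes the pack/commit split introduced here: a FIB operation is first packed into the per-router op context through the low-level ops, and only then committed. The basic ops above write the RALUE register immediately and report every entry as committed (so deletes are never skipped by the check added to mlxsw_sp_fib_entry_del() below), but the indirection lets a backend batch several packed operations and flush them later; the postponed_for_bulk out-parameter keeps the held entry-priv references alive until such a flush. A schematic model of that contract, with hypothetical names:

#include <stdbool.h>

struct op_ctx {
	char buf[256];	/* packed register payload(s) */
	int nr_held;	/* entry-priv references held for a bulk */
};

/* Backend hook: either write now, or queue and set *postponed. */
typedef int (*commit_fn)(struct op_ctx *ctx, bool *postponed);

static void ctx_put_all(struct op_ctx *ctx)
{
	ctx->nr_held = 0;	/* release the held references */
}

static int entry_commit(struct op_ctx *ctx, commit_fn commit)
{
	bool postponed = false;
	int err;

	err = commit(ctx, &postponed);
	if (!postponed)
		ctx_put_all(ctx);	/* immediate write: drop refs now */
	return err;
}
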
+
+static int mlxsw_sp_adj_discard_write(struct mlxsw_sp *mlxsw_sp)
{
enum mlxsw_reg_ratr_trap_action trap_action;
char ratr_pl[MLXSW_REG_RATR_LEN];
@@ -4318,11 +5169,13 @@ static int mlxsw_sp_adj_discard_write(struct mlxsw_sp *mlxsw_sp, u16 rif_index)
if (err)
return err;
- trap_action = MLXSW_REG_RATR_TRAP_ACTION_DISCARD_ERRORS;
+ trap_action = MLXSW_REG_RATR_TRAP_ACTION_TRAP;
mlxsw_reg_ratr_pack(ratr_pl, MLXSW_REG_RATR_OP_WRITE_WRITE_ENTRY, true,
MLXSW_REG_RATR_TYPE_ETHERNET,
- mlxsw_sp->router->adj_discard_index, rif_index);
+ mlxsw_sp->router->adj_discard_index,
+ mlxsw_sp->router->lb_rif_index);
mlxsw_reg_ratr_trap_action_set(ratr_pl, trap_action);
+ mlxsw_reg_ratr_trap_id_set(ratr_pl, MLXSW_TRAP_ID_RTR_EGRESS0);
err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ratr), ratr_pl);
if (err)
goto err_ratr_write;
@@ -4338,11 +5191,13 @@ err_ratr_write:
}
static int mlxsw_sp_fib_entry_op_remote(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
struct mlxsw_sp_fib_entry *fib_entry,
- enum mlxsw_reg_ralue_op op)
+ enum mlxsw_sp_fib_entry_op op)
{
+ const struct mlxsw_sp_router_ll_ops *ll_ops = fib_entry->fib_node->fib->ll_ops;
struct mlxsw_sp_nexthop_group *nh_group = fib_entry->nh_group;
- char ralue_pl[MLXSW_REG_RALUE_LEN];
+ struct mlxsw_sp_nexthop_group_info *nhgi = nh_group->nhgi;
enum mlxsw_reg_ralue_trap_action trap_action;
u16 trap_id = 0;
u32 adjacency_index = 0;
@@ -4355,12 +5210,10 @@ static int mlxsw_sp_fib_entry_op_remote(struct mlxsw_sp *mlxsw_sp,
*/
if (mlxsw_sp_fib_entry_should_offload(fib_entry)) {
trap_action = MLXSW_REG_RALUE_TRAP_ACTION_NOP;
- adjacency_index = fib_entry->nh_group->adj_index;
- ecmp_size = fib_entry->nh_group->ecmp_size;
- } else if (!nh_group->adj_index_valid && nh_group->count &&
- nh_group->nh_rif) {
- err = mlxsw_sp_adj_discard_write(mlxsw_sp,
- nh_group->nh_rif->rif_index);
+ adjacency_index = nhgi->adj_index;
+ ecmp_size = nhgi->ecmp_size;
+ } else if (!nhgi->adj_index_valid && nhgi->count && nhgi->nh_rif) {
+ err = mlxsw_sp_adj_discard_write(mlxsw_sp);
if (err)
return err;
trap_action = MLXSW_REG_RALUE_TRAP_ACTION_NOP;
@@ -4371,19 +5224,20 @@ static int mlxsw_sp_fib_entry_op_remote(struct mlxsw_sp *mlxsw_sp,
trap_id = MLXSW_TRAP_ID_RTR_INGRESS0;
}
- mlxsw_sp_fib_entry_ralue_pack(ralue_pl, fib_entry, op);
- mlxsw_reg_ralue_act_remote_pack(ralue_pl, trap_action, trap_id,
- adjacency_index, ecmp_size);
- return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl);
+ mlxsw_sp_fib_entry_pack(op_ctx, fib_entry, op);
+ ll_ops->fib_entry_act_remote_pack(op_ctx, trap_action, trap_id,
+ adjacency_index, ecmp_size);
+ return mlxsw_sp_fib_entry_commit(mlxsw_sp, op_ctx, ll_ops);
}
static int mlxsw_sp_fib_entry_op_local(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
struct mlxsw_sp_fib_entry *fib_entry,
- enum mlxsw_reg_ralue_op op)
+ enum mlxsw_sp_fib_entry_op op)
{
- struct mlxsw_sp_rif *rif = fib_entry->nh_group->nh_rif;
+ const struct mlxsw_sp_router_ll_ops *ll_ops = fib_entry->fib_node->fib->ll_ops;
+ struct mlxsw_sp_rif *rif = fib_entry->nh_group->nhgi->nh_rif;
enum mlxsw_reg_ralue_trap_action trap_action;
- char ralue_pl[MLXSW_REG_RALUE_LEN];
u16 trap_id = 0;
u16 rif_index = 0;
@@ -4395,58 +5249,62 @@ static int mlxsw_sp_fib_entry_op_local(struct mlxsw_sp *mlxsw_sp,
trap_id = MLXSW_TRAP_ID_RTR_INGRESS0;
}
- mlxsw_sp_fib_entry_ralue_pack(ralue_pl, fib_entry, op);
- mlxsw_reg_ralue_act_local_pack(ralue_pl, trap_action, trap_id,
- rif_index);
- return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl);
+ mlxsw_sp_fib_entry_pack(op_ctx, fib_entry, op);
+ ll_ops->fib_entry_act_local_pack(op_ctx, trap_action, trap_id, rif_index);
+ return mlxsw_sp_fib_entry_commit(mlxsw_sp, op_ctx, ll_ops);
}
static int mlxsw_sp_fib_entry_op_trap(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
struct mlxsw_sp_fib_entry *fib_entry,
- enum mlxsw_reg_ralue_op op)
+ enum mlxsw_sp_fib_entry_op op)
{
- char ralue_pl[MLXSW_REG_RALUE_LEN];
+ const struct mlxsw_sp_router_ll_ops *ll_ops = fib_entry->fib_node->fib->ll_ops;
- mlxsw_sp_fib_entry_ralue_pack(ralue_pl, fib_entry, op);
- mlxsw_reg_ralue_act_ip2me_pack(ralue_pl);
- return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl);
+ mlxsw_sp_fib_entry_pack(op_ctx, fib_entry, op);
+ ll_ops->fib_entry_act_ip2me_pack(op_ctx);
+ return mlxsw_sp_fib_entry_commit(mlxsw_sp, op_ctx, ll_ops);
}
static int mlxsw_sp_fib_entry_op_blackhole(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
struct mlxsw_sp_fib_entry *fib_entry,
- enum mlxsw_reg_ralue_op op)
+ enum mlxsw_sp_fib_entry_op op)
{
+ const struct mlxsw_sp_router_ll_ops *ll_ops = fib_entry->fib_node->fib->ll_ops;
enum mlxsw_reg_ralue_trap_action trap_action;
- char ralue_pl[MLXSW_REG_RALUE_LEN];
trap_action = MLXSW_REG_RALUE_TRAP_ACTION_DISCARD_ERROR;
- mlxsw_sp_fib_entry_ralue_pack(ralue_pl, fib_entry, op);
- mlxsw_reg_ralue_act_local_pack(ralue_pl, trap_action, 0, 0);
- return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl);
+ mlxsw_sp_fib_entry_pack(op_ctx, fib_entry, op);
+ ll_ops->fib_entry_act_local_pack(op_ctx, trap_action, 0, 0);
+ return mlxsw_sp_fib_entry_commit(mlxsw_sp, op_ctx, ll_ops);
}
static int
mlxsw_sp_fib_entry_op_unreachable(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
struct mlxsw_sp_fib_entry *fib_entry,
- enum mlxsw_reg_ralue_op op)
+ enum mlxsw_sp_fib_entry_op op)
{
+ const struct mlxsw_sp_router_ll_ops *ll_ops = fib_entry->fib_node->fib->ll_ops;
enum mlxsw_reg_ralue_trap_action trap_action;
- char ralue_pl[MLXSW_REG_RALUE_LEN];
u16 trap_id;
trap_action = MLXSW_REG_RALUE_TRAP_ACTION_TRAP;
trap_id = MLXSW_TRAP_ID_RTR_INGRESS1;
- mlxsw_sp_fib_entry_ralue_pack(ralue_pl, fib_entry, op);
- mlxsw_reg_ralue_act_local_pack(ralue_pl, trap_action, trap_id, 0);
- return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl);
+ mlxsw_sp_fib_entry_pack(op_ctx, fib_entry, op);
+ ll_ops->fib_entry_act_local_pack(op_ctx, trap_action, trap_id, 0);
+ return mlxsw_sp_fib_entry_commit(mlxsw_sp, op_ctx, ll_ops);
}
static int
mlxsw_sp_fib_entry_op_ipip_decap(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
struct mlxsw_sp_fib_entry *fib_entry,
- enum mlxsw_reg_ralue_op op)
+ enum mlxsw_sp_fib_entry_op op)
{
+ const struct mlxsw_sp_router_ll_ops *ll_ops = fib_entry->fib_node->fib->ll_ops;
struct mlxsw_sp_ipip_entry *ipip_entry = fib_entry->decap.ipip_entry;
const struct mlxsw_sp_ipip_ops *ipip_ops;
@@ -4454,52 +5312,53 @@ mlxsw_sp_fib_entry_op_ipip_decap(struct mlxsw_sp *mlxsw_sp,
return -EINVAL;
ipip_ops = mlxsw_sp->router->ipip_ops_arr[ipip_entry->ipipt];
- return ipip_ops->fib_entry_op(mlxsw_sp, ipip_entry, op,
- fib_entry->decap.tunnel_index);
+ return ipip_ops->fib_entry_op(mlxsw_sp, ll_ops, op_ctx, ipip_entry, op,
+ fib_entry->decap.tunnel_index, fib_entry->priv);
}
static int mlxsw_sp_fib_entry_op_nve_decap(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
struct mlxsw_sp_fib_entry *fib_entry,
- enum mlxsw_reg_ralue_op op)
+ enum mlxsw_sp_fib_entry_op op)
{
- char ralue_pl[MLXSW_REG_RALUE_LEN];
+ const struct mlxsw_sp_router_ll_ops *ll_ops = fib_entry->fib_node->fib->ll_ops;
- mlxsw_sp_fib_entry_ralue_pack(ralue_pl, fib_entry, op);
- mlxsw_reg_ralue_act_ip2me_tun_pack(ralue_pl,
- fib_entry->decap.tunnel_index);
- return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl);
+ mlxsw_sp_fib_entry_pack(op_ctx, fib_entry, op);
+ ll_ops->fib_entry_act_ip2me_tun_pack(op_ctx,
+ fib_entry->decap.tunnel_index);
+ return mlxsw_sp_fib_entry_commit(mlxsw_sp, op_ctx, ll_ops);
}
static int __mlxsw_sp_fib_entry_op(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
struct mlxsw_sp_fib_entry *fib_entry,
- enum mlxsw_reg_ralue_op op)
+ enum mlxsw_sp_fib_entry_op op)
{
switch (fib_entry->type) {
case MLXSW_SP_FIB_ENTRY_TYPE_REMOTE:
- return mlxsw_sp_fib_entry_op_remote(mlxsw_sp, fib_entry, op);
+ return mlxsw_sp_fib_entry_op_remote(mlxsw_sp, op_ctx, fib_entry, op);
case MLXSW_SP_FIB_ENTRY_TYPE_LOCAL:
- return mlxsw_sp_fib_entry_op_local(mlxsw_sp, fib_entry, op);
+ return mlxsw_sp_fib_entry_op_local(mlxsw_sp, op_ctx, fib_entry, op);
case MLXSW_SP_FIB_ENTRY_TYPE_TRAP:
- return mlxsw_sp_fib_entry_op_trap(mlxsw_sp, fib_entry, op);
+ return mlxsw_sp_fib_entry_op_trap(mlxsw_sp, op_ctx, fib_entry, op);
case MLXSW_SP_FIB_ENTRY_TYPE_BLACKHOLE:
- return mlxsw_sp_fib_entry_op_blackhole(mlxsw_sp, fib_entry, op);
+ return mlxsw_sp_fib_entry_op_blackhole(mlxsw_sp, op_ctx, fib_entry, op);
case MLXSW_SP_FIB_ENTRY_TYPE_UNREACHABLE:
- return mlxsw_sp_fib_entry_op_unreachable(mlxsw_sp, fib_entry,
- op);
+ return mlxsw_sp_fib_entry_op_unreachable(mlxsw_sp, op_ctx, fib_entry, op);
case MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP:
- return mlxsw_sp_fib_entry_op_ipip_decap(mlxsw_sp,
- fib_entry, op);
+ return mlxsw_sp_fib_entry_op_ipip_decap(mlxsw_sp, op_ctx, fib_entry, op);
case MLXSW_SP_FIB_ENTRY_TYPE_NVE_DECAP:
- return mlxsw_sp_fib_entry_op_nve_decap(mlxsw_sp, fib_entry, op);
+ return mlxsw_sp_fib_entry_op_nve_decap(mlxsw_sp, op_ctx, fib_entry, op);
}
return -EINVAL;
}
static int mlxsw_sp_fib_entry_op(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
struct mlxsw_sp_fib_entry *fib_entry,
- enum mlxsw_reg_ralue_op op)
+ enum mlxsw_sp_fib_entry_op op)
{
- int err = __mlxsw_sp_fib_entry_op(mlxsw_sp, fib_entry, op);
+ int err = __mlxsw_sp_fib_entry_op(mlxsw_sp, op_ctx, fib_entry, op);
if (err)
return err;
@@ -4509,18 +5368,35 @@ static int mlxsw_sp_fib_entry_op(struct mlxsw_sp *mlxsw_sp,
return err;
}
+static int __mlxsw_sp_fib_entry_update(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
+ struct mlxsw_sp_fib_entry *fib_entry,
+ bool is_new)
+{
+ return mlxsw_sp_fib_entry_op(mlxsw_sp, op_ctx, fib_entry,
+ is_new ? MLXSW_SP_FIB_ENTRY_OP_WRITE :
+ MLXSW_SP_FIB_ENTRY_OP_UPDATE);
+}
+
static int mlxsw_sp_fib_entry_update(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_fib_entry *fib_entry)
{
- return mlxsw_sp_fib_entry_op(mlxsw_sp, fib_entry,
- MLXSW_REG_RALUE_OP_WRITE_WRITE);
+ struct mlxsw_sp_fib_entry_op_ctx *op_ctx = mlxsw_sp->router->ll_op_ctx;
+
+ mlxsw_sp_fib_entry_op_ctx_clear(op_ctx);
+ return __mlxsw_sp_fib_entry_update(mlxsw_sp, op_ctx, fib_entry, false);
}
static int mlxsw_sp_fib_entry_del(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
struct mlxsw_sp_fib_entry *fib_entry)
{
- return mlxsw_sp_fib_entry_op(mlxsw_sp, fib_entry,
- MLXSW_REG_RALUE_OP_WRITE_DELETE);
+ const struct mlxsw_sp_router_ll_ops *ll_ops = fib_entry->fib_node->fib->ll_ops;
+
+ if (!ll_ops->fib_entry_is_committed(fib_entry->priv))
+ return 0;
+ return mlxsw_sp_fib_entry_op(mlxsw_sp, op_ctx, fib_entry,
+ MLXSW_SP_FIB_ENTRY_OP_DELETE);
}
static int
@@ -4528,17 +5404,17 @@ mlxsw_sp_fib4_entry_type_set(struct mlxsw_sp *mlxsw_sp,
const struct fib_entry_notifier_info *fen_info,
struct mlxsw_sp_fib_entry *fib_entry)
{
- struct net_device *dev = fib_info_nh(fen_info->fi, 0)->fib_nh_dev;
+ struct mlxsw_sp_nexthop_group_info *nhgi = fib_entry->nh_group->nhgi;
union mlxsw_sp_l3addr dip = { .addr4 = htonl(fen_info->dst) };
struct mlxsw_sp_router *router = mlxsw_sp->router;
u32 tb_id = mlxsw_sp_fix_tb_id(fen_info->tb_id);
+ int ifindex = nhgi->nexthops[0].ifindex;
struct mlxsw_sp_ipip_entry *ipip_entry;
- struct fib_info *fi = fen_info->fi;
switch (fen_info->type) {
case RTN_LOCAL:
- ipip_entry = mlxsw_sp_ipip_entry_find_by_decap(mlxsw_sp, dev,
- MLXSW_SP_L3_PROTO_IPV4, dip);
+ ipip_entry = mlxsw_sp_ipip_entry_find_by_decap(mlxsw_sp, ifindex,
+ MLXSW_SP_L3_PROTO_IPV4, dip);
if (ipip_entry && ipip_entry->ol_dev->flags & IFF_UP) {
fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP;
return mlxsw_sp_fib_entry_decap_init(mlxsw_sp,
@@ -4571,7 +5447,7 @@ mlxsw_sp_fib4_entry_type_set(struct mlxsw_sp *mlxsw_sp,
fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_UNREACHABLE;
return 0;
case RTN_UNICAST:
- if (mlxsw_sp_fi_is_gateway(mlxsw_sp, fi))
+ if (nhgi->gateway)
fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_REMOTE;
else
fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_LOCAL;
@@ -4608,15 +5484,27 @@ mlxsw_sp_fib4_entry_create(struct mlxsw_sp *mlxsw_sp,
return ERR_PTR(-ENOMEM);
fib_entry = &fib4_entry->common;
- err = mlxsw_sp_fib4_entry_type_set(mlxsw_sp, fen_info, fib_entry);
- if (err)
- goto err_fib4_entry_type_set;
+ fib_entry->priv = mlxsw_sp_fib_entry_priv_create(fib_node->fib->ll_ops);
+ if (IS_ERR(fib_entry->priv)) {
+ err = PTR_ERR(fib_entry->priv);
+ goto err_fib_entry_priv_create;
+ }
err = mlxsw_sp_nexthop4_group_get(mlxsw_sp, fib_entry, fen_info->fi);
if (err)
goto err_nexthop4_group_get;
- fib4_entry->prio = fen_info->fi->fib_priority;
+ err = mlxsw_sp_nexthop_group_vr_link(fib_entry->nh_group,
+ fib_node->fib);
+ if (err)
+ goto err_nexthop_group_vr_link;
+
+ err = mlxsw_sp_fib4_entry_type_set(mlxsw_sp, fen_info, fib_entry);
+ if (err)
+ goto err_fib4_entry_type_set;
+
+ fib4_entry->fi = fen_info->fi;
+ fib_info_hold(fib4_entry->fi);
fib4_entry->tb_id = fen_info->tb_id;
fib4_entry->type = fen_info->type;
fib4_entry->tos = fen_info->tos;
@@ -4625,9 +5513,13 @@ mlxsw_sp_fib4_entry_create(struct mlxsw_sp *mlxsw_sp,
return fib4_entry;
-err_nexthop4_group_get:
- mlxsw_sp_fib4_entry_type_unset(mlxsw_sp, fib_entry);
err_fib4_entry_type_set:
+ mlxsw_sp_nexthop_group_vr_unlink(fib_entry->nh_group, fib_node->fib);
+err_nexthop_group_vr_link:
+ mlxsw_sp_nexthop4_group_put(mlxsw_sp, &fib4_entry->common);
+err_nexthop4_group_get:
+ mlxsw_sp_fib_entry_priv_put(fib_entry->priv);
+err_fib_entry_priv_create:
kfree(fib4_entry);
return ERR_PTR(err);
}
@@ -4635,8 +5527,14 @@ err_fib4_entry_type_set:
static void mlxsw_sp_fib4_entry_destroy(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_fib4_entry *fib4_entry)
{
- mlxsw_sp_nexthop4_group_put(mlxsw_sp, &fib4_entry->common);
+ struct mlxsw_sp_fib_node *fib_node = fib4_entry->common.fib_node;
+
+ fib_info_put(fib4_entry->fi);
mlxsw_sp_fib4_entry_type_unset(mlxsw_sp, &fib4_entry->common);
+ mlxsw_sp_nexthop_group_vr_unlink(fib4_entry->common.nh_group,
+ fib_node->fib);
+ mlxsw_sp_nexthop4_group_put(mlxsw_sp, &fib4_entry->common);
+ mlxsw_sp_fib_entry_priv_put(fib4_entry->common.priv);
kfree(fib4_entry);
}
@@ -4665,8 +5563,7 @@ mlxsw_sp_fib4_entry_lookup(struct mlxsw_sp *mlxsw_sp,
if (fib4_entry->tb_id == fen_info->tb_id &&
fib4_entry->tos == fen_info->tos &&
fib4_entry->type == fen_info->type &&
- mlxsw_sp_nexthop4_group_fi(fib4_entry->common.nh_group) ==
- fen_info->fi)
+ fib4_entry->fi == fen_info->fi)
return fib4_entry;
return NULL;
@@ -4875,14 +5772,16 @@ static void mlxsw_sp_fib_node_put(struct mlxsw_sp *mlxsw_sp,
}
static int mlxsw_sp_fib_node_entry_link(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
struct mlxsw_sp_fib_entry *fib_entry)
{
struct mlxsw_sp_fib_node *fib_node = fib_entry->fib_node;
+ bool is_new = !fib_node->fib_entry;
int err;
fib_node->fib_entry = fib_entry;
- err = mlxsw_sp_fib_entry_update(mlxsw_sp, fib_entry);
+ err = __mlxsw_sp_fib_entry_update(mlxsw_sp, op_ctx, fib_entry, is_new);
if (err)
goto err_fib_entry_update;
@@ -4893,14 +5792,25 @@ err_fib_entry_update:
return err;
}
-static void
-mlxsw_sp_fib_node_entry_unlink(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_fib_entry *fib_entry)
+static int __mlxsw_sp_fib_node_entry_unlink(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
+ struct mlxsw_sp_fib_entry *fib_entry)
{
struct mlxsw_sp_fib_node *fib_node = fib_entry->fib_node;
+ int err;
- mlxsw_sp_fib_entry_del(mlxsw_sp, fib_entry);
+ err = mlxsw_sp_fib_entry_del(mlxsw_sp, op_ctx, fib_entry);
fib_node->fib_entry = NULL;
+ return err;
+}
+
+static void mlxsw_sp_fib_node_entry_unlink(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_fib_entry *fib_entry)
+{
+ struct mlxsw_sp_fib_entry_op_ctx *op_ctx = mlxsw_sp->router->ll_op_ctx;
+
+ mlxsw_sp_fib_entry_op_ctx_clear(op_ctx);
+ __mlxsw_sp_fib_node_entry_unlink(mlxsw_sp, op_ctx, fib_entry);
}
static bool mlxsw_sp_fib4_allow_replace(struct mlxsw_sp_fib4_entry *fib4_entry)
@@ -4922,6 +5832,7 @@ static bool mlxsw_sp_fib4_allow_replace(struct mlxsw_sp_fib4_entry *fib4_entry)
static int
mlxsw_sp_router_fib4_replace(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
const struct fib_entry_notifier_info *fen_info)
{
struct mlxsw_sp_fib4_entry *fib4_entry, *fib4_replaced;
@@ -4955,7 +5866,7 @@ mlxsw_sp_router_fib4_replace(struct mlxsw_sp *mlxsw_sp,
}
replaced = fib_node->fib_entry;
- err = mlxsw_sp_fib_node_entry_link(mlxsw_sp, &fib4_entry->common);
+ err = mlxsw_sp_fib_node_entry_link(mlxsw_sp, op_ctx, &fib4_entry->common);
if (err) {
dev_warn(mlxsw_sp->bus_info->dev, "Failed to link FIB entry to node\n");
goto err_fib_node_entry_link;
@@ -4980,23 +5891,26 @@ err_fib4_entry_create:
return err;
}
-static void mlxsw_sp_router_fib4_del(struct mlxsw_sp *mlxsw_sp,
- struct fib_entry_notifier_info *fen_info)
+static int mlxsw_sp_router_fib4_del(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
+ struct fib_entry_notifier_info *fen_info)
{
struct mlxsw_sp_fib4_entry *fib4_entry;
struct mlxsw_sp_fib_node *fib_node;
+ int err;
if (mlxsw_sp->router->aborted)
- return;
+ return 0;
fib4_entry = mlxsw_sp_fib4_entry_lookup(mlxsw_sp, fen_info);
if (!fib4_entry)
- return;
+ return 0;
fib_node = fib4_entry->common.fib_node;
- mlxsw_sp_fib_node_entry_unlink(mlxsw_sp, &fib4_entry->common);
+ err = __mlxsw_sp_fib_node_entry_unlink(mlxsw_sp, op_ctx, &fib4_entry->common);
mlxsw_sp_fib4_entry_destroy(mlxsw_sp, fib4_entry);
mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
+ return err;
}
static bool mlxsw_sp_fib6_rt_should_ignore(const struct fib6_info *rt)
@@ -5047,7 +5961,8 @@ static void mlxsw_sp_rt6_destroy(struct mlxsw_sp_rt6 *mlxsw_sp_rt6)
{
struct fib6_nh *fib6_nh = mlxsw_sp_rt6->rt->fib6_nh;
- fib6_nh->fib_nh_flags &= ~RTNH_F_OFFLOAD;
+ if (!mlxsw_sp_rt6->rt->nh)
+ fib6_nh->fib_nh_flags &= ~RTNH_F_OFFLOAD;
mlxsw_sp_rt6_release(mlxsw_sp_rt6->rt);
kfree(mlxsw_sp_rt6);
}
@@ -5081,51 +5996,6 @@ static bool mlxsw_sp_nexthop6_ipip_type(const struct mlxsw_sp *mlxsw_sp,
mlxsw_sp_netdev_ipip_type(mlxsw_sp, rt->fib6_nh->fib_nh_dev, ret);
}
-static int mlxsw_sp_nexthop6_type_init(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_nexthop_group *nh_grp,
- struct mlxsw_sp_nexthop *nh,
- const struct fib6_info *rt)
-{
- const struct mlxsw_sp_ipip_ops *ipip_ops;
- struct mlxsw_sp_ipip_entry *ipip_entry;
- struct net_device *dev = rt->fib6_nh->fib_nh_dev;
- struct mlxsw_sp_rif *rif;
- int err;
-
- ipip_entry = mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, dev);
- if (ipip_entry) {
- ipip_ops = mlxsw_sp->router->ipip_ops_arr[ipip_entry->ipipt];
- if (ipip_ops->can_offload(mlxsw_sp, dev,
- MLXSW_SP_L3_PROTO_IPV6)) {
- nh->type = MLXSW_SP_NEXTHOP_TYPE_IPIP;
- mlxsw_sp_nexthop_ipip_init(mlxsw_sp, nh, ipip_entry);
- return 0;
- }
- }
-
- nh->type = MLXSW_SP_NEXTHOP_TYPE_ETH;
- rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
- if (!rif)
- return 0;
- mlxsw_sp_nexthop_rif_init(nh, rif);
-
- err = mlxsw_sp_nexthop_neigh_init(mlxsw_sp, nh);
- if (err)
- goto err_nexthop_neigh_init;
-
- return 0;
-
-err_nexthop_neigh_init:
- mlxsw_sp_nexthop_rif_fini(nh);
- return err;
-}
-
-static void mlxsw_sp_nexthop6_type_fini(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_nexthop *nh)
-{
- mlxsw_sp_nexthop_type_fini(mlxsw_sp, nh);
-}
-
static int mlxsw_sp_nexthop6_init(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_nexthop_group *nh_grp,
struct mlxsw_sp_nexthop *nh,
@@ -5133,9 +6003,12 @@ static int mlxsw_sp_nexthop6_init(struct mlxsw_sp *mlxsw_sp,
{
struct net_device *dev = rt->fib6_nh->fib_nh_dev;
- nh->nh_grp = nh_grp;
+ nh->nhgi = nh_grp->nhgi;
nh->nh_weight = rt->fib6_nh->fib_nh_weight;
memcpy(&nh->gw_addr, &rt->fib6_nh->fib_nh_gw6, sizeof(nh->gw_addr));
+#if IS_ENABLED(CONFIG_IPV6)
+ nh->neigh_tbl = &nd_tbl;
+#endif
mlxsw_sp_nexthop_counter_alloc(mlxsw_sp, nh);
list_add_tail(&nh->router_list_node, &mlxsw_sp->router->nexthop_list);
@@ -5144,13 +6017,13 @@ static int mlxsw_sp_nexthop6_init(struct mlxsw_sp *mlxsw_sp,
return 0;
nh->ifindex = dev->ifindex;
- return mlxsw_sp_nexthop6_type_init(mlxsw_sp, nh_grp, nh, rt);
+ return mlxsw_sp_nexthop_type_init(mlxsw_sp, nh, dev);
}
static void mlxsw_sp_nexthop6_fini(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_nexthop *nh)
{
- mlxsw_sp_nexthop6_type_fini(mlxsw_sp, nh);
+ mlxsw_sp_nexthop_type_fini(mlxsw_sp, nh);
list_del(&nh->router_list_node);
mlxsw_sp_nexthop_counter_free(mlxsw_sp, nh);
}
@@ -5162,51 +6035,105 @@ static bool mlxsw_sp_rt6_is_gateway(const struct mlxsw_sp *mlxsw_sp,
mlxsw_sp_nexthop6_ipip_type(mlxsw_sp, rt, NULL);
}
-static struct mlxsw_sp_nexthop_group *
-mlxsw_sp_nexthop6_group_create(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_fib6_entry *fib6_entry)
+static int
+mlxsw_sp_nexthop6_group_info_init(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_nexthop_group *nh_grp,
+ struct mlxsw_sp_fib6_entry *fib6_entry)
{
- struct mlxsw_sp_nexthop_group *nh_grp;
+ struct mlxsw_sp_nexthop_group_info *nhgi;
struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
struct mlxsw_sp_nexthop *nh;
- int i = 0;
- int err;
+ int err, i;
- nh_grp = kzalloc(struct_size(nh_grp, nexthops, fib6_entry->nrt6),
- GFP_KERNEL);
- if (!nh_grp)
- return ERR_PTR(-ENOMEM);
- INIT_LIST_HEAD(&nh_grp->fib_list);
-#if IS_ENABLED(CONFIG_IPV6)
- nh_grp->neigh_tbl = &nd_tbl;
-#endif
+ nhgi = kzalloc(struct_size(nhgi, nexthops, fib6_entry->nrt6),
+ GFP_KERNEL);
+ if (!nhgi)
+ return -ENOMEM;
+ nh_grp->nhgi = nhgi;
+ nhgi->nh_grp = nh_grp;
mlxsw_sp_rt6 = list_first_entry(&fib6_entry->rt6_list,
struct mlxsw_sp_rt6, list);
- nh_grp->gateway = mlxsw_sp_rt6_is_gateway(mlxsw_sp, mlxsw_sp_rt6->rt);
- nh_grp->count = fib6_entry->nrt6;
- for (i = 0; i < nh_grp->count; i++) {
+ nhgi->gateway = mlxsw_sp_rt6_is_gateway(mlxsw_sp, mlxsw_sp_rt6->rt);
+ nhgi->count = fib6_entry->nrt6;
+ for (i = 0; i < nhgi->count; i++) {
struct fib6_info *rt = mlxsw_sp_rt6->rt;
- nh = &nh_grp->nexthops[i];
+ nh = &nhgi->nexthops[i];
err = mlxsw_sp_nexthop6_init(mlxsw_sp, nh_grp, nh, rt);
if (err)
goto err_nexthop6_init;
mlxsw_sp_rt6 = list_next_entry(mlxsw_sp_rt6, list);
}
+ nh_grp->nhgi = nhgi;
+ err = mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh_grp);
+ if (err)
+ goto err_group_refresh;
+
+ return 0;
+
+err_group_refresh:
+ i = nhgi->count;
+err_nexthop6_init:
+ for (i--; i >= 0; i--) {
+ nh = &nhgi->nexthops[i];
+ mlxsw_sp_nexthop6_fini(mlxsw_sp, nh);
+ }
+ kfree(nhgi);
+ return err;
+}
+
+static void
+mlxsw_sp_nexthop6_group_info_fini(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_nexthop_group *nh_grp)
+{
+ struct mlxsw_sp_nexthop_group_info *nhgi = nh_grp->nhgi;
+ int i;
+
+ for (i = nhgi->count - 1; i >= 0; i--) {
+ struct mlxsw_sp_nexthop *nh = &nhgi->nexthops[i];
+
+ mlxsw_sp_nexthop6_fini(mlxsw_sp, nh);
+ }
+ mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh_grp);
+ WARN_ON_ONCE(nhgi->adj_index_valid);
+ kfree(nhgi);
+}
+
+static struct mlxsw_sp_nexthop_group *
+mlxsw_sp_nexthop6_group_create(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_fib6_entry *fib6_entry)
+{
+ struct mlxsw_sp_nexthop_group *nh_grp;
+ int err;
+
+ nh_grp = kzalloc(sizeof(*nh_grp), GFP_KERNEL);
+ if (!nh_grp)
+ return ERR_PTR(-ENOMEM);
+ INIT_LIST_HEAD(&nh_grp->vr_list);
+ err = rhashtable_init(&nh_grp->vr_ht,
+ &mlxsw_sp_nexthop_group_vr_ht_params);
+ if (err)
+ goto err_nexthop_group_vr_ht_init;
+ INIT_LIST_HEAD(&nh_grp->fib_list);
+ nh_grp->type = MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV6;
+
+ err = mlxsw_sp_nexthop6_group_info_init(mlxsw_sp, nh_grp, fib6_entry);
+ if (err)
+ goto err_nexthop_group_info_init;
err = mlxsw_sp_nexthop_group_insert(mlxsw_sp, nh_grp);
if (err)
goto err_nexthop_group_insert;
- mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh_grp);
+ nh_grp->can_destroy = true;
+
return nh_grp;
err_nexthop_group_insert:
-err_nexthop6_init:
- for (i--; i >= 0; i--) {
- nh = &nh_grp->nexthops[i];
- mlxsw_sp_nexthop6_fini(mlxsw_sp, nh);
- }
+ mlxsw_sp_nexthop6_group_info_fini(mlxsw_sp, nh_grp);
+err_nexthop_group_info_init:
+ rhashtable_destroy(&nh_grp->vr_ht);
+err_nexthop_group_vr_ht_init:
kfree(nh_grp);
return ERR_PTR(err);
}
@@ -5215,24 +6142,29 @@ static void
mlxsw_sp_nexthop6_group_destroy(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_nexthop_group *nh_grp)
{
- struct mlxsw_sp_nexthop *nh;
- int i = nh_grp->count;
-
+ if (!nh_grp->can_destroy)
+ return;
mlxsw_sp_nexthop_group_remove(mlxsw_sp, nh_grp);
- for (i--; i >= 0; i--) {
- nh = &nh_grp->nexthops[i];
- mlxsw_sp_nexthop6_fini(mlxsw_sp, nh);
- }
- mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh_grp);
- WARN_ON(nh_grp->adj_index_valid);
+ mlxsw_sp_nexthop6_group_info_fini(mlxsw_sp, nh_grp);
+ WARN_ON_ONCE(!list_empty(&nh_grp->vr_list));
+ rhashtable_destroy(&nh_grp->vr_ht);
kfree(nh_grp);
}
static int mlxsw_sp_nexthop6_group_get(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_fib6_entry *fib6_entry)
{
+ struct fib6_info *rt = mlxsw_sp_fib6_entry_rt(fib6_entry);
struct mlxsw_sp_nexthop_group *nh_grp;
+ if (rt->nh) {
+ nh_grp = mlxsw_sp_nexthop_obj_group_lookup(mlxsw_sp,
+ rt->nh->id);
+ if (WARN_ON_ONCE(!nh_grp))
+ return -EINVAL;
+ goto out;
+ }
+
nh_grp = mlxsw_sp_nexthop6_group_lookup(mlxsw_sp, fib6_entry);
if (!nh_grp) {
nh_grp = mlxsw_sp_nexthop6_group_create(mlxsw_sp, fib6_entry);
@@ -5240,15 +6172,16 @@ static int mlxsw_sp_nexthop6_group_get(struct mlxsw_sp *mlxsw_sp,
return PTR_ERR(nh_grp);
}
- list_add_tail(&fib6_entry->common.nexthop_group_node,
- &nh_grp->fib_list);
- fib6_entry->common.nh_group = nh_grp;
-
/* The route and the nexthop are described by the same struct, so we
* need to update the nexthop offload indication for the new route.
*/
__mlxsw_sp_nexthop6_group_offload_refresh(nh_grp, fib6_entry);
+out:
+ list_add_tail(&fib6_entry->common.nexthop_group_node,
+ &nh_grp->fib_list);
+ fib6_entry->common.nh_group = nh_grp;
+
return 0;
}
@@ -5260,16 +6193,24 @@ static void mlxsw_sp_nexthop6_group_put(struct mlxsw_sp *mlxsw_sp,
list_del(&fib_entry->nexthop_group_node);
if (!list_empty(&nh_grp->fib_list))
return;
+
+ if (nh_grp->type == MLXSW_SP_NEXTHOP_GROUP_TYPE_OBJ) {
+ mlxsw_sp_nexthop_obj_group_destroy(mlxsw_sp, nh_grp);
+ return;
+ }
+
mlxsw_sp_nexthop6_group_destroy(mlxsw_sp, nh_grp);
}
-static int
-mlxsw_sp_nexthop6_group_update(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_fib6_entry *fib6_entry)
+static int mlxsw_sp_nexthop6_group_update(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
+ struct mlxsw_sp_fib6_entry *fib6_entry)
{
struct mlxsw_sp_nexthop_group *old_nh_grp = fib6_entry->common.nh_group;
+ struct mlxsw_sp_fib_node *fib_node = fib6_entry->common.fib_node;
int err;
+ mlxsw_sp_nexthop_group_vr_unlink(old_nh_grp, fib_node->fib);
fib6_entry->common.nh_group = NULL;
list_del(&fib6_entry->common.nexthop_group_node);
@@ -5277,11 +6218,17 @@ mlxsw_sp_nexthop6_group_update(struct mlxsw_sp *mlxsw_sp,
if (err)
goto err_nexthop6_group_get;
+ err = mlxsw_sp_nexthop_group_vr_link(fib6_entry->common.nh_group,
+ fib_node->fib);
+ if (err)
+ goto err_nexthop_group_vr_link;
+
/* In case this entry is offloaded, then the adjacency index
* currently associated with it in the device's table is that
* of the old group. Start using the new one instead.
*/
- err = mlxsw_sp_fib_entry_update(mlxsw_sp, &fib6_entry->common);
+ err = __mlxsw_sp_fib_entry_update(mlxsw_sp, op_ctx,
+ &fib6_entry->common, false);
if (err)
goto err_fib_entry_update;
@@ -5291,16 +6238,21 @@ mlxsw_sp_nexthop6_group_update(struct mlxsw_sp *mlxsw_sp,
return 0;
err_fib_entry_update:
+ mlxsw_sp_nexthop_group_vr_unlink(fib6_entry->common.nh_group,
+ fib_node->fib);
+err_nexthop_group_vr_link:
mlxsw_sp_nexthop6_group_put(mlxsw_sp, &fib6_entry->common);
err_nexthop6_group_get:
list_add_tail(&fib6_entry->common.nexthop_group_node,
&old_nh_grp->fib_list);
fib6_entry->common.nh_group = old_nh_grp;
+ mlxsw_sp_nexthop_group_vr_link(old_nh_grp, fib_node->fib);
return err;
}
static int
mlxsw_sp_fib6_entry_nexthop_add(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
struct mlxsw_sp_fib6_entry *fib6_entry,
struct fib6_info **rt_arr, unsigned int nrt6)
{
@@ -5318,7 +6270,7 @@ mlxsw_sp_fib6_entry_nexthop_add(struct mlxsw_sp *mlxsw_sp,
fib6_entry->nrt6++;
}
- err = mlxsw_sp_nexthop6_group_update(mlxsw_sp, fib6_entry);
+ err = mlxsw_sp_nexthop6_group_update(mlxsw_sp, op_ctx, fib6_entry);
if (err)
goto err_nexthop6_group_update;
@@ -5339,6 +6291,7 @@ err_rt6_create:
static void
mlxsw_sp_fib6_entry_nexthop_del(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
struct mlxsw_sp_fib6_entry *fib6_entry,
struct fib6_info **rt_arr, unsigned int nrt6)
{
@@ -5356,26 +6309,20 @@ mlxsw_sp_fib6_entry_nexthop_del(struct mlxsw_sp *mlxsw_sp,
mlxsw_sp_rt6_destroy(mlxsw_sp_rt6);
}
- mlxsw_sp_nexthop6_group_update(mlxsw_sp, fib6_entry);
+ mlxsw_sp_nexthop6_group_update(mlxsw_sp, op_ctx, fib6_entry);
}
static void mlxsw_sp_fib6_entry_type_set(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_fib_entry *fib_entry,
const struct fib6_info *rt)
{
- /* Packets hitting RTF_REJECT routes need to be discarded by the
- * stack. We can rely on their destination device not having a
- * RIF (it's the loopback device) and can thus use action type
- * local, which will cause them to be trapped with a lower
- * priority than packets that need to be locally received.
- */
if (rt->fib6_flags & (RTF_LOCAL | RTF_ANYCAST))
fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_TRAP;
else if (rt->fib6_type == RTN_BLACKHOLE)
fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_BLACKHOLE;
else if (rt->fib6_flags & RTF_REJECT)
fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_UNREACHABLE;
- else if (mlxsw_sp_rt6_is_gateway(mlxsw_sp, rt))
+ else if (fib_entry->nh_group->nhgi->gateway)
fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_REMOTE;
else
fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_LOCAL;
@@ -5409,6 +6356,12 @@ mlxsw_sp_fib6_entry_create(struct mlxsw_sp *mlxsw_sp,
return ERR_PTR(-ENOMEM);
fib_entry = &fib6_entry->common;
+ fib_entry->priv = mlxsw_sp_fib_entry_priv_create(fib_node->fib->ll_ops);
+ if (IS_ERR(fib_entry->priv)) {
+ err = PTR_ERR(fib_entry->priv);
+ goto err_fib_entry_priv_create;
+ }
+
INIT_LIST_HEAD(&fib6_entry->rt6_list);
for (i = 0; i < nrt6; i++) {
@@ -5421,16 +6374,23 @@ mlxsw_sp_fib6_entry_create(struct mlxsw_sp *mlxsw_sp,
fib6_entry->nrt6++;
}
- mlxsw_sp_fib6_entry_type_set(mlxsw_sp, fib_entry, rt_arr[0]);
-
err = mlxsw_sp_nexthop6_group_get(mlxsw_sp, fib6_entry);
if (err)
goto err_nexthop6_group_get;
+ err = mlxsw_sp_nexthop_group_vr_link(fib_entry->nh_group,
+ fib_node->fib);
+ if (err)
+ goto err_nexthop_group_vr_link;
+
+ mlxsw_sp_fib6_entry_type_set(mlxsw_sp, fib_entry, rt_arr[0]);
+
fib_entry->fib_node = fib_node;
return fib6_entry;
+err_nexthop_group_vr_link:
+ mlxsw_sp_nexthop6_group_put(mlxsw_sp, fib_entry);
err_nexthop6_group_get:
i = nrt6;
err_rt6_create:
@@ -5441,6 +6401,8 @@ err_rt6_create:
list_del(&mlxsw_sp_rt6->list);
mlxsw_sp_rt6_destroy(mlxsw_sp_rt6);
}
+ mlxsw_sp_fib_entry_priv_put(fib_entry->priv);
+err_fib_entry_priv_create:
kfree(fib6_entry);
return ERR_PTR(err);
}
@@ -5448,9 +6410,14 @@ err_rt6_create:
static void mlxsw_sp_fib6_entry_destroy(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_fib6_entry *fib6_entry)
{
+ struct mlxsw_sp_fib_node *fib_node = fib6_entry->common.fib_node;
+
+ mlxsw_sp_nexthop_group_vr_unlink(fib6_entry->common.nh_group,
+ fib_node->fib);
mlxsw_sp_nexthop6_group_put(mlxsw_sp, &fib6_entry->common);
mlxsw_sp_fib6_entry_rt_destroy_all(fib6_entry);
WARN_ON(fib6_entry->nrt6);
+ mlxsw_sp_fib_entry_priv_put(fib6_entry->common.priv);
kfree(fib6_entry);
}
@@ -5508,8 +6475,8 @@ static bool mlxsw_sp_fib6_allow_replace(struct mlxsw_sp_fib6_entry *fib6_entry)
}
static int mlxsw_sp_router_fib6_replace(struct mlxsw_sp *mlxsw_sp,
- struct fib6_info **rt_arr,
- unsigned int nrt6)
+ struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
+ struct fib6_info **rt_arr, unsigned int nrt6)
{
struct mlxsw_sp_fib6_entry *fib6_entry, *fib6_replaced;
struct mlxsw_sp_fib_entry *replaced;
@@ -5548,7 +6515,7 @@ static int mlxsw_sp_router_fib6_replace(struct mlxsw_sp *mlxsw_sp,
}
replaced = fib_node->fib_entry;
- err = mlxsw_sp_fib_node_entry_link(mlxsw_sp, &fib6_entry->common);
+ err = mlxsw_sp_fib_node_entry_link(mlxsw_sp, op_ctx, &fib6_entry->common);
if (err)
goto err_fib_node_entry_link;
@@ -5572,8 +6539,8 @@ err_fib6_entry_create:
}
static int mlxsw_sp_router_fib6_append(struct mlxsw_sp *mlxsw_sp,
- struct fib6_info **rt_arr,
- unsigned int nrt6)
+ struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
+ struct fib6_info **rt_arr, unsigned int nrt6)
{
struct mlxsw_sp_fib6_entry *fib6_entry;
struct mlxsw_sp_fib_node *fib_node;
@@ -5604,8 +6571,7 @@ static int mlxsw_sp_router_fib6_append(struct mlxsw_sp *mlxsw_sp,
fib6_entry = container_of(fib_node->fib_entry,
struct mlxsw_sp_fib6_entry, common);
- err = mlxsw_sp_fib6_entry_nexthop_add(mlxsw_sp, fib6_entry, rt_arr,
- nrt6);
+ err = mlxsw_sp_fib6_entry_nexthop_add(mlxsw_sp, op_ctx, fib6_entry, rt_arr, nrt6);
if (err)
goto err_fib6_entry_nexthop_add;
@@ -5616,19 +6582,20 @@ err_fib6_entry_nexthop_add:
return err;
}
-static void mlxsw_sp_router_fib6_del(struct mlxsw_sp *mlxsw_sp,
- struct fib6_info **rt_arr,
- unsigned int nrt6)
+static int mlxsw_sp_router_fib6_del(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
+ struct fib6_info **rt_arr, unsigned int nrt6)
{
struct mlxsw_sp_fib6_entry *fib6_entry;
struct mlxsw_sp_fib_node *fib_node;
struct fib6_info *rt = rt_arr[0];
+ int err;
if (mlxsw_sp->router->aborted)
- return;
+ return 0;
if (mlxsw_sp_fib6_rt_should_ignore(rt))
- return;
+ return 0;
/* Multipath routes are first added to the FIB trie and only then
* notified. If we vetoed the addition, we will get a delete
@@ -5637,58 +6604,66 @@ static void mlxsw_sp_router_fib6_del(struct mlxsw_sp *mlxsw_sp,
*/
fib6_entry = mlxsw_sp_fib6_entry_lookup(mlxsw_sp, rt);
if (!fib6_entry)
- return;
+ return 0;
/* If not all the nexthops are deleted, then only reduce the nexthop
* group.
*/
if (nrt6 != fib6_entry->nrt6) {
- mlxsw_sp_fib6_entry_nexthop_del(mlxsw_sp, fib6_entry, rt_arr,
- nrt6);
- return;
+ mlxsw_sp_fib6_entry_nexthop_del(mlxsw_sp, op_ctx, fib6_entry, rt_arr, nrt6);
+ return 0;
}
fib_node = fib6_entry->common.fib_node;
- mlxsw_sp_fib_node_entry_unlink(mlxsw_sp, &fib6_entry->common);
+ err = __mlxsw_sp_fib_node_entry_unlink(mlxsw_sp, op_ctx, &fib6_entry->common);
mlxsw_sp_fib6_entry_destroy(mlxsw_sp, fib6_entry);
mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
+ return err;
}
static int __mlxsw_sp_router_set_abort_trap(struct mlxsw_sp *mlxsw_sp,
- enum mlxsw_reg_ralxx_protocol proto,
+ enum mlxsw_sp_l3proto proto,
u8 tree_id)
{
- char ralta_pl[MLXSW_REG_RALTA_LEN];
- char ralst_pl[MLXSW_REG_RALST_LEN];
+ const struct mlxsw_sp_router_ll_ops *ll_ops = mlxsw_sp->router->proto_ll_ops[proto];
+ enum mlxsw_reg_ralxx_protocol ralxx_proto =
+ (enum mlxsw_reg_ralxx_protocol) proto;
+ struct mlxsw_sp_fib_entry_priv *priv;
+ char xralta_pl[MLXSW_REG_XRALTA_LEN];
+ char xralst_pl[MLXSW_REG_XRALST_LEN];
int i, err;
- mlxsw_reg_ralta_pack(ralta_pl, true, proto, tree_id);
- err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralta), ralta_pl);
+ mlxsw_reg_xralta_pack(xralta_pl, true, ralxx_proto, tree_id);
+ err = ll_ops->ralta_write(mlxsw_sp, xralta_pl);
if (err)
return err;
- mlxsw_reg_ralst_pack(ralst_pl, 0xff, tree_id);
- err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralst), ralst_pl);
+ mlxsw_reg_xralst_pack(xralst_pl, 0xff, tree_id);
+ err = ll_ops->ralst_write(mlxsw_sp, xralst_pl);
if (err)
return err;
for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS); i++) {
+ struct mlxsw_sp_fib_entry_op_ctx *op_ctx = mlxsw_sp->router->ll_op_ctx;
struct mlxsw_sp_vr *vr = &mlxsw_sp->router->vrs[i];
- char raltb_pl[MLXSW_REG_RALTB_LEN];
- char ralue_pl[MLXSW_REG_RALUE_LEN];
+ char xraltb_pl[MLXSW_REG_XRALTB_LEN];
- mlxsw_reg_raltb_pack(raltb_pl, vr->id, proto, tree_id);
- err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raltb),
- raltb_pl);
+ mlxsw_sp_fib_entry_op_ctx_clear(op_ctx);
+ mlxsw_reg_xraltb_pack(xraltb_pl, vr->id, ralxx_proto, tree_id);
+ err = ll_ops->raltb_write(mlxsw_sp, xraltb_pl);
if (err)
return err;
- mlxsw_reg_ralue_pack(ralue_pl, proto,
- MLXSW_REG_RALUE_OP_WRITE_WRITE, vr->id, 0);
- mlxsw_reg_ralue_act_ip2me_pack(ralue_pl);
- err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue),
- ralue_pl);
+ priv = mlxsw_sp_fib_entry_priv_create(ll_ops);
+ if (IS_ERR(priv))
+ return PTR_ERR(priv);
+
+ ll_ops->fib_entry_pack(op_ctx, proto, MLXSW_SP_FIB_ENTRY_OP_WRITE,
+ vr->id, 0, NULL, priv);
+ ll_ops->fib_entry_act_ip2me_pack(op_ctx);
+ err = ll_ops->fib_entry_commit(mlxsw_sp, op_ctx, NULL);
+ mlxsw_sp_fib_entry_priv_put(priv);
if (err)
return err;
}
@@ -5784,7 +6759,7 @@ mlxsw_sp_router_fibmr_vif_del(struct mlxsw_sp *mlxsw_sp,
static int mlxsw_sp_router_set_abort_trap(struct mlxsw_sp *mlxsw_sp)
{
- enum mlxsw_reg_ralxx_protocol proto = MLXSW_REG_RALXX_PROTOCOL_IPV4;
+ enum mlxsw_sp_l3proto proto = MLXSW_SP_L3_PROTO_IPV4;
int err;
err = __mlxsw_sp_router_set_abort_trap(mlxsw_sp, proto,
@@ -5796,7 +6771,7 @@ static int mlxsw_sp_router_set_abort_trap(struct mlxsw_sp *mlxsw_sp)
* packets that don't match any routes are trapped to the CPU.
*/
- proto = MLXSW_REG_RALXX_PROTOCOL_IPV6;
+ proto = MLXSW_SP_L3_PROTO_IPV6;
return __mlxsw_sp_router_set_abort_trap(mlxsw_sp, proto,
MLXSW_SP_LPM_TREE_MIN + 1);
}
@@ -5901,15 +6876,15 @@ static void mlxsw_sp_router_fib_abort(struct mlxsw_sp *mlxsw_sp)
dev_warn(mlxsw_sp->bus_info->dev, "Failed to set abort trap.\n");
}
-struct mlxsw_sp_fib6_event_work {
+struct mlxsw_sp_fib6_event {
struct fib6_info **rt_arr;
unsigned int nrt6;
};
-struct mlxsw_sp_fib_event_work {
- struct work_struct work;
+struct mlxsw_sp_fib_event {
+ struct list_head list; /* node in fib queue */
union {
- struct mlxsw_sp_fib6_event_work fib6_work;
+ struct mlxsw_sp_fib6_event fib6_event;
struct fib_entry_notifier_info fen_info;
struct fib_rule_notifier_info fr_info;
struct fib_nh_notifier_info fnh_info;
@@ -5918,11 +6893,12 @@ struct mlxsw_sp_fib_event_work {
};
struct mlxsw_sp *mlxsw_sp;
unsigned long event;
+ int family;
};
static int
-mlxsw_sp_router_fib6_work_init(struct mlxsw_sp_fib6_event_work *fib6_work,
- struct fib6_entry_notifier_info *fen6_info)
+mlxsw_sp_router_fib6_event_init(struct mlxsw_sp_fib6_event *fib6_event,
+ struct fib6_entry_notifier_info *fen6_info)
{
struct fib6_info *rt = fen6_info->rt;
struct fib6_info **rt_arr;
@@ -5936,8 +6912,8 @@ mlxsw_sp_router_fib6_work_init(struct mlxsw_sp_fib6_event_work *fib6_work,
if (!rt_arr)
return -ENOMEM;
- fib6_work->rt_arr = rt_arr;
- fib6_work->nrt6 = nrt6;
+ fib6_event->rt_arr = rt_arr;
+ fib6_event->nrt6 = nrt6;
rt_arr[0] = rt;
fib6_info_hold(rt);
@@ -5959,170 +6935,232 @@ mlxsw_sp_router_fib6_work_init(struct mlxsw_sp_fib6_event_work *fib6_work,
}
static void
-mlxsw_sp_router_fib6_work_fini(struct mlxsw_sp_fib6_event_work *fib6_work)
+mlxsw_sp_router_fib6_event_fini(struct mlxsw_sp_fib6_event *fib6_event)
{
int i;
- for (i = 0; i < fib6_work->nrt6; i++)
- mlxsw_sp_rt6_release(fib6_work->rt_arr[i]);
- kfree(fib6_work->rt_arr);
+ for (i = 0; i < fib6_event->nrt6; i++)
+ mlxsw_sp_rt6_release(fib6_event->rt_arr[i]);
+ kfree(fib6_event->rt_arr);
}
-static void mlxsw_sp_router_fib4_event_work(struct work_struct *work)
+static void mlxsw_sp_router_fib4_event_process(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
+ struct mlxsw_sp_fib_event *fib_event)
{
- struct mlxsw_sp_fib_event_work *fib_work =
- container_of(work, struct mlxsw_sp_fib_event_work, work);
- struct mlxsw_sp *mlxsw_sp = fib_work->mlxsw_sp;
int err;
- mutex_lock(&mlxsw_sp->router->lock);
mlxsw_sp_span_respin(mlxsw_sp);
- switch (fib_work->event) {
+ switch (fib_event->event) {
case FIB_EVENT_ENTRY_REPLACE:
- err = mlxsw_sp_router_fib4_replace(mlxsw_sp,
- &fib_work->fen_info);
- if (err)
+ err = mlxsw_sp_router_fib4_replace(mlxsw_sp, op_ctx, &fib_event->fen_info);
+ if (err) {
+ mlxsw_sp_fib_entry_op_ctx_priv_put_all(op_ctx);
mlxsw_sp_router_fib_abort(mlxsw_sp);
- fib_info_put(fib_work->fen_info.fi);
+ }
+ fib_info_put(fib_event->fen_info.fi);
break;
case FIB_EVENT_ENTRY_DEL:
- mlxsw_sp_router_fib4_del(mlxsw_sp, &fib_work->fen_info);
- fib_info_put(fib_work->fen_info.fi);
+ err = mlxsw_sp_router_fib4_del(mlxsw_sp, op_ctx, &fib_event->fen_info);
+ if (err)
+ mlxsw_sp_fib_entry_op_ctx_priv_put_all(op_ctx);
+ fib_info_put(fib_event->fen_info.fi);
break;
case FIB_EVENT_NH_ADD:
case FIB_EVENT_NH_DEL:
- mlxsw_sp_nexthop4_event(mlxsw_sp, fib_work->event,
- fib_work->fnh_info.fib_nh);
- fib_info_put(fib_work->fnh_info.fib_nh->nh_parent);
+ mlxsw_sp_nexthop4_event(mlxsw_sp, fib_event->event, fib_event->fnh_info.fib_nh);
+ fib_info_put(fib_event->fnh_info.fib_nh->nh_parent);
break;
}
- mutex_unlock(&mlxsw_sp->router->lock);
- kfree(fib_work);
}
-static void mlxsw_sp_router_fib6_event_work(struct work_struct *work)
+static void mlxsw_sp_router_fib6_event_process(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
+ struct mlxsw_sp_fib_event *fib_event)
{
- struct mlxsw_sp_fib_event_work *fib_work =
- container_of(work, struct mlxsw_sp_fib_event_work, work);
- struct mlxsw_sp *mlxsw_sp = fib_work->mlxsw_sp;
int err;
- mutex_lock(&mlxsw_sp->router->lock);
mlxsw_sp_span_respin(mlxsw_sp);
- switch (fib_work->event) {
+ switch (fib_event->event) {
case FIB_EVENT_ENTRY_REPLACE:
- err = mlxsw_sp_router_fib6_replace(mlxsw_sp,
- fib_work->fib6_work.rt_arr,
- fib_work->fib6_work.nrt6);
- if (err)
+ err = mlxsw_sp_router_fib6_replace(mlxsw_sp, op_ctx, fib_event->fib6_event.rt_arr,
+ fib_event->fib6_event.nrt6);
+ if (err) {
+ mlxsw_sp_fib_entry_op_ctx_priv_put_all(op_ctx);
mlxsw_sp_router_fib_abort(mlxsw_sp);
- mlxsw_sp_router_fib6_work_fini(&fib_work->fib6_work);
+ }
+ mlxsw_sp_router_fib6_event_fini(&fib_event->fib6_event);
break;
case FIB_EVENT_ENTRY_APPEND:
- err = mlxsw_sp_router_fib6_append(mlxsw_sp,
- fib_work->fib6_work.rt_arr,
- fib_work->fib6_work.nrt6);
- if (err)
+ err = mlxsw_sp_router_fib6_append(mlxsw_sp, op_ctx, fib_event->fib6_event.rt_arr,
+ fib_event->fib6_event.nrt6);
+ if (err) {
+ mlxsw_sp_fib_entry_op_ctx_priv_put_all(op_ctx);
mlxsw_sp_router_fib_abort(mlxsw_sp);
- mlxsw_sp_router_fib6_work_fini(&fib_work->fib6_work);
+ }
+ mlxsw_sp_router_fib6_event_fini(&fib_event->fib6_event);
break;
case FIB_EVENT_ENTRY_DEL:
- mlxsw_sp_router_fib6_del(mlxsw_sp,
- fib_work->fib6_work.rt_arr,
- fib_work->fib6_work.nrt6);
- mlxsw_sp_router_fib6_work_fini(&fib_work->fib6_work);
+ err = mlxsw_sp_router_fib6_del(mlxsw_sp, op_ctx, fib_event->fib6_event.rt_arr,
+ fib_event->fib6_event.nrt6);
+ if (err)
+ mlxsw_sp_fib_entry_op_ctx_priv_put_all(op_ctx);
+ mlxsw_sp_router_fib6_event_fini(&fib_event->fib6_event);
break;
}
- mutex_unlock(&mlxsw_sp->router->lock);
- kfree(fib_work);
}
-static void mlxsw_sp_router_fibmr_event_work(struct work_struct *work)
+static void mlxsw_sp_router_fibmr_event_process(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_fib_event *fib_event)
{
- struct mlxsw_sp_fib_event_work *fib_work =
- container_of(work, struct mlxsw_sp_fib_event_work, work);
- struct mlxsw_sp *mlxsw_sp = fib_work->mlxsw_sp;
bool replace;
int err;
rtnl_lock();
mutex_lock(&mlxsw_sp->router->lock);
- switch (fib_work->event) {
+ switch (fib_event->event) {
case FIB_EVENT_ENTRY_REPLACE:
case FIB_EVENT_ENTRY_ADD:
- replace = fib_work->event == FIB_EVENT_ENTRY_REPLACE;
+ replace = fib_event->event == FIB_EVENT_ENTRY_REPLACE;
- err = mlxsw_sp_router_fibmr_add(mlxsw_sp, &fib_work->men_info,
- replace);
+ err = mlxsw_sp_router_fibmr_add(mlxsw_sp, &fib_event->men_info, replace);
if (err)
mlxsw_sp_router_fib_abort(mlxsw_sp);
- mr_cache_put(fib_work->men_info.mfc);
+ mr_cache_put(fib_event->men_info.mfc);
break;
case FIB_EVENT_ENTRY_DEL:
- mlxsw_sp_router_fibmr_del(mlxsw_sp, &fib_work->men_info);
- mr_cache_put(fib_work->men_info.mfc);
+ mlxsw_sp_router_fibmr_del(mlxsw_sp, &fib_event->men_info);
+ mr_cache_put(fib_event->men_info.mfc);
break;
case FIB_EVENT_VIF_ADD:
err = mlxsw_sp_router_fibmr_vif_add(mlxsw_sp,
- &fib_work->ven_info);
+ &fib_event->ven_info);
if (err)
mlxsw_sp_router_fib_abort(mlxsw_sp);
- dev_put(fib_work->ven_info.dev);
+ dev_put(fib_event->ven_info.dev);
break;
case FIB_EVENT_VIF_DEL:
- mlxsw_sp_router_fibmr_vif_del(mlxsw_sp,
- &fib_work->ven_info);
- dev_put(fib_work->ven_info.dev);
+ mlxsw_sp_router_fibmr_vif_del(mlxsw_sp, &fib_event->ven_info);
+ dev_put(fib_event->ven_info.dev);
break;
}
mutex_unlock(&mlxsw_sp->router->lock);
rtnl_unlock();
- kfree(fib_work);
}
-static void mlxsw_sp_router_fib4_event(struct mlxsw_sp_fib_event_work *fib_work,
+static void mlxsw_sp_router_fib_event_work(struct work_struct *work)
+{
+ struct mlxsw_sp_router *router = container_of(work, struct mlxsw_sp_router, fib_event_work);
+ struct mlxsw_sp_fib_entry_op_ctx *op_ctx = router->ll_op_ctx;
+ struct mlxsw_sp *mlxsw_sp = router->mlxsw_sp;
+ struct mlxsw_sp_fib_event *next_fib_event;
+ struct mlxsw_sp_fib_event *fib_event;
+ int last_family = AF_UNSPEC;
+ LIST_HEAD(fib_event_queue);
+
+ spin_lock_bh(&router->fib_event_queue_lock);
+ list_splice_init(&router->fib_event_queue, &fib_event_queue);
+ spin_unlock_bh(&router->fib_event_queue_lock);
+
+	/* The router lock is held here to make sure the per-instance
+	 * operation context is not used concurrently while FIB4/6
+	 * events are processed.
+	 */
+ mutex_lock(&router->lock);
+ mlxsw_sp_fib_entry_op_ctx_clear(op_ctx);
+ list_for_each_entry_safe(fib_event, next_fib_event,
+ &fib_event_queue, list) {
+		/* Check whether the next entry in the queue exists and is
+		 * of the same type (family and event) as the current one.
+ * In that case it is permitted to do the bulking
+ * of multiple FIB entries to a single register write.
+ */
+ op_ctx->bulk_ok = !list_is_last(&fib_event->list, &fib_event_queue) &&
+ fib_event->family == next_fib_event->family &&
+ fib_event->event == next_fib_event->event;
+
+		/* If the family of this entry differs from the previous one's,
+		 * the context needs to be reinitialized now; indicate that.
+		 * Note that since last_family is initialized to AF_UNSPEC,
+		 * this always happens for the first entry processed in the work.
+		 */
+ if (fib_event->family != last_family)
+ op_ctx->initialized = false;
+
+ switch (fib_event->family) {
+ case AF_INET:
+ mlxsw_sp_router_fib4_event_process(mlxsw_sp, op_ctx,
+ fib_event);
+ break;
+ case AF_INET6:
+ mlxsw_sp_router_fib6_event_process(mlxsw_sp, op_ctx,
+ fib_event);
+ break;
+ case RTNL_FAMILY_IP6MR:
+ case RTNL_FAMILY_IPMR:
+ /* Unlock here as inside FIBMR the lock is taken again
+ * under RTNL. The per-instance operation context
+ * is not used by FIBMR.
+ */
+ mutex_unlock(&router->lock);
+ mlxsw_sp_router_fibmr_event_process(mlxsw_sp,
+ fib_event);
+ mutex_lock(&router->lock);
+ break;
+ default:
+ WARN_ON_ONCE(1);
+ }
+ last_family = fib_event->family;
+ kfree(fib_event);
+ cond_resched();
+ }
+ WARN_ON_ONCE(!list_empty(&router->ll_op_ctx->fib_entry_priv_list));
+ mutex_unlock(&router->lock);
+}
+
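For readers skimming the hunk above: the work item decides per event whether the low-level op may bulk it with the next queued one. A minimal userspace sketch of that lookahead decision, using illustrative stand-in types (fake_event, process_queue) rather than the mlxsw structures:

/* Lookahead bulking sketch: an event may be bulked only when the next
 * queued event has the same family and event type; a family change
 * additionally forces a context reinitialization. */
#include <stdbool.h>
#include <stdio.h>

struct fake_event {
	int family;
	int event;
	struct fake_event *next;	/* singly linked queue */
};

static void process_queue(const struct fake_event *head)
{
	int last_family = -1;	/* AF_UNSPEC analogue */

	for (const struct fake_event *e = head; e; e = e->next) {
		bool bulk_ok = e->next &&
			       e->next->family == e->family &&
			       e->next->event == e->event;
		bool reinit = e->family != last_family;

		printf("family=%d event=%d bulk_ok=%d reinit=%d\n",
		       e->family, e->event, bulk_ok, reinit);
		last_family = e->family;
	}
}

int main(void)
{
	struct fake_event c = { 10, 24, NULL };
	struct fake_event b = { 2, 24, &c };
	struct fake_event a = { 2, 24, &b };

	process_queue(&a);	/* a bulks with b; b does not bulk with c */
	return 0;
}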
+static void mlxsw_sp_router_fib4_event(struct mlxsw_sp_fib_event *fib_event,
struct fib_notifier_info *info)
{
struct fib_entry_notifier_info *fen_info;
struct fib_nh_notifier_info *fnh_info;
- switch (fib_work->event) {
+ switch (fib_event->event) {
case FIB_EVENT_ENTRY_REPLACE:
case FIB_EVENT_ENTRY_DEL:
fen_info = container_of(info, struct fib_entry_notifier_info,
info);
- fib_work->fen_info = *fen_info;
+ fib_event->fen_info = *fen_info;
/* Take reference on fib_info to prevent it from being
- * freed while work is queued. Release it afterwards.
+ * freed while event is queued. Release it afterwards.
*/
- fib_info_hold(fib_work->fen_info.fi);
+ fib_info_hold(fib_event->fen_info.fi);
break;
case FIB_EVENT_NH_ADD:
case FIB_EVENT_NH_DEL:
fnh_info = container_of(info, struct fib_nh_notifier_info,
info);
- fib_work->fnh_info = *fnh_info;
- fib_info_hold(fib_work->fnh_info.fib_nh->nh_parent);
+ fib_event->fnh_info = *fnh_info;
+ fib_info_hold(fib_event->fnh_info.fib_nh->nh_parent);
break;
}
}
-static int mlxsw_sp_router_fib6_event(struct mlxsw_sp_fib_event_work *fib_work,
+static int mlxsw_sp_router_fib6_event(struct mlxsw_sp_fib_event *fib_event,
struct fib_notifier_info *info)
{
struct fib6_entry_notifier_info *fen6_info;
int err;
- switch (fib_work->event) {
+ switch (fib_event->event) {
case FIB_EVENT_ENTRY_REPLACE:
case FIB_EVENT_ENTRY_APPEND:
case FIB_EVENT_ENTRY_DEL:
fen6_info = container_of(info, struct fib6_entry_notifier_info,
info);
- err = mlxsw_sp_router_fib6_work_init(&fib_work->fib6_work,
- fen6_info);
+ err = mlxsw_sp_router_fib6_event_init(&fib_event->fib6_event,
+ fen6_info);
if (err)
return err;
break;
@@ -6132,20 +7170,20 @@ static int mlxsw_sp_router_fib6_event(struct mlxsw_sp_fib_event_work *fib_work,
}
static void
-mlxsw_sp_router_fibmr_event(struct mlxsw_sp_fib_event_work *fib_work,
+mlxsw_sp_router_fibmr_event(struct mlxsw_sp_fib_event *fib_event,
struct fib_notifier_info *info)
{
- switch (fib_work->event) {
+ switch (fib_event->event) {
case FIB_EVENT_ENTRY_REPLACE:
case FIB_EVENT_ENTRY_ADD:
case FIB_EVENT_ENTRY_DEL:
- memcpy(&fib_work->men_info, info, sizeof(fib_work->men_info));
- mr_cache_hold(fib_work->men_info.mfc);
+ memcpy(&fib_event->men_info, info, sizeof(fib_event->men_info));
+ mr_cache_hold(fib_event->men_info.mfc);
break;
case FIB_EVENT_VIF_ADD:
case FIB_EVENT_VIF_DEL:
- memcpy(&fib_work->ven_info, info, sizeof(fib_work->ven_info));
- dev_hold(fib_work->ven_info.dev);
+ memcpy(&fib_event->ven_info, info, sizeof(fib_event->ven_info));
+ dev_hold(fib_event->ven_info.dev);
break;
}
}
@@ -6202,7 +7240,7 @@ static int mlxsw_sp_router_fib_rule_event(unsigned long event,
static int mlxsw_sp_router_fib_event(struct notifier_block *nb,
unsigned long event, void *ptr)
{
- struct mlxsw_sp_fib_event_work *fib_work;
+ struct mlxsw_sp_fib_event *fib_event;
struct fib_notifier_info *info = ptr;
struct mlxsw_sp_router *router;
int err;
@@ -6234,55 +7272,43 @@ static int mlxsw_sp_router_fib_event(struct notifier_block *nb,
NL_SET_ERR_MSG_MOD(info->extack, "IPv6 gateway with IPv4 route is not supported");
return notifier_from_errno(-EINVAL);
}
- if (fen_info->fi->nh) {
- NL_SET_ERR_MSG_MOD(info->extack, "IPv4 route with nexthop objects is not supported");
- return notifier_from_errno(-EINVAL);
- }
- } else if (info->family == AF_INET6) {
- struct fib6_entry_notifier_info *fen6_info;
-
- fen6_info = container_of(info,
- struct fib6_entry_notifier_info,
- info);
- if (fen6_info->rt->nh) {
- NL_SET_ERR_MSG_MOD(info->extack, "IPv6 route with nexthop objects is not supported");
- return notifier_from_errno(-EINVAL);
- }
}
break;
}
- fib_work = kzalloc(sizeof(*fib_work), GFP_ATOMIC);
- if (!fib_work)
+ fib_event = kzalloc(sizeof(*fib_event), GFP_ATOMIC);
+ if (!fib_event)
return NOTIFY_BAD;
- fib_work->mlxsw_sp = router->mlxsw_sp;
- fib_work->event = event;
+ fib_event->mlxsw_sp = router->mlxsw_sp;
+ fib_event->event = event;
+ fib_event->family = info->family;
switch (info->family) {
case AF_INET:
- INIT_WORK(&fib_work->work, mlxsw_sp_router_fib4_event_work);
- mlxsw_sp_router_fib4_event(fib_work, info);
+ mlxsw_sp_router_fib4_event(fib_event, info);
break;
case AF_INET6:
- INIT_WORK(&fib_work->work, mlxsw_sp_router_fib6_event_work);
- err = mlxsw_sp_router_fib6_event(fib_work, info);
+ err = mlxsw_sp_router_fib6_event(fib_event, info);
if (err)
goto err_fib_event;
break;
case RTNL_FAMILY_IP6MR:
case RTNL_FAMILY_IPMR:
- INIT_WORK(&fib_work->work, mlxsw_sp_router_fibmr_event_work);
- mlxsw_sp_router_fibmr_event(fib_work, info);
+ mlxsw_sp_router_fibmr_event(fib_event, info);
break;
}
- mlxsw_core_schedule_work(&fib_work->work);
+ /* Enqueue the event and trigger the work */
+ spin_lock_bh(&router->fib_event_queue_lock);
+ list_add_tail(&fib_event->list, &router->fib_event_queue);
+ spin_unlock_bh(&router->fib_event_queue_lock);
+ mlxsw_core_schedule_work(&router->fib_event_work);
return NOTIFY_DONE;
err_fib_event:
- kfree(fib_work);
+ kfree(fib_event);
return NOTIFY_BAD;
}
@@ -6831,6 +7857,15 @@ static int mlxsw_sp_inetaddr_bridge_event(struct mlxsw_sp *mlxsw_sp,
switch (event) {
case NETDEV_UP:
+ if (netif_is_bridge_master(l3_dev) && br_vlan_enabled(l3_dev)) {
+ u16 proto;
+
+ br_vlan_get_proto(l3_dev, &proto);
+ if (proto == ETH_P_8021AD) {
+ NL_SET_ERR_MSG_MOD(extack, "Adding an IP address to 802.1ad bridge is not supported");
+ return -EOPNOTSUPP;
+ }
+ }
rif = mlxsw_sp_rif_create(mlxsw_sp, &params, extack);
if (IS_ERR(rif))
return PTR_ERR(rif);
@@ -8057,6 +9092,69 @@ static void __mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp)
mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rgcr), rgcr_pl);
}
+static const struct mlxsw_sp_router_ll_ops mlxsw_sp_router_ll_basic_ops = {
+ .ralta_write = mlxsw_sp_router_ll_basic_ralta_write,
+ .ralst_write = mlxsw_sp_router_ll_basic_ralst_write,
+ .raltb_write = mlxsw_sp_router_ll_basic_raltb_write,
+ .fib_entry_op_ctx_size = sizeof(struct mlxsw_sp_fib_entry_op_ctx_basic),
+ .fib_entry_pack = mlxsw_sp_router_ll_basic_fib_entry_pack,
+ .fib_entry_act_remote_pack = mlxsw_sp_router_ll_basic_fib_entry_act_remote_pack,
+ .fib_entry_act_local_pack = mlxsw_sp_router_ll_basic_fib_entry_act_local_pack,
+ .fib_entry_act_ip2me_pack = mlxsw_sp_router_ll_basic_fib_entry_act_ip2me_pack,
+ .fib_entry_act_ip2me_tun_pack = mlxsw_sp_router_ll_basic_fib_entry_act_ip2me_tun_pack,
+ .fib_entry_commit = mlxsw_sp_router_ll_basic_fib_entry_commit,
+ .fib_entry_is_committed = mlxsw_sp_router_ll_basic_fib_entry_is_committed,
+};
+
+static int mlxsw_sp_router_ll_op_ctx_init(struct mlxsw_sp_router *router)
+{
+ size_t max_size = 0;
+ int i;
+
+ for (i = 0; i < MLXSW_SP_L3_PROTO_MAX; i++) {
+ size_t size = router->proto_ll_ops[i]->fib_entry_op_ctx_size;
+
+ if (size > max_size)
+ max_size = size;
+ }
+ router->ll_op_ctx = kzalloc(sizeof(*router->ll_op_ctx) + max_size,
+ GFP_KERNEL);
+ if (!router->ll_op_ctx)
+ return -ENOMEM;
+ INIT_LIST_HEAD(&router->ll_op_ctx->fib_entry_priv_list);
+ return 0;
+}
+
+static void mlxsw_sp_router_ll_op_ctx_fini(struct mlxsw_sp_router *router)
+{
+ WARN_ON(!list_empty(&router->ll_op_ctx->fib_entry_priv_list));
+ kfree(router->ll_op_ctx);
+}
+
+static int mlxsw_sp_lb_rif_init(struct mlxsw_sp *mlxsw_sp)
+{
+ u16 lb_rif_index;
+ int err;
+
+ /* Create a generic loopback RIF associated with the main table
+ * (default VRF). Any table can be used, but the main table exists
+ * anyway, so we do not waste resources.
+ */
+ err = mlxsw_sp_router_ul_rif_get(mlxsw_sp, RT_TABLE_MAIN,
+ &lb_rif_index);
+ if (err)
+ return err;
+
+ mlxsw_sp->router->lb_rif_index = lb_rif_index;
+
+ return 0;
+}
+
+static void mlxsw_sp_lb_rif_fini(struct mlxsw_sp *mlxsw_sp)
+{
+ mlxsw_sp_router_ul_rif_put(mlxsw_sp, mlxsw_sp->router->lb_rif_index);
+}
+
int mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp,
struct netlink_ext_ack *extack)
{
@@ -8070,6 +9168,13 @@ int mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp,
mlxsw_sp->router = router;
router->mlxsw_sp = mlxsw_sp;
+ router->proto_ll_ops[MLXSW_SP_L3_PROTO_IPV4] = &mlxsw_sp_router_ll_basic_ops;
+ router->proto_ll_ops[MLXSW_SP_L3_PROTO_IPV6] = &mlxsw_sp_router_ll_basic_ops;
+
+ err = mlxsw_sp_router_ll_op_ctx_init(router);
+ if (err)
+ goto err_ll_op_ctx_init;
+
INIT_LIST_HEAD(&mlxsw_sp->router->nexthop_neighs_list);
err = __mlxsw_sp_router_init(mlxsw_sp);
if (err)
@@ -8106,6 +9211,10 @@ int mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp,
if (err)
goto err_vrs_init;
+ err = mlxsw_sp_lb_rif_init(mlxsw_sp);
+ if (err)
+ goto err_lb_rif_init;
+
err = mlxsw_sp_neigh_init(mlxsw_sp);
if (err)
goto err_neigh_init;
@@ -8118,6 +9227,10 @@ int mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp,
if (err)
goto err_dscp_init;
+ INIT_WORK(&router->fib_event_work, mlxsw_sp_router_fib_event_work);
+ INIT_LIST_HEAD(&router->fib_event_queue);
+ spin_lock_init(&router->fib_event_queue_lock);
+
router->inetaddr_nb.notifier_call = mlxsw_sp_inetaddr_event;
err = register_inetaddr_notifier(&router->inetaddr_nb);
if (err)
@@ -8134,6 +9247,14 @@ int mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp,
if (err)
goto err_register_netevent_notifier;
+ mlxsw_sp->router->nexthop_nb.notifier_call =
+ mlxsw_sp_nexthop_obj_event;
+ err = register_nexthop_notifier(mlxsw_sp_net(mlxsw_sp),
+ &mlxsw_sp->router->nexthop_nb,
+ extack);
+ if (err)
+ goto err_register_nexthop_notifier;
+
mlxsw_sp->router->fib_nb.notifier_call = mlxsw_sp_router_fib_event;
err = register_fib_notifier(mlxsw_sp_net(mlxsw_sp),
&mlxsw_sp->router->fib_nb,
@@ -8144,6 +9265,9 @@ int mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp,
return 0;
err_register_fib_notifier:
+ unregister_nexthop_notifier(mlxsw_sp_net(mlxsw_sp),
+ &mlxsw_sp->router->nexthop_nb);
+err_register_nexthop_notifier:
unregister_netevent_notifier(&mlxsw_sp->router->netevent_nb);
err_register_netevent_notifier:
unregister_inet6addr_notifier(&router->inet6addr_nb);
@@ -8151,10 +9275,13 @@ err_register_inet6addr_notifier:
unregister_inetaddr_notifier(&router->inetaddr_nb);
err_register_inetaddr_notifier:
mlxsw_core_flush_owq();
+ WARN_ON(!list_empty(&router->fib_event_queue));
err_dscp_init:
err_mp_hash_init:
mlxsw_sp_neigh_fini(mlxsw_sp);
err_neigh_init:
+ mlxsw_sp_lb_rif_fini(mlxsw_sp);
+err_lb_rif_init:
mlxsw_sp_vrs_fini(mlxsw_sp);
err_vrs_init:
mlxsw_sp_mr_fini(mlxsw_sp);
@@ -8171,6 +9298,8 @@ err_ipips_init:
err_rifs_init:
__mlxsw_sp_router_fini(mlxsw_sp);
err_router_init:
+ mlxsw_sp_router_ll_op_ctx_fini(router);
+err_ll_op_ctx_init:
mutex_destroy(&mlxsw_sp->router->lock);
kfree(mlxsw_sp->router);
return err;
@@ -8180,11 +9309,15 @@ void mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp)
{
unregister_fib_notifier(mlxsw_sp_net(mlxsw_sp),
&mlxsw_sp->router->fib_nb);
+ unregister_nexthop_notifier(mlxsw_sp_net(mlxsw_sp),
+ &mlxsw_sp->router->nexthop_nb);
unregister_netevent_notifier(&mlxsw_sp->router->netevent_nb);
unregister_inet6addr_notifier(&mlxsw_sp->router->inet6addr_nb);
unregister_inetaddr_notifier(&mlxsw_sp->router->inetaddr_nb);
mlxsw_core_flush_owq();
+ WARN_ON(!list_empty(&mlxsw_sp->router->fib_event_queue));
mlxsw_sp_neigh_fini(mlxsw_sp);
+ mlxsw_sp_lb_rif_fini(mlxsw_sp);
mlxsw_sp_vrs_fini(mlxsw_sp);
mlxsw_sp_mr_fini(mlxsw_sp);
mlxsw_sp_lpm_fini(mlxsw_sp);
@@ -8193,6 +9326,7 @@ void mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp)
mlxsw_sp_ipips_fini(mlxsw_sp);
mlxsw_sp_rifs_fini(mlxsw_sp);
__mlxsw_sp_router_fini(mlxsw_sp);
+ mlxsw_sp_router_ll_op_ctx_fini(mlxsw_sp->router);
mutex_destroy(&mlxsw_sp->router->lock);
kfree(mlxsw_sp->router);
}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h
index 8418dc3ae967..96d8bf7a9a67 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h
@@ -15,6 +15,26 @@ struct mlxsw_sp_router_nve_decap {
u8 valid:1;
};
+struct mlxsw_sp_fib_entry_op_ctx {
+	u8 bulk_ok:1, /* Indicates to the low-level op that it is ok to
+		       * bulk this entry with the next one in the queue.
+		       */
+	   initialized:1; /* Set by the low-level op once the context
+			   * priv has been initialized.
+			   */
+ struct list_head fib_entry_priv_list;
+ unsigned long ll_priv[];
+};
+
+static inline void
+mlxsw_sp_fib_entry_op_ctx_clear(struct mlxsw_sp_fib_entry_op_ctx *op_ctx)
+{
+ WARN_ON_ONCE(!list_empty(&op_ctx->fib_entry_priv_list));
+ memset(op_ctx, 0, sizeof(*op_ctx));
+ INIT_LIST_HEAD(&op_ctx->fib_entry_priv_list);
+}
+
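The ll_priv[] flexible array above gives each low-level implementation its own scratch area at the tail of the shared context. A standalone sketch of that pattern, sized for the largest per-protocol context as in mlxsw_sp_router_ll_op_ctx_init(); op_ctx_basic here is a hypothetical stand-in, not the mlxsw type:

/* Sketch: size the context for the largest per-protocol private area
 * and let the low-level code view ll_priv[] as its own type. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct op_ctx {
	unsigned int bulk_ok:1, initialized:1;
	unsigned long ll_priv[];	/* per-ll-ops private storage */
};

struct op_ctx_basic {	/* hypothetical low-level private state */
	char ralue_pl[64];
};

int main(void)
{
	size_t sizes[] = { sizeof(struct op_ctx_basic), 16 };
	size_t max = 0;

	for (size_t i = 0; i < 2; i++)
		if (sizes[i] > max)
			max = sizes[i];

	struct op_ctx *ctx = calloc(1, sizeof(*ctx) + max);
	if (!ctx)
		return 1;

	/* Low-level code reinterprets the trailing storage. */
	struct op_ctx_basic *basic = (struct op_ctx_basic *)ctx->ll_priv;
	memset(basic->ralue_pl, 0, sizeof(basic->ralue_pl));
	ctx->initialized = 1;

	printf("ctx priv area: %zu bytes\n", max);
	free(ctx);
	return 0;
}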
struct mlxsw_sp_router {
struct mlxsw_sp *mlxsw_sp;
struct mlxsw_sp_rif **rifs;
@@ -38,6 +58,7 @@ struct mlxsw_sp_router {
struct list_head nexthop_neighs_list;
struct list_head ipip_list;
bool aborted;
+ struct notifier_block nexthop_nb;
struct notifier_block fib_nb;
struct notifier_block netevent_nb;
struct notifier_block inetaddr_nb;
@@ -48,8 +69,59 @@ struct mlxsw_sp_router {
bool adj_discard_index_valid;
struct mlxsw_sp_router_nve_decap nve_decap_config;
struct mutex lock; /* Protects shared router resources */
+ struct work_struct fib_event_work;
+ struct list_head fib_event_queue;
+ spinlock_t fib_event_queue_lock; /* Protects fib event queue list */
+ /* One set of ops for each protocol: IPv4 and IPv6 */
+ const struct mlxsw_sp_router_ll_ops *proto_ll_ops[MLXSW_SP_L3_PROTO_MAX];
+ struct mlxsw_sp_fib_entry_op_ctx *ll_op_ctx;
+ u16 lb_rif_index;
+};
+
+struct mlxsw_sp_fib_entry_priv {
+ refcount_t refcnt;
+ struct list_head list; /* Member in op_ctx->fib_entry_priv_list */
+ unsigned long priv[];
+};
+
+enum mlxsw_sp_fib_entry_op {
+ MLXSW_SP_FIB_ENTRY_OP_WRITE,
+ MLXSW_SP_FIB_ENTRY_OP_UPDATE,
+ MLXSW_SP_FIB_ENTRY_OP_DELETE,
};
+/* Low-level router ops, used to handle the different register sets
+ * for working with ordinary and XM trees and FIB entries.
+ */
+struct mlxsw_sp_router_ll_ops {
+ int (*ralta_write)(struct mlxsw_sp *mlxsw_sp, char *xralta_pl);
+ int (*ralst_write)(struct mlxsw_sp *mlxsw_sp, char *xralst_pl);
+ int (*raltb_write)(struct mlxsw_sp *mlxsw_sp, char *xraltb_pl);
+ size_t fib_entry_op_ctx_size;
+ size_t fib_entry_priv_size;
+ void (*fib_entry_pack)(struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
+ enum mlxsw_sp_l3proto proto, enum mlxsw_sp_fib_entry_op op,
+ u16 virtual_router, u8 prefix_len, unsigned char *addr,
+ struct mlxsw_sp_fib_entry_priv *priv);
+ void (*fib_entry_act_remote_pack)(struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
+ enum mlxsw_reg_ralue_trap_action trap_action,
+ u16 trap_id, u32 adjacency_index, u16 ecmp_size);
+ void (*fib_entry_act_local_pack)(struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
+ enum mlxsw_reg_ralue_trap_action trap_action,
+ u16 trap_id, u16 local_erif);
+ void (*fib_entry_act_ip2me_pack)(struct mlxsw_sp_fib_entry_op_ctx *op_ctx);
+ void (*fib_entry_act_ip2me_tun_pack)(struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
+ u32 tunnel_ptr);
+ int (*fib_entry_commit)(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
+ bool *postponed_for_bulk);
+ bool (*fib_entry_is_committed)(struct mlxsw_sp_fib_entry_priv *priv);
+};
+
+int mlxsw_sp_fib_entry_commit(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
+ const struct mlxsw_sp_router_ll_ops *ll_ops);
+
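The ops table splits every FIB entry update into a pack step (fill the context) and a commit step (flush it to hardware, possibly postponed for bulking). A toy sketch of a caller driving such a table; the types and ops instance are illustrative, not the mlxsw API:

/* Pack-then-commit dispatch through an ops table. */
#include <stdbool.h>
#include <stdio.h>

struct ctx { int packed; };

struct ll_ops {
	void (*fib_entry_pack)(struct ctx *ctx, int vr, int prefix_len);
	int (*fib_entry_commit)(struct ctx *ctx, bool *postponed);
};

static void basic_pack(struct ctx *ctx, int vr, int prefix_len)
{
	printf("pack vr=%d plen=%d\n", vr, prefix_len);
	ctx->packed = 1;
}

static int basic_commit(struct ctx *ctx, bool *postponed)
{
	if (postponed)
		*postponed = false;	/* no bulking in this toy version */
	printf("commit (packed=%d)\n", ctx->packed);
	return 0;
}

static const struct ll_ops basic_ops = {
	.fib_entry_pack = basic_pack,
	.fib_entry_commit = basic_commit,
};

int main(void)
{
	struct ctx ctx = { 0 };

	/* The caller never touches registers directly; it only packs the
	 * entry into the context and asks the ops to commit it. */
	basic_ops.fib_entry_pack(&ctx, 1, 24);
	return basic_ops.fib_entry_commit(&ctx, NULL);
}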
struct mlxsw_sp_rif_ipip_lb;
struct mlxsw_sp_rif_ipip_lb_config {
enum mlxsw_reg_ritr_loopback_ipip_type lb_ipipt;
@@ -129,6 +201,7 @@ int mlxsw_sp_nexthop_indexes(struct mlxsw_sp_nexthop *nh, u32 *p_adj_index,
u32 *p_adj_size, u32 *p_adj_hash_index);
struct mlxsw_sp_rif *mlxsw_sp_nexthop_rif(struct mlxsw_sp_nexthop *nh);
bool mlxsw_sp_nexthop_group_has_ipip(struct mlxsw_sp_nexthop *nh);
+bool mlxsw_sp_nexthop_is_discard(const struct mlxsw_sp_nexthop *nh);
#define mlxsw_sp_nexthop_for_each(nh, router) \
for (nh = mlxsw_sp_nexthop_next(router, NULL); nh; \
nh = mlxsw_sp_nexthop_next(router, nh))
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
index 6501ce94ace5..9c4e17607e6a 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
@@ -41,6 +41,7 @@ struct mlxsw_sp_bridge {
DECLARE_BITMAP(mids_bitmap, MLXSW_SP_MID_MAX);
const struct mlxsw_sp_bridge_ops *bridge_8021q_ops;
const struct mlxsw_sp_bridge_ops *bridge_8021d_ops;
+ const struct mlxsw_sp_bridge_ops *bridge_8021ad_ops;
};
struct mlxsw_sp_bridge_device {
@@ -228,8 +229,14 @@ mlxsw_sp_bridge_device_create(struct mlxsw_sp_bridge *bridge,
bridge_device->mrouter = br_multicast_router(br_dev);
INIT_LIST_HEAD(&bridge_device->ports_list);
if (vlan_enabled) {
+ u16 proto;
+
bridge->vlan_enabled_exists = true;
- bridge_device->ops = bridge->bridge_8021q_ops;
+ br_vlan_get_proto(br_dev, &proto);
+ if (proto == ETH_P_8021AD)
+ bridge_device->ops = bridge->bridge_8021ad_ops;
+ else
+ bridge_device->ops = bridge->bridge_8021q_ops;
} else {
bridge_device->ops = bridge->bridge_8021d_ops;
}
@@ -757,6 +764,25 @@ static int mlxsw_sp_port_attr_br_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port,
return -EINVAL;
}
+static int mlxsw_sp_port_attr_br_vlan_proto_set(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct switchdev_trans *trans,
+ struct net_device *orig_dev,
+ u16 vlan_proto)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ struct mlxsw_sp_bridge_device *bridge_device;
+
+ if (!switchdev_trans_ph_prepare(trans))
+ return 0;
+
+ bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, orig_dev);
+ if (WARN_ON(!bridge_device))
+ return -EINVAL;
+
+ netdev_err(bridge_device->dev, "VLAN protocol can't be changed on existing bridge\n");
+ return -EINVAL;
+}
+
static int mlxsw_sp_port_attr_mrouter_set(struct mlxsw_sp_port *mlxsw_sp_port,
struct switchdev_trans *trans,
struct net_device *orig_dev,
@@ -926,6 +952,11 @@ static int mlxsw_sp_port_attr_set(struct net_device *dev,
attr->orig_dev,
attr->u.vlan_filtering);
break;
+ case SWITCHDEV_ATTR_ID_BRIDGE_VLAN_PROTOCOL:
+ err = mlxsw_sp_port_attr_br_vlan_proto_set(mlxsw_sp_port, trans,
+ attr->orig_dev,
+ attr->u.vlan_protocol);
+ break;
case SWITCHDEV_ATTR_ID_PORT_MROUTER:
err = mlxsw_sp_port_attr_mrouter_set(mlxsw_sp_port, trans,
attr->orig_dev,
@@ -1129,6 +1160,7 @@ mlxsw_sp_bridge_port_vlan_add(struct mlxsw_sp_port *mlxsw_sp_port,
u16 pvid = mlxsw_sp_port_pvid_determine(mlxsw_sp_port, vid, is_pvid);
struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
u16 old_pvid = mlxsw_sp_port->pvid;
+ u16 proto;
int err;
/* The only valid scenario in which a port-vlan already exists, is if
@@ -1152,7 +1184,8 @@ mlxsw_sp_bridge_port_vlan_add(struct mlxsw_sp_port *mlxsw_sp_port,
if (err)
goto err_port_vlan_set;
- err = mlxsw_sp_port_pvid_set(mlxsw_sp_port, pvid);
+ br_vlan_get_proto(bridge_port->bridge_device->dev, &proto);
+ err = mlxsw_sp_port_pvid_set(mlxsw_sp_port, pvid, proto);
if (err)
goto err_port_pvid_set;
@@ -1164,7 +1197,7 @@ mlxsw_sp_bridge_port_vlan_add(struct mlxsw_sp_port *mlxsw_sp_port,
return 0;
err_port_vlan_bridge_join:
- mlxsw_sp_port_pvid_set(mlxsw_sp_port, old_pvid);
+ mlxsw_sp_port_pvid_set(mlxsw_sp_port, old_pvid, proto);
err_port_pvid_set:
mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, false, false);
err_port_vlan_set:
@@ -1821,13 +1854,15 @@ mlxsw_sp_bridge_port_vlan_del(struct mlxsw_sp_port *mlxsw_sp_port,
{
u16 pvid = mlxsw_sp_port->pvid == vid ? 0 : mlxsw_sp_port->pvid;
struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
+ u16 proto;
mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
if (WARN_ON(!mlxsw_sp_port_vlan))
return;
mlxsw_sp_port_vlan_bridge_leave(mlxsw_sp_port_vlan);
- mlxsw_sp_port_pvid_set(mlxsw_sp_port, pvid);
+ br_vlan_get_proto(bridge_port->bridge_device->dev, &proto);
+ mlxsw_sp_port_pvid_set(mlxsw_sp_port, pvid, proto);
mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, false, false);
mlxsw_sp_port_vlan_destroy(mlxsw_sp_port_vlan);
}
@@ -1975,10 +2010,9 @@ static struct mlxsw_sp_port *mlxsw_sp_lag_rep_port(struct mlxsw_sp *mlxsw_sp,
}
static int
-mlxsw_sp_bridge_8021q_port_join(struct mlxsw_sp_bridge_device *bridge_device,
- struct mlxsw_sp_bridge_port *bridge_port,
- struct mlxsw_sp_port *mlxsw_sp_port,
- struct netlink_ext_ack *extack)
+mlxsw_sp_bridge_vlan_aware_port_join(struct mlxsw_sp_bridge_port *bridge_port,
+ struct mlxsw_sp_port *mlxsw_sp_port,
+ struct netlink_ext_ack *extack)
{
if (is_vlan_dev(bridge_port->dev)) {
NL_SET_ERR_MSG_MOD(extack, "Can not enslave a VLAN device to a VLAN-aware bridge");
@@ -1992,13 +2026,30 @@ mlxsw_sp_bridge_8021q_port_join(struct mlxsw_sp_bridge_device *bridge_device,
return 0;
}
+static int
+mlxsw_sp_bridge_8021q_port_join(struct mlxsw_sp_bridge_device *bridge_device,
+ struct mlxsw_sp_bridge_port *bridge_port,
+ struct mlxsw_sp_port *mlxsw_sp_port,
+ struct netlink_ext_ack *extack)
+{
+ return mlxsw_sp_bridge_vlan_aware_port_join(bridge_port, mlxsw_sp_port,
+ extack);
+}
+
+static void
+mlxsw_sp_bridge_vlan_aware_port_leave(struct mlxsw_sp_port *mlxsw_sp_port)
+{
+ /* Make sure untagged frames are allowed to ingress */
+ mlxsw_sp_port_pvid_set(mlxsw_sp_port, MLXSW_SP_DEFAULT_VID,
+ ETH_P_8021Q);
+}
+
static void
mlxsw_sp_bridge_8021q_port_leave(struct mlxsw_sp_bridge_device *bridge_device,
struct mlxsw_sp_bridge_port *bridge_port,
struct mlxsw_sp_port *mlxsw_sp_port)
{
- /* Make sure untagged frames are allowed to ingress */
- mlxsw_sp_port_pvid_set(mlxsw_sp_port, MLXSW_SP_DEFAULT_VID);
+ mlxsw_sp_bridge_vlan_aware_port_leave(mlxsw_sp_port);
}
static int
@@ -2246,6 +2297,57 @@ static const struct mlxsw_sp_bridge_ops mlxsw_sp_bridge_8021d_ops = {
.fid_vid = mlxsw_sp_bridge_8021d_fid_vid,
};
+static int
+mlxsw_sp_bridge_8021ad_port_join(struct mlxsw_sp_bridge_device *bridge_device,
+ struct mlxsw_sp_bridge_port *bridge_port,
+ struct mlxsw_sp_port *mlxsw_sp_port,
+ struct netlink_ext_ack *extack)
+{
+ int err;
+
+ err = mlxsw_sp_port_vlan_classification_set(mlxsw_sp_port, true, false);
+ if (err)
+ return err;
+
+ err = mlxsw_sp_bridge_vlan_aware_port_join(bridge_port, mlxsw_sp_port,
+ extack);
+ if (err)
+ goto err_bridge_vlan_aware_port_join;
+
+ return 0;
+
+err_bridge_vlan_aware_port_join:
+ mlxsw_sp_port_vlan_classification_set(mlxsw_sp_port, false, true);
+ return err;
+}
+
+static void
+mlxsw_sp_bridge_8021ad_port_leave(struct mlxsw_sp_bridge_device *bridge_device,
+ struct mlxsw_sp_bridge_port *bridge_port,
+ struct mlxsw_sp_port *mlxsw_sp_port)
+{
+ mlxsw_sp_bridge_vlan_aware_port_leave(mlxsw_sp_port);
+ mlxsw_sp_port_vlan_classification_set(mlxsw_sp_port, false, true);
+}
+
+static int
+mlxsw_sp_bridge_8021ad_vxlan_join(struct mlxsw_sp_bridge_device *bridge_device,
+ const struct net_device *vxlan_dev, u16 vid,
+ struct netlink_ext_ack *extack)
+{
+ NL_SET_ERR_MSG_MOD(extack, "VXLAN is not supported with 802.1ad");
+ return -EOPNOTSUPP;
+}
+
+static const struct mlxsw_sp_bridge_ops mlxsw_sp_bridge_8021ad_ops = {
+ .port_join = mlxsw_sp_bridge_8021ad_port_join,
+ .port_leave = mlxsw_sp_bridge_8021ad_port_leave,
+ .vxlan_join = mlxsw_sp_bridge_8021ad_vxlan_join,
+ .fid_get = mlxsw_sp_bridge_8021q_fid_get,
+ .fid_lookup = mlxsw_sp_bridge_8021q_fid_lookup,
+ .fid_vid = mlxsw_sp_bridge_8021q_fid_vid,
+};
+
int mlxsw_sp_port_bridge_join(struct mlxsw_sp_port *mlxsw_sp_port,
struct net_device *brport_dev,
struct net_device *br_dev,
@@ -3507,6 +3609,7 @@ int mlxsw_sp_switchdev_init(struct mlxsw_sp *mlxsw_sp)
bridge->bridge_8021q_ops = &mlxsw_sp_bridge_8021q_ops;
bridge->bridge_8021d_ops = &mlxsw_sp_bridge_8021d_ops;
+ bridge->bridge_8021ad_ops = &mlxsw_sp_bridge_8021ad_ops;
return mlxsw_sp_fdb_init(mlxsw_sp);
}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.c
index 433f14ade464..4ef12e3e021a 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.c
@@ -617,7 +617,7 @@ static const struct mlxsw_sp_trap_item mlxsw_sp_trap_items_arr[] = {
TRAP_TO_CPU),
MLXSW_SP_RXL_EXCEPTION(HOST_MISS_IPV6, L3_EXCEPTIONS,
TRAP_TO_CPU),
- MLXSW_SP_RXL_EXCEPTION(DISCARD_ROUTER3, L3_EXCEPTIONS,
+ MLXSW_SP_RXL_EXCEPTION(RTR_EGRESS0, L3_EXCEPTIONS,
TRAP_EXCEPTION_TO_CPU),
},
},
@@ -1007,6 +1007,12 @@ static const struct mlxsw_sp_trap_item mlxsw_sp_trap_items_arr[] = {
false),
},
},
+ {
+ .trap = MLXSW_SP_TRAP_DROP(BLACKHOLE_NEXTHOP, L3_DROPS),
+ .listeners_arr = {
+ MLXSW_SP_RXL_DISCARD(ROUTER3, L3_DISCARDS),
+ },
+ },
};
static struct mlxsw_sp_trap_policer_item *
diff --git a/drivers/net/ethernet/mellanox/mlxsw/switchx2.c b/drivers/net/ethernet/mellanox/mlxsw/switchx2.c
index 5023d91269f4..40e2e79d4517 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/switchx2.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/switchx2.c
@@ -6,6 +6,7 @@
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
+#include <linux/ethtool.h>
#include <linux/etherdevice.h>
#include <linux/slab.h>
#include <linux/device.h>
diff --git a/drivers/net/ethernet/mellanox/mlxsw/trap.h b/drivers/net/ethernet/mellanox/mlxsw/trap.h
index 57f9e24602d0..9e070ab3ed76 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/trap.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/trap.h
@@ -52,6 +52,7 @@ enum {
MLXSW_TRAP_ID_RTR_INGRESS1 = 0x71,
MLXSW_TRAP_ID_IPV6_PIM = 0x79,
MLXSW_TRAP_ID_IPV6_VRRP = 0x7A,
+ MLXSW_TRAP_ID_RTR_EGRESS0 = 0x80,
MLXSW_TRAP_ID_IPV4_BGP = 0x88,
MLXSW_TRAP_ID_IPV6_BGP = 0x89,
MLXSW_TRAP_ID_L3_IPV6_ROUTER_SOLICITATION = 0x8A,
diff --git a/drivers/net/ethernet/microchip/lan743x_ethtool.c b/drivers/net/ethernet/microchip/lan743x_ethtool.c
index dcde496da7fb..c5de8f46cdd3 100644
--- a/drivers/net/ethernet/microchip/lan743x_ethtool.c
+++ b/drivers/net/ethernet/microchip/lan743x_ethtool.c
@@ -780,7 +780,9 @@ static void lan743x_ethtool_get_wol(struct net_device *netdev,
wol->supported = 0;
wol->wolopts = 0;
- phy_ethtool_get_wol(netdev->phydev, wol);
+
+ if (netdev->phydev)
+ phy_ethtool_get_wol(netdev->phydev, wol);
wol->supported |= WAKE_BCAST | WAKE_UCAST | WAKE_MCAST |
WAKE_MAGIC | WAKE_PHY | WAKE_ARP;
@@ -809,9 +811,8 @@ static int lan743x_ethtool_set_wol(struct net_device *netdev,
device_set_wakeup_enable(&adapter->pdev->dev, (bool)wol->wolopts);
- phy_ethtool_set_wol(netdev->phydev, wol);
-
- return 0;
+ return netdev->phydev ? phy_ethtool_set_wol(netdev->phydev, wol)
+ : -ENETDOWN;
}
#endif /* CONFIG_PM */
diff --git a/drivers/net/ethernet/microchip/lan743x_main.c b/drivers/net/ethernet/microchip/lan743x_main.c
index b319c22c211c..dbd6c3946aa6 100644
--- a/drivers/net/ethernet/microchip/lan743x_main.c
+++ b/drivers/net/ethernet/microchip/lan743x_main.c
@@ -140,18 +140,14 @@ clean_up:
return result;
}
-static void lan743x_intr_software_isr(void *context)
+static void lan743x_intr_software_isr(struct lan743x_adapter *adapter)
{
- struct lan743x_adapter *adapter = context;
struct lan743x_intr *intr = &adapter->intr;
- u32 int_sts;
- int_sts = lan743x_csr_read(adapter, INT_STS);
- if (int_sts & INT_BIT_SW_GP_) {
- /* disable the interrupt to prevent repeated re-triggering */
- lan743x_csr_write(adapter, INT_EN_CLR, INT_BIT_SW_GP_);
- intr->software_isr_flag = 1;
- }
+ /* disable the interrupt to prevent repeated re-triggering */
+ lan743x_csr_write(adapter, INT_EN_CLR, INT_BIT_SW_GP_);
+ intr->software_isr_flag = true;
+ wake_up(&intr->software_isr_wq);
}
static void lan743x_tx_isr(void *context, u32 int_sts, u32 flags)
@@ -348,27 +344,22 @@ irq_done:
static int lan743x_intr_test_isr(struct lan743x_adapter *adapter)
{
struct lan743x_intr *intr = &adapter->intr;
- int result = -ENODEV;
- int timeout = 10;
+ int ret;
- intr->software_isr_flag = 0;
+ intr->software_isr_flag = false;
- /* enable interrupt */
+ /* enable and activate test interrupt */
lan743x_csr_write(adapter, INT_EN_SET, INT_BIT_SW_GP_);
-
- /* activate interrupt here */
lan743x_csr_write(adapter, INT_SET, INT_BIT_SW_GP_);
- while ((timeout > 0) && (!(intr->software_isr_flag))) {
- usleep_range(1000, 20000);
- timeout--;
- }
- if (intr->software_isr_flag)
- result = 0;
+ ret = wait_event_timeout(intr->software_isr_wq,
+ intr->software_isr_flag,
+ msecs_to_jiffies(200));
- /* disable interrupts */
+ /* disable test interrupt */
lan743x_csr_write(adapter, INT_EN_CLR, INT_BIT_SW_GP_);
- return result;
+
+ return ret > 0 ? 0 : -ENODEV;
}
static int lan743x_intr_register_isr(struct lan743x_adapter *adapter,
@@ -542,6 +533,8 @@ static int lan743x_intr_open(struct lan743x_adapter *adapter)
flags |= LAN743X_VECTOR_FLAG_SOURCE_ENABLE_R2C;
}
+ init_waitqueue_head(&intr->software_isr_wq);
+
ret = lan743x_intr_register_isr(adapter, 0, flags,
INT_BIT_ALL_RX_ | INT_BIT_ALL_TX_ |
INT_BIT_ALL_OTHER_,
@@ -830,14 +823,13 @@ static int lan743x_mac_init(struct lan743x_adapter *adapter)
static int lan743x_mac_open(struct lan743x_adapter *adapter)
{
- int ret = 0;
u32 temp;
temp = lan743x_csr_read(adapter, MAC_RX);
lan743x_csr_write(adapter, MAC_RX, temp | MAC_RX_RXEN_);
temp = lan743x_csr_read(adapter, MAC_TX);
lan743x_csr_write(adapter, MAC_TX, temp | MAC_TX_TXEN_);
- return ret;
+ return 0;
}
static void lan743x_mac_close(struct lan743x_adapter *adapter)
@@ -958,7 +950,7 @@ static void lan743x_phy_link_status_change(struct net_device *netdev)
data = lan743x_csr_read(adapter, MAC_CR);
/* set interface mode */
- if (phy_interface_mode_is_rgmii(adapter->phy_mode))
+ if (phy_interface_is_rgmii(phydev))
/* RGMII */
data &= ~MAC_CR_MII_EN_;
else
@@ -1014,33 +1006,14 @@ static void lan743x_phy_close(struct lan743x_adapter *adapter)
static int lan743x_phy_open(struct lan743x_adapter *adapter)
{
+ struct net_device *netdev = adapter->netdev;
struct lan743x_phy *phy = &adapter->phy;
- struct phy_device *phydev = NULL;
- struct device_node *phynode;
- struct net_device *netdev;
+ struct phy_device *phydev;
int ret = -EIO;
- netdev = adapter->netdev;
- phynode = of_node_get(adapter->pdev->dev.of_node);
-
- if (phynode) {
- /* try devicetree phy, or fixed link */
- of_get_phy_mode(phynode, &adapter->phy_mode);
-
- if (of_phy_is_fixed_link(phynode)) {
- ret = of_phy_register_fixed_link(phynode);
- if (ret) {
- netdev_err(netdev,
- "cannot register fixed PHY\n");
- of_node_put(phynode);
- goto return_error;
- }
- }
- phydev = of_phy_connect(netdev, phynode,
- lan743x_phy_link_status_change, 0,
- adapter->phy_mode);
- of_node_put(phynode);
- }
+ /* try devicetree phy, or fixed link */
+ phydev = of_phy_get_and_connect(netdev, adapter->pdev->dev.of_node,
+ lan743x_phy_link_status_change);
if (!phydev) {
/* try internal phy */
@@ -1048,10 +1021,9 @@ static int lan743x_phy_open(struct lan743x_adapter *adapter)
if (!phydev)
goto return_error;
- adapter->phy_mode = PHY_INTERFACE_MODE_GMII;
ret = phy_connect_direct(netdev, phydev,
lan743x_phy_link_status_change,
- adapter->phy_mode);
+ PHY_INTERFACE_MODE_GMII);
if (ret)
goto return_error;
}
@@ -1066,6 +1038,7 @@ static int lan743x_phy_open(struct lan743x_adapter *adapter)
phy_start(phydev);
phy_start_aneg(phydev);
+ phy_attached_info(phydev);
return 0;
return_error:
diff --git a/drivers/net/ethernet/microchip/lan743x_main.h b/drivers/net/ethernet/microchip/lan743x_main.h
index a536f4a4994d..404af3f4635e 100644
--- a/drivers/net/ethernet/microchip/lan743x_main.h
+++ b/drivers/net/ethernet/microchip/lan743x_main.h
@@ -616,7 +616,8 @@ struct lan743x_intr {
int number_of_vectors;
bool using_vectors;
- int software_isr_flag;
+ bool software_isr_flag;
+ wait_queue_head_t software_isr_wq;
};
#define LAN743X_MAX_FRAME_SIZE (9 * 1024)
@@ -703,7 +704,6 @@ struct lan743x_rx {
struct lan743x_adapter {
struct net_device *netdev;
struct mii_bus *mdiobus;
- phy_interface_t phy_mode;
int msg_enable;
#ifdef CONFIG_PM
u32 wolopts;
diff --git a/drivers/net/ethernet/mscc/ocelot.c b/drivers/net/ethernet/mscc/ocelot.c
index 70bf8c67d7ef..2632fe2d2448 100644
--- a/drivers/net/ethernet/mscc/ocelot.c
+++ b/drivers/net/ethernet/mscc/ocelot.c
@@ -147,42 +147,20 @@ static int ocelot_vlant_set_mask(struct ocelot *ocelot, u16 vid, u32 mask)
return ocelot_vlant_wait_for_completion(ocelot);
}
-static int ocelot_port_set_native_vlan(struct ocelot *ocelot, int port,
- u16 vid)
+static void ocelot_port_set_native_vlan(struct ocelot *ocelot, int port,
+ struct ocelot_vlan native_vlan)
{
struct ocelot_port *ocelot_port = ocelot->ports[port];
u32 val = 0;
- if (ocelot_port->vid != vid) {
- /* Always permit deleting the native VLAN (vid = 0) */
- if (ocelot_port->vid && vid) {
- dev_err(ocelot->dev,
- "Port already has a native VLAN: %d\n",
- ocelot_port->vid);
- return -EBUSY;
- }
- ocelot_port->vid = vid;
- }
+ ocelot_port->native_vlan = native_vlan;
- ocelot_rmw_gix(ocelot, REW_PORT_VLAN_CFG_PORT_VID(vid),
+ ocelot_rmw_gix(ocelot, REW_PORT_VLAN_CFG_PORT_VID(native_vlan.vid),
REW_PORT_VLAN_CFG_PORT_VID_M,
REW_PORT_VLAN_CFG, port);
- if (ocelot_port->vlan_aware && !ocelot_port->vid)
- /* If port is vlan-aware and tagged, drop untagged and priority
- * tagged frames.
- */
- val = ANA_PORT_DROP_CFG_DROP_UNTAGGED_ENA |
- ANA_PORT_DROP_CFG_DROP_PRIO_S_TAGGED_ENA |
- ANA_PORT_DROP_CFG_DROP_PRIO_C_TAGGED_ENA;
- ocelot_rmw_gix(ocelot, val,
- ANA_PORT_DROP_CFG_DROP_UNTAGGED_ENA |
- ANA_PORT_DROP_CFG_DROP_PRIO_S_TAGGED_ENA |
- ANA_PORT_DROP_CFG_DROP_PRIO_C_TAGGED_ENA,
- ANA_PORT_DROP_CFG, port);
-
if (ocelot_port->vlan_aware) {
- if (ocelot_port->vid)
+ if (native_vlan.valid)
/* Tag all frames except when VID == DEFAULT_VLAN */
val = REW_TAG_CFG_TAG_CFG(1);
else
@@ -195,8 +173,38 @@ static int ocelot_port_set_native_vlan(struct ocelot *ocelot, int port,
ocelot_rmw_gix(ocelot, val,
REW_TAG_CFG_TAG_CFG_M,
REW_TAG_CFG, port);
+}
- return 0;
+/* Default VLAN to classify untagged frames into (may be zero) */
+static void ocelot_port_set_pvid(struct ocelot *ocelot, int port,
+ struct ocelot_vlan pvid_vlan)
+{
+ struct ocelot_port *ocelot_port = ocelot->ports[port];
+ u32 val = 0;
+
+ ocelot_port->pvid_vlan = pvid_vlan;
+
+ if (!ocelot_port->vlan_aware)
+ pvid_vlan.vid = 0;
+
+ ocelot_rmw_gix(ocelot,
+ ANA_PORT_VLAN_CFG_VLAN_VID(pvid_vlan.vid),
+ ANA_PORT_VLAN_CFG_VLAN_VID_M,
+ ANA_PORT_VLAN_CFG, port);
+
+ /* If there's no pvid, we should drop not only untagged traffic (which
+ * happens automatically), but also 802.1p traffic which gets
+ * classified to VLAN 0, but that is always in our RX filter, so it
+ * would get accepted were it not for this setting.
+ */
+ if (!pvid_vlan.valid && ocelot_port->vlan_aware)
+ val = ANA_PORT_DROP_CFG_DROP_PRIO_S_TAGGED_ENA |
+ ANA_PORT_DROP_CFG_DROP_PRIO_C_TAGGED_ENA;
+
+ ocelot_rmw_gix(ocelot, val,
+ ANA_PORT_DROP_CFG_DROP_PRIO_S_TAGGED_ENA |
+ ANA_PORT_DROP_CFG_DROP_PRIO_C_TAGGED_ENA,
+ ANA_PORT_DROP_CFG, port);
}
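Both helpers above rely on read-modify-write register updates, where only the bits under the mask change. A minimal sketch of that pattern, with the register modeled as a plain variable:

/* Read-modify-write as used by the ocelot_rmw_gix() calls above. */
#include <stdint.h>
#include <stdio.h>

static uint32_t reg;	/* stand-in for a hardware register */

static void rmw(uint32_t val, uint32_t mask)
{
	reg = (reg & ~mask) | (val & mask);
}

int main(void)
{
	reg = 0xffff0000;
	rmw(0x0001, 0x00ff);	/* set low byte to 1, keep the rest */
	printf("reg=0x%08x\n", reg);	/* prints 0xffff0001 */
	return 0;
}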
int ocelot_port_vlan_filtering(struct ocelot *ocelot, int port,
@@ -233,24 +241,30 @@ int ocelot_port_vlan_filtering(struct ocelot *ocelot, int port,
ANA_PORT_VLAN_CFG_VLAN_POP_CNT_M,
ANA_PORT_VLAN_CFG, port);
- ocelot_port_set_native_vlan(ocelot, port, ocelot_port->vid);
+ ocelot_port_set_pvid(ocelot, port, ocelot_port->pvid_vlan);
+ ocelot_port_set_native_vlan(ocelot, port, ocelot_port->native_vlan);
return 0;
}
EXPORT_SYMBOL(ocelot_port_vlan_filtering);
-/* Default vlan to clasify for untagged frames (may be zero) */
-static void ocelot_port_set_pvid(struct ocelot *ocelot, int port, u16 pvid)
+int ocelot_vlan_prepare(struct ocelot *ocelot, int port, u16 vid, bool pvid,
+ bool untagged)
{
struct ocelot_port *ocelot_port = ocelot->ports[port];
- ocelot_rmw_gix(ocelot,
- ANA_PORT_VLAN_CFG_VLAN_VID(pvid),
- ANA_PORT_VLAN_CFG_VLAN_VID_M,
- ANA_PORT_VLAN_CFG, port);
+ /* Deny changing the native VLAN, but always permit deleting it */
+ if (untagged && ocelot_port->native_vlan.vid != vid &&
+ ocelot_port->native_vlan.valid) {
+ dev_err(ocelot->dev,
+ "Port already has a native VLAN: %d\n",
+ ocelot_port->native_vlan.vid);
+ return -EBUSY;
+ }
- ocelot_port->pvid = pvid;
+ return 0;
}
+EXPORT_SYMBOL(ocelot_vlan_prepare);
int ocelot_vlan_add(struct ocelot *ocelot, int port, u16 vid, bool pvid,
bool untagged)
@@ -264,14 +278,21 @@ int ocelot_vlan_add(struct ocelot *ocelot, int port, u16 vid, bool pvid,
return ret;
/* Default ingress vlan classification */
- if (pvid)
- ocelot_port_set_pvid(ocelot, port, vid);
+ if (pvid) {
+ struct ocelot_vlan pvid_vlan;
+
+ pvid_vlan.vid = vid;
+ pvid_vlan.valid = true;
+ ocelot_port_set_pvid(ocelot, port, pvid_vlan);
+ }
/* Untagged egress vlan classification */
if (untagged) {
- ret = ocelot_port_set_native_vlan(ocelot, port, vid);
- if (ret)
- return ret;
+ struct ocelot_vlan native_vlan;
+
+ native_vlan.vid = vid;
+ native_vlan.valid = true;
+ ocelot_port_set_native_vlan(ocelot, port, native_vlan);
}
return 0;
@@ -290,12 +311,18 @@ int ocelot_vlan_del(struct ocelot *ocelot, int port, u16 vid)
return ret;
/* Ingress */
- if (ocelot_port->pvid == vid)
- ocelot_port_set_pvid(ocelot, port, 0);
+ if (ocelot_port->pvid_vlan.vid == vid) {
+ struct ocelot_vlan pvid_vlan = {0};
+
+ ocelot_port_set_pvid(ocelot, port, pvid_vlan);
+ }
/* Egress */
- if (ocelot_port->vid == vid)
- ocelot_port_set_native_vlan(ocelot, port, 0);
+ if (ocelot_port->native_vlan.vid == vid) {
+ struct ocelot_vlan native_vlan = {0};
+
+ ocelot_port_set_native_vlan(ocelot, port, native_vlan);
+ }
return 0;
}
@@ -542,26 +569,11 @@ EXPORT_SYMBOL(ocelot_get_txtstamp);
int ocelot_fdb_add(struct ocelot *ocelot, int port,
const unsigned char *addr, u16 vid)
{
- struct ocelot_port *ocelot_port = ocelot->ports[port];
int pgid = port;
if (port == ocelot->npi)
pgid = PGID_CPU;
- if (!vid) {
- if (!ocelot_port->vlan_aware)
- /* If the bridge is not VLAN aware and no VID was
- * provided, set it to pvid to ensure the MAC entry
- * matches incoming untagged packets
- */
- vid = ocelot_port->pvid;
- else
- /* If the bridge is VLAN aware a VID must be provided as
- * otherwise the learnt entry wouldn't match any frame.
- */
- return -EINVAL;
- }
-
return ocelot_mact_learn(ocelot, pgid, addr, vid, ENTRYTYPE_LOCKED);
}
EXPORT_SYMBOL(ocelot_fdb_add);
@@ -958,52 +970,88 @@ static enum macaccess_entry_type ocelot_classify_mdb(const unsigned char *addr)
return ENTRYTYPE_MACv4;
if (addr[0] == 0x33 && addr[1] == 0x33)
return ENTRYTYPE_MACv6;
- return ENTRYTYPE_NORMAL;
+ return ENTRYTYPE_LOCKED;
+}
+
+static struct ocelot_pgid *ocelot_pgid_alloc(struct ocelot *ocelot, int index,
+ unsigned long ports)
+{
+ struct ocelot_pgid *pgid;
+
+ pgid = kzalloc(sizeof(*pgid), GFP_KERNEL);
+ if (!pgid)
+ return ERR_PTR(-ENOMEM);
+
+ pgid->ports = ports;
+ pgid->index = index;
+ refcount_set(&pgid->refcount, 1);
+ list_add_tail(&pgid->list, &ocelot->pgids);
+
+ return pgid;
+}
+
+static void ocelot_pgid_free(struct ocelot *ocelot, struct ocelot_pgid *pgid)
+{
+ if (!refcount_dec_and_test(&pgid->refcount))
+ return;
+
+ list_del(&pgid->list);
+ kfree(pgid);
}
-static int ocelot_mdb_get_pgid(struct ocelot *ocelot,
- enum macaccess_entry_type entry_type)
+static struct ocelot_pgid *ocelot_mdb_get_pgid(struct ocelot *ocelot,
+ const struct ocelot_multicast *mc)
{
- int pgid;
+ struct ocelot_pgid *pgid;
+ int index;
/* According to VSC7514 datasheet 3.9.1.5 IPv4 Multicast Entries and
* 3.9.1.6 IPv6 Multicast Entries, "Instead of a lookup in the
* destination mask table (PGID), the destination set is programmed as
* part of the entry MAC address.", and the DEST_IDX is set to 0.
*/
- if (entry_type == ENTRYTYPE_MACv4 ||
- entry_type == ENTRYTYPE_MACv6)
- return 0;
+ if (mc->entry_type == ENTRYTYPE_MACv4 ||
+ mc->entry_type == ENTRYTYPE_MACv6)
+ return ocelot_pgid_alloc(ocelot, 0, mc->ports);
- for_each_nonreserved_multicast_dest_pgid(ocelot, pgid) {
- struct ocelot_multicast *mc;
+ list_for_each_entry(pgid, &ocelot->pgids, list) {
+ /* When searching for a nonreserved multicast PGID, ignore the
+ * dummy PGID of zero that we have for MACv4/MACv6 entries
+ */
+ if (pgid->index && pgid->ports == mc->ports) {
+ refcount_inc(&pgid->refcount);
+ return pgid;
+ }
+ }
+
+ /* Search for a free index in the nonreserved multicast PGID area */
+ for_each_nonreserved_multicast_dest_pgid(ocelot, index) {
bool used = false;
- list_for_each_entry(mc, &ocelot->multicast, list) {
- if (mc->pgid == pgid) {
+ list_for_each_entry(pgid, &ocelot->pgids, list) {
+ if (pgid->index == index) {
used = true;
break;
}
}
if (!used)
- return pgid;
+ return ocelot_pgid_alloc(ocelot, index, mc->ports);
}
- return -1;
+ return ERR_PTR(-ENOSPC);
}
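The PGID lookup above implements get-or-alloc keyed by port mask, with refcounting so that identical masks share one PGID. A condensed userspace sketch, with the list and refcount simplified to plain C:

/* Refcounted PGID sharing: entries with the same port mask share one
 * PGID; the last put frees it. Illustrative only, not ocelot code. */
#include <stdio.h>
#include <stdlib.h>

struct pgid {
	unsigned long ports;	/* egress port mask */
	int index;
	int refcount;
	struct pgid *next;
};

static struct pgid *pgids;	/* head of the in-use list */

static struct pgid *pgid_get(unsigned long ports, int index)
{
	for (struct pgid *p = pgids; p; p = p->next)
		if (p->ports == ports) {	/* share an existing mask */
			p->refcount++;
			return p;
		}

	struct pgid *p = calloc(1, sizeof(*p));
	if (!p)
		return NULL;
	p->ports = ports;
	p->index = index;
	p->refcount = 1;
	p->next = pgids;
	pgids = p;
	return p;
}

static void pgid_put(struct pgid *pgid)
{
	if (--pgid->refcount)
		return;
	for (struct pgid **pp = &pgids; *pp; pp = &(*pp)->next)
		if (*pp == pgid) {
			*pp = pgid->next;
			break;
		}
	free(pgid);
}

int main(void)
{
	struct pgid *a = pgid_get(0x5, 32);	/* ports 0 and 2 */
	struct pgid *b = pgid_get(0x5, 33);	/* same mask: shared */

	printf("shared=%d refcount=%d\n", a == b, a->refcount);
	pgid_put(b);
	pgid_put(a);	/* last put frees the entry */
	return 0;
}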
static void ocelot_encode_ports_to_mdb(unsigned char *addr,
- struct ocelot_multicast *mc,
- enum macaccess_entry_type entry_type)
+ struct ocelot_multicast *mc)
{
- memcpy(addr, mc->addr, ETH_ALEN);
+ ether_addr_copy(addr, mc->addr);
- if (entry_type == ENTRYTYPE_MACv4) {
+ if (mc->entry_type == ENTRYTYPE_MACv4) {
addr[0] = 0;
addr[1] = mc->ports >> 8;
addr[2] = mc->ports & 0xff;
- } else if (entry_type == ENTRYTYPE_MACv6) {
+ } else if (mc->entry_type == ENTRYTYPE_MACv6) {
addr[0] = mc->ports >> 8;
addr[1] = mc->ports & 0xff;
}
@@ -1012,80 +1060,78 @@ static void ocelot_encode_ports_to_mdb(unsigned char *addr,
int ocelot_port_mdb_add(struct ocelot *ocelot, int port,
const struct switchdev_obj_port_mdb *mdb)
{
- struct ocelot_port *ocelot_port = ocelot->ports[port];
- enum macaccess_entry_type entry_type;
unsigned char addr[ETH_ALEN];
struct ocelot_multicast *mc;
+ struct ocelot_pgid *pgid;
u16 vid = mdb->vid;
- bool new = false;
if (port == ocelot->npi)
port = ocelot->num_phys_ports;
- if (!vid)
- vid = ocelot_port->pvid;
-
- entry_type = ocelot_classify_mdb(mdb->addr);
-
mc = ocelot_multicast_get(ocelot, mdb->addr, vid);
if (!mc) {
- int pgid = ocelot_mdb_get_pgid(ocelot, entry_type);
-
- if (pgid < 0) {
- dev_err(ocelot->dev,
- "No more PGIDs available for mdb %pM vid %d\n",
- mdb->addr, vid);
- return -ENOSPC;
- }
-
+ /* New entry */
mc = devm_kzalloc(ocelot->dev, sizeof(*mc), GFP_KERNEL);
if (!mc)
return -ENOMEM;
- memcpy(mc->addr, mdb->addr, ETH_ALEN);
+ mc->entry_type = ocelot_classify_mdb(mdb->addr);
+ ether_addr_copy(mc->addr, mdb->addr);
mc->vid = vid;
- mc->pgid = pgid;
list_add_tail(&mc->list, &ocelot->multicast);
- new = true;
- }
-
- if (!new) {
- ocelot_encode_ports_to_mdb(addr, mc, entry_type);
+ } else {
+ /* Existing entry. Clean up the current port mask from
+ * hardware now, because we'll be modifying it.
+ */
+ ocelot_pgid_free(ocelot, mc->pgid);
+ ocelot_encode_ports_to_mdb(addr, mc);
ocelot_mact_forget(ocelot, addr, vid);
}
mc->ports |= BIT(port);
- ocelot_encode_ports_to_mdb(addr, mc, entry_type);
- return ocelot_mact_learn(ocelot, mc->pgid, addr, vid, entry_type);
+ pgid = ocelot_mdb_get_pgid(ocelot, mc);
+ if (IS_ERR(pgid)) {
+ dev_err(ocelot->dev,
+ "Cannot allocate PGID for mdb %pM vid %d\n",
+ mc->addr, mc->vid);
+ devm_kfree(ocelot->dev, mc);
+ return PTR_ERR(pgid);
+ }
+ mc->pgid = pgid;
+
+ ocelot_encode_ports_to_mdb(addr, mc);
+
+ if (mc->entry_type != ENTRYTYPE_MACv4 &&
+ mc->entry_type != ENTRYTYPE_MACv6)
+ ocelot_write_rix(ocelot, pgid->ports, ANA_PGID_PGID,
+ pgid->index);
+
+ return ocelot_mact_learn(ocelot, pgid->index, addr, vid,
+ mc->entry_type);
}
EXPORT_SYMBOL(ocelot_port_mdb_add);
int ocelot_port_mdb_del(struct ocelot *ocelot, int port,
const struct switchdev_obj_port_mdb *mdb)
{
- struct ocelot_port *ocelot_port = ocelot->ports[port];
- enum macaccess_entry_type entry_type;
unsigned char addr[ETH_ALEN];
struct ocelot_multicast *mc;
+ struct ocelot_pgid *pgid;
u16 vid = mdb->vid;
if (port == ocelot->npi)
port = ocelot->num_phys_ports;
- if (!vid)
- vid = ocelot_port->pvid;
-
mc = ocelot_multicast_get(ocelot, mdb->addr, vid);
if (!mc)
return -ENOENT;
- entry_type = ocelot_classify_mdb(mdb->addr);
-
- ocelot_encode_ports_to_mdb(addr, mc, entry_type);
+ ocelot_encode_ports_to_mdb(addr, mc);
ocelot_mact_forget(ocelot, addr, vid);
+ ocelot_pgid_free(ocelot, mc->pgid);
mc->ports &= ~BIT(port);
if (!mc->ports) {
list_del(&mc->list);
@@ -1093,9 +1139,21 @@ int ocelot_port_mdb_del(struct ocelot *ocelot, int port,
return 0;
}
- ocelot_encode_ports_to_mdb(addr, mc, entry_type);
+ /* We have a PGID with fewer ports now */
+ pgid = ocelot_mdb_get_pgid(ocelot, mc);
+ if (IS_ERR(pgid))
+ return PTR_ERR(pgid);
+ mc->pgid = pgid;
+
+ ocelot_encode_ports_to_mdb(addr, mc);
+
+ if (mc->entry_type != ENTRYTYPE_MACv4 &&
+ mc->entry_type != ENTRYTYPE_MACv6)
+ ocelot_write_rix(ocelot, pgid->ports, ANA_PGID_PGID,
+ pgid->index);
- return ocelot_mact_learn(ocelot, mc->pgid, addr, vid, entry_type);
+ return ocelot_mact_learn(ocelot, pgid->index, addr, vid,
+ mc->entry_type);
}
EXPORT_SYMBOL(ocelot_port_mdb_del);
@@ -1120,6 +1178,7 @@ EXPORT_SYMBOL(ocelot_port_bridge_join);
int ocelot_port_bridge_leave(struct ocelot *ocelot, int port,
struct net_device *bridge)
{
+ struct ocelot_vlan pvid = {0}, native_vlan = {0};
struct switchdev_trans trans;
int ret;
@@ -1138,8 +1197,10 @@ int ocelot_port_bridge_leave(struct ocelot *ocelot, int port,
if (ret)
return ret;
- ocelot_port_set_pvid(ocelot, port, 0);
- return ocelot_port_set_native_vlan(ocelot, port, 0);
+ ocelot_port_set_pvid(ocelot, port, pvid);
+ ocelot_port_set_native_vlan(ocelot, port, native_vlan);
+
+ return 0;
}
EXPORT_SYMBOL(ocelot_port_bridge_leave);
@@ -1453,6 +1514,7 @@ int ocelot_init(struct ocelot *ocelot)
return -ENOMEM;
INIT_LIST_HEAD(&ocelot->multicast);
+ INIT_LIST_HEAD(&ocelot->pgids);
ocelot_mact_init(ocelot);
ocelot_vlan_init(ocelot);
ocelot_vcap_init(ocelot);
diff --git a/drivers/net/ethernet/mscc/ocelot.h b/drivers/net/ethernet/mscc/ocelot.h
index abb407dff93c..291d39d49c4e 100644
--- a/drivers/net/ethernet/mscc/ocelot.h
+++ b/drivers/net/ethernet/mscc/ocelot.h
@@ -41,14 +41,6 @@ struct frame_info {
u32 timestamp; /* rew_val */
};
-struct ocelot_multicast {
- struct list_head list;
- unsigned char addr[ETH_ALEN];
- u16 vid;
- u16 ports;
- int pgid;
-};
-
struct ocelot_port_tc {
bool block_shared;
unsigned long offload_cnt;
@@ -87,6 +79,29 @@ enum macaccess_entry_type {
ENTRYTYPE_MACv6,
};
+/* A (PGID) port mask structure, encoding the 2^ocelot->num_phys_ports
+ * possibilities of egress port masks for L2 multicast traffic.
+ * For a switch with 9 user ports, there are 512 possible port masks, but the
+ * hardware only has 46 individual PGIDs that it can forward multicast traffic
+ * to. So we need a structure that maps the limited PGID indices to the port
+ * destinations requested by the user for L2 multicast.
+ */
+struct ocelot_pgid {
+ unsigned long ports;
+ int index;
+ refcount_t refcount;
+ struct list_head list;
+};
+
+struct ocelot_multicast {
+ struct list_head list;
+ enum macaccess_entry_type entry_type;
+ unsigned char addr[ETH_ALEN];
+ u16 vid;
+ u16 ports;
+ struct ocelot_pgid *pgid;
+};
+
int ocelot_port_fdb_do_dump(const unsigned char *addr, u16 vid,
bool is_static, void *data);
int ocelot_mact_learn(struct ocelot *ocelot, int port,
diff --git a/drivers/net/ethernet/mscc/ocelot_net.c b/drivers/net/ethernet/mscc/ocelot_net.c
index b34da11acf65..c65ae6f75a16 100644
--- a/drivers/net/ethernet/mscc/ocelot_net.c
+++ b/drivers/net/ethernet/mscc/ocelot_net.c
@@ -206,6 +206,17 @@ static void ocelot_port_adjust_link(struct net_device *dev)
ocelot_adjust_link(ocelot, port, dev->phydev);
}
+static int ocelot_vlan_vid_prepare(struct net_device *dev, u16 vid, bool pvid,
+ bool untagged)
+{
+ struct ocelot_port_private *priv = netdev_priv(dev);
+ struct ocelot_port *ocelot_port = &priv->port;
+ struct ocelot *ocelot = ocelot_port->ocelot;
+ int port = priv->chip_port;
+
+ return ocelot_vlan_prepare(ocelot, port, vid, pvid, untagged);
+}
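
The new wrapper gives switchdev's two-phase object model a dedicated prepare entry point: the prepare phase may fail after validating the request, while the commit phase is expected to succeed. Condensed from the ocelot_port_obj_add_vlan() hunk later in this patch:

/* Two-phase switchdev add: validate in prepare, apply in commit */
if (switchdev_trans_ph_prepare(trans))
	ret = ocelot_vlan_vid_prepare(dev, vid, pvid, untagged);
else
	ret = ocelot_vlan_vid_add(dev, vid, pvid, untagged);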
+
static int ocelot_vlan_vid_add(struct net_device *dev, u16 vid, bool pvid,
bool untagged)
{
@@ -409,7 +420,7 @@ static int ocelot_mc_unsync(struct net_device *dev, const unsigned char *addr)
struct ocelot_port *ocelot_port = &priv->port;
struct ocelot *ocelot = ocelot_port->ocelot;
- return ocelot_mact_forget(ocelot, addr, ocelot_port->pvid);
+ return ocelot_mact_forget(ocelot, addr, ocelot_port->pvid_vlan.vid);
}
static int ocelot_mc_sync(struct net_device *dev, const unsigned char *addr)
@@ -418,8 +429,8 @@ static int ocelot_mc_sync(struct net_device *dev, const unsigned char *addr)
struct ocelot_port *ocelot_port = &priv->port;
struct ocelot *ocelot = ocelot_port->ocelot;
- return ocelot_mact_learn(ocelot, PGID_CPU, addr, ocelot_port->pvid,
- ENTRYTYPE_LOCKED);
+ return ocelot_mact_learn(ocelot, PGID_CPU, addr,
+ ocelot_port->pvid_vlan.vid, ENTRYTYPE_LOCKED);
}
static void ocelot_set_rx_mode(struct net_device *dev)
@@ -462,10 +473,10 @@ static int ocelot_port_set_mac_address(struct net_device *dev, void *p)
const struct sockaddr *addr = p;
/* Learn the new net device MAC address in the mac table. */
- ocelot_mact_learn(ocelot, PGID_CPU, addr->sa_data, ocelot_port->pvid,
- ENTRYTYPE_LOCKED);
+ ocelot_mact_learn(ocelot, PGID_CPU, addr->sa_data,
+ ocelot_port->pvid_vlan.vid, ENTRYTYPE_LOCKED);
/* Then forget the previous one. */
- ocelot_mact_forget(ocelot, dev->dev_addr, ocelot_port->pvid);
+ ocelot_mact_forget(ocelot, dev->dev_addr, ocelot_port->pvid_vlan.vid);
ether_addr_copy(dev->dev_addr, addr->sa_data);
return 0;
@@ -812,9 +823,14 @@ static int ocelot_port_obj_add_vlan(struct net_device *dev,
u16 vid;
for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) {
- ret = ocelot_vlan_vid_add(dev, vid,
- vlan->flags & BRIDGE_VLAN_INFO_PVID,
- vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED);
+ bool pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID;
+ bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
+
+ if (switchdev_trans_ph_prepare(trans))
+ ret = ocelot_vlan_vid_prepare(dev, vid, pvid,
+ untagged);
+ else
+ ret = ocelot_vlan_vid_add(dev, vid, pvid, untagged);
if (ret)
return ret;
}
@@ -1074,8 +1090,8 @@ int ocelot_probe_port(struct ocelot *ocelot, int port, struct regmap *target,
memcpy(dev->dev_addr, ocelot->base_mac, ETH_ALEN);
dev->dev_addr[ETH_ALEN - 1] += port;
- ocelot_mact_learn(ocelot, PGID_CPU, dev->dev_addr, ocelot_port->pvid,
- ENTRYTYPE_LOCKED);
+ ocelot_mact_learn(ocelot, PGID_CPU, dev->dev_addr,
+ ocelot_port->pvid_vlan.vid, ENTRYTYPE_LOCKED);
ocelot_init_port(ocelot, port);
diff --git a/drivers/net/ethernet/neterion/s2io.c b/drivers/net/ethernet/neterion/s2io.c
index d13d92bf7447..8f2f091bce89 100644
--- a/drivers/net/ethernet/neterion/s2io.c
+++ b/drivers/net/ethernet/neterion/s2io.c
@@ -1106,7 +1106,7 @@ static int s2io_print_pci_mode(struct s2io_nic *nic)
* '-1' on failure
*/
-static int init_tti(struct s2io_nic *nic, int link)
+static int init_tti(struct s2io_nic *nic, int link, bool may_sleep)
{
struct XENA_dev_config __iomem *bar0 = nic->bar0;
register u64 val64 = 0;
@@ -1166,7 +1166,7 @@ static int init_tti(struct s2io_nic *nic, int link)
if (wait_for_cmd_complete(&bar0->tti_command_mem,
TTI_CMD_MEM_STROBE_NEW_CMD,
- S2IO_BIT_RESET) != SUCCESS)
+ S2IO_BIT_RESET, may_sleep) != SUCCESS)
return FAILURE;
}
@@ -1659,7 +1659,7 @@ static int init_nic(struct s2io_nic *nic)
*/
/* Initialize TTI */
- if (SUCCESS != init_tti(nic, nic->last_link_state))
+ if (SUCCESS != init_tti(nic, nic->last_link_state, true))
return -ENODEV;
/* RTI Initialization */
@@ -3331,7 +3331,7 @@ static void s2io_updt_xpak_counter(struct net_device *dev)
*/
static int wait_for_cmd_complete(void __iomem *addr, u64 busy_bit,
- int bit_state)
+ int bit_state, bool may_sleep)
{
int ret = FAILURE, cnt = 0, delay = 1;
u64 val64;
@@ -3353,7 +3353,7 @@ static int wait_for_cmd_complete(void __iomem *addr, u64 busy_bit,
}
}
- if (in_interrupt())
+ if (!may_sleep)
mdelay(delay);
else
msleep(delay);
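
This is part of the broader removal of in_interrupt() checks from drivers: the caller knows whether it can sleep and passes that down, instead of the callee probing its own context. A minimal sketch of the pattern, with a hypothetical helper name:

/* Hypothetical helper showing the may_sleep plumbing */
static void s2io_cmd_delay(unsigned int ms, bool may_sleep)
{
	if (may_sleep)
		msleep(ms);	/* process context: may schedule */
	else
		mdelay(ms);	/* atomic context: busy-wait */
}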
@@ -4877,8 +4877,7 @@ static struct net_device_stats *s2io_get_stats(struct net_device *dev)
* Return value:
* void.
*/
-
-static void s2io_set_multicast(struct net_device *dev)
+static void s2io_set_multicast(struct net_device *dev, bool may_sleep)
{
int i, j, prev_cnt;
struct netdev_hw_addr *ha;
@@ -4903,7 +4902,7 @@ static void s2io_set_multicast(struct net_device *dev)
/* Wait till command completes */
wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
- S2IO_BIT_RESET);
+ S2IO_BIT_RESET, may_sleep);
sp->m_cast_flg = 1;
sp->all_multi_pos = config->max_mc_addr - 1;
@@ -4920,7 +4919,7 @@ static void s2io_set_multicast(struct net_device *dev)
/* Wait till command completes */
wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
- S2IO_BIT_RESET);
+ S2IO_BIT_RESET, may_sleep);
sp->m_cast_flg = 0;
sp->all_multi_pos = 0;
@@ -5000,7 +4999,7 @@ static void s2io_set_multicast(struct net_device *dev)
/* Wait till command completes */
if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
- S2IO_BIT_RESET)) {
+ S2IO_BIT_RESET, may_sleep)) {
DBG_PRINT(ERR_DBG,
"%s: Adding Multicasts failed\n",
dev->name);
@@ -5030,7 +5029,7 @@ static void s2io_set_multicast(struct net_device *dev)
/* Wait till command completes */
if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
- S2IO_BIT_RESET)) {
+ S2IO_BIT_RESET, may_sleep)) {
DBG_PRINT(ERR_DBG,
"%s: Adding Multicasts failed\n",
dev->name);
@@ -5041,6 +5040,12 @@ static void s2io_set_multicast(struct net_device *dev)
}
}
+/* NDO wrapper for s2io_set_multicast */
+static void s2io_ndo_set_multicast(struct net_device *dev)
+{
+ s2io_set_multicast(dev, false);
+}
+
/* read unicast & multicast addresses from CAM and store them in
 * the def_mac_addr structure
*/
@@ -5127,7 +5132,7 @@ static int do_s2io_add_mac(struct s2io_nic *sp, u64 addr, int off)
/* Wait till command completes */
if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
- S2IO_BIT_RESET)) {
+ S2IO_BIT_RESET, true)) {
DBG_PRINT(INFO_DBG, "do_s2io_add_mac failed\n");
return FAILURE;
}
@@ -5171,7 +5176,7 @@ static u64 do_s2io_read_unicast_mc(struct s2io_nic *sp, int offset)
/* Wait till command completes */
if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
- S2IO_BIT_RESET)) {
+ S2IO_BIT_RESET, true)) {
DBG_PRINT(INFO_DBG, "do_s2io_read_unicast_mc failed\n");
return FAILURE;
}
@@ -7141,7 +7146,7 @@ static int s2io_card_up(struct s2io_nic *sp)
}
/* Setting its receive mode */
- s2io_set_multicast(dev);
+ s2io_set_multicast(dev, true);
if (dev->features & NETIF_F_LRO) {
/* Initialize max aggregatable pkts per session based on MTU */
@@ -7447,7 +7452,7 @@ static void s2io_link(struct s2io_nic *sp, int link)
struct swStat *swstats = &sp->mac_control.stats_info->sw_stat;
if (link != sp->last_link_state) {
- init_tti(sp, link);
+ init_tti(sp, link, false);
if (link == LINK_DOWN) {
DBG_PRINT(ERR_DBG, "%s: Link down\n", dev->name);
s2io_stop_all_tx_queue(sp);
@@ -7604,7 +7609,7 @@ static int rts_ds_steer(struct s2io_nic *nic, u8 ds_codepoint, u8 ring)
return wait_for_cmd_complete(&bar0->rts_ds_mem_ctrl,
RTS_DS_MEM_CTRL_STROBE_CMD_BEING_EXECUTED,
- S2IO_BIT_RESET);
+ S2IO_BIT_RESET, true);
}
static const struct net_device_ops s2io_netdev_ops = {
@@ -7613,7 +7618,7 @@ static const struct net_device_ops s2io_netdev_ops = {
.ndo_get_stats = s2io_get_stats,
.ndo_start_xmit = s2io_xmit,
.ndo_validate_addr = eth_validate_addr,
- .ndo_set_rx_mode = s2io_set_multicast,
+ .ndo_set_rx_mode = s2io_ndo_set_multicast,
.ndo_do_ioctl = s2io_ioctl,
.ndo_set_mac_address = s2io_set_mac_addr,
.ndo_change_mtu = s2io_change_mtu,
@@ -7929,7 +7934,7 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
writeq(val64, &bar0->rmac_addr_cmd_mem);
wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
- S2IO_BIT_RESET);
+ S2IO_BIT_RESET, true);
tmp64 = readq(&bar0->rmac_addr_data0_mem);
mac_down = (u32)tmp64;
mac_up = (u32) (tmp64 >> 32);
diff --git a/drivers/net/ethernet/neterion/s2io.h b/drivers/net/ethernet/neterion/s2io.h
index 6fa3159a977f..5a6032212c19 100644
--- a/drivers/net/ethernet/neterion/s2io.h
+++ b/drivers/net/ethernet/neterion/s2io.h
@@ -1066,7 +1066,7 @@ static void tx_intr_handler(struct fifo_info *fifo_data);
static void s2io_handle_errors(void * dev_id);
static void s2io_tx_watchdog(struct net_device *dev, unsigned int txqueue);
-static void s2io_set_multicast(struct net_device *dev);
+static void s2io_set_multicast(struct net_device *dev, bool may_sleep);
static int rx_osm_handler(struct ring_info *ring_data, struct RxD_t * rxdp);
static void s2io_link(struct s2io_nic * sp, int link);
static void s2io_reset(struct s2io_nic * sp);
@@ -1087,7 +1087,7 @@ static int s2io_set_swapper(struct s2io_nic * sp);
static void s2io_card_down(struct s2io_nic *nic);
static int s2io_card_up(struct s2io_nic *nic);
static int wait_for_cmd_complete(void __iomem *addr, u64 busy_bit,
- int bit_state);
+ int bit_state, bool may_sleep);
static int s2io_add_isr(struct s2io_nic * sp);
static void s2io_rem_isr(struct s2io_nic * sp);
diff --git a/drivers/net/ethernet/neterion/vxge/vxge-config.c b/drivers/net/ethernet/neterion/vxge/vxge-config.c
index f5d48d7c4ce2..da48dd85770c 100644
--- a/drivers/net/ethernet/neterion/vxge/vxge-config.c
+++ b/drivers/net/ethernet/neterion/vxge/vxge-config.c
@@ -1121,7 +1121,7 @@ static void __vxge_hw_blockpool_destroy(struct __vxge_hw_blockpool *blockpool)
list_for_each_safe(p, n, &blockpool->free_entry_list) {
list_del(&((struct __vxge_hw_blockpool_entry *)p)->item);
- kfree((void *)p);
+ kfree(p);
}
return;
diff --git a/drivers/net/ethernet/netronome/nfp/crypto/tls.c b/drivers/net/ethernet/netronome/nfp/crypto/tls.c
index 76c51da5b66f..9b32ae46011c 100644
--- a/drivers/net/ethernet/netronome/nfp/crypto/tls.c
+++ b/drivers/net/ethernet/netronome/nfp/crypto/tls.c
@@ -295,8 +295,8 @@ nfp_net_tls_add(struct net_device *netdev, struct sock *sk,
ipv6 = true;
break;
}
-#endif
fallthrough;
+#endif
case AF_INET:
req_sz = sizeof(struct nfp_crypto_req_add_v4);
ipv6 = false;
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_devlink.c b/drivers/net/ethernet/netronome/nfp/nfp_devlink.c
index 97d2b03208de..713ee3041d49 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_devlink.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_devlink.c
@@ -333,7 +333,7 @@ nfp_devlink_flash_update(struct devlink *devlink,
struct devlink_flash_update_params *params,
struct netlink_ext_ack *extack)
{
- return nfp_flash_update_common(devlink_priv(devlink), params->file_name, extack);
+ return nfp_flash_update_common(devlink_priv(devlink), params->fw, extack);
}
const struct devlink_ops nfp_devlink_ops = {
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_main.c b/drivers/net/ethernet/netronome/nfp/nfp_main.c
index 7ff2ccbd43b0..742a420152b3 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_main.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_main.c
@@ -301,11 +301,10 @@ static int nfp_pcie_sriov_configure(struct pci_dev *pdev, int num_vfs)
return nfp_pcie_sriov_enable(pdev, num_vfs);
}
-int nfp_flash_update_common(struct nfp_pf *pf, const char *path,
+int nfp_flash_update_common(struct nfp_pf *pf, const struct firmware *fw,
struct netlink_ext_ack *extack)
{
struct device *dev = &pf->pdev->dev;
- const struct firmware *fw;
struct nfp_nsp *nsp;
int err;
@@ -319,24 +318,12 @@ int nfp_flash_update_common(struct nfp_pf *pf, const char *path,
return err;
}
- err = request_firmware_direct(&fw, path, dev);
- if (err) {
- NL_SET_ERR_MSG_MOD(extack,
- "unable to read flash file from disk");
- goto exit_close_nsp;
- }
-
- dev_info(dev, "Please be patient while writing flash image: %s\n",
- path);
-
err = nfp_nsp_write_flash(nsp, fw);
if (err < 0)
- goto exit_release_fw;
+ goto exit_close_nsp;
dev_info(dev, "Finished writing flash image\n");
err = 0;
-exit_release_fw:
- release_firmware(fw);
exit_close_nsp:
nfp_nsp_close(nsp);
return err;
@@ -724,10 +711,8 @@ static int nfp_pci_probe(struct pci_dev *pdev,
}
pf->cpp = nfp_cpp_from_nfp6000_pcie(pdev);
- if (IS_ERR_OR_NULL(pf->cpp)) {
+ if (IS_ERR(pf->cpp)) {
err = PTR_ERR(pf->cpp);
- if (err >= 0)
- err = -ENOMEM;
goto err_disable_msix;
}
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_main.h b/drivers/net/ethernet/netronome/nfp/nfp_main.h
index fa6b13a05941..a7dede946a33 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_main.h
+++ b/drivers/net/ethernet/netronome/nfp/nfp_main.h
@@ -166,7 +166,7 @@ nfp_pf_map_rtsym(struct nfp_pf *pf, const char *name, const char *sym_fmt,
unsigned int min_size, struct nfp_cpp_area **area);
int nfp_mbox_cmd(struct nfp_pf *pf, u32 cmd, void *in_data, u64 in_length,
void *out_data, u64 out_length);
-int nfp_flash_update_common(struct nfp_pf *pf, const char *path,
+int nfp_flash_update_common(struct nfp_pf *pf, const struct firmware *fw,
struct netlink_ext_ack *extack);
enum nfp_dump_diag {
diff --git a/drivers/net/ethernet/nvidia/forcedeth.c b/drivers/net/ethernet/nvidia/forcedeth.c
index 2fc10a36afa4..8724d6a9ed02 100644
--- a/drivers/net/ethernet/nvidia/forcedeth.c
+++ b/drivers/net/ethernet/nvidia/forcedeth.c
@@ -1043,8 +1043,7 @@ static int using_multi_irqs(struct net_device *dev)
struct fe_priv *np = get_nvpriv(dev);
if (!(np->msi_flags & NV_MSI_X_ENABLED) ||
- ((np->msi_flags & NV_MSI_X_ENABLED) &&
- ((np->msi_flags & NV_MSI_X_VECTORS_MASK) == 0x1)))
+ ((np->msi_flags & NV_MSI_X_VECTORS_MASK) == 0x1))
return 0;
else
return 1;
@@ -1666,11 +1665,7 @@ static void nv_update_stats(struct net_device *dev)
struct fe_priv *np = netdev_priv(dev);
u8 __iomem *base = get_hwbase(dev);
- /* If it happens that this is run in top-half context, then
- * replace the spin_lock of hwstats_lock with
- * spin_lock_irqsave() in calling functions. */
- WARN_ONCE(in_irq(), "forcedeth: estats spin_lock(_bh) from top-half");
- assert_spin_locked(&np->hwstats_lock);
+ lockdep_assert_held(&np->hwstats_lock);
/* query hardware */
np->estats.tx_bytes += readl(base + NvRegTxCnt);
diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
index ade8c44c01cd..140cee7c459d 100644
--- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
+++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
@@ -1816,7 +1816,8 @@ void pch_gbe_free_tx_resources(struct pch_gbe_adapter *adapter,
pch_gbe_clean_tx_ring(adapter, tx_ring);
vfree(tx_ring->buffer_info);
tx_ring->buffer_info = NULL;
- pci_free_consistent(pdev, tx_ring->size, tx_ring->desc, tx_ring->dma);
+ dma_free_coherent(&pdev->dev, tx_ring->size, tx_ring->desc,
+ tx_ring->dma);
tx_ring->desc = NULL;
}
@@ -1833,7 +1834,8 @@ void pch_gbe_free_rx_resources(struct pch_gbe_adapter *adapter,
pch_gbe_clean_rx_ring(adapter, rx_ring);
vfree(rx_ring->buffer_info);
rx_ring->buffer_info = NULL;
- pci_free_consistent(pdev, rx_ring->size, rx_ring->desc, rx_ring->dma);
+ dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc,
+ rx_ring->dma);
rx_ring->desc = NULL;
}
@@ -1954,8 +1956,8 @@ void pch_gbe_down(struct pch_gbe_adapter *adapter)
pch_gbe_clean_tx_ring(adapter, adapter->tx_ring);
pch_gbe_clean_rx_ring(adapter, adapter->rx_ring);
- pci_free_consistent(adapter->pdev, rx_ring->rx_buff_pool_size,
- rx_ring->rx_buff_pool, rx_ring->rx_buff_pool_logic);
+ dma_free_coherent(&adapter->pdev->dev, rx_ring->rx_buff_pool_size,
+ rx_ring->rx_buff_pool, rx_ring->rx_buff_pool_logic);
rx_ring->rx_buff_pool_logic = 0;
rx_ring->rx_buff_pool_size = 0;
rx_ring->rx_buff_pool = NULL;
@@ -2412,7 +2414,6 @@ static int __pch_gbe_suspend(struct pci_dev *pdev)
struct pch_gbe_adapter *adapter = netdev_priv(netdev);
struct pch_gbe_hw *hw = &adapter->hw;
u32 wufc = adapter->wake_up_evt;
- int retval = 0;
netif_device_detach(netdev);
if (netif_running(netdev))
@@ -2432,7 +2433,7 @@ static int __pch_gbe_suspend(struct pci_dev *pdev)
pch_gbe_mac_set_wol_event(hw, wufc);
pci_disable_device(pdev);
}
- return retval;
+ return 0;
}
#ifdef CONFIG_PM
@@ -2503,17 +2504,11 @@ static int pch_gbe_probe(struct pci_dev *pdev,
if (ret)
return ret;
- if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64))
- || pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) {
- ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+ if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) {
+ ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
if (ret) {
- ret = pci_set_consistent_dma_mask(pdev,
- DMA_BIT_MASK(32));
- if (ret) {
- dev_err(&pdev->dev, "ERR: No usable DMA "
- "configuration, aborting\n");
- return ret;
- }
+ dev_err(&pdev->dev, "ERR: No usable DMA configuration, aborting\n");
+ return ret;
}
}
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_dev.c b/drivers/net/ethernet/pensando/ionic/ionic_dev.c
index dc5fbc2704f3..fb2b5bf179d7 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_dev.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_dev.c
@@ -25,7 +25,7 @@ static void ionic_watchdog_cb(struct timer_list *t)
hb = ionic_heartbeat_check(ionic);
if (hb >= 0)
- ionic_link_status_check_request(ionic->lif, false);
+ ionic_link_status_check_request(ionic->lif, CAN_NOT_SLEEP);
}
void ionic_init_devinfo(struct ionic *ionic)
@@ -142,7 +142,7 @@ int ionic_heartbeat_check(struct ionic *ionic)
work = kzalloc(sizeof(*work), GFP_ATOMIC);
if (!work) {
- dev_err(ionic->dev, "%s OOM\n", __func__);
+ dev_err(ionic->dev, "LIF reset trigger dropped\n");
} else {
work->type = IONIC_DW_TYPE_LIF_RESET;
if (fw_status & IONIC_FW_STS_F_RUNNING &&
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_dev.h b/drivers/net/ethernet/pensando/ionic/ionic_dev.h
index 6c243b17312c..690768ff0143 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_dev.h
+++ b/drivers/net/ethernet/pensando/ionic/ionic_dev.h
@@ -12,8 +12,10 @@
#define IONIC_MAX_TX_DESC 8192
#define IONIC_MAX_RX_DESC 16384
-#define IONIC_MIN_TXRX_DESC 16
+#define IONIC_MIN_TXRX_DESC 64
#define IONIC_DEF_TXRX_DESC 4096
+#define IONIC_RX_FILL_THRESHOLD 16
+#define IONIC_RX_FILL_DIV 8
#define IONIC_LIFS_MAX 1024
#define IONIC_WATCHDOG_SECS 5
#define IONIC_ITR_COAL_USEC_DEFAULT 64
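
These two constants feed the NAPI refill checks added in ionic_txrx.c below: the RX ring is refilled only once min_t(u16, IONIC_RX_FILL_THRESHOLD, num_descs / IONIC_RX_FILL_DIV) descriptors are free, batching descriptor and doorbell writes. Worked values:

/* From the ionic_rx_napi()/ionic_txrx_napi() hunks below:
 * num_descs = 64 (new minimum) -> threshold = min(16, 64/8)   = 8
 * num_descs = 4096 (default)   -> threshold = min(16, 4096/8) = 16
 */
rx_fill_threshold = min_t(u16, IONIC_RX_FILL_THRESHOLD,
			  cq->num_descs / IONIC_RX_FILL_DIV);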
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_devlink.c b/drivers/net/ethernet/pensando/ionic/ionic_devlink.c
index 51d64718ed9f..b41301a5b0df 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_devlink.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_devlink.c
@@ -15,7 +15,7 @@ static int ionic_dl_flash_update(struct devlink *dl,
{
struct ionic *ionic = devlink_priv(dl);
- return ionic_firmware_update(ionic->lif, params->file_name, extack);
+ return ionic_firmware_update(ionic->lif, params->fw, extack);
}
static int ionic_dl_info_get(struct devlink *dl, struct devlink_info_req *req,
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_devlink.h b/drivers/net/ethernet/pensando/ionic/ionic_devlink.h
index 5c01a9e306d8..0a77e8e810c5 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_devlink.h
+++ b/drivers/net/ethernet/pensando/ionic/ionic_devlink.h
@@ -6,7 +6,7 @@
#include <net/devlink.h>
-int ionic_firmware_update(struct ionic_lif *lif, const char *fw_name,
+int ionic_firmware_update(struct ionic_lif *lif, const struct firmware *fw,
struct netlink_ext_ack *extack);
struct ionic *ionic_devlink_alloc(struct device *dev);
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_fw.c b/drivers/net/ethernet/pensando/ionic/ionic_fw.c
index d7bbf336c6f6..5f40324cd243 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_fw.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_fw.c
@@ -91,7 +91,7 @@ static int ionic_fw_status_long_wait(struct ionic *ionic,
return err;
}
-int ionic_firmware_update(struct ionic_lif *lif, const char *fw_name,
+int ionic_firmware_update(struct ionic_lif *lif, const struct firmware *fw,
struct netlink_ext_ack *extack)
{
struct ionic_dev *idev = &lif->ionic->idev;
@@ -99,24 +99,16 @@ int ionic_firmware_update(struct ionic_lif *lif, const char *fw_name,
struct ionic *ionic = lif->ionic;
union ionic_dev_cmd_comp comp;
u32 buf_sz, copy_sz, offset;
- const struct firmware *fw;
struct devlink *dl;
int next_interval;
int err = 0;
u8 fw_slot;
- netdev_info(netdev, "Installing firmware %s\n", fw_name);
+ netdev_info(netdev, "Installing firmware\n");
dl = priv_to_devlink(ionic);
- devlink_flash_update_begin_notify(dl);
devlink_flash_update_status_notify(dl, "Preparing to flash", NULL, 0, 0);
- err = request_firmware(&fw, fw_name, ionic->dev);
- if (err) {
- NL_SET_ERR_MSG_MOD(extack, "Unable to find firmware file");
- goto err_out;
- }
-
buf_sz = sizeof(idev->dev_cmd_regs->data);
netdev_dbg(netdev,
@@ -200,7 +192,5 @@ err_out:
devlink_flash_update_status_notify(dl, "Flash failed", NULL, 0, 0);
else
devlink_flash_update_status_notify(dl, "Flash done", NULL, 0, 0);
- release_firmware(fw);
- devlink_flash_update_end_notify(dl);
return err;
}
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_lif.c b/drivers/net/ethernet/pensando/ionic/ionic_lif.c
index a12df3946a07..11140915c2da 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_lif.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_lif.c
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2017 - 2019 Pensando Systems, Inc */
+#include <linux/ethtool.h>
#include <linux/printk.h>
#include <linux/dynamic_debug.h>
#include <linux/netdevice.h>
@@ -123,6 +124,12 @@ static void ionic_link_status_check(struct ionic_lif *lif)
link_up = link_status == IONIC_PORT_OPER_STATUS_UP;
if (link_up) {
+ if (lif->netdev->flags & IFF_UP && netif_running(lif->netdev)) {
+ mutex_lock(&lif->queue_lock);
+ ionic_start_queues(lif);
+ mutex_unlock(&lif->queue_lock);
+ }
+
if (!netif_carrier_ok(netdev)) {
u32 link_speed;
@@ -132,12 +139,6 @@ static void ionic_link_status_check(struct ionic_lif *lif)
link_speed / 1000);
netif_carrier_on(netdev);
}
-
- if (lif->netdev->flags & IFF_UP && netif_running(lif->netdev)) {
- mutex_lock(&lif->queue_lock);
- ionic_start_queues(lif);
- mutex_unlock(&lif->queue_lock);
- }
} else {
if (netif_carrier_ok(netdev)) {
netdev_info(netdev, "Link down\n");
@@ -841,7 +842,7 @@ static bool ionic_notifyq_service(struct ionic_cq *cq,
case IONIC_EVENT_RESET:
work = kzalloc(sizeof(*work), GFP_ATOMIC);
if (!work) {
- netdev_err(lif->netdev, "%s OOM\n", __func__);
+ netdev_err(lif->netdev, "Reset event dropped\n");
} else {
work->type = IONIC_DW_TYPE_LIF_RESET;
ionic_lif_deferred_enqueue(&lif->deferred, work);
@@ -1050,10 +1051,8 @@ static int ionic_lif_addr(struct ionic_lif *lif, const u8 *addr, bool add,
if (!can_sleep) {
work = kzalloc(sizeof(*work), GFP_ATOMIC);
- if (!work) {
- netdev_err(lif->netdev, "%s OOM\n", __func__);
+ if (!work)
return -ENOMEM;
- }
work->type = add ? IONIC_DW_TYPE_RX_ADDR_ADD :
IONIC_DW_TYPE_RX_ADDR_DEL;
memcpy(work->addr, addr, ETH_ALEN);
@@ -1074,22 +1073,22 @@ static int ionic_lif_addr(struct ionic_lif *lif, const u8 *addr, bool add,
static int ionic_addr_add(struct net_device *netdev, const u8 *addr)
{
- return ionic_lif_addr(netdev_priv(netdev), addr, true, true);
+ return ionic_lif_addr(netdev_priv(netdev), addr, ADD_ADDR, CAN_SLEEP);
}
static int ionic_ndo_addr_add(struct net_device *netdev, const u8 *addr)
{
- return ionic_lif_addr(netdev_priv(netdev), addr, true, false);
+ return ionic_lif_addr(netdev_priv(netdev), addr, ADD_ADDR, CAN_NOT_SLEEP);
}
static int ionic_addr_del(struct net_device *netdev, const u8 *addr)
{
- return ionic_lif_addr(netdev_priv(netdev), addr, false, true);
+ return ionic_lif_addr(netdev_priv(netdev), addr, DEL_ADDR, CAN_SLEEP);
}
static int ionic_ndo_addr_del(struct net_device *netdev, const u8 *addr)
{
- return ionic_lif_addr(netdev_priv(netdev), addr, false, false);
+ return ionic_lif_addr(netdev_priv(netdev), addr, DEL_ADDR, CAN_NOT_SLEEP);
}
static void ionic_lif_rx_mode(struct ionic_lif *lif, unsigned int rx_mode)
@@ -1129,38 +1128,10 @@ static void ionic_lif_rx_mode(struct ionic_lif *lif, unsigned int rx_mode)
lif->rx_mode = rx_mode;
}
-static void _ionic_lif_rx_mode(struct ionic_lif *lif, unsigned int rx_mode,
- bool from_ndo)
-{
- struct ionic_deferred_work *work;
-
- if (from_ndo) {
- work = kzalloc(sizeof(*work), GFP_ATOMIC);
- if (!work) {
- netdev_err(lif->netdev, "%s OOM\n", __func__);
- return;
- }
- work->type = IONIC_DW_TYPE_RX_MODE;
- work->rx_mode = rx_mode;
- netdev_dbg(lif->netdev, "deferred: rx_mode\n");
- ionic_lif_deferred_enqueue(&lif->deferred, work);
- } else {
- ionic_lif_rx_mode(lif, rx_mode);
- }
-}
-
-static void ionic_dev_uc_sync(struct net_device *netdev, bool from_ndo)
-{
- if (from_ndo)
- __dev_uc_sync(netdev, ionic_ndo_addr_add, ionic_ndo_addr_del);
- else
- __dev_uc_sync(netdev, ionic_addr_add, ionic_addr_del);
-
-}
-
-static void ionic_set_rx_mode(struct net_device *netdev, bool from_ndo)
+static void ionic_set_rx_mode(struct net_device *netdev, bool can_sleep)
{
struct ionic_lif *lif = netdev_priv(netdev);
+ struct ionic_deferred_work *work;
unsigned int nfilters;
unsigned int rx_mode;
@@ -1177,7 +1148,10 @@ static void ionic_set_rx_mode(struct net_device *netdev, bool from_ndo)
* we remove our overflow flag and check the netdev flags
* to see if we can disable NIC PROMISC
*/
- ionic_dev_uc_sync(netdev, from_ndo);
+ if (can_sleep)
+ __dev_uc_sync(netdev, ionic_addr_add, ionic_addr_del);
+ else
+ __dev_uc_sync(netdev, ionic_ndo_addr_add, ionic_ndo_addr_del);
nfilters = le32_to_cpu(lif->identity->eth.max_ucast_filters);
if (netdev_uc_count(netdev) + 1 > nfilters) {
rx_mode |= IONIC_RX_MODE_F_PROMISC;
@@ -1189,7 +1163,10 @@ static void ionic_set_rx_mode(struct net_device *netdev, bool from_ndo)
}
/* same for multicast */
- ionic_dev_uc_sync(netdev, from_ndo);
+ if (can_sleep)
+ __dev_mc_sync(netdev, ionic_addr_add, ionic_addr_del);
+ else
+ __dev_mc_sync(netdev, ionic_ndo_addr_add, ionic_ndo_addr_del);
nfilters = le32_to_cpu(lif->identity->eth.max_mcast_filters);
if (netdev_mc_count(netdev) > nfilters) {
rx_mode |= IONIC_RX_MODE_F_ALLMULTI;
@@ -1200,13 +1177,26 @@ static void ionic_set_rx_mode(struct net_device *netdev, bool from_ndo)
rx_mode &= ~IONIC_RX_MODE_F_ALLMULTI;
}
- if (lif->rx_mode != rx_mode)
- _ionic_lif_rx_mode(lif, rx_mode, from_ndo);
+ if (lif->rx_mode != rx_mode) {
+ if (!can_sleep) {
+ work = kzalloc(sizeof(*work), GFP_ATOMIC);
+ if (!work) {
+ netdev_err(lif->netdev, "rxmode change dropped\n");
+ return;
+ }
+ work->type = IONIC_DW_TYPE_RX_MODE;
+ work->rx_mode = rx_mode;
+ netdev_dbg(lif->netdev, "deferred: rx_mode\n");
+ ionic_lif_deferred_enqueue(&lif->deferred, work);
+ } else {
+ ionic_lif_rx_mode(lif, rx_mode);
+ }
+ }
}
static void ionic_ndo_set_rx_mode(struct net_device *netdev)
{
- ionic_set_rx_mode(netdev, true);
+ ionic_set_rx_mode(netdev, CAN_NOT_SLEEP);
}
static __le64 ionic_netdev_features_to_nic(netdev_features_t features)
@@ -1475,12 +1465,14 @@ static int ionic_change_mtu(struct net_device *netdev, int new_mtu)
if (err)
return err;
- netdev->mtu = new_mtu;
/* if we're not running, nothing more to do */
- if (!netif_running(netdev))
+ if (!netif_running(netdev)) {
+ netdev->mtu = new_mtu;
return 0;
+ }
ionic_stop_queues_reconfig(lif);
+ netdev->mtu = new_mtu;
return ionic_start_queues_reconfig(lif);
}
@@ -1625,6 +1617,24 @@ static void ionic_lif_rss_deinit(struct ionic_lif *lif)
ionic_lif_rss_config(lif, 0x0, NULL, NULL);
}
+static void ionic_lif_quiesce(struct ionic_lif *lif)
+{
+ struct ionic_admin_ctx ctx = {
+ .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
+ .cmd.lif_setattr = {
+ .opcode = IONIC_CMD_LIF_SETATTR,
+ .index = cpu_to_le16(lif->index),
+ .attr = IONIC_LIF_ATTR_STATE,
+ .state = IONIC_LIF_QUIESCE,
+ },
+ };
+ int err;
+
+ err = ionic_adminq_post_wait(lif, &ctx);
+ if (err)
+ netdev_err(lif->netdev, "lif quiesce failed %d\n", err);
+}
+
static void ionic_txrx_disable(struct ionic_lif *lif)
{
unsigned int i;
@@ -1639,6 +1649,8 @@ static void ionic_txrx_disable(struct ionic_lif *lif)
for (i = 0; i < lif->nxqs; i++)
err = ionic_qcq_disable(lif->rxqcqs[i], (err != -ETIMEDOUT));
}
+
+ ionic_lif_quiesce(lif);
}
static void ionic_txrx_deinit(struct ionic_lif *lif)
@@ -1773,7 +1785,7 @@ static int ionic_txrx_init(struct ionic_lif *lif)
if (lif->netdev->features & NETIF_F_RXHASH)
ionic_lif_rss_init(lif);
- ionic_set_rx_mode(lif->netdev, false);
+ ionic_set_rx_mode(lif->netdev, CAN_SLEEP);
return 0;
@@ -2781,7 +2793,7 @@ static int ionic_station_set(struct ionic_lif *lif)
*/
if (!ether_addr_equal(ctx.comp.lif_getattr.mac,
netdev->dev_addr))
- ionic_lif_addr(lif, netdev->dev_addr, true, true);
+ ionic_lif_addr(lif, netdev->dev_addr, ADD_ADDR, CAN_SLEEP);
} else {
/* Update the netdev mac with the device's mac */
memcpy(addr.sa_data, ctx.comp.lif_getattr.mac, netdev->addr_len);
@@ -2798,7 +2810,7 @@ static int ionic_station_set(struct ionic_lif *lif)
netdev_dbg(lif->netdev, "adding station MAC addr %pM\n",
netdev->dev_addr);
- ionic_lif_addr(lif, netdev->dev_addr, true, true);
+ ionic_lif_addr(lif, netdev->dev_addr, ADD_ADDR, CAN_SLEEP);
return 0;
}
@@ -2959,6 +2971,8 @@ int ionic_lif_register(struct ionic_lif *lif)
dev_err(lif->ionic->dev, "Cannot register net device, aborting\n");
return err;
}
+
+ ionic_link_status_check_request(lif, true);
lif->registered = true;
ionic_lif_set_netdev_info(lif);
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_lif.h b/drivers/net/ethernet/pensando/ionic/ionic_lif.h
index 0224dfd24b8a..9bed42719ae5 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_lif.h
+++ b/drivers/net/ethernet/pensando/ionic/ionic_lif.h
@@ -13,6 +13,12 @@
#define IONIC_MAX_NUM_NAPI_CNTR (NAPI_POLL_WEIGHT + 1)
#define IONIC_MAX_NUM_SG_CNTR (IONIC_TX_MAX_SG_ELEMS + 1)
+
+#define ADD_ADDR true
+#define DEL_ADDR false
+#define CAN_SLEEP true
+#define CAN_NOT_SLEEP false
+
#define IONIC_RX_COPYBREAK_DEFAULT 256
#define IONIC_TX_BUDGET_DEFAULT 256
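
The four macros are purely for readability, so that call sites document what each boolean argument means. Condensed from the ionic_lif.c hunks above:

ionic_lif_addr(lif, addr, ADD_ADDR, CAN_SLEEP);		/* was: true, true */
ionic_lif_addr(lif, addr, DEL_ADDR, CAN_NOT_SLEEP);	/* was: false, false */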
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_main.c b/drivers/net/ethernet/pensando/ionic/ionic_main.c
index d355676f6c16..fbc57de6683e 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_main.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_main.c
@@ -511,10 +511,8 @@ int ionic_port_init(struct ionic *ionic)
idev->port_info_sz,
&idev->port_info_pa,
GFP_KERNEL);
- if (!idev->port_info) {
- dev_err(ionic->dev, "Failed to allocate port info\n");
+ if (!idev->port_info)
return -ENOMEM;
- }
}
sz = min(sizeof(ident->port.config), sizeof(idev->dev_cmd_regs->data));
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_stats.c b/drivers/net/ethernet/pensando/ionic/ionic_stats.c
index ff20a2ac4c2f..6ae75b771a15 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_stats.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_stats.c
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2017 - 2019 Pensando Systems, Inc */
+#include <linux/ethtool.h>
#include <linux/kernel.h>
#include <linux/mutex.h>
#include <linux/netdevice.h>
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_txrx.c b/drivers/net/ethernet/pensando/ionic/ionic_txrx.c
index b3d2250c77d0..9156c9825a16 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_txrx.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_txrx.c
@@ -392,11 +392,6 @@ void ionic_rx_fill(struct ionic_queue *q)
q->dbval | q->head_idx);
}
-static void ionic_rx_fill_cb(void *arg)
-{
- ionic_rx_fill(arg);
-}
-
void ionic_rx_empty(struct ionic_queue *q)
{
struct ionic_desc_info *desc_info;
@@ -480,6 +475,7 @@ int ionic_rx_napi(struct napi_struct *napi, int budget)
struct ionic_cq *cq = napi_to_cq(napi);
struct ionic_dev *idev;
struct ionic_lif *lif;
+ u16 rx_fill_threshold;
u32 work_done = 0;
u32 flags = 0;
@@ -489,7 +485,9 @@ int ionic_rx_napi(struct napi_struct *napi, int budget)
work_done = ionic_cq_service(cq, budget,
ionic_rx_service, NULL, NULL);
- if (work_done)
+ rx_fill_threshold = min_t(u16, IONIC_RX_FILL_THRESHOLD,
+ cq->num_descs / IONIC_RX_FILL_DIV);
+ if (work_done && ionic_q_space_avail(cq->bound_q) >= rx_fill_threshold)
ionic_rx_fill(cq->bound_q);
if (work_done < budget && napi_complete_done(napi, work_done)) {
@@ -518,6 +516,7 @@ int ionic_txrx_napi(struct napi_struct *napi, int budget)
struct ionic_dev *idev;
struct ionic_lif *lif;
struct ionic_cq *txcq;
+ u16 rx_fill_threshold;
u32 rx_work_done = 0;
u32 tx_work_done = 0;
u32 flags = 0;
@@ -531,8 +530,11 @@ int ionic_txrx_napi(struct napi_struct *napi, int budget)
rx_work_done = ionic_cq_service(rxcq, budget,
ionic_rx_service, NULL, NULL);
- if (rx_work_done)
- ionic_rx_fill_cb(rxcq->bound_q);
+
+ rx_fill_threshold = min_t(u16, IONIC_RX_FILL_THRESHOLD,
+ rxcq->num_descs / IONIC_RX_FILL_DIV);
+ if (rx_work_done && ionic_q_space_avail(rxcq->bound_q) >= rx_fill_threshold)
+ ionic_rx_fill(rxcq->bound_q);
if (rx_work_done < budget && napi_complete_done(napi, rx_work_done)) {
ionic_dim_update(qcq);
diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c
index d58b51d277f1..ca1535ebb6e7 100644
--- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c
+++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c
@@ -5,6 +5,7 @@
*/
#include <linux/etherdevice.h>
+#include <linux/ethtool.h>
#include <linux/if_arp.h>
#include <net/pkt_sched.h>
#include "rmnet_config.h"
diff --git a/drivers/net/ethernet/realtek/r8169_main.c b/drivers/net/ethernet/realtek/r8169_main.c
index 85d9c3e30c69..3ef1b31c95d1 100644
--- a/drivers/net/ethernet/realtek/r8169_main.c
+++ b/drivers/net/ethernet/realtek/r8169_main.c
@@ -67,7 +67,7 @@
#define R8169_REGS_SIZE 256
#define R8169_RX_BUF_SIZE (SZ_16K - 1)
-#define NUM_TX_DESC 64 /* Number of Tx descriptor registers */
+#define NUM_TX_DESC 256 /* Number of Tx descriptor registers */
#define NUM_RX_DESC 256U /* Number of Rx descriptor registers */
#define R8169_TX_RING_BYTES (NUM_TX_DESC * sizeof(struct TxDesc))
#define R8169_RX_RING_BYTES (NUM_RX_DESC * sizeof(struct RxDesc))
@@ -584,12 +584,6 @@ enum rtl_flag {
RTL_FLAG_MAX
};
-struct rtl8169_stats {
- u64 packets;
- u64 bytes;
- struct u64_stats_sync syncp;
-};
-
struct rtl8169_private {
void __iomem *mmio_addr; /* memory map physical address */
struct pci_dev *pci_dev;
@@ -600,8 +594,6 @@ struct rtl8169_private {
u32 cur_rx; /* Index into the Rx descriptor buffer of next Rx pkt. */
u32 cur_tx; /* Index into the Tx descriptor buffer of next Tx pkt. */
u32 dirty_tx;
- struct rtl8169_stats rx_stats;
- struct rtl8169_stats tx_stats;
struct TxDesc *TxDescArray; /* 256-aligned Tx descriptor ring */
struct RxDesc *RxDescArray; /* 256-aligned Rx descriptor ring */
dma_addr_t TxPhyAddr;
@@ -700,27 +692,6 @@ static bool rtl_supports_eee(struct rtl8169_private *tp)
tp->mac_version != RTL_GIGA_MAC_VER_39;
}
-static void rtl_get_priv_stats(struct rtl8169_stats *stats,
- u64 *pkts, u64 *bytes)
-{
- unsigned int start;
-
- do {
- start = u64_stats_fetch_begin_irq(&stats->syncp);
- *pkts = stats->packets;
- *bytes = stats->bytes;
- } while (u64_stats_fetch_retry_irq(&stats->syncp, start));
-}
-
-static void rtl_inc_priv_stats(struct rtl8169_stats *stats,
- u64 pkts, u64 bytes)
-{
- u64_stats_update_begin(&stats->syncp);
- stats->packets += pkts;
- stats->bytes += bytes;
- u64_stats_update_end(&stats->syncp);
-}
-
static void rtl_read_mac_from_reg(struct rtl8169_private *tp, u8 *mac, int reg)
{
int i;
@@ -1591,16 +1562,6 @@ static void rtl8169_do_counters(struct rtl8169_private *tp, u32 counter_cmd)
rtl_loop_wait_low(tp, &rtl_counters_cond, 10, 1000);
}
-static void rtl8169_reset_counters(struct rtl8169_private *tp)
-{
- /*
- * Versions prior to RTL_GIGA_MAC_VER_19 don't support resetting the
- * tally counters.
- */
- if (tp->mac_version >= RTL_GIGA_MAC_VER_19)
- rtl8169_do_counters(tp, CounterReset);
-}
-
static void rtl8169_update_counters(struct rtl8169_private *tp)
{
u8 val = RTL_R8(tp, ChipCmd);
@@ -1635,13 +1596,16 @@ static void rtl8169_init_counter_offsets(struct rtl8169_private *tp)
if (tp->tc_offset.inited)
return;
- rtl8169_reset_counters(tp);
- rtl8169_update_counters(tp);
+ if (tp->mac_version >= RTL_GIGA_MAC_VER_19) {
+ rtl8169_do_counters(tp, CounterReset);
+ } else {
+ rtl8169_update_counters(tp);
+ tp->tc_offset.tx_errors = counters->tx_errors;
+ tp->tc_offset.tx_multi_collision = counters->tx_multi_collision;
+ tp->tc_offset.tx_aborted = counters->tx_aborted;
+ tp->tc_offset.rx_missed = counters->rx_missed;
+ }
- tp->tc_offset.tx_errors = counters->tx_errors;
- tp->tc_offset.tx_multi_collision = counters->tx_multi_collision;
- tp->tc_offset.tx_aborted = counters->tx_aborted;
- tp->tc_offset.rx_missed = counters->rx_missed;
tp->tc_offset.inited = true;
}
@@ -4170,13 +4134,13 @@ static bool rtl8169_tso_csum_v2(struct rtl8169_private *tp,
return true;
}
-static bool rtl_tx_slots_avail(struct rtl8169_private *tp,
- unsigned int nr_frags)
+static bool rtl_tx_slots_avail(struct rtl8169_private *tp)
{
- unsigned int slots_avail = tp->dirty_tx + NUM_TX_DESC - tp->cur_tx;
+ unsigned int slots_avail = READ_ONCE(tp->dirty_tx) + NUM_TX_DESC
+ - READ_ONCE(tp->cur_tx);
/* A skbuff with nr_frags needs nr_frags+1 entries in the tx queue */
- return slots_avail > nr_frags;
+ return slots_avail > MAX_SKB_FRAGS;
}
/* Versions RTL8102e and from RTL8168c onwards support csum_v2 */
@@ -4209,17 +4173,12 @@ static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb,
bool stop_queue, door_bell;
u32 opts[2];
- txd_first = tp->TxDescArray + entry;
-
- if (unlikely(!rtl_tx_slots_avail(tp, frags))) {
+ if (unlikely(!rtl_tx_slots_avail(tp))) {
if (net_ratelimit())
netdev_err(dev, "BUG! Tx Ring full when queue awake!\n");
goto err_stop_0;
}
- if (unlikely(le32_to_cpu(txd_first->opts1) & DescOwn))
- goto err_stop_0;
-
opts[1] = rtl8169_tx_vlan_tag(skb);
opts[0] = 0;
@@ -4232,6 +4191,8 @@ static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb,
entry, false)))
goto err_dma_0;
+ txd_first = tp->TxDescArray + entry;
+
if (frags) {
if (rtl8169_xmit_frags(tp, skb, opts, entry))
goto err_dma_1;
@@ -4254,22 +4215,15 @@ static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb,
/* rtl_tx needs to see the descriptor changes before the updated tp->cur_tx */
smp_wmb();
- tp->cur_tx += frags + 1;
+ WRITE_ONCE(tp->cur_tx, tp->cur_tx + frags + 1);
- stop_queue = !rtl_tx_slots_avail(tp, MAX_SKB_FRAGS);
+ stop_queue = !rtl_tx_slots_avail(tp);
if (unlikely(stop_queue)) {
/* Avoid wrongly optimistic queue wake-up: rtl_tx thread must
* not miss a ring update when it notices a stopped queue.
*/
smp_wmb();
netif_stop_queue(dev);
- door_bell = true;
- }
-
- if (door_bell)
- rtl8169_doorbell(tp);
-
- if (unlikely(stop_queue)) {
/* Sync with rtl_tx:
* - publish queue status and cur_tx ring index (write barrier)
* - refresh dirty_tx ring index (read barrier).
@@ -4277,11 +4231,15 @@ static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb,
* status and forget to wake up queue, a racing rtl_tx thread
* can't.
*/
- smp_mb();
- if (rtl_tx_slots_avail(tp, MAX_SKB_FRAGS))
+ smp_mb__after_atomic();
+ if (rtl_tx_slots_avail(tp))
netif_start_queue(dev);
+ door_bell = true;
}
+ if (door_bell)
+ rtl8169_doorbell(tp);
+
return NETDEV_TX_OK;
err_dma_1:
@@ -4390,20 +4348,20 @@ static void rtl8169_pcierr_interrupt(struct net_device *dev)
static void rtl_tx(struct net_device *dev, struct rtl8169_private *tp,
int budget)
{
- unsigned int dirty_tx, tx_left, bytes_compl = 0, pkts_compl = 0;
+ unsigned int dirty_tx, bytes_compl = 0, pkts_compl = 0;
+ struct sk_buff *skb;
dirty_tx = tp->dirty_tx;
- smp_rmb();
- for (tx_left = tp->cur_tx - dirty_tx; tx_left > 0; tx_left--) {
+ while (READ_ONCE(tp->cur_tx) != dirty_tx) {
unsigned int entry = dirty_tx % NUM_TX_DESC;
- struct sk_buff *skb = tp->tx_skb[entry].skb;
u32 status;
status = le32_to_cpu(tp->TxDescArray[entry].opts1);
if (status & DescOwn)
break;
+ skb = tp->tx_skb[entry].skb;
rtl8169_unmap_tx_skb(tp, entry);
if (skb) {
@@ -4416,10 +4374,8 @@ static void rtl_tx(struct net_device *dev, struct rtl8169_private *tp,
if (tp->dirty_tx != dirty_tx) {
netdev_completed_queue(dev, pkts_compl, bytes_compl);
+ dev_sw_netstats_tx_add(dev, pkts_compl, bytes_compl);
- rtl_inc_priv_stats(&tp->tx_stats, pkts_compl, bytes_compl);
-
- tp->dirty_tx = dirty_tx;
/* Sync with rtl8169_start_xmit:
* - publish dirty_tx ring index (write barrier)
* - refresh cur_tx ring index and queue status (read barrier)
@@ -4427,18 +4383,18 @@ static void rtl_tx(struct net_device *dev, struct rtl8169_private *tp,
* a racing xmit thread can only have a right view of the
* ring status.
*/
- smp_mb();
- if (netif_queue_stopped(dev) &&
- rtl_tx_slots_avail(tp, MAX_SKB_FRAGS)) {
+ smp_store_mb(tp->dirty_tx, dirty_tx);
+ if (netif_queue_stopped(dev) && rtl_tx_slots_avail(tp))
netif_wake_queue(dev);
- }
/*
* 8168 hack: TxPoll requests are lost when the Tx packets are
* too close. Let's kick an extra TxPoll request when a burst
* of start_xmit activity is detected (if it is not detected,
* it is slow enough). -- FR
+ * If skb is NULL then we come here again once a tx irq is
+ * triggered after the last fragment is marked transmitted.
*/
- if (tp->cur_tx != dirty_tx)
+ if (tp->cur_tx != dirty_tx && skb)
rtl8169_doorbell(tp);
}
}
@@ -4539,7 +4495,7 @@ static int rtl_rx(struct net_device *dev, struct rtl8169_private *tp, u32 budget
napi_gro_receive(&tp->napi, skb);
- rtl_inc_priv_stats(&tp->rx_stats, 1, pkt_size);
+ dev_sw_netstats_rx_add(dev, pkt_size);
release_descriptor:
rtl8169_mark_to_asic(desc);
}
@@ -4721,6 +4677,7 @@ static int rtl_open(struct net_device *dev)
{
struct rtl8169_private *tp = netdev_priv(dev);
struct pci_dev *pdev = tp->pci_dev;
+ unsigned long irqflags;
int retval = -ENOMEM;
pm_runtime_get_sync(&pdev->dev);
@@ -4732,7 +4689,7 @@ static int rtl_open(struct net_device *dev)
tp->TxDescArray = dma_alloc_coherent(&pdev->dev, R8169_TX_RING_BYTES,
&tp->TxPhyAddr, GFP_KERNEL);
if (!tp->TxDescArray)
- goto err_pm_runtime_put;
+ goto out;
tp->RxDescArray = dma_alloc_coherent(&pdev->dev, R8169_RX_RING_BYTES,
&tp->RxPhyAddr, GFP_KERNEL);
@@ -4745,8 +4702,9 @@ static int rtl_open(struct net_device *dev)
rtl_request_firmware(tp);
+ irqflags = pci_dev_msi_enabled(pdev) ? IRQF_NO_THREAD : IRQF_SHARED;
retval = request_irq(pci_irq_vector(pdev, 0), rtl8169_interrupt,
- IRQF_SHARED, dev->name, tp);
+ irqflags, dev->name, tp);
if (retval < 0)
goto err_release_fw_2;
@@ -4757,9 +4715,9 @@ static int rtl_open(struct net_device *dev)
rtl8169_up(tp);
rtl8169_init_counter_offsets(tp);
netif_start_queue(dev);
-
- pm_runtime_put_sync(&pdev->dev);
out:
+ pm_runtime_put_sync(&pdev->dev);
+
return retval;
err_free_irq:
@@ -4775,8 +4733,6 @@ err_free_tx_0:
dma_free_coherent(&pdev->dev, R8169_TX_RING_BYTES, tp->TxDescArray,
tp->TxPhyAddr);
tp->TxDescArray = NULL;
-err_pm_runtime_put:
- pm_runtime_put_noidle(&pdev->dev);
goto out;
}
@@ -4790,9 +4746,7 @@ rtl8169_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
pm_runtime_get_noresume(&pdev->dev);
netdev_stats_to_stats64(stats, &dev->stats);
-
- rtl_get_priv_stats(&tp->rx_stats, &stats->rx_packets, &stats->rx_bytes);
- rtl_get_priv_stats(&tp->tx_stats, &stats->tx_packets, &stats->tx_bytes);
+ dev_fetch_sw_netstats(stats, dev->tstats);
/*
* Fetch additional counter values missing in stats collected by driver
@@ -5204,8 +5158,8 @@ static int rtl_get_ether_clk(struct rtl8169_private *tp)
if (rc == -ENOENT)
/* clk-core allows NULL (for suspend / resume) */
rc = 0;
- else if (rc != -EPROBE_DEFER)
- dev_err(d, "failed to get clk: %d\n", rc);
+ else
+ dev_err_probe(d, rc, "failed to get clk\n");
} else {
tp->clk = clk;
rc = clk_prepare_enable(clk);
@@ -5263,6 +5217,11 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
tp->eee_adv = -1;
tp->ocp_base = OCP_STD_PHY_BASE;
+ dev->tstats = devm_netdev_alloc_pcpu_stats(&pdev->dev,
+ struct pcpu_sw_netstats);
+ if (!dev->tstats)
+ return -ENOMEM;
+
/* Get the *optional* external "ether_clk" used on some boards */
rc = rtl_get_ether_clk(tp);
if (rc)
@@ -5340,8 +5299,6 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
}
INIT_WORK(&tp->wk.work, rtl_task);
- u64_stats_init(&tp->rx_stats.syncp);
- u64_stats_init(&tp->tx_stats.syncp);
rtl_init_mac_address(tp);
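
The driver-private rtl8169_stats machinery is replaced by the generic per-cpu software netstats helpers. Condensed, the pattern this patch adopts is:

/* probe: per-cpu counters, freed automatically via devres */
dev->tstats = devm_netdev_alloc_pcpu_stats(&pdev->dev,
					   struct pcpu_sw_netstats);
if (!dev->tstats)
	return -ENOMEM;

/* hot paths: lockless per-cpu updates */
dev_sw_netstats_rx_add(dev, pkt_size);
dev_sw_netstats_tx_add(dev, pkts_compl, bytes_compl);

/* .ndo_get_stats64: aggregate across CPUs */
dev_fetch_sw_netstats(stats, dev->tstats);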
diff --git a/drivers/net/ethernet/sfc/bitfield.h b/drivers/net/ethernet/sfc/bitfield.h
index 2590cab53e54..1f981dfe4bdc 100644
--- a/drivers/net/ethernet/sfc/bitfield.h
+++ b/drivers/net/ethernet/sfc/bitfield.h
@@ -285,7 +285,13 @@ typedef union efx_oword {
field10, value10, \
field11, value11, \
field12, value12, \
- field13, value13) \
+ field13, value13, \
+ field14, value14, \
+ field15, value15, \
+ field16, value16, \
+ field17, value17, \
+ field18, value18, \
+ field19, value19) \
(EFX_INSERT_FIELD_NATIVE((min), (max), field1, (value1)) | \
EFX_INSERT_FIELD_NATIVE((min), (max), field2, (value2)) | \
EFX_INSERT_FIELD_NATIVE((min), (max), field3, (value3)) | \
@@ -298,7 +304,13 @@ typedef union efx_oword {
EFX_INSERT_FIELD_NATIVE((min), (max), field10, (value10)) | \
EFX_INSERT_FIELD_NATIVE((min), (max), field11, (value11)) | \
EFX_INSERT_FIELD_NATIVE((min), (max), field12, (value12)) | \
- EFX_INSERT_FIELD_NATIVE((min), (max), field13, (value13)))
+ EFX_INSERT_FIELD_NATIVE((min), (max), field13, (value13)) | \
+ EFX_INSERT_FIELD_NATIVE((min), (max), field14, (value14)) | \
+ EFX_INSERT_FIELD_NATIVE((min), (max), field15, (value15)) | \
+ EFX_INSERT_FIELD_NATIVE((min), (max), field16, (value16)) | \
+ EFX_INSERT_FIELD_NATIVE((min), (max), field17, (value17)) | \
+ EFX_INSERT_FIELD_NATIVE((min), (max), field18, (value18)) | \
+ EFX_INSERT_FIELD_NATIVE((min), (max), field19, (value19)))
#define EFX_INSERT_FIELDS64(...) \
cpu_to_le64(EFX_INSERT_FIELDS_NATIVE(__VA_ARGS__))
@@ -340,7 +352,19 @@ typedef union efx_oword {
#endif
/* Populate an octword field with various numbers of arguments */
-#define EFX_POPULATE_OWORD_13 EFX_POPULATE_OWORD
+#define EFX_POPULATE_OWORD_19 EFX_POPULATE_OWORD
+#define EFX_POPULATE_OWORD_18(oword, ...) \
+ EFX_POPULATE_OWORD_19(oword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
+#define EFX_POPULATE_OWORD_17(oword, ...) \
+ EFX_POPULATE_OWORD_18(oword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
+#define EFX_POPULATE_OWORD_16(oword, ...) \
+ EFX_POPULATE_OWORD_17(oword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
+#define EFX_POPULATE_OWORD_15(oword, ...) \
+ EFX_POPULATE_OWORD_16(oword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
+#define EFX_POPULATE_OWORD_14(oword, ...) \
+ EFX_POPULATE_OWORD_15(oword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
+#define EFX_POPULATE_OWORD_13(oword, ...) \
+ EFX_POPULATE_OWORD_14(oword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
#define EFX_POPULATE_OWORD_12(oword, ...) \
EFX_POPULATE_OWORD_13(oword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
#define EFX_POPULATE_OWORD_11(oword, ...) \
@@ -375,7 +399,19 @@ typedef union efx_oword {
EFX_DWORD_3, 0xffffffff)
/* Populate a quadword field with various numbers of arguments */
-#define EFX_POPULATE_QWORD_13 EFX_POPULATE_QWORD
+#define EFX_POPULATE_QWORD_19 EFX_POPULATE_QWORD
+#define EFX_POPULATE_QWORD_18(qword, ...) \
+ EFX_POPULATE_QWORD_19(qword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
+#define EFX_POPULATE_QWORD_17(qword, ...) \
+ EFX_POPULATE_QWORD_18(qword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
+#define EFX_POPULATE_QWORD_16(qword, ...) \
+ EFX_POPULATE_QWORD_17(qword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
+#define EFX_POPULATE_QWORD_15(qword, ...) \
+ EFX_POPULATE_QWORD_16(qword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
+#define EFX_POPULATE_QWORD_14(qword, ...) \
+ EFX_POPULATE_QWORD_15(qword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
+#define EFX_POPULATE_QWORD_13(qword, ...) \
+ EFX_POPULATE_QWORD_14(qword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
#define EFX_POPULATE_QWORD_12(qword, ...) \
EFX_POPULATE_QWORD_13(qword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
#define EFX_POPULATE_QWORD_11(qword, ...) \
@@ -408,7 +444,19 @@ typedef union efx_oword {
EFX_DWORD_1, 0xffffffff)
/* Populate a dword field with various numbers of arguments */
-#define EFX_POPULATE_DWORD_13 EFX_POPULATE_DWORD
+#define EFX_POPULATE_DWORD_19 EFX_POPULATE_DWORD
+#define EFX_POPULATE_DWORD_18(dword, ...) \
+ EFX_POPULATE_DWORD_19(dword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
+#define EFX_POPULATE_DWORD_17(dword, ...) \
+ EFX_POPULATE_DWORD_18(dword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
+#define EFX_POPULATE_DWORD_16(dword, ...) \
+ EFX_POPULATE_DWORD_17(dword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
+#define EFX_POPULATE_DWORD_15(dword, ...) \
+ EFX_POPULATE_DWORD_16(dword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
+#define EFX_POPULATE_DWORD_14(dword, ...) \
+ EFX_POPULATE_DWORD_15(dword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
+#define EFX_POPULATE_DWORD_13(dword, ...) \
+ EFX_POPULATE_DWORD_14(dword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
#define EFX_POPULATE_DWORD_12(dword, ...) \
EFX_POPULATE_DWORD_13(dword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
#define EFX_POPULATE_DWORD_11(dword, ...) \
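
The count-suffixed populate macros grow by the same padding trick already used up to 13 arguments: each _N variant prepends one (EFX_DUMMY_FIELD, 0) pair and delegates to _N+1, so only the 19-argument form actually expands fields. Illustratively:

/* EFX_POPULATE_OWORD_17(reg, F1, v1, ..., F17, v17) expands to
 * EFX_POPULATE_OWORD_19(reg, EFX_DUMMY_FIELD, 0, EFX_DUMMY_FIELD, 0,
 *			 F1, v1, ..., F17, v17)
 * where EFX_DUMMY_FIELD contributes zero bits to the result.
 */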
diff --git a/drivers/net/ethernet/sfc/ef100_nic.c b/drivers/net/ethernet/sfc/ef100_nic.c
index 3148fe770356..518268ce2064 100644
--- a/drivers/net/ethernet/sfc/ef100_nic.c
+++ b/drivers/net/ethernet/sfc/ef100_nic.c
@@ -182,8 +182,20 @@ static int efx_ef100_init_datapath_caps(struct efx_nic *efx)
if (rc)
return rc;
- if (efx_ef100_has_cap(nic_data->datapath_caps2, TX_TSO_V3))
- efx->net_dev->features |= NETIF_F_TSO | NETIF_F_TSO6;
+ if (efx_ef100_has_cap(nic_data->datapath_caps2, TX_TSO_V3)) {
+ struct net_device *net_dev = efx->net_dev;
+ netdev_features_t tso = NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GSO_PARTIAL |
+ NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_UDP_TUNNEL_CSUM |
+ NETIF_F_GSO_GRE | NETIF_F_GSO_GRE_CSUM;
+
+ net_dev->features |= tso;
+ net_dev->hw_features |= tso;
+ net_dev->hw_enc_features |= tso;
+ /* EF100 HW can only offload outer checksums if they are UDP,
+ * so for GRE_CSUM we have to use GSO_PARTIAL.
+ */
+ net_dev->gso_partial_features |= NETIF_F_GSO_GRE_CSUM;
+ }
efx->num_mac_stats = MCDI_WORD(outbuf,
GET_CAPABILITIES_V4_OUT_MAC_STATS_NUM_STATS);
netif_dbg(efx, probe, efx->net_dev,
@@ -686,7 +698,7 @@ static unsigned int ef100_check_caps(const struct efx_nic *efx,
#define EF100_OFFLOAD_FEATURES (NETIF_F_HW_CSUM | NETIF_F_RXCSUM | \
NETIF_F_HIGHDMA | NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_NTUPLE | \
NETIF_F_RXHASH | NETIF_F_RXFCS | NETIF_F_TSO_ECN | NETIF_F_RXALL | \
- NETIF_F_TSO_MANGLEID | NETIF_F_HW_VLAN_CTAG_TX)
+ NETIF_F_HW_VLAN_CTAG_TX)
const struct efx_nic_type ef100_pf_nic_type = {
.revision = EFX_REV_EF100,
@@ -1101,6 +1113,9 @@ static int ef100_probe_main(struct efx_nic *efx)
nic_data->efx = efx;
net_dev->features |= efx->type->offload_features;
net_dev->hw_features |= efx->type->offload_features;
+ net_dev->hw_enc_features |= efx->type->offload_features;
+ net_dev->vlan_features |= NETIF_F_HW_CSUM | NETIF_F_SG |
+ NETIF_F_HIGHDMA | NETIF_F_ALL_TSO;
/* Populate design-parameter defaults */
nic_data->tso_max_hdr_len = ESE_EF100_DP_GZ_TSO_MAX_HDR_LEN_DEFAULT;
diff --git a/drivers/net/ethernet/sfc/ef100_tx.c b/drivers/net/ethernet/sfc/ef100_tx.c
index a90e5a9d2a37..26ef51d6b542 100644
--- a/drivers/net/ethernet/sfc/ef100_tx.c
+++ b/drivers/net/ethernet/sfc/ef100_tx.c
@@ -54,8 +54,6 @@ static bool ef100_tx_can_tso(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
struct efx_nic *efx = tx_queue->efx;
struct ef100_nic_data *nic_data;
struct efx_tx_buffer *buffer;
- struct tcphdr *tcphdr;
- struct iphdr *iphdr;
size_t header_len;
u32 mss;
@@ -98,20 +96,6 @@ static bool ef100_tx_can_tso(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
buffer->unmap_len = 0;
buffer->skb = skb;
++tx_queue->insert_count;
-
- /* Adjust the TCP checksum to exclude the total length, since we set
- * ED_INNER_IP_LEN in the descriptor.
- */
- tcphdr = tcp_hdr(skb);
- if (skb_is_gso_v6(skb)) {
- tcphdr->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
- &ipv6_hdr(skb)->daddr,
- 0, IPPROTO_TCP, 0);
- } else {
- iphdr = ip_hdr(skb);
- tcphdr->check = ~csum_tcpudp_magic(iphdr->saddr, iphdr->daddr,
- 0, IPPROTO_TCP, 0);
- }
return true;
}
@@ -203,34 +187,66 @@ static void ef100_make_tso_desc(struct efx_nic *efx,
struct efx_tx_buffer *buffer, efx_oword_t *txd,
unsigned int segment_count)
{
- u32 mangleid = (efx->net_dev->features & NETIF_F_TSO_MANGLEID) ||
- skb_shinfo(skb)->gso_type & SKB_GSO_TCP_FIXEDID ?
- ESE_GZ_TX_DESC_IP4_ID_NO_OP :
- ESE_GZ_TX_DESC_IP4_ID_INC_MOD16;
- u16 vlan_enable = efx->net_dev->features & NETIF_F_HW_VLAN_CTAG_TX ?
- skb_vlan_tag_present(skb) : 0;
+ bool gso_partial = skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL;
unsigned int len, ip_offset, tcp_offset, payload_segs;
+ u32 mangleid = ESE_GZ_TX_DESC_IP4_ID_INC_MOD16;
+ unsigned int outer_ip_offset, outer_l4_offset;
u16 vlan_tci = skb_vlan_tag_get(skb);
u32 mss = skb_shinfo(skb)->gso_size;
+ bool encap = skb->encapsulation;
+ bool udp_encap = false;
+ u16 vlan_enable = 0;
+ struct tcphdr *tcp;
+ bool outer_csum;
+ u32 paylen;
+
+ if (skb_shinfo(skb)->gso_type & SKB_GSO_TCP_FIXEDID)
+ mangleid = ESE_GZ_TX_DESC_IP4_ID_NO_OP;
+ if (efx->net_dev->features & NETIF_F_HW_VLAN_CTAG_TX)
+ vlan_enable = skb_vlan_tag_present(skb);
len = skb->len - buffer->len;
/* We use 1 for the TSO descriptor and 1 for the header */
payload_segs = segment_count - 2;
- ip_offset = skb_network_offset(skb);
- tcp_offset = skb_transport_offset(skb);
+ if (encap) {
+ outer_ip_offset = skb_network_offset(skb);
+ outer_l4_offset = skb_transport_offset(skb);
+ ip_offset = skb_inner_network_offset(skb);
+ tcp_offset = skb_inner_transport_offset(skb);
+ if (skb_shinfo(skb)->gso_type &
+ (SKB_GSO_UDP_TUNNEL | SKB_GSO_UDP_TUNNEL_CSUM))
+ udp_encap = true;
+ } else {
+ ip_offset = skb_network_offset(skb);
+ tcp_offset = skb_transport_offset(skb);
+ outer_ip_offset = outer_l4_offset = 0;
+ }
+ outer_csum = skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM;
+
+ /* subtract TCP payload length from inner checksum */
+ tcp = (void *)skb->data + tcp_offset;
+ paylen = skb->len - tcp_offset;
+ csum_replace_by_diff(&tcp->check, (__force __wsum)htonl(paylen));
- EFX_POPULATE_OWORD_13(*txd,
+ EFX_POPULATE_OWORD_19(*txd,
ESF_GZ_TX_DESC_TYPE, ESE_GZ_TX_DESC_TYPE_TSO,
ESF_GZ_TX_TSO_MSS, mss,
ESF_GZ_TX_TSO_HDR_NUM_SEGS, 1,
ESF_GZ_TX_TSO_PAYLOAD_NUM_SEGS, payload_segs,
ESF_GZ_TX_TSO_HDR_LEN_W, buffer->len >> 1,
ESF_GZ_TX_TSO_PAYLOAD_LEN, len,
+ ESF_GZ_TX_TSO_CSO_OUTER_L4, outer_csum,
ESF_GZ_TX_TSO_CSO_INNER_L4, 1,
ESF_GZ_TX_TSO_INNER_L3_OFF_W, ip_offset >> 1,
ESF_GZ_TX_TSO_INNER_L4_OFF_W, tcp_offset >> 1,
ESF_GZ_TX_TSO_ED_INNER_IP4_ID, mangleid,
ESF_GZ_TX_TSO_ED_INNER_IP_LEN, 1,
+ ESF_GZ_TX_TSO_OUTER_L3_OFF_W, outer_ip_offset >> 1,
+ ESF_GZ_TX_TSO_OUTER_L4_OFF_W, outer_l4_offset >> 1,
+ ESF_GZ_TX_TSO_ED_OUTER_UDP_LEN, udp_encap && !gso_partial,
+ ESF_GZ_TX_TSO_ED_OUTER_IP_LEN, encap && !gso_partial,
+ ESF_GZ_TX_TSO_ED_OUTER_IP4_ID, encap ? mangleid :
+ ESE_GZ_TX_DESC_IP4_ID_NO_OP,
ESF_GZ_TX_TSO_VLAN_INSERT_EN, vlan_enable,
ESF_GZ_TX_TSO_VLAN_INSERT_TCI, vlan_tci
);
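
The hunk above drops the old pseudo-header recomputation in favour of a single csum_replace_by_diff() adjustment that folds the TCP payload length out of the existing inner checksum, matching the ED_INNER_IP_LEN edit the hardware performs per segment. A minimal sketch of that adjustment, assuming a linear skb with a valid transport offset (sketch_subtract_paylen() is a hypothetical name, not part of the driver):

	/* Sketch: remove the payload-length term from an inner TCP checksum.
	 * Assumes skb is linear and skb_transport_offset() is already valid.
	 */
	static void sketch_subtract_paylen(struct sk_buff *skb)
	{
		struct tcphdr *tcp = (void *)skb->data + skb_transport_offset(skb);
		u32 paylen = skb->len - skb_transport_offset(skb);

		/* csum_replace_by_diff() applies a one's-complement difference;
		 * passing htonl(paylen) as __wsum is the established TSO-driver
		 * idiom for removing the pseudo-header length contribution.
		 */
		csum_replace_by_diff(&tcp->check, (__force __wsum)htonl(paylen));
	}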
diff --git a/drivers/net/ethernet/smsc/Kconfig b/drivers/net/ethernet/smsc/Kconfig
index b77e427e6729..c52a38df0e0d 100644
--- a/drivers/net/ethernet/smsc/Kconfig
+++ b/drivers/net/ethernet/smsc/Kconfig
@@ -8,7 +8,7 @@ config NET_VENDOR_SMSC
default y
depends on ARM || ARM64 || ATARI_ETHERNAT || COLDFIRE || \
ISA || MAC || MIPS || NIOS2 || PCI || \
- PCMCIA || SUPERH || XTENSA || H8300
+ PCMCIA || SUPERH || XTENSA || H8300 || COMPILE_TEST
help
If you have a network (Ethernet) card belonging to this class, say Y.
@@ -39,7 +39,7 @@ config SMC91X
select MII
depends on !OF || GPIOLIB
depends on ARM || ARM64 || ATARI_ETHERNAT || COLDFIRE || \
- MIPS || NIOS2 || SUPERH || XTENSA || H8300
+ MIPS || NIOS2 || SUPERH || XTENSA || H8300 || COMPILE_TEST
help
This is a driver for SMC's 91x series of Ethernet chipsets,
including the SMC91C94 and the SMC91C111. Say Y if you want it
@@ -78,7 +78,7 @@ config SMC911X
tristate "SMSC LAN911[5678] support"
select CRC32
select MII
- depends on (ARM || SUPERH)
+ depends on (ARM || SUPERH || COMPILE_TEST)
help
This is a driver for SMSC's LAN911x series of Ethernet chipsets
including the new LAN9115, LAN9116, LAN9117, and LAN9118.
diff --git a/drivers/net/ethernet/smsc/smc911x.c b/drivers/net/ethernet/smsc/smc911x.c
index 01069dfaf75c..22cdbf12c823 100644
--- a/drivers/net/ethernet/smsc/smc911x.c
+++ b/drivers/net/ethernet/smsc/smc911x.c
@@ -102,7 +102,10 @@ MODULE_ALIAS("platform:smc911x");
#define PRINTK(dev, args...) netdev_info(dev, args)
#else
-#define DBG(n, dev, args...) do { } while (0)
+#define DBG(n, dev, args...) \
+ while (0) { \
+ netdev_dbg(dev, args); \
+ }
#define PRINTK(dev, args...) netdev_dbg(dev, args)
#endif
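
The rewritten DBG() keeps the netdev_dbg() call inside a never-executed while (0) body, so the compiler still type-checks the format string and its arguments even when debugging is compiled out; the old empty do { } while (0) silently accepted mismatched arguments. A minimal sketch of the pattern (my_dbg() is a hypothetical macro, not part of the driver):

	#ifdef MY_DEBUG
	#define my_dbg(dev, fmt, ...) netdev_dbg(dev, fmt, ##__VA_ARGS__)
	#else
	/* Dead code: the block is optimized away entirely, but the format
	 * string and arguments are still checked at compile time.
	 */
	#define my_dbg(dev, fmt, ...)				\
		while (0) {					\
			netdev_dbg(dev, fmt, ##__VA_ARGS__);	\
		}
	#endif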
@@ -462,9 +465,9 @@ static void smc911x_hardware_send_pkt(struct net_device *dev)
TX_CMD_A_INT_FIRST_SEG_ | TX_CMD_A_INT_LAST_SEG_ |
skb->len;
#else
- buf = (char*)((u32)skb->data & ~0x3);
- len = (skb->len + 3 + ((u32)skb->data & 3)) & ~0x3;
- cmdA = (((u32)skb->data & 0x3) << 16) |
+ buf = (char *)((uintptr_t)skb->data & ~0x3);
+ len = (skb->len + 3 + ((uintptr_t)skb->data & 3)) & ~0x3;
+ cmdA = (((uintptr_t)skb->data & 0x3) << 16) |
TX_CMD_A_INT_FIRST_SEG_ | TX_CMD_A_INT_LAST_SEG_ |
skb->len;
#endif
@@ -879,7 +882,7 @@ static void smc911x_phy_configure(struct work_struct *work)
int phyaddr = lp->mii.phy_id;
int my_phy_caps; /* My PHY capabilities */
int my_ad_caps; /* My Advertised capabilities */
- int status;
+ int status __always_unused;
unsigned long flags;
DBG(SMC_DEBUG_FUNC, dev, "--> %s()\n", __func__);
@@ -973,7 +976,7 @@ static void smc911x_phy_interrupt(struct net_device *dev)
{
struct smc911x_local *lp = netdev_priv(dev);
int phyaddr = lp->mii.phy_id;
- int status;
+ int status __always_unused;
DBG(SMC_DEBUG_FUNC, dev, "--> %s\n", __func__);
@@ -2044,8 +2047,6 @@ static int smc911x_drv_probe(struct platform_device *pdev)
void __iomem *addr;
int ret;
- /* ndev is not valid yet, so avoid passing it in. */
- DBG(SMC_DEBUG_FUNC, "--> %s\n", __func__);
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res) {
ret = -ENODEV;
diff --git a/drivers/net/ethernet/smsc/smc91x.c b/drivers/net/ethernet/smsc/smc91x.c
index f6b73afd1879..742a1f7a838c 100644
--- a/drivers/net/ethernet/smsc/smc91x.c
+++ b/drivers/net/ethernet/smsc/smc91x.c
@@ -703,7 +703,8 @@ static void smc_tx(struct net_device *dev)
{
struct smc_local *lp = netdev_priv(dev);
void __iomem *ioaddr = lp->base;
- unsigned int saved_packet, packet_no, tx_status, pkt_len;
+ unsigned int saved_packet, packet_no, tx_status;
+ unsigned int pkt_len __always_unused;
DBG(3, dev, "%s\n", __func__);
@@ -2191,6 +2192,12 @@ MODULE_DEVICE_TABLE(of, smc91x_match);
/**
- * of_try_set_control_gpio - configure a gpio if it exists
+ * try_toggle_control_gpio - configure a gpio if it exists
+ * @dev: device the GPIO is looked up from
+ * @desc: where to store the GPIO descriptor, if it exists
+ * @name: name of the GPIO in DT
+ * @index: index of the GPIO in DT
+ * @value: set the GPIO to this value
+ * @nsdelay: delay before setting the GPIO
*/
static int try_toggle_control_gpio(struct device *dev,
struct gpio_desc **desc,
diff --git a/drivers/net/ethernet/socionext/netsec.c b/drivers/net/ethernet/socionext/netsec.c
index 1503cc9ec6e2..536aa8961dc6 100644
--- a/drivers/net/ethernet/socionext/netsec.c
+++ b/drivers/net/ethernet/socionext/netsec.c
@@ -631,6 +631,7 @@ static void netsec_set_rx_de(struct netsec_priv *priv,
static bool netsec_clean_tx_dring(struct netsec_priv *priv)
{
struct netsec_desc_ring *dring = &priv->desc_ring[NETSEC_RING_TX];
+ struct xdp_frame_bulk bq;
struct netsec_de *entry;
int tail = dring->tail;
unsigned int bytes;
@@ -639,8 +640,11 @@ static bool netsec_clean_tx_dring(struct netsec_priv *priv)
spin_lock(&dring->lock);
bytes = 0;
+ xdp_frame_bulk_init(&bq);
entry = dring->vaddr + DESC_SZ * tail;
+ rcu_read_lock(); /* needed for xdp_return_frame_bulk() */
+
while (!(entry->attr & (1U << NETSEC_TX_SHIFT_OWN_FIELD)) &&
cnt < DESC_NUM) {
struct netsec_desc *desc;
@@ -665,7 +669,10 @@ static bool netsec_clean_tx_dring(struct netsec_priv *priv)
dev_kfree_skb(desc->skb);
} else {
bytes += desc->xdpf->len;
- xdp_return_frame(desc->xdpf);
+ if (desc->buf_type == TYPE_NETSEC_XDP_TX)
+ xdp_return_frame_rx_napi(desc->xdpf);
+ else
+ xdp_return_frame_bulk(desc->xdpf, &bq);
}
next:
/* clean up so netsec_uninit_pkt_dring() won't free the skb
@@ -684,6 +691,9 @@ next:
entry = dring->vaddr + DESC_SZ * tail;
cnt++;
}
+ xdp_flush_frame_bulk(&bq);
+
+ rcu_read_unlock();
spin_unlock(&dring->lock);
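
The Tx-clean path above batches frame returns through an on-stack struct xdp_frame_bulk: frames queued via ndo_xdp_xmit go through the bulk API, while locally generated XDP_TX frames keep the rx_napi fast path. The bulk helpers must run under rcu_read_lock(). A condensed sketch of the calling convention, with the descriptor walk replaced by a hypothetical fetch_next_frame():

	struct xdp_frame_bulk bq;
	struct xdp_frame *xdpf;

	xdp_frame_bulk_init(&bq);

	rcu_read_lock();	/* required by xdp_return_frame_bulk() */
	while ((xdpf = fetch_next_frame()) != NULL)
		xdp_return_frame_bulk(xdpf, &bq);
	xdp_flush_frame_bulk(&bq);	/* release any frames still queued */
	rcu_read_unlock();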
diff --git a/drivers/net/ethernet/stmicro/stmmac/common.h b/drivers/net/ethernet/stmicro/stmmac/common.h
index df7de50497a0..6f271c46368d 100644
--- a/drivers/net/ethernet/stmicro/stmmac/common.h
+++ b/drivers/net/ethernet/stmicro/stmmac/common.h
@@ -402,6 +402,7 @@ struct dma_features {
/* Default LPI timers */
#define STMMAC_DEFAULT_LIT_LS 0x3E8
#define STMMAC_DEFAULT_TWT_LS 0x1E
+#define STMMAC_ET_MAX 0xFFFFF
#define STMMAC_CHAIN_MODE 0x1
#define STMMAC_RING_MODE 0x2
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c
index 2342d497348e..27254b27d7ed 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c
@@ -119,23 +119,23 @@ static int dwc_eth_dwmac_config_dt(struct platform_device *pdev,
return 0;
}
-static void *dwc_qos_probe(struct platform_device *pdev,
- struct plat_stmmacenet_data *plat_dat,
- struct stmmac_resources *stmmac_res)
+static int dwc_qos_probe(struct platform_device *pdev,
+ struct plat_stmmacenet_data *plat_dat,
+ struct stmmac_resources *stmmac_res)
{
int err;
plat_dat->stmmac_clk = devm_clk_get(&pdev->dev, "apb_pclk");
if (IS_ERR(plat_dat->stmmac_clk)) {
dev_err(&pdev->dev, "apb_pclk clock not found.\n");
- return ERR_CAST(plat_dat->stmmac_clk);
+ return PTR_ERR(plat_dat->stmmac_clk);
}
err = clk_prepare_enable(plat_dat->stmmac_clk);
if (err < 0) {
dev_err(&pdev->dev, "failed to enable apb_pclk clock: %d\n",
err);
- return ERR_PTR(err);
+ return err;
}
plat_dat->pclk = devm_clk_get(&pdev->dev, "phy_ref_clk");
@@ -152,11 +152,11 @@ static void *dwc_qos_probe(struct platform_device *pdev,
goto disable;
}
- return NULL;
+ return 0;
disable:
clk_disable_unprepare(plat_dat->stmmac_clk);
- return ERR_PTR(err);
+ return err;
}
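
Converting the probe callbacks from returning void * (NULL or an ERR_PTR()) to plain int removes the ERR_PTR()/PTR_ERR() round-trips and makes the error paths harder to get wrong. A reduced sketch of the shape this gives a probe helper (sketch_probe() is illustrative only):

	static int sketch_probe(struct platform_device *pdev)
	{
		struct clk *clk = devm_clk_get(&pdev->dev, "apb_pclk");

		if (IS_ERR(clk))
			return PTR_ERR(clk);	/* was: return ERR_CAST(clk); */

		return clk_prepare_enable(clk);	/* 0 or -errno, no ERR_PTR() */
	}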
static int dwc_qos_remove(struct platform_device *pdev)
@@ -267,19 +267,17 @@ static int tegra_eqos_init(struct platform_device *pdev, void *priv)
return 0;
}
-static void *tegra_eqos_probe(struct platform_device *pdev,
- struct plat_stmmacenet_data *data,
- struct stmmac_resources *res)
+static int tegra_eqos_probe(struct platform_device *pdev,
+ struct plat_stmmacenet_data *data,
+ struct stmmac_resources *res)
{
struct device *dev = &pdev->dev;
struct tegra_eqos *eqos;
int err;
eqos = devm_kzalloc(&pdev->dev, sizeof(*eqos), GFP_KERNEL);
- if (!eqos) {
- err = -ENOMEM;
- goto error;
- }
+ if (!eqos)
+ return -ENOMEM;
eqos->dev = &pdev->dev;
eqos->regs = res->addr;
@@ -368,9 +366,7 @@ bypass_clk_reset_gpio:
if (err < 0)
goto reset;
-out:
- return eqos;
-
+ return 0;
reset:
reset_control_assert(eqos->rst);
reset_phy:
@@ -384,8 +380,7 @@ disable_slave:
disable_master:
clk_disable_unprepare(eqos->clk_master);
error:
- eqos = ERR_PTR(err);
- goto out;
+ return err;
}
static int tegra_eqos_remove(struct platform_device *pdev)
@@ -403,9 +398,9 @@ static int tegra_eqos_remove(struct platform_device *pdev)
}
struct dwc_eth_dwmac_data {
- void *(*probe)(struct platform_device *pdev,
- struct plat_stmmacenet_data *data,
- struct stmmac_resources *res);
+ int (*probe)(struct platform_device *pdev,
+ struct plat_stmmacenet_data *data,
+ struct stmmac_resources *res);
int (*remove)(struct platform_device *pdev);
};
@@ -424,7 +419,6 @@ static int dwc_eth_dwmac_probe(struct platform_device *pdev)
const struct dwc_eth_dwmac_data *data;
struct plat_stmmacenet_data *plat_dat;
struct stmmac_resources stmmac_res;
- void *priv;
int ret;
data = device_get_match_data(&pdev->dev);
@@ -448,10 +442,8 @@ static int dwc_eth_dwmac_probe(struct platform_device *pdev)
if (IS_ERR(plat_dat))
return PTR_ERR(plat_dat);
- priv = data->probe(pdev, plat_dat, &stmmac_res);
- if (IS_ERR(priv)) {
- ret = PTR_ERR(priv);
-
+ ret = data->probe(pdev, plat_dat, &stmmac_res);
+ if (ret < 0) {
if (ret != -EPROBE_DEFER)
dev_err(&pdev->dev, "failed to probe subdriver: %d\n",
ret);
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c
index 81ee0a071b4e..a2e80c89de2d 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c
@@ -236,6 +236,7 @@ static int intel_mgbe_common_data(struct pci_dev *pdev,
int ret;
int i;
+ plat->phy_addr = -1;
plat->clk_csr = 5;
plat->has_gmac = 0;
plat->has_gmac4 = 1;
@@ -345,7 +346,6 @@ static int ehl_sgmii_data(struct pci_dev *pdev,
struct plat_stmmacenet_data *plat)
{
plat->bus_id = 1;
- plat->phy_addr = 0;
plat->phy_interface = PHY_INTERFACE_MODE_SGMII;
plat->serdes_powerup = intel_serdes_powerup;
@@ -362,7 +362,6 @@ static int ehl_rgmii_data(struct pci_dev *pdev,
struct plat_stmmacenet_data *plat)
{
plat->bus_id = 1;
- plat->phy_addr = 0;
plat->phy_interface = PHY_INTERFACE_MODE_RGMII;
return ehl_common_data(pdev, plat);
@@ -376,7 +375,6 @@ static int ehl_pse0_common_data(struct pci_dev *pdev,
struct plat_stmmacenet_data *plat)
{
plat->bus_id = 2;
- plat->phy_addr = 1;
return ehl_common_data(pdev, plat);
}
@@ -408,7 +406,6 @@ static int ehl_pse1_common_data(struct pci_dev *pdev,
struct plat_stmmacenet_data *plat)
{
plat->bus_id = 3;
- plat->phy_addr = 1;
return ehl_common_data(pdev, plat);
}
@@ -450,7 +447,6 @@ static int tgl_sgmii_data(struct pci_dev *pdev,
struct plat_stmmacenet_data *plat)
{
plat->bus_id = 1;
- plat->phy_addr = 0;
plat->phy_interface = PHY_INTERFACE_MODE_SGMII;
plat->serdes_powerup = intel_serdes_powerup;
plat->serdes_powerdown = intel_serdes_powerdown;
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c
index 5afcf05bbf9c..dc0b8b6d180d 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c
@@ -299,7 +299,7 @@ static int meson8b_init_prg_eth(struct meson8b_dwmac *dwmac)
dev_err(dwmac->dev, "unsupported phy-mode %s\n",
phy_modes(dwmac->phy_mode));
return -EINVAL;
- };
+ }
if (rx_dly_config & PRG_ETH0_ADJ_ENABLE) {
if (!dwmac->timing_adj_clk) {
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4.h b/drivers/net/ethernet/stmicro/stmmac/dwmac4.h
index 592b043f9676..82df91c130f7 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4.h
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4.h
@@ -176,9 +176,11 @@ enum power_event {
*/
#define GMAC4_LPI_CTRL_STATUS 0xd0
#define GMAC4_LPI_TIMER_CTRL 0xd4
+#define GMAC4_LPI_ENTRY_TIMER 0xd8
/* LPI control and status defines */
#define GMAC4_LPI_CTRL_STATUS_LPITCSE BIT(21) /* LPI Tx Clock Stop Enable */
+#define GMAC4_LPI_CTRL_STATUS_LPIATE BIT(20) /* LPI Timer Enable */
#define GMAC4_LPI_CTRL_STATUS_LPITXA BIT(19) /* Enable LPI TX Automate */
#define GMAC4_LPI_CTRL_STATUS_PLS BIT(17) /* PHY Link Status */
#define GMAC4_LPI_CTRL_STATUS_LPIEN BIT(16) /* LPI Enable */
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
index ced6d76a0d85..29f765a246a0 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
@@ -379,6 +379,27 @@ static void dwmac4_set_eee_pls(struct mac_device_info *hw, int link)
writel(value, ioaddr + GMAC4_LPI_CTRL_STATUS);
}
+static void dwmac4_set_eee_lpi_entry_timer(struct mac_device_info *hw, int et)
+{
+ void __iomem *ioaddr = hw->pcsr;
+ int value = et & STMMAC_ET_MAX;
+ int regval;
+
+ /* Program LPI entry timer value into register */
+ writel(value, ioaddr + GMAC4_LPI_ENTRY_TIMER);
+
+ /* Enable/disable LPI entry timer */
+ regval = readl(ioaddr + GMAC4_LPI_CTRL_STATUS);
+ regval |= GMAC4_LPI_CTRL_STATUS_LPIEN | GMAC4_LPI_CTRL_STATUS_LPITXA;
+
+ if (et)
+ regval |= GMAC4_LPI_CTRL_STATUS_LPIATE;
+ else
+ regval &= ~GMAC4_LPI_CTRL_STATUS_LPIATE;
+
+ writel(regval, ioaddr + GMAC4_LPI_CTRL_STATUS);
+}
+
static void dwmac4_set_eee_timer(struct mac_device_info *hw, int ls, int tw)
{
void __iomem *ioaddr = hw->pcsr;
@@ -1164,6 +1185,7 @@ const struct stmmac_ops dwmac4_ops = {
.get_umac_addr = dwmac4_get_umac_addr,
.set_eee_mode = dwmac4_set_eee_mode,
.reset_eee_mode = dwmac4_reset_eee_mode,
+ .set_eee_lpi_entry_timer = dwmac4_set_eee_lpi_entry_timer,
.set_eee_timer = dwmac4_set_eee_timer,
.set_eee_pls = dwmac4_set_eee_pls,
.pcs_ctrl_ane = dwmac4_ctrl_ane,
@@ -1205,6 +1227,7 @@ const struct stmmac_ops dwmac410_ops = {
.get_umac_addr = dwmac4_get_umac_addr,
.set_eee_mode = dwmac4_set_eee_mode,
.reset_eee_mode = dwmac4_reset_eee_mode,
+ .set_eee_lpi_entry_timer = dwmac4_set_eee_lpi_entry_timer,
.set_eee_timer = dwmac4_set_eee_timer,
.set_eee_pls = dwmac4_set_eee_pls,
.pcs_ctrl_ane = dwmac4_ctrl_ane,
@@ -1249,6 +1272,7 @@ const struct stmmac_ops dwmac510_ops = {
.get_umac_addr = dwmac4_get_umac_addr,
.set_eee_mode = dwmac4_set_eee_mode,
.reset_eee_mode = dwmac4_reset_eee_mode,
+ .set_eee_lpi_entry_timer = dwmac4_set_eee_lpi_entry_timer,
.set_eee_timer = dwmac4_set_eee_timer,
.set_eee_pls = dwmac4_set_eee_pls,
.pcs_ctrl_ane = dwmac4_ctrl_ane,
diff --git a/drivers/net/ethernet/stmicro/stmmac/hwif.h b/drivers/net/ethernet/stmicro/stmmac/hwif.h
index e2dca9b6e992..b40b2e0667bb 100644
--- a/drivers/net/ethernet/stmicro/stmmac/hwif.h
+++ b/drivers/net/ethernet/stmicro/stmmac/hwif.h
@@ -337,6 +337,7 @@ struct stmmac_ops {
void (*set_eee_mode)(struct mac_device_info *hw,
bool en_tx_lpi_clockgating);
void (*reset_eee_mode)(struct mac_device_info *hw);
+ void (*set_eee_lpi_entry_timer)(struct mac_device_info *hw, int et);
void (*set_eee_timer)(struct mac_device_info *hw, int ls, int tw);
void (*set_eee_pls)(struct mac_device_info *hw, int link);
void (*debug)(void __iomem *ioaddr, struct stmmac_extra_stats *x,
@@ -439,6 +440,8 @@ struct stmmac_ops {
stmmac_do_void_callback(__priv, mac, set_eee_mode, __args)
#define stmmac_reset_eee_mode(__priv, __args...) \
stmmac_do_void_callback(__priv, mac, reset_eee_mode, __args)
+#define stmmac_set_eee_lpi_timer(__priv, __args...) \
+ stmmac_do_void_callback(__priv, mac, set_eee_lpi_entry_timer, __args)
#define stmmac_set_eee_timer(__priv, __args...) \
stmmac_do_void_callback(__priv, mac, set_eee_timer, __args)
#define stmmac_set_eee_pls(__priv, __args...) \
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac.h b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
index 727e68dfaf1c..e553b9a1f785 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac.h
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
@@ -13,6 +13,7 @@
#define DRV_MODULE_VERSION "Jan_2016"
#include <linux/clk.h>
+#include <linux/hrtimer.h>
#include <linux/if_vlan.h>
#include <linux/stmmac.h>
#include <linux/phylink.h>
@@ -46,7 +47,7 @@ struct stmmac_tx_info {
struct stmmac_tx_queue {
u32 tx_count_frames;
int tbs;
- struct timer_list txtimer;
+ struct hrtimer txtimer;
u32 queue_index;
struct stmmac_priv *priv_data;
struct dma_extended_desc *dma_etx ____cacheline_aligned_in_smp;
@@ -207,6 +208,7 @@ struct stmmac_priv {
int tx_lpi_timer;
int tx_lpi_enabled;
int eee_tw_timer;
+ bool eee_sw_timer_en;
unsigned int mode;
unsigned int chain_mode;
int extend_desc;
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index ba45fe237512..8c1ac75901ce 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -111,7 +111,7 @@ static void stmmac_init_fs(struct net_device *dev);
static void stmmac_exit_fs(struct net_device *dev);
#endif
-#define STMMAC_COAL_TIMER(x) (jiffies + usecs_to_jiffies(x))
+#define STMMAC_COAL_TIMER(x) (ns_to_ktime((x) * NSEC_PER_USEC))
/**
* stmmac_verify_args - verify the driver parameters.
@@ -294,6 +294,16 @@ static inline u32 stmmac_rx_dirty(struct stmmac_priv *priv, u32 queue)
return dirty;
}
+static void stmmac_lpi_entry_timer_config(struct stmmac_priv *priv, bool en)
+{
+ int tx_lpi_timer;
+
+ /* Clear/set the SW EEE timer flag based on LPI ET enablement */
+ priv->eee_sw_timer_en = en ? 0 : 1;
+ tx_lpi_timer = en ? priv->tx_lpi_timer : 0;
+ stmmac_set_eee_lpi_timer(priv, priv->hw, tx_lpi_timer);
+}
+
/**
* stmmac_enable_eee_mode - check and enter in LPI mode
* @priv: driver private structure
@@ -327,6 +337,11 @@ static void stmmac_enable_eee_mode(struct stmmac_priv *priv)
*/
void stmmac_disable_eee_mode(struct stmmac_priv *priv)
{
+ if (!priv->eee_sw_timer_en) {
+ stmmac_lpi_entry_timer_config(priv, 0);
+ return;
+ }
+
stmmac_reset_eee_mode(priv, priv->hw);
del_timer_sync(&priv->eee_ctrl_timer);
priv->tx_path_in_lpi_mode = false;
@@ -376,6 +391,7 @@ bool stmmac_eee_init(struct stmmac_priv *priv)
if (!priv->eee_active) {
if (priv->eee_enabled) {
netdev_dbg(priv->dev, "disable EEE\n");
+ stmmac_lpi_entry_timer_config(priv, 0);
del_timer_sync(&priv->eee_ctrl_timer);
stmmac_set_eee_timer(priv, priv->hw, 0, eee_tw_timer);
}
@@ -389,7 +405,15 @@ bool stmmac_eee_init(struct stmmac_priv *priv)
eee_tw_timer);
}
- mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(priv->tx_lpi_timer));
+ if (priv->plat->has_gmac4 && priv->tx_lpi_timer <= STMMAC_ET_MAX) {
+ del_timer_sync(&priv->eee_ctrl_timer);
+ priv->tx_path_in_lpi_mode = false;
+ stmmac_lpi_entry_timer_config(priv, 1);
+ } else {
+ stmmac_lpi_entry_timer_config(priv, 0);
+ mod_timer(&priv->eee_ctrl_timer,
+ STMMAC_LPI_T(priv->tx_lpi_timer));
+ }
mutex_unlock(&priv->lock);
netdev_dbg(priv->dev, "Energy-Efficient Ethernet initialized\n");
@@ -2044,14 +2068,16 @@ static int stmmac_tx_clean(struct stmmac_priv *priv, int budget, u32 queue)
netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, queue));
}
- if ((priv->eee_enabled) && (!priv->tx_path_in_lpi_mode)) {
+ if (priv->eee_enabled && !priv->tx_path_in_lpi_mode &&
+ priv->eee_sw_timer_en) {
stmmac_enable_eee_mode(priv);
mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(priv->tx_lpi_timer));
}
/* We still have pending packets, let's call for a new scheduling */
if (tx_q->dirty_tx != tx_q->cur_tx)
- mod_timer(&tx_q->txtimer, STMMAC_COAL_TIMER(priv->tx_coal_timer));
+ hrtimer_start(&tx_q->txtimer, STMMAC_COAL_TIMER(priv->tx_coal_timer),
+ HRTIMER_MODE_REL);
__netif_tx_unlock_bh(netdev_get_tx_queue(priv->dev, queue));
@@ -2335,7 +2361,8 @@ static void stmmac_tx_timer_arm(struct stmmac_priv *priv, u32 queue)
{
struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
- mod_timer(&tx_q->txtimer, STMMAC_COAL_TIMER(priv->tx_coal_timer));
+ hrtimer_start(&tx_q->txtimer, STMMAC_COAL_TIMER(priv->tx_coal_timer),
+ HRTIMER_MODE_REL);
}
/**
@@ -2344,9 +2371,9 @@ static void stmmac_tx_timer_arm(struct stmmac_priv *priv, u32 queue)
* Description:
* This is the timer handler to directly invoke the stmmac_tx_clean.
*/
-static void stmmac_tx_timer(struct timer_list *t)
+static enum hrtimer_restart stmmac_tx_timer(struct hrtimer *t)
{
- struct stmmac_tx_queue *tx_q = from_timer(tx_q, t, txtimer);
+ struct stmmac_tx_queue *tx_q = container_of(t, struct stmmac_tx_queue, txtimer);
struct stmmac_priv *priv = tx_q->priv_data;
struct stmmac_channel *ch;
@@ -2360,6 +2387,8 @@ static void stmmac_tx_timer(struct timer_list *t)
spin_unlock_irqrestore(&ch->lock, flags);
__napi_schedule(&ch->tx_napi);
}
+
+ return HRTIMER_NORESTART;
}
/**
@@ -2382,7 +2411,8 @@ static void stmmac_init_coalesce(struct stmmac_priv *priv)
for (chan = 0; chan < tx_channel_count; chan++) {
struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan];
- timer_setup(&tx_q->txtimer, stmmac_tx_timer, 0);
+ hrtimer_init(&tx_q->txtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+ tx_q->txtimer.function = stmmac_tx_timer;
}
}
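
The coalescing timer moves from a jiffies-based timer_list to an hrtimer so that microsecond tx_coal_timer values below one jiffy actually take effect; STMMAC_COAL_TIMER() now yields a relative ktime_t rather than an absolute jiffies deadline. A minimal sketch of the conversion pattern (struct sketch_queue and its fields are illustrative only):

	struct sketch_queue {
		struct hrtimer timer;
	};

	static enum hrtimer_restart sketch_timer_fn(struct hrtimer *t)
	{
		struct sketch_queue *q = container_of(t, struct sketch_queue, timer);

		/* ... kick NAPI for q ... */
		return HRTIMER_NORESTART;	/* one-shot; the driver re-arms it */
	}

	static void sketch_init(struct sketch_queue *q)
	{
		hrtimer_init(&q->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
		q->timer.function = sketch_timer_fn;
	}

	static void sketch_arm(struct sketch_queue *q, unsigned long usecs)
	{
		hrtimer_start(&q->timer, ns_to_ktime(usecs * NSEC_PER_USEC),
			      HRTIMER_MODE_REL);
	}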
@@ -2874,7 +2904,7 @@ irq_error:
phylink_stop(priv->phylink);
for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
- del_timer_sync(&priv->tx_queue[chan].txtimer);
+ hrtimer_cancel(&priv->tx_queue[chan].txtimer);
stmmac_hw_teardown(dev);
init_error:
@@ -2907,7 +2937,7 @@ static int stmmac_release(struct net_device *dev)
stmmac_disable_all_queues(priv);
for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
- del_timer_sync(&priv->tx_queue[chan].txtimer);
+ hrtimer_cancel(&priv->tx_queue[chan].txtimer);
/* Free the IRQ lines */
free_irq(dev->irq, dev);
@@ -3306,7 +3336,7 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
tx_q = &priv->tx_queue[queue];
first_tx = tx_q->cur_tx;
- if (priv->tx_path_in_lpi_mode)
+ if (priv->tx_path_in_lpi_mode && priv->eee_sw_timer_en)
stmmac_disable_eee_mode(priv);
/* Manage oversized TCP frames for GMAC4 device */
@@ -5140,7 +5170,7 @@ int stmmac_suspend(struct device *dev)
stmmac_disable_all_queues(priv);
for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
- del_timer_sync(&priv->tx_queue[chan].txtimer);
+ hrtimer_cancel(&priv->tx_queue[chan].txtimer);
/* Stop TX/RX DMA */
stmmac_stop_all_dma(priv);
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
index af34a4cadbb0..6dc9f10414e4 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
@@ -399,6 +399,7 @@ stmmac_probe_config_dt(struct platform_device *pdev, const char **mac)
struct device_node *np = pdev->dev.of_node;
struct plat_stmmacenet_data *plat;
struct stmmac_dma_cfg *dma_cfg;
+ void *ret;
int rc;
plat = devm_kzalloc(&pdev->dev, sizeof(*plat), GFP_KERNEL);
@@ -576,12 +577,10 @@ stmmac_probe_config_dt(struct platform_device *pdev, const char **mac)
clk_prepare_enable(plat->stmmac_clk);
}
- plat->pclk = devm_clk_get(&pdev->dev, "pclk");
+ plat->pclk = devm_clk_get_optional(&pdev->dev, "pclk");
if (IS_ERR(plat->pclk)) {
- if (PTR_ERR(plat->pclk) == -EPROBE_DEFER)
- goto error_pclk_get;
-
- plat->pclk = NULL;
+ ret = plat->pclk;
+ goto error_pclk_get;
}
clk_prepare_enable(plat->pclk);
@@ -596,14 +595,11 @@ stmmac_probe_config_dt(struct platform_device *pdev, const char **mac)
dev_dbg(&pdev->dev, "PTP rate %d\n", plat->clk_ptp_rate);
}
- plat->stmmac_rst = devm_reset_control_get(&pdev->dev,
- STMMAC_RESOURCE_NAME);
+ plat->stmmac_rst = devm_reset_control_get_optional(&pdev->dev,
+ STMMAC_RESOURCE_NAME);
if (IS_ERR(plat->stmmac_rst)) {
- if (PTR_ERR(plat->stmmac_rst) == -EPROBE_DEFER)
- goto error_hw_init;
-
- dev_info(&pdev->dev, "no reset control found\n");
- plat->stmmac_rst = NULL;
+ ret = plat->stmmac_rst;
+ goto error_hw_init;
}
return plat;
@@ -613,7 +609,7 @@ error_hw_init:
error_pclk_get:
clk_disable_unprepare(plat->stmmac_clk);
- return ERR_PTR(-EPROBE_DEFER);
+ return ret;
}
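
devm_clk_get_optional() returns NULL rather than an error when the clock is simply absent from the device tree, and clk_prepare_enable(NULL) is a no-op, so the only errors left to handle are real ones such as -EPROBE_DEFER; devm_reset_control_get_optional() behaves the same way for the reset line. That is what lets the error path above propagate the actual error instead of the hardcoded -EPROBE_DEFER it returned before. A minimal sketch (sketch_get_pclk() is illustrative only):

	static int sketch_get_pclk(struct platform_device *pdev)
	{
		struct clk *pclk = devm_clk_get_optional(&pdev->dev, "pclk");

		if (IS_ERR(pclk))
			return PTR_ERR(pclk);	/* a real error, e.g. -EPROBE_DEFER */

		return clk_prepare_enable(pclk);	/* NULL clk: no-op, returns 0 */
	}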
/**
diff --git a/drivers/net/ethernet/ti/am65-cpsw-nuss.c b/drivers/net/ethernet/ti/am65-cpsw-nuss.c
index 501d676fd88b..766e8866bbef 100644
--- a/drivers/net/ethernet/ti/am65-cpsw-nuss.c
+++ b/drivers/net/ethernet/ti/am65-cpsw-nuss.c
@@ -241,8 +241,8 @@ static int am65_cpsw_nuss_ndo_slave_add_vid(struct net_device *ndev,
if (!vid)
unreg_mcast = port_mask;
dev_info(common->dev, "Adding vlan %d to vlan filter\n", vid);
- ret = cpsw_ale_add_vlan(common->ale, vid, port_mask,
- unreg_mcast, port_mask, 0);
+ ret = cpsw_ale_vlan_add_modify(common->ale, vid, port_mask,
+ unreg_mcast, port_mask, 0);
pm_runtime_put(common->dev);
return ret;
@@ -252,6 +252,7 @@ static int am65_cpsw_nuss_ndo_slave_kill_vid(struct net_device *ndev,
__be16 proto, u16 vid)
{
struct am65_cpsw_common *common = am65_ndev_to_common(ndev);
+ struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
int ret;
if (!netif_running(ndev) || !vid)
@@ -264,14 +265,15 @@ static int am65_cpsw_nuss_ndo_slave_kill_vid(struct net_device *ndev,
}
dev_info(common->dev, "Removing vlan %d from vlan filter\n", vid);
- ret = cpsw_ale_del_vlan(common->ale, vid, 0);
+ ret = cpsw_ale_del_vlan(common->ale, vid,
+ BIT(port->port_id) | ALE_PORT_HOST);
pm_runtime_put(common->dev);
return ret;
}
-static void am65_cpsw_slave_set_promisc_2g(struct am65_cpsw_port *port,
- bool promisc)
+static void am65_cpsw_slave_set_promisc(struct am65_cpsw_port *port,
+ bool promisc)
{
struct am65_cpsw_common *common = port->common;
@@ -296,7 +298,7 @@ static void am65_cpsw_nuss_ndo_slave_set_rx_mode(struct net_device *ndev)
bool promisc;
promisc = !!(ndev->flags & IFF_PROMISC);
- am65_cpsw_slave_set_promisc_2g(port, promisc);
+ am65_cpsw_slave_set_promisc(port, promisc);
if (promisc)
return;
@@ -373,7 +375,7 @@ static int am65_cpsw_nuss_rx_push(struct am65_cpsw_common *common,
cppi5_hdesc_init(desc_rx, CPPI5_INFO0_HDESC_EPIB_PRESENT,
AM65_CPSW_NAV_PS_DATA_SIZE);
- cppi5_hdesc_attach_buf(desc_rx, 0, 0, buf_dma, skb_tailroom(skb));
+ cppi5_hdesc_attach_buf(desc_rx, buf_dma, skb_tailroom(skb), buf_dma, skb_tailroom(skb));
swdata = cppi5_hdesc_get_swdata(desc_rx);
*((void **)swdata) = skb;
@@ -426,9 +428,7 @@ static int am65_cpsw_nuss_common_open(struct am65_cpsw_common *common,
writel(common->rx_flow_id_base,
host_p->port_base + AM65_CPSW_PORT0_REG_FLOW_ID_OFFSET);
/* en tx crc offload */
- if (features & NETIF_F_HW_CSUM)
- writel(AM65_CPSW_P0_REG_CTL_RX_CHECKSUM_EN,
- host_p->port_base + AM65_CPSW_P0_REG_CTL);
+ writel(AM65_CPSW_P0_REG_CTL_RX_CHECKSUM_EN, host_p->port_base + AM65_CPSW_P0_REG_CTL);
am65_cpsw_nuss_set_p0_ptype(common);
@@ -629,13 +629,13 @@ static int am65_cpsw_nuss_ndo_slave_open(struct net_device *ndev)
am65_cpsw_port_set_sl_mac(port, ndev->dev_addr);
- if (port->slave.mac_only)
+ if (port->slave.mac_only) {
/* enable mac-only mode on port */
cpsw_ale_control_set(common->ale, port->port_id,
ALE_PORT_MACONLY, 1);
- if (AM65_CPSW_IS_CPSW2G(common))
cpsw_ale_control_set(common->ale, port->port_id,
ALE_PORT_NOLEARN, 1);
+ }
port_mask = BIT(port->port_id) | ALE_PORT_HOST;
cpsw_ale_add_ucast(common->ale, ndev->dev_addr,
@@ -767,7 +767,7 @@ static int am65_cpsw_nuss_rx_packets(struct am65_cpsw_common *common,
return ret;
}
- if (desc_dma & 0x1) {
+ if (cppi5_desc_is_tdcm(desc_dma)) {
dev_dbg(dev, "%s RX tdown flow: %u\n", __func__, flow_idx);
return 0;
}
@@ -911,10 +911,57 @@ static void am65_cpsw_nuss_tx_cleanup(void *data, dma_addr_t desc_dma)
dev_kfree_skb_any(skb);
}
+static struct sk_buff *
+am65_cpsw_nuss_tx_compl_packet(struct am65_cpsw_tx_chn *tx_chn,
+ dma_addr_t desc_dma)
+{
+ struct am65_cpsw_ndev_priv *ndev_priv;
+ struct am65_cpsw_ndev_stats *stats;
+ struct cppi5_host_desc_t *desc_tx;
+ struct net_device *ndev;
+ struct sk_buff *skb;
+ void **swdata;
+
+ desc_tx = k3_cppi_desc_pool_dma2virt(tx_chn->desc_pool,
+ desc_dma);
+ swdata = cppi5_hdesc_get_swdata(desc_tx);
+ skb = *(swdata);
+ am65_cpsw_nuss_xmit_free(tx_chn, tx_chn->common->dev, desc_tx);
+
+ ndev = skb->dev;
+
+ am65_cpts_tx_timestamp(tx_chn->common->cpts, skb);
+
+ ndev_priv = netdev_priv(ndev);
+ stats = this_cpu_ptr(ndev_priv->stats);
+ u64_stats_update_begin(&stats->syncp);
+ stats->tx_packets++;
+ stats->tx_bytes += skb->len;
+ u64_stats_update_end(&stats->syncp);
+
+ return skb;
+}
+
+static void am65_cpsw_nuss_tx_wake(struct am65_cpsw_tx_chn *tx_chn, struct net_device *ndev,
+ struct netdev_queue *netif_txq)
+{
+ if (netif_tx_queue_stopped(netif_txq)) {
+ /* Check whether the queue is stopped due to stalled
+ * TX DMA; if so, wake the queue now that free TX
+ * descriptors are available.
+ */
+ __netif_tx_lock(netif_txq, smp_processor_id());
+ if (netif_running(ndev) &&
+ (k3_cppi_desc_pool_avail(tx_chn->desc_pool) >= MAX_SKB_FRAGS))
+ netif_tx_wake_queue(netif_txq);
+
+ __netif_tx_unlock(netif_txq);
+ }
+}
+
static int am65_cpsw_nuss_tx_compl_packets(struct am65_cpsw_common *common,
int chn, unsigned int budget)
{
- struct cppi5_host_desc_t *desc_tx;
struct device *dev = common->dev;
struct am65_cpsw_tx_chn *tx_chn;
struct netdev_queue *netif_txq;
@@ -923,41 +970,68 @@ static int am65_cpsw_nuss_tx_compl_packets(struct am65_cpsw_common *common,
struct sk_buff *skb;
dma_addr_t desc_dma;
int res, num_tx = 0;
- void **swdata;
tx_chn = &common->tx_chns[chn];
while (true) {
- struct am65_cpsw_ndev_priv *ndev_priv;
- struct am65_cpsw_ndev_stats *stats;
-
+ spin_lock(&tx_chn->lock);
res = k3_udma_glue_pop_tx_chn(tx_chn->tx_chn, &desc_dma);
+ spin_unlock(&tx_chn->lock);
if (res == -ENODATA)
break;
- if (desc_dma & 0x1) {
+ if (cppi5_desc_is_tdcm(desc_dma)) {
if (atomic_dec_and_test(&common->tdown_cnt))
complete(&common->tdown_complete);
break;
}
- desc_tx = k3_cppi_desc_pool_dma2virt(tx_chn->desc_pool,
- desc_dma);
- swdata = cppi5_hdesc_get_swdata(desc_tx);
- skb = *(swdata);
- am65_cpsw_nuss_xmit_free(tx_chn, dev, desc_tx);
-
+ skb = am65_cpsw_nuss_tx_compl_packet(tx_chn, desc_dma);
+ total_bytes = skb->len;
ndev = skb->dev;
+ napi_consume_skb(skb, budget);
+ num_tx++;
- am65_cpts_tx_timestamp(common->cpts, skb);
+ netif_txq = netdev_get_tx_queue(ndev, chn);
- ndev_priv = netdev_priv(ndev);
- stats = this_cpu_ptr(ndev_priv->stats);
- u64_stats_update_begin(&stats->syncp);
- stats->tx_packets++;
- stats->tx_bytes += skb->len;
- u64_stats_update_end(&stats->syncp);
+ netdev_tx_completed_queue(netif_txq, num_tx, total_bytes);
+ am65_cpsw_nuss_tx_wake(tx_chn, ndev, netif_txq);
+ }
+
+ dev_dbg(dev, "%s:%u pkt:%d\n", __func__, chn, num_tx);
+
+ return num_tx;
+}
+
+static int am65_cpsw_nuss_tx_compl_packets_2g(struct am65_cpsw_common *common,
+ int chn, unsigned int budget)
+{
+ struct device *dev = common->dev;
+ struct am65_cpsw_tx_chn *tx_chn;
+ struct netdev_queue *netif_txq;
+ unsigned int total_bytes = 0;
+ struct net_device *ndev;
+ struct sk_buff *skb;
+ dma_addr_t desc_dma;
+ int res, num_tx = 0;
+
+ tx_chn = &common->tx_chns[chn];
+
+ while (true) {
+ res = k3_udma_glue_pop_tx_chn(tx_chn->tx_chn, &desc_dma);
+ if (res == -ENODATA)
+ break;
+
+ if (cppi5_desc_is_tdcm(desc_dma)) {
+ if (atomic_dec_and_test(&common->tdown_cnt))
+ complete(&common->tdown_complete);
+ break;
+ }
+
+ skb = am65_cpsw_nuss_tx_compl_packet(tx_chn, desc_dma);
+
+ ndev = skb->dev;
total_bytes += skb->len;
napi_consume_skb(skb, budget);
num_tx++;
@@ -970,19 +1044,8 @@ static int am65_cpsw_nuss_tx_compl_packets(struct am65_cpsw_common *common,
netdev_tx_completed_queue(netif_txq, num_tx, total_bytes);
- if (netif_tx_queue_stopped(netif_txq)) {
- /* Check whether the queue is stopped due to stalled tx dma,
- * if the queue is stopped then wake the queue as
- * we have free desc for tx
- */
- __netif_tx_lock(netif_txq, smp_processor_id());
- if (netif_running(ndev) &&
- (k3_cppi_desc_pool_avail(tx_chn->desc_pool) >=
- MAX_SKB_FRAGS))
- netif_tx_wake_queue(netif_txq);
+ am65_cpsw_nuss_tx_wake(tx_chn, ndev, netif_txq);
- __netif_tx_unlock(netif_txq);
- }
dev_dbg(dev, "%s:%u pkt:%d\n", __func__, chn, num_tx);
return num_tx;
@@ -993,8 +1056,11 @@ static int am65_cpsw_nuss_tx_poll(struct napi_struct *napi_tx, int budget)
struct am65_cpsw_tx_chn *tx_chn = am65_cpsw_napi_to_tx_chn(napi_tx);
int num_tx;
- num_tx = am65_cpsw_nuss_tx_compl_packets(tx_chn->common, tx_chn->id,
- budget);
+ if (AM65_CPSW_IS_CPSW2G(tx_chn->common))
+ num_tx = am65_cpsw_nuss_tx_compl_packets_2g(tx_chn->common, tx_chn->id, budget);
+ else
+ num_tx = am65_cpsw_nuss_tx_compl_packets(tx_chn->common, tx_chn->id, budget);
+
num_tx = min(num_tx, budget);
if (num_tx < budget) {
napi_complete(napi_tx);
@@ -1139,7 +1205,13 @@ done_tx:
cppi5_hdesc_set_pktlen(first_desc, pkt_len);
desc_dma = k3_cppi_desc_pool_virt2dma(tx_chn->desc_pool, first_desc);
- ret = k3_udma_glue_push_tx_chn(tx_chn->tx_chn, first_desc, desc_dma);
+ if (AM65_CPSW_IS_CPSW2G(common)) {
+ ret = k3_udma_glue_push_tx_chn(tx_chn->tx_chn, first_desc, desc_dma);
+ } else {
+ spin_lock_bh(&tx_chn->lock);
+ ret = k3_udma_glue_push_tx_chn(tx_chn->tx_chn, first_desc, desc_dma);
+ spin_unlock_bh(&tx_chn->lock);
+ }
if (ret) {
dev_err(dev, "can't push desc %d\n", ret);
/* inform bql */
@@ -1369,32 +1441,7 @@ static void am65_cpsw_nuss_ndo_get_stats(struct net_device *dev,
stats->tx_dropped = dev->stats.tx_dropped;
}
-static int am65_cpsw_nuss_ndo_slave_set_features(struct net_device *ndev,
- netdev_features_t features)
-{
- struct am65_cpsw_common *common = am65_ndev_to_common(ndev);
- netdev_features_t changes = features ^ ndev->features;
- struct am65_cpsw_host *host_p;
-
- host_p = am65_common_get_host(common);
-
- if (changes & NETIF_F_HW_CSUM) {
- bool enable = !!(features & NETIF_F_HW_CSUM);
-
- dev_info(common->dev, "Turn %s tx-checksum-ip-generic\n",
- enable ? "ON" : "OFF");
- if (enable)
- writel(AM65_CPSW_P0_REG_CTL_RX_CHECKSUM_EN,
- host_p->port_base + AM65_CPSW_P0_REG_CTL);
- else
- writel(0,
- host_p->port_base + AM65_CPSW_P0_REG_CTL);
- }
-
- return 0;
-}
-
-static const struct net_device_ops am65_cpsw_nuss_netdev_ops_2g = {
+static const struct net_device_ops am65_cpsw_nuss_netdev_ops = {
.ndo_open = am65_cpsw_nuss_ndo_slave_open,
.ndo_stop = am65_cpsw_nuss_ndo_slave_stop,
.ndo_start_xmit = am65_cpsw_nuss_ndo_slave_xmit,
@@ -1406,7 +1453,6 @@ static const struct net_device_ops am65_cpsw_nuss_netdev_ops_2g = {
.ndo_vlan_rx_add_vid = am65_cpsw_nuss_ndo_slave_add_vid,
.ndo_vlan_rx_kill_vid = am65_cpsw_nuss_ndo_slave_kill_vid,
.ndo_do_ioctl = am65_cpsw_nuss_ndo_slave_ioctl,
- .ndo_set_features = am65_cpsw_nuss_ndo_slave_set_features,
.ndo_setup_tc = am65_cpsw_qos_ndo_setup_tc,
};
@@ -1417,7 +1463,6 @@ static void am65_cpsw_nuss_slave_disable_unused(struct am65_cpsw_port *port)
if (!port->disabled)
return;
- common->disabled_ports_mask |= BIT(port->port_id);
cpsw_ale_control_set(common->ale, port->port_id,
ALE_PORT_STATE, ALE_PORT_STATE_DISABLE);
@@ -1496,6 +1541,7 @@ static int am65_cpsw_nuss_init_tx_chns(struct am65_cpsw_common *common)
snprintf(tx_chn->tx_chn_name,
sizeof(tx_chn->tx_chn_name), "tx%d", i);
+ spin_lock_init(&tx_chn->lock);
tx_chn->common = common;
tx_chn->id = i;
tx_chn->descs_num = max_desc_num;
@@ -1515,9 +1561,8 @@ static int am65_cpsw_nuss_init_tx_chns(struct am65_cpsw_common *common)
tx_chn->tx_chn_name,
&tx_cfg);
if (IS_ERR(tx_chn->tx_chn)) {
- ret = PTR_ERR(tx_chn->tx_chn);
- dev_err(dev, "Failed to request tx dma channel %d\n",
- ret);
+ ret = dev_err_probe(dev, PTR_ERR(tx_chn->tx_chn),
+ "Failed to request tx dma channel\n");
goto err;
}
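
dev_err_probe() folds the usual "don't log -EPROBE_DEFER" boilerplate into one call: for -EPROBE_DEFER it suppresses the message (recording a deferral reason instead), otherwise it logs, and in both cases it returns the error code it was given, so it can sit directly in a return statement or an assignment. A sketch of the idiom (request_channel() and struct thing are hypothetical stand-ins):

	struct thing *ch = request_channel(dev);

	if (IS_ERR(ch))
		return dev_err_probe(dev, PTR_ERR(ch),
				     "Failed to request channel\n");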
@@ -1588,8 +1633,8 @@ static int am65_cpsw_nuss_init_rx_chns(struct am65_cpsw_common *common)
rx_chn->rx_chn = k3_udma_glue_request_rx_chn(dev, "rx", &rx_cfg);
if (IS_ERR(rx_chn->rx_chn)) {
- ret = PTR_ERR(rx_chn->rx_chn);
- dev_err(dev, "Failed to request rx dma channel %d\n", ret);
+ ret = dev_err_probe(dev, PTR_ERR(rx_chn->rx_chn),
+ "Failed to request rx dma channel\n");
goto err;
}
@@ -1606,7 +1651,6 @@ static int am65_cpsw_nuss_init_rx_chns(struct am65_cpsw_common *common)
};
struct k3_ring_cfg fdqring_cfg = {
.elm_size = K3_RINGACC_RING_ELSIZE_8,
- .mode = K3_RINGACC_RING_MODE_MESSAGE,
.flags = K3_RINGACC_RING_SHARED,
};
struct k3_udma_glue_rx_flow_cfg rx_flow_cfg = {
@@ -1620,6 +1664,7 @@ static int am65_cpsw_nuss_init_rx_chns(struct am65_cpsw_common *common)
rx_flow_cfg.ring_rxfdq0_id = fdqring_id;
rx_flow_cfg.rx_cfg.size = max_desc_num;
rx_flow_cfg.rxfdq_cfg.size = max_desc_num;
+ rx_flow_cfg.rxfdq_cfg.mode = common->pdata.fdqring_mode;
ret = k3_udma_glue_rx_flow_init(rx_chn->rx_chn,
i, &rx_flow_cfg);
@@ -1725,6 +1770,13 @@ static int am65_cpsw_init_cpts(struct am65_cpsw_common *common)
return ret;
}
common->cpts = cpts;
+ /* Forbid runtime PM while CPTS is running.
+ * Depending on integration, K3 CPSWxG modules may completely lose
+ * context during ON->OFF transitions:
+ * AM65x/J721E MCU CPSW2G: context preserved
+ * J721E MAIN_CPSW9G: context lost
+ */
+ pm_runtime_forbid(dev);
return 0;
}
@@ -1778,8 +1830,10 @@ static int am65_cpsw_nuss_init_slave_ports(struct am65_cpsw_common *common)
return PTR_ERR(port->slave.mac_sl);
port->disabled = !of_device_is_available(port_np);
- if (port->disabled)
+ if (port->disabled) {
+ common->disabled_ports_mask |= BIT(port->port_id);
continue;
+ }
port->slave.ifphy = devm_of_phy_get(dev, port_np, NULL);
if (IS_ERR(port->slave.ifphy)) {
@@ -1795,12 +1849,10 @@ static int am65_cpsw_nuss_init_slave_ports(struct am65_cpsw_common *common)
/* get phy/link info */
if (of_phy_is_fixed_link(port_np)) {
ret = of_phy_register_fixed_link(port_np);
- if (ret) {
- if (ret != -EPROBE_DEFER)
- dev_err(dev, "%pOF failed to register fixed-link phy: %d\n",
- port_np, ret);
- return ret;
- }
+ if (ret)
+ return dev_err_probe(dev, ret,
+ "failed to register fixed-link phy %pOF\n",
+ port_np);
port->slave.phy_node = of_node_get(port_np);
} else {
port->slave.phy_node =
@@ -1833,6 +1885,12 @@ static int am65_cpsw_nuss_init_slave_ports(struct am65_cpsw_common *common)
}
of_node_put(node);
+ /* is there at least one external port enabled? */
+ if (!(~common->disabled_ports_mask & GENMASK(common->port_num, 1))) {
+ dev_err(dev, "no external ports are available\n");
+ return -ENODEV;
+ }
+
return 0;
}
@@ -1843,14 +1901,18 @@ static void am65_cpsw_pcpu_stats_free(void *data)
free_percpu(stats);
}
-static int am65_cpsw_nuss_init_ndev_2g(struct am65_cpsw_common *common)
+static int
+am65_cpsw_nuss_init_port_ndev(struct am65_cpsw_common *common, u32 port_idx)
{
struct am65_cpsw_ndev_priv *ndev_priv;
struct device *dev = common->dev;
struct am65_cpsw_port *port;
int ret;
- port = am65_common_get_port(common, 1);
+ port = &common->ports[port_idx];
+
+ if (port->disabled)
+ return 0;
/* alloc netdev */
port->ndev = devm_alloc_etherdev_mqs(common->dev,
@@ -1879,7 +1941,7 @@ static int am65_cpsw_nuss_init_ndev_2g(struct am65_cpsw_common *common)
port->ndev->features = port->ndev->hw_features |
NETIF_F_HW_VLAN_CTAG_FILTER;
port->ndev->vlan_features |= NETIF_F_SG;
- port->ndev->netdev_ops = &am65_cpsw_nuss_netdev_ops_2g;
+ port->ndev->netdev_ops = &am65_cpsw_nuss_netdev_ops;
port->ndev->ethtool_ops = &am65_cpsw_ethtool_ops_slave;
/* Disable TX checksum offload by default due to HW bug */
@@ -1892,29 +1954,41 @@ static int am65_cpsw_nuss_init_ndev_2g(struct am65_cpsw_common *common)
ret = devm_add_action_or_reset(dev, am65_cpsw_pcpu_stats_free,
ndev_priv->stats);
- if (ret) {
- dev_err(dev, "Failed to add percpu stat free action %d\n", ret);
- return ret;
+ if (ret)
+ dev_err(dev, "failed to add percpu stat free action %d\n", ret);
+
+ if (!common->dma_ndev)
+ common->dma_ndev = port->ndev;
+
+ return ret;
+}
+
+static int am65_cpsw_nuss_init_ndevs(struct am65_cpsw_common *common)
+{
+ int ret;
+ int i;
+
+ for (i = 0; i < common->port_num; i++) {
+ ret = am65_cpsw_nuss_init_port_ndev(common, i);
+ if (ret)
+ return ret;
}
- netif_napi_add(port->ndev, &common->napi_rx,
+ netif_napi_add(common->dma_ndev, &common->napi_rx,
am65_cpsw_nuss_rx_poll, NAPI_POLL_WEIGHT);
return ret;
}
-static int am65_cpsw_nuss_ndev_add_napi_2g(struct am65_cpsw_common *common)
+static int am65_cpsw_nuss_ndev_add_tx_napi(struct am65_cpsw_common *common)
{
struct device *dev = common->dev;
- struct am65_cpsw_port *port;
int i, ret = 0;
- port = am65_common_get_port(common, 1);
-
for (i = 0; i < common->tx_ch_num; i++) {
struct am65_cpsw_tx_chn *tx_chn = &common->tx_chns[i];
- netif_tx_napi_add(port->ndev, &tx_chn->napi_tx,
+ netif_tx_napi_add(common->dma_ndev, &tx_chn->napi_tx,
am65_cpsw_nuss_tx_poll, NAPI_POLL_WEIGHT);
ret = devm_request_irq(dev, tx_chn->irq,
@@ -1932,16 +2006,27 @@ err:
return ret;
}
-static int am65_cpsw_nuss_ndev_reg_2g(struct am65_cpsw_common *common)
+static void am65_cpsw_nuss_cleanup_ndev(struct am65_cpsw_common *common)
+{
+ struct am65_cpsw_port *port;
+ int i;
+
+ for (i = 0; i < common->port_num; i++) {
+ port = &common->ports[i];
+ if (port->ndev)
+ unregister_netdev(port->ndev);
+ }
+}
+
+static int am65_cpsw_nuss_register_ndevs(struct am65_cpsw_common *common)
{
struct device *dev = common->dev;
struct am65_cpsw_port *port;
- int ret = 0;
+ int ret = 0, i;
- port = am65_common_get_port(common, 1);
- ret = am65_cpsw_nuss_ndev_add_napi_2g(common);
+ ret = am65_cpsw_nuss_ndev_add_tx_napi(common);
if (ret)
- goto err;
+ return ret;
ret = devm_request_irq(dev, common->rx_chns.irq,
am65_cpsw_nuss_rx_irq,
@@ -1949,17 +2034,31 @@ static int am65_cpsw_nuss_ndev_reg_2g(struct am65_cpsw_common *common)
if (ret) {
dev_err(dev, "failure requesting rx irq %u, %d\n",
common->rx_chns.irq, ret);
- goto err;
+ return ret;
+ }
+
+ for (i = 0; i < common->port_num; i++) {
+ port = &common->ports[i];
+
+ if (!port->ndev)
+ continue;
+
+ ret = register_netdev(port->ndev);
+ if (ret) {
+ dev_err(dev, "error registering slave net device %i: %d\n",
+ i, ret);
+ goto err_cleanup_ndev;
+ }
}
- ret = register_netdev(port->ndev);
- if (ret)
- dev_err(dev, "error registering slave net device %d\n", ret);
/* can't auto unregister ndev using devm_add_action() due to
* devres release sequence in DD core for DMA
*/
-err:
+ return 0;
+
+err_cleanup_ndev:
+ am65_cpsw_nuss_cleanup_ndev(common);
return ret;
}
@@ -1972,19 +2071,7 @@ int am65_cpsw_nuss_update_tx_chns(struct am65_cpsw_common *common, int num_tx)
if (ret)
return ret;
- return am65_cpsw_nuss_ndev_add_napi_2g(common);
-}
-
-static void am65_cpsw_nuss_cleanup_ndev(struct am65_cpsw_common *common)
-{
- struct am65_cpsw_port *port;
- int i;
-
- for (i = 0; i < common->port_num; i++) {
- port = &common->ports[i];
- if (port->ndev)
- unregister_netdev(port->ndev);
- }
+ return am65_cpsw_nuss_ndev_add_tx_napi(common);
}
struct am65_cpsw_soc_pdata {
@@ -2005,10 +2092,14 @@ static const struct soc_device_attribute am65_cpsw_socinfo[] = {
static const struct am65_cpsw_pdata am65x_sr1_0 = {
.quirks = AM65_CPSW_QUIRK_I2027_NO_TX_CSUM,
+ .ale_dev_id = "am65x-cpsw2g",
+ .fdqring_mode = K3_RINGACC_RING_MODE_MESSAGE,
};
static const struct am65_cpsw_pdata j721e_pdata = {
.quirks = 0,
+ .ale_dev_id = "am65x-cpsw2g",
+ .fdqring_mode = K3_RINGACC_RING_MODE_MESSAGE,
};
static const struct of_device_id am65_cpsw_nuss_of_mtable[] = {
@@ -2068,9 +2159,6 @@ static int am65_cpsw_nuss_probe(struct platform_device *pdev)
return -ENOENT;
of_node_put(node);
- if (common->port_num != 1)
- return -EOPNOTSUPP;
-
common->rx_flow_id_base = -1;
init_completion(&common->tdown_complete);
common->tx_ch_num = 1;
@@ -2089,13 +2177,8 @@ static int am65_cpsw_nuss_probe(struct platform_device *pdev)
return -ENOMEM;
clk = devm_clk_get(dev, "fck");
- if (IS_ERR(clk)) {
- ret = PTR_ERR(clk);
-
- if (ret != -EPROBE_DEFER)
- dev_err(dev, "error getting fck clock %d\n", ret);
- return ret;
- }
+ if (IS_ERR(clk))
+ return dev_err_probe(dev, PTR_ERR(clk), "getting fck clock\n");
common->bus_freq = clk_get_rate(clk);
pm_runtime_enable(dev);
@@ -2145,7 +2228,7 @@ static int am65_cpsw_nuss_probe(struct platform_device *pdev)
ale_params.ale_ageout = AM65_CPSW_ALE_AGEOUT_DEFAULT;
ale_params.ale_ports = common->port_num + 1;
ale_params.ale_regs = common->cpsw_base + AM65_CPSW_NU_ALE_BASE;
- ale_params.dev_id = "am65x-cpsw2g";
+ ale_params.dev_id = common->pdata.ale_dev_id;
ale_params.bus_freq = common->bus_freq;
common->ale = cpsw_ale_create(&ale_params);
@@ -2165,11 +2248,11 @@ static int am65_cpsw_nuss_probe(struct platform_device *pdev)
dev_set_drvdata(dev, common);
- ret = am65_cpsw_nuss_init_ndev_2g(common);
+ ret = am65_cpsw_nuss_init_ndevs(common);
if (ret)
goto err_of_clear;
- ret = am65_cpsw_nuss_ndev_reg_2g(common);
+ ret = am65_cpsw_nuss_register_ndevs(common);
if (ret)
goto err_of_clear;
diff --git a/drivers/net/ethernet/ti/am65-cpsw-nuss.h b/drivers/net/ethernet/ti/am65-cpsw-nuss.h
index 993e1d4d3222..02aed4c0ceba 100644
--- a/drivers/net/ethernet/ti/am65-cpsw-nuss.h
+++ b/drivers/net/ethernet/ti/am65-cpsw-nuss.h
@@ -11,6 +11,7 @@
#include <linux/netdevice.h>
#include <linux/phy.h>
#include <linux/platform_device.h>
+#include <linux/soc/ti/k3-ringacc.h>
#include "am65-cpsw-qos.h"
struct am65_cpts;
@@ -59,6 +60,7 @@ struct am65_cpsw_tx_chn {
struct am65_cpsw_common *common;
struct k3_cppi_desc_pool *desc_pool;
struct k3_udma_glue_tx_channel *tx_chn;
+ spinlock_t lock; /* protect TX rings in multi-port mode */
int irq;
u32 id;
u32 descs_num;
@@ -77,6 +79,8 @@ struct am65_cpsw_rx_chn {
struct am65_cpsw_pdata {
u32 quirks;
+ enum k3_ring_mode fdqring_mode;
+ const char *ale_dev_id;
};
struct am65_cpsw_common {
@@ -91,6 +95,7 @@ struct am65_cpsw_common {
struct am65_cpsw_host host;
struct am65_cpsw_port *ports;
u32 disabled_ports_mask;
+ struct net_device *dma_ndev;
int usage_count; /* number of opened ports */
struct cpsw_ale *ale;
diff --git a/drivers/net/ethernet/ti/cpsw_ale.c b/drivers/net/ethernet/ti/cpsw_ale.c
index a6a455c32628..cdc308a2aa3e 100644
--- a/drivers/net/ethernet/ti/cpsw_ale.c
+++ b/drivers/net/ethernet/ti/cpsw_ale.c
@@ -634,8 +634,8 @@ int cpsw_ale_add_vlan(struct cpsw_ale *ale, u16 vid, int port_mask, int untag,
return 0;
}
-static void cpsw_ale_del_vlan_modify(struct cpsw_ale *ale, u32 *ale_entry,
- u16 vid, int port_mask)
+static void cpsw_ale_vlan_del_modify_int(struct cpsw_ale *ale, u32 *ale_entry,
+ u16 vid, int port_mask)
{
int reg_mcast, unreg_mcast;
int members, untag;
@@ -644,6 +644,7 @@ static void cpsw_ale_del_vlan_modify(struct cpsw_ale *ale, u32 *ale_entry,
ALE_ENT_VID_MEMBER_LIST);
members &= ~port_mask;
if (!members) {
+ cpsw_ale_set_vlan_untag(ale, ale_entry, vid, 0);
cpsw_ale_set_entry_type(ale_entry, ALE_TYPE_FREE);
return;
}
@@ -673,7 +674,7 @@ static void cpsw_ale_del_vlan_modify(struct cpsw_ale *ale, u32 *ale_entry,
ALE_ENT_VID_MEMBER_LIST, members);
}
-int cpsw_ale_del_vlan(struct cpsw_ale *ale, u16 vid, int port_mask)
+int cpsw_ale_vlan_del_modify(struct cpsw_ale *ale, u16 vid, int port_mask)
{
u32 ale_entry[ALE_ENTRY_WORDS] = {0, 0, 0};
int idx;
@@ -684,11 +685,39 @@ int cpsw_ale_del_vlan(struct cpsw_ale *ale, u16 vid, int port_mask)
cpsw_ale_read(ale, idx, ale_entry);
- if (port_mask) {
- cpsw_ale_del_vlan_modify(ale, ale_entry, vid, port_mask);
- } else {
+ cpsw_ale_vlan_del_modify_int(ale, ale_entry, vid, port_mask);
+ cpsw_ale_write(ale, idx, ale_entry);
+
+ return 0;
+}
+
+int cpsw_ale_del_vlan(struct cpsw_ale *ale, u16 vid, int port_mask)
+{
+ u32 ale_entry[ALE_ENTRY_WORDS] = {0, 0, 0};
+ int members, idx;
+
+ idx = cpsw_ale_match_vlan(ale, vid);
+ if (idx < 0)
+ return -ENOENT;
+
+ cpsw_ale_read(ale, idx, ale_entry);
+
+ /* If !port_mask, force-remove the VLAN (legacy behaviour).
+ * Otherwise check whether other ports are still VLAN members:
+ * if not, remove the VLAN entry entirely;
+ * if so, the same VLAN was added to more than one port in multi-port
+ * mode, so remove only the port_mask ports from the VLAN ALE entry,
+ * excluding the host port.
+ */
+ members = cpsw_ale_vlan_get_fld(ale, ale_entry, ALE_ENT_VID_MEMBER_LIST);
+ members &= ~port_mask;
+
+ if (!port_mask || !members) {
+ /* last port or force remove - remove VLAN */
cpsw_ale_set_vlan_untag(ale, ale_entry, vid, 0);
cpsw_ale_set_entry_type(ale_entry, ALE_TYPE_FREE);
+ } else {
+ port_mask &= ~ALE_PORT_HOST;
+ cpsw_ale_vlan_del_modify_int(ale, ale_entry, vid, port_mask);
}
cpsw_ale_write(ale, idx, ale_entry);
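
The rework separates a per-port leave from the legacy force remove: with port_mask == 0 the VLAN entry is always freed; otherwise it is freed only if removing port_mask leaves no members, and a partial delete never clears the host port. A condensed sketch of that decision, with the ALE entry accessors replaced by hypothetical get_members()/free_entry()/clear_ports() helpers:

	int members = get_members(vid) & ~port_mask;

	if (!port_mask || !members) {
		free_entry(vid);		/* last member, or force remove */
	} else {
		port_mask &= ~ALE_PORT_HOST;	/* never drop the host port */
		clear_ports(vid, port_mask);
	}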
diff --git a/drivers/net/ethernet/ti/cpsw_ale.h b/drivers/net/ethernet/ti/cpsw_ale.h
index 5e4a69662c5f..13fe47687fde 100644
--- a/drivers/net/ethernet/ti/cpsw_ale.h
+++ b/drivers/net/ethernet/ti/cpsw_ale.h
@@ -134,6 +134,7 @@ static inline int cpsw_ale_get_vlan_p0_untag(struct cpsw_ale *ale, u16 vid)
int cpsw_ale_vlan_add_modify(struct cpsw_ale *ale, u16 vid, int port_mask,
int untag_mask, int reg_mcast, int unreg_mcast);
+int cpsw_ale_vlan_del_modify(struct cpsw_ale *ale, u16 vid, int port_mask);
void cpsw_ale_set_unreg_mcast(struct cpsw_ale *ale, int unreg_mcast_mask,
bool add);
diff --git a/drivers/net/ethernet/ti/cpsw_switchdev.c b/drivers/net/ethernet/ti/cpsw_switchdev.c
index 985a929bb957..29747da5c514 100644
--- a/drivers/net/ethernet/ti/cpsw_switchdev.c
+++ b/drivers/net/ethernet/ti/cpsw_switchdev.c
@@ -227,7 +227,7 @@ static int cpsw_port_vlan_del(struct cpsw_priv *priv, u16 vid,
else
port_mask = BIT(priv->emac_port);
- ret = cpsw_ale_del_vlan(cpsw->ale, vid, port_mask);
+ ret = cpsw_ale_vlan_del_modify(cpsw->ale, vid, port_mask);
if (ret != 0)
return ret;
diff --git a/drivers/net/ethernet/ti/tlan.c b/drivers/net/ethernet/ti/tlan.c
index 267c080ee084..0b2ce4bdc2c3 100644
--- a/drivers/net/ethernet/ti/tlan.c
+++ b/drivers/net/ethernet/ti/tlan.c
@@ -186,6 +186,7 @@ static void tlan_reset_adapter(struct net_device *);
static void tlan_finish_reset(struct net_device *);
static void tlan_set_mac(struct net_device *, int areg, char *mac);
+static void __tlan_phy_print(struct net_device *);
static void tlan_phy_print(struct net_device *);
static void tlan_phy_detect(struct net_device *);
static void tlan_phy_power_down(struct net_device *);
@@ -201,9 +202,11 @@ static void tlan_phy_finish_auto_neg(struct net_device *);
static int tlan_phy_dp83840a_check(struct net_device *);
*/
-static bool tlan_mii_read_reg(struct net_device *, u16, u16, u16 *);
+static bool __tlan_mii_read_reg(struct net_device *, u16, u16, u16 *);
+static void tlan_mii_read_reg(struct net_device *, u16, u16, u16 *);
static void tlan_mii_send_data(u16, u32, unsigned);
static void tlan_mii_sync(u16);
+static void __tlan_mii_write_reg(struct net_device *, u16, u16, u16);
static void tlan_mii_write_reg(struct net_device *, u16, u16, u16);
static void tlan_ee_send_start(u16);
@@ -242,23 +245,20 @@ static u32
tlan_handle_rx_eoc
};
-static inline void
+static void
tlan_set_timer(struct net_device *dev, u32 ticks, u32 type)
{
struct tlan_priv *priv = netdev_priv(dev);
unsigned long flags = 0;
- if (!in_irq())
- spin_lock_irqsave(&priv->lock, flags);
+ spin_lock_irqsave(&priv->lock, flags);
if (priv->timer.function != NULL &&
priv->timer_type != TLAN_TIMER_ACTIVITY) {
- if (!in_irq())
- spin_unlock_irqrestore(&priv->lock, flags);
+ spin_unlock_irqrestore(&priv->lock, flags);
return;
}
priv->timer.function = tlan_timer;
- if (!in_irq())
- spin_unlock_irqrestore(&priv->lock, flags);
+ spin_unlock_irqrestore(&priv->lock, flags);
priv->timer_set_at = jiffies;
priv->timer_type = type;
@@ -1703,22 +1703,22 @@ static u32 tlan_handle_status_check(struct net_device *dev, u16 host_int)
dev->name, (unsigned) net_sts);
}
if ((net_sts & TLAN_NET_STS_MIRQ) && (priv->phy_num == 0)) {
- tlan_mii_read_reg(dev, phy, TLAN_TLPHY_STS, &tlphy_sts);
- tlan_mii_read_reg(dev, phy, TLAN_TLPHY_CTL, &tlphy_ctl);
+ __tlan_mii_read_reg(dev, phy, TLAN_TLPHY_STS, &tlphy_sts);
+ __tlan_mii_read_reg(dev, phy, TLAN_TLPHY_CTL, &tlphy_ctl);
if (!(tlphy_sts & TLAN_TS_POLOK) &&
!(tlphy_ctl & TLAN_TC_SWAPOL)) {
tlphy_ctl |= TLAN_TC_SWAPOL;
- tlan_mii_write_reg(dev, phy, TLAN_TLPHY_CTL,
- tlphy_ctl);
+ __tlan_mii_write_reg(dev, phy, TLAN_TLPHY_CTL,
+ tlphy_ctl);
} else if ((tlphy_sts & TLAN_TS_POLOK) &&
(tlphy_ctl & TLAN_TC_SWAPOL)) {
tlphy_ctl &= ~TLAN_TC_SWAPOL;
- tlan_mii_write_reg(dev, phy, TLAN_TLPHY_CTL,
- tlphy_ctl);
+ __tlan_mii_write_reg(dev, phy, TLAN_TLPHY_CTL,
+ tlphy_ctl);
}
if (debug)
- tlan_phy_print(dev);
+ __tlan_phy_print(dev);
}
}
@@ -2379,7 +2379,7 @@ ThunderLAN driver PHY layer routines
/*********************************************************************
- * tlan_phy_print
+ * __tlan_phy_print
*
* Returns:
* Nothing
@@ -2391,11 +2391,13 @@ ThunderLAN driver PHY layer routines
*
********************************************************************/
-static void tlan_phy_print(struct net_device *dev)
+static void __tlan_phy_print(struct net_device *dev)
{
struct tlan_priv *priv = netdev_priv(dev);
u16 i, data0, data1, data2, data3, phy;
+ lockdep_assert_held(&priv->lock);
+
phy = priv->phy[priv->phy_num];
if (priv->adapter->flags & TLAN_ADAPTER_UNMANAGED_PHY) {
@@ -2404,10 +2406,10 @@ static void tlan_phy_print(struct net_device *dev)
netdev_info(dev, "PHY 0x%02x\n", phy);
pr_info(" Off. +0 +1 +2 +3\n");
for (i = 0; i < 0x20; i += 4) {
- tlan_mii_read_reg(dev, phy, i, &data0);
- tlan_mii_read_reg(dev, phy, i + 1, &data1);
- tlan_mii_read_reg(dev, phy, i + 2, &data2);
- tlan_mii_read_reg(dev, phy, i + 3, &data3);
+ __tlan_mii_read_reg(dev, phy, i, &data0);
+ __tlan_mii_read_reg(dev, phy, i + 1, &data1);
+ __tlan_mii_read_reg(dev, phy, i + 2, &data2);
+ __tlan_mii_read_reg(dev, phy, i + 3, &data3);
pr_info(" 0x%02x 0x%04hx 0x%04hx 0x%04hx 0x%04hx\n",
i, data0, data1, data2, data3);
}
@@ -2417,7 +2419,15 @@ static void tlan_phy_print(struct net_device *dev)
}
+static void tlan_phy_print(struct net_device *dev)
+{
+ struct tlan_priv *priv = netdev_priv(dev);
+ unsigned long flags;
+ spin_lock_irqsave(&priv->lock, flags);
+ __tlan_phy_print(dev);
+ spin_unlock_irqrestore(&priv->lock, flags);
+}
/*********************************************************************
@@ -2795,7 +2805,7 @@ these routines are based on the information in chap. 2 of the
/***************************************************************
- * tlan_mii_read_reg
+ * __tlan_mii_read_reg
*
* Returns:
* false if ack received ok
@@ -2819,7 +2829,7 @@ these routines are based on the information in chap. 2 of the
**************************************************************/
static bool
-tlan_mii_read_reg(struct net_device *dev, u16 phy, u16 reg, u16 *val)
+__tlan_mii_read_reg(struct net_device *dev, u16 phy, u16 reg, u16 *val)
{
u8 nack;
u16 sio, tmp;
@@ -2827,15 +2837,13 @@ tlan_mii_read_reg(struct net_device *dev, u16 phy, u16 reg, u16 *val)
bool err;
int minten;
struct tlan_priv *priv = netdev_priv(dev);
- unsigned long flags = 0;
+
+ lockdep_assert_held(&priv->lock);
err = false;
outw(TLAN_NET_SIO, dev->base_addr + TLAN_DIO_ADR);
sio = dev->base_addr + TLAN_DIO_DATA + TLAN_NET_SIO;
- if (!in_irq())
- spin_lock_irqsave(&priv->lock, flags);
-
tlan_mii_sync(dev->base_addr);
minten = tlan_get_bit(TLAN_NET_SIO_MINTEN, sio);
@@ -2881,15 +2889,19 @@ tlan_mii_read_reg(struct net_device *dev, u16 phy, u16 reg, u16 *val)
*val = tmp;
- if (!in_irq())
- spin_unlock_irqrestore(&priv->lock, flags);
-
return err;
-
}
+static void tlan_mii_read_reg(struct net_device *dev, u16 phy, u16 reg,
+ u16 *val)
+{
+ struct tlan_priv *priv = netdev_priv(dev);
+ unsigned long flags;
-
+ spin_lock_irqsave(&priv->lock, flags);
+ __tlan_mii_read_reg(dev, phy, reg, val);
+ spin_unlock_irqrestore(&priv->lock, flags);
+}
/***************************************************************
* tlan_mii_send_data
@@ -2971,7 +2983,7 @@ static void tlan_mii_sync(u16 base_port)
/***************************************************************
- * tlan_mii_write_reg
+ * __tlan_mii_write_reg
*
* Returns:
* Nothing
@@ -2991,19 +3003,17 @@ static void tlan_mii_sync(u16 base_port)
**************************************************************/
static void
-tlan_mii_write_reg(struct net_device *dev, u16 phy, u16 reg, u16 val)
+__tlan_mii_write_reg(struct net_device *dev, u16 phy, u16 reg, u16 val)
{
u16 sio;
int minten;
- unsigned long flags = 0;
struct tlan_priv *priv = netdev_priv(dev);
+ lockdep_assert_held(&priv->lock);
+
outw(TLAN_NET_SIO, dev->base_addr + TLAN_DIO_ADR);
sio = dev->base_addr + TLAN_DIO_DATA + TLAN_NET_SIO;
- if (!in_irq())
- spin_lock_irqsave(&priv->lock, flags);
-
tlan_mii_sync(dev->base_addr);
minten = tlan_get_bit(TLAN_NET_SIO_MINTEN, sio);
@@ -3024,12 +3034,18 @@ tlan_mii_write_reg(struct net_device *dev, u16 phy, u16 reg, u16 val)
if (minten)
tlan_set_bit(TLAN_NET_SIO_MINTEN, sio);
- if (!in_irq())
- spin_unlock_irqrestore(&priv->lock, flags);
-
}
+static void
+tlan_mii_write_reg(struct net_device *dev, u16 phy, u16 reg, u16 val)
+{
+ struct tlan_priv *priv = netdev_priv(dev);
+ unsigned long flags;
+ spin_lock_irqsave(&priv->lock, flags);
+ __tlan_mii_write_reg(dev, phy, reg, val);
+ spin_unlock_irqrestore(&priv->lock, flags);
+}
/*****************************************************************************
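The tlan conversion above is the stock kernel recipe for retiring in_irq() heuristics: the low-level accessor gains a double-underscore prefix and states its locking contract with lockdep_assert_held(), while a thin wrapper takes the lock for callers that arrive without it. A minimal sketch of the pattern, with hypothetical foo_read/__foo_read names and a driver-private spinlock assumed:

	/* Caller must hold priv->lock; checked when lockdep is enabled */
	static u16 __foo_read(struct foo_priv *priv, u16 reg)
	{
		lockdep_assert_held(&priv->lock);
		return ioread16(priv->base + reg);
	}

	/* Locked wrapper for callers that do not yet hold priv->lock */
	static u16 foo_read(struct foo_priv *priv, u16 reg)
	{
		unsigned long flags;
		u16 val;

		spin_lock_irqsave(&priv->lock, flags);
		val = __foo_read(priv, reg);
		spin_unlock_irqrestore(&priv->lock, flags);
		return val;
	}

Paths that already hold the lock, such as the interrupt handler above, call the __ variants directly, so the lock is never taken recursively and the old in_irq() test becomes unnecessary.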
diff --git a/drivers/net/ethernet/xilinx/Kconfig b/drivers/net/ethernet/xilinx/Kconfig
index d0d0d4fe9d40..3b2137d1f4c6 100644
--- a/drivers/net/ethernet/xilinx/Kconfig
+++ b/drivers/net/ethernet/xilinx/Kconfig
@@ -18,7 +18,7 @@ if NET_VENDOR_XILINX
config XILINX_EMACLITE
tristate "Xilinx 10/100 Ethernet Lite support"
- depends on PPC32 || MICROBLAZE || ARCH_ZYNQ || MIPS
+ depends on PPC32 || MICROBLAZE || ARCH_ZYNQ || MIPS || COMPILE_TEST
select PHYLIB
help
This driver supports the 10/100 Ethernet Lite from Xilinx.
diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet.h b/drivers/net/ethernet/xilinx/xilinx_axienet.h
index f34c7903ff52..a03c3ca1b28d 100644
--- a/drivers/net/ethernet/xilinx/xilinx_axienet.h
+++ b/drivers/net/ethernet/xilinx/xilinx_axienet.h
@@ -378,6 +378,7 @@ struct axidma_bd {
* @dev: Pointer to device structure
* @phy_node: Pointer to device node structure
* @mii_bus: Pointer to MII bus structure
+ * @mii_clk_div: MII bus clock divider value
* @regs_start: Resource start for axienet device addresses
* @regs: Base address for the axienet_local device address space
* @dma_regs: Base address for the axidma device address space
@@ -419,11 +420,15 @@ struct axienet_local {
struct phylink *phylink;
struct phylink_config phylink_config;
+ /* Reference to PCS/PMA PHY if used */
+ struct mdio_device *pcs_phy;
+
/* Clock for AXI bus */
struct clk *clk;
/* MDIO bus data */
struct mii_bus *mii_bus; /* MII bus reference */
+ u8 mii_clk_div; /* MII bus clock divider value */
/* IO registers, dma functions and IRQs */
resource_size_t regs_start;
diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
index 9aafd3ecdaa4..6fea980acf64 100644
--- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
+++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
@@ -1049,20 +1049,13 @@ static int axienet_open(struct net_device *ndev)
dev_dbg(&ndev->dev, "axienet_open()\n");
- /* Disable the MDIO interface till Axi Ethernet Reset is completed.
- * When we do an Axi Ethernet reset, it resets the complete core
- * including the MDIO. MDIO must be disabled before resetting
- * and re-enabled afterwards.
+ /* When we do an Axi Ethernet reset, it resets the complete core
+ * including the MDIO. MDIO must be disabled before resetting.
* Hold MDIO bus lock to avoid MDIO accesses during the reset.
*/
mutex_lock(&lp->mii_bus->mdio_lock);
- axienet_mdio_disable(lp);
ret = axienet_device_reset(ndev);
- if (ret == 0)
- ret = axienet_mdio_enable(lp);
mutex_unlock(&lp->mii_bus->mdio_lock);
- if (ret < 0)
- return ret;
ret = phylink_of_phy_connect(lp->phylink, lp->dev->of_node, 0);
if (ret) {
@@ -1156,9 +1149,7 @@ static int axienet_stop(struct net_device *ndev)
/* Do a reset to ensure DMA is really stopped */
mutex_lock(&lp->mii_bus->mdio_lock);
- axienet_mdio_disable(lp);
__axienet_device_reset(lp);
- axienet_mdio_enable(lp);
mutex_unlock(&lp->mii_bus->mdio_lock);
cancel_work_sync(&lp->dma_err_task);
@@ -1517,10 +1508,27 @@ static void axienet_validate(struct phylink_config *config,
phylink_set(mask, Asym_Pause);
phylink_set(mask, Pause);
- phylink_set(mask, 1000baseX_Full);
- phylink_set(mask, 10baseT_Full);
- phylink_set(mask, 100baseT_Full);
- phylink_set(mask, 1000baseT_Full);
+
+ switch (state->interface) {
+ case PHY_INTERFACE_MODE_NA:
+ case PHY_INTERFACE_MODE_1000BASEX:
+ case PHY_INTERFACE_MODE_SGMII:
+ case PHY_INTERFACE_MODE_GMII:
+ case PHY_INTERFACE_MODE_RGMII:
+ case PHY_INTERFACE_MODE_RGMII_ID:
+ case PHY_INTERFACE_MODE_RGMII_RXID:
+ case PHY_INTERFACE_MODE_RGMII_TXID:
+ phylink_set(mask, 1000baseX_Full);
+ phylink_set(mask, 1000baseT_Full);
+ if (state->interface == PHY_INTERFACE_MODE_1000BASEX)
+ break;
+ fallthrough;
+ case PHY_INTERFACE_MODE_MII:
+ phylink_set(mask, 100baseT_Full);
+ phylink_set(mask, 10baseT_Full);
+ default:
+ break;
+ }
bitmap_and(supported, supported, mask,
__ETHTOOL_LINK_MODE_MASK_NBITS);
@@ -1533,38 +1541,46 @@ static void axienet_mac_pcs_get_state(struct phylink_config *config,
{
struct net_device *ndev = to_net_dev(config->dev);
struct axienet_local *lp = netdev_priv(ndev);
- u32 emmc_reg, fcc_reg;
-
- state->interface = lp->phy_mode;
- emmc_reg = axienet_ior(lp, XAE_EMMC_OFFSET);
- if (emmc_reg & XAE_EMMC_LINKSPD_1000)
- state->speed = SPEED_1000;
- else if (emmc_reg & XAE_EMMC_LINKSPD_100)
- state->speed = SPEED_100;
- else
- state->speed = SPEED_10;
-
- state->pause = 0;
- fcc_reg = axienet_ior(lp, XAE_FCC_OFFSET);
- if (fcc_reg & XAE_FCC_FCTX_MASK)
- state->pause |= MLO_PAUSE_TX;
- if (fcc_reg & XAE_FCC_FCRX_MASK)
- state->pause |= MLO_PAUSE_RX;
-
- state->an_complete = 0;
- state->duplex = 1;
+ switch (state->interface) {
+ case PHY_INTERFACE_MODE_SGMII:
+ case PHY_INTERFACE_MODE_1000BASEX:
+ phylink_mii_c22_pcs_get_state(lp->pcs_phy, state);
+ break;
+ default:
+ break;
+ }
}
static void axienet_mac_an_restart(struct phylink_config *config)
{
- /* Unsupported, do nothing */
+ struct net_device *ndev = to_net_dev(config->dev);
+ struct axienet_local *lp = netdev_priv(ndev);
+
+ phylink_mii_c22_pcs_an_restart(lp->pcs_phy);
}
static void axienet_mac_config(struct phylink_config *config, unsigned int mode,
const struct phylink_link_state *state)
{
- /* nothing meaningful to do */
+ struct net_device *ndev = to_net_dev(config->dev);
+ struct axienet_local *lp = netdev_priv(ndev);
+ int ret;
+
+ switch (state->interface) {
+ case PHY_INTERFACE_MODE_SGMII:
+ case PHY_INTERFACE_MODE_1000BASEX:
+ ret = phylink_mii_c22_pcs_config(lp->pcs_phy, mode,
+ state->interface,
+ state->advertising);
+ if (ret < 0)
+ netdev_warn(ndev, "Failed to configure PCS: %d\n",
+ ret);
+ break;
+
+ default:
+ break;
+ }
}
static void axienet_mac_link_down(struct phylink_config *config,
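For the in-band SGMII/1000BASE-X modes, the MAC callbacks above stop inferring link state from MAC-side registers and instead hand the PCS/PMA PHY's clause-22 registers to phylink's generic helpers. A hedged sketch of that delegation (my_pcs_get_state is hypothetical; the phylink_mii_c22_pcs_* calls are the kernel helpers used in the patch):

	static void my_pcs_get_state(struct mdio_device *pcs,
				     phy_interface_t interface,
				     struct phylink_link_state *state)
	{
		switch (interface) {
		case PHY_INTERFACE_MODE_SGMII:
		case PHY_INTERFACE_MODE_1000BASEX:
			/* Reads and decodes BMSR/LPA via the PCS device */
			phylink_mii_c22_pcs_get_state(pcs, state);
			break;
		default:
			break;	/* nothing to report for other modes */
		}
	}

mac_config uses the same interface guard, while an_restart can call its helper unconditionally because phylink only requests a restart when in-band autonegotiation is in use.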
@@ -1644,16 +1660,12 @@ static void axienet_dma_err_handler(struct work_struct *work)
axienet_setoptions(ndev, lp->options &
~(XAE_OPTION_TXEN | XAE_OPTION_RXEN));
- /* Disable the MDIO interface till Axi Ethernet Reset is completed.
- * When we do an Axi Ethernet reset, it resets the complete core
- * including the MDIO. MDIO must be disabled before resetting
- * and re-enabled afterwards.
+ /* When we do an Axi Ethernet reset, it resets the complete core
+ * including the MDIO. MDIO must be disabled before resetting.
* Hold MDIO bus lock to avoid MDIO accesses during the reset.
*/
mutex_lock(&lp->mii_bus->mdio_lock);
- axienet_mdio_disable(lp);
__axienet_device_reset(lp);
- axienet_mdio_enable(lp);
mutex_unlock(&lp->mii_bus->mdio_lock);
for (i = 0; i < lp->tx_bd_num; i++) {
@@ -1999,6 +2011,20 @@ static int axienet_probe(struct platform_device *pdev)
dev_warn(&pdev->dev,
"error registering MDIO bus: %d\n", ret);
}
+ if (lp->phy_mode == PHY_INTERFACE_MODE_SGMII ||
+ lp->phy_mode == PHY_INTERFACE_MODE_1000BASEX) {
+ if (!lp->phy_node) {
+ dev_err(&pdev->dev, "phy-handle required for 1000BaseX/SGMII\n");
+ ret = -EINVAL;
+ goto free_netdev;
+ }
+ lp->pcs_phy = of_mdio_find_device(lp->phy_node);
+ if (!lp->pcs_phy) {
+ ret = -EPROBE_DEFER;
+ goto free_netdev;
+ }
+ lp->phylink_config.pcs_poll = true;
+ }
lp->phylink_config.dev = &ndev->dev;
lp->phylink_config.type = PHYLINK_NETDEV;
@@ -2036,6 +2062,9 @@ static int axienet_remove(struct platform_device *pdev)
if (lp->phylink)
phylink_destroy(lp->phylink);
+ if (lp->pcs_phy)
+ put_device(&lp->pcs_phy->dev);
+
axienet_mdio_teardown(lp);
clk_disable_unprepare(lp->clk);
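of_mdio_find_device() takes a reference on the returned device, which is why the remove path above pairs the lookup with put_device(). Returning -EPROBE_DEFER when the lookup fails lets the driver core retry once the MDIO bus carrying the PCS has registered. A minimal sketch of the idiom (probe_pcs/release_pcs are hypothetical helpers):

	static struct mdio_device *probe_pcs(struct device_node *np)
	{
		struct mdio_device *pcs;

		/* Takes a reference on the underlying struct device */
		pcs = of_mdio_find_device(np);
		if (!pcs)
			return ERR_PTR(-EPROBE_DEFER);	/* bus may appear later */
		return pcs;
	}

	static void release_pcs(struct mdio_device *pcs)
	{
		put_device(&pcs->dev);	/* matching reference drop */
	}

Forgetting the put_device() leaks a device reference every time the interface is torn down.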
diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_mdio.c b/drivers/net/ethernet/xilinx/xilinx_axienet_mdio.c
index 435ed308d990..9c014cee34b2 100644
--- a/drivers/net/ethernet/xilinx/xilinx_axienet_mdio.c
+++ b/drivers/net/ethernet/xilinx/xilinx_axienet_mdio.c
@@ -30,6 +30,23 @@ static int axienet_mdio_wait_until_ready(struct axienet_local *lp)
1, 20000);
}
+/* Enable the MDIO MDC. Called prior to a read/write operation */
+static void axienet_mdio_mdc_enable(struct axienet_local *lp)
+{
+ axienet_iow(lp, XAE_MDIO_MC_OFFSET,
+ ((u32)lp->mii_clk_div | XAE_MDIO_MC_MDIOEN_MASK));
+}
+
+/* Disable the MDIO MDC. Called after a read/write operation */
+static void axienet_mdio_mdc_disable(struct axienet_local *lp)
+{
+ u32 mc_reg;
+
+ mc_reg = axienet_ior(lp, XAE_MDIO_MC_OFFSET);
+ axienet_iow(lp, XAE_MDIO_MC_OFFSET,
+ (mc_reg & ~XAE_MDIO_MC_MDIOEN_MASK));
+}
+
/**
* axienet_mdio_read - MDIO interface read function
* @bus: Pointer to mii bus structure
@@ -48,9 +65,13 @@ static int axienet_mdio_read(struct mii_bus *bus, int phy_id, int reg)
int ret;
struct axienet_local *lp = bus->priv;
+ axienet_mdio_mdc_enable(lp);
+
ret = axienet_mdio_wait_until_ready(lp);
- if (ret < 0)
+ if (ret < 0) {
+ axienet_mdio_mdc_disable(lp);
return ret;
+ }
axienet_iow(lp, XAE_MDIO_MCR_OFFSET,
(((phy_id << XAE_MDIO_MCR_PHYAD_SHIFT) &
@@ -61,14 +82,17 @@ static int axienet_mdio_read(struct mii_bus *bus, int phy_id, int reg)
XAE_MDIO_MCR_OP_READ_MASK));
ret = axienet_mdio_wait_until_ready(lp);
- if (ret < 0)
+ if (ret < 0) {
+ axienet_mdio_mdc_disable(lp);
return ret;
+ }
rc = axienet_ior(lp, XAE_MDIO_MRD_OFFSET) & 0x0000FFFF;
dev_dbg(lp->dev, "axienet_mdio_read(phy_id=%i, reg=%x) == %x\n",
phy_id, reg, rc);
+ axienet_mdio_mdc_disable(lp);
return rc;
}
@@ -94,9 +118,13 @@ static int axienet_mdio_write(struct mii_bus *bus, int phy_id, int reg,
dev_dbg(lp->dev, "axienet_mdio_write(phy_id=%i, reg=%x, val=%x)\n",
phy_id, reg, val);
+ axienet_mdio_mdc_enable(lp);
+
ret = axienet_mdio_wait_until_ready(lp);
- if (ret < 0)
+ if (ret < 0) {
+ axienet_mdio_mdc_disable(lp);
return ret;
+ }
axienet_iow(lp, XAE_MDIO_MWD_OFFSET, (u32) val);
axienet_iow(lp, XAE_MDIO_MCR_OFFSET,
@@ -108,8 +136,11 @@ static int axienet_mdio_write(struct mii_bus *bus, int phy_id, int reg,
XAE_MDIO_MCR_OP_WRITE_MASK));
ret = axienet_mdio_wait_until_ready(lp);
- if (ret < 0)
+ if (ret < 0) {
+ axienet_mdio_mdc_disable(lp);
return ret;
+ }
+ axienet_mdio_mdc_disable(lp);
return 0;
}
@@ -124,7 +155,9 @@ static int axienet_mdio_write(struct mii_bus *bus, int phy_id, int reg,
**/
int axienet_mdio_enable(struct axienet_local *lp)
{
- u32 clk_div, host_clock;
+ u32 host_clock;
+
+ lp->mii_clk_div = 0;
if (lp->clk) {
host_clock = clk_get_rate(lp->clk);
@@ -176,19 +209,19 @@ int axienet_mdio_enable(struct axienet_local *lp)
* "clock-frequency" from the CPU
*/
- clk_div = (host_clock / (MAX_MDIO_FREQ * 2)) - 1;
+ lp->mii_clk_div = (host_clock / (MAX_MDIO_FREQ * 2)) - 1;
/* If there is any remainder from the division of
* fHOST / (MAX_MDIO_FREQ * 2), then we need to add
* 1 to the clock divisor or we will surely be above 2.5 MHz
*/
if (host_clock % (MAX_MDIO_FREQ * 2))
- clk_div++;
+ lp->mii_clk_div++;
netdev_dbg(lp->ndev,
"Setting MDIO clock divisor to %u/%u Hz host clock.\n",
- clk_div, host_clock);
+ lp->mii_clk_div, host_clock);
- axienet_iow(lp, XAE_MDIO_MC_OFFSET, clk_div | XAE_MDIO_MC_MDIOEN_MASK);
+ axienet_iow(lp, XAE_MDIO_MC_OFFSET, lp->mii_clk_div | XAE_MDIO_MC_MDIOEN_MASK);
return axienet_mdio_wait_until_ready(lp);
}
@@ -211,8 +244,8 @@ void axienet_mdio_disable(struct axienet_local *lp)
* Return: 0 on success, -ETIMEDOUT on a timeout, -ENOMEM when
* mdiobus_alloc (to allocate memory for mii bus structure) fails.
*
- * Sets up the MDIO interface by initializing the MDIO clock and enabling the
- * MDIO interface in hardware. Register the MDIO interface.
+ * Sets up the MDIO interface by initializing the MDIO clock.
+ * Register the MDIO interface.
**/
int axienet_mdio_setup(struct axienet_local *lp)
{
@@ -246,6 +279,7 @@ int axienet_mdio_setup(struct axienet_local *lp)
lp->mii_bus = NULL;
return ret;
}
+ axienet_mdio_mdc_disable(lp);
return 0;
}
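Caching the divisor in mii_clk_div is what makes the gating above possible: the MC register can be rewritten from the cached value at the start of every access, so MDC only toggles while an MDIO transaction is actually in flight. Condensing the read/write paths into one hypothetical helper (mdio_xfer is not in the patch; the axienet_* calls are):

	static int mdio_xfer(struct axienet_local *lp, u32 op)
	{
		int ret;

		axienet_mdio_mdc_enable(lp);	/* MDIOEN | cached divisor */

		ret = axienet_mdio_wait_until_ready(lp);
		if (ret >= 0) {
			axienet_iow(lp, XAE_MDIO_MCR_OFFSET, op);
			ret = axienet_mdio_wait_until_ready(lp);
		}

		axienet_mdio_mdc_disable(lp);	/* MDC stops toggling */
		return ret;
	}

Because every access re-enables the clock itself, the reset paths in axienet_open() and the DMA error handler no longer need their own disable/enable bracket, as seen in the main driver diff above.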
diff --git a/drivers/net/ethernet/xilinx/xilinx_emaclite.c b/drivers/net/ethernet/xilinx/xilinx_emaclite.c
index 0c26f5bcc523..008b9a40faad 100644
--- a/drivers/net/ethernet/xilinx/xilinx_emaclite.c
+++ b/drivers/net/ethernet/xilinx/xilinx_emaclite.c
@@ -97,7 +97,7 @@
#define ALIGNMENT 4
/* BUFFER_ALIGN(adr) calculates the number of bytes to the next alignment. */
-#define BUFFER_ALIGN(adr) ((ALIGNMENT - ((u32)adr)) % ALIGNMENT)
+#define BUFFER_ALIGN(adr) ((ALIGNMENT - ((uintptr_t)adr)) % ALIGNMENT)
#ifdef __BIG_ENDIAN
#define xemaclite_readl ioread32be
@@ -338,7 +338,7 @@ static int xemaclite_send_data(struct net_local *drvdata, u8 *data,
* if it is configured in HW
*/
- addr = (void __iomem __force *)((u32 __force)addr ^
+ addr = (void __iomem __force *)((uintptr_t __force)addr ^
XEL_BUFFER_OFFSET);
reg_data = xemaclite_readl(addr + XEL_TSR_OFFSET);
@@ -399,8 +399,9 @@ static u16 xemaclite_recv_data(struct net_local *drvdata, u8 *data, int maxlen)
* will correct on subsequent calls
*/
if (drvdata->rx_ping_pong != 0)
- addr = (void __iomem __force *)((u32 __force)addr ^
- XEL_BUFFER_OFFSET);
+ addr = (void __iomem __force *)
+ ((uintptr_t __force)addr ^
+ XEL_BUFFER_OFFSET);
else
return 0; /* No data was available */
@@ -518,6 +519,7 @@ static int xemaclite_set_mac_address(struct net_device *dev, void *address)
/**
* xemaclite_tx_timeout - Callback for Tx Timeout
* @dev: Pointer to the network device
+ * @txqueue: Unused
*
* This function is called when Tx time out occurs for Emaclite device.
*/
@@ -1191,9 +1193,9 @@ static int xemaclite_of_probe(struct platform_device *ofdev)
}
dev_info(dev,
- "Xilinx EmacLite at 0x%08X mapped to 0x%08X, irq=%d\n",
+ "Xilinx EmacLite at 0x%08X mapped to 0x%08lX, irq=%d\n",
(unsigned int __force)ndev->mem_start,
- (unsigned int __force)lp->base_addr, ndev->irq);
+ (unsigned long __force)lp->base_addr, ndev->irq);
return 0;
error:
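The u32 casts removed above truncate pointers on 64-bit builds, which is precisely what the new COMPILE_TEST coverage flushes out; uintptr_t is guaranteed to round-trip an object pointer, so the ping-pong buffer XOR stays valid at any pointer width. A standalone user-space illustration (buffer stride value as in the driver):

	#include <stdint.h>
	#include <stdio.h>

	#define XEL_BUFFER_OFFSET 0x0800	/* ping-pong buffer stride */

	int main(void)
	{
		char buf[2 * XEL_BUFFER_OFFSET];
		char *ping = buf;

		/* Safe: uintptr_t holds a full pointer on LP64 targets */
		char *pong = (char *)((uintptr_t)ping ^ XEL_BUFFER_OFFSET);

		/* A (u32) cast here would discard the upper 32 address
		 * bits on a 64-bit machine and rebuild a bogus pointer.
		 */
		printf("ping=%p pong=%p\n", (void *)ping, (void *)pong);
		return 0;
	}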
diff --git a/drivers/net/fddi/skfp/drvfbi.c b/drivers/net/fddi/skfp/drvfbi.c
index cc9ac572423e..e9b9614639cd 100644
--- a/drivers/net/fddi/skfp/drvfbi.c
+++ b/drivers/net/fddi/skfp/drvfbi.c
@@ -22,10 +22,6 @@
#include <linux/bitrev.h>
#include <linux/pci.h>
-#ifndef lint
-static const char ID_sccs[] = "@(#)drvfbi.c 1.63 99/02/11 (C) SK " ;
-#endif
-
/*
* PCM active state
*/
diff --git a/drivers/net/fddi/skfp/ecm.c b/drivers/net/fddi/skfp/ecm.c
index 15c503f43727..2f5f5f26bb43 100644
--- a/drivers/net/fddi/skfp/ecm.c
+++ b/drivers/net/fddi/skfp/ecm.c
@@ -40,10 +40,6 @@
#define KERNEL
#include "h/smtstate.h"
-#ifndef lint
-static const char ID_sccs[] = "@(#)ecm.c 2.7 99/08/05 (C) SK " ;
-#endif
-
/*
* FSM Macros
*/
@@ -147,10 +143,11 @@ static void ecm_fsm(struct s_smc *smc, int cmd)
/* For AIX event notification: */
/* Is a disconnect command remotely issued ? */
if (cmd == EC_DISCONNECT &&
- smc->mib.fddiSMTRemoteDisconnectFlag == TRUE)
+ smc->mib.fddiSMTRemoteDisconnectFlag == TRUE) {
AIX_EVENT (smc, (u_long) CIO_HARD_FAIL, (u_long)
FDDI_REMOTE_DISCONNECT, smt_get_event_word(smc),
smt_get_error_word(smc) );
+ }
/*jd 05-Aug-1999 Bug #10419 "Port Disconnect fails at Dup MAc Cond."*/
if (cmd == EC_CONNECT) {
diff --git a/drivers/net/fddi/skfp/ess.c b/drivers/net/fddi/skfp/ess.c
index afd5ca39f43b..35110c0c00a0 100644
--- a/drivers/net/fddi/skfp/ess.c
+++ b/drivers/net/fddi/skfp/ess.c
@@ -40,7 +40,6 @@
#ifdef ESS
#ifndef lint
-static const char ID_sccs[] = "@(#)ess.c 1.10 96/02/23 (C) SK" ;
#define LINT_USE(x)
#else
#define LINT_USE(x) (x)=(x)
diff --git a/drivers/net/fddi/skfp/hwt.c b/drivers/net/fddi/skfp/hwt.c
index 32804ed049cd..5577b8e14b73 100644
--- a/drivers/net/fddi/skfp/hwt.c
+++ b/drivers/net/fddi/skfp/hwt.c
@@ -27,10 +27,6 @@
#include "h/fddi.h"
#include "h/smc.h"
-#ifndef lint
-static const char ID_sccs[] = "@(#)hwt.c 1.13 97/04/23 (C) SK " ;
-#endif
-
/*
* Prototypes of local functions.
*/
diff --git a/drivers/net/fddi/skfp/pcmplc.c b/drivers/net/fddi/skfp/pcmplc.c
index 554cde8d6073..90e8df6d9a88 100644
--- a/drivers/net/fddi/skfp/pcmplc.c
+++ b/drivers/net/fddi/skfp/pcmplc.c
@@ -45,10 +45,6 @@
#define KERNEL
#include "h/smtstate.h"
-#ifndef lint
-static const char ID_sccs[] = "@(#)pcmplc.c 2.55 99/08/05 (C) SK " ;
-#endif
-
#ifdef FDDI_MIB
extern int snmp_fddi_trap(
#ifdef ANSIC
diff --git a/drivers/net/fddi/skfp/pmf.c b/drivers/net/fddi/skfp/pmf.c
index 14f10b4cab0f..563fb7f0b327 100644
--- a/drivers/net/fddi/skfp/pmf.c
+++ b/drivers/net/fddi/skfp/pmf.c
@@ -24,10 +24,6 @@
#ifndef SLIM_SMT
-#ifndef lint
-static const char ID_sccs[] = "@(#)pmf.c 1.37 97/08/04 (C) SK " ;
-#endif
-
static int smt_authorize(struct s_smc *smc, struct smt_header *sm);
static int smt_check_set_count(struct s_smc *smc, struct smt_header *sm);
static const struct s_p_tab* smt_get_ptab(u_short para);
diff --git a/drivers/net/fddi/skfp/queue.c b/drivers/net/fddi/skfp/queue.c
index ba022f723bd7..abe155ad777f 100644
--- a/drivers/net/fddi/skfp/queue.c
+++ b/drivers/net/fddi/skfp/queue.c
@@ -18,10 +18,6 @@
#include "h/fddi.h"
#include "h/smc.h"
-#ifndef lint
-static const char ID_sccs[] = "@(#)queue.c 2.9 97/08/04 (C) SK " ;
-#endif
-
#define PRINTF(a,b,c)
/*
diff --git a/drivers/net/fddi/skfp/rmt.c b/drivers/net/fddi/skfp/rmt.c
index c0e62c25332c..37a89675dbeb 100644
--- a/drivers/net/fddi/skfp/rmt.c
+++ b/drivers/net/fddi/skfp/rmt.c
@@ -45,10 +45,6 @@
#define KERNEL
#include "h/smtstate.h"
-#ifndef lint
-static const char ID_sccs[] = "@(#)rmt.c 2.13 99/07/02 (C) SK " ;
-#endif
-
/*
* FSM Macros
*/
diff --git a/drivers/net/fddi/skfp/smtdef.c b/drivers/net/fddi/skfp/smtdef.c
index 0bebde3c6cb9..99cc9a549bd7 100644
--- a/drivers/net/fddi/skfp/smtdef.c
+++ b/drivers/net/fddi/skfp/smtdef.c
@@ -22,10 +22,6 @@
#define OEM_USER_DATA "SK-NET FDDI V2.0 Userdata"
#endif
-#ifndef lint
-static const char ID_sccs[] = "@(#)smtdef.c 2.53 99/08/11 (C) SK " ;
-#endif
-
/*
* defaults
*/
diff --git a/drivers/net/fddi/skfp/smtinit.c b/drivers/net/fddi/skfp/smtinit.c
index 01f6c75cbea8..c9898c83fe30 100644
--- a/drivers/net/fddi/skfp/smtinit.c
+++ b/drivers/net/fddi/skfp/smtinit.c
@@ -19,10 +19,6 @@
#include "h/fddi.h"
#include "h/smc.h"
-#ifndef lint
-static const char ID_sccs[] = "@(#)smtinit.c 1.15 97/05/06 (C) SK " ;
-#endif
-
void init_fddi_driver(struct s_smc *smc, u_char *mac_addr);
/* define global debug variable */
diff --git a/drivers/net/fddi/skfp/smttimer.c b/drivers/net/fddi/skfp/smttimer.c
index 9d549bb14f07..5f3e5d7bf415 100644
--- a/drivers/net/fddi/skfp/smttimer.c
+++ b/drivers/net/fddi/skfp/smttimer.c
@@ -18,10 +18,6 @@
#include "h/fddi.h"
#include "h/smc.h"
-#ifndef lint
-static const char ID_sccs[] = "@(#)smttimer.c 2.4 97/08/04 (C) SK " ;
-#endif
-
static void timer_done(struct s_smc *smc, int restart);
void smt_timer_init(struct s_smc *smc)
diff --git a/drivers/net/fddi/skfp/srf.c b/drivers/net/fddi/skfp/srf.c
index f98d060b0f5b..4cad68c3f49b 100644
--- a/drivers/net/fddi/skfp/srf.c
+++ b/drivers/net/fddi/skfp/srf.c
@@ -26,11 +26,6 @@
#ifndef SLIM_SMT
#ifndef BOOT
-#ifndef lint
-static const char ID_sccs[] = "@(#)srf.c 1.18 97/08/04 (C) SK " ;
-#endif
-
-
/*
* function declarations
*/
diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c
index 8ae9ce2014a4..627c33333866 100644
--- a/drivers/net/geneve.c
+++ b/drivers/net/geneve.c
@@ -7,6 +7,7 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include <linux/ethtool.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/etherdevice.h>
@@ -1149,7 +1150,7 @@ static const struct net_device_ops geneve_netdev_ops = {
.ndo_open = geneve_open,
.ndo_stop = geneve_stop,
.ndo_start_xmit = geneve_xmit,
- .ndo_get_stats64 = ip_tunnel_get_stats64,
+ .ndo_get_stats64 = dev_get_tstats64,
.ndo_change_mtu = geneve_change_mtu,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = eth_mac_addr,
diff --git a/drivers/net/gtp.c b/drivers/net/gtp.c
index dc668ed280b9..4c04e271f184 100644
--- a/drivers/net/gtp.c
+++ b/drivers/net/gtp.c
@@ -607,7 +607,7 @@ static const struct net_device_ops gtp_netdev_ops = {
.ndo_init = gtp_dev_init,
.ndo_uninit = gtp_dev_uninit,
.ndo_start_xmit = gtp_dev_xmit,
- .ndo_get_stats64 = ip_tunnel_get_stats64,
+ .ndo_get_stats64 = dev_get_tstats64,
};
static void gtp_link_setup(struct net_device *dev)
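ip_tunnel_get_stats64() had become a thin wrapper around the generic per-cpu tstats summation, so tunnel drivers can point .ndo_get_stats64 straight at dev_get_tstats64(), provided they allocate dev->tstats. A minimal sketch of what a converted driver relies on (demo_* names are hypothetical; the ops and helpers are the core netdev API):

	static netdev_tx_t demo_xmit(struct sk_buff *skb, struct net_device *dev);

	static const struct net_device_ops demo_netdev_ops = {
		.ndo_start_xmit	 = demo_xmit,
		/* Generic helper: sums the per-cpu dev->tstats counters */
		.ndo_get_stats64 = dev_get_tstats64,
	};

	/* dev_get_tstats64() requires the per-cpu allocation below */
	static int demo_init(struct net_device *dev)
	{
		dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
		return dev->tstats ? 0 : -ENOMEM;
	}

Both geneve and gtp already allocate tstats in their init paths, so the one-line ops change is the whole conversion.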
diff --git a/drivers/net/hamradio/hdlcdrv.c b/drivers/net/hamradio/hdlcdrv.c
index e7413a643929..9e0058154ac3 100644
--- a/drivers/net/hamradio/hdlcdrv.c
+++ b/drivers/net/hamradio/hdlcdrv.c
@@ -597,7 +597,7 @@ static int hdlcdrv_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
case HDLCDRVCTL_DRIVERNAME:
if (s->ops && s->ops->drvname) {
- strncpy(bi.data.drivername, s->ops->drvname,
+ strlcpy(bi.data.drivername, s->ops->drvname,
sizeof(bi.data.drivername));
break;
}
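strncpy() does not NUL-terminate when the source fills the destination, so a maximum-length driver name would leave bi.data.drivername unterminated before it is copied to user space; strlcpy() always terminates, truncating if necessary. A small user-space demonstration of the failure mode:

	#include <stdio.h>
	#include <string.h>

	int main(void)
	{
		char dst[8];

		/* Fills all 8 bytes and leaves no room for the '\0' */
		strncpy(dst, "verylongname", sizeof(dst));

		/* dst is now not a valid C string; strlcpy()/strscpy()
		 * would have stored "verylon\0" instead.
		 */
		printf("last byte: 0x%02x\n", (unsigned char)dst[7]);
		return 0;
	}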
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
index 261e6e55a907..d17bbc75f5e7 100644
--- a/drivers/net/hyperv/netvsc_drv.c
+++ b/drivers/net/hyperv/netvsc_drv.c
@@ -10,6 +10,7 @@
#include <linux/init.h>
#include <linux/atomic.h>
+#include <linux/ethtool.h>
#include <linux/module.h>
#include <linux/highmem.h>
#include <linux/device.h>
diff --git a/drivers/net/hyperv/rndis_filter.c b/drivers/net/hyperv/rndis_filter.c
index b22e47bcfeca..2c2b55c32a7a 100644
--- a/drivers/net/hyperv/rndis_filter.c
+++ b/drivers/net/hyperv/rndis_filter.c
@@ -6,6 +6,7 @@
* Haiyang Zhang <haiyangz@microsoft.com>
* Hank Janssen <hjanssen@microsoft.com>
*/
+#include <linux/ethtool.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/wait.h>
diff --git a/drivers/net/ieee802154/ca8210.c b/drivers/net/ieee802154/ca8210.c
index 4eb64709d44c..3a2824f24caa 100644
--- a/drivers/net/ieee802154/ca8210.c
+++ b/drivers/net/ieee802154/ca8210.c
@@ -316,6 +316,7 @@ struct cas_control {
* struct ca8210_test - ca8210 test interface structure
* @ca8210_dfs_spi_int: pointer to the entry in the debug fs for this device
* @up_fifo: fifo for upstream messages
+ * @readq: read wait queue
*
* This structure stores all the data pertaining to the debug interface
*/
@@ -346,12 +347,12 @@ struct ca8210_test {
* @ca8210_is_awake: nonzero if ca8210 is initialised, ready for comms
* @sync_down: counts number of downstream synchronous commands
* @sync_up: counts number of upstream synchronous commands
- * @spi_transfer_complete completion object for a single spi_transfer
- * @sync_exchange_complete completion object for a complete synchronous API
- * exchange
- * @promiscuous whether the ca8210 is in promiscuous mode or not
+ * @spi_transfer_complete: completion object for a single spi_transfer
+ * @sync_exchange_complete: completion object for a complete synchronous API
+ * exchange
+ * @promiscuous: whether the ca8210 is in promiscuous mode or not
* @retries: records how many times the current pending spi
- * transfer has been retried
+ * transfer has been retried
*/
struct ca8210_priv {
struct spi_device *spi;
@@ -420,8 +421,8 @@ struct fulladdr {
/**
* union macaddr: generic MAC address container
- * @short_addr: 16-bit short address
- * @ieee_address: 64-bit extended address as LE byte array
+ * @short_address: 16-bit short address
+ * @ieee_address: 64-bit extended address as LE byte array
*
*/
union macaddr {
@@ -714,7 +715,7 @@ static void ca8210_mlme_reset_worker(struct work_struct *work)
/**
* ca8210_rx_done() - Calls various message dispatches responding to a received
* command
- * @arg: Pointer to the cas_control object for the relevant spi transfer
+ * @cas_ctl: Pointer to the cas_control object for the relevant spi transfer
*
* Presents a received SAP command from the ca8210 to the Cascoda EVBME, test
* interface and network driver.
@@ -1277,7 +1278,6 @@ static u8 tdme_channelinit(u8 channel, void *device_ref)
* @pib_attribute: Attribute Number
* @pib_attribute_length: Attribute length
* @pib_attribute_value: Pointer to Attribute Value
- * @device_ref: Nondescript pointer to target device
*
* Return: 802.15.4 status code of checks
*/
@@ -3046,7 +3046,7 @@ static void ca8210_test_interface_clear(struct ca8210_priv *priv)
/**
* ca8210_remove() - Shut down a ca8210 upon being disconnected
- * @priv: Pointer to private data structure
+ * @spi_device: Pointer to spi device data structure
*
* Return: 0 or linux error code
*/
@@ -3096,7 +3096,7 @@ static int ca8210_remove(struct spi_device *spi_device)
/**
* ca8210_probe() - Set up a connected ca8210 upon being detected by the system
- * @priv: Pointer to private data structure
+ * @spi_device: Pointer to spi device data structure
*
* Return: 0 or linux error code
*/
diff --git a/drivers/net/ifb.c b/drivers/net/ifb.c
index 7fe306e76281..fa63d4dee0ba 100644
--- a/drivers/net/ifb.c
+++ b/drivers/net/ifb.c
@@ -187,8 +187,7 @@ static const struct net_device_ops ifb_netdev_ops = {
};
#define IFB_FEATURES (NETIF_F_HW_CSUM | NETIF_F_SG | NETIF_F_FRAGLIST | \
- NETIF_F_TSO_ECN | NETIF_F_TSO | NETIF_F_TSO6 | \
- NETIF_F_GSO_ENCAP_ALL | \
+ NETIF_F_GSO_SOFTWARE | NETIF_F_GSO_ENCAP_ALL | \
NETIF_F_HIGHDMA | NETIF_F_HW_VLAN_CTAG_TX | \
NETIF_F_HW_VLAN_STAG_TX)
diff --git a/drivers/net/ipa/gsi.c b/drivers/net/ipa/gsi.c
index 6bfac1efe037..c4795249719d 100644
--- a/drivers/net/ipa/gsi.c
+++ b/drivers/net/ipa/gsi.c
@@ -21,6 +21,7 @@
#include "gsi_trans.h"
#include "ipa_gsi.h"
#include "ipa_data.h"
+#include "ipa_version.h"
/**
* DOC: The IPA Generic Software Interface
@@ -91,6 +92,7 @@
#define GSI_CMD_TIMEOUT 5 /* seconds */
#define GSI_CHANNEL_STOP_RX_RETRIES 10
+#define GSI_CHANNEL_MODEM_HALT_RETRIES 10
#define GSI_MHI_EVENT_ID_START 10 /* 1st reserved event id */
#define GSI_MHI_EVENT_ID_END 16 /* Last reserved event id */
@@ -108,62 +110,6 @@ struct gsi_event {
u8 chid;
};
-/* Hardware values from the error log register error code field */
-enum gsi_err_code {
- GSI_INVALID_TRE_ERR = 0x1,
- GSI_OUT_OF_BUFFERS_ERR = 0x2,
- GSI_OUT_OF_RESOURCES_ERR = 0x3,
- GSI_UNSUPPORTED_INTER_EE_OP_ERR = 0x4,
- GSI_EVT_RING_EMPTY_ERR = 0x5,
- GSI_NON_ALLOCATED_EVT_ACCESS_ERR = 0x6,
- GSI_HWO_1_ERR = 0x8,
-};
-
-/* Hardware values from the error log register error type field */
-enum gsi_err_type {
- GSI_ERR_TYPE_GLOB = 0x1,
- GSI_ERR_TYPE_CHAN = 0x2,
- GSI_ERR_TYPE_EVT = 0x3,
-};
-
-/* Hardware values used when programming an event ring */
-enum gsi_evt_chtype {
- GSI_EVT_CHTYPE_MHI_EV = 0x0,
- GSI_EVT_CHTYPE_XHCI_EV = 0x1,
- GSI_EVT_CHTYPE_GPI_EV = 0x2,
- GSI_EVT_CHTYPE_XDCI_EV = 0x3,
-};
-
-/* Hardware values used when programming a channel */
-enum gsi_channel_protocol {
- GSI_CHANNEL_PROTOCOL_MHI = 0x0,
- GSI_CHANNEL_PROTOCOL_XHCI = 0x1,
- GSI_CHANNEL_PROTOCOL_GPI = 0x2,
- GSI_CHANNEL_PROTOCOL_XDCI = 0x3,
-};
-
-/* Hardware values representing an event ring immediate command opcode */
-enum gsi_evt_cmd_opcode {
- GSI_EVT_ALLOCATE = 0x0,
- GSI_EVT_RESET = 0x9,
- GSI_EVT_DE_ALLOC = 0xa,
-};
-
-/* Hardware values representing a generic immediate command opcode */
-enum gsi_generic_cmd_opcode {
- GSI_GENERIC_HALT_CHANNEL = 0x1,
- GSI_GENERIC_ALLOCATE_CHANNEL = 0x2,
-};
-
-/* Hardware values representing a channel immediate command opcode */
-enum gsi_ch_cmd_opcode {
- GSI_CH_ALLOCATE = 0x0,
- GSI_CH_START = 0x1,
- GSI_CH_STOP = 0x2,
- GSI_CH_RESET = 0x9,
- GSI_CH_DE_ALLOC = 0xa,
-};
-
/** gsi_channel_scratch_gpi - GPI protocol scratch register
* @max_outstanding_tre:
* Defines the maximum number of TREs allowed in a single transaction
@@ -229,21 +175,76 @@ static u32 gsi_channel_id(struct gsi_channel *channel)
return channel - &channel->gsi->channel[0];
}
+/* Update the GSI IRQ type register with the cached value */
+static void gsi_irq_type_update(struct gsi *gsi, u32 val)
+{
+ gsi->type_enabled_bitmap = val;
+ iowrite32(val, gsi->virt + GSI_CNTXT_TYPE_IRQ_MSK_OFFSET);
+}
+
+static void gsi_irq_type_enable(struct gsi *gsi, enum gsi_irq_type_id type_id)
+{
+ gsi_irq_type_update(gsi, gsi->type_enabled_bitmap | BIT(type_id));
+}
+
+static void gsi_irq_type_disable(struct gsi *gsi, enum gsi_irq_type_id type_id)
+{
+ gsi_irq_type_update(gsi, gsi->type_enabled_bitmap & ~BIT(type_id));
+}
+
+/* Turn off all GSI interrupts initially */
+static void gsi_irq_setup(struct gsi *gsi)
+{
+ u32 adjust;
+
+ /* Disable all interrupt types */
+ gsi_irq_type_update(gsi, 0);
+
+ /* Clear all type-specific interrupt masks */
+ iowrite32(0, gsi->virt + GSI_CNTXT_SRC_CH_IRQ_MSK_OFFSET);
+ iowrite32(0, gsi->virt + GSI_CNTXT_SRC_EV_CH_IRQ_MSK_OFFSET);
+ iowrite32(0, gsi->virt + GSI_CNTXT_GLOB_IRQ_EN_OFFSET);
+ iowrite32(0, gsi->virt + GSI_CNTXT_SRC_IEOB_IRQ_MSK_OFFSET);
+
+ /* Reverse the offset adjustment for inter-EE register offsets */
+ adjust = gsi->version < IPA_VERSION_4_5 ? 0 : GSI_EE_REG_ADJUST;
+ iowrite32(0, gsi->virt + adjust + GSI_INTER_EE_SRC_CH_IRQ_OFFSET);
+ iowrite32(0, gsi->virt + adjust + GSI_INTER_EE_SRC_EV_CH_IRQ_OFFSET);
+
+ iowrite32(0, gsi->virt + GSI_CNTXT_GSI_IRQ_EN_OFFSET);
+}
+
+/* Turn off all GSI interrupts when we're all done */
+static void gsi_irq_teardown(struct gsi *gsi)
+{
+ /* Nothing to do */
+}
+
static void gsi_irq_ieob_enable(struct gsi *gsi, u32 evt_ring_id)
{
+ bool enable_ieob = !gsi->ieob_enabled_bitmap;
u32 val;
- gsi->event_enable_bitmap |= BIT(evt_ring_id);
- val = gsi->event_enable_bitmap;
+ gsi->ieob_enabled_bitmap |= BIT(evt_ring_id);
+ val = gsi->ieob_enabled_bitmap;
iowrite32(val, gsi->virt + GSI_CNTXT_SRC_IEOB_IRQ_MSK_OFFSET);
+
+ /* Enable the interrupt type if this is the first channel enabled */
+ if (enable_ieob)
+ gsi_irq_type_enable(gsi, GSI_IEOB);
}
static void gsi_irq_ieob_disable(struct gsi *gsi, u32 evt_ring_id)
{
u32 val;
- gsi->event_enable_bitmap &= ~BIT(evt_ring_id);
- val = gsi->event_enable_bitmap;
+ gsi->ieob_enabled_bitmap &= ~BIT(evt_ring_id);
+
+ /* Disable the interrupt type if this was the last enabled channel */
+ if (!gsi->ieob_enabled_bitmap)
+ gsi_irq_type_disable(gsi, GSI_IEOB);
+
+ val = gsi->ieob_enabled_bitmap;
iowrite32(val, gsi->virt + GSI_CNTXT_SRC_IEOB_IRQ_MSK_OFFSET);
}
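The renamed ieob_enabled_bitmap doubles as a reference count: the IEOB interrupt type is switched on only when the first event ring enables its IEOB interrupt, and off again when the last ring disables it, keeping the top-level type mask consistent with the per-ring mask. The first-on/last-off idiom, distilled with hypothetical names (serialization by the caller assumed, as in the driver):

	static void src_irq_enable(struct ctl *c, unsigned int id)
	{
		bool first = !c->src_enabled;		/* 0 -> nonzero edge */

		c->src_enabled |= BIT(id);
		writel(c->src_enabled, c->base + SRC_MSK);
		if (first)				/* open top-level gate */
			writel(readl(c->base + TYPE_MSK) | BIT(SRC_TYPE),
			       c->base + TYPE_MSK);
	}

	static void src_irq_disable(struct ctl *c, unsigned int id)
	{
		c->src_enabled &= ~BIT(id);
		if (!c->src_enabled)			/* nonzero -> 0 edge */
			writel(readl(c->base + TYPE_MSK) & ~BIT(SRC_TYPE),
			       c->base + TYPE_MSK);
		writel(c->src_enabled, c->base + SRC_MSK);
	}

The gate is only touched on the 0-to-nonzero and nonzero-to-0 edges, so enabling a second ring costs a single mask-register write.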
@@ -252,38 +253,32 @@ static void gsi_irq_enable(struct gsi *gsi)
{
u32 val;
- /* We don't use inter-EE channel or event interrupts */
- val = GSI_CNTXT_TYPE_IRQ_MSK_ALL;
- val &= ~INTER_EE_CH_CTRL_FMASK;
- val &= ~INTER_EE_EV_CTRL_FMASK;
- iowrite32(val, gsi->virt + GSI_CNTXT_TYPE_IRQ_MSK_OFFSET);
-
- val = GENMASK(gsi->channel_count - 1, 0);
- iowrite32(val, gsi->virt + GSI_CNTXT_SRC_CH_IRQ_MSK_OFFSET);
-
- val = GENMASK(gsi->evt_ring_count - 1, 0);
- iowrite32(val, gsi->virt + GSI_CNTXT_SRC_EV_CH_IRQ_MSK_OFFSET);
-
- /* Each IEOB interrupt is enabled (later) as needed by channels */
- iowrite32(0, gsi->virt + GSI_CNTXT_SRC_IEOB_IRQ_MSK_OFFSET);
-
- val = GSI_CNTXT_GLOB_IRQ_ALL;
- iowrite32(val, gsi->virt + GSI_CNTXT_GLOB_IRQ_EN_OFFSET);
+ /* Global interrupts include hardware error reports. Enable
+ * that so we can at least report the error should it occur.
+ */
+ iowrite32(BIT(ERROR_INT), gsi->virt + GSI_CNTXT_GLOB_IRQ_EN_OFFSET);
+ gsi_irq_type_update(gsi, gsi->type_enabled_bitmap | BIT(GSI_GLOB_EE));
- /* Never enable GSI_BREAK_POINT */
- val = GSI_CNTXT_GSI_IRQ_ALL & ~BREAK_POINT_FMASK;
+ /* General GSI interrupts are reported to all EEs; if they occur
+ * they are unrecoverable (without reset). A breakpoint interrupt
+ * also exists, but we don't support that. We want to be notified
+ * of errors so we can report them, even if they can't be handled.
+ */
+ val = BIT(BUS_ERROR);
+ val |= BIT(CMD_FIFO_OVRFLOW);
+ val |= BIT(MCS_STACK_OVRFLOW);
iowrite32(val, gsi->virt + GSI_CNTXT_GSI_IRQ_EN_OFFSET);
+ gsi_irq_type_update(gsi, gsi->type_enabled_bitmap | BIT(GSI_GENERAL));
}
-/* Disable all GSI_interrupt types */
+/* Disable all GSI interrupt types */
static void gsi_irq_disable(struct gsi *gsi)
{
+ gsi_irq_type_update(gsi, 0);
+
+ /* Clear the type-specific interrupt masks set by gsi_irq_enable() */
iowrite32(0, gsi->virt + GSI_CNTXT_GSI_IRQ_EN_OFFSET);
iowrite32(0, gsi->virt + GSI_CNTXT_GLOB_IRQ_EN_OFFSET);
- iowrite32(0, gsi->virt + GSI_CNTXT_SRC_IEOB_IRQ_MSK_OFFSET);
- iowrite32(0, gsi->virt + GSI_CNTXT_SRC_EV_CH_IRQ_MSK_OFFSET);
- iowrite32(0, gsi->virt + GSI_CNTXT_SRC_CH_IRQ_MSK_OFFSET);
- iowrite32(0, gsi->virt + GSI_CNTXT_TYPE_IRQ_MSK_OFFSET);
}
/* Return the virtual address associated with a ring index */
@@ -337,13 +332,30 @@ static int evt_ring_command(struct gsi *gsi, u32 evt_ring_id,
struct gsi_evt_ring *evt_ring = &gsi->evt_ring[evt_ring_id];
struct completion *completion = &evt_ring->completion;
struct device *dev = gsi->dev;
+ bool success;
u32 val;
+ /* We only perform one event ring command at a time, and event
+ * control interrupts should only occur when such a command
+ * is issued here. Only permit *this* event ring to trigger
+ * an interrupt, and only enable the event control IRQ type
+ * when we expect it to occur.
+ */
+ val = BIT(evt_ring_id);
+ iowrite32(val, gsi->virt + GSI_CNTXT_SRC_EV_CH_IRQ_MSK_OFFSET);
+ gsi_irq_type_enable(gsi, GSI_EV_CTRL);
+
val = u32_encode_bits(evt_ring_id, EV_CHID_FMASK);
val |= u32_encode_bits(opcode, EV_OPCODE_FMASK);
- if (gsi_command(gsi, GSI_EV_CH_CMD_OFFSET, val, completion))
- return 0; /* Success! */
+ success = gsi_command(gsi, GSI_EV_CH_CMD_OFFSET, val, completion);
+
+ /* Disable the interrupt again */
+ gsi_irq_type_disable(gsi, GSI_EV_CTRL);
+ iowrite32(0, gsi->virt + GSI_CNTXT_SRC_EV_CH_IRQ_MSK_OFFSET);
+
+ if (success)
+ return 0;
dev_err(dev, "GSI command %u for event ring %u timed out, state %u\n",
opcode, evt_ring_id, evt_ring->state);
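Rather than leaving event-ring (and, below, channel) control interrupts enabled at all times, each command now opens a narrow window: unmask exactly the one ring or channel being operated on, enable the IRQ type, issue the command, then mask both again. The shape of that window, sketched with hypothetical register names (one command in flight at a time, as the driver's mutex guarantees):

	static int cmd_with_irq_window(struct gsi *gsi, u32 id, u32 reg, u32 val,
				       struct completion *completion)
	{
		bool done;

		/* Unmask only the target, only for the command's duration */
		iowrite32(BIT(id), gsi->virt + SRC_IRQ_MSK_OFFSET);
		gsi_irq_type_enable(gsi, SRC_IRQ_TYPE);

		done = gsi_command(gsi, reg, val, completion);

		gsi_irq_type_disable(gsi, SRC_IRQ_TYPE);
		iowrite32(0, gsi->virt + SRC_IRQ_MSK_OFFSET);

		return done ? 0 : -ETIMEDOUT;
	}

A control interrupt arriving outside such a window is therefore masked at the source, not merely ignored by the handler.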
@@ -360,15 +372,15 @@ static int gsi_evt_ring_alloc_command(struct gsi *gsi, u32 evt_ring_id)
/* Get initial event ring state */
evt_ring->state = gsi_evt_ring_state(gsi, evt_ring_id);
if (evt_ring->state != GSI_EVT_RING_STATE_NOT_ALLOCATED) {
- dev_err(gsi->dev, "bad event ring state %u before alloc\n",
- evt_ring->state);
+ dev_err(gsi->dev, "event ring %u bad state %u before alloc\n",
+ evt_ring_id, evt_ring->state);
return -EINVAL;
}
ret = evt_ring_command(gsi, evt_ring_id, GSI_EVT_ALLOCATE);
if (!ret && evt_ring->state != GSI_EVT_RING_STATE_ALLOCATED) {
- dev_err(gsi->dev, "bad event ring state %u after alloc\n",
- evt_ring->state);
+ dev_err(gsi->dev, "event ring %u bad state %u after alloc\n",
+ evt_ring_id, evt_ring->state);
ret = -EIO;
}
@@ -384,15 +396,15 @@ static void gsi_evt_ring_reset_command(struct gsi *gsi, u32 evt_ring_id)
if (state != GSI_EVT_RING_STATE_ALLOCATED &&
state != GSI_EVT_RING_STATE_ERROR) {
- dev_err(gsi->dev, "bad event ring state %u before reset\n",
- evt_ring->state);
+ dev_err(gsi->dev, "event ring %u bad state %u before reset\n",
+ evt_ring_id, evt_ring->state);
return;
}
ret = evt_ring_command(gsi, evt_ring_id, GSI_EVT_RESET);
if (!ret && evt_ring->state != GSI_EVT_RING_STATE_ALLOCATED)
- dev_err(gsi->dev, "bad event ring state %u after reset\n",
- evt_ring->state);
+ dev_err(gsi->dev, "event ring %u bad state %u after reset\n",
+ evt_ring_id, evt_ring->state);
}
/* Issue a hardware de-allocation request for an allocated event ring */
@@ -402,15 +414,15 @@ static void gsi_evt_ring_de_alloc_command(struct gsi *gsi, u32 evt_ring_id)
int ret;
if (evt_ring->state != GSI_EVT_RING_STATE_ALLOCATED) {
- dev_err(gsi->dev, "bad event ring state %u before dealloc\n",
- evt_ring->state);
+ dev_err(gsi->dev, "event ring %u state %u before dealloc\n",
+ evt_ring_id, evt_ring->state);
return;
}
ret = evt_ring_command(gsi, evt_ring_id, GSI_EVT_DE_ALLOC);
if (!ret && evt_ring->state != GSI_EVT_RING_STATE_NOT_ALLOCATED)
- dev_err(gsi->dev, "bad event ring state %u after dealloc\n",
- evt_ring->state);
+ dev_err(gsi->dev, "event ring %u bad state %u after dealloc\n",
+ evt_ring_id, evt_ring->state);
}
/* Fetch the current state of a channel from hardware */
@@ -433,13 +445,29 @@ gsi_channel_command(struct gsi_channel *channel, enum gsi_ch_cmd_opcode opcode)
u32 channel_id = gsi_channel_id(channel);
struct gsi *gsi = channel->gsi;
struct device *dev = gsi->dev;
+ bool success;
u32 val;
+ /* We only perform one channel command at a time, and channel
+ * control interrupts should only occur when such a command is
+ * issued here. So we only permit *this* channel to trigger
+ * an interrupt and only enable the channel control IRQ type
+ * when we expect it to occur.
+ */
+ val = BIT(channel_id);
+ iowrite32(val, gsi->virt + GSI_CNTXT_SRC_CH_IRQ_MSK_OFFSET);
+ gsi_irq_type_enable(gsi, GSI_CH_CTRL);
+
val = u32_encode_bits(channel_id, CH_CHID_FMASK);
val |= u32_encode_bits(opcode, CH_OPCODE_FMASK);
+ success = gsi_command(gsi, GSI_CH_CMD_OFFSET, val, completion);
- if (gsi_command(gsi, GSI_CH_CMD_OFFSET, val, completion))
- return 0; /* Success! */
+ /* Disable the interrupt again */
+ gsi_irq_type_disable(gsi, GSI_CH_CTRL);
+ iowrite32(0, gsi->virt + GSI_CNTXT_SRC_CH_IRQ_MSK_OFFSET);
+
+ if (success)
+ return 0;
dev_err(dev, "GSI command %u for channel %u timed out, state %u\n",
opcode, channel_id, gsi_channel_state(channel));
@@ -458,7 +486,8 @@ static int gsi_channel_alloc_command(struct gsi *gsi, u32 channel_id)
/* Get initial channel state */
state = gsi_channel_state(channel);
if (state != GSI_CHANNEL_STATE_NOT_ALLOCATED) {
- dev_err(dev, "bad channel state %u before alloc\n", state);
+ dev_err(dev, "channel %u bad state %u before alloc\n",
+ channel_id, state);
return -EINVAL;
}
@@ -467,7 +496,8 @@ static int gsi_channel_alloc_command(struct gsi *gsi, u32 channel_id)
/* Channel state will normally have been updated */
state = gsi_channel_state(channel);
if (!ret && state != GSI_CHANNEL_STATE_ALLOCATED) {
- dev_err(dev, "bad channel state %u after alloc\n", state);
+ dev_err(dev, "channel %u bad state %u after alloc\n",
+ channel_id, state);
ret = -EIO;
}
@@ -484,7 +514,8 @@ static int gsi_channel_start_command(struct gsi_channel *channel)
state = gsi_channel_state(channel);
if (state != GSI_CHANNEL_STATE_ALLOCATED &&
state != GSI_CHANNEL_STATE_STOPPED) {
- dev_err(dev, "bad channel state %u before start\n", state);
+ dev_err(dev, "channel %u bad state %u before start\n",
+ gsi_channel_id(channel), state);
return -EINVAL;
}
@@ -493,7 +524,8 @@ static int gsi_channel_start_command(struct gsi_channel *channel)
/* Channel state will normally have been updated */
state = gsi_channel_state(channel);
if (!ret && state != GSI_CHANNEL_STATE_STARTED) {
- dev_err(dev, "bad channel state %u after start\n", state);
+ dev_err(dev, "channel %u bad state %u after start\n",
+ gsi_channel_id(channel), state);
ret = -EIO;
}
@@ -517,7 +549,8 @@ static int gsi_channel_stop_command(struct gsi_channel *channel)
if (state != GSI_CHANNEL_STATE_STARTED &&
state != GSI_CHANNEL_STATE_STOP_IN_PROC) {
- dev_err(dev, "bad channel state %u before stop\n", state);
+ dev_err(dev, "channel %u bad state %u before stop\n",
+ gsi_channel_id(channel), state);
return -EINVAL;
}
@@ -532,7 +565,8 @@ static int gsi_channel_stop_command(struct gsi_channel *channel)
if (state == GSI_CHANNEL_STATE_STOP_IN_PROC)
return -EAGAIN;
- dev_err(dev, "bad channel state %u after stop\n", state);
+ dev_err(dev, "channel %u bad state %u after stop\n",
+ gsi_channel_id(channel), state);
return -EIO;
}
@@ -549,7 +583,10 @@ static void gsi_channel_reset_command(struct gsi_channel *channel)
state = gsi_channel_state(channel);
if (state != GSI_CHANNEL_STATE_STOPPED &&
state != GSI_CHANNEL_STATE_ERROR) {
- dev_err(dev, "bad channel state %u before reset\n", state);
+ /* No need to reset a channel already in ALLOCATED state */
+ if (state != GSI_CHANNEL_STATE_ALLOCATED)
+ dev_err(dev, "channel %u bad state %u before reset\n",
+ gsi_channel_id(channel), state);
return;
}
@@ -558,7 +595,8 @@ static void gsi_channel_reset_command(struct gsi_channel *channel)
/* Channel state will normally have been updated */
state = gsi_channel_state(channel);
if (!ret && state != GSI_CHANNEL_STATE_ALLOCATED)
- dev_err(dev, "bad channel state %u after reset\n", state);
+ dev_err(dev, "channel %u bad state %u after reset\n",
+ gsi_channel_id(channel), state);
}
/* Deallocate an ALLOCATED GSI channel */
@@ -571,7 +609,8 @@ static void gsi_channel_de_alloc_command(struct gsi *gsi, u32 channel_id)
state = gsi_channel_state(channel);
if (state != GSI_CHANNEL_STATE_ALLOCATED) {
- dev_err(dev, "bad channel state %u before dealloc\n", state);
+ dev_err(dev, "channel %u bad state %u before dealloc\n",
+ channel_id, state);
return;
}
@@ -580,7 +619,8 @@ static void gsi_channel_de_alloc_command(struct gsi *gsi, u32 channel_id)
/* Channel state will normally have been updated */
state = gsi_channel_state(channel);
if (!ret && state != GSI_CHANNEL_STATE_NOT_ALLOCATED)
- dev_err(dev, "bad channel state %u after dealloc\n", state);
+ dev_err(dev, "channel %u bad state %u after dealloc\n",
+ channel_id, state);
}
/* Ring an event ring doorbell, reporting the last entry processed by the AP.
@@ -607,7 +647,8 @@ static void gsi_evt_ring_program(struct gsi *gsi, u32 evt_ring_id)
size_t size = evt_ring->ring.count * GSI_RING_ELEMENT_SIZE;
u32 val;
- val = u32_encode_bits(GSI_EVT_CHTYPE_GPI_EV, EV_CHTYPE_FMASK);
+ /* We program all event rings as GPI type/protocol */
+ val = u32_encode_bits(GSI_CHANNEL_TYPE_GPI, EV_CHTYPE_FMASK);
val |= EV_INTYPE_FMASK;
val |= u32_encode_bits(GSI_RING_ELEMENT_SIZE, EV_ELEMENT_SIZE_FMASK);
iowrite32(val, gsi->virt + GSI_EV_CH_E_CNTXT_0_OFFSET(evt_ring_id));
@@ -714,8 +755,8 @@ static void gsi_channel_program(struct gsi_channel *channel, bool doorbell)
/* Arbitrarily pick TRE 0 as the first channel element to use */
channel->tre_ring.index = 0;
- /* We program all channels to use GPI protocol */
- val = u32_encode_bits(GSI_CHANNEL_PROTOCOL_GPI, CHTYPE_PROTOCOL_FMASK);
+ /* We program all channels as GPI type/protocol */
+ val = u32_encode_bits(GSI_CHANNEL_TYPE_GPI, CHTYPE_PROTOCOL_FMASK);
if (channel->toward_ipa)
val |= CHTYPE_DIR_FMASK;
val |= u32_encode_bits(channel->evt_ring_id, ERINDEX_FMASK);
@@ -742,12 +783,21 @@ static void gsi_channel_program(struct gsi_channel *channel, bool doorbell)
/* Max prefetch is 1 segment (do not set MAX_PREFETCH_FMASK) */
- /* Enable the doorbell engine if requested */
- if (doorbell)
+ /* We enable the doorbell engine for IPA v3.5.1 */
+ if (gsi->version == IPA_VERSION_3_5_1 && doorbell)
val |= USE_DB_ENG_FMASK;
- if (!channel->use_prefetch)
- val |= USE_ESCAPE_BUF_ONLY_FMASK;
+ /* v4.0 introduces an escape buffer for prefetch. We use it
+ * on all but the AP command channel.
+ */
+ if (gsi->version != IPA_VERSION_3_5_1 && !channel->command) {
+ /* If not otherwise set, prefetch buffers are used */
+ if (gsi->version < IPA_VERSION_4_5)
+ val |= USE_ESCAPE_BUF_ONLY_FMASK;
+ else
+ val |= u32_encode_bits(GSI_ESCAPE_BUF_ONLY,
+ PREFETCH_MODE_FMASK);
+ }
iowrite32(val, gsi->virt + GSI_CH_C_QOS_OFFSET(channel_id));
@@ -829,8 +879,8 @@ int gsi_channel_stop(struct gsi *gsi, u32 channel_id)
return ret;
}
-/* Reset and reconfigure a channel (possibly leaving doorbell disabled) */
-void gsi_channel_reset(struct gsi *gsi, u32 channel_id, bool legacy)
+/* Reset and reconfigure a channel, (possibly) enabling the doorbell engine */
+void gsi_channel_reset(struct gsi *gsi, u32 channel_id, bool doorbell)
{
struct gsi_channel *channel = &gsi->channel[channel_id];
@@ -838,10 +888,10 @@ void gsi_channel_reset(struct gsi *gsi, u32 channel_id, bool legacy)
gsi_channel_reset_command(channel);
/* Due to a hardware quirk we may need to reset RX channels twice. */
- if (legacy && !channel->toward_ipa)
+ if (gsi->version == IPA_VERSION_3_5_1 && !channel->toward_ipa)
gsi_channel_reset_command(channel);
- gsi_channel_program(channel, legacy);
+ gsi_channel_program(channel, doorbell);
gsi_channel_trans_cancel_pending(channel);
mutex_unlock(&gsi->mutex);
@@ -989,7 +1039,7 @@ static void gsi_isr_evt_ctrl(struct gsi *gsi)
static void
gsi_isr_glob_chan_err(struct gsi *gsi, u32 err_ee, u32 channel_id, u32 code)
{
- if (code == GSI_OUT_OF_RESOURCES_ERR) {
+ if (code == GSI_OUT_OF_RESOURCES) {
dev_err(gsi->dev, "channel %u out of resources\n", channel_id);
complete(&gsi->channel[channel_id].completion);
return;
@@ -1004,7 +1054,7 @@ gsi_isr_glob_chan_err(struct gsi *gsi, u32 err_ee, u32 channel_id, u32 code)
static void
gsi_isr_glob_evt_err(struct gsi *gsi, u32 err_ee, u32 evt_ring_id, u32 code)
{
- if (code == GSI_OUT_OF_RESOURCES_ERR) {
+ if (code == GSI_OUT_OF_RESOURCES) {
struct gsi_evt_ring *evt_ring = &gsi->evt_ring[evt_ring_id];
u32 channel_id = gsi_channel_id(evt_ring->channel);
@@ -1034,8 +1084,8 @@ static void gsi_isr_glob_err(struct gsi *gsi)
iowrite32(~0, gsi->virt + GSI_ERROR_LOG_CLR_OFFSET);
ee = u32_get_bits(val, ERR_EE_FMASK);
- which = u32_get_bits(val, ERR_VIRT_IDX_FMASK);
type = u32_get_bits(val, ERR_TYPE_FMASK);
+ which = u32_get_bits(val, ERR_VIRT_IDX_FMASK);
code = u32_get_bits(val, ERR_CODE_FMASK);
if (type == GSI_ERR_TYPE_CHAN)
@@ -1052,10 +1102,38 @@ static void gsi_isr_gp_int1(struct gsi *gsi)
u32 result;
u32 val;
+ /* This interrupt is used to handle completions of the two GENERIC
+ * GSI commands. We use these to allocate and halt channels on
+ * the modem's behalf due to a hardware quirk on IPA v4.2. Once
+ * allocated, the modem "owns" these channels, and as a result we
+ * have no way of knowing the channel's state at any given time.
+ *
+ * It is recommended that we halt the modem channels we allocated
+ * when shutting down, but it's possible the channel isn't running
+ * at the time we issue the HALT command. We'll get an error in
+ * that case, but it's harmless (the channel is already halted).
+ *
+ * For this reason, we silently ignore a CHANNEL_NOT_RUNNING error
+ * if we receive it.
+ */
val = ioread32(gsi->virt + GSI_CNTXT_SCRATCH_0_OFFSET);
result = u32_get_bits(val, GENERIC_EE_RESULT_FMASK);
- if (result != GENERIC_EE_SUCCESS_FVAL)
+
+ switch (result) {
+ case GENERIC_EE_SUCCESS:
+ case GENERIC_EE_CHANNEL_NOT_RUNNING:
+ gsi->result = 0;
+ break;
+
+ case GENERIC_EE_RETRY:
+ gsi->result = -EAGAIN;
+ break;
+
+ default:
dev_err(gsi->dev, "global INT1 generic result %u\n", result);
+ gsi->result = -EIO;
+ break;
+ }
complete(&gsi->completion);
}
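Recording a decoded errno in gsi->result is what lets the sleeping caller in gsi_generic_command() distinguish success, retry, and hard failure once the completion fires. The handshake, reduced to its two halves (decode_result() stands in for the switch above; the completion API is the real kernel one):

	/* ISR side: record the decoded result, then wake the waiter */
	static void gp_int1_isr(struct gsi *gsi, u32 result)
	{
		gsi->result = decode_result(result);	/* 0, -EAGAIN or -EIO */
		complete(&gsi->completion);
	}

	/* Waiter side: gsi->result is stable once the completion fires */
	static int generic_command_wait(struct gsi *gsi)
	{
		if (!wait_for_completion_timeout(&gsi->completion, HZ))
			return -ETIMEDOUT;
		return gsi->result;
	}

The -EAGAIN case feeds the bounded retry loop added to gsi_modem_channel_halt() further down.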
@@ -1067,15 +1145,15 @@ static void gsi_isr_glob_ee(struct gsi *gsi)
val = ioread32(gsi->virt + GSI_CNTXT_GLOB_IRQ_STTS_OFFSET);
- if (val & ERROR_INT_FMASK)
+ if (val & BIT(ERROR_INT))
gsi_isr_glob_err(gsi);
iowrite32(val, gsi->virt + GSI_CNTXT_GLOB_IRQ_CLR_OFFSET);
- val &= ~ERROR_INT_FMASK;
+ val &= ~BIT(ERROR_INT);
- if (val & GP_INT1_FMASK) {
- val ^= GP_INT1_FMASK;
+ if (val & BIT(GP_INT1)) {
+ val ^= BIT(GP_INT1);
gsi_isr_gp_int1(gsi);
}
@@ -1110,8 +1188,7 @@ static void gsi_isr_general(struct gsi *gsi)
val = ioread32(gsi->virt + GSI_CNTXT_GSI_IRQ_STTS_OFFSET);
iowrite32(val, gsi->virt + GSI_CNTXT_GSI_IRQ_CLR_OFFSET);
- if (val)
- dev_err(dev, "unexpected general interrupt 0x%08x\n", val);
+ dev_err(dev, "unexpected general interrupt 0x%08x\n", val);
}
/**
@@ -1128,6 +1205,7 @@ static irqreturn_t gsi_isr(int irq, void *dev_id)
u32 intr_mask;
u32 cnt = 0;
+ /* enum gsi_irq_type_id defines GSI interrupt types */
while ((intr_mask = ioread32(gsi->virt + GSI_CNTXT_TYPE_IRQ_OFFSET))) {
/* intr_mask contains bitmask of pending GSI interrupts */
do {
@@ -1136,19 +1214,19 @@ static irqreturn_t gsi_isr(int irq, void *dev_id)
intr_mask ^= gsi_intr;
switch (gsi_intr) {
- case CH_CTRL_FMASK:
+ case BIT(GSI_CH_CTRL):
gsi_isr_chan_ctrl(gsi);
break;
- case EV_CTRL_FMASK:
+ case BIT(GSI_EV_CTRL):
gsi_isr_evt_ctrl(gsi);
break;
- case GLOB_EE_FMASK:
+ case BIT(GSI_GLOB_EE):
gsi_isr_glob_ee(gsi);
break;
- case IEOB_FMASK:
+ case BIT(GSI_IEOB):
gsi_isr_ieob(gsi);
break;
- case GENERAL_FMASK:
+ case BIT(GSI_GENERAL):
gsi_isr_general(gsi);
break;
default:
@@ -1168,6 +1246,34 @@ static irqreturn_t gsi_isr(int irq, void *dev_id)
return IRQ_HANDLED;
}
+static int gsi_irq_init(struct gsi *gsi, struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ unsigned int irq;
+ int ret;
+
+ ret = platform_get_irq_byname(pdev, "gsi");
+ if (ret <= 0) {
+ dev_err(dev, "DT error %d getting \"gsi\" IRQ property\n", ret);
+ return ret ? : -EINVAL;
+ }
+ irq = ret;
+
+ ret = request_irq(irq, gsi_isr, 0, "gsi", gsi);
+ if (ret) {
+ dev_err(dev, "error %d requesting \"gsi\" IRQ\n", ret);
+ return ret;
+ }
+ gsi->irq = irq;
+
+ return 0;
+}
+
+static void gsi_irq_exit(struct gsi *gsi)
+{
+ free_irq(gsi->irq, gsi);
+}
+
/* Return the transaction associated with a transfer completion event */
static struct gsi_trans *gsi_event_trans(struct gsi_channel *channel,
struct gsi_event *event)
@@ -1452,8 +1558,7 @@ static void gsi_evt_ring_teardown(struct gsi *gsi)
}
/* Setup function for a single channel */
-static int gsi_channel_setup_one(struct gsi *gsi, u32 channel_id,
- bool legacy)
+static int gsi_channel_setup_one(struct gsi *gsi, u32 channel_id)
{
struct gsi_channel *channel = &gsi->channel[channel_id];
u32 evt_ring_id = channel->evt_ring_id;
@@ -1472,7 +1577,7 @@ static int gsi_channel_setup_one(struct gsi *gsi, u32 channel_id,
if (ret)
goto err_evt_ring_de_alloc;
- gsi_channel_program(channel, legacy);
+ gsi_channel_program(channel, true);
if (channel->toward_ipa)
netif_tx_napi_add(&gsi->dummy_dev, &channel->napi,
@@ -1511,8 +1616,19 @@ static int gsi_generic_command(struct gsi *gsi, u32 channel_id,
enum gsi_generic_cmd_opcode opcode)
{
struct completion *completion = &gsi->completion;
+ bool success;
u32 val;
+ /* The error global interrupt type is always enabled (until we
+ * tear it down), so we won't change that. A generic EE command
+ * completes with a GSI global interrupt of type GP_INT1. We
+ * only perform one generic command at a time (to allocate or
+ * halt a modem channel) and only from this function. So we
+ * enable the GP_INT1 IRQ type here while we're expecting it.
+ */
+ val = BIT(ERROR_INT) | BIT(GP_INT1);
+ iowrite32(val, gsi->virt + GSI_CNTXT_GLOB_IRQ_EN_OFFSET);
+
/* First zero the result code field */
val = ioread32(gsi->virt + GSI_CNTXT_SCRATCH_0_OFFSET);
val &= ~GENERIC_EE_RESULT_FMASK;
@@ -1523,8 +1639,13 @@ static int gsi_generic_command(struct gsi *gsi, u32 channel_id,
val |= u32_encode_bits(channel_id, GENERIC_CHID_FMASK);
val |= u32_encode_bits(GSI_EE_MODEM, GENERIC_EE_FMASK);
- if (gsi_command(gsi, GSI_GENERIC_CMD_OFFSET, val, completion))
- return 0; /* Success! */
+ success = gsi_command(gsi, GSI_GENERIC_CMD_OFFSET, val, completion);
+
+ /* Disable the GP_INT1 IRQ type again */
+ iowrite32(BIT(ERROR_INT), gsi->virt + GSI_CNTXT_GLOB_IRQ_EN_OFFSET);
+
+ if (success)
+ return gsi->result;
dev_err(gsi->dev, "GSI generic command %u to channel %u timed out\n",
opcode, channel_id);
@@ -1540,16 +1661,21 @@ static int gsi_modem_channel_alloc(struct gsi *gsi, u32 channel_id)
static void gsi_modem_channel_halt(struct gsi *gsi, u32 channel_id)
{
+ u32 retries = GSI_CHANNEL_MODEM_HALT_RETRIES;
int ret;
- ret = gsi_generic_command(gsi, channel_id, GSI_GENERIC_HALT_CHANNEL);
+ do
+ ret = gsi_generic_command(gsi, channel_id,
+ GSI_GENERIC_HALT_CHANNEL);
+ while (ret == -EAGAIN && retries--);
+
if (ret)
dev_err(gsi->dev, "error %d halting modem channel %u\n",
ret, channel_id);
}
/* Setup function for channels */
-static int gsi_channel_setup(struct gsi *gsi, bool legacy)
+static int gsi_channel_setup(struct gsi *gsi)
{
u32 channel_id = 0;
u32 mask;
@@ -1561,7 +1687,7 @@ static int gsi_channel_setup(struct gsi *gsi, bool legacy)
mutex_lock(&gsi->mutex);
do {
- ret = gsi_channel_setup_one(gsi, channel_id, legacy);
+ ret = gsi_channel_setup_one(gsi, channel_id);
if (ret)
goto err_unwind;
} while (++channel_id < gsi->channel_count);
@@ -1647,10 +1773,11 @@ static void gsi_channel_teardown(struct gsi *gsi)
}
/* Setup function for GSI. GSI firmware must be loaded and initialized */
-int gsi_setup(struct gsi *gsi, bool legacy)
+int gsi_setup(struct gsi *gsi)
{
struct device *dev = gsi->dev;
u32 val;
+ int ret;
/* Here is where we first touch the GSI hardware */
val = ioread32(gsi->virt + GSI_GSI_STATUS_OFFSET);
@@ -1659,6 +1786,8 @@ int gsi_setup(struct gsi *gsi, bool legacy)
return -EIO;
}
+ gsi_irq_setup(gsi);
+
val = ioread32(gsi->virt + GSI_GSI_HW_PARAM_2_OFFSET);
gsi->channel_count = u32_get_bits(val, NUM_CH_PER_EE_FMASK);
@@ -1691,13 +1820,18 @@ int gsi_setup(struct gsi *gsi, bool legacy)
/* Writing 1 indicates IRQ interrupts; 0 would be MSI */
iowrite32(1, gsi->virt + GSI_CNTXT_INTSET_OFFSET);
- return gsi_channel_setup(gsi, legacy);
+ ret = gsi_channel_setup(gsi);
+ if (ret)
+ gsi_irq_teardown(gsi);
+
+ return ret;
}
/* Inverse of gsi_setup() */
void gsi_teardown(struct gsi *gsi)
{
gsi_channel_teardown(gsi);
+ gsi_irq_teardown(gsi);
}
/* Initialize a channel's event ring */
@@ -1745,7 +1879,7 @@ static void gsi_evt_ring_init(struct gsi *gsi)
u32 evt_ring_id = 0;
gsi->event_bitmap = gsi_event_bitmap_init(GSI_EVT_RING_COUNT_MAX);
- gsi->event_enable_bitmap = 0;
+ gsi->ieob_enabled_bitmap = 0;
do
init_completion(&gsi->evt_ring[evt_ring_id].completion);
while (++evt_ring_id < GSI_EVT_RING_COUNT_MAX);
@@ -1814,7 +1948,7 @@ static bool gsi_channel_data_valid(struct gsi *gsi,
/* Init function for a single channel */
static int gsi_channel_init_one(struct gsi *gsi,
const struct ipa_gsi_endpoint_data *data,
- bool command, bool prefetch)
+ bool command)
{
struct gsi_channel *channel;
u32 tre_count;
@@ -1838,7 +1972,6 @@ static int gsi_channel_init_one(struct gsi *gsi,
channel->gsi = gsi;
channel->toward_ipa = data->toward_ipa;
channel->command = command;
- channel->use_prefetch = command && prefetch;
channel->tlv_count = data->channel.tlv_count;
channel->tre_count = tre_count;
channel->event_count = data->channel.event_count;
@@ -1892,13 +2025,16 @@ static void gsi_channel_exit_one(struct gsi_channel *channel)
}
/* Init function for channels */
-static int gsi_channel_init(struct gsi *gsi, bool prefetch, u32 count,
- const struct ipa_gsi_endpoint_data *data,
- bool modem_alloc)
+static int gsi_channel_init(struct gsi *gsi, u32 count,
+ const struct ipa_gsi_endpoint_data *data)
{
+ bool modem_alloc;
int ret = 0;
u32 i;
+ /* IPA v4.2 requires the AP to allocate channels for the modem */
+ modem_alloc = gsi->version == IPA_VERSION_4_2;
+
gsi_evt_ring_init(gsi);
/* The endpoint data array is indexed by endpoint name */
@@ -1916,7 +2052,7 @@ static int gsi_channel_init(struct gsi *gsi, bool prefetch, u32 count,
continue;
}
- ret = gsi_channel_init_one(gsi, &data[i], command, prefetch);
+ ret = gsi_channel_init_one(gsi, &data[i], command);
if (ret)
goto err_unwind;
}
@@ -1952,19 +2088,20 @@ static void gsi_channel_exit(struct gsi *gsi)
}
/* Init function for GSI. GSI hardware does not need to be "ready" */
-int gsi_init(struct gsi *gsi, struct platform_device *pdev, bool prefetch,
- u32 count, const struct ipa_gsi_endpoint_data *data,
- bool modem_alloc)
+int gsi_init(struct gsi *gsi, struct platform_device *pdev,
+ enum ipa_version version, u32 count,
+ const struct ipa_gsi_endpoint_data *data)
{
struct device *dev = &pdev->dev;
struct resource *res;
resource_size_t size;
- unsigned int irq;
+ u32 adjust;
int ret;
gsi_validate_build();
gsi->dev = dev;
+ gsi->version = version;
/* The GSI layer performs NAPI on all endpoints. NAPI requires a
* network device structure, but the GSI layer does not have one,
@@ -1972,55 +2109,53 @@ int gsi_init(struct gsi *gsi, struct platform_device *pdev, bool prefetch,
*/
init_dummy_netdev(&gsi->dummy_dev);
- ret = platform_get_irq_byname(pdev, "gsi");
- if (ret <= 0) {
- dev_err(dev, "DT error %d getting \"gsi\" IRQ property\n", ret);
- return ret ? : -EINVAL;
- }
- irq = ret;
-
- ret = request_irq(irq, gsi_isr, 0, "gsi", gsi);
- if (ret) {
- dev_err(dev, "error %d requesting \"gsi\" IRQ\n", ret);
- return ret;
- }
- gsi->irq = irq;
-
/* Get GSI memory range and map it */
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "gsi");
if (!res) {
dev_err(dev, "DT error getting \"gsi\" memory property\n");
- ret = -ENODEV;
- goto err_free_irq;
+ return -ENODEV;
}
size = resource_size(res);
if (res->start > U32_MAX || size > U32_MAX - res->start) {
dev_err(dev, "DT memory resource \"gsi\" out of range\n");
- ret = -EINVAL;
- goto err_free_irq;
+ return -EINVAL;
+ }
+
+ /* Make sure we can make our pointer adjustment if necessary */
+ adjust = gsi->version < IPA_VERSION_4_5 ? 0 : GSI_EE_REG_ADJUST;
+ if (res->start < adjust) {
+ dev_err(dev, "DT memory resource \"gsi\" too low (< %u)\n",
+ adjust);
+ return -EINVAL;
}
gsi->virt = ioremap(res->start, size);
if (!gsi->virt) {
dev_err(dev, "unable to remap \"gsi\" memory\n");
- ret = -ENOMEM;
- goto err_free_irq;
+ return -ENOMEM;
}
+ /* Adjust register range pointer downward for newer IPA versions */
+ gsi->virt -= adjust;
- ret = gsi_channel_init(gsi, prefetch, count, data, modem_alloc);
+ init_completion(&gsi->completion);
+
+ ret = gsi_irq_init(gsi, pdev);
if (ret)
goto err_iounmap;
+ ret = gsi_channel_init(gsi, count, data);
+ if (ret)
+ goto err_irq_exit;
+
mutex_init(&gsi->mutex);
- init_completion(&gsi->completion);
return 0;
+err_irq_exit:
+ gsi_irq_exit(gsi);
err_iounmap:
iounmap(gsi->virt);
-err_free_irq:
- free_irq(gsi->irq, gsi);
return ret;
}
@@ -2030,7 +2165,7 @@ void gsi_exit(struct gsi *gsi)
{
mutex_destroy(&gsi->mutex);
gsi_channel_exit(gsi);
- free_irq(gsi->irq, gsi);
+ gsi_irq_exit(gsi);
iounmap(gsi->virt);
}
diff --git a/drivers/net/ipa/gsi.h b/drivers/net/ipa/gsi.h
index 3f9f29d531c4..96c9aed397aa 100644
--- a/drivers/net/ipa/gsi.h
+++ b/drivers/net/ipa/gsi.h
@@ -13,6 +13,8 @@
#include <linux/platform_device.h>
#include <linux/netdevice.h>
+#include "ipa_version.h"
+
/* Maximum number of channels and event rings supported by the driver */
#define GSI_CHANNEL_COUNT_MAX 17
#define GSI_EVT_RING_COUNT_MAX 13
@@ -31,10 +33,10 @@ struct ipa_gsi_endpoint_data;
/* Execution environment IDs */
enum gsi_ee_id {
- GSI_EE_AP = 0,
- GSI_EE_MODEM = 1,
- GSI_EE_UC = 2,
- GSI_EE_TZ = 3,
+ GSI_EE_AP = 0x0,
+ GSI_EE_MODEM = 0x1,
+ GSI_EE_UC = 0x2,
+ GSI_EE_TZ = 0x3,
};
struct gsi_ring {
@@ -94,12 +96,12 @@ struct gsi_trans_info {
/* Hardware values signifying the state of a channel */
enum gsi_channel_state {
- GSI_CHANNEL_STATE_NOT_ALLOCATED = 0x0,
- GSI_CHANNEL_STATE_ALLOCATED = 0x1,
- GSI_CHANNEL_STATE_STARTED = 0x2,
- GSI_CHANNEL_STATE_STOPPED = 0x3,
- GSI_CHANNEL_STATE_STOP_IN_PROC = 0x4,
- GSI_CHANNEL_STATE_ERROR = 0xf,
+ GSI_CHANNEL_STATE_NOT_ALLOCATED = 0x0,
+ GSI_CHANNEL_STATE_ALLOCATED = 0x1,
+ GSI_CHANNEL_STATE_STARTED = 0x2,
+ GSI_CHANNEL_STATE_STOPPED = 0x3,
+ GSI_CHANNEL_STATE_STOP_IN_PROC = 0x4,
+ GSI_CHANNEL_STATE_ERROR = 0xf,
};
/* We only care about channels between IPA and AP */
@@ -107,7 +109,6 @@ struct gsi_channel {
struct gsi *gsi;
bool toward_ipa;
bool command; /* AP command TX channel or not */
- bool use_prefetch; /* use prefetch (else escape buf) */
u8 tlv_count; /* # entries in TLV FIFO */
u16 tre_count;
@@ -147,6 +148,7 @@ struct gsi_evt_ring {
struct gsi {
struct device *dev; /* Same as IPA device */
+ enum ipa_version version;
struct net_device dummy_dev; /* needed for NAPI */
void __iomem *virt;
u32 irq;
@@ -154,24 +156,25 @@ struct gsi {
u32 evt_ring_count;
struct gsi_channel channel[GSI_CHANNEL_COUNT_MAX];
struct gsi_evt_ring evt_ring[GSI_EVT_RING_COUNT_MAX];
- u32 event_bitmap;
- u32 event_enable_bitmap;
- u32 modem_channel_bitmap;
+ u32 event_bitmap; /* allocated event rings */
+ u32 modem_channel_bitmap; /* modem channels to allocate */
+ u32 type_enabled_bitmap; /* GSI IRQ types enabled */
+ u32 ieob_enabled_bitmap; /* IEOB IRQ enabled (event rings) */
struct completion completion; /* for global EE commands */
+ int result; /* Negative errno (generic commands) */
struct mutex mutex; /* protects commands, programming */
};
/**
* gsi_setup() - Set up the GSI subsystem
* @gsi: Address of GSI structure embedded in an IPA structure
- * @legacy: Set up for legacy hardware
*
* Return: 0 if successful, or a negative error code
*
* Performs initialization that must wait until the GSI hardware is
* ready (including firmware loaded).
*/
-int gsi_setup(struct gsi *gsi, bool legacy);
+int gsi_setup(struct gsi *gsi);
/**
* gsi_teardown() - Tear down GSI subsystem
@@ -219,15 +222,15 @@ int gsi_channel_stop(struct gsi *gsi, u32 channel_id);
* gsi_channel_reset() - Reset an allocated GSI channel
* @gsi: GSI pointer
* @channel_id: Channel to be reset
- * @legacy: Legacy behavior
+ * @doorbell: Whether to (possibly) enable the doorbell engine
*
- * Reset a channel and reconfigure it. The @legacy flag indicates
- * that some steps should be done differently for legacy hardware.
+ * Reset a channel and reconfigure it. The @doorbell flag indicates
+ * that the doorbell engine should be enabled if needed.
*
* GSI hardware relinquishes ownership of all pending receive buffer
* transactions and they will complete with their cancelled flag set.
*/
-void gsi_channel_reset(struct gsi *gsi, u32 channel_id, bool legacy);
+void gsi_channel_reset(struct gsi *gsi, u32 channel_id, bool doorbell);
int gsi_channel_suspend(struct gsi *gsi, u32 channel_id, bool stop);
int gsi_channel_resume(struct gsi *gsi, u32 channel_id, bool start);
@@ -236,15 +239,18 @@ int gsi_channel_resume(struct gsi *gsi, u32 channel_id, bool start);
* gsi_init() - Initialize the GSI subsystem
* @gsi: Address of GSI structure embedded in an IPA structure
* @pdev: IPA platform device
+ * @version: IPA hardware version (implies GSI version)
+ * @count: Number of entries in the configuration data array
+ * @data: Endpoint and channel configuration data
*
* Return: 0 if successful, or a negative error code
*
* Early stage initialization of the GSI subsystem, performing tasks
* that can be done before the GSI hardware is ready to use.
*/
-int gsi_init(struct gsi *gsi, struct platform_device *pdev, bool prefetch,
- u32 count, const struct ipa_gsi_endpoint_data *data,
- bool modem_alloc);
+int gsi_init(struct gsi *gsi, struct platform_device *pdev,
+ enum ipa_version version, u32 count,
+ const struct ipa_gsi_endpoint_data *data);
/**
* gsi_exit() - Exit the GSI subsystem
diff --git a/drivers/net/ipa/gsi_reg.h b/drivers/net/ipa/gsi_reg.h
index 8e0e9350c383..0e138bbd8205 100644
--- a/drivers/net/ipa/gsi_reg.h
+++ b/drivers/net/ipa/gsi_reg.h
@@ -38,6 +38,17 @@
* (though the actual limit is hardware-dependent).
*/
+/* GSI EE registers as a group are shifted downward by a fixed
+ * constant amount for IPA versions 4.5 and beyond. This applies
+ * to all GSI registers we use *except* the ones that disable
+ * inter-EE interrupts for channels and event channels.
+ *
+ * We handle this by adjusting the pointer to the mapped GSI memory
+ * region downward. Then in the one place we use them (gsi_irq_setup())
+ * we undo that adjustment for the inter-EE interrupt registers.
+ */
+#define GSI_EE_REG_ADJUST 0x0000d000 /* IPA v4.5+ */
+
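
To make the pointer arithmetic concrete, a minimal sketch (not driver code; GSI_CNTXT_INTSET_OFFSET and GSI_INTER_EE_SRC_CH_IRQ_OFFSET stand in for any ordinary and inter-EE register):

static void __iomem *gsi_map_regs(phys_addr_t start, size_t size,
				  enum ipa_version version)
{
	void __iomem *virt = ioremap(start, size);

	/* v4.5+ registers sit GSI_EE_REG_ADJUST lower in the region;
	 * moving the base down the same amount keeps every pre-v4.5
	 * offset valid.
	 */
	if (virt && version >= IPA_VERSION_4_5)
		virt -= GSI_EE_REG_ADJUST;

	return virt;	/* ordinary access: iowrite32(val, virt + offset) */
}

The inter-EE registers did not move, so the one place that touches them adds the constant back:

	iowrite32(val, virt + GSI_EE_REG_ADJUST + GSI_INTER_EE_SRC_CH_IRQ_OFFSET);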
#define GSI_INTER_EE_SRC_CH_IRQ_OFFSET \
GSI_INTER_EE_N_SRC_CH_IRQ_OFFSET(GSI_EE_AP)
#define GSI_INTER_EE_N_SRC_CH_IRQ_OFFSET(ee) \
@@ -66,12 +77,20 @@
#define CHTYPE_DIR_FMASK GENMASK(3, 3)
#define EE_FMASK GENMASK(7, 4)
#define CHID_FMASK GENMASK(12, 8)
-/* The next field is present for GSI v2.0 and above */
+/* The next field is present for IPA v4.5 and above */
#define CHTYPE_PROTOCOL_MSB_FMASK GENMASK(13, 13)
#define ERINDEX_FMASK GENMASK(18, 14)
#define CHSTATE_FMASK GENMASK(23, 20)
#define ELEMENT_SIZE_FMASK GENMASK(31, 24)
+/** enum gsi_channel_type - CHTYPE_PROTOCOL field values in CH_C_CNTXT_0 */
+enum gsi_channel_type {
+ GSI_CHANNEL_TYPE_MHI = 0x0,
+ GSI_CHANNEL_TYPE_XHCI = 0x1,
+ GSI_CHANNEL_TYPE_GPI = 0x2,
+ GSI_CHANNEL_TYPE_XDCI = 0x3,
+};
+
#define GSI_CH_C_CNTXT_1_OFFSET(ch) \
GSI_EE_N_CH_C_CNTXT_1_OFFSET((ch), GSI_EE_AP)
#define GSI_EE_N_CH_C_CNTXT_1_OFFSET(ch, ee) \
@@ -95,8 +114,18 @@
#define WRR_WEIGHT_FMASK GENMASK(3, 0)
#define MAX_PREFETCH_FMASK GENMASK(8, 8)
#define USE_DB_ENG_FMASK GENMASK(9, 9)
-/* The next field is present for GSI v2.0 and above */
+/* The next field is only present for IPA v4.0, v4.1, and v4.2 */
#define USE_ESCAPE_BUF_ONLY_FMASK GENMASK(10, 10)
+/* The next two fields are present for IPA v4.5 and above */
+#define PREFETCH_MODE_FMASK GENMASK(13, 10)
+#define EMPTY_LVL_THRSHOLD_FMASK GENMASK(23, 16)
+/** enum gsi_prefetch_mode - PREFETCH_MODE field in CH_C_QOS */
+enum gsi_prefetch_mode {
+ GSI_USE_PREFETCH_BUFS = 0x0,
+ GSI_ESCAPE_BUF_ONLY = 0x1,
+ GSI_SMART_PREFETCH = 0x2,
+ GSI_FREE_PREFETCH = 0x3,
+};
#define GSI_CH_C_SCRATCH_0_OFFSET(ch) \
GSI_EE_N_CH_C_SCRATCH_0_OFFSET((ch), GSI_EE_AP)
@@ -128,6 +157,7 @@
#define EV_INTYPE_FMASK GENMASK(16, 16)
#define EV_CHSTATE_FMASK GENMASK(23, 20)
#define EV_ELEMENT_SIZE_FMASK GENMASK(31, 24)
+/* enum gsi_channel_type defines EV_CHTYPE field values in EV_CH_E_CNTXT_0 */
#define GSI_EV_CH_E_CNTXT_1_OFFSET(ev) \
GSI_EE_N_EV_CH_E_CNTXT_1_OFFSET((ev), GSI_EE_AP)
@@ -216,6 +246,15 @@
#define CH_CHID_FMASK GENMASK(7, 0)
#define CH_OPCODE_FMASK GENMASK(31, 24)
+/** enum gsi_ch_cmd_opcode - CH_OPCODE field values in CH_CMD */
+enum gsi_ch_cmd_opcode {
+ GSI_CH_ALLOCATE = 0x0,
+ GSI_CH_START = 0x1,
+ GSI_CH_STOP = 0x2,
+ GSI_CH_RESET = 0x9,
+ GSI_CH_DE_ALLOC = 0xa,
+};
+
#define GSI_EV_CH_CMD_OFFSET \
GSI_EE_N_EV_CH_CMD_OFFSET(GSI_EE_AP)
#define GSI_EE_N_EV_CH_CMD_OFFSET(ee) \
@@ -223,6 +262,13 @@
#define EV_CHID_FMASK GENMASK(7, 0)
#define EV_OPCODE_FMASK GENMASK(31, 24)
+/** enum gsi_evt_cmd_opcode - EV_OPCODE field values in EV_CH_CMD */
+enum gsi_evt_cmd_opcode {
+ GSI_EVT_ALLOCATE = 0x0,
+ GSI_EVT_RESET = 0x9,
+ GSI_EVT_DE_ALLOC = 0xa,
+};
+
#define GSI_GENERIC_CMD_OFFSET \
GSI_EE_N_GENERIC_CMD_OFFSET(GSI_EE_AP)
#define GSI_EE_N_GENERIC_CMD_OFFSET(ee) \
@@ -231,29 +277,43 @@
#define GENERIC_CHID_FMASK GENMASK(9, 5)
#define GENERIC_EE_FMASK GENMASK(13, 10)
+/** enum gsi_generic_cmd_opcode - GENERIC_OPCODE field values in GENERIC_CMD */
+enum gsi_generic_cmd_opcode {
+ GSI_GENERIC_HALT_CHANNEL = 0x1,
+ GSI_GENERIC_ALLOCATE_CHANNEL = 0x2,
+};
+
#define GSI_GSI_HW_PARAM_2_OFFSET \
GSI_EE_N_GSI_HW_PARAM_2_OFFSET(GSI_EE_AP)
#define GSI_EE_N_GSI_HW_PARAM_2_OFFSET(ee) \
(0x0001f040 + 0x4000 * (ee))
#define IRAM_SIZE_FMASK GENMASK(2, 0)
-#define IRAM_SIZE_ONE_KB_FVAL 0
-#define IRAM_SIZE_TWO_KB_FVAL 1
-/* The next two values are available for GSI v2.0 and above */
-#define IRAM_SIZE_TWO_N_HALF_KB_FVAL 2
-#define IRAM_SIZE_THREE_KB_FVAL 3
#define NUM_CH_PER_EE_FMASK GENMASK(7, 3)
#define NUM_EV_PER_EE_FMASK GENMASK(12, 8)
#define GSI_CH_PEND_TRANSLATE_FMASK GENMASK(13, 13)
#define GSI_CH_FULL_LOGIC_FMASK GENMASK(14, 14)
-/* Fields below are present for GSI v2.0 and above */
+/* Fields below are present for IPA v4.0 and above */
#define GSI_USE_SDMA_FMASK GENMASK(15, 15)
#define GSI_SDMA_N_INT_FMASK GENMASK(18, 16)
#define GSI_SDMA_MAX_BURST_FMASK GENMASK(26, 19)
#define GSI_SDMA_N_IOVEC_FMASK GENMASK(29, 27)
-/* Fields below are present for GSI v2.2 and above */
+/* Fields below are present for IPA v4.2 and above */
#define GSI_USE_RD_WR_ENG_FMASK GENMASK(30, 30)
#define GSI_USE_INTER_EE_FMASK GENMASK(31, 31)
+/** enum gsi_iram_size - IRAM_SIZE field values in HW_PARAM_2 */
+enum gsi_iram_size {
+ IRAM_SIZE_ONE_KB = 0x0,
+ IRAM_SIZE_TWO_KB = 0x1,
+/* The next two values are available for IPA v4.0 and above */
+ IRAM_SIZE_TWO_N_HALF_KB = 0x2,
+ IRAM_SIZE_THREE_KB = 0x3,
+ /* The next two values are available for IPA v4.5 and above */
+ IRAM_SIZE_THREE_N_HALF_KB = 0x4,
+ IRAM_SIZE_FOUR_KB = 0x5,
+};
+
+/* The IRQ condition for each type is cleared by writing a type-specific register */
#define GSI_CNTXT_TYPE_IRQ_OFFSET \
GSI_EE_N_CNTXT_TYPE_IRQ_OFFSET(GSI_EE_AP)
#define GSI_EE_N_CNTXT_TYPE_IRQ_OFFSET(ee) \
@@ -262,15 +322,17 @@
GSI_EE_N_CNTXT_TYPE_IRQ_MSK_OFFSET(GSI_EE_AP)
#define GSI_EE_N_CNTXT_TYPE_IRQ_MSK_OFFSET(ee) \
(0x0001f088 + 0x4000 * (ee))
-/* The masks below are used for the TYPE_IRQ and TYPE_IRQ_MASK registers */
-#define CH_CTRL_FMASK GENMASK(0, 0)
-#define EV_CTRL_FMASK GENMASK(1, 1)
-#define GLOB_EE_FMASK GENMASK(2, 2)
-#define IEOB_FMASK GENMASK(3, 3)
-#define INTER_EE_CH_CTRL_FMASK GENMASK(4, 4)
-#define INTER_EE_EV_CTRL_FMASK GENMASK(5, 5)
-#define GENERAL_FMASK GENMASK(6, 6)
-#define GSI_CNTXT_TYPE_IRQ_MSK_ALL GENMASK(6, 0)
+
+/* Values here are bit positions in the TYPE_IRQ and TYPE_IRQ_MSK registers */
+enum gsi_irq_type_id {
+ GSI_CH_CTRL = 0x0, /* channel allocation, etc. */
+ GSI_EV_CTRL = 0x1, /* event ring allocation, etc. */
+ GSI_GLOB_EE = 0x2, /* global/general event */
+ GSI_IEOB = 0x3, /* TRE completion */
+ GSI_INTER_EE_CH_CTRL = 0x4, /* remote-issued stop/reset (unused) */
+ GSI_INTER_EE_EV_CTRL = 0x5, /* remote-issued event reset (unused) */
+ GSI_GENERAL = 0x6, /* general-purpose event */
+};
#define GSI_CNTXT_SRC_CH_IRQ_OFFSET \
GSI_EE_N_CNTXT_SRC_CH_IRQ_OFFSET(GSI_EE_AP)
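
As a usage sketch of the new enum (the helper name here is hypothetical; the real driver keeps an equivalent update routine), the values act as bit positions in both the mask register and the type_enabled_bitmap field added to struct gsi:

static void gsi_irq_type_enable_sketch(struct gsi *gsi,
				       enum gsi_irq_type_id type_id,
				       bool enable)
{
	if (enable)
		gsi->type_enabled_bitmap |= BIT(type_id);
	else
		gsi->type_enabled_bitmap &= ~BIT(type_id);

	iowrite32(gsi->type_enabled_bitmap,
		  gsi->virt + GSI_CNTXT_TYPE_IRQ_MSK_OFFSET);
}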
@@ -329,12 +391,13 @@
GSI_EE_N_CNTXT_GLOB_IRQ_CLR_OFFSET(GSI_EE_AP)
#define GSI_EE_N_CNTXT_GLOB_IRQ_CLR_OFFSET(ee) \
(0x0001f110 + 0x4000 * (ee))
-/* The masks below are used for the general IRQ STTS, EN, and CLR registers */
-#define ERROR_INT_FMASK GENMASK(0, 0)
-#define GP_INT1_FMASK GENMASK(1, 1)
-#define GP_INT2_FMASK GENMASK(2, 2)
-#define GP_INT3_FMASK GENMASK(3, 3)
-#define GSI_CNTXT_GLOB_IRQ_ALL GENMASK(3, 0)
+/* Values here are bit positions in the GLOB_IRQ_* registers */
+enum gsi_global_irq_id {
+ ERROR_INT = 0x0,
+ GP_INT1 = 0x1,
+ GP_INT2 = 0x2,
+ GP_INT3 = 0x3,
+};
#define GSI_CNTXT_GSI_IRQ_STTS_OFFSET \
GSI_EE_N_CNTXT_GSI_IRQ_STTS_OFFSET(GSI_EE_AP)
@@ -348,12 +411,13 @@
GSI_EE_N_CNTXT_GSI_IRQ_CLR_OFFSET(GSI_EE_AP)
#define GSI_EE_N_CNTXT_GSI_IRQ_CLR_OFFSET(ee) \
(0x0001f128 + 0x4000 * (ee))
-/* The masks below are used for the general IRQ STTS, EN, and CLR registers */
-#define BREAK_POINT_FMASK GENMASK(0, 0)
-#define BUS_ERROR_FMASK GENMASK(1, 1)
-#define CMD_FIFO_OVRFLOW_FMASK GENMASK(2, 2)
-#define MCS_STACK_OVRFLOW_FMASK GENMASK(3, 3)
-#define GSI_CNTXT_GSI_IRQ_ALL GENMASK(3, 0)
+/* Values here are bit positions in the (general) GSI_IRQ_* registers */
+enum gsi_general_id {
+ BREAK_POINT = 0x0,
+ BUS_ERROR = 0x1,
+ CMD_FIFO_OVRFLOW = 0x2,
+ MCS_STACK_OVRFLOW = 0x3,
+};
#define GSI_CNTXT_INTSET_OFFSET \
GSI_EE_N_CNTXT_INTSET_OFFSET(GSI_EE_AP)
@@ -373,6 +437,25 @@
#define ERR_TYPE_FMASK GENMASK(27, 24)
#define ERR_EE_FMASK GENMASK(31, 28)
+/** enum gsi_err_code - ERR_CODE field values in EE_ERR_LOG */
+enum gsi_err_code {
+ GSI_INVALID_TRE = 0x1,
+ GSI_OUT_OF_BUFFERS = 0x2,
+ GSI_OUT_OF_RESOURCES = 0x3,
+ GSI_UNSUPPORTED_INTER_EE_OP = 0x4,
+ GSI_EVT_RING_EMPTY = 0x5,
+ GSI_NON_ALLOCATED_EVT_ACCESS = 0x6,
+ /* 7 is not assigned */
+ GSI_HWO_1 = 0x8,
+};
+
+/** enum gsi_err_type - ERR_TYPE field values in EE_ERR_LOG */
+enum gsi_err_type {
+ GSI_ERR_TYPE_GLOB = 0x1,
+ GSI_ERR_TYPE_CHAN = 0x2,
+ GSI_ERR_TYPE_EVT = 0x3,
+};
+
#define GSI_ERROR_LOG_CLR_OFFSET \
GSI_EE_N_ERROR_LOG_CLR_OFFSET(GSI_EE_AP)
#define GSI_EE_N_ERROR_LOG_CLR_OFFSET(ee) \
@@ -384,10 +467,18 @@
(0x0001f400 + 0x4000 * (ee))
#define INTER_EE_RESULT_FMASK GENMASK(2, 0)
#define GENERIC_EE_RESULT_FMASK GENMASK(7, 5)
-#define GENERIC_EE_SUCCESS_FVAL 1
-#define GENERIC_EE_INCORRECT_DIRECTION_FVAL 3
-#define GENERIC_EE_INCORRECT_CHANNEL_FVAL 5
-#define GENERIC_EE_NO_RESOURCES_FVAL 7
+
+/** enum gsi_generic_ee_result - GENERIC_EE_RESULT field values in SCRATCH_0 */
+enum gsi_generic_ee_result {
+ GENERIC_EE_SUCCESS = 0x1,
+ GENERIC_EE_CHANNEL_NOT_RUNNING = 0x2,
+ GENERIC_EE_INCORRECT_DIRECTION = 0x3,
+ GENERIC_EE_INCORRECT_CHANNEL_TYPE = 0x4,
+ GENERIC_EE_INCORRECT_CHANNEL = 0x5,
+ GENERIC_EE_RETRY = 0x6,
+ GENERIC_EE_NO_RESOURCES = 0x7,
+};
+
#define USB_MAX_PACKET_FMASK GENMASK(15, 15) /* 0: HS; 1: SS */
#define MHI_BASE_CHANNEL_FMASK GENMASK(31, 24)
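
A sketch of how the new result enum pairs with the result field added to struct gsi; the errno mapping is illustrative, assuming the scratch register has already been read into val:

static int gsi_generic_result_sketch(u32 val)
{
	switch (u32_get_bits(val, GENERIC_EE_RESULT_FMASK)) {
	case GENERIC_EE_SUCCESS:
		return 0;
	case GENERIC_EE_RETRY:
		return -EAGAIN;		/* caller may reissue the command */
	case GENERIC_EE_NO_RESOURCES:
		return -ENOMEM;
	default:
		return -EIO;		/* unexpected completion code */
	}
}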
diff --git a/drivers/net/ipa/ipa_clock.c b/drivers/net/ipa/ipa_clock.c
index a2c0fde05819..9dcf16f399b7 100644
--- a/drivers/net/ipa/ipa_clock.c
+++ b/drivers/net/ipa/ipa_clock.c
@@ -13,6 +13,7 @@
#include "ipa.h"
#include "ipa_clock.h"
#include "ipa_modem.h"
+#include "ipa_data.h"
/**
* DOC: IPA Clocking
@@ -29,18 +30,6 @@
* An IPA clock reference must be held for any access to IPA hardware.
*/
-#define IPA_CORE_CLOCK_RATE (75UL * 1000 * 1000) /* Hz */
-
-/* Interconnect path bandwidths (each times 1000 bytes per second) */
-#define IPA_MEMORY_AVG (80 * 1000) /* 80 MBps */
-#define IPA_MEMORY_PEAK (600 * 1000)
-
-#define IPA_IMEM_AVG (80 * 1000)
-#define IPA_IMEM_PEAK (350 * 1000)
-
-#define IPA_CONFIG_AVG (40 * 1000)
-#define IPA_CONFIG_PEAK (40 * 1000)
-
/**
* struct ipa_clock - IPA clocking information
* @count: Clocking reference count
@@ -49,6 +38,7 @@
* @memory_path: Memory interconnect
* @imem_path: Internal memory interconnect
* @config_path: Configuration space interconnect
+ * @interconnect_data: Interconnect configuration data
*/
struct ipa_clock {
refcount_t count;
@@ -57,6 +47,7 @@ struct ipa_clock {
struct icc_path *memory_path;
struct icc_path *imem_path;
struct icc_path *config_path;
+ const struct ipa_interconnect_data *interconnect_data;
};
static struct icc_path *
@@ -113,18 +104,25 @@ static void ipa_interconnect_exit(struct ipa_clock *clock)
/* Currently we only use one bandwidth level, so just "enable" interconnects */
static int ipa_interconnect_enable(struct ipa *ipa)
{
+ const struct ipa_interconnect_data *data;
struct ipa_clock *clock = ipa->clock;
int ret;
- ret = icc_set_bw(clock->memory_path, IPA_MEMORY_AVG, IPA_MEMORY_PEAK);
+ data = &clock->interconnect_data[IPA_INTERCONNECT_MEMORY];
+ ret = icc_set_bw(clock->memory_path, data->average_rate,
+ data->peak_rate);
if (ret)
return ret;
- ret = icc_set_bw(clock->imem_path, IPA_IMEM_AVG, IPA_IMEM_PEAK);
+ data = &clock->interconnect_data[IPA_INTERCONNECT_IMEM];
+ ret = icc_set_bw(clock->imem_path, data->average_rate,
+ data->peak_rate);
if (ret)
goto err_memory_path_disable;
- ret = icc_set_bw(clock->config_path, IPA_CONFIG_AVG, IPA_CONFIG_PEAK);
+ data = &clock->interconnect_data[IPA_INTERCONNECT_CONFIG];
+ ret = icc_set_bw(clock->config_path, data->average_rate,
+ data->peak_rate);
if (ret)
goto err_imem_path_disable;
@@ -141,6 +139,7 @@ err_memory_path_disable:
/* To disable an interconnect, we just set its bandwidth to 0 */
static int ipa_interconnect_disable(struct ipa *ipa)
{
+ const struct ipa_interconnect_data *data;
struct ipa_clock *clock = ipa->clock;
int ret;
@@ -159,9 +158,13 @@ static int ipa_interconnect_disable(struct ipa *ipa)
return 0;
err_imem_path_reenable:
- (void)icc_set_bw(clock->imem_path, IPA_IMEM_AVG, IPA_IMEM_PEAK);
+ data = &clock->interconnect_data[IPA_INTERCONNECT_IMEM];
+ (void)icc_set_bw(clock->imem_path, data->average_rate,
+ data->peak_rate);
err_memory_path_reenable:
- (void)icc_set_bw(clock->memory_path, IPA_MEMORY_AVG, IPA_MEMORY_PEAK);
+ data = &clock->interconnect_data[IPA_INTERCONNECT_MEMORY];
+ (void)icc_set_bw(clock->memory_path, data->average_rate,
+ data->peak_rate);
return ret;
}
@@ -257,7 +260,8 @@ u32 ipa_clock_rate(struct ipa *ipa)
}
/* Initialize IPA clocking */
-struct ipa_clock *ipa_clock_init(struct device *dev)
+struct ipa_clock *
+ipa_clock_init(struct device *dev, const struct ipa_clock_data *data)
{
struct ipa_clock *clock;
struct clk *clk;
@@ -269,10 +273,10 @@ struct ipa_clock *ipa_clock_init(struct device *dev)
return ERR_CAST(clk);
}
- ret = clk_set_rate(clk, IPA_CORE_CLOCK_RATE);
+ ret = clk_set_rate(clk, data->core_clock_rate);
if (ret) {
- dev_err(dev, "error %d setting core clock rate to %lu\n",
- ret, IPA_CORE_CLOCK_RATE);
+ dev_err(dev, "error %d setting core clock rate to %u\n",
+ ret, data->core_clock_rate);
goto err_clk_put;
}
@@ -282,6 +286,7 @@ struct ipa_clock *ipa_clock_init(struct device *dev)
goto err_clk_put;
}
clock->core = clk;
+ clock->interconnect_data = data->interconnect;
ret = ipa_interconnect_init(clock, dev);
if (ret)
diff --git a/drivers/net/ipa/ipa_clock.h b/drivers/net/ipa/ipa_clock.h
index 1d70f1de3875..1fe634760e59 100644
--- a/drivers/net/ipa/ipa_clock.h
+++ b/drivers/net/ipa/ipa_clock.h
@@ -9,6 +9,7 @@
struct device;
struct ipa;
+struct ipa_clock_data;
/**
* ipa_clock_rate() - Return the current IPA core clock rate
@@ -21,10 +22,12 @@ u32 ipa_clock_rate(struct ipa *ipa);
/**
* ipa_clock_init() - Initialize IPA clocking
* @dev: IPA device
+ * @data: Clock configuration data
*
* Return: A pointer to an ipa_clock structure, or a pointer-coded error
*/
-struct ipa_clock *ipa_clock_init(struct device *dev);
+struct ipa_clock *ipa_clock_init(struct device *dev,
+ const struct ipa_clock_data *data);
/**
* ipa_clock_exit() - Inverse of ipa_clock_init()
diff --git a/drivers/net/ipa/ipa_cmd.c b/drivers/net/ipa/ipa_cmd.c
index d92dd3f09b73..002e51448510 100644
--- a/drivers/net/ipa/ipa_cmd.c
+++ b/drivers/net/ipa/ipa_cmd.c
@@ -38,9 +38,9 @@
/* Some commands can wait until indicated pipeline stages are clear */
enum pipeline_clear_options {
- pipeline_clear_hps = 0,
- pipeline_clear_src_grp = 1,
- pipeline_clear_full = 2,
+ pipeline_clear_hps = 0x0,
+ pipeline_clear_src_grp = 0x1,
+ pipeline_clear_full = 0x2,
};
/* IPA_CMD_IP_V{4,6}_{FILTER,ROUTING}_INIT */
diff --git a/drivers/net/ipa/ipa_cmd.h b/drivers/net/ipa/ipa_cmd.h
index f7e6f87facf7..4ed09c486abc 100644
--- a/drivers/net/ipa/ipa_cmd.h
+++ b/drivers/net/ipa/ipa_cmd.h
@@ -27,16 +27,16 @@ struct gsi_channel;
* a request is *not* an immediate command.
*/
enum ipa_cmd_opcode {
- IPA_CMD_NONE = 0,
- IPA_CMD_IP_V4_FILTER_INIT = 3,
- IPA_CMD_IP_V6_FILTER_INIT = 4,
- IPA_CMD_IP_V4_ROUTING_INIT = 7,
- IPA_CMD_IP_V6_ROUTING_INIT = 8,
- IPA_CMD_HDR_INIT_LOCAL = 9,
- IPA_CMD_REGISTER_WRITE = 12,
- IPA_CMD_IP_PACKET_INIT = 16,
- IPA_CMD_DMA_SHARED_MEM = 19,
- IPA_CMD_IP_PACKET_TAG_STATUS = 20,
+ IPA_CMD_NONE = 0x0,
+ IPA_CMD_IP_V4_FILTER_INIT = 0x3,
+ IPA_CMD_IP_V6_FILTER_INIT = 0x4,
+ IPA_CMD_IP_V4_ROUTING_INIT = 0x7,
+ IPA_CMD_IP_V6_ROUTING_INIT = 0x8,
+ IPA_CMD_HDR_INIT_LOCAL = 0x9,
+ IPA_CMD_REGISTER_WRITE = 0xc,
+ IPA_CMD_IP_PACKET_INIT = 0x10,
+ IPA_CMD_DMA_SHARED_MEM = 0x13,
+ IPA_CMD_IP_PACKET_TAG_STATUS = 0x14,
};
/**
@@ -50,7 +50,6 @@ struct ipa_cmd_info {
enum dma_data_direction direction;
};
-
#ifdef IPA_VALIDATE
/**
diff --git a/drivers/net/ipa/ipa_data-sc7180.c b/drivers/net/ipa/ipa_data-sc7180.c
index d4c2bc7ad24b..5cc0ed77edb9 100644
--- a/drivers/net/ipa/ipa_data-sc7180.c
+++ b/drivers/net/ipa/ipa_data-sc7180.c
@@ -24,6 +24,7 @@ static const struct ipa_gsi_endpoint_data ipa_gsi_endpoint_data[] = {
.endpoint = {
.seq_type = IPA_SEQ_DMA_ONLY,
.config = {
+ .resource_group = 0,
.dma_mode = true,
.dma_endpoint = IPA_ENDPOINT_AP_LAN_RX,
},
@@ -42,6 +43,7 @@ static const struct ipa_gsi_endpoint_data ipa_gsi_endpoint_data[] = {
.endpoint = {
.seq_type = IPA_SEQ_INVALID,
.config = {
+ .resource_group = 0,
.aggregation = true,
.status_enable = true,
.rx = {
@@ -65,6 +67,7 @@ static const struct ipa_gsi_endpoint_data ipa_gsi_endpoint_data[] = {
.seq_type =
IPA_SEQ_PKT_PROCESS_NO_DEC_NO_UCP_DMAP,
.config = {
+ .resource_group = 0,
.checksum = true,
.qmap = true,
.status_enable = true,
@@ -88,6 +91,7 @@ static const struct ipa_gsi_endpoint_data ipa_gsi_endpoint_data[] = {
.endpoint = {
.seq_type = IPA_SEQ_INVALID,
.config = {
+ .resource_group = 0,
.checksum = true,
.qmap = true,
.aggregation = true,
@@ -305,6 +309,26 @@ static struct ipa_mem_data ipa_mem_data = {
.smem_size = 0x00002000,
};
+static struct ipa_clock_data ipa_clock_data = {
+ .core_clock_rate = 100 * 1000 * 1000, /* Hz */
+ /* Interconnect rates are in 1000 byte/second units */
+ .interconnect = {
+ [IPA_INTERCONNECT_MEMORY] = {
+ .peak_rate = 465000, /* 465 MBps */
+ .average_rate = 80000, /* 80 MBps */
+ },
+ /* Average rate is unused for the next two interconnects */
+ [IPA_INTERCONNECT_IMEM] = {
+ .peak_rate = 68570, /* 68.570 MBps */
+ .average_rate = 0, /* unused */
+ },
+ [IPA_INTERCONNECT_CONFIG] = {
+ .peak_rate = 30000, /* 30 MBps */
+ .average_rate = 0, /* unused */
+ },
+ },
+};
+
/* Configuration data for the SC7180 SoC. */
const struct ipa_data ipa_data_sc7180 = {
.version = IPA_VERSION_4_2,
@@ -312,4 +336,5 @@ const struct ipa_data ipa_data_sc7180 = {
.endpoint_data = ipa_gsi_endpoint_data,
.resource_data = &ipa_resource_data,
.mem_data = &ipa_mem_data,
+ .clock_data = &ipa_clock_data,
};
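
Note on units: these values are handed to icc_set_bw() unchanged, and the interconnect framework expresses bandwidth in the same 1000-byte/second (kBps) units, so .peak_rate = 465000 requests exactly the 465 MBps noted in the comment. The SDM845 table below follows the same convention.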
diff --git a/drivers/net/ipa/ipa_data-sdm845.c b/drivers/net/ipa/ipa_data-sdm845.c
index de2768d71ab5..f8fee8d3ca42 100644
--- a/drivers/net/ipa/ipa_data-sdm845.c
+++ b/drivers/net/ipa/ipa_data-sdm845.c
@@ -26,6 +26,7 @@ static const struct ipa_gsi_endpoint_data ipa_gsi_endpoint_data[] = {
.endpoint = {
.seq_type = IPA_SEQ_DMA_ONLY,
.config = {
+ .resource_group = 1,
.dma_mode = true,
.dma_endpoint = IPA_ENDPOINT_AP_LAN_RX,
},
@@ -44,6 +45,7 @@ static const struct ipa_gsi_endpoint_data ipa_gsi_endpoint_data[] = {
.endpoint = {
.seq_type = IPA_SEQ_INVALID,
.config = {
+ .resource_group = 1,
.aggregation = true,
.status_enable = true,
.rx = {
@@ -67,6 +69,7 @@ static const struct ipa_gsi_endpoint_data ipa_gsi_endpoint_data[] = {
.seq_type =
IPA_SEQ_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
.config = {
+ .resource_group = 1,
.checksum = true,
.qmap = true,
.status_enable = true,
@@ -90,6 +93,7 @@ static const struct ipa_gsi_endpoint_data ipa_gsi_endpoint_data[] = {
.endpoint = {
.seq_type = IPA_SEQ_INVALID,
.config = {
+ .resource_group = 1,
.checksum = true,
.qmap = true,
.aggregation = true,
@@ -146,11 +150,11 @@ static const struct ipa_resource_src ipa_resource_src[] = {
.type = IPA_RESOURCE_TYPE_SRC_PKT_CONTEXTS,
.limits[0] = {
.min = 1,
- .max = 63,
+ .max = 255,
},
.limits[1] = {
.min = 1,
- .max = 63,
+ .max = 255,
},
},
{
@@ -325,6 +329,26 @@ static struct ipa_mem_data ipa_mem_data = {
.smem_size = 0x00002000,
};
+static struct ipa_clock_data ipa_clock_data = {
+ .core_clock_rate = 75 * 1000 * 1000, /* Hz */
+ /* Interconnect rates are in 1000 byte/second units */
+ .interconnect = {
+ [IPA_INTERCONNECT_MEMORY] = {
+ .peak_rate = 600000, /* 600 MBps */
+ .average_rate = 80000, /* 80 MBps */
+ },
+ /* Average rate is unused for the next two interconnects */
+ [IPA_INTERCONNECT_IMEM] = {
+ .peak_rate = 350000, /* 350 MBps */
+ .average_rate = 0, /* unused */
+ },
+ [IPA_INTERCONNECT_CONFIG] = {
+ .peak_rate = 40000, /* 40 MBps */
+ .average_rate = 0, /* unused */
+ },
+ },
+};
+
/* Configuration data for the SDM845 SoC. */
const struct ipa_data ipa_data_sdm845 = {
.version = IPA_VERSION_3_5_1,
@@ -332,4 +356,5 @@ const struct ipa_data ipa_data_sdm845 = {
.endpoint_data = ipa_gsi_endpoint_data,
.resource_data = &ipa_resource_data,
.mem_data = &ipa_mem_data,
+ .clock_data = &ipa_clock_data,
};
diff --git a/drivers/net/ipa/ipa_data.h b/drivers/net/ipa/ipa_data.h
index 7fc1058a5ca9..0ed5ffe2b8da 100644
--- a/drivers/net/ipa/ipa_data.h
+++ b/drivers/net/ipa/ipa_data.h
@@ -45,10 +45,10 @@
* the IPA endpoint.
*/
-/* The maximum value returned by ipa_resource_group_count() */
-#define IPA_RESOURCE_GROUP_COUNT 4
+/* The maximum value returned by ipa_resource_group_{src,dst}_count() */
+#define IPA_RESOURCE_GROUP_SRC_MAX 5
+#define IPA_RESOURCE_GROUP_DST_MAX 5
-/** enum ipa_resource_type_src - source resource types */
/**
* struct gsi_channel_data - GSI channel configuration data
* @tre_count: number of TREs in the channel ring
@@ -109,6 +109,7 @@ struct ipa_endpoint_rx_data {
/**
* struct ipa_endpoint_config_data - IPA endpoint hardware configuration
+ * @resource_group: resource group to assign endpoint to
* @checksum: whether checksum offload is enabled
* @qmap: whether endpoint uses QMAP protocol
* @aggregation: whether endpoint supports aggregation
@@ -119,6 +120,7 @@ struct ipa_endpoint_rx_data {
* @rx: RX-specific endpoint information (see above)
*/
struct ipa_endpoint_config_data {
+ u32 resource_group;
bool checksum;
bool qmap;
bool aggregation;
@@ -206,7 +208,7 @@ struct ipa_resource_limits {
*/
struct ipa_resource_src {
enum ipa_resource_type_src type;
- struct ipa_resource_limits limits[IPA_RESOURCE_GROUP_COUNT];
+ struct ipa_resource_limits limits[IPA_RESOURCE_GROUP_SRC_MAX];
};
/**
@@ -216,7 +218,7 @@ struct ipa_resource_src {
*/
struct ipa_resource_dst {
enum ipa_resource_type_dst type;
- struct ipa_resource_limits limits[IPA_RESOURCE_GROUP_COUNT];
+ struct ipa_resource_limits limits[IPA_RESOURCE_GROUP_DST_MAX];
};
/**
@@ -239,7 +241,7 @@ struct ipa_resource_data {
};
/**
- * struct ipa_mem - description of IPA memory regions
+ * struct ipa_mem_data - description of IPA memory regions
* @local_count: number of regions defined in the local[] array
* @local: array of IPA-local memory region descriptors
* @imem_addr: physical address of IPA region within IMEM
@@ -256,6 +258,34 @@ struct ipa_mem_data {
u32 smem_size;
};
+/** enum ipa_interconnect_id - IPA interconnect identifier */
+enum ipa_interconnect_id {
+ IPA_INTERCONNECT_MEMORY,
+ IPA_INTERCONNECT_IMEM,
+ IPA_INTERCONNECT_CONFIG,
+ IPA_INTERCONNECT_COUNT, /* Last; not an interconnect */
+};
+
+/**
+ * struct ipa_interconnect_data - description of IPA interconnect rates
+ * @peak_rate: Peak interconnect bandwidth (in 1000 byte/sec units)
+ * @average_rate: Average interconnect bandwidth (in 1000 byte/sec units)
+ */
+struct ipa_interconnect_data {
+ u32 peak_rate;
+ u32 average_rate;
+};
+
+/**
+ * struct ipa_clock_data - description of IPA clock and interconnect rates
+ * @core_clock_rate: Core clock rate (Hz)
+ * @interconnect: Array of interconnect bandwidth parameters
+ */
+struct ipa_clock_data {
+ u32 core_clock_rate;
+ struct ipa_interconnect_data interconnect[IPA_INTERCONNECT_COUNT];
+};
+
/**
* struct ipa_data - combined IPA/GSI configuration data
* @version: IPA hardware version
@@ -271,6 +301,7 @@ struct ipa_data {
const struct ipa_gsi_endpoint_data *endpoint_data;
const struct ipa_resource_data *resource_data;
const struct ipa_mem_data *mem_data;
+ const struct ipa_clock_data *clock_data;
};
extern const struct ipa_data ipa_data_sdm845;
diff --git a/drivers/net/ipa/ipa_endpoint.c b/drivers/net/ipa/ipa_endpoint.c
index b40b711cf4bd..9f4be9812a1f 100644
--- a/drivers/net/ipa/ipa_endpoint.c
+++ b/drivers/net/ipa/ipa_endpoint.c
@@ -37,7 +37,7 @@
#define IPA_ENDPOINT_QMAP_METADATA_MASK 0x000000ff /* host byte order */
#define IPA_ENDPOINT_RESET_AGGR_RETRY_MAX 3
-#define IPA_AGGR_TIME_LIMIT_DEFAULT 500 /* microseconds */
+#define IPA_AGGR_TIME_LIMIT 500 /* microseconds */
/** enum ipa_status_opcode - status element opcode hardware values */
enum ipa_status_opcode {
@@ -74,31 +74,6 @@ struct ipa_status {
#ifdef IPA_VALIDATE
-static void ipa_endpoint_validate_build(void)
-{
- /* The aggregation byte limit defines the point at which an
- * aggregation window will close. It is programmed into the
- * IPA hardware as a number of KB. We don't use "hard byte
- * limit" aggregation, which means that we need to supply
- * enough space in a receive buffer to hold a complete MTU
- * plus normal skb overhead *after* that aggregation byte
- * limit has been crossed.
- *
- * This check just ensures we don't define a receive buffer
- * size that would exceed what we can represent in the field
- * that is used to program its size.
- */
- BUILD_BUG_ON(IPA_RX_BUFFER_SIZE >
- field_max(AGGR_BYTE_LIMIT_FMASK) * SZ_1K +
- IPA_MTU + IPA_RX_BUFFER_OVERHEAD);
-
- /* I honestly don't know where this requirement comes from. But
- * it holds, and if we someday need to loosen the constraint we
- * can try to track it down.
- */
- BUILD_BUG_ON(sizeof(struct ipa_status) % 4);
-}
-
static bool ipa_endpoint_data_valid_one(struct ipa *ipa, u32 count,
const struct ipa_gsi_endpoint_data *all_data,
const struct ipa_gsi_endpoint_data *data)
@@ -180,14 +155,24 @@ static bool ipa_endpoint_data_valid_one(struct ipa *ipa, u32 count,
return true;
}
+static u32 aggr_byte_limit_max(enum ipa_version version)
+{
+ if (version < IPA_VERSION_4_5)
+ return field_max(aggr_byte_limit_fmask(true));
+
+ return field_max(aggr_byte_limit_fmask(false));
+}
+
static bool ipa_endpoint_data_valid(struct ipa *ipa, u32 count,
const struct ipa_gsi_endpoint_data *data)
{
const struct ipa_gsi_endpoint_data *dp = data;
struct device *dev = &ipa->pdev->dev;
enum ipa_endpoint_name name;
+ u32 limit;
- ipa_endpoint_validate_build();
+ /* Not sure where this constraint comes from... */
+ BUILD_BUG_ON(sizeof(struct ipa_status) % 4);
if (count > IPA_ENDPOINT_COUNT) {
dev_err(dev, "too many endpoints specified (%u > %u)\n",
@@ -195,6 +180,26 @@ static bool ipa_endpoint_data_valid(struct ipa *ipa, u32 count,
return false;
}
+ /* The aggregation byte limit defines the point at which an
+ * aggregation window will close. It is programmed into the
+ * IPA hardware as a number of KB. We don't use "hard byte
+ * limit" aggregation, which means that we need to supply
+ * enough space in a receive buffer to hold a complete MTU
+ * plus normal skb overhead *after* that aggregation byte
+ * limit has been crossed.
+ *
+ * This check ensures we don't define a receive buffer size
+ * that would exceed what we can represent in the field that
+ * is used to program its size.
+ */
+ limit = aggr_byte_limit_max(ipa->version) * SZ_1K;
+ limit += IPA_MTU + IPA_RX_BUFFER_OVERHEAD;
+ if (limit < IPA_RX_BUFFER_SIZE) {
+ dev_err(dev, "buffer size too big for aggregation (%u > %u)\n",
+ IPA_RX_BUFFER_SIZE, limit);
+ return false;
+ }
+
/* Make sure needed endpoints have defined data */
if (ipa_gsi_endpoint_data_empty(&data[IPA_ENDPOINT_AP_COMMAND_TX])) {
dev_err(dev, "command TX endpoint not defined\n");
@@ -485,28 +490,34 @@ static void ipa_endpoint_init_cfg(struct ipa_endpoint *endpoint)
static void ipa_endpoint_init_hdr(struct ipa_endpoint *endpoint)
{
u32 offset = IPA_REG_ENDP_INIT_HDR_N_OFFSET(endpoint->endpoint_id);
+ struct ipa *ipa = endpoint->ipa;
u32 val = 0;
if (endpoint->data->qmap) {
size_t header_size = sizeof(struct rmnet_map_header);
+ enum ipa_version version = ipa->version;
/* We might supply a checksum header after the QMAP header */
if (endpoint->toward_ipa && endpoint->data->checksum)
header_size += sizeof(struct rmnet_map_ul_csum_header);
- val |= u32_encode_bits(header_size, HDR_LEN_FMASK);
+ val |= ipa_header_size_encoded(version, header_size);
/* Define how to fill fields in a received QMAP header */
if (!endpoint->toward_ipa) {
- u32 off; /* Field offset within header */
+ u32 offset; /* Field offset within header */
/* Where IPA will write the metadata value */
- off = offsetof(struct rmnet_map_header, mux_id);
- val |= u32_encode_bits(off, HDR_OFST_METADATA_FMASK);
+ offset = offsetof(struct rmnet_map_header, mux_id);
+ val |= ipa_metadata_offset_encoded(version, offset);
/* Where IPA will write the length */
- off = offsetof(struct rmnet_map_header, pkt_len);
+ offset = offsetof(struct rmnet_map_header, pkt_len);
+ /* Upper bits are stored in HDR_EXT with IPA v4.5 */
+ if (version == IPA_VERSION_4_5)
+ offset &= field_mask(HDR_OFST_PKT_SIZE_FMASK);
+
val |= HDR_OFST_PKT_SIZE_VALID_FMASK;
- val |= u32_encode_bits(off, HDR_OFST_PKT_SIZE_FMASK);
+ val |= u32_encode_bits(offset, HDR_OFST_PKT_SIZE_FMASK);
}
/* For QMAP TX, metadata offset is 0 (modem assumes this) */
val |= HDR_OFST_METADATA_VALID_FMASK;
@@ -514,16 +525,17 @@ static void ipa_endpoint_init_hdr(struct ipa_endpoint *endpoint)
/* HDR_ADDITIONAL_CONST_LEN is 0; (RX only) */
/* HDR_A5_MUX is 0 */
/* HDR_LEN_INC_DEAGG_HDR is 0 */
- /* HDR_METADATA_REG_VALID is 0 (TX only) */
+ /* HDR_METADATA_REG_VALID is 0 (TX only, version < v4.5) */
}
- iowrite32(val, endpoint->ipa->reg_virt + offset);
+ iowrite32(val, ipa->reg_virt + offset);
}
static void ipa_endpoint_init_hdr_ext(struct ipa_endpoint *endpoint)
{
u32 offset = IPA_REG_ENDP_INIT_HDR_EXT_N_OFFSET(endpoint->endpoint_id);
u32 pad_align = endpoint->data->rx.pad_align;
+ struct ipa *ipa = endpoint->ipa;
u32 val = 0;
val |= HDR_ENDIANNESS_FMASK; /* big endian */
@@ -545,10 +557,24 @@ static void ipa_endpoint_init_hdr_ext(struct ipa_endpoint *endpoint)
if (!endpoint->toward_ipa)
val |= u32_encode_bits(pad_align, HDR_PAD_TO_ALIGNMENT_FMASK);
- iowrite32(val, endpoint->ipa->reg_virt + offset);
+ /* IPA v4.5 adds some most-significant bits to a few fields,
+ * two of which are defined in the HDR (not HDR_EXT) register.
+ */
+ if (ipa->version == IPA_VERSION_4_5) {
+ /* HDR_TOTAL_LEN_OR_PAD_OFFSET is 0, so MSB is 0 */
+ if (endpoint->data->qmap && !endpoint->toward_ipa) {
+ u32 offset;
+
+ offset = offsetof(struct rmnet_map_header, pkt_len);
+ offset >>= hweight32(HDR_OFST_PKT_SIZE_FMASK);
+ val |= u32_encode_bits(offset,
+ HDR_OFST_PKT_SIZE_MSB_FMASK);
+ /* HDR_ADDITIONAL_CONST_LEN is 0 so MSB is 0 */
+ }
+ }
+ iowrite32(val, ipa->reg_virt + offset);
}
-
static void ipa_endpoint_init_hdr_metadata_mask(struct ipa_endpoint *endpoint)
{
u32 endpoint_id = endpoint->endpoint_id;
@@ -603,29 +629,84 @@ static u32 ipa_aggr_size_kb(u32 rx_buffer_size)
return rx_buffer_size / SZ_1K;
}
+/* Encoded values for AGGR endpoint register fields */
+static u32 aggr_byte_limit_encoded(enum ipa_version version, u32 limit)
+{
+ if (version < IPA_VERSION_4_5)
+ return u32_encode_bits(limit, aggr_byte_limit_fmask(true));
+
+ return u32_encode_bits(limit, aggr_byte_limit_fmask(false));
+}
+
+/* Encode the aggregation timer limit (microseconds) based on IPA version */
+static u32 aggr_time_limit_encoded(enum ipa_version version, u32 limit)
+{
+ u32 gran_sel;
+ u32 fmask;
+ u32 val;
+
+ if (version < IPA_VERSION_4_5) {
+ /* We set aggregation granularity in ipa_hardware_config() */
+ limit = DIV_ROUND_CLOSEST(limit, IPA_AGGR_GRANULARITY);
+
+ return u32_encode_bits(limit, aggr_time_limit_fmask(true));
+ }
+
+ /* IPA v4.5 expresses the time limit using Qtime. The AP has
+ * pulse generators 0 and 1 available, which were configured
+ * in ipa_qtime_config() to have granularity 100 usec and
+ * 1 msec, respectively. Use pulse generator 0 if possible,
+ * otherwise fall back to pulse generator 1.
+ */
+ fmask = aggr_time_limit_fmask(false);
+ val = DIV_ROUND_CLOSEST(limit, 100);
+ if (val > field_max(fmask)) {
+ /* Have to use pulse generator 1 (millisecond granularity) */
+ gran_sel = AGGR_GRAN_SEL_FMASK;
+ val = DIV_ROUND_CLOSEST(limit, 1000);
+ } else {
+ /* We can use pulse generator 0 (100 usec granularity) */
+ gran_sel = 0;
+ }
+
+ return gran_sel | u32_encode_bits(val, fmask);
+}
+
+static u32 aggr_sw_eof_active_encoded(enum ipa_version version, bool enabled)
+{
+ u32 val = enabled ? 1 : 0;
+
+ if (version < IPA_VERSION_4_5)
+ return u32_encode_bits(val, aggr_sw_eof_active_fmask(true));
+
+ return u32_encode_bits(val, aggr_sw_eof_active_fmask(false));
+}
+
static void ipa_endpoint_init_aggr(struct ipa_endpoint *endpoint)
{
u32 offset = IPA_REG_ENDP_INIT_AGGR_N_OFFSET(endpoint->endpoint_id);
+ enum ipa_version version = endpoint->ipa->version;
u32 val = 0;
if (endpoint->data->aggregation) {
if (!endpoint->toward_ipa) {
+ bool close_eof;
u32 limit;
val |= u32_encode_bits(IPA_ENABLE_AGGR, AGGR_EN_FMASK);
val |= u32_encode_bits(IPA_GENERIC, AGGR_TYPE_FMASK);
limit = ipa_aggr_size_kb(IPA_RX_BUFFER_SIZE);
- val |= u32_encode_bits(limit, AGGR_BYTE_LIMIT_FMASK);
+ val |= aggr_byte_limit_encoded(version, limit);
- limit = IPA_AGGR_TIME_LIMIT_DEFAULT;
- limit = DIV_ROUND_CLOSEST(limit, IPA_AGGR_GRANULARITY);
- val |= u32_encode_bits(limit, AGGR_TIME_LIMIT_FMASK);
+ limit = IPA_AGGR_TIME_LIMIT;
+ val |= aggr_time_limit_encoded(version, limit);
/* AGGR_PKT_LIMIT is 0 (unlimited) */
- if (endpoint->data->rx.aggr_close_eof)
- val |= AGGR_SW_EOF_ACTIVE_FMASK;
+ close_eof = endpoint->data->rx.aggr_close_eof;
+ val |= aggr_sw_eof_active_encoded(version, close_eof);
+
/* AGGR_HARD_BYTE_LIMIT_ENABLE is 0 */
} else {
val |= u32_encode_bits(IPA_ENABLE_DEAGGR,
@@ -634,6 +715,7 @@ static void ipa_endpoint_init_aggr(struct ipa_endpoint *endpoint)
/* other fields ignored */
}
/* AGGR_FORCE_CLOSE is 0 */
+ /* AGGR_GRAN_SEL is 0 for IPA v4.5 */
} else {
val |= u32_encode_bits(IPA_BYPASS_AGGR, AGGR_EN_FMASK);
/* other fields ignored */
@@ -642,12 +724,45 @@ static void ipa_endpoint_init_aggr(struct ipa_endpoint *endpoint)
iowrite32(val, endpoint->ipa->reg_virt + offset);
}
-/* The head-of-line blocking timer is defined as a tick count, where each
- * tick represents 128 cycles of the IPA core clock. Return the value
- * that should be written to that register that represents the timeout
- * period provided.
+/* Return the Qtime-based head-of-line blocking timer value that
+ * represents the given number of microseconds. The result
+ * includes both the timer value and the selected timer granularity.
*/
-static u32 ipa_reg_init_hol_block_timer_val(struct ipa *ipa, u32 microseconds)
+static u32 hol_block_timer_qtime_val(struct ipa *ipa, u32 microseconds)
+{
+ u32 gran_sel;
+ u32 val;
+
+ /* IPA v4.5 expresses time limits using Qtime. The AP has
+ * pulse generators 0 and 1 available, which were configured
+ * in ipa_qtime_config() to have granularity 100 usec and
+ * 1 msec, respectively. Use pulse generator 0 if possible,
+ * otherwise fall back to pulse generator 1.
+ */
+ val = DIV_ROUND_CLOSEST(microseconds, 100);
+ if (val > field_max(TIME_LIMIT_FMASK)) {
+ /* Have to use pulse generator 1 (millisecond granularity) */
+ gran_sel = GRAN_SEL_FMASK;
+ val = DIV_ROUND_CLOSEST(microseconds, 1000);
+ } else {
+ /* We can use pulse generator 0 (100 usec granularity) */
+ gran_sel = 0;
+ }
+
+ return gran_sel | u32_encode_bits(val, TIME_LIMIT_FMASK);
+}
+
+/* The head-of-line blocking timer is defined as a tick count. For
+ * IPA version 4.5 the tick count is based on the Qtimer, which is
+ * derived from the 19.2 MHz SoC XO clock. For older IPA versions
+ * each tick represents 128 cycles of the IPA core clock.
+ *
+ * Return the encoded value that should be written to the register,
+ * representing the timeout period provided. For IPA v4.2 this
+ * encodes a base and scale value, while for earlier versions the
+ * value is a simple tick count.
+ */
+static u32 hol_block_timer_val(struct ipa *ipa, u32 microseconds)
{
u32 width;
u32 scale;
@@ -659,14 +774,17 @@ static u32 ipa_reg_init_hol_block_timer_val(struct ipa *ipa, u32 microseconds)
if (!microseconds)
return 0; /* Nothing to compute if timer period is 0 */
+ if (ipa->version == IPA_VERSION_4_5)
+ return hol_block_timer_qtime_val(ipa, microseconds);
+
/* Use 64 bit arithmetic to avoid overflow... */
rate = ipa_clock_rate(ipa);
ticks = DIV_ROUND_CLOSEST(microseconds * rate, 128 * USEC_PER_SEC);
/* ...but we still need to fit into a 32-bit register */
WARN_ON(ticks > U32_MAX);
- /* IPA v3.5.1 just records the tick count */
- if (ipa->version == IPA_VERSION_3_5_1)
+ /* IPA v3.5.1 through v4.1 just record the tick count */
+ if (ipa->version < IPA_VERSION_4_2)
return (u32)ticks;
/* For IPA v4.2, the tick count is represented by base and
@@ -704,7 +822,7 @@ static void ipa_endpoint_init_hol_block_timer(struct ipa_endpoint *endpoint,
u32 val;
offset = IPA_REG_ENDP_INIT_HOL_BLOCK_TIMER_N_OFFSET(endpoint_id);
- val = ipa_reg_init_hol_block_timer_val(ipa, microseconds);
+ val = hol_block_timer_val(ipa, microseconds);
iowrite32(val, ipa->reg_virt + offset);
}
@@ -751,6 +869,16 @@ static void ipa_endpoint_init_deaggr(struct ipa_endpoint *endpoint)
iowrite32(val, endpoint->ipa->reg_virt + offset);
}
+static void ipa_endpoint_init_rsrc_grp(struct ipa_endpoint *endpoint)
+{
+ u32 offset = IPA_REG_ENDP_INIT_RSRC_GRP_N_OFFSET(endpoint->endpoint_id);
+ struct ipa *ipa = endpoint->ipa;
+ u32 val;
+
+ val = rsrc_grp_encoded(ipa->version, endpoint->data->resource_group);
+ iowrite32(val, ipa->reg_virt + offset);
+}
+
static void ipa_endpoint_init_seq(struct ipa_endpoint *endpoint)
{
u32 offset = IPA_REG_ENDP_INIT_SEQ_N_OFFSET(endpoint->endpoint_id);
@@ -834,9 +962,10 @@ static void ipa_endpoint_status(struct ipa_endpoint *endpoint)
val |= u32_encode_bits(status_endpoint_id,
STATUS_ENDP_FMASK);
}
- /* STATUS_LOCATION is 0 (status element precedes packet) */
- /* The next field is present for IPA v4.0 and above */
- /* STATUS_PKT_SUPPRESS_FMASK is 0 */
+ /* STATUS_LOCATION is 0, meaning status element precedes
+ * packet (not present for IPA v4.5)
+ */
+ /* STATUS_PKT_SUPPRESS_FMASK is 0 (not present for v3.5.1) */
}
iowrite32(val, ipa->reg_virt + offset);
@@ -1207,7 +1336,6 @@ static int ipa_endpoint_reset_rx_aggr(struct ipa_endpoint *endpoint)
struct gsi *gsi = &ipa->gsi;
bool suspended = false;
dma_addr_t addr;
- bool legacy;
u32 retries;
u32 len = 1;
void *virt;
@@ -1269,8 +1397,7 @@ static int ipa_endpoint_reset_rx_aggr(struct ipa_endpoint *endpoint)
* complete the channel reset sequence. Finish by suspending the
* channel again (if necessary).
*/
- legacy = ipa->version == IPA_VERSION_3_5_1;
- gsi_channel_reset(gsi, endpoint->channel_id, legacy);
+ gsi_channel_reset(gsi, endpoint->channel_id, true);
msleep(1);
@@ -1293,21 +1420,19 @@ static void ipa_endpoint_reset(struct ipa_endpoint *endpoint)
u32 channel_id = endpoint->channel_id;
struct ipa *ipa = endpoint->ipa;
bool special;
- bool legacy;
int ret = 0;
/* On IPA v3.5.1, if an RX endpoint is reset while aggregation
* is active, we need to handle things specially to recover.
* All other cases just need to reset the underlying GSI channel.
- *
- * IPA v3.5.1 enables the doorbell engine. Newer versions do not.
*/
- legacy = ipa->version == IPA_VERSION_3_5_1;
- special = !endpoint->toward_ipa && endpoint->data->aggregation;
+ special = ipa->version == IPA_VERSION_3_5_1 &&
+ !endpoint->toward_ipa &&
+ endpoint->data->aggregation;
if (special && ipa_endpoint_aggr_active(endpoint))
ret = ipa_endpoint_reset_rx_aggr(endpoint);
else
- gsi_channel_reset(&ipa->gsi, channel_id, legacy);
+ gsi_channel_reset(&ipa->gsi, channel_id, true);
if (ret)
dev_err(&ipa->pdev->dev,
@@ -1328,6 +1453,7 @@ static void ipa_endpoint_program(struct ipa_endpoint *endpoint)
ipa_endpoint_init_mode(endpoint);
ipa_endpoint_init_aggr(endpoint);
ipa_endpoint_init_deaggr(endpoint);
+ ipa_endpoint_init_rsrc_grp(endpoint);
ipa_endpoint_init_seq(endpoint);
ipa_endpoint_status(endpoint);
}
@@ -1538,8 +1664,8 @@ int ipa_endpoint_config(struct ipa *ipa)
val = ioread32(ipa->reg_virt + IPA_REG_FLAVOR_0_OFFSET);
/* Our RX is an IPA producer */
- rx_base = u32_get_bits(val, BAM_PROD_LOWEST_FMASK);
- max = rx_base + u32_get_bits(val, BAM_MAX_PROD_PIPES_FMASK);
+ rx_base = u32_get_bits(val, IPA_PROD_LOWEST_FMASK);
+ max = rx_base + u32_get_bits(val, IPA_MAX_PROD_PIPES_FMASK);
if (max > IPA_ENDPOINT_MAX) {
dev_err(dev, "too many endpoints (%u > %u)\n",
max, IPA_ENDPOINT_MAX);
@@ -1548,7 +1674,7 @@ int ipa_endpoint_config(struct ipa *ipa)
rx_mask = GENMASK(max - 1, rx_base);
/* Our TX is an IPA consumer */
- max = u32_get_bits(val, BAM_MAX_CONS_PIPES_FMASK);
+ max = u32_get_bits(val, IPA_MAX_CONS_PIPES_FMASK);
tx_mask = GENMASK(max - 1, 0);
ipa->available = rx_mask | tx_mask;
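
As a worked example of the pulse-generator selection used by both aggr_time_limit_encoded() and hol_block_timer_qtime_val(): with the 500 microsecond aggregation default, DIV_ROUND_CLOSEST(500, 100) = 5, which easily fits the timer field, so pulse generator 0 (100 usec granularity) is chosen and gran_sel stays 0. Only a period too long to express in 100 usec ticks falls back to generator 1, where the same period is instead encoded as DIV_ROUND_CLOSEST(limit, 1000) millisecond ticks.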
diff --git a/drivers/net/ipa/ipa_endpoint.h b/drivers/net/ipa/ipa_endpoint.h
index 58a245de488e..881ecc27bd6e 100644
--- a/drivers/net/ipa/ipa_endpoint.h
+++ b/drivers/net/ipa/ipa_endpoint.h
@@ -25,7 +25,7 @@ struct ipa_gsi_endpoint_data;
#define IPA_MTU ETH_DATA_LEN
enum ipa_endpoint_name {
- IPA_ENDPOINT_AP_MODEM_TX = 0,
+ IPA_ENDPOINT_AP_MODEM_TX,
IPA_ENDPOINT_MODEM_LAN_TX,
IPA_ENDPOINT_MODEM_COMMAND_TX,
IPA_ENDPOINT_AP_COMMAND_TX,
diff --git a/drivers/net/ipa/ipa_interrupt.c b/drivers/net/ipa/ipa_interrupt.c
index cc1ea28f7bc2..61dd7605bcb6 100644
--- a/drivers/net/ipa/ipa_interrupt.c
+++ b/drivers/net/ipa/ipa_interrupt.c
@@ -139,12 +139,12 @@ static void ipa_interrupt_suspend_control(struct ipa_interrupt *interrupt,
u32 val;
/* assert(mask & ipa->available); */
- val = ioread32(ipa->reg_virt + IPA_REG_SUSPEND_IRQ_EN_OFFSET);
+ val = ioread32(ipa->reg_virt + IPA_REG_IRQ_SUSPEND_EN_OFFSET);
if (enable)
val |= mask;
else
val &= ~mask;
- iowrite32(val, ipa->reg_virt + IPA_REG_SUSPEND_IRQ_EN_OFFSET);
+ iowrite32(val, ipa->reg_virt + IPA_REG_IRQ_SUSPEND_EN_OFFSET);
}
/* Enable TX_SUSPEND for an endpoint */
@@ -168,7 +168,7 @@ void ipa_interrupt_suspend_clear_all(struct ipa_interrupt *interrupt)
u32 val;
val = ioread32(ipa->reg_virt + IPA_REG_IRQ_SUSPEND_INFO_OFFSET);
- iowrite32(val, ipa->reg_virt + IPA_REG_SUSPEND_IRQ_CLR_OFFSET);
+ iowrite32(val, ipa->reg_virt + IPA_REG_IRQ_SUSPEND_CLR_OFFSET);
}
/* Simulate arrival of an IPA TX_SUSPEND interrupt */
diff --git a/drivers/net/ipa/ipa_interrupt.h b/drivers/net/ipa/ipa_interrupt.h
index 727e9c5044d1..b5d63a0cd19e 100644
--- a/drivers/net/ipa/ipa_interrupt.h
+++ b/drivers/net/ipa/ipa_interrupt.h
@@ -13,22 +13,6 @@ struct ipa;
struct ipa_interrupt;
/**
- * enum ipa_irq_id - IPA interrupt type
- * @IPA_IRQ_UC_0: Microcontroller event interrupt
- * @IPA_IRQ_UC_1: Microcontroller response interrupt
- * @IPA_IRQ_TX_SUSPEND: Data ready interrupt
- *
- * The data ready interrupt is signaled if data has arrived that is destined
- * for an AP RX endpoint whose underlying GSI channel is suspended/stopped.
- */
-enum ipa_irq_id {
- IPA_IRQ_UC_0 = 2,
- IPA_IRQ_UC_1 = 3,
- IPA_IRQ_TX_SUSPEND = 14,
- IPA_IRQ_COUNT, /* Number of interrupt types (not an index) */
-};
-
-/**
* typedef ipa_irq_handler_t - IPA interrupt handler function type
* @ipa: IPA pointer
* @irq_id: interrupt type
diff --git a/drivers/net/ipa/ipa_main.c b/drivers/net/ipa/ipa_main.c
index cd4d993b0bbb..84bb8ae92725 100644
--- a/drivers/net/ipa/ipa_main.c
+++ b/drivers/net/ipa/ipa_main.c
@@ -70,6 +70,14 @@
#define IPA_FWS_PATH "ipa_fws.mdt"
#define IPA_PAS_ID 15
+/* Shift of 19.2 MHz timestamp to achieve lower resolution timestamps */
+#define DPL_TIMESTAMP_SHIFT 14 /* ~1.172 kHz, ~853 usec per tick */
+#define TAG_TIMESTAMP_SHIFT 14
+#define NAT_TIMESTAMP_SHIFT 24 /* ~1.144 Hz, ~874 msec per tick */
+
+/* Divider for 19.2 MHz crystal oscillator clock to get common timer clock */
+#define IPA_XO_CLOCK_DIVIDER 192 /* 1 is subtracted where used */
+
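
The tick rates in the comments above follow directly from the 19.2 MHz Qtimer: a shift of 14 gives 19.2 MHz / 2^14 = 1171.875 Hz (about 853 usec per tick), and a shift of 24 gives 19.2 MHz / 2^24, roughly 1.144 Hz (about 874 msec per tick).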
/**
* ipa_suspend_handler() - Handle the suspend IPA interrupt
* @ipa: IPA pointer
@@ -111,8 +119,7 @@ int ipa_setup(struct ipa *ipa)
struct device *dev = &ipa->pdev->dev;
int ret;
- /* Setup for IPA v3.5.1 has some slight differences */
- ret = gsi_setup(&ipa->gsi, ipa->version == IPA_VERSION_3_5_1);
+ ret = gsi_setup(&ipa->gsi);
if (ret)
return ret;
@@ -231,8 +238,10 @@ static void ipa_hardware_config_comp(struct ipa *ipa)
val &= ~IPA_QMB_SELECT_CONS_EN_FMASK;
val &= ~IPA_QMB_SELECT_PROD_EN_FMASK;
val &= ~IPA_QMB_SELECT_GLOBAL_EN_FMASK;
- } else {
+ } else if (ipa->version < IPA_VERSION_4_5) {
val |= GSI_MULTI_AXI_MASTERS_DIS_FMASK;
+ } else {
+ /* For IPA v4.5 IPA_FULL_FLUSH_WAIT_RSC_CLOSE_EN is 0 */
}
val |= GSI_MULTI_INORDER_RD_DIS_FMASK;
@@ -244,31 +253,100 @@ static void ipa_hardware_config_comp(struct ipa *ipa)
/* Configure DDR and PCIe max read/write QSB values */
static void ipa_hardware_config_qsb(struct ipa *ipa)
{
+ enum ipa_version version = ipa->version;
+ u32 max0;
+ u32 max1;
u32 val;
- /* QMB_0 represents DDR; QMB_1 represents PCIe (not present in 4.2) */
+ /* QMB_0 represents DDR; QMB_1 represents PCIe */
val = u32_encode_bits(8, GEN_QMB_0_MAX_WRITES_FMASK);
- if (ipa->version == IPA_VERSION_4_2)
- val |= u32_encode_bits(0, GEN_QMB_1_MAX_WRITES_FMASK);
- else
- val |= u32_encode_bits(4, GEN_QMB_1_MAX_WRITES_FMASK);
+ switch (version) {
+ case IPA_VERSION_4_2:
+ max1 = 0; /* PCIe not present */
+ break;
+ case IPA_VERSION_4_5:
+ max1 = 8;
+ break;
+ default:
+ max1 = 4;
+ break;
+ }
+ val |= u32_encode_bits(max1, GEN_QMB_1_MAX_WRITES_FMASK);
iowrite32(val, ipa->reg_virt + IPA_REG_QSB_MAX_WRITES_OFFSET);
- if (ipa->version == IPA_VERSION_3_5_1) {
- val = u32_encode_bits(8, GEN_QMB_0_MAX_READS_FMASK);
- val |= u32_encode_bits(12, GEN_QMB_1_MAX_READS_FMASK);
- } else {
- val = u32_encode_bits(12, GEN_QMB_0_MAX_READS_FMASK);
- if (ipa->version == IPA_VERSION_4_2)
- val |= u32_encode_bits(0, GEN_QMB_1_MAX_READS_FMASK);
- else
- val |= u32_encode_bits(12, GEN_QMB_1_MAX_READS_FMASK);
+ max1 = 12;
+ switch (version) {
+ case IPA_VERSION_3_5_1:
+ max0 = 8;
+ break;
+ case IPA_VERSION_4_0:
+ case IPA_VERSION_4_1:
+ max0 = 12;
+ break;
+ case IPA_VERSION_4_2:
+ max0 = 12;
+ max1 = 0; /* PCIe not present */
+ break;
+ case IPA_VERSION_4_5:
+ max0 = 0; /* No limit (hardware maximum) */
+ break;
+ }
+ val = u32_encode_bits(max0, GEN_QMB_0_MAX_READS_FMASK);
+ val |= u32_encode_bits(max1, GEN_QMB_1_MAX_READS_FMASK);
+ if (version != IPA_VERSION_3_5_1) {
/* GEN_QMB_0_MAX_READS_BEATS is 0 */
/* GEN_QMB_1_MAX_READS_BEATS is 0 */
}
iowrite32(val, ipa->reg_virt + IPA_REG_QSB_MAX_READS_OFFSET);
}
+/* IPA uses unified Qtime starting at IPA v4.5, implementing various
+ * timestamps and timers independent of the IPA core clock rate. The
+ * Qtimer is based on a 56-bit timestamp incremented at each tick of
+ * a 19.2 MHz SoC crystal oscillator (XO clock).
+ *
+ * For IPA timestamps (tag, NAT, data path logging) a lower resolution
+ * timestamp is achieved by shifting the Qtimer timestamp value right
+ * some number of bits to produce the low-order bits of the coarser
+ * granularity timestamp.
+ *
+ * For timers, a common timer clock is derived from the XO clock using
+ * a divider (we use 192, to produce a 100kHz timer clock). From
+ * this common clock, three "pulse generators" are used to produce
+ * timer ticks at a configurable frequency. IPA timers (such as
+ * those used for aggregation or head-of-line block handling) now
+ * define their period based on one of these pulse generators.
+ */
+static void ipa_qtime_config(struct ipa *ipa)
+{
+ u32 val;
+
+ /* Timer clock divider must be disabled when we change the rate */
+ iowrite32(0, ipa->reg_virt + IPA_REG_TIMERS_XO_CLK_DIV_CFG_OFFSET);
+
+ /* Set DPL time stamp resolution to use Qtime (instead of 1 msec) */
+ val = u32_encode_bits(DPL_TIMESTAMP_SHIFT, DPL_TIMESTAMP_LSB_FMASK);
+ val |= u32_encode_bits(1, DPL_TIMESTAMP_SEL_FMASK);
+ /* Configure tag and NAT Qtime timestamp resolution as well */
+ val |= u32_encode_bits(TAG_TIMESTAMP_SHIFT, TAG_TIMESTAMP_LSB_FMASK);
+ val |= u32_encode_bits(NAT_TIMESTAMP_SHIFT, NAT_TIMESTAMP_LSB_FMASK);
+ iowrite32(val, ipa->reg_virt + IPA_REG_QTIME_TIMESTAMP_CFG_OFFSET);
+
+ /* Set granularity of pulse generators used for other timers */
+ val = u32_encode_bits(IPA_GRAN_100_US, GRAN_0_FMASK);
+ val |= u32_encode_bits(IPA_GRAN_1_MS, GRAN_1_FMASK);
+ val |= u32_encode_bits(IPA_GRAN_1_MS, GRAN_2_FMASK);
+ iowrite32(val, ipa->reg_virt + IPA_REG_TIMERS_PULSE_GRAN_CFG_OFFSET);
+
+ /* Actual divider is 1 more than value supplied here */
+ val = u32_encode_bits(IPA_XO_CLOCK_DIVIDER - 1, DIV_VALUE_FMASK);
+ iowrite32(val, ipa->reg_virt + IPA_REG_TIMERS_XO_CLK_DIV_CFG_OFFSET);
+
+ /* Divider value is set; re-enable the common timer clock divider */
+ val |= u32_encode_bits(1, DIV_ENABLE_FMASK);
+ iowrite32(val, ipa->reg_virt + IPA_REG_TIMERS_XO_CLK_DIV_CFG_OFFSET);
+}
+
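
The arithmetic here: 19.2 MHz / 192 yields the 100 kHz common timer clock, so the 100 usec and 1 msec pulse-generator granularities correspond to 10 and 100 ticks of that clock. Note also why the divider register is written three times: the divider must be disabled while its value changes, the hardware adds one to the programmed value (hence IPA_XO_CLOCK_DIVIDER - 1), and only the final write sets DIV_ENABLE_FMASK.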
static void ipa_idle_indication_cfg(struct ipa *ipa,
u32 enter_idle_debounce_thresh,
bool const_non_idle_enable)
@@ -295,7 +373,7 @@ static void ipa_idle_indication_cfg(struct ipa *ipa,
*/
static void ipa_hardware_dcd_config(struct ipa *ipa)
{
- /* Recommended values for IPA 3.5 according to IPA HPG */
+ /* Recommended values for IPA 3.5 and later according to IPA HPG */
ipa_idle_indication_cfg(ipa, 256, false);
}
@@ -311,22 +389,26 @@ static void ipa_hardware_dcd_deconfig(struct ipa *ipa)
*/
static void ipa_hardware_config(struct ipa *ipa)
{
+ enum ipa_version version = ipa->version;
u32 granularity;
u32 val;
- /* Fill in backward-compatibility register, based on version */
- val = ipa_reg_bcr_val(ipa->version);
- iowrite32(val, ipa->reg_virt + IPA_REG_BCR_OFFSET);
+ /* IPA v4.5 has no backward compatibility register */
+ if (version < IPA_VERSION_4_5) {
+ val = ipa_reg_bcr_val(version);
+ iowrite32(val, ipa->reg_virt + IPA_REG_BCR_OFFSET);
+ }
- if (ipa->version != IPA_VERSION_3_5_1) {
- /* Enable open global clocks (hardware workaround) */
+ /* Implement some hardware workarounds */
+ if (version != IPA_VERSION_3_5_1 && version < IPA_VERSION_4_5) {
+ /* Enable open global clocks (not needed for IPA v4.5) */
val = GLOBAL_FMASK;
val |= GLOBAL_2X_CLK_FMASK;
iowrite32(val, ipa->reg_virt + IPA_REG_CLKON_CFG_OFFSET);
- /* Disable PA mask to allow HOLB drop (hardware workaround) */
+ /* Disable PA mask to allow HOLB drop */
val = ioread32(ipa->reg_virt + IPA_REG_TX_CFG_OFFSET);
- val &= ~PA_MASK_EN;
+ val &= ~PA_MASK_EN_FMASK;
iowrite32(val, ipa->reg_virt + IPA_REG_TX_CFG_OFFSET);
}
@@ -335,15 +417,21 @@ static void ipa_hardware_config(struct ipa *ipa)
/* Configure system bus limits */
ipa_hardware_config_qsb(ipa);
- /* Configure aggregation granularity */
- val = ioread32(ipa->reg_virt + IPA_REG_COUNTER_CFG_OFFSET);
- granularity = ipa_aggr_granularity_val(IPA_AGGR_GRANULARITY);
- val = u32_encode_bits(granularity, AGGR_GRANULARITY);
- iowrite32(val, ipa->reg_virt + IPA_REG_COUNTER_CFG_OFFSET);
+ if (version < IPA_VERSION_4_5) {
+ /* Configure aggregation timer granularity */
+ granularity = ipa_aggr_granularity_val(IPA_AGGR_GRANULARITY);
+ val = u32_encode_bits(granularity, AGGR_GRANULARITY_FMASK);
+ iowrite32(val, ipa->reg_virt + IPA_REG_COUNTER_CFG_OFFSET);
+ } else {
+ ipa_qtime_config(ipa);
+ }
- /* Disable hashed IPv4 and IPv6 routing and filtering for IPA v4.2 */
- if (ipa->version == IPA_VERSION_4_2)
- iowrite32(0, ipa->reg_virt + IPA_REG_FILT_ROUT_HASH_EN_OFFSET);
+ /* IPA v4.2 does not support hashed tables, so disable them */
+ if (version == IPA_VERSION_4_2) {
+ u32 offset = ipa_reg_filt_rout_hash_en_offset(version);
+
+ iowrite32(0, ipa->reg_virt + offset);
+ }
/* Enable dynamic clock division */
ipa_hardware_dcd_config(ipa);
@@ -363,52 +451,41 @@ static void ipa_hardware_deconfig(struct ipa *ipa)
#ifdef IPA_VALIDATION
-/* # IPA resources used based on version (see IPA_RESOURCE_GROUP_COUNT) */
-static int ipa_resource_group_count(struct ipa *ipa)
-{
- switch (ipa->version) {
- case IPA_VERSION_3_5_1:
- return 3;
-
- case IPA_VERSION_4_0:
- case IPA_VERSION_4_1:
- return 4;
-
- case IPA_VERSION_4_2:
- return 1;
-
- default:
- return 0;
- }
-}
-
static bool ipa_resource_limits_valid(struct ipa *ipa,
const struct ipa_resource_data *data)
{
- u32 group_count = ipa_resource_group_count(ipa);
+ u32 group_count;
u32 i;
u32 j;
- if (!group_count)
+ /* We program at most 6 source or destination resource group limits */
+ BUILD_BUG_ON(IPA_RESOURCE_GROUP_SRC_MAX > 6);
+ BUILD_BUG_ON(IPA_RESOURCE_GROUP_DST_MAX > 6);
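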
+
+ group_count = ipa_resource_group_src_count(ipa->version);
+ if (!group_count || group_count > IPA_RESOURCE_GROUP_SRC_MAX)
return false;
- /* Return an error if a non-zero resource group limit is specified
- * for a resource not supported by hardware.
+ /* Return an error if a non-zero resource limit is specified
+ * for a resource group not supported by hardware.
*/
for (i = 0; i < data->resource_src_count; i++) {
const struct ipa_resource_src *resource;
resource = &data->resource_src[i];
- for (j = group_count; j < IPA_RESOURCE_GROUP_COUNT; j++)
+ for (j = group_count; j < IPA_RESOURCE_GROUP_SRC_MAX; j++)
if (resource->limits[j].min || resource->limits[j].max)
return false;
}
+ group_count = ipa_resource_group_dst_count(ipa->version);
+ if (!group_count || group_count > IPA_RESOURCE_GROUP_DST_MAX)
+ return false;
+
for (i = 0; i < data->resource_dst_count; i++) {
const struct ipa_resource_dst *resource;
resource = &data->resource_dst[i];
- for (j = group_count; j < IPA_RESOURCE_GROUP_COUNT; j++)
+ for (j = group_count; j < IPA_RESOURCE_GROUP_DST_MAX; j++)
if (resource->limits[j].min || resource->limits[j].max)
return false;
}
@@ -435,46 +512,64 @@ ipa_resource_config_common(struct ipa *ipa, u32 offset,
val = u32_encode_bits(xlimits->min, X_MIN_LIM_FMASK);
val |= u32_encode_bits(xlimits->max, X_MAX_LIM_FMASK);
- val |= u32_encode_bits(ylimits->min, Y_MIN_LIM_FMASK);
- val |= u32_encode_bits(ylimits->max, Y_MAX_LIM_FMASK);
+ if (ylimits) {
+ val |= u32_encode_bits(ylimits->min, Y_MIN_LIM_FMASK);
+ val |= u32_encode_bits(ylimits->max, Y_MAX_LIM_FMASK);
+ }
iowrite32(val, ipa->reg_virt + offset);
}
-static void ipa_resource_config_src_01(struct ipa *ipa,
- const struct ipa_resource_src *resource)
+static void ipa_resource_config_src(struct ipa *ipa,
+ const struct ipa_resource_src *resource)
{
- u32 offset = IPA_REG_SRC_RSRC_GRP_01_RSRC_TYPE_N_OFFSET(resource->type);
+ u32 group_count = ipa_resource_group_src_count(ipa->version);
+ const struct ipa_resource_limits *ylimits;
+ u32 offset;
- ipa_resource_config_common(ipa, offset,
- &resource->limits[0], &resource->limits[1]);
-}
+ offset = IPA_REG_SRC_RSRC_GRP_01_RSRC_TYPE_N_OFFSET(resource->type);
+ ylimits = group_count == 1 ? NULL : &resource->limits[1];
+ ipa_resource_config_common(ipa, offset, &resource->limits[0], ylimits);
-static void ipa_resource_config_src_23(struct ipa *ipa,
- const struct ipa_resource_src *resource)
-{
- u32 offset = IPA_REG_SRC_RSRC_GRP_23_RSRC_TYPE_N_OFFSET(resource->type);
+ if (group_count < 2)
+ return;
- ipa_resource_config_common(ipa, offset,
- &resource->limits[2], &resource->limits[3]);
-}
+ offset = IPA_REG_SRC_RSRC_GRP_23_RSRC_TYPE_N_OFFSET(resource->type);
+ ylimits = group_count == 3 ? NULL : &resource->limits[3];
+ ipa_resource_config_common(ipa, offset, &resource->limits[2], ylimits);
-static void ipa_resource_config_dst_01(struct ipa *ipa,
- const struct ipa_resource_dst *resource)
-{
- u32 offset = IPA_REG_DST_RSRC_GRP_01_RSRC_TYPE_N_OFFSET(resource->type);
+ if (group_count < 4)
+ return;
- ipa_resource_config_common(ipa, offset,
- &resource->limits[0], &resource->limits[1]);
+ offset = IPA_REG_SRC_RSRC_GRP_45_RSRC_TYPE_N_OFFSET(resource->type);
+ ylimits = group_count == 5 ? NULL : &resource->limits[5];
+ ipa_resource_config_common(ipa, offset, &resource->limits[4], ylimits);
}
-static void ipa_resource_config_dst_23(struct ipa *ipa,
- const struct ipa_resource_dst *resource)
+static void ipa_resource_config_dst(struct ipa *ipa,
+ const struct ipa_resource_dst *resource)
{
- u32 offset = IPA_REG_DST_RSRC_GRP_23_RSRC_TYPE_N_OFFSET(resource->type);
+ u32 group_count = ipa_resource_group_dst_count(ipa->version);
+ const struct ipa_resource_limits *ylimits;
+ u32 offset;
+
+ offset = IPA_REG_DST_RSRC_GRP_01_RSRC_TYPE_N_OFFSET(resource->type);
+ ylimits = group_count == 1 ? NULL : &resource->limits[1];
+ ipa_resource_config_common(ipa, offset, &resource->limits[0], ylimits);
- ipa_resource_config_common(ipa, offset,
- &resource->limits[2], &resource->limits[3]);
+ if (group_count < 2)
+ return;
+
+ offset = IPA_REG_DST_RSRC_GRP_23_RSRC_TYPE_N_OFFSET(resource->type);
+ ylimits = group_count == 3 ? NULL : &resource->limits[3];
+ ipa_resource_config_common(ipa, offset, &resource->limits[2], ylimits);
+
+ if (group_count < 4)
+ return;
+
+ offset = IPA_REG_DST_RSRC_GRP_45_RSRC_TYPE_N_OFFSET(resource->type);
+ ylimits = group_count == 5 ? NULL : &resource->limits[5];
+ ipa_resource_config_common(ipa, offset, &resource->limits[4], ylimits);
}
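The pairing above follows from the register layout: limits are packed two groups per 32-bit register (X fields for the even group, Y fields for the odd one), so an odd group count leaves the final Y half unused -- hence the NULL ylimits. A hypothetical helper, not part of the patch, that makes the mapping explicit:

	/* Register offset holding the limits for source resource group
	 * "group" of resource type "rt" (groups 0..5, two per register).
	 */
	static u32 src_rsrc_grp_limits_offset(u32 rt, u32 group)
	{
		/* 0x400 (01), 0x404 (23), 0x408 (45); per-type stride 0x20 */
		return 0x00000400 + 0x0020 * rt + 0x0004 * (group / 2);
	}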
static int
@@ -485,15 +580,11 @@ ipa_resource_config(struct ipa *ipa, const struct ipa_resource_data *data)
if (!ipa_resource_limits_valid(ipa, data))
return -EINVAL;
- for (i = 0; i < data->resource_src_count; i++) {
- ipa_resource_config_src_01(ipa, &data->resource_src[i]);
- ipa_resource_config_src_23(ipa, &data->resource_src[i]);
- }
+ for (i = 0; i < data->resource_src_count; i++)
+ ipa_resource_config_src(ipa, &data->resource_src[i]);
- for (i = 0; i < data->resource_dst_count; i++) {
- ipa_resource_config_dst_01(ipa, &data->resource_dst[i]);
- ipa_resource_config_dst_23(ipa, &data->resource_dst[i]);
- }
+ for (i = 0; i < data->resource_dst_count; i++)
+ ipa_resource_config_dst(ipa, &data->resource_dst[i]);
return 0;
}
@@ -678,16 +769,13 @@ static void ipa_validate_build(void)
*/
BUILD_BUG_ON(GSI_TLV_MAX > U8_MAX);
- /* Exceeding 128 bytes makes the transaction pool *much* larger */
- BUILD_BUG_ON(sizeof(struct gsi_trans) > 128);
-
/* This is used as a divisor */
BUILD_BUG_ON(!IPA_AGGR_GRANULARITY);
/* Aggregation granularity value can't be 0, and must fit */
BUILD_BUG_ON(!ipa_aggr_granularity_val(IPA_AGGR_GRANULARITY));
BUILD_BUG_ON(ipa_aggr_granularity_val(IPA_AGGR_GRANULARITY) >
- field_max(AGGR_GRANULARITY));
+ field_max(AGGR_GRANULARITY_FMASK));
#endif /* IPA_VALIDATE */
}
@@ -720,15 +808,21 @@ static int ipa_probe(struct platform_device *pdev)
const struct ipa_data *data;
struct ipa_clock *clock;
struct rproc *rproc;
- bool modem_alloc;
bool modem_init;
struct ipa *ipa;
- bool prefetch;
phandle ph;
int ret;
ipa_validate_build();
+ /* Get configuration data early; needed for clock initialization */
+ data = of_device_get_match_data(dev);
+ if (!data) {
+ /* This is really IPA_VALIDATE (should never happen) */
+ dev_err(dev, "matched hardware not supported\n");
+ return -ENODEV;
+ }
+
/* If we need Trust Zone, make sure it's available */
modem_init = of_property_read_bool(dev->of_node, "modem-init");
if (!modem_init)
@@ -749,22 +843,13 @@ static int ipa_probe(struct platform_device *pdev)
/* The clock and interconnects might not be ready when we're
* probed, so might return -EPROBE_DEFER.
*/
- clock = ipa_clock_init(dev);
+ clock = ipa_clock_init(dev, data->clock_data);
if (IS_ERR(clock)) {
ret = PTR_ERR(clock);
goto err_rproc_put;
}
- /* No more EPROBE_DEFER. Get our configuration data */
- data = of_device_get_match_data(dev);
- if (!data) {
- /* This is really IPA_VALIDATE (should never happen) */
- dev_err(dev, "matched hardware not supported\n");
- ret = -ENOTSUPP;
- goto err_clock_exit;
- }
-
- /* Allocate and initialize the IPA structure */
+ /* No more EPROBE_DEFER. Allocate and initialize the IPA structure */
ipa = kzalloc(sizeof(*ipa), GFP_KERNEL);
if (!ipa) {
ret = -ENOMEM;
@@ -785,17 +870,12 @@ static int ipa_probe(struct platform_device *pdev)
if (ret)
goto err_reg_exit;
- /* GSI v2.0+ (IPA v4.0+) uses prefetch for the command channel */
- prefetch = ipa->version != IPA_VERSION_3_5_1;
- /* IPA v4.2 requires the AP to allocate channels for the modem */
- modem_alloc = ipa->version == IPA_VERSION_4_2;
-
- ret = gsi_init(&ipa->gsi, pdev, prefetch, data->endpoint_count,
- data->endpoint_data, modem_alloc);
+ ret = gsi_init(&ipa->gsi, pdev, ipa->version, data->endpoint_count,
+ data->endpoint_data);
if (ret)
goto err_mem_exit;
- /* Result is a non-zero mask endpoints that support filtering */
+ /* Result is a non-zero mask of endpoints that support filtering */
ipa->filter_map = ipa_endpoint_init(ipa, data->endpoint_count,
data->endpoint_data);
if (!ipa->filter_map) {
@@ -870,6 +950,11 @@ static int ipa_remove(struct platform_device *pdev)
if (ipa->setup_complete) {
ret = ipa_modem_stop(ipa);
+ /* If starting or stopping is in progress, try once more */
+ if (ret == -EBUSY) {
+ usleep_range(USEC_PER_MSEC, 2 * USEC_PER_MSEC);
+ ret = ipa_modem_stop(ipa);
+ }
if (ret)
return ret;
@@ -890,6 +975,15 @@ static int ipa_remove(struct platform_device *pdev)
return 0;
}
+static void ipa_shutdown(struct platform_device *pdev)
+{
+ int ret;
+
+ ret = ipa_remove(pdev);
+ if (ret)
+ dev_err(&pdev->dev, "shutdown: remove returned %d\n", ret);
+}
+
/**
* ipa_suspend() - Power management system suspend callback
* @dev: IPA device structure
@@ -947,8 +1041,9 @@ static const struct dev_pm_ops ipa_pm_ops = {
};
static struct platform_driver ipa_driver = {
- .probe = ipa_probe,
- .remove = ipa_remove,
+ .probe = ipa_probe,
+ .remove = ipa_remove,
+ .shutdown = ipa_shutdown,
.driver = {
.name = "ipa",
.pm = &ipa_pm_ops,
diff --git a/drivers/net/ipa/ipa_mem.c b/drivers/net/ipa/ipa_mem.c
index 2d45c444a67f..0cc3a3374caa 100644
--- a/drivers/net/ipa/ipa_mem.c
+++ b/drivers/net/ipa/ipa_mem.c
@@ -89,7 +89,7 @@ int ipa_mem_setup(struct ipa *ipa)
gsi_trans_commit_wait(trans);
/* Tell the hardware where the processing context area is located */
- iowrite32(ipa->mem_offset + offset,
+ iowrite32(ipa->mem_offset + ipa->mem[IPA_MEM_MODEM_PROC_CTX].offset,
ipa->reg_virt + IPA_REG_LOCAL_PKT_PROC_CNTXT_BASE_OFFSET);
return 0;
@@ -160,13 +160,13 @@ int ipa_mem_config(struct ipa *ipa)
mem_size = 8 * u32_get_bits(val, SHARED_MEM_SIZE_FMASK);
/* If the sizes don't match, issue a warning */
- if (ipa->mem_offset + mem_size > ipa->mem_size) {
- dev_warn(dev, "ignoring larger reported memory size: 0x%08x\n",
- mem_size);
- } else if (ipa->mem_offset + mem_size < ipa->mem_size) {
+ if (ipa->mem_offset + mem_size < ipa->mem_size) {
dev_warn(dev, "limiting IPA memory size to 0x%08x\n",
mem_size);
ipa->mem_size = mem_size;
+ } else if (ipa->mem_offset + mem_size > ipa->mem_size) {
+ dev_dbg(dev, "ignoring larger reported memory size: 0x%08x\n",
+ mem_size);
}
/* Prealloc DMA memory for zeroing regions */
diff --git a/drivers/net/ipa/ipa_qmi.c b/drivers/net/ipa/ipa_qmi.c
index 5090f0f923ad..d2c3f273c233 100644
--- a/drivers/net/ipa/ipa_qmi.c
+++ b/drivers/net/ipa/ipa_qmi.c
@@ -168,7 +168,7 @@ static void ipa_server_bye(struct qmi_handle *qmi, unsigned int node)
ipa_qmi->indication_sent = false;
}
-static struct qmi_ops ipa_server_ops = {
+static const struct qmi_ops ipa_server_ops = {
.bye = ipa_server_bye,
};
@@ -234,7 +234,7 @@ static void ipa_server_driver_init_complete(struct qmi_handle *qmi,
}
/* The server handles two request message types sent by the modem. */
-static struct qmi_msg_handler ipa_server_msg_handlers[] = {
+static const struct qmi_msg_handler ipa_server_msg_handlers[] = {
{
.type = QMI_REQUEST,
.msg_id = IPA_QMI_INDICATION_REGISTER,
@@ -261,7 +261,7 @@ static void ipa_client_init_driver(struct qmi_handle *qmi,
}
/* The client handles one response message type sent by the modem. */
-static struct qmi_msg_handler ipa_client_msg_handlers[] = {
+static const struct qmi_msg_handler ipa_client_msg_handlers[] = {
{
.type = QMI_RESPONSE,
.msg_id = IPA_QMI_INIT_DRIVER,
@@ -463,7 +463,7 @@ ipa_client_new_server(struct qmi_handle *qmi, struct qmi_service *svc)
return 0;
}
-static struct qmi_ops ipa_client_ops = {
+static const struct qmi_ops ipa_client_ops = {
.new_server = ipa_client_new_server,
};
diff --git a/drivers/net/ipa/ipa_qmi_msg.h b/drivers/net/ipa/ipa_qmi_msg.h
index cfac456cea0c..12b6621f4b0e 100644
--- a/drivers/net/ipa/ipa_qmi_msg.h
+++ b/drivers/net/ipa/ipa_qmi_msg.h
@@ -74,12 +74,12 @@ struct ipa_init_complete_ind {
/* The AP tells the modem its platform type. We assume Android. */
enum ipa_platform_type {
- IPA_QMI_PLATFORM_TYPE_INVALID = 0, /* Invalid */
- IPA_QMI_PLATFORM_TYPE_TN = 1, /* Data card */
- IPA_QMI_PLATFORM_TYPE_LE = 2, /* Data router */
- IPA_QMI_PLATFORM_TYPE_MSM_ANDROID = 3, /* Android MSM */
- IPA_QMI_PLATFORM_TYPE_MSM_WINDOWS = 4, /* Windows MSM */
- IPA_QMI_PLATFORM_TYPE_MSM_QNX_V01 = 5, /* QNX MSM */
+ IPA_QMI_PLATFORM_TYPE_INVALID = 0x0, /* Invalid */
+ IPA_QMI_PLATFORM_TYPE_TN = 0x1, /* Data card */
+ IPA_QMI_PLATFORM_TYPE_LE = 0x2, /* Data router */
+ IPA_QMI_PLATFORM_TYPE_MSM_ANDROID = 0x3, /* Android MSM */
+ IPA_QMI_PLATFORM_TYPE_MSM_WINDOWS = 0x4, /* Windows MSM */
+ IPA_QMI_PLATFORM_TYPE_MSM_QNX_V01 = 0x5, /* QNX MSM */
};
/* This defines the start and end offset of a range of memory. Both
diff --git a/drivers/net/ipa/ipa_reg.h b/drivers/net/ipa/ipa_reg.h
index e542598fd775..e6b0827a244e 100644
--- a/drivers/net/ipa/ipa_reg.h
+++ b/drivers/net/ipa/ipa_reg.h
@@ -65,14 +65,15 @@ struct ipa;
* of valid bits for the register.
*/
-#define IPA_REG_ENABLED_PIPES_OFFSET 0x00000038
-
#define IPA_REG_COMP_CFG_OFFSET 0x0000003c
+/* The next field is not supported for IPA v4.1 */
#define ENABLE_FMASK GENMASK(0, 0)
#define GSI_SNOC_BYPASS_DIS_FMASK GENMASK(1, 1)
#define GEN_QMB_0_SNOC_BYPASS_DIS_FMASK GENMASK(2, 2)
#define GEN_QMB_1_SNOC_BYPASS_DIS_FMASK GENMASK(3, 3)
+/* The next field is not present for IPA v4.5 */
#define IPA_DCMP_FAST_CLK_EN_FMASK GENMASK(4, 4)
+/* The remaining fields are not present for IPA v3.5.1 */
#define IPA_QMB_SELECT_CONS_EN_FMASK GENMASK(5, 5)
#define IPA_QMB_SELECT_PROD_EN_FMASK GENMASK(6, 6)
#define GSI_MULTI_INORDER_RD_DIS_FMASK GENMASK(7, 7)
@@ -86,6 +87,8 @@ struct ipa;
#define GSI_MULTI_AXI_MASTERS_DIS_FMASK GENMASK(15, 15)
#define IPA_QMB_SELECT_GLOBAL_EN_FMASK GENMASK(16, 16)
#define IPA_ATOMIC_FETCHER_ARB_LOCK_DIS_FMASK GENMASK(20, 17)
+/* The next field is present for IPA v4.5 */
+#define IPA_FULL_FLUSH_WAIT_RSC_CLOSE_EN_FMASK GENMASK(21, 21)
#define IPA_REG_CLKON_CFG_OFFSET 0x00000044
#define RX_FMASK GENMASK(0, 0)
@@ -105,11 +108,13 @@ struct ipa;
#define ACK_MNGR_FMASK GENMASK(14, 14)
#define D_DCPH_FMASK GENMASK(15, 15)
#define H_DCPH_FMASK GENMASK(16, 16)
+/* The next field is not present for IPA v4.5 */
#define DCMP_FMASK GENMASK(17, 17)
#define NTF_TX_CMDQS_FMASK GENMASK(18, 18)
#define TX_0_FMASK GENMASK(19, 19)
#define TX_1_FMASK GENMASK(20, 20)
#define FNR_FMASK GENMASK(21, 21)
+/* The remaining fields are not present for IPA v3.5.1 */
#define QSB2AXI_CMDQ_L_FMASK GENMASK(22, 22)
#define AGGR_WRAPPER_FMASK GENMASK(23, 23)
#define RAM_SLAVEWAY_FMASK GENMASK(24, 24)
@@ -118,6 +123,8 @@ struct ipa;
#define GSI_IF_FMASK GENMASK(27, 27)
#define GLOBAL_FMASK GENMASK(28, 28)
#define GLOBAL_2X_CLK_FMASK GENMASK(29, 29)
+/* The next field is present for IPA v4.5 */
+#define DPL_FIFO_FMASK GENMASK(30, 30)
#define IPA_REG_ROUTE_OFFSET 0x00000048
#define ROUTE_DIS_FMASK GENMASK(0, 0)
@@ -138,25 +145,17 @@ struct ipa;
#define IPA_REG_QSB_MAX_READS_OFFSET 0x00000078
#define GEN_QMB_0_MAX_READS_FMASK GENMASK(3, 0)
#define GEN_QMB_1_MAX_READS_FMASK GENMASK(7, 4)
-/* The next two fields are present for IPA v4.0 and above */
+/* The next two fields are not present for IPA v3.5.1 */
#define GEN_QMB_0_MAX_READS_BEATS_FMASK GENMASK(23, 16)
#define GEN_QMB_1_MAX_READS_BEATS_FMASK GENMASK(31, 24)
-static inline u32 ipa_reg_state_aggr_active_offset(enum ipa_version version)
+static inline u32 ipa_reg_filt_rout_hash_en_offset(enum ipa_version version)
{
if (version == IPA_VERSION_3_5_1)
- return 0x0000010c;
+ return 0x000008c;
- return 0x000000b4;
+ return 0x0000148;
}
-/* ipa->available defines the valid bits in the STATE_AGGR_ACTIVE register */
-
-/* The next register is present for IPA v4.2 and above */
-#define IPA_REG_FILT_ROUT_HASH_EN_OFFSET 0x00000148
-#define IPV6_ROUTER_HASH_EN GENMASK(0, 0)
-#define IPV6_FILTER_HASH_EN GENMASK(4, 4)
-#define IPV4_ROUTER_HASH_EN GENMASK(8, 8)
-#define IPV4_FILTER_HASH_EN GENMASK(12, 12)
static inline u32 ipa_reg_filt_rout_hash_flush_offset(enum ipa_version version)
{
@@ -166,76 +165,108 @@ static inline u32 ipa_reg_filt_rout_hash_flush_offset(enum ipa_version version)
return 0x000014c;
}
-#define IPV6_ROUTER_HASH_FLUSH GENMASK(0, 0)
-#define IPV6_FILTER_HASH_FLUSH GENMASK(4, 4)
-#define IPV4_ROUTER_HASH_FLUSH GENMASK(8, 8)
-#define IPV4_FILTER_HASH_FLUSH GENMASK(12, 12)
+/* The next four fields are used for the hash enable and flush registers */
+#define IPV6_ROUTER_HASH_FMASK GENMASK(0, 0)
+#define IPV6_FILTER_HASH_FMASK GENMASK(4, 4)
+#define IPV4_ROUTER_HASH_FMASK GENMASK(8, 8)
+#define IPV4_FILTER_HASH_FMASK GENMASK(12, 12)
+
+/* ipa->available defines the valid bits in the STATE_AGGR_ACTIVE register */
+static inline u32 ipa_reg_state_aggr_active_offset(enum ipa_version version)
+{
+ if (version == IPA_VERSION_3_5_1)
+ return 0x0000010c;
+
+ return 0x000000b4;
+}
+/* The next register is not present for IPA v4.5 */
#define IPA_REG_BCR_OFFSET 0x000001d0
-#define BCR_CMDQ_L_LACK_ONE_ENTRY BIT(0)
-#define BCR_TX_NOT_USING_BRESP BIT(1)
-#define BCR_SUSPEND_L2_IRQ BIT(3)
-#define BCR_HOLB_DROP_L2_IRQ BIT(4)
-#define BCR_DUAL_TX BIT(5)
+/* The next two fields are not present for IPA v4.2 */
+#define BCR_CMDQ_L_LACK_ONE_ENTRY_FMASK GENMASK(0, 0)
+#define BCR_TX_NOT_USING_BRESP_FMASK GENMASK(1, 1)
+/* The next field is invalid for IPA v4.1 */
+#define BCR_TX_SUSPEND_IRQ_ASSERT_ONCE_FMASK GENMASK(2, 2)
+/* The next two fields are not present for IPA v4.2 */
+#define BCR_SUSPEND_L2_IRQ_FMASK GENMASK(3, 3)
+#define BCR_HOLB_DROP_L2_IRQ_FMASK GENMASK(4, 4)
+#define BCR_DUAL_TX_FMASK GENMASK(5, 5)
+#define BCR_ENABLE_FILTER_DATA_CACHE_FMASK GENMASK(6, 6)
+#define BCR_NOTIF_PRIORITY_OVER_ZLT_FMASK GENMASK(7, 7)
+#define BCR_FILTER_PREFETCH_EN_FMASK GENMASK(8, 8)
+#define BCR_ROUTER_PREFETCH_EN_FMASK GENMASK(9, 9)
/* Backward compatibility register value to use for each version */
static inline u32 ipa_reg_bcr_val(enum ipa_version version)
{
if (version == IPA_VERSION_3_5_1)
- return BCR_CMDQ_L_LACK_ONE_ENTRY | BCR_TX_NOT_USING_BRESP |
- BCR_SUSPEND_L2_IRQ | BCR_HOLB_DROP_L2_IRQ | BCR_DUAL_TX;
+ return BCR_CMDQ_L_LACK_ONE_ENTRY_FMASK |
+ BCR_TX_NOT_USING_BRESP_FMASK |
+ BCR_SUSPEND_L2_IRQ_FMASK |
+ BCR_HOLB_DROP_L2_IRQ_FMASK |
+ BCR_DUAL_TX_FMASK;
if (version == IPA_VERSION_4_0 || version == IPA_VERSION_4_1)
- return BCR_CMDQ_L_LACK_ONE_ENTRY | BCR_SUSPEND_L2_IRQ |
- BCR_HOLB_DROP_L2_IRQ | BCR_DUAL_TX;
+ return BCR_CMDQ_L_LACK_ONE_ENTRY_FMASK |
+ BCR_SUSPEND_L2_IRQ_FMASK |
+ BCR_HOLB_DROP_L2_IRQ_FMASK |
+ BCR_DUAL_TX_FMASK;
+
+ /* assert(version != IPA_VERSION_4_5); */
return 0x00000000;
}
+/* The value of the next register must be a multiple of 8 */
#define IPA_REG_LOCAL_PKT_PROC_CNTXT_BASE_OFFSET 0x000001e8
-#define IPA_REG_AGGR_FORCE_CLOSE_OFFSET 0x000001ec
/* ipa->available defines the valid bits in the AGGR_FORCE_CLOSE register */
+#define IPA_REG_AGGR_FORCE_CLOSE_OFFSET 0x000001ec
+
+/* The next register is not present for IPA v4.5 */
+#define IPA_REG_COUNTER_CFG_OFFSET 0x000001f0
+#define AGGR_GRANULARITY_FMASK GENMASK(8, 4)
/* The internal inactivity timer clock is used for the aggregation timer */
-#define TIMER_FREQUENCY 32000 /* 32 KHz inactivity timer clock */
+#define TIMER_FREQUENCY 32000 /* 32 KHz inactivity timer clock */
-#define IPA_REG_COUNTER_CFG_OFFSET 0x000001f0
-#define AGGR_GRANULARITY GENMASK(8, 4)
/* Compute the value to use in the AGGR_GRANULARITY field representing the
* given number of microseconds. The value is one less than the number of
- * timer ticks in the requested period. Zero not a valid granularity value.
+ * timer ticks in the requested period. 0 is not a valid granularity value.
*/
static inline u32 ipa_aggr_granularity_val(u32 usec)
{
return DIV_ROUND_CLOSEST(usec * TIMER_FREQUENCY, USEC_PER_SEC) - 1;
}
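A worked example, assuming the IPA_AGGR_GRANULARITY of 500 microseconds used elsewhere in the driver: 500 us at the 32 KHz timer is 500 * 32000 / 1000000 = 16 ticks, so the encoded field value is one less:

	u32 granularity = ipa_aggr_granularity_val(500);	/* == 15 */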
+/* The next register is not present for IPA v4.5 */
#define IPA_REG_TX_CFG_OFFSET 0x000001fc
/* The first three fields are present for IPA v3.5.1 only */
-#define TX0_PREFETCH_DISABLE GENMASK(0, 0)
-#define TX1_PREFETCH_DISABLE GENMASK(1, 1)
-#define PREFETCH_ALMOST_EMPTY_SIZE GENMASK(4, 2)
-/* The next fields are present for IPA v4.0 and above */
-#define PREFETCH_ALMOST_EMPTY_SIZE_TX0 GENMASK(5, 2)
-#define DMAW_SCND_OUTSD_PRED_THRESHOLD GENMASK(9, 6)
-#define DMAW_SCND_OUTSD_PRED_EN GENMASK(10, 10)
-#define DMAW_MAX_BEATS_256_DIS GENMASK(11, 11)
-#define PA_MASK_EN GENMASK(12, 12)
-#define PREFETCH_ALMOST_EMPTY_SIZE_TX1 GENMASK(16, 13)
-/* The last two fields are present for IPA v4.2 and above */
-#define SSPND_PA_NO_START_STATE GENMASK(18, 18)
-#define SSPND_PA_NO_BQ_STATE GENMASK(19, 19)
+#define TX0_PREFETCH_DISABLE_FMASK GENMASK(0, 0)
+#define TX1_PREFETCH_DISABLE_FMASK GENMASK(1, 1)
+#define PREFETCH_ALMOST_EMPTY_SIZE_FMASK GENMASK(4, 2)
+/* The next six fields are present for IPA v4.0 and above */
+#define PREFETCH_ALMOST_EMPTY_SIZE_TX0_FMASK GENMASK(5, 2)
+#define DMAW_SCND_OUTSD_PRED_THRESHOLD_FMASK GENMASK(9, 6)
+#define DMAW_SCND_OUTSD_PRED_EN_FMASK GENMASK(10, 10)
+#define DMAW_MAX_BEATS_256_DIS_FMASK GENMASK(11, 11)
+#define PA_MASK_EN_FMASK GENMASK(12, 12)
+#define PREFETCH_ALMOST_EMPTY_SIZE_TX1_FMASK GENMASK(16, 13)
+/* The next field is present for IPA v4.5 */
+#define DUAL_TX_ENABLE_FMASK GENMASK(17, 17)
+/* The next two fields are present for IPA v4.2 only */
+#define SSPND_PA_NO_START_STATE_FMASK GENMASK(18, 18)
+#define SSPND_PA_NO_BQ_STATE_FMASK GENMASK(19, 19)
#define IPA_REG_FLAVOR_0_OFFSET 0x00000210
-#define BAM_MAX_PIPES_FMASK GENMASK(4, 0)
-#define BAM_MAX_CONS_PIPES_FMASK GENMASK(12, 8)
-#define BAM_MAX_PROD_PIPES_FMASK GENMASK(20, 16)
-#define BAM_PROD_LOWEST_FMASK GENMASK(27, 24)
+#define IPA_MAX_PIPES_FMASK GENMASK(3, 0)
+#define IPA_MAX_CONS_PIPES_FMASK GENMASK(12, 8)
+#define IPA_MAX_PROD_PIPES_FMASK GENMASK(20, 16)
+#define IPA_PROD_LOWEST_FMASK GENMASK(27, 24)
static inline u32 ipa_reg_idle_indication_cfg_offset(enum ipa_version version)
{
- if (version == IPA_VERSION_4_2)
+ if (version >= IPA_VERSION_4_2)
return 0x00000240;
return 0x00000220;
@@ -244,25 +275,102 @@ static inline u32 ipa_reg_idle_indication_cfg_offset(enum ipa_version version)
#define ENTER_IDLE_DEBOUNCE_THRESH_FMASK GENMASK(15, 0)
#define CONST_NON_IDLE_ENABLE_FMASK GENMASK(16, 16)
+/* The next register is present for IPA v4.5 */
+#define IPA_REG_QTIME_TIMESTAMP_CFG_OFFSET 0x0000024c
+#define DPL_TIMESTAMP_LSB_FMASK GENMASK(4, 0)
+#define DPL_TIMESTAMP_SEL_FMASK GENMASK(7, 7)
+#define TAG_TIMESTAMP_LSB_FMASK GENMASK(12, 8)
+#define NAT_TIMESTAMP_LSB_FMASK GENMASK(20, 16)
+
+/* The next register is present for IPA v4.5 */
+#define IPA_REG_TIMERS_XO_CLK_DIV_CFG_OFFSET 0x00000250
+#define DIV_VALUE_FMASK GENMASK(8, 0)
+#define DIV_ENABLE_FMASK GENMASK(31, 31)
+
+/* The next register is present for IPA v4.5 */
+#define IPA_REG_TIMERS_PULSE_GRAN_CFG_OFFSET 0x00000254
+#define GRAN_0_FMASK GENMASK(2, 0)
+#define GRAN_1_FMASK GENMASK(5, 3)
+#define GRAN_2_FMASK GENMASK(8, 6)
+/* Values for GRAN_x fields of TIMERS_PULSE_GRAN_CFG */
+enum ipa_pulse_gran {
+ IPA_GRAN_10_US = 0x0,
+ IPA_GRAN_20_US = 0x1,
+ IPA_GRAN_50_US = 0x2,
+ IPA_GRAN_100_US = 0x3,
+ IPA_GRAN_1_MS = 0x4,
+ IPA_GRAN_10_MS = 0x5,
+ IPA_GRAN_100_MS = 0x6,
+ IPA_GRAN_655350_US = 0x7,
+};
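A hypothetical lookup, not part of the patch, spelling out the tick period each granularity selects; IPA_GRAN_655350_US is 65535 * 10 us, the largest period expressible:

	static const u32 ipa_gran_usec[] = {
		[IPA_GRAN_10_US]	= 10,
		[IPA_GRAN_20_US]	= 20,
		[IPA_GRAN_50_US]	= 50,
		[IPA_GRAN_100_US]	= 100,
		[IPA_GRAN_1_MS]		= 1000,
		[IPA_GRAN_10_MS]	= 10000,
		[IPA_GRAN_100_MS]	= 100000,
		[IPA_GRAN_655350_US]	= 655350,
	};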
+
+/* # IPA source resource groups available based on version */
+static inline u32 ipa_resource_group_src_count(enum ipa_version version)
+{
+ switch (version) {
+ case IPA_VERSION_3_5_1:
+ case IPA_VERSION_4_0:
+ case IPA_VERSION_4_1:
+ return 4;
+
+ case IPA_VERSION_4_2:
+ return 1;
+
+ case IPA_VERSION_4_5:
+ return 5;
+
+ default:
+ return 0;
+ }
+}
+
+/* # IPA destination resource groups available based on version */
+static inline u32 ipa_resource_group_dst_count(enum ipa_version version)
+{
+ switch (version) {
+ case IPA_VERSION_3_5_1:
+ return 3;
+
+ case IPA_VERSION_4_0:
+ case IPA_VERSION_4_1:
+ return 4;
+
+ case IPA_VERSION_4_2:
+ return 1;
+
+ case IPA_VERSION_4_5:
+ return 5;
+
+ default:
+ return 0;
+ }
+}
+
+/* Not all of the following are valid (depends on the count, above) */
#define IPA_REG_SRC_RSRC_GRP_01_RSRC_TYPE_N_OFFSET(rt) \
(0x00000400 + 0x0020 * (rt))
#define IPA_REG_SRC_RSRC_GRP_23_RSRC_TYPE_N_OFFSET(rt) \
(0x00000404 + 0x0020 * (rt))
+/* The next register is only present for IPA v4.5 */
#define IPA_REG_SRC_RSRC_GRP_45_RSRC_TYPE_N_OFFSET(rt) \
(0x00000408 + 0x0020 * (rt))
#define IPA_REG_DST_RSRC_GRP_01_RSRC_TYPE_N_OFFSET(rt) \
(0x00000500 + 0x0020 * (rt))
#define IPA_REG_DST_RSRC_GRP_23_RSRC_TYPE_N_OFFSET(rt) \
(0x00000504 + 0x0020 * (rt))
+/* The next register is only present for IPA v4.5 */
#define IPA_REG_DST_RSRC_GRP_45_RSRC_TYPE_N_OFFSET(rt) \
(0x00000508 + 0x0020 * (rt))
+/* The next four fields are used for all resource group registers */
#define X_MIN_LIM_FMASK GENMASK(5, 0)
#define X_MAX_LIM_FMASK GENMASK(13, 8)
+/* The next two fields are not always present (if resource count is odd) */
#define Y_MIN_LIM_FMASK GENMASK(21, 16)
#define Y_MAX_LIM_FMASK GENMASK(29, 24)
#define IPA_REG_ENDP_INIT_CTRL_N_OFFSET(ep) \
(0x00000800 + 0x0070 * (ep))
+/* The next field should only be used for IPA v3.5.1 */
#define ENDP_SUSPEND_FMASK GENMASK(0, 0)
#define ENDP_DELAY_FMASK GENMASK(1, 1)
@@ -273,6 +381,13 @@ static inline u32 ipa_reg_idle_indication_cfg_offset(enum ipa_version version)
#define CS_METADATA_HDR_OFFSET_FMASK GENMASK(6, 3)
#define CS_GEN_QMB_MASTER_SEL_FMASK GENMASK(8, 8)
+/** enum ipa_cs_offload_en - checksum offload field in ENDP_INIT_CFG_N */
+enum ipa_cs_offload_en {
+ IPA_CS_OFFLOAD_NONE = 0x0,
+ IPA_CS_OFFLOAD_UL = 0x1,
+ IPA_CS_OFFLOAD_DL = 0x2,
+};
+
#define IPA_REG_ENDP_INIT_HDR_N_OFFSET(ep) \
(0x00000810 + 0x0070 * (ep))
#define HDR_LEN_FMASK GENMASK(5, 0)
@@ -283,7 +398,45 @@ static inline u32 ipa_reg_idle_indication_cfg_offset(enum ipa_version version)
#define HDR_OFST_PKT_SIZE_FMASK GENMASK(25, 20)
#define HDR_A5_MUX_FMASK GENMASK(26, 26)
#define HDR_LEN_INC_DEAGG_HDR_FMASK GENMASK(27, 27)
+/* The next field is not present for IPA v4.5 */
#define HDR_METADATA_REG_VALID_FMASK GENMASK(28, 28)
+/* The next two fields are present for IPA v4.5 */
+#define HDR_LEN_MSB_FMASK GENMASK(29, 28)
+#define HDR_OFST_METADATA_MSB_FMASK GENMASK(31, 30)
+
+/* Encoded value for ENDP_INIT_HDR register HDR_LEN* field(s) */
+static inline u32 ipa_header_size_encoded(enum ipa_version version,
+ u32 header_size)
+{
+ u32 val;
+
+ val = u32_encode_bits(header_size, HDR_LEN_FMASK);
+ if (version < IPA_VERSION_4_5)
+ return val;
+
+ /* IPA v4.5 adds a few more most-significant bits */
+ header_size >>= hweight32(HDR_LEN_FMASK);
+ val |= u32_encode_bits(header_size, HDR_LEN_MSB_FMASK);
+
+ return val;
+}
+
+/* Encoded value for ENDP_INIT_HDR register OFST_METADATA* field(s) */
+static inline u32 ipa_metadata_offset_encoded(enum ipa_version version,
+ u32 offset)
+{
+ u32 val;
+
+ val = u32_encode_bits(offset, HDR_OFST_METADATA_FMASK);
+ if (version < IPA_VERSION_4_5)
+ return val;
+
+ /* IPA v4.5 adds a few more most-significant bits */
+ offset >>= hweight32(HDR_OFST_METADATA_FMASK);
+ val |= u32_encode_bits(offset, HDR_OFST_METADATA_MSB_FMASK);
+
+ return val;
+}
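A worked example of the MSB split, using assumed values: encoding a 71-byte (0x47) header on IPA v4.5 puts the low six bits (0x07) in HDR_LEN, and 0x47 >> hweight32(HDR_LEN_FMASK) = 0x47 >> 6 = 0x1 in HDR_LEN_MSB, so header sizes up to 255 bytes fit:

	u32 val = ipa_header_size_encoded(IPA_VERSION_4_5, 0x47);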
#define IPA_REG_ENDP_INIT_HDR_EXT_N_OFFSET(ep) \
(0x00000814 + 0x0070 * (ep))
@@ -293,6 +446,10 @@ static inline u32 ipa_reg_idle_indication_cfg_offset(enum ipa_version version)
#define HDR_PAYLOAD_LEN_INC_PADDING_FMASK GENMASK(3, 3)
#define HDR_TOTAL_LEN_OR_PAD_OFFSET_FMASK GENMASK(9, 4)
#define HDR_PAD_TO_ALIGNMENT_FMASK GENMASK(13, 10)
+/* The next three fields are present for IPA v4.5 */
+#define HDR_TOTAL_LEN_OR_PAD_OFFSET_MSB_FMASK GENMASK(17, 16)
+#define HDR_OFST_PKT_SIZE_MSB_FMASK GENMASK(19, 18)
+#define HDR_ADDITIONAL_CONST_LEN_MSB_FMASK GENMASK(21, 20)
/* Valid only for RX (IPA producer) endpoints */
#define IPA_REG_ENDP_INIT_HDR_METADATA_MASK_N_OFFSET(rxep) \
@@ -302,22 +459,77 @@ static inline u32 ipa_reg_idle_indication_cfg_offset(enum ipa_version version)
#define IPA_REG_ENDP_INIT_MODE_N_OFFSET(txep) \
(0x00000820 + 0x0070 * (txep))
#define MODE_FMASK GENMASK(2, 0)
+/* The next field is present for IPA v4.5 */
+#define DCPH_ENABLE_FMASK GENMASK(3, 3)
#define DEST_PIPE_INDEX_FMASK GENMASK(8, 4)
#define BYTE_THRESHOLD_FMASK GENMASK(27, 12)
#define PIPE_REPLICATION_EN_FMASK GENMASK(28, 28)
#define PAD_EN_FMASK GENMASK(29, 29)
+/* The next field is not present for IPA v4.5 */
#define HDR_FTCH_DISABLE_FMASK GENMASK(30, 30)
+/** enum ipa_mode - mode field in ENDP_INIT_MODE_N */
+enum ipa_mode {
+ IPA_BASIC = 0x0,
+ IPA_ENABLE_FRAMING_HDLC = 0x1,
+ IPA_ENABLE_DEFRAMING_HDLC = 0x2,
+ IPA_DMA = 0x3,
+};
+
#define IPA_REG_ENDP_INIT_AGGR_N_OFFSET(ep) \
(0x00000824 + 0x0070 * (ep))
#define AGGR_EN_FMASK GENMASK(1, 0)
#define AGGR_TYPE_FMASK GENMASK(4, 2)
-#define AGGR_BYTE_LIMIT_FMASK GENMASK(9, 5)
-#define AGGR_TIME_LIMIT_FMASK GENMASK(14, 10)
-#define AGGR_PKT_LIMIT_FMASK GENMASK(20, 15)
-#define AGGR_SW_EOF_ACTIVE_FMASK GENMASK(21, 21)
-#define AGGR_FORCE_CLOSE_FMASK GENMASK(22, 22)
-#define AGGR_HARD_BYTE_LIMIT_ENABLE_FMASK GENMASK(24, 24)
+static inline u32 aggr_byte_limit_fmask(bool legacy)
+{
+ return legacy ? GENMASK(9, 5) : GENMASK(10, 5);
+}
+
+static inline u32 aggr_time_limit_fmask(bool legacy)
+{
+ return legacy ? GENMASK(14, 10) : GENMASK(16, 12);
+}
+
+static inline u32 aggr_pkt_limit_fmask(bool legacy)
+{
+ return legacy ? GENMASK(20, 15) : GENMASK(22, 17);
+}
+
+static inline u32 aggr_sw_eof_active_fmask(bool legacy)
+{
+ return legacy ? GENMASK(21, 21) : GENMASK(23, 23);
+}
+
+static inline u32 aggr_force_close_fmask(bool legacy)
+{
+ return legacy ? GENMASK(22, 22) : GENMASK(24, 24);
+}
+
+static inline u32 aggr_hard_byte_limit_enable_fmask(bool legacy)
+{
+ return legacy ? GENMASK(24, 24) : GENMASK(26, 26);
+}
+
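A sketch of expected caller usage; deriving "legacy" from the version test is an assumption, not shown in this hunk:

	/* Hypothetical caller: the field positions moved in IPA v4.5, so
	 * callers presumably select the mask by hardware version.
	 */
	static u32 aggr_byte_limit_encoded(enum ipa_version version, u32 limit)
	{
		bool legacy = version < IPA_VERSION_4_5;

		return u32_encode_bits(limit, aggr_byte_limit_fmask(legacy));
	}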
+/* The next field is present for IPA v4.5 */
+#define AGGR_GRAN_SEL_FMASK GENMASK(27, 27)
+
+/** enum ipa_aggr_en - aggregation enable field in ENDP_INIT_AGGR_N */
+enum ipa_aggr_en {
+ IPA_BYPASS_AGGR = 0x0,
+ IPA_ENABLE_AGGR = 0x1,
+ IPA_ENABLE_DEAGGR = 0x2,
+};
+
+/** enum ipa_aggr_type - aggregation type field in ENDP_INIT_AGGR_N */
+enum ipa_aggr_type {
+ IPA_MBIM_16 = 0x0,
+ IPA_HDLC = 0x1,
+ IPA_TLP = 0x2,
+ IPA_RNDIS = 0x3,
+ IPA_GENERIC = 0x4,
+ IPA_COALESCE = 0x5,
+ IPA_QCMAP = 0x6,
+};
/* Valid only for RX (IPA producer) endpoints */
#define IPA_REG_ENDP_INIT_HOL_BLOCK_EN_N_OFFSET(rxep) \
@@ -327,21 +539,37 @@ static inline u32 ipa_reg_idle_indication_cfg_offset(enum ipa_version version)
/* Valid only for RX (IPA producer) endpoints */
#define IPA_REG_ENDP_INIT_HOL_BLOCK_TIMER_N_OFFSET(rxep) \
(0x00000830 + 0x0070 * (rxep))
-/* The next fields are present for IPA v4.2 only */
+/* The next two fields are present for IPA v4.2 only */
#define BASE_VALUE_FMASK GENMASK(4, 0)
#define SCALE_FMASK GENMASK(12, 8)
+/* The next two fields are present for IPA v4.5 */
+#define TIME_LIMIT_FMASK GENMASK(4, 0)
+#define GRAN_SEL_FMASK GENMASK(8, 8)
/* Valid only for TX (IPA consumer) endpoints */
#define IPA_REG_ENDP_INIT_DEAGGR_N_OFFSET(txep) \
(0x00000834 + 0x0070 * (txep))
#define DEAGGR_HDR_LEN_FMASK GENMASK(5, 0)
+#define SYSPIPE_ERR_DETECTION_FMASK GENMASK(6, 6)
#define PACKET_OFFSET_VALID_FMASK GENMASK(7, 7)
#define PACKET_OFFSET_LOCATION_FMASK GENMASK(13, 8)
+#define IGNORE_MIN_PKT_ERR_FMASK GENMASK(14, 14)
#define MAX_PACKET_LEN_FMASK GENMASK(31, 16)
#define IPA_REG_ENDP_INIT_RSRC_GRP_N_OFFSET(ep) \
(0x00000838 + 0x0070 * (ep))
-#define RSRC_GRP_FMASK GENMASK(1, 0)
+/* Encoded value for ENDP_INIT_RSRC_GRP register RSRC_GRP field */
+static inline u32 rsrc_grp_encoded(enum ipa_version version, u32 rsrc_grp)
+{
+ switch (version) {
+ case IPA_VERSION_4_2:
+ return u32_encode_bits(rsrc_grp, GENMASK(0, 0));
+ case IPA_VERSION_4_5:
+ return u32_encode_bits(rsrc_grp, GENMASK(2, 0));
+ default:
+ return u32_encode_bits(rsrc_grp, GENMASK(1, 0));
+ }
+}
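For example, resource group 5 is only encodable on IPA v4.5, whose RSRC_GRP field is three bits wide:

	u32 val = rsrc_grp_encoded(IPA_VERSION_4_5, 5);	/* bits 2:0 = 0x5 */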
/* Valid only for TX (IPA consumer) endpoints */
#define IPA_REG_ENDP_INIT_SEQ_N_OFFSET(txep) \
@@ -351,15 +579,35 @@ static inline u32 ipa_reg_idle_indication_cfg_offset(enum ipa_version version)
#define HPS_REP_SEQ_TYPE_FMASK GENMASK(11, 8)
#define DPS_REP_SEQ_TYPE_FMASK GENMASK(15, 12)
+/**
+ * enum ipa_seq_type - HPS and DPS sequencer type fields in ENDP_INIT_SEQ_N
+ * @IPA_SEQ_DMA_ONLY: only DMA is performed
+ * @IPA_SEQ_2ND_PKT_PROCESS_PASS_NO_DEC_UCP:
+ * second packet processing pass + no decipher + microcontroller
+ * @IPA_SEQ_PKT_PROCESS_NO_DEC_NO_UCP_DMAP:
+ * packet processing + no decipher + no uCP + HPS REP DMA parser
+ * @IPA_SEQ_INVALID: invalid sequencer type
+ *
+ * The values defined here are broken into 4-bit nibbles that are written
+ * into fields of the ENDP_INIT_SEQ registers.
+ */
+enum ipa_seq_type {
+ IPA_SEQ_DMA_ONLY = 0x0000,
+ IPA_SEQ_2ND_PKT_PROCESS_PASS_NO_DEC_UCP = 0x0004,
+ IPA_SEQ_PKT_PROCESS_NO_DEC_NO_UCP_DMAP = 0x0806,
+ IPA_SEQ_INVALID = 0xffff,
+};
+
#define IPA_REG_ENDP_STATUS_N_OFFSET(ep) \
(0x00000840 + 0x0070 * (ep))
#define STATUS_EN_FMASK GENMASK(0, 0)
#define STATUS_ENDP_FMASK GENMASK(5, 1)
+/* The next field is not present for IPA v4.5 */
#define STATUS_LOCATION_FMASK GENMASK(8, 8)
-/* The next field is present for IPA v4.0 and above */
+/* The next field is not present for IPA v3.5.1 */
#define STATUS_PKT_SUPPRESS_FMASK GENMASK(9, 9)
-/* "er" is either an endpoint ID (for filters) or a route ID (for routes) */
+/* The next register is only present for IPA versions that support hashing */
#define IPA_REG_ENDP_FILTER_ROUTER_HSH_CFG_N_OFFSET(er) \
(0x0000085c + 0x0070 * (er))
#define FILTER_HASH_MSK_SRC_ID_FMASK GENMASK(0, 0)
@@ -394,89 +642,69 @@ static inline u32 ipa_reg_idle_indication_cfg_offset(enum ipa_version version)
IPA_REG_IRQ_CLR_EE_N_OFFSET(GSI_EE_AP)
#define IPA_REG_IRQ_CLR_EE_N_OFFSET(ee) \
(0x00003010 + 0x1000 * (ee))
+/**
+ * enum ipa_irq_id - Bit positions representing type of IPA IRQ
+ * @IPA_IRQ_UC_0: Microcontroller event interrupt
+ * @IPA_IRQ_UC_1: Microcontroller response interrupt
+ * @IPA_IRQ_TX_SUSPEND: Data ready interrupt
+ *
+ * IRQ types not described above are not currently used.
+ */
+enum ipa_irq_id {
+ IPA_IRQ_BAD_SNOC_ACCESS = 0x0,
+ /* Type (bit) 0x1 is not defined */
+ IPA_IRQ_UC_0 = 0x2,
+ IPA_IRQ_UC_1 = 0x3,
+ IPA_IRQ_UC_2 = 0x4,
+ IPA_IRQ_UC_3 = 0x5,
+ IPA_IRQ_UC_IN_Q_NOT_EMPTY = 0x6,
+ IPA_IRQ_UC_RX_CMD_Q_NOT_FULL = 0x7,
+ IPA_IRQ_PROC_UC_ACK_Q_NOT_EMPTY = 0x8,
+ IPA_IRQ_RX_ERR = 0x9,
+ IPA_IRQ_DEAGGR_ERR = 0xa,
+ IPA_IRQ_TX_ERR = 0xb,
+ IPA_IRQ_STEP_MODE = 0xc,
+ IPA_IRQ_PROC_ERR = 0xd,
+ IPA_IRQ_TX_SUSPEND = 0xe,
+ IPA_IRQ_TX_HOLB_DROP = 0xf,
+ IPA_IRQ_BAM_GSI_IDLE = 0x10,
+ IPA_IRQ_PIPE_YELLOW_BELOW = 0x11,
+ IPA_IRQ_PIPE_RED_BELOW = 0x12,
+ IPA_IRQ_PIPE_YELLOW_ABOVE = 0x13,
+ IPA_IRQ_PIPE_RED_ABOVE = 0x14,
+ IPA_IRQ_UCP = 0x15,
+ IPA_IRQ_DCMP = 0x16,
+ IPA_IRQ_GSI_EE = 0x17,
+ IPA_IRQ_GSI_IPA_IF_TLV_RCVD = 0x18,
+ IPA_IRQ_GSI_UC = 0x19,
+ /* The next bit is present for IPA v4.5 */
+ IPA_IRQ_TLV_LEN_MIN_DSM = 0x1a,
+ IPA_IRQ_COUNT, /* Last; not an id */
+};
#define IPA_REG_IRQ_UC_OFFSET \
IPA_REG_IRQ_UC_EE_N_OFFSET(GSI_EE_AP)
#define IPA_REG_IRQ_UC_EE_N_OFFSET(ee) \
(0x0000301c + 0x1000 * (ee))
+#define UC_INTR_FMASK GENMASK(0, 0)
+/* ipa->available defines the valid bits in the SUSPEND_INFO register */
#define IPA_REG_IRQ_SUSPEND_INFO_OFFSET \
IPA_REG_IRQ_SUSPEND_INFO_EE_N_OFFSET(GSI_EE_AP)
#define IPA_REG_IRQ_SUSPEND_INFO_EE_N_OFFSET(ee) \
(0x00003030 + 0x1000 * (ee))
-/* ipa->available defines the valid bits in the SUSPEND_INFO register */
-#define IPA_REG_SUSPEND_IRQ_EN_OFFSET \
- IPA_REG_SUSPEND_IRQ_EN_EE_N_OFFSET(GSI_EE_AP)
-#define IPA_REG_SUSPEND_IRQ_EN_EE_N_OFFSET(ee) \
+/* ipa->available defines the valid bits in the IRQ_SUSPEND_EN register */
+#define IPA_REG_IRQ_SUSPEND_EN_OFFSET \
+ IPA_REG_IRQ_SUSPEND_EN_EE_N_OFFSET(GSI_EE_AP)
+#define IPA_REG_IRQ_SUSPEND_EN_EE_N_OFFSET(ee) \
(0x00003034 + 0x1000 * (ee))
-/* ipa->available defines the valid bits in the SUSPEND_IRQ_EN register */
-#define IPA_REG_SUSPEND_IRQ_CLR_OFFSET \
- IPA_REG_SUSPEND_IRQ_CLR_EE_N_OFFSET(GSI_EE_AP)
-#define IPA_REG_SUSPEND_IRQ_CLR_EE_N_OFFSET(ee) \
+/* ipa->available defines the valid bits in the IRQ_SUSPEND_CLR register */
+#define IPA_REG_IRQ_SUSPEND_CLR_OFFSET \
+ IPA_REG_IRQ_SUSPEND_CLR_EE_N_OFFSET(GSI_EE_AP)
+#define IPA_REG_IRQ_SUSPEND_CLR_EE_N_OFFSET(ee) \
(0x00003038 + 0x1000 * (ee))
-/* ipa->available defines the valid bits in the SUSPEND_IRQ_CLR register */
-
-/** enum ipa_cs_offload_en - checksum offload field in ENDP_INIT_CFG_N */
-enum ipa_cs_offload_en {
- IPA_CS_OFFLOAD_NONE = 0,
- IPA_CS_OFFLOAD_UL = 1,
- IPA_CS_OFFLOAD_DL = 2,
- IPA_CS_RSVD
-};
-
-/** enum ipa_aggr_en - aggregation enable field in ENDP_INIT_AGGR_N */
-enum ipa_aggr_en {
- IPA_BYPASS_AGGR = 0,
- IPA_ENABLE_AGGR = 1,
- IPA_ENABLE_DEAGGR = 2,
-};
-
-/** enum ipa_aggr_type - aggregation type field in in_ENDP_INIT_AGGR_N */
-enum ipa_aggr_type {
- IPA_MBIM_16 = 0,
- IPA_HDLC = 1,
- IPA_TLP = 2,
- IPA_RNDIS = 3,
- IPA_GENERIC = 4,
- IPA_COALESCE = 5,
- IPA_QCMAP = 6,
-};
-
-/** enum ipa_mode - mode field in ENDP_INIT_MODE_N */
-enum ipa_mode {
- IPA_BASIC = 0,
- IPA_ENABLE_FRAMING_HDLC = 1,
- IPA_ENABLE_DEFRAMING_HDLC = 2,
- IPA_DMA = 3,
-};
-
-/**
- * enum ipa_seq_type - HPS and DPS sequencer type fields in in ENDP_INIT_SEQ_N
- * @IPA_SEQ_DMA_ONLY: only DMA is performed
- * @IPA_SEQ_PKT_PROCESS_NO_DEC_UCP:
- * packet processing + no decipher + microcontroller (Ethernet Bridging)
- * @IPA_SEQ_2ND_PKT_PROCESS_PASS_NO_DEC_UCP:
- * second packet processing pass + no decipher + microcontroller
- * @IPA_SEQ_DMA_DEC: DMA + cipher/decipher
- * @IPA_SEQ_DMA_COMP_DECOMP: DMA + compression/decompression
- * @IPA_SEQ_PKT_PROCESS_NO_DEC_NO_UCP_DMAP:
- * packet processing + no decipher + no uCP + HPS REP DMA parser
- * @IPA_SEQ_INVALID: invalid sequencer type
- *
- * The values defined here are broken into 4-bit nibbles that are written
- * into fields of the INIT_SEQ_N endpoint registers.
- */
-enum ipa_seq_type {
- IPA_SEQ_DMA_ONLY = 0x0000,
- IPA_SEQ_PKT_PROCESS_NO_DEC_UCP = 0x0002,
- IPA_SEQ_2ND_PKT_PROCESS_PASS_NO_DEC_UCP = 0x0004,
- IPA_SEQ_DMA_DEC = 0x0011,
- IPA_SEQ_DMA_COMP_DECOMP = 0x0020,
- IPA_SEQ_PKT_PROCESS_NO_DEC_NO_UCP_DMAP = 0x0806,
- IPA_SEQ_INVALID = 0xffff,
-};
int ipa_reg_init(struct ipa *ipa);
void ipa_reg_exit(struct ipa *ipa);
diff --git a/drivers/net/ipa/ipa_table.c b/drivers/net/ipa/ipa_table.c
index b3790aa952a1..32e2d3e052d5 100644
--- a/drivers/net/ipa/ipa_table.c
+++ b/drivers/net/ipa/ipa_table.c
@@ -422,8 +422,8 @@ int ipa_table_hash_flush(struct ipa *ipa)
return -EBUSY;
}
- val = IPV4_FILTER_HASH_FLUSH | IPV6_FILTER_HASH_FLUSH;
- val |= IPV6_ROUTER_HASH_FLUSH | IPV4_ROUTER_HASH_FLUSH;
+ val = IPV4_FILTER_HASH_FMASK | IPV6_FILTER_HASH_FMASK;
+ val |= IPV6_ROUTER_HASH_FMASK | IPV4_ROUTER_HASH_FMASK;
ipa_cmd_register_write_add(trans, offset, val, val, false);
diff --git a/drivers/net/ipa/ipa_uc.c b/drivers/net/ipa/ipa_uc.c
index b382d47bc70d..dee58a6596d4 100644
--- a/drivers/net/ipa/ipa_uc.c
+++ b/drivers/net/ipa/ipa_uc.c
@@ -86,32 +86,32 @@ struct ipa_uc_mem_area {
/** enum ipa_uc_command - commands from the AP to the microcontroller */
enum ipa_uc_command {
- IPA_UC_COMMAND_NO_OP = 0,
- IPA_UC_COMMAND_UPDATE_FLAGS = 1,
- IPA_UC_COMMAND_DEBUG_RUN_TEST = 2,
- IPA_UC_COMMAND_DEBUG_GET_INFO = 3,
- IPA_UC_COMMAND_ERR_FATAL = 4,
- IPA_UC_COMMAND_CLK_GATE = 5,
- IPA_UC_COMMAND_CLK_UNGATE = 6,
- IPA_UC_COMMAND_MEMCPY = 7,
- IPA_UC_COMMAND_RESET_PIPE = 8,
- IPA_UC_COMMAND_REG_WRITE = 9,
- IPA_UC_COMMAND_GSI_CH_EMPTY = 10,
+ IPA_UC_COMMAND_NO_OP = 0x0,
+ IPA_UC_COMMAND_UPDATE_FLAGS = 0x1,
+ IPA_UC_COMMAND_DEBUG_RUN_TEST = 0x2,
+ IPA_UC_COMMAND_DEBUG_GET_INFO = 0x3,
+ IPA_UC_COMMAND_ERR_FATAL = 0x4,
+ IPA_UC_COMMAND_CLK_GATE = 0x5,
+ IPA_UC_COMMAND_CLK_UNGATE = 0x6,
+ IPA_UC_COMMAND_MEMCPY = 0x7,
+ IPA_UC_COMMAND_RESET_PIPE = 0x8,
+ IPA_UC_COMMAND_REG_WRITE = 0x9,
+ IPA_UC_COMMAND_GSI_CH_EMPTY = 0xa,
};
/** enum ipa_uc_response - microcontroller response codes */
enum ipa_uc_response {
- IPA_UC_RESPONSE_NO_OP = 0,
- IPA_UC_RESPONSE_INIT_COMPLETED = 1,
- IPA_UC_RESPONSE_CMD_COMPLETED = 2,
- IPA_UC_RESPONSE_DEBUG_GET_INFO = 3,
+ IPA_UC_RESPONSE_NO_OP = 0x0,
+ IPA_UC_RESPONSE_INIT_COMPLETED = 0x1,
+ IPA_UC_RESPONSE_CMD_COMPLETED = 0x2,
+ IPA_UC_RESPONSE_DEBUG_GET_INFO = 0x3,
};
/** enum ipa_uc_event - common cpu events reported by the microcontroller */
enum ipa_uc_event {
- IPA_UC_EVENT_NO_OP = 0,
- IPA_UC_EVENT_ERROR = 1,
- IPA_UC_EVENT_LOG_INFO = 2,
+ IPA_UC_EVENT_NO_OP = 0x0,
+ IPA_UC_EVENT_ERROR = 0x1,
+ IPA_UC_EVENT_LOG_INFO = 0x2,
};
static struct ipa_uc_mem_area *ipa_uc_shared(struct ipa *ipa)
@@ -129,9 +129,10 @@ static void ipa_uc_event_handler(struct ipa *ipa, enum ipa_irq_id irq_id)
if (shared->event == IPA_UC_EVENT_ERROR)
dev_err(dev, "microcontroller error event\n");
- else
+ else if (shared->event != IPA_UC_EVENT_LOG_INFO)
dev_err(dev, "unsupported microcontroller event %hhu\n",
shared->event);
+ /* The LOG_INFO event can be safely ignored */
}
/* Microcontroller response IPA interrupt handler */
@@ -191,14 +192,19 @@ void ipa_uc_teardown(struct ipa *ipa)
static void send_uc_command(struct ipa *ipa, u32 command, u32 command_param)
{
struct ipa_uc_mem_area *shared = ipa_uc_shared(ipa);
+ u32 val;
+ /* Fill in the command data */
shared->command = command;
shared->command_param = cpu_to_le32(command_param);
shared->command_param_hi = 0;
shared->response = 0;
shared->response_param = 0;
- iowrite32(1, ipa->reg_virt + IPA_REG_IRQ_UC_OFFSET);
+ /* Use an interrupt to tell the microcontroller the command is ready */
+ val = u32_encode_bits(1, UC_INTR_FMASK);
+
+ iowrite32(val, ipa->reg_virt + IPA_REG_IRQ_UC_OFFSET);
}
/* Tell the microcontroller the AP is shutting down */
diff --git a/drivers/net/ipa/ipa_version.h b/drivers/net/ipa/ipa_version.h
index 85449df0f512..2944e2a89023 100644
--- a/drivers/net/ipa/ipa_version.h
+++ b/drivers/net/ipa/ipa_version.h
@@ -18,6 +18,7 @@ enum ipa_version {
IPA_VERSION_4_0, /* GSI version 2.0 */
IPA_VERSION_4_1, /* GSI version 2.1 */
IPA_VERSION_4_2, /* GSI version 2.2 */
+ IPA_VERSION_4_5, /* GSI version 2.5 */
};
#endif /* _IPA_VERSION_H_ */
diff --git a/drivers/net/ipvlan/ipvlan_main.c b/drivers/net/ipvlan/ipvlan_main.c
index 60b7d93bb834..a707502a0c0f 100644
--- a/drivers/net/ipvlan/ipvlan_main.c
+++ b/drivers/net/ipvlan/ipvlan_main.c
@@ -2,6 +2,8 @@
/* Copyright (c) 2014 Mahesh Bandewar <maheshb@google.com>
*/
+#include <linux/ethtool.h>
+
#include "ipvlan.h"
static int ipvlan_set_port_mode(struct ipvl_port *port, u16 nval,
diff --git a/drivers/net/macsec.c b/drivers/net/macsec.c
index 11ca5fa902a1..92425e1fd70c 100644
--- a/drivers/net/macsec.c
+++ b/drivers/net/macsec.c
@@ -101,6 +101,7 @@ struct pcpu_secy_stats {
* @real_dev: pointer to underlying netdevice
* @stats: MACsec device stats
* @secys: linked list of SecY's on the underlying device
+ * @gro_cells: pointer to the Generic Receive Offload cell
* @offload: status of offloading on the MACsec device
*/
struct macsec_dev {
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
index c8d803d3616c..fb51329f8964 100644
--- a/drivers/net/macvlan.c
+++ b/drivers/net/macvlan.c
@@ -35,7 +35,7 @@
#define MACVLAN_HASH_BITS 8
#define MACVLAN_HASH_SIZE (1<<MACVLAN_HASH_BITS)
-#define MACVLAN_BC_QUEUE_LEN 1000
+#define MACVLAN_DEFAULT_BC_QUEUE_LEN 1000
#define MACVLAN_F_PASSTHRU 1
#define MACVLAN_F_ADDRCHANGE 2
@@ -46,6 +46,7 @@ struct macvlan_port {
struct list_head vlans;
struct sk_buff_head bc_queue;
struct work_struct bc_work;
+ u32 bc_queue_len_used;
u32 flags;
int count;
struct hlist_head vlan_source_hash[MACVLAN_HASH_SIZE];
@@ -67,6 +68,7 @@ struct macvlan_skb_cb {
#define MACVLAN_SKB_CB(__skb) ((struct macvlan_skb_cb *)&((__skb)->cb[0]))
static void macvlan_port_destroy(struct net_device *dev);
+static void update_port_bc_queue_len(struct macvlan_port *port);
static inline bool macvlan_passthru(const struct macvlan_port *port)
{
@@ -354,7 +356,7 @@ static void macvlan_broadcast_enqueue(struct macvlan_port *port,
MACVLAN_SKB_CB(nskb)->src = src;
spin_lock(&port->bc_queue.lock);
- if (skb_queue_len(&port->bc_queue) < MACVLAN_BC_QUEUE_LEN) {
+ if (skb_queue_len(&port->bc_queue) < port->bc_queue_len_used) {
if (src)
dev_hold(src->dev);
__skb_queue_tail(&port->bc_queue, nskb);
@@ -1096,7 +1098,7 @@ static int macvlan_dev_netpoll_setup(struct net_device *dev, struct netpoll_info
struct macvlan_dev *vlan = netdev_priv(dev);
struct net_device *real_dev = vlan->lowerdev;
struct netpoll *netpoll;
- int err = 0;
+ int err;
netpoll = kzalloc(sizeof(*netpoll), GFP_KERNEL);
err = -ENOMEM;
@@ -1218,6 +1220,7 @@ static int macvlan_port_create(struct net_device *dev)
for (i = 0; i < MACVLAN_HASH_SIZE; i++)
INIT_HLIST_HEAD(&port->vlan_source_hash[i]);
+ port->bc_queue_len_used = 0;
skb_queue_head_init(&port->bc_queue);
INIT_WORK(&port->bc_work, macvlan_process_broadcast);
@@ -1339,7 +1342,7 @@ static int macvlan_validate(struct nlattr *tb[], struct nlattr *data[],
return 0;
}
-/**
+/*
* reconfigure list of remote source mac address
* (only for macvlan devices in source mode)
* Note regarding alignment: all netlink data is aligned to 4 Byte, which
@@ -1486,6 +1489,10 @@ int macvlan_common_newlink(struct net *src_net, struct net_device *dev,
goto destroy_macvlan_port;
}
+ vlan->bc_queue_len_req = MACVLAN_DEFAULT_BC_QUEUE_LEN;
+ if (data && data[IFLA_MACVLAN_BC_QUEUE_LEN])
+ vlan->bc_queue_len_req = nla_get_u32(data[IFLA_MACVLAN_BC_QUEUE_LEN]);
+
err = register_netdevice(dev);
if (err < 0)
goto destroy_macvlan_port;
@@ -1496,6 +1503,7 @@ int macvlan_common_newlink(struct net *src_net, struct net_device *dev,
goto unregister_netdev;
list_add_tail_rcu(&vlan->list, &port->vlans);
+ update_port_bc_queue_len(vlan->port);
netif_stacked_transfer_operstate(lowerdev, dev);
linkwatch_fire_event(dev);
@@ -1529,6 +1537,7 @@ void macvlan_dellink(struct net_device *dev, struct list_head *head)
if (vlan->mode == MACVLAN_MODE_SOURCE)
macvlan_flush_sources(vlan->port, vlan);
list_del_rcu(&vlan->list);
+ update_port_bc_queue_len(vlan->port);
unregister_netdevice_queue(dev, head);
netdev_upper_dev_unlink(vlan->lowerdev, dev);
}
@@ -1572,6 +1581,12 @@ static int macvlan_changelink(struct net_device *dev,
}
vlan->flags = flags;
}
+
+ if (data && data[IFLA_MACVLAN_BC_QUEUE_LEN]) {
+ vlan->bc_queue_len_req = nla_get_u32(data[IFLA_MACVLAN_BC_QUEUE_LEN]);
+ update_port_bc_queue_len(vlan->port);
+ }
+
if (set_mode)
vlan->mode = mode;
if (data && data[IFLA_MACVLAN_MACADDR_MODE]) {
@@ -1602,6 +1617,8 @@ static size_t macvlan_get_size(const struct net_device *dev)
+ nla_total_size(2) /* IFLA_MACVLAN_FLAGS */
+ nla_total_size(4) /* IFLA_MACVLAN_MACADDR_COUNT */
+ macvlan_get_size_mac(vlan) /* IFLA_MACVLAN_MACADDR */
+ + nla_total_size(4) /* IFLA_MACVLAN_BC_QUEUE_LEN */
+ + nla_total_size(4) /* IFLA_MACVLAN_BC_QUEUE_LEN_USED */
);
}
@@ -1625,6 +1642,7 @@ static int macvlan_fill_info(struct sk_buff *skb,
const struct net_device *dev)
{
struct macvlan_dev *vlan = netdev_priv(dev);
+ struct macvlan_port *port = vlan->port;
int i;
struct nlattr *nest;
@@ -1645,6 +1663,10 @@ static int macvlan_fill_info(struct sk_buff *skb,
}
nla_nest_end(skb, nest);
}
+ if (nla_put_u32(skb, IFLA_MACVLAN_BC_QUEUE_LEN, vlan->bc_queue_len_req))
+ goto nla_put_failure;
+ if (nla_put_u32(skb, IFLA_MACVLAN_BC_QUEUE_LEN_USED, port->bc_queue_len_used))
+ goto nla_put_failure;
return 0;
nla_put_failure:
@@ -1658,6 +1680,8 @@ static const struct nla_policy macvlan_policy[IFLA_MACVLAN_MAX + 1] = {
[IFLA_MACVLAN_MACADDR] = { .type = NLA_BINARY, .len = MAX_ADDR_LEN },
[IFLA_MACVLAN_MACADDR_DATA] = { .type = NLA_NESTED },
[IFLA_MACVLAN_MACADDR_COUNT] = { .type = NLA_U32 },
+ [IFLA_MACVLAN_BC_QUEUE_LEN] = { .type = NLA_U32 },
+ [IFLA_MACVLAN_BC_QUEUE_LEN_USED] = { .type = NLA_REJECT },
};
int macvlan_link_register(struct rtnl_link_ops *ops)
@@ -1688,6 +1712,18 @@ static struct rtnl_link_ops macvlan_link_ops = {
.priv_size = sizeof(struct macvlan_dev),
};
+static void update_port_bc_queue_len(struct macvlan_port *port)
+{
+ u32 max_bc_queue_len_req = 0;
+ struct macvlan_dev *vlan;
+
+ list_for_each_entry(vlan, &port->vlans, list) {
+ if (vlan->bc_queue_len_req > max_bc_queue_len_req)
+ max_bc_queue_len_req = vlan->bc_queue_len_req;
+ }
+ port->bc_queue_len_used = max_bc_queue_len_req;
+}
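The effective length is thus the largest request among the port's macvlans: devices asking for 500, 1000 and 4000 slots yield bc_queue_len_used == 4000, recomputed downward when the 4000-slot device is deleted. Recent iproute2 exposes this attribute as the macvlan "bcqueuelen" parameter (keyword name assumed here; verify against your iproute2 version):

	ip link add link eth0 name mv0 type macvlan mode bridge bcqueuelen 4000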
+
static int macvlan_device_event(struct notifier_block *unused,
unsigned long event, void *ptr)
{
diff --git a/drivers/net/mhi_net.c b/drivers/net/mhi_net.c
new file mode 100644
index 000000000000..d3f92781314e
--- /dev/null
+++ b/drivers/net/mhi_net.c
@@ -0,0 +1,316 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/* MHI Network driver - Network over MHI bus
+ *
+ * Copyright (C) 2020 Linaro Ltd <loic.poulain@linaro.org>
+ */
+
+#include <linux/if_arp.h>
+#include <linux/mhi.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/skbuff.h>
+#include <linux/u64_stats_sync.h>
+
+#define MHI_NET_MIN_MTU ETH_MIN_MTU
+#define MHI_NET_MAX_MTU 0xffff
+#define MHI_NET_DEFAULT_MTU 0x4000
+
+struct mhi_net_stats {
+ u64_stats_t rx_packets;
+ u64_stats_t rx_bytes;
+ u64_stats_t rx_errors;
+ u64_stats_t rx_dropped;
+ u64_stats_t tx_packets;
+ u64_stats_t tx_bytes;
+ u64_stats_t tx_errors;
+ u64_stats_t tx_dropped;
+ atomic_t rx_queued;
+ struct u64_stats_sync tx_syncp;
+ struct u64_stats_sync rx_syncp;
+};
+
+struct mhi_net_dev {
+ struct mhi_device *mdev;
+ struct net_device *ndev;
+ struct delayed_work rx_refill;
+ struct mhi_net_stats stats;
+ u32 rx_queue_sz;
+};
+
+static int mhi_ndo_open(struct net_device *ndev)
+{
+ struct mhi_net_dev *mhi_netdev = netdev_priv(ndev);
+
+ /* Feed the rx buffer pool */
+ schedule_delayed_work(&mhi_netdev->rx_refill, 0);
+
+ /* Carrier is established via out-of-band channel (e.g. qmi) */
+ netif_carrier_on(ndev);
+
+ netif_start_queue(ndev);
+
+ return 0;
+}
+
+static int mhi_ndo_stop(struct net_device *ndev)
+{
+ struct mhi_net_dev *mhi_netdev = netdev_priv(ndev);
+
+ netif_stop_queue(ndev);
+ netif_carrier_off(ndev);
+ cancel_delayed_work_sync(&mhi_netdev->rx_refill);
+
+ return 0;
+}
+
+static int mhi_ndo_xmit(struct sk_buff *skb, struct net_device *ndev)
+{
+ struct mhi_net_dev *mhi_netdev = netdev_priv(ndev);
+ struct mhi_device *mdev = mhi_netdev->mdev;
+ int err;
+
+ err = mhi_queue_skb(mdev, DMA_TO_DEVICE, skb, skb->len, MHI_EOT);
+ if (unlikely(err)) {
+ net_err_ratelimited("%s: Failed to queue TX buf (%d)\n",
+ ndev->name, err);
+
+ u64_stats_update_begin(&mhi_netdev->stats.tx_syncp);
+ u64_stats_inc(&mhi_netdev->stats.tx_dropped);
+ u64_stats_update_end(&mhi_netdev->stats.tx_syncp);
+
+ /* drop the packet */
+ dev_kfree_skb_any(skb);
+ }
+
+ if (mhi_queue_is_full(mdev, DMA_TO_DEVICE))
+ netif_stop_queue(ndev);
+
+ return NETDEV_TX_OK;
+}
+
+static void mhi_ndo_get_stats64(struct net_device *ndev,
+ struct rtnl_link_stats64 *stats)
+{
+ struct mhi_net_dev *mhi_netdev = netdev_priv(ndev);
+ unsigned int start;
+
+ do {
+ start = u64_stats_fetch_begin_irq(&mhi_netdev->stats.rx_syncp);
+ stats->rx_packets = u64_stats_read(&mhi_netdev->stats.rx_packets);
+ stats->rx_bytes = u64_stats_read(&mhi_netdev->stats.rx_bytes);
+ stats->rx_errors = u64_stats_read(&mhi_netdev->stats.rx_errors);
+ stats->rx_dropped = u64_stats_read(&mhi_netdev->stats.rx_dropped);
+ } while (u64_stats_fetch_retry_irq(&mhi_netdev->stats.rx_syncp, start));
+
+ do {
+ start = u64_stats_fetch_begin_irq(&mhi_netdev->stats.tx_syncp);
+ stats->tx_packets = u64_stats_read(&mhi_netdev->stats.tx_packets);
+ stats->tx_bytes = u64_stats_read(&mhi_netdev->stats.tx_bytes);
+ stats->tx_errors = u64_stats_read(&mhi_netdev->stats.tx_errors);
+ stats->tx_dropped = u64_stats_read(&mhi_netdev->stats.tx_dropped);
+ } while (u64_stats_fetch_retry_irq(&mhi_netdev->stats.tx_syncp, start));
+}
+
+static const struct net_device_ops mhi_netdev_ops = {
+ .ndo_open = mhi_ndo_open,
+ .ndo_stop = mhi_ndo_stop,
+ .ndo_start_xmit = mhi_ndo_xmit,
+ .ndo_get_stats64 = mhi_ndo_get_stats64,
+};
+
+static void mhi_net_setup(struct net_device *ndev)
+{
+ ndev->header_ops = NULL; /* No header */
+ ndev->type = ARPHRD_NONE; /* QMAP... */
+ ndev->hard_header_len = 0;
+ ndev->addr_len = 0;
+ ndev->flags = IFF_POINTOPOINT | IFF_NOARP;
+ ndev->netdev_ops = &mhi_netdev_ops;
+ ndev->mtu = MHI_NET_DEFAULT_MTU;
+ ndev->min_mtu = MHI_NET_MIN_MTU;
+ ndev->max_mtu = MHI_NET_MAX_MTU;
+ ndev->tx_queue_len = 1000;
+}
+
+static void mhi_net_dl_callback(struct mhi_device *mhi_dev,
+ struct mhi_result *mhi_res)
+{
+ struct mhi_net_dev *mhi_netdev = dev_get_drvdata(&mhi_dev->dev);
+ struct sk_buff *skb = mhi_res->buf_addr;
+ int remaining;
+
+ remaining = atomic_dec_return(&mhi_netdev->stats.rx_queued);
+
+ if (unlikely(mhi_res->transaction_status)) {
+ dev_kfree_skb_any(skb);
+
+ /* MHI layer stopping/resetting the DL channel */
+ if (mhi_res->transaction_status == -ENOTCONN)
+ return;
+
+ u64_stats_update_begin(&mhi_netdev->stats.rx_syncp);
+ u64_stats_inc(&mhi_netdev->stats.rx_errors);
+ u64_stats_update_end(&mhi_netdev->stats.rx_syncp);
+ } else {
+ u64_stats_update_begin(&mhi_netdev->stats.rx_syncp);
+ u64_stats_inc(&mhi_netdev->stats.rx_packets);
+ u64_stats_add(&mhi_netdev->stats.rx_bytes, mhi_res->bytes_xferd);
+ u64_stats_update_end(&mhi_netdev->stats.rx_syncp);
+
+ skb->protocol = htons(ETH_P_MAP);
+ skb_put(skb, mhi_res->bytes_xferd);
+ netif_rx(skb);
+ }
+
+ /* Refill if RX buffers queue becomes low */
+ if (remaining <= mhi_netdev->rx_queue_sz / 2)
+ schedule_delayed_work(&mhi_netdev->rx_refill, 0);
+}
+
+static void mhi_net_ul_callback(struct mhi_device *mhi_dev,
+ struct mhi_result *mhi_res)
+{
+ struct mhi_net_dev *mhi_netdev = dev_get_drvdata(&mhi_dev->dev);
+ struct net_device *ndev = mhi_netdev->ndev;
+ struct sk_buff *skb = mhi_res->buf_addr;
+
+ /* Hardware has consumed the buffer, so free the skb (which is not
+ * freed by the MHI stack) and perform accounting.
+ */
+ dev_consume_skb_any(skb);
+
+ u64_stats_update_begin(&mhi_netdev->stats.tx_syncp);
+ if (unlikely(mhi_res->transaction_status)) {
+
+ /* MHI layer stopping/resetting the UL channel */
+ if (mhi_res->transaction_status == -ENOTCONN) {
+ u64_stats_update_end(&mhi_netdev->stats.tx_syncp);
+ return;
+ }
+
+ u64_stats_inc(&mhi_netdev->stats.tx_errors);
+ } else {
+ u64_stats_inc(&mhi_netdev->stats.tx_packets);
+ u64_stats_add(&mhi_netdev->stats.tx_bytes, mhi_res->bytes_xferd);
+ }
+ u64_stats_update_end(&mhi_netdev->stats.tx_syncp);
+
+ if (netif_queue_stopped(ndev))
+ netif_wake_queue(ndev);
+}
+
+static void mhi_net_rx_refill_work(struct work_struct *work)
+{
+ struct mhi_net_dev *mhi_netdev = container_of(work, struct mhi_net_dev,
+ rx_refill.work);
+ struct net_device *ndev = mhi_netdev->ndev;
+ struct mhi_device *mdev = mhi_netdev->mdev;
+ int size = READ_ONCE(ndev->mtu);
+ struct sk_buff *skb;
+ int err;
+
+ while (atomic_read(&mhi_netdev->stats.rx_queued) < mhi_netdev->rx_queue_sz) {
+ skb = netdev_alloc_skb(ndev, size);
+ if (unlikely(!skb))
+ break;
+
+ err = mhi_queue_skb(mdev, DMA_FROM_DEVICE, skb, size, MHI_EOT);
+ if (unlikely(err)) {
+ net_err_ratelimited("%s: Failed to queue RX buf (%d)\n",
+ ndev->name, err);
+ kfree_skb(skb);
+ break;
+ }
+
+ atomic_inc(&mhi_netdev->stats.rx_queued);
+
+ /* Do not hog the CPU if rx buffers are consumed faster than
+ * queued (unlikely).
+ */
+ cond_resched();
+ }
+
+ /* If we're still starved of rx buffers, reschedule later */
+ if (unlikely(!atomic_read(&mhi_netdev->stats.rx_queued)))
+ schedule_delayed_work(&mhi_netdev->rx_refill, HZ / 2);
+}
+
+static int mhi_net_probe(struct mhi_device *mhi_dev,
+ const struct mhi_device_id *id)
+{
+ const char *netname = (char *)id->driver_data;
+ struct device *dev = &mhi_dev->dev;
+ struct mhi_net_dev *mhi_netdev;
+ struct net_device *ndev;
+ int err;
+
+ ndev = alloc_netdev(sizeof(*mhi_netdev), netname, NET_NAME_PREDICTABLE,
+ mhi_net_setup);
+ if (!ndev)
+ return -ENOMEM;
+
+ mhi_netdev = netdev_priv(ndev);
+ dev_set_drvdata(dev, mhi_netdev);
+ mhi_netdev->ndev = ndev;
+ mhi_netdev->mdev = mhi_dev;
+ SET_NETDEV_DEV(ndev, &mhi_dev->dev);
+
+ /* All MHI net channels have 128 ring elements (at least for now) */
+ mhi_netdev->rx_queue_sz = 128;
+
+ INIT_DELAYED_WORK(&mhi_netdev->rx_refill, mhi_net_rx_refill_work);
+ u64_stats_init(&mhi_netdev->stats.rx_syncp);
+ u64_stats_init(&mhi_netdev->stats.tx_syncp);
+
+ /* Start MHI channels */
+ err = mhi_prepare_for_transfer(mhi_dev);
+ if (err)
+ goto out_err;
+
+ err = register_netdev(ndev);
+ if (err)
+ goto out_err;
+
+ return 0;
+
+out_err:
+ free_netdev(ndev);
+ return err;
+}
+
+static void mhi_net_remove(struct mhi_device *mhi_dev)
+{
+ struct mhi_net_dev *mhi_netdev = dev_get_drvdata(&mhi_dev->dev);
+
+ unregister_netdev(mhi_netdev->ndev);
+
+ mhi_unprepare_from_transfer(mhi_netdev->mdev);
+
+ free_netdev(mhi_netdev->ndev);
+}
+
+static const struct mhi_device_id mhi_net_id_table[] = {
+ { .chan = "IP_HW0", .driver_data = (kernel_ulong_t)"mhi_hwip%d" },
+ { .chan = "IP_SW0", .driver_data = (kernel_ulong_t)"mhi_swip%d" },
+ {}
+};
+MODULE_DEVICE_TABLE(mhi, mhi_net_id_table);
+
+static struct mhi_driver mhi_net_driver = {
+ .probe = mhi_net_probe,
+ .remove = mhi_net_remove,
+ .dl_xfer_cb = mhi_net_dl_callback,
+ .ul_xfer_cb = mhi_net_ul_callback,
+ .id_table = mhi_net_id_table,
+ .driver = {
+ .name = "mhi_net",
+ .owner = THIS_MODULE,
+ },
+};
+
+module_mhi_driver(mhi_net_driver);
+
+MODULE_AUTHOR("Loic Poulain <loic.poulain@linaro.org>");
+MODULE_DESCRIPTION("Network over MHI");
+MODULE_LICENSE("GPL v2");
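
A note on the statistics scheme above: the DL and UL callbacks bump
u64_stats_t counters inside u64_stats_update_begin()/u64_stats_update_end()
writer sections, which keeps 64-bit reads consistent on 32-bit hosts. A
minimal sketch of the matching reader side, assuming the struct layout used
above (the driver's real ndo_get_stats64 is not part of this hunk, so the
function below is illustrative only):

        static void mhi_ndo_get_stats64(struct net_device *ndev,
                                        struct rtnl_link_stats64 *stats)
        {
                struct mhi_net_dev *mhi_netdev = netdev_priv(ndev);
                unsigned int start;

                do {    /* retry if a writer raced with the reads */
                        start = u64_stats_fetch_begin(&mhi_netdev->stats.rx_syncp);
                        stats->rx_packets = u64_stats_read(&mhi_netdev->stats.rx_packets);
                        stats->rx_bytes = u64_stats_read(&mhi_netdev->stats.rx_bytes);
                        stats->rx_errors = u64_stats_read(&mhi_netdev->stats.rx_errors);
                } while (u64_stats_fetch_retry(&mhi_netdev->stats.rx_syncp, start));
        }
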
diff --git a/drivers/net/mii.c b/drivers/net/mii.c
index f6a97c859f3a..e71ebb933266 100644
--- a/drivers/net/mii.c
+++ b/drivers/net/mii.c
@@ -84,15 +84,16 @@ int mii_ethtool_gset(struct mii_if_info *mii, struct ethtool_cmd *ecmd)
ctrl1000 = mii->mdio_read(dev, mii->phy_id, MII_CTRL1000);
stat1000 = mii->mdio_read(dev, mii->phy_id, MII_STAT1000);
}
+
+ ecmd->advertising |= mii_get_an(mii, MII_ADVERTISE);
+ if (mii->supports_gmii)
+ ecmd->advertising |=
+ mii_ctrl1000_to_ethtool_adv_t(ctrl1000);
+
if (bmcr & BMCR_ANENABLE) {
ecmd->advertising |= ADVERTISED_Autoneg;
ecmd->autoneg = AUTONEG_ENABLE;
- ecmd->advertising |= mii_get_an(mii, MII_ADVERTISE);
- if (mii->supports_gmii)
- ecmd->advertising |=
- mii_ctrl1000_to_ethtool_adv_t(ctrl1000);
-
if (bmsr & BMSR_ANEGCOMPLETE) {
ecmd->lp_advertising = mii_get_an(mii, MII_LPA);
ecmd->lp_advertising |=
@@ -171,14 +172,15 @@ void mii_ethtool_get_link_ksettings(struct mii_if_info *mii,
ctrl1000 = mii->mdio_read(dev, mii->phy_id, MII_CTRL1000);
stat1000 = mii->mdio_read(dev, mii->phy_id, MII_STAT1000);
}
+
+ advertising |= mii_get_an(mii, MII_ADVERTISE);
+ if (mii->supports_gmii)
+ advertising |= mii_ctrl1000_to_ethtool_adv_t(ctrl1000);
+
if (bmcr & BMCR_ANENABLE) {
advertising |= ADVERTISED_Autoneg;
cmd->base.autoneg = AUTONEG_ENABLE;
- advertising |= mii_get_an(mii, MII_ADVERTISE);
- if (mii->supports_gmii)
- advertising |= mii_ctrl1000_to_ethtool_adv_t(ctrl1000);
-
if (bmsr & BMSR_ANEGCOMPLETE) {
lp_advertising = mii_get_an(mii, MII_LPA);
lp_advertising |=
diff --git a/drivers/net/net_failover.c b/drivers/net/net_failover.c
index fb182bec8f06..2a4892402ed8 100644
--- a/drivers/net/net_failover.c
+++ b/drivers/net/net_failover.c
@@ -697,7 +697,7 @@ static struct failover_ops net_failover_ops = {
/**
* net_failover_create - Create and register a failover instance
*
- * @dev: standby netdev
+ * @standby_dev: standby netdev
*
* Creates a failover netdev and registers a failover instance for a standby
* netdev. Used by paravirtual drivers that use 3-netdev model.
diff --git a/drivers/net/netconsole.c b/drivers/net/netconsole.c
index 92001f7af380..ccecba908ded 100644
--- a/drivers/net/netconsole.c
+++ b/drivers/net/netconsole.c
@@ -83,6 +83,7 @@ static struct console netconsole_ext;
* whether the corresponding netpoll is active or inactive.
* Also, other parameters of a target may be modified at
* runtime only when it is disabled (enabled == 0).
+ * @extended: Denotes whether console is extended or not.
* @np: The netpoll structure for this target.
* Contains the other userspace visible parameters:
* dev_name (read-write)
diff --git a/drivers/net/netdevsim/dev.c b/drivers/net/netdevsim/dev.c
index e7972e88ffe0..816af1f55e2c 100644
--- a/drivers/net/netdevsim/dev.c
+++ b/drivers/net/netdevsim/dev.c
@@ -326,6 +326,12 @@ static int nsim_dev_resources_register(struct devlink *devlink)
return err;
}
+ /* Resources for nexthops */
+ err = devlink_resource_register(devlink, "nexthops", (u64)-1,
+ NSIM_RESOURCE_NEXTHOPS,
+ DEVLINK_RESOURCE_ID_PARENT_TOP,
+ &params);
+
out:
return err;
}
@@ -760,7 +766,6 @@ static int nsim_dev_flash_update(struct devlink *devlink,
return -EOPNOTSUPP;
if (nsim_dev->fw_update_status) {
- devlink_flash_update_begin_notify(devlink);
devlink_flash_update_status_notify(devlink,
"Preparing to flash",
params->component, 0, 0);
@@ -784,7 +789,6 @@ static int nsim_dev_flash_update(struct devlink *devlink,
params->component, 81);
devlink_flash_update_status_notify(devlink, "Flashing done",
params->component, 0, 0);
- devlink_flash_update_end_notify(devlink);
}
return 0;
diff --git a/drivers/net/netdevsim/ethtool.c b/drivers/net/netdevsim/ethtool.c
index f1884d90a876..166f0d6cbcf7 100644
--- a/drivers/net/netdevsim/ethtool.c
+++ b/drivers/net/netdevsim/ethtool.c
@@ -13,9 +13,9 @@ nsim_get_pause_stats(struct net_device *dev,
{
struct netdevsim *ns = netdev_priv(dev);
- if (ns->ethtool.report_stats_rx)
+ if (ns->ethtool.pauseparam.report_stats_rx)
pause_stats->rx_pause_frames = 1;
- if (ns->ethtool.report_stats_tx)
+ if (ns->ethtool.pauseparam.report_stats_tx)
pause_stats->tx_pause_frames = 2;
}
@@ -25,8 +25,8 @@ nsim_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *pause)
struct netdevsim *ns = netdev_priv(dev);
pause->autoneg = 0; /* We don't support ksettings, so can't pretend */
- pause->rx_pause = ns->ethtool.rx;
- pause->tx_pause = ns->ethtool.tx;
+ pause->rx_pause = ns->ethtool.pauseparam.rx;
+ pause->tx_pause = ns->ethtool.pauseparam.tx;
}
static int
@@ -37,28 +37,88 @@ nsim_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *pause)
if (pause->autoneg)
return -EINVAL;
- ns->ethtool.rx = pause->rx_pause;
- ns->ethtool.tx = pause->tx_pause;
+ ns->ethtool.pauseparam.rx = pause->rx_pause;
+ ns->ethtool.pauseparam.tx = pause->tx_pause;
+ return 0;
+}
+
+static int nsim_get_coalesce(struct net_device *dev,
+ struct ethtool_coalesce *coal)
+{
+ struct netdevsim *ns = netdev_priv(dev);
+
+ memcpy(coal, &ns->ethtool.coalesce, sizeof(ns->ethtool.coalesce));
+ return 0;
+}
+
+static int nsim_set_coalesce(struct net_device *dev,
+ struct ethtool_coalesce *coal)
+{
+ struct netdevsim *ns = netdev_priv(dev);
+
+ memcpy(&ns->ethtool.coalesce, coal, sizeof(ns->ethtool.coalesce));
+ return 0;
+}
+
+static void nsim_get_ringparam(struct net_device *dev,
+ struct ethtool_ringparam *ring)
+{
+ struct netdevsim *ns = netdev_priv(dev);
+
+ memcpy(ring, &ns->ethtool.ring, sizeof(ns->ethtool.ring));
+}
+
+static int nsim_set_ringparam(struct net_device *dev,
+ struct ethtool_ringparam *ring)
+{
+ struct netdevsim *ns = netdev_priv(dev);
+
+ memcpy(&ns->ethtool.ring, ring, sizeof(ns->ethtool.ring));
return 0;
}
static const struct ethtool_ops nsim_ethtool_ops = {
- .get_pause_stats = nsim_get_pause_stats,
- .get_pauseparam = nsim_get_pauseparam,
- .set_pauseparam = nsim_set_pauseparam,
+ .supported_coalesce_params = ETHTOOL_COALESCE_ALL_PARAMS,
+ .get_pause_stats = nsim_get_pause_stats,
+ .get_pauseparam = nsim_get_pauseparam,
+ .set_pauseparam = nsim_set_pauseparam,
+ .set_coalesce = nsim_set_coalesce,
+ .get_coalesce = nsim_get_coalesce,
+ .get_ringparam = nsim_get_ringparam,
+ .set_ringparam = nsim_set_ringparam,
};
+static void nsim_ethtool_ring_init(struct netdevsim *ns)
+{
+ ns->ethtool.ring.rx_max_pending = 4096;
+ ns->ethtool.ring.rx_jumbo_max_pending = 4096;
+ ns->ethtool.ring.rx_mini_max_pending = 4096;
+ ns->ethtool.ring.tx_max_pending = 4096;
+}
+
void nsim_ethtool_init(struct netdevsim *ns)
{
struct dentry *ethtool, *dir;
ns->netdev->ethtool_ops = &nsim_ethtool_ops;
+ nsim_ethtool_ring_init(ns);
+
ethtool = debugfs_create_dir("ethtool", ns->nsim_dev_port->ddir);
dir = debugfs_create_dir("pause", ethtool);
debugfs_create_bool("report_stats_rx", 0600, dir,
- &ns->ethtool.report_stats_rx);
+ &ns->ethtool.pauseparam.report_stats_rx);
debugfs_create_bool("report_stats_tx", 0600, dir,
- &ns->ethtool.report_stats_tx);
+ &ns->ethtool.pauseparam.report_stats_tx);
+
+ dir = debugfs_create_dir("ring", ethtool);
+ debugfs_create_u32("rx_max_pending", 0600, dir,
+ &ns->ethtool.ring.rx_max_pending);
+ debugfs_create_u32("rx_jumbo_max_pending", 0600, dir,
+ &ns->ethtool.ring.rx_jumbo_max_pending);
+ debugfs_create_u32("rx_mini_max_pending", 0600, dir,
+ &ns->ethtool.ring.rx_mini_max_pending);
+ debugfs_create_u32("tx_max_pending", 0600, dir,
+ &ns->ethtool.ring.tx_max_pending);
}
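
For context on the new .supported_coalesce_params entry: the ethtool core
rejects any coalesce field the driver has not advertised before
.set_coalesce is ever called, so ETHTOOL_COALESCE_ALL_PARAMS opts netdevsim
into every field and lets the memcpy-based handlers above stay trivial. A
simplified sketch of that core-side gating (not netdevsim code; the real
check lives in the ethtool core and covers every field):

        /* A changed field is only accepted when the driver advertises the
         * matching ETHTOOL_COALESCE_* bit.
         */
        if (coal->rx_coalesce_usecs != old->rx_coalesce_usecs &&
            !(dev->ethtool_ops->supported_coalesce_params &
              ETHTOOL_COALESCE_RX_USECS))
                return -EOPNOTSUPP;
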
diff --git a/drivers/net/netdevsim/fib.c b/drivers/net/netdevsim/fib.c
index deea17a0e79c..45d8a7790bd5 100644
--- a/drivers/net/netdevsim/fib.c
+++ b/drivers/net/netdevsim/fib.c
@@ -25,6 +25,7 @@
#include <net/ip6_fib.h>
#include <net/fib_rules.h>
#include <net/net_namespace.h>
+#include <net/nexthop.h>
#include "netdevsim.h"
@@ -42,9 +43,12 @@ struct nsim_fib_data {
struct notifier_block fib_nb;
struct nsim_per_fib_data ipv4;
struct nsim_per_fib_data ipv6;
+ struct nsim_fib_entry nexthops;
struct rhashtable fib_rt_ht;
struct list_head fib_rt_list;
spinlock_t fib_lock; /* Protects hashtable, list and accounting */
+ struct notifier_block nexthop_nb;
+ struct rhashtable nexthop_ht;
struct devlink *devlink;
};
@@ -86,6 +90,19 @@ static const struct rhashtable_params nsim_fib_rt_ht_params = {
.automatic_shrinking = true,
};
+struct nsim_nexthop {
+ struct rhash_head ht_node;
+ u64 occ;
+ u32 id;
+};
+
+static const struct rhashtable_params nsim_nexthop_ht_params = {
+ .key_offset = offsetof(struct nsim_nexthop, id),
+ .head_offset = offsetof(struct nsim_nexthop, ht_node),
+ .key_len = sizeof(u32),
+ .automatic_shrinking = true,
+};
+
u64 nsim_fib_get_val(struct nsim_fib_data *fib_data,
enum nsim_resource_id res_id, bool max)
{
@@ -104,6 +121,9 @@ u64 nsim_fib_get_val(struct nsim_fib_data *fib_data,
case NSIM_RESOURCE_IPV6_FIB_RULES:
entry = &fib_data->ipv6.rules;
break;
+ case NSIM_RESOURCE_NEXTHOPS:
+ entry = &fib_data->nexthops;
+ break;
default:
return 0;
}
@@ -129,6 +149,9 @@ static void nsim_fib_set_max(struct nsim_fib_data *fib_data,
case NSIM_RESOURCE_IPV6_FIB_RULES:
entry = &fib_data->ipv6.rules;
break;
+ case NSIM_RESOURCE_NEXTHOPS:
+ entry = &fib_data->nexthops;
+ break;
default:
WARN_ON(1);
return;
@@ -389,11 +412,6 @@ static int nsim_fib4_event(struct nsim_fib_data *data,
fen_info = container_of(info, struct fib_entry_notifier_info, info);
- if (fen_info->fi->nh) {
- NL_SET_ERR_MSG_MOD(info->extack, "IPv4 route with nexthop objects is not supported");
- return 0;
- }
-
switch (event) {
case FIB_EVENT_ENTRY_REPLACE:
err = nsim_fib4_rt_insert(data, fen_info);
@@ -704,11 +722,6 @@ static int nsim_fib6_event(struct nsim_fib_data *data,
fen6_info = container_of(info, struct fib6_entry_notifier_info, info);
- if (fen6_info->rt->nh) {
- NL_SET_ERR_MSG_MOD(info->extack, "IPv6 route with nexthop objects is not supported");
- return 0;
- }
-
if (fen6_info->rt->fib6_src.plen) {
NL_SET_ERR_MSG_MOD(info->extack, "IPv6 source-specific route is not supported");
return 0;
@@ -838,6 +851,196 @@ static void nsim_fib_dump_inconsistent(struct notifier_block *nb)
data->ipv6.rules.num = 0ULL;
}
+static struct nsim_nexthop *nsim_nexthop_create(struct nsim_fib_data *data,
+ struct nh_notifier_info *info)
+{
+ struct nsim_nexthop *nexthop;
+ u64 occ = 0;
+ int i;
+
+ nexthop = kzalloc(sizeof(*nexthop), GFP_KERNEL);
+ if (!nexthop)
+ return NULL;
+
+ nexthop->id = info->id;
+
+ /* Determine the number of nexthop entries the new nexthop will
+ * occupy.
+ */
+
+ if (!info->is_grp) {
+ occ = 1;
+ goto out;
+ }
+
+ for (i = 0; i < info->nh_grp->num_nh; i++)
+ occ += info->nh_grp->nh_entries[i].weight;
+
+out:
+ nexthop->occ = occ;
+ return nexthop;
+}
+
+static void nsim_nexthop_destroy(struct nsim_nexthop *nexthop)
+{
+ kfree(nexthop);
+}
+
+static int nsim_nexthop_account(struct nsim_fib_data *data, u64 occ,
+ bool add, struct netlink_ext_ack *extack)
+{
+ int err = 0;
+
+ if (add) {
+ if (data->nexthops.num + occ <= data->nexthops.max) {
+ data->nexthops.num += occ;
+ } else {
+ err = -ENOSPC;
+ NL_SET_ERR_MSG_MOD(extack, "Exceeded number of supported nexthops");
+ }
+ } else {
+ if (WARN_ON(occ > data->nexthops.num))
+ return -EINVAL;
+ data->nexthops.num -= occ;
+ }
+
+ return err;
+}
+
+static int nsim_nexthop_add(struct nsim_fib_data *data,
+ struct nsim_nexthop *nexthop,
+ struct netlink_ext_ack *extack)
+{
+ struct net *net = devlink_net(data->devlink);
+ int err;
+
+ err = nsim_nexthop_account(data, nexthop->occ, true, extack);
+ if (err)
+ return err;
+
+ err = rhashtable_insert_fast(&data->nexthop_ht, &nexthop->ht_node,
+ nsim_nexthop_ht_params);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "Failed to insert nexthop");
+ goto err_nexthop_dismiss;
+ }
+
+ nexthop_set_hw_flags(net, nexthop->id, false, true);
+
+ return 0;
+
+err_nexthop_dismiss:
+ nsim_nexthop_account(data, nexthop->occ, false, extack);
+ return err;
+}
+
+static int nsim_nexthop_replace(struct nsim_fib_data *data,
+ struct nsim_nexthop *nexthop,
+ struct nsim_nexthop *nexthop_old,
+ struct netlink_ext_ack *extack)
+{
+ struct net *net = devlink_net(data->devlink);
+ int err;
+
+ err = nsim_nexthop_account(data, nexthop->occ, true, extack);
+ if (err)
+ return err;
+
+ err = rhashtable_replace_fast(&data->nexthop_ht,
+ &nexthop_old->ht_node, &nexthop->ht_node,
+ nsim_nexthop_ht_params);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "Failed to replace nexthop");
+ goto err_nexthop_dismiss;
+ }
+
+ nexthop_set_hw_flags(net, nexthop->id, false, true);
+ nsim_nexthop_account(data, nexthop_old->occ, false, extack);
+ nsim_nexthop_destroy(nexthop_old);
+
+ return 0;
+
+err_nexthop_dismiss:
+ nsim_nexthop_account(data, nexthop->occ, false, extack);
+ return err;
+}
+
+static int nsim_nexthop_insert(struct nsim_fib_data *data,
+ struct nh_notifier_info *info)
+{
+ struct nsim_nexthop *nexthop, *nexthop_old;
+ int err;
+
+ nexthop = nsim_nexthop_create(data, info);
+ if (!nexthop)
+ return -ENOMEM;
+
+ nexthop_old = rhashtable_lookup_fast(&data->nexthop_ht, &info->id,
+ nsim_nexthop_ht_params);
+ if (!nexthop_old)
+ err = nsim_nexthop_add(data, nexthop, info->extack);
+ else
+ err = nsim_nexthop_replace(data, nexthop, nexthop_old,
+ info->extack);
+
+ if (err)
+ nsim_nexthop_destroy(nexthop);
+
+ return err;
+}
+
+static void nsim_nexthop_remove(struct nsim_fib_data *data,
+ struct nh_notifier_info *info)
+{
+ struct nsim_nexthop *nexthop;
+
+ nexthop = rhashtable_lookup_fast(&data->nexthop_ht, &info->id,
+ nsim_nexthop_ht_params);
+ if (!nexthop)
+ return;
+
+ rhashtable_remove_fast(&data->nexthop_ht, &nexthop->ht_node,
+ nsim_nexthop_ht_params);
+ nsim_nexthop_account(data, nexthop->occ, false, info->extack);
+ nsim_nexthop_destroy(nexthop);
+}
+
+static int nsim_nexthop_event_nb(struct notifier_block *nb, unsigned long event,
+ void *ptr)
+{
+ struct nsim_fib_data *data = container_of(nb, struct nsim_fib_data,
+ nexthop_nb);
+ struct nh_notifier_info *info = ptr;
+ int err = 0;
+
+ ASSERT_RTNL();
+
+ switch (event) {
+ case NEXTHOP_EVENT_REPLACE:
+ err = nsim_nexthop_insert(data, info);
+ break;
+ case NEXTHOP_EVENT_DEL:
+ nsim_nexthop_remove(data, info);
+ break;
+ default:
+ break;
+ }
+
+ return notifier_from_errno(err);
+}
+
+static void nsim_nexthop_free(void *ptr, void *arg)
+{
+ struct nsim_nexthop *nexthop = ptr;
+ struct nsim_fib_data *data = arg;
+ struct net *net;
+
+ net = devlink_net(data->devlink);
+ nexthop_set_hw_flags(net, nexthop->id, false, false);
+ nsim_nexthop_account(data, nexthop->occ, false, NULL);
+ nsim_nexthop_destroy(nexthop);
+}
+
static u64 nsim_fib_ipv4_resource_occ_get(void *priv)
{
struct nsim_fib_data *data = priv;
@@ -866,12 +1069,20 @@ static u64 nsim_fib_ipv6_rules_res_occ_get(void *priv)
return nsim_fib_get_val(data, NSIM_RESOURCE_IPV6_FIB_RULES, false);
}
+static u64 nsim_fib_nexthops_res_occ_get(void *priv)
+{
+ struct nsim_fib_data *data = priv;
+
+ return nsim_fib_get_val(data, NSIM_RESOURCE_NEXTHOPS, false);
+}
+
static void nsim_fib_set_max_all(struct nsim_fib_data *data,
struct devlink *devlink)
{
enum nsim_resource_id res_ids[] = {
NSIM_RESOURCE_IPV4_FIB, NSIM_RESOURCE_IPV4_FIB_RULES,
- NSIM_RESOURCE_IPV6_FIB, NSIM_RESOURCE_IPV6_FIB_RULES
+ NSIM_RESOURCE_IPV6_FIB, NSIM_RESOURCE_IPV6_FIB_RULES,
+ NSIM_RESOURCE_NEXTHOPS,
};
int i;
@@ -897,20 +1108,32 @@ struct nsim_fib_data *nsim_fib_create(struct devlink *devlink,
return ERR_PTR(-ENOMEM);
data->devlink = devlink;
+ err = rhashtable_init(&data->nexthop_ht, &nsim_nexthop_ht_params);
+ if (err)
+ goto err_data_free;
+
spin_lock_init(&data->fib_lock);
INIT_LIST_HEAD(&data->fib_rt_list);
err = rhashtable_init(&data->fib_rt_ht, &nsim_fib_rt_ht_params);
if (err)
- goto err_data_free;
+ goto err_rhashtable_nexthop_destroy;
nsim_fib_set_max_all(data, devlink);
+ data->nexthop_nb.notifier_call = nsim_nexthop_event_nb;
+ err = register_nexthop_notifier(devlink_net(devlink), &data->nexthop_nb,
+ extack);
+ if (err) {
+ pr_err("Failed to register nexthop notifier\n");
+ goto err_rhashtable_fib_destroy;
+ }
+
data->fib_nb.notifier_call = nsim_fib_event_nb;
err = register_fib_notifier(devlink_net(devlink), &data->fib_nb,
nsim_fib_dump_inconsistent, extack);
if (err) {
pr_err("Failed to register fib notifier\n");
- goto err_rhashtable_destroy;
+ goto err_nexthop_nb_unregister;
}
devlink_resource_occ_get_register(devlink,
@@ -929,11 +1152,20 @@ struct nsim_fib_data *nsim_fib_create(struct devlink *devlink,
NSIM_RESOURCE_IPV6_FIB_RULES,
nsim_fib_ipv6_rules_res_occ_get,
data);
+ devlink_resource_occ_get_register(devlink,
+ NSIM_RESOURCE_NEXTHOPS,
+ nsim_fib_nexthops_res_occ_get,
+ data);
return data;
-err_rhashtable_destroy:
+err_nexthop_nb_unregister:
+ unregister_nexthop_notifier(devlink_net(devlink), &data->nexthop_nb);
+err_rhashtable_fib_destroy:
rhashtable_free_and_destroy(&data->fib_rt_ht, nsim_fib_rt_free,
data);
+err_rhashtable_nexthop_destroy:
+ rhashtable_free_and_destroy(&data->nexthop_ht, nsim_nexthop_free,
+ data);
err_data_free:
kfree(data);
return ERR_PTR(err);
@@ -942,6 +1174,8 @@ err_data_free:
void nsim_fib_destroy(struct devlink *devlink, struct nsim_fib_data *data)
{
devlink_resource_occ_get_unregister(devlink,
+ NSIM_RESOURCE_NEXTHOPS);
+ devlink_resource_occ_get_unregister(devlink,
NSIM_RESOURCE_IPV6_FIB_RULES);
devlink_resource_occ_get_unregister(devlink,
NSIM_RESOURCE_IPV6_FIB);
@@ -950,8 +1184,11 @@ void nsim_fib_destroy(struct devlink *devlink, struct nsim_fib_data *data)
devlink_resource_occ_get_unregister(devlink,
NSIM_RESOURCE_IPV4_FIB);
unregister_fib_notifier(devlink_net(devlink), &data->fib_nb);
+ unregister_nexthop_notifier(devlink_net(devlink), &data->nexthop_nb);
rhashtable_free_and_destroy(&data->fib_rt_ht, nsim_fib_rt_free,
data);
+ rhashtable_free_and_destroy(&data->nexthop_ht, nsim_nexthop_free,
+ data);
WARN_ON_ONCE(!list_empty(&data->fib_rt_list));
kfree(data);
}
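
The nexthop notifier consumed above is the same interface other listeners
would use. A minimal consumer sketch, registered with
register_nexthop_notifier() exactly as nsim_fib_create() does ("my_nh_event"
is a hypothetical name; accounting and cleanup are elided):

        static int my_nh_event(struct notifier_block *nb, unsigned long event,
                               void *ptr)
        {
                struct nh_notifier_info *info = ptr;

                switch (event) {
                case NEXTHOP_EVENT_REPLACE:
                        /* info->id identifies the nexthop object; for groups
                         * (info->is_grp), per-member weights are available in
                         * info->nh_grp->nh_entries[].
                         */
                        break;
                case NEXTHOP_EVENT_DEL:
                        break;
                }

                return NOTIFY_DONE;
        }
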
diff --git a/drivers/net/netdevsim/netdevsim.h b/drivers/net/netdevsim/netdevsim.h
index 827fc80f50a0..19b1e6ef5573 100644
--- a/drivers/net/netdevsim/netdevsim.h
+++ b/drivers/net/netdevsim/netdevsim.h
@@ -15,6 +15,7 @@
#include <linux/debugfs.h>
#include <linux/device.h>
+#include <linux/ethtool.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/netdevice.h>
@@ -51,13 +52,19 @@ struct nsim_ipsec {
u32 ok;
};
-struct nsim_ethtool {
+struct nsim_ethtool_pauseparam {
bool rx;
bool tx;
bool report_stats_rx;
bool report_stats_tx;
};
+struct nsim_ethtool {
+ struct nsim_ethtool_pauseparam pauseparam;
+ struct ethtool_coalesce coalesce;
+ struct ethtool_ringparam ring;
+};
+
struct netdevsim {
struct net_device *netdev;
struct nsim_dev *nsim_dev;
@@ -158,6 +165,7 @@ enum nsim_resource_id {
NSIM_RESOURCE_IPV6,
NSIM_RESOURCE_IPV6_FIB,
NSIM_RESOURCE_IPV6_FIB_RULES,
+ NSIM_RESOURCE_NEXTHOPS,
};
struct nsim_dev_health {
diff --git a/drivers/net/nlmon.c b/drivers/net/nlmon.c
index afb119f38325..5e19a6839dea 100644
--- a/drivers/net/nlmon.c
+++ b/drivers/net/nlmon.c
@@ -1,4 +1,5 @@
// SPDX-License-Identifier: GPL-2.0-only
+#include <linux/ethtool.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
diff --git a/drivers/net/phy/adin.c b/drivers/net/phy/adin.c
index 307f0ac1287b..55a0b91816e2 100644
--- a/drivers/net/phy/adin.c
+++ b/drivers/net/phy/adin.c
@@ -8,6 +8,7 @@
#include <linux/bitfield.h>
#include <linux/delay.h>
#include <linux/errno.h>
+#include <linux/ethtool_netlink.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mii.h>
@@ -23,6 +24,7 @@
#define ADIN1300_PHY_CTRL1 0x0012
#define ADIN1300_AUTO_MDI_EN BIT(10)
#define ADIN1300_MAN_MDIX_EN BIT(9)
+#define ADIN1300_DIAG_CLK_EN BIT(2)
#define ADIN1300_RX_ERR_CNT 0x0014
@@ -69,6 +71,31 @@
#define ADIN1300_CLOCK_STOP_REG 0x9400
#define ADIN1300_LPI_WAKE_ERR_CNT_REG 0xa000
+#define ADIN1300_CDIAG_RUN 0xba1b
+#define ADIN1300_CDIAG_RUN_EN BIT(0)
+
+/*
+ * The XSIM3/2/1 and XSHRT3/2/1 bits are relative to the pair being
+ * diagnosed, i.e. they refer to the three other pairs:
+ * For CDIAG_DTLD_RSLTS(0) it's ADIN1300_CDIAG_RSLT_XSIM3/2/1
+ * For CDIAG_DTLD_RSLTS(1) it's ADIN1300_CDIAG_RSLT_XSIM3/2/0
+ * For CDIAG_DTLD_RSLTS(2) it's ADIN1300_CDIAG_RSLT_XSIM3/1/0
+ * For CDIAG_DTLD_RSLTS(3) it's ADIN1300_CDIAG_RSLT_XSIM2/1/0
+ */
+#define ADIN1300_CDIAG_DTLD_RSLTS(x) (0xba1d + (x))
+#define ADIN1300_CDIAG_RSLT_BUSY BIT(10)
+#define ADIN1300_CDIAG_RSLT_XSIM3 BIT(9)
+#define ADIN1300_CDIAG_RSLT_XSIM2 BIT(8)
+#define ADIN1300_CDIAG_RSLT_XSIM1 BIT(7)
+#define ADIN1300_CDIAG_RSLT_SIM BIT(6)
+#define ADIN1300_CDIAG_RSLT_XSHRT3 BIT(5)
+#define ADIN1300_CDIAG_RSLT_XSHRT2 BIT(4)
+#define ADIN1300_CDIAG_RSLT_XSHRT1 BIT(3)
+#define ADIN1300_CDIAG_RSLT_SHRT BIT(2)
+#define ADIN1300_CDIAG_RSLT_OPEN BIT(1)
+#define ADIN1300_CDIAG_RSLT_GOOD BIT(0)
+
+#define ADIN1300_CDIAG_FLT_DIST(x) (0xba21 + (x))
+
#define ADIN1300_GE_SOFT_RESET_REG 0xff0c
#define ADIN1300_GE_SOFT_RESET BIT(0)
@@ -321,10 +348,9 @@ static int adin_set_downshift(struct phy_device *phydev, u8 cnt)
return -E2BIG;
val = FIELD_PREP(ADIN1300_DOWNSPEED_RETRIES_MSK, cnt);
- val |= ADIN1300_LINKING_EN;
rc = phy_modify(phydev, ADIN1300_PHY_CTRL3,
- ADIN1300_LINKING_EN | ADIN1300_DOWNSPEED_RETRIES_MSK,
+ ADIN1300_DOWNSPEED_RETRIES_MSK,
val);
if (rc < 0)
return rc;
@@ -445,12 +471,43 @@ static int adin_phy_ack_intr(struct phy_device *phydev)
static int adin_phy_config_intr(struct phy_device *phydev)
{
- if (phydev->interrupts == PHY_INTERRUPT_ENABLED)
- return phy_set_bits(phydev, ADIN1300_INT_MASK_REG,
- ADIN1300_INT_MASK_EN);
+ int err;
+
+ if (phydev->interrupts == PHY_INTERRUPT_ENABLED) {
+ err = adin_phy_ack_intr(phydev);
+ if (err)
+ return err;
+
+ err = phy_set_bits(phydev, ADIN1300_INT_MASK_REG,
+ ADIN1300_INT_MASK_EN);
+ } else {
+ err = phy_clear_bits(phydev, ADIN1300_INT_MASK_REG,
+ ADIN1300_INT_MASK_EN);
+ if (err)
+ return err;
+
+ err = adin_phy_ack_intr(phydev);
+ }
+
+ return err;
+}
+
+static irqreturn_t adin_phy_handle_interrupt(struct phy_device *phydev)
+{
+ int irq_status;
+
+ irq_status = phy_read(phydev, ADIN1300_INT_STATUS_REG);
+ if (irq_status < 0) {
+ phy_error(phydev);
+ return IRQ_NONE;
+ }
- return phy_clear_bits(phydev, ADIN1300_INT_MASK_REG,
- ADIN1300_INT_MASK_EN);
+ if (!(irq_status & ADIN1300_INT_LINK_STAT_CHNG_EN))
+ return IRQ_NONE;
+
+ phy_trigger_machine(phydev);
+
+ return IRQ_HANDLED;
}
static int adin_cl45_to_adin_reg(struct phy_device *phydev, int devad,
@@ -555,6 +612,14 @@ static int adin_config_aneg(struct phy_device *phydev)
{
int ret;
+ ret = phy_clear_bits(phydev, ADIN1300_PHY_CTRL1, ADIN1300_DIAG_CLK_EN);
+ if (ret < 0)
+ return ret;
+
+ ret = phy_set_bits(phydev, ADIN1300_PHY_CTRL3, ADIN1300_LINKING_EN);
+ if (ret < 0)
+ return ret;
+
ret = adin_config_mdix(phydev);
if (ret)
return ret;
@@ -725,10 +790,117 @@ static int adin_probe(struct phy_device *phydev)
return 0;
}
+static int adin_cable_test_start(struct phy_device *phydev)
+{
+ int ret;
+
+ ret = phy_clear_bits(phydev, ADIN1300_PHY_CTRL3, ADIN1300_LINKING_EN);
+ if (ret < 0)
+ return ret;
+
+ ret = phy_clear_bits(phydev, ADIN1300_PHY_CTRL1, ADIN1300_DIAG_CLK_EN);
+ if (ret < 0)
+ return ret;
+
+ /* wait a bit for the clock to stabilize */
+ msleep(50);
+
+ return phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, ADIN1300_CDIAG_RUN,
+ ADIN1300_CDIAG_RUN_EN);
+}
+
+static int adin_cable_test_report_trans(int result)
+{
+ int mask;
+
+ if (result & ADIN1300_CDIAG_RSLT_GOOD)
+ return ETHTOOL_A_CABLE_RESULT_CODE_OK;
+ if (result & ADIN1300_CDIAG_RSLT_OPEN)
+ return ETHTOOL_A_CABLE_RESULT_CODE_OPEN;
+
+ /* short with other pairs */
+ mask = ADIN1300_CDIAG_RSLT_XSHRT3 |
+ ADIN1300_CDIAG_RSLT_XSHRT2 |
+ ADIN1300_CDIAG_RSLT_XSHRT1;
+ if (result & mask)
+ return ETHTOOL_A_CABLE_RESULT_CODE_CROSS_SHORT;
+
+ if (result & ADIN1300_CDIAG_RSLT_SHRT)
+ return ETHTOOL_A_CABLE_RESULT_CODE_SAME_SHORT;
+
+ return ETHTOOL_A_CABLE_RESULT_CODE_UNSPEC;
+}
+
+static int adin_cable_test_report_pair(struct phy_device *phydev,
+ unsigned int pair)
+{
+ int fault_rslt;
+ int ret;
+
+ ret = phy_read_mmd(phydev, MDIO_MMD_VEND1,
+ ADIN1300_CDIAG_DTLD_RSLTS(pair));
+ if (ret < 0)
+ return ret;
+
+ fault_rslt = adin_cable_test_report_trans(ret);
+
+ ret = ethnl_cable_test_result(phydev, pair, fault_rslt);
+ if (ret < 0)
+ return ret;
+
+ ret = phy_read_mmd(phydev, MDIO_MMD_VEND1,
+ ADIN1300_CDIAG_FLT_DIST(pair));
+ if (ret < 0)
+ return ret;
+
+ switch (fault_rslt) {
+ case ETHTOOL_A_CABLE_RESULT_CODE_OPEN:
+ case ETHTOOL_A_CABLE_RESULT_CODE_SAME_SHORT:
+ case ETHTOOL_A_CABLE_RESULT_CODE_CROSS_SHORT:
+ return ethnl_cable_test_fault_length(phydev, pair, ret * 100);
+ default:
+ return 0;
+ }
+}
+
+static int adin_cable_test_report(struct phy_device *phydev)
+{
+ unsigned int pair;
+ int ret;
+
+ for (pair = ETHTOOL_A_CABLE_PAIR_A; pair <= ETHTOOL_A_CABLE_PAIR_D; pair++) {
+ ret = adin_cable_test_report_pair(phydev, pair);
+ if (ret < 0)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int adin_cable_test_get_status(struct phy_device *phydev,
+ bool *finished)
+{
+ int ret;
+
+ *finished = false;
+
+ ret = phy_read_mmd(phydev, MDIO_MMD_VEND1, ADIN1300_CDIAG_RUN);
+ if (ret < 0)
+ return ret;
+
+ if (ret & ADIN1300_CDIAG_RUN_EN)
+ return 0;
+
+ *finished = true;
+
+ return adin_cable_test_report(phydev);
+}
+
static struct phy_driver adin_driver[] = {
{
PHY_ID_MATCH_MODEL(PHY_ID_ADIN1200),
.name = "ADIN1200",
+ .flags = PHY_POLL_CABLE_TEST,
.probe = adin_probe,
.config_init = adin_config_init,
.soft_reset = adin_soft_reset,
@@ -736,8 +908,8 @@ static struct phy_driver adin_driver[] = {
.read_status = adin_read_status,
.get_tunable = adin_get_tunable,
.set_tunable = adin_set_tunable,
- .ack_interrupt = adin_phy_ack_intr,
.config_intr = adin_phy_config_intr,
+ .handle_interrupt = adin_phy_handle_interrupt,
.get_sset_count = adin_get_sset_count,
.get_strings = adin_get_strings,
.get_stats = adin_get_stats,
@@ -745,10 +917,13 @@ static struct phy_driver adin_driver[] = {
.suspend = genphy_suspend,
.read_mmd = adin_read_mmd,
.write_mmd = adin_write_mmd,
+ .cable_test_start = adin_cable_test_start,
+ .cable_test_get_status = adin_cable_test_get_status,
},
{
PHY_ID_MATCH_MODEL(PHY_ID_ADIN1300),
.name = "ADIN1300",
+ .flags = PHY_POLL_CABLE_TEST,
.probe = adin_probe,
.config_init = adin_config_init,
.soft_reset = adin_soft_reset,
@@ -756,8 +931,8 @@ static struct phy_driver adin_driver[] = {
.read_status = adin_read_status,
.get_tunable = adin_get_tunable,
.set_tunable = adin_set_tunable,
- .ack_interrupt = adin_phy_ack_intr,
.config_intr = adin_phy_config_intr,
+ .handle_interrupt = adin_phy_handle_interrupt,
.get_sset_count = adin_get_sset_count,
.get_strings = adin_get_strings,
.get_stats = adin_get_stats,
@@ -765,6 +940,8 @@ static struct phy_driver adin_driver[] = {
.suspend = genphy_suspend,
.read_mmd = adin_read_mmd,
.write_mmd = adin_write_mmd,
+ .cable_test_start = adin_cable_test_start,
+ .cable_test_get_status = adin_cable_test_get_status,
},
};
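
On the PHY_POLL_CABLE_TEST flag added above: phylib, not the driver, drives
the diagnostic to completion by polling. A simplified sketch of the
state-machine behaviour (phylib core logic, not driver code):

        bool finished = false;
        int err;

        err = phydev->drv->cable_test_start(phydev);
        while (!err && !finished)
                err = phydev->drv->cable_test_get_status(phydev, &finished);
        /* By the time *finished is set, adin_cable_test_report_pair() has
         * already pushed per-pair results via ethnl_cable_test_result() and
         * ethnl_cable_test_fault_length().
         */
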
diff --git a/drivers/net/phy/amd.c b/drivers/net/phy/amd.c
index eef35f8c8d45..001bb6d8bfce 100644
--- a/drivers/net/phy/amd.c
+++ b/drivers/net/phy/amd.c
@@ -20,6 +20,10 @@
#define MII_AM79C_IR_EN_ANEG 0x0100 /* IR enable Aneg Complete */
#define MII_AM79C_IR_IMASK_INIT (MII_AM79C_IR_EN_LINK | MII_AM79C_IR_EN_ANEG)
+#define MII_AM79C_IR_LINK_DOWN BIT(2)
+#define MII_AM79C_IR_ANEG_DONE BIT(0)
+#define MII_AM79C_IR_IMASK_STAT (MII_AM79C_IR_LINK_DOWN | MII_AM79C_IR_ANEG_DONE)
+
MODULE_DESCRIPTION("AMD PHY driver");
MODULE_AUTHOR("Heiko Schocher <hs@denx.de>");
MODULE_LICENSE("GPL");
@@ -48,22 +52,49 @@ static int am79c_config_intr(struct phy_device *phydev)
{
int err;
- if (phydev->interrupts == PHY_INTERRUPT_ENABLED)
+ if (phydev->interrupts == PHY_INTERRUPT_ENABLED) {
+ err = am79c_ack_interrupt(phydev);
+ if (err)
+ return err;
+
err = phy_write(phydev, MII_AM79C_IR, MII_AM79C_IR_IMASK_INIT);
- else
+ } else {
err = phy_write(phydev, MII_AM79C_IR, 0);
+ if (err)
+ return err;
+
+ err = am79c_ack_interrupt(phydev);
+ }
return err;
}
+static irqreturn_t am79c_handle_interrupt(struct phy_device *phydev)
+{
+ int irq_status;
+
+ irq_status = phy_read(phydev, MII_AM79C_IR);
+ if (irq_status < 0) {
+ phy_error(phydev);
+ return IRQ_NONE;
+ }
+
+ if (!(irq_status & MII_AM79C_IR_IMASK_STAT))
+ return IRQ_NONE;
+
+ phy_trigger_machine(phydev);
+
+ return IRQ_HANDLED;
+}
+
static struct phy_driver am79c_driver[] = { {
.phy_id = PHY_ID_AM79C874,
.name = "AM79C874",
.phy_id_mask = 0xfffffff0,
/* PHY_BASIC_FEATURES */
.config_init = am79c_config_init,
- .ack_interrupt = am79c_ack_interrupt,
.config_intr = am79c_config_intr,
+ .handle_interrupt = am79c_handle_interrupt,
} };
module_phy_driver(am79c_driver);
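
The conversion above follows the same shape as the rest of this series, so
the recurring template is worth spelling out once (FOO_* registers and the
foo_* helpers are hypothetical): ack stale interrupts before unmasking, and
mask before the final ack when disabling, so nothing is left pending either
way.

        static int foo_config_intr(struct phy_device *phydev)
        {
                int err;

                if (phydev->interrupts == PHY_INTERRUPT_ENABLED) {
                        err = foo_ack_interrupt(phydev); /* clear stale first */
                        if (err)
                                return err;

                        err = phy_write(phydev, FOO_INTR_MASK, FOO_INTR_EN);
                } else {
                        err = phy_write(phydev, FOO_INTR_MASK, 0);
                        if (err)
                                return err;

                        err = foo_ack_interrupt(phydev); /* drop pending last */
                }

                return err;
        }
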
diff --git a/drivers/net/phy/aquantia_main.c b/drivers/net/phy/aquantia_main.c
index 41e7c1432497..968dd43a2b1e 100644
--- a/drivers/net/phy/aquantia_main.c
+++ b/drivers/net/phy/aquantia_main.c
@@ -52,6 +52,7 @@
#define MDIO_AN_TX_VEND_INT_STATUS1_DOWNSHIFT BIT(1)
#define MDIO_AN_TX_VEND_INT_STATUS2 0xcc01
+#define MDIO_AN_TX_VEND_INT_STATUS2_MASK BIT(0)
#define MDIO_AN_TX_VEND_INT_MASK2 0xd401
#define MDIO_AN_TX_VEND_INT_MASK2_LINK BIT(0)
@@ -246,6 +247,13 @@ static int aqr_config_intr(struct phy_device *phydev)
bool en = phydev->interrupts == PHY_INTERRUPT_ENABLED;
int err;
+ if (en) {
+ /* Clear any pending interrupts before enabling them */
+ err = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_TX_VEND_INT_STATUS2);
+ if (err < 0)
+ return err;
+ }
+
err = phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_TX_VEND_INT_MASK2,
en ? MDIO_AN_TX_VEND_INT_MASK2_LINK : 0);
if (err < 0)
@@ -256,18 +264,39 @@ static int aqr_config_intr(struct phy_device *phydev)
if (err < 0)
return err;
- return phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_GLOBAL_INT_VEND_MASK,
- en ? VEND1_GLOBAL_INT_VEND_MASK_GLOBAL3 |
- VEND1_GLOBAL_INT_VEND_MASK_AN : 0);
+ err = phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_GLOBAL_INT_VEND_MASK,
+ en ? VEND1_GLOBAL_INT_VEND_MASK_GLOBAL3 |
+ VEND1_GLOBAL_INT_VEND_MASK_AN : 0);
+ if (err < 0)
+ return err;
+
+ if (!en) {
+ /* Clear any pending interrupts after we have disabled them */
+ err = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_TX_VEND_INT_STATUS2);
+ if (err < 0)
+ return err;
+ }
+
+ return 0;
}
-static int aqr_ack_interrupt(struct phy_device *phydev)
+static irqreturn_t aqr_handle_interrupt(struct phy_device *phydev)
{
- int reg;
+ int irq_status;
+
+ irq_status = phy_read_mmd(phydev, MDIO_MMD_AN,
+ MDIO_AN_TX_VEND_INT_STATUS2);
+ if (irq_status < 0) {
+ phy_error(phydev);
+ return IRQ_NONE;
+ }
+
+ if (!(irq_status & MDIO_AN_TX_VEND_INT_STATUS2_MASK))
+ return IRQ_NONE;
+
+ phy_trigger_machine(phydev);
- reg = phy_read_mmd(phydev, MDIO_MMD_AN,
- MDIO_AN_TX_VEND_INT_STATUS2);
- return (reg < 0) ? reg : 0;
+ return IRQ_HANDLED;
}
static int aqr_read_status(struct phy_device *phydev)
@@ -584,7 +613,7 @@ static struct phy_driver aqr_driver[] = {
.name = "Aquantia AQ1202",
.config_aneg = aqr_config_aneg,
.config_intr = aqr_config_intr,
- .ack_interrupt = aqr_ack_interrupt,
+ .handle_interrupt = aqr_handle_interrupt,
.read_status = aqr_read_status,
},
{
@@ -592,7 +621,7 @@ static struct phy_driver aqr_driver[] = {
.name = "Aquantia AQ2104",
.config_aneg = aqr_config_aneg,
.config_intr = aqr_config_intr,
- .ack_interrupt = aqr_ack_interrupt,
+ .handle_interrupt = aqr_handle_interrupt,
.read_status = aqr_read_status,
},
{
@@ -600,7 +629,7 @@ static struct phy_driver aqr_driver[] = {
.name = "Aquantia AQR105",
.config_aneg = aqr_config_aneg,
.config_intr = aqr_config_intr,
- .ack_interrupt = aqr_ack_interrupt,
+ .handle_interrupt = aqr_handle_interrupt,
.read_status = aqr_read_status,
.suspend = aqr107_suspend,
.resume = aqr107_resume,
@@ -610,7 +639,7 @@ static struct phy_driver aqr_driver[] = {
.name = "Aquantia AQR106",
.config_aneg = aqr_config_aneg,
.config_intr = aqr_config_intr,
- .ack_interrupt = aqr_ack_interrupt,
+ .handle_interrupt = aqr_handle_interrupt,
.read_status = aqr_read_status,
},
{
@@ -620,7 +649,7 @@ static struct phy_driver aqr_driver[] = {
.config_init = aqr107_config_init,
.config_aneg = aqr_config_aneg,
.config_intr = aqr_config_intr,
- .ack_interrupt = aqr_ack_interrupt,
+ .handle_interrupt = aqr_handle_interrupt,
.read_status = aqr107_read_status,
.get_tunable = aqr107_get_tunable,
.set_tunable = aqr107_set_tunable,
@@ -638,7 +667,7 @@ static struct phy_driver aqr_driver[] = {
.config_init = aqcs109_config_init,
.config_aneg = aqr_config_aneg,
.config_intr = aqr_config_intr,
- .ack_interrupt = aqr_ack_interrupt,
+ .handle_interrupt = aqr_handle_interrupt,
.read_status = aqr107_read_status,
.get_tunable = aqr107_get_tunable,
.set_tunable = aqr107_set_tunable,
@@ -654,7 +683,7 @@ static struct phy_driver aqr_driver[] = {
.name = "Aquantia AQR405",
.config_aneg = aqr_config_aneg,
.config_intr = aqr_config_intr,
- .ack_interrupt = aqr_ack_interrupt,
+ .handle_interrupt = aqr_handle_interrupt,
.read_status = aqr_read_status,
},
};
diff --git a/drivers/net/phy/at803x.c b/drivers/net/phy/at803x.c
index ed601a7e46a0..d0b36fd6c265 100644
--- a/drivers/net/phy/at803x.c
+++ b/drivers/net/phy/at803x.c
@@ -614,6 +614,11 @@ static int at803x_config_intr(struct phy_device *phydev)
value = phy_read(phydev, AT803X_INTR_ENABLE);
if (phydev->interrupts == PHY_INTERRUPT_ENABLED) {
+ /* Clear any pending interrupts */
+ err = at803x_ack_interrupt(phydev);
+ if (err)
+ return err;
+
value |= AT803X_INTR_ENABLE_AUTONEG_ERR;
value |= AT803X_INTR_ENABLE_SPEED_CHANGED;
value |= AT803X_INTR_ENABLE_DUPLEX_CHANGED;
@@ -621,13 +626,44 @@ static int at803x_config_intr(struct phy_device *phydev)
value |= AT803X_INTR_ENABLE_LINK_SUCCESS;
err = phy_write(phydev, AT803X_INTR_ENABLE, value);
- }
- else
+ } else {
err = phy_write(phydev, AT803X_INTR_ENABLE, 0);
+ if (err)
+ return err;
+
+ /* Clear any pending interrupts */
+ err = at803x_ack_interrupt(phydev);
+ }
return err;
}
+static irqreturn_t at803x_handle_interrupt(struct phy_device *phydev)
+{
+ int irq_status, int_enabled;
+
+ irq_status = phy_read(phydev, AT803X_INTR_STATUS);
+ if (irq_status < 0) {
+ phy_error(phydev);
+ return IRQ_NONE;
+ }
+
+ /* Read the current enabled interrupts */
+ int_enabled = phy_read(phydev, AT803X_INTR_ENABLE);
+ if (int_enabled < 0) {
+ phy_error(phydev);
+ return IRQ_NONE;
+ }
+
+ /* See if this was one of our enabled interrupts */
+ if (!(irq_status & int_enabled))
+ return IRQ_NONE;
+
+ phy_trigger_machine(phydev);
+
+ return IRQ_HANDLED;
+}
+
static void at803x_link_change_notify(struct phy_device *phydev)
{
/*
@@ -1062,8 +1098,8 @@ static struct phy_driver at803x_driver[] = {
.resume = at803x_resume,
/* PHY_GBIT_FEATURES */
.read_status = at803x_read_status,
- .ack_interrupt = at803x_ack_interrupt,
.config_intr = at803x_config_intr,
+ .handle_interrupt = at803x_handle_interrupt,
.get_tunable = at803x_get_tunable,
.set_tunable = at803x_set_tunable,
.cable_test_start = at803x_cable_test_start,
@@ -1082,8 +1118,8 @@ static struct phy_driver at803x_driver[] = {
.suspend = at803x_suspend,
.resume = at803x_resume,
/* PHY_BASIC_FEATURES */
- .ack_interrupt = at803x_ack_interrupt,
.config_intr = at803x_config_intr,
+ .handle_interrupt = at803x_handle_interrupt,
}, {
/* Qualcomm Atheros AR8031/AR8033 */
PHY_ID_MATCH_EXACT(ATH8031_PHY_ID),
@@ -1100,8 +1136,8 @@ static struct phy_driver at803x_driver[] = {
/* PHY_GBIT_FEATURES */
.read_status = at803x_read_status,
.aneg_done = at803x_aneg_done,
- .ack_interrupt = &at803x_ack_interrupt,
.config_intr = &at803x_config_intr,
+ .handle_interrupt = at803x_handle_interrupt,
.get_tunable = at803x_get_tunable,
.set_tunable = at803x_set_tunable,
.cable_test_start = at803x_cable_test_start,
@@ -1120,8 +1156,8 @@ static struct phy_driver at803x_driver[] = {
.suspend = at803x_suspend,
.resume = at803x_resume,
/* PHY_BASIC_FEATURES */
- .ack_interrupt = at803x_ack_interrupt,
.config_intr = at803x_config_intr,
+ .handle_interrupt = at803x_handle_interrupt,
.cable_test_start = at803x_cable_test_start,
.cable_test_get_status = at803x_cable_test_get_status,
}, {
@@ -1132,8 +1168,8 @@ static struct phy_driver at803x_driver[] = {
.resume = at803x_resume,
.flags = PHY_POLL_CABLE_TEST,
/* PHY_BASIC_FEATURES */
- .ack_interrupt = &at803x_ack_interrupt,
.config_intr = &at803x_config_intr,
+ .handle_interrupt = at803x_handle_interrupt,
.cable_test_start = at803x_cable_test_start,
.cable_test_get_status = at803x_cable_test_get_status,
.read_status = at803x_read_status,
diff --git a/drivers/net/phy/bcm-cygnus.c b/drivers/net/phy/bcm-cygnus.c
index 9ccf28b0a04d..da8f7cb41b44 100644
--- a/drivers/net/phy/bcm-cygnus.c
+++ b/drivers/net/phy/bcm-cygnus.c
@@ -256,8 +256,8 @@ static struct phy_driver bcm_cygnus_phy_driver[] = {
.name = "Broadcom Cygnus PHY",
/* PHY_GBIT_FEATURES */
.config_init = bcm_cygnus_config_init,
- .ack_interrupt = bcm_phy_ack_intr,
.config_intr = bcm_phy_config_intr,
+ .handle_interrupt = bcm_phy_handle_interrupt,
.suspend = genphy_suspend,
.resume = bcm_cygnus_resume,
}, {
diff --git a/drivers/net/phy/bcm-phy-lib.c b/drivers/net/phy/bcm-phy-lib.c
index ef6825b30323..53282a6d5928 100644
--- a/drivers/net/phy/bcm-phy-lib.c
+++ b/drivers/net/phy/bcm-phy-lib.c
@@ -181,21 +181,62 @@ EXPORT_SYMBOL_GPL(bcm_phy_ack_intr);
int bcm_phy_config_intr(struct phy_device *phydev)
{
- int reg;
+ int reg, err;
reg = phy_read(phydev, MII_BCM54XX_ECR);
if (reg < 0)
return reg;
- if (phydev->interrupts == PHY_INTERRUPT_ENABLED)
+ if (phydev->interrupts == PHY_INTERRUPT_ENABLED) {
+ err = bcm_phy_ack_intr(phydev);
+ if (err)
+ return err;
+
reg &= ~MII_BCM54XX_ECR_IM;
- else
+ err = phy_write(phydev, MII_BCM54XX_ECR, reg);
+ } else {
reg |= MII_BCM54XX_ECR_IM;
+ err = phy_write(phydev, MII_BCM54XX_ECR, reg);
+ if (err)
+ return err;
- return phy_write(phydev, MII_BCM54XX_ECR, reg);
+ err = bcm_phy_ack_intr(phydev);
+ }
+ return err;
}
EXPORT_SYMBOL_GPL(bcm_phy_config_intr);
+irqreturn_t bcm_phy_handle_interrupt(struct phy_device *phydev)
+{
+ int irq_status, irq_mask;
+
+ irq_status = phy_read(phydev, MII_BCM54XX_ISR);
+ if (irq_status < 0) {
+ phy_error(phydev);
+ return IRQ_NONE;
+ }
+
+ /* If a bit from the Interrupt Mask register is set, the corresponding
+ * bit from the Interrupt Status register is masked. So read the IMR
+ * and then flip the bits to get the list of possible interrupt
+ * sources.
+ */
+ irq_mask = phy_read(phydev, MII_BCM54XX_IMR);
+ if (irq_mask < 0) {
+ phy_error(phydev);
+ return IRQ_NONE;
+ }
+ irq_mask = ~irq_mask;
+
+ if (!(irq_status & irq_mask))
+ return IRQ_NONE;
+
+ phy_trigger_machine(phydev);
+
+ return IRQ_HANDLED;
+}
+EXPORT_SYMBOL_GPL(bcm_phy_handle_interrupt);
+
int bcm_phy_read_shadow(struct phy_device *phydev, u16 shadow)
{
phy_write(phydev, MII_BCM54XX_SHD, MII_BCM54XX_SHD_VAL(shadow));
diff --git a/drivers/net/phy/bcm-phy-lib.h b/drivers/net/phy/bcm-phy-lib.h
index 237a8503c9b4..c3842f87c33b 100644
--- a/drivers/net/phy/bcm-phy-lib.h
+++ b/drivers/net/phy/bcm-phy-lib.h
@@ -63,6 +63,7 @@ int bcm_phy_modify_rdb(struct phy_device *phydev, u16 rdb, u16 mask,
int bcm_phy_ack_intr(struct phy_device *phydev);
int bcm_phy_config_intr(struct phy_device *phydev);
+irqreturn_t bcm_phy_handle_interrupt(struct phy_device *phydev);
int bcm_phy_enable_apd(struct phy_device *phydev, bool dll_pwr_down);
diff --git a/drivers/net/phy/bcm54140.c b/drivers/net/phy/bcm54140.c
index 8998e68bb26b..d8f3024860dc 100644
--- a/drivers/net/phy/bcm54140.c
+++ b/drivers/net/phy/bcm54140.c
@@ -637,13 +637,29 @@ static int bcm54140_config_init(struct phy_device *phydev)
BCM54140_RDB_C_PWR_ISOLATE, 0);
}
-static int bcm54140_did_interrupt(struct phy_device *phydev)
+static irqreturn_t bcm54140_handle_interrupt(struct phy_device *phydev)
{
- int ret;
+ int irq_status, irq_mask;
+
+ irq_status = bcm_phy_read_rdb(phydev, BCM54140_RDB_ISR);
+ if (irq_status < 0) {
+ phy_error(phydev);
+ return IRQ_NONE;
+ }
+
+ irq_mask = bcm_phy_read_rdb(phydev, BCM54140_RDB_IMR);
+ if (irq_mask < 0) {
+ phy_error(phydev);
+ return IRQ_NONE;
+ }
+ irq_mask = ~irq_mask;
+
+ if (!(irq_status & irq_mask))
+ return IRQ_NONE;
- ret = bcm_phy_read_rdb(phydev, BCM54140_RDB_ISR);
+ phy_trigger_machine(phydev);
- return (ret < 0) ? 0 : ret;
+ return IRQ_HANDLED;
}
static int bcm54140_ack_intr(struct phy_device *phydev)
@@ -665,7 +681,7 @@ static int bcm54140_config_intr(struct phy_device *phydev)
BCM54140_RDB_TOP_IMR_PORT0, BCM54140_RDB_TOP_IMR_PORT1,
BCM54140_RDB_TOP_IMR_PORT2, BCM54140_RDB_TOP_IMR_PORT3,
};
- int reg;
+ int reg, err;
if (priv->port >= ARRAY_SIZE(port_to_imr_bit))
return -EINVAL;
@@ -674,12 +690,23 @@ static int bcm54140_config_intr(struct phy_device *phydev)
if (reg < 0)
return reg;
- if (phydev->interrupts == PHY_INTERRUPT_ENABLED)
+ if (phydev->interrupts == PHY_INTERRUPT_ENABLED) {
+ err = bcm54140_ack_intr(phydev);
+ if (err)
+ return err;
+
reg &= ~port_to_imr_bit[priv->port];
- else
+ err = bcm54140_base_write_rdb(phydev, BCM54140_RDB_TOP_IMR, reg);
+ } else {
reg |= port_to_imr_bit[priv->port];
+ err = bcm54140_base_write_rdb(phydev, BCM54140_RDB_TOP_IMR, reg);
+ if (err)
+ return err;
+
+ err = bcm54140_ack_intr(phydev);
+ }
- return bcm54140_base_write_rdb(phydev, BCM54140_RDB_TOP_IMR, reg);
+ return err;
}
static int bcm54140_get_downshift(struct phy_device *phydev, u8 *data)
@@ -834,8 +861,7 @@ static struct phy_driver bcm54140_drivers[] = {
.flags = PHY_POLL_CABLE_TEST,
.features = PHY_GBIT_FEATURES,
.config_init = bcm54140_config_init,
- .did_interrupt = bcm54140_did_interrupt,
- .ack_interrupt = bcm54140_ack_intr,
+ .handle_interrupt = bcm54140_handle_interrupt,
.config_intr = bcm54140_config_intr,
.probe = bcm54140_probe,
.suspend = genphy_suspend,
diff --git a/drivers/net/phy/bcm63xx.c b/drivers/net/phy/bcm63xx.c
index 459fb2069c7e..0eb33be824f1 100644
--- a/drivers/net/phy/bcm63xx.c
+++ b/drivers/net/phy/bcm63xx.c
@@ -25,12 +25,22 @@ static int bcm63xx_config_intr(struct phy_device *phydev)
if (reg < 0)
return reg;
- if (phydev->interrupts == PHY_INTERRUPT_ENABLED)
+ if (phydev->interrupts == PHY_INTERRUPT_ENABLED) {
+ err = bcm_phy_ack_intr(phydev);
+ if (err)
+ return err;
+
reg &= ~MII_BCM63XX_IR_GMASK;
- else
+ err = phy_write(phydev, MII_BCM63XX_IR, reg);
+ } else {
reg |= MII_BCM63XX_IR_GMASK;
+ err = phy_write(phydev, MII_BCM63XX_IR, reg);
+ if (err)
+ return err;
+
+ err = bcm_phy_ack_intr(phydev);
+ }
- err = phy_write(phydev, MII_BCM63XX_IR, reg);
return err;
}
@@ -67,8 +77,8 @@ static struct phy_driver bcm63xx_driver[] = {
/* PHY_BASIC_FEATURES */
.flags = PHY_IS_INTERNAL,
.config_init = bcm63xx_config_init,
- .ack_interrupt = bcm_phy_ack_intr,
.config_intr = bcm63xx_config_intr,
+ .handle_interrupt = bcm_phy_handle_interrupt,
}, {
/* same phy as above, with just a different OUI */
.phy_id = 0x002bdc00,
@@ -77,8 +87,8 @@ static struct phy_driver bcm63xx_driver[] = {
/* PHY_BASIC_FEATURES */
.flags = PHY_IS_INTERNAL,
.config_init = bcm63xx_config_init,
- .ack_interrupt = bcm_phy_ack_intr,
.config_intr = bcm63xx_config_intr,
+ .handle_interrupt = bcm_phy_handle_interrupt,
} };
module_phy_driver(bcm63xx_driver);
diff --git a/drivers/net/phy/bcm87xx.c b/drivers/net/phy/bcm87xx.c
index df360e1c5069..4ac8fd190e9d 100644
--- a/drivers/net/phy/bcm87xx.c
+++ b/drivers/net/phy/bcm87xx.c
@@ -144,35 +144,41 @@ static int bcm87xx_config_intr(struct phy_device *phydev)
if (reg < 0)
return reg;
- if (phydev->interrupts == PHY_INTERRUPT_ENABLED)
+ if (phydev->interrupts == PHY_INTERRUPT_ENABLED) {
+ /* phy_read() returns the self-clearing LASI status bits, so only a
+ * negative value is an error here.
+ */
+ err = phy_read(phydev, BCM87XX_LASI_STATUS);
+ if (err < 0)
+ return err;
+
reg |= 1;
- else
+ err = phy_write(phydev, BCM87XX_LASI_CONTROL, reg);
+ } else {
reg &= ~1;
+ err = phy_write(phydev, BCM87XX_LASI_CONTROL, reg);
+ if (err)
+ return err;
+
+ err = phy_read(phydev, BCM87XX_LASI_STATUS);
+ err = err < 0 ? err : 0;
+ }
- err = phy_write(phydev, BCM87XX_LASI_CONTROL, reg);
return err;
}
-static int bcm87xx_did_interrupt(struct phy_device *phydev)
+static irqreturn_t bcm87xx_handle_interrupt(struct phy_device *phydev)
{
- int reg;
+ int irq_status;
- reg = phy_read(phydev, BCM87XX_LASI_STATUS);
-
- if (reg < 0) {
- phydev_err(phydev,
- "Error: Read of BCM87XX_LASI_STATUS failed: %d\n",
- reg);
- return 0;
+ irq_status = phy_read(phydev, BCM87XX_LASI_STATUS);
+ if (irq_status < 0) {
+ phy_error(phydev);
+ return IRQ_NONE;
}
- return (reg & 1) != 0;
-}
-static int bcm87xx_ack_interrupt(struct phy_device *phydev)
-{
- /* Reading the LASI status clears it. */
- bcm87xx_did_interrupt(phydev);
- return 0;
+ if (irq_status == 0)
+ return IRQ_NONE;
+
+ phy_trigger_machine(phydev);
+
+ return IRQ_HANDLED;
}
static int bcm8706_match_phy_device(struct phy_device *phydev)
@@ -194,9 +200,8 @@ static struct phy_driver bcm87xx_driver[] = {
.config_init = bcm87xx_config_init,
.config_aneg = bcm87xx_config_aneg,
.read_status = bcm87xx_read_status,
- .ack_interrupt = bcm87xx_ack_interrupt,
.config_intr = bcm87xx_config_intr,
- .did_interrupt = bcm87xx_did_interrupt,
+ .handle_interrupt = bcm87xx_handle_interrupt,
.match_phy_device = bcm8706_match_phy_device,
}, {
.phy_id = PHY_ID_BCM8727,
@@ -206,9 +211,8 @@ static struct phy_driver bcm87xx_driver[] = {
.config_init = bcm87xx_config_init,
.config_aneg = bcm87xx_config_aneg,
.read_status = bcm87xx_read_status,
- .ack_interrupt = bcm87xx_ack_interrupt,
.config_intr = bcm87xx_config_intr,
- .did_interrupt = bcm87xx_did_interrupt,
+ .handle_interrupt = bcm87xx_handle_interrupt,
.match_phy_device = bcm8727_match_phy_device,
} };
diff --git a/drivers/net/phy/broadcom.c b/drivers/net/phy/broadcom.c
index cd271de9609b..8a4ec3222168 100644
--- a/drivers/net/phy/broadcom.c
+++ b/drivers/net/phy/broadcom.c
@@ -634,15 +634,43 @@ static int brcm_fet_config_intr(struct phy_device *phydev)
if (reg < 0)
return reg;
- if (phydev->interrupts == PHY_INTERRUPT_ENABLED)
+ if (phydev->interrupts == PHY_INTERRUPT_ENABLED) {
+ err = brcm_fet_ack_interrupt(phydev);
+ if (err)
+ return err;
+
reg &= ~MII_BRCM_FET_IR_MASK;
- else
+ err = phy_write(phydev, MII_BRCM_FET_INTREG, reg);
+ } else {
reg |= MII_BRCM_FET_IR_MASK;
+ err = phy_write(phydev, MII_BRCM_FET_INTREG, reg);
+ if (err)
+ return err;
+
+ err = brcm_fet_ack_interrupt(phydev);
+ }
- err = phy_write(phydev, MII_BRCM_FET_INTREG, reg);
return err;
}
+static irqreturn_t brcm_fet_handle_interrupt(struct phy_device *phydev)
+{
+ int irq_status;
+
+ irq_status = phy_read(phydev, MII_BRCM_FET_INTREG);
+ if (irq_status < 0) {
+ phy_error(phydev);
+ return IRQ_NONE;
+ }
+
+ if (irq_status == 0)
+ return IRQ_NONE;
+
+ phy_trigger_machine(phydev);
+
+ return IRQ_HANDLED;
+}
+
struct bcm53xx_phy_priv {
u64 *stats;
};
@@ -681,40 +709,40 @@ static struct phy_driver broadcom_drivers[] = {
.name = "Broadcom BCM5411",
/* PHY_GBIT_FEATURES */
.config_init = bcm54xx_config_init,
- .ack_interrupt = bcm_phy_ack_intr,
.config_intr = bcm_phy_config_intr,
+ .handle_interrupt = bcm_phy_handle_interrupt,
}, {
.phy_id = PHY_ID_BCM5421,
.phy_id_mask = 0xfffffff0,
.name = "Broadcom BCM5421",
/* PHY_GBIT_FEATURES */
.config_init = bcm54xx_config_init,
- .ack_interrupt = bcm_phy_ack_intr,
.config_intr = bcm_phy_config_intr,
+ .handle_interrupt = bcm_phy_handle_interrupt,
}, {
.phy_id = PHY_ID_BCM54210E,
.phy_id_mask = 0xfffffff0,
.name = "Broadcom BCM54210E",
/* PHY_GBIT_FEATURES */
.config_init = bcm54xx_config_init,
- .ack_interrupt = bcm_phy_ack_intr,
.config_intr = bcm_phy_config_intr,
+ .handle_interrupt = bcm_phy_handle_interrupt,
}, {
.phy_id = PHY_ID_BCM5461,
.phy_id_mask = 0xfffffff0,
.name = "Broadcom BCM5461",
/* PHY_GBIT_FEATURES */
.config_init = bcm54xx_config_init,
- .ack_interrupt = bcm_phy_ack_intr,
.config_intr = bcm_phy_config_intr,
+ .handle_interrupt = bcm_phy_handle_interrupt,
}, {
.phy_id = PHY_ID_BCM54612E,
.phy_id_mask = 0xfffffff0,
.name = "Broadcom BCM54612E",
/* PHY_GBIT_FEATURES */
.config_init = bcm54xx_config_init,
- .ack_interrupt = bcm_phy_ack_intr,
.config_intr = bcm_phy_config_intr,
+ .handle_interrupt = bcm_phy_handle_interrupt,
}, {
.phy_id = PHY_ID_BCM54616S,
.phy_id_mask = 0xfffffff0,
@@ -722,8 +750,8 @@ static struct phy_driver broadcom_drivers[] = {
/* PHY_GBIT_FEATURES */
.config_init = bcm54xx_config_init,
.config_aneg = bcm54616s_config_aneg,
- .ack_interrupt = bcm_phy_ack_intr,
.config_intr = bcm_phy_config_intr,
+ .handle_interrupt = bcm_phy_handle_interrupt,
.read_status = bcm54616s_read_status,
.probe = bcm54616s_probe,
}, {
@@ -732,8 +760,8 @@ static struct phy_driver broadcom_drivers[] = {
.name = "Broadcom BCM5464",
/* PHY_GBIT_FEATURES */
.config_init = bcm54xx_config_init,
- .ack_interrupt = bcm_phy_ack_intr,
.config_intr = bcm_phy_config_intr,
+ .handle_interrupt = bcm_phy_handle_interrupt,
.suspend = genphy_suspend,
.resume = genphy_resume,
}, {
@@ -743,8 +771,8 @@ static struct phy_driver broadcom_drivers[] = {
/* PHY_GBIT_FEATURES */
.config_init = bcm54xx_config_init,
.config_aneg = bcm5481_config_aneg,
- .ack_interrupt = bcm_phy_ack_intr,
.config_intr = bcm_phy_config_intr,
+ .handle_interrupt = bcm_phy_handle_interrupt,
}, {
.phy_id = PHY_ID_BCM54810,
.phy_id_mask = 0xfffffff0,
@@ -752,8 +780,8 @@ static struct phy_driver broadcom_drivers[] = {
/* PHY_GBIT_FEATURES */
.config_init = bcm54xx_config_init,
.config_aneg = bcm5481_config_aneg,
- .ack_interrupt = bcm_phy_ack_intr,
.config_intr = bcm_phy_config_intr,
+ .handle_interrupt = bcm_phy_handle_interrupt,
.suspend = genphy_suspend,
.resume = bcm54xx_resume,
}, {
@@ -763,8 +791,8 @@ static struct phy_driver broadcom_drivers[] = {
/* PHY_GBIT_FEATURES */
.config_init = bcm54811_config_init,
.config_aneg = bcm5481_config_aneg,
- .ack_interrupt = bcm_phy_ack_intr,
.config_intr = bcm_phy_config_intr,
+ .handle_interrupt = bcm_phy_handle_interrupt,
.suspend = genphy_suspend,
.resume = bcm54xx_resume,
}, {
@@ -774,48 +802,48 @@ static struct phy_driver broadcom_drivers[] = {
/* PHY_GBIT_FEATURES */
.config_init = bcm5482_config_init,
.read_status = bcm5482_read_status,
- .ack_interrupt = bcm_phy_ack_intr,
.config_intr = bcm_phy_config_intr,
+ .handle_interrupt = bcm_phy_handle_interrupt,
}, {
.phy_id = PHY_ID_BCM50610,
.phy_id_mask = 0xfffffff0,
.name = "Broadcom BCM50610",
/* PHY_GBIT_FEATURES */
.config_init = bcm54xx_config_init,
- .ack_interrupt = bcm_phy_ack_intr,
.config_intr = bcm_phy_config_intr,
+ .handle_interrupt = bcm_phy_handle_interrupt,
}, {
.phy_id = PHY_ID_BCM50610M,
.phy_id_mask = 0xfffffff0,
.name = "Broadcom BCM50610M",
/* PHY_GBIT_FEATURES */
.config_init = bcm54xx_config_init,
- .ack_interrupt = bcm_phy_ack_intr,
.config_intr = bcm_phy_config_intr,
+ .handle_interrupt = bcm_phy_handle_interrupt,
}, {
.phy_id = PHY_ID_BCM57780,
.phy_id_mask = 0xfffffff0,
.name = "Broadcom BCM57780",
/* PHY_GBIT_FEATURES */
.config_init = bcm54xx_config_init,
- .ack_interrupt = bcm_phy_ack_intr,
.config_intr = bcm_phy_config_intr,
+ .handle_interrupt = bcm_phy_handle_interrupt,
}, {
.phy_id = PHY_ID_BCMAC131,
.phy_id_mask = 0xfffffff0,
.name = "Broadcom BCMAC131",
/* PHY_BASIC_FEATURES */
.config_init = brcm_fet_config_init,
- .ack_interrupt = brcm_fet_ack_interrupt,
.config_intr = brcm_fet_config_intr,
+ .handle_interrupt = brcm_fet_handle_interrupt,
}, {
.phy_id = PHY_ID_BCM5241,
.phy_id_mask = 0xfffffff0,
.name = "Broadcom BCM5241",
/* PHY_BASIC_FEATURES */
.config_init = brcm_fet_config_init,
- .ack_interrupt = brcm_fet_ack_interrupt,
.config_intr = brcm_fet_config_intr,
+ .handle_interrupt = brcm_fet_handle_interrupt,
}, {
.phy_id = PHY_ID_BCM5395,
.phy_id_mask = 0xfffffff0,
@@ -837,16 +865,16 @@ static struct phy_driver broadcom_drivers[] = {
.get_stats = bcm53xx_phy_get_stats,
.probe = bcm53xx_phy_probe,
.config_init = bcm54xx_config_init,
- .ack_interrupt = bcm_phy_ack_intr,
.config_intr = bcm_phy_config_intr,
+ .handle_interrupt = bcm_phy_handle_interrupt,
}, {
.phy_id = PHY_ID_BCM89610,
.phy_id_mask = 0xfffffff0,
.name = "Broadcom BCM89610",
/* PHY_GBIT_FEATURES */
.config_init = bcm54xx_config_init,
- .ack_interrupt = bcm_phy_ack_intr,
.config_intr = bcm_phy_config_intr,
+ .handle_interrupt = bcm_phy_handle_interrupt,
} };
module_phy_driver(broadcom_drivers);
diff --git a/drivers/net/phy/cicada.c b/drivers/net/phy/cicada.c
index 9d1612a4d7e6..ef5f412e101f 100644
--- a/drivers/net/phy/cicada.c
+++ b/drivers/net/phy/cicada.c
@@ -87,15 +87,42 @@ static int cis820x_config_intr(struct phy_device *phydev)
{
int err;
- if (phydev->interrupts == PHY_INTERRUPT_ENABLED)
+ if (phydev->interrupts == PHY_INTERRUPT_ENABLED) {
+ err = cis820x_ack_interrupt(phydev);
+ if (err)
+ return err;
+
err = phy_write(phydev, MII_CIS8201_IMASK,
MII_CIS8201_IMASK_MASK);
- else
+ } else {
err = phy_write(phydev, MII_CIS8201_IMASK, 0);
+ if (err)
+ return err;
+
+ err = cis820x_ack_interrupt(phydev);
+ }
return err;
}
+static irqreturn_t cis820x_handle_interrupt(struct phy_device *phydev)
+{
+ int irq_status;
+
+ irq_status = phy_read(phydev, MII_CIS8201_ISTAT);
+ if (irq_status < 0) {
+ phy_error(phydev);
+ return IRQ_NONE;
+ }
+
+ if (!(irq_status & MII_CIS8201_IMASK_MASK))
+ return IRQ_NONE;
+
+ phy_trigger_machine(phydev);
+
+ return IRQ_HANDLED;
+}
+
/* Cicada 8201, a.k.a Vitesse VSC8201 */
static struct phy_driver cis820x_driver[] = {
{
@@ -104,16 +131,16 @@ static struct phy_driver cis820x_driver[] = {
.phy_id_mask = 0x000ffff0,
/* PHY_GBIT_FEATURES */
.config_init = &cis820x_config_init,
- .ack_interrupt = &cis820x_ack_interrupt,
.config_intr = &cis820x_config_intr,
+ .handle_interrupt = &cis820x_handle_interrupt,
}, {
.phy_id = 0x000fc440,
.name = "Cicada Cis8204",
.phy_id_mask = 0x000fffc0,
/* PHY_GBIT_FEATURES */
.config_init = &cis820x_config_init,
- .ack_interrupt = &cis820x_ack_interrupt,
.config_intr = &cis820x_config_intr,
+ .handle_interrupt = &cis820x_handle_interrupt,
} };
module_phy_driver(cis820x_driver);
diff --git a/drivers/net/phy/davicom.c b/drivers/net/phy/davicom.c
index 942f277463a4..a3b3842c67e5 100644
--- a/drivers/net/phy/davicom.c
+++ b/drivers/net/phy/davicom.c
@@ -47,6 +47,10 @@
#define MII_DM9161_INTR_STOP \
(MII_DM9161_INTR_DPLX_MASK | MII_DM9161_INTR_SPD_MASK \
| MII_DM9161_INTR_LINK_MASK | MII_DM9161_INTR_MASK)
+#define MII_DM9161_INTR_CHANGE \
+ (MII_DM9161_INTR_DPLX_CHANGE | \
+ MII_DM9161_INTR_SPD_CHANGE | \
+ MII_DM9161_INTR_LINK_CHANGE)
/* DM9161 10BT Configuration/Status */
#define MII_DM9161_10BTCSR 0x12
@@ -57,24 +61,58 @@ MODULE_AUTHOR("Andy Fleming");
MODULE_LICENSE("GPL");
+static int dm9161_ack_interrupt(struct phy_device *phydev)
+{
+ int err = phy_read(phydev, MII_DM9161_INTR);
+
+ return (err < 0) ? err : 0;
+}
+
#define DM9161_DELAY 1
static int dm9161_config_intr(struct phy_device *phydev)
{
- int temp;
+ int temp, err;
temp = phy_read(phydev, MII_DM9161_INTR);
if (temp < 0)
return temp;
- if (PHY_INTERRUPT_ENABLED == phydev->interrupts)
+ if (phydev->interrupts == PHY_INTERRUPT_ENABLED) {
+ err = dm9161_ack_interrupt(phydev);
+ if (err)
+ return err;
+
temp &= ~(MII_DM9161_INTR_STOP);
- else
+ err = phy_write(phydev, MII_DM9161_INTR, temp);
+ } else {
temp |= MII_DM9161_INTR_STOP;
+ err = phy_write(phydev, MII_DM9161_INTR, temp);
+ if (err)
+ return err;
+
+ err = dm9161_ack_interrupt(phydev);
+ }
+
+ return err;
+}
- temp = phy_write(phydev, MII_DM9161_INTR, temp);
+static irqreturn_t dm9161_handle_interrupt(struct phy_device *phydev)
+{
+ int irq_status;
+
+ irq_status = phy_read(phydev, MII_DM9161_INTR);
+ if (irq_status < 0) {
+ phy_error(phydev);
+ return IRQ_NONE;
+ }
- return temp;
+ if (!(irq_status & MII_DM9161_INTR_CHANGE))
+ return IRQ_NONE;
+
+ phy_trigger_machine(phydev);
+
+ return IRQ_HANDLED;
}
static int dm9161_config_aneg(struct phy_device *phydev)
@@ -132,13 +170,6 @@ static int dm9161_config_init(struct phy_device *phydev)
return phy_write(phydev, MII_BMCR, BMCR_ANENABLE);
}
-static int dm9161_ack_interrupt(struct phy_device *phydev)
-{
- int err = phy_read(phydev, MII_DM9161_INTR);
-
- return (err < 0) ? err : 0;
-}
-
static struct phy_driver dm91xx_driver[] = {
{
.phy_id = 0x0181b880,
@@ -147,8 +178,8 @@ static struct phy_driver dm91xx_driver[] = {
/* PHY_BASIC_FEATURES */
.config_init = dm9161_config_init,
.config_aneg = dm9161_config_aneg,
- .ack_interrupt = dm9161_ack_interrupt,
.config_intr = dm9161_config_intr,
+ .handle_interrupt = dm9161_handle_interrupt,
}, {
.phy_id = 0x0181b8b0,
.name = "Davicom DM9161B/C",
@@ -156,8 +187,8 @@ static struct phy_driver dm91xx_driver[] = {
/* PHY_BASIC_FEATURES */
.config_init = dm9161_config_init,
.config_aneg = dm9161_config_aneg,
- .ack_interrupt = dm9161_ack_interrupt,
.config_intr = dm9161_config_intr,
+ .handle_interrupt = dm9161_handle_interrupt,
}, {
.phy_id = 0x0181b8a0,
.name = "Davicom DM9161A",
@@ -165,15 +196,15 @@ static struct phy_driver dm91xx_driver[] = {
/* PHY_BASIC_FEATURES */
.config_init = dm9161_config_init,
.config_aneg = dm9161_config_aneg,
- .ack_interrupt = dm9161_ack_interrupt,
.config_intr = dm9161_config_intr,
+ .handle_interrupt = dm9161_handle_interrupt,
}, {
.phy_id = 0x00181b80,
.name = "Davicom DM9131",
.phy_id_mask = 0x0ffffff0,
/* PHY_BASIC_FEATURES */
- .ack_interrupt = dm9161_ack_interrupt,
.config_intr = dm9161_config_intr,
+ .handle_interrupt = dm9161_handle_interrupt,
} };
module_phy_driver(dm91xx_driver);
diff --git a/drivers/net/phy/dp83640.c b/drivers/net/phy/dp83640.c
index f2caccaf4408..0d79f68f301c 100644
--- a/drivers/net/phy/dp83640.c
+++ b/drivers/net/phy/dp83640.c
@@ -50,6 +50,14 @@
#define MII_DP83640_MISR_LINK_INT_EN 0x20
#define MII_DP83640_MISR_ED_INT_EN 0x40
#define MII_DP83640_MISR_LQ_INT_EN 0x80
+#define MII_DP83640_MISR_ANC_INT 0x400
+#define MII_DP83640_MISR_DUP_INT 0x800
+#define MII_DP83640_MISR_SPD_INT 0x1000
+#define MII_DP83640_MISR_LINK_INT 0x2000
+#define MII_DP83640_MISR_INT_MASK (MII_DP83640_MISR_ANC_INT |\
+ MII_DP83640_MISR_DUP_INT |\
+ MII_DP83640_MISR_SPD_INT |\
+ MII_DP83640_MISR_LINK_INT)
/* phyter seems to miss the mark by 16 ns */
#define ADJTIME_FIX 16
@@ -964,15 +972,12 @@ static void decode_status_frame(struct dp83640_private *dp83640,
static int is_sync(struct sk_buff *skb, int type)
{
struct ptp_header *hdr;
- u8 msgtype;
hdr = ptp_parse_header(skb, type);
if (!hdr)
return 0;
- msgtype = ptp_get_msgtype(hdr, type);
-
- return (msgtype & 0xf) == 0;
+ return ptp_get_msgtype(hdr, type) == PTP_MSGTYPE_SYNC;
}
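
For reference, PTP_MSGTYPE_SYNC is 0 in the common PTP definitions and ptp_get_msgtype() already returns only the message-type nibble, so this is the old (msgtype & 0xf) == 0 test with the magic number given a name.
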
static void dp83640_free_clocks(void)
@@ -1151,6 +1156,10 @@ static int dp83640_config_intr(struct phy_device *phydev)
int err;
if (phydev->interrupts == PHY_INTERRUPT_ENABLED) {
+ err = dp83640_ack_interrupt(phydev);
+ if (err)
+ return err;
+
misr = phy_read(phydev, MII_DP83640_MISR);
if (misr < 0)
return misr;
@@ -1189,10 +1198,32 @@ static int dp83640_config_intr(struct phy_device *phydev)
MII_DP83640_MISR_DUP_INT_EN |
MII_DP83640_MISR_SPD_INT_EN |
MII_DP83640_MISR_LINK_INT_EN);
- return phy_write(phydev, MII_DP83640_MISR, misr);
+ err = phy_write(phydev, MII_DP83640_MISR, misr);
+ if (err)
+ return err;
+
+ return dp83640_ack_interrupt(phydev);
}
}
+static irqreturn_t dp83640_handle_interrupt(struct phy_device *phydev)
+{
+ int irq_status;
+
+ irq_status = phy_read(phydev, MII_DP83640_MISR);
+ if (irq_status < 0) {
+ phy_error(phydev);
+ return IRQ_NONE;
+ }
+
+ if (!(irq_status & MII_DP83640_MISR_INT_MASK))
+ return IRQ_NONE;
+
+ phy_trigger_machine(phydev);
+
+ return IRQ_HANDLED;
+}
+
static int dp83640_hwtstamp(struct mii_timestamper *mii_ts, struct ifreq *ifr)
{
struct dp83640_private *dp83640 =
@@ -1515,8 +1546,8 @@ static struct phy_driver dp83640_driver = {
.remove = dp83640_remove,
.soft_reset = dp83640_soft_reset,
.config_init = dp83640_config_init,
- .ack_interrupt = dp83640_ack_interrupt,
.config_intr = dp83640_config_intr,
+ .handle_interrupt = dp83640_handle_interrupt,
};
static int __init dp83640_init(void)
diff --git a/drivers/net/phy/dp83822.c b/drivers/net/phy/dp83822.c
index c162c9551bd1..fff371ca1086 100644
--- a/drivers/net/phy/dp83822.c
+++ b/drivers/net/phy/dp83822.c
@@ -119,21 +119,6 @@ struct dp83822_private {
u16 fx_sd_enable;
};
-static int dp83822_ack_interrupt(struct phy_device *phydev)
-{
- int err;
-
- err = phy_read(phydev, MII_DP83822_MISR1);
- if (err < 0)
- return err;
-
- err = phy_read(phydev, MII_DP83822_MISR2);
- if (err < 0)
- return err;
-
- return 0;
-}
-
static int dp83822_set_wol(struct phy_device *phydev,
struct ethtool_wolinfo *wol)
{
@@ -303,6 +288,41 @@ static int dp83822_config_intr(struct phy_device *phydev)
return phy_write(phydev, MII_DP83822_PHYSCR, physcr_status);
}
+static irqreturn_t dp83822_handle_interrupt(struct phy_device *phydev)
+{
+ int irq_status;
+
+ /* The MISR1 and MISR2 registers are holding the interrupt status in
+ * the upper half (15:8), while the lower half (7:0) is used for
+ * controlling the interrupt enable state of those individual interrupt
+ * sources. To determine the possible interrupt sources, just read the
+ * MISR* register and use it directly to know which interrupts have
+ * been enabled previously or not.
+ */
+ irq_status = phy_read(phydev, MII_DP83822_MISR1);
+ if (irq_status < 0) {
+ phy_error(phydev);
+ return IRQ_NONE;
+ }
+ if (irq_status & ((irq_status & GENMASK(7, 0)) << 8))
+ goto trigger_machine;
+
+ irq_status = phy_read(phydev, MII_DP83822_MISR2);
+ if (irq_status < 0) {
+ phy_error(phydev);
+ return IRQ_NONE;
+ }
+ if (irq_status & ((irq_status & GENMASK(7, 0)) << 8))
+ goto trigger_machine;
+
+ return IRQ_NONE;
+
+trigger_machine:
+ phy_trigger_machine(phydev);
+
+ return IRQ_HANDLED;
+}
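
The MISR check above folds the enable byte into the status byte instead of hard-coding an event mask. A worked example of the test, as a standalone program compilable with any C compiler (GENMASK(7, 0) expanded to 0xff by hand; the register value is made up for illustration):

#include <stdio.h>

int main(void)
{
	/* bits 7:0 hold the interrupt enables, bits 15:8 the latched status */
	unsigned int misr = 0x2024;	/* enables = 0x24, status = 0x20 */
	unsigned int enabled = misr & 0xff;		/* GENMASK(7, 0) */
	unsigned int pending = misr & (enabled << 8);

	/* prints "pending = 0x2000": the event enabled by bit 5 has
	 * latched its status in bit 13, so the handler would trigger
	 */
	printf("pending = 0x%x\n", pending);

	return pending ? 0 : 1;
}
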
+
static int dp8382x_disable_wol(struct phy_device *phydev)
{
int value = DP83822_WOL_EN | DP83822_WOL_MAGIC_EN |
@@ -574,8 +594,8 @@ static int dp83822_resume(struct phy_device *phydev)
.read_status = dp83822_read_status, \
.get_wol = dp83822_get_wol, \
.set_wol = dp83822_set_wol, \
- .ack_interrupt = dp83822_ack_interrupt, \
.config_intr = dp83822_config_intr, \
+ .handle_interrupt = dp83822_handle_interrupt, \
.suspend = dp83822_suspend, \
.resume = dp83822_resume, \
}
@@ -589,8 +609,8 @@ static int dp83822_resume(struct phy_device *phydev)
.config_init = dp8382x_config_init, \
.get_wol = dp83822_get_wol, \
.set_wol = dp83822_set_wol, \
- .ack_interrupt = dp83822_ack_interrupt, \
.config_intr = dp83822_config_intr, \
+ .handle_interrupt = dp83822_handle_interrupt, \
.suspend = dp83822_suspend, \
.resume = dp83822_resume, \
}
diff --git a/drivers/net/phy/dp83848.c b/drivers/net/phy/dp83848.c
index 54c7c1b44e4d..937061acfc61 100644
--- a/drivers/net/phy/dp83848.c
+++ b/drivers/net/phy/dp83848.c
@@ -37,6 +37,20 @@
DP83848_MISR_SPD_INT_EN | \
DP83848_MISR_LINK_INT_EN)
+#define DP83848_MISR_RHF_INT BIT(8)
+#define DP83848_MISR_FHF_INT BIT(9)
+#define DP83848_MISR_ANC_INT BIT(10)
+#define DP83848_MISR_DUP_INT BIT(11)
+#define DP83848_MISR_SPD_INT BIT(12)
+#define DP83848_MISR_LINK_INT BIT(13)
+#define DP83848_MISR_ED_INT BIT(14)
+
+#define DP83848_INT_MASK \
+ (DP83848_MISR_ANC_INT | \
+ DP83848_MISR_DUP_INT | \
+ DP83848_MISR_SPD_INT | \
+ DP83848_MISR_LINK_INT)
+
static int dp83848_ack_interrupt(struct phy_device *phydev)
{
int err = phy_read(phydev, DP83848_MISR);
@@ -53,17 +67,46 @@ static int dp83848_config_intr(struct phy_device *phydev)
return control;
if (phydev->interrupts == PHY_INTERRUPT_ENABLED) {
+ ret = dp83848_ack_interrupt(phydev);
+ if (ret)
+ return ret;
+
control |= DP83848_MICR_INT_OE;
control |= DP83848_MICR_INTEN;
ret = phy_write(phydev, DP83848_MISR, DP83848_INT_EN_MASK);
if (ret < 0)
return ret;
+
+ ret = phy_write(phydev, DP83848_MICR, control);
} else {
control &= ~DP83848_MICR_INTEN;
+ ret = phy_write(phydev, DP83848_MICR, control);
+ if (ret)
+ return ret;
+
+ ret = dp83848_ack_interrupt(phydev);
+ }
+
+ return ret;
+}
+
+static irqreturn_t dp83848_handle_interrupt(struct phy_device *phydev)
+{
+ int irq_status;
+
+ irq_status = phy_read(phydev, DP83848_MISR);
+ if (irq_status < 0) {
+ phy_error(phydev);
+ return IRQ_NONE;
}
- return phy_write(phydev, DP83848_MICR, control);
+ if (!(irq_status & DP83848_INT_MASK))
+ return IRQ_NONE;
+
+ phy_trigger_machine(phydev);
+
+ return IRQ_HANDLED;
}
static int dp83848_config_init(struct phy_device *phydev)
@@ -102,8 +145,8 @@ MODULE_DEVICE_TABLE(mdio, dp83848_tbl);
.resume = genphy_resume, \
\
/* IRQ related */ \
- .ack_interrupt = dp83848_ack_interrupt, \
.config_intr = dp83848_config_intr, \
+ .handle_interrupt = dp83848_handle_interrupt, \
}
static struct phy_driver dp83848_driver[] = {
diff --git a/drivers/net/phy/dp83867.c b/drivers/net/phy/dp83867.c
index 69d3eacc2b96..9bd9a5c0b1db 100644
--- a/drivers/net/phy/dp83867.c
+++ b/drivers/net/phy/dp83867.c
@@ -288,9 +288,13 @@ static void dp83867_get_wol(struct phy_device *phydev,
static int dp83867_config_intr(struct phy_device *phydev)
{
- int micr_status;
+ int micr_status, err;
if (phydev->interrupts == PHY_INTERRUPT_ENABLED) {
+ err = dp83867_ack_interrupt(phydev);
+ if (err)
+ return err;
+
micr_status = phy_read(phydev, MII_DP83867_MICR);
if (micr_status < 0)
return micr_status;
@@ -303,11 +307,41 @@ static int dp83867_config_intr(struct phy_device *phydev)
MII_DP83867_MICR_DUP_MODE_CHNG_INT_EN |
MII_DP83867_MICR_SLEEP_MODE_CHNG_INT_EN);
- return phy_write(phydev, MII_DP83867_MICR, micr_status);
+ err = phy_write(phydev, MII_DP83867_MICR, micr_status);
+ } else {
+ micr_status = 0x0;
+ err = phy_write(phydev, MII_DP83867_MICR, micr_status);
+ if (err)
+ return err;
+
+ err = dp83867_ack_interrupt(phydev);
}
- micr_status = 0x0;
- return phy_write(phydev, MII_DP83867_MICR, micr_status);
+ return err;
+}
+
+static irqreturn_t dp83867_handle_interrupt(struct phy_device *phydev)
+{
+ int irq_status, irq_enabled;
+
+ irq_status = phy_read(phydev, MII_DP83867_ISR);
+ if (irq_status < 0) {
+ phy_error(phydev);
+ return IRQ_NONE;
+ }
+
+ irq_enabled = phy_read(phydev, MII_DP83867_MICR);
+ if (irq_enabled < 0) {
+ phy_error(phydev);
+ return IRQ_NONE;
+ }
+
+ if (!(irq_status & irq_enabled))
+ return IRQ_NONE;
+
+ phy_trigger_machine(phydev);
+
+ return IRQ_HANDLED;
}
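
Note the variation from the DP83822 handler above: here status (ISR) and enables (MICR) live in two separate registers with, judging by this code, matching bit layouts, so a plain status-AND-enables test replaces the single-register shift trick.
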
static int dp83867_read_status(struct phy_device *phydev)
@@ -825,8 +859,8 @@ static struct phy_driver dp83867_driver[] = {
.set_wol = dp83867_set_wol,
/* IRQ related */
- .ack_interrupt = dp83867_ack_interrupt,
.config_intr = dp83867_config_intr,
+ .handle_interrupt = dp83867_handle_interrupt,
.suspend = genphy_suspend,
.resume = genphy_resume,
diff --git a/drivers/net/phy/dp83869.c b/drivers/net/phy/dp83869.c
index cf6dec7b7d8e..b30bc142d82e 100644
--- a/drivers/net/phy/dp83869.c
+++ b/drivers/net/phy/dp83869.c
@@ -186,9 +186,13 @@ static int dp83869_ack_interrupt(struct phy_device *phydev)
static int dp83869_config_intr(struct phy_device *phydev)
{
- int micr_status = 0;
+ int micr_status = 0, err;
if (phydev->interrupts == PHY_INTERRUPT_ENABLED) {
+ err = dp83869_ack_interrupt(phydev);
+ if (err)
+ return err;
+
micr_status = phy_read(phydev, MII_DP83869_MICR);
if (micr_status < 0)
return micr_status;
@@ -201,10 +205,40 @@ static int dp83869_config_intr(struct phy_device *phydev)
MII_DP83869_MICR_DUP_MODE_CHNG_INT_EN |
MII_DP83869_MICR_SLEEP_MODE_CHNG_INT_EN);
- return phy_write(phydev, MII_DP83869_MICR, micr_status);
+ err = phy_write(phydev, MII_DP83869_MICR, micr_status);
+ } else {
+ err = phy_write(phydev, MII_DP83869_MICR, micr_status);
+ if (err)
+ return err;
+
+ err = dp83869_ack_interrupt(phydev);
}
- return phy_write(phydev, MII_DP83869_MICR, micr_status);
+ return err;
+}
+
+static irqreturn_t dp83869_handle_interrupt(struct phy_device *phydev)
+{
+ int irq_status, irq_enabled;
+
+ irq_status = phy_read(phydev, MII_DP83869_ISR);
+ if (irq_status < 0) {
+ phy_error(phydev);
+ return IRQ_NONE;
+ }
+
+ irq_enabled = phy_read(phydev, MII_DP83869_MICR);
+ if (irq_enabled < 0) {
+ phy_error(phydev);
+ return IRQ_NONE;
+ }
+
+ if (!(irq_status & irq_enabled))
+ return IRQ_NONE;
+
+ phy_trigger_machine(phydev);
+
+ return IRQ_HANDLED;
}
static int dp83869_set_wol(struct phy_device *phydev,
@@ -850,8 +884,8 @@ static struct phy_driver dp83869_driver[] = {
.soft_reset = dp83869_phy_reset,
/* IRQ related */
- .ack_interrupt = dp83869_ack_interrupt,
.config_intr = dp83869_config_intr,
+ .handle_interrupt = dp83869_handle_interrupt,
.read_status = dp83869_read_status,
.get_tunable = dp83869_get_tunable,
diff --git a/drivers/net/phy/dp83tc811.c b/drivers/net/phy/dp83tc811.c
index d73725312c7c..688fadffb249 100644
--- a/drivers/net/phy/dp83tc811.c
+++ b/drivers/net/phy/dp83tc811.c
@@ -197,6 +197,10 @@ static int dp83811_config_intr(struct phy_device *phydev)
int misr_status, err;
if (phydev->interrupts == PHY_INTERRUPT_ENABLED) {
+ err = dp83811_ack_interrupt(phydev);
+ if (err)
+ return err;
+
misr_status = phy_read(phydev, MII_DP83811_INT_STAT1);
if (misr_status < 0)
return misr_status;
@@ -249,11 +253,58 @@ static int dp83811_config_intr(struct phy_device *phydev)
return err;
err = phy_write(phydev, MII_DP83811_INT_STAT3, 0);
+ if (err < 0)
+ return err;
+
+ err = dp83811_ack_interrupt(phydev);
}
return err;
}
+static irqreturn_t dp83811_handle_interrupt(struct phy_device *phydev)
+{
+ int irq_status;
+
+ /* The INT_STAT registers 1, 2 and 3 are holding the interrupt status
+ * in the upper half (15:8), while the lower half (7:0) is used for
+ * controlling the interrupt enable state of those individual interrupt
+ * sources. To determine the possible interrupt sources, just read the
+ * INT_STAT* register and use it directly to know which interrupts have
+ * been enabled previously or not.
+ */
+ irq_status = phy_read(phydev, MII_DP83811_INT_STAT1);
+ if (irq_status < 0) {
+ phy_error(phydev);
+ return IRQ_NONE;
+ }
+ if (irq_status & ((irq_status & GENMASK(7, 0)) << 8))
+ goto trigger_machine;
+
+ irq_status = phy_read(phydev, MII_DP83811_INT_STAT2);
+ if (irq_status < 0) {
+ phy_error(phydev);
+ return IRQ_NONE;
+ }
+ if (irq_status & ((irq_status & GENMASK(7, 0)) << 8))
+ goto trigger_machine;
+
+ irq_status = phy_read(phydev, MII_DP83811_INT_STAT3);
+ if (irq_status < 0) {
+ phy_error(phydev);
+ return IRQ_NONE;
+ }
+ if (irq_status & ((irq_status & GENMASK(7, 0)) << 8))
+ goto trigger_machine;
+
+ return IRQ_NONE;
+
+trigger_machine:
+ phy_trigger_machine(phydev);
+
+ return IRQ_HANDLED;
+}
+
static int dp83811_config_aneg(struct phy_device *phydev)
{
int value, err;
@@ -343,8 +394,8 @@ static struct phy_driver dp83811_driver[] = {
.soft_reset = dp83811_phy_reset,
.get_wol = dp83811_get_wol,
.set_wol = dp83811_set_wol,
- .ack_interrupt = dp83811_ack_interrupt,
.config_intr = dp83811_config_intr,
+ .handle_interrupt = dp83811_handle_interrupt,
.suspend = dp83811_suspend,
.resume = dp83811_resume,
},
diff --git a/drivers/net/phy/icplus.c b/drivers/net/phy/icplus.c
index d6e8516cd146..b632947cbcdf 100644
--- a/drivers/net/phy/icplus.c
+++ b/drivers/net/phy/icplus.c
@@ -272,38 +272,59 @@ static int ip101a_g_config_init(struct phy_device *phydev)
return phy_write(phydev, IP10XX_SPEC_CTRL_STATUS, c);
}
+static int ip101a_g_ack_interrupt(struct phy_device *phydev)
+{
+ int err = phy_read(phydev, IP101A_G_IRQ_CONF_STATUS);
+
+ if (err < 0)
+ return err;
+
+ return 0;
+}
+
static int ip101a_g_config_intr(struct phy_device *phydev)
{
u16 val;
+ int err;
+
+ if (phydev->interrupts == PHY_INTERRUPT_ENABLED) {
+ err = ip101a_g_ack_interrupt(phydev);
+ if (err)
+ return err;
- if (phydev->interrupts == PHY_INTERRUPT_ENABLED)
/* INTR pin used: Speed/link/duplex will cause an interrupt */
val = IP101A_G_IRQ_PIN_USED;
- else
+ err = phy_write(phydev, IP101A_G_IRQ_CONF_STATUS, val);
+ } else {
val = IP101A_G_IRQ_ALL_MASK;
+ err = phy_write(phydev, IP101A_G_IRQ_CONF_STATUS, val);
+ if (err)
+ return err;
+
+ err = ip101a_g_ack_interrupt(phydev);
+ }
- return phy_write(phydev, IP101A_G_IRQ_CONF_STATUS, val);
+ return err;
}
-static int ip101a_g_did_interrupt(struct phy_device *phydev)
+static irqreturn_t ip101a_g_handle_interrupt(struct phy_device *phydev)
{
- int val = phy_read(phydev, IP101A_G_IRQ_CONF_STATUS);
+ int irq_status;
- if (val < 0)
- return 0;
+ irq_status = phy_read(phydev, IP101A_G_IRQ_CONF_STATUS);
+ if (irq_status < 0) {
+ phy_error(phydev);
+ return IRQ_NONE;
+ }
- return val & (IP101A_G_IRQ_SPEED_CHANGE |
- IP101A_G_IRQ_DUPLEX_CHANGE |
- IP101A_G_IRQ_LINK_CHANGE);
-}
+ if (!(irq_status & (IP101A_G_IRQ_SPEED_CHANGE |
+ IP101A_G_IRQ_DUPLEX_CHANGE |
+ IP101A_G_IRQ_LINK_CHANGE)))
+ return IRQ_NONE;
-static int ip101a_g_ack_interrupt(struct phy_device *phydev)
-{
- int err = phy_read(phydev, IP101A_G_IRQ_CONF_STATUS);
- if (err < 0)
- return err;
+ phy_trigger_machine(phydev);
- return 0;
+ return IRQ_HANDLED;
}
static struct phy_driver icplus_driver[] = {
@@ -332,8 +353,7 @@ static struct phy_driver icplus_driver[] = {
/* PHY_BASIC_FEATURES */
.probe = ip101a_g_probe,
.config_intr = ip101a_g_config_intr,
- .did_interrupt = ip101a_g_did_interrupt,
- .ack_interrupt = ip101a_g_ack_interrupt,
+ .handle_interrupt = ip101a_g_handle_interrupt,
.config_init = &ip101a_g_config_init,
.suspend = genphy_suspend,
.resume = genphy_resume,
diff --git a/drivers/net/phy/intel-xway.c b/drivers/net/phy/intel-xway.c
index b7875b36097f..6eac50d4b42f 100644
--- a/drivers/net/phy/intel-xway.c
+++ b/drivers/net/phy/intel-xway.c
@@ -209,22 +209,45 @@ static int xway_gphy_ack_interrupt(struct phy_device *phydev)
return (reg < 0) ? reg : 0;
}
-static int xway_gphy_did_interrupt(struct phy_device *phydev)
+static int xway_gphy_config_intr(struct phy_device *phydev)
{
- int reg;
+ u16 mask = 0;
+ int err;
- reg = phy_read(phydev, XWAY_MDIO_ISTAT);
- return reg & XWAY_MDIO_INIT_MASK;
+ if (phydev->interrupts == PHY_INTERRUPT_ENABLED) {
+ err = xway_gphy_ack_interrupt(phydev);
+ if (err)
+ return err;
+
+ mask = XWAY_MDIO_INIT_MASK;
+ err = phy_write(phydev, XWAY_MDIO_IMASK, mask);
+ } else {
+ err = phy_write(phydev, XWAY_MDIO_IMASK, mask);
+ if (err)
+ return err;
+
+ err = xway_gphy_ack_interrupt(phydev);
+ }
+
+ return err;
}
-static int xway_gphy_config_intr(struct phy_device *phydev)
+static irqreturn_t xway_gphy_handle_interrupt(struct phy_device *phydev)
{
- u16 mask = 0;
+ int irq_status;
- if (phydev->interrupts == PHY_INTERRUPT_ENABLED)
- mask = XWAY_MDIO_INIT_MASK;
+ irq_status = phy_read(phydev, XWAY_MDIO_ISTAT);
+ if (irq_status < 0) {
+ phy_error(phydev);
+ return IRQ_NONE;
+ }
+
+ if (!(irq_status & XWAY_MDIO_INIT_MASK))
+ return IRQ_NONE;
+
+ phy_trigger_machine(phydev);
- return phy_write(phydev, XWAY_MDIO_IMASK, mask);
+ return IRQ_HANDLED;
}
static struct phy_driver xway_gphy[] = {
@@ -235,8 +258,7 @@ static struct phy_driver xway_gphy[] = {
/* PHY_GBIT_FEATURES */
.config_init = xway_gphy_config_init,
.config_aneg = xway_gphy14_config_aneg,
- .ack_interrupt = xway_gphy_ack_interrupt,
- .did_interrupt = xway_gphy_did_interrupt,
+ .handle_interrupt = xway_gphy_handle_interrupt,
.config_intr = xway_gphy_config_intr,
.suspend = genphy_suspend,
.resume = genphy_resume,
@@ -247,8 +269,7 @@ static struct phy_driver xway_gphy[] = {
/* PHY_BASIC_FEATURES */
.config_init = xway_gphy_config_init,
.config_aneg = xway_gphy14_config_aneg,
- .ack_interrupt = xway_gphy_ack_interrupt,
- .did_interrupt = xway_gphy_did_interrupt,
+ .handle_interrupt = xway_gphy_handle_interrupt,
.config_intr = xway_gphy_config_intr,
.suspend = genphy_suspend,
.resume = genphy_resume,
@@ -259,8 +280,7 @@ static struct phy_driver xway_gphy[] = {
/* PHY_GBIT_FEATURES */
.config_init = xway_gphy_config_init,
.config_aneg = xway_gphy14_config_aneg,
- .ack_interrupt = xway_gphy_ack_interrupt,
- .did_interrupt = xway_gphy_did_interrupt,
+ .handle_interrupt = xway_gphy_handle_interrupt,
.config_intr = xway_gphy_config_intr,
.suspend = genphy_suspend,
.resume = genphy_resume,
@@ -271,8 +291,7 @@ static struct phy_driver xway_gphy[] = {
/* PHY_BASIC_FEATURES */
.config_init = xway_gphy_config_init,
.config_aneg = xway_gphy14_config_aneg,
- .ack_interrupt = xway_gphy_ack_interrupt,
- .did_interrupt = xway_gphy_did_interrupt,
+ .handle_interrupt = xway_gphy_handle_interrupt,
.config_intr = xway_gphy_config_intr,
.suspend = genphy_suspend,
.resume = genphy_resume,
@@ -282,8 +301,7 @@ static struct phy_driver xway_gphy[] = {
.name = "Intel XWAY PHY11G (PEF 7071/PEF 7072) v1.5 / v1.6",
/* PHY_GBIT_FEATURES */
.config_init = xway_gphy_config_init,
- .ack_interrupt = xway_gphy_ack_interrupt,
- .did_interrupt = xway_gphy_did_interrupt,
+ .handle_interrupt = xway_gphy_handle_interrupt,
.config_intr = xway_gphy_config_intr,
.suspend = genphy_suspend,
.resume = genphy_resume,
@@ -293,8 +311,7 @@ static struct phy_driver xway_gphy[] = {
.name = "Intel XWAY PHY22F (PEF 7061) v1.5 / v1.6",
/* PHY_BASIC_FEATURES */
.config_init = xway_gphy_config_init,
- .ack_interrupt = xway_gphy_ack_interrupt,
- .did_interrupt = xway_gphy_did_interrupt,
+ .handle_interrupt = xway_gphy_handle_interrupt,
.config_intr = xway_gphy_config_intr,
.suspend = genphy_suspend,
.resume = genphy_resume,
@@ -304,8 +321,7 @@ static struct phy_driver xway_gphy[] = {
.name = "Intel XWAY PHY11G (xRX v1.1 integrated)",
/* PHY_GBIT_FEATURES */
.config_init = xway_gphy_config_init,
- .ack_interrupt = xway_gphy_ack_interrupt,
- .did_interrupt = xway_gphy_did_interrupt,
+ .handle_interrupt = xway_gphy_handle_interrupt,
.config_intr = xway_gphy_config_intr,
.suspend = genphy_suspend,
.resume = genphy_resume,
@@ -315,8 +331,7 @@ static struct phy_driver xway_gphy[] = {
.name = "Intel XWAY PHY22F (xRX v1.1 integrated)",
/* PHY_BASIC_FEATURES */
.config_init = xway_gphy_config_init,
- .ack_interrupt = xway_gphy_ack_interrupt,
- .did_interrupt = xway_gphy_did_interrupt,
+ .handle_interrupt = xway_gphy_handle_interrupt,
.config_intr = xway_gphy_config_intr,
.suspend = genphy_suspend,
.resume = genphy_resume,
@@ -326,8 +341,7 @@ static struct phy_driver xway_gphy[] = {
.name = "Intel XWAY PHY11G (xRX v1.2 integrated)",
/* PHY_GBIT_FEATURES */
.config_init = xway_gphy_config_init,
- .ack_interrupt = xway_gphy_ack_interrupt,
- .did_interrupt = xway_gphy_did_interrupt,
+ .handle_interrupt = xway_gphy_handle_interrupt,
.config_intr = xway_gphy_config_intr,
.suspend = genphy_suspend,
.resume = genphy_resume,
@@ -337,8 +351,7 @@ static struct phy_driver xway_gphy[] = {
.name = "Intel XWAY PHY22F (xRX v1.2 integrated)",
/* PHY_BASIC_FEATURES */
.config_init = xway_gphy_config_init,
- .ack_interrupt = xway_gphy_ack_interrupt,
- .did_interrupt = xway_gphy_did_interrupt,
+ .handle_interrupt = xway_gphy_handle_interrupt,
.config_intr = xway_gphy_config_intr,
.suspend = genphy_suspend,
.resume = genphy_resume,
diff --git a/drivers/net/phy/lxt.c b/drivers/net/phy/lxt.c
index fec58ad69e02..0ee23d29c0d4 100644
--- a/drivers/net/phy/lxt.c
+++ b/drivers/net/phy/lxt.c
@@ -37,6 +37,8 @@
#define MII_LXT970_ISR 18 /* Interrupt Status Register */
+#define MII_LXT970_IRS_MINT BIT(15)
+
#define MII_LXT970_CONFIG 19 /* Configuration Register */
/* ------------------------------------------------------------------------- */
@@ -47,6 +49,7 @@
#define MII_LXT971_IER_IEN 0x00f2
#define MII_LXT971_ISR 19 /* Interrupt Status Register */
+#define MII_LXT971_ISR_MASK 0x00f0
/* register definitions for the 973 */
#define MII_LXT973_PCR 16 /* Port Configuration Register */
@@ -75,10 +78,50 @@ static int lxt970_ack_interrupt(struct phy_device *phydev)
static int lxt970_config_intr(struct phy_device *phydev)
{
- if (phydev->interrupts == PHY_INTERRUPT_ENABLED)
- return phy_write(phydev, MII_LXT970_IER, MII_LXT970_IER_IEN);
- else
- return phy_write(phydev, MII_LXT970_IER, 0);
+ int err;
+
+ if (phydev->interrupts == PHY_INTERRUPT_ENABLED) {
+ err = lxt970_ack_interrupt(phydev);
+ if (err)
+ return err;
+
+ err = phy_write(phydev, MII_LXT970_IER, MII_LXT970_IER_IEN);
+ } else {
+ err = phy_write(phydev, MII_LXT970_IER, 0);
+ if (err)
+ return err;
+
+ err = lxt970_ack_interrupt(phydev);
+ }
+
+ return err;
+}
+
+static irqreturn_t lxt970_handle_interrupt(struct phy_device *phydev)
+{
+ int irq_status;
+
+ /* The interrupt status register is cleared by reading BMSR
+ * followed by MII_LXT970_ISR
+ */
+ irq_status = phy_read(phydev, MII_BMSR);
+ if (irq_status < 0) {
+ phy_error(phydev);
+ return IRQ_NONE;
+ }
+
+ irq_status = phy_read(phydev, MII_LXT970_ISR);
+ if (irq_status < 0) {
+ phy_error(phydev);
+ return IRQ_NONE;
+ }
+
+ if (!(irq_status & MII_LXT970_IRS_MINT))
+ return IRQ_NONE;
+
+ phy_trigger_machine(phydev);
+
+ return IRQ_HANDLED;
}
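
The BMSR read whose result is immediately overwritten is intentional: per the comment, the LXT970 only clears its latched status once BMSR has been read first, so the two reads together form the ack.
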
static int lxt970_config_init(struct phy_device *phydev)
@@ -99,10 +142,41 @@ static int lxt971_ack_interrupt(struct phy_device *phydev)
static int lxt971_config_intr(struct phy_device *phydev)
{
- if (phydev->interrupts == PHY_INTERRUPT_ENABLED)
- return phy_write(phydev, MII_LXT971_IER, MII_LXT971_IER_IEN);
- else
- return phy_write(phydev, MII_LXT971_IER, 0);
+ int err;
+
+ if (phydev->interrupts == PHY_INTERRUPT_ENABLED) {
+ err = lxt971_ack_interrupt(phydev);
+ if (err)
+ return err;
+
+ err = phy_write(phydev, MII_LXT971_IER, MII_LXT971_IER_IEN);
+ } else {
+ err = phy_write(phydev, MII_LXT971_IER, 0);
+ if (err)
+ return err;
+
+ err = lxt971_ack_interrupt(phydev);
+ }
+
+ return err;
+}
+
+static irqreturn_t lxt971_handle_interrupt(struct phy_device *phydev)
+{
+ int irq_status;
+
+ irq_status = phy_read(phydev, MII_LXT971_ISR);
+ if (irq_status < 0) {
+ phy_error(phydev);
+ return IRQ_NONE;
+ }
+
+ if (!(irq_status & MII_LXT971_ISR_MASK))
+ return IRQ_NONE;
+
+ phy_trigger_machine(phydev);
+
+ return IRQ_HANDLED;
}
/*
@@ -237,15 +311,15 @@ static struct phy_driver lxt97x_driver[] = {
.phy_id_mask = 0xfffffff0,
/* PHY_BASIC_FEATURES */
.config_init = lxt970_config_init,
- .ack_interrupt = lxt970_ack_interrupt,
.config_intr = lxt970_config_intr,
+ .handle_interrupt = lxt970_handle_interrupt,
}, {
.phy_id = 0x001378e0,
.name = "LXT971",
.phy_id_mask = 0xfffffff0,
/* PHY_BASIC_FEATURES */
- .ack_interrupt = lxt971_ack_interrupt,
.config_intr = lxt971_config_intr,
+ .handle_interrupt = lxt971_handle_interrupt,
.suspend = genphy_suspend,
.resume = genphy_resume,
}, {
diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c
index 5aec673a0120..620052c023a5 100644
--- a/drivers/net/phy/marvell.c
+++ b/drivers/net/phy/marvell.c
@@ -80,8 +80,11 @@
#define MII_M1111_HWCFG_MODE_FIBER_RGMII 0x3
#define MII_M1111_HWCFG_MODE_SGMII_NO_CLK 0x4
#define MII_M1111_HWCFG_MODE_RTBI 0x7
+#define MII_M1111_HWCFG_MODE_COPPER_1000X_AN 0x8
#define MII_M1111_HWCFG_MODE_COPPER_RTBI 0x9
#define MII_M1111_HWCFG_MODE_COPPER_RGMII 0xb
+#define MII_M1111_HWCFG_MODE_COPPER_1000X_NOAN 0xc
+#define MII_M1111_HWCFG_SERIAL_AN_BYPASS BIT(12)
#define MII_M1111_HWCFG_FIBER_COPPER_RES BIT(13)
#define MII_M1111_HWCFG_FIBER_COPPER_AUTO BIT(15)
@@ -314,16 +317,43 @@ static int marvell_config_intr(struct phy_device *phydev)
{
int err;
- if (phydev->interrupts == PHY_INTERRUPT_ENABLED)
+ if (phydev->interrupts == PHY_INTERRUPT_ENABLED) {
+ err = marvell_ack_interrupt(phydev);
+ if (err)
+ return err;
+
err = phy_write(phydev, MII_M1011_IMASK,
MII_M1011_IMASK_INIT);
- else
+ } else {
err = phy_write(phydev, MII_M1011_IMASK,
MII_M1011_IMASK_CLEAR);
+ if (err)
+ return err;
+
+ err = marvell_ack_interrupt(phydev);
+ }
return err;
}
+static irqreturn_t marvell_handle_interrupt(struct phy_device *phydev)
+{
+ int irq_status;
+
+ irq_status = phy_read(phydev, MII_M1011_IEVENT);
+ if (irq_status < 0) {
+ phy_error(phydev);
+ return IRQ_NONE;
+ }
+
+ if (!(irq_status & MII_M1011_IMASK_INIT))
+ return IRQ_NONE;
+
+ phy_trigger_machine(phydev);
+
+ return IRQ_HANDLED;
+}
+
static int marvell_set_polarity(struct phy_device *phydev, int polarity)
{
int reg;
@@ -629,6 +659,51 @@ static int marvell_config_aneg_fiber(struct phy_device *phydev)
return genphy_check_and_restart_aneg(phydev, changed);
}
+static int m88e1111_config_aneg(struct phy_device *phydev)
+{
+ int extsr = phy_read(phydev, MII_M1111_PHY_EXT_SR);
+ int err;
+
+ if (extsr < 0)
+ return extsr;
+
+ /* If not using SGMII or copper 1000BaseX modes, use normal process.
+ * Steps below are only required for these modes.
+ */
+ if (phydev->interface != PHY_INTERFACE_MODE_SGMII &&
+ (extsr & MII_M1111_HWCFG_MODE_MASK) !=
+ MII_M1111_HWCFG_MODE_COPPER_1000X_AN)
+ return marvell_config_aneg(phydev);
+
+ err = marvell_set_page(phydev, MII_MARVELL_COPPER_PAGE);
+ if (err < 0)
+ goto error;
+
+ /* Configure the copper link first */
+ err = marvell_config_aneg(phydev);
+ if (err < 0)
+ goto error;
+
+ /* Do not touch the fiber page if we're in copper->sgmii mode */
+ if (phydev->interface == PHY_INTERFACE_MODE_SGMII)
+ return 0;
+
+ /* Then the fiber link */
+ err = marvell_set_page(phydev, MII_MARVELL_FIBER_PAGE);
+ if (err < 0)
+ goto error;
+
+ err = marvell_config_aneg_fiber(phydev);
+ if (err < 0)
+ goto error;
+
+ return marvell_set_page(phydev, MII_MARVELL_COPPER_PAGE);
+
+error:
+ marvell_set_page(phydev, MII_MARVELL_COPPER_PAGE);
+ return err;
+}
+
static int m88e1510_config_aneg(struct phy_device *phydev)
{
int err;
@@ -696,7 +771,7 @@ static void marvell_config_led(struct phy_device *phydev)
static int marvell_config_init(struct phy_device *phydev)
{
- /* Set defalut LED */
+ /* Set default LED */
marvell_config_led(phydev);
/* Set registers from marvell,reg-init DT property */
@@ -814,6 +889,28 @@ static int m88e1111_config_init_rtbi(struct phy_device *phydev)
MII_M1111_HWCFG_FIBER_COPPER_AUTO);
}
+static int m88e1111_config_init_1000basex(struct phy_device *phydev)
+{
+ int extsr = phy_read(phydev, MII_M1111_PHY_EXT_SR);
+ int err, mode;
+
+ if (extsr < 0)
+ return extsr;
+
+ /* If using copper mode, ensure 1000BaseX auto-negotiation is enabled */
+ mode = extsr & MII_M1111_HWCFG_MODE_MASK;
+ if (mode == MII_M1111_HWCFG_MODE_COPPER_1000X_NOAN) {
+ err = phy_modify(phydev, MII_M1111_PHY_EXT_SR,
+ MII_M1111_HWCFG_MODE_MASK |
+ MII_M1111_HWCFG_SERIAL_AN_BYPASS,
+ MII_M1111_HWCFG_MODE_COPPER_1000X_AN |
+ MII_M1111_HWCFG_SERIAL_AN_BYPASS);
+ if (err < 0)
+ return err;
+ }
+ return 0;
+}
+
static int m88e1111_config_init(struct phy_device *phydev)
{
int err;
@@ -836,6 +933,12 @@ static int m88e1111_config_init(struct phy_device *phydev)
return err;
}
+ if (phydev->interface == PHY_INTERFACE_MODE_1000BASEX) {
+ err = m88e1111_config_init_1000basex(phydev);
+ if (err < 0)
+ return err;
+ }
+
err = marvell_of_reg_init(phydev);
if (err < 0)
return err;
@@ -1029,8 +1132,8 @@ static int m88e1510_config_init(struct phy_device *phydev)
return err;
/* PHY reset is necessary after changing MODE[2:0] */
- err = phy_modify(phydev, MII_88E1510_GEN_CTRL_REG_1, 0,
- MII_88E1510_GEN_CTRL_REG_1_RESET);
+ err = phy_set_bits(phydev, MII_88E1510_GEN_CTRL_REG_1,
+ MII_88E1510_GEN_CTRL_REG_1_RESET);
if (err < 0)
return err;
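
The phy_set_bits()/phy_clear_bits() swaps in this file are behavior-preserving: in phylib these helpers are thin inline wrappers,

	phy_set_bits(phydev, reg, val)   == phy_modify(phydev, reg, 0, val);
	phy_clear_bits(phydev, reg, val) == phy_modify(phydev, reg, val, 0);

i.e. a read-modify-write that clears the mask bits and then ORs in the set bits, just spelled more directly.
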
@@ -1583,18 +1686,6 @@ static int marvell_aneg_done(struct phy_device *phydev)
return (retval < 0) ? retval : (retval & MII_M1011_PHY_STATUS_RESOLVED);
}
-static int m88e1121_did_interrupt(struct phy_device *phydev)
-{
- int imask;
-
- imask = phy_read(phydev, MII_M1011_IEVENT);
-
- if (imask & MII_M1011_IMASK_INIT)
- return 1;
-
- return 0;
-}
-
static void m88e1318_get_wol(struct phy_device *phydev,
struct ethtool_wolinfo *wol)
{
@@ -1634,8 +1725,8 @@ static int m88e1318_set_wol(struct phy_device *phydev,
__phy_read(phydev, MII_M1011_IEVENT);
/* Enable the WOL interrupt */
- err = __phy_modify(phydev, MII_88E1318S_PHY_CSIER, 0,
- MII_88E1318S_PHY_CSIER_WOL_EIE);
+ err = __phy_set_bits(phydev, MII_88E1318S_PHY_CSIER,
+ MII_88E1318S_PHY_CSIER_WOL_EIE);
if (err < 0)
goto error;
@@ -1673,9 +1764,9 @@ static int m88e1318_set_wol(struct phy_device *phydev,
goto error;
/* Clear WOL status and enable magic packet matching */
- err = __phy_modify(phydev, MII_88E1318S_PHY_WOL_CTRL, 0,
- MII_88E1318S_PHY_WOL_CTRL_CLEAR_WOL_STATUS |
- MII_88E1318S_PHY_WOL_CTRL_MAGIC_PACKET_MATCH_ENABLE);
+ err = __phy_set_bits(phydev, MII_88E1318S_PHY_WOL_CTRL,
+ MII_88E1318S_PHY_WOL_CTRL_CLEAR_WOL_STATUS |
+ MII_88E1318S_PHY_WOL_CTRL_MAGIC_PACKET_MATCH_ENABLE);
if (err < 0)
goto error;
} else {
@@ -1904,7 +1995,7 @@ static int marvell_cable_test_start_common(struct phy_device *phydev)
return bmsr;
if (bmcr & BMCR_ANENABLE) {
- ret = phy_modify(phydev, MII_BMCR, BMCR_ANENABLE, 0);
+ ret = phy_clear_bits(phydev, MII_BMCR, BMCR_ANENABLE);
if (ret < 0)
return ret;
ret = genphy_soft_reset(phydev);
@@ -2621,8 +2712,8 @@ static struct phy_driver marvell_drivers[] = {
.probe = marvell_probe,
.config_init = marvell_config_init,
.config_aneg = m88e1101_config_aneg,
- .ack_interrupt = marvell_ack_interrupt,
.config_intr = marvell_config_intr,
+ .handle_interrupt = marvell_handle_interrupt,
.resume = genphy_resume,
.suspend = genphy_suspend,
.read_page = marvell_read_page,
@@ -2639,8 +2730,8 @@ static struct phy_driver marvell_drivers[] = {
.probe = marvell_probe,
.config_init = m88e1111_config_init,
.config_aneg = marvell_config_aneg,
- .ack_interrupt = marvell_ack_interrupt,
.config_intr = marvell_config_intr,
+ .handle_interrupt = marvell_handle_interrupt,
.resume = genphy_resume,
.suspend = genphy_suspend,
.read_page = marvell_read_page,
@@ -2658,10 +2749,31 @@ static struct phy_driver marvell_drivers[] = {
/* PHY_GBIT_FEATURES */
.probe = marvell_probe,
.config_init = m88e1111_config_init,
- .config_aneg = marvell_config_aneg,
+ .config_aneg = m88e1111_config_aneg,
+ .read_status = marvell_read_status,
+ .config_intr = marvell_config_intr,
+ .handle_interrupt = marvell_handle_interrupt,
+ .resume = genphy_resume,
+ .suspend = genphy_suspend,
+ .read_page = marvell_read_page,
+ .write_page = marvell_write_page,
+ .get_sset_count = marvell_get_sset_count,
+ .get_strings = marvell_get_strings,
+ .get_stats = marvell_get_stats,
+ .get_tunable = m88e1111_get_tunable,
+ .set_tunable = m88e1111_set_tunable,
+ },
+ {
+ .phy_id = MARVELL_PHY_ID_88E1111_FINISAR,
+ .phy_id_mask = MARVELL_PHY_ID_MASK,
+ .name = "Marvell 88E1111 (Finisar)",
+ /* PHY_GBIT_FEATURES */
+ .probe = marvell_probe,
+ .config_init = m88e1111_config_init,
+ .config_aneg = m88e1111_config_aneg,
.read_status = marvell_read_status,
- .ack_interrupt = marvell_ack_interrupt,
.config_intr = marvell_config_intr,
+ .handle_interrupt = marvell_handle_interrupt,
.resume = genphy_resume,
.suspend = genphy_suspend,
.read_page = marvell_read_page,
@@ -2680,8 +2792,8 @@ static struct phy_driver marvell_drivers[] = {
.probe = marvell_probe,
.config_init = m88e1118_config_init,
.config_aneg = m88e1118_config_aneg,
- .ack_interrupt = marvell_ack_interrupt,
.config_intr = marvell_config_intr,
+ .handle_interrupt = marvell_handle_interrupt,
.resume = genphy_resume,
.suspend = genphy_suspend,
.read_page = marvell_read_page,
@@ -2699,9 +2811,8 @@ static struct phy_driver marvell_drivers[] = {
.config_init = marvell_config_init,
.config_aneg = m88e1121_config_aneg,
.read_status = marvell_read_status,
- .ack_interrupt = marvell_ack_interrupt,
.config_intr = marvell_config_intr,
- .did_interrupt = m88e1121_did_interrupt,
+ .handle_interrupt = marvell_handle_interrupt,
.resume = genphy_resume,
.suspend = genphy_suspend,
.read_page = marvell_read_page,
@@ -2721,9 +2832,8 @@ static struct phy_driver marvell_drivers[] = {
.config_init = m88e1318_config_init,
.config_aneg = m88e1318_config_aneg,
.read_status = marvell_read_status,
- .ack_interrupt = marvell_ack_interrupt,
.config_intr = marvell_config_intr,
- .did_interrupt = m88e1121_did_interrupt,
+ .handle_interrupt = marvell_handle_interrupt,
.get_wol = m88e1318_get_wol,
.set_wol = m88e1318_set_wol,
.resume = genphy_resume,
@@ -2743,8 +2853,8 @@ static struct phy_driver marvell_drivers[] = {
.config_init = m88e1145_config_init,
.config_aneg = m88e1101_config_aneg,
.read_status = genphy_read_status,
- .ack_interrupt = marvell_ack_interrupt,
.config_intr = marvell_config_intr,
+ .handle_interrupt = marvell_handle_interrupt,
.resume = genphy_resume,
.suspend = genphy_suspend,
.read_page = marvell_read_page,
@@ -2763,8 +2873,8 @@ static struct phy_driver marvell_drivers[] = {
.probe = marvell_probe,
.config_init = m88e1149_config_init,
.config_aneg = m88e1118_config_aneg,
- .ack_interrupt = marvell_ack_interrupt,
.config_intr = marvell_config_intr,
+ .handle_interrupt = marvell_handle_interrupt,
.resume = genphy_resume,
.suspend = genphy_suspend,
.read_page = marvell_read_page,
@@ -2781,8 +2891,8 @@ static struct phy_driver marvell_drivers[] = {
.probe = marvell_probe,
.config_init = m88e1111_config_init,
.config_aneg = marvell_config_aneg,
- .ack_interrupt = marvell_ack_interrupt,
.config_intr = marvell_config_intr,
+ .handle_interrupt = marvell_handle_interrupt,
.resume = genphy_resume,
.suspend = genphy_suspend,
.read_page = marvell_read_page,
@@ -2798,8 +2908,8 @@ static struct phy_driver marvell_drivers[] = {
/* PHY_GBIT_FEATURES */
.probe = marvell_probe,
.config_init = m88e1116r_config_init,
- .ack_interrupt = marvell_ack_interrupt,
.config_intr = marvell_config_intr,
+ .handle_interrupt = marvell_handle_interrupt,
.resume = genphy_resume,
.suspend = genphy_suspend,
.read_page = marvell_read_page,
@@ -2820,9 +2930,8 @@ static struct phy_driver marvell_drivers[] = {
.config_init = m88e1510_config_init,
.config_aneg = m88e1510_config_aneg,
.read_status = marvell_read_status,
- .ack_interrupt = marvell_ack_interrupt,
.config_intr = marvell_config_intr,
- .did_interrupt = m88e1121_did_interrupt,
+ .handle_interrupt = marvell_handle_interrupt,
.get_wol = m88e1318_get_wol,
.set_wol = m88e1318_set_wol,
.resume = marvell_resume,
@@ -2849,9 +2958,8 @@ static struct phy_driver marvell_drivers[] = {
.config_init = marvell_config_init,
.config_aneg = m88e1510_config_aneg,
.read_status = marvell_read_status,
- .ack_interrupt = marvell_ack_interrupt,
.config_intr = marvell_config_intr,
- .did_interrupt = m88e1121_did_interrupt,
+ .handle_interrupt = marvell_handle_interrupt,
.resume = genphy_resume,
.suspend = genphy_suspend,
.read_page = marvell_read_page,
@@ -2875,9 +2983,8 @@ static struct phy_driver marvell_drivers[] = {
.config_init = marvell_config_init,
.config_aneg = m88e1510_config_aneg,
.read_status = marvell_read_status,
- .ack_interrupt = marvell_ack_interrupt,
.config_intr = marvell_config_intr,
- .did_interrupt = m88e1121_did_interrupt,
+ .handle_interrupt = marvell_handle_interrupt,
.resume = genphy_resume,
.suspend = genphy_suspend,
.read_page = marvell_read_page,
@@ -2900,9 +3007,8 @@ static struct phy_driver marvell_drivers[] = {
.config_init = m88e3016_config_init,
.aneg_done = marvell_aneg_done,
.read_status = marvell_read_status,
- .ack_interrupt = marvell_ack_interrupt,
.config_intr = marvell_config_intr,
- .did_interrupt = m88e1121_did_interrupt,
+ .handle_interrupt = marvell_handle_interrupt,
.resume = genphy_resume,
.suspend = genphy_suspend,
.read_page = marvell_read_page,
@@ -2921,9 +3027,8 @@ static struct phy_driver marvell_drivers[] = {
.config_init = marvell_config_init,
.config_aneg = m88e6390_config_aneg,
.read_status = marvell_read_status,
- .ack_interrupt = marvell_ack_interrupt,
.config_intr = marvell_config_intr,
- .did_interrupt = m88e1121_did_interrupt,
+ .handle_interrupt = marvell_handle_interrupt,
.resume = genphy_resume,
.suspend = genphy_suspend,
.read_page = marvell_read_page,
@@ -2946,9 +3051,8 @@ static struct phy_driver marvell_drivers[] = {
.config_init = marvell_config_init,
.config_aneg = m88e1510_config_aneg,
.read_status = marvell_read_status,
- .ack_interrupt = marvell_ack_interrupt,
.config_intr = marvell_config_intr,
- .did_interrupt = m88e1121_did_interrupt,
+ .handle_interrupt = marvell_handle_interrupt,
.resume = genphy_resume,
.suspend = genphy_suspend,
.read_page = marvell_read_page,
@@ -2968,9 +3072,8 @@ static struct phy_driver marvell_drivers[] = {
.config_init = marvell_config_init,
.config_aneg = m88e1510_config_aneg,
.read_status = marvell_read_status,
- .ack_interrupt = marvell_ack_interrupt,
.config_intr = marvell_config_intr,
- .did_interrupt = m88e1121_did_interrupt,
+ .handle_interrupt = marvell_handle_interrupt,
.resume = genphy_resume,
.suspend = genphy_suspend,
.read_page = marvell_read_page,
@@ -2989,6 +3092,7 @@ static struct mdio_device_id __maybe_unused marvell_tbl[] = {
{ MARVELL_PHY_ID_88E1101, MARVELL_PHY_ID_MASK },
{ MARVELL_PHY_ID_88E1112, MARVELL_PHY_ID_MASK },
{ MARVELL_PHY_ID_88E1111, MARVELL_PHY_ID_MASK },
+ { MARVELL_PHY_ID_88E1111_FINISAR, MARVELL_PHY_ID_MASK },
{ MARVELL_PHY_ID_88E1118, MARVELL_PHY_ID_MASK },
{ MARVELL_PHY_ID_88E1121R, MARVELL_PHY_ID_MASK },
{ MARVELL_PHY_ID_88E1145, MARVELL_PHY_ID_MASK },
diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c
index 757e950fb745..2b42e46066b4 100644
--- a/drivers/net/phy/mdio_bus.c
+++ b/drivers/net/phy/mdio_bus.c
@@ -472,7 +472,7 @@ static inline void of_mdiobus_link_mdiodev(struct mii_bus *mdio,
#endif
/**
- * mdiobus_create_device_from_board_info - create a full MDIO device given
+ * mdiobus_create_device - create a full MDIO device given
* a mdio_board_info structure
* @bus: MDIO bus to create the devices on
* @bi: mdio_board_info structure describing the devices
@@ -546,10 +546,11 @@ int __mdiobus_register(struct mii_bus *bus, struct module *owner)
/* de-assert bus level PHY GPIO reset */
gpiod = devm_gpiod_get_optional(&bus->dev, "reset", GPIOD_OUT_LOW);
if (IS_ERR(gpiod)) {
- dev_err(&bus->dev, "mii_bus %s couldn't get reset GPIO\n",
- bus->id);
+ err = dev_err_probe(&bus->dev, PTR_ERR(gpiod),
+ "mii_bus %s couldn't get reset GPIO\n",
+ bus->id);
device_del(&bus->dev);
- return PTR_ERR(gpiod);
+ return err;
} else if (gpiod) {
bus->reset_gpiod = gpiod;
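
dev_err_probe() returns the error passed to it and suppresses the log for -EPROBE_DEFER (recording the deferral reason for debugfs instead), which is why the return value can now be forwarded directly. A minimal usage sketch under those assumptions; example_get_reset() is a hypothetical wrapper:

static int example_get_reset(struct device *dev)
{
	struct gpio_desc *gpiod;

	gpiod = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_LOW);
	if (IS_ERR(gpiod))
		/* logs on real errors, stays quiet on -EPROBE_DEFER */
		return dev_err_probe(dev, PTR_ERR(gpiod),
				     "couldn't get reset GPIO\n");

	return 0;
}
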
diff --git a/drivers/net/phy/meson-gxl.c b/drivers/net/phy/meson-gxl.c
index e8f2ca625837..7e7904fee1d9 100644
--- a/drivers/net/phy/meson-gxl.c
+++ b/drivers/net/phy/meson-gxl.c
@@ -204,22 +204,45 @@ static int meson_gxl_config_intr(struct phy_device *phydev)
int ret;
if (phydev->interrupts == PHY_INTERRUPT_ENABLED) {
+ /* Ack any pending IRQ */
+ ret = meson_gxl_ack_interrupt(phydev);
+ if (ret)
+ return ret;
+
val = INTSRC_ANEG_PR
| INTSRC_PARALLEL_FAULT
| INTSRC_ANEG_LP_ACK
| INTSRC_LINK_DOWN
| INTSRC_REMOTE_FAULT
| INTSRC_ANEG_COMPLETE;
+ ret = phy_write(phydev, INTSRC_MASK, val);
} else {
val = 0;
+ ret = phy_write(phydev, INTSRC_MASK, val);
+
+ /* Ack any pending IRQ */
+ ret = meson_gxl_ack_interrupt(phydev);
}
- /* Ack any pending IRQ */
- ret = meson_gxl_ack_interrupt(phydev);
- if (ret)
- return ret;
+ return ret;
+}
+
+static irqreturn_t meson_gxl_handle_interrupt(struct phy_device *phydev)
+{
+ int irq_status;
+
+ irq_status = phy_read(phydev, INTSRC_FLAG);
+ if (irq_status < 0) {
+ phy_error(phydev);
+ return IRQ_NONE;
+ }
+
+ if (irq_status == 0)
+ return IRQ_NONE;
+
+ phy_trigger_machine(phydev);
- return phy_write(phydev, INTSRC_MASK, val);
+ return IRQ_HANDLED;
}
static struct phy_driver meson_gxl_phy[] = {
@@ -231,8 +254,8 @@ static struct phy_driver meson_gxl_phy[] = {
.soft_reset = genphy_soft_reset,
.config_init = meson_gxl_config_init,
.read_status = meson_gxl_read_status,
- .ack_interrupt = meson_gxl_ack_interrupt,
.config_intr = meson_gxl_config_intr,
+ .handle_interrupt = meson_gxl_handle_interrupt,
.suspend = genphy_suspend,
.resume = genphy_resume,
}, {
@@ -241,8 +264,8 @@ static struct phy_driver meson_gxl_phy[] = {
/* PHY_BASIC_FEATURES */
.flags = PHY_IS_INTERNAL,
.soft_reset = genphy_soft_reset,
- .ack_interrupt = meson_gxl_ack_interrupt,
.config_intr = meson_gxl_config_intr,
+ .handle_interrupt = meson_gxl_handle_interrupt,
.suspend = genphy_suspend,
.resume = genphy_resume,
},
diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c
index a7f74b3b97af..54e0d75203da 100644
--- a/drivers/net/phy/micrel.c
+++ b/drivers/net/phy/micrel.c
@@ -48,6 +48,10 @@
#define KSZPHY_INTCS_LINK_UP BIT(8)
#define KSZPHY_INTCS_ALL (KSZPHY_INTCS_LINK_UP |\
KSZPHY_INTCS_LINK_DOWN)
+#define KSZPHY_INTCS_LINK_DOWN_STATUS BIT(2)
+#define KSZPHY_INTCS_LINK_UP_STATUS BIT(0)
+#define KSZPHY_INTCS_STATUS (KSZPHY_INTCS_LINK_DOWN_STATUS |\
+ KSZPHY_INTCS_LINK_UP_STATUS)
/* PHY Control 1 */
#define MII_KSZPHY_CTRL_1 0x1e
@@ -158,7 +162,7 @@ static int kszphy_ack_interrupt(struct phy_device *phydev)
static int kszphy_config_intr(struct phy_device *phydev)
{
const struct kszphy_type *type = phydev->drv->driver_data;
- int temp;
+ int temp, err;
u16 mask;
if (type && type->interrupt_level_mask)
@@ -174,12 +178,41 @@ static int kszphy_config_intr(struct phy_device *phydev)
phy_write(phydev, MII_KSZPHY_CTRL, temp);
/* enable / disable interrupts */
- if (phydev->interrupts == PHY_INTERRUPT_ENABLED)
+ if (phydev->interrupts == PHY_INTERRUPT_ENABLED) {
+ err = kszphy_ack_interrupt(phydev);
+ if (err)
+ return err;
+
temp = KSZPHY_INTCS_ALL;
- else
+ err = phy_write(phydev, MII_KSZPHY_INTCS, temp);
+ } else {
temp = 0;
+ err = phy_write(phydev, MII_KSZPHY_INTCS, temp);
+ if (err)
+ return err;
+
+ err = kszphy_ack_interrupt(phydev);
+ }
+
+ return err;
+}
+
+static irqreturn_t kszphy_handle_interrupt(struct phy_device *phydev)
+{
+ int irq_status;
+
+ irq_status = phy_read(phydev, MII_KSZPHY_INTCS);
+ if (irq_status < 0) {
+ phy_error(phydev);
+ return IRQ_NONE;
+ }
+
+ if (!(irq_status & KSZPHY_INTCS_STATUS))
+ return IRQ_NONE;
+
+ phy_trigger_machine(phydev);
- return phy_write(phydev, MII_KSZPHY_INTCS, temp);
+ return IRQ_HANDLED;
}
static int kszphy_rmii_clk_sel(struct phy_device *phydev, bool val)
@@ -1160,8 +1193,8 @@ static struct phy_driver ksphy_driver[] = {
/* PHY_BASIC_FEATURES */
.driver_data = &ks8737_type,
.config_init = kszphy_config_init,
- .ack_interrupt = kszphy_ack_interrupt,
.config_intr = kszphy_config_intr,
+ .handle_interrupt = kszphy_handle_interrupt,
.suspend = genphy_suspend,
.resume = genphy_resume,
}, {
@@ -1172,8 +1205,8 @@ static struct phy_driver ksphy_driver[] = {
.driver_data = &ksz8021_type,
.probe = kszphy_probe,
.config_init = kszphy_config_init,
- .ack_interrupt = kszphy_ack_interrupt,
.config_intr = kszphy_config_intr,
+ .handle_interrupt = kszphy_handle_interrupt,
.get_sset_count = kszphy_get_sset_count,
.get_strings = kszphy_get_strings,
.get_stats = kszphy_get_stats,
@@ -1187,8 +1220,8 @@ static struct phy_driver ksphy_driver[] = {
.driver_data = &ksz8021_type,
.probe = kszphy_probe,
.config_init = kszphy_config_init,
- .ack_interrupt = kszphy_ack_interrupt,
.config_intr = kszphy_config_intr,
+ .handle_interrupt = kszphy_handle_interrupt,
.get_sset_count = kszphy_get_sset_count,
.get_strings = kszphy_get_strings,
.get_stats = kszphy_get_stats,
@@ -1203,8 +1236,8 @@ static struct phy_driver ksphy_driver[] = {
.probe = kszphy_probe,
.config_init = ksz8041_config_init,
.config_aneg = ksz8041_config_aneg,
- .ack_interrupt = kszphy_ack_interrupt,
.config_intr = kszphy_config_intr,
+ .handle_interrupt = kszphy_handle_interrupt,
.get_sset_count = kszphy_get_sset_count,
.get_strings = kszphy_get_strings,
.get_stats = kszphy_get_stats,
@@ -1218,8 +1251,8 @@ static struct phy_driver ksphy_driver[] = {
.driver_data = &ksz8041_type,
.probe = kszphy_probe,
.config_init = kszphy_config_init,
- .ack_interrupt = kszphy_ack_interrupt,
.config_intr = kszphy_config_intr,
+ .handle_interrupt = kszphy_handle_interrupt,
.get_sset_count = kszphy_get_sset_count,
.get_strings = kszphy_get_strings,
.get_stats = kszphy_get_stats,
@@ -1231,8 +1264,8 @@ static struct phy_driver ksphy_driver[] = {
.driver_data = &ksz8051_type,
.probe = kszphy_probe,
.config_init = kszphy_config_init,
- .ack_interrupt = kszphy_ack_interrupt,
.config_intr = kszphy_config_intr,
+ .handle_interrupt = kszphy_handle_interrupt,
.get_sset_count = kszphy_get_sset_count,
.get_strings = kszphy_get_strings,
.get_stats = kszphy_get_stats,
@@ -1247,8 +1280,8 @@ static struct phy_driver ksphy_driver[] = {
.driver_data = &ksz8041_type,
.probe = kszphy_probe,
.config_init = kszphy_config_init,
- .ack_interrupt = kszphy_ack_interrupt,
.config_intr = kszphy_config_intr,
+ .handle_interrupt = kszphy_handle_interrupt,
.get_sset_count = kszphy_get_sset_count,
.get_strings = kszphy_get_strings,
.get_stats = kszphy_get_stats,
@@ -1262,8 +1295,8 @@ static struct phy_driver ksphy_driver[] = {
.driver_data = &ksz8081_type,
.probe = kszphy_probe,
.config_init = ksz8081_config_init,
- .ack_interrupt = kszphy_ack_interrupt,
.config_intr = kszphy_config_intr,
+ .handle_interrupt = kszphy_handle_interrupt,
.get_sset_count = kszphy_get_sset_count,
.get_strings = kszphy_get_strings,
.get_stats = kszphy_get_stats,
@@ -1275,8 +1308,8 @@ static struct phy_driver ksphy_driver[] = {
.phy_id_mask = MICREL_PHY_ID_MASK,
/* PHY_BASIC_FEATURES */
.config_init = ksz8061_config_init,
- .ack_interrupt = kszphy_ack_interrupt,
.config_intr = kszphy_config_intr,
+ .handle_interrupt = kszphy_handle_interrupt,
.suspend = genphy_suspend,
.resume = genphy_resume,
}, {
@@ -1288,8 +1321,8 @@ static struct phy_driver ksphy_driver[] = {
.probe = kszphy_probe,
.get_features = ksz9031_get_features,
.config_init = ksz9021_config_init,
- .ack_interrupt = kszphy_ack_interrupt,
.config_intr = kszphy_config_intr,
+ .handle_interrupt = kszphy_handle_interrupt,
.get_sset_count = kszphy_get_sset_count,
.get_strings = kszphy_get_strings,
.get_stats = kszphy_get_stats,
@@ -1307,8 +1340,8 @@ static struct phy_driver ksphy_driver[] = {
.config_init = ksz9031_config_init,
.soft_reset = genphy_soft_reset,
.read_status = ksz9031_read_status,
- .ack_interrupt = kszphy_ack_interrupt,
.config_intr = kszphy_config_intr,
+ .handle_interrupt = kszphy_handle_interrupt,
.get_sset_count = kszphy_get_sset_count,
.get_strings = kszphy_get_strings,
.get_stats = kszphy_get_stats,
@@ -1336,8 +1369,8 @@ static struct phy_driver ksphy_driver[] = {
.probe = kszphy_probe,
.config_init = ksz9131_config_init,
.read_status = genphy_read_status,
- .ack_interrupt = kszphy_ack_interrupt,
.config_intr = kszphy_config_intr,
+ .handle_interrupt = kszphy_handle_interrupt,
.get_sset_count = kszphy_get_sset_count,
.get_strings = kszphy_get_strings,
.get_stats = kszphy_get_stats,
diff --git a/drivers/net/phy/microchip.c b/drivers/net/phy/microchip.c
index a644e8e5071c..9f1f2b6c97d4 100644
--- a/drivers/net/phy/microchip.c
+++ b/drivers/net/phy/microchip.c
@@ -44,16 +44,32 @@ static int lan88xx_phy_config_intr(struct phy_device *phydev)
LAN88XX_INT_MASK_LINK_CHANGE_);
} else {
rc = phy_write(phydev, LAN88XX_INT_MASK, 0);
+ if (rc)
+ return rc;
+
+ /* Ack interrupts after they have been disabled */
+ rc = phy_read(phydev, LAN88XX_INT_STS);
}
return rc < 0 ? rc : 0;
}
-static int lan88xx_phy_ack_interrupt(struct phy_device *phydev)
+static irqreturn_t lan88xx_handle_interrupt(struct phy_device *phydev)
{
- int rc = phy_read(phydev, LAN88XX_INT_STS);
+ int irq_status;
- return rc < 0 ? rc : 0;
+ irq_status = phy_read(phydev, LAN88XX_INT_STS);
+ if (irq_status < 0) {
+ phy_error(phydev);
+ return IRQ_NONE;
+ }
+
+ if (!(irq_status & LAN88XX_INT_STS_LINK_CHANGE_))
+ return IRQ_NONE;
+
+ phy_trigger_machine(phydev);
+
+ return IRQ_HANDLED;
}
static int lan88xx_suspend(struct phy_device *phydev)
@@ -340,8 +356,8 @@ static struct phy_driver microchip_phy_driver[] = {
.config_init = lan88xx_config_init,
.config_aneg = lan88xx_config_aneg,
- .ack_interrupt = lan88xx_phy_ack_interrupt,
.config_intr = lan88xx_phy_config_intr,
+ .handle_interrupt = lan88xx_handle_interrupt,
.suspend = lan88xx_suspend,
.resume = genphy_resume,
diff --git a/drivers/net/phy/microchip_t1.c b/drivers/net/phy/microchip_t1.c
index fed3e395f18e..4dc00bd5a8d2 100644
--- a/drivers/net/phy/microchip_t1.c
+++ b/drivers/net/phy/microchip_t1.c
@@ -189,18 +189,34 @@ static int lan87xx_phy_config_intr(struct phy_device *phydev)
rc = phy_write(phydev, LAN87XX_INTERRUPT_MASK, 0x7FFF);
rc = phy_read(phydev, LAN87XX_INTERRUPT_SOURCE);
val = LAN87XX_MASK_LINK_UP | LAN87XX_MASK_LINK_DOWN;
- }
+ rc = phy_write(phydev, LAN87XX_INTERRUPT_MASK, val);
+ } else {
+ rc = phy_write(phydev, LAN87XX_INTERRUPT_MASK, val);
+ if (rc)
+ return rc;
- rc = phy_write(phydev, LAN87XX_INTERRUPT_MASK, val);
+ rc = phy_read(phydev, LAN87XX_INTERRUPT_SOURCE);
+ }
return rc < 0 ? rc : 0;
}
-static int lan87xx_phy_ack_interrupt(struct phy_device *phydev)
+static irqreturn_t lan87xx_handle_interrupt(struct phy_device *phydev)
{
- int rc = phy_read(phydev, LAN87XX_INTERRUPT_SOURCE);
+ int irq_status;
- return rc < 0 ? rc : 0;
+ irq_status = phy_read(phydev, LAN87XX_INTERRUPT_SOURCE);
+ if (irq_status < 0) {
+ phy_error(phydev);
+ return IRQ_NONE;
+ }
+
+ if (irq_status == 0)
+ return IRQ_NONE;
+
+ phy_trigger_machine(phydev);
+
+ return IRQ_HANDLED;
}
static int lan87xx_config_init(struct phy_device *phydev)
@@ -219,10 +235,9 @@ static struct phy_driver microchip_t1_phy_driver[] = {
.features = PHY_BASIC_T1_FEATURES,
.config_init = lan87xx_config_init,
- .config_aneg = genphy_config_aneg,
- .ack_interrupt = lan87xx_phy_ack_interrupt,
.config_intr = lan87xx_phy_config_intr,
+ .handle_interrupt = lan87xx_handle_interrupt,
.suspend = genphy_suspend,
.resume = genphy_resume,
diff --git a/drivers/net/phy/mscc/mscc_main.c b/drivers/net/phy/mscc/mscc_main.c
index 6bc7406a1ce7..2f2157e3deab 100644
--- a/drivers/net/phy/mscc/mscc_main.c
+++ b/drivers/net/phy/mscc/mscc_main.c
@@ -1498,7 +1498,7 @@ static irqreturn_t vsc8584_handle_interrupt(struct phy_device *phydev)
vsc8584_handle_macsec_interrupt(phydev);
if (irq_status & MII_VSC85XX_INT_MASK_LINK_CHG)
- phy_mac_interrupt(phydev);
+ phy_trigger_machine(phydev);
return IRQ_HANDLED;
}
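
This hunk also swaps phy_mac_interrupt() for phy_trigger_machine(): the former is meant for MACs that detect link changes themselves and merely notify phylib, while the latter makes the state machine go re-read the PHY, which is what a handler reacting to the PHY's own interrupt wants.
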
@@ -1541,16 +1541,6 @@ static int vsc85xx_config_init(struct phy_device *phydev)
return 0;
}
-static int vsc8584_did_interrupt(struct phy_device *phydev)
-{
- int rc = 0;
-
- if (phydev->interrupts == PHY_INTERRUPT_ENABLED)
- rc = phy_read(phydev, MII_VSC85XX_INT_STATUS);
-
- return (rc < 0) ? 0 : rc & MII_VSC85XX_INT_MASK_MASK;
-}
-
static int vsc8514_config_pre_init(struct phy_device *phydev)
{
/* These are the settings to override the silicon default
@@ -1933,6 +1923,10 @@ static int vsc85xx_config_intr(struct phy_device *phydev)
int rc;
if (phydev->interrupts == PHY_INTERRUPT_ENABLED) {
+ rc = vsc85xx_ack_interrupt(phydev);
+ if (rc)
+ return rc;
+
vsc8584_config_macsec_intr(phydev);
vsc8584_config_ts_intr(phydev);
@@ -1943,11 +1937,33 @@ static int vsc85xx_config_intr(struct phy_device *phydev)
if (rc < 0)
return rc;
rc = phy_read(phydev, MII_VSC85XX_INT_STATUS);
+ if (rc < 0)
+ return rc;
+
+ rc = vsc85xx_ack_interrupt(phydev);
}
return rc;
}
+static irqreturn_t vsc85xx_handle_interrupt(struct phy_device *phydev)
+{
+ int irq_status;
+
+ irq_status = phy_read(phydev, MII_VSC85XX_INT_STATUS);
+ if (irq_status < 0) {
+ phy_error(phydev);
+ return IRQ_NONE;
+ }
+
+ if (!(irq_status & MII_VSC85XX_INT_MASK_MASK))
+ return IRQ_NONE;
+
+ phy_trigger_machine(phydev);
+
+ return IRQ_HANDLED;
+}
+
static int vsc85xx_config_aneg(struct phy_device *phydev)
{
int rc;
@@ -2114,7 +2130,7 @@ static struct phy_driver vsc85xx_driver[] = {
.config_init = &vsc85xx_config_init,
.config_aneg = &vsc85xx_config_aneg,
.read_status = &vsc85xx_read_status,
- .ack_interrupt = &vsc85xx_ack_interrupt,
+ .handle_interrupt = vsc85xx_handle_interrupt,
.config_intr = &vsc85xx_config_intr,
.suspend = &genphy_suspend,
.resume = &genphy_resume,
@@ -2139,9 +2155,8 @@ static struct phy_driver vsc85xx_driver[] = {
.config_aneg = &vsc85xx_config_aneg,
.aneg_done = &genphy_aneg_done,
.read_status = &vsc85xx_read_status,
- .ack_interrupt = &vsc85xx_ack_interrupt,
+ .handle_interrupt = vsc85xx_handle_interrupt,
.config_intr = &vsc85xx_config_intr,
- .did_interrupt = &vsc8584_did_interrupt,
.suspend = &genphy_suspend,
.resume = &genphy_resume,
.probe = &vsc8574_probe,
@@ -2163,7 +2178,7 @@ static struct phy_driver vsc85xx_driver[] = {
.config_init = &vsc8514_config_init,
.config_aneg = &vsc85xx_config_aneg,
.read_status = &vsc85xx_read_status,
- .ack_interrupt = &vsc85xx_ack_interrupt,
+ .handle_interrupt = vsc85xx_handle_interrupt,
.config_intr = &vsc85xx_config_intr,
.suspend = &genphy_suspend,
.resume = &genphy_resume,
@@ -2187,7 +2202,7 @@ static struct phy_driver vsc85xx_driver[] = {
.config_init = &vsc85xx_config_init,
.config_aneg = &vsc85xx_config_aneg,
.read_status = &vsc85xx_read_status,
- .ack_interrupt = &vsc85xx_ack_interrupt,
+ .handle_interrupt = vsc85xx_handle_interrupt,
.config_intr = &vsc85xx_config_intr,
.suspend = &genphy_suspend,
.resume = &genphy_resume,
@@ -2211,7 +2226,7 @@ static struct phy_driver vsc85xx_driver[] = {
.config_init = &vsc85xx_config_init,
.config_aneg = &vsc85xx_config_aneg,
.read_status = &vsc85xx_read_status,
- .ack_interrupt = &vsc85xx_ack_interrupt,
+ .handle_interrupt = vsc85xx_handle_interrupt,
.config_intr = &vsc85xx_config_intr,
.suspend = &genphy_suspend,
.resume = &genphy_resume,
@@ -2235,7 +2250,7 @@ static struct phy_driver vsc85xx_driver[] = {
.config_init = &vsc85xx_config_init,
.config_aneg = &vsc85xx_config_aneg,
.read_status = &vsc85xx_read_status,
- .ack_interrupt = &vsc85xx_ack_interrupt,
+ .handle_interrupt = vsc85xx_handle_interrupt,
.config_intr = &vsc85xx_config_intr,
.suspend = &genphy_suspend,
.resume = &genphy_resume,
@@ -2259,7 +2274,7 @@ static struct phy_driver vsc85xx_driver[] = {
.config_init = &vsc85xx_config_init,
.config_aneg = &vsc85xx_config_aneg,
.read_status = &vsc85xx_read_status,
- .ack_interrupt = &vsc85xx_ack_interrupt,
+ .handle_interrupt = vsc85xx_handle_interrupt,
.config_intr = &vsc85xx_config_intr,
.suspend = &genphy_suspend,
.resume = &genphy_resume,
@@ -2283,9 +2298,8 @@ static struct phy_driver vsc85xx_driver[] = {
.config_init = &vsc8584_config_init,
.config_aneg = &vsc85xx_config_aneg,
.read_status = &vsc85xx_read_status,
- .ack_interrupt = &vsc85xx_ack_interrupt,
+ .handle_interrupt = vsc85xx_handle_interrupt,
.config_intr = &vsc85xx_config_intr,
- .did_interrupt = &vsc8584_did_interrupt,
.suspend = &genphy_suspend,
.resume = &genphy_resume,
.probe = &vsc8574_probe,
@@ -2308,9 +2322,8 @@ static struct phy_driver vsc85xx_driver[] = {
.config_init = &vsc8584_config_init,
.config_aneg = &vsc85xx_config_aneg,
.read_status = &vsc85xx_read_status,
- .ack_interrupt = &vsc85xx_ack_interrupt,
+ .handle_interrupt = vsc85xx_handle_interrupt,
.config_intr = &vsc85xx_config_intr,
- .did_interrupt = &vsc8584_did_interrupt,
.suspend = &genphy_suspend,
.resume = &genphy_resume,
.probe = &vsc8584_probe,
@@ -2333,9 +2346,7 @@ static struct phy_driver vsc85xx_driver[] = {
.aneg_done = &genphy_aneg_done,
.read_status = &vsc85xx_read_status,
.handle_interrupt = &vsc8584_handle_interrupt,
- .ack_interrupt = &vsc85xx_ack_interrupt,
.config_intr = &vsc85xx_config_intr,
- .did_interrupt = &vsc8584_did_interrupt,
.suspend = &genphy_suspend,
.resume = &genphy_resume,
.probe = &vsc8574_probe,
@@ -2359,9 +2370,8 @@ static struct phy_driver vsc85xx_driver[] = {
.config_aneg = &vsc85xx_config_aneg,
.aneg_done = &genphy_aneg_done,
.read_status = &vsc85xx_read_status,
- .ack_interrupt = &vsc85xx_ack_interrupt,
+ .handle_interrupt = vsc85xx_handle_interrupt,
.config_intr = &vsc85xx_config_intr,
- .did_interrupt = &vsc8584_did_interrupt,
.suspend = &genphy_suspend,
.resume = &genphy_resume,
.probe = &vsc8574_probe,
@@ -2386,9 +2396,7 @@ static struct phy_driver vsc85xx_driver[] = {
.aneg_done = &genphy_aneg_done,
.read_status = &vsc85xx_read_status,
.handle_interrupt = &vsc8584_handle_interrupt,
- .ack_interrupt = &vsc85xx_ack_interrupt,
.config_intr = &vsc85xx_config_intr,
- .did_interrupt = &vsc8584_did_interrupt,
.suspend = &genphy_suspend,
.resume = &genphy_resume,
.probe = &vsc8584_probe,
@@ -2411,9 +2419,7 @@ static struct phy_driver vsc85xx_driver[] = {
.aneg_done = &genphy_aneg_done,
.read_status = &vsc85xx_read_status,
.handle_interrupt = &vsc8584_handle_interrupt,
- .ack_interrupt = &vsc85xx_ack_interrupt,
.config_intr = &vsc85xx_config_intr,
- .did_interrupt = &vsc8584_did_interrupt,
.suspend = &genphy_suspend,
.resume = &genphy_resume,
.probe = &vsc8584_probe,
@@ -2436,9 +2442,7 @@ static struct phy_driver vsc85xx_driver[] = {
.aneg_done = &genphy_aneg_done,
.read_status = &vsc85xx_read_status,
.handle_interrupt = &vsc8584_handle_interrupt,
- .ack_interrupt = &vsc85xx_ack_interrupt,
.config_intr = &vsc85xx_config_intr,
- .did_interrupt = &vsc8584_did_interrupt,
.suspend = &genphy_suspend,
.resume = &genphy_resume,
.probe = &vsc8584_probe,
diff --git a/drivers/net/phy/mscc/mscc_ptp.c b/drivers/net/phy/mscc/mscc_ptp.c
index b97ee79f3cdf..924ed5b034a4 100644
--- a/drivers/net/phy/mscc/mscc_ptp.c
+++ b/drivers/net/phy/mscc/mscc_ptp.c
@@ -136,7 +136,7 @@ static void vsc85xx_ts_write_csr(struct phy_device *phydev, enum ts_blk blk,
phy_ts_base_write(phydev, MSCC_EXT_PAGE_ACCESS, MSCC_PHY_PAGE_1588);
- if (!cond || (cond && upper))
+ if (!cond || upper)
phy_ts_base_write(phydev, MSCC_PHY_TS_CSR_DATA_MSB, upper);
phy_ts_base_write(phydev, MSCC_PHY_TS_CSR_DATA_LSB, lower);
@@ -506,9 +506,9 @@ static int vsc85xx_ptp_cmp_init(struct phy_device *phydev, enum ts_blk blk)
{
struct vsc8531_private *vsc8531 = phydev->priv;
bool base = phydev->mdio.addr == vsc8531->ts_base_addr;
- enum vsc85xx_ptp_msg_type msgs[] = {
- PTP_MSG_TYPE_SYNC,
- PTP_MSG_TYPE_DELAY_REQ
+ u8 msgs[] = {
+ PTP_MSGTYPE_SYNC,
+ PTP_MSGTYPE_DELAY_REQ
};
u32 val;
u8 i;
@@ -847,9 +847,9 @@ static int vsc85xx_ts_ptp_action_flow(struct phy_device *phydev, enum ts_blk blk
static int vsc85xx_ptp_conf(struct phy_device *phydev, enum ts_blk blk,
bool one_step, bool enable)
{
- enum vsc85xx_ptp_msg_type msgs[] = {
- PTP_MSG_TYPE_SYNC,
- PTP_MSG_TYPE_DELAY_REQ
+ u8 msgs[] = {
+ PTP_MSGTYPE_SYNC,
+ PTP_MSGTYPE_DELAY_REQ
};
u32 val;
u8 i;
@@ -858,7 +858,7 @@ static int vsc85xx_ptp_conf(struct phy_device *phydev, enum ts_blk blk,
if (blk == INGRESS)
vsc85xx_ts_ptp_action_flow(phydev, blk, msgs[i],
PTP_WRITE_NS);
- else if (msgs[i] == PTP_MSG_TYPE_SYNC && one_step)
+ else if (msgs[i] == PTP_MSGTYPE_SYNC && one_step)
 /* no need to know the Sync transmit time when sending in one_step */
vsc85xx_ts_ptp_action_flow(phydev, blk, msgs[i],
PTP_WRITE_1588);
@@ -1510,6 +1510,8 @@ void vsc8584_config_ts_intr(struct phy_device *phydev)
int vsc8584_ptp_init(struct phy_device *phydev)
{
switch (phydev->phy_id & phydev->drv->phy_id_mask) {
+ case PHY_ID_VSC8572:
+ case PHY_ID_VSC8574:
case PHY_ID_VSC8575:
case PHY_ID_VSC8582:
case PHY_ID_VSC8584:
diff --git a/drivers/net/phy/mscc/mscc_ptp.h b/drivers/net/phy/mscc/mscc_ptp.h
index 3ea163af0f4f..da3465360e90 100644
--- a/drivers/net/phy/mscc/mscc_ptp.h
+++ b/drivers/net/phy/mscc/mscc_ptp.h
@@ -436,11 +436,6 @@ enum ptp_cmd {
PTP_SAVE_IN_TS_FIFO = 11, /* invalid when writing in reg */
};
-enum vsc85xx_ptp_msg_type {
- PTP_MSG_TYPE_SYNC,
- PTP_MSG_TYPE_DELAY_REQ,
-};
-
struct vsc85xx_ptphdr {
u8 tsmt; /* transportSpecific | messageType */
u8 ver; /* reserved0 | versionPTP */
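Context for the hunk above: the driver-private vsc85xx_ptp_msg_type enum is dropped in favour of the common PTP message-type values, which are plain u8 nibble codes taken straight from the PTP header's messageType field. A sketch of the assumed common defines (IEEE 1588 message-type values; upstream they live in the shared PTP classify header, not in this driver):

#define PTP_MSGTYPE_SYNC	0x0
#define PTP_MSGTYPE_DELAY_REQ	0x1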
diff --git a/drivers/net/phy/national.c b/drivers/net/phy/national.c
index a5bf0874c7d8..5a8c8eb18582 100644
--- a/drivers/net/phy/national.c
+++ b/drivers/net/phy/national.c
@@ -63,19 +63,6 @@ static void ns_exp_write(struct phy_device *phydev, u16 reg, u8 data)
phy_write(phydev, NS_EXP_MEM_DATA, data);
}
-static int ns_config_intr(struct phy_device *phydev)
-{
- int err;
-
- if (phydev->interrupts == PHY_INTERRUPT_ENABLED)
- err = phy_write(phydev, DP83865_INT_MASK,
- DP83865_INT_MASK_DEFAULT);
- else
- err = phy_write(phydev, DP83865_INT_MASK, 0);
-
- return err;
-}
-
static int ns_ack_interrupt(struct phy_device *phydev)
{
int ret = phy_read(phydev, DP83865_INT_STATUS);
@@ -89,6 +76,49 @@ static int ns_ack_interrupt(struct phy_device *phydev)
return ret;
}
+static irqreturn_t ns_handle_interrupt(struct phy_device *phydev)
+{
+ int irq_status;
+
+ irq_status = phy_read(phydev, DP83865_INT_STATUS);
+ if (irq_status < 0) {
+ phy_error(phydev);
+ return IRQ_NONE;
+ }
+
+ if (!(irq_status & DP83865_INT_MASK_DEFAULT))
+ return IRQ_NONE;
+
+ /* clear the interrupt */
+ phy_write(phydev, DP83865_INT_CLEAR, irq_status & ~0x7);
+
+ phy_trigger_machine(phydev);
+
+ return IRQ_HANDLED;
+}
+
+static int ns_config_intr(struct phy_device *phydev)
+{
+ int err;
+
+ if (phydev->interrupts == PHY_INTERRUPT_ENABLED) {
+ err = ns_ack_interrupt(phydev);
+ if (err)
+ return err;
+
+ err = phy_write(phydev, DP83865_INT_MASK,
+ DP83865_INT_MASK_DEFAULT);
+ } else {
+ err = phy_write(phydev, DP83865_INT_MASK, 0);
+ if (err)
+ return err;
+
+ err = ns_ack_interrupt(phydev);
+ }
+
+ return err;
+}
+
static void ns_giga_speed_fallback(struct phy_device *phydev, int mode)
{
int bmcr = phy_read(phydev, MII_BMCR);
@@ -133,8 +163,8 @@ static struct phy_driver dp83865_driver[] = { {
.name = "NatSemi DP83865",
/* PHY_GBIT_FEATURES */
.config_init = ns_config_init,
- .ack_interrupt = ns_ack_interrupt,
.config_intr = ns_config_intr,
+ .handle_interrupt = ns_handle_interrupt,
} };
module_phy_driver(dp83865_driver);
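The national.c rework above shows the ordering contract this series applies to every driver's config_intr: ack any pending interrupt before unmasking, and mask before the final ack on the disable path, so no stale event fires while the handler is being installed or torn down. A condensed sketch of that contract, with hypothetical EXAMPLE_* register names (read-to-clear status register assumed):

#include <linux/phy.h>

/* Hypothetical registers for illustration only */
#define EXAMPLE_INT_STATUS	0x1a	/* read-to-clear */
#define EXAMPLE_INT_MASK	0x1b
#define EXAMPLE_INT_ALL		0xff00

static int example_ack_interrupt(struct phy_device *phydev)
{
	int err = phy_read(phydev, EXAMPLE_INT_STATUS);

	return err < 0 ? err : 0;
}

static int example_config_intr(struct phy_device *phydev)
{
	int err;

	if (phydev->interrupts == PHY_INTERRUPT_ENABLED) {
		/* ack first, then unmask */
		err = example_ack_interrupt(phydev);
		if (err)
			return err;
		err = phy_write(phydev, EXAMPLE_INT_MASK, EXAMPLE_INT_ALL);
	} else {
		/* mask first, then ack whatever is still latched */
		err = phy_write(phydev, EXAMPLE_INT_MASK, 0);
		if (err)
			return err;
		err = example_ack_interrupt(phydev);
	}

	return err;
}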
diff --git a/drivers/net/phy/nxp-tja11xx.c b/drivers/net/phy/nxp-tja11xx.c
index a72fa0d2e7c7..afd7afa1f498 100644
--- a/drivers/net/phy/nxp-tja11xx.c
+++ b/drivers/net/phy/nxp-tja11xx.c
@@ -44,6 +44,9 @@
#define MII_CFG2_SLEEP_REQUEST_TO_16MS 0x3
#define MII_INTSRC 21
+#define MII_INTSRC_LINK_FAIL BIT(10)
+#define MII_INTSRC_LINK_UP BIT(9)
+#define MII_INTSRC_MASK (MII_INTSRC_LINK_FAIL | MII_INTSRC_LINK_UP)
#define MII_INTSRC_TEMP_ERR BIT(1)
#define MII_INTSRC_UV_ERR BIT(3)
@@ -597,11 +600,42 @@ static int tja11xx_ack_interrupt(struct phy_device *phydev)
static int tja11xx_config_intr(struct phy_device *phydev)
{
int value = 0;
+ int err;
+
+ if (phydev->interrupts == PHY_INTERRUPT_ENABLED) {
+ err = tja11xx_ack_interrupt(phydev);
+ if (err)
+ return err;
- if (phydev->interrupts == PHY_INTERRUPT_ENABLED)
value = MII_INTEN_LINK_FAIL | MII_INTEN_LINK_UP;
+ err = phy_write(phydev, MII_INTEN, value);
+ } else {
+ err = phy_write(phydev, MII_INTEN, value);
+ if (err)
+ return err;
+
+ err = tja11xx_ack_interrupt(phydev);
+ }
+
+ return err;
+}
+
+static irqreturn_t tja11xx_handle_interrupt(struct phy_device *phydev)
+{
+ int irq_status;
+
+ irq_status = phy_read(phydev, MII_INTSRC);
+ if (irq_status < 0) {
+ phy_error(phydev);
+ return IRQ_NONE;
+ }
+
+ if (!(irq_status & MII_INTSRC_MASK))
+ return IRQ_NONE;
+
+ phy_trigger_machine(phydev);
- return phy_write(phydev, MII_INTEN, value);
+ return IRQ_HANDLED;
}
static int tja11xx_cable_test_start(struct phy_device *phydev)
@@ -747,8 +781,8 @@ static struct phy_driver tja11xx_driver[] = {
.get_sset_count = tja11xx_get_sset_count,
.get_strings = tja11xx_get_strings,
.get_stats = tja11xx_get_stats,
- .ack_interrupt = tja11xx_ack_interrupt,
.config_intr = tja11xx_config_intr,
+ .handle_interrupt = tja11xx_handle_interrupt,
.cable_test_start = tja11xx_cable_test_start,
.cable_test_get_status = tja11xx_cable_test_get_status,
}, {
@@ -770,8 +804,8 @@ static struct phy_driver tja11xx_driver[] = {
.get_sset_count = tja11xx_get_sset_count,
.get_strings = tja11xx_get_strings,
.get_stats = tja11xx_get_stats,
- .ack_interrupt = tja11xx_ack_interrupt,
.config_intr = tja11xx_config_intr,
+ .handle_interrupt = tja11xx_handle_interrupt,
.cable_test_start = tja11xx_cable_test_start,
.cable_test_get_status = tja11xx_cable_test_get_status,
}
diff --git a/drivers/net/phy/phy-c45.c b/drivers/net/phy/phy-c45.c
index bd11e62bfdfe..077f2929c45e 100644
--- a/drivers/net/phy/phy-c45.c
+++ b/drivers/net/phy/phy-c45.c
@@ -9,7 +9,7 @@
#include <linux/phy.h>
/**
- * genphy_c45_setup_forced - configures a forced speed
+ * genphy_c45_pma_setup_forced - configures a forced speed
* @phydev: target phy_device struct
*/
int genphy_c45_pma_setup_forced(struct phy_device *phydev)
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
index 35525a671400..45f75533c47c 100644
--- a/drivers/net/phy/phy.c
+++ b/drivers/net/phy/phy.c
@@ -114,23 +114,6 @@ void phy_print_status(struct phy_device *phydev)
EXPORT_SYMBOL(phy_print_status);
/**
- * phy_clear_interrupt - Ack the phy device's interrupt
- * @phydev: the phy_device struct
- *
- * If the @phydev driver has an ack_interrupt function, call it to
- * ack and clear the phy device's interrupt.
- *
- * Returns 0 on success or < 0 on error.
- */
-static int phy_clear_interrupt(struct phy_device *phydev)
-{
- if (phydev->drv->ack_interrupt)
- return phydev->drv->ack_interrupt(phydev);
-
- return 0;
-}
-
-/**
* phy_config_interrupt - configure the PHY device for the requested interrupts
* @phydev: the phy_device struct
* @interrupts: interrupt flags to configure for this @phydev
@@ -489,14 +472,15 @@ void phy_queue_state_machine(struct phy_device *phydev, unsigned long jiffies)
EXPORT_SYMBOL(phy_queue_state_machine);
/**
- * phy_queue_state_machine - Trigger the state machine to run now
+ * phy_trigger_machine - Trigger the state machine to run now
*
* @phydev: the phy_device struct
*/
-static void phy_trigger_machine(struct phy_device *phydev)
+void phy_trigger_machine(struct phy_device *phydev)
{
phy_queue_state_machine(phydev, 0);
}
+EXPORT_SYMBOL(phy_trigger_machine);
static void phy_abort_cable_test(struct phy_device *phydev)
{
@@ -924,7 +908,7 @@ void phy_stop_machine(struct phy_device *phydev)
* Must not be called from interrupt context, or while the
* phydev->lock is held.
*/
-static void phy_error(struct phy_device *phydev)
+void phy_error(struct phy_device *phydev)
{
WARN_ON(1);
@@ -934,6 +918,7 @@ static void phy_error(struct phy_device *phydev)
phy_trigger_machine(phydev);
}
+EXPORT_SYMBOL(phy_error);
/**
* phy_disable_interrupts - Disable the PHY interrupts from the PHY side
@@ -941,15 +926,8 @@ static void phy_error(struct phy_device *phydev)
*/
int phy_disable_interrupts(struct phy_device *phydev)
{
- int err;
-
/* Disable PHY interrupts */
- err = phy_config_interrupt(phydev, PHY_INTERRUPT_DISABLED);
- if (err)
- return err;
-
- /* Clear the interrupt */
- return phy_clear_interrupt(phydev);
+ return phy_config_interrupt(phydev, PHY_INTERRUPT_DISABLED);
}
/**
@@ -964,22 +942,7 @@ static irqreturn_t phy_interrupt(int irq, void *phy_dat)
struct phy_device *phydev = phy_dat;
struct phy_driver *drv = phydev->drv;
- if (drv->handle_interrupt)
- return drv->handle_interrupt(phydev);
-
- if (drv->did_interrupt && !drv->did_interrupt(phydev))
- return IRQ_NONE;
-
- /* reschedule state queue work to run as soon as possible */
- phy_trigger_machine(phydev);
-
- /* did_interrupt() may have cleared the interrupt already */
- if (!drv->did_interrupt && phy_clear_interrupt(phydev)) {
- phy_error(phydev);
- return IRQ_NONE;
- }
-
- return IRQ_HANDLED;
+ return drv->handle_interrupt(phydev);
}
/**
@@ -988,11 +951,6 @@ static irqreturn_t phy_interrupt(int irq, void *phy_dat)
*/
static int phy_enable_interrupts(struct phy_device *phydev)
{
- int err = phy_clear_interrupt(phydev);
-
- if (err < 0)
- return err;
-
return phy_config_interrupt(phydev, PHY_INTERRUPT_ENABLED);
}
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
index 5dab6be6fc38..80c2e646c093 100644
--- a/drivers/net/phy/phy_device.c
+++ b/drivers/net/phy/phy_device.c
@@ -1156,7 +1156,7 @@ void phy_attached_info(struct phy_device *phydev)
}
EXPORT_SYMBOL(phy_attached_info);
-#define ATTACHED_FMT "attached PHY driver [%s] (mii_bus:phy_addr=%s, irq=%s)"
+#define ATTACHED_FMT "attached PHY driver %s(mii_bus:phy_addr=%s, irq=%s)"
char *phy_attached_info_irq(struct phy_device *phydev)
{
char *irq_str;
@@ -1181,19 +1181,17 @@ EXPORT_SYMBOL(phy_attached_info_irq);
void phy_attached_print(struct phy_device *phydev, const char *fmt, ...)
{
- const char *drv_name = phydev->drv ? phydev->drv->name : "unbound";
+ const char *unbound = phydev->drv ? "" : "[unbound] ";
char *irq_str = phy_attached_info_irq(phydev);
if (!fmt) {
- phydev_info(phydev, ATTACHED_FMT "\n",
- drv_name, phydev_name(phydev),
- irq_str);
+ phydev_info(phydev, ATTACHED_FMT "\n", unbound,
+ phydev_name(phydev), irq_str);
} else {
va_list ap;
- phydev_info(phydev, ATTACHED_FMT,
- drv_name, phydev_name(phydev),
- irq_str);
+ phydev_info(phydev, ATTACHED_FMT, unbound,
+ phydev_name(phydev), irq_str);
va_start(ap, fmt);
vprintk(fmt, ap);
@@ -2463,6 +2461,19 @@ int genphy_soft_reset(struct phy_device *phydev)
}
EXPORT_SYMBOL(genphy_soft_reset);
+irqreturn_t genphy_handle_interrupt_no_ack(struct phy_device *phydev)
+{
+ /* It seems there are cases where the interrupts are handled by another
+ * entity (i.e. an IRQ controller embedded inside the PHY) and do not
+ * need any other interaction from phylib. In this case, just trigger
+ * the state machine directly.
+ */
+ phy_trigger_machine(phydev);
+
+ return IRQ_HANDLED;
+}
+EXPORT_SYMBOL(genphy_handle_interrupt_no_ack);
+
/**
* genphy_read_abilities - read PHY abilities from Clause 22 registers
* @phydev: target phy_device struct
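Usage note for the genphy_handle_interrupt_no_ack() helper added above: PHYs whose interrupt is acked elsewhere (for instance by an irqchip embedded in the PHY) pair it with genphy_no_config_intr, as the RTL8366RB entry later in this series does. A sketched driver entry, with everything beyond the two callbacks elided:

#include <linux/phy.h>

static struct phy_driver example_driver = {
	/* .phy_id / .name / features elided for brevity */
	.config_intr	  = genphy_no_config_intr,
	.handle_interrupt = genphy_handle_interrupt_no_ack,
};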
@@ -2735,7 +2746,7 @@ static int phy_get_int_delay_property(struct device *dev, const char *name)
#endif
/**
- * phy_get_delay_index - returns the index of the internal delay
+ * phy_get_internal_delay - returns the index of the internal delay
* @phydev: phy_device struct
* @dev: pointer to the devices device struct
* @delay_values: array of delays the PHY supports
@@ -2815,7 +2826,7 @@ EXPORT_SYMBOL(phy_get_internal_delay);
static bool phy_drv_supports_irq(struct phy_driver *phydrv)
{
- return phydrv->config_intr && phydrv->ack_interrupt;
+ return phydrv->config_intr && phydrv->handle_interrupt;
}
/**
@@ -2947,6 +2958,13 @@ static int phy_remove(struct device *dev)
return 0;
}
+static void phy_shutdown(struct device *dev)
+{
+ struct phy_device *phydev = to_phy_device(dev);
+
+ phy_disable_interrupts(phydev);
+}
+
/**
* phy_driver_register - register a phy_driver with the PHY layer
* @new_driver: new phy_driver to register
@@ -2970,6 +2988,7 @@ int phy_driver_register(struct phy_driver *new_driver, struct module *owner)
new_driver->mdiodrv.driver.bus = &mdio_bus_type;
new_driver->mdiodrv.driver.probe = phy_probe;
new_driver->mdiodrv.driver.remove = phy_remove;
+ new_driver->mdiodrv.driver.shutdown = phy_shutdown;
new_driver->mdiodrv.driver.owner = owner;
new_driver->mdiodrv.driver.probe_type = PROBE_FORCE_SYNCHRONOUS;
diff --git a/drivers/net/phy/phy_led_triggers.c b/drivers/net/phy/phy_led_triggers.c
index 59a94e07e7c5..f550576eb9da 100644
--- a/drivers/net/phy/phy_led_triggers.c
+++ b/drivers/net/phy/phy_led_triggers.c
@@ -66,11 +66,11 @@ static void phy_led_trigger_format_name(struct phy_device *phy, char *buf,
static int phy_led_trigger_register(struct phy_device *phy,
struct phy_led_trigger *plt,
- unsigned int speed)
+ unsigned int speed,
+ const char *suffix)
{
plt->speed = speed;
- phy_led_trigger_format_name(phy, plt->name, sizeof(plt->name),
- phy_speed_to_str(speed));
+ phy_led_trigger_format_name(phy, plt->name, sizeof(plt->name), suffix);
plt->trigger.name = plt->name;
return led_trigger_register(&plt->trigger);
@@ -99,12 +99,7 @@ int phy_led_triggers_register(struct phy_device *phy)
goto out_clear;
}
- phy_led_trigger_format_name(phy, phy->led_link_trigger->name,
- sizeof(phy->led_link_trigger->name),
- "link");
- phy->led_link_trigger->trigger.name = phy->led_link_trigger->name;
-
- err = led_trigger_register(&phy->led_link_trigger->trigger);
+ err = phy_led_trigger_register(phy, phy->led_link_trigger, 0, "link");
if (err)
goto out_free_link;
@@ -119,7 +114,8 @@ int phy_led_triggers_register(struct phy_device *phy)
for (i = 0; i < phy->phy_num_led_triggers; i++) {
err = phy_led_trigger_register(phy, &phy->phy_led_triggers[i],
- speeds[i]);
+ speeds[i],
+ phy_speed_to_str(speeds[i]));
if (err)
goto out_unreg;
}
diff --git a/drivers/net/phy/phylink.c b/drivers/net/phy/phylink.c
index fe2296fdda19..84f6e197f965 100644
--- a/drivers/net/phy/phylink.c
+++ b/drivers/net/phy/phylink.c
@@ -1649,7 +1649,7 @@ int phylink_ethtool_set_pauseparam(struct phylink *pl,
EXPORT_SYMBOL_GPL(phylink_ethtool_set_pauseparam);
/**
- * phylink_ethtool_get_eee_err() - read the energy efficient ethernet error
+ * phylink_get_eee_err() - read the energy efficient ethernet error
* counter
* @pl: a pointer to a &struct phylink returned from phylink_create().
*
@@ -2515,9 +2515,10 @@ int phylink_mii_c22_pcs_config(struct mdio_device *pcs, unsigned int mode,
changed = ret > 0;
+ /* Ensure ISOLATE bit is disabled */
bmcr = mode == MLO_AN_INBAND ? BMCR_ANENABLE : 0;
ret = mdiobus_modify(pcs->bus, pcs->addr, MII_BMCR,
- BMCR_ANENABLE, bmcr);
+ BMCR_ANENABLE | BMCR_ISOLATE, bmcr);
if (ret < 0)
return ret;
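(For the hunk above: mdiobus_modify() read-modify-writes only the bits named in its mask argument, so adding BMCR_ISOLATE to the mask while leaving it out of the value written forces the bit clear, regardless of what the PCS strapped or previously latched.)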
diff --git a/drivers/net/phy/qsemi.c b/drivers/net/phy/qsemi.c
index 1b15a991ee06..d5c1aaa8236a 100644
--- a/drivers/net/phy/qsemi.c
+++ b/drivers/net/phy/qsemi.c
@@ -75,6 +75,10 @@ static int qs6612_ack_interrupt(struct phy_device *phydev)
{
int err;
+ /* The Interrupt Source register is not self-clearing; bits 4 and 5 are
+ * cleared when MII_BMSR is read, and bits 1 and 3 are cleared when
+ * MII_EXPANSION is read.
+ */
err = phy_read(phydev, MII_QS6612_ISR);
if (err < 0)
@@ -96,24 +100,56 @@ static int qs6612_ack_interrupt(struct phy_device *phydev)
static int qs6612_config_intr(struct phy_device *phydev)
{
int err;
- if (phydev->interrupts == PHY_INTERRUPT_ENABLED)
+ if (phydev->interrupts == PHY_INTERRUPT_ENABLED) {
+ /* clear any interrupts before enabling them */
+ err = qs6612_ack_interrupt(phydev);
+ if (err)
+ return err;
+
err = phy_write(phydev, MII_QS6612_IMR,
MII_QS6612_IMR_INIT);
- else
+ } else {
err = phy_write(phydev, MII_QS6612_IMR, 0);
+ if (err)
+ return err;
+
+ /* clear any leftover interrupts */
+ err = qs6612_ack_interrupt(phydev);
+ }
return err;
}
+static irqreturn_t qs6612_handle_interrupt(struct phy_device *phydev)
+{
+ int irq_status;
+
+ irq_status = phy_read(phydev, MII_QS6612_ISR);
+ if (irq_status < 0) {
+ phy_error(phydev);
+ return IRQ_NONE;
+ }
+
+ if (!(irq_status & MII_QS6612_IMR_INIT))
+ return IRQ_NONE;
+
+ /* the interrupt source register is not self-clearing */
+ qs6612_ack_interrupt(phydev);
+
+ phy_trigger_machine(phydev);
+
+ return IRQ_HANDLED;
+}
+
static struct phy_driver qs6612_driver[] = { {
.phy_id = 0x00181440,
.name = "QS6612",
.phy_id_mask = 0xfffffff0,
/* PHY_BASIC_FEATURES */
.config_init = qs6612_config_init,
- .ack_interrupt = qs6612_ack_interrupt,
.config_intr = qs6612_config_intr,
+ .handle_interrupt = qs6612_handle_interrupt,
} };
module_phy_driver(qs6612_driver);
diff --git a/drivers/net/phy/realtek.c b/drivers/net/phy/realtek.c
index 575580d3ffe0..99ecd6c4c15a 100644
--- a/drivers/net/phy/realtek.c
+++ b/drivers/net/phy/realtek.c
@@ -41,6 +41,12 @@
#define RTL8211E_RX_DELAY BIT(11)
#define RTL8201F_ISR 0x1e
+#define RTL8201F_ISR_ANERR BIT(15)
+#define RTL8201F_ISR_DUPLEX BIT(13)
+#define RTL8201F_ISR_LINK BIT(11)
+#define RTL8201F_ISR_MASK (RTL8201F_ISR_ANERR | \
+ RTL8201F_ISR_DUPLEX | \
+ RTL8201F_ISR_LINK)
#define RTL8201F_IER 0x13
#define RTL8366RB_POWER_SAVE 0x15
@@ -102,24 +108,45 @@ static int rtl8211f_ack_interrupt(struct phy_device *phydev)
static int rtl8201_config_intr(struct phy_device *phydev)
{
u16 val;
+ int err;
+
+ if (phydev->interrupts == PHY_INTERRUPT_ENABLED) {
+ err = rtl8201_ack_interrupt(phydev);
+ if (err)
+ return err;
- if (phydev->interrupts == PHY_INTERRUPT_ENABLED)
val = BIT(13) | BIT(12) | BIT(11);
- else
+ err = phy_write_paged(phydev, 0x7, RTL8201F_IER, val);
+ } else {
val = 0;
+ err = phy_write_paged(phydev, 0x7, RTL8201F_IER, val);
+ if (err)
+ return err;
- return phy_write_paged(phydev, 0x7, RTL8201F_IER, val);
+ err = rtl8201_ack_interrupt(phydev);
+ }
+
+ return err;
}
static int rtl8211b_config_intr(struct phy_device *phydev)
{
int err;
- if (phydev->interrupts == PHY_INTERRUPT_ENABLED)
+ if (phydev->interrupts == PHY_INTERRUPT_ENABLED) {
+ err = rtl821x_ack_interrupt(phydev);
+ if (err)
+ return err;
+
err = phy_write(phydev, RTL821x_INER,
RTL8211B_INER_INIT);
- else
+ } else {
err = phy_write(phydev, RTL821x_INER, 0);
+ if (err)
+ return err;
+
+ err = rtl821x_ack_interrupt(phydev);
+ }
return err;
}
@@ -128,11 +155,20 @@ static int rtl8211e_config_intr(struct phy_device *phydev)
{
int err;
- if (phydev->interrupts == PHY_INTERRUPT_ENABLED)
+ if (phydev->interrupts == PHY_INTERRUPT_ENABLED) {
+ err = rtl821x_ack_interrupt(phydev);
+ if (err)
+ return err;
+
err = phy_write(phydev, RTL821x_INER,
RTL8211E_INER_LINK_STATUS);
- else
+ } else {
err = phy_write(phydev, RTL821x_INER, 0);
+ if (err)
+ return err;
+
+ err = rtl821x_ack_interrupt(phydev);
+ }
return err;
}
@@ -140,13 +176,85 @@ static int rtl8211e_config_intr(struct phy_device *phydev)
static int rtl8211f_config_intr(struct phy_device *phydev)
{
u16 val;
+ int err;
+
+ if (phydev->interrupts == PHY_INTERRUPT_ENABLED) {
+ err = rtl8211f_ack_interrupt(phydev);
+ if (err)
+ return err;
- if (phydev->interrupts == PHY_INTERRUPT_ENABLED)
val = RTL8211F_INER_LINK_STATUS;
- else
+ err = phy_write_paged(phydev, 0xa42, RTL821x_INER, val);
+ } else {
val = 0;
+ err = phy_write_paged(phydev, 0xa42, RTL821x_INER, val);
+ if (err)
+ return err;
- return phy_write_paged(phydev, 0xa42, RTL821x_INER, val);
+ err = rtl8211f_ack_interrupt(phydev);
+ }
+
+ return err;
+}
+
+static irqreturn_t rtl8201_handle_interrupt(struct phy_device *phydev)
+{
+ int irq_status;
+
+ irq_status = phy_read(phydev, RTL8201F_ISR);
+ if (irq_status < 0) {
+ phy_error(phydev);
+ return IRQ_NONE;
+ }
+
+ if (!(irq_status & RTL8201F_ISR_MASK))
+ return IRQ_NONE;
+
+ phy_trigger_machine(phydev);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t rtl821x_handle_interrupt(struct phy_device *phydev)
+{
+ int irq_status, irq_enabled;
+
+ irq_status = phy_read(phydev, RTL821x_INSR);
+ if (irq_status < 0) {
+ phy_error(phydev);
+ return IRQ_NONE;
+ }
+
+ irq_enabled = phy_read(phydev, RTL821x_INER);
+ if (irq_enabled < 0) {
+ phy_error(phydev);
+ return IRQ_NONE;
+ }
+
+ if (!(irq_status & irq_enabled))
+ return IRQ_NONE;
+
+ phy_trigger_machine(phydev);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t rtl8211f_handle_interrupt(struct phy_device *phydev)
+{
+ int irq_status;
+
+ irq_status = phy_read_paged(phydev, 0xa43, RTL8211F_INSR);
+ if (irq_status < 0) {
+ phy_error(phydev);
+ return IRQ_NONE;
+ }
+
+ if (!(irq_status & RTL8211F_INER_LINK_STATUS))
+ return IRQ_NONE;
+
+ phy_trigger_machine(phydev);
+
+ return IRQ_HANDLED;
}
static int rtl8211_config_aneg(struct phy_device *phydev)
@@ -556,8 +664,8 @@ static struct phy_driver realtek_drvs[] = {
}, {
PHY_ID_MATCH_EXACT(0x001cc816),
.name = "RTL8201F Fast Ethernet",
- .ack_interrupt = &rtl8201_ack_interrupt,
.config_intr = &rtl8201_config_intr,
+ .handle_interrupt = rtl8201_handle_interrupt,
.suspend = genphy_suspend,
.resume = genphy_resume,
.read_page = rtl821x_read_page,
@@ -582,8 +690,8 @@ static struct phy_driver realtek_drvs[] = {
}, {
PHY_ID_MATCH_EXACT(0x001cc912),
.name = "RTL8211B Gigabit Ethernet",
- .ack_interrupt = &rtl821x_ack_interrupt,
.config_intr = &rtl8211b_config_intr,
+ .handle_interrupt = rtl821x_handle_interrupt,
.read_mmd = &genphy_read_mmd_unsupported,
.write_mmd = &genphy_write_mmd_unsupported,
.suspend = rtl8211b_suspend,
@@ -601,8 +709,8 @@ static struct phy_driver realtek_drvs[] = {
}, {
PHY_ID_MATCH_EXACT(0x001cc914),
.name = "RTL8211DN Gigabit Ethernet",
- .ack_interrupt = rtl821x_ack_interrupt,
.config_intr = rtl8211e_config_intr,
+ .handle_interrupt = rtl821x_handle_interrupt,
.suspend = genphy_suspend,
.resume = genphy_resume,
.read_page = rtl821x_read_page,
@@ -611,8 +719,8 @@ static struct phy_driver realtek_drvs[] = {
PHY_ID_MATCH_EXACT(0x001cc915),
.name = "RTL8211E Gigabit Ethernet",
.config_init = &rtl8211e_config_init,
- .ack_interrupt = &rtl821x_ack_interrupt,
.config_intr = &rtl8211e_config_intr,
+ .handle_interrupt = rtl821x_handle_interrupt,
.suspend = genphy_suspend,
.resume = genphy_resume,
.read_page = rtl821x_read_page,
@@ -621,8 +729,9 @@ static struct phy_driver realtek_drvs[] = {
PHY_ID_MATCH_EXACT(0x001cc916),
.name = "RTL8211F Gigabit Ethernet",
.config_init = &rtl8211f_config_init,
- .ack_interrupt = &rtl8211f_ack_interrupt,
+ .read_status = rtlgen_read_status,
.config_intr = &rtl8211f_config_intr,
+ .handle_interrupt = rtl8211f_handle_interrupt,
.suspend = genphy_suspend,
.resume = genphy_resume,
.read_page = rtl821x_read_page,
@@ -662,6 +771,46 @@ static struct phy_driver realtek_drvs[] = {
.read_mmd = rtl822x_read_mmd,
.write_mmd = rtl822x_write_mmd,
}, {
+ PHY_ID_MATCH_EXACT(0x001cc838),
+ .name = "RTL8226-CG 2.5Gbps PHY",
+ .get_features = rtl822x_get_features,
+ .config_aneg = rtl822x_config_aneg,
+ .read_status = rtl822x_read_status,
+ .suspend = genphy_suspend,
+ .resume = rtlgen_resume,
+ .read_page = rtl821x_read_page,
+ .write_page = rtl821x_write_page,
+ }, {
+ PHY_ID_MATCH_EXACT(0x001cc848),
+ .name = "RTL8226B-CG_RTL8221B-CG 2.5Gbps PHY",
+ .get_features = rtl822x_get_features,
+ .config_aneg = rtl822x_config_aneg,
+ .read_status = rtl822x_read_status,
+ .suspend = genphy_suspend,
+ .resume = rtlgen_resume,
+ .read_page = rtl821x_read_page,
+ .write_page = rtl821x_write_page,
+ }, {
+ PHY_ID_MATCH_EXACT(0x001cc849),
+ .name = "RTL8221B-VB-CG 2.5Gbps PHY",
+ .get_features = rtl822x_get_features,
+ .config_aneg = rtl822x_config_aneg,
+ .read_status = rtl822x_read_status,
+ .suspend = genphy_suspend,
+ .resume = rtlgen_resume,
+ .read_page = rtl821x_read_page,
+ .write_page = rtl821x_write_page,
+ }, {
+ PHY_ID_MATCH_EXACT(0x001cc84a),
+ .name = "RTL8221B-VM-CG 2.5Gbps PHY",
+ .get_features = rtl822x_get_features,
+ .config_aneg = rtl822x_config_aneg,
+ .read_status = rtl822x_read_status,
+ .suspend = genphy_suspend,
+ .resume = rtlgen_resume,
+ .read_page = rtl821x_read_page,
+ .write_page = rtl821x_write_page,
+ }, {
PHY_ID_MATCH_EXACT(0x001cc961),
.name = "RTL8366RB Gigabit Ethernet",
.config_init = &rtl8366rb_config_init,
@@ -670,8 +819,8 @@ static struct phy_driver realtek_drvs[] = {
* irq is requested and ACKed by reading the status register,
* which is done by the irqchip code.
*/
- .ack_interrupt = genphy_no_ack_interrupt,
.config_intr = genphy_no_config_intr,
+ .handle_interrupt = genphy_handle_interrupt_no_ack,
.suspend = genphy_suspend,
.resume = genphy_resume,
},
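A note on the Realtek conversion above: rtl821x_handle_interrupt() reads RTL821x_INER and masks RTL821x_INSR with it instead of hardcoding a status mask, because the same handler serves the 8211B/DN/E variants, which enable different interrupt sources; anything not currently enabled is treated as not-ours and reported as IRQ_NONE.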
diff --git a/drivers/net/phy/smsc.c b/drivers/net/phy/smsc.c
index 0fc39ac5ca88..33372756a451 100644
--- a/drivers/net/phy/smsc.c
+++ b/drivers/net/phy/smsc.c
@@ -48,6 +48,13 @@ struct smsc_phy_priv {
struct clk *refclk;
};
+static int smsc_phy_ack_interrupt(struct phy_device *phydev)
+{
+ int rc = phy_read(phydev, MII_LAN83C185_ISF);
+
+ return rc < 0 ? rc : 0;
+}
+
static int smsc_phy_config_intr(struct phy_device *phydev)
{
struct smsc_phy_priv *priv = phydev->priv;
@@ -55,21 +62,47 @@ static int smsc_phy_config_intr(struct phy_device *phydev)
int rc;
if (phydev->interrupts == PHY_INTERRUPT_ENABLED) {
+ rc = smsc_phy_ack_interrupt(phydev);
+ if (rc)
+ return rc;
+
intmask = MII_LAN83C185_ISF_INT4 | MII_LAN83C185_ISF_INT6;
if (priv->energy_enable)
intmask |= MII_LAN83C185_ISF_INT7;
- }
+ rc = phy_write(phydev, MII_LAN83C185_IM, intmask);
+ } else {
+ rc = phy_write(phydev, MII_LAN83C185_IM, intmask);
+ if (rc)
+ return rc;
- rc = phy_write(phydev, MII_LAN83C185_IM, intmask);
+ rc = smsc_phy_ack_interrupt(phydev);
+ }
return rc < 0 ? rc : 0;
}
-static int smsc_phy_ack_interrupt(struct phy_device *phydev)
+static irqreturn_t smsc_phy_handle_interrupt(struct phy_device *phydev)
{
- int rc = phy_read (phydev, MII_LAN83C185_ISF);
+ int irq_status, irq_enabled;
- return rc < 0 ? rc : 0;
+ irq_enabled = phy_read(phydev, MII_LAN83C185_IM);
+ if (irq_enabled < 0) {
+ phy_error(phydev);
+ return IRQ_NONE;
+ }
+
+ irq_status = phy_read(phydev, MII_LAN83C185_ISF);
+ if (irq_status < 0) {
+ phy_error(phydev);
+ return IRQ_NONE;
+ }
+
+ if (!(irq_status & irq_enabled))
+ return IRQ_NONE;
+
+ phy_trigger_machine(phydev);
+
+ return IRQ_HANDLED;
}
static int smsc_phy_config_init(struct phy_device *phydev)
@@ -314,8 +347,8 @@ static struct phy_driver smsc_phy_driver[] = {
.soft_reset = smsc_phy_reset,
/* IRQ related */
- .ack_interrupt = smsc_phy_ack_interrupt,
.config_intr = smsc_phy_config_intr,
+ .handle_interrupt = smsc_phy_handle_interrupt,
.suspend = genphy_suspend,
.resume = genphy_resume,
@@ -333,8 +366,8 @@ static struct phy_driver smsc_phy_driver[] = {
.soft_reset = smsc_phy_reset,
/* IRQ related */
- .ack_interrupt = smsc_phy_ack_interrupt,
.config_intr = smsc_phy_config_intr,
+ .handle_interrupt = smsc_phy_handle_interrupt,
/* Statistics */
.get_sset_count = smsc_get_sset_count,
@@ -362,8 +395,8 @@ static struct phy_driver smsc_phy_driver[] = {
.config_aneg = lan87xx_config_aneg,
/* IRQ related */
- .ack_interrupt = smsc_phy_ack_interrupt,
.config_intr = smsc_phy_config_intr,
+ .handle_interrupt = smsc_phy_handle_interrupt,
/* Statistics */
.get_sset_count = smsc_get_sset_count,
@@ -385,8 +418,8 @@ static struct phy_driver smsc_phy_driver[] = {
.config_init = lan911x_config_init,
/* IRQ related */
- .ack_interrupt = smsc_phy_ack_interrupt,
.config_intr = smsc_phy_config_intr,
+ .handle_interrupt = smsc_phy_handle_interrupt,
.suspend = genphy_suspend,
.resume = genphy_resume,
@@ -410,8 +443,8 @@ static struct phy_driver smsc_phy_driver[] = {
.config_aneg = lan87xx_config_aneg_ext,
/* IRQ related */
- .ack_interrupt = smsc_phy_ack_interrupt,
.config_intr = smsc_phy_config_intr,
+ .handle_interrupt = smsc_phy_handle_interrupt,
/* Statistics */
.get_sset_count = smsc_get_sset_count,
@@ -436,8 +469,8 @@ static struct phy_driver smsc_phy_driver[] = {
.soft_reset = smsc_phy_reset,
/* IRQ related */
- .ack_interrupt = smsc_phy_ack_interrupt,
.config_intr = smsc_phy_config_intr,
+ .handle_interrupt = smsc_phy_handle_interrupt,
/* Statistics */
.get_sset_count = smsc_get_sset_count,
diff --git a/drivers/net/phy/ste10Xp.c b/drivers/net/phy/ste10Xp.c
index d735a01380ed..431fe5e0ce31 100644
--- a/drivers/net/phy/ste10Xp.c
+++ b/drivers/net/phy/ste10Xp.c
@@ -48,32 +48,55 @@ static int ste10Xp_config_init(struct phy_device *phydev)
return 0;
}
+static int ste10Xp_ack_interrupt(struct phy_device *phydev)
+{
+ int err = phy_read(phydev, MII_XCIIS);
+
+ if (err < 0)
+ return err;
+
+ return 0;
+}
+
static int ste10Xp_config_intr(struct phy_device *phydev)
{
- int err, value;
+ int err;
if (phydev->interrupts == PHY_INTERRUPT_ENABLED) {
+ /* clear any pending interrupts */
+ err = ste10Xp_ack_interrupt(phydev);
+ if (err)
+ return err;
+
/* Enable all STe101P interrupts (PR12) */
err = phy_write(phydev, MII_XIE, MII_XIE_DEFAULT_MASK);
- /* clear any pending interrupts */
- if (err == 0) {
- value = phy_read(phydev, MII_XCIIS);
- if (value < 0)
- err = value;
- }
- } else
+ } else {
err = phy_write(phydev, MII_XIE, 0);
+ if (err)
+ return err;
+
+ err = ste10Xp_ack_interrupt(phydev);
+ }
return err;
}
-static int ste10Xp_ack_interrupt(struct phy_device *phydev)
+static irqreturn_t ste10Xp_handle_interrupt(struct phy_device *phydev)
{
- int err = phy_read(phydev, MII_XCIIS);
- if (err < 0)
- return err;
+ int irq_status;
- return 0;
+ irq_status = phy_read(phydev, MII_XCIIS);
+ if (irq_status < 0) {
+ phy_error(phydev);
+ return IRQ_NONE;
+ }
+
+ if (!(irq_status & MII_XIE_DEFAULT_MASK))
+ return IRQ_NONE;
+
+ phy_trigger_machine(phydev);
+
+ return IRQ_HANDLED;
}
static struct phy_driver ste10xp_pdriver[] = {
@@ -83,8 +106,8 @@ static struct phy_driver ste10xp_pdriver[] = {
.name = "STe101p",
/* PHY_BASIC_FEATURES */
.config_init = ste10Xp_config_init,
- .ack_interrupt = ste10Xp_ack_interrupt,
.config_intr = ste10Xp_config_intr,
+ .handle_interrupt = ste10Xp_handle_interrupt,
.suspend = genphy_suspend,
.resume = genphy_resume,
}, {
@@ -93,8 +116,8 @@ static struct phy_driver ste10xp_pdriver[] = {
.name = "STe100p",
/* PHY_BASIC_FEATURES */
.config_init = ste10Xp_config_init,
- .ack_interrupt = ste10Xp_ack_interrupt,
.config_intr = ste10Xp_config_intr,
+ .handle_interrupt = ste10Xp_handle_interrupt,
.suspend = genphy_suspend,
.resume = genphy_resume,
} };
diff --git a/drivers/net/phy/vitesse.c b/drivers/net/phy/vitesse.c
index bb680352708a..16704e243162 100644
--- a/drivers/net/phy/vitesse.c
+++ b/drivers/net/phy/vitesse.c
@@ -40,6 +40,11 @@
#define MII_VSC8244_ISTAT_SPEED 0x4000
#define MII_VSC8244_ISTAT_LINK 0x2000
#define MII_VSC8244_ISTAT_DUPLEX 0x1000
+#define MII_VSC8244_ISTAT_MASK (MII_VSC8244_ISTAT_SPEED | \
+ MII_VSC8244_ISTAT_LINK | \
+ MII_VSC8244_ISTAT_DUPLEX)
+
+#define MII_VSC8221_ISTAT_MASK MII_VSC8244_ISTAT_LINK
/* Vitesse Auxiliary Control/Status Register */
#define MII_VSC8244_AUX_CONSTAT 0x1c
@@ -270,25 +275,14 @@ static int vsc8601_config_init(struct phy_device *phydev)
return 0;
}
-static int vsc824x_ack_interrupt(struct phy_device *phydev)
-{
- int err = 0;
-
- /* Don't bother to ACK the interrupts if interrupts
- * are disabled. The 824x cannot clear the interrupts
- * if they are disabled.
- */
- if (phydev->interrupts == PHY_INTERRUPT_ENABLED)
- err = phy_read(phydev, MII_VSC8244_ISTAT);
-
- return (err < 0) ? err : 0;
-}
-
static int vsc82xx_config_intr(struct phy_device *phydev)
{
int err;
if (phydev->interrupts == PHY_INTERRUPT_ENABLED)
+ /* Don't bother to ACK the interrupts since the 824x cannot
+ * clear the interrupts if they are disabled.
+ */
err = phy_write(phydev, MII_VSC8244_IMASK,
(phydev->drv->phy_id == PHY_ID_VSC8234 ||
phydev->drv->phy_id == PHY_ID_VSC8244 ||
@@ -311,6 +305,31 @@ static int vsc82xx_config_intr(struct phy_device *phydev)
return err;
}
+static irqreturn_t vsc82xx_handle_interrupt(struct phy_device *phydev)
+{
+ int irq_status, irq_mask;
+
+ if (phydev->drv->phy_id == PHY_ID_VSC8244 ||
+ phydev->drv->phy_id == PHY_ID_VSC8572 ||
+ phydev->drv->phy_id == PHY_ID_VSC8601)
+ irq_mask = MII_VSC8244_ISTAT_MASK;
+ else
+ irq_mask = MII_VSC8221_ISTAT_MASK;
+
+ irq_status = phy_read(phydev, MII_VSC8244_ISTAT);
+ if (irq_status < 0) {
+ phy_error(phydev);
+ return IRQ_NONE;
+ }
+
+ if (!(irq_status & irq_mask))
+ return IRQ_NONE;
+
+ phy_trigger_machine(phydev);
+
+ return IRQ_HANDLED;
+}
+
static int vsc8221_config_init(struct phy_device *phydev)
{
int err;
@@ -390,8 +409,8 @@ static struct phy_driver vsc82xx_driver[] = {
/* PHY_GBIT_FEATURES */
.config_init = &vsc824x_config_init,
.config_aneg = &vsc82x4_config_aneg,
- .ack_interrupt = &vsc824x_ack_interrupt,
.config_intr = &vsc82xx_config_intr,
+ .handle_interrupt = &vsc82xx_handle_interrupt,
}, {
.phy_id = PHY_ID_VSC8244,
.name = "Vitesse VSC8244",
@@ -399,8 +418,8 @@ static struct phy_driver vsc82xx_driver[] = {
/* PHY_GBIT_FEATURES */
.config_init = &vsc824x_config_init,
.config_aneg = &vsc82x4_config_aneg,
- .ack_interrupt = &vsc824x_ack_interrupt,
.config_intr = &vsc82xx_config_intr,
+ .handle_interrupt = &vsc82xx_handle_interrupt,
}, {
.phy_id = PHY_ID_VSC8572,
.name = "Vitesse VSC8572",
@@ -408,16 +427,16 @@ static struct phy_driver vsc82xx_driver[] = {
/* PHY_GBIT_FEATURES */
.config_init = &vsc824x_config_init,
.config_aneg = &vsc82x4_config_aneg,
- .ack_interrupt = &vsc824x_ack_interrupt,
.config_intr = &vsc82xx_config_intr,
+ .handle_interrupt = &vsc82xx_handle_interrupt,
}, {
.phy_id = PHY_ID_VSC8601,
.name = "Vitesse VSC8601",
.phy_id_mask = 0x000ffff0,
/* PHY_GBIT_FEATURES */
.config_init = &vsc8601_config_init,
- .ack_interrupt = &vsc824x_ack_interrupt,
.config_intr = &vsc82xx_config_intr,
+ .handle_interrupt = &vsc82xx_handle_interrupt,
}, {
.phy_id = PHY_ID_VSC7385,
.name = "Vitesse VSC7385",
@@ -461,8 +480,8 @@ static struct phy_driver vsc82xx_driver[] = {
/* PHY_GBIT_FEATURES */
.config_init = &vsc824x_config_init,
.config_aneg = &vsc82x4_config_aneg,
- .ack_interrupt = &vsc824x_ack_interrupt,
.config_intr = &vsc82xx_config_intr,
+ .handle_interrupt = &vsc82xx_handle_interrupt,
}, {
/* Vitesse 8221 */
.phy_id = PHY_ID_VSC8221,
@@ -470,8 +489,8 @@ static struct phy_driver vsc82xx_driver[] = {
.name = "Vitesse VSC8221",
/* PHY_GBIT_FEATURES */
.config_init = &vsc8221_config_init,
- .ack_interrupt = &vsc824x_ack_interrupt,
.config_intr = &vsc82xx_config_intr,
+ .handle_interrupt = &vsc82xx_handle_interrupt,
}, {
/* Vitesse 8211 */
.phy_id = PHY_ID_VSC8211,
@@ -479,8 +498,8 @@ static struct phy_driver vsc82xx_driver[] = {
.name = "Vitesse VSC8211",
/* PHY_GBIT_FEATURES */
.config_init = &vsc8221_config_init,
- .ack_interrupt = &vsc824x_ack_interrupt,
.config_intr = &vsc82xx_config_intr,
+ .handle_interrupt = &vsc82xx_handle_interrupt,
} };
module_phy_driver(vsc82xx_driver);
diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
index 07f1f3933927..c19dac21c468 100644
--- a/drivers/net/team/team.c
+++ b/drivers/net/team/team.c
@@ -4,6 +4,7 @@
* Copyright (c) 2011 Jiri Pirko <jpirko@redhat.com>
*/
+#include <linux/ethtool.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/module.h>
@@ -975,11 +976,11 @@ static void team_port_disable(struct team *team,
}
#define TEAM_VLAN_FEATURES (NETIF_F_HW_CSUM | NETIF_F_SG | \
- NETIF_F_FRAGLIST | NETIF_F_ALL_TSO | \
+ NETIF_F_FRAGLIST | NETIF_F_GSO_SOFTWARE | \
NETIF_F_HIGHDMA | NETIF_F_LRO)
#define TEAM_ENC_FEATURES (NETIF_F_HW_CSUM | NETIF_F_SG | \
- NETIF_F_RXCSUM | NETIF_F_ALL_TSO)
+ NETIF_F_RXCSUM | NETIF_F_GSO_SOFTWARE)
static void __team_compute_features(struct team *team)
{
@@ -1009,8 +1010,7 @@ static void __team_compute_features(struct team *team)
team->dev->vlan_features = vlan_features;
team->dev->hw_enc_features = enc_features | NETIF_F_GSO_ENCAP_ALL |
NETIF_F_HW_VLAN_CTAG_TX |
- NETIF_F_HW_VLAN_STAG_TX |
- NETIF_F_GSO_UDP_L4;
+ NETIF_F_HW_VLAN_STAG_TX;
team->dev->hard_header_len = max_hard_header_len;
team->dev->priv_flags &= ~IFF_XMIT_DST_RELEASE;
@@ -2175,7 +2175,7 @@ static void team_setup(struct net_device *dev)
NETIF_F_HW_VLAN_CTAG_RX |
NETIF_F_HW_VLAN_CTAG_FILTER;
- dev->hw_features |= NETIF_F_GSO_ENCAP_ALL | NETIF_F_GSO_UDP_L4;
+ dev->hw_features |= NETIF_F_GSO_ENCAP_ALL;
dev->features |= dev->hw_features;
dev->features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_STAG_TX;
}
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index cd06cae76035..2dc1988a8973 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -107,17 +107,6 @@ struct tap_filter {
#define TUN_FLOW_EXPIRE (3 * HZ)
-struct tun_pcpu_stats {
- u64_stats_t rx_packets;
- u64_stats_t rx_bytes;
- u64_stats_t tx_packets;
- u64_stats_t tx_bytes;
- struct u64_stats_sync syncp;
- u32 rx_dropped;
- u32 tx_dropped;
- u32 rx_frame_errors;
-};
-
/* A tun_file connects an open character device to a tuntap netdevice. It
* also contains all socket related structures (except sock_fprog and tap_filter)
* to serve as one transmit queue for tuntap device. The sock_fprog and
@@ -207,7 +196,7 @@ struct tun_struct {
void *security;
u32 flow_count;
u32 rx_batched;
- struct tun_pcpu_stats __percpu *pcpu_stats;
+ atomic_long_t rx_frame_errors;
struct bpf_prog __rcu *xdp_prog;
struct tun_prog __rcu *steering_prog;
struct tun_prog __rcu *filter_prog;
@@ -1066,7 +1055,7 @@ static netdev_tx_t tun_net_xmit(struct sk_buff *skb, struct net_device *dev)
return NETDEV_TX_OK;
drop:
- this_cpu_inc(tun->pcpu_stats->tx_dropped);
+ atomic_long_inc(&dev->tx_dropped);
skb_tx_error(skb);
kfree_skb(skb);
rcu_read_unlock();
@@ -1103,37 +1092,12 @@ static void tun_set_headroom(struct net_device *dev, int new_hr)
static void
tun_net_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
{
- u32 rx_dropped = 0, tx_dropped = 0, rx_frame_errors = 0;
struct tun_struct *tun = netdev_priv(dev);
- struct tun_pcpu_stats *p;
- int i;
-
- for_each_possible_cpu(i) {
- u64 rxpackets, rxbytes, txpackets, txbytes;
- unsigned int start;
-
- p = per_cpu_ptr(tun->pcpu_stats, i);
- do {
- start = u64_stats_fetch_begin(&p->syncp);
- rxpackets = u64_stats_read(&p->rx_packets);
- rxbytes = u64_stats_read(&p->rx_bytes);
- txpackets = u64_stats_read(&p->tx_packets);
- txbytes = u64_stats_read(&p->tx_bytes);
- } while (u64_stats_fetch_retry(&p->syncp, start));
- stats->rx_packets += rxpackets;
- stats->rx_bytes += rxbytes;
- stats->tx_packets += txpackets;
- stats->tx_bytes += txbytes;
+ dev_get_tstats64(dev, stats);
- /* u32 counters */
- rx_dropped += p->rx_dropped;
- rx_frame_errors += p->rx_frame_errors;
- tx_dropped += p->tx_dropped;
- }
- stats->rx_dropped = rx_dropped;
- stats->rx_frame_errors = rx_frame_errors;
- stats->tx_dropped = tx_dropped;
+ stats->rx_frame_errors +=
+ (unsigned long)atomic_long_read(&tun->rx_frame_errors);
}
static int tun_xdp_set(struct net_device *dev, struct bpf_prog *prog,
@@ -1247,7 +1211,7 @@ resample:
void *frame = tun_xdp_to_ptr(xdp);
if (__ptr_ring_produce(&tfile->tx_ring, frame)) {
- this_cpu_inc(tun->pcpu_stats->tx_dropped);
+ atomic_long_inc(&dev->tx_dropped);
xdp_return_frame_rx_napi(xdp);
drops++;
}
@@ -1283,7 +1247,7 @@ static const struct net_device_ops tap_netdev_ops = {
.ndo_select_queue = tun_select_queue,
.ndo_features_check = passthru_features_check,
.ndo_set_rx_headroom = tun_set_headroom,
- .ndo_get_stats64 = tun_net_get_stats64,
+ .ndo_get_stats64 = dev_get_tstats64,
.ndo_bpf = tun_xdp,
.ndo_xdp_xmit = tun_xdp_xmit,
.ndo_change_carrier = tun_net_change_carrier,
@@ -1577,7 +1541,7 @@ static int tun_xdp_act(struct tun_struct *tun, struct bpf_prog *xdp_prog,
trace_xdp_exception(tun->dev, xdp_prog, act);
fallthrough;
case XDP_DROP:
- this_cpu_inc(tun->pcpu_stats->rx_dropped);
+ atomic_long_inc(&tun->dev->rx_dropped);
break;
}
@@ -1683,7 +1647,6 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
size_t total_len = iov_iter_count(from);
size_t len = total_len, align = tun->align, linear;
struct virtio_net_hdr gso = { 0 };
- struct tun_pcpu_stats *stats;
int good_linear;
int copylen;
bool zerocopy = false;
@@ -1752,7 +1715,7 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
*/
skb = tun_build_skb(tun, tfile, from, &gso, len, &skb_xdp);
if (IS_ERR(skb)) {
- this_cpu_inc(tun->pcpu_stats->rx_dropped);
+ atomic_long_inc(&tun->dev->rx_dropped);
return PTR_ERR(skb);
}
if (!skb)
@@ -1781,7 +1744,7 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
if (IS_ERR(skb)) {
if (PTR_ERR(skb) != -EAGAIN)
- this_cpu_inc(tun->pcpu_stats->rx_dropped);
+ atomic_long_inc(&tun->dev->rx_dropped);
if (frags)
mutex_unlock(&tfile->napi_mutex);
return PTR_ERR(skb);
@@ -1795,7 +1758,7 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
if (err) {
err = -EFAULT;
drop:
- this_cpu_inc(tun->pcpu_stats->rx_dropped);
+ atomic_long_inc(&tun->dev->rx_dropped);
kfree_skb(skb);
if (frags) {
tfile->napi.skb = NULL;
@@ -1807,7 +1770,7 @@ drop:
}
if (virtio_net_hdr_to_skb(skb, &gso, tun_is_little_endian(tun))) {
- this_cpu_inc(tun->pcpu_stats->rx_frame_errors);
+ atomic_long_inc(&tun->rx_frame_errors);
kfree_skb(skb);
if (frags) {
tfile->napi.skb = NULL;
@@ -1830,7 +1793,7 @@ drop:
pi.proto = htons(ETH_P_IPV6);
break;
default:
- this_cpu_inc(tun->pcpu_stats->rx_dropped);
+ atomic_long_inc(&tun->dev->rx_dropped);
kfree_skb(skb);
return -EINVAL;
}
@@ -1910,7 +1873,7 @@ drop:
skb_headlen(skb));
if (unlikely(headlen > skb_headlen(skb))) {
- this_cpu_inc(tun->pcpu_stats->rx_dropped);
+ atomic_long_inc(&tun->dev->rx_dropped);
napi_free_frags(&tfile->napi);
rcu_read_unlock();
mutex_unlock(&tfile->napi_mutex);
@@ -1942,12 +1905,9 @@ drop:
}
rcu_read_unlock();
- stats = get_cpu_ptr(tun->pcpu_stats);
- u64_stats_update_begin(&stats->syncp);
- u64_stats_inc(&stats->rx_packets);
- u64_stats_add(&stats->rx_bytes, len);
- u64_stats_update_end(&stats->syncp);
- put_cpu_ptr(stats);
+ preempt_disable();
+ dev_sw_netstats_rx_add(tun->dev, len);
+ preempt_enable();
if (rxhash)
tun_flow_update(tun, rxhash, tfile);
@@ -1982,7 +1942,6 @@ static ssize_t tun_put_user_xdp(struct tun_struct *tun,
{
int vnet_hdr_sz = 0;
size_t size = xdp_frame->len;
- struct tun_pcpu_stats *stats;
size_t ret;
if (tun->flags & IFF_VNET_HDR) {
@@ -1999,12 +1958,9 @@ static ssize_t tun_put_user_xdp(struct tun_struct *tun,
ret = copy_to_iter(xdp_frame->data, size, iter) + vnet_hdr_sz;
- stats = get_cpu_ptr(tun->pcpu_stats);
- u64_stats_update_begin(&stats->syncp);
- u64_stats_inc(&stats->tx_packets);
- u64_stats_add(&stats->tx_bytes, ret);
- u64_stats_update_end(&stats->syncp);
- put_cpu_ptr(tun->pcpu_stats);
+ preempt_disable();
+ dev_sw_netstats_tx_add(tun->dev, 1, ret);
+ preempt_enable();
return ret;
}
@@ -2016,7 +1972,6 @@ static ssize_t tun_put_user(struct tun_struct *tun,
struct iov_iter *iter)
{
struct tun_pi pi = { 0, skb->protocol };
- struct tun_pcpu_stats *stats;
ssize_t total;
int vlan_offset = 0;
int vlan_hlen = 0;
@@ -2094,12 +2049,9 @@ static ssize_t tun_put_user(struct tun_struct *tun,
done:
/* caller is in process context, */
- stats = get_cpu_ptr(tun->pcpu_stats);
- u64_stats_update_begin(&stats->syncp);
- u64_stats_inc(&stats->tx_packets);
- u64_stats_add(&stats->tx_bytes, skb->len + vlan_hlen);
- u64_stats_update_end(&stats->syncp);
- put_cpu_ptr(tun->pcpu_stats);
+ preempt_disable();
+ dev_sw_netstats_tx_add(tun->dev, 1, skb->len + vlan_hlen);
+ preempt_enable();
return total;
}
@@ -2243,11 +2195,11 @@ static void tun_free_netdev(struct net_device *dev)
BUG_ON(!(list_empty(&tun->disabled)));
- free_percpu(tun->pcpu_stats);
- /* We clear pcpu_stats so that tun_set_iff() can tell if
+ free_percpu(dev->tstats);
+ /* We clear tstats so that tun_set_iff() can tell if
* tun_free_netdev() has been called from register_netdevice().
*/
- tun->pcpu_stats = NULL;
+ dev->tstats = NULL;
tun_flow_uninit(tun);
security_tun_dev_free_security(tun->security);
@@ -2378,7 +2330,6 @@ static int tun_xdp_one(struct tun_struct *tun,
unsigned int datasize = xdp->data_end - xdp->data;
struct tun_xdp_hdr *hdr = xdp->data_hard_start;
struct virtio_net_hdr *gso = &hdr->gso;
- struct tun_pcpu_stats *stats;
struct bpf_prog *xdp_prog;
struct sk_buff *skb = NULL;
u32 rxhash = 0, act;
@@ -2436,7 +2387,7 @@ build:
skb_put(skb, xdp->data_end - xdp->data);
if (virtio_net_hdr_to_skb(skb, gso, tun_is_little_endian(tun))) {
- this_cpu_inc(tun->pcpu_stats->rx_frame_errors);
+ atomic_long_inc(&tun->rx_frame_errors);
kfree_skb(skb);
err = -EINVAL;
goto out;
@@ -2459,14 +2410,10 @@ build:
netif_receive_skb(skb);
- /* No need for get_cpu_ptr() here since this function is
+ /* No need to disable preemption here since this function is
* always called with bh disabled
*/
- stats = this_cpu_ptr(tun->pcpu_stats);
- u64_stats_update_begin(&stats->syncp);
- u64_stats_inc(&stats->rx_packets);
- u64_stats_add(&stats->rx_bytes, datasize);
- u64_stats_update_end(&stats->syncp);
+ dev_sw_netstats_rx_add(tun->dev, datasize);
if (rxhash)
tun_flow_update(tun, rxhash, tfile);
@@ -2759,8 +2706,8 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
tun->rx_batched = 0;
RCU_INIT_POINTER(tun->steering_prog, NULL);
- tun->pcpu_stats = netdev_alloc_pcpu_stats(struct tun_pcpu_stats);
- if (!tun->pcpu_stats) {
+ dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
+ if (!dev->tstats) {
err = -ENOMEM;
goto err_free_dev;
}
@@ -2815,16 +2762,16 @@ err_detach:
tun_detach_all(dev);
/* We are here because register_netdevice() has failed.
* If register_netdevice() already called tun_free_netdev()
- * while dealing with the error, tun->pcpu_stats has been cleared.
+ * while dealing with the error, dev->tstats has been cleared.
*/
- if (!tun->pcpu_stats)
+ if (!dev->tstats)
goto err_free_dev;
err_free_flow:
tun_flow_uninit(tun);
security_tun_dev_free_security(tun->security);
err_free_stat:
- free_percpu(tun->pcpu_stats);
+ free_percpu(dev->tstats);
err_free_dev:
free_netdev(dev);
return err;
@@ -3132,10 +3079,19 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
"Linktype set failed because interface is up\n");
ret = -EBUSY;
} else {
+ ret = call_netdevice_notifiers(NETDEV_PRE_TYPE_CHANGE,
+ tun->dev);
+ ret = notifier_to_errno(ret);
+ if (ret) {
+ netif_info(tun, drv, tun->dev,
+ "Refused to change device type\n");
+ break;
+ }
tun->dev->type = (int) arg;
netif_info(tun, drv, tun->dev, "linktype set to %d\n",
tun->dev->type);
- ret = 0;
+ call_netdevice_notifiers(NETDEV_POST_TYPE_CHANGE,
+ tun->dev);
}
break;
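The tun rework above is the standard conversion to the core's software statistics: drop the driver-private per-cpu struct, allocate dev->tstats, bump it with the dev_sw_netstats_*_add() helpers (under preemption protection when not already in BH context), and report through dev_get_tstats64(); only the odd counter out, rx_frame_errors, stays driver-private as an atomic_long_t. A condensed sketch of that pattern, with illustrative function names:

#include <linux/netdevice.h>

static int example_init_stats(struct net_device *dev)
{
	dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
	return dev->tstats ? 0 : -ENOMEM;
}

static void example_account_rx(struct net_device *dev, unsigned int len)
{
	preempt_disable();			/* helper uses this_cpu counters */
	dev_sw_netstats_rx_add(dev, len);	/* one packet, len bytes */
	preempt_enable();
}

/* readers: set .ndo_get_stats64 = dev_get_tstats64 in net_device_ops */

static void example_uninit_stats(struct net_device *dev)
{
	free_percpu(dev->tstats);
	dev->tstats = NULL;
}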
diff --git a/drivers/net/usb/Kconfig b/drivers/net/usb/Kconfig
index b46993d5f997..1e3719028780 100644
--- a/drivers/net/usb/Kconfig
+++ b/drivers/net/usb/Kconfig
@@ -628,4 +628,13 @@ config USB_NET_AQC111
This driver should work with at least the following devices:
* Aquantia AQtion USB to 5GbE
+config USB_RTL8153_ECM
+ tristate "RTL8153 ECM support"
+ depends on USB_NET_CDCETHER && (USB_RTL8152 || USB_RTL8152=n)
+ default y
+ help
+ This option adds support for ECM mode on the RTL8153 Ethernet
+ adapter. It is used when CONFIG_USB_RTL8152 is not set, or when
+ the RTL8153 device is not supported by the r8152 driver.
+
endif # USB_NET_DRIVERS
diff --git a/drivers/net/usb/Makefile b/drivers/net/usb/Makefile
index 99fd12be2111..4964f7b326fb 100644
--- a/drivers/net/usb/Makefile
+++ b/drivers/net/usb/Makefile
@@ -41,3 +41,4 @@ obj-$(CONFIG_USB_NET_QMI_WWAN) += qmi_wwan.o
obj-$(CONFIG_USB_NET_CDC_MBIM) += cdc_mbim.o
obj-$(CONFIG_USB_NET_CH9200) += ch9200.o
obj-$(CONFIG_USB_NET_AQC111) += aqc111.o
+obj-$(CONFIG_USB_RTL8153_ECM) += r8153_ecm.o
diff --git a/drivers/net/usb/aqc111.c b/drivers/net/usb/aqc111.c
index 0717c18015c9..73b97f4cc1ec 100644
--- a/drivers/net/usb/aqc111.c
+++ b/drivers/net/usb/aqc111.c
@@ -641,7 +641,7 @@ static const struct net_device_ops aqc111_netdev_ops = {
.ndo_stop = usbnet_stop,
.ndo_start_xmit = usbnet_start_xmit,
.ndo_tx_timeout = usbnet_tx_timeout,
- .ndo_get_stats64 = usbnet_get_stats64,
+ .ndo_get_stats64 = dev_get_tstats64,
.ndo_change_mtu = aqc111_change_mtu,
.ndo_set_mac_address = aqc111_set_mac_addr,
.ndo_validate_addr = eth_validate_addr,
diff --git a/drivers/net/usb/asix_devices.c b/drivers/net/usb/asix_devices.c
index ef548beba684..6e13d8165852 100644
--- a/drivers/net/usb/asix_devices.c
+++ b/drivers/net/usb/asix_devices.c
@@ -194,7 +194,7 @@ static const struct net_device_ops ax88172_netdev_ops = {
.ndo_start_xmit = usbnet_start_xmit,
.ndo_tx_timeout = usbnet_tx_timeout,
.ndo_change_mtu = usbnet_change_mtu,
- .ndo_get_stats64 = usbnet_get_stats64,
+ .ndo_get_stats64 = dev_get_tstats64,
.ndo_set_mac_address = eth_mac_addr,
.ndo_validate_addr = eth_validate_addr,
.ndo_do_ioctl = asix_ioctl,
@@ -580,7 +580,7 @@ static const struct net_device_ops ax88772_netdev_ops = {
.ndo_start_xmit = usbnet_start_xmit,
.ndo_tx_timeout = usbnet_tx_timeout,
.ndo_change_mtu = usbnet_change_mtu,
- .ndo_get_stats64 = usbnet_get_stats64,
+ .ndo_get_stats64 = dev_get_tstats64,
.ndo_set_mac_address = asix_set_mac_address,
.ndo_validate_addr = eth_validate_addr,
.ndo_do_ioctl = asix_ioctl,
@@ -1050,7 +1050,7 @@ static const struct net_device_ops ax88178_netdev_ops = {
.ndo_stop = usbnet_stop,
.ndo_start_xmit = usbnet_start_xmit,
.ndo_tx_timeout = usbnet_tx_timeout,
- .ndo_get_stats64 = usbnet_get_stats64,
+ .ndo_get_stats64 = dev_get_tstats64,
.ndo_set_mac_address = asix_set_mac_address,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_rx_mode = asix_set_multicast,
diff --git a/drivers/net/usb/ax88172a.c b/drivers/net/usb/ax88172a.c
index fd3a04d98dc1..b404c9462dce 100644
--- a/drivers/net/usb/ax88172a.c
+++ b/drivers/net/usb/ax88172a.c
@@ -120,7 +120,7 @@ static const struct net_device_ops ax88172a_netdev_ops = {
.ndo_start_xmit = usbnet_start_xmit,
.ndo_tx_timeout = usbnet_tx_timeout,
.ndo_change_mtu = usbnet_change_mtu,
- .ndo_get_stats64 = usbnet_get_stats64,
+ .ndo_get_stats64 = dev_get_tstats64,
.ndo_set_mac_address = asix_set_mac_address,
.ndo_validate_addr = eth_validate_addr,
.ndo_do_ioctl = phy_do_ioctl_running,
diff --git a/drivers/net/usb/ax88179_178a.c b/drivers/net/usb/ax88179_178a.c
index 5541f3faedbc..d650b39b6e5d 100644
--- a/drivers/net/usb/ax88179_178a.c
+++ b/drivers/net/usb/ax88179_178a.c
@@ -1031,7 +1031,7 @@ static const struct net_device_ops ax88179_netdev_ops = {
.ndo_stop = usbnet_stop,
.ndo_start_xmit = usbnet_start_xmit,
.ndo_tx_timeout = usbnet_tx_timeout,
- .ndo_get_stats64 = usbnet_get_stats64,
+ .ndo_get_stats64 = dev_get_tstats64,
.ndo_change_mtu = ax88179_change_mtu,
.ndo_set_mac_address = ax88179_set_mac_addr,
.ndo_validate_addr = eth_validate_addr,
diff --git a/drivers/net/usb/cdc_mbim.c b/drivers/net/usb/cdc_mbim.c
index eb100eb33de3..5db66272fc82 100644
--- a/drivers/net/usb/cdc_mbim.c
+++ b/drivers/net/usb/cdc_mbim.c
@@ -98,7 +98,7 @@ static const struct net_device_ops cdc_mbim_netdev_ops = {
.ndo_stop = usbnet_stop,
.ndo_start_xmit = usbnet_start_xmit,
.ndo_tx_timeout = usbnet_tx_timeout,
- .ndo_get_stats64 = usbnet_get_stats64,
+ .ndo_get_stats64 = dev_get_tstats64,
.ndo_change_mtu = cdc_ncm_change_mtu,
.ndo_set_mac_address = eth_mac_addr,
.ndo_validate_addr = eth_validate_addr,
diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c
index e04f588538cc..2bac57d5e8d5 100644
--- a/drivers/net/usb/cdc_ncm.c
+++ b/drivers/net/usb/cdc_ncm.c
@@ -793,7 +793,7 @@ static const struct net_device_ops cdc_ncm_netdev_ops = {
.ndo_start_xmit = usbnet_start_xmit,
.ndo_tx_timeout = usbnet_tx_timeout,
.ndo_set_rx_mode = usbnet_set_rx_mode,
- .ndo_get_stats64 = usbnet_get_stats64,
+ .ndo_get_stats64 = dev_get_tstats64,
.ndo_change_mtu = cdc_ncm_change_mtu,
.ndo_set_mac_address = eth_mac_addr,
.ndo_validate_addr = eth_validate_addr,
@@ -1317,7 +1317,7 @@ cdc_ncm_fill_tx_frame(struct usbnet *dev, struct sk_buff *skb, __le32 sign)
break;
}
- /* calculate frame number withing this NDP */
+ /* calculate frame number within this NDP */
if (ctx->is_ndp16) {
ndplen = le16_to_cpu(ndp.ndp16->wLength);
index = (ndplen - sizeof(struct usb_cdc_ncm_ndp16)) / sizeof(struct usb_cdc_ncm_dpe16) - 1;
diff --git a/drivers/net/usb/dm9601.c b/drivers/net/usb/dm9601.c
index 915ac75b55fc..b5d2ac55a874 100644
--- a/drivers/net/usb/dm9601.c
+++ b/drivers/net/usb/dm9601.c
@@ -343,7 +343,7 @@ static const struct net_device_ops dm9601_netdev_ops = {
.ndo_start_xmit = usbnet_start_xmit,
.ndo_tx_timeout = usbnet_tx_timeout,
.ndo_change_mtu = usbnet_change_mtu,
- .ndo_get_stats64 = usbnet_get_stats64,
+ .ndo_get_stats64 = dev_get_tstats64,
.ndo_validate_addr = eth_validate_addr,
.ndo_do_ioctl = dm9601_ioctl,
.ndo_set_rx_mode = dm9601_set_multicast,
diff --git a/drivers/net/usb/int51x1.c b/drivers/net/usb/int51x1.c
index cb5bc1a7fa5a..ed05f992c612 100644
--- a/drivers/net/usb/int51x1.c
+++ b/drivers/net/usb/int51x1.c
@@ -133,7 +133,7 @@ static const struct net_device_ops int51x1_netdev_ops = {
.ndo_start_xmit = usbnet_start_xmit,
.ndo_tx_timeout = usbnet_tx_timeout,
.ndo_change_mtu = usbnet_change_mtu,
- .ndo_get_stats64 = usbnet_get_stats64,
+ .ndo_get_stats64 = dev_get_tstats64,
.ndo_set_mac_address = eth_mac_addr,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_rx_mode = int51x1_set_multicast,
diff --git a/drivers/net/usb/lan78xx.c b/drivers/net/usb/lan78xx.c
index 65b315bc60ab..bf243edeb064 100644
--- a/drivers/net/usb/lan78xx.c
+++ b/drivers/net/usb/lan78xx.c
@@ -822,20 +822,19 @@ static int lan78xx_read_raw_otp(struct lan78xx_net *dev, u32 offset,
u32 length, u8 *data)
{
int i;
- int ret;
u32 buf;
unsigned long timeout;
- ret = lan78xx_read_reg(dev, OTP_PWR_DN, &buf);
+ lan78xx_read_reg(dev, OTP_PWR_DN, &buf);
if (buf & OTP_PWR_DN_PWRDN_N_) {
/* clear it and wait to be cleared */
- ret = lan78xx_write_reg(dev, OTP_PWR_DN, 0);
+ lan78xx_write_reg(dev, OTP_PWR_DN, 0);
timeout = jiffies + HZ;
do {
usleep_range(1, 10);
- ret = lan78xx_read_reg(dev, OTP_PWR_DN, &buf);
+ lan78xx_read_reg(dev, OTP_PWR_DN, &buf);
if (time_after(jiffies, timeout)) {
netdev_warn(dev->net,
"timeout on OTP_PWR_DN");
@@ -845,18 +844,18 @@ static int lan78xx_read_raw_otp(struct lan78xx_net *dev, u32 offset,
}
for (i = 0; i < length; i++) {
- ret = lan78xx_write_reg(dev, OTP_ADDR1,
+ lan78xx_write_reg(dev, OTP_ADDR1,
((offset + i) >> 8) & OTP_ADDR1_15_11);
- ret = lan78xx_write_reg(dev, OTP_ADDR2,
+ lan78xx_write_reg(dev, OTP_ADDR2,
((offset + i) & OTP_ADDR2_10_3));
- ret = lan78xx_write_reg(dev, OTP_FUNC_CMD, OTP_FUNC_CMD_READ_);
- ret = lan78xx_write_reg(dev, OTP_CMD_GO, OTP_CMD_GO_GO_);
+ lan78xx_write_reg(dev, OTP_FUNC_CMD, OTP_FUNC_CMD_READ_);
+ lan78xx_write_reg(dev, OTP_CMD_GO, OTP_CMD_GO_GO_);
timeout = jiffies + HZ;
do {
udelay(1);
- ret = lan78xx_read_reg(dev, OTP_STATUS, &buf);
+ lan78xx_read_reg(dev, OTP_STATUS, &buf);
if (time_after(jiffies, timeout)) {
netdev_warn(dev->net,
"timeout on OTP_STATUS");
@@ -864,7 +863,7 @@ static int lan78xx_read_raw_otp(struct lan78xx_net *dev, u32 offset,
}
} while (buf & OTP_STATUS_BUSY_);
- ret = lan78xx_read_reg(dev, OTP_RD_DATA, &buf);
+ lan78xx_read_reg(dev, OTP_RD_DATA, &buf);
data[i] = (u8)(buf & 0xFF);
}
@@ -876,20 +875,19 @@ static int lan78xx_write_raw_otp(struct lan78xx_net *dev, u32 offset,
u32 length, u8 *data)
{
int i;
- int ret;
u32 buf;
unsigned long timeout;
- ret = lan78xx_read_reg(dev, OTP_PWR_DN, &buf);
+ lan78xx_read_reg(dev, OTP_PWR_DN, &buf);
if (buf & OTP_PWR_DN_PWRDN_N_) {
/* clear it and wait to be cleared */
- ret = lan78xx_write_reg(dev, OTP_PWR_DN, 0);
+ lan78xx_write_reg(dev, OTP_PWR_DN, 0);
timeout = jiffies + HZ;
do {
udelay(1);
- ret = lan78xx_read_reg(dev, OTP_PWR_DN, &buf);
+ lan78xx_read_reg(dev, OTP_PWR_DN, &buf);
if (time_after(jiffies, timeout)) {
netdev_warn(dev->net,
"timeout on OTP_PWR_DN completion");
@@ -899,21 +897,21 @@ static int lan78xx_write_raw_otp(struct lan78xx_net *dev, u32 offset,
}
/* set to BYTE program mode */
- ret = lan78xx_write_reg(dev, OTP_PRGM_MODE, OTP_PRGM_MODE_BYTE_);
+ lan78xx_write_reg(dev, OTP_PRGM_MODE, OTP_PRGM_MODE_BYTE_);
for (i = 0; i < length; i++) {
- ret = lan78xx_write_reg(dev, OTP_ADDR1,
+ lan78xx_write_reg(dev, OTP_ADDR1,
((offset + i) >> 8) & OTP_ADDR1_15_11);
- ret = lan78xx_write_reg(dev, OTP_ADDR2,
+ lan78xx_write_reg(dev, OTP_ADDR2,
((offset + i) & OTP_ADDR2_10_3));
- ret = lan78xx_write_reg(dev, OTP_PRGM_DATA, data[i]);
- ret = lan78xx_write_reg(dev, OTP_TST_CMD, OTP_TST_CMD_PRGVRFY_);
- ret = lan78xx_write_reg(dev, OTP_CMD_GO, OTP_CMD_GO_GO_);
+ lan78xx_write_reg(dev, OTP_PRGM_DATA, data[i]);
+ lan78xx_write_reg(dev, OTP_TST_CMD, OTP_TST_CMD_PRGVRFY_);
+ lan78xx_write_reg(dev, OTP_CMD_GO, OTP_CMD_GO_GO_);
timeout = jiffies + HZ;
do {
udelay(1);
- ret = lan78xx_read_reg(dev, OTP_STATUS, &buf);
+ lan78xx_read_reg(dev, OTP_STATUS, &buf);
if (time_after(jiffies, timeout)) {
netdev_warn(dev->net,
"Timeout on OTP_STATUS completion");
@@ -1038,7 +1036,6 @@ static void lan78xx_deferred_multicast_write(struct work_struct *param)
container_of(param, struct lan78xx_priv, set_multicast);
struct lan78xx_net *dev = pdata->dev;
int i;
- int ret;
netif_dbg(dev, drv, dev->net, "deferred multicast write 0x%08x\n",
pdata->rfe_ctl);
@@ -1047,14 +1044,14 @@ static void lan78xx_deferred_multicast_write(struct work_struct *param)
DP_SEL_VHF_HASH_LEN, pdata->mchash_table);
for (i = 1; i < NUM_OF_MAF; i++) {
- ret = lan78xx_write_reg(dev, MAF_HI(i), 0);
- ret = lan78xx_write_reg(dev, MAF_LO(i),
+ lan78xx_write_reg(dev, MAF_HI(i), 0);
+ lan78xx_write_reg(dev, MAF_LO(i),
pdata->pfilter_table[i][1]);
- ret = lan78xx_write_reg(dev, MAF_HI(i),
+ lan78xx_write_reg(dev, MAF_HI(i),
pdata->pfilter_table[i][0]);
}
- ret = lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);
+ lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);
}
static void lan78xx_set_multicast(struct net_device *netdev)
@@ -1124,7 +1121,6 @@ static int lan78xx_update_flowcontrol(struct lan78xx_net *dev, u8 duplex,
u16 lcladv, u16 rmtadv)
{
u32 flow = 0, fct_flow = 0;
- int ret;
u8 cap;
if (dev->fc_autoneg)
@@ -1147,10 +1143,10 @@ static int lan78xx_update_flowcontrol(struct lan78xx_net *dev, u8 duplex,
(cap & FLOW_CTRL_RX ? "enabled" : "disabled"),
(cap & FLOW_CTRL_TX ? "enabled" : "disabled"));
- ret = lan78xx_write_reg(dev, FCT_FLOW, fct_flow);
+ lan78xx_write_reg(dev, FCT_FLOW, fct_flow);
/* threshold value should be set before enabling flow */
- ret = lan78xx_write_reg(dev, FLOW, flow);
+ lan78xx_write_reg(dev, FLOW, flow);
return 0;
}
@@ -1663,11 +1659,10 @@ static const struct ethtool_ops lan78xx_ethtool_ops = {
static void lan78xx_init_mac_address(struct lan78xx_net *dev)
{
u32 addr_lo, addr_hi;
- int ret;
u8 addr[6];
- ret = lan78xx_read_reg(dev, RX_ADDRL, &addr_lo);
- ret = lan78xx_read_reg(dev, RX_ADDRH, &addr_hi);
+ lan78xx_read_reg(dev, RX_ADDRL, &addr_lo);
+ lan78xx_read_reg(dev, RX_ADDRH, &addr_hi);
addr[0] = addr_lo & 0xFF;
addr[1] = (addr_lo >> 8) & 0xFF;
@@ -1700,12 +1695,12 @@ static void lan78xx_init_mac_address(struct lan78xx_net *dev)
(addr[2] << 16) | (addr[3] << 24);
addr_hi = addr[4] | (addr[5] << 8);
- ret = lan78xx_write_reg(dev, RX_ADDRL, addr_lo);
- ret = lan78xx_write_reg(dev, RX_ADDRH, addr_hi);
+ lan78xx_write_reg(dev, RX_ADDRL, addr_lo);
+ lan78xx_write_reg(dev, RX_ADDRH, addr_hi);
}
- ret = lan78xx_write_reg(dev, MAF_LO(0), addr_lo);
- ret = lan78xx_write_reg(dev, MAF_HI(0), addr_hi | MAF_HI_VALID_);
+ lan78xx_write_reg(dev, MAF_LO(0), addr_lo);
+ lan78xx_write_reg(dev, MAF_HI(0), addr_hi | MAF_HI_VALID_);
ether_addr_copy(dev->net->dev_addr, addr);
}
@@ -1838,7 +1833,7 @@ static void lan78xx_remove_mdio(struct lan78xx_net *dev)
static void lan78xx_link_status_change(struct net_device *net)
{
struct phy_device *phydev = net->phydev;
- int ret, temp;
+ int temp;
/* At forced 100 F/H mode, chip may fail to set mode correctly
* when cable is switched between long(~50+m) and short one.
@@ -1849,7 +1844,7 @@ static void lan78xx_link_status_change(struct net_device *net)
/* disable phy interrupt */
temp = phy_read(phydev, LAN88XX_INT_MASK);
temp &= ~LAN88XX_INT_MASK_MDINTPIN_EN_;
- ret = phy_write(phydev, LAN88XX_INT_MASK, temp);
+ phy_write(phydev, LAN88XX_INT_MASK, temp);
temp = phy_read(phydev, MII_BMCR);
temp &= ~(BMCR_SPEED100 | BMCR_SPEED1000);
@@ -1863,7 +1858,7 @@ static void lan78xx_link_status_change(struct net_device *net)
/* enable phy interrupt back */
temp = phy_read(phydev, LAN88XX_INT_MASK);
temp |= LAN88XX_INT_MASK_MDINTPIN_EN_;
- ret = phy_write(phydev, LAN88XX_INT_MASK, temp);
+ phy_write(phydev, LAN88XX_INT_MASK, temp);
}
}
@@ -1917,14 +1912,13 @@ static void lan78xx_irq_bus_sync_unlock(struct irq_data *irqd)
struct lan78xx_net *dev =
container_of(data, struct lan78xx_net, domain_data);
u32 buf;
- int ret;
/* call register access here because irq_bus_lock & irq_bus_sync_unlock
* are the only two callbacks executed in non-atomic context.
*/
- ret = lan78xx_read_reg(dev, INT_EP_CTL, &buf);
+ lan78xx_read_reg(dev, INT_EP_CTL, &buf);
if (buf != data->irqenable)
- ret = lan78xx_write_reg(dev, INT_EP_CTL, data->irqenable);
+ lan78xx_write_reg(dev, INT_EP_CTL, data->irqenable);
mutex_unlock(&data->irq_lock);
}
@@ -1991,7 +1985,6 @@ static void lan78xx_remove_irq_domain(struct lan78xx_net *dev)
static int lan8835_fixup(struct phy_device *phydev)
{
int buf;
- int ret;
struct lan78xx_net *dev = netdev_priv(phydev->attached_dev);
/* LED2/PME_N/IRQ_N/RGMII_ID pin to IRQ_N mode */
@@ -2001,11 +1994,11 @@ static int lan8835_fixup(struct phy_device *phydev)
phy_write_mmd(phydev, MDIO_MMD_PCS, 0x8010, buf);
/* RGMII MAC TXC Delay Enable */
- ret = lan78xx_write_reg(dev, MAC_RGMII_ID,
+ lan78xx_write_reg(dev, MAC_RGMII_ID,
MAC_RGMII_ID_TXC_DELAY_EN_);
/* RGMII TX DLL Tune Adjust */
- ret = lan78xx_write_reg(dev, RGMII_TX_BYP_DLL, 0x3D00);
+ lan78xx_write_reg(dev, RGMII_TX_BYP_DLL, 0x3D00);
dev->interface = PHY_INTERFACE_MODE_RGMII_TXID;
@@ -2189,28 +2182,27 @@ static int lan78xx_phy_init(struct lan78xx_net *dev)
static int lan78xx_set_rx_max_frame_length(struct lan78xx_net *dev, int size)
{
- int ret = 0;
u32 buf;
bool rxenabled;
- ret = lan78xx_read_reg(dev, MAC_RX, &buf);
+ lan78xx_read_reg(dev, MAC_RX, &buf);
rxenabled = ((buf & MAC_RX_RXEN_) != 0);
if (rxenabled) {
buf &= ~MAC_RX_RXEN_;
- ret = lan78xx_write_reg(dev, MAC_RX, buf);
+ lan78xx_write_reg(dev, MAC_RX, buf);
}
/* add 4 to size for FCS */
buf &= ~MAC_RX_MAX_SIZE_MASK_;
buf |= (((size + 4) << MAC_RX_MAX_SIZE_SHIFT_) & MAC_RX_MAX_SIZE_MASK_);
- ret = lan78xx_write_reg(dev, MAC_RX, buf);
+ lan78xx_write_reg(dev, MAC_RX, buf);
if (rxenabled) {
buf |= MAC_RX_RXEN_;
- ret = lan78xx_write_reg(dev, MAC_RX, buf);
+ lan78xx_write_reg(dev, MAC_RX, buf);
}
return 0;
@@ -2267,13 +2259,12 @@ static int lan78xx_change_mtu(struct net_device *netdev, int new_mtu)
int ll_mtu = new_mtu + netdev->hard_header_len;
int old_hard_mtu = dev->hard_mtu;
int old_rx_urb_size = dev->rx_urb_size;
- int ret;
/* no second zero-length packet read wanted after mtu-sized packets */
if ((ll_mtu % dev->maxpacket) == 0)
return -EDOM;
- ret = lan78xx_set_rx_max_frame_length(dev, new_mtu + VLAN_ETH_HLEN);
+ lan78xx_set_rx_max_frame_length(dev, new_mtu + VLAN_ETH_HLEN);
netdev->mtu = new_mtu;
@@ -2296,7 +2287,6 @@ static int lan78xx_set_mac_addr(struct net_device *netdev, void *p)
struct lan78xx_net *dev = netdev_priv(netdev);
struct sockaddr *addr = p;
u32 addr_lo, addr_hi;
- int ret;
if (netif_running(netdev))
return -EBUSY;
@@ -2313,12 +2303,12 @@ static int lan78xx_set_mac_addr(struct net_device *netdev, void *p)
addr_hi = netdev->dev_addr[4] |
netdev->dev_addr[5] << 8;
- ret = lan78xx_write_reg(dev, RX_ADDRL, addr_lo);
- ret = lan78xx_write_reg(dev, RX_ADDRH, addr_hi);
+ lan78xx_write_reg(dev, RX_ADDRL, addr_lo);
+ lan78xx_write_reg(dev, RX_ADDRH, addr_hi);
/* Added to support MAC address changes */
- ret = lan78xx_write_reg(dev, MAF_LO(0), addr_lo);
- ret = lan78xx_write_reg(dev, MAF_HI(0), addr_hi | MAF_HI_VALID_);
+ lan78xx_write_reg(dev, MAF_LO(0), addr_lo);
+ lan78xx_write_reg(dev, MAF_HI(0), addr_hi | MAF_HI_VALID_);
return 0;
}
@@ -2330,7 +2320,6 @@ static int lan78xx_set_features(struct net_device *netdev,
struct lan78xx_net *dev = netdev_priv(netdev);
struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
unsigned long flags;
- int ret;
spin_lock_irqsave(&pdata->rfe_ctl_lock, flags);
@@ -2354,7 +2343,7 @@ static int lan78xx_set_features(struct net_device *netdev,
spin_unlock_irqrestore(&pdata->rfe_ctl_lock, flags);
- ret = lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);
+ lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);
return 0;
}
@@ -3804,7 +3793,6 @@ static u16 lan78xx_wakeframe_crc16(const u8 *buf, int len)
static int lan78xx_set_suspend(struct lan78xx_net *dev, u32 wol)
{
u32 buf;
- int ret;
int mask_index;
u16 crc;
u32 temp_wucsr;
@@ -3813,26 +3801,26 @@ static int lan78xx_set_suspend(struct lan78xx_net *dev, u32 wol)
const u8 ipv6_multicast[3] = { 0x33, 0x33 };
const u8 arp_type[2] = { 0x08, 0x06 };
- ret = lan78xx_read_reg(dev, MAC_TX, &buf);
+ lan78xx_read_reg(dev, MAC_TX, &buf);
buf &= ~MAC_TX_TXEN_;
- ret = lan78xx_write_reg(dev, MAC_TX, buf);
- ret = lan78xx_read_reg(dev, MAC_RX, &buf);
+ lan78xx_write_reg(dev, MAC_TX, buf);
+ lan78xx_read_reg(dev, MAC_RX, &buf);
buf &= ~MAC_RX_RXEN_;
- ret = lan78xx_write_reg(dev, MAC_RX, buf);
+ lan78xx_write_reg(dev, MAC_RX, buf);
- ret = lan78xx_write_reg(dev, WUCSR, 0);
- ret = lan78xx_write_reg(dev, WUCSR2, 0);
- ret = lan78xx_write_reg(dev, WK_SRC, 0xFFF1FF1FUL);
+ lan78xx_write_reg(dev, WUCSR, 0);
+ lan78xx_write_reg(dev, WUCSR2, 0);
+ lan78xx_write_reg(dev, WK_SRC, 0xFFF1FF1FUL);
temp_wucsr = 0;
temp_pmt_ctl = 0;
- ret = lan78xx_read_reg(dev, PMT_CTL, &temp_pmt_ctl);
+ lan78xx_read_reg(dev, PMT_CTL, &temp_pmt_ctl);
temp_pmt_ctl &= ~PMT_CTL_RES_CLR_WKP_EN_;
temp_pmt_ctl |= PMT_CTL_RES_CLR_WKP_STS_;
for (mask_index = 0; mask_index < NUM_OF_WUF_CFG; mask_index++)
- ret = lan78xx_write_reg(dev, WUF_CFG(mask_index), 0);
+ lan78xx_write_reg(dev, WUF_CFG(mask_index), 0);
mask_index = 0;
if (wol & WAKE_PHY) {
@@ -3861,30 +3849,30 @@ static int lan78xx_set_suspend(struct lan78xx_net *dev, u32 wol)
/* set WUF_CFG & WUF_MASK for IPv4 Multicast */
crc = lan78xx_wakeframe_crc16(ipv4_multicast, 3);
- ret = lan78xx_write_reg(dev, WUF_CFG(mask_index),
+ lan78xx_write_reg(dev, WUF_CFG(mask_index),
WUF_CFGX_EN_ |
WUF_CFGX_TYPE_MCAST_ |
(0 << WUF_CFGX_OFFSET_SHIFT_) |
(crc & WUF_CFGX_CRC16_MASK_));
- ret = lan78xx_write_reg(dev, WUF_MASK0(mask_index), 7);
- ret = lan78xx_write_reg(dev, WUF_MASK1(mask_index), 0);
- ret = lan78xx_write_reg(dev, WUF_MASK2(mask_index), 0);
- ret = lan78xx_write_reg(dev, WUF_MASK3(mask_index), 0);
+ lan78xx_write_reg(dev, WUF_MASK0(mask_index), 7);
+ lan78xx_write_reg(dev, WUF_MASK1(mask_index), 0);
+ lan78xx_write_reg(dev, WUF_MASK2(mask_index), 0);
+ lan78xx_write_reg(dev, WUF_MASK3(mask_index), 0);
mask_index++;
/* for IPv6 Multicast */
crc = lan78xx_wakeframe_crc16(ipv6_multicast, 2);
- ret = lan78xx_write_reg(dev, WUF_CFG(mask_index),
+ lan78xx_write_reg(dev, WUF_CFG(mask_index),
WUF_CFGX_EN_ |
WUF_CFGX_TYPE_MCAST_ |
(0 << WUF_CFGX_OFFSET_SHIFT_) |
(crc & WUF_CFGX_CRC16_MASK_));
- ret = lan78xx_write_reg(dev, WUF_MASK0(mask_index), 3);
- ret = lan78xx_write_reg(dev, WUF_MASK1(mask_index), 0);
- ret = lan78xx_write_reg(dev, WUF_MASK2(mask_index), 0);
- ret = lan78xx_write_reg(dev, WUF_MASK3(mask_index), 0);
+ lan78xx_write_reg(dev, WUF_MASK0(mask_index), 3);
+ lan78xx_write_reg(dev, WUF_MASK1(mask_index), 0);
+ lan78xx_write_reg(dev, WUF_MASK2(mask_index), 0);
+ lan78xx_write_reg(dev, WUF_MASK3(mask_index), 0);
mask_index++;
temp_pmt_ctl |= PMT_CTL_WOL_EN_;
@@ -3905,16 +3893,16 @@ static int lan78xx_set_suspend(struct lan78xx_net *dev, u32 wol)
* for packet type (offset 12,13) = ARP (0x0806)
*/
crc = lan78xx_wakeframe_crc16(arp_type, 2);
- ret = lan78xx_write_reg(dev, WUF_CFG(mask_index),
+ lan78xx_write_reg(dev, WUF_CFG(mask_index),
WUF_CFGX_EN_ |
WUF_CFGX_TYPE_ALL_ |
(0 << WUF_CFGX_OFFSET_SHIFT_) |
(crc & WUF_CFGX_CRC16_MASK_));
- ret = lan78xx_write_reg(dev, WUF_MASK0(mask_index), 0x3000);
- ret = lan78xx_write_reg(dev, WUF_MASK1(mask_index), 0);
- ret = lan78xx_write_reg(dev, WUF_MASK2(mask_index), 0);
- ret = lan78xx_write_reg(dev, WUF_MASK3(mask_index), 0);
+ lan78xx_write_reg(dev, WUF_MASK0(mask_index), 0x3000);
+ lan78xx_write_reg(dev, WUF_MASK1(mask_index), 0);
+ lan78xx_write_reg(dev, WUF_MASK2(mask_index), 0);
+ lan78xx_write_reg(dev, WUF_MASK3(mask_index), 0);
mask_index++;
temp_pmt_ctl |= PMT_CTL_WOL_EN_;
@@ -3922,7 +3910,7 @@ static int lan78xx_set_suspend(struct lan78xx_net *dev, u32 wol)
temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
}
- ret = lan78xx_write_reg(dev, WUCSR, temp_wucsr);
+ lan78xx_write_reg(dev, WUCSR, temp_wucsr);
/* when multiple WOL bits are set */
if (hweight_long((unsigned long)wol) > 1) {
@@ -3930,16 +3918,16 @@ static int lan78xx_set_suspend(struct lan78xx_net *dev, u32 wol)
temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
}
- ret = lan78xx_write_reg(dev, PMT_CTL, temp_pmt_ctl);
+ lan78xx_write_reg(dev, PMT_CTL, temp_pmt_ctl);
/* clear WUPS */
- ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
+ lan78xx_read_reg(dev, PMT_CTL, &buf);
buf |= PMT_CTL_WUPS_MASK_;
- ret = lan78xx_write_reg(dev, PMT_CTL, buf);
+ lan78xx_write_reg(dev, PMT_CTL, buf);
- ret = lan78xx_read_reg(dev, MAC_RX, &buf);
+ lan78xx_read_reg(dev, MAC_RX, &buf);
buf |= MAC_RX_RXEN_;
- ret = lan78xx_write_reg(dev, MAC_RX, buf);
+ lan78xx_write_reg(dev, MAC_RX, buf);
return 0;
}
diff --git a/drivers/net/usb/mcs7830.c b/drivers/net/usb/mcs7830.c
index 09bfa6a4dfbc..fc512b780d15 100644
--- a/drivers/net/usb/mcs7830.c
+++ b/drivers/net/usb/mcs7830.c
@@ -462,7 +462,7 @@ static const struct net_device_ops mcs7830_netdev_ops = {
.ndo_start_xmit = usbnet_start_xmit,
.ndo_tx_timeout = usbnet_tx_timeout,
.ndo_change_mtu = usbnet_change_mtu,
- .ndo_get_stats64 = usbnet_get_stats64,
+ .ndo_get_stats64 = dev_get_tstats64,
.ndo_validate_addr = eth_validate_addr,
.ndo_do_ioctl = mcs7830_ioctl,
.ndo_set_rx_mode = mcs7830_set_multicast,
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index fc378ff56775..d166c321ee9b 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -72,7 +72,6 @@ struct qmimux_hdr {
struct qmimux_priv {
struct net_device *real_dev;
u8 mux_id;
- struct pcpu_sw_netstats __percpu *stats64;
};
static int qmimux_open(struct net_device *dev)
@@ -108,34 +107,19 @@ static netdev_tx_t qmimux_start_xmit(struct sk_buff *skb, struct net_device *dev
skb->dev = priv->real_dev;
ret = dev_queue_xmit(skb);
- if (likely(ret == NET_XMIT_SUCCESS || ret == NET_XMIT_CN)) {
- struct pcpu_sw_netstats *stats64 = this_cpu_ptr(priv->stats64);
-
- u64_stats_update_begin(&stats64->syncp);
- stats64->tx_packets++;
- stats64->tx_bytes += len;
- u64_stats_update_end(&stats64->syncp);
- } else {
+ if (likely(ret == NET_XMIT_SUCCESS || ret == NET_XMIT_CN))
+ dev_sw_netstats_tx_add(dev, 1, len);
+ else
dev->stats.tx_dropped++;
- }
return ret;
}
-static void qmimux_get_stats64(struct net_device *net,
- struct rtnl_link_stats64 *stats)
-{
- struct qmimux_priv *priv = netdev_priv(net);
-
- netdev_stats_to_stats64(stats, &net->stats);
- dev_fetch_sw_netstats(stats, priv->stats64);
-}
-
static const struct net_device_ops qmimux_netdev_ops = {
.ndo_open = qmimux_open,
.ndo_stop = qmimux_stop,
.ndo_start_xmit = qmimux_start_xmit,
- .ndo_get_stats64 = qmimux_get_stats64,
+ .ndo_get_stats64 = dev_get_tstats64,
};
static void qmimux_setup(struct net_device *dev)
@@ -224,14 +208,7 @@ static int qmimux_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
net->stats.rx_errors++;
return 0;
} else {
- struct pcpu_sw_netstats *stats64;
- struct qmimux_priv *priv = netdev_priv(net);
-
- stats64 = this_cpu_ptr(priv->stats64);
- u64_stats_update_begin(&stats64->syncp);
- stats64->rx_packets++;
- stats64->rx_bytes += pkt_len;
- u64_stats_update_end(&stats64->syncp);
+ dev_sw_netstats_rx_add(net, pkt_len);
}
skip:
@@ -256,8 +233,8 @@ static int qmimux_register_device(struct net_device *real_dev, u8 mux_id)
priv->mux_id = mux_id;
priv->real_dev = real_dev;
- priv->stats64 = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
- if (!priv->stats64) {
+ new_dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
+ if (!new_dev->tstats) {
err = -ENOBUFS;
goto out_free_newdev;
}
@@ -292,7 +269,7 @@ static void qmimux_unregister_device(struct net_device *dev,
struct qmimux_priv *priv = netdev_priv(dev);
struct net_device *real_dev = priv->real_dev;
- free_percpu(priv->stats64);
+ free_percpu(dev->tstats);
netdev_upper_dev_unlink(real_dev, dev);
unregister_netdevice_queue(dev, head);
@@ -598,7 +575,7 @@ static const struct net_device_ops qmi_wwan_netdev_ops = {
.ndo_start_xmit = usbnet_start_xmit,
.ndo_tx_timeout = usbnet_tx_timeout,
.ndo_change_mtu = usbnet_change_mtu,
- .ndo_get_stats64 = usbnet_get_stats64,
+ .ndo_get_stats64 = dev_get_tstats64,
.ndo_set_mac_address = qmi_wwan_mac_addr,
.ndo_validate_addr = eth_validate_addr,
};
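
The qmi_wwan hunks above, like the other usbnet minidriver changes in this series, all apply the same conversion: open-coded per-cpu counters and the driver-private ndo_get_stats64 handlers that read them are replaced by the generic net_device->tstats machinery. A minimal sketch of that pattern, with hypothetical foo_* names standing in for a real driver:

/* Sketch only, assuming the generic dev_sw_netstats_*() helpers: the
 * driver allocates dev->tstats, updates it on the fast path, and
 * dev_get_tstats64() folds the per-cpu counters into
 * struct rtnl_link_stats64 for the core.
 */
#include <linux/netdevice.h>
#include <linux/skbuff.h>

static netdev_tx_t foo_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	unsigned int len = skb->len;

	/* ... hand the skb to the hardware here ... */
	dev_sw_netstats_tx_add(dev, 1, len);	/* one packet, len bytes */
	dev_kfree_skb(skb);
	return NETDEV_TX_OK;
}

static const struct net_device_ops foo_netdev_ops = {
	.ndo_start_xmit	 = foo_start_xmit,
	.ndo_get_stats64 = dev_get_tstats64,	/* reads dev->tstats */
};

static int foo_init(struct net_device *dev)	/* e.g. from .ndo_init */
{
	dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
	return dev->tstats ? 0 : -ENOMEM;
}

static void foo_uninit(struct net_device *dev)	/* e.g. from .ndo_uninit */
{
	free_percpu(dev->tstats);
}

On the receive side the matching helper is dev_sw_netstats_rx_add(dev, skb->len), as the qmimux_rx_fixup() hunk above shows.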
diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
index b1770489aca5..c448d6089821 100644
--- a/drivers/net/usb/r8152.c
+++ b/drivers/net/usb/r8152.c
@@ -26,6 +26,7 @@
#include <linux/acpi.h>
#include <linux/firmware.h>
#include <crypto/hash.h>
+#include <linux/usb/r8152.h>
/* Information for net-next */
#define NETNEXT_VERSION "11"
@@ -653,18 +654,6 @@ enum rtl_register_content {
#define INTR_LINK 0x0004
-#define RTL8152_REQT_READ 0xc0
-#define RTL8152_REQT_WRITE 0x40
-#define RTL8152_REQ_GET_REGS 0x05
-#define RTL8152_REQ_SET_REGS 0x05
-
-#define BYTE_EN_DWORD 0xff
-#define BYTE_EN_WORD 0x33
-#define BYTE_EN_BYTE 0x11
-#define BYTE_EN_SIX_BYTES 0x3f
-#define BYTE_EN_START_MASK 0x0f
-#define BYTE_EN_END_MASK 0xf0
-
#define RTL8153_MAX_PACKET 9216 /* 9K */
#define RTL8153_MAX_MTU (RTL8153_MAX_PACKET - VLAN_ETH_HLEN - \
ETH_FCS_LEN)
@@ -689,21 +678,9 @@ enum rtl8152_flags {
LENOVO_MACPASSTHRU,
};
-/* Define these values to match your device */
-#define VENDOR_ID_REALTEK 0x0bda
-#define VENDOR_ID_MICROSOFT 0x045e
-#define VENDOR_ID_SAMSUNG 0x04e8
-#define VENDOR_ID_LENOVO 0x17ef
-#define VENDOR_ID_LINKSYS 0x13b1
-#define VENDOR_ID_NVIDIA 0x0955
-#define VENDOR_ID_TPLINK 0x2357
-
#define DEVICE_ID_THINKPAD_THUNDERBOLT3_DOCK_GEN2 0x3082
#define DEVICE_ID_THINKPAD_USB_C_DOCK_GEN2 0xa387
-#define MCU_TYPE_PLA 0x0100
-#define MCU_TYPE_USB 0x0000
-
struct tally_counter {
__le64 tx_packets;
__le64 rx_packets;
@@ -898,6 +875,7 @@ struct fw_header {
* struct fw_mac - a firmware block used by RTL_FW_PLA and RTL_FW_USB.
* The layout of the firmware block is:
* <struct fw_mac> + <info> + <firmware data>.
+ * @blk_hdr: firmware descriptor (type, length)
* @fw_offset: offset of the firmware binary data. The start address of
* the data would be the address of struct fw_mac + @fw_offset.
* @fw_reg: the register to load the firmware. Depends on chip.
@@ -911,6 +889,7 @@ struct fw_header {
* @bp_num: the break point number which needs to be set for this firmware.
* Depends on the firmware.
* @bp: break points. Depends on firmware.
+ * @reserved: reserved space (unused)
* @fw_ver_reg: the register to store the fw version.
* @fw_ver_data: the firmware version of the current type.
* @info: additional information for debugging, and is followed by the
@@ -936,8 +915,10 @@ struct fw_mac {
/**
* struct fw_phy_patch_key - a firmware block used by RTL_FW_PHY_START.
* This is used to set patch key when loading the firmware of PHY.
+ * @blk_hdr: firmware descriptor (type, length)
* @key_reg: the register to write the patch key.
* @key_data: patch key.
+ * @reserved: reserved space (unused)
*/
struct fw_phy_patch_key {
struct fw_block blk_hdr;
@@ -950,6 +931,7 @@ struct fw_phy_patch_key {
* struct fw_phy_nc - a firmware block used by RTL_FW_PHY_NC.
* The layout of the firmware block is:
* <struct fw_phy_nc> + <info> + <firmware data>.
+ * @blk_hdr: firmware descriptor (type, length)
* @fw_offset: offset of the firmware binary data. The start address of
* the data would be the address of struct fw_phy_nc + @fw_offset.
* @fw_reg: the register to load the firmware. Depends on chip.
@@ -958,8 +940,9 @@ struct fw_phy_patch_key {
* @patch_en_addr: the register of enabling patch mode. Depends on chip.
* @patch_en_value: patch mode enabled mask. Depends on the firmware.
* @mode_reg: the register for switching the mode.
- * @mod_pre: the mode needing to be set before loading the firmware.
- * @mod_post: the mode to be set when finishing to load the firmware.
+ * @mode_pre: the mode needing to be set before loading the firmware.
+ * @mode_post: the mode to be set when finishing to load the firmware.
+ * @reserved: reserved space (unused)
* @bp_start: the start register of break points. Depends on chip.
* @bp_num: the break point number which needs to be set for this firmware.
* Depends on the firmware.
@@ -6615,7 +6598,7 @@ static int rtl_fw_init(struct r8152 *tp)
return 0;
}
-static u8 rtl_get_version(struct usb_interface *intf)
+u8 rtl8152_get_version(struct usb_interface *intf)
{
struct usb_device *udev = interface_to_usbdev(intf);
u32 ocp_data = 0;
@@ -6673,12 +6656,13 @@ static u8 rtl_get_version(struct usb_interface *intf)
return version;
}
+EXPORT_SYMBOL_GPL(rtl8152_get_version);
static int rtl8152_probe(struct usb_interface *intf,
const struct usb_device_id *id)
{
struct usb_device *udev = interface_to_usbdev(intf);
- u8 version = rtl_get_version(intf);
+ u8 version = rtl8152_get_version(intf);
struct r8152 *tp;
struct net_device *netdev;
int ret;
diff --git a/drivers/net/usb/r8153_ecm.c b/drivers/net/usb/r8153_ecm.c
new file mode 100644
index 000000000000..2c3fabd38b16
--- /dev/null
+++ b/drivers/net/usb/r8153_ecm.c
@@ -0,0 +1,162 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/mii.h>
+#include <linux/usb.h>
+#include <linux/usb/cdc.h>
+#include <linux/usb/usbnet.h>
+#include <linux/usb/r8152.h>
+
+#define OCP_BASE 0xe86c
+
+static int pla_read_word(struct usbnet *dev, u16 index)
+{
+ u16 byen = BYTE_EN_WORD;
+ u8 shift = index & 2;
+ __le32 tmp;
+ int ret;
+
+ if (shift)
+ byen <<= shift;
+
+ index &= ~3;
+
+ ret = usbnet_read_cmd(dev, RTL8152_REQ_GET_REGS, RTL8152_REQT_READ, index,
+ MCU_TYPE_PLA | byen, &tmp, sizeof(tmp));
+ if (ret < 0)
+ goto out;
+
+ ret = __le32_to_cpu(tmp);
+ ret >>= (shift * 8);
+ ret &= 0xffff;
+
+out:
+ return ret;
+}
+
+static int pla_write_word(struct usbnet *dev, u16 index, u32 data)
+{
+ u32 mask = 0xffff;
+ u16 byen = BYTE_EN_WORD;
+ u8 shift = index & 2;
+ __le32 tmp;
+ int ret;
+
+ data &= mask;
+
+ if (shift) {
+ byen <<= shift;
+ mask <<= (shift * 8);
+ data <<= (shift * 8);
+ }
+
+ index &= ~3;
+
+ ret = usbnet_read_cmd(dev, RTL8152_REQ_GET_REGS, RTL8152_REQT_READ, index,
+ MCU_TYPE_PLA | byen, &tmp, sizeof(tmp));
+
+ if (ret < 0)
+ goto out;
+
+ data |= __le32_to_cpu(tmp) & ~mask;
+ tmp = __cpu_to_le32(data);
+
+ ret = usbnet_write_cmd(dev, RTL8152_REQ_SET_REGS, RTL8152_REQT_WRITE, index,
+ MCU_TYPE_PLA | byen, &tmp, sizeof(tmp));
+
+out:
+ return ret;
+}
+
+static int r8153_ecm_mdio_read(struct net_device *netdev, int phy_id, int reg)
+{
+ struct usbnet *dev = netdev_priv(netdev);
+ int ret;
+
+ ret = pla_write_word(dev, OCP_BASE, 0xa000);
+ if (ret < 0)
+ goto out;
+
+ ret = pla_read_word(dev, 0xb400 + reg * 2);
+
+out:
+ return ret;
+}
+
+static void r8153_ecm_mdio_write(struct net_device *netdev, int phy_id, int reg, int val)
+{
+ struct usbnet *dev = netdev_priv(netdev);
+ int ret;
+
+ ret = pla_write_word(dev, OCP_BASE, 0xa000);
+ if (ret < 0)
+ return;
+
+ ret = pla_write_word(dev, 0xb400 + reg * 2, val);
+}
+
+static int r8153_bind(struct usbnet *dev, struct usb_interface *intf)
+{
+ int status;
+
+ status = usbnet_cdc_bind(dev, intf);
+ if (status < 0)
+ return status;
+
+ dev->mii.dev = dev->net;
+ dev->mii.mdio_read = r8153_ecm_mdio_read;
+ dev->mii.mdio_write = r8153_ecm_mdio_write;
+ dev->mii.reg_num_mask = 0x1f;
+ dev->mii.supports_gmii = 1;
+
+ return status;
+}
+
+static const struct driver_info r8153_info = {
+ .description = "RTL8153 ECM Device",
+ .flags = FLAG_ETHER,
+ .bind = r8153_bind,
+ .unbind = usbnet_cdc_unbind,
+ .status = usbnet_cdc_status,
+ .manage_power = usbnet_manage_power,
+};
+
+static const struct usb_device_id products[] = {
+{
+ USB_DEVICE_AND_INTERFACE_INFO(VENDOR_ID_REALTEK, 0x8153, USB_CLASS_COMM,
+ USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE),
+ .driver_info = (unsigned long)&r8153_info,
+},
+
+ { }, /* END */
+};
+MODULE_DEVICE_TABLE(usb, products);
+
+static int rtl8153_ecm_probe(struct usb_interface *intf,
+ const struct usb_device_id *id)
+{
+#if IS_REACHABLE(CONFIG_USB_RTL8152)
+ if (rtl8152_get_version(intf))
+ return -ENODEV;
+#endif
+
+ return usbnet_probe(intf, id);
+}
+
+static struct usb_driver r8153_ecm_driver = {
+ .name = "r8153_ecm",
+ .id_table = products,
+ .probe = rtl8153_ecm_probe,
+ .disconnect = usbnet_disconnect,
+ .suspend = usbnet_suspend,
+ .resume = usbnet_resume,
+ .reset_resume = usbnet_resume,
+ .supports_autosuspend = 1,
+ .disable_hub_initiated_lpm = 1,
+};
+
+module_usb_driver(r8153_ecm_driver);
+
+MODULE_AUTHOR("Hayes Wang");
+MODULE_DESCRIPTION("Realtek USB ECM device");
+MODULE_LICENSE("GPL");
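
The pla_read_word()/pla_write_word() helpers in the new r8153_ecm driver reach 16-bit registers through the chip's 32-bit, byte-enable-gated control transfers. A standalone illustration of the index/byte-enable arithmetic (hypothetical values, plain userspace C, only to show the math):

#include <stdint.h>
#include <stdio.h>

#define BYTE_EN_WORD 0x33	/* enable bytes 0 and 1 of a dword */

int main(void)
{
	uint16_t index = 0xb402;	/* a word register at dword offset +2 */
	uint16_t byen = BYTE_EN_WORD;
	uint8_t shift = index & 2;	/* nonzero: upper half of the dword */

	if (shift)
		byen <<= shift;		/* 0x33 -> 0xcc: bytes 2 and 3 */
	index &= ~3;			/* dword-align the request: 0xb400 */

	printf("read dword at %#x with byte_en %#x, then value >>= %u\n",
	       index, byen, shift * 8);
	return 0;
}

So a register at an odd word offset is fetched via the enclosing aligned dword, with the byte-enable mask shifted up to select the high half and the result shifted down by 16 bits.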
diff --git a/drivers/net/usb/rndis_host.c b/drivers/net/usb/rndis_host.c
index 6fa7a009a24a..6609d21ef894 100644
--- a/drivers/net/usb/rndis_host.c
+++ b/drivers/net/usb/rndis_host.c
@@ -279,7 +279,7 @@ static const struct net_device_ops rndis_netdev_ops = {
.ndo_stop = usbnet_stop,
.ndo_start_xmit = usbnet_start_xmit,
.ndo_tx_timeout = usbnet_tx_timeout,
- .ndo_get_stats64 = usbnet_get_stats64,
+ .ndo_get_stats64 = dev_get_tstats64,
.ndo_set_mac_address = eth_mac_addr,
.ndo_validate_addr = eth_validate_addr,
};
diff --git a/drivers/net/usb/sierra_net.c b/drivers/net/usb/sierra_net.c
index 0abd257b634c..55a244eca5ca 100644
--- a/drivers/net/usb/sierra_net.c
+++ b/drivers/net/usb/sierra_net.c
@@ -184,7 +184,7 @@ static const struct net_device_ops sierra_net_device_ops = {
.ndo_start_xmit = usbnet_start_xmit,
.ndo_tx_timeout = usbnet_tx_timeout,
.ndo_change_mtu = usbnet_change_mtu,
- .ndo_get_stats64 = usbnet_get_stats64,
+ .ndo_get_stats64 = dev_get_tstats64,
.ndo_set_mac_address = eth_mac_addr,
.ndo_validate_addr = eth_validate_addr,
};
diff --git a/drivers/net/usb/smsc75xx.c b/drivers/net/usb/smsc75xx.c
index 8689835a5214..4353b370249f 100644
--- a/drivers/net/usb/smsc75xx.c
+++ b/drivers/net/usb/smsc75xx.c
@@ -1435,7 +1435,7 @@ static const struct net_device_ops smsc75xx_netdev_ops = {
.ndo_stop = usbnet_stop,
.ndo_start_xmit = usbnet_start_xmit,
.ndo_tx_timeout = usbnet_tx_timeout,
- .ndo_get_stats64 = usbnet_get_stats64,
+ .ndo_get_stats64 = dev_get_tstats64,
.ndo_change_mtu = smsc75xx_change_mtu,
.ndo_set_mac_address = eth_mac_addr,
.ndo_validate_addr = eth_validate_addr,
diff --git a/drivers/net/usb/smsc95xx.c b/drivers/net/usb/smsc95xx.c
index ea0d5f04dc3a..4c8ee1cff4d4 100644
--- a/drivers/net/usb/smsc95xx.c
+++ b/drivers/net/usb/smsc95xx.c
@@ -1041,7 +1041,7 @@ static const struct net_device_ops smsc95xx_netdev_ops = {
.ndo_start_xmit = usbnet_start_xmit,
.ndo_tx_timeout = usbnet_tx_timeout,
.ndo_change_mtu = usbnet_change_mtu,
- .ndo_get_stats64 = usbnet_get_stats64,
+ .ndo_get_stats64 = dev_get_tstats64,
.ndo_set_mac_address = eth_mac_addr,
.ndo_validate_addr = eth_validate_addr,
.ndo_do_ioctl = smsc95xx_ioctl,
diff --git a/drivers/net/usb/sr9700.c b/drivers/net/usb/sr9700.c
index e04c8054c2cf..878557ad03ad 100644
--- a/drivers/net/usb/sr9700.c
+++ b/drivers/net/usb/sr9700.c
@@ -308,7 +308,7 @@ static const struct net_device_ops sr9700_netdev_ops = {
.ndo_start_xmit = usbnet_start_xmit,
.ndo_tx_timeout = usbnet_tx_timeout,
.ndo_change_mtu = usbnet_change_mtu,
- .ndo_get_stats64 = usbnet_get_stats64,
+ .ndo_get_stats64 = dev_get_tstats64,
.ndo_validate_addr = eth_validate_addr,
.ndo_do_ioctl = sr9700_ioctl,
.ndo_set_rx_mode = sr9700_set_multicast,
diff --git a/drivers/net/usb/sr9800.c b/drivers/net/usb/sr9800.c
index 681e0def6356..da56735d7755 100644
--- a/drivers/net/usb/sr9800.c
+++ b/drivers/net/usb/sr9800.c
@@ -681,7 +681,7 @@ static const struct net_device_ops sr9800_netdev_ops = {
.ndo_start_xmit = usbnet_start_xmit,
.ndo_tx_timeout = usbnet_tx_timeout,
.ndo_change_mtu = usbnet_change_mtu,
- .ndo_get_stats64 = usbnet_get_stats64,
+ .ndo_get_stats64 = dev_get_tstats64,
.ndo_set_mac_address = sr_set_mac_address,
.ndo_validate_addr = eth_validate_addr,
.ndo_do_ioctl = sr_ioctl,
diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c
index 6062dc27870e..1447da1d5729 100644
--- a/drivers/net/usb/usbnet.c
+++ b/drivers/net/usb/usbnet.c
@@ -304,7 +304,7 @@ static void __usbnet_status_stop_force(struct usbnet *dev)
*/
void usbnet_skb_return (struct usbnet *dev, struct sk_buff *skb)
{
- struct pcpu_sw_netstats *stats64 = this_cpu_ptr(dev->stats64);
+ struct pcpu_sw_netstats *stats64 = this_cpu_ptr(dev->net->tstats);
unsigned long flags;
int status;
@@ -980,15 +980,6 @@ int usbnet_set_link_ksettings(struct net_device *net,
}
EXPORT_SYMBOL_GPL(usbnet_set_link_ksettings);
-void usbnet_get_stats64(struct net_device *net, struct rtnl_link_stats64 *stats)
-{
- struct usbnet *dev = netdev_priv(net);
-
- netdev_stats_to_stats64(stats, &net->stats);
- dev_fetch_sw_netstats(stats, dev->stats64);
-}
-EXPORT_SYMBOL_GPL(usbnet_get_stats64);
-
u32 usbnet_get_link (struct net_device *net)
{
struct usbnet *dev = netdev_priv(net);
@@ -1220,7 +1211,7 @@ static void tx_complete (struct urb *urb)
struct usbnet *dev = entry->dev;
if (urb->status == 0) {
- struct pcpu_sw_netstats *stats64 = this_cpu_ptr(dev->stats64);
+ struct pcpu_sw_netstats *stats64 = this_cpu_ptr(dev->net->tstats);
unsigned long flags;
flags = u64_stats_update_begin_irqsave(&stats64->syncp);
@@ -1596,7 +1587,7 @@ void usbnet_disconnect (struct usb_interface *intf)
usb_free_urb(dev->interrupt);
kfree(dev->padding_pkt);
- free_percpu(dev->stats64);
+ free_percpu(net->tstats);
free_netdev(net);
}
EXPORT_SYMBOL_GPL(usbnet_disconnect);
@@ -1608,7 +1599,7 @@ static const struct net_device_ops usbnet_netdev_ops = {
.ndo_tx_timeout = usbnet_tx_timeout,
.ndo_set_rx_mode = usbnet_set_rx_mode,
.ndo_change_mtu = usbnet_change_mtu,
- .ndo_get_stats64 = usbnet_get_stats64,
+ .ndo_get_stats64 = dev_get_tstats64,
.ndo_set_mac_address = eth_mac_addr,
.ndo_validate_addr = eth_validate_addr,
};
@@ -1671,8 +1662,8 @@ usbnet_probe (struct usb_interface *udev, const struct usb_device_id *prod)
dev->driver_info = info;
dev->driver_name = name;
- dev->stats64 = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
- if (!dev->stats64)
+ net->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
+ if (!net->tstats)
goto out0;
dev->msg_enable = netif_msg_init (msg_level, NETIF_MSG_DRV
@@ -1812,7 +1803,7 @@ out1:
*/
cancel_work_sync(&dev->kevent);
del_timer_sync(&dev->delay);
- free_percpu(dev->stats64);
+ free_percpu(net->tstats);
out0:
free_netdev(net);
out:
diff --git a/drivers/net/veth.c b/drivers/net/veth.c
index 8c737668008a..359d3ab33c4d 100644
--- a/drivers/net/veth.c
+++ b/drivers/net/veth.c
@@ -1329,7 +1329,7 @@ static int veth_newlink(struct net *src_net, struct net_device *dev,
}
if (ifmp && tbp[IFLA_IFNAME]) {
- nla_strlcpy(ifname, tbp[IFLA_IFNAME], IFNAMSIZ);
+ nla_strscpy(ifname, tbp[IFLA_IFNAME], IFNAMSIZ);
name_assign_type = NET_NAME_USER;
} else {
snprintf(ifname, IFNAMSIZ, DRV_NAME "%%d");
@@ -1379,7 +1379,7 @@ static int veth_newlink(struct net *src_net, struct net_device *dev,
eth_hw_addr_random(dev);
if (tb[IFLA_IFNAME])
- nla_strlcpy(dev->name, tb[IFLA_IFNAME], IFNAMSIZ);
+ nla_strscpy(dev->name, tb[IFLA_IFNAME], IFNAMSIZ);
else
snprintf(dev->name, IFNAMSIZ, DRV_NAME "%%d");
diff --git a/drivers/net/vrf.c b/drivers/net/vrf.c
index f2793ffde191..f8d711a84763 100644
--- a/drivers/net/vrf.c
+++ b/drivers/net/vrf.c
@@ -9,6 +9,7 @@
* Based on dummy, team and ipvlan drivers
*/
+#include <linux/ethtool.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
diff --git a/drivers/net/vsockmon.c b/drivers/net/vsockmon.c
index e8563acf98e8..b1bb1b04b664 100644
--- a/drivers/net/vsockmon.c
+++ b/drivers/net/vsockmon.c
@@ -1,4 +1,5 @@
// SPDX-License-Identifier: GPL-2.0-only
+#include <linux/ethtool.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/if_arp.h>
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
index 977f77e2c2ce..e1e44d68ac4b 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
@@ -66,6 +66,7 @@ struct vxlan_net {
struct list_head vxlan_list;
struct hlist_head sock_list[PORT_HASH_SIZE];
spinlock_t sock_lock;
+ struct notifier_block nexthop_notifier_block;
};
/* Forwarding table entry */
@@ -3210,7 +3211,7 @@ static const struct net_device_ops vxlan_netdev_ether_ops = {
.ndo_open = vxlan_open,
.ndo_stop = vxlan_stop,
.ndo_start_xmit = vxlan_xmit,
- .ndo_get_stats64 = ip_tunnel_get_stats64,
+ .ndo_get_stats64 = dev_get_tstats64,
.ndo_set_rx_mode = vxlan_set_multicast_list,
.ndo_change_mtu = vxlan_change_mtu,
.ndo_validate_addr = eth_validate_addr,
@@ -3229,7 +3230,7 @@ static const struct net_device_ops vxlan_netdev_raw_ops = {
.ndo_open = vxlan_open,
.ndo_stop = vxlan_stop,
.ndo_start_xmit = vxlan_xmit,
- .ndo_get_stats64 = ip_tunnel_get_stats64,
+ .ndo_get_stats64 = dev_get_tstats64,
.ndo_change_mtu = vxlan_change_mtu,
.ndo_fill_metadata_dst = vxlan_fill_metadata_dst,
};
@@ -4688,9 +4689,14 @@ static void vxlan_fdb_nh_flush(struct nexthop *nh)
static int vxlan_nexthop_event(struct notifier_block *nb,
unsigned long event, void *ptr)
{
- struct nexthop *nh = ptr;
+ struct nh_notifier_info *info = ptr;
+ struct nexthop *nh;
+
+ if (event != NEXTHOP_EVENT_DEL)
+ return NOTIFY_DONE;
- if (!nh || event != NEXTHOP_EVENT_DEL)
+ nh = nexthop_find_by_id(info->net, info->id);
+ if (!nh)
return NOTIFY_DONE;
vxlan_fdb_nh_flush(nh);
@@ -4698,10 +4704,6 @@ static int vxlan_nexthop_event(struct notifier_block *nb,
return NOTIFY_DONE;
}
-static struct notifier_block vxlan_nexthop_notifier_block __read_mostly = {
- .notifier_call = vxlan_nexthop_event,
-};
-
static __net_init int vxlan_init_net(struct net *net)
{
struct vxlan_net *vn = net_generic(net, vxlan_net_id);
@@ -4709,11 +4711,13 @@ static __net_init int vxlan_init_net(struct net *net)
INIT_LIST_HEAD(&vn->vxlan_list);
spin_lock_init(&vn->sock_lock);
+ vn->nexthop_notifier_block.notifier_call = vxlan_nexthop_event;
for (h = 0; h < PORT_HASH_SIZE; ++h)
INIT_HLIST_HEAD(&vn->sock_list[h]);
- return register_nexthop_notifier(net, &vxlan_nexthop_notifier_block);
+ return register_nexthop_notifier(net, &vn->nexthop_notifier_block,
+ NULL);
}
static void vxlan_destroy_tunnels(struct net *net, struct list_head *head)
@@ -4745,8 +4749,11 @@ static void __net_exit vxlan_exit_batch_net(struct list_head *net_list)
LIST_HEAD(list);
rtnl_lock();
- list_for_each_entry(net, net_list, exit_list)
- unregister_nexthop_notifier(net, &vxlan_nexthop_notifier_block);
+ list_for_each_entry(net, net_list, exit_list) {
+ struct vxlan_net *vn = net_generic(net, vxlan_net_id);
+
+ unregister_nexthop_notifier(net, &vn->nexthop_notifier_block);
+ }
list_for_each_entry(net, net_list, exit_list)
vxlan_destroy_tunnels(net, &list);
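
The vxlan change above replaces one module-global notifier_block with a block embedded in each struct vxlan_net, registered from the pernet init path; the callback now also receives a struct nh_notifier_info and looks the nexthop up by ID itself. A hedged sketch of the per-namespace notifier shape (foo_* names are invented; the register call is subsystem-specific, here register_nexthop_notifier(net, &fn->nb, NULL) as in the hunks above):

#include <linux/kernel.h>
#include <linux/notifier.h>
#include <linux/printk.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>

struct foo_net {
	struct notifier_block nb;	/* one notifier per namespace */
};

static unsigned int foo_net_id;

static int foo_event(struct notifier_block *nb, unsigned long event,
		     void *ptr)
{
	/* recover the owning per-netns state without any globals */
	struct foo_net *fn = container_of(nb, struct foo_net, nb);

	pr_debug("foo: event %lu for pernet state %p\n", event, fn);
	return NOTIFY_DONE;
}

static int __net_init foo_init_net(struct net *net)
{
	struct foo_net *fn = net_generic(net, foo_net_id);

	fn->nb.notifier_call = foo_event;
	/* register &fn->nb with the subsystem here, and unregister it
	 * in the matching pernet exit handler, as the vxlan hunks do
	 */
	return 0;
}

Embedding the block in the per-netns state is what lets unregister_nexthop_notifier() tear down exactly the callback that namespace registered.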
diff --git a/drivers/net/wan/Kconfig b/drivers/net/wan/Kconfig
index 39e5ab261d7c..4029fde71a9e 100644
--- a/drivers/net/wan/Kconfig
+++ b/drivers/net/wan/Kconfig
@@ -321,51 +321,6 @@ config IXP4XX_HSS
Say Y here if you want to use built-in HSS ports
on IXP4xx processor.
-config DLCI
- tristate "Frame Relay DLCI support"
- help
- Support for the Frame Relay protocol.
-
- Frame Relay is a fast low-cost way to connect to a remote Internet
- access provider or to form a private wide area network. The one
- physical line from your box to the local "switch" (i.e. the entry
- point to the Frame Relay network, usually at the phone company) can
- carry several logical point-to-point connections to other computers
- connected to the Frame Relay network. For a general explanation of
- the protocol, check out <http://www.mplsforum.org/>.
-
- To use frame relay, you need supporting hardware (called FRAD) and
- certain programs from the net-tools package as explained in
- <file:Documentation/networking/framerelay.rst>.
-
- To compile this driver as a module, choose M here: the
- module will be called dlci.
-
-config DLCI_MAX
- int "Max DLCI per device"
- depends on DLCI
- default "8"
- help
- How many logical point-to-point frame relay connections (the
- identifiers of which are called DCLIs) should be handled by each
- of your hardware frame relay access devices.
-
- Go with the default.
-
-config SDLA
- tristate "SDLA (Sangoma S502/S508) support"
- depends on DLCI && ISA
- help
- Driver for the Sangoma S502A, S502E, and S508 Frame Relay Access
- Devices.
-
- These are multi-protocol cards, but only Frame Relay is supported
- by the driver at this time. Please read
- <file:Documentation/networking/framerelay.rst>.
-
- To compile this driver as a module, choose M here: the
- module will be called sdla.
-
# X.25 network drivers
config LAPBETHER
tristate "LAPB over Ethernet driver"
@@ -383,21 +338,6 @@ config LAPBETHER
If unsure, say N.
-config X25_ASY
- tristate "X.25 async driver"
- depends on LAPB && X25 && TTY
- help
- Send and receive X.25 frames over regular asynchronous serial
- lines such as telephone lines equipped with ordinary modems.
-
- Experts should note that this driver doesn't currently comply with
- the asynchronous HDLS framing protocols in CCITT recommendation X.25.
-
- To compile this driver as a module, choose M here: the
- module will be called x25_asy.
-
- If unsure, say N.
-
config SBNI
tristate "Granch SBNI12 Leased Line adapter support"
depends on X86
diff --git a/drivers/net/wan/Makefile b/drivers/net/wan/Makefile
index 380271a011e4..081666c36ca2 100644
--- a/drivers/net/wan/Makefile
+++ b/drivers/net/wan/Makefile
@@ -18,12 +18,9 @@ obj-$(CONFIG_HOSTESS_SV11) += z85230.o hostess_sv11.o
obj-$(CONFIG_SEALEVEL_4021) += z85230.o sealevel.o
obj-$(CONFIG_COSA) += cosa.o
obj-$(CONFIG_FARSYNC) += farsync.o
-obj-$(CONFIG_X25_ASY) += x25_asy.o
obj-$(CONFIG_LANMEDIA) += lmc/
-obj-$(CONFIG_DLCI) += dlci.o
-obj-$(CONFIG_SDLA) += sdla.o
obj-$(CONFIG_LAPBETHER) += lapbether.o
obj-$(CONFIG_SBNI) += sbni.o
obj-$(CONFIG_N2) += n2.o
diff --git a/drivers/net/wan/dlci.c b/drivers/net/wan/dlci.c
deleted file mode 100644
index 3ca4daf63389..000000000000
--- a/drivers/net/wan/dlci.c
+++ /dev/null
@@ -1,541 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * DLCI Implementation of Frame Relay protocol for Linux, according to
- * RFC 1490. This generic device provides en/decapsulation for an
- * underlying hardware driver. Routes & IPs are assigned to these
- * interfaces. Requires 'dlcicfg' program to create usable
- * interfaces, the initial one, 'dlci' is for IOCTL use only.
- *
- * Version: @(#)dlci.c 0.35 4 Jan 1997
- *
- * Author: Mike McLagan <mike.mclagan@linux.org>
- *
- * Changes:
- *
- * 0.15 Mike Mclagan Packet freeing, bug in kmalloc call
- * DLCI_RET handling
- * 0.20 Mike McLagan More conservative on which packets
- * are returned for retry and which are
- * are dropped. If DLCI_RET_DROP is
- * returned from the FRAD, the packet is
- * sent back to Linux for re-transmission
- * 0.25 Mike McLagan Converted to use SIOC IOCTL calls
- * 0.30 Jim Freeman Fixed to allow IPX traffic
- * 0.35 Michael Elizabeth Fixed incorrect memcpy_fromfs
- */
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/types.h>
-#include <linux/fcntl.h>
-#include <linux/interrupt.h>
-#include <linux/ptrace.h>
-#include <linux/ioport.h>
-#include <linux/in.h>
-#include <linux/init.h>
-#include <linux/slab.h>
-#include <linux/string.h>
-#include <linux/errno.h>
-#include <linux/netdevice.h>
-#include <linux/skbuff.h>
-#include <linux/if_arp.h>
-#include <linux/if_frad.h>
-#include <linux/bitops.h>
-
-#include <net/sock.h>
-
-#include <asm/io.h>
-#include <asm/dma.h>
-#include <linux/uaccess.h>
-
-static const char version[] = "DLCI driver v0.35, 4 Jan 1997, mike.mclagan@linux.org";
-
-static LIST_HEAD(dlci_devs);
-
-static void dlci_setup(struct net_device *);
-
-/*
- * these encapsulate the RFC 1490 requirements as well as
- * deal with packet transmission and reception, working with
- * the upper network layers
- */
-
-static int dlci_header(struct sk_buff *skb, struct net_device *dev,
- unsigned short type, const void *daddr,
- const void *saddr, unsigned len)
-{
- struct frhdr hdr;
- unsigned int hlen;
- char *dest;
-
- hdr.control = FRAD_I_UI;
- switch (type)
- {
- case ETH_P_IP:
- hdr.IP_NLPID = FRAD_P_IP;
- hlen = sizeof(hdr.control) + sizeof(hdr.IP_NLPID);
- break;
-
- /* feel free to add other types, if necessary */
-
- default:
- hdr.pad = FRAD_P_PADDING;
- hdr.NLPID = FRAD_P_SNAP;
- memset(hdr.OUI, 0, sizeof(hdr.OUI));
- hdr.PID = htons(type);
- hlen = sizeof(hdr);
- break;
- }
-
- dest = skb_push(skb, hlen);
- if (!dest)
- return 0;
-
- memcpy(dest, &hdr, hlen);
-
- return hlen;
-}
-
-static void dlci_receive(struct sk_buff *skb, struct net_device *dev)
-{
- struct frhdr *hdr;
- int process, header;
-
- if (!pskb_may_pull(skb, sizeof(*hdr))) {
- netdev_notice(dev, "invalid data no header\n");
- dev->stats.rx_errors++;
- kfree_skb(skb);
- return;
- }
-
- hdr = (struct frhdr *) skb->data;
- process = 0;
- header = 0;
- skb->dev = dev;
-
- if (hdr->control != FRAD_I_UI)
- {
- netdev_notice(dev, "Invalid header flag 0x%02X\n",
- hdr->control);
- dev->stats.rx_errors++;
- }
- else
- switch (hdr->IP_NLPID)
- {
- case FRAD_P_PADDING:
- if (hdr->NLPID != FRAD_P_SNAP)
- {
- netdev_notice(dev, "Unsupported NLPID 0x%02X\n",
- hdr->NLPID);
- dev->stats.rx_errors++;
- break;
- }
-
- if (hdr->OUI[0] + hdr->OUI[1] + hdr->OUI[2] != 0)
- {
- netdev_notice(dev, "Unsupported organizationally unique identifier 0x%02X-%02X-%02X\n",
- hdr->OUI[0],
- hdr->OUI[1],
- hdr->OUI[2]);
- dev->stats.rx_errors++;
- break;
- }
-
- /* at this point, it's an EtherType frame */
- header = sizeof(struct frhdr);
- /* Already in network order ! */
- skb->protocol = hdr->PID;
- process = 1;
- break;
-
- case FRAD_P_IP:
- header = sizeof(hdr->control) + sizeof(hdr->IP_NLPID);
- skb->protocol = htons(ETH_P_IP);
- process = 1;
- break;
-
- case FRAD_P_SNAP:
- case FRAD_P_Q933:
- case FRAD_P_CLNP:
- netdev_notice(dev, "Unsupported NLPID 0x%02X\n",
- hdr->pad);
- dev->stats.rx_errors++;
- break;
-
- default:
- netdev_notice(dev, "Invalid pad byte 0x%02X\n",
- hdr->pad);
- dev->stats.rx_errors++;
- break;
- }
-
- if (process)
- {
- /* we've set up the protocol, so discard the header */
- skb_reset_mac_header(skb);
- skb_pull(skb, header);
- dev->stats.rx_bytes += skb->len;
- netif_rx(skb);
- dev->stats.rx_packets++;
- }
- else
- dev_kfree_skb(skb);
-}
-
-static netdev_tx_t dlci_transmit(struct sk_buff *skb, struct net_device *dev)
-{
- struct dlci_local *dlp = netdev_priv(dev);
-
- if (skb) {
- struct netdev_queue *txq = skb_get_tx_queue(dev, skb);
- netdev_start_xmit(skb, dlp->slave, txq, false);
- }
- return NETDEV_TX_OK;
-}
-
-static int dlci_config(struct net_device *dev, struct dlci_conf __user *conf, int get)
-{
- struct dlci_conf config;
- struct dlci_local *dlp;
- struct frad_local *flp;
- int err;
-
- dlp = netdev_priv(dev);
-
- flp = netdev_priv(dlp->slave);
-
- if (!get)
- {
- if (copy_from_user(&config, conf, sizeof(struct dlci_conf)))
- return -EFAULT;
- if (config.flags & ~DLCI_VALID_FLAGS)
- return -EINVAL;
- memcpy(&dlp->config, &config, sizeof(struct dlci_conf));
- dlp->configured = 1;
- }
-
- err = (*flp->dlci_conf)(dlp->slave, dev, get);
- if (err)
- return err;
-
- if (get)
- {
- if (copy_to_user(conf, &dlp->config, sizeof(struct dlci_conf)))
- return -EFAULT;
- }
-
- return 0;
-}
-
-static int dlci_dev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
-{
- struct dlci_local *dlp;
-
- if (!capable(CAP_NET_ADMIN))
- return -EPERM;
-
- dlp = netdev_priv(dev);
-
- switch (cmd)
- {
- case DLCI_GET_SLAVE:
- if (!*(short *)(dev->dev_addr))
- return -EINVAL;
-
- strncpy(ifr->ifr_slave, dlp->slave->name, sizeof(ifr->ifr_slave));
- break;
-
- case DLCI_GET_CONF:
- case DLCI_SET_CONF:
- if (!*(short *)(dev->dev_addr))
- return -EINVAL;
-
- return dlci_config(dev, ifr->ifr_data, cmd == DLCI_GET_CONF);
-
- default:
- return -EOPNOTSUPP;
- }
- return 0;
-}
-
-static int dlci_change_mtu(struct net_device *dev, int new_mtu)
-{
- struct dlci_local *dlp = netdev_priv(dev);
-
- return dev_set_mtu(dlp->slave, new_mtu);
-}
-
-static int dlci_open(struct net_device *dev)
-{
- struct dlci_local *dlp;
- struct frad_local *flp;
- int err;
-
- dlp = netdev_priv(dev);
-
- if (!*(short *)(dev->dev_addr))
- return -EINVAL;
-
- if (!netif_running(dlp->slave))
- return -ENOTCONN;
-
- flp = netdev_priv(dlp->slave);
- err = (*flp->activate)(dlp->slave, dev);
- if (err)
- return err;
-
- netif_start_queue(dev);
-
- return 0;
-}
-
-static int dlci_close(struct net_device *dev)
-{
- struct dlci_local *dlp;
- struct frad_local *flp;
-
- netif_stop_queue(dev);
-
- dlp = netdev_priv(dev);
-
- flp = netdev_priv(dlp->slave);
- (*flp->deactivate)(dlp->slave, dev);
-
- return 0;
-}
-
-static int dlci_add(struct dlci_add *dlci)
-{
- struct net_device *master, *slave;
- struct dlci_local *dlp;
- struct frad_local *flp;
- int err = -EINVAL;
-
-
- /* validate slave device */
- slave = dev_get_by_name(&init_net, dlci->devname);
- if (!slave)
- return -ENODEV;
-
- if (slave->type != ARPHRD_FRAD || netdev_priv(slave) == NULL)
- goto err1;
-
- /* create device name */
- master = alloc_netdev(sizeof(struct dlci_local), "dlci%d",
- NET_NAME_UNKNOWN, dlci_setup);
- if (!master) {
- err = -ENOMEM;
- goto err1;
- }
-
- /* make sure same slave not already registered */
- rtnl_lock();
- list_for_each_entry(dlp, &dlci_devs, list) {
- if (dlp->slave == slave) {
- err = -EBUSY;
- goto err2;
- }
- }
-
- *(short *)(master->dev_addr) = dlci->dlci;
-
- dlp = netdev_priv(master);
- dlp->slave = slave;
- dlp->master = master;
-
- flp = netdev_priv(slave);
- err = (*flp->assoc)(slave, master);
- if (err < 0)
- goto err2;
-
- err = register_netdevice(master);
- if (err < 0)
- goto err2;
-
- strcpy(dlci->devname, master->name);
-
- list_add(&dlp->list, &dlci_devs);
- rtnl_unlock();
-
- return 0;
-
- err2:
- rtnl_unlock();
- free_netdev(master);
- err1:
- dev_put(slave);
- return err;
-}
-
-static int dlci_del(struct dlci_add *dlci)
-{
- struct dlci_local *dlp;
- struct frad_local *flp;
- struct net_device *master, *slave;
- int err;
- bool found = false;
-
- rtnl_lock();
-
- /* validate slave device */
- master = __dev_get_by_name(&init_net, dlci->devname);
- if (!master) {
- err = -ENODEV;
- goto out;
- }
-
- list_for_each_entry(dlp, &dlci_devs, list) {
- if (dlp->master == master) {
- found = true;
- break;
- }
- }
- if (!found) {
- err = -ENODEV;
- goto out;
- }
-
- if (netif_running(master)) {
- err = -EBUSY;
- goto out;
- }
-
- dlp = netdev_priv(master);
- slave = dlp->slave;
- flp = netdev_priv(slave);
-
- err = (*flp->deassoc)(slave, master);
- if (!err) {
- list_del(&dlp->list);
-
- unregister_netdevice(master);
-
- dev_put(slave);
- }
-out:
- rtnl_unlock();
- return err;
-}
-
-static int dlci_ioctl(unsigned int cmd, void __user *arg)
-{
- struct dlci_add add;
- int err;
-
- if (!capable(CAP_NET_ADMIN))
- return -EPERM;
-
- if (copy_from_user(&add, arg, sizeof(struct dlci_add)))
- return -EFAULT;
-
- switch (cmd)
- {
- case SIOCADDDLCI:
- err = dlci_add(&add);
-
- if (!err)
- if (copy_to_user(arg, &add, sizeof(struct dlci_add)))
- return -EFAULT;
- break;
-
- case SIOCDELDLCI:
- err = dlci_del(&add);
- break;
-
- default:
- err = -EINVAL;
- }
-
- return err;
-}
-
-static const struct header_ops dlci_header_ops = {
- .create = dlci_header,
-};
-
-static const struct net_device_ops dlci_netdev_ops = {
- .ndo_open = dlci_open,
- .ndo_stop = dlci_close,
- .ndo_do_ioctl = dlci_dev_ioctl,
- .ndo_start_xmit = dlci_transmit,
- .ndo_change_mtu = dlci_change_mtu,
-};
-
-static void dlci_setup(struct net_device *dev)
-{
- struct dlci_local *dlp = netdev_priv(dev);
-
- dev->flags = 0;
- dev->header_ops = &dlci_header_ops;
- dev->netdev_ops = &dlci_netdev_ops;
- dev->needs_free_netdev = true;
-
- dlp->receive = dlci_receive;
-
- dev->type = ARPHRD_DLCI;
- dev->hard_header_len = sizeof(struct frhdr);
- dev->addr_len = sizeof(short);
-
-}
-
-/* if slave is unregistering, then cleanup master */
-static int dlci_dev_event(struct notifier_block *unused,
- unsigned long event, void *ptr)
-{
- struct net_device *dev = netdev_notifier_info_to_dev(ptr);
-
- if (dev_net(dev) != &init_net)
- return NOTIFY_DONE;
-
- if (event == NETDEV_UNREGISTER) {
- struct dlci_local *dlp;
-
- list_for_each_entry(dlp, &dlci_devs, list) {
- if (dlp->slave == dev) {
- list_del(&dlp->list);
- unregister_netdevice(dlp->master);
- dev_put(dlp->slave);
- break;
- }
- }
- }
- return NOTIFY_DONE;
-}
-
-static struct notifier_block dlci_notifier = {
- .notifier_call = dlci_dev_event,
-};
-
-static int __init init_dlci(void)
-{
- dlci_ioctl_set(dlci_ioctl);
- register_netdevice_notifier(&dlci_notifier);
-
- printk("%s.\n", version);
-
- return 0;
-}
-
-static void __exit dlci_exit(void)
-{
- struct dlci_local *dlp, *nxt;
-
- dlci_ioctl_set(NULL);
- unregister_netdevice_notifier(&dlci_notifier);
-
- rtnl_lock();
- list_for_each_entry_safe(dlp, nxt, &dlci_devs, list) {
- unregister_netdevice(dlp->master);
- dev_put(dlp->slave);
- }
- rtnl_unlock();
-}
-
-module_init(init_dlci);
-module_exit(dlci_exit);
-
-MODULE_AUTHOR("Mike McLagan");
-MODULE_DESCRIPTION("Frame Relay DLCI layer");
-MODULE_LICENSE("GPL");
diff --git a/drivers/net/wan/hdlc_fr.c b/drivers/net/wan/hdlc_fr.c
index 409e5a7ad8e2..0720f5f92caa 100644
--- a/drivers/net/wan/hdlc_fr.c
+++ b/drivers/net/wan/hdlc_fr.c
@@ -871,6 +871,45 @@ static int fr_lmi_recv(struct net_device *dev, struct sk_buff *skb)
return 0;
}
+static int fr_snap_parse(struct sk_buff *skb, struct pvc_device *pvc)
+{
+ /* OUI 00-00-00 indicates an Ethertype follows */
+ if (skb->data[0] == 0x00 &&
+ skb->data[1] == 0x00 &&
+ skb->data[2] == 0x00) {
+ if (!pvc->main)
+ return -1;
+ skb->dev = pvc->main;
+ skb->protocol = *(__be16 *)(skb->data + 3); /* Ethertype */
+ skb_pull(skb, 5);
+ skb_reset_mac_header(skb);
+ return 0;
+
+ /* OUI 00-80-C2 stands for the 802.1 organization */
+ } else if (skb->data[0] == 0x00 &&
+ skb->data[1] == 0x80 &&
+ skb->data[2] == 0xC2) {
+ /* PID 00-07 stands for Ethernet frames without FCS */
+ if (skb->data[3] == 0x00 &&
+ skb->data[4] == 0x07) {
+ if (!pvc->ether)
+ return -1;
+ skb_pull(skb, 5);
+ if (skb->len < ETH_HLEN)
+ return -1;
+ skb->protocol = eth_type_trans(skb, pvc->ether);
+ return 0;
+
+ /* PID unsupported */
+ } else {
+ return -1;
+ }
+
+ /* OUI unsupported */
+ } else {
+ return -1;
+ }
+}
static int fr_rx(struct sk_buff *skb)
{
@@ -880,9 +919,9 @@ static int fr_rx(struct sk_buff *skb)
u8 *data = skb->data;
u16 dlci;
struct pvc_device *pvc;
- struct net_device *dev = NULL;
+ struct net_device *dev;
- if (skb->len <= 4 || fh->ea1 || data[2] != FR_UI)
+ if (skb->len < 4 || fh->ea1 || !fh->ea2 || data[2] != FR_UI)
goto rx_error;
dlci = q922_to_dlci(skb->data);
@@ -904,8 +943,7 @@ static int fr_rx(struct sk_buff *skb)
netdev_info(frad, "No PVC for received frame's DLCI %d\n",
dlci);
#endif
- dev_kfree_skb_any(skb);
- return NET_RX_DROP;
+ goto rx_drop;
}
if (pvc->state.fecn != fh->fecn) {
@@ -931,63 +969,51 @@ static int fr_rx(struct sk_buff *skb)
}
if (data[3] == NLPID_IP) {
+ if (!pvc->main)
+ goto rx_drop;
skb_pull(skb, 4); /* Remove 4-byte header (hdr, UI, NLPID) */
- dev = pvc->main;
+ skb->dev = pvc->main;
skb->protocol = htons(ETH_P_IP);
+ skb_reset_mac_header(skb);
} else if (data[3] == NLPID_IPV6) {
+ if (!pvc->main)
+ goto rx_drop;
skb_pull(skb, 4); /* Remove 4-byte header (hdr, UI, NLPID) */
- dev = pvc->main;
+ skb->dev = pvc->main;
skb->protocol = htons(ETH_P_IPV6);
+ skb_reset_mac_header(skb);
- } else if (skb->len > 10 && data[3] == FR_PAD &&
- data[4] == NLPID_SNAP && data[5] == FR_PAD) {
- u16 oui = ntohs(*(__be16*)(data + 6));
- u16 pid = ntohs(*(__be16*)(data + 8));
- skb_pull(skb, 10);
-
- switch ((((u32)oui) << 16) | pid) {
- case ETH_P_ARP: /* routed frame with SNAP */
- case ETH_P_IPX:
- case ETH_P_IP: /* a long variant */
- case ETH_P_IPV6:
- dev = pvc->main;
- skb->protocol = htons(pid);
- break;
-
- case 0x80C20007: /* bridged Ethernet frame */
- if ((dev = pvc->ether) != NULL)
- skb->protocol = eth_type_trans(skb, dev);
- break;
-
- default:
- netdev_info(frad, "Unsupported protocol, OUI=%x PID=%x\n",
- oui, pid);
- dev_kfree_skb_any(skb);
- return NET_RX_DROP;
+ } else if (data[3] == FR_PAD) {
+ if (skb->len < 5)
+ goto rx_error;
+ if (data[4] == NLPID_SNAP) { /* A SNAP header follows */
+ skb_pull(skb, 5);
+ if (skb->len < 5) /* Incomplete SNAP header */
+ goto rx_error;
+ if (fr_snap_parse(skb, pvc))
+ goto rx_drop;
+ } else {
+ goto rx_drop;
}
+
} else {
netdev_info(frad, "Unsupported protocol, NLPID=%x length=%i\n",
data[3], skb->len);
- dev_kfree_skb_any(skb);
- return NET_RX_DROP;
+ goto rx_drop;
}
- if (dev) {
- dev->stats.rx_packets++; /* PVC traffic */
- dev->stats.rx_bytes += skb->len;
- if (pvc->state.becn)
- dev->stats.rx_compressed++;
- skb->dev = dev;
- netif_rx(skb);
- return NET_RX_SUCCESS;
- } else {
- dev_kfree_skb_any(skb);
- return NET_RX_DROP;
- }
+ dev = skb->dev;
+ dev->stats.rx_packets++; /* PVC traffic */
+ dev->stats.rx_bytes += skb->len;
+ if (pvc->state.becn)
+ dev->stats.rx_compressed++;
+ netif_rx(skb);
+ return NET_RX_SUCCESS;
- rx_error:
+rx_error:
frad->stats.rx_errors++; /* Mark error */
+rx_drop:
dev_kfree_skb_any(skb);
return NET_RX_DROP;
}
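
The reworked fr_rx() above tightens the header validation: a frame must carry at least the 2-byte Q.922 address (extended-address bit clear in the first octet, set in the second), the UI control byte, and an NLPID before it is dispatched. A small standalone check mirroring that logic (hypothetical helper, plain C):

#include <stdbool.h>
#include <stdint.h>

#define FR_UI 0x03	/* unnumbered-information control field */

static bool fr_header_ok(const uint8_t *data, unsigned int len)
{
	if (len < 4)			/* addr(2) + control + NLPID */
		return false;
	if (data[0] & 0x01)		/* EA must be 0: address continues */
		return false;
	if (!(data[1] & 0x01))		/* EA must be 1: address ends here */
		return false;
	return data[2] == FR_UI;
}

This corresponds to the new "skb->len < 4 || fh->ea1 || !fh->ea2 || data[2] != FR_UI" test, where ea1 and ea2 are the extended-address bits of the two Q.922 address octets.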
diff --git a/drivers/net/wan/lmc/lmc_main.c b/drivers/net/wan/lmc/lmc_main.c
index 36600b0a0ab0..93c7e8502845 100644
--- a/drivers/net/wan/lmc/lmc_main.c
+++ b/drivers/net/wan/lmc/lmc_main.c
@@ -353,9 +353,8 @@ int lmc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) /*fold00*/
switch(xc.command){
case lmc_xilinx_reset: /*fold02*/
{
- u16 mii;
spin_lock_irqsave(&sc->lmc_lock, flags);
- mii = lmc_mii_readreg (sc, 0, 16);
+ lmc_mii_readreg (sc, 0, 16);
/*
* Make all of them 0 and make input
@@ -424,10 +423,9 @@ int lmc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) /*fold00*/
break;
case lmc_xilinx_load_prom: /*fold02*/
{
- u16 mii;
int timeout = 500000;
spin_lock_irqsave(&sc->lmc_lock, flags);
- mii = lmc_mii_readreg (sc, 0, 16);
+ lmc_mii_readreg (sc, 0, 16);
/*
* Make all of them 0 and make input
@@ -1185,7 +1183,6 @@ static irqreturn_t lmc_interrupt (int irq, void *dev_instance) /*fold00*/
int i;
s32 stat;
unsigned int badtx;
- u32 firstcsr;
int max_work = LMC_RXDESCS;
int handled = 0;
@@ -1203,8 +1200,6 @@ static irqreturn_t lmc_interrupt (int irq, void *dev_instance) /*fold00*/
goto lmc_int_fail_out;
}
- firstcsr = csr;
-
/* always go through this loop at least once */
while (csr & sc->lmc_intrmask) {
handled = 1;
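
The lmc_main.c hunks above remove locals (mii, firstcsr) whose values were stored but never read again; note that the lmc_mii_readreg() calls themselves are kept, since on this hardware the read has side effects even when its result is discarded. A small sketch of the idea, with a hypothetical read-to-clear status register standing in for the MII access:

#include <stdio.h>

static unsigned short status = 0x8001;

/* Hypothetical read-to-clear register: the read itself has a side
 * effect, so it must survive even when the returned value is unused.
 */
static unsigned short read_status(void)
{
	unsigned short v = status;

	status = 0;
	return v;
}

int main(void)
{
	/* Before: u16 mii = read_status(); with mii never read again,
	 * which -Wunused-but-set-variable flags. After: keep the
	 * side-effecting call, drop only the dead store.
	 */
	read_status();
	printf("status after read-to-clear: 0x%04X\n", status);
	return 0;
}
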
diff --git a/drivers/net/wan/pci200syn.c b/drivers/net/wan/pci200syn.c
index d0062224b216..ba5cc0c53833 100644
--- a/drivers/net/wan/pci200syn.c
+++ b/drivers/net/wan/pci200syn.c
@@ -92,7 +92,7 @@ typedef struct card_s {
#define get_port(card, port) (&card->ports[port])
-#define sca_flush(card) (sca_in(IER0, card));
+#define sca_flush(card) (sca_in(IER0, card))
static inline void new_memcpy_toio(char __iomem *dest, char *src, int length)
{
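
The pci200syn.c change above drops a stray semicolon that was baked into the sca_flush() macro definition. With the semicolon inside the macro, a use such as sca_flush(card); expands to two statements, which breaks compilation when the macro is the sole body of an unbraced if branch followed by else. A self-contained illustration, using hypothetical FLUSH_GOOD()/FLUSH_BAD() macros:

#include <stdio.h>

#define FLUSH_BAD(x)	(printf("flush %d\n", (x)));	/* ';' baked in */
#define FLUSH_GOOD(x)	(printf("flush %d\n", (x)))	/* caller adds ';' */

int main(void)
{
	int on = 0;

	/* With FLUSH_BAD(1); here, the macro's own semicolon would end
	 * the if-statement early and leave the else with no matching
	 * if, so this would not compile.
	 */
	if (on)
		FLUSH_GOOD(1);
	else
		FLUSH_GOOD(2);
	return 0;
}
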
diff --git a/drivers/net/wan/sdla.c b/drivers/net/wan/sdla.c
deleted file mode 100644
index bc2c1c7fb1a4..000000000000
--- a/drivers/net/wan/sdla.c
+++ /dev/null
@@ -1,1655 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * SDLA An implementation of a driver for the Sangoma S502/S508 series
- * multi-protocol PC interface card. Initial offering is with
- * the DLCI driver, providing Frame Relay support for linux.
- *
- * Global definitions for the Frame relay interface.
- *
- * Version: @(#)sdla.c 0.30 12 Sep 1996
- *
- * Credits: Sangoma Technologies, for the use of 2 cards for an extended
- * period of time.
- * David Mandelstam <dm@sangoma.com> for getting me started on
- * this project, and incentive to complete it.
- * Gene Kozen <74604.152@compuserve.com> for providing me with
- * important information about the cards.
- *
- * Author: Mike McLagan <mike.mclagan@linux.org>
- *
- * Changes:
- * 0.15 Mike McLagan Improved error handling, packet dropping
- * 0.20 Mike McLagan New transmit/receive flags for config
- * If in FR mode, don't accept packets from
- * non DLCI devices.
- * 0.25 Mike McLagan Fixed problem with rejecting packets
- * from non DLCI devices.
- * 0.30 Mike McLagan Fixed kernel panic when used with modified
- * ifconfig
- */
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/types.h>
-#include <linux/fcntl.h>
-#include <linux/interrupt.h>
-#include <linux/ptrace.h>
-#include <linux/ioport.h>
-#include <linux/in.h>
-#include <linux/slab.h>
-#include <linux/string.h>
-#include <linux/timer.h>
-#include <linux/errno.h>
-#include <linux/init.h>
-#include <linux/netdevice.h>
-#include <linux/skbuff.h>
-#include <linux/if_arp.h>
-#include <linux/if_frad.h>
-#include <linux/sdla.h>
-#include <linux/bitops.h>
-
-#include <asm/io.h>
-#include <asm/dma.h>
-#include <linux/uaccess.h>
-
-static const char* version = "SDLA driver v0.30, 12 Sep 1996, mike.mclagan@linux.org";
-
-static unsigned int valid_port[] = { 0x250, 0x270, 0x280, 0x300, 0x350, 0x360, 0x380, 0x390};
-
-static unsigned int valid_mem[] = {
- 0xA0000, 0xA2000, 0xA4000, 0xA6000, 0xA8000, 0xAA000, 0xAC000, 0xAE000,
- 0xB0000, 0xB2000, 0xB4000, 0xB6000, 0xB8000, 0xBA000, 0xBC000, 0xBE000,
- 0xC0000, 0xC2000, 0xC4000, 0xC6000, 0xC8000, 0xCA000, 0xCC000, 0xCE000,
- 0xD0000, 0xD2000, 0xD4000, 0xD6000, 0xD8000, 0xDA000, 0xDC000, 0xDE000,
- 0xE0000, 0xE2000, 0xE4000, 0xE6000, 0xE8000, 0xEA000, 0xEC000, 0xEE000};
-
-static DEFINE_SPINLOCK(sdla_lock);
-
-/*********************************************************
- *
- * these are the core routines that access the card itself
- *
- *********************************************************/
-
-#define SDLA_WINDOW(dev,addr) outb((((addr) >> 13) & 0x1F), (dev)->base_addr + SDLA_REG_Z80_WINDOW)
-
-static void __sdla_read(struct net_device *dev, int addr, void *buf, short len)
-{
- char *temp;
- const void *base;
- int offset, bytes;
-
- temp = buf;
- while(len)
- {
- offset = addr & SDLA_ADDR_MASK;
- bytes = offset + len > SDLA_WINDOW_SIZE ? SDLA_WINDOW_SIZE - offset : len;
- base = (const void *) (dev->mem_start + offset);
-
- SDLA_WINDOW(dev, addr);
- memcpy(temp, base, bytes);
-
- addr += bytes;
- temp += bytes;
- len -= bytes;
- }
-}
-
-static void sdla_read(struct net_device *dev, int addr, void *buf, short len)
-{
- unsigned long flags;
- spin_lock_irqsave(&sdla_lock, flags);
- __sdla_read(dev, addr, buf, len);
- spin_unlock_irqrestore(&sdla_lock, flags);
-}
-
-static void __sdla_write(struct net_device *dev, int addr,
- const void *buf, short len)
-{
- const char *temp;
- void *base;
- int offset, bytes;
-
- temp = buf;
- while(len)
- {
- offset = addr & SDLA_ADDR_MASK;
- bytes = offset + len > SDLA_WINDOW_SIZE ? SDLA_WINDOW_SIZE - offset : len;
- base = (void *) (dev->mem_start + offset);
-
- SDLA_WINDOW(dev, addr);
- memcpy(base, temp, bytes);
-
- addr += bytes;
- temp += bytes;
- len -= bytes;
- }
-}
-
-static void sdla_write(struct net_device *dev, int addr,
- const void *buf, short len)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&sdla_lock, flags);
- __sdla_write(dev, addr, buf, len);
- spin_unlock_irqrestore(&sdla_lock, flags);
-}
-
-
-static void sdla_clear(struct net_device *dev)
-{
- unsigned long flags;
- char *base;
- int len, addr, bytes;
-
- len = 65536;
- addr = 0;
- bytes = SDLA_WINDOW_SIZE;
- base = (void *) dev->mem_start;
-
- spin_lock_irqsave(&sdla_lock, flags);
- while(len)
- {
- SDLA_WINDOW(dev, addr);
- memset(base, 0, bytes);
-
- addr += bytes;
- len -= bytes;
- }
- spin_unlock_irqrestore(&sdla_lock, flags);
-
-}
-
-static char sdla_byte(struct net_device *dev, int addr)
-{
- unsigned long flags;
- char byte, *temp;
-
- temp = (void *) (dev->mem_start + (addr & SDLA_ADDR_MASK));
-
- spin_lock_irqsave(&sdla_lock, flags);
- SDLA_WINDOW(dev, addr);
- byte = *temp;
- spin_unlock_irqrestore(&sdla_lock, flags);
-
- return byte;
-}
-
-static void sdla_stop(struct net_device *dev)
-{
- struct frad_local *flp;
-
- flp = netdev_priv(dev);
- switch(flp->type)
- {
- case SDLA_S502A:
- outb(SDLA_S502A_HALT, dev->base_addr + SDLA_REG_CONTROL);
- flp->state = SDLA_HALT;
- break;
- case SDLA_S502E:
- outb(SDLA_HALT, dev->base_addr + SDLA_REG_Z80_CONTROL);
- outb(SDLA_S502E_ENABLE, dev->base_addr + SDLA_REG_CONTROL);
- flp->state = SDLA_S502E_ENABLE;
- break;
- case SDLA_S507:
- flp->state &= ~SDLA_CPUEN;
- outb(flp->state, dev->base_addr + SDLA_REG_CONTROL);
- break;
- case SDLA_S508:
- flp->state &= ~SDLA_CPUEN;
- outb(flp->state, dev->base_addr + SDLA_REG_CONTROL);
- break;
- }
-}
-
-static void sdla_start(struct net_device *dev)
-{
- struct frad_local *flp;
-
- flp = netdev_priv(dev);
- switch(flp->type)
- {
- case SDLA_S502A:
- outb(SDLA_S502A_NMI, dev->base_addr + SDLA_REG_CONTROL);
- outb(SDLA_S502A_START, dev->base_addr + SDLA_REG_CONTROL);
- flp->state = SDLA_S502A_START;
- break;
- case SDLA_S502E:
- outb(SDLA_S502E_CPUEN, dev->base_addr + SDLA_REG_Z80_CONTROL);
- outb(0x00, dev->base_addr + SDLA_REG_CONTROL);
- flp->state = 0;
- break;
- case SDLA_S507:
- flp->state |= SDLA_CPUEN;
- outb(flp->state, dev->base_addr + SDLA_REG_CONTROL);
- break;
- case SDLA_S508:
- flp->state |= SDLA_CPUEN;
- outb(flp->state, dev->base_addr + SDLA_REG_CONTROL);
- break;
- }
-}
-
-/****************************************************
- *
- * this is used for the S502A/E cards to determine
- * the speed of the onboard CPU. Calibration is
- * necessary for the Frame Relay code uploaded
- * later. Incorrect results cause timing problems
- * with link checks & status messages
- *
- ***************************************************/
-
-static int sdla_z80_poll(struct net_device *dev, int z80_addr, int jiffs, char resp1, char resp2)
-{
- unsigned long start, done, now;
- char resp, *temp;
-
- start = now = jiffies;
- done = jiffies + jiffs;
-
- temp = (void *)dev->mem_start;
- temp += z80_addr & SDLA_ADDR_MASK;
-
- resp = ~resp1;
- while (time_before(jiffies, done) && (resp != resp1) && (!resp2 || (resp != resp2)))
- {
- if (jiffies != now)
- {
- SDLA_WINDOW(dev, z80_addr);
- now = jiffies;
- resp = *temp;
- }
- }
- return time_before(jiffies, done) ? jiffies - start : -1;
-}
-
-/* constants for Z80 CPU speed */
-#define Z80_READY '1' /* Z80 is ready to begin */
-#define LOADER_READY '2' /* driver is ready to begin */
-#define Z80_SCC_OK '3' /* SCC is on board */
-#define Z80_SCC_BAD '4' /* SCC was not found */
-
-static int sdla_cpuspeed(struct net_device *dev, struct ifreq *ifr)
-{
- int jiffs;
- char data;
-
- sdla_start(dev);
- if (sdla_z80_poll(dev, 0, 3*HZ, Z80_READY, 0) < 0)
- return -EIO;
-
- data = LOADER_READY;
- sdla_write(dev, 0, &data, 1);
-
- if ((jiffs = sdla_z80_poll(dev, 0, 8*HZ, Z80_SCC_OK, Z80_SCC_BAD)) < 0)
- return -EIO;
-
- sdla_stop(dev);
- sdla_read(dev, 0, &data, 1);
-
- if (data == Z80_SCC_BAD)
- {
- printk("%s: SCC bad\n", dev->name);
- return -EIO;
- }
-
- if (data != Z80_SCC_OK)
- return -EINVAL;
-
- if (jiffs < 165)
- ifr->ifr_mtu = SDLA_CPU_16M;
- else if (jiffs < 220)
- ifr->ifr_mtu = SDLA_CPU_10M;
- else if (jiffs < 258)
- ifr->ifr_mtu = SDLA_CPU_8M;
- else if (jiffs < 357)
- ifr->ifr_mtu = SDLA_CPU_7M;
- else if (jiffs < 467)
- ifr->ifr_mtu = SDLA_CPU_5M;
- else
- ifr->ifr_mtu = SDLA_CPU_3M;
-
- return 0;
-}
-
-/************************************************
- *
- * Direct interaction with the Frame Relay code
- * starts here.
- *
- ************************************************/
-
-struct _dlci_stat
-{
- short dlci;
- char flags;
-} __packed;
-
-struct _frad_stat
-{
- char flags;
- struct _dlci_stat dlcis[SDLA_MAX_DLCI];
-};
-
-static void sdla_errors(struct net_device *dev, int cmd, int dlci, int ret, int len, void *data)
-{
- struct _dlci_stat *pstatus;
- short *pdlci;
- int i;
- char *state, line[30];
-
- switch (ret)
- {
- case SDLA_RET_MODEM:
- state = data;
- if (*state & SDLA_MODEM_DCD_LOW)
- netdev_info(dev, "Modem DCD unexpectedly low!\n");
- if (*state & SDLA_MODEM_CTS_LOW)
- netdev_info(dev, "Modem CTS unexpectedly low!\n");
- /* I should probably do something about this! */
- break;
-
- case SDLA_RET_CHANNEL_OFF:
- netdev_info(dev, "Channel became inoperative!\n");
- /* same here */
- break;
-
- case SDLA_RET_CHANNEL_ON:
- netdev_info(dev, "Channel became operative!\n");
- /* same here */
- break;
-
- case SDLA_RET_DLCI_STATUS:
- netdev_info(dev, "Status change reported by Access Node\n");
- len /= sizeof(struct _dlci_stat);
- for(pstatus = data, i=0;i < len;i++,pstatus++)
- {
- if (pstatus->flags & SDLA_DLCI_NEW)
- state = "new";
- else if (pstatus->flags & SDLA_DLCI_DELETED)
- state = "deleted";
- else if (pstatus->flags & SDLA_DLCI_ACTIVE)
- state = "active";
- else
- {
- sprintf(line, "unknown status: %02X", pstatus->flags);
- state = line;
- }
- netdev_info(dev, "DLCI %i: %s\n",
- pstatus->dlci, state);
- /* same here */
- }
- break;
-
- case SDLA_RET_DLCI_UNKNOWN:
- netdev_info(dev, "Received unknown DLCIs:");
- len /= sizeof(short);
- for(pdlci = data,i=0;i < len;i++,pdlci++)
- pr_cont(" %i", *pdlci);
- pr_cont("\n");
- break;
-
- case SDLA_RET_TIMEOUT:
- netdev_err(dev, "Command timed out!\n");
- break;
-
- case SDLA_RET_BUF_OVERSIZE:
- netdev_info(dev, "Bc/CIR overflow, acceptable size is %i\n",
- len);
- break;
-
- case SDLA_RET_BUF_TOO_BIG:
- netdev_info(dev, "Buffer size over specified max of %i\n",
- len);
- break;
-
- case SDLA_RET_CHANNEL_INACTIVE:
- case SDLA_RET_DLCI_INACTIVE:
- case SDLA_RET_CIR_OVERFLOW:
- case SDLA_RET_NO_BUFS:
- if (cmd == SDLA_INFORMATION_WRITE)
- break;
- fallthrough;
-
- default:
- netdev_dbg(dev, "Cmd 0x%02X generated return code 0x%02X\n",
- cmd, ret);
- /* Further processing could be done here */
- break;
- }
-}
-
-static int sdla_cmd(struct net_device *dev, int cmd, short dlci, short flags,
- void *inbuf, short inlen, void *outbuf, short *outlen)
-{
- static struct _frad_stat status;
- struct frad_local *flp;
- struct sdla_cmd *cmd_buf;
- unsigned long pflags;
- unsigned long jiffs;
- int ret, waiting, len;
- long window;
-
- flp = netdev_priv(dev);
- window = flp->type == SDLA_S508 ? SDLA_508_CMD_BUF : SDLA_502_CMD_BUF;
- cmd_buf = (struct sdla_cmd *)(dev->mem_start + (window & SDLA_ADDR_MASK));
- ret = 0;
- len = 0;
- jiffs = jiffies + HZ; /* 1 second is plenty */
-
- spin_lock_irqsave(&sdla_lock, pflags);
- SDLA_WINDOW(dev, window);
- cmd_buf->cmd = cmd;
- cmd_buf->dlci = dlci;
- cmd_buf->flags = flags;
-
- if (inbuf)
- memcpy(cmd_buf->data, inbuf, inlen);
-
- cmd_buf->length = inlen;
-
- cmd_buf->opp_flag = 1;
- spin_unlock_irqrestore(&sdla_lock, pflags);
-
- waiting = 1;
- len = 0;
- while (waiting && time_before_eq(jiffies, jiffs))
- {
- if (waiting++ % 3)
- {
- spin_lock_irqsave(&sdla_lock, pflags);
- SDLA_WINDOW(dev, window);
- waiting = ((volatile int)(cmd_buf->opp_flag));
- spin_unlock_irqrestore(&sdla_lock, pflags);
- }
- }
-
- if (!waiting)
- {
-
- spin_lock_irqsave(&sdla_lock, pflags);
- SDLA_WINDOW(dev, window);
- ret = cmd_buf->retval;
- len = cmd_buf->length;
- if (outbuf && outlen)
- {
- *outlen = *outlen >= len ? len : *outlen;
-
- if (*outlen)
- memcpy(outbuf, cmd_buf->data, *outlen);
- }
-
- /* This is a local copy that's used for error handling */
- if (ret)
- memcpy(&status, cmd_buf->data, len > sizeof(status) ? sizeof(status) : len);
-
- spin_unlock_irqrestore(&sdla_lock, pflags);
- }
- else
- ret = SDLA_RET_TIMEOUT;
-
- if (ret != SDLA_RET_OK)
- sdla_errors(dev, cmd, dlci, ret, len, &status);
-
- return ret;
-}
-
-/***********************************************
- *
- * these functions are called by the DLCI driver
- *
- ***********************************************/
-
-static int sdla_reconfig(struct net_device *dev);
-
-static int sdla_activate(struct net_device *slave, struct net_device *master)
-{
- struct frad_local *flp;
- int i;
-
- flp = netdev_priv(slave);
-
- for(i=0;i<CONFIG_DLCI_MAX;i++)
- if (flp->master[i] == master)
- break;
-
- if (i == CONFIG_DLCI_MAX)
- return -ENODEV;
-
- flp->dlci[i] = abs(flp->dlci[i]);
-
- if (netif_running(slave) && (flp->config.station == FRAD_STATION_NODE))
- sdla_cmd(slave, SDLA_ACTIVATE_DLCI, 0, 0, &flp->dlci[i], sizeof(short), NULL, NULL);
-
- return 0;
-}
-
-static int sdla_deactivate(struct net_device *slave, struct net_device *master)
-{
- struct frad_local *flp;
- int i;
-
- flp = netdev_priv(slave);
-
- for(i=0;i<CONFIG_DLCI_MAX;i++)
- if (flp->master[i] == master)
- break;
-
- if (i == CONFIG_DLCI_MAX)
- return -ENODEV;
-
- flp->dlci[i] = -abs(flp->dlci[i]);
-
- if (netif_running(slave) && (flp->config.station == FRAD_STATION_NODE))
- sdla_cmd(slave, SDLA_DEACTIVATE_DLCI, 0, 0, &flp->dlci[i], sizeof(short), NULL, NULL);
-
- return 0;
-}
-
-static int sdla_assoc(struct net_device *slave, struct net_device *master)
-{
- struct frad_local *flp;
- int i;
-
- if (master->type != ARPHRD_DLCI)
- return -EINVAL;
-
- flp = netdev_priv(slave);
-
- for(i=0;i<CONFIG_DLCI_MAX;i++)
- {
- if (!flp->master[i])
- break;
- if (abs(flp->dlci[i]) == *(short *)(master->dev_addr))
- return -EADDRINUSE;
- }
-
- if (i == CONFIG_DLCI_MAX)
- return -EMLINK; /* #### Alan: Comments on this ?? */
-
-
- flp->master[i] = master;
- flp->dlci[i] = -*(short *)(master->dev_addr);
- master->mtu = slave->mtu;
-
- if (netif_running(slave)) {
- if (flp->config.station == FRAD_STATION_CPE)
- sdla_reconfig(slave);
- else
- sdla_cmd(slave, SDLA_ADD_DLCI, 0, 0, master->dev_addr, sizeof(short), NULL, NULL);
- }
-
- return 0;
-}
-
-static int sdla_deassoc(struct net_device *slave, struct net_device *master)
-{
- struct frad_local *flp;
- int i;
-
- flp = netdev_priv(slave);
-
- for(i=0;i<CONFIG_DLCI_MAX;i++)
- if (flp->master[i] == master)
- break;
-
- if (i == CONFIG_DLCI_MAX)
- return -ENODEV;
-
- flp->master[i] = NULL;
- flp->dlci[i] = 0;
-
-
- if (netif_running(slave)) {
- if (flp->config.station == FRAD_STATION_CPE)
- sdla_reconfig(slave);
- else
- sdla_cmd(slave, SDLA_DELETE_DLCI, 0, 0, master->dev_addr, sizeof(short), NULL, NULL);
- }
-
- return 0;
-}
-
-static int sdla_dlci_conf(struct net_device *slave, struct net_device *master, int get)
-{
- struct frad_local *flp;
- struct dlci_local *dlp;
- int i;
- short len, ret;
-
- flp = netdev_priv(slave);
-
- for(i=0;i<CONFIG_DLCI_MAX;i++)
- if (flp->master[i] == master)
- break;
-
- if (i == CONFIG_DLCI_MAX)
- return -ENODEV;
-
- dlp = netdev_priv(master);
-
- ret = SDLA_RET_OK;
- len = sizeof(struct dlci_conf);
- if (netif_running(slave)) {
- if (get)
- ret = sdla_cmd(slave, SDLA_READ_DLCI_CONFIGURATION, abs(flp->dlci[i]), 0,
- NULL, 0, &dlp->config, &len);
- else
- ret = sdla_cmd(slave, SDLA_SET_DLCI_CONFIGURATION, abs(flp->dlci[i]), 0,
- &dlp->config, sizeof(struct dlci_conf) - 4 * sizeof(short), NULL, NULL);
- }
-
- return ret == SDLA_RET_OK ? 0 : -EIO;
-}
-
-/**************************
- *
- * now for the Linux driver
- *
- **************************/
-
-/* NOTE: the DLCI driver deals with freeing the SKB!! */
-static netdev_tx_t sdla_transmit(struct sk_buff *skb,
- struct net_device *dev)
-{
- struct frad_local *flp;
- int ret, addr, accept, i;
- short size;
- unsigned long flags;
- struct buf_entry *pbuf;
-
- flp = netdev_priv(dev);
- ret = 0;
- accept = 1;
-
- netif_stop_queue(dev);
-
- /*
- * stupid GateD insists on setting up the multicast router thru us
- * and we're ill equipped to handle a non Frame Relay packet at this
- * time!
- */
-
- accept = 1;
- switch (dev->type)
- {
- case ARPHRD_FRAD:
- if (skb->dev->type != ARPHRD_DLCI)
- {
- netdev_warn(dev, "Non DLCI device, type %i, tried to send on FRAD module\n",
- skb->dev->type);
- accept = 0;
- }
- break;
- default:
- netdev_warn(dev, "unknown firmware type 0x%04X\n",
- dev->type);
- accept = 0;
- break;
- }
- if (accept)
- {
- /* this is frame specific, but till there's a PPP module, it's the default */
- switch (flp->type)
- {
- case SDLA_S502A:
- case SDLA_S502E:
- ret = sdla_cmd(dev, SDLA_INFORMATION_WRITE, *(short *)(skb->dev->dev_addr), 0, skb->data, skb->len, NULL, NULL);
- break;
- case SDLA_S508:
- size = sizeof(addr);
- ret = sdla_cmd(dev, SDLA_INFORMATION_WRITE, *(short *)(skb->dev->dev_addr), 0, NULL, skb->len, &addr, &size);
- if (ret == SDLA_RET_OK)
- {
-
- spin_lock_irqsave(&sdla_lock, flags);
- SDLA_WINDOW(dev, addr);
- pbuf = (void *)(dev->mem_start + (addr & SDLA_ADDR_MASK));
- __sdla_write(dev, pbuf->buf_addr, skb->data, skb->len);
- SDLA_WINDOW(dev, addr);
- pbuf->opp_flag = 1;
- spin_unlock_irqrestore(&sdla_lock, flags);
- }
- break;
- }
-
- switch (ret)
- {
- case SDLA_RET_OK:
- dev->stats.tx_packets++;
- break;
-
- case SDLA_RET_CIR_OVERFLOW:
- case SDLA_RET_BUF_OVERSIZE:
- case SDLA_RET_NO_BUFS:
- dev->stats.tx_dropped++;
- break;
-
- default:
- dev->stats.tx_errors++;
- break;
- }
- }
- netif_wake_queue(dev);
- for(i=0;i<CONFIG_DLCI_MAX;i++)
- {
- if(flp->master[i]!=NULL)
- netif_wake_queue(flp->master[i]);
- }
-
- dev_kfree_skb(skb);
- return NETDEV_TX_OK;
-}
-
-static void sdla_receive(struct net_device *dev)
-{
- struct net_device *master;
- struct frad_local *flp;
- struct dlci_local *dlp;
- struct sk_buff *skb;
-
- struct sdla_cmd *cmd;
- struct buf_info *pbufi;
- struct buf_entry *pbuf;
-
- unsigned long flags;
- int i=0, received, success, addr, buf_base, buf_top;
- short dlci, len, len2, split;
-
- flp = netdev_priv(dev);
- success = 1;
- received = addr = buf_top = buf_base = 0;
- len = dlci = 0;
- skb = NULL;
- master = NULL;
- cmd = NULL;
- pbufi = NULL;
- pbuf = NULL;
-
- spin_lock_irqsave(&sdla_lock, flags);
-
- switch (flp->type)
- {
- case SDLA_S502A:
- case SDLA_S502E:
- cmd = (void *) (dev->mem_start + (SDLA_502_RCV_BUF & SDLA_ADDR_MASK));
- SDLA_WINDOW(dev, SDLA_502_RCV_BUF);
- success = cmd->opp_flag;
- if (!success)
- break;
-
- dlci = cmd->dlci;
- len = cmd->length;
- break;
-
- case SDLA_S508:
- pbufi = (void *) (dev->mem_start + (SDLA_508_RXBUF_INFO & SDLA_ADDR_MASK));
- SDLA_WINDOW(dev, SDLA_508_RXBUF_INFO);
- pbuf = (void *) (dev->mem_start + ((pbufi->rse_base + flp->buffer * sizeof(struct buf_entry)) & SDLA_ADDR_MASK));
- success = pbuf->opp_flag;
- if (!success)
- break;
-
- buf_top = pbufi->buf_top;
- buf_base = pbufi->buf_base;
- dlci = pbuf->dlci;
- len = pbuf->length;
- addr = pbuf->buf_addr;
- break;
- }
-
- /* common code, find the DLCI and get the SKB */
- if (success)
- {
- for (i=0;i<CONFIG_DLCI_MAX;i++)
- if (flp->dlci[i] == dlci)
- break;
-
- if (i == CONFIG_DLCI_MAX)
- {
- netdev_notice(dev, "Received packet from invalid DLCI %i, ignoring\n",
- dlci);
- dev->stats.rx_errors++;
- success = 0;
- }
- }
-
- if (success)
- {
- master = flp->master[i];
- skb = dev_alloc_skb(len + sizeof(struct frhdr));
- if (skb == NULL)
- {
- netdev_notice(dev, "Memory squeeze, dropping packet\n");
- dev->stats.rx_dropped++;
- success = 0;
- }
- else
- skb_reserve(skb, sizeof(struct frhdr));
- }
-
- /* pick up the data */
- switch (flp->type)
- {
- case SDLA_S502A:
- case SDLA_S502E:
- if (success)
- __sdla_read(dev, SDLA_502_RCV_BUF + SDLA_502_DATA_OFS, skb_put(skb,len), len);
-
- SDLA_WINDOW(dev, SDLA_502_RCV_BUF);
- cmd->opp_flag = 0;
- break;
-
- case SDLA_S508:
- if (success)
- {
- /* is this buffer split off the end of the internal ring buffer */
- split = addr + len > buf_top + 1 ? len - (buf_top - addr + 1) : 0;
- len2 = len - split;
-
- __sdla_read(dev, addr, skb_put(skb, len2), len2);
- if (split)
- __sdla_read(dev, buf_base, skb_put(skb, split), split);
- }
-
- /* increment the buffer we're looking at */
- SDLA_WINDOW(dev, SDLA_508_RXBUF_INFO);
- flp->buffer = (flp->buffer + 1) % pbufi->rse_num;
- pbuf->opp_flag = 0;
- break;
- }
-
- if (success)
- {
- dev->stats.rx_packets++;
- dlp = netdev_priv(master);
- (*dlp->receive)(skb, master);
- }
-
- spin_unlock_irqrestore(&sdla_lock, flags);
-}
-
-static irqreturn_t sdla_isr(int dummy, void *dev_id)
-{
- struct net_device *dev;
- struct frad_local *flp;
- char byte;
-
- dev = dev_id;
-
- flp = netdev_priv(dev);
-
- if (!flp->initialized)
- {
- netdev_warn(dev, "irq %d for uninitialized device\n", dev->irq);
- return IRQ_NONE;
- }
-
- byte = sdla_byte(dev, flp->type == SDLA_S508 ? SDLA_508_IRQ_INTERFACE : SDLA_502_IRQ_INTERFACE);
- switch (byte)
- {
- case SDLA_INTR_RX:
- sdla_receive(dev);
- break;
-
- /* the command will get an error return, which is processed above */
- case SDLA_INTR_MODEM:
- case SDLA_INTR_STATUS:
- sdla_cmd(dev, SDLA_READ_DLC_STATUS, 0, 0, NULL, 0, NULL, NULL);
- break;
-
- case SDLA_INTR_TX:
- case SDLA_INTR_COMPLETE:
- case SDLA_INTR_TIMER:
- netdev_warn(dev, "invalid irq flag 0x%02X\n", byte);
- break;
- }
-
- /* the S502E requires a manual acknowledgement of the interrupt */
- if (flp->type == SDLA_S502E)
- {
- flp->state &= ~SDLA_S502E_INTACK;
- outb(flp->state, dev->base_addr + SDLA_REG_CONTROL);
- flp->state |= SDLA_S502E_INTACK;
- outb(flp->state, dev->base_addr + SDLA_REG_CONTROL);
- }
-
- /* this clears the byte, informing the Z80 we're done */
- byte = 0;
- sdla_write(dev, flp->type == SDLA_S508 ? SDLA_508_IRQ_INTERFACE : SDLA_502_IRQ_INTERFACE, &byte, sizeof(byte));
- return IRQ_HANDLED;
-}
-
-static void sdla_poll(struct timer_list *t)
-{
- struct frad_local *flp = from_timer(flp, t, timer);
- struct net_device *dev = flp->dev;
-
- if (sdla_byte(dev, SDLA_502_RCV_BUF))
- sdla_receive(dev);
-
- flp->timer.expires = 1;
- add_timer(&flp->timer);
-}
-
-static int sdla_close(struct net_device *dev)
-{
- struct frad_local *flp;
- struct intr_info intr;
- int len, i;
- short dlcis[CONFIG_DLCI_MAX];
-
- flp = netdev_priv(dev);
-
- len = 0;
- for(i=0;i<CONFIG_DLCI_MAX;i++)
- if (flp->dlci[i])
- dlcis[len++] = abs(flp->dlci[i]);
- len *= 2;
-
- if (flp->config.station == FRAD_STATION_NODE)
- {
- for(i=0;i<CONFIG_DLCI_MAX;i++)
- if (flp->dlci[i] > 0)
- sdla_cmd(dev, SDLA_DEACTIVATE_DLCI, 0, 0, dlcis, len, NULL, NULL);
- sdla_cmd(dev, SDLA_DELETE_DLCI, 0, 0, &flp->dlci[i], sizeof(flp->dlci[i]), NULL, NULL);
- }
-
- memset(&intr, 0, sizeof(intr));
- /* let's start up the reception */
- switch(flp->type)
- {
- case SDLA_S502A:
- del_timer(&flp->timer);
- break;
-
- case SDLA_S502E:
- sdla_cmd(dev, SDLA_SET_IRQ_TRIGGER, 0, 0, &intr, sizeof(char) + sizeof(short), NULL, NULL);
- flp->state &= ~SDLA_S502E_INTACK;
- outb(flp->state, dev->base_addr + SDLA_REG_CONTROL);
- break;
-
- case SDLA_S507:
- break;
-
- case SDLA_S508:
- sdla_cmd(dev, SDLA_SET_IRQ_TRIGGER, 0, 0, &intr, sizeof(struct intr_info), NULL, NULL);
- flp->state &= ~SDLA_S508_INTEN;
- outb(flp->state, dev->base_addr + SDLA_REG_CONTROL);
- break;
- }
-
- sdla_cmd(dev, SDLA_DISABLE_COMMUNICATIONS, 0, 0, NULL, 0, NULL, NULL);
-
- netif_stop_queue(dev);
-
- return 0;
-}
-
-struct conf_data {
- struct frad_conf config;
- short dlci[CONFIG_DLCI_MAX];
-};
-
-static int sdla_open(struct net_device *dev)
-{
- struct frad_local *flp;
- struct dlci_local *dlp;
- struct conf_data data;
- struct intr_info intr;
- int len, i;
- char byte;
-
- flp = netdev_priv(dev);
-
- if (!flp->initialized)
- return -EPERM;
-
- if (!flp->configured)
- return -EPERM;
-
- /* time to send in the configuration */
- len = 0;
- for(i=0;i<CONFIG_DLCI_MAX;i++)
- if (flp->dlci[i])
- data.dlci[len++] = abs(flp->dlci[i]);
- len *= 2;
-
- memcpy(&data.config, &flp->config, sizeof(struct frad_conf));
- len += sizeof(struct frad_conf);
-
- sdla_cmd(dev, SDLA_DISABLE_COMMUNICATIONS, 0, 0, NULL, 0, NULL, NULL);
- sdla_cmd(dev, SDLA_SET_DLCI_CONFIGURATION, 0, 0, &data, len, NULL, NULL);
-
- if (flp->type == SDLA_S508)
- flp->buffer = 0;
-
- sdla_cmd(dev, SDLA_ENABLE_COMMUNICATIONS, 0, 0, NULL, 0, NULL, NULL);
-
- /* let's start up the reception */
- memset(&intr, 0, sizeof(intr));
- switch(flp->type)
- {
- case SDLA_S502A:
- flp->timer.expires = 1;
- add_timer(&flp->timer);
- break;
-
- case SDLA_S502E:
- flp->state |= SDLA_S502E_ENABLE;
- outb(flp->state, dev->base_addr + SDLA_REG_CONTROL);
- flp->state |= SDLA_S502E_INTACK;
- outb(flp->state, dev->base_addr + SDLA_REG_CONTROL);
- byte = 0;
- sdla_write(dev, SDLA_502_IRQ_INTERFACE, &byte, sizeof(byte));
- intr.flags = SDLA_INTR_RX | SDLA_INTR_STATUS | SDLA_INTR_MODEM;
- sdla_cmd(dev, SDLA_SET_IRQ_TRIGGER, 0, 0, &intr, sizeof(char) + sizeof(short), NULL, NULL);
- break;
-
- case SDLA_S507:
- break;
-
- case SDLA_S508:
- flp->state |= SDLA_S508_INTEN;
- outb(flp->state, dev->base_addr + SDLA_REG_CONTROL);
- byte = 0;
- sdla_write(dev, SDLA_508_IRQ_INTERFACE, &byte, sizeof(byte));
- intr.flags = SDLA_INTR_RX | SDLA_INTR_STATUS | SDLA_INTR_MODEM;
- intr.irq = dev->irq;
- sdla_cmd(dev, SDLA_SET_IRQ_TRIGGER, 0, 0, &intr, sizeof(struct intr_info), NULL, NULL);
- break;
- }
-
- if (flp->config.station == FRAD_STATION_CPE)
- {
- byte = SDLA_ICS_STATUS_ENQ;
- sdla_cmd(dev, SDLA_ISSUE_IN_CHANNEL_SIGNAL, 0, 0, &byte, sizeof(byte), NULL, NULL);
- }
- else
- {
- sdla_cmd(dev, SDLA_ADD_DLCI, 0, 0, data.dlci, len - sizeof(struct frad_conf), NULL, NULL);
- for(i=0;i<CONFIG_DLCI_MAX;i++)
- if (flp->dlci[i] > 0)
- sdla_cmd(dev, SDLA_ACTIVATE_DLCI, 0, 0, &flp->dlci[i], 2*sizeof(flp->dlci[i]), NULL, NULL);
- }
-
- /* configure any specific DLCI settings */
- for(i=0;i<CONFIG_DLCI_MAX;i++)
- if (flp->dlci[i])
- {
- dlp = netdev_priv(flp->master[i]);
- if (dlp->configured)
- sdla_cmd(dev, SDLA_SET_DLCI_CONFIGURATION, abs(flp->dlci[i]), 0, &dlp->config, sizeof(struct dlci_conf), NULL, NULL);
- }
-
- netif_start_queue(dev);
-
- return 0;
-}
-
-static int sdla_config(struct net_device *dev, struct frad_conf __user *conf, int get)
-{
- struct frad_local *flp;
- struct conf_data data;
- int i;
- short size;
-
- if (dev->type == 0xFFFF)
- return -EUNATCH;
-
- flp = netdev_priv(dev);
-
- if (!get)
- {
- if (netif_running(dev))
- return -EBUSY;
-
- if(copy_from_user(&data.config, conf, sizeof(struct frad_conf)))
- return -EFAULT;
-
- if (data.config.station & ~FRAD_STATION_NODE)
- return -EINVAL;
-
- if (data.config.flags & ~FRAD_VALID_FLAGS)
- return -EINVAL;
-
- if ((data.config.kbaud < 0) ||
- ((data.config.kbaud > 128) && (flp->type != SDLA_S508)))
- return -EINVAL;
-
- if (data.config.clocking & ~(FRAD_CLOCK_INT | SDLA_S508_PORT_RS232))
- return -EINVAL;
-
- if ((data.config.mtu < 0) || (data.config.mtu > SDLA_MAX_MTU))
- return -EINVAL;
-
- if ((data.config.T391 < 5) || (data.config.T391 > 30))
- return -EINVAL;
-
- if ((data.config.T392 < 5) || (data.config.T392 > 30))
- return -EINVAL;
-
- if ((data.config.N391 < 1) || (data.config.N391 > 255))
- return -EINVAL;
-
- if ((data.config.N392 < 1) || (data.config.N392 > 10))
- return -EINVAL;
-
- if ((data.config.N393 < 1) || (data.config.N393 > 10))
- return -EINVAL;
-
- memcpy(&flp->config, &data.config, sizeof(struct frad_conf));
- flp->config.flags |= SDLA_DIRECT_RECV;
-
- if (flp->type == SDLA_S508)
- flp->config.flags |= SDLA_TX70_RX30;
-
- if (dev->mtu != flp->config.mtu)
- {
- /* this is required to change the MTU */
- dev->mtu = flp->config.mtu;
- for(i=0;i<CONFIG_DLCI_MAX;i++)
- if (flp->master[i])
- flp->master[i]->mtu = flp->config.mtu;
- }
-
- flp->config.mtu += sizeof(struct frhdr);
-
- /* off to the races! */
- if (!flp->configured)
- sdla_start(dev);
-
- flp->configured = 1;
- }
- else
- {
- /* no sense reading if the CPU isn't started */
- if (netif_running(dev))
- {
- size = sizeof(data);
- if (sdla_cmd(dev, SDLA_READ_DLCI_CONFIGURATION, 0, 0, NULL, 0, &data, &size) != SDLA_RET_OK)
- return -EIO;
- }
- else
- if (flp->configured)
- memcpy(&data.config, &flp->config, sizeof(struct frad_conf));
- else
- memset(&data.config, 0, sizeof(struct frad_conf));
-
- memcpy(&flp->config, &data.config, sizeof(struct frad_conf));
- data.config.flags &= FRAD_VALID_FLAGS;
- data.config.mtu -= data.config.mtu > sizeof(struct frhdr) ? sizeof(struct frhdr) : data.config.mtu;
- return copy_to_user(conf, &data.config, sizeof(struct frad_conf))?-EFAULT:0;
- }
-
- return 0;
-}
-
-static int sdla_xfer(struct net_device *dev, struct sdla_mem __user *info, int read)
-{
- struct sdla_mem mem;
- char *temp;
-
- if(copy_from_user(&mem, info, sizeof(mem)))
- return -EFAULT;
-
- if (read)
- {
- temp = kzalloc(mem.len, GFP_KERNEL);
- if (!temp)
- return -ENOMEM;
- sdla_read(dev, mem.addr, temp, mem.len);
- if(copy_to_user(mem.data, temp, mem.len))
- {
- kfree(temp);
- return -EFAULT;
- }
- kfree(temp);
- }
- else
- {
- temp = memdup_user(mem.data, mem.len);
- if (IS_ERR(temp))
- return PTR_ERR(temp);
- sdla_write(dev, mem.addr, temp, mem.len);
- kfree(temp);
- }
- return 0;
-}
-
-static int sdla_reconfig(struct net_device *dev)
-{
- struct frad_local *flp;
- struct conf_data data;
- int i, len;
-
- flp = netdev_priv(dev);
-
- len = 0;
- for(i=0;i<CONFIG_DLCI_MAX;i++)
- if (flp->dlci[i])
- data.dlci[len++] = flp->dlci[i];
- len *= 2;
-
- memcpy(&data, &flp->config, sizeof(struct frad_conf));
- len += sizeof(struct frad_conf);
-
- sdla_cmd(dev, SDLA_DISABLE_COMMUNICATIONS, 0, 0, NULL, 0, NULL, NULL);
- sdla_cmd(dev, SDLA_SET_DLCI_CONFIGURATION, 0, 0, &data, len, NULL, NULL);
- sdla_cmd(dev, SDLA_ENABLE_COMMUNICATIONS, 0, 0, NULL, 0, NULL, NULL);
-
- return 0;
-}
-
-static int sdla_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
-{
- struct frad_local *flp;
-
- if(!capable(CAP_NET_ADMIN))
- return -EPERM;
-
- flp = netdev_priv(dev);
-
- if (!flp->initialized)
- return -EINVAL;
-
- switch (cmd)
- {
- case FRAD_GET_CONF:
- case FRAD_SET_CONF:
- return sdla_config(dev, ifr->ifr_data, cmd == FRAD_GET_CONF);
-
- case SDLA_IDENTIFY:
- ifr->ifr_flags = flp->type;
- break;
-
- case SDLA_CPUSPEED:
- return sdla_cpuspeed(dev, ifr);
-
-/* ==========================================================
-NOTE: This is rather a useless action right now, as the
- current driver does not support protocols other than
- FR. However, Sangoma has modules for a number of
- other protocols in the works.
-============================================================*/
- case SDLA_PROTOCOL:
- if (flp->configured)
- return -EALREADY;
-
- switch (ifr->ifr_flags)
- {
- case ARPHRD_FRAD:
- dev->type = ifr->ifr_flags;
- break;
- default:
- return -ENOPROTOOPT;
- }
- break;
-
- case SDLA_CLEARMEM:
- sdla_clear(dev);
- break;
-
- case SDLA_WRITEMEM:
- case SDLA_READMEM:
- if(!capable(CAP_SYS_RAWIO))
- return -EPERM;
- return sdla_xfer(dev, ifr->ifr_data, cmd == SDLA_READMEM);
-
- case SDLA_START:
- sdla_start(dev);
- break;
-
- case SDLA_STOP:
- sdla_stop(dev);
- break;
-
- default:
- return -EOPNOTSUPP;
- }
- return 0;
-}
-
-static int sdla_change_mtu(struct net_device *dev, int new_mtu)
-{
- if (netif_running(dev))
- return -EBUSY;
-
- /* for now, you can't change the MTU! */
- return -EOPNOTSUPP;
-}
-
-static int sdla_set_config(struct net_device *dev, struct ifmap *map)
-{
- struct frad_local *flp;
- int i;
- char byte;
- unsigned base;
- int err = -EINVAL;
-
- flp = netdev_priv(dev);
-
- if (flp->initialized)
- return -EINVAL;
-
- for(i=0; i < ARRAY_SIZE(valid_port); i++)
- if (valid_port[i] == map->base_addr)
- break;
-
- if (i == ARRAY_SIZE(valid_port))
- return -EINVAL;
-
- if (!request_region(map->base_addr, SDLA_IO_EXTENTS, dev->name)){
- pr_warn("io-port 0x%04lx in use\n", dev->base_addr);
- return -EINVAL;
- }
- base = map->base_addr;
-
- /* test for card types, S502A, S502E, S507, S508 */
- /* these tests shut down the card completely, so clear the state */
- flp->type = SDLA_UNKNOWN;
- flp->state = 0;
-
- for(i=1;i<SDLA_IO_EXTENTS;i++)
- if (inb(base + i) != 0xFF)
- break;
-
- if (i == SDLA_IO_EXTENTS) {
- outb(SDLA_HALT, base + SDLA_REG_Z80_CONTROL);
- if ((inb(base + SDLA_S502_STS) & 0x0F) == 0x08) {
- outb(SDLA_S502E_INTACK, base + SDLA_REG_CONTROL);
- if ((inb(base + SDLA_S502_STS) & 0x0F) == 0x0C) {
- outb(SDLA_HALT, base + SDLA_REG_CONTROL);
- flp->type = SDLA_S502E;
- goto got_type;
- }
- }
- }
-
- for(byte=inb(base),i=0;i<SDLA_IO_EXTENTS;i++)
- if (inb(base + i) != byte)
- break;
-
- if (i == SDLA_IO_EXTENTS) {
- outb(SDLA_HALT, base + SDLA_REG_CONTROL);
- if ((inb(base + SDLA_S502_STS) & 0x7E) == 0x30) {
- outb(SDLA_S507_ENABLE, base + SDLA_REG_CONTROL);
- if ((inb(base + SDLA_S502_STS) & 0x7E) == 0x32) {
- outb(SDLA_HALT, base + SDLA_REG_CONTROL);
- flp->type = SDLA_S507;
- goto got_type;
- }
- }
- }
-
- outb(SDLA_HALT, base + SDLA_REG_CONTROL);
- if ((inb(base + SDLA_S508_STS) & 0x3F) == 0x00) {
- outb(SDLA_S508_INTEN, base + SDLA_REG_CONTROL);
- if ((inb(base + SDLA_S508_STS) & 0x3F) == 0x10) {
- outb(SDLA_HALT, base + SDLA_REG_CONTROL);
- flp->type = SDLA_S508;
- goto got_type;
- }
- }
-
- outb(SDLA_S502A_HALT, base + SDLA_REG_CONTROL);
- if (inb(base + SDLA_S502_STS) == 0x40) {
- outb(SDLA_S502A_START, base + SDLA_REG_CONTROL);
- if (inb(base + SDLA_S502_STS) == 0x40) {
- outb(SDLA_S502A_INTEN, base + SDLA_REG_CONTROL);
- if (inb(base + SDLA_S502_STS) == 0x44) {
- outb(SDLA_S502A_START, base + SDLA_REG_CONTROL);
- flp->type = SDLA_S502A;
- goto got_type;
- }
- }
- }
-
- netdev_notice(dev, "Unknown card type\n");
- err = -ENODEV;
- goto fail;
-
-got_type:
- switch(base) {
- case 0x270:
- case 0x280:
- case 0x380:
- case 0x390:
- if (flp->type != SDLA_S508 && flp->type != SDLA_S507)
- goto fail;
- }
-
- switch (map->irq) {
- case 2:
- if (flp->type != SDLA_S502E)
- goto fail;
- break;
-
- case 10:
- case 11:
- case 12:
- case 15:
- case 4:
- if (flp->type != SDLA_S508 && flp->type != SDLA_S507)
- goto fail;
- break;
- case 3:
- case 5:
- case 7:
- if (flp->type == SDLA_S502A)
- goto fail;
- break;
-
- default:
- goto fail;
- }
-
- err = -EAGAIN;
- if (request_irq(dev->irq, sdla_isr, 0, dev->name, dev))
- goto fail;
-
- if (flp->type == SDLA_S507) {
- switch(dev->irq) {
- case 3:
- flp->state = SDLA_S507_IRQ3;
- break;
- case 4:
- flp->state = SDLA_S507_IRQ4;
- break;
- case 5:
- flp->state = SDLA_S507_IRQ5;
- break;
- case 7:
- flp->state = SDLA_S507_IRQ7;
- break;
- case 10:
- flp->state = SDLA_S507_IRQ10;
- break;
- case 11:
- flp->state = SDLA_S507_IRQ11;
- break;
- case 12:
- flp->state = SDLA_S507_IRQ12;
- break;
- case 15:
- flp->state = SDLA_S507_IRQ15;
- break;
- }
- }
-
- for(i=0; i < ARRAY_SIZE(valid_mem); i++)
- if (valid_mem[i] == map->mem_start)
- break;
-
- err = -EINVAL;
- if (i == ARRAY_SIZE(valid_mem))
- goto fail2;
-
- if (flp->type == SDLA_S502A && (map->mem_start & 0xF000) >> 12 == 0x0E)
- goto fail2;
-
- if (flp->type != SDLA_S507 && map->mem_start >> 16 == 0x0B)
- goto fail2;
-
- if (flp->type == SDLA_S507 && map->mem_start >> 16 == 0x0D)
- goto fail2;
-
- byte = flp->type != SDLA_S508 ? SDLA_8K_WINDOW : 0;
- byte |= (map->mem_start & 0xF000) >> (12 + (flp->type == SDLA_S508 ? 1 : 0));
- switch(flp->type) {
- case SDLA_S502A:
- case SDLA_S502E:
- switch (map->mem_start >> 16) {
- case 0x0A:
- byte |= SDLA_S502_SEG_A;
- break;
- case 0x0C:
- byte |= SDLA_S502_SEG_C;
- break;
- case 0x0D:
- byte |= SDLA_S502_SEG_D;
- break;
- case 0x0E:
- byte |= SDLA_S502_SEG_E;
- break;
- }
- break;
- case SDLA_S507:
- switch (map->mem_start >> 16) {
- case 0x0A:
- byte |= SDLA_S507_SEG_A;
- break;
- case 0x0B:
- byte |= SDLA_S507_SEG_B;
- break;
- case 0x0C:
- byte |= SDLA_S507_SEG_C;
- break;
- case 0x0E:
- byte |= SDLA_S507_SEG_E;
- break;
- }
- break;
- case SDLA_S508:
- switch (map->mem_start >> 16) {
- case 0x0A:
- byte |= SDLA_S508_SEG_A;
- break;
- case 0x0C:
- byte |= SDLA_S508_SEG_C;
- break;
- case 0x0D:
- byte |= SDLA_S508_SEG_D;
- break;
- case 0x0E:
- byte |= SDLA_S508_SEG_E;
- break;
- }
- break;
- }
-
- /* set the memory bits, and enable access */
- outb(byte, base + SDLA_REG_PC_WINDOW);
-
- switch(flp->type)
- {
- case SDLA_S502E:
- flp->state = SDLA_S502E_ENABLE;
- break;
- case SDLA_S507:
- flp->state |= SDLA_MEMEN;
- break;
- case SDLA_S508:
- flp->state = SDLA_MEMEN;
- break;
- }
- outb(flp->state, base + SDLA_REG_CONTROL);
-
- dev->irq = map->irq;
- dev->base_addr = base;
- dev->mem_start = map->mem_start;
- dev->mem_end = dev->mem_start + 0x2000;
- flp->initialized = 1;
- return 0;
-
-fail2:
- free_irq(map->irq, dev);
-fail:
- release_region(base, SDLA_IO_EXTENTS);
- return err;
-}
-
-static const struct net_device_ops sdla_netdev_ops = {
- .ndo_open = sdla_open,
- .ndo_stop = sdla_close,
- .ndo_do_ioctl = sdla_ioctl,
- .ndo_set_config = sdla_set_config,
- .ndo_start_xmit = sdla_transmit,
- .ndo_change_mtu = sdla_change_mtu,
-};
-
-static void setup_sdla(struct net_device *dev)
-{
- struct frad_local *flp = netdev_priv(dev);
-
- netdev_boot_setup_check(dev);
-
- dev->netdev_ops = &sdla_netdev_ops;
- dev->flags = 0;
- dev->type = 0xFFFF;
- dev->hard_header_len = 0;
- dev->addr_len = 0;
- dev->mtu = SDLA_MAX_MTU;
-
- flp->activate = sdla_activate;
- flp->deactivate = sdla_deactivate;
- flp->assoc = sdla_assoc;
- flp->deassoc = sdla_deassoc;
- flp->dlci_conf = sdla_dlci_conf;
- flp->dev = dev;
-
- timer_setup(&flp->timer, sdla_poll, 0);
- flp->timer.expires = 1;
-}
-
-static struct net_device *sdla;
-
-static int __init init_sdla(void)
-{
- int err;
-
- printk("%s.\n", version);
-
- sdla = alloc_netdev(sizeof(struct frad_local), "sdla0",
- NET_NAME_UNKNOWN, setup_sdla);
- if (!sdla)
- return -ENOMEM;
-
- err = register_netdev(sdla);
- if (err)
- free_netdev(sdla);
-
- return err;
-}
-
-static void __exit exit_sdla(void)
-{
- struct frad_local *flp = netdev_priv(sdla);
-
- unregister_netdev(sdla);
- if (flp->initialized) {
- free_irq(sdla->irq, sdla);
- release_region(sdla->base_addr, SDLA_IO_EXTENTS);
- }
- del_timer_sync(&flp->timer);
- free_netdev(sdla);
-}
-
-MODULE_LICENSE("GPL");
-
-module_init(init_sdla);
-module_exit(exit_sdla);
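
The deleted sdla.c above reaches the card's 64 KiB of shared memory through an 8 KiB host window: SDLA_WINDOW() banks a page in via an outb(), and __sdla_read()/__sdla_write() split each transfer at window boundaries. A user-space sketch of that banked-copy loop, with select_window() and a simulated card_mem[] standing in for the real port I/O:

#include <stdio.h>
#include <string.h>

#define WINDOW_SIZE	0x2000			/* 8 KiB visible at a time */
#define ADDR_MASK	(WINDOW_SIZE - 1)

static unsigned char card_mem[0x10000];		/* simulated 64 KiB card RAM */
static unsigned char *window;

static void select_window(int addr)
{
	/* on real hardware this would be an outb() of addr >> 13 */
	window = &card_mem[addr & ~ADDR_MASK];
}

/* Copy 'len' bytes from card address 'addr', re-banking at each window
 * boundary; the same loop shape as __sdla_read() above.
 */
static void banked_read(int addr, void *buf, int len)
{
	char *dst = buf;

	while (len) {
		int offset = addr & ADDR_MASK;
		int bytes = offset + len > WINDOW_SIZE ?
			    WINDOW_SIZE - offset : len;

		select_window(addr);
		memcpy(dst, window + offset, bytes);

		addr += bytes;
		dst  += bytes;
		len  -= bytes;
	}
}

int main(void)
{
	unsigned char buf[16];
	int i;

	for (i = 0; i < 0x10000; i++)
		card_mem[i] = (unsigned char)i;

	/* read 16 bytes straddling the first window boundary */
	banked_read(WINDOW_SIZE - 8, buf, sizeof(buf));
	for (i = 0; i < (int)sizeof(buf); i++)
		printf("%02X ", buf[i]);
	printf("\n");
	return 0;
}
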
diff --git a/drivers/net/wan/x25_asy.c b/drivers/net/wan/x25_asy.c
deleted file mode 100644
index 54b1a5aee82d..000000000000
--- a/drivers/net/wan/x25_asy.c
+++ /dev/null
@@ -1,836 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Things to sort out:
- *
- * o tbusy handling
- * o allow users to set the parameters
- * o sync/async switching ?
- *
- * Note: This does _not_ implement CCITT X.25 asynchronous framing
- * recommendations. It's primarily for testing purposes. If you wanted
- * to do CCITT then in theory all you need is to nick the HDLC async
- * checksum routines from ppp.c
- * Changes:
- *
- * 2000-10-29 Henner Eisen lapb_data_indication() return status.
- */
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/module.h>
-
-#include <linux/uaccess.h>
-#include <linux/bitops.h>
-#include <linux/string.h>
-#include <linux/mm.h>
-#include <linux/interrupt.h>
-#include <linux/in.h>
-#include <linux/tty.h>
-#include <linux/errno.h>
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-#include <linux/skbuff.h>
-#include <linux/if_arp.h>
-#include <linux/lapb.h>
-#include <linux/init.h>
-#include <linux/rtnetlink.h>
-#include <linux/slab.h>
-#include <net/x25device.h>
-#include "x25_asy.h"
-
-static struct net_device **x25_asy_devs;
-static int x25_asy_maxdev = SL_NRUNIT;
-
-module_param(x25_asy_maxdev, int, 0);
-MODULE_LICENSE("GPL");
-
-static int x25_asy_esc(unsigned char *p, unsigned char *d, int len);
-static void x25_asy_unesc(struct x25_asy *sl, unsigned char c);
-static void x25_asy_setup(struct net_device *dev);
-
-/* Find a free X.25 channel, and link in this `tty' line. */
-static struct x25_asy *x25_asy_alloc(void)
-{
- struct net_device *dev = NULL;
- struct x25_asy *sl;
- int i;
-
- if (x25_asy_devs == NULL)
- return NULL; /* Master array missing ! */
-
- for (i = 0; i < x25_asy_maxdev; i++) {
- dev = x25_asy_devs[i];
-
- /* Not allocated ? */
- if (dev == NULL)
- break;
-
- sl = netdev_priv(dev);
- /* Not in use ? */
- if (!test_and_set_bit(SLF_INUSE, &sl->flags))
- return sl;
- }
-
-
- /* Sorry, too many, all slots in use */
- if (i >= x25_asy_maxdev)
- return NULL;
-
- /* If no channels are available, allocate one */
- if (!dev) {
- char name[IFNAMSIZ];
- sprintf(name, "x25asy%d", i);
-
- dev = alloc_netdev(sizeof(struct x25_asy), name,
- NET_NAME_UNKNOWN, x25_asy_setup);
- if (!dev)
- return NULL;
-
- /* Initialize channel control data */
- sl = netdev_priv(dev);
- dev->base_addr = i;
-
- /* register device so that it can be ifconfig'ed */
- if (register_netdev(dev) == 0) {
- /* (Re-)Set the INUSE bit. Very Important! */
- set_bit(SLF_INUSE, &sl->flags);
- x25_asy_devs[i] = dev;
- return sl;
- } else {
- pr_warn("%s(): register_netdev() failure\n", __func__);
- free_netdev(dev);
- }
- }
- return NULL;
-}
-
-
-/* Free an X.25 channel. */
-static void x25_asy_free(struct x25_asy *sl)
-{
- /* Free all X.25 frame buffers. */
- kfree(sl->rbuff);
- sl->rbuff = NULL;
- kfree(sl->xbuff);
- sl->xbuff = NULL;
-
- if (!test_and_clear_bit(SLF_INUSE, &sl->flags))
- netdev_err(sl->dev, "x25_asy_free for already free unit\n");
-}
-
-static int x25_asy_change_mtu(struct net_device *dev, int newmtu)
-{
- struct x25_asy *sl = netdev_priv(dev);
- unsigned char *xbuff, *rbuff;
- int len;
-
- len = 2 * newmtu;
- xbuff = kmalloc(len + 4, GFP_ATOMIC);
- rbuff = kmalloc(len + 4, GFP_ATOMIC);
-
- if (xbuff == NULL || rbuff == NULL) {
- kfree(xbuff);
- kfree(rbuff);
- return -ENOMEM;
- }
-
- spin_lock_bh(&sl->lock);
- xbuff = xchg(&sl->xbuff, xbuff);
- if (sl->xleft) {
- if (sl->xleft <= len) {
- memcpy(sl->xbuff, sl->xhead, sl->xleft);
- } else {
- sl->xleft = 0;
- dev->stats.tx_dropped++;
- }
- }
- sl->xhead = sl->xbuff;
-
- rbuff = xchg(&sl->rbuff, rbuff);
- if (sl->rcount) {
- if (sl->rcount <= len) {
- memcpy(sl->rbuff, rbuff, sl->rcount);
- } else {
- sl->rcount = 0;
- dev->stats.rx_over_errors++;
- set_bit(SLF_ERROR, &sl->flags);
- }
- }
-
- dev->mtu = newmtu;
- sl->buffsize = len;
-
- spin_unlock_bh(&sl->lock);
-
- kfree(xbuff);
- kfree(rbuff);
- return 0;
-}
-
-
-/* Set the "sending" flag. This must be atomic, hence the ASM. */
-
-static inline void x25_asy_lock(struct x25_asy *sl)
-{
- netif_stop_queue(sl->dev);
-}
-
-
-/* Clear the "sending" flag. This must be atomic, hence the ASM. */
-
-static inline void x25_asy_unlock(struct x25_asy *sl)
-{
- netif_wake_queue(sl->dev);
-}
-
-/* Send an LAPB frame to the LAPB module to process. */
-
-static void x25_asy_bump(struct x25_asy *sl)
-{
- struct net_device *dev = sl->dev;
- struct sk_buff *skb;
- int count;
- int err;
-
- count = sl->rcount;
- dev->stats.rx_bytes += count;
-
- skb = dev_alloc_skb(count);
- if (skb == NULL) {
- netdev_warn(sl->dev, "memory squeeze, dropping packet\n");
- dev->stats.rx_dropped++;
- return;
- }
- skb_put_data(skb, sl->rbuff, count);
- err = lapb_data_received(sl->dev, skb);
- if (err != LAPB_OK) {
- kfree_skb(skb);
- printk(KERN_DEBUG "x25_asy: data received err - %d\n", err);
- } else {
- dev->stats.rx_packets++;
- }
-}
-
-/* Encapsulate one IP datagram and stuff into a TTY queue. */
-static void x25_asy_encaps(struct x25_asy *sl, unsigned char *icp, int len)
-{
- unsigned char *p;
- int actual, count, mtu = sl->dev->mtu;
-
- if (len > mtu) {
- /* Sigh, shouldn't occur BUT ... */
- len = mtu;
- printk(KERN_DEBUG "%s: truncating oversized transmit packet!\n",
- sl->dev->name);
- sl->dev->stats.tx_dropped++;
- x25_asy_unlock(sl);
- return;
- }
-
- p = icp;
- count = x25_asy_esc(p, sl->xbuff, len);
-
- /* Order of next two lines is *very* important.
- * When we are sending a little amount of data,
- * the transfer may be completed inside driver.write()
- * routine, because it's running with interrupts enabled.
- * In this case we *never* got WRITE_WAKEUP event,
- * if we did not request it before write operation.
- * 14 Oct 1994 Dmitry Gorodchanin.
- */
- set_bit(TTY_DO_WRITE_WAKEUP, &sl->tty->flags);
- actual = sl->tty->ops->write(sl->tty, sl->xbuff, count);
- sl->xleft = count - actual;
- sl->xhead = sl->xbuff + actual;
-}
-
-/*
- * Called by the driver when there's room for more data. If we have
- * more packets to send, we send them here.
- */
-static void x25_asy_write_wakeup(struct tty_struct *tty)
-{
- int actual;
- struct x25_asy *sl = tty->disc_data;
-
- /* First make sure we're connected. */
- if (!sl || sl->magic != X25_ASY_MAGIC || !netif_running(sl->dev))
- return;
-
- if (sl->xleft <= 0) {
- /* Now serial buffer is almost free & we can start
- * transmission of another packet */
- sl->dev->stats.tx_packets++;
- clear_bit(TTY_DO_WRITE_WAKEUP, &tty->flags);
- x25_asy_unlock(sl);
- return;
- }
-
- actual = tty->ops->write(tty, sl->xhead, sl->xleft);
- sl->xleft -= actual;
- sl->xhead += actual;
-}
-
-static void x25_asy_timeout(struct net_device *dev, unsigned int txqueue)
-{
- struct x25_asy *sl = netdev_priv(dev);
-
- spin_lock(&sl->lock);
- if (netif_queue_stopped(dev)) {
- /* May be we must check transmitter timeout here ?
- * 14 Oct 1994 Dmitry Gorodchanin.
- */
- netdev_warn(dev, "transmit timed out, %s?\n",
- (tty_chars_in_buffer(sl->tty) || sl->xleft) ?
- "bad line quality" : "driver error");
- sl->xleft = 0;
- clear_bit(TTY_DO_WRITE_WAKEUP, &sl->tty->flags);
- x25_asy_unlock(sl);
- }
- spin_unlock(&sl->lock);
-}
-
-/* Encapsulate an IP datagram and kick it into a TTY queue. */
-
-static netdev_tx_t x25_asy_xmit(struct sk_buff *skb,
- struct net_device *dev)
-{
- struct x25_asy *sl = netdev_priv(dev);
- int err;
-
- if (!netif_running(sl->dev)) {
- netdev_err(dev, "xmit call when iface is down\n");
- kfree_skb(skb);
- return NETDEV_TX_OK;
- }
-
- /* There should be a pseudo header of 1 byte added by upper layers.
- * Check to make sure it is there before reading it.
- */
- if (skb->len < 1) {
- kfree_skb(skb);
- return NETDEV_TX_OK;
- }
-
- switch (skb->data[0]) {
- case X25_IFACE_DATA:
- break;
- case X25_IFACE_CONNECT: /* Connection request .. do nothing */
- err = lapb_connect_request(dev);
- if (err != LAPB_OK)
- netdev_err(dev, "lapb_connect_request error: %d\n",
- err);
- kfree_skb(skb);
- return NETDEV_TX_OK;
- case X25_IFACE_DISCONNECT: /* do nothing - hang up ?? */
- err = lapb_disconnect_request(dev);
- if (err != LAPB_OK)
- netdev_err(dev, "lapb_disconnect_request error: %d\n",
- err);
- fallthrough;
- default:
- kfree_skb(skb);
- return NETDEV_TX_OK;
- }
- skb_pull(skb, 1); /* Remove control byte */
- /*
- * If we are busy already- too bad. We ought to be able
- * to queue things at this point, to allow for a little
- * frame buffer. Oh well...
- * -----------------------------------------------------
- * I hate queues in X.25 driver. May be it's efficient,
- * but for me latency is more important. ;)
- * So, no queues !
- * 14 Oct 1994 Dmitry Gorodchanin.
- */
-
- err = lapb_data_request(dev, skb);
- if (err != LAPB_OK) {
- netdev_err(dev, "lapb_data_request error: %d\n", err);
- kfree_skb(skb);
- return NETDEV_TX_OK;
- }
- return NETDEV_TX_OK;
-}
-
-
-/*
- * LAPB interface boilerplate
- */
-
-/*
- * Called when I frame data arrive. We add a pseudo header for upper
- * layers and pass it to upper layers.
- */
-
-static int x25_asy_data_indication(struct net_device *dev, struct sk_buff *skb)
-{
- if (skb_cow(skb, 1)) {
- kfree_skb(skb);
- return NET_RX_DROP;
- }
- skb_push(skb, 1);
- skb->data[0] = X25_IFACE_DATA;
-
- skb->protocol = x25_type_trans(skb, dev);
-
- return netif_rx(skb);
-}
-
-/*
- * Data has emerged from the LAPB protocol machine. We don't handle
- * busy cases too well. Its tricky to see how to do this nicely -
- * perhaps lapb should allow us to bounce this ?
- */
-
-static void x25_asy_data_transmit(struct net_device *dev, struct sk_buff *skb)
-{
- struct x25_asy *sl = netdev_priv(dev);
-
- spin_lock(&sl->lock);
- if (netif_queue_stopped(sl->dev) || sl->tty == NULL) {
- spin_unlock(&sl->lock);
- netdev_err(dev, "tbusy drop\n");
- kfree_skb(skb);
- return;
- }
- /* We were not busy, so we are now... :-) */
- if (skb != NULL) {
- x25_asy_lock(sl);
- dev->stats.tx_bytes += skb->len;
- x25_asy_encaps(sl, skb->data, skb->len);
- dev_kfree_skb(skb);
- }
- spin_unlock(&sl->lock);
-}
-
-/*
- * LAPB connection establish/down information.
- */
-
-static void x25_asy_connected(struct net_device *dev, int reason)
-{
- struct x25_asy *sl = netdev_priv(dev);
- struct sk_buff *skb;
- unsigned char *ptr;
-
- skb = dev_alloc_skb(1);
- if (skb == NULL) {
- netdev_err(dev, "out of memory\n");
- return;
- }
-
- ptr = skb_put(skb, 1);
- *ptr = X25_IFACE_CONNECT;
-
- skb->protocol = x25_type_trans(skb, sl->dev);
- netif_rx(skb);
-}
-
-static void x25_asy_disconnected(struct net_device *dev, int reason)
-{
- struct x25_asy *sl = netdev_priv(dev);
- struct sk_buff *skb;
- unsigned char *ptr;
-
- skb = dev_alloc_skb(1);
- if (skb == NULL) {
- netdev_err(dev, "out of memory\n");
- return;
- }
-
- ptr = skb_put(skb, 1);
- *ptr = X25_IFACE_DISCONNECT;
-
- skb->protocol = x25_type_trans(skb, sl->dev);
- netif_rx(skb);
-}
-
-static const struct lapb_register_struct x25_asy_callbacks = {
- .connect_confirmation = x25_asy_connected,
- .connect_indication = x25_asy_connected,
- .disconnect_confirmation = x25_asy_disconnected,
- .disconnect_indication = x25_asy_disconnected,
- .data_indication = x25_asy_data_indication,
- .data_transmit = x25_asy_data_transmit,
-};
-
-
-/* Open the low-level part of the X.25 channel. Easy! */
-static int x25_asy_open(struct net_device *dev)
-{
- struct x25_asy *sl = netdev_priv(dev);
- unsigned long len;
-
- if (sl->tty == NULL)
- return -ENODEV;
-
- /*
- * Allocate the X.25 frame buffers:
- *
- * rbuff Receive buffer.
- * xbuff Transmit buffer.
- */
-
- len = dev->mtu * 2;
-
- sl->rbuff = kmalloc(len + 4, GFP_KERNEL);
- if (sl->rbuff == NULL)
- goto norbuff;
- sl->xbuff = kmalloc(len + 4, GFP_KERNEL);
- if (sl->xbuff == NULL)
- goto noxbuff;
-
- sl->buffsize = len;
- sl->rcount = 0;
- sl->xleft = 0;
- sl->flags &= (1 << SLF_INUSE); /* Clear ESCAPE & ERROR flags */
-
- return 0;
-
- /* Cleanup */
- kfree(sl->xbuff);
- sl->xbuff = NULL;
-noxbuff:
- kfree(sl->rbuff);
- sl->rbuff = NULL;
-norbuff:
- return -ENOMEM;
-}
-
-
-/* Close the low-level part of the X.25 channel. Easy! */
-static int x25_asy_close(struct net_device *dev)
-{
- struct x25_asy *sl = netdev_priv(dev);
-
- spin_lock(&sl->lock);
- if (sl->tty)
- clear_bit(TTY_DO_WRITE_WAKEUP, &sl->tty->flags);
-
- sl->rcount = 0;
- sl->xleft = 0;
- spin_unlock(&sl->lock);
- return 0;
-}
-
-/*
- * Handle the 'receiver data ready' interrupt.
- * This function is called by the 'tty_io' module in the kernel when
- * a block of X.25 data has been received, which can now be decapsulated
- * and sent on to some IP layer for further processing.
- */
-
-static void x25_asy_receive_buf(struct tty_struct *tty,
- const unsigned char *cp, char *fp, int count)
-{
- struct x25_asy *sl = tty->disc_data;
-
- if (!sl || sl->magic != X25_ASY_MAGIC || !netif_running(sl->dev))
- return;
-
-
- /* Read the characters out of the buffer */
- while (count--) {
- if (fp && *fp++) {
- if (!test_and_set_bit(SLF_ERROR, &sl->flags))
- sl->dev->stats.rx_errors++;
- cp++;
- continue;
- }
- x25_asy_unesc(sl, *cp++);
- }
-}
-
-/*
- * Open the high-level part of the X.25 channel.
- * This function is called by the TTY module when the
- * X.25 line discipline is called for. Because we are
- * sure the tty line exists, we only have to link it to
- * a free X.25 channel...
- */
-
-static int x25_asy_open_tty(struct tty_struct *tty)
-{
- struct x25_asy *sl;
- int err;
-
- if (tty->ops->write == NULL)
- return -EOPNOTSUPP;
-
- /* OK. Find a free X.25 channel to use. */
- sl = x25_asy_alloc();
- if (sl == NULL)
- return -ENFILE;
-
- sl->tty = tty;
- tty->disc_data = sl;
- tty->receive_room = 65536;
- tty_driver_flush_buffer(tty);
- tty_ldisc_flush(tty);
-
- /* Restore default settings */
- sl->dev->type = ARPHRD_X25;
-
- /* Perform the low-level X.25 async init */
- err = x25_asy_open(sl->dev);
- if (err) {
- x25_asy_free(sl);
- return err;
- }
- /* Done. We have linked the TTY line to a channel. */
- return 0;
-}
-
-
-/*
- * Close down an X.25 channel.
- * This means flushing out any pending queues, and then restoring the
- * TTY line discipline to what it was before it got hooked to X.25
- * (which usually is TTY again).
- */
-static void x25_asy_close_tty(struct tty_struct *tty)
-{
- struct x25_asy *sl = tty->disc_data;
-
- /* First make sure we're connected. */
- if (!sl || sl->magic != X25_ASY_MAGIC)
- return;
-
- rtnl_lock();
- if (sl->dev->flags & IFF_UP)
- dev_close(sl->dev);
- rtnl_unlock();
-
- tty->disc_data = NULL;
- sl->tty = NULL;
- x25_asy_free(sl);
-}
-
- /************************************************************************
- * STANDARD X.25 ENCAPSULATION *
- ************************************************************************/
-
-static int x25_asy_esc(unsigned char *s, unsigned char *d, int len)
-{
- unsigned char *ptr = d;
- unsigned char c;
-
- /*
- * Send an initial END character to flush out any
- * data that may have accumulated in the receiver
- * due to line noise.
- */
-
- *ptr++ = X25_END; /* Send 10111110 bit seq */
-
- /*
- * For each byte in the packet, send the appropriate
- * character sequence, according to the X.25 protocol.
- */
-
- while (len-- > 0) {
- switch (c = *s++) {
- case X25_END:
- *ptr++ = X25_ESC;
- *ptr++ = X25_ESCAPE(X25_END);
- break;
- case X25_ESC:
- *ptr++ = X25_ESC;
- *ptr++ = X25_ESCAPE(X25_ESC);
- break;
- default:
- *ptr++ = c;
- break;
- }
- }
- *ptr++ = X25_END;
- return ptr - d;
-}
-
-static void x25_asy_unesc(struct x25_asy *sl, unsigned char s)
-{
-
- switch (s) {
- case X25_END:
- if (!test_and_clear_bit(SLF_ERROR, &sl->flags) &&
- sl->rcount >= 2)
- x25_asy_bump(sl);
- clear_bit(SLF_ESCAPE, &sl->flags);
- sl->rcount = 0;
- return;
- case X25_ESC:
- set_bit(SLF_ESCAPE, &sl->flags);
- return;
- case X25_ESCAPE(X25_ESC):
- case X25_ESCAPE(X25_END):
- if (test_and_clear_bit(SLF_ESCAPE, &sl->flags))
- s = X25_UNESCAPE(s);
- break;
- }
- if (!test_bit(SLF_ERROR, &sl->flags)) {
- if (sl->rcount < sl->buffsize) {
- sl->rbuff[sl->rcount++] = s;
- return;
- }
- sl->dev->stats.rx_over_errors++;
- set_bit(SLF_ERROR, &sl->flags);
- }
-}
-
-
-/* Perform I/O control on an active X.25 channel. */
-static int x25_asy_ioctl(struct tty_struct *tty, struct file *file,
- unsigned int cmd, unsigned long arg)
-{
- struct x25_asy *sl = tty->disc_data;
-
- /* First make sure we're connected. */
- if (!sl || sl->magic != X25_ASY_MAGIC)
- return -EINVAL;
-
- switch (cmd) {
- case SIOCGIFNAME:
- if (copy_to_user((void __user *)arg, sl->dev->name,
- strlen(sl->dev->name) + 1))
- return -EFAULT;
- return 0;
- case SIOCSIFHWADDR:
- return -EINVAL;
- default:
- return tty_mode_ioctl(tty, file, cmd, arg);
- }
-}
-
-static int x25_asy_open_dev(struct net_device *dev)
-{
- int err;
- struct x25_asy *sl = netdev_priv(dev);
- if (sl->tty == NULL)
- return -ENODEV;
-
- err = lapb_register(dev, &x25_asy_callbacks);
- if (err != LAPB_OK)
- return -ENOMEM;
-
- netif_start_queue(dev);
-
- return 0;
-}
-
-static int x25_asy_close_dev(struct net_device *dev)
-{
- int err;
-
- netif_stop_queue(dev);
-
- err = lapb_unregister(dev);
- if (err != LAPB_OK)
- pr_err("%s: lapb_unregister error: %d\n",
- __func__, err);
-
- x25_asy_close(dev);
-
- return 0;
-}
-
-static const struct net_device_ops x25_asy_netdev_ops = {
- .ndo_open = x25_asy_open_dev,
- .ndo_stop = x25_asy_close_dev,
- .ndo_start_xmit = x25_asy_xmit,
- .ndo_tx_timeout = x25_asy_timeout,
- .ndo_change_mtu = x25_asy_change_mtu,
-};
-
-/* Initialise the X.25 driver. Called by the device init code */
-static void x25_asy_setup(struct net_device *dev)
-{
- struct x25_asy *sl = netdev_priv(dev);
-
- sl->magic = X25_ASY_MAGIC;
- sl->dev = dev;
- spin_lock_init(&sl->lock);
- set_bit(SLF_INUSE, &sl->flags);
-
- /*
- * Finish setting up the DEVICE info.
- */
-
- dev->mtu = SL_MTU;
- dev->min_mtu = 0;
- dev->max_mtu = 65534;
- dev->netdev_ops = &x25_asy_netdev_ops;
- dev->watchdog_timeo = HZ*20;
- dev->hard_header_len = 0;
- dev->addr_len = 0;
- dev->type = ARPHRD_X25;
- dev->tx_queue_len = 10;
-
- /* When transmitting data:
- * first this driver removes a pseudo header of 1 byte,
- * then the lapb module prepends an LAPB header of at most 3 bytes.
- */
- dev->needed_headroom = 3 - 1;
-
- /* New-style flags. */
- dev->flags = IFF_NOARP;
-}
-
-static struct tty_ldisc_ops x25_ldisc = {
- .owner = THIS_MODULE,
- .magic = TTY_LDISC_MAGIC,
- .name = "X.25",
- .open = x25_asy_open_tty,
- .close = x25_asy_close_tty,
- .ioctl = x25_asy_ioctl,
- .receive_buf = x25_asy_receive_buf,
- .write_wakeup = x25_asy_write_wakeup,
-};
-
-static int __init init_x25_asy(void)
-{
- if (x25_asy_maxdev < 4)
- x25_asy_maxdev = 4; /* Sanity */
-
- pr_info("X.25 async: version 0.00 ALPHA (dynamic channels, max=%d)\n",
- x25_asy_maxdev);
-
- x25_asy_devs = kcalloc(x25_asy_maxdev, sizeof(struct net_device *),
- GFP_KERNEL);
- if (!x25_asy_devs)
- return -ENOMEM;
-
- return tty_register_ldisc(N_X25, &x25_ldisc);
-}
-
-
-static void __exit exit_x25_asy(void)
-{
- struct net_device *dev;
- int i;
-
- for (i = 0; i < x25_asy_maxdev; i++) {
- dev = x25_asy_devs[i];
- if (dev) {
- struct x25_asy *sl = netdev_priv(dev);
-
- spin_lock_bh(&sl->lock);
- if (sl->tty)
- tty_hangup(sl->tty);
-
- spin_unlock_bh(&sl->lock);
- /*
- * VSV = if dev->start==0, then device
- * unregistered while close proc.
- */
- unregister_netdev(dev);
- free_netdev(dev);
- }
- }
-
- kfree(x25_asy_devs);
- tty_unregister_ldisc(N_X25);
-}
-
-module_init(init_x25_asy);
-module_exit(exit_x25_asy);
diff --git a/drivers/net/wan/x25_asy.h b/drivers/net/wan/x25_asy.h
deleted file mode 100644
index 87798287c9ca..000000000000
--- a/drivers/net/wan/x25_asy.h
+++ /dev/null
@@ -1,46 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _LINUX_X25_ASY_H
-#define _LINUX_X25_ASY_H
-
-/* X.25 asy configuration. */
-#define SL_NRUNIT 256 /* MAX number of X.25 channels;
- This can be overridden with
- insmod -ox25_asy_maxdev=nnn */
-#define SL_MTU 256
-
-/* X25 async protocol characters. */
-#define X25_END 0x7E /* indicates end of frame */
-#define X25_ESC 0x7D /* indicates byte stuffing */
-#define X25_ESCAPE(x) ((x)^0x20)
-#define X25_UNESCAPE(x) ((x)^0x20)
-
-
-struct x25_asy {
- int magic;
-
- /* Various fields. */
- spinlock_t lock;
- struct tty_struct *tty; /* ptr to TTY structure */
- struct net_device *dev; /* easy for intr handling */
-
- /* These are pointers to the malloc()ed frame buffers. */
- unsigned char *rbuff; /* receiver buffer */
- int rcount; /* received chars counter */
- unsigned char *xbuff; /* transmitter buffer */
- unsigned char *xhead; /* pointer to next byte to XMIT */
- int xleft; /* bytes left in XMIT queue */
- int buffsize; /* Max buffers sizes */
-
- unsigned long flags; /* Flag values/ mode etc */
-#define SLF_INUSE 0 /* Channel in use */
-#define SLF_ESCAPE 1 /* ESC received */
-#define SLF_ERROR 2 /* Parity, etc. error */
-};
-
-
-
-#define X25_ASY_MAGIC 0x5303
-
-int x25_asy_init(struct net_device *dev);
-
-#endif /* _LINUX_X25_ASY.H */
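
The byte stuffing used by the driver removed above deserves a worked example: X25_ESCAPE() is a plain XOR with 0x20, so a literal X25_END (0x7E) inside the payload goes out as the pair 0x7D 0x5E, and a literal X25_ESC (0x7D) as 0x7D 0x5D. A minimal, self-contained round-trip sketch (userspace C, illustrative only, not part of the driver):

#include <stdio.h>

#define X25_END         0x7E
#define X25_ESC         0x7D
#define X25_ESCAPE(x)   ((x) ^ 0x20)
#define X25_UNESCAPE(x) ((x) ^ 0x20)

int main(void)
{
	unsigned char in = X25_END;
	unsigned char stuffed = X25_ESCAPE(in);	/* 0x5E on the wire */

	/* The receiver sees 0x7D, sets SLF_ESCAPE, then XORs the next
	 * byte back: prints "0x7E -> 0x7D 0x5E -> 0x7E".
	 */
	printf("0x%02X -> 0x%02X 0x%02X -> 0x%02X\n",
	       in, X25_ESC, stuffed, X25_UNESCAPE(stuffed));
	return 0;
}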
diff --git a/drivers/net/wimax/Kconfig b/drivers/net/wimax/Kconfig
deleted file mode 100644
index 2249e3d77a76..000000000000
--- a/drivers/net/wimax/Kconfig
+++ /dev/null
@@ -1,18 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0-only
-#
-# WiMAX LAN device drivers configuration
-#
-
-
-comment "Enable WiMAX (Networking options) to see the WiMAX drivers"
- depends on WIMAX = n
-
-if WIMAX
-
-menu "WiMAX Wireless Broadband devices"
-
-source "drivers/net/wimax/i2400m/Kconfig"
-
-endmenu
-
-endif
diff --git a/drivers/net/wimax/Makefile b/drivers/net/wimax/Makefile
deleted file mode 100644
index b4575bacf994..000000000000
--- a/drivers/net/wimax/Makefile
+++ /dev/null
@@ -1,2 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0-only
-obj-$(CONFIG_WIMAX_I2400M) += i2400m/
diff --git a/drivers/net/wireguard/device.c b/drivers/net/wireguard/device.c
index c9f65e96ccb0..a3ed49cd95c3 100644
--- a/drivers/net/wireguard/device.c
+++ b/drivers/net/wireguard/device.c
@@ -215,7 +215,7 @@ static const struct net_device_ops netdev_ops = {
.ndo_open = wg_open,
.ndo_stop = wg_stop,
.ndo_start_xmit = wg_xmit,
- .ndo_get_stats64 = ip_tunnel_get_stats64
+ .ndo_get_stats64 = dev_get_tstats64
};
static void wg_destruct(struct net_device *dev)
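
The one-line wireguard change above is part of a tree-wide conversion: dev_get_tstats64() is a generic ndo_get_stats64 implementation that folds the device's per-CPU dev->tstats counters into the rtnl_link_stats64 reply, replacing subsystem-local copies such as ip_tunnel_get_stats64(). A hedged sketch of the contract a driver has to honour to use it; the foo_* names are illustrative and this is not WireGuard's actual init path:

/* dev->tstats must be allocated before the helper is called; many
 * drivers allocate it in ndo_init and release it in ndo_uninit.
 */
static int foo_dev_init(struct net_device *dev)
{
	dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
	return dev->tstats ? 0 : -ENOMEM;
}

static void foo_dev_uninit(struct net_device *dev)
{
	free_percpu(dev->tstats);
}

static const struct net_device_ops foo_netdev_ops = {
	.ndo_init	 = foo_dev_init,
	.ndo_uninit	 = foo_dev_uninit,
	.ndo_get_stats64 = dev_get_tstats64,	/* reads dev->tstats */
};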
diff --git a/drivers/net/wireless/Kconfig b/drivers/net/wireless/Kconfig
index 170a64e67709..7add2002ff4c 100644
--- a/drivers/net/wireless/Kconfig
+++ b/drivers/net/wireless/Kconfig
@@ -18,19 +18,6 @@ menuconfig WLAN
if WLAN
-config WIRELESS_WDS
- bool "mac80211-based legacy WDS support" if EXPERT
- help
- This option enables the deprecated WDS support, the newer
- mac80211-based 4-addr AP/client support supersedes it with
- a much better feature set (HT, VHT, ...)
-
- We plan to remove this option and code, so if you find
- that you have to enable it, please let us know on the
- linux-wireless@vger.kernel.org mailing list, so we can
- help you migrate to 4-addr AP/client (or, if it's really
- necessary, give up on our plan of removing it).
-
source "drivers/net/wireless/admtek/Kconfig"
source "drivers/net/wireless/ath/Kconfig"
source "drivers/net/wireless/atmel/Kconfig"
diff --git a/drivers/net/wireless/ath/ath9k/ath9k.h b/drivers/net/wireless/ath/ath9k/ath9k.h
index e06b74a54a69..13b4f5f50f8a 100644
--- a/drivers/net/wireless/ath/ath9k/ath9k.h
+++ b/drivers/net/wireless/ath/ath9k/ath9k.h
@@ -661,7 +661,6 @@ struct ath9k_vif_iter_data {
int naps; /* number of AP vifs */
int nmeshes; /* number of mesh vifs */
int nstations; /* number of station vifs */
- int nwds; /* number of WDS vifs */
int nadhocs; /* number of adhoc vifs */
int nocbs; /* number of OCB vifs */
int nbcnvifs; /* number of beaconing vifs */
diff --git a/drivers/net/wireless/ath/ath9k/debug.c b/drivers/net/wireless/ath/ath9k/debug.c
index 26ea51a72156..017a43bc400c 100644
--- a/drivers/net/wireless/ath/ath9k/debug.c
+++ b/drivers/net/wireless/ath/ath9k/debug.c
@@ -735,10 +735,10 @@ static int read_file_misc(struct seq_file *file, void *data)
ath9k_calculate_iter_data(sc, ctx, &iter_data);
seq_printf(file,
- "VIFS: CTX %i(%i) AP: %i STA: %i MESH: %i WDS: %i",
+ "VIFS: CTX %i(%i) AP: %i STA: %i MESH: %i",
i++, (int)(ctx->assigned), iter_data.naps,
iter_data.nstations,
- iter_data.nmeshes, iter_data.nwds);
+ iter_data.nmeshes);
seq_printf(file, " ADHOC: %i OCB: %i TOTAL: %hi BEACON-VIF: %hi\n",
iter_data.nadhocs, iter_data.nocbs, sc->cur_chan->nvifs,
sc->nbcnvifs);
diff --git a/drivers/net/wireless/ath/ath9k/init.c b/drivers/net/wireless/ath/ath9k/init.c
index 690fe3a1b516..42a208787f5a 100644
--- a/drivers/net/wireless/ath/ath9k/init.c
+++ b/drivers/net/wireless/ath/ath9k/init.c
@@ -832,12 +832,6 @@ static const struct ieee80211_iface_limit if_limits[] = {
BIT(NL80211_IFTYPE_P2P_GO) },
};
-#ifdef CONFIG_WIRELESS_WDS
-static const struct ieee80211_iface_limit wds_limits[] = {
- { .max = 2048, .types = BIT(NL80211_IFTYPE_WDS) },
-};
-#endif
-
#ifdef CONFIG_ATH9K_CHANNEL_CONTEXT
static const struct ieee80211_iface_limit if_limits_multi[] = {
@@ -874,15 +868,6 @@ static const struct ieee80211_iface_combination if_comb[] = {
BIT(NL80211_CHAN_WIDTH_40),
#endif
},
-#ifdef CONFIG_WIRELESS_WDS
- {
- .limits = wds_limits,
- .n_limits = ARRAY_SIZE(wds_limits),
- .max_interfaces = 2048,
- .num_different_channels = 1,
- .beacon_int_infra_match = true,
- },
-#endif
};
#ifdef CONFIG_ATH9K_CHANNEL_CONTEXT
@@ -897,7 +882,6 @@ static void ath9k_set_mcc_capab(struct ath_softc *sc, struct ieee80211_hw *hw)
ieee80211_hw_set(hw, QUEUE_CONTROL);
hw->queues = ATH9K_NUM_TX_QUEUES;
hw->offchannel_tx_hw_queue = hw->queues - 1;
- hw->wiphy->interface_modes &= ~ BIT(NL80211_IFTYPE_WDS);
hw->wiphy->iface_combinations = if_comb_multi;
hw->wiphy->n_iface_combinations = ARRAY_SIZE(if_comb_multi);
hw->wiphy->max_scan_ssids = 255;
@@ -953,9 +937,6 @@ static void ath9k_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw)
BIT(NL80211_IFTYPE_STATION) |
BIT(NL80211_IFTYPE_ADHOC) |
BIT(NL80211_IFTYPE_MESH_POINT) |
-#ifdef CONFIG_WIRELESS_WDS
- BIT(NL80211_IFTYPE_WDS) |
-#endif
BIT(NL80211_IFTYPE_OCB);
if (ath9k_is_chanctx_enabled())
diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
index 8dbf68b94228..caebe3fd6869 100644
--- a/drivers/net/wireless/ath/ath9k/main.c
+++ b/drivers/net/wireless/ath/ath9k/main.c
@@ -973,9 +973,6 @@ static void ath9k_vif_iter(struct ath9k_vif_iter_data *iter_data,
if (vif->bss_conf.enable_beacon)
ath9k_vif_iter_set_beacon(iter_data, vif);
break;
- case NL80211_IFTYPE_WDS:
- iter_data->nwds++;
- break;
default:
break;
}
@@ -1136,8 +1133,6 @@ void ath9k_calculate_summary_state(struct ath_softc *sc,
ah->opmode = NL80211_IFTYPE_MESH_POINT;
else if (iter_data.nocbs)
ah->opmode = NL80211_IFTYPE_OCB;
- else if (iter_data.nwds)
- ah->opmode = NL80211_IFTYPE_AP;
else if (iter_data.nadhocs)
ah->opmode = NL80211_IFTYPE_ADHOC;
else
diff --git a/drivers/net/wireless/ath/carl9170/mac.c b/drivers/net/wireless/ath/carl9170/mac.c
index b2eeb9fd68d2..6cdbee5beb07 100644
--- a/drivers/net/wireless/ath/carl9170/mac.c
+++ b/drivers/net/wireless/ath/carl9170/mac.c
@@ -329,10 +329,6 @@ int carl9170_set_operating_mode(struct ar9170 *ar)
/* iwlagn 802.11n STA Workaround */
rx_ctrl |= AR9170_MAC_RX_CTRL_PASS_TO_HOST;
break;
- case NL80211_IFTYPE_WDS:
- cam_mode |= AR9170_MAC_CAM_AP_WDS;
- rx_ctrl |= AR9170_MAC_RX_CTRL_PASS_TO_HOST;
- break;
case NL80211_IFTYPE_STATION:
cam_mode |= AR9170_MAC_CAM_STA;
rx_ctrl |= AR9170_MAC_RX_CTRL_PASS_TO_HOST;
diff --git a/drivers/net/wireless/ath/carl9170/main.c b/drivers/net/wireless/ath/carl9170/main.c
index dbef9d8fc893..cca3b086aa70 100644
--- a/drivers/net/wireless/ath/carl9170/main.c
+++ b/drivers/net/wireless/ath/carl9170/main.c
@@ -646,7 +646,6 @@ static int carl9170_op_add_interface(struct ieee80211_hw *hw,
case NL80211_IFTYPE_MESH_POINT:
case NL80211_IFTYPE_AP:
if ((vif->type == NL80211_IFTYPE_STATION) ||
- (vif->type == NL80211_IFTYPE_WDS) ||
(vif->type == NL80211_IFTYPE_AP) ||
(vif->type == NL80211_IFTYPE_MESH_POINT))
break;
diff --git a/drivers/net/wireless/broadcom/b43/main.c b/drivers/net/wireless/broadcom/b43/main.c
index f175dbaffc30..150a366e8f62 100644
--- a/drivers/net/wireless/broadcom/b43/main.c
+++ b/drivers/net/wireless/broadcom/b43/main.c
@@ -4961,12 +4961,11 @@ static int b43_op_add_interface(struct ieee80211_hw *hw,
struct b43_wldev *dev;
int err = -EOPNOTSUPP;
- /* TODO: allow WDS/AP devices to coexist */
+ /* TODO: allow AP devices to coexist */
if (vif->type != NL80211_IFTYPE_AP &&
vif->type != NL80211_IFTYPE_MESH_POINT &&
vif->type != NL80211_IFTYPE_STATION &&
- vif->type != NL80211_IFTYPE_WDS &&
vif->type != NL80211_IFTYPE_ADHOC)
return -EOPNOTSUPP;
@@ -5576,9 +5575,6 @@ static struct b43_wl *b43_wireless_init(struct b43_bus_dev *dev)
BIT(NL80211_IFTYPE_AP) |
BIT(NL80211_IFTYPE_MESH_POINT) |
BIT(NL80211_IFTYPE_STATION) |
-#ifdef CONFIG_WIRELESS_WDS
- BIT(NL80211_IFTYPE_WDS) |
-#endif
BIT(NL80211_IFTYPE_ADHOC);
hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN;
diff --git a/drivers/net/wireless/broadcom/b43legacy/main.c b/drivers/net/wireless/broadcom/b43legacy/main.c
index a27125b7922c..7692a2618c97 100644
--- a/drivers/net/wireless/broadcom/b43legacy/main.c
+++ b/drivers/net/wireless/broadcom/b43legacy/main.c
@@ -3381,11 +3381,10 @@ static int b43legacy_op_add_interface(struct ieee80211_hw *hw,
unsigned long flags;
int err = -EOPNOTSUPP;
- /* TODO: allow WDS/AP devices to coexist */
+ /* TODO: allow AP devices to coexist */
if (vif->type != NL80211_IFTYPE_AP &&
vif->type != NL80211_IFTYPE_STATION &&
- vif->type != NL80211_IFTYPE_WDS &&
vif->type != NL80211_IFTYPE_ADHOC)
return -EOPNOTSUPP;
@@ -3805,9 +3804,6 @@ static int b43legacy_wireless_init(struct ssb_device *dev)
hw->wiphy->interface_modes =
BIT(NL80211_IFTYPE_AP) |
BIT(NL80211_IFTYPE_STATION) |
-#ifdef CONFIG_WIRELESS_WDS
- BIT(NL80211_IFTYPE_WDS) |
-#endif
BIT(NL80211_IFTYPE_ADHOC);
hw->queues = 1; /* FIXME: hardware has more queues */
hw->max_rates = 2;
diff --git a/drivers/net/wireless/quantenna/qtnfmac/core.c b/drivers/net/wireless/quantenna/qtnfmac/core.c
index bf6dbeb61842..ad726bd100ec 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/core.c
+++ b/drivers/net/wireless/quantenna/qtnfmac/core.c
@@ -126,28 +126,13 @@ qtnf_netdev_hard_start_xmit(struct sk_buff *skb, struct net_device *ndev)
if (unlikely(skb->protocol == htons(ETH_P_PAE))) {
qtnf_packet_send_hi_pri(skb);
- qtnf_update_tx_stats(ndev, skb);
+ dev_sw_netstats_tx_add(ndev, 1, skb->len);
return NETDEV_TX_OK;
}
return qtnf_bus_data_tx(mac->bus, skb, mac->macid, vif->vifid);
}
-/* Netdev handler for getting stats.
- */
-static void qtnf_netdev_get_stats64(struct net_device *ndev,
- struct rtnl_link_stats64 *stats)
-{
- struct qtnf_vif *vif = qtnf_netdev_get_priv(ndev);
-
- netdev_stats_to_stats64(stats, &ndev->stats);
-
- if (!vif->stats64)
- return;
-
- dev_fetch_sw_netstats(stats, vif->stats64);
-}
-
/* Netdev handler for transmission timeout.
*/
static void qtnf_netdev_tx_timeout(struct net_device *ndev, unsigned int txqueue)
@@ -211,13 +196,27 @@ static int qtnf_netdev_port_parent_id(struct net_device *ndev,
return 0;
}
+static int qtnf_netdev_alloc_pcpu_stats(struct net_device *dev)
+{
+ dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
+
+ return dev->tstats ? 0 : -ENOMEM;
+}
+
+static void qtnf_netdev_free_pcpu_stats(struct net_device *dev)
+{
+ free_percpu(dev->tstats);
+}
+
/* Network device ops handlers */
const struct net_device_ops qtnf_netdev_ops = {
+ .ndo_init = qtnf_netdev_alloc_pcpu_stats,
+ .ndo_uninit = qtnf_netdev_free_pcpu_stats,
.ndo_open = qtnf_netdev_open,
.ndo_stop = qtnf_netdev_close,
.ndo_start_xmit = qtnf_netdev_hard_start_xmit,
.ndo_tx_timeout = qtnf_netdev_tx_timeout,
- .ndo_get_stats64 = qtnf_netdev_get_stats64,
+ .ndo_get_stats64 = dev_get_tstats64,
.ndo_set_mac_address = qtnf_netdev_set_mac_address,
.ndo_get_port_parent_id = qtnf_netdev_port_parent_id,
};
@@ -448,10 +447,6 @@ static struct qtnf_wmac *qtnf_core_mac_alloc(struct qtnf_bus *bus,
qtnf_sta_list_init(&vif->sta_list);
INIT_WORK(&vif->high_pri_tx_work, qtnf_vif_send_data_high_pri);
skb_queue_head_init(&vif->high_pri_tx_queue);
- vif->stats64 = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
- if (!vif->stats64)
- pr_warn("VIF%u.%u: per cpu stats allocation failed\n",
- macid, i);
}
qtnf_mac_init_primary_intf(mac);
@@ -531,7 +526,6 @@ static void qtnf_core_mac_detach(struct qtnf_bus *bus, unsigned int macid)
}
rtnl_unlock();
qtnf_sta_list_free(&vif->sta_list);
- free_percpu(vif->stats64);
}
if (mac->wiphy_registered)
@@ -924,46 +918,6 @@ void qtnf_wake_all_queues(struct net_device *ndev)
}
EXPORT_SYMBOL_GPL(qtnf_wake_all_queues);
-void qtnf_update_rx_stats(struct net_device *ndev, const struct sk_buff *skb)
-{
- struct qtnf_vif *vif = qtnf_netdev_get_priv(ndev);
- struct pcpu_sw_netstats *stats64;
-
- if (unlikely(!vif || !vif->stats64)) {
- ndev->stats.rx_packets++;
- ndev->stats.rx_bytes += skb->len;
- return;
- }
-
- stats64 = this_cpu_ptr(vif->stats64);
-
- u64_stats_update_begin(&stats64->syncp);
- stats64->rx_packets++;
- stats64->rx_bytes += skb->len;
- u64_stats_update_end(&stats64->syncp);
-}
-EXPORT_SYMBOL_GPL(qtnf_update_rx_stats);
-
-void qtnf_update_tx_stats(struct net_device *ndev, const struct sk_buff *skb)
-{
- struct qtnf_vif *vif = qtnf_netdev_get_priv(ndev);
- struct pcpu_sw_netstats *stats64;
-
- if (unlikely(!vif || !vif->stats64)) {
- ndev->stats.tx_packets++;
- ndev->stats.tx_bytes += skb->len;
- return;
- }
-
- stats64 = this_cpu_ptr(vif->stats64);
-
- u64_stats_update_begin(&stats64->syncp);
- stats64->tx_packets++;
- stats64->tx_bytes += skb->len;
- u64_stats_update_end(&stats64->syncp);
-}
-EXPORT_SYMBOL_GPL(qtnf_update_tx_stats);
-
struct dentry *qtnf_get_debugfs_dir(void)
{
return qtnf_debugfs_dir;
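
The deleted qtnf_update_rx_stats()/qtnf_update_tx_stats() helpers open-coded exactly what the generic replacements provide: a per-CPU counter bump inside the u64_stats seqcount. For reference, a sketch of what dev_sw_netstats_rx_add(ndev, len) does internally, assuming dev->tstats points at per-CPU struct pcpu_sw_netstats (field names as in this kernel generation):

static void example_sw_netstats_rx_add(struct net_device *dev,
				       unsigned int len)
{
	struct pcpu_sw_netstats *tstats = this_cpu_ptr(dev->tstats);

	u64_stats_update_begin(&tstats->syncp);
	tstats->rx_packets++;
	tstats->rx_bytes += len;
	u64_stats_update_end(&tstats->syncp);
}

dev_sw_netstats_tx_add() additionally takes a packet count, so TX-reclaim paths that complete several frames at once can batch the update.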
diff --git a/drivers/net/wireless/quantenna/qtnfmac/core.h b/drivers/net/wireless/quantenna/qtnfmac/core.h
index 269ce12cf8bf..b204a24074ab 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/core.h
+++ b/drivers/net/wireless/quantenna/qtnfmac/core.h
@@ -70,8 +70,6 @@ struct qtnf_vif {
struct qtnf_sta_list sta_list;
unsigned long cons_tx_timeout_cnt;
int generation;
-
- struct pcpu_sw_netstats __percpu *stats64;
};
struct qtnf_mac_info {
@@ -139,8 +137,6 @@ int qtnf_cmd_send_update_phy_params(struct qtnf_wmac *mac, u32 changed);
struct qtnf_wmac *qtnf_core_get_mac(const struct qtnf_bus *bus, u8 macid);
struct net_device *qtnf_classify_skb(struct qtnf_bus *bus, struct sk_buff *skb);
void qtnf_wake_all_queues(struct net_device *ndev);
-void qtnf_update_rx_stats(struct net_device *ndev, const struct sk_buff *skb);
-void qtnf_update_tx_stats(struct net_device *ndev, const struct sk_buff *skb);
void qtnf_virtual_intf_cleanup(struct net_device *ndev);
diff --git a/drivers/net/wireless/quantenna/qtnfmac/pcie/pearl_pcie.c b/drivers/net/wireless/quantenna/qtnfmac/pcie/pearl_pcie.c
index 9a20c0f29078..0003df577cb3 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/pcie/pearl_pcie.c
+++ b/drivers/net/wireless/quantenna/qtnfmac/pcie/pearl_pcie.c
@@ -489,7 +489,7 @@ static void qtnf_pearl_data_tx_reclaim(struct qtnf_pcie_pearl_state *ps)
PCI_DMA_TODEVICE);
if (skb->dev) {
- qtnf_update_tx_stats(skb->dev, skb);
+ dev_sw_netstats_tx_add(skb->dev, 1, skb->len);
if (unlikely(priv->tx_stopped)) {
qtnf_wake_all_queues(skb->dev);
priv->tx_stopped = 0;
@@ -756,7 +756,7 @@ static int qtnf_pcie_pearl_rx_poll(struct napi_struct *napi, int budget)
skb_put(skb, psize);
ndev = qtnf_classify_skb(bus, skb);
if (likely(ndev)) {
- qtnf_update_rx_stats(ndev, skb);
+ dev_sw_netstats_rx_add(ndev, skb->len);
skb->protocol = eth_type_trans(skb, ndev);
napi_gro_receive(napi, skb);
} else {
diff --git a/drivers/net/wireless/quantenna/qtnfmac/pcie/topaz_pcie.c b/drivers/net/wireless/quantenna/qtnfmac/pcie/topaz_pcie.c
index 4b87d3151017..24f1be8ddcef 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/pcie/topaz_pcie.c
+++ b/drivers/net/wireless/quantenna/qtnfmac/pcie/topaz_pcie.c
@@ -418,7 +418,7 @@ static void qtnf_topaz_data_tx_reclaim(struct qtnf_pcie_topaz_state *ts)
PCI_DMA_TODEVICE);
if (skb->dev) {
- qtnf_update_tx_stats(skb->dev, skb);
+ dev_sw_netstats_tx_add(skb->dev, 1, skb->len);
if (unlikely(priv->tx_stopped)) {
qtnf_wake_all_queues(skb->dev);
priv->tx_stopped = 0;
@@ -662,7 +662,7 @@ static int qtnf_topaz_rx_poll(struct napi_struct *napi, int budget)
skb_put(skb, psize);
ndev = qtnf_classify_skb(bus, skb);
if (likely(ndev)) {
- qtnf_update_rx_stats(ndev, skb);
+ dev_sw_netstats_rx_add(ndev, skb->len);
skb->protocol = eth_type_trans(skb, ndev);
netif_receive_skb(skb);
} else {
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00config.c b/drivers/net/wireless/ralink/rt2x00/rt2x00config.c
index 0ee1813e8453..6bafdd991171 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2x00config.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt2x00config.c
@@ -32,7 +32,6 @@ void rt2x00lib_config_intf(struct rt2x00_dev *rt2x00dev,
break;
case NL80211_IFTYPE_AP:
case NL80211_IFTYPE_MESH_POINT:
- case NL80211_IFTYPE_WDS:
conf.sync = TSF_SYNC_AP_NONE;
break;
case NL80211_IFTYPE_STATION:
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00dev.c b/drivers/net/wireless/ralink/rt2x00/rt2x00dev.c
index b04f76551ca4..61a4f1ad31e2 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2x00dev.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt2x00dev.c
@@ -194,8 +194,7 @@ static void rt2x00lib_beaconupdate_iter(void *data, u8 *mac,
if (vif->type != NL80211_IFTYPE_AP &&
vif->type != NL80211_IFTYPE_ADHOC &&
- vif->type != NL80211_IFTYPE_MESH_POINT &&
- vif->type != NL80211_IFTYPE_WDS)
+ vif->type != NL80211_IFTYPE_MESH_POINT)
return;
/*
@@ -1437,9 +1436,6 @@ int rt2x00lib_probe_dev(struct rt2x00_dev *rt2x00dev)
#ifdef CONFIG_MAC80211_MESH
BIT(NL80211_IFTYPE_MESH_POINT) |
#endif
-#ifdef CONFIG_WIRELESS_WDS
- BIT(NL80211_IFTYPE_WDS) |
-#endif
BIT(NL80211_IFTYPE_AP);
rt2x00dev->hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN;
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00mac.c b/drivers/net/wireless/ralink/rt2x00/rt2x00mac.c
index 2f68a31072ae..dea5babd30fe 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2x00mac.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt2x00mac.c
@@ -408,8 +408,7 @@ static void rt2x00mac_set_tim_iter(void *data, u8 *mac,
if (vif->type != NL80211_IFTYPE_AP &&
vif->type != NL80211_IFTYPE_ADHOC &&
- vif->type != NL80211_IFTYPE_MESH_POINT &&
- vif->type != NL80211_IFTYPE_WDS)
+ vif->type != NL80211_IFTYPE_MESH_POINT)
return;
set_bit(DELAYED_UPDATE_BEACON, &intf->delayed_flags);
diff --git a/drivers/net/wireless/rndis_wlan.c b/drivers/net/wireless/rndis_wlan.c
index 75b5d545b49e..9fe77556858e 100644
--- a/drivers/net/wireless/rndis_wlan.c
+++ b/drivers/net/wireless/rndis_wlan.c
@@ -3379,7 +3379,7 @@ static const struct net_device_ops rndis_wlan_netdev_ops = {
.ndo_stop = usbnet_stop,
.ndo_start_xmit = usbnet_start_xmit,
.ndo_tx_timeout = usbnet_tx_timeout,
- .ndo_get_stats64 = usbnet_get_stats64,
+ .ndo_get_stats64 = dev_get_tstats64,
.ndo_set_mac_address = eth_mac_addr,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_rx_mode = rndis_wlan_set_multicast_list,
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index 3e9895bec15f..920cac4385bf 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -2413,12 +2413,11 @@ static ssize_t store_rxbuf(struct device *dev,
const char *buf, size_t len)
{
char *endp;
- unsigned long target;
if (!capable(CAP_NET_ADMIN))
return -EPERM;
- target = simple_strtoul(buf, &endp, 0);
+ simple_strtoul(buf, &endp, 0);
if (endp == buf)
return -EBADMSG;
diff --git a/drivers/nfc/nxp-nci/i2c.c b/drivers/nfc/nxp-nci/i2c.c
index 9f60e4dc5a90..7e451c10985d 100644
--- a/drivers/nfc/nxp-nci/i2c.c
+++ b/drivers/nfc/nxp-nci/i2c.c
@@ -286,7 +286,7 @@ static int nxp_nci_i2c_probe(struct i2c_client *client,
return PTR_ERR(phy->gpiod_en);
}
- phy->gpiod_fw = devm_gpiod_get(dev, "firmware", GPIOD_OUT_LOW);
+ phy->gpiod_fw = devm_gpiod_get_optional(dev, "firmware", GPIOD_OUT_LOW);
if (IS_ERR(phy->gpiod_fw)) {
nfc_err(dev, "Failed to get FW gpio\n");
return PTR_ERR(phy->gpiod_fw);
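
Switching to devm_gpiod_get_optional() changes the failure semantics: when no "firmware" GPIO is described at all, the call returns NULL instead of an error, and the gpiod_*() setters accept a NULL descriptor as a no-op, so the rest of the driver needs no extra checks while genuine lookup errors still propagate. A minimal sketch of the pattern (example_probe is illustrative; requires <linux/gpio/consumer.h>):

static int example_probe(struct device *dev)
{
	struct gpio_desc *fw;

	fw = devm_gpiod_get_optional(dev, "firmware", GPIOD_OUT_LOW);
	if (IS_ERR(fw))
		return PTR_ERR(fw);	/* real lookup errors still fail */

	gpiod_set_value_cansleep(fw, 1);	/* no-op when fw == NULL */
	return 0;
}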
diff --git a/drivers/nfc/s3fwrn5/Kconfig b/drivers/nfc/s3fwrn5/Kconfig
index 3f8b6da58280..8a6b1a79de25 100644
--- a/drivers/nfc/s3fwrn5/Kconfig
+++ b/drivers/nfc/s3fwrn5/Kconfig
@@ -20,3 +20,15 @@ config NFC_S3FWRN5_I2C
To compile this driver as a module, choose m here. The module will
be called s3fwrn5_i2c.ko.
Say N if unsure.
+
+config NFC_S3FWRN82_UART
+ tristate "Samsung S3FWRN82 UART support"
+ depends on NFC_NCI && SERIAL_DEV_BUS
+ select NFC_S3FWRN5
+ help
+ This module adds support for a UART interface to the S3FWRN82 chip.
+ Select this if your platform is using the UART bus.
+
+ To compile this driver as a module, choose m here. The module will
+ be called s3fwrn82_uart.ko.
+ Say N if unsure.
diff --git a/drivers/nfc/s3fwrn5/Makefile b/drivers/nfc/s3fwrn5/Makefile
index d0ffa35f50e8..7da827ac149c 100644
--- a/drivers/nfc/s3fwrn5/Makefile
+++ b/drivers/nfc/s3fwrn5/Makefile
@@ -3,8 +3,10 @@
# Makefile for Samsung S3FWRN5 NFC driver
#
-s3fwrn5-objs = core.o firmware.o nci.o
+s3fwrn5-objs = core.o firmware.o nci.o phy_common.o
s3fwrn5_i2c-objs = i2c.o
+s3fwrn82_uart-objs = uart.o
obj-$(CONFIG_NFC_S3FWRN5) += s3fwrn5.o
obj-$(CONFIG_NFC_S3FWRN5_I2C) += s3fwrn5_i2c.o
+obj-$(CONFIG_NFC_S3FWRN82_UART) += s3fwrn82_uart.o
diff --git a/drivers/nfc/s3fwrn5/core.c b/drivers/nfc/s3fwrn5/core.c
index ba6c486d6465..f8e5d78d9078 100644
--- a/drivers/nfc/s3fwrn5/core.c
+++ b/drivers/nfc/s3fwrn5/core.c
@@ -136,7 +136,7 @@ static struct nci_ops s3fwrn5_nci_ops = {
};
int s3fwrn5_probe(struct nci_dev **ndev, void *phy_id, struct device *pdev,
- const struct s3fwrn5_phy_ops *phy_ops, unsigned int max_payload)
+ const struct s3fwrn5_phy_ops *phy_ops)
{
struct s3fwrn5_info *info;
int ret;
@@ -148,7 +148,6 @@ int s3fwrn5_probe(struct nci_dev **ndev, void *phy_id, struct device *pdev,
info->phy_id = phy_id;
info->pdev = pdev;
info->phy_ops = phy_ops;
- info->max_payload = max_payload;
mutex_init(&info->mutex);
s3fwrn5_set_mode(info, S3FWRN5_MODE_COLD);
diff --git a/drivers/nfc/s3fwrn5/firmware.c b/drivers/nfc/s3fwrn5/firmware.c
index ec930ee2c847..4cde6dd5c019 100644
--- a/drivers/nfc/s3fwrn5/firmware.c
+++ b/drivers/nfc/s3fwrn5/firmware.c
@@ -266,7 +266,7 @@ static int s3fwrn5_fw_complete_update_mode(struct s3fwrn5_fw_info *fw_info)
}
/*
- * Firmware header stucture:
+ * Firmware header structure:
*
* 0x00 - 0x0B : Date and time string (w/o NUL termination)
* 0x10 - 0x13 : Firmware version
diff --git a/drivers/nfc/s3fwrn5/i2c.c b/drivers/nfc/s3fwrn5/i2c.c
index d0a3bd9ff3c3..e1bdde105f24 100644
--- a/drivers/nfc/s3fwrn5/i2c.c
+++ b/drivers/nfc/s3fwrn5/i2c.c
@@ -15,76 +15,30 @@
#include <net/nfc/nfc.h>
-#include "s3fwrn5.h"
+#include "phy_common.h"
#define S3FWRN5_I2C_DRIVER_NAME "s3fwrn5_i2c"
-#define S3FWRN5_I2C_MAX_PAYLOAD 32
-#define S3FWRN5_EN_WAIT_TIME 150
-
struct s3fwrn5_i2c_phy {
+ struct phy_common common;
struct i2c_client *i2c_dev;
- struct nci_dev *ndev;
-
- int gpio_en;
- int gpio_fw_wake;
-
- struct mutex mutex;
- enum s3fwrn5_mode mode;
unsigned int irq_skip:1;
};
-static void s3fwrn5_i2c_set_wake(void *phy_id, bool wake)
-{
- struct s3fwrn5_i2c_phy *phy = phy_id;
-
- mutex_lock(&phy->mutex);
- gpio_set_value(phy->gpio_fw_wake, wake);
- msleep(S3FWRN5_EN_WAIT_TIME/2);
- mutex_unlock(&phy->mutex);
-}
-
static void s3fwrn5_i2c_set_mode(void *phy_id, enum s3fwrn5_mode mode)
{
struct s3fwrn5_i2c_phy *phy = phy_id;
- mutex_lock(&phy->mutex);
+ mutex_lock(&phy->common.mutex);
- if (phy->mode == mode)
+ if (s3fwrn5_phy_power_ctrl(&phy->common, mode) == false)
goto out;
- phy->mode = mode;
-
- gpio_set_value(phy->gpio_en, 1);
- gpio_set_value(phy->gpio_fw_wake, 0);
- if (mode == S3FWRN5_MODE_FW)
- gpio_set_value(phy->gpio_fw_wake, 1);
-
- if (mode != S3FWRN5_MODE_COLD) {
- msleep(S3FWRN5_EN_WAIT_TIME);
- gpio_set_value(phy->gpio_en, 0);
- msleep(S3FWRN5_EN_WAIT_TIME/2);
- }
-
phy->irq_skip = true;
out:
- mutex_unlock(&phy->mutex);
-}
-
-static enum s3fwrn5_mode s3fwrn5_i2c_get_mode(void *phy_id)
-{
- struct s3fwrn5_i2c_phy *phy = phy_id;
- enum s3fwrn5_mode mode;
-
- mutex_lock(&phy->mutex);
-
- mode = phy->mode;
-
- mutex_unlock(&phy->mutex);
-
- return mode;
+ mutex_unlock(&phy->common.mutex);
}
static int s3fwrn5_i2c_write(void *phy_id, struct sk_buff *skb)
@@ -92,7 +46,7 @@ static int s3fwrn5_i2c_write(void *phy_id, struct sk_buff *skb)
struct s3fwrn5_i2c_phy *phy = phy_id;
int ret;
- mutex_lock(&phy->mutex);
+ mutex_lock(&phy->common.mutex);
phy->irq_skip = false;
@@ -103,7 +57,7 @@ static int s3fwrn5_i2c_write(void *phy_id, struct sk_buff *skb)
ret = i2c_master_send(phy->i2c_dev, skb->data, skb->len);
}
- mutex_unlock(&phy->mutex);
+ mutex_unlock(&phy->common.mutex);
if (ret < 0)
return ret;
@@ -115,9 +69,9 @@ static int s3fwrn5_i2c_write(void *phy_id, struct sk_buff *skb)
}
static const struct s3fwrn5_phy_ops i2c_phy_ops = {
- .set_wake = s3fwrn5_i2c_set_wake,
+ .set_wake = s3fwrn5_phy_set_wake,
.set_mode = s3fwrn5_i2c_set_mode,
- .get_mode = s3fwrn5_i2c_get_mode,
+ .get_mode = s3fwrn5_phy_get_mode,
.write = s3fwrn5_i2c_write,
};
@@ -129,7 +83,7 @@ static int s3fwrn5_i2c_read(struct s3fwrn5_i2c_phy *phy)
char hdr[4];
int ret;
- hdr_size = (phy->mode == S3FWRN5_MODE_NCI) ?
+ hdr_size = (phy->common.mode == S3FWRN5_MODE_NCI) ?
NCI_CTRL_HDR_SIZE : S3FWRN5_FW_HDR_SIZE;
ret = i2c_master_recv(phy->i2c_dev, hdr, hdr_size);
if (ret < 0)
@@ -138,7 +92,7 @@ static int s3fwrn5_i2c_read(struct s3fwrn5_i2c_phy *phy)
if (ret < hdr_size)
return -EBADMSG;
- data_len = (phy->mode == S3FWRN5_MODE_NCI) ?
+ data_len = (phy->common.mode == S3FWRN5_MODE_NCI) ?
((struct nci_ctrl_hdr *)hdr)->plen :
((struct s3fwrn5_fw_header *)hdr)->len;
@@ -158,24 +112,24 @@ static int s3fwrn5_i2c_read(struct s3fwrn5_i2c_phy *phy)
}
out:
- return s3fwrn5_recv_frame(phy->ndev, skb, phy->mode);
+ return s3fwrn5_recv_frame(phy->common.ndev, skb, phy->common.mode);
}
static irqreturn_t s3fwrn5_i2c_irq_thread_fn(int irq, void *phy_id)
{
struct s3fwrn5_i2c_phy *phy = phy_id;
- if (!phy || !phy->ndev) {
+ if (!phy || !phy->common.ndev) {
WARN_ON_ONCE(1);
return IRQ_NONE;
}
- mutex_lock(&phy->mutex);
+ mutex_lock(&phy->common.mutex);
if (phy->irq_skip)
goto out;
- switch (phy->mode) {
+ switch (phy->common.mode) {
case S3FWRN5_MODE_NCI:
case S3FWRN5_MODE_FW:
s3fwrn5_i2c_read(phy);
@@ -185,7 +139,7 @@ static irqreturn_t s3fwrn5_i2c_irq_thread_fn(int irq, void *phy_id)
}
out:
- mutex_unlock(&phy->mutex);
+ mutex_unlock(&phy->common.mutex);
return IRQ_HANDLED;
}
@@ -198,19 +152,23 @@ static int s3fwrn5_i2c_parse_dt(struct i2c_client *client)
if (!np)
return -ENODEV;
- phy->gpio_en = of_get_named_gpio(np, "en-gpios", 0);
- if (!gpio_is_valid(phy->gpio_en)) {
+ phy->common.gpio_en = of_get_named_gpio(np, "en-gpios", 0);
+ if (!gpio_is_valid(phy->common.gpio_en)) {
/* Support also deprecated property */
- phy->gpio_en = of_get_named_gpio(np, "s3fwrn5,en-gpios", 0);
- if (!gpio_is_valid(phy->gpio_en))
+ phy->common.gpio_en = of_get_named_gpio(np,
+ "s3fwrn5,en-gpios",
+ 0);
+ if (!gpio_is_valid(phy->common.gpio_en))
return -ENODEV;
}
- phy->gpio_fw_wake = of_get_named_gpio(np, "wake-gpios", 0);
- if (!gpio_is_valid(phy->gpio_fw_wake)) {
+ phy->common.gpio_fw_wake = of_get_named_gpio(np, "wake-gpios", 0);
+ if (!gpio_is_valid(phy->common.gpio_fw_wake)) {
/* Support also deprecated property */
- phy->gpio_fw_wake = of_get_named_gpio(np, "s3fwrn5,fw-gpios", 0);
- if (!gpio_is_valid(phy->gpio_fw_wake))
+ phy->common.gpio_fw_wake = of_get_named_gpio(np,
+ "s3fwrn5,fw-gpios",
+ 0);
+ if (!gpio_is_valid(phy->common.gpio_fw_wake))
return -ENODEV;
}
@@ -227,8 +185,8 @@ static int s3fwrn5_i2c_probe(struct i2c_client *client,
if (!phy)
return -ENOMEM;
- mutex_init(&phy->mutex);
- phy->mode = S3FWRN5_MODE_COLD;
+ mutex_init(&phy->common.mutex);
+ phy->common.mode = S3FWRN5_MODE_COLD;
phy->irq_skip = true;
phy->i2c_dev = client;
@@ -238,18 +196,19 @@ static int s3fwrn5_i2c_probe(struct i2c_client *client,
if (ret < 0)
return ret;
- ret = devm_gpio_request_one(&phy->i2c_dev->dev, phy->gpio_en,
- GPIOF_OUT_INIT_HIGH, "s3fwrn5_en");
+ ret = devm_gpio_request_one(&phy->i2c_dev->dev, phy->common.gpio_en,
+ GPIOF_OUT_INIT_HIGH, "s3fwrn5_en");
if (ret < 0)
return ret;
- ret = devm_gpio_request_one(&phy->i2c_dev->dev, phy->gpio_fw_wake,
- GPIOF_OUT_INIT_LOW, "s3fwrn5_fw_wake");
+ ret = devm_gpio_request_one(&phy->i2c_dev->dev,
+ phy->common.gpio_fw_wake,
+ GPIOF_OUT_INIT_LOW, "s3fwrn5_fw_wake");
if (ret < 0)
return ret;
- ret = s3fwrn5_probe(&phy->ndev, phy, &phy->i2c_dev->dev, &i2c_phy_ops,
- S3FWRN5_I2C_MAX_PAYLOAD);
+ ret = s3fwrn5_probe(&phy->common.ndev, phy, &phy->i2c_dev->dev,
+ &i2c_phy_ops);
if (ret < 0)
return ret;
@@ -257,7 +216,7 @@ static int s3fwrn5_i2c_probe(struct i2c_client *client,
s3fwrn5_i2c_irq_thread_fn, IRQF_TRIGGER_HIGH | IRQF_ONESHOT,
S3FWRN5_I2C_DRIVER_NAME, phy);
if (ret)
- s3fwrn5_remove(phy->ndev);
+ s3fwrn5_remove(phy->common.ndev);
return ret;
}
@@ -266,7 +225,7 @@ static int s3fwrn5_i2c_remove(struct i2c_client *client)
{
struct s3fwrn5_i2c_phy *phy = i2c_get_clientdata(client);
- s3fwrn5_remove(phy->ndev);
+ s3fwrn5_remove(phy->common.ndev);
return 0;
}
diff --git a/drivers/nfc/s3fwrn5/phy_common.c b/drivers/nfc/s3fwrn5/phy_common.c
new file mode 100644
index 000000000000..497b02b30ae7
--- /dev/null
+++ b/drivers/nfc/s3fwrn5/phy_common.c
@@ -0,0 +1,75 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Link Layer for Samsung S3FWRN5 NCI based Driver
+ *
+ * Copyright (C) 2015 Samsung Electronics
+ * Robert Baldyga <r.baldyga@samsung.com>
+ * Copyright (C) 2020 Samsung Electronics
+ * Bongsu Jeon <bongsu.jeon@samsung.com>
+ */
+
+#include <linux/gpio.h>
+#include <linux/delay.h>
+#include <linux/module.h>
+
+#include "phy_common.h"
+
+void s3fwrn5_phy_set_wake(void *phy_id, bool wake)
+{
+ struct phy_common *phy = phy_id;
+
+ mutex_lock(&phy->mutex);
+ gpio_set_value(phy->gpio_fw_wake, wake);
+ msleep(S3FWRN5_EN_WAIT_TIME);
+ mutex_unlock(&phy->mutex);
+}
+EXPORT_SYMBOL(s3fwrn5_phy_set_wake);
+
+bool s3fwrn5_phy_power_ctrl(struct phy_common *phy, enum s3fwrn5_mode mode)
+{
+ if (phy->mode == mode)
+ return false;
+
+ phy->mode = mode;
+
+ gpio_set_value(phy->gpio_en, 1);
+ gpio_set_value(phy->gpio_fw_wake, 0);
+ if (mode == S3FWRN5_MODE_FW)
+ gpio_set_value(phy->gpio_fw_wake, 1);
+
+ if (mode != S3FWRN5_MODE_COLD) {
+ msleep(S3FWRN5_EN_WAIT_TIME);
+ gpio_set_value(phy->gpio_en, 0);
+ msleep(S3FWRN5_EN_WAIT_TIME);
+ }
+
+ return true;
+}
+EXPORT_SYMBOL(s3fwrn5_phy_power_ctrl);
+
+void s3fwrn5_phy_set_mode(void *phy_id, enum s3fwrn5_mode mode)
+{
+ struct phy_common *phy = phy_id;
+
+ mutex_lock(&phy->mutex);
+
+ s3fwrn5_phy_power_ctrl(phy, mode);
+
+ mutex_unlock(&phy->mutex);
+}
+EXPORT_SYMBOL(s3fwrn5_phy_set_mode);
+
+enum s3fwrn5_mode s3fwrn5_phy_get_mode(void *phy_id)
+{
+ struct phy_common *phy = phy_id;
+ enum s3fwrn5_mode mode;
+
+ mutex_lock(&phy->mutex);
+
+ mode = phy->mode;
+
+ mutex_unlock(&phy->mutex);
+
+ return mode;
+}
+EXPORT_SYMBOL(s3fwrn5_phy_get_mode);
diff --git a/drivers/nfc/s3fwrn5/phy_common.h b/drivers/nfc/s3fwrn5/phy_common.h
new file mode 100644
index 000000000000..99749c9294d1
--- /dev/null
+++ b/drivers/nfc/s3fwrn5/phy_common.h
@@ -0,0 +1,37 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later
+ *
+ * Link Layer for Samsung S3FWRN5 NCI based Driver
+ *
+ * Copyright (C) 2015 Samsung Electronics
+ * Robert Baldyga <r.baldyga@samsung.com>
+ * Copyright (C) 2020 Samsung Electronics
+ * Bongsu Jeon <bongsu.jeon@samsung.com>
+ */
+
+#ifndef __NFC_S3FWRN5_PHY_COMMON_H
+#define __NFC_S3FWRN5_PHY_COMMON_H
+
+#include <linux/mutex.h>
+#include <net/nfc/nci_core.h>
+
+#include "s3fwrn5.h"
+
+#define S3FWRN5_EN_WAIT_TIME 20
+
+struct phy_common {
+ struct nci_dev *ndev;
+
+ int gpio_en;
+ int gpio_fw_wake;
+
+ struct mutex mutex;
+
+ enum s3fwrn5_mode mode;
+};
+
+void s3fwrn5_phy_set_wake(void *phy_id, bool wake);
+bool s3fwrn5_phy_power_ctrl(struct phy_common *phy, enum s3fwrn5_mode mode);
+void s3fwrn5_phy_set_mode(void *phy_id, enum s3fwrn5_mode mode);
+enum s3fwrn5_mode s3fwrn5_phy_get_mode(void *phy_id);
+
+#endif /* __NFC_S3FWRN5_PHY_COMMON_H */
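
One detail the new header leaves implicit: the shared ops (s3fwrn5_phy_set_wake() and friends) receive the opaque phy_id that the bus driver registered and cast it directly to struct phy_common *. That only works because both the I2C and UART wrappers place the common member first, so a pointer to the wrapper and a pointer to its common field are interchangeable. A sketch of that embedding contract, with illustrative example_* names:

struct example_bus_phy {
	struct phy_common common;	/* must remain the first member */
	void *bus_handle;		/* bus-specific state follows */
};

static void example_use(struct example_bus_phy *phy)
{
	/* (void *)phy == (void *)&phy->common with first-member layout,
	 * so the shared helper can be handed the wrapper directly.
	 */
	s3fwrn5_phy_set_wake(phy, true);
}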
diff --git a/drivers/nfc/s3fwrn5/s3fwrn5.h b/drivers/nfc/s3fwrn5/s3fwrn5.h
index ede68bb5eeae..bb8f936d13a2 100644
--- a/drivers/nfc/s3fwrn5/s3fwrn5.h
+++ b/drivers/nfc/s3fwrn5/s3fwrn5.h
@@ -34,7 +34,6 @@ struct s3fwrn5_info {
struct device *pdev;
const struct s3fwrn5_phy_ops *phy_ops;
- unsigned int max_payload;
struct s3fwrn5_fw_info fw_info;
@@ -45,7 +44,7 @@ static inline int s3fwrn5_set_mode(struct s3fwrn5_info *info,
enum s3fwrn5_mode mode)
{
if (!info->phy_ops->set_mode)
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
info->phy_ops->set_mode(info->phy_id, mode);
@@ -55,7 +54,7 @@ static inline int s3fwrn5_set_mode(struct s3fwrn5_info *info,
static inline enum s3fwrn5_mode s3fwrn5_get_mode(struct s3fwrn5_info *info)
{
if (!info->phy_ops->get_mode)
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
return info->phy_ops->get_mode(info->phy_id);
}
@@ -63,7 +62,7 @@ static inline enum s3fwrn5_mode s3fwrn5_get_mode(struct s3fwrn5_info *info)
static inline int s3fwrn5_set_wake(struct s3fwrn5_info *info, bool wake)
{
if (!info->phy_ops->set_wake)
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
info->phy_ops->set_wake(info->phy_id, wake);
@@ -73,13 +72,13 @@ static inline int s3fwrn5_set_wake(struct s3fwrn5_info *info, bool wake)
static inline int s3fwrn5_write(struct s3fwrn5_info *info, struct sk_buff *skb)
{
if (!info->phy_ops->write)
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
return info->phy_ops->write(info->phy_id, skb);
}
int s3fwrn5_probe(struct nci_dev **ndev, void *phy_id, struct device *pdev,
- const struct s3fwrn5_phy_ops *phy_ops, unsigned int max_payload);
+ const struct s3fwrn5_phy_ops *phy_ops);
void s3fwrn5_remove(struct nci_dev *ndev);
int s3fwrn5_recv_frame(struct nci_dev *ndev, struct sk_buff *skb,
diff --git a/drivers/nfc/s3fwrn5/uart.c b/drivers/nfc/s3fwrn5/uart.c
new file mode 100644
index 000000000000..82ea35d748a5
--- /dev/null
+++ b/drivers/nfc/s3fwrn5/uart.c
@@ -0,0 +1,196 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * UART Link Layer for S3FWRN82 NCI based Driver
+ *
+ * Copyright (C) 2015 Samsung Electronics
+ * Robert Baldyga <r.baldyga@samsung.com>
+ * Copyright (C) 2020 Samsung Electronics
+ * Bongsu Jeon <bongsu.jeon@samsung.com>
+ */
+
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/nfc.h>
+#include <linux/netdevice.h>
+#include <linux/of.h>
+#include <linux/serdev.h>
+#include <linux/gpio.h>
+#include <linux/of_gpio.h>
+
+#include "phy_common.h"
+
+#define S3FWRN82_NCI_HEADER 3
+#define S3FWRN82_NCI_IDX 2
+#define NCI_SKB_BUFF_LEN 258
+
+struct s3fwrn82_uart_phy {
+ struct phy_common common;
+ struct serdev_device *ser_dev;
+ struct sk_buff *recv_skb;
+};
+
+static int s3fwrn82_uart_write(void *phy_id, struct sk_buff *out)
+{
+ struct s3fwrn82_uart_phy *phy = phy_id;
+ int err;
+
+ err = serdev_device_write(phy->ser_dev,
+ out->data, out->len,
+ MAX_SCHEDULE_TIMEOUT);
+ if (err < 0)
+ return err;
+
+ return 0;
+}
+
+static const struct s3fwrn5_phy_ops uart_phy_ops = {
+ .set_wake = s3fwrn5_phy_set_wake,
+ .set_mode = s3fwrn5_phy_set_mode,
+ .get_mode = s3fwrn5_phy_get_mode,
+ .write = s3fwrn82_uart_write,
+};
+
+static int s3fwrn82_uart_read(struct serdev_device *serdev,
+ const unsigned char *data,
+ size_t count)
+{
+ struct s3fwrn82_uart_phy *phy = serdev_device_get_drvdata(serdev);
+ size_t i;
+
+ for (i = 0; i < count; i++) {
+ skb_put_u8(phy->recv_skb, *data++);
+
+ if (phy->recv_skb->len < S3FWRN82_NCI_HEADER)
+ continue;
+
+ if ((phy->recv_skb->len - S3FWRN82_NCI_HEADER)
+ < phy->recv_skb->data[S3FWRN82_NCI_IDX])
+ continue;
+
+ s3fwrn5_recv_frame(phy->common.ndev, phy->recv_skb,
+ phy->common.mode);
+ phy->recv_skb = alloc_skb(NCI_SKB_BUFF_LEN, GFP_KERNEL);
+ if (!phy->recv_skb)
+ return 0;
+ }
+
+ return i;
+}
+
+static const struct serdev_device_ops s3fwrn82_serdev_ops = {
+ .receive_buf = s3fwrn82_uart_read,
+ .write_wakeup = serdev_device_write_wakeup,
+};
+
+static const struct of_device_id s3fwrn82_uart_of_match[] = {
+ { .compatible = "samsung,s3fwrn82", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, s3fwrn82_uart_of_match);
+
+static int s3fwrn82_uart_parse_dt(struct serdev_device *serdev)
+{
+ struct s3fwrn82_uart_phy *phy = serdev_device_get_drvdata(serdev);
+ struct device_node *np = serdev->dev.of_node;
+
+ if (!np)
+ return -ENODEV;
+
+ phy->common.gpio_en = of_get_named_gpio(np, "en-gpios", 0);
+ if (!gpio_is_valid(phy->common.gpio_en))
+ return -ENODEV;
+
+ phy->common.gpio_fw_wake = of_get_named_gpio(np, "wake-gpios", 0);
+ if (!gpio_is_valid(phy->common.gpio_fw_wake))
+ return -ENODEV;
+
+ return 0;
+}
+
+static int s3fwrn82_uart_probe(struct serdev_device *serdev)
+{
+ struct s3fwrn82_uart_phy *phy;
+ int ret = -ENOMEM;
+
+ phy = devm_kzalloc(&serdev->dev, sizeof(*phy), GFP_KERNEL);
+ if (!phy)
+ goto err_exit;
+
+ phy->recv_skb = alloc_skb(NCI_SKB_BUFF_LEN, GFP_KERNEL);
+ if (!phy->recv_skb)
+ goto err_exit;
+
+ mutex_init(&phy->common.mutex);
+ phy->common.mode = S3FWRN5_MODE_COLD;
+
+ phy->ser_dev = serdev;
+ serdev_device_set_drvdata(serdev, phy);
+ serdev_device_set_client_ops(serdev, &s3fwrn82_serdev_ops);
+ ret = serdev_device_open(serdev);
+ if (ret) {
+ dev_err(&serdev->dev, "Unable to open device\n");
+ goto err_skb;
+ }
+
+ ret = serdev_device_set_baudrate(serdev, 115200);
+ if (ret != 115200) {
+ ret = -EINVAL;
+ goto err_serdev;
+ }
+
+ serdev_device_set_flow_control(serdev, false);
+
+ ret = s3fwrn82_uart_parse_dt(serdev);
+ if (ret < 0)
+ goto err_serdev;
+
+ ret = devm_gpio_request_one(&phy->ser_dev->dev, phy->common.gpio_en,
+ GPIOF_OUT_INIT_HIGH, "s3fwrn82_en");
+ if (ret < 0)
+ goto err_serdev;
+
+ ret = devm_gpio_request_one(&phy->ser_dev->dev,
+ phy->common.gpio_fw_wake,
+ GPIOF_OUT_INIT_LOW, "s3fwrn82_fw_wake");
+ if (ret < 0)
+ goto err_serdev;
+
+ ret = s3fwrn5_probe(&phy->common.ndev, phy, &phy->ser_dev->dev,
+ &uart_phy_ops);
+ if (ret < 0)
+ goto err_serdev;
+
+ return ret;
+
+err_serdev:
+ serdev_device_close(serdev);
+err_skb:
+ kfree_skb(phy->recv_skb);
+err_exit:
+ return ret;
+}
+
+static void s3fwrn82_uart_remove(struct serdev_device *serdev)
+{
+ struct s3fwrn82_uart_phy *phy = serdev_device_get_drvdata(serdev);
+
+ s3fwrn5_remove(phy->common.ndev);
+ serdev_device_close(serdev);
+ kfree_skb(phy->recv_skb);
+}
+
+static struct serdev_device_driver s3fwrn82_uart_driver = {
+ .probe = s3fwrn82_uart_probe,
+ .remove = s3fwrn82_uart_remove,
+ .driver = {
+ .name = "s3fwrn82_uart",
+ .of_match_table = s3fwrn82_uart_of_match,
+ },
+};
+
+module_serdev_device_driver(s3fwrn82_uart_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("UART driver for Samsung NFC");
+MODULE_AUTHOR("Bongsu Jeon <bongsu.jeon@samsung.com>");
diff --git a/drivers/ptp/ptp_idt82p33.c b/drivers/ptp/ptp_idt82p33.c
index 179f6c472e50..c1c959f7e52b 100644
--- a/drivers/ptp/ptp_idt82p33.c
+++ b/drivers/ptp/ptp_idt82p33.c
@@ -21,6 +21,7 @@ MODULE_DESCRIPTION("Driver for IDT 82p33xxx clock devices");
MODULE_AUTHOR("IDT support-1588 <IDT-support-1588@lm.renesas.com>");
MODULE_VERSION("1.0");
MODULE_LICENSE("GPL");
+MODULE_FIRMWARE(FW_FILENAME);
/* Module Parameters */
static u32 sync_tod_timeout = SYNC_TOD_TIMEOUT_SEC;
@@ -77,11 +78,10 @@ static void idt82p33_timespec_to_byte_array(struct timespec64 const *ts,
}
}
-static int idt82p33_xfer(struct idt82p33 *idt82p33,
- unsigned char regaddr,
- unsigned char *buf,
- unsigned int count,
- int write)
+static int idt82p33_xfer_read(struct idt82p33 *idt82p33,
+ unsigned char regaddr,
+ unsigned char *buf,
+ unsigned int count)
{
struct i2c_client *client = idt82p33->client;
struct i2c_msg msg[2];
@@ -93,7 +93,7 @@ static int idt82p33_xfer(struct idt82p33 *idt82p33,
msg[0].buf = &regaddr;
msg[1].addr = client->addr;
- msg[1].flags = write ? 0 : I2C_M_RD;
+ msg[1].flags = I2C_M_RD;
msg[1].len = count;
msg[1].buf = buf;
@@ -109,6 +109,31 @@ static int idt82p33_xfer(struct idt82p33 *idt82p33,
return 0;
}
+static int idt82p33_xfer_write(struct idt82p33 *idt82p33,
+ u8 regaddr,
+ u8 *buf,
+ u16 count)
+{
+ struct i2c_client *client = idt82p33->client;
+	/* we add 1 byte for the device register address */
+ u8 msg[IDT82P33_MAX_WRITE_COUNT + 1];
+ int err;
+
+ if (count > IDT82P33_MAX_WRITE_COUNT)
+ return -EINVAL;
+
+ msg[0] = regaddr;
+ memcpy(&msg[1], buf, count);
+
+ err = i2c_master_send(client, msg, count + 1);
+ if (err < 0) {
+ dev_err(&client->dev, "i2c_master_send returned %d\n", err);
+ return err;
+ }
+
+ return 0;
+}
+
static int idt82p33_page_offset(struct idt82p33 *idt82p33, unsigned char val)
{
int err;
@@ -116,7 +141,7 @@ static int idt82p33_page_offset(struct idt82p33 *idt82p33, unsigned char val)
if (idt82p33->page_offset == val)
return 0;
- err = idt82p33_xfer(idt82p33, PAGE_ADDR, &val, sizeof(val), 1);
+ err = idt82p33_xfer_write(idt82p33, PAGE_ADDR, &val, sizeof(val));
if (err)
dev_err(&idt82p33->client->dev,
"failed to set page offset %d\n", val);
@@ -137,11 +162,12 @@ static int idt82p33_rdwr(struct idt82p33 *idt82p33, unsigned int regaddr,
err = idt82p33_page_offset(idt82p33, page);
if (err)
- goto out;
+ return err;
- err = idt82p33_xfer(idt82p33, offset, buf, count, write);
-out:
- return err;
+ if (write)
+ return idt82p33_xfer_write(idt82p33, offset, buf, count);
+
+ return idt82p33_xfer_read(idt82p33, offset, buf, count);
}
static int idt82p33_read(struct idt82p33 *idt82p33, unsigned int regaddr,
@@ -294,7 +320,6 @@ static int _idt82p33_adjfine(struct idt82p33_channel *channel, long scaled_ppm)
{
struct idt82p33 *idt82p33 = channel->idt82p33;
unsigned char buf[5] = {0};
- int neg_adj = 0;
int err, i;
s64 fcw;
@@ -314,16 +339,9 @@ static int _idt82p33_adjfine(struct idt82p33_channel *channel, long scaled_ppm)
* FCW = -------------
* 168 * 2^4
*/
- if (scaled_ppm < 0) {
- neg_adj = 1;
- scaled_ppm = -scaled_ppm;
- }
fcw = scaled_ppm * 244140625ULL;
- fcw = div_u64(fcw, 2688);
-
- if (neg_adj)
- fcw = -fcw;
+ fcw = div_s64(fcw, 2688);
for (i = 0; i < 5; i++) {
buf[i] = fcw & 0xff;
@@ -448,8 +466,11 @@ static int idt82p33_measure_tod_write_overhead(struct idt82p33_channel *channel)
err = idt82p33_measure_settime_gettime_gap_overhead(channel, &gap_ns);
- if (err)
+ if (err) {
+ dev_err(&idt82p33->client->dev,
+ "Failed in %s with err %d!\n", __func__, err);
return err;
+ }
err = idt82p33_measure_one_byte_write_overhead(channel,
&one_byte_write_ns);
@@ -518,13 +539,10 @@ static int idt82p33_sync_tod(struct idt82p33_channel *channel, bool enable)
u8 sync_cnfg;
int err;
- if (enable == channel->sync_tod_on) {
- if (enable && sync_tod_timeout) {
- mod_delayed_work(system_wq, &channel->sync_tod_work,
- sync_tod_timeout * HZ);
- }
- return 0;
- }
+ /* Turn it off after sync_tod_timeout seconds */
+ if (enable && sync_tod_timeout)
+ ptp_schedule_worker(channel->ptp_clock,
+ sync_tod_timeout * HZ);
err = idt82p33_read(idt82p33, channel->dpll_sync_cnfg,
&sync_cnfg, sizeof(sync_cnfg));
@@ -532,29 +550,17 @@ static int idt82p33_sync_tod(struct idt82p33_channel *channel, bool enable)
return err;
sync_cnfg &= ~SYNC_TOD;
-
if (enable)
sync_cnfg |= SYNC_TOD;
- err = idt82p33_write(idt82p33, channel->dpll_sync_cnfg,
- &sync_cnfg, sizeof(sync_cnfg));
- if (err)
- return err;
-
- channel->sync_tod_on = enable;
-
- if (enable && sync_tod_timeout) {
- mod_delayed_work(system_wq, &channel->sync_tod_work,
- sync_tod_timeout * HZ);
- }
-
- return 0;
+ return idt82p33_write(idt82p33, channel->dpll_sync_cnfg,
+ &sync_cnfg, sizeof(sync_cnfg));
}
-static void idt82p33_sync_tod_work_handler(struct work_struct *work)
+static long idt82p33_sync_tod_work_handler(struct ptp_clock_info *ptp)
{
struct idt82p33_channel *channel =
- container_of(work, struct idt82p33_channel, sync_tod_work.work);
+ container_of(ptp, struct idt82p33_channel, caps);
struct idt82p33 *idt82p33 = channel->idt82p33;
mutex_lock(&idt82p33->reg_lock);
@@ -562,35 +568,46 @@ static void idt82p33_sync_tod_work_handler(struct work_struct *work)
(void)idt82p33_sync_tod(channel, false);
mutex_unlock(&idt82p33->reg_lock);
+
+ /* Return a negative value here to not reschedule */
+ return -1;
}
-static int idt82p33_pps_enable(struct idt82p33_channel *channel, bool enable)
+static int idt82p33_output_enable(struct idt82p33_channel *channel,
+ bool enable, unsigned int outn)
{
struct idt82p33 *idt82p33 = channel->idt82p33;
- u8 mask, outn, val;
int err;
+ u8 val;
+
+ err = idt82p33_read(idt82p33, OUT_MUX_CNFG(outn), &val, sizeof(val));
+ if (err)
+ return err;
+ if (enable)
+ val &= ~SQUELCH_ENABLE;
+ else
+ val |= SQUELCH_ENABLE;
+
+ return idt82p33_write(idt82p33, OUT_MUX_CNFG(outn), &val, sizeof(val));
+}
+
+static int idt82p33_output_mask_enable(struct idt82p33_channel *channel,
+ bool enable)
+{
+ u16 mask;
+ int err;
+ u8 outn;
mask = channel->output_mask;
outn = 0;
while (mask) {
if (mask & 0x1) {
- err = idt82p33_read(idt82p33, OUT_MUX_CNFG(outn),
- &val, sizeof(val));
- if (err)
- return err;
-
- if (enable)
- val &= ~SQUELCH_ENABLE;
- else
- val |= SQUELCH_ENABLE;
-
- err = idt82p33_write(idt82p33, OUT_MUX_CNFG(outn),
- &val, sizeof(val));
-
+ err = idt82p33_output_enable(channel, enable, outn);
if (err)
return err;
}
+
mask >>= 0x1;
outn++;
}
@@ -598,6 +615,20 @@ static int idt82p33_pps_enable(struct idt82p33_channel *channel, bool enable)
return 0;
}
+static int idt82p33_perout_enable(struct idt82p33_channel *channel,
+ bool enable,
+ struct ptp_perout_request *perout)
+{
+ unsigned int flags = perout->flags;
+
+ /* Enable/disable output based on output_mask */
+ if (flags == PEROUT_ENABLE_OUTPUT_MASK)
+ return idt82p33_output_mask_enable(channel, enable);
+
+ /* Enable/disable individual output instead */
+ return idt82p33_output_enable(channel, enable, perout->index);
+}
+
static int idt82p33_enable_tod(struct idt82p33_channel *channel)
{
struct idt82p33 *idt82p33 = channel->idt82p33;
@@ -611,15 +642,13 @@ static int idt82p33_enable_tod(struct idt82p33_channel *channel)
if (err)
return err;
- err = idt82p33_pps_enable(channel, false);
-
- if (err)
- return err;
-
err = idt82p33_measure_tod_write_overhead(channel);
- if (err)
+ if (err) {
+ dev_err(&idt82p33->client->dev,
+ "Failed in %s with err %d!\n", __func__, err);
return err;
+ }
err = _idt82p33_settime(channel, &ts);
@@ -638,10 +667,8 @@ static void idt82p33_ptp_clock_unregister_all(struct idt82p33 *idt82p33)
channel = &idt82p33->channel[i];
- if (channel->ptp_clock) {
+ if (channel->ptp_clock)
ptp_clock_unregister(channel->ptp_clock);
- cancel_delayed_work_sync(&channel->sync_tod_work);
- }
}
}
@@ -659,14 +686,15 @@ static int idt82p33_enable(struct ptp_clock_info *ptp,
if (rq->type == PTP_CLK_REQ_PEROUT) {
if (!on)
- err = idt82p33_pps_enable(channel, false);
-
+ err = idt82p33_perout_enable(channel, false,
+ &rq->perout);
/* Only accept a 1-PPS aligned to the second. */
else if (rq->perout.start.nsec || rq->perout.period.sec != 1 ||
rq->perout.period.nsec) {
err = -ERANGE;
} else
- err = idt82p33_pps_enable(channel, true);
+ err = idt82p33_perout_enable(channel, true,
+ &rq->perout);
}
mutex_unlock(&idt82p33->reg_lock);
@@ -674,6 +702,48 @@ static int idt82p33_enable(struct ptp_clock_info *ptp,
return err;
}
+static int idt82p33_adjwritephase(struct ptp_clock_info *ptp, s32 offset_ns)
+{
+ struct idt82p33_channel *channel =
+ container_of(ptp, struct idt82p33_channel, caps);
+ struct idt82p33 *idt82p33 = channel->idt82p33;
+ s64 offset_regval, offset_fs;
+ u8 val[4] = {0};
+ int err;
+
+ offset_fs = (s64)(-offset_ns) * 1000000;
+
+ if (offset_fs > WRITE_PHASE_OFFSET_LIMIT)
+ offset_fs = WRITE_PHASE_OFFSET_LIMIT;
+ else if (offset_fs < -WRITE_PHASE_OFFSET_LIMIT)
+ offset_fs = -WRITE_PHASE_OFFSET_LIMIT;
+
+ /* Convert from phaseoffset_fs to register value */
+ offset_regval = div_s64(offset_fs * 1000, IDT_T0DPLL_PHASE_RESOL);
+
+ val[0] = offset_regval & 0xFF;
+ val[1] = (offset_regval >> 8) & 0xFF;
+ val[2] = (offset_regval >> 16) & 0xFF;
+ val[3] = (offset_regval >> 24) & 0x1F;
+ val[3] |= PH_OFFSET_EN;
+
+ mutex_lock(&idt82p33->reg_lock);
+
+ err = idt82p33_dpll_set_mode(channel, PLL_MODE_WPH);
+ if (err) {
+ dev_err(&idt82p33->client->dev,
+ "Failed in %s with err %d!\n", __func__, err);
+ goto out;
+ }
+
+ err = idt82p33_write(idt82p33, channel->dpll_phase_cnfg, val,
+ sizeof(val));
+
+out:
+ mutex_unlock(&idt82p33->reg_lock);
+ return err;
+}
+
static int idt82p33_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
{
struct idt82p33_channel *channel =
@@ -683,6 +753,9 @@ static int idt82p33_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
mutex_lock(&idt82p33->reg_lock);
err = _idt82p33_adjfine(channel, scaled_ppm);
+ if (err)
+ dev_err(&idt82p33->client->dev,
+ "Failed in %s with err %d!\n", __func__, err);
mutex_unlock(&idt82p33->reg_lock);
return err;
@@ -706,10 +779,15 @@ static int idt82p33_adjtime(struct ptp_clock_info *ptp, s64 delta_ns)
if (err) {
mutex_unlock(&idt82p33->reg_lock);
+ dev_err(&idt82p33->client->dev,
+ "Adjtime failed in %s with err %d!\n", __func__, err);
return err;
}
err = idt82p33_sync_tod(channel, true);
+ if (err)
+ dev_err(&idt82p33->client->dev,
+ "Sync_tod failed in %s with err %d!\n", __func__, err);
mutex_unlock(&idt82p33->reg_lock);
@@ -725,6 +803,9 @@ static int idt82p33_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts)
mutex_lock(&idt82p33->reg_lock);
err = _idt82p33_gettime(channel, ts);
+ if (err)
+ dev_err(&idt82p33->client->dev,
+ "Failed in %s with err %d!\n", __func__, err);
mutex_unlock(&idt82p33->reg_lock);
return err;
@@ -740,6 +821,9 @@ static int idt82p33_settime(struct ptp_clock_info *ptp,
mutex_lock(&idt82p33->reg_lock);
err = _idt82p33_settime(channel, ts);
+ if (err)
+ dev_err(&idt82p33->client->dev,
+ "Failed in %s with err %d!\n", __func__, err);
mutex_unlock(&idt82p33->reg_lock);
return err;
@@ -772,9 +856,6 @@ static int idt82p33_channel_init(struct idt82p33_channel *channel, int index)
return -EINVAL;
}
- INIT_DELAYED_WORK(&channel->sync_tod_work,
- idt82p33_sync_tod_work_handler);
- channel->sync_tod_on = false;
channel->current_freq_ppb = 0;
return 0;
@@ -784,11 +865,14 @@ static void idt82p33_caps_init(struct ptp_clock_info *caps)
{
caps->owner = THIS_MODULE;
caps->max_adj = 92000;
+ caps->n_per_out = 11;
+ caps->adjphase = idt82p33_adjwritephase;
caps->adjfine = idt82p33_adjfine;
caps->adjtime = idt82p33_adjtime;
caps->gettime64 = idt82p33_gettime;
caps->settime64 = idt82p33_settime;
caps->enable = idt82p33_enable;
+ caps->do_aux_work = idt82p33_sync_tod_work_handler;
}
static int idt82p33_enable_channel(struct idt82p33 *idt82p33, u32 index)
@@ -802,23 +886,18 @@ static int idt82p33_enable_channel(struct idt82p33 *idt82p33, u32 index)
channel = &idt82p33->channel[index];
err = idt82p33_channel_init(channel, index);
- if (err)
+ if (err) {
+ dev_err(&idt82p33->client->dev,
+ "Channel_init failed in %s with err %d!\n",
+ __func__, err);
return err;
+ }
channel->idt82p33 = idt82p33;
idt82p33_caps_init(&channel->caps);
snprintf(channel->caps.name, sizeof(channel->caps.name),
"IDT 82P33 PLL%u", index);
- channel->caps.n_per_out = hweight8(channel->output_mask);
-
- err = idt82p33_dpll_set_mode(channel, PLL_MODE_DCO);
- if (err)
- return err;
-
- err = idt82p33_enable_tod(channel);
- if (err)
- return err;
channel->ptp_clock = ptp_clock_register(&channel->caps, NULL);
@@ -831,6 +910,22 @@ static int idt82p33_enable_channel(struct idt82p33 *idt82p33, u32 index)
if (!channel->ptp_clock)
return -ENOTSUPP;
+ err = idt82p33_dpll_set_mode(channel, PLL_MODE_DCO);
+ if (err) {
+ dev_err(&idt82p33->client->dev,
+ "Dpll_set_mode failed in %s with err %d!\n",
+ __func__, err);
+ return err;
+ }
+
+ err = idt82p33_enable_tod(channel);
+ if (err) {
+ dev_err(&idt82p33->client->dev,
+ "Enable_tod failed in %s with err %d!\n",
+ __func__, err);
+ return err;
+ }
+
dev_info(&idt82p33->client->dev, "PLL%d registered as ptp%d\n",
index, channel->ptp_clock->index);
@@ -850,8 +945,11 @@ static int idt82p33_load_firmware(struct idt82p33 *idt82p33)
err = request_firmware(&fw, FW_FILENAME, &idt82p33->client->dev);
- if (err)
+ if (err) {
+ dev_err(&idt82p33->client->dev,
+ "Failed in %s with err %d!\n", __func__, err);
return err;
+ }
dev_dbg(&idt82p33->client->dev, "firmware size %zu bytes\n", fw->size);
@@ -935,8 +1033,12 @@ static int idt82p33_probe(struct i2c_client *client,
for (i = 0; i < MAX_PHC_PLL; i++) {
if (idt82p33->pll_mask & (1 << i)) {
err = idt82p33_enable_channel(idt82p33, i);
- if (err)
+ if (err) {
+ dev_err(&idt82p33->client->dev,
+ "Failed in %s with err %d!\n",
+ __func__, err);
break;
+ }
}
}
} else {
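
The sync_tod rework above replaces a driver-managed delayed_work with the PTP core's own kthread: the driver fills in caps->do_aux_work at registration time, arms it with ptp_schedule_worker(), and the handler's return value selects the delay in jiffies until the next invocation, with a negative return meaning do not reschedule, which is what makes the one-shot timeout work. A hedged sketch of the pattern with illustrative names:

static long example_do_aux_work(struct ptp_clock_info *ptp)
{
	/* one-shot housekeeping, e.g. turning SYNC_TOD back off */
	return -1;	/* negative return: do not reschedule */
}

static void example_arm_timeout(struct ptp_clock *clock,
				unsigned long timeout_sec)
{
	/* runs example_do_aux_work on the PTP kthread after the delay */
	ptp_schedule_worker(clock, timeout_sec * HZ);
}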
diff --git a/drivers/ptp/ptp_idt82p33.h b/drivers/ptp/ptp_idt82p33.h
index 9d46966d25f1..1c7a0f0872e8 100644
--- a/drivers/ptp/ptp_idt82p33.h
+++ b/drivers/ptp/ptp_idt82p33.h
@@ -56,6 +56,8 @@
#define PLL_MODE_SHIFT (0)
#define PLL_MODE_MASK (0x1F)
+#define PEROUT_ENABLE_OUTPUT_MASK (0xdeadbeef)
+
enum pll_mode {
PLL_MODE_MIN = 0,
PLL_MODE_AUTOMATIC = PLL_MODE_MIN,
@@ -93,6 +95,7 @@ enum hw_tod_trig_sel {
#define MAX_MEASURMENT_COUNT (5)
#define SNAP_THRESHOLD_NS (150000)
#define SYNC_TOD_TIMEOUT_SEC (5)
+#define IDT82P33_MAX_WRITE_COUNT (512)
#define PLLMASK_ADDR_HI 0xFF
#define PLLMASK_ADDR_LO 0xA5
diff --git a/drivers/ptp/ptp_ines.c b/drivers/ptp/ptp_ines.c
index 4700ffbdfced..6c7c2843ba0b 100644
--- a/drivers/ptp/ptp_ines.c
+++ b/drivers/ptp/ptp_ines.c
@@ -108,11 +108,6 @@ MODULE_LICENSE("GPL");
#define MESSAGE_TYPE_P_DELAY_RESP 3
#define MESSAGE_TYPE_DELAY_REQ 4
-#define SYNC 0x0
-#define DELAY_REQ 0x1
-#define PDELAY_REQ 0x2
-#define PDELAY_RESP 0x3
-
static LIST_HEAD(ines_clocks);
static DEFINE_MUTEX(ines_clocks_lock);
@@ -683,9 +678,9 @@ static bool is_sync_pdelay_resp(struct sk_buff *skb, int type)
msgtype = ptp_get_msgtype(hdr, type);
- switch ((msgtype & 0xf)) {
- case SYNC:
- case PDELAY_RESP:
+ switch (msgtype) {
+ case PTP_MSGTYPE_SYNC:
+ case PTP_MSGTYPE_PDELAY_RESP:
return true;
default:
return false;
@@ -696,13 +691,13 @@ static u8 tag_to_msgtype(u8 tag)
{
switch (tag) {
case MESSAGE_TYPE_SYNC:
- return SYNC;
+ return PTP_MSGTYPE_SYNC;
case MESSAGE_TYPE_P_DELAY_REQ:
- return PDELAY_REQ;
+ return PTP_MSGTYPE_PDELAY_REQ;
case MESSAGE_TYPE_P_DELAY_RESP:
- return PDELAY_RESP;
+ return PTP_MSGTYPE_PDELAY_RESP;
case MESSAGE_TYPE_DELAY_REQ:
- return DELAY_REQ;
+ return PTP_MSGTYPE_DELAY_REQ;
}
return 0xf;
}
diff --git a/drivers/s390/net/ctcm_fsms.c b/drivers/s390/net/ctcm_fsms.c
index 661d2a49bce9..b341075397d9 100644
--- a/drivers/s390/net/ctcm_fsms.c
+++ b/drivers/s390/net/ctcm_fsms.c
@@ -1303,12 +1303,10 @@ static void ctcmpc_chx_txdone(fsm_instance *fi, int event, void *arg)
/* p_header points to the last one we handled */
if (p_header)
p_header->pdu_flag |= PDU_LAST; /*Say it's the last one*/
- header = kzalloc(TH_HEADER_LENGTH, gfp_type());
- if (!header) {
- spin_unlock(&ch->collect_lock);
- fsm_event(priv->mpcg->fsm, MPCG_EVENT_INOP, dev);
- goto done;
- }
+
+ header = skb_push(ch->trans_skb, TH_HEADER_LENGTH);
+ memset(header, 0, TH_HEADER_LENGTH);
+
header->th_ch_flag = TH_HAS_PDU; /* Normal data */
ch->th_seq_num++;
header->th_seq_num = ch->th_seq_num;
@@ -1316,11 +1314,6 @@ static void ctcmpc_chx_txdone(fsm_instance *fi, int event, void *arg)
CTCM_PR_DBGDATA("%s: ToVTAM_th_seq= %08x\n" ,
__func__, ch->th_seq_num);
- memcpy(skb_push(ch->trans_skb, TH_HEADER_LENGTH), header,
- TH_HEADER_LENGTH); /* put the TH on the packet */
-
- kfree(header);
-
CTCM_PR_DBGDATA("%s: trans_skb len:%04x \n",
__func__, ch->trans_skb->len);
CTCM_PR_DBGDATA("%s: up-to-50 bytes of trans_skb "
diff --git a/drivers/s390/net/ctcm_main.c b/drivers/s390/net/ctcm_main.c
index d06809eac16d..fd705429708e 100644
--- a/drivers/s390/net/ctcm_main.c
+++ b/drivers/s390/net/ctcm_main.c
@@ -623,25 +623,10 @@ static void ctcmpc_send_sweep_req(struct channel *rch)
goto nomem;
}
- header = kmalloc(TH_SWEEP_LENGTH, gfp_type());
-
- if (!header) {
- dev_kfree_skb_any(sweep_skb);
- /* rc = -ENOMEM; */
- goto nomem;
- }
-
- header->th.th_seg = 0x00 ;
+ header = skb_put_zero(sweep_skb, TH_SWEEP_LENGTH);
header->th.th_ch_flag = TH_SWEEP_REQ; /* 0x0f */
- header->th.th_blk_flag = 0x00;
- header->th.th_is_xid = 0x00;
- header->th.th_seq_num = 0x00;
header->sw.th_last_seq = ch->th_seq_num;
- skb_put_data(sweep_skb, header, TH_SWEEP_LENGTH);
-
- kfree(header);
-
netif_trans_update(dev);
skb_queue_tail(&ch->sweep_queue, sweep_skb);
@@ -680,24 +665,16 @@ static int ctcmpc_transmit_skb(struct channel *ch, struct sk_buff *skb)
if ((fsm_getstate(ch->fsm) != CTC_STATE_TXIDLE) || grp->in_sweep) {
spin_lock_irqsave(&ch->collect_lock, saveflags);
refcount_inc(&skb->users);
- p_header = kmalloc(PDU_HEADER_LENGTH, gfp_type());
-
- if (!p_header) {
- spin_unlock_irqrestore(&ch->collect_lock, saveflags);
- goto nomem_exit;
- }
- p_header->pdu_offset = skb->len;
+ p_header = skb_push(skb, PDU_HEADER_LENGTH);
+ p_header->pdu_offset = skb->len - PDU_HEADER_LENGTH;
p_header->pdu_proto = 0x01;
- p_header->pdu_flag = 0x00;
if (be16_to_cpu(skb->protocol) == ETH_P_SNAP) {
- p_header->pdu_flag |= PDU_FIRST | PDU_CNTL;
+ p_header->pdu_flag = PDU_FIRST | PDU_CNTL;
} else {
- p_header->pdu_flag |= PDU_FIRST;
+ p_header->pdu_flag = PDU_FIRST;
}
p_header->pdu_seq = 0;
- memcpy(skb_push(skb, PDU_HEADER_LENGTH), p_header,
- PDU_HEADER_LENGTH);
CTCM_PR_DEBUG("%s(%s): Put on collect_q - skb len: %04x \n"
"pdu header and data for up to 32 bytes:\n",
@@ -706,7 +683,6 @@ static int ctcmpc_transmit_skb(struct channel *ch, struct sk_buff *skb)
skb_queue_tail(&ch->collect_queue, skb);
ch->collect_len += skb->len;
- kfree(p_header);
spin_unlock_irqrestore(&ch->collect_lock, saveflags);
goto done;
@@ -736,23 +712,15 @@ static int ctcmpc_transmit_skb(struct channel *ch, struct sk_buff *skb)
}
}
- p_header = kmalloc(PDU_HEADER_LENGTH, gfp_type());
-
- if (!p_header)
- goto nomem_exit;
-
- p_header->pdu_offset = skb->len;
+ p_header = skb_push(skb, PDU_HEADER_LENGTH);
+ p_header->pdu_offset = skb->len - PDU_HEADER_LENGTH;
p_header->pdu_proto = 0x01;
- p_header->pdu_flag = 0x00;
p_header->pdu_seq = 0;
if (be16_to_cpu(skb->protocol) == ETH_P_SNAP) {
- p_header->pdu_flag |= PDU_FIRST | PDU_CNTL;
+ p_header->pdu_flag = PDU_FIRST | PDU_CNTL;
} else {
- p_header->pdu_flag |= PDU_FIRST;
+ p_header->pdu_flag = PDU_FIRST;
}
- memcpy(skb_push(skb, PDU_HEADER_LENGTH), p_header, PDU_HEADER_LENGTH);
-
- kfree(p_header);
if (ch->collect_len > 0) {
spin_lock_irqsave(&ch->collect_lock, saveflags);
@@ -768,25 +736,17 @@ static int ctcmpc_transmit_skb(struct channel *ch, struct sk_buff *skb)
ch->prof.txlen += skb->len - PDU_HEADER_LENGTH;
- header = kmalloc(TH_HEADER_LENGTH, gfp_type());
- if (!header)
- goto nomem_exit;
+ /* put the TH on the packet */
+ header = skb_push(skb, TH_HEADER_LENGTH);
+ memset(header, 0, TH_HEADER_LENGTH);
- header->th_seg = 0x00;
header->th_ch_flag = TH_HAS_PDU; /* Normal data */
- header->th_blk_flag = 0x00;
- header->th_is_xid = 0x00; /* Just data here */
ch->th_seq_num++;
header->th_seq_num = ch->th_seq_num;
CTCM_PR_DBGDATA("%s(%s) ToVTAM_th_seq= %08x\n" ,
__func__, dev->name, ch->th_seq_num);
- /* put the TH on the packet */
- memcpy(skb_push(skb, TH_HEADER_LENGTH), header, TH_HEADER_LENGTH);
-
- kfree(header);
-
CTCM_PR_DBGDATA("%s(%s): skb len: %04x\n - pdu header and data for "
"up to 32 bytes sent to vtam:\n",
__func__, dev->name, skb->len);
@@ -943,7 +903,7 @@ static int ctcmpc_tx(struct sk_buff *skb, struct net_device *dev)
CTCM_D3_DUMP((char *)skb->data, min_t(int, 32, skb->len));
len = skb->len + TH_HEADER_LENGTH + PDU_HEADER_LENGTH;
- newskb = __dev_alloc_skb(len, gfp_type() | GFP_DMA);
+ newskb = __dev_alloc_skb(len, GFP_ATOMIC | GFP_DMA);
if (!newskb) {
CTCM_DBF_TEXT_(MPC_TRACE, CTC_DBF_ERROR,
@@ -1361,7 +1321,7 @@ static int add_channel(struct ccw_device *cdev, enum ctcm_channel_types type,
ch->protocol = priv->protocol;
if (IS_MPC(priv)) {
- ch->discontact_th = kzalloc(TH_HEADER_LENGTH, gfp_type());
+ ch->discontact_th = kzalloc(TH_HEADER_LENGTH, GFP_KERNEL);
if (ch->discontact_th == NULL)
goto nomem_return;
diff --git a/drivers/s390/net/ctcm_main.h b/drivers/s390/net/ctcm_main.h
index 16bdf23ee02b..90bd7b3f80c3 100644
--- a/drivers/s390/net/ctcm_main.h
+++ b/drivers/s390/net/ctcm_main.h
@@ -298,11 +298,6 @@ struct mpc_group *ctcmpc_init_mpc_group(struct ctcm_priv *priv);
/* test if struct ctcm_priv of struct net_device has MPC protocol setting */
#define IS_MPCDEV(dev) IS_MPC((struct ctcm_priv *)dev->ml_priv)
-static inline gfp_t gfp_type(void)
-{
- return in_interrupt() ? GFP_ATOMIC : GFP_KERNEL;
-}
-
/*
* Definition of our link level header.
*/
diff --git a/drivers/s390/net/ctcm_mpc.c b/drivers/s390/net/ctcm_mpc.c
index 85a1a4533cbe..19ee91acb89d 100644
--- a/drivers/s390/net/ctcm_mpc.c
+++ b/drivers/s390/net/ctcm_mpc.c
@@ -655,24 +655,10 @@ static void ctcmpc_send_sweep_resp(struct channel *rch)
goto done;
}
- header = kmalloc(sizeof(struct th_sweep), gfp_type());
-
- if (!header) {
- dev_kfree_skb_any(sweep_skb);
- goto done;
- }
-
- header->th.th_seg = 0x00 ;
+ header = skb_put_zero(sweep_skb, TH_SWEEP_LENGTH);
header->th.th_ch_flag = TH_SWEEP_RESP;
- header->th.th_blk_flag = 0x00;
- header->th.th_is_xid = 0x00;
- header->th.th_seq_num = 0x00;
header->sw.th_last_seq = ch->th_seq_num;
- skb_put_data(sweep_skb, header, TH_SWEEP_LENGTH);
-
- kfree(header);
-
netif_trans_update(dev);
skb_queue_tail(&ch->sweep_queue, sweep_skb);
@@ -1177,7 +1163,7 @@ static void ctcmpc_unpack_skb(struct channel *ch, struct sk_buff *pskb)
skb_pull(pskb, new_len); /* point to next PDU */
}
} else {
- mpcginfo = kmalloc(sizeof(struct mpcg_info), gfp_type());
+ mpcginfo = kmalloc(sizeof(struct mpcg_info), GFP_ATOMIC);
if (mpcginfo == NULL)
goto done;
@@ -2062,7 +2048,6 @@ static void mpc_action_rcvd_xid7(fsm_instance *fsm, int event, void *arg)
*/
static int mpc_send_qllc_discontact(struct net_device *dev)
{
- __u32 new_len = 0;
struct sk_buff *skb;
struct qllc *qllcptr;
struct ctcm_priv *priv = dev->ml_priv;
@@ -2093,31 +2078,19 @@ static int mpc_send_qllc_discontact(struct net_device *dev)
case MPCG_STATE_FLOWC:
case MPCG_STATE_READY:
grp->send_qllc_disc = 2;
- new_len = sizeof(struct qllc);
- qllcptr = kzalloc(new_len, gfp_type() | GFP_DMA);
- if (qllcptr == NULL) {
- CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR,
- "%s(%s): qllcptr allocation error",
- CTCM_FUNTAIL, dev->name);
- return -ENOMEM;
- }
-
- qllcptr->qllc_address = 0xcc;
- qllcptr->qllc_commands = 0x03;
-
- skb = __dev_alloc_skb(new_len, GFP_ATOMIC);
+ skb = __dev_alloc_skb(sizeof(struct qllc), GFP_ATOMIC);
if (skb == NULL) {
CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR,
"%s(%s): skb allocation error",
CTCM_FUNTAIL, dev->name);
priv->stats.rx_dropped++;
- kfree(qllcptr);
return -ENOMEM;
}
- skb_put_data(skb, qllcptr, new_len);
- kfree(qllcptr);
+ qllcptr = skb_put(skb, sizeof(struct qllc));
+ qllcptr->qllc_address = 0xcc;
+ qllcptr->qllc_commands = 0x03;
if (skb_headroom(skb) < 4) {
CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR,
diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h
index b235393e091c..0e9af2fbaa76 100644
--- a/drivers/s390/net/qeth_core.h
+++ b/drivers/s390/net/qeth_core.h
@@ -704,6 +704,19 @@ enum qeth_pnso_mode {
QETH_PNSO_ADDR_INFO,
};
+enum qeth_link_mode {
+ QETH_LINK_MODE_UNKNOWN,
+ QETH_LINK_MODE_FIBRE_SHORT,
+ QETH_LINK_MODE_FIBRE_LONG,
+};
+
+struct qeth_link_info {
+ u32 speed;
+ u8 duplex;
+ u8 port;
+ enum qeth_link_mode link_mode;
+};
+
#define QETH_BROADCAST_WITH_ECHO 0x01
#define QETH_BROADCAST_WITHOUT_ECHO 0x02
struct qeth_card_info {
@@ -735,6 +748,7 @@ struct qeth_card_info {
struct qeth_card_blkt blkt;
__u32 diagass_support;
__u32 hwtrap;
+ struct qeth_link_info link_info;
};
enum qeth_discipline_id {
@@ -799,12 +813,6 @@ struct qeth_rx {
u8 bufs_refill;
};
-struct carrier_info {
- __u8 card_type;
- __u16 port_mode;
- __u32 port_speed;
-};
-
struct qeth_switch_info {
__u32 capabilities;
__u32 settings;
@@ -1111,7 +1119,7 @@ void qeth_prepare_ipa_cmd(struct qeth_card *card, struct qeth_cmd_buffer *iob,
int qeth_query_switch_attributes(struct qeth_card *card,
struct qeth_switch_info *sw_info);
int qeth_query_card_info(struct qeth_card *card,
- struct carrier_info *carrier_info);
+ struct qeth_link_info *link_info);
int qeth_setadpparms_set_access_ctrl(struct qeth_card *card,
enum qeth_ipa_isolation_modes mode);
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
index e27319de7b00..319190824cd2 100644
--- a/drivers/s390/net/qeth_core_main.c
+++ b/drivers/s390/net/qeth_core_main.c
@@ -4871,8 +4871,8 @@ out_free:
static int qeth_query_card_info_cb(struct qeth_card *card,
struct qeth_reply *reply, unsigned long data)
{
- struct carrier_info *carrier_info = (struct carrier_info *)reply->param;
struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *)data;
+ struct qeth_link_info *link_info = reply->param;
struct qeth_query_card_info *card_info;
QETH_CARD_TEXT(card, 2, "qcrdincb");
@@ -4880,14 +4880,67 @@ static int qeth_query_card_info_cb(struct qeth_card *card,
return -EIO;
card_info = &cmd->data.setadapterparms.data.card_info;
- carrier_info->card_type = card_info->card_type;
- carrier_info->port_mode = card_info->port_mode;
- carrier_info->port_speed = card_info->port_speed;
+ netdev_dbg(card->dev,
+ "card info: card_type=0x%02x, port_mode=0x%04x, port_speed=0x%08x\n",
+ card_info->card_type, card_info->port_mode,
+ card_info->port_speed);
+
+ switch (card_info->port_mode) {
+ case CARD_INFO_PORTM_FULLDUPLEX:
+ link_info->duplex = DUPLEX_FULL;
+ break;
+ case CARD_INFO_PORTM_HALFDUPLEX:
+ link_info->duplex = DUPLEX_HALF;
+ break;
+ default:
+ link_info->duplex = DUPLEX_UNKNOWN;
+ }
+
+ switch (card_info->card_type) {
+ case CARD_INFO_TYPE_1G_COPPER_A:
+ case CARD_INFO_TYPE_1G_COPPER_B:
+ link_info->speed = SPEED_1000;
+ link_info->port = PORT_TP;
+ break;
+ case CARD_INFO_TYPE_1G_FIBRE_A:
+ case CARD_INFO_TYPE_1G_FIBRE_B:
+ link_info->speed = SPEED_1000;
+ link_info->port = PORT_FIBRE;
+ break;
+ case CARD_INFO_TYPE_10G_FIBRE_A:
+ case CARD_INFO_TYPE_10G_FIBRE_B:
+ link_info->speed = SPEED_10000;
+ link_info->port = PORT_FIBRE;
+ break;
+ default:
+ switch (card_info->port_speed) {
+ case CARD_INFO_PORTS_10M:
+ link_info->speed = SPEED_10;
+ break;
+ case CARD_INFO_PORTS_100M:
+ link_info->speed = SPEED_100;
+ break;
+ case CARD_INFO_PORTS_1G:
+ link_info->speed = SPEED_1000;
+ break;
+ case CARD_INFO_PORTS_10G:
+ link_info->speed = SPEED_10000;
+ break;
+ case CARD_INFO_PORTS_25G:
+ link_info->speed = SPEED_25000;
+ break;
+ default:
+ link_info->speed = SPEED_UNKNOWN;
+ }
+
+ link_info->port = PORT_OTHER;
+ }
+
return 0;
}
int qeth_query_card_info(struct qeth_card *card,
- struct carrier_info *carrier_info)
+ struct qeth_link_info *link_info)
{
struct qeth_cmd_buffer *iob;
@@ -4897,8 +4950,162 @@ int qeth_query_card_info(struct qeth_card *card,
iob = qeth_get_adapter_cmd(card, IPA_SETADP_QUERY_CARD_INFO, 0);
if (!iob)
return -ENOMEM;
- return qeth_send_ipa_cmd(card, iob, qeth_query_card_info_cb,
- (void *)carrier_info);
+
+ return qeth_send_ipa_cmd(card, iob, qeth_query_card_info_cb, link_info);
+}
+
+static int qeth_init_link_info_oat_cb(struct qeth_card *card,
+ struct qeth_reply *reply_priv,
+ unsigned long data)
+{
+ struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *)data;
+ struct qeth_link_info *link_info = reply_priv->param;
+ struct qeth_query_oat_physical_if *phys_if;
+ struct qeth_query_oat_reply *reply;
+
+ if (qeth_setadpparms_inspect_rc(cmd))
+ return -EIO;
+
+ /* Multi-part reply is unexpected, don't bother: */
+ if (cmd->data.setadapterparms.hdr.used_total > 1)
+ return -EINVAL;
+
+ /* Expect the reply to start with phys_if data: */
+ reply = &cmd->data.setadapterparms.data.query_oat.reply[0];
+ if (reply->type != QETH_QOAT_REPLY_TYPE_PHYS_IF ||
+ reply->length < sizeof(*reply))
+ return -EINVAL;
+
+ phys_if = &reply->phys_if;
+
+ switch (phys_if->speed_duplex) {
+ case QETH_QOAT_PHYS_SPEED_10M_HALF:
+ link_info->speed = SPEED_10;
+ link_info->duplex = DUPLEX_HALF;
+ break;
+ case QETH_QOAT_PHYS_SPEED_10M_FULL:
+ link_info->speed = SPEED_10;
+ link_info->duplex = DUPLEX_FULL;
+ break;
+ case QETH_QOAT_PHYS_SPEED_100M_HALF:
+ link_info->speed = SPEED_100;
+ link_info->duplex = DUPLEX_HALF;
+ break;
+ case QETH_QOAT_PHYS_SPEED_100M_FULL:
+ link_info->speed = SPEED_100;
+ link_info->duplex = DUPLEX_FULL;
+ break;
+ case QETH_QOAT_PHYS_SPEED_1000M_HALF:
+ link_info->speed = SPEED_1000;
+ link_info->duplex = DUPLEX_HALF;
+ break;
+ case QETH_QOAT_PHYS_SPEED_1000M_FULL:
+ link_info->speed = SPEED_1000;
+ link_info->duplex = DUPLEX_FULL;
+ break;
+ case QETH_QOAT_PHYS_SPEED_10G_FULL:
+ link_info->speed = SPEED_10000;
+ link_info->duplex = DUPLEX_FULL;
+ break;
+ case QETH_QOAT_PHYS_SPEED_25G_FULL:
+ link_info->speed = SPEED_25000;
+ link_info->duplex = DUPLEX_FULL;
+ break;
+ case QETH_QOAT_PHYS_SPEED_UNKNOWN:
+ default:
+ link_info->speed = SPEED_UNKNOWN;
+ link_info->duplex = DUPLEX_UNKNOWN;
+ break;
+ }
+
+ switch (phys_if->media_type) {
+ case QETH_QOAT_PHYS_MEDIA_COPPER:
+ link_info->port = PORT_TP;
+ link_info->link_mode = QETH_LINK_MODE_UNKNOWN;
+ break;
+ case QETH_QOAT_PHYS_MEDIA_FIBRE_SHORT:
+ link_info->port = PORT_FIBRE;
+ link_info->link_mode = QETH_LINK_MODE_FIBRE_SHORT;
+ break;
+ case QETH_QOAT_PHYS_MEDIA_FIBRE_LONG:
+ link_info->port = PORT_FIBRE;
+ link_info->link_mode = QETH_LINK_MODE_FIBRE_LONG;
+ break;
+ default:
+ link_info->port = PORT_OTHER;
+ link_info->link_mode = QETH_LINK_MODE_UNKNOWN;
+ break;
+ }
+
+ return 0;
+}
+
+static void qeth_init_link_info(struct qeth_card *card)
+{
+ card->info.link_info.duplex = DUPLEX_FULL;
+
+ if (IS_IQD(card) || IS_VM_NIC(card)) {
+ card->info.link_info.speed = SPEED_10000;
+ card->info.link_info.port = PORT_FIBRE;
+ card->info.link_info.link_mode = QETH_LINK_MODE_FIBRE_SHORT;
+ } else {
+ switch (card->info.link_type) {
+ case QETH_LINK_TYPE_FAST_ETH:
+ case QETH_LINK_TYPE_LANE_ETH100:
+ card->info.link_info.speed = SPEED_100;
+ card->info.link_info.port = PORT_TP;
+ break;
+ case QETH_LINK_TYPE_GBIT_ETH:
+ case QETH_LINK_TYPE_LANE_ETH1000:
+ card->info.link_info.speed = SPEED_1000;
+ card->info.link_info.port = PORT_FIBRE;
+ break;
+ case QETH_LINK_TYPE_10GBIT_ETH:
+ card->info.link_info.speed = SPEED_10000;
+ card->info.link_info.port = PORT_FIBRE;
+ break;
+ case QETH_LINK_TYPE_25GBIT_ETH:
+ card->info.link_info.speed = SPEED_25000;
+ card->info.link_info.port = PORT_FIBRE;
+ break;
+ default:
+ dev_info(&card->gdev->dev, "Unknown link type %x\n",
+ card->info.link_type);
+ card->info.link_info.speed = SPEED_UNKNOWN;
+ card->info.link_info.port = PORT_OTHER;
+ }
+
+ card->info.link_info.link_mode = QETH_LINK_MODE_UNKNOWN;
+ }
+
+ /* Get more accurate data via QUERY OAT: */
+ if (qeth_adp_supported(card, IPA_SETADP_QUERY_OAT)) {
+ struct qeth_link_info link_info;
+ struct qeth_cmd_buffer *iob;
+
+ iob = qeth_get_adapter_cmd(card, IPA_SETADP_QUERY_OAT,
+ SETADP_DATA_SIZEOF(query_oat));
+ if (iob) {
+ struct qeth_ipa_cmd *cmd = __ipa_cmd(iob);
+ struct qeth_query_oat *oat_req;
+
+ oat_req = &cmd->data.setadapterparms.data.query_oat;
+ oat_req->subcmd_code = QETH_QOAT_SCOPE_INTERFACE;
+
+ if (!qeth_send_ipa_cmd(card, iob,
+ qeth_init_link_info_oat_cb,
+ &link_info)) {
+ if (link_info.speed != SPEED_UNKNOWN)
+ card->info.link_info.speed = link_info.speed;
+ if (link_info.duplex != DUPLEX_UNKNOWN)
+ card->info.link_info.duplex = link_info.duplex;
+ if (link_info.port != PORT_OTHER)
+ card->info.link_info.port = link_info.port;
+ if (link_info.link_mode != QETH_LINK_MODE_UNKNOWN)
+ card->info.link_info.link_mode = link_info.link_mode;
+ }
+ }
+ }
}
/**
@@ -5285,6 +5492,8 @@ retriable:
goto out;
}
+ qeth_init_link_info(card);
+
rc = qeth_init_qdio_queues(card);
if (rc) {
QETH_CARD_TEXT_(card, 2, "9err%d", rc);
diff --git a/drivers/s390/net/qeth_core_mpc.h b/drivers/s390/net/qeth_core_mpc.h
index 6541bab96822..e4bde7daf083 100644
--- a/drivers/s390/net/qeth_core_mpc.h
+++ b/drivers/s390/net/qeth_core_mpc.h
@@ -489,9 +489,45 @@ struct qeth_set_access_ctrl {
__u8 reserved[8];
} __attribute__((packed));
+#define QETH_QOAT_PHYS_SPEED_UNKNOWN 0x00
+#define QETH_QOAT_PHYS_SPEED_10M_HALF 0x01
+#define QETH_QOAT_PHYS_SPEED_10M_FULL 0x02
+#define QETH_QOAT_PHYS_SPEED_100M_HALF 0x03
+#define QETH_QOAT_PHYS_SPEED_100M_FULL 0x04
+#define QETH_QOAT_PHYS_SPEED_1000M_HALF 0x05
+#define QETH_QOAT_PHYS_SPEED_1000M_FULL 0x06
+// n/a 0x07
+#define QETH_QOAT_PHYS_SPEED_10G_FULL 0x08
+// n/a 0x09
+#define QETH_QOAT_PHYS_SPEED_25G_FULL 0x0A
+
+#define QETH_QOAT_PHYS_MEDIA_COPPER 0x01
+#define QETH_QOAT_PHYS_MEDIA_FIBRE_SHORT 0x02
+#define QETH_QOAT_PHYS_MEDIA_FIBRE_LONG 0x04
+
+struct qeth_query_oat_physical_if {
+ u8 res_head[33];
+ u8 speed_duplex;
+ u8 media_type;
+ u8 res_tail[29];
+};
+
+#define QETH_QOAT_REPLY_TYPE_PHYS_IF 0x0004
+
+struct qeth_query_oat_reply {
+ u16 type;
+ u16 length;
+ u16 version;
+ u8 res[10];
+ struct qeth_query_oat_physical_if phys_if;
+};
+
+#define QETH_QOAT_SCOPE_INTERFACE 0x00000001
+
struct qeth_query_oat {
- __u32 subcmd_code;
- __u8 reserved[12];
+ u32 subcmd_code;
+ u8 reserved[12];
+ struct qeth_query_oat_reply reply[];
} __packed;
struct qeth_qoat_priv {
diff --git a/drivers/s390/net/qeth_ethtool.c b/drivers/s390/net/qeth_ethtool.c
index b5caa723326e..3a51bbff0ffe 100644
--- a/drivers/s390/net/qeth_ethtool.c
+++ b/drivers/s390/net/qeth_ethtool.c
@@ -324,8 +324,8 @@ static int qeth_set_per_queue_coalesce(struct net_device *dev, u32 queue,
/* Autoneg and full-duplex are supported and advertised unconditionally. */
/* Always advertise and support all speeds up to specified, and only one */
/* specified port type. */
-static void qeth_set_cmd_adv_sup(struct ethtool_link_ksettings *cmd,
- int maxspeed, int porttype)
+static void qeth_set_ethtool_link_modes(struct ethtool_link_ksettings *cmd,
+ enum qeth_link_mode link_mode)
{
ethtool_link_ksettings_zero_link_mode(cmd, supported);
ethtool_link_ksettings_zero_link_mode(cmd, advertising);
@@ -334,186 +334,119 @@ static void qeth_set_cmd_adv_sup(struct ethtool_link_ksettings *cmd,
ethtool_link_ksettings_add_link_mode(cmd, supported, Autoneg);
ethtool_link_ksettings_add_link_mode(cmd, advertising, Autoneg);
- switch (porttype) {
+ switch (cmd->base.port) {
case PORT_TP:
ethtool_link_ksettings_add_link_mode(cmd, supported, TP);
ethtool_link_ksettings_add_link_mode(cmd, advertising, TP);
+
+ switch (cmd->base.speed) {
+ case SPEED_10000:
+ ethtool_link_ksettings_add_link_mode(cmd, supported,
+ 10000baseT_Full);
+ ethtool_link_ksettings_add_link_mode(cmd, advertising,
+ 10000baseT_Full);
+ fallthrough;
+ case SPEED_1000:
+ ethtool_link_ksettings_add_link_mode(cmd, supported,
+ 1000baseT_Full);
+ ethtool_link_ksettings_add_link_mode(cmd, advertising,
+ 1000baseT_Full);
+ ethtool_link_ksettings_add_link_mode(cmd, supported,
+ 1000baseT_Half);
+ ethtool_link_ksettings_add_link_mode(cmd, advertising,
+ 1000baseT_Half);
+ fallthrough;
+ case SPEED_100:
+ ethtool_link_ksettings_add_link_mode(cmd, supported,
+ 100baseT_Full);
+ ethtool_link_ksettings_add_link_mode(cmd, advertising,
+ 100baseT_Full);
+ ethtool_link_ksettings_add_link_mode(cmd, supported,
+ 100baseT_Half);
+ ethtool_link_ksettings_add_link_mode(cmd, advertising,
+ 100baseT_Half);
+ fallthrough;
+ case SPEED_10:
+ ethtool_link_ksettings_add_link_mode(cmd, supported,
+ 10baseT_Full);
+ ethtool_link_ksettings_add_link_mode(cmd, advertising,
+ 10baseT_Full);
+ ethtool_link_ksettings_add_link_mode(cmd, supported,
+ 10baseT_Half);
+ ethtool_link_ksettings_add_link_mode(cmd, advertising,
+ 10baseT_Half);
+ break;
+ default:
+ break;
+ }
+
break;
case PORT_FIBRE:
ethtool_link_ksettings_add_link_mode(cmd, supported, FIBRE);
ethtool_link_ksettings_add_link_mode(cmd, advertising, FIBRE);
- break;
- default:
- ethtool_link_ksettings_add_link_mode(cmd, supported, TP);
- ethtool_link_ksettings_add_link_mode(cmd, advertising, TP);
- WARN_ON_ONCE(1);
- }
- /* partially does fall through, to also select lower speeds */
- switch (maxspeed) {
- case SPEED_25000:
- ethtool_link_ksettings_add_link_mode(cmd, supported,
- 25000baseSR_Full);
- ethtool_link_ksettings_add_link_mode(cmd, advertising,
- 25000baseSR_Full);
- break;
- case SPEED_10000:
- ethtool_link_ksettings_add_link_mode(cmd, supported,
- 10000baseT_Full);
- ethtool_link_ksettings_add_link_mode(cmd, advertising,
- 10000baseT_Full);
- fallthrough;
- case SPEED_1000:
- ethtool_link_ksettings_add_link_mode(cmd, supported,
- 1000baseT_Full);
- ethtool_link_ksettings_add_link_mode(cmd, advertising,
- 1000baseT_Full);
- ethtool_link_ksettings_add_link_mode(cmd, supported,
- 1000baseT_Half);
- ethtool_link_ksettings_add_link_mode(cmd, advertising,
- 1000baseT_Half);
- fallthrough;
- case SPEED_100:
- ethtool_link_ksettings_add_link_mode(cmd, supported,
- 100baseT_Full);
- ethtool_link_ksettings_add_link_mode(cmd, advertising,
- 100baseT_Full);
- ethtool_link_ksettings_add_link_mode(cmd, supported,
- 100baseT_Half);
- ethtool_link_ksettings_add_link_mode(cmd, advertising,
- 100baseT_Half);
- fallthrough;
- case SPEED_10:
- ethtool_link_ksettings_add_link_mode(cmd, supported,
- 10baseT_Full);
- ethtool_link_ksettings_add_link_mode(cmd, advertising,
- 10baseT_Full);
- ethtool_link_ksettings_add_link_mode(cmd, supported,
- 10baseT_Half);
- ethtool_link_ksettings_add_link_mode(cmd, advertising,
- 10baseT_Half);
+ switch (cmd->base.speed) {
+ case SPEED_25000:
+ ethtool_link_ksettings_add_link_mode(cmd, supported,
+ 25000baseSR_Full);
+ ethtool_link_ksettings_add_link_mode(cmd, advertising,
+ 25000baseSR_Full);
+ break;
+ case SPEED_10000:
+ if (link_mode == QETH_LINK_MODE_FIBRE_LONG) {
+ ethtool_link_ksettings_add_link_mode(cmd, supported,
+ 10000baseLR_Full);
+ ethtool_link_ksettings_add_link_mode(cmd, advertising,
+ 10000baseLR_Full);
+ } else if (link_mode == QETH_LINK_MODE_FIBRE_SHORT) {
+ ethtool_link_ksettings_add_link_mode(cmd, supported,
+ 10000baseSR_Full);
+ ethtool_link_ksettings_add_link_mode(cmd, advertising,
+ 10000baseSR_Full);
+ }
+ break;
+ case SPEED_1000:
+ ethtool_link_ksettings_add_link_mode(cmd, supported,
+ 1000baseX_Full);
+ ethtool_link_ksettings_add_link_mode(cmd, advertising,
+ 1000baseX_Full);
+ break;
+ default:
+ break;
+ }
+
break;
default:
- ethtool_link_ksettings_add_link_mode(cmd, supported,
- 10baseT_Full);
- ethtool_link_ksettings_add_link_mode(cmd, advertising,
- 10baseT_Full);
- ethtool_link_ksettings_add_link_mode(cmd, supported,
- 10baseT_Half);
- ethtool_link_ksettings_add_link_mode(cmd, advertising,
- 10baseT_Half);
- WARN_ON_ONCE(1);
+ break;
}
}
-
static int qeth_get_link_ksettings(struct net_device *netdev,
struct ethtool_link_ksettings *cmd)
{
struct qeth_card *card = netdev->ml_priv;
- enum qeth_link_types link_type;
- struct carrier_info carrier_info;
- int rc;
+ struct qeth_link_info link_info;
- if (IS_IQD(card) || IS_VM_NIC(card))
- link_type = QETH_LINK_TYPE_10GBIT_ETH;
- else
- link_type = card->info.link_type;
-
- cmd->base.duplex = DUPLEX_FULL;
+ cmd->base.speed = card->info.link_info.speed;
+ cmd->base.duplex = card->info.link_info.duplex;
+ cmd->base.port = card->info.link_info.port;
cmd->base.autoneg = AUTONEG_ENABLE;
cmd->base.phy_address = 0;
cmd->base.mdio_support = 0;
cmd->base.eth_tp_mdix = ETH_TP_MDI_INVALID;
cmd->base.eth_tp_mdix_ctrl = ETH_TP_MDI_INVALID;
- switch (link_type) {
- case QETH_LINK_TYPE_FAST_ETH:
- case QETH_LINK_TYPE_LANE_ETH100:
- cmd->base.speed = SPEED_100;
- cmd->base.port = PORT_TP;
- break;
- case QETH_LINK_TYPE_GBIT_ETH:
- case QETH_LINK_TYPE_LANE_ETH1000:
- cmd->base.speed = SPEED_1000;
- cmd->base.port = PORT_FIBRE;
- break;
- case QETH_LINK_TYPE_10GBIT_ETH:
- cmd->base.speed = SPEED_10000;
- cmd->base.port = PORT_FIBRE;
- break;
- case QETH_LINK_TYPE_25GBIT_ETH:
- cmd->base.speed = SPEED_25000;
- cmd->base.port = PORT_FIBRE;
- break;
- default:
- cmd->base.speed = SPEED_10;
- cmd->base.port = PORT_TP;
- }
- qeth_set_cmd_adv_sup(cmd, cmd->base.speed, cmd->base.port);
-
/* Check if we can obtain more accurate information. */
- /* If QUERY_CARD_INFO command is not supported or fails, */
- /* just return the heuristics that was filled above. */
- rc = qeth_query_card_info(card, &carrier_info);
- if (rc == -EOPNOTSUPP) /* for old hardware, return heuristic */
- return 0;
- if (rc) /* report error from the hardware operation */
- return rc;
- /* on success, fill in the information got from the hardware */
-
- netdev_dbg(netdev,
- "card info: card_type=0x%02x, port_mode=0x%04x, port_speed=0x%08x\n",
- carrier_info.card_type,
- carrier_info.port_mode,
- carrier_info.port_speed);
-
- /* Update attributes for which we've obtained more authoritative */
- /* information, leave the rest the way they where filled above. */
- switch (carrier_info.card_type) {
- case CARD_INFO_TYPE_1G_COPPER_A:
- case CARD_INFO_TYPE_1G_COPPER_B:
- cmd->base.port = PORT_TP;
- qeth_set_cmd_adv_sup(cmd, SPEED_1000, cmd->base.port);
- break;
- case CARD_INFO_TYPE_1G_FIBRE_A:
- case CARD_INFO_TYPE_1G_FIBRE_B:
- cmd->base.port = PORT_FIBRE;
- qeth_set_cmd_adv_sup(cmd, SPEED_1000, cmd->base.port);
- break;
- case CARD_INFO_TYPE_10G_FIBRE_A:
- case CARD_INFO_TYPE_10G_FIBRE_B:
- cmd->base.port = PORT_FIBRE;
- qeth_set_cmd_adv_sup(cmd, SPEED_10000, cmd->base.port);
- break;
- }
-
- switch (carrier_info.port_mode) {
- case CARD_INFO_PORTM_FULLDUPLEX:
- cmd->base.duplex = DUPLEX_FULL;
- break;
- case CARD_INFO_PORTM_HALFDUPLEX:
- cmd->base.duplex = DUPLEX_HALF;
- break;
+ if (!qeth_query_card_info(card, &link_info)) {
+ if (link_info.speed != SPEED_UNKNOWN)
+ cmd->base.speed = link_info.speed;
+ if (link_info.duplex != DUPLEX_UNKNOWN)
+ cmd->base.duplex = link_info.duplex;
+ if (link_info.port != PORT_OTHER)
+ cmd->base.port = link_info.port;
}
- switch (carrier_info.port_speed) {
- case CARD_INFO_PORTS_10M:
- cmd->base.speed = SPEED_10;
- break;
- case CARD_INFO_PORTS_100M:
- cmd->base.speed = SPEED_100;
- break;
- case CARD_INFO_PORTS_1G:
- cmd->base.speed = SPEED_1000;
- break;
- case CARD_INFO_PORTS_10G:
- cmd->base.speed = SPEED_10000;
- break;
- case CARD_INFO_PORTS_25G:
- cmd->base.speed = SPEED_25000;
- break;
- }
+ qeth_set_ethtool_link_modes(cmd, card->info.link_info.link_mode);
return 0;
}
diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c
index 79939ba5d523..393aef681e44 100644
--- a/drivers/s390/net/qeth_l2_main.c
+++ b/drivers/s390/net/qeth_l2_main.c
@@ -737,8 +737,6 @@ static void qeth_l2_dev2br_an_set_cb(void *priv,
*
* On enable, emits a series of address notifications for all
* currently registered hosts.
- *
- * Must be called under rtnl_lock
*/
static int qeth_l2_dev2br_an_set(struct qeth_card *card, bool enable)
{
@@ -1276,16 +1274,19 @@ static void qeth_l2_dev2br_worker(struct work_struct *work)
if (READ_ONCE(card->info.pnso_mode) == QETH_PNSO_NONE)
goto free;
- /* Potential re-config in progress, try again later: */
- if (!rtnl_trylock()) {
- queue_delayed_work(card->event_wq, dwork,
- msecs_to_jiffies(100));
- return;
- }
- if (!netif_device_present(card->dev))
- goto out_unlock;
-
if (data->ac_event.lost_event_mask) {
+ /* Potential re-config in progress, try again later: */
+ if (!rtnl_trylock()) {
+ queue_delayed_work(card->event_wq, dwork,
+ msecs_to_jiffies(100));
+ return;
+ }
+
+ if (!netif_device_present(card->dev)) {
+ rtnl_unlock();
+ goto free;
+ }
+
QETH_DBF_MESSAGE(3,
"Address change notification overflow on device %x\n",
CARD_DEVID(card));
@@ -1315,6 +1316,8 @@ static void qeth_l2_dev2br_worker(struct work_struct *work)
"Address Notification resynced on device %x\n",
CARD_DEVID(card));
}
+
+ rtnl_unlock();
} else {
for (i = 0; i < data->ac_event.num_entries; i++) {
struct qeth_ipacmd_addr_change_entry *entry =
@@ -1326,9 +1329,6 @@ static void qeth_l2_dev2br_worker(struct work_struct *work)
}
}
-out_unlock:
- rtnl_unlock();
-
free:
kfree(data);
}
@@ -2296,11 +2296,8 @@ static void qeth_l2_set_offline(struct qeth_card *card)
card->state = CARD_STATE_DOWN;
qeth_l2_set_pnso_mode(card, QETH_PNSO_NONE);
- if (priv->brport_features & BR_LEARNING_SYNC) {
- rtnl_lock();
+ if (priv->brport_features & BR_LEARNING_SYNC)
qeth_l2_dev2br_fdb_flush(card);
- rtnl_unlock();
- }
}
/* Returns zero if the command is successfully "consumed" */
diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c
index b1c1d2510d55..264b6c782382 100644
--- a/drivers/s390/net/qeth_l3_main.c
+++ b/drivers/s390/net/qeth_l3_main.c
@@ -104,10 +104,7 @@ static bool qeth_l3_is_addr_covered_by_ipato(struct qeth_card *card,
qeth_l3_convert_addr_to_bits(ipatoe->addr, ipatoe_bits,
(ipatoe->proto == QETH_PROT_IPV4) ?
4 : 16);
- if (addr->proto == QETH_PROT_IPV4)
- rc = !memcmp(addr_bits, ipatoe_bits, ipatoe->mask_bits);
- else
- rc = !memcmp(addr_bits, ipatoe_bits, ipatoe->mask_bits);
+ rc = !memcmp(addr_bits, ipatoe_bits, ipatoe->mask_bits);
if (rc)
break;
}
diff --git a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
index 6890bbe04a8c..08fb7d5d08b3 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
@@ -16,6 +16,8 @@
#include "bnx2fc.h"
+#include <linux/ethtool.h>
+
static struct list_head adapter_list;
static struct list_head if_list;
static u32 adapter_count;
diff --git a/drivers/scsi/fcoe/fcoe_transport.c b/drivers/scsi/fcoe/fcoe_transport.c
index 6e187d0e71fd..b927b3d84523 100644
--- a/drivers/scsi/fcoe/fcoe_transport.c
+++ b/drivers/scsi/fcoe/fcoe_transport.c
@@ -10,6 +10,7 @@
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/netdevice.h>
+#include <linux/ethtool.h>
#include <linux/errno.h>
#include <linux/crc32.h>
#include <scsi/libfcoe.h>
diff --git a/drivers/soc/fsl/qbman/qman.c b/drivers/soc/fsl/qbman/qman.c
index 9888a7061873..101def7dc73d 100644
--- a/drivers/soc/fsl/qbman/qman.c
+++ b/drivers/soc/fsl/qbman/qman.c
@@ -1159,7 +1159,7 @@ static u32 fq_to_tag(struct qman_fq *fq)
static u32 __poll_portal_slow(struct qman_portal *p, u32 is);
static inline unsigned int __poll_portal_fast(struct qman_portal *p,
- unsigned int poll_limit);
+ unsigned int poll_limit, bool sched_napi);
static void qm_congestion_task(struct work_struct *work);
static void qm_mr_process_task(struct work_struct *work);
@@ -1174,7 +1174,7 @@ static irqreturn_t portal_isr(int irq, void *ptr)
/* DQRR-handling if it's interrupt-driven */
if (is & QM_PIRQ_DQRI) {
- __poll_portal_fast(p, QMAN_POLL_LIMIT);
+ __poll_portal_fast(p, QMAN_POLL_LIMIT, true);
clear = QM_DQAVAIL_MASK | QM_PIRQ_DQRI;
}
/* Handling of anything else that's interrupt-driven */
@@ -1602,7 +1602,7 @@ static noinline void clear_vdqcr(struct qman_portal *p, struct qman_fq *fq)
* user callbacks to call into any QMan API.
*/
static inline unsigned int __poll_portal_fast(struct qman_portal *p,
- unsigned int poll_limit)
+ unsigned int poll_limit, bool sched_napi)
{
const struct qm_dqrr_entry *dq;
struct qman_fq *fq;
@@ -1636,7 +1636,7 @@ static inline unsigned int __poll_portal_fast(struct qman_portal *p,
* and we don't want multiple if()s in the critical
* path (SDQCR).
*/
- res = fq->cb.dqrr(p, fq, dq);
+ res = fq->cb.dqrr(p, fq, dq, sched_napi);
if (res == qman_cb_dqrr_stop)
break;
/* Check for VDQCR completion */
@@ -1646,7 +1646,7 @@ static inline unsigned int __poll_portal_fast(struct qman_portal *p,
/* SDQCR: context_b points to the FQ */
fq = tag_to_fq(be32_to_cpu(dq->context_b));
/* Now let the callback do its stuff */
- res = fq->cb.dqrr(p, fq, dq);
+ res = fq->cb.dqrr(p, fq, dq, sched_napi);
/*
* The callback can request that we exit without
* consuming this entry nor advancing;
@@ -1753,7 +1753,7 @@ EXPORT_SYMBOL(qman_start_using_portal);
int qman_p_poll_dqrr(struct qman_portal *p, unsigned int limit)
{
- return __poll_portal_fast(p, limit);
+ return __poll_portal_fast(p, limit, false);
}
EXPORT_SYMBOL(qman_p_poll_dqrr);
diff --git a/drivers/soc/fsl/qbman/qman_test_api.c b/drivers/soc/fsl/qbman/qman_test_api.c
index 7066b2f1467c..28fbddc3c204 100644
--- a/drivers/soc/fsl/qbman/qman_test_api.c
+++ b/drivers/soc/fsl/qbman/qman_test_api.c
@@ -45,7 +45,8 @@
static enum qman_cb_dqrr_result cb_dqrr(struct qman_portal *,
struct qman_fq *,
- const struct qm_dqrr_entry *);
+ const struct qm_dqrr_entry *,
+ bool sched_napi);
static void cb_ern(struct qman_portal *, struct qman_fq *,
const union qm_mr_entry *);
static void cb_fqs(struct qman_portal *, struct qman_fq *,
@@ -208,7 +209,8 @@ failed:
static enum qman_cb_dqrr_result cb_dqrr(struct qman_portal *p,
struct qman_fq *fq,
- const struct qm_dqrr_entry *dq)
+ const struct qm_dqrr_entry *dq,
+ bool sched_napi)
{
if (WARN_ON(fd_neq(&fd_dq, &dq->fd))) {
pr_err("BADNESS: dequeued frame doesn't match;\n");
diff --git a/drivers/soc/fsl/qbman/qman_test_stash.c b/drivers/soc/fsl/qbman/qman_test_stash.c
index e87b65403b67..b7e8e5ec884c 100644
--- a/drivers/soc/fsl/qbman/qman_test_stash.c
+++ b/drivers/soc/fsl/qbman/qman_test_stash.c
@@ -275,7 +275,8 @@ static inline int process_frame_data(struct hp_handler *handler,
static enum qman_cb_dqrr_result normal_dqrr(struct qman_portal *portal,
struct qman_fq *fq,
- const struct qm_dqrr_entry *dqrr)
+ const struct qm_dqrr_entry *dqrr,
+ bool sched_napi)
{
struct hp_handler *handler = (struct hp_handler *)fq;
@@ -293,7 +294,8 @@ skip:
static enum qman_cb_dqrr_result special_dqrr(struct qman_portal *portal,
struct qman_fq *fq,
- const struct qm_dqrr_entry *dqrr)
+ const struct qm_dqrr_entry *dqrr,
+ bool sched_napi)
{
struct hp_handler *handler = (struct hp_handler *)fq;
diff --git a/drivers/staging/Kconfig b/drivers/staging/Kconfig
index 2d0310448eba..443ca3f3cdf0 100644
--- a/drivers/staging/Kconfig
+++ b/drivers/staging/Kconfig
@@ -114,6 +114,8 @@ source "drivers/staging/kpc2000/Kconfig"
source "drivers/staging/qlge/Kconfig"
+source "drivers/staging/wimax/Kconfig"
+
source "drivers/staging/wfx/Kconfig"
source "drivers/staging/hikey9xx/Kconfig"
diff --git a/drivers/staging/Makefile b/drivers/staging/Makefile
index 757a892ab5b9..dc45128ef525 100644
--- a/drivers/staging/Makefile
+++ b/drivers/staging/Makefile
@@ -47,5 +47,6 @@ obj-$(CONFIG_XIL_AXIS_FIFO) += axis-fifo/
obj-$(CONFIG_FIELDBUS_DEV) += fieldbus/
obj-$(CONFIG_KPC2000) += kpc2000/
obj-$(CONFIG_QLGE) += qlge/
+obj-$(CONFIG_WIMAX) += wimax/
obj-$(CONFIG_WFX) += wfx/
obj-y += hikey9xx/
diff --git a/drivers/staging/fsl-dpaa2/ethsw/ethsw-ethtool.c b/drivers/staging/fsl-dpaa2/ethsw/ethsw-ethtool.c
index ace4a6d28562..ad55cd738847 100644
--- a/drivers/staging/fsl-dpaa2/ethsw/ethsw-ethtool.c
+++ b/drivers/staging/fsl-dpaa2/ethsw/ethsw-ethtool.c
@@ -7,6 +7,8 @@
*
*/
+#include <linux/ethtool.h>
+
#include "ethsw.h"
static struct {
diff --git a/drivers/staging/wimax/Documentation/i2400m.rst b/drivers/staging/wimax/Documentation/i2400m.rst
new file mode 100644
index 000000000000..194388c0c351
--- /dev/null
+++ b/drivers/staging/wimax/Documentation/i2400m.rst
@@ -0,0 +1,283 @@
+.. include:: <isonum.txt>
+
+====================================================
+Driver for the Intel Wireless Wimax Connection 2400m
+====================================================
+
+:Copyright: |copy| 2008 Intel Corporation < linux-wimax@intel.com >
+
+ This provides a driver for the Intel Wireless WiMAX Connection 2400m
+ and a basic Linux kernel WiMAX stack.
+
+1. Requirements
+===============
+
+ * Linux installation with Linux kernel 2.6.22 or newer (if building
+ from a separate tree)
+ * Intel i2400m Echo Peak or Baxter Peak; this includes the Intel
+ Wireless WiMAX/WiFi Link 5x50 series.
+ * build tools:
+
+ + Linux kernel development package for the target kernel; to
+ build against your currently running kernel, you need to have
+ the kernel development package corresponding to the running
+ image installed (usually if your kernel is named
+ linux-VERSION, the development package is called
+ linux-dev-VERSION or linux-headers-VERSION).
+ + GNU C Compiler, make
+
+2. Compilation and installation
+===============================
+
+2.1. Compilation of the drivers included in the kernel
+------------------------------------------------------
+
+ Configure the kernel; to enable the WiMAX drivers select Drivers >
+ Networking Drivers > WiMAX device support. Enable all of them as
+ modules (easier).
+
+ If USB or SDIO are not enabled in the kernel configuration, the options
+ to build the i2400m USB or SDIO drivers will not show. Enable said
+ subsystems and go back to the WiMAX menu to enable the drivers.
+
+ Compile and install your kernel as usual.
+
+2.2. Compilation of the drivers distributed as a standalone module
+-------------------------------------------------------------------
+
+ To compile::
+
+ $ cd source/directory
+ $ make
+
+ Once built you can load and unload using the provided load.sh script;
+ load.sh will load the modules; load.sh u will unload them.
+
+ To install in the default kernel directories (and enable auto loading
+ when the device is plugged)::
+
+ $ make install
+ $ depmod -a
+
+ If your kernel development files are located in a non-standard
+ directory or if you want to build for a kernel that is not the
+ currently running one, set KDIR to the right location::
+
+ $ make KDIR=/path/to/kernel/dev/tree
+
+ For more information, please contact linux-wimax@intel.com.
+
+3. Installing the firmware
+==========================
+
+ The firmware can be obtained from http://linuxwimax.org or might have
+ been supplied with your hardware.
+
+ It has to be installed in the target system::
+
+ $ cp FIRMWAREFILE.sbcf /lib/firmware/i2400m-fw-BUSTYPE-1.3.sbcf
+
+ * NOTE: if your firmware came in an .rpm or .deb file, just install
+ it as normal, with the rpm (rpm -i FIRMWARE.rpm) or dpkg
+ (dpkg -i FIRMWARE.deb) commands. No further action is needed.
+ * BUSTYPE will be usb or sdio, depending on the hardware you have.
+ Each hardware type comes with its own firmware and will not work
+ with other types.
+
+4. Design
+=========
+
+ This package contains two major parts: a WiMAX kernel stack and a
+ driver for the Intel i2400m.
+
+ The WiMAX stack is designed to provide for common WiMAX control
+ services to current and future WiMAX devices from any vendor; please
+ see README.wimax for details.
+
+ The i2400m kernel driver is broken up into two main parts: the bus
+ generic driver and the bus-specific drivers. The bus generic driver
+ forms the driver core and contains no knowledge of the actual method
+ used to connect to the device. The bus-specific drivers are just the
+ glue to connect the bus-generic driver and the device. Currently only
+ USB and SDIO are supported. See drivers/net/wimax/i2400m/i2400m.h for
+ more information.
+
+ The bus generic driver is logically broken up into two parts: OS-glue
+ and hardware-glue. The OS-glue interfaces with Linux. The hardware-glue
+ interfaces with the device using an interface provided by the
+ bus-specific driver. The reason for this breakup is to be able to
+ easily reuse the hardware-glue to write drivers for other OSes; note
+ the hardware glue part is written as a native Linux driver; no
+ abstraction layers are used, so to port to another OS, the Linux kernel
+ API calls should be replaced with the target OS's.
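+
+ As a rough illustration of this split, a bus-specific glue driver
+ fills in the bus operations and then hands control to the bus-generic
+ driver. This is a hypothetical sketch only: the callback names follow
+ the pattern used in i2400m.h, but the glue device, helper names and
+ exact signatures here are illustrative, not authoritative::
+
+   /* Hypothetical bus-specific glue driver probe routine */
+   static int my_bus_probe(struct my_bus_device *bdev)
+   {
+           struct i2400m *i2400m = my_glue_alloc(bdev); /* hypothetical */
+
+           /* Wire up the bus operations the bus-generic driver calls */
+           i2400m->bus_dev_start = my_bus_dev_start;
+           i2400m->bus_dev_stop = my_bus_dev_stop;
+           i2400m->bus_tx_kick = my_bus_tx_kick;
+           i2400m->bus_reset = my_bus_reset;
+
+           /* The bus-generic driver takes over from here */
+           return i2400m_setup(i2400m, I2400M_BRI_MAC_REINIT);
+   }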
+
+5. Usage
+========
+
+ To load the driver, follow the instructions in the install section;
+ once the driver is loaded, plug in the device (unless it is permanently
+ plugged in). The driver will enumerate the device, upload the firmware
+ and output messages in the kernel log (dmesg, /var/log/messages or
+ /var/log/kern.log) such as::
+
+ ...
+ i2400m_usb 5-4:1.0: firmware interface version 8.0.0
+ i2400m_usb 5-4:1.0: WiMAX interface wmx0 (00:1d:e1:01:94:2c) ready
+
+ At this point the device is ready to work.
+
+ Current versions require the Intel WiMAX Network Service in userspace
+ to make things work. See the network service's README for instructions
+ on how to scan, connect and disconnect.
+
+5.1. Module parameters
+----------------------
+
+ Module parameters can be set at kernel or module load time or by
+ echoing values::
+
+ $ echo VALUE > /sys/module/MODULENAME/parameters/PARAMETERNAME
+
+ To make changes permanent, for example, for the i2400m module, you can
+ also create a file named /etc/modprobe.d/i2400m containing::
+
+ options i2400m idle_mode_disabled=1
+
+ To find which parameters are supported by a module, run::
+
+ $ modinfo path/to/module.ko
+
+ During kernel bootup (if the driver is linked into the kernel), specify
+ the following to the kernel command line::
+
+ i2400m.PARAMETER=VALUE
+
+5.1.1. i2400m: idle_mode_disabled
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+ The i2400m module supports a parameter to disable idle mode. This
+ parameter, once set, will take effect only when the device is
+ reinitialized by the driver (e.g. following a reset or a reconnect).
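+
+ For example, to disable idle mode at runtime (the module name i2400m
+ and the parameter name are the ones used in the options line above)::
+
+ $ echo 1 > /sys/module/i2400m/parameters/idle_mode_disabled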
+
+5.2. Debug operations: debugfs entries
+--------------------------------------
+
+ The driver will register debugfs entries that allow the user to tweak
+ debug settings. There are three main container directories where
+ entries are placed, which correspond to the three blocks an i2400m WiMAX
+ driver has:
+
+ * /sys/kernel/debug/wimax:DEVNAME/ for the generic WiMAX stack
+ controls
+ * /sys/kernel/debug/wimax:DEVNAME/i2400m for the i2400m generic
+ driver controls
+ * /sys/kernel/debug/wimax:DEVNAME/i2400m-usb (or -sdio) for the
+ bus-specific i2400m-usb or i2400m-sdio controls.
+
+ Of course, if debugfs is mounted in a directory other than
+ /sys/kernel/debug, those paths will change.
+
+5.2.1. Increasing debug output
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+ The files named *dl_* indicate knobs for controlling the debug output
+ of different submodules::
+
+ # find /sys/kernel/debug/wimax\:wmx0 -name \*dl_\*
+ /sys/kernel/debug/wimax:wmx0/i2400m-usb/dl_tx
+ /sys/kernel/debug/wimax:wmx0/i2400m-usb/dl_rx
+ /sys/kernel/debug/wimax:wmx0/i2400m-usb/dl_notif
+ /sys/kernel/debug/wimax:wmx0/i2400m-usb/dl_fw
+ /sys/kernel/debug/wimax:wmx0/i2400m-usb/dl_usb
+ /sys/kernel/debug/wimax:wmx0/i2400m/dl_tx
+ /sys/kernel/debug/wimax:wmx0/i2400m/dl_rx
+ /sys/kernel/debug/wimax:wmx0/i2400m/dl_rfkill
+ /sys/kernel/debug/wimax:wmx0/i2400m/dl_netdev
+ /sys/kernel/debug/wimax:wmx0/i2400m/dl_fw
+ /sys/kernel/debug/wimax:wmx0/i2400m/dl_debugfs
+ /sys/kernel/debug/wimax:wmx0/i2400m/dl_driver
+ /sys/kernel/debug/wimax:wmx0/i2400m/dl_control
+ /sys/kernel/debug/wimax:wmx0/wimax_dl_stack
+ /sys/kernel/debug/wimax:wmx0/wimax_dl_op_rfkill
+ /sys/kernel/debug/wimax:wmx0/wimax_dl_op_reset
+ /sys/kernel/debug/wimax:wmx0/wimax_dl_op_msg
+ /sys/kernel/debug/wimax:wmx0/wimax_dl_id_table
+ /sys/kernel/debug/wimax:wmx0/wimax_dl_debugfs
+
+ By reading the file you can obtain the current value of said debug
+ level; by writing to it, you can set it.
+
+ To increase the debug level of, for example, the i2400m's generic TX
+ engine, just write::
+
+ $ echo 3 > /sys/kernel/debug/wimax:wmx0/i2400m/dl_tx
+
+ Increasing numbers yield increasing debug information; for details of
+ what is printed and the available levels, check the source. The code
+ uses 0 for disabled and increasing values up to 8.
+
+5.2.2. RX and TX statistics
+^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+ The i2400m/rx_stats and i2400m/tx_stats files provide statistics about
+ data reception/delivery from the device::
+
+ $ cat /sys/kernel/debug/wimax:wmx0/i2400m/rx_stats
+ 45 1 3 34 3104 48 480
+
+ The numbers reported are:
+
+ * packets/RX-buffer: total, min, max
+ * RX-buffers: total RX buffers received, accumulated RX buffer size
+ in bytes, min size received, max size received
+
+ Thus, to find the average buffer size received, divide the accumulated
+ RX-buffer size by the total number of RX-buffers.
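+
+ For the sample output above, that is 3104 bytes over 34 RX-buffers, an
+ average of roughly 91 bytes per buffer.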
+
+ To clear the statistics back to 0, write anything to the rx_stats file::
+
+ $ echo 1 > /sys/kernel/debug/wimax:wmx0/i2400m/rx_stats
+
+ Likewise for TX.
+
+ Note the packets this debug file refers to are not network packets, but
+ packets in the sense of the device-specific protocol for communication
+ to the host. See drivers/net/wimax/i2400m/tx.c.
+
+5.2.3. Tracing messages received from user space
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+ To echo messages received from user space into the trace pipe that the
+ i2400m driver creates, set the debug file i2400m/trace_msg_from_user to
+ 1::
+
+ $ echo 1 > /sys/kernel/debug/wimax:wmx0/i2400m/trace_msg_from_user
+
+5.2.4. Performing a device reset
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+ By writing a 0, a 1 or a 2 to the file
+ /sys/kernel/debug/wimax:wmx0/reset, the driver performs a warm (without
+ disconnecting from the bus), cold (disconnecting from the bus) or bus
+ (bus specific) reset on the device.
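+
+ For example, to request a cold reset (the value 1, per the mapping
+ above)::
+
+ $ echo 1 > /sys/kernel/debug/wimax:wmx0/reset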
+
+5.2.5. Asking the device to enter power saving mode
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+ By writing any value to the /sys/kernel/debug/wimax:wmx0/suspend file,
+ the device will attempt to enter power saving mode.
+
+6. Troubleshooting
+==================
+
+6.1. Driver complains about ``i2400m-fw-usb-1.3.sbcf: request failed``
+----------------------------------------------------------------------
+
+ If upon connecting the device, the following is output in the kernel
+ log::
+
+ i2400m_usb 5-4:1.0: fw i2400m-fw-usb-1.3.sbcf: request failed: -2
+
+ This means that the driver cannot locate the firmware file named
+ /lib/firmware/i2400m-fw-usb-1.3.sbcf. Check that the file is present in
+ the right location.
diff --git a/drivers/staging/wimax/Documentation/index.rst b/drivers/staging/wimax/Documentation/index.rst
new file mode 100644
index 000000000000..fdf7c1f99ff5
--- /dev/null
+++ b/drivers/staging/wimax/Documentation/index.rst
@@ -0,0 +1,19 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+===============
+WiMAX subsystem
+===============
+
+.. toctree::
+ :maxdepth: 2
+
+ wimax
+
+ i2400m
+
+.. only:: subproject and html
+
+ Indices
+ =======
+
+ * :ref:`genindex`
diff --git a/drivers/staging/wimax/Documentation/wimax.rst b/drivers/staging/wimax/Documentation/wimax.rst
new file mode 100644
index 000000000000..817ee8ba2732
--- /dev/null
+++ b/drivers/staging/wimax/Documentation/wimax.rst
@@ -0,0 +1,89 @@
+.. include:: <isonum.txt>
+
+========================
+Linux kernel WiMAX stack
+========================
+
+:Copyright: |copy| 2008 Intel Corporation < linux-wimax@intel.com >
+
+ This provides a basic Linux kernel WiMAX stack to provide a common
+ control API for WiMAX devices, usable from kernel and user space.
+
+1. Design
+=========
+
+ The WiMAX stack is designed to provide for common WiMAX control
+ services to current and future WiMAX devices from any vendor.
+
+ Because currently there is only one such device and it is not yet clear
+ what the common services will be, the APIs it provides are very minimal.
+ However, it is done in such a way that it is easily extensible to
+ accommodate future requirements.
+
+ The stack works by embedding a struct wimax_dev in your device's
+ control structures. This provides a set of callbacks that the WiMAX
+ stack will call in order to implement control operations requested by
+ the user. The stack also provides API functions that the driver calls
+ to notify about changes of state in the device.
+
+ The stack exports the API calls needed to control the device to user
+ space using generic netlink as a marshalling mechanism. You can access
+ them using your own code or use the wrappers provided for your
+ convenience in libwimax (in the wimax-tools package).
+
+ For detailed information on the stack, please see
+ include/linux/wimax.h.
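+
+ As a minimal registration sketch (assuming the wimax_dev_init(),
+ wimax_dev_add() and op_reset entry points declared by the stack; the
+ driver structure and probe helper below are hypothetical)::
+
+   struct my_wimax_driver {
+           struct wimax_dev wimax_dev;  /* embedded control structure */
+           /* ... bus- and device-specific state ... */
+   };
+
+   /* Hypothetical reset callback, invoked by the stack on request */
+   static int my_op_reset(struct wimax_dev *wimax_dev)
+   {
+           /* ask the hardware to reset here */
+           return 0;
+   }
+
+   static int my_probe(struct my_wimax_driver *my,
+                       struct net_device *net_dev)
+   {
+           wimax_dev_init(&my->wimax_dev);
+           my->wimax_dev.op_reset = my_op_reset;
+           /* the stack exports the control API over generic netlink */
+           return wimax_dev_add(&my->wimax_dev, net_dev);
+   }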
+
+2. Usage
+========
+
+ For usage in a driver (registration, API, etc) please refer to the
+ instructions in the header file include/linux/wimax.h.
+
+ When a device is registered with the WiMAX stack, a set of debugfs
+ files will appear in /sys/kernel/debug/wimax:wmxX that you can tweak
+ for control.
+
+2.1. Obtaining debug information: debugfs entries
+-------------------------------------------------
+
+ The WiMAX stack is compiled, by default, with debug messages that can
+ be used to diagnose issues. By default, said messages are disabled.
+
+ The drivers will register debugfs entries that allow the user to tweak
+ debug settings.
+
+ Each driver, when registering with the stack, will cause a debugfs
+ directory named wimax:DEVICENAME to be created; optionally, it might
+ create more subentries below it.
+
+2.1.1. Increasing debug output
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+ The files named *dl_* indicate knobs for controlling the debug output
+ of different submodules of the WiMAX stack::
+
+ # find /sys/kernel/debug/wimax\:wmx0 -name \*dl_\*
+ /sys/kernel/debug/wimax:wmx0/wimax_dl_stack
+ /sys/kernel/debug/wimax:wmx0/wimax_dl_op_rfkill
+ /sys/kernel/debug/wimax:wmx0/wimax_dl_op_reset
+ /sys/kernel/debug/wimax:wmx0/wimax_dl_op_msg
+ /sys/kernel/debug/wimax:wmx0/wimax_dl_id_table
+ /sys/kernel/debug/wimax:wmx0/wimax_dl_debugfs
+ /sys/kernel/debug/wimax:wmx0/.... # other driver specific files
+
+ NOTE:
+ Of course, if debugfs is mounted in a directory other than
+ /sys/kernel/debug, those paths will change.
+
+ By reading the file you can obtain the current value of said debug
+ level; by writing to it, you can set it.
+
+ To increase the debug level of, for example, the id-table submodule,
+ just write::
+
+ $ echo 3 > /sys/kernel/debug/wimax:wmx0/wimax_dl_id_table
+
+ Increasing numbers yield increasing debug information; for details of
+ what is printed and the available levels, check the source. The code
+ uses 0 for disabled and increasing values up to 8.
diff --git a/drivers/staging/wimax/Kconfig b/drivers/staging/wimax/Kconfig
new file mode 100644
index 000000000000..ded8b70b25ee
--- /dev/null
+++ b/drivers/staging/wimax/Kconfig
@@ -0,0 +1,46 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# WiMAX LAN device configuration
+#
+
+menuconfig WIMAX
+ tristate "WiMAX Wireless Broadband support"
+ depends on RFKILL || !RFKILL
+ help
+
+ Select to configure support for devices that provide
+ wireless broadband connectivity using the WiMAX protocol
+ (IEEE 802.16).
+
+ Please note that most of these devices require signing up
+ for a service plan with a provider.
+
+ The different WiMAX drivers can be enabled in the menu entry
+
+ Device Drivers > Network device support > WiMAX Wireless
+ Broadband devices
+
+ If unsure, it is safe to select M (module).
+
+if WIMAX
+
+config WIMAX_DEBUG_LEVEL
+ int "WiMAX debug level"
+ depends on WIMAX
+ default 8
+ help
+
+ Select the maximum debug verbosity level to be compiled into
+ the WiMAX stack code.
+
+ By default, debug messages are disabled at runtime and can
+ be selectively enabled for different parts of the code using
+ the debugfs debug-levels files.
+
+ If set at zero, this will compile out all the debug code.
+
+ It is recommended that it be left at 8.
+
+source "drivers/staging/wimax/i2400m/Kconfig"
+
+endif
diff --git a/drivers/staging/wimax/Makefile b/drivers/staging/wimax/Makefile
new file mode 100644
index 000000000000..0e3f988656aa
--- /dev/null
+++ b/drivers/staging/wimax/Makefile
@@ -0,0 +1,15 @@
+# SPDX-License-Identifier: GPL-2.0
+
+obj-$(CONFIG_WIMAX) += wimax.o
+
+wimax-y := \
+ id-table.o \
+ op-msg.o \
+ op-reset.o \
+ op-rfkill.o \
+ op-state-get.o \
+ stack.o
+
+wimax-$(CONFIG_DEBUG_FS) += debugfs.o
+
+obj-$(CONFIG_WIMAX_I2400M) += i2400m/
diff --git a/drivers/staging/wimax/TODO b/drivers/staging/wimax/TODO
new file mode 100644
index 000000000000..26e4cb9e9599
--- /dev/null
+++ b/drivers/staging/wimax/TODO
@@ -0,0 +1,18 @@
+There are no known users of this driver as of October 2020, and it will
+be removed unless someone turns out to still need it in future releases.
+
+According to https://en.wikipedia.org/wiki/List_of_WiMAX_networks, there
+have been many public WiMAX networks, but it appears that many of these
+have migrated to LTE or discontinued their service altogether. As most
+PCs and phones lack WiMAX hardware support, the remaining networks tend
+to use standalone routers. These almost certainly run Linux, but not a
+modern kernel or the mainline WiMAX driver stack.
+
+NetworkManager appears to have dropped userspace support in 2015
+(https://bugzilla.gnome.org/show_bug.cgi?id=747846); the www.linuxwimax.org
+site had already shut down earlier.
+
+WiMAX is apparently still being deployed on airport campus networks
+("AeroMACS"), but in a frequency band that was not supported by the old
+Intel 2400m (used in Sandy Bridge laptops and earlier), which is the
+only driver using the kernel's wimax stack.
diff --git a/drivers/staging/wimax/debug-levels.h b/drivers/staging/wimax/debug-levels.h
new file mode 100644
index 000000000000..b854802d1d00
--- /dev/null
+++ b/drivers/staging/wimax/debug-levels.h
@@ -0,0 +1,29 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Linux WiMAX Stack
+ * Debug levels control file for the wimax module
+ *
+ * Copyright (C) 2007-2008 Intel Corporation <linux-wimax@intel.com>
+ * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
+ */
+#ifndef __debug_levels__h__
+#define __debug_levels__h__
+
+/* Maximum compile and run time debug level for all submodules */
+#define D_MODULENAME wimax
+#define D_MASTER CONFIG_WIMAX_DEBUG_LEVEL
+
+#include "linux-wimax-debug.h"
+
+/* List of all the enabled modules */
+enum d_module {
+ D_SUBMODULE_DECLARE(debugfs),
+ D_SUBMODULE_DECLARE(id_table),
+ D_SUBMODULE_DECLARE(op_msg),
+ D_SUBMODULE_DECLARE(op_reset),
+ D_SUBMODULE_DECLARE(op_rfkill),
+ D_SUBMODULE_DECLARE(op_state_get),
+ D_SUBMODULE_DECLARE(stack),
+};
+
+#endif /* #ifndef __debug_levels__h__ */
diff --git a/drivers/staging/wimax/debugfs.c b/drivers/staging/wimax/debugfs.c
new file mode 100644
index 000000000000..e11bff61ffcf
--- /dev/null
+++ b/drivers/staging/wimax/debugfs.c
@@ -0,0 +1,38 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Linux WiMAX
+ * Debugfs support
+ *
+ * Copyright (C) 2005-2006 Intel Corporation <linux-wimax@intel.com>
+ * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
+ */
+#include <linux/debugfs.h>
+#include "linux-wimax.h"
+#include "wimax-internal.h"
+
+#define D_SUBMODULE debugfs
+#include "debug-levels.h"
+
+void wimax_debugfs_add(struct wimax_dev *wimax_dev)
+{
+ struct net_device *net_dev = wimax_dev->net_dev;
+ struct dentry *dentry;
+ char buf[128];
+
+ snprintf(buf, sizeof(buf), "wimax:%s", net_dev->name);
+ dentry = debugfs_create_dir(buf, NULL);
+ wimax_dev->debugfs_dentry = dentry;
+
+ d_level_register_debugfs("wimax_dl_", debugfs, dentry);
+ d_level_register_debugfs("wimax_dl_", id_table, dentry);
+ d_level_register_debugfs("wimax_dl_", op_msg, dentry);
+ d_level_register_debugfs("wimax_dl_", op_reset, dentry);
+ d_level_register_debugfs("wimax_dl_", op_rfkill, dentry);
+ d_level_register_debugfs("wimax_dl_", op_state_get, dentry);
+ d_level_register_debugfs("wimax_dl_", stack, dentry);
+}
+
+void wimax_debugfs_rm(struct wimax_dev *wimax_dev)
+{
+ debugfs_remove_recursive(wimax_dev->debugfs_dentry);
+}
diff --git a/drivers/net/wimax/i2400m/Kconfig b/drivers/staging/wimax/i2400m/Kconfig
index 843b905a26a3..843b905a26a3 100644
--- a/drivers/net/wimax/i2400m/Kconfig
+++ b/drivers/staging/wimax/i2400m/Kconfig
diff --git a/drivers/net/wimax/i2400m/Makefile b/drivers/staging/wimax/i2400m/Makefile
index b1db1eff0648..b1db1eff0648 100644
--- a/drivers/net/wimax/i2400m/Makefile
+++ b/drivers/staging/wimax/i2400m/Makefile
diff --git a/drivers/net/wimax/i2400m/control.c b/drivers/staging/wimax/i2400m/control.c
index 8df98757d901..fe885aa56cf3 100644
--- a/drivers/net/wimax/i2400m/control.c
+++ b/drivers/staging/wimax/i2400m/control.c
@@ -77,7 +77,7 @@
#include "i2400m.h"
#include <linux/kernel.h>
#include <linux/slab.h>
-#include <linux/wimax/i2400m.h>
+#include "linux-wimax-i2400m.h"
#include <linux/export.h>
#include <linux/moduleparam.h>
diff --git a/drivers/net/wimax/i2400m/debug-levels.h b/drivers/staging/wimax/i2400m/debug-levels.h
index 00942bb1489b..a317e9fbb734 100644
--- a/drivers/net/wimax/i2400m/debug-levels.h
+++ b/drivers/staging/wimax/i2400m/debug-levels.h
@@ -13,7 +13,7 @@
#define D_MODULENAME i2400m
#define D_MASTER CONFIG_WIMAX_I2400M_DEBUG_LEVEL
-#include <linux/wimax/debug.h>
+#include "../linux-wimax-debug.h"
/* List of all the enabled modules */
enum d_module {
diff --git a/drivers/net/wimax/i2400m/debugfs.c b/drivers/staging/wimax/i2400m/debugfs.c
index 1c640b41ea4c..1c640b41ea4c 100644
--- a/drivers/net/wimax/i2400m/debugfs.c
+++ b/drivers/staging/wimax/i2400m/debugfs.c
diff --git a/drivers/net/wimax/i2400m/driver.c b/drivers/staging/wimax/i2400m/driver.c
index ecb3fccca603..dc8939ff78c0 100644
--- a/drivers/net/wimax/i2400m/driver.c
+++ b/drivers/staging/wimax/i2400m/driver.c
@@ -50,7 +50,7 @@
*/
#include "i2400m.h"
#include <linux/etherdevice.h>
-#include <linux/wimax/i2400m.h>
+#include "linux-wimax-i2400m.h"
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/suspend.h>
diff --git a/drivers/net/wimax/i2400m/fw.c b/drivers/staging/wimax/i2400m/fw.c
index 6c9a41bff2e0..6c9a41bff2e0 100644
--- a/drivers/net/wimax/i2400m/fw.c
+++ b/drivers/staging/wimax/i2400m/fw.c
diff --git a/drivers/net/wimax/i2400m/i2400m-usb.h b/drivers/staging/wimax/i2400m/i2400m-usb.h
index eff4f464a23e..eff4f464a23e 100644
--- a/drivers/net/wimax/i2400m/i2400m-usb.h
+++ b/drivers/staging/wimax/i2400m/i2400m-usb.h
diff --git a/drivers/net/wimax/i2400m/i2400m.h b/drivers/staging/wimax/i2400m/i2400m.h
index a3733a6d14f5..de22cc6f2c5c 100644
--- a/drivers/net/wimax/i2400m/i2400m.h
+++ b/drivers/staging/wimax/i2400m/i2400m.h
@@ -156,8 +156,8 @@
#include <linux/completion.h>
#include <linux/rwsem.h>
#include <linux/atomic.h>
-#include <net/wimax.h>
-#include <linux/wimax/i2400m.h>
+#include "../net-wimax.h"
+#include "linux-wimax-i2400m.h"
#include <asm/byteorder.h>
enum {
diff --git a/drivers/staging/wimax/i2400m/linux-wimax-i2400m.h b/drivers/staging/wimax/i2400m/linux-wimax-i2400m.h
new file mode 100644
index 000000000000..fd198bc24a3c
--- /dev/null
+++ b/drivers/staging/wimax/i2400m/linux-wimax-i2400m.h
@@ -0,0 +1,572 @@
+/*
+ * Intel Wireless WiMax Connection 2400m
+ * Host-Device protocol interface definitions
+ *
+ *
+ * Copyright (C) 2007-2008 Intel Corporation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *
+ * Intel Corporation <linux-wimax@intel.com>
+ * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
+ * - Initial implementation
+ *
+ *
+ * This header defines the data structures and constants used to
+ * communicate with the device.
+ *
+ * BOOTMODE/BOOTROM/FIRMWARE UPLOAD PROTOCOL
+ *
+ * The firmware upload protocol is quite simple and only requires a
+ * handful of commands. See drivers/net/wimax/i2400m/fw.c for more
+ * details.
+ *
+ * The BCF data structure is for the firmware file header.
+ *
+ *
+ * THE DATA / CONTROL PROTOCOL
+ *
+ * This is the normal protocol spoken with the device once the
+ * firmware is uploaded. It transports data payloads and control
+ * messages back and forth.
+ *
+ * It consists of 'messages' that pack one or more payloads each. The
+ * format is described in detail in drivers/net/wimax/i2400m/rx.c and
+ * tx.c.
+ *
+ *
+ * THE L3L4 PROTOCOL
+ *
+ * The term L3L4 refers to Layer 3 (the device) and Layer 4 (the
+ * driver/host software).
+ *
+ * This is the control protocol used by the host to control the i2400m
+ * device (scan, connect, disconnect...). This is sent to / received
+ * as control frames. These frames consist of a header and zero or
+ * more TLVs with information. We call each control frame a "message".
+ *
+ * Each message is composed of:
+ *
+ * HEADER
+ * [TLV0 + PAYLOAD0]
+ * [TLV1 + PAYLOAD1]
+ * [...]
+ * [TLVN + PAYLOADN]
+ *
+ * The HEADER is defined by 'struct i2400m_l3l4_hdr'. The payloads are
+ * defined by a TLV structure (Type Length Value) which is a 'header'
+ * (struct i2400m_tlv_hdr) and then the payload.
+ *
+ * All integers are represented as Little Endian.
+ *
+ * - REQUESTS AND EVENTS
+ *
+ * The requests can be classified as follows:
+ *
+ * COMMAND: implies a request from the host to the device requesting
+ * an action being performed. The device will reply with a
+ * message (with the same type as the command), status and
+ * no (TLV) payload. Execution of a command might cause
+ * events (of different type) to be sent later on as
+ * device's state changes.
+ *
+ * GET/SET: similar to COMMAND, but will not cause other
+ * EVENTs. The reply, in the case of GET, will contain
+ * TLVs with the requested information.
+ *
+ * EVENT: asynchronous messages sent from the device, maybe as a
+ * consequence of previous COMMANDs but disassociated from
+ * them.
+ *
+ * Only one request may be pending at a time (i.e.: don't
+ * parallelize, nor post another GET request before the previous
+ * COMMAND has been acknowledged with its corresponding reply by the
+ * device).
+ *
+ * The different requests and their formats are described below:
+ *
+ * I2400M_MT_* Message types
+ * I2400M_MS_* Message status (for replies, events)
+ * i2400m_tlv_* TLVs
+ *
+ * Data types are named 'struct i2400m_msg_OPNAME', with OPNAME
+ * matching the operation.
+ */
+
+#ifndef __LINUX__WIMAX__I2400M_H__
+#define __LINUX__WIMAX__I2400M_H__
+
+#include <linux/types.h>
+#include <linux/if_ether.h>
+
+/*
+ * Host Device Interface (HDI) common to all busses
+ */
+
+/* Boot-mode (firmware upload mode) commands */
+
+/* Header for the firmware file */
+struct i2400m_bcf_hdr {
+ __le32 module_type;
+ __le32 header_len;
+ __le32 header_version;
+ __le32 module_id;
+ __le32 module_vendor;
+ __le32 date; /* BCD YYYYMMDD */
+ __le32 size; /* in dwords */
+ __le32 key_size; /* in dwords */
+ __le32 modulus_size; /* in dwords */
+ __le32 exponent_size; /* in dwords */
+ __u8 reserved[88];
+} __attribute__ ((packed));
+
+/* Boot mode opcodes */
+enum i2400m_brh_opcode {
+ I2400M_BRH_READ = 1,
+ I2400M_BRH_WRITE = 2,
+ I2400M_BRH_JUMP = 3,
+ I2400M_BRH_SIGNED_JUMP = 8,
+ I2400M_BRH_HASH_PAYLOAD_ONLY = 9,
+};
+
+/* Boot mode command masks and stuff */
+enum i2400m_brh {
+ I2400M_BRH_SIGNATURE = 0xcbbc0000,
+ I2400M_BRH_SIGNATURE_MASK = 0xffff0000,
+ I2400M_BRH_SIGNATURE_SHIFT = 16,
+ I2400M_BRH_OPCODE_MASK = 0x0000000f,
+ I2400M_BRH_RESPONSE_MASK = 0x000000f0,
+ I2400M_BRH_RESPONSE_SHIFT = 4,
+ I2400M_BRH_DIRECT_ACCESS = 0x00000400,
+ I2400M_BRH_RESPONSE_REQUIRED = 0x00000200,
+ I2400M_BRH_USE_CHECKSUM = 0x00000100,
+};
+
+
+/**
+ * i2400m_bootrom_header - Header for a boot-mode command
+ *
+ * @cmd: the above command descriptor
+ * @target_addr: where on the device memory should the action be performed.
+ * @data_size: for read/write, amount of data to be read/written
+ * @block_checksum: checksum value (if applicable)
+ * @payload: the beginning of data attached to this header
+ */
+struct i2400m_bootrom_header {
+ __le32 command; /* Compose with enum i2400m_brh */
+ __le32 target_addr;
+ __le32 data_size;
+ __le32 block_checksum;
+ char payload[0];
+} __attribute__ ((packed));
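+
+/*
+ * Illustrative sketch (not part of the interface; the helper name is
+ * made up): composing the command word of a boot-mode header from
+ * the bits in enum i2400m_brh above. See fw.c for the real
+ * composition code.
+ *
+ * static __le32 i2400m_brh_command(enum i2400m_brh_opcode opcode,
+ *                                  bool resp_required, bool use_checksum)
+ * {
+ *         u32 cmd = I2400M_BRH_SIGNATURE
+ *                 | (opcode & I2400M_BRH_OPCODE_MASK);
+ *
+ *         if (resp_required)
+ *                 cmd |= I2400M_BRH_RESPONSE_REQUIRED;
+ *         if (use_checksum)
+ *                 cmd |= I2400M_BRH_USE_CHECKSUM;
+ *         return cpu_to_le32(cmd);
+ * }
+ */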
+
+
+/*
+ * Data / control protocol
+ */
+
+/* Packet types for the host-device interface */
+enum i2400m_pt {
+ I2400M_PT_DATA = 0,
+ I2400M_PT_CTRL,
+ I2400M_PT_TRACE, /* For device debug */
+ I2400M_PT_RESET_WARM, /* device reset */
+ I2400M_PT_RESET_COLD, /* USB[transport] reset, like reconnect */
+ I2400M_PT_EDATA, /* Extended RX data */
+ I2400M_PT_ILLEGAL
+};
+
+
+/*
+ * Payload for a data packet
+ *
+ * This is prefixed to each and every outgoing DATA type.
+ */
+struct i2400m_pl_data_hdr {
+ __le32 reserved;
+} __attribute__((packed));
+
+
+/*
+ * Payload for an extended data packet
+ *
+ * New in fw v1.4
+ *
+ * @reorder: whether (and how) this payload has to be reordered
+ * @cs: the type of data in the packet, as defined per (802.16e
+ * T11.13.19.1). Currently only 2 (IPv4 packet) is supported.
+ *
+ * This is prefixed to each and every INCOMING DATA packet.
+ */
+struct i2400m_pl_edata_hdr {
+ __le32 reorder; /* bits defined in i2400m_ro */
+ __u8 cs;
+ __u8 reserved[11];
+} __attribute__((packed));
+
+enum i2400m_cs {
+ I2400M_CS_IPV4_0 = 0,
+ I2400M_CS_IPV4 = 2,
+};
+
+enum i2400m_ro {
+ I2400M_RO_NEEDED = 0x01,
+ I2400M_RO_TYPE = 0x03,
+ I2400M_RO_TYPE_SHIFT = 1,
+ I2400M_RO_CIN = 0x0f,
+ I2400M_RO_CIN_SHIFT = 4,
+ I2400M_RO_FBN = 0x07ff,
+ I2400M_RO_FBN_SHIFT = 8,
+ I2400M_RO_SN = 0x07ff,
+ I2400M_RO_SN_SHIFT = 21,
+};
+
+enum i2400m_ro_type {
+ I2400M_RO_TYPE_RESET = 0,
+ I2400M_RO_TYPE_PACKET,
+ I2400M_RO_TYPE_WS,
+ I2400M_RO_TYPE_PACKET_WS,
+};
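+
+/*
+ * Illustrative sketch (hypothetical variable names): unpacking the
+ * 'reorder' word of a struct i2400m_pl_edata_hdr with the masks and
+ * shifts from enum i2400m_ro above; rx.c does the real decoding.
+ *
+ * u32 ro = le32_to_cpu(hdr->reorder);
+ * unsigned ro_needed = ro & I2400M_RO_NEEDED;
+ * unsigned ro_type   = (ro >> I2400M_RO_TYPE_SHIFT) & I2400M_RO_TYPE;
+ * unsigned ro_cin    = (ro >> I2400M_RO_CIN_SHIFT) & I2400M_RO_CIN;
+ * unsigned ro_fbn    = (ro >> I2400M_RO_FBN_SHIFT) & I2400M_RO_FBN;
+ * unsigned ro_sn     = (ro >> I2400M_RO_SN_SHIFT) & I2400M_RO_SN;
+ */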
+
+
+/* Misc constants */
+enum {
+ I2400M_PL_ALIGN = 16, /* Payload data size alignment */
+ I2400M_PL_SIZE_MAX = 0x3EFF,
+ I2400M_MAX_PLS_IN_MSG = 60,
+ /* protocol barkers: sync sequences; for notifications they
+ * are sent in groups of four. */
+ I2400M_H2D_PREVIEW_BARKER = 0xcafe900d,
+ I2400M_COLD_RESET_BARKER = 0xc01dc01d,
+ I2400M_WARM_RESET_BARKER = 0x50f750f7,
+ I2400M_NBOOT_BARKER = 0xdeadbeef,
+ I2400M_SBOOT_BARKER = 0x0ff1c1a1,
+ I2400M_SBOOT_BARKER_6050 = 0x80000001,
+ I2400M_ACK_BARKER = 0xfeedbabe,
+ I2400M_D2H_MSG_BARKER = 0xbeefbabe,
+};
+
+
+/*
+ * Hardware payload descriptor
+ *
+ * Bitfields encoded in a struct to enforce typing semantics.
+ *
+ * Look in rx.c and tx.c for a full description of the format.
+ */
+struct i2400m_pld {
+ __le32 val;
+} __attribute__ ((packed));
+
+#define I2400M_PLD_SIZE_MASK 0x00003fff
+#define I2400M_PLD_TYPE_SHIFT 16
+#define I2400M_PLD_TYPE_MASK 0x000f0000
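+
+/*
+ * Illustrative accessors (a sketch, names hypothetical here): a
+ * payload descriptor packs the payload's size and type into a single
+ * little-endian 32-bit word, extracted with the masks above.
+ *
+ * static inline size_t i2400m_pld_size(const struct i2400m_pld *pld)
+ * {
+ *         return I2400M_PLD_SIZE_MASK & le32_to_cpu(pld->val);
+ * }
+ *
+ * static inline enum i2400m_pt i2400m_pld_type(const struct i2400m_pld *pld)
+ * {
+ *         return (I2400M_PLD_TYPE_MASK & le32_to_cpu(pld->val))
+ *                 >> I2400M_PLD_TYPE_SHIFT;
+ * }
+ */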
+
+/*
+ * Header for a TX message or RX message
+ *
+ * @barker: preamble
+ * @size: used for management of the FIFO queue buffer; before
+ * sending, this is converted to be a real preamble. This
+ * indicates the real size of the TX message that starts at this
+ * point. If the highest bit is set, then this message is to be
+ * skipped.
+ * @sequence: sequence number of this message
+ * @offset: offset where the message itself starts -- see the comments
+ * in the file header about message header and payload descriptor
+ * alignment.
+ * @num_pls: number of payloads in this message
+ * @padding: amount of padding bytes at the end of the message to make
+ * it be of block-size aligned
+ *
+ * Look in rx.c and tx.c for a full description of the format.
+ */
+struct i2400m_msg_hdr {
+ union {
+ __le32 barker;
+ __u32 size; /* same size type as barker!! */
+ };
+ union {
+ __le32 sequence;
+ __u32 offset; /* same size type as barker!! */
+ };
+ __le16 num_pls;
+ __le16 rsv1;
+ __le16 padding;
+ __le16 rsv2;
+ struct i2400m_pld pld[0];
+} __attribute__ ((packed));
+
+
+
+/*
+ * L3/L4 control protocol
+ */
+
+enum {
+ /* Interface version */
+ I2400M_L3L4_VERSION = 0x0100,
+};
+
+/* Message types */
+enum i2400m_mt {
+ I2400M_MT_RESERVED = 0x0000,
+ I2400M_MT_INVALID = 0xffff,
+ I2400M_MT_REPORT_MASK = 0x8000,
+
+ I2400M_MT_GET_SCAN_RESULT = 0x4202,
+ I2400M_MT_SET_SCAN_PARAM = 0x4402,
+ I2400M_MT_CMD_RF_CONTROL = 0x4602,
+ I2400M_MT_CMD_SCAN = 0x4603,
+ I2400M_MT_CMD_CONNECT = 0x4604,
+ I2400M_MT_CMD_DISCONNECT = 0x4605,
+ I2400M_MT_CMD_EXIT_IDLE = 0x4606,
+ I2400M_MT_GET_LM_VERSION = 0x5201,
+ I2400M_MT_GET_DEVICE_INFO = 0x5202,
+ I2400M_MT_GET_LINK_STATUS = 0x5203,
+ I2400M_MT_GET_STATISTICS = 0x5204,
+ I2400M_MT_GET_STATE = 0x5205,
+ I2400M_MT_GET_MEDIA_STATUS = 0x5206,
+ I2400M_MT_SET_INIT_CONFIG = 0x5404,
+ I2400M_MT_CMD_INIT = 0x5601,
+ I2400M_MT_CMD_TERMINATE = 0x5602,
+ I2400M_MT_CMD_MODE_OF_OP = 0x5603,
+ I2400M_MT_CMD_RESET_DEVICE = 0x5604,
+ I2400M_MT_CMD_MONITOR_CONTROL = 0x5605,
+ I2400M_MT_CMD_ENTER_POWERSAVE = 0x5606,
+ I2400M_MT_GET_TLS_OPERATION_RESULT = 0x6201,
+ I2400M_MT_SET_EAP_SUCCESS = 0x6402,
+ I2400M_MT_SET_EAP_FAIL = 0x6403,
+ I2400M_MT_SET_EAP_KEY = 0x6404,
+ I2400M_MT_CMD_SEND_EAP_RESPONSE = 0x6602,
+ I2400M_MT_REPORT_SCAN_RESULT = 0xc002,
+ I2400M_MT_REPORT_STATE = 0xd002,
+ I2400M_MT_REPORT_POWERSAVE_READY = 0xd005,
+ I2400M_MT_REPORT_EAP_REQUEST = 0xe002,
+ I2400M_MT_REPORT_EAP_RESTART = 0xe003,
+ I2400M_MT_REPORT_ALT_ACCEPT = 0xe004,
+ I2400M_MT_REPORT_KEY_REQUEST = 0xe005,
+};
+
+
+/*
+ * Message Ack Status codes
+ *
+ * When a message is replied-to, this status is reported.
+ */
+enum i2400m_ms {
+ I2400M_MS_DONE_OK = 0,
+ I2400M_MS_DONE_IN_PROGRESS = 1,
+ I2400M_MS_INVALID_OP = 2,
+ I2400M_MS_BAD_STATE = 3,
+ I2400M_MS_ILLEGAL_VALUE = 4,
+ I2400M_MS_MISSING_PARAMS = 5,
+ I2400M_MS_VERSION_ERROR = 6,
+ I2400M_MS_ACCESSIBILITY_ERROR = 7,
+ I2400M_MS_BUSY = 8,
+ I2400M_MS_CORRUPTED_TLV = 9,
+ I2400M_MS_UNINITIALIZED = 10,
+ I2400M_MS_UNKNOWN_ERROR = 11,
+ I2400M_MS_PRODUCTION_ERROR = 12,
+ I2400M_MS_NO_RF = 13,
+ I2400M_MS_NOT_READY_FOR_POWERSAVE = 14,
+ I2400M_MS_THERMAL_CRITICAL = 15,
+ I2400M_MS_MAX
+};
+
+
+/**
+ * i2400m_tlv - enumeration of the different types of TLVs
+ *
+ * TLVs stand for type-length-value and are the header for a payload
+ * composed of almost anything. Each payload has a type assigned
+ * and a length.
+ */
+enum i2400m_tlv {
+ I2400M_TLV_L4_MESSAGE_VERSIONS = 129,
+ I2400M_TLV_SYSTEM_STATE = 141,
+ I2400M_TLV_MEDIA_STATUS = 161,
+ I2400M_TLV_RF_OPERATION = 162,
+ I2400M_TLV_RF_STATUS = 163,
+ I2400M_TLV_DEVICE_RESET_TYPE = 132,
+ I2400M_TLV_CONFIG_IDLE_PARAMETERS = 601,
+ I2400M_TLV_CONFIG_IDLE_TIMEOUT = 611,
+ I2400M_TLV_CONFIG_D2H_DATA_FORMAT = 614,
+ I2400M_TLV_CONFIG_DL_HOST_REORDER = 615,
+};
+
+
+struct i2400m_tlv_hdr {
+ __le16 type;
+ __le16 length; /* payload's */
+ __u8 pl[0];
+} __attribute__((packed));
+
+
+struct i2400m_l3l4_hdr {
+ __le16 type;
+ __le16 length; /* payload's */
+ __le16 version;
+ __le16 resv1;
+ __le16 status;
+ __le16 resv2;
+ struct i2400m_tlv_hdr pl[0];
+} __attribute__((packed));
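+
+/*
+ * Illustrative sketch (hypothetical loop; see control.c for the real
+ * TLV matching code): walking the TLVs that follow an L3L4 header,
+ * where 'len' is the total payload length taken from l3l4->length.
+ *
+ * const struct i2400m_tlv_hdr *tlv = l3l4->pl;
+ * size_t offset = 0;
+ * while (offset + sizeof(*tlv) <= len) {
+ *         size_t tlv_len = le16_to_cpu(tlv->length);
+ *         ...type is in tlv->type, payload in tlv->pl...
+ *         offset += sizeof(*tlv) + tlv_len;
+ *         tlv = (const void *)l3l4->pl + offset;
+ * }
+ */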
+
+
+/**
+ * i2400m_system_state - different states of the device
+ */
+enum i2400m_system_state {
+ I2400M_SS_UNINITIALIZED = 1,
+ I2400M_SS_INIT,
+ I2400M_SS_READY,
+ I2400M_SS_SCAN,
+ I2400M_SS_STANDBY,
+ I2400M_SS_CONNECTING,
+ I2400M_SS_WIMAX_CONNECTED,
+ I2400M_SS_DATA_PATH_CONNECTED,
+ I2400M_SS_IDLE,
+ I2400M_SS_DISCONNECTING,
+ I2400M_SS_OUT_OF_ZONE,
+ I2400M_SS_SLEEPACTIVE,
+ I2400M_SS_PRODUCTION,
+ I2400M_SS_CONFIG,
+ I2400M_SS_RF_OFF,
+ I2400M_SS_RF_SHUTDOWN,
+ I2400M_SS_DEVICE_DISCONNECT,
+ I2400M_SS_MAX,
+};
+
+
+/**
+ * i2400m_tlv_system_state - report on the state of the system
+ *
+ * @state: see enum i2400m_system_state
+ */
+struct i2400m_tlv_system_state {
+ struct i2400m_tlv_hdr hdr;
+ __le32 state;
+} __attribute__((packed));
+
+
+struct i2400m_tlv_l4_message_versions {
+ struct i2400m_tlv_hdr hdr;
+ __le16 major;
+ __le16 minor;
+ __le16 branch;
+ __le16 reserved;
+} __attribute__((packed));
+
+
+struct i2400m_tlv_detailed_device_info {
+ struct i2400m_tlv_hdr hdr;
+ __u8 reserved1[400];
+ __u8 mac_address[ETH_ALEN];
+ __u8 reserved2[2];
+} __attribute__((packed));
+
+
+enum i2400m_rf_switch_status {
+ I2400M_RF_SWITCH_ON = 1,
+ I2400M_RF_SWITCH_OFF = 2,
+};
+
+struct i2400m_tlv_rf_switches_status {
+ struct i2400m_tlv_hdr hdr;
+ __u8 sw_rf_switch; /* 1 ON, 2 OFF */
+ __u8 hw_rf_switch; /* 1 ON, 2 OFF */
+ __u8 reserved[2];
+} __attribute__((packed));
+
+
+enum {
+ i2400m_rf_operation_on = 1,
+ i2400m_rf_operation_off = 2
+};
+
+struct i2400m_tlv_rf_operation {
+ struct i2400m_tlv_hdr hdr;
+ __le32 status; /* 1 ON, 2 OFF */
+} __attribute__((packed));
+
+
+enum i2400m_tlv_reset_type {
+ I2400M_RESET_TYPE_COLD = 1,
+ I2400M_RESET_TYPE_WARM
+};
+
+struct i2400m_tlv_device_reset_type {
+ struct i2400m_tlv_hdr hdr;
+ __le32 reset_type;
+} __attribute__((packed));
+
+
+struct i2400m_tlv_config_idle_parameters {
+ struct i2400m_tlv_hdr hdr;
+ __le32 idle_timeout; /* 100 to 300000 ms [5 min], in 100 ms
+ * increments; 0 disables */
+ __le32 idle_paging_interval; /* frames */
+} __attribute__((packed));
+
+
+enum i2400m_media_status {
+ I2400M_MEDIA_STATUS_LINK_UP = 1,
+ I2400M_MEDIA_STATUS_LINK_DOWN,
+ I2400M_MEDIA_STATUS_LINK_RENEW,
+};
+
+struct i2400m_tlv_media_status {
+ struct i2400m_tlv_hdr hdr;
+ __le32 media_status;
+} __attribute__((packed));
+
+
+/* New in v1.4 */
+struct i2400m_tlv_config_idle_timeout {
+ struct i2400m_tlv_hdr hdr;
+ __le32 timeout; /* 100 to 300000 ms [5 min], in 100 ms
+ * increments; 0 disables */
+} __attribute__((packed));
+
+/* New in v1.4 -- for backward compat, will be removed */
+struct i2400m_tlv_config_d2h_data_format {
+ struct i2400m_tlv_hdr hdr;
+ __u8 format; /* 0 old format, 1 enhanced */
+ __u8 reserved[3];
+} __attribute__((packed));
+
+/* New in v1.4 */
+struct i2400m_tlv_config_dl_host_reorder {
+ struct i2400m_tlv_hdr hdr;
+ __u8 reorder; /* 0 disabled, 1 enabled */
+ __u8 reserved[3];
+} __attribute__((packed));
+
+
+#endif /* #ifndef __LINUX__WIMAX__I2400M_H__ */
diff --git a/drivers/net/wimax/i2400m/netdev.c b/drivers/staging/wimax/i2400m/netdev.c
index a7fcbceb6e6b..a7fcbceb6e6b 100644
--- a/drivers/net/wimax/i2400m/netdev.c
+++ b/drivers/staging/wimax/i2400m/netdev.c
diff --git a/drivers/net/wimax/i2400m/op-rfkill.c b/drivers/staging/wimax/i2400m/op-rfkill.c
index 5c79f052cad2..fbddf2e18c14 100644
--- a/drivers/net/wimax/i2400m/op-rfkill.c
+++ b/drivers/staging/wimax/i2400m/op-rfkill.c
@@ -18,7 +18,7 @@
* switch (coming from sysfs, the wimax stack or user space).
*/
#include "i2400m.h"
-#include <linux/wimax/i2400m.h>
+#include "linux-wimax-i2400m.h"
#include <linux/slab.h>
diff --git a/drivers/net/wimax/i2400m/rx.c b/drivers/staging/wimax/i2400m/rx.c
index c9fb619a9e01..c9fb619a9e01 100644
--- a/drivers/net/wimax/i2400m/rx.c
+++ b/drivers/staging/wimax/i2400m/rx.c
diff --git a/drivers/net/wimax/i2400m/sysfs.c b/drivers/staging/wimax/i2400m/sysfs.c
index 895ee265909b..895ee265909b 100644
--- a/drivers/net/wimax/i2400m/sysfs.c
+++ b/drivers/staging/wimax/i2400m/sysfs.c
diff --git a/drivers/net/wimax/i2400m/tx.c b/drivers/staging/wimax/i2400m/tx.c
index 1255302e251e..1255302e251e 100644
--- a/drivers/net/wimax/i2400m/tx.c
+++ b/drivers/staging/wimax/i2400m/tx.c
diff --git a/drivers/net/wimax/i2400m/usb-debug-levels.h b/drivers/staging/wimax/i2400m/usb-debug-levels.h
index b6f7335de765..8fd0111560f6 100644
--- a/drivers/net/wimax/i2400m/usb-debug-levels.h
+++ b/drivers/staging/wimax/i2400m/usb-debug-levels.h
@@ -13,7 +13,7 @@
#define D_MODULENAME i2400m_usb
#define D_MASTER CONFIG_WIMAX_I2400M_DEBUG_LEVEL
-#include <linux/wimax/debug.h>
+#include "../linux-wimax-debug.h"
/* List of all the enabled modules */
enum d_module {
diff --git a/drivers/net/wimax/i2400m/usb-fw.c b/drivers/staging/wimax/i2400m/usb-fw.c
index 27ab233650d5..27ab233650d5 100644
--- a/drivers/net/wimax/i2400m/usb-fw.c
+++ b/drivers/staging/wimax/i2400m/usb-fw.c
diff --git a/drivers/net/wimax/i2400m/usb-notif.c b/drivers/staging/wimax/i2400m/usb-notif.c
index 5d429f816125..5d429f816125 100644
--- a/drivers/net/wimax/i2400m/usb-notif.c
+++ b/drivers/staging/wimax/i2400m/usb-notif.c
diff --git a/drivers/net/wimax/i2400m/usb-rx.c b/drivers/staging/wimax/i2400m/usb-rx.c
index 5b64bda7d9e7..5b64bda7d9e7 100644
--- a/drivers/net/wimax/i2400m/usb-rx.c
+++ b/drivers/staging/wimax/i2400m/usb-rx.c
diff --git a/drivers/net/wimax/i2400m/usb-tx.c b/drivers/staging/wimax/i2400m/usb-tx.c
index 3ba9d70cca1b..3ba9d70cca1b 100644
--- a/drivers/net/wimax/i2400m/usb-tx.c
+++ b/drivers/staging/wimax/i2400m/usb-tx.c
diff --git a/drivers/net/wimax/i2400m/usb.c b/drivers/staging/wimax/i2400m/usb.c
index b684e97ac976..f250d03ce7c7 100644
--- a/drivers/net/wimax/i2400m/usb.c
+++ b/drivers/staging/wimax/i2400m/usb.c
@@ -49,8 +49,9 @@
* usb_reset_device()
*/
#include "i2400m-usb.h"
-#include <linux/wimax/i2400m.h>
+#include "linux-wimax-i2400m.h"
#include <linux/debugfs.h>
+#include <linux/ethtool.h>
#include <linux/slab.h>
#include <linux/module.h>
diff --git a/drivers/staging/wimax/id-table.c b/drivers/staging/wimax/id-table.c
new file mode 100644
index 000000000000..0e6f4aa87bc9
--- /dev/null
+++ b/drivers/staging/wimax/id-table.c
@@ -0,0 +1,130 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Linux WiMAX
+ * Mapping of generic netlink family IDs to net devices
+ *
+ * Copyright (C) 2005-2006 Intel Corporation <linux-wimax@intel.com>
+ * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
+ *
+ * We assign a single generic netlink family ID to each device (to
+ * simplify lookup).
+ *
+ * We need a way to map family ID to a wimax_dev pointer.
+ *
+ * The idea is to use a very simple lookup. Using a netlink attribute
+ * with (for example) the interface name implies a heavier search over
+ * all the network devices; that seemed wasteful given that we know
+ * we are looking for a WiMAX device and that most systems will have
+ * just a single WiMAX adapter.
+ *
+ * We put all the WiMAX devices in the system in a linked list and
+ * match the generic netlink family ID against the list.
+ *
+ * By using a linked list, the case of a single adapter in the system
+ * becomes (almost) no overhead, while still working for many more. If
+ * it ever goes beyond two, I'll be surprised.
+ */
+#include <linux/device.h>
+#include <net/genetlink.h>
+#include <linux/netdevice.h>
+#include <linux/list.h>
+#include "linux-wimax.h"
+#include "wimax-internal.h"
+
+
+#define D_SUBMODULE id_table
+#include "debug-levels.h"
+
+
+static DEFINE_SPINLOCK(wimax_id_table_lock);
+static struct list_head wimax_id_table = LIST_HEAD_INIT(wimax_id_table);
+
+
+/*
+ * wimax_id_table_add - add a gennetlink family ID / wimax_dev mapping
+ *
+ * @wimax_dev: WiMAX device descriptor to associate to the Generic
+ * Netlink family ID.
+ *
+ * Add the device to the list that is used to map generic netlink
+ * family IDs to wimax_dev pointers.
+ */
+void wimax_id_table_add(struct wimax_dev *wimax_dev)
+{
+ d_fnstart(3, NULL, "(wimax_dev %p)\n", wimax_dev);
+ spin_lock(&wimax_id_table_lock);
+ list_add(&wimax_dev->id_table_node, &wimax_id_table);
+ spin_unlock(&wimax_id_table_lock);
+ d_fnend(3, NULL, "(wimax_dev %p)\n", wimax_dev);
+}
+
+
+/*
+ * wimax_dev_get_by_genl_info - lookup a wimax_dev from the gennetlink info
+ *
+ * The generic netlink family ID has been filled out in the
+ * nlmsghdr->nlmsg_type field, so we pull it from there, look it up in
+ * the mapping table and reference the wimax_dev.
+ *
+ * When done, the reference should be dropped with
+ * 'dev_put(wimax_dev->net_dev)'.
+ */
+struct wimax_dev *wimax_dev_get_by_genl_info(
+ struct genl_info *info, int ifindex)
+{
+ struct wimax_dev *wimax_dev = NULL;
+
+ d_fnstart(3, NULL, "(info %p ifindex %d)\n", info, ifindex);
+ spin_lock(&wimax_id_table_lock);
+ list_for_each_entry(wimax_dev, &wimax_id_table, id_table_node) {
+ if (wimax_dev->net_dev->ifindex == ifindex) {
+ dev_hold(wimax_dev->net_dev);
+ goto found;
+ }
+ }
+ wimax_dev = NULL;
+ d_printf(1, NULL, "wimax: no devices found with ifindex %d\n",
+ ifindex);
+found:
+ spin_unlock(&wimax_id_table_lock);
+ d_fnend(3, NULL, "(info %p ifindex %d) = %p\n",
+ info, ifindex, wimax_dev);
+ return wimax_dev;
+}
+
+
+/*
+ * wimax_id_table_rm - Remove a gennetlink family ID / wimax_dev mapping
+ *
+ * @wimax_dev: WiMAX device whose mapping to remove from the table
+ */
+void wimax_id_table_rm(struct wimax_dev *wimax_dev)
+{
+ spin_lock(&wimax_id_table_lock);
+ list_del_init(&wimax_dev->id_table_node);
+ spin_unlock(&wimax_id_table_lock);
+}
+
+
+/*
+ * Release the gennetlink family id / mapping table
+ *
+ * On debug, verify that the table is empty upon removal. We want the
+ * code always compiled, to ensure it doesn't bit rot. It will be
+ * compiled out if CONFIG_BUG is disabled.
+ */
+void wimax_id_table_release(void)
+{
+ struct wimax_dev *wimax_dev;
+
+#ifndef CONFIG_BUG
+ return;
+#endif
+ spin_lock(&wimax_id_table_lock);
+ list_for_each_entry(wimax_dev, &wimax_id_table, id_table_node) {
+ pr_err("BUG: %s wimax_dev %p ifindex %d not cleared\n",
+ __func__, wimax_dev, wimax_dev->net_dev->ifindex);
+ WARN_ON(1);
+ }
+ spin_unlock(&wimax_id_table_lock);
+}
diff --git a/drivers/staging/wimax/linux-wimax-debug.h b/drivers/staging/wimax/linux-wimax-debug.h
new file mode 100644
index 000000000000..5b5ec405143b
--- /dev/null
+++ b/drivers/staging/wimax/linux-wimax-debug.h
@@ -0,0 +1,491 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Linux WiMAX
+ * Collection of tools to manage debug operations.
+ *
+ * Copyright (C) 2005-2007 Intel Corporation
+ * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
+ *
+ * Don't #include this file directly, read on!
+ *
+ * EXECUTING DEBUGGING ACTIONS OR NOT
+ *
+ * The main thing this framework provides is decision power to take a
+ * debug action (like printing a message) if the current debug level
+ * allows it.
+ *
+ * The decision power is at two levels: at compile-time (what does
+ * not make it is compiled out) and at run-time. The run-time
+ * selection is done per-submodule (as they are declared by the user
+ * of the framework).
+ *
+ * A call to d_test(L) (L being the target debug level) returns true
+ * if the action should be taken because the current debug levels
+ * allow it (both compile and run time).
+ *
+ * It follows that a call to d_test() that can be determined to be
+ * always false at compile time will get the code depending on it
+ * compiled out by optimization.
+ *
+ * DEBUG LEVELS
+ *
+ * It is up to the caller to define what each debug level means.
+ *
+ * Convention sets 0 as "no debug" (so an action marked as debug level 0
+ * will always be taken). The increasing debug levels are used for
+ * increased verbosity.
+ *
+ * USAGE
+ *
+ * Group the code in modules and submodules inside each module [which
+ * in most cases maps to Linux modules and .c files that compose
+ * those].
+ *
+ * For each module, there is:
+ *
+ * - a MODULENAME (single word, legal C identifier)
+ *
+ * - a debug-levels.h header file that declares the list of
+ * submodules and that is included by all .c files that use
+ * the debugging tools. The file name can be anything.
+ *
+ * - some (optional) .c code to manipulate the runtime debug levels
+ * through debugfs.
+ *
+ * The debug-levels.h file would look like:
+ *
+ * #ifndef __debug_levels__h__
+ * #define __debug_levels__h__
+ *
+ * #define D_MODULENAME modulename
+ * #define D_MASTER 10
+ *
+ * #include "linux-wimax-debug.h"
+ *
+ * enum d_module {
+ * D_SUBMODULE_DECLARE(submodule_1),
+ * D_SUBMODULE_DECLARE(submodule_2),
+ * ...
+ * D_SUBMODULE_DECLARE(submodule_N)
+ * };
+ *
+ * #endif
+ *
+ * D_MASTER is the maximum compile-time debug level; any debug action
+ * above this level will be compiled out. D_MODULENAME is the module
+ * name (a legal C identifier), which has to be unique for each module
+ * (to avoid namespace collisions during linkage). Note those #defines
+ * need to be done before #including linux-wimax-debug.h.
+ *
+ * We declare N different submodules whose debug level can be
+ * independently controlled during runtime.
+ *
+ * In a .c file of the module (and only in one of them), define the
+ * following code:
+ *
+ * struct d_level D_LEVEL[] = {
+ * D_SUBMODULE_DEFINE(submodule_1),
+ * D_SUBMODULE_DEFINE(submodule_2),
+ * ...
+ * D_SUBMODULE_DEFINE(submodule_N),
+ * };
+ * size_t D_LEVEL_SIZE = ARRAY_SIZE(D_LEVEL);
+ *
+ * Externs for d_level_MODULENAME and d_level_size_MODULENAME are used
+ * and declared in this file using the D_LEVEL and D_LEVEL_SIZE macros
+ * #defined also in this file.
+ *
+ * To manipulate from user space the levels, create a debugfs dentry
+ * and then register each submodule with:
+ *
+ * d_level_register_debugfs("PREFIX_", submodule_X, parent);
+ *
+ * Where PREFIX_ is a name of your choosing. This will create a
+ * debugfs file with a single numeric value that can be used to tweak
+ * it. To remove the entries, just use debugfs_remove_recursive() on
+ * 'parent'.
+ *
+ * NOTE: remember that even if this will show attached to some
+ * particular instance of a device, the settings are *global*.
+ *
+ * On each submodule (for example, .c files), the debug infrastructure
+ * should be included like this:
+ *
+ * #define D_SUBMODULE submodule_x // matches one in debug-levels.h
+ * #include "debug-levels.h"
+ *
+ * after #including all your include files.
+ *
+ * Now you can use the d_*() macros below [d_test(), d_fnstart(),
+ * d_fnend(), d_printf(), d_dump()].
+ *
+ * If their debug level is greater than D_MASTER, they will be
+ * compiled out.
+ *
+ * If their debug level is lower or equal than D_MASTER but greater
+ * than the current debug level of their submodule, they'll be
+ * ignored.
+ *
+ * Otherwise, the action will be performed.
+ */
+#ifndef __debug__h__
+#define __debug__h__
+
+#include <linux/types.h>
+#include <linux/slab.h>
+
+struct device;
+
+/* Backend stuff */
+
+/*
+ * Debug backend: generate a message header from a 'struct device'
+ *
+ * @head: buffer where to place the header
+ * @head_size: length of @head
+ * @dev: pointer to device used to generate a header from. If NULL,
+ * an empty ("") header is generated.
+ */
+static inline
+void __d_head(char *head, size_t head_size,
+ struct device *dev)
+{
+ if (dev == NULL)
+ head[0] = 0;
+ else if ((unsigned long)dev < 4096) {
+ printk(KERN_ERR "E: Corrupt dev %p\n", dev);
+ WARN_ON(1);
+ } else
+ snprintf(head, head_size, "%s %s: ",
+ dev_driver_string(dev), dev_name(dev));
+}
+
+
+/*
+ * Debug backend: log some message if debugging is enabled
+ *
+ * @l: intended debug level
+ * @tag: tag to prefix the message with
+ * @dev: 'struct device' associated to this message
+ * @f: printf-like format and arguments
+ *
+ * Note this is optimized out if it doesn't pass the compile-time
+ * check; however, it is *always* compiled. This is useful to make
+ * sure the printf-like formats and variables are always checked and
+ * they don't get bit rot if you have all the debugging disabled.
+ */
+#define _d_printf(l, tag, dev, f, a...) \
+do { \
+ char head[64]; \
+ if (!d_test(l)) \
+ break; \
+ __d_head(head, sizeof(head), dev); \
+ printk(KERN_ERR "%s%s%s: " f, head, __func__, tag, ##a); \
+} while (0)
+
+
+/*
+ * CPP syntactic sugar to generate A_B like symbol names when one of
+ * the arguments is a preprocessor #define.
+ */
+#define __D_PASTE__(varname, modulename) varname##_##modulename
+#define __D_PASTE(varname, modulename) (__D_PASTE__(varname, modulename))
+#define _D_SUBMODULE_INDEX(_name) (D_SUBMODULE_DECLARE(_name))
+
+
+/*
+ * Store a submodule's runtime debug level and name
+ */
+struct d_level {
+ u8 level;
+ const char *name;
+};
+
+
+/*
+ * List of available submodules and their debug levels
+ *
+ * We call them d_level_MODULENAME and d_level_size_MODULENAME; the
+ * macros D_LEVEL and D_LEVEL_SIZE contain the name already for
+ * convenience.
+ *
+ * This array and the size are defined on some .c file that is part of
+ * the current module.
+ */
+#define D_LEVEL __D_PASTE(d_level, D_MODULENAME)
+#define D_LEVEL_SIZE __D_PASTE(d_level_size, D_MODULENAME)
+
+extern struct d_level D_LEVEL[];
+extern size_t D_LEVEL_SIZE;
+
+
+/*
+ * Frontend stuff
+ *
+ *
+ * Stuff you need to declare prior to using the actual "debug" actions
+ * (defined below).
+ */
+
+#ifndef D_MODULENAME
+#error D_MODULENAME is not defined in your debug-levels.h file
+/**
+ * D_MODULENAME - Name of the current module
+ *
+ * #define in your module's debug-levels.h, making sure it is
+ * unique. This has to be a legal C identifier.
+ */
+#define D_MODULENAME undefined_modulename
+#endif
+
+
+#ifndef D_MASTER
+#warning D_MASTER not defined, but debug.h included! [see docs]
+/**
+ * D_MASTER - Compile time maximum debug level
+ *
+ * #define in your debug-levels.h file to the maximum debug level the
+ * runtime code will be allowed to have. This allows you to provide a
+ * main knob.
+ *
+ * Anything above that level will be optimized out of the compile.
+ *
+ * Defaults to zero (no debug code compiled in).
+ *
+ * Maximum one definition per module (at the debug-levels.h file).
+ */
+#define D_MASTER 0
+#endif
+
+#ifndef D_SUBMODULE
+#error D_SUBMODULE not defined, but debug.h included! [see docs]
+/**
+ * D_SUBMODULE - Name of the current submodule
+ *
+ * #define in your submodule .c file before #including debug-levels.h
+ * to the name of the current submodule as previously declared and
+ * defined with D_SUBMODULE_DECLARE() (in your module's
+ * debug-levels.h) and D_SUBMODULE_DEFINE().
+ *
+ * This is used to provide runtime-control over the debug levels.
+ *
+ * Maximum one per .c file! Can be shared among different .c files
+ * (meaning they belong to the same submodule categorization).
+ */
+#define D_SUBMODULE undefined_module
+#endif
+
+
+/**
+ * D_SUBMODULE_DECLARE - Declare a submodule for runtime debug level control
+ *
+ * @_name: name of the submodule, restricted to the chars that make up a
+ * valid C identifier ([a-zA-Z0-9_]).
+ *
+ * Declare in the module's debug-levels.h header file as:
+ *
+ * enum d_module {
+ * D_SUBMODULE_DECLARE(submodule_1),
+ * D_SUBMODULE_DECLARE(submodule_2),
+ * D_SUBMODULE_DECLARE(submodule_3),
+ * };
+ *
+ * Some corresponding .c file needs to have a matching
+ * D_SUBMODULE_DEFINE().
+ */
+#define D_SUBMODULE_DECLARE(_name) __D_SUBMODULE_##_name
+
+
+/**
+ * D_SUBMODULE_DEFINE - Define a submodule for runtime debug level control
+ *
+ * @_name: name of the submodule, restricted to the chars that make up a
+ * valid C identifier ([a-zA-Z0-9_]).
+ *
+ * Use once per module (in some .c file) as:
+ *
+ * static
+ * struct d_level d_level_SUBMODULENAME[] = {
+ * D_SUBMODULE_DEFINE(submodule_1),
+ * D_SUBMODULE_DEFINE(submodule_2),
+ * D_SUBMODULE_DEFINE(submodule_3),
+ * };
+ * size_t d_level_size_SUBMODULENAME = ARRAY_SIZE(d_level_SUBMODULENAME);
+ *
+ * Matching D_SUBMODULE_DECLARE()s have to be present in a
+ * debug-levels.h header file.
+ */
+#define D_SUBMODULE_DEFINE(_name) \
+[__D_SUBMODULE_##_name] = { \
+ .level = 0, \
+ .name = #_name \
+}
+
+
+
+/* The actual "debug" operations */
+
+
+/**
+ * d_test - Returns true if debugging should be enabled
+ *
+ * @l: intended debug level (unsigned)
+ *
+ * If the master debug switch is enabled and the current settings are
+ * higher or equal to the requested level, then debugging
+ * output/actions should be enabled.
+ *
+ * NOTE:
+ *
+ * This needs to be coded so that it can be evaluated in compile
+ * time; this is why the ugly BUG_ON() is placed in there, so the
+ * D_MASTER evaluation compiles all out if it is compile-time false.
+ */
+#define d_test(l) \
+({ \
+ unsigned __l = l; /* type enforcer */ \
+ (D_MASTER) >= __l \
+ && ({ \
+ BUG_ON(_D_SUBMODULE_INDEX(D_SUBMODULE) >= D_LEVEL_SIZE);\
+ D_LEVEL[_D_SUBMODULE_INDEX(D_SUBMODULE)].level >= __l; \
+ }); \
+})
+
+
+/**
+ * d_fnstart - log message at function start if debugging enabled
+ *
+ * @l: intended debug level
+ * @_dev: 'struct device' pointer, NULL if none (for context)
+ * @f: printf-like format and arguments
+ */
+#define d_fnstart(l, _dev, f, a...) _d_printf(l, " FNSTART", _dev, f, ## a)
+
+
+/**
+ * d_fnend - log message at function end if debugging enabled
+ *
+ * @l: intended debug level
+ * @_dev: 'struct device' pointer, NULL if none (for context)
+ * @f: printf-like format and arguments
+ */
+#define d_fnend(l, _dev, f, a...) _d_printf(l, " FNEND", _dev, f, ## a)
+
+
+/**
+ * d_printf - log message if debugging enabled
+ *
+ * @l: intended debug level
+ * @_dev: 'struct device' pointer, NULL if none (for context)
+ * @f: printf-like format and arguments
+ */
+#define d_printf(l, _dev, f, a...) _d_printf(l, "", _dev, f, ## a)
+
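+/*
+ * An illustrative call (assuming a submodule debug level >= 2 and a
+ * 'dev' pointer in scope): d_printf(2, dev, "tx len %zu\n", len);
+ */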
+
+/**
+ * d_dump - log buffer hex dump if debugging enabled
+ *
+ * @l: intended debug level
+ * @dev: 'struct device' pointer, NULL if none (for context)
+ * @ptr: pointer to the buffer to hex dump
+ * @size: size of the buffer to dump
+ */
+#define d_dump(l, dev, ptr, size) \
+do { \
+ char head[64]; \
+ if (!d_test(l)) \
+ break; \
+ __d_head(head, sizeof(head), dev); \
+ print_hex_dump(KERN_ERR, head, 0, 16, 1, \
+ ((void *) ptr), (size), 0); \
+} while (0)
+
+
+/**
+ * d_level_register_debugfs - Export a submodule's debug level over
+ * debugfs as PREFIX<name>
+ *
+ * @prefix: string to prefix the name with
+ * @name: name of the submodule (not a string, just the name)
+ * @parent: debugfs parent dentry
+ *
+ * For removing, just use debugfs_remove_recursive() on the parent.
+ */
+#define d_level_register_debugfs(prefix, name, parent) \
+({ \
+ debugfs_create_u8( \
+ prefix #name, 0600, parent, \
+ &(D_LEVEL[__D_SUBMODULE_ ## name].level)); \
+})
+
+
+static inline
+void d_submodule_set(struct d_level *d_level, size_t d_level_size,
+ const char *submodule, u8 level, const char *tag)
+{
+ struct d_level *itr, *top;
+ int index = -1;
+
+ for (itr = d_level, top = itr + d_level_size; itr < top; itr++) {
+ index++;
+ if (itr->name == NULL) {
+ printk(KERN_ERR "%s: itr->name NULL?? (%p, #%d)\n",
+ tag, itr, index);
+ continue;
+ }
+ if (!strcmp(itr->name, submodule)) {
+ itr->level = level;
+ return;
+ }
+ }
+ printk(KERN_ERR "%s: unknown submodule %s\n", tag, submodule);
+}
+
+
+/**
+ * d_parse_params - Parse a string with debug parameters from the
+ * command line
+ *
+ * @d_level: level structure (D_LEVEL)
+ * @d_level_size: number of items in the level structure
+ * (D_LEVEL_SIZE).
+ * @_params: string with the parameters; this is a space (not tab!)
+ * separated list of NAME:VALUE, where value is the debug level
+ * and NAME is the name of the submodule.
+ * @tag: string for error messages (example: MODULE.ARGNAME).
+ */
+static inline
+void d_parse_params(struct d_level *d_level, size_t d_level_size,
+ const char *_params, const char *tag)
+{
+ char submodule[130], *params, *params_orig, *token, *colon;
+ unsigned level, tokens;
+
+ if (_params == NULL)
+ return;
+ params_orig = kstrdup(_params, GFP_KERNEL);
+ params = params_orig;
+ while (1) {
+ token = strsep(&params, " ");
+ if (token == NULL)
+ break;
+ if (*token == '\0') /* eat joint spaces */
+ continue;
+ /* kernel's sscanf %s eats until whitespace, so we
+ * replace : by \n so it doesn't get eaten later by
+ * strsep */
+ colon = strchr(token, ':');
+ if (colon != NULL)
+ *colon = '\n';
+ tokens = sscanf(token, "%s\n%u", submodule, &level);
+ if (colon != NULL)
+ *colon = ':'; /* set back, for error messages */
+ if (tokens == 2)
+ d_submodule_set(d_level, d_level_size,
+ submodule, level, tag);
+ else
+ printk(KERN_ERR "%s: can't parse '%s' as a "
+ "SUBMODULE:LEVEL (%d tokens)\n",
+ tag, token, tokens);
+ }
+ kfree(params_orig);
+}
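+
+/*
+ * Typical (illustrative) use from a module, feeding a 'debug' module
+ * parameter to d_parse_params(); the parameter and variable names
+ * here are hypothetical:
+ *
+ * static char *debug_params;
+ * module_param_named(debug, debug_params, charp, 0644);
+ * ...and at module init time...
+ * d_parse_params(D_LEVEL, D_LEVEL_SIZE, debug_params, "mymod.debug");
+ */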
+
+#endif /* #ifndef __debug__h__ */
diff --git a/drivers/staging/wimax/linux-wimax.h b/drivers/staging/wimax/linux-wimax.h
new file mode 100644
index 000000000000..9f6b77af2f6d
--- /dev/null
+++ b/drivers/staging/wimax/linux-wimax.h
@@ -0,0 +1,239 @@
+/*
+ * Linux WiMax
+ * API for user space
+ *
+ *
+ * Copyright (C) 2007-2008 Intel Corporation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *
+ * Intel Corporation <linux-wimax@intel.com>
+ * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
+ * - Initial implementation
+ *
+ *
+ * This file declares the user/kernel protocol that is spoken over
+ * Generic Netlink, as well as any type declaration that is to be used
+ * by kernel and user space.
+ *
+ * It is intended for user space to clone it verbatim to use it as a
+ * primary reference for definitions.
+ *
+ * Stuff intended for kernel usage as well as full protocol and stack
+ * documentation is rooted in include/net/wimax.h.
+ */
+
+#ifndef __LINUX__WIMAX_H__
+#define __LINUX__WIMAX_H__
+
+#include <linux/types.h>
+
+enum {
+ /**
+ * Version of the interface (unsigned decimal, MMm, max 25.5)
+ * M - Major: change if removing or modifying an existing call.
+ * m - minor: change when adding a new call
+ */
+ WIMAX_GNL_VERSION = 01,
+ /* Generic NetLink attributes */
+ WIMAX_GNL_ATTR_INVALID = 0x00,
+ WIMAX_GNL_ATTR_MAX = 10,
+};
+
+
+/*
+ * Generic NetLink operations
+ *
+ * Most of these map to an API call; _OP_ stands for operation, _RP_
+ * for reply and _RE_ for report (aka: signal).
+ */
+enum {
+ WIMAX_GNL_OP_MSG_FROM_USER, /* User to kernel message */
+ WIMAX_GNL_OP_MSG_TO_USER, /* Kernel to user message */
+ WIMAX_GNL_OP_RFKILL, /* Run wimax_rfkill() */
+ WIMAX_GNL_OP_RESET, /* Run wimax_reset() */
+ WIMAX_GNL_RE_STATE_CHANGE, /* Report: status change */
+ WIMAX_GNL_OP_STATE_GET, /* Request for current state */
+};
+
+
+/* Message from user / to user */
+enum {
+ WIMAX_GNL_MSG_IFIDX = 1,
+ WIMAX_GNL_MSG_PIPE_NAME,
+ WIMAX_GNL_MSG_DATA,
+};
+
+
+/*
+ * wimax_rfkill()
+ *
+ * The state of the radio (ON/OFF) is mapped to the rfkill subsystem's
+ * switch state (DISABLED/ENABLED).
+ */
+enum wimax_rf_state {
+ WIMAX_RF_OFF = 0, /* Radio is off, rfkill on/enabled */
+ WIMAX_RF_ON = 1, /* Radio is on, rfkill off/disabled */
+ WIMAX_RF_QUERY = 2,
+};
+
+/* Attributes */
+enum {
+ WIMAX_GNL_RFKILL_IFIDX = 1,
+ WIMAX_GNL_RFKILL_STATE,
+};
+
+
+/* Attributes for wimax_reset() */
+enum {
+ WIMAX_GNL_RESET_IFIDX = 1,
+};
+
+/* Attributes for wimax_state_get() */
+enum {
+ WIMAX_GNL_STGET_IFIDX = 1,
+};
+
+/*
+ * Attributes for the Report State Change
+ *
+ * For now we just have the old and new states; new attributes might
+ * be added later on.
+ */
+enum {
+ WIMAX_GNL_STCH_IFIDX = 1,
+ WIMAX_GNL_STCH_STATE_OLD,
+ WIMAX_GNL_STCH_STATE_NEW,
+};
+
+
+/**
+ * enum wimax_st - The different states of a WiMAX device
+ * @__WIMAX_ST_NULL: The device structure has been allocated and zeroed,
+ * but still wimax_dev_add() hasn't been called. There is no state.
+ *
+ * @WIMAX_ST_DOWN: The device has been registered with the WiMAX and
+ * networking stacks, but it is not initialized (normally that is
+ * done with 'ifconfig DEV up' [or equivalent], which can upload
+ * firmware and enable communications with the device).
+ * In this state, the device is powered down and uses as little
+ * power as possible.
+ * This state is the default after a call to wimax_dev_add(). It
+ * is ok to have drivers move directly to %WIMAX_ST_UNINITIALIZED
+ * or %WIMAX_ST_RADIO_OFF in _probe() after the call to
+ * wimax_dev_add().
+ * It is recommended that the driver leaves this state when
+ * calling 'ifconfig DEV up' and enters it back on 'ifconfig DEV
+ * down'.
+ *
+ * @__WIMAX_ST_QUIESCING: The device is being torn down, so no API
+ * operations are allowed to proceed except the ones needed to
+ * complete the device clean up process.
+ *
+ * @WIMAX_ST_UNINITIALIZED: [optional] Communication with the device
+ * is setup, but the device still requires some configuration
+ * before being operational.
+ * Some WiMAX API calls might work.
+ *
+ * @WIMAX_ST_RADIO_OFF: The device is fully up; radio is off (whether
+ * by hardware or software switches).
+ * It is recommended to always leave the device in this state
+ * after initialization.
+ *
+ * @WIMAX_ST_READY: The device is fully up and radio is on.
+ *
+ * @WIMAX_ST_SCANNING: [optional] The device has been instructed to
+ * scan. In this state, the device cannot be actively connected to
+ * a network.
+ *
+ * @WIMAX_ST_CONNECTING: The device is connecting to a network. This
+ * state exists because in some devices, the connect process can
+ * include a number of negotiations between user space, kernel
+ * space and the device. User space needs to know what the device
+ * is doing. If the connect sequence in a device is atomic and
+ * fast, the device can transition directly to CONNECTED.
+ *
+ * @WIMAX_ST_CONNECTED: The device is connected to a network.
+ *
+ * @__WIMAX_ST_INVALID: This is an invalid state used to mark the
+ * maximum numeric value of states.
+ *
+ * Description:
+ *
+ * Transitions from one state to another one are atomic and can only
+ * be caused in kernel space with wimax_state_change(). To read the
+ * state, use wimax_state_get().
+ *
+ * States starting with __ are internal and shall not be used or
+ * referred to by drivers or userspace. They look ugly, but that's the
+ * point -- if any use is made non-internal to the stack, it is easier
+ * to catch on review.
+ *
+ * All API operations [with well defined exceptions] will take the
+ * device mutex before starting and then check the state. If the state
+ * is %__WIMAX_ST_NULL, %WIMAX_ST_DOWN, %WIMAX_ST_UNINITIALIZED or
+ * %__WIMAX_ST_QUIESCING, it will drop the lock and quit with
+ * -%EINVAL, -%ENOMEDIUM, -%ENOTCONN or -%ESHUTDOWN.
+ *
+ * The order of the definitions is important, so we can do numerical
+ * comparisons (eg: < %WIMAX_ST_RADIO_OFF means the device is not ready
+ * to operate).
+ */
+/*
+ * The allowed state transitions are described in the table below
+ * (states in rows can go to states in columns where there is an X):
+ *
+ * UNINI RADIO READY SCAN CONNEC CONNEC
+ * NULL DOWN QUIESCING TIALIZED OFF NING TING TED
+ * NULL - x
+ * DOWN - x x x
+ * QUIESCING x -
+ * UNINITIALIZED x - x
+ * RADIO_OFF x - x
+ * READY x x - x x x
+ * SCANNING x x x - x x
+ * CONNECTING x x x x - x
+ * CONNECTED x x x -
+ *
+ * This table not available in kernel-doc because the formatting messes it up.
+ */
+enum wimax_st {
+ __WIMAX_ST_NULL = 0,
+ WIMAX_ST_DOWN,
+ __WIMAX_ST_QUIESCING,
+ WIMAX_ST_UNINITIALIZED,
+ WIMAX_ST_RADIO_OFF,
+ WIMAX_ST_READY,
+ WIMAX_ST_SCANNING,
+ WIMAX_ST_CONNECTING,
+ WIMAX_ST_CONNECTED,
+ __WIMAX_ST_INVALID /* Always keep last */
+};
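+
+/*
+ * Illustrative sketch of the numeric-comparison idiom described
+ * above (hypothetical snippet; wimax_state_get() is the kernel-side
+ * accessor declared in net-wimax.h):
+ *
+ * if (wimax_state_get(wimax_dev) < WIMAX_ST_RADIO_OFF)
+ *         return -ENOMEDIUM;   ...device not ready to operate...
+ */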
+
+
+#endif /* #ifndef __LINUX__WIMAX_H__ */
diff --git a/drivers/staging/wimax/net-wimax.h b/drivers/staging/wimax/net-wimax.h
new file mode 100644
index 000000000000..f578e345e2bd
--- /dev/null
+++ b/drivers/staging/wimax/net-wimax.h
@@ -0,0 +1,503 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Linux WiMAX
+ * Kernel space API for accessing WiMAX devices
+ *
+ * Copyright (C) 2007-2008 Intel Corporation <linux-wimax@intel.com>
+ * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
+ *
+ * The WiMAX stack provides an API for controlling and managing the
+ * system's WiMAX devices. This API affects the control plane; the
+ * data plane is accessed via the network stack (netdev).
+ *
+ * Parts of the WiMAX stack API and notifications are exported to
+ * user space via Generic Netlink. In user space, libwimax (part of
+ * the wimax-tools package) provides a shim layer for accessing those
+ * calls.
+ *
+ * The API is standardized for all WiMAX devices and different drivers
+ * implement the backend support for it. However, device-specific
+ * messaging pipes are provided that can be used to issue commands and
+ * receive notifications in free form.
+ *
+ * Currently the messaging pipes are the only means of control as it
+ * is not known (due to the lack of more devices in the market) what
+ * will be a good abstraction layer. Expect this to change as more
+ * devices show up in the market. This API is designed to be growable in
+ * order to address this problem.
+ *
+ * USAGE
+ *
+ * Embed a `struct wimax_dev` at the beginning of the device's
+ * private structure, initialize and register it. For details, see
+ * `struct wimax_dev`'s documentation.
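+ *
+ * For instance (an illustrative sketch; 'struct mydrv' and its
+ * fields are made up):
+ *
+ * struct mydrv {
+ *         struct wimax_dev wimax_dev;   (has to be first)
+ *         ...driver private data...
+ * };
+ *
+ * wimax_dev_init(&mydrv->wimax_dev);
+ * ...fill in mydrv->wimax_dev.op_*() pointers and name...
+ * result = wimax_dev_add(&mydrv->wimax_dev, net_dev);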
+ *
+ * Once this is done, wimax-tools's libwimaxll can be used to
+ * communicate with the driver from user space. Your user space
+ * application does not have to use libwimaxll and can speak the
+ * generic netlink protocol directly if desired.
+ *
+ * Remember this is a very low level API that is used to provide all
+ * of the WiMAX features. Other daemons and services running in user space
+ * are the expected clients of it. They offer a higher level API that
+ * applications should use (an example of this is the Intel's WiMAX
+ * Network Service for the i2400m).
+ *
+ * DESIGN
+ *
+ * Although not yet set in stone, this very basic interface is
+ * mostly complete.
+ * operations are decided upon. New operations will be added to the
+ * interface with the intent of keeping backward compatibility as much
+ * as possible.
+ *
+ * This layer implements a set of calls to control a WiMAX device,
+ * exposing a frontend to the rest of the kernel and user space (via
+ * generic netlink) and a backend implementation in the driver through
+ * function pointers.
+ *
+ * WiMAX devices have a state, and a kernel-only API allows the
+ * drivers to manipulate that state. State transitions are atomic, and
+ * only some of them are allowed (see `enum wimax_st`).
+ *
+ * Most API calls will set the state automatically; in most cases
+ * drivers have to only report state changes due to external
+ * conditions.
+ *
+ * All API operations are 'atomic', serialized through a mutex in the
+ * `struct wimax_dev`.
+ *
+ * EXPORTING TO USER SPACE THROUGH GENERIC NETLINK
+ *
+ * The API is exported to user space using generic netlink (other
+ * methods can be added as needed).
+ *
+ * There is a Generic Netlink Family named "WiMAX", where interfaces
+ * supporting the WiMAX interface receive commands and broadcast their
+ * signals over a multicast group named "msg".
+ *
+ * Mapping to the source/destination interface is done by an interface
+ * index attribute.
+ *
+ * For user-to-kernel traffic (commands) we use a function call
+ * marshalling mechanism, where a message X with attributes A, B, C
+ * sent from user space to kernel space means executing the WiMAX API
+ * call wimax_X(A, B, C), sending the results back as a message.
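+ *
+ * For example (illustrative): a WIMAX_GNL_OP_RFKILL command carrying
+ * the WIMAX_GNL_RFKILL_IFIDX and WIMAX_GNL_RFKILL_STATE attributes
+ * makes the stack call wimax_rfkill() on the device matching that
+ * interface index.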
+ *
+ * Kernel-to-user (notifications or signals) communication is sent
+ * over multicast groups. This allows multiple applications to
+ * monitor them.
+ *
+ * Each command/signal gets assigned its own attribute policy. This
+ * way the validator will verify that only the attributes that belong
+ * to each command/signal are present. Think of an attribute as
+ * mapping to a type+argument name for each command/signal.
+ *
+ * If we had a single policy for *all* commands/signals, after running
+ * the validator we'd have to check "does this attribute belong in
+ * here"? for each one. It can be done manually, but it's just easier
+ * to have the validator do that job with multiple policies. As well,
+ * it makes it easier to later expand each command/signal signature
+ * without affecting others and keeping the namespace more or less
+ * sane. Not that it is too complicated, but it makes it even easier.
+ *
+ * No state information is maintained in the kernel for each user
+ * space connection (the connection is stateless).
+ *
+ * TESTING FOR THE INTERFACE AND VERSIONING
+ *
+ * If network interface X is a WiMAX device, there will be a Generic
+ * Netlink family named "WiMAX X" and the device will present a
+ * "wimax" directory in it's network sysfs directory
+ * (/sys/class/net/DEVICE/wimax) [used by HAL].
+ *
+ * The absence of either of these means the device does not support
+ * this WiMAX API.
+ *
+ * By querying the generic netlink controller, versioning information
+ * and the multicast groups available can be found. Applications using
+ * the interface can either rely on that or use the generic netlink
+ * controller to figure out which generic netlink commands/signals are
+ * supported.
+ *
+ * NOTE: this versioning is a last resort to avoid hard
+ * incompatibilities. It is the intention of the design of this
+ * stack not to introduce backward incompatible changes.
+ *
+ * The version code has to fit in one byte (restrictions imposed by
+ * generic netlink); we use `version / 10` for the major version and
+ * `version % 10` for the minor. This gives ten minors (0-9) for each
+ * major and 25 majors.
+ *
+ * The version change protocol is as follow:
+ *
+ * - Major versions: needs to be increased if an existing message/API
+ * call is changed or removed. Doesn't need to be changed if a new
+ * message is added.
+ *
+ * - Minor version: needs to be increased if new messages/API calls are
+ * being added or some other consideration that doesn't impact the
+ * user-kernel interface too much (like some kind of bug fix) and
+ * that is kind of left up in the air to common sense.
+ *
+ * User space code should not try to work if the major version it was
+ * compiled for differs from what the kernel offers. As well, if the
+ * minor version of the kernel interface is lower than the one user
+ * space is expecting (the one it was compiled for), the kernel
+ * might be missing API calls; user space shall be ready to handle
+ * said condition. Use the generic netlink controller operations to
+ * find which ones are supported and which not.
+ *
+ * libwimaxll:wimaxll_open() takes care of checking versions.
+ *
+ * THE OPERATIONS:
+ *
+ * Each operation is defined in its own file (drivers/net/wimax/op-*.c)
+ * for clarity. The parts needed for an operation are:
+ *
+ * - a function pointer in `struct wimax_dev`: optional, as the
+ * operation might be implemented by the stack and not by the
+ * driver.
+ *
+ * All function pointers are named wimax_dev->op_*(), and drivers
+ * must implement them except where noted otherwise.
+ *
+ * - When exported to user space, a `struct nla_policy` to define the
+ * attributes of the generic netlink command and a `struct genl_ops`
+ * to define the operation.
+ *
+ * All the declarations for the operation codes (WIMAX_GNL_OP_<NAME>)
+ * and generic netlink attributes (WIMAX_GNL_<NAME>_*) are declared in
+ * include/linux/wimax.h; this file is intended to be cloned by user
+ * space to gain access to those declarations.
+ *
+ * A few caveats to remember:
+ *
+ * - Need to define attribute numbers starting in 1; otherwise it
+ * fails.
+ *
+ * - the `struct genl_family` requires a maximum attribute id; when
+ * defining the `struct nla_policy` for each message, it has to have
+ * an array size of WIMAX_GNL_ATTR_MAX+1.
+ *
+ * The op_*() function pointers will not be called if the wimax_dev is
+ * in a state <= %WIMAX_ST_UNINITIALIZED. The exception is:
+ *
+ * - op_reset: can be called at any time after wimax_dev_add() has
+ * been called.
+ *
+ * THE PIPE INTERFACE:
+ *
+ * This interface is kept intentionally simple. The driver can send
+ * and receive free-form messages to/from user space through a
+ * pipe. See drivers/net/wimax/op-msg.c for details.
+ *
+ * The kernel-to-user messages are sent with
+ * wimax_msg(). user-to-kernel messages are delivered via
+ * wimax_dev->op_msg_from_user().
+ *
+ * RFKILL:
+ *
+ * RFKILL support is built into the wimax_dev layer; the driver just
+ * needs to call wimax_report_rfkill_{hw,sw}() to inform of changes in
+ * the hardware or software RF kill switches. When the stack wants to
+ * turn the radio off, it will call wimax_dev->op_rfkill_sw_toggle(),
+ * which the driver implements.
+ *
+ * User space can set the software RF Kill switch by calling
+ * wimax_rfkill().
+ *
+ * The code for now only supports devices that don't require polling;
+ * if the device needs to be polled, create a self-rearming delayed
+ * work struct for polling or look into adding polled support to the
+ * WiMAX stack.
+ *
+ * When initializing the hardware (_probe), after calling
+ * wimax_dev_add(), query the device for the status of its RF kill
+ * switches and feed it back to the WiMAX stack using
+ * wimax_report_rfkill_{hw,sw}(). If any switch is missing, always
+ * report it as ON.
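+ *
+ * A probe-time sketch of that rule (hypothetical query helpers):
+ *
+ *	result = wimax_dev_add(&priv->wimax_dev, net_dev);
+ *	if (result < 0)
+ *		goto error;
+ *	wimax_report_rfkill_hw(&priv->wimax_dev, my_query_hw_switch(priv));
+ *	wimax_report_rfkill_sw(&priv->wimax_dev, my_query_sw_switch(priv));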
+ *
+ * NOTE: the wimax stack uses an inverted terminology to that of the
+ * RFKILL subsystem:
+ *
+ * - ON: radio is ON, RFKILL is DISABLED or OFF.
+ * - OFF: radio is OFF, RFKILL is ENABLED or ON.
+ *
+ * MISCELLANEOUS OPS:
+ *
+ * wimax_reset() can be used to reset the device to power on state; by
+ * default it issues a warm reset that maintains the same device
+ * node. If that is not possible, it falls back to a cold reset
+ * (device reconnect). The driver implements the backend to this
+ * through wimax_dev->op_reset().
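+ *
+ * A backend sketch (hypothetical reset helpers; the return convention
+ * is the contract described above):
+ *
+ *	static int my_op_reset(struct wimax_dev *wimax_dev)
+ *	{
+ *		if (my_warm_reset(wimax_dev) == 0)
+ *			return 0;
+ *		my_cold_reset(wimax_dev);
+ *		return -ENODEV;
+ *	}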
+ */
+
+#ifndef __NET__WIMAX_H__
+#define __NET__WIMAX_H__
+
+#include "linux-wimax.h"
+#include <net/genetlink.h>
+#include <linux/netdevice.h>
+
+struct net_device;
+struct genl_info;
+struct wimax_dev;
+
+/**
+ * struct wimax_dev - Generic WiMAX device
+ *
+ * @net_dev: [fill] Pointer to the &struct net_device this WiMAX
+ * device implements.
+ *
+ * @op_msg_from_user: [fill] Driver-specific operation to
+ * handle a raw message from user space to the driver. The
+ * driver can send messages to user space with wimax_msg().
+ *
+ * @op_rfkill_sw_toggle: [fill] Driver-specific operation to act on
+ * userspace (or any other agent) requesting the WiMAX device to
+ * change the RF Kill software switch (WIMAX_RF_ON or
+ * WIMAX_RF_OFF).
+ * If such support is not present, it is assumed the radio cannot
+ * be switched off in software and is always on (the stack will
+ * error out when trying to switch it off). In that case, this
+ * function pointer can be left as NULL.
+ *
+ * @op_reset: [fill] Driver specific operation to reset the
+ * device.
+ * This operation should always attempt first a warm reset that
+ * does not disconnect the device from the bus and return 0.
+ * If that fails, it should resort to some sort of cold or bus
+ * reset (even if it implies a bus disconnection and device
+ * disappearance). In that case, -ENODEV should be returned to
+ * indicate the device is gone.
+ * This operation has to be synchronous and return only when the
+ * reset is complete. Only if the driver had to resort to a
+ * bus/cold reset implying a device disconnection is the call
+ * allowed to return immediately.
+ * NOTE: wimax_dev->mutex is NOT locked when this op is being
+ * called; however, wimax_dev->mutex_reset IS locked to ensure
+ * serialization of calls to wimax_reset().
+ * See wimax_reset()'s documentation.
+ *
+ * @name: [fill] A way to identify this device. We need to register a
+ * name with many subsystems (rfkill, workqueue creation, etc).
+ * We can't use the network device name as that
+ * might change and in some instances we don't know it yet (until
+ * we call register_netdev()). So we generate a unique one
+ * using the driver name and device bus id, place it here and use
+ * it across the board. Recommended naming:
+ * DRIVERNAME-BUSNAME:BUSID (dev->bus->name, dev->bus_id).
+ *
+ * @id_table_node: [private] link to the list of wimax devices kept by
+ * id-table.c. Protected by its own spinlock.
+ *
+ * @mutex: [private] Serializes all concurrent access and execution of
+ * operations.
+ *
+ * @mutex_reset: [private] Serializes reset operations. Needs to be a
+ * different mutex because as part of the reset operation, the
+ * driver has to call back into the stack to do things such as
+ * state change, that require wimax_dev->mutex.
+ *
+ * @state: [private] Current state of the WiMAX device.
+ *
+ * @rfkill: [private] integration into the RF-Kill infrastructure.
+ *
+ * @rf_sw: [private] State of the software radio switch (OFF/ON)
+ *
+ * @rf_hw: [private] State of the hardware radio switch (OFF/ON)
+ *
+ * @debugfs_dentry: [private] Used to hook up a debugfs entry. This
+ * shows up in the debugfs root as wimax\:DEVICENAME.
+ *
+ * Description:
+ * This structure defines a common interface to access all WiMAX
+ * devices from different vendors and provides a common API as well as
+ * a free-form device-specific messaging channel.
+ *
+ * Usage:
+ * 1. Embed a &struct wimax_dev at *the beginning of* the network
+ * device structure so that netdev_priv() points to it (see the
+ * example after this structure's definition).
+ *
+ * 2. memset() it to zero
+ *
+ * 3. Initialize with wimax_dev_init(). This will leave the WiMAX
+ * device in the %__WIMAX_ST_NULL state.
+ *
+ * 4. Fill all the fields marked with [fill]; once wimax_dev_add()
+ * has been called, those fields CANNOT be modified.
+ *
+ * 5. Call wimax_dev_add() *after* registering the network
+ * device. This will leave the WiMAX device in the %WIMAX_ST_DOWN
+ * state.
+ * Protect the driver's net_device->open() against succeeding if
+ * the wimax device state is lower than %WIMAX_ST_DOWN.
+ *
+ * 6. Select when the device is going to be turned on/initialized;
+ * for example, it could be initialized on 'ifconfig up' (when the
+ * netdev op 'open()' is called on the driver).
+ *
+ * When the device is initialized (at `ifconfig up` time, or right
+ * after calling wimax_dev_add() from _probe()), make sure the
+ * following steps are taken:
+ *
+ * a. Move the device to %WIMAX_ST_UNINITIALIZED. This is needed so
+ * some API calls that shouldn't work until the device is ready
+ * can be blocked.
+ *
+ * b. Initialize the device. Make sure to turn the SW radio switch
+ * off and move the device to state %WIMAX_ST_RADIO_OFF when
+ * done. When just initialized, a device should be left in RADIO
+ * OFF state until user space decides to turn it on.
+ *
+ * c. Query the device for the state of the hardware rfkill switch
+ * and call wimax_report_rfkill_hw() and wimax_report_rfkill_sw()
+ * as needed; see below and the condensed sketch that follows.
+ *
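+ * A condensed sketch of the sequence (hypothetical driver helpers;
+ * error handling trimmed). At probe time:
+ *
+ *	memset(priv, 0, sizeof(*priv));
+ *	wimax_dev_init(&priv->wimax_dev);
+ *	...fill the [fill] fields: net_dev, name, op_*()...
+ *	register_netdev(net_dev);
+ *	wimax_dev_add(&priv->wimax_dev, net_dev);
+ *
+ * Then, at initialization ('ifconfig up') time:
+ *
+ *	wimax_state_change(&priv->wimax_dev, WIMAX_ST_UNINITIALIZED);
+ *	my_hw_init(priv);
+ *	wimax_state_change(&priv->wimax_dev, WIMAX_ST_RADIO_OFF);
+ *	wimax_report_rfkill_hw(&priv->wimax_dev, my_query_hw_switch(priv));
+ *	wimax_report_rfkill_sw(&priv->wimax_dev, my_query_sw_switch(priv));
+ *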
+ * wimax_dev_rm() undoes all this setup; call it before unregistering
+ * the network device. Once wimax_dev_add() has been called, the
+ * driver can get called through the wimax_dev->op_*() function
+ * pointers.
+ *
+ * CONCURRENCY:
+ *
+ * The stack provides a mutex for each device that will disallow API
+ * calls happening concurrently; thus, op calls into the driver
+ * through the wimax_dev->op*() function pointers will always be
+ * serialized and *never* concurrent.
+ *
+ * For locking, wimax_dev->mutex is taken; (most) operations in
+ * the API check wimax_dev_is_ready() and only continue when it
+ * returns 0 (this is done internally).
+ *
+ * REFERENCE COUNTING:
+ *
+ * The WiMAX device is reference counted by the associated network
+ * device. The only operation that can be used to reference the device
+ * is wimax_dev_get_by_genl_info(), and the reference it acquires has
+ * to be released with dev_put(wimax_dev->net_dev).
+ *
+ * RFKILL:
+ *
+ * At startup, both HW and SW radio switches are assumed to be off.
+ *
+ * At initialization time [after calling wimax_dev_add()], have the
+ * driver query the device for the status of the software and hardware
+ * RF kill switches and call wimax_report_rfkill_hw() and
+ * wimax_report_rfkill_sw() to indicate their state. If any is
+ * missing, just call it to indicate it is ON (radio always on).
+ *
+ * Whenever the driver detects a change in the state of the RF kill
+ * switches, it should call wimax_report_rfkill_hw() or
+ * wimax_report_rfkill_sw() to report it to the stack.
+ */
+struct wimax_dev {
+ struct net_device *net_dev;
+ struct list_head id_table_node;
+ struct mutex mutex; /* Protects all members and API calls */
+ struct mutex mutex_reset;
+ enum wimax_st state;
+
+ int (*op_msg_from_user)(struct wimax_dev *wimax_dev,
+ const char *,
+ const void *, size_t,
+ const struct genl_info *info);
+ int (*op_rfkill_sw_toggle)(struct wimax_dev *wimax_dev,
+ enum wimax_rf_state);
+ int (*op_reset)(struct wimax_dev *wimax_dev);
+
+ struct rfkill *rfkill;
+ unsigned int rf_hw;
+ unsigned int rf_sw;
+ char name[32];
+
+ struct dentry *debugfs_dentry;
+};
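+
+/*
+ * Example of embedding (hypothetical driver): the wimax_dev placed at
+ * the very beginning of the private data, so netdev_priv() can be
+ * used as a &struct wimax_dev pointer:
+ *
+ *	struct my_wimax_priv {
+ *		struct wimax_dev wimax_dev;	(must be first)
+ *		...driver-specific state...
+ *	};
+ */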
+
+
+
+/*
+ * WiMAX stack public API for device drivers
+ * -----------------------------------------
+ *
+ * These functions are not exported to user space.
+ */
+void wimax_dev_init(struct wimax_dev *);
+int wimax_dev_add(struct wimax_dev *, struct net_device *);
+void wimax_dev_rm(struct wimax_dev *);
+
+static inline
+struct wimax_dev *net_dev_to_wimax(struct net_device *net_dev)
+{
+ return netdev_priv(net_dev);
+}
+
+static inline
+struct device *wimax_dev_to_dev(struct wimax_dev *wimax_dev)
+{
+ return wimax_dev->net_dev->dev.parent;
+}
+
+void wimax_state_change(struct wimax_dev *, enum wimax_st);
+enum wimax_st wimax_state_get(struct wimax_dev *);
+
+/*
+ * Radio Switch state reporting.
+ *
+ * enum wimax_rf_state is declared in linux-wimax.h so the exports
+ * to user space can use it.
+ */
+void wimax_report_rfkill_hw(struct wimax_dev *, enum wimax_rf_state);
+void wimax_report_rfkill_sw(struct wimax_dev *, enum wimax_rf_state);
+
+
+/*
+ * Free-form messaging to/from user space
+ *
+ * Sending a message:
+ *
+ * wimax_msg(wimax_dev, pipe_name, buf, buf_size, GFP_KERNEL);
+ *
+ * Broken up:
+ *
+ * skb = wimax_msg_alloc(wimax_dev, pipe_name, buf_size, GFP_KERNEL);
+ * ...fill up skb...
+ * wimax_msg_send(wimax_dev, pipe_name, skb);
+ *
+ * Be sure not to modify skb->data in the middle (ie: don't use
+ * skb_push()/skb_pull()/skb_reserve() on the skb).
+ *
+ * "pipe_name" is any string, that can be interpreted as the name of
+ * the pipe or recipient; the interpretation of it is driver
+ * specific, so the recipient can multiplex it as wished. It can be
+ * NULL, it won't be used - an example is using a "diagnostics" tag to
+ * send diagnostics information that a device-specific diagnostics
+ * tool would be interested in.
+ */
+struct sk_buff *wimax_msg_alloc(struct wimax_dev *, const char *, const void *,
+ size_t, gfp_t);
+int wimax_msg_send(struct wimax_dev *, struct sk_buff *);
+int wimax_msg(struct wimax_dev *, const char *, const void *, size_t, gfp_t);
+
+const void *wimax_msg_data_len(struct sk_buff *, size_t *);
+const void *wimax_msg_data(struct sk_buff *);
+ssize_t wimax_msg_len(struct sk_buff *);
+
+
+/*
+ * WiMAX stack user space API
+ * --------------------------
+ *
+ * This API is what gets exported to user space for general
+ * operations. As well, they can be called from within the kernel
+ * (with a properly referenced `struct wimax_dev`).
+ *
+ * Properly referenced means: the 'struct net_device' that embeds the
+ * device's control structure and (as such) the 'struct wimax_dev' is
+ * referenced by the caller.
+ */
+int wimax_rfkill(struct wimax_dev *, enum wimax_rf_state);
+int wimax_reset(struct wimax_dev *);
+
+#endif /* #ifndef __NET__WIMAX_H__ */
diff --git a/drivers/staging/wimax/op-msg.c b/drivers/staging/wimax/op-msg.c
new file mode 100644
index 000000000000..e20ac7d84e82
--- /dev/null
+++ b/drivers/staging/wimax/op-msg.c
@@ -0,0 +1,391 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Linux WiMAX
+ * Generic messaging interface between userspace and driver/device
+ *
+ * Copyright (C) 2007-2008 Intel Corporation <linux-wimax@intel.com>
+ * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
+ *
+ * This implements a direct communication channel between user space and
+ * the driver/device, by which free form messages can be sent back and
+ * forth.
+ *
+ * This is intended for device-specific features, vendor quirks, etc.
+ *
+ * See net-wimax.h
+ *
+ * GENERIC NETLINK ENCODING AND CAPACITY
+ *
+ * A destination "pipe name" is added to each message; it is up to the
+ * drivers to assign or use those names (if using them at all).
+ *
+ * Messages are encoded as a binary netlink attribute using nla_put()
+ * using type NLA_UNSPEC (as some versions of libnl still in
+ * deployment don't yet understand NLA_BINARY).
+ *
+ * The maximum capacity of this transport is PAGESIZE per message (so
+ * the actual payload will be a bit smaller depending on the
+ * netlink/generic netlink attributes and headers).
+ *
+ * RECEPTION OF MESSAGES
+ *
+ * When a message is received from user space, it is passed verbatim
+ * to the driver calling wimax_dev->op_msg_from_user(). The return
+ * value from this function is passed back to user space as an ack
+ * over the generic netlink protocol.
+ *
+ * The stack doesn't do any processing or interpretation of these
+ * messages.
+ *
+ * SENDING MESSAGES
+ *
+ * Messages can be sent with wimax_msg().
+ *
+ * If the message delivery needs to happen on a different context to
+ * that of its creation, wimax_msg_alloc() can be used to get a
+ * pointer to the message that can be delivered later on with
+ * wimax_msg_send().
+ *
+ * ROADMAP
+ *
+ * wimax_gnl_doit_msg_from_user() Process a message from user space
+ * wimax_dev_get_by_genl_info()
+ * wimax_dev->op_msg_from_user() Delivery of message to the driver
+ *
+ * wimax_msg() Send a message to user space
+ * wimax_msg_alloc()
+ * wimax_msg_send()
+ */
+#include <linux/device.h>
+#include <linux/slab.h>
+#include <net/genetlink.h>
+#include <linux/netdevice.h>
+#include "linux-wimax.h"
+#include <linux/security.h>
+#include <linux/export.h>
+#include "wimax-internal.h"
+
+
+#define D_SUBMODULE op_msg
+#include "debug-levels.h"
+
+
+/**
+ * wimax_msg_alloc - Create a new skb for sending a message to userspace
+ *
+ * @wimax_dev: WiMAX device descriptor
+ * @pipe_name: "named pipe" the message will be sent to
+ * @msg: pointer to the message data to send
+ * @size: size of the message to send (in bytes), including the header.
+ * @gfp_flags: flags for memory allocation.
+ *
+ * Returns: a pointer to the new skb if ok; on error, an ERR_PTR()
+ * encoding a negative errno code (check it with IS_ERR()).
+ *
+ * Description:
+ *
+ * Allocates an skb that will contain the message to send to user
+ * space over the messaging pipe and initializes it, copying the
+ * payload.
+ *
+ * Once this call is done, you can deliver it with
+ * wimax_msg_send().
+ *
+ * IMPORTANT:
+ *
+ * Don't use skb_push()/skb_pull()/skb_reserve() on the skb, as
+ * wimax_msg_send() depends on skb->data being placed at the
+ * beginning of the user message.
+ *
+ * Unlike other WiMAX stack calls, this call can be used way early,
+ * even before wimax_dev_add() is called, as long as the
+ * wimax_dev->net_dev pointer is set to point to a proper
+ * net_dev. This is so that drivers can use it early in case they need
+ * to send stuff around or communicate with user space.
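+ *
+ * A minimal usage sketch (error handling trimmed):
+ *
+ *	skb = wimax_msg_alloc(wimax_dev, "diagnostics", buf, size,
+ *			      GFP_KERNEL);
+ *	if (IS_ERR(skb))
+ *		return PTR_ERR(skb);
+ *	result = wimax_msg_send(wimax_dev, skb);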
+ */
+struct sk_buff *wimax_msg_alloc(struct wimax_dev *wimax_dev,
+ const char *pipe_name,
+ const void *msg, size_t size,
+ gfp_t gfp_flags)
+{
+ int result;
+ struct device *dev = wimax_dev_to_dev(wimax_dev);
+ size_t msg_size;
+ void *genl_msg;
+ struct sk_buff *skb;
+
+ msg_size = nla_total_size(size)
+ + nla_total_size(sizeof(u32))
+ + (pipe_name ? nla_total_size(strlen(pipe_name)) : 0);
+ result = -ENOMEM;
+ skb = genlmsg_new(msg_size, gfp_flags);
+ if (skb == NULL)
+ goto error_new;
+ genl_msg = genlmsg_put(skb, 0, 0, &wimax_gnl_family,
+ 0, WIMAX_GNL_OP_MSG_TO_USER);
+ if (genl_msg == NULL) {
+ dev_err(dev, "no memory to create generic netlink message\n");
+ goto error_genlmsg_put;
+ }
+ result = nla_put_u32(skb, WIMAX_GNL_MSG_IFIDX,
+ wimax_dev->net_dev->ifindex);
+ if (result < 0) {
+ dev_err(dev, "no memory to add ifindex attribute\n");
+ goto error_nla_put;
+ }
+ if (pipe_name) {
+ result = nla_put_string(skb, WIMAX_GNL_MSG_PIPE_NAME,
+ pipe_name);
+ if (result < 0) {
+ dev_err(dev, "no memory to add pipe_name attribute\n");
+ goto error_nla_put;
+ }
+ }
+ result = nla_put(skb, WIMAX_GNL_MSG_DATA, size, msg);
+ if (result < 0) {
+ dev_err(dev, "no memory to add payload (msg %p size %zu) in "
+ "attribute: %d\n", msg, size, result);
+ goto error_nla_put;
+ }
+ genlmsg_end(skb, genl_msg);
+ return skb;
+
+error_nla_put:
+error_genlmsg_put:
+error_new:
+ nlmsg_free(skb);
+ return ERR_PTR(result);
+}
+EXPORT_SYMBOL_GPL(wimax_msg_alloc);
+
+
+/**
+ * wimax_msg_data_len - Return a pointer and size of a message's payload
+ *
+ * @msg: Pointer to a message created with wimax_msg_alloc()
+ * @size: Pointer to where to store the message's size
+ *
+ * Returns the pointer to the message data.
+ */
+const void *wimax_msg_data_len(struct sk_buff *msg, size_t *size)
+{
+ struct nlmsghdr *nlh = (void *) msg->head;
+ struct nlattr *nla;
+
+ nla = nlmsg_find_attr(nlh, sizeof(struct genlmsghdr),
+ WIMAX_GNL_MSG_DATA);
+ if (nla == NULL) {
+ pr_err("Cannot find attribute WIMAX_GNL_MSG_DATA\n");
+ return NULL;
+ }
+ *size = nla_len(nla);
+ return nla_data(nla);
+}
+EXPORT_SYMBOL_GPL(wimax_msg_data_len);
+
+
+/**
+ * wimax_msg_data - Return a pointer to a message's payload
+ *
+ * @msg: Pointer to a message created with wimax_msg_alloc()
+ */
+const void *wimax_msg_data(struct sk_buff *msg)
+{
+ struct nlmsghdr *nlh = (void *) msg->head;
+ struct nlattr *nla;
+
+ nla = nlmsg_find_attr(nlh, sizeof(struct genlmsghdr),
+ WIMAX_GNL_MSG_DATA);
+ if (nla == NULL) {
+ pr_err("Cannot find attribute WIMAX_GNL_MSG_DATA\n");
+ return NULL;
+ }
+ return nla_data(nla);
+}
+EXPORT_SYMBOL_GPL(wimax_msg_data);
+
+
+/**
+ * wimax_msg_len - Return a message's payload length
+ *
+ * @msg: Pointer to a message created with wimax_msg_alloc()
+ */
+ssize_t wimax_msg_len(struct sk_buff *msg)
+{
+ struct nlmsghdr *nlh = (void *) msg->head;
+ struct nlattr *nla;
+
+ nla = nlmsg_find_attr(nlh, sizeof(struct genlmsghdr),
+ WIMAX_GNL_MSG_DATA);
+ if (nla == NULL) {
+ pr_err("Cannot find attribute WIMAX_GNL_MSG_DATA\n");
+ return -EINVAL;
+ }
+ return nla_len(nla);
+}
+EXPORT_SYMBOL_GPL(wimax_msg_len);
+
+
+/**
+ * wimax_msg_send - Send a pre-allocated message to user space
+ *
+ * @wimax_dev: WiMAX device descriptor
+ *
+ * @skb: &struct sk_buff returned by wimax_msg_alloc(). Note the
+ * ownership of @skb is transferred to this function.
+ *
+ * Returns: 0 if ok, < 0 errno code on error
+ *
+ * Description:
+ *
+ * Sends a free-form message that was preallocated with
+ * wimax_msg_alloc() and filled up.
+ *
+ * Assumes that once you pass an skb to this function for sending, it
+ * owns it and will release it when done (on success).
+ *
+ * IMPORTANT:
+ *
+ * Don't use skb_push()/skb_pull()/skb_reserve() on the skb, as
+ * wimax_msg_send() depends on skb->data being placed at the
+ * beginning of the user message.
+ *
+ * Unlike other WiMAX stack calls, this call can be used way early,
+ * even before wimax_dev_add() is called, as long as the
+ * wimax_dev->net_dev pointer is set to point to a proper
+ * net_dev. This is so that drivers can use it early in case they need
+ * to send stuff around or communicate with user space.
+ */
+int wimax_msg_send(struct wimax_dev *wimax_dev, struct sk_buff *skb)
+{
+ struct device *dev = wimax_dev_to_dev(wimax_dev);
+ void *msg = skb->data;
+ size_t size = skb->len;
+ might_sleep();
+
+ d_printf(1, dev, "CTX: wimax msg, %zu bytes\n", size);
+ d_dump(2, dev, msg, size);
+ genlmsg_multicast(&wimax_gnl_family, skb, 0, 0, GFP_KERNEL);
+ d_printf(1, dev, "CTX: genl multicast done\n");
+ return 0;
+}
+EXPORT_SYMBOL_GPL(wimax_msg_send);
+
+
+/**
+ * wimax_msg - Send a message to user space
+ *
+ * @wimax_dev: WiMAX device descriptor (properly referenced)
+ * @pipe_name: "named pipe" the message will be sent to
+ * @buf: pointer to the message to send.
+ * @size: size of the buffer pointed to by @buf (in bytes).
+ * @gfp_flags: flags for memory allocation.
+ *
+ * Returns: %0 if ok, negative errno code on error.
+ *
+ * Description:
+ *
+ * Sends a free-form message to user space on the device @wimax_dev.
+ *
+ * NOTES:
+ *
+ * This function allocates an skb internally; it owns that skb and
+ * releases it when done (unless it returns an error).
+ */
+int wimax_msg(struct wimax_dev *wimax_dev, const char *pipe_name,
+ const void *buf, size_t size, gfp_t gfp_flags)
+{
+ int result = -ENOMEM;
+ struct sk_buff *skb;
+
+ skb = wimax_msg_alloc(wimax_dev, pipe_name, buf, size, gfp_flags);
+ if (IS_ERR(skb))
+ result = PTR_ERR(skb);
+ else
+ result = wimax_msg_send(wimax_dev, skb);
+ return result;
+}
+EXPORT_SYMBOL_GPL(wimax_msg);
+
+/*
+ * Relays a message from user space to the driver
+ *
+ * The skb is passed to the driver-specific function with the netlink
+ * and generic netlink headers already stripped.
+ *
+ * This call will block while handling/relaying the message.
+ */
+int wimax_gnl_doit_msg_from_user(struct sk_buff *skb, struct genl_info *info)
+{
+ int result, ifindex;
+ struct wimax_dev *wimax_dev;
+ struct device *dev;
+ struct nlmsghdr *nlh = info->nlhdr;
+ char *pipe_name;
+ void *msg_buf;
+ size_t msg_len;
+
+ might_sleep();
+ d_fnstart(3, NULL, "(skb %p info %p)\n", skb, info);
+ result = -ENODEV;
+ if (info->attrs[WIMAX_GNL_MSG_IFIDX] == NULL) {
+ pr_err("WIMAX_GNL_MSG_FROM_USER: can't find IFIDX attribute\n");
+ goto error_no_wimax_dev;
+ }
+ ifindex = nla_get_u32(info->attrs[WIMAX_GNL_MSG_IFIDX]);
+ wimax_dev = wimax_dev_get_by_genl_info(info, ifindex);
+ if (wimax_dev == NULL)
+ goto error_no_wimax_dev;
+ dev = wimax_dev_to_dev(wimax_dev);
+
+ /* Unpack arguments */
+ result = -EINVAL;
+ if (info->attrs[WIMAX_GNL_MSG_DATA] == NULL) {
+ dev_err(dev, "WIMAX_GNL_MSG_FROM_USER: can't find MSG_DATA "
+ "attribute\n");
+ goto error_no_data;
+ }
+ msg_buf = nla_data(info->attrs[WIMAX_GNL_MSG_DATA]);
+ msg_len = nla_len(info->attrs[WIMAX_GNL_MSG_DATA]);
+
+ if (info->attrs[WIMAX_GNL_MSG_PIPE_NAME] == NULL)
+ pipe_name = NULL;
+ else {
+ struct nlattr *attr = info->attrs[WIMAX_GNL_MSG_PIPE_NAME];
+ size_t attr_len = nla_len(attr);
+ /* libnl-1.1 does not yet support NLA_NUL_STRING */
+ result = -ENOMEM;
+ pipe_name = kstrndup(nla_data(attr), attr_len + 1, GFP_KERNEL);
+ if (pipe_name == NULL)
+ goto error_alloc;
+ pipe_name[attr_len] = 0;
+ }
+ mutex_lock(&wimax_dev->mutex);
+ result = wimax_dev_is_ready(wimax_dev);
+ if (result == -ENOMEDIUM)
+ result = 0;
+ if (result < 0)
+ goto error_not_ready;
+ result = -ENOSYS;
+ if (wimax_dev->op_msg_from_user == NULL)
+ goto error_noop;
+
+ d_printf(1, dev,
+ "CRX: nlmsghdr len %u type %u flags 0x%04x seq 0x%x pid %u\n",
+ nlh->nlmsg_len, nlh->nlmsg_type, nlh->nlmsg_flags,
+ nlh->nlmsg_seq, nlh->nlmsg_pid);
+ d_printf(1, dev, "CRX: wimax message %zu bytes\n", msg_len);
+ d_dump(2, dev, msg_buf, msg_len);
+
+ result = wimax_dev->op_msg_from_user(wimax_dev, pipe_name,
+ msg_buf, msg_len, info);
+error_noop:
+error_not_ready:
+ mutex_unlock(&wimax_dev->mutex);
+error_alloc:
+ kfree(pipe_name);
+error_no_data:
+ dev_put(wimax_dev->net_dev);
+error_no_wimax_dev:
+ d_fnend(3, NULL, "(skb %p info %p) = %d\n", skb, info, result);
+ return result;
+}
diff --git a/drivers/staging/wimax/op-reset.c b/drivers/staging/wimax/op-reset.c
new file mode 100644
index 000000000000..b3f000cbe112
--- /dev/null
+++ b/drivers/staging/wimax/op-reset.c
@@ -0,0 +1,108 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Linux WiMAX
+ * Implement and export a method for resetting a WiMAX device
+ *
+ * Copyright (C) 2008 Intel Corporation <linux-wimax@intel.com>
+ * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
+ *
+ * This implements a simple synchronous call to reset a WiMAX device.
+ *
+ * Resets aim at being warm, keeping the device handles active;
+ * however, when that fails, it falls back to a cold reset (that will
+ * disconnect and reconnect the device).
+ */
+
+#include "net-wimax.h"
+#include <net/genetlink.h>
+#include "linux-wimax.h"
+#include <linux/security.h>
+#include <linux/export.h>
+#include "wimax-internal.h"
+
+#define D_SUBMODULE op_reset
+#include "debug-levels.h"
+
+
+/**
+ * wimax_reset - Reset a WiMAX device
+ *
+ * @wimax_dev: WiMAX device descriptor
+ *
+ * Returns:
+ *
+ * %0 if ok and a warm reset was done (the device still exists in
+ * the system).
+ *
+ * -%ENODEV if a cold/bus reset had to be done (device has
+ * disconnected and reconnected, so current handle is not valid
+ * any more).
+ *
+ * -%EINVAL if the device is not even registered.
+ *
+ * Any other negative error code shall be considered as
+ * non-recoverable.
+ *
+ * Description:
+ *
+ * Called when wanting to reset the device for any reason. Device is
+ * taken back to power on status.
+ *
+ * This call blocks; on successful return, the device has completed the
+ * reset process and is ready to operate.
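+ *
+ * A caller sketch (hypothetical helpers; on -ENODEV the current
+ * device handle is stale and must be dropped):
+ *
+ *	result = wimax_reset(wimax_dev);
+ *	if (result == -ENODEV)
+ *		my_handle_disconnect(priv);
+ *	else if (result < 0)
+ *		my_handle_fatal_error(priv, result);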
+ */
+int wimax_reset(struct wimax_dev *wimax_dev)
+{
+ int result = -EINVAL;
+ struct device *dev = wimax_dev_to_dev(wimax_dev);
+ enum wimax_st state;
+
+ might_sleep();
+ d_fnstart(3, dev, "(wimax_dev %p)\n", wimax_dev);
+ mutex_lock(&wimax_dev->mutex);
+ dev_hold(wimax_dev->net_dev);
+ state = wimax_dev->state;
+ mutex_unlock(&wimax_dev->mutex);
+
+ if (state >= WIMAX_ST_DOWN) {
+ mutex_lock(&wimax_dev->mutex_reset);
+ result = wimax_dev->op_reset(wimax_dev);
+ mutex_unlock(&wimax_dev->mutex_reset);
+ }
+ dev_put(wimax_dev->net_dev);
+
+ d_fnend(3, dev, "(wimax_dev %p) = %d\n", wimax_dev, result);
+ return result;
+}
+EXPORT_SYMBOL(wimax_reset);
+
+
+/*
+ * Exporting to user space over generic netlink
+ *
+ * Parse the reset command from user space, return error code.
+ *
+ * No attributes.
+ */
+int wimax_gnl_doit_reset(struct sk_buff *skb, struct genl_info *info)
+{
+ int result, ifindex;
+ struct wimax_dev *wimax_dev;
+
+ d_fnstart(3, NULL, "(skb %p info %p)\n", skb, info);
+ result = -ENODEV;
+ if (info->attrs[WIMAX_GNL_RESET_IFIDX] == NULL) {
+ pr_err("WIMAX_GNL_OP_RFKILL: can't find IFIDX attribute\n");
+ goto error_no_wimax_dev;
+ }
+ ifindex = nla_get_u32(info->attrs[WIMAX_GNL_RESET_IFIDX]);
+ wimax_dev = wimax_dev_get_by_genl_info(info, ifindex);
+ if (wimax_dev == NULL)
+ goto error_no_wimax_dev;
+ /* Execute the operation and send the result back to user space */
+ result = wimax_reset(wimax_dev);
+ dev_put(wimax_dev->net_dev);
+error_no_wimax_dev:
+ d_fnend(3, NULL, "(skb %p info %p) = %d\n", skb, info, result);
+ return result;
+}
diff --git a/drivers/staging/wimax/op-rfkill.c b/drivers/staging/wimax/op-rfkill.c
new file mode 100644
index 000000000000..78b294481a59
--- /dev/null
+++ b/drivers/staging/wimax/op-rfkill.c
@@ -0,0 +1,431 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Linux WiMAX
+ * RF-kill framework integration
+ *
+ * Copyright (C) 2008 Intel Corporation <linux-wimax@intel.com>
+ * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
+ *
+ * This integrates into the Linux Kernel rfkill subsystem so that the
+ * drivers just have to do the bare minimal work, which is providing a
+ * method to set the software RF-Kill switch and to report changes in
+ * the software and hardware switch status.
+ *
+ * A non-polled generic rfkill device is embedded into the WiMAX
+ * subsystem's representation of a device.
+ *
+ * FIXME: Need polled support? Let drivers provide a poll routine
+ * and hand it to rfkill ops then?
+ *
+ * All device drivers have to do, after wimax_dev_init(), is call
+ * wimax_report_rfkill_hw() and wimax_report_rfkill_sw() to update the
+ * initial state and then every time it changes. See net-wimax.h:struct
+ * wimax_dev for more information.
+ *
+ * ROADMAP
+ *
+ * wimax_gnl_doit_rfkill() User space calling wimax_rfkill()
+ * wimax_rfkill() Kernel calling wimax_rfkill()
+ * __wimax_rf_toggle_radio()
+ *
+ * wimax_rfkill_set_radio_block() RF-Kill subsystem calling
+ * __wimax_rf_toggle_radio()
+ *
+ * __wimax_rf_toggle_radio()
+ * wimax_dev->op_rfkill_sw_toggle() Driver backend
+ * __wimax_state_change()
+ *
+ * wimax_report_rfkill_sw() Driver reports state change
+ * __wimax_state_change()
+ *
+ * wimax_report_rfkill_hw() Driver reports state change
+ * __wimax_state_change()
+ *
+ * wimax_rfkill_add() Initialize/shutdown rfkill support
+ * wimax_rfkill_rm() [called by wimax_dev_add/rm()]
+ */
+
+#include "net-wimax.h"
+#include <net/genetlink.h>
+#include "linux-wimax.h"
+#include <linux/security.h>
+#include <linux/rfkill.h>
+#include <linux/export.h>
+#include "wimax-internal.h"
+
+#define D_SUBMODULE op_rfkill
+#include "debug-levels.h"
+
+/**
+ * wimax_report_rfkill_hw - Reports changes in the hardware RF switch
+ *
+ * @wimax_dev: WiMAX device descriptor
+ *
+ * @state: New state of the RF Kill switch. %WIMAX_RF_ON radio on,
+ * %WIMAX_RF_OFF radio off.
+ *
+ * When the device detects a change in the state of the hardware RF
+ * switch, it must call this function to let the WiMAX kernel stack
+ * know that the state has changed so it can be properly propagated.
+ *
+ * The WiMAX stack caches the state (the driver doesn't need to). As
+ * well, as the change is propagated it will come back as a request to
+ * change the software state to mirror the hardware state.
+ *
+ * If the device doesn't have a hardware kill switch, just report
+ * it on initialization as always on (%WIMAX_RF_ON, radio on).
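+ *
+ * A sketch of a driver's switch-event handler (hypothetical helper):
+ *
+ *	if (my_hw_switch_is_off(priv))
+ *		wimax_report_rfkill_hw(&priv->wimax_dev, WIMAX_RF_OFF);
+ *	else
+ *		wimax_report_rfkill_hw(&priv->wimax_dev, WIMAX_RF_ON);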
+ */
+void wimax_report_rfkill_hw(struct wimax_dev *wimax_dev,
+ enum wimax_rf_state state)
+{
+ int result;
+ struct device *dev = wimax_dev_to_dev(wimax_dev);
+ enum wimax_st wimax_state;
+
+ d_fnstart(3, dev, "(wimax_dev %p state %u)\n", wimax_dev, state);
+ BUG_ON(state == WIMAX_RF_QUERY);
+ BUG_ON(state != WIMAX_RF_ON && state != WIMAX_RF_OFF);
+
+ mutex_lock(&wimax_dev->mutex);
+ result = wimax_dev_is_ready(wimax_dev);
+ if (result < 0)
+ goto error_not_ready;
+
+ if (state != wimax_dev->rf_hw) {
+ wimax_dev->rf_hw = state;
+ if (wimax_dev->rf_hw == WIMAX_RF_ON &&
+ wimax_dev->rf_sw == WIMAX_RF_ON)
+ wimax_state = WIMAX_ST_READY;
+ else
+ wimax_state = WIMAX_ST_RADIO_OFF;
+
+ result = rfkill_set_hw_state(wimax_dev->rfkill,
+ state == WIMAX_RF_OFF);
+
+ __wimax_state_change(wimax_dev, wimax_state);
+ }
+error_not_ready:
+ mutex_unlock(&wimax_dev->mutex);
+ d_fnend(3, dev, "(wimax_dev %p state %u) = void [%d]\n",
+ wimax_dev, state, result);
+}
+EXPORT_SYMBOL_GPL(wimax_report_rfkill_hw);
+
+
+/**
+ * wimax_report_rfkill_sw - Reports changes in the software RF switch
+ *
+ * @wimax_dev: WiMAX device descriptor
+ *
+ * @state: New state of the RF kill switch. %WIMAX_RF_ON radio on,
+ * %WIMAX_RF_OFF radio off.
+ *
+ * Reports changes in the software RF switch state to the WiMAX stack.
+ *
+ * The main use is during initialization, so the driver can query the
+ * device for its current software radio kill switch state and feed it
+ * to the system.
+ *
+ * In theory, the device should not change the software state by
+ * itself; in practice it can happen, as the device might decide
+ * to switch the radio off (in software) for different reasons.
+ */
+void wimax_report_rfkill_sw(struct wimax_dev *wimax_dev,
+ enum wimax_rf_state state)
+{
+ int result;
+ struct device *dev = wimax_dev_to_dev(wimax_dev);
+ enum wimax_st wimax_state;
+
+ d_fnstart(3, dev, "(wimax_dev %p state %u)\n", wimax_dev, state);
+ BUG_ON(state == WIMAX_RF_QUERY);
+ BUG_ON(state != WIMAX_RF_ON && state != WIMAX_RF_OFF);
+
+ mutex_lock(&wimax_dev->mutex);
+ result = wimax_dev_is_ready(wimax_dev);
+ if (result < 0)
+ goto error_not_ready;
+
+ if (state != wimax_dev->rf_sw) {
+ wimax_dev->rf_sw = state;
+ if (wimax_dev->rf_hw == WIMAX_RF_ON &&
+ wimax_dev->rf_sw == WIMAX_RF_ON)
+ wimax_state = WIMAX_ST_READY;
+ else
+ wimax_state = WIMAX_ST_RADIO_OFF;
+ __wimax_state_change(wimax_dev, wimax_state);
+ rfkill_set_sw_state(wimax_dev->rfkill, state == WIMAX_RF_OFF);
+ }
+error_not_ready:
+ mutex_unlock(&wimax_dev->mutex);
+ d_fnend(3, dev, "(wimax_dev %p state %u) = void [%d]\n",
+ wimax_dev, state, result);
+}
+EXPORT_SYMBOL_GPL(wimax_report_rfkill_sw);
+
+
+/*
+ * Callback for the RF Kill toggle operation
+ *
+ * This function is called by:
+ *
+ * - The rfkill subsystem when the RF-Kill key is pressed in the
+ * hardware and the driver notifies through
+ * wimax_report_rfkill_hw(). The rfkill subsystem ends up calling back
+ * here so the software RF Kill switch state is changed to reflect
+ * the hardware switch state.
+ *
+ * - When the user sets the state through sysfs' rfkill/state file
+ *
+ * - When the user calls wimax_rfkill().
+ *
+ * This call blocks!
+ *
+ * WARNING! When we call rfkill_unregister(), this will be called with
+ * state 0!
+ *
+ * WARNING: wimax_dev must be locked
+ */
+static
+int __wimax_rf_toggle_radio(struct wimax_dev *wimax_dev,
+ enum wimax_rf_state state)
+{
+ int result = 0;
+ struct device *dev = wimax_dev_to_dev(wimax_dev);
+ enum wimax_st wimax_state;
+
+ might_sleep();
+ d_fnstart(3, dev, "(wimax_dev %p state %u)\n", wimax_dev, state);
+ if (wimax_dev->rf_sw == state)
+ goto out_no_change;
+ if (wimax_dev->op_rfkill_sw_toggle != NULL)
+ result = wimax_dev->op_rfkill_sw_toggle(wimax_dev, state);
+ else if (state == WIMAX_RF_OFF) /* No op? can't turn off */
+ result = -ENXIO;
+ else /* No op? can turn on */
+ result = 0; /* should never happen tho */
+ if (result >= 0) {
+ result = 0;
+ wimax_dev->rf_sw = state;
+ wimax_state = state == WIMAX_RF_ON ?
+ WIMAX_ST_READY : WIMAX_ST_RADIO_OFF;
+ __wimax_state_change(wimax_dev, wimax_state);
+ }
+out_no_change:
+ d_fnend(3, dev, "(wimax_dev %p state %u) = %d\n",
+ wimax_dev, state, result);
+ return result;
+}
+
+
+/*
+ * Translate from rfkill state to wimax state
+ *
+ * NOTE: Special state handling rules here
+ *
+ * Just pretend the call didn't happen if we are in a state where
+ * we know for sure it cannot be handled (WIMAX_ST_DOWN or
+ * __WIMAX_ST_QUIESCING). The rfkill subsystem needs this to
+ * register and unregister, as it will run this path.
+ *
+ * NOTE: This call will block until the operation is completed.
+ */
+static int wimax_rfkill_set_radio_block(void *data, bool blocked)
+{
+ int result;
+ struct wimax_dev *wimax_dev = data;
+ struct device *dev = wimax_dev_to_dev(wimax_dev);
+ enum wimax_rf_state rf_state;
+
+ d_fnstart(3, dev, "(wimax_dev %p blocked %u)\n", wimax_dev, blocked);
+ rf_state = WIMAX_RF_ON;
+ if (blocked)
+ rf_state = WIMAX_RF_OFF;
+ mutex_lock(&wimax_dev->mutex);
+ if (wimax_dev->state <= __WIMAX_ST_QUIESCING)
+ result = 0;
+ else
+ result = __wimax_rf_toggle_radio(wimax_dev, rf_state);
+ mutex_unlock(&wimax_dev->mutex);
+ d_fnend(3, dev, "(wimax_dev %p blocked %u) = %d\n",
+ wimax_dev, blocked, result);
+ return result;
+}
+
+static const struct rfkill_ops wimax_rfkill_ops = {
+ .set_block = wimax_rfkill_set_radio_block,
+};
+
+/**
+ * wimax_rfkill - Set the software RF switch state for a WiMAX device
+ *
+ * @wimax_dev: WiMAX device descriptor
+ *
+ * @state: New RF state.
+ *
+ * Returns:
+ *
+ * >= 0 toggle state if ok, < 0 errno code on error. The toggle state
+ * is returned as a bitmap, bit 0 being the hardware RF state, bit 1
+ * the software RF state.
+ *
+ * Each bit carries the corresponding &enum wimax_rf_state value:
+ * %WIMAX_RF_ON (radio on) or %WIMAX_RF_OFF (radio off).
+ *
+ * Description:
+ *
+ * Called when the user wants to request the WiMAX radio to be
+ * switched on (%WIMAX_RF_ON) or off (%WIMAX_RF_OFF). With
+ * %WIMAX_RF_QUERY, just the current state is returned.
+ *
+ * NOTE:
+ *
+ * This call will block until the operation is complete.
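+ *
+ * A query sketch (bit layout as described above):
+ *
+ *	result = wimax_rfkill(wimax_dev, WIMAX_RF_QUERY);
+ *	if (result >= 0) {
+ *		enum wimax_rf_state hw = result & 1;
+ *		enum wimax_rf_state sw = (result >> 1) & 1;
+ *		...
+ *	}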
+ */
+int wimax_rfkill(struct wimax_dev *wimax_dev, enum wimax_rf_state state)
+{
+ int result;
+ struct device *dev = wimax_dev_to_dev(wimax_dev);
+
+ d_fnstart(3, dev, "(wimax_dev %p state %u)\n", wimax_dev, state);
+ mutex_lock(&wimax_dev->mutex);
+ result = wimax_dev_is_ready(wimax_dev);
+ if (result < 0) {
+ /* While initializing, < 1.4.3 wimax-tools versions use
+ * this call to check if the device is a valid WiMAX
+ * device; so we allow it to proceed always,
+ * considering the radios are all off. */
+ if (result == -ENOMEDIUM && state == WIMAX_RF_QUERY)
+ result = WIMAX_RF_OFF << 1 | WIMAX_RF_OFF;
+ goto error_not_ready;
+ }
+ switch (state) {
+ case WIMAX_RF_ON:
+ case WIMAX_RF_OFF:
+ result = __wimax_rf_toggle_radio(wimax_dev, state);
+ if (result < 0)
+ goto error;
+ rfkill_set_sw_state(wimax_dev->rfkill, state == WIMAX_RF_OFF);
+ break;
+ case WIMAX_RF_QUERY:
+ break;
+ default:
+ result = -EINVAL;
+ goto error;
+ }
+ result = wimax_dev->rf_sw << 1 | wimax_dev->rf_hw;
+error:
+error_not_ready:
+ mutex_unlock(&wimax_dev->mutex);
+ d_fnend(3, dev, "(wimax_dev %p state %u) = %d\n",
+ wimax_dev, state, result);
+ return result;
+}
+EXPORT_SYMBOL(wimax_rfkill);
+
+
+/*
+ * Register a new WiMAX device's RF Kill support
+ *
+ * WARNING: wimax_dev->mutex must be unlocked
+ */
+int wimax_rfkill_add(struct wimax_dev *wimax_dev)
+{
+ int result;
+ struct rfkill *rfkill;
+ struct device *dev = wimax_dev_to_dev(wimax_dev);
+
+ d_fnstart(3, dev, "(wimax_dev %p)\n", wimax_dev);
+ /* Initialize RF Kill */
+ result = -ENOMEM;
+ rfkill = rfkill_alloc(wimax_dev->name, dev, RFKILL_TYPE_WIMAX,
+ &wimax_rfkill_ops, wimax_dev);
+ if (rfkill == NULL)
+ goto error_rfkill_allocate;
+
+ d_printf(1, dev, "rfkill %p\n", rfkill);
+
+ wimax_dev->rfkill = rfkill;
+
+ rfkill_init_sw_state(rfkill, 1);
+ result = rfkill_register(wimax_dev->rfkill);
+ if (result < 0)
+ goto error_rfkill_register;
+
+ /* If there is no SW toggle op, SW RFKill is always on */
+ if (wimax_dev->op_rfkill_sw_toggle == NULL)
+ wimax_dev->rf_sw = WIMAX_RF_ON;
+
+ d_fnend(3, dev, "(wimax_dev %p) = 0\n", wimax_dev);
+ return 0;
+
+error_rfkill_register:
+ rfkill_destroy(wimax_dev->rfkill);
+error_rfkill_allocate:
+ d_fnend(3, dev, "(wimax_dev %p) = %d\n", wimax_dev, result);
+ return result;
+}
+
+
+/*
+ * Deregister a WiMAX device's RF Kill support
+ *
+ * Ick, we can't call rfkill_free() after rfkill_unregister()...oh
+ * well.
+ *
+ * WARNING: wimax_dev->mutex must be unlocked
+ */
+void wimax_rfkill_rm(struct wimax_dev *wimax_dev)
+{
+ struct device *dev = wimax_dev_to_dev(wimax_dev);
+ d_fnstart(3, dev, "(wimax_dev %p)\n", wimax_dev);
+ rfkill_unregister(wimax_dev->rfkill);
+ rfkill_destroy(wimax_dev->rfkill);
+ d_fnend(3, dev, "(wimax_dev %p)\n", wimax_dev);
+}
+
+
+/*
+ * Exporting to user space over generic netlink
+ *
+ * Parse the rfkill command from user space, return a combination
+ * value that describe the states of the different toggles.
+ *
+ * Only one attribute: the new state requested (on, off or no change,
+ * just query).
+ */
+
+int wimax_gnl_doit_rfkill(struct sk_buff *skb, struct genl_info *info)
+{
+ int result, ifindex;
+ struct wimax_dev *wimax_dev;
+ struct device *dev;
+ enum wimax_rf_state new_state;
+
+ d_fnstart(3, NULL, "(skb %p info %p)\n", skb, info);
+ result = -ENODEV;
+ if (info->attrs[WIMAX_GNL_RFKILL_IFIDX] == NULL) {
+ pr_err("WIMAX_GNL_OP_RFKILL: can't find IFIDX attribute\n");
+ goto error_no_wimax_dev;
+ }
+ ifindex = nla_get_u32(info->attrs[WIMAX_GNL_RFKILL_IFIDX]);
+ wimax_dev = wimax_dev_get_by_genl_info(info, ifindex);
+ if (wimax_dev == NULL)
+ goto error_no_wimax_dev;
+ dev = wimax_dev_to_dev(wimax_dev);
+ result = -EINVAL;
+ if (info->attrs[WIMAX_GNL_RFKILL_STATE] == NULL) {
+ dev_err(dev, "WIMAX_GNL_RFKILL: can't find RFKILL_STATE "
+ "attribute\n");
+ goto error_no_pid;
+ }
+ new_state = nla_get_u32(info->attrs[WIMAX_GNL_RFKILL_STATE]);
+
+ /* Execute the operation and send the result back to user space */
+ result = wimax_rfkill(wimax_dev, new_state);
+error_no_pid:
+ dev_put(wimax_dev->net_dev);
+error_no_wimax_dev:
+ d_fnend(3, NULL, "(skb %p info %p) = %d\n", skb, info, result);
+ return result;
+}
diff --git a/drivers/staging/wimax/op-state-get.c b/drivers/staging/wimax/op-state-get.c
new file mode 100644
index 000000000000..c5bfbed505f5
--- /dev/null
+++ b/drivers/staging/wimax/op-state-get.c
@@ -0,0 +1,52 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Linux WiMAX
+ * Implement and export a method for getting a WiMAX device current state
+ *
+ * Copyright (C) 2009 Paulius Zaleckas <paulius.zaleckas@teltonika.lt>
+ *
+ * Based on previous WiMAX core work by:
+ * Copyright (C) 2008 Intel Corporation <linux-wimax@intel.com>
+ * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
+ */
+
+#include "net-wimax.h"
+#include <net/genetlink.h>
+#include "linux-wimax.h"
+#include <linux/security.h>
+#include "wimax-internal.h"
+
+#define D_SUBMODULE op_state_get
+#include "debug-levels.h"
+
+
+/*
+ * Exporting to user space over generic netlink
+ *
+ * Parse the state get command from user space, return a combination
+ * value that describe the current state.
+ *
+ * No attributes.
+ */
+int wimax_gnl_doit_state_get(struct sk_buff *skb, struct genl_info *info)
+{
+ int result, ifindex;
+ struct wimax_dev *wimax_dev;
+
+ d_fnstart(3, NULL, "(skb %p info %p)\n", skb, info);
+ result = -ENODEV;
+ if (info->attrs[WIMAX_GNL_STGET_IFIDX] == NULL) {
+ pr_err("WIMAX_GNL_OP_STATE_GET: can't find IFIDX attribute\n");
+ goto error_no_wimax_dev;
+ }
+ ifindex = nla_get_u32(info->attrs[WIMAX_GNL_STGET_IFIDX]);
+ wimax_dev = wimax_dev_get_by_genl_info(info, ifindex);
+ if (wimax_dev == NULL)
+ goto error_no_wimax_dev;
+ /* Execute the operation and send the result back to user space */
+ result = wimax_state_get(wimax_dev);
+ dev_put(wimax_dev->net_dev);
+error_no_wimax_dev:
+ d_fnend(3, NULL, "(skb %p info %p) = %d\n", skb, info, result);
+ return result;
+}
diff --git a/drivers/staging/wimax/stack.c b/drivers/staging/wimax/stack.c
new file mode 100644
index 000000000000..ace24a6dfd2d
--- /dev/null
+++ b/drivers/staging/wimax/stack.c
@@ -0,0 +1,616 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Linux WiMAX
+ * Initialization, addition and removal of wimax devices
+ *
+ * Copyright (C) 2005-2006 Intel Corporation <linux-wimax@intel.com>
+ * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
+ *
+ * This implements:
+ *
+ * - basic life cycle of 'struct wimax_dev' [wimax_dev_*()]; on
+ * addition/registration initialize all subfields and allocate
+ * generic netlink resources for user space communication. On
+ * removal/unregistration, undo all that.
+ *
+ * - device state machine [wimax_state_change()] and support to send
+ * reports to user space when the state changes
+ * [wimax_gnl_re_state_change*()].
+ *
+ * See net-wimax.h for rationales and design.
+ *
+ * ROADMAP
+ *
+ * [__]wimax_state_change() Called by drivers to update device's state
+ * wimax_gnl_re_state_change_alloc()
+ * wimax_gnl_re_state_change_send()
+ *
+ * wimax_dev_init() Init a device
+ * wimax_dev_add() Register
+ * wimax_rfkill_add()
+ * wimax_gnl_add() Register all the generic netlink resources.
+ * wimax_id_table_add()
+ * wimax_dev_rm() Unregister
+ * wimax_id_table_rm()
+ * wimax_gnl_rm()
+ * wimax_rfkill_rm()
+ */
+#include <linux/device.h>
+#include <linux/gfp.h>
+#include <net/genetlink.h>
+#include <linux/netdevice.h>
+#include "linux-wimax.h"
+#include <linux/module.h>
+#include "wimax-internal.h"
+
+
+#define D_SUBMODULE stack
+#include "debug-levels.h"
+
+static char wimax_debug_params[128];
+module_param_string(debug, wimax_debug_params, sizeof(wimax_debug_params),
+ 0644);
+MODULE_PARM_DESC(debug,
+ "String of space-separated NAME:VALUE pairs, where NAMEs "
+ "are the different debug submodules and VALUE are the "
+ "initial debug value to set.");
+
+/*
+ * Authoritative source for the RE_STATE_CHANGE attribute policy
+ *
+ * We don't really use it here, but /me likes to keep the definition
+ * close to where the data is generated.
+ */
+/*
+static const struct nla_policy wimax_gnl_re_status_change[WIMAX_GNL_ATTR_MAX + 1] = {
+ [WIMAX_GNL_STCH_STATE_OLD] = { .type = NLA_U8 },
+ [WIMAX_GNL_STCH_STATE_NEW] = { .type = NLA_U8 },
+};
+*/
+
+
+/*
+ * Allocate a Report State Change message
+ *
+ * @header: save it, you need it for _send()
+ *
+ * Creates and fills a basic state change message; different code
+ * paths can then add more attributes to the message as needed.
+ *
+ * Use wimax_gnl_re_state_change_send() to send the returned skb.
+ *
+ * Returns: skb with the genl message if ok, an ERR_PTR() embedding
+ * a negative errno code on error.
+ */
+static
+struct sk_buff *wimax_gnl_re_state_change_alloc(
+ struct wimax_dev *wimax_dev,
+ enum wimax_st new_state, enum wimax_st old_state,
+ void **header)
+{
+ int result;
+ struct device *dev = wimax_dev_to_dev(wimax_dev);
+ void *data;
+ struct sk_buff *report_skb;
+
+ d_fnstart(3, dev, "(wimax_dev %p new_state %u old_state %u)\n",
+ wimax_dev, new_state, old_state);
+ result = -ENOMEM;
+ report_skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+ if (report_skb == NULL) {
+ dev_err(dev, "RE_STCH: can't create message\n");
+ goto error_new;
+ }
+ /* FIXME: sending a group ID as the seq is wrong */
+ data = genlmsg_put(report_skb, 0, wimax_gnl_family.mcgrp_offset,
+ &wimax_gnl_family, 0, WIMAX_GNL_RE_STATE_CHANGE);
+ if (data == NULL) {
+ dev_err(dev, "RE_STCH: can't put data into message\n");
+ goto error_put;
+ }
+ *header = data;
+
+ result = nla_put_u8(report_skb, WIMAX_GNL_STCH_STATE_OLD, old_state);
+ if (result < 0) {
+ dev_err(dev, "RE_STCH: Error adding OLD attr: %d\n", result);
+ goto error_put;
+ }
+ result = nla_put_u8(report_skb, WIMAX_GNL_STCH_STATE_NEW, new_state);
+ if (result < 0) {
+ dev_err(dev, "RE_STCH: Error adding NEW attr: %d\n", result);
+ goto error_put;
+ }
+ result = nla_put_u32(report_skb, WIMAX_GNL_STCH_IFIDX,
+ wimax_dev->net_dev->ifindex);
+ if (result < 0) {
+ dev_err(dev, "RE_STCH: Error adding IFINDEX attribute\n");
+ goto error_put;
+ }
+ d_fnend(3, dev, "(wimax_dev %p new_state %u old_state %u) = %p\n",
+ wimax_dev, new_state, old_state, report_skb);
+ return report_skb;
+
+error_put:
+ nlmsg_free(report_skb);
+error_new:
+ d_fnend(3, dev, "(wimax_dev %p new_state %u old_state %u) = %d\n",
+ wimax_dev, new_state, old_state, result);
+ return ERR_PTR(result);
+}
+
+
+/*
+ * Send a Report State Change message (as created with _alloc).
+ *
+ * @report_skb: as returned by wimax_gnl_re_state_change_alloc()
+ * @header: as returned by wimax_gnl_re_state_change_alloc()
+ *
+ * Returns: 0 if ok, < 0 errno code on error.
+ *
+ * If the message is NULL, pretend it didn't happen.
+ */
+static
+int wimax_gnl_re_state_change_send(
+ struct wimax_dev *wimax_dev, struct sk_buff *report_skb,
+ void *header)
+{
+ int result = 0;
+ struct device *dev = wimax_dev_to_dev(wimax_dev);
+ d_fnstart(3, dev, "(wimax_dev %p report_skb %p)\n",
+ wimax_dev, report_skb);
+ if (report_skb == NULL) {
+ result = -ENOMEM;
+ goto out;
+ }
+ genlmsg_end(report_skb, header);
+ genlmsg_multicast(&wimax_gnl_family, report_skb, 0, 0, GFP_KERNEL);
+out:
+ d_fnend(3, dev, "(wimax_dev %p report_skb %p) = %d\n",
+ wimax_dev, report_skb, result);
+ return result;
+}
+
+
+static
+void __check_new_state(enum wimax_st old_state, enum wimax_st new_state,
+ unsigned int allowed_states_bm)
+{
+ if (WARN_ON(((1 << new_state) & allowed_states_bm) == 0)) {
+ pr_err("SW BUG! Forbidden state change %u -> %u\n",
+ old_state, new_state);
+ }
+}
+
+
+/*
+ * Set the current state of a WiMAX device [unlocked version of
+ * wimax_state_change(); the caller must hold wimax_dev->mutex].
+ */
+void __wimax_state_change(struct wimax_dev *wimax_dev, enum wimax_st new_state)
+{
+ struct device *dev = wimax_dev_to_dev(wimax_dev);
+ enum wimax_st old_state = wimax_dev->state;
+ struct sk_buff *stch_skb;
+ void *header;
+
+ d_fnstart(3, dev, "(wimax_dev %p new_state %u [old %u])\n",
+ wimax_dev, new_state, old_state);
+
+ if (WARN_ON(new_state >= __WIMAX_ST_INVALID)) {
+ dev_err(dev, "SW BUG: requesting invalid state %u\n",
+ new_state);
+ goto out;
+ }
+ if (old_state == new_state)
+ goto out;
+	header = NULL; /* silence a spurious gcc maybe-uninitialized warning */
+ stch_skb = wimax_gnl_re_state_change_alloc(
+ wimax_dev, new_state, old_state, &header);
+
+ /* Verify the state transition and do exit-from-state actions */
+ switch (old_state) {
+ case __WIMAX_ST_NULL:
+ __check_new_state(old_state, new_state,
+ 1 << WIMAX_ST_DOWN);
+ break;
+ case WIMAX_ST_DOWN:
+ __check_new_state(old_state, new_state,
+ 1 << __WIMAX_ST_QUIESCING
+ | 1 << WIMAX_ST_UNINITIALIZED
+ | 1 << WIMAX_ST_RADIO_OFF);
+ break;
+ case __WIMAX_ST_QUIESCING:
+ __check_new_state(old_state, new_state, 1 << WIMAX_ST_DOWN);
+ break;
+ case WIMAX_ST_UNINITIALIZED:
+ __check_new_state(old_state, new_state,
+ 1 << __WIMAX_ST_QUIESCING
+ | 1 << WIMAX_ST_RADIO_OFF);
+ break;
+ case WIMAX_ST_RADIO_OFF:
+ __check_new_state(old_state, new_state,
+ 1 << __WIMAX_ST_QUIESCING
+ | 1 << WIMAX_ST_READY);
+ break;
+ case WIMAX_ST_READY:
+ __check_new_state(old_state, new_state,
+ 1 << __WIMAX_ST_QUIESCING
+ | 1 << WIMAX_ST_RADIO_OFF
+ | 1 << WIMAX_ST_SCANNING
+ | 1 << WIMAX_ST_CONNECTING
+ | 1 << WIMAX_ST_CONNECTED);
+ break;
+ case WIMAX_ST_SCANNING:
+ __check_new_state(old_state, new_state,
+ 1 << __WIMAX_ST_QUIESCING
+ | 1 << WIMAX_ST_RADIO_OFF
+ | 1 << WIMAX_ST_READY
+ | 1 << WIMAX_ST_CONNECTING
+ | 1 << WIMAX_ST_CONNECTED);
+ break;
+ case WIMAX_ST_CONNECTING:
+ __check_new_state(old_state, new_state,
+ 1 << __WIMAX_ST_QUIESCING
+ | 1 << WIMAX_ST_RADIO_OFF
+ | 1 << WIMAX_ST_READY
+ | 1 << WIMAX_ST_SCANNING
+ | 1 << WIMAX_ST_CONNECTED);
+ break;
+ case WIMAX_ST_CONNECTED:
+ __check_new_state(old_state, new_state,
+ 1 << __WIMAX_ST_QUIESCING
+ | 1 << WIMAX_ST_RADIO_OFF
+ | 1 << WIMAX_ST_READY);
+ netif_tx_disable(wimax_dev->net_dev);
+ netif_carrier_off(wimax_dev->net_dev);
+ break;
+ case __WIMAX_ST_INVALID:
+ default:
+ dev_err(dev, "SW BUG: wimax_dev %p is in unknown state %u\n",
+ wimax_dev, wimax_dev->state);
+ WARN_ON(1);
+ goto out;
+ }
+
+ /* Execute the actions of entry to the new state */
+ switch (new_state) {
+ case __WIMAX_ST_NULL:
+ dev_err(dev, "SW BUG: wimax_dev %p entering NULL state "
+ "from %u\n", wimax_dev, wimax_dev->state);
+ WARN_ON(1); /* Nobody can enter this state */
+ break;
+ case WIMAX_ST_DOWN:
+ break;
+ case __WIMAX_ST_QUIESCING:
+ break;
+ case WIMAX_ST_UNINITIALIZED:
+ break;
+ case WIMAX_ST_RADIO_OFF:
+ break;
+ case WIMAX_ST_READY:
+ break;
+ case WIMAX_ST_SCANNING:
+ break;
+ case WIMAX_ST_CONNECTING:
+ break;
+ case WIMAX_ST_CONNECTED:
+ netif_carrier_on(wimax_dev->net_dev);
+ netif_wake_queue(wimax_dev->net_dev);
+ break;
+ case __WIMAX_ST_INVALID:
+ default:
+ BUG();
+ }
+ __wimax_state_set(wimax_dev, new_state);
+ if (!IS_ERR(stch_skb))
+ wimax_gnl_re_state_change_send(wimax_dev, stch_skb, header);
+out:
+ d_fnend(3, dev, "(wimax_dev %p new_state %u [old %u]) = void\n",
+ wimax_dev, new_state, old_state);
+}
+
+
+/**
+ * wimax_state_change - Set the current state of a WiMAX device
+ *
+ * @wimax_dev: WiMAX device descriptor (properly referenced)
+ * @new_state: New state to switch to
+ *
+ * This implements the state changes for the wimax devices. It will
+ *
+ * - verify that the state transition is legal (for now it'll just
+ * print a warning if not) according to the table in
+ * linux-wimax.h's documentation for 'enum wimax_st'.
+ *
+ * - perform the actions needed for leaving the current state and
+ * whichever are needed for entering the new state.
+ *
+ * - issue a report to user space indicating the new state (and an
+ * optional payload with information about the new state).
+ *
+ * NOTE: this function takes @wimax_dev's mutex itself; call
+ * __wimax_state_change() instead if the mutex is already held.
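+ *
+ * A driver-side sketch reporting a link event (hypothetical helper;
+ * the device must be properly referenced):
+ *
+ *	if (my_link_is_up(priv))
+ *		wimax_state_change(&priv->wimax_dev, WIMAX_ST_CONNECTED);
+ *	else
+ *		wimax_state_change(&priv->wimax_dev, WIMAX_ST_READY);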
+ */
+void wimax_state_change(struct wimax_dev *wimax_dev, enum wimax_st new_state)
+{
+ /*
+ * A driver cannot take the wimax_dev out of the
+	 * __WIMAX_ST_NULL state except by calling wimax_dev_add(). If
+	 * the wimax_dev's state is still NULL, we ignore any request
+	 * to change its state because it means it hasn't yet been
+	 * registered.
+ *
+ * There is no need to complain about it, as routines that
+ * call this might be shared from different code paths that
+ * are called before or after wimax_dev_add() has done its
+ * job.
+ */
+ mutex_lock(&wimax_dev->mutex);
+ if (wimax_dev->state > __WIMAX_ST_NULL)
+ __wimax_state_change(wimax_dev, new_state);
+ mutex_unlock(&wimax_dev->mutex);
+}
+EXPORT_SYMBOL_GPL(wimax_state_change);
+
+
+/**
+ * wimax_state_get - Return the current state of a WiMAX device
+ *
+ * @wimax_dev: WiMAX device descriptor
+ *
+ * Returns: Current state of the device according to its driver.
+ */
+enum wimax_st wimax_state_get(struct wimax_dev *wimax_dev)
+{
+ enum wimax_st state;
+ mutex_lock(&wimax_dev->mutex);
+ state = wimax_dev->state;
+ mutex_unlock(&wimax_dev->mutex);
+ return state;
+}
+EXPORT_SYMBOL_GPL(wimax_state_get);
+
+
+/**
+ * wimax_dev_init - initialize a newly allocated instance
+ *
+ * @wimax_dev: WiMAX device descriptor to initialize.
+ *
+ * Initializes fields of a freshly allocated @wimax_dev instance. This
+ * function assumes that after allocation, the memory occupied by
+ * @wimax_dev was zeroed.
+ */
+void wimax_dev_init(struct wimax_dev *wimax_dev)
+{
+ INIT_LIST_HEAD(&wimax_dev->id_table_node);
+ __wimax_state_set(wimax_dev, __WIMAX_ST_NULL);
+ mutex_init(&wimax_dev->mutex);
+ mutex_init(&wimax_dev->mutex_reset);
+}
+EXPORT_SYMBOL_GPL(wimax_dev_init);
+
+/*
+ * There are multiple enums reusing the same values, adding
+ * others is only possible if they use a compatible policy.
+ */
+static const struct nla_policy wimax_gnl_policy[WIMAX_GNL_ATTR_MAX + 1] = {
+ /*
+ * WIMAX_GNL_RESET_IFIDX, WIMAX_GNL_RFKILL_IFIDX,
+ * WIMAX_GNL_STGET_IFIDX, WIMAX_GNL_MSG_IFIDX
+ */
+ [1] = { .type = NLA_U32, },
+ /*
+ * WIMAX_GNL_RFKILL_STATE, WIMAX_GNL_MSG_PIPE_NAME
+ */
+ [2] = { .type = NLA_U32, }, /* enum wimax_rf_state */
+ /*
+ * WIMAX_GNL_MSG_DATA
+ */
+ [3] = { .type = NLA_UNSPEC, }, /* libnl doesn't grok BINARY yet */
+};
+
+static const struct genl_small_ops wimax_gnl_ops[] = {
+ {
+ .cmd = WIMAX_GNL_OP_MSG_FROM_USER,
+ .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
+ .flags = GENL_ADMIN_PERM,
+ .doit = wimax_gnl_doit_msg_from_user,
+ },
+ {
+ .cmd = WIMAX_GNL_OP_RESET,
+ .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
+ .flags = GENL_ADMIN_PERM,
+ .doit = wimax_gnl_doit_reset,
+ },
+ {
+ .cmd = WIMAX_GNL_OP_RFKILL,
+ .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
+ .flags = GENL_ADMIN_PERM,
+ .doit = wimax_gnl_doit_rfkill,
+ },
+ {
+ .cmd = WIMAX_GNL_OP_STATE_GET,
+ .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
+ .flags = GENL_ADMIN_PERM,
+ .doit = wimax_gnl_doit_state_get,
+ },
+};
+
+
+static
+size_t wimax_addr_scnprint(char *addr_str, size_t addr_str_size,
+ unsigned char *addr, size_t addr_len)
+{
+ unsigned int cnt, total;
+
+ for (total = cnt = 0; cnt < addr_len; cnt++)
+ total += scnprintf(addr_str + total, addr_str_size - total,
+ "%02x%c", addr[cnt],
+ cnt == addr_len - 1 ? '\0' : ':');
+ return total;
+}
+
+
+/**
+ * wimax_dev_add - Register a new WiMAX device
+ *
+ * @wimax_dev: WiMAX device descriptor (as embedded in your @net_dev's
+ * priv data). You must have called wimax_dev_init() on it before.
+ *
+ * @net_dev: net device the @wimax_dev is associated with. The
+ * function expects SET_NETDEV_DEV() and register_netdev() were
+ * already called on it.
+ *
+ * Registers the new WiMAX device, sets up the user-kernel control
+ * interface (generic netlink) and common WiMAX infrastructure.
+ *
+ * Note that the parts that will allow interaction with user space are
+ * set up at the very end, when the rest is in place, as once that
+ * happens, the driver might get user space control requests via
+ * netlink or from debugfs that might translate into calls into
+ * wimax_dev->op_*().
+ */
+int wimax_dev_add(struct wimax_dev *wimax_dev, struct net_device *net_dev)
+{
+ int result;
+ struct device *dev = net_dev->dev.parent;
+ char addr_str[32];
+
+ d_fnstart(3, dev, "(wimax_dev %p net_dev %p)\n", wimax_dev, net_dev);
+
+ /* Do the RFKILL setup before locking, as RFKILL will call
+ * into our functions.
+ */
+ wimax_dev->net_dev = net_dev;
+ result = wimax_rfkill_add(wimax_dev);
+ if (result < 0)
+ goto error_rfkill_add;
+
+ /* Set up user-space interaction */
+ mutex_lock(&wimax_dev->mutex);
+ wimax_id_table_add(wimax_dev);
+ wimax_debugfs_add(wimax_dev);
+
+ __wimax_state_set(wimax_dev, WIMAX_ST_DOWN);
+ mutex_unlock(&wimax_dev->mutex);
+
+ wimax_addr_scnprint(addr_str, sizeof(addr_str),
+ net_dev->dev_addr, net_dev->addr_len);
+ dev_err(dev, "WiMAX interface %s (%s) ready\n",
+ net_dev->name, addr_str);
+ d_fnend(3, dev, "(wimax_dev %p net_dev %p) = 0\n", wimax_dev, net_dev);
+ return 0;
+
+error_rfkill_add:
+ d_fnend(3, dev, "(wimax_dev %p net_dev %p) = %d\n",
+ wimax_dev, net_dev, result);
+ return result;
+}
+EXPORT_SYMBOL_GPL(wimax_dev_add);
+
+
+/**
+ * wimax_dev_rm - Unregister an existing WiMAX device
+ *
+ * @wimax_dev: WiMAX device descriptor
+ *
+ * Unregisters a WiMAX device previously registered with
+ * wimax_dev_add().
+ *
+ * IMPORTANT! Must call before calling unregister_netdev().
+ *
+ * After this function returns, you will not get any more user space
+ * control requests (via netlink or debugfs) and thus no more calls
+ * through the wimax_dev->op_*() function pointers.
+ *
+ * Reentrancy control is ensured by setting the state to
+ * %__WIMAX_ST_QUIESCING. rfkill operations coming through
+ * wimax_*rfkill*() will be stopped by the quiescing state; ops coming
+ * from the rfkill subsystem will be stopped by the support being
+ * removed by wimax_rfkill_rm().
+ */
+void wimax_dev_rm(struct wimax_dev *wimax_dev)
+{
+ d_fnstart(3, NULL, "(wimax_dev %p)\n", wimax_dev);
+
+ mutex_lock(&wimax_dev->mutex);
+ __wimax_state_change(wimax_dev, __WIMAX_ST_QUIESCING);
+ wimax_debugfs_rm(wimax_dev);
+ wimax_id_table_rm(wimax_dev);
+ __wimax_state_change(wimax_dev, WIMAX_ST_DOWN);
+ mutex_unlock(&wimax_dev->mutex);
+ wimax_rfkill_rm(wimax_dev);
+ d_fnend(3, NULL, "(wimax_dev %p) = void\n", wimax_dev);
+}
+EXPORT_SYMBOL_GPL(wimax_dev_rm);
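
And the matching teardown, mirroring the ordering requirement above (the remove path is hypothetical; wimax_dev_rm() must run strictly before unregister_netdev()):

	static void example_remove(struct net_device *net_dev)
	{
		struct example_priv *priv = netdev_priv(net_dev);

		wimax_dev_rm(&priv->wimax_dev);	/* cuts off user space first */
		unregister_netdev(net_dev);	/* only then drop the netdev */
	}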
+
+
+/* Debug framework control of debug levels */
+struct d_level D_LEVEL[] = {
+ D_SUBMODULE_DEFINE(debugfs),
+ D_SUBMODULE_DEFINE(id_table),
+ D_SUBMODULE_DEFINE(op_msg),
+ D_SUBMODULE_DEFINE(op_reset),
+ D_SUBMODULE_DEFINE(op_rfkill),
+ D_SUBMODULE_DEFINE(op_state_get),
+ D_SUBMODULE_DEFINE(stack),
+};
+size_t D_LEVEL_SIZE = ARRAY_SIZE(D_LEVEL);
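
Each submodule listed above gets an independently tunable debug level, parsed at init time by the d_parse_params() call below from the wimax.debug module parameter. Assuming the usual token format of this debug framework, something like wimax.debug="stack:3 op_msg:1" on the kernel command line would raise the stack and op_msg levels; treat the exact syntax as an assumption and verify it against the framework's header.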
+
+
+static const struct genl_multicast_group wimax_gnl_mcgrps[] = {
+ { .name = "msg", },
+};
+
+struct genl_family wimax_gnl_family __ro_after_init = {
+ .name = "WiMAX",
+ .version = WIMAX_GNL_VERSION,
+ .hdrsize = 0,
+ .maxattr = WIMAX_GNL_ATTR_MAX,
+ .policy = wimax_gnl_policy,
+ .module = THIS_MODULE,
+ .small_ops = wimax_gnl_ops,
+ .n_small_ops = ARRAY_SIZE(wimax_gnl_ops),
+ .mcgrps = wimax_gnl_mcgrps,
+ .n_mcgrps = ARRAY_SIZE(wimax_gnl_mcgrps),
+};
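
With the family registered, the stack can also push unsolicited messages to listeners on the "msg" multicast group (index 0 above). A hedged sketch of what such a send could look like; WIMAX_GNL_OP_MSG_TO_USER and WIMAX_GNL_MSG_DATA come from the stack's uapi enums, but the framing here is illustrative, not the stack's actual wimax_msg() implementation:

	static int example_msg_to_user(const void *buf, size_t size)
	{
		struct sk_buff *skb;
		void *hdr;

		skb = genlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
		if (skb == NULL)
			return -ENOMEM;
		hdr = genlmsg_put(skb, 0, 0, &wimax_gnl_family, 0,
				  WIMAX_GNL_OP_MSG_TO_USER);
		if (hdr == NULL)
			goto error;
		if (nla_put(skb, WIMAX_GNL_MSG_DATA, size, buf))
			goto error;
		genlmsg_end(skb, hdr);
		/* group 0 == "msg" in wimax_gnl_mcgrps */
		return genlmsg_multicast(&wimax_gnl_family, skb, 0, 0, GFP_KERNEL);
	error:
		nlmsg_free(skb);
		return -ENOBUFS;
	}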
+
+
+/* Initialize the wimax stack */
+static
+int __init wimax_subsys_init(void)
+{
+ int result;
+
+ d_fnstart(4, NULL, "()\n");
+ d_parse_params(D_LEVEL, D_LEVEL_SIZE, wimax_debug_params,
+ "wimax.debug");
+
+ result = genl_register_family(&wimax_gnl_family);
+ if (unlikely(result < 0)) {
+ pr_err("cannot register generic netlink family: %d\n", result);
+ goto error_register_family;
+ }
+
+ d_fnend(4, NULL, "() = 0\n");
+ return 0;
+
+error_register_family:
+ d_fnend(4, NULL, "() = %d\n", result);
+ return result;
+}
+module_init(wimax_subsys_init);
+
+
+/* Shutdown the wimax stack */
+static
+void __exit wimax_subsys_exit(void)
+{
+ wimax_id_table_release();
+ genl_unregister_family(&wimax_gnl_family);
+}
+module_exit(wimax_subsys_exit);
+
+MODULE_AUTHOR("Intel Corporation <linux-wimax@intel.com>");
+MODULE_DESCRIPTION("Linux WiMAX stack");
+MODULE_LICENSE("GPL");
diff --git a/drivers/staging/wimax/wimax-internal.h b/drivers/staging/wimax/wimax-internal.h
new file mode 100644
index 000000000000..a6b6990642a1
--- /dev/null
+++ b/drivers/staging/wimax/wimax-internal.h
@@ -0,0 +1,85 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Linux WiMAX
+ * Internal API for kernel space WiMAX stack
+ *
+ * Copyright (C) 2007 Intel Corporation <linux-wimax@intel.com>
+ * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
+ *
+ * This header file is for declarations and definitions internal to
+ * the WiMAX stack. For public APIs and documentation, see
+ * net-wimax.h and linux-wimax.h in this directory.
+ */
+
+#ifndef __WIMAX_INTERNAL_H__
+#define __WIMAX_INTERNAL_H__
+#ifdef __KERNEL__
+
+#ifdef pr_fmt
+#undef pr_fmt
+#endif
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/device.h>
+#include "net-wimax.h"
+
+
+/*
+ * Decide if a (locked) device is ready for use
+ *
+ * Before using the device structure, it must be locked
+ * (wimax_dev->mutex). In addition, most operations need to call this
+ * function to check that the state is the right one.
+ *
+ * An error value is returned if the state is not the right one. In
+ * that case, the caller should not attempt to use the device and
+ * should just unlock it.
+ */
+static inline __must_check
+int wimax_dev_is_ready(struct wimax_dev *wimax_dev)
+{
+ if (wimax_dev->state == __WIMAX_ST_NULL)
+ return -EINVAL; /* Device is not even registered! */
+ if (wimax_dev->state == WIMAX_ST_DOWN)
+ return -ENOMEDIUM;
+ if (wimax_dev->state == __WIMAX_ST_QUIESCING)
+ return -ESHUTDOWN;
+ return 0;
+}
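
The lock/check/unlock pattern the comment above describes, as a minimal sketch (the operation itself is hypothetical):

	static int example_locked_op(struct wimax_dev *wimax_dev)
	{
		int result;

		mutex_lock(&wimax_dev->mutex);
		result = wimax_dev_is_ready(wimax_dev);
		if (result < 0)
			goto out_unlock;	/* wrong state: don't touch the device */

		/* device is registered, up and not quiescing: safe to operate */
		result = 0;
	out_unlock:
		mutex_unlock(&wimax_dev->mutex);
		return result;
	}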
+
+
+static inline
+void __wimax_state_set(struct wimax_dev *wimax_dev, enum wimax_st state)
+{
+ wimax_dev->state = state;
+}
+void __wimax_state_change(struct wimax_dev *, enum wimax_st);
+
+#ifdef CONFIG_DEBUG_FS
+void wimax_debugfs_add(struct wimax_dev *);
+void wimax_debugfs_rm(struct wimax_dev *);
+#else
+static inline void wimax_debugfs_add(struct wimax_dev *wimax_dev) {}
+static inline void wimax_debugfs_rm(struct wimax_dev *wimax_dev) {}
+#endif
+
+void wimax_id_table_add(struct wimax_dev *);
+struct wimax_dev *wimax_dev_get_by_genl_info(struct genl_info *, int);
+void wimax_id_table_rm(struct wimax_dev *);
+void wimax_id_table_release(void);
+
+int wimax_rfkill_add(struct wimax_dev *);
+void wimax_rfkill_rm(struct wimax_dev *);
+
+/* generic netlink */
+extern struct genl_family wimax_gnl_family;
+
+/* ops */
+int wimax_gnl_doit_msg_from_user(struct sk_buff *skb, struct genl_info *info);
+int wimax_gnl_doit_reset(struct sk_buff *skb, struct genl_info *info);
+int wimax_gnl_doit_rfkill(struct sk_buff *skb, struct genl_info *info);
+int wimax_gnl_doit_state_get(struct sk_buff *skb, struct genl_info *info);
+
+#endif /* #ifdef __KERNEL__ */
+#endif /* #ifndef __WIMAX_INTERNAL_H__ */